From 80bbf94169c0714efae98ddbfdb7c4d28032f043 Mon Sep 17 00:00:00 2001 From: chenhanzhang Date: Wed, 26 Nov 2025 23:42:41 +0800 Subject: [PATCH] New Resource: alicloud_realtime_compute_deployment; New Resource: alicloud_realtime_compute_job. --- alicloud/provider.go | 2 + ...ce_alicloud_realtime_compute_deployment.go | 1455 +++++++++++++++++ ...icloud_realtime_compute_deployment_test.go | 1243 ++++++++++++++ .../resource_alicloud_realtime_compute_job.go | 460 ++++++ ...urce_alicloud_realtime_compute_job_test.go | 132 ++ .../service_alicloud_realtime_compute_v2.go | 171 ++ .../realtime_compute_deployment.html.markdown | 283 ++++ .../docs/r/realtime_compute_job.html.markdown | 166 ++ 8 files changed, 3912 insertions(+) create mode 100644 alicloud/resource_alicloud_realtime_compute_deployment.go create mode 100644 alicloud/resource_alicloud_realtime_compute_deployment_test.go create mode 100644 alicloud/resource_alicloud_realtime_compute_job.go create mode 100644 alicloud/resource_alicloud_realtime_compute_job_test.go create mode 100644 website/docs/r/realtime_compute_deployment.html.markdown create mode 100644 website/docs/r/realtime_compute_job.html.markdown diff --git a/alicloud/provider.go b/alicloud/provider.go index 6faaf138c57d..9cc40786d1b4 100644 --- a/alicloud/provider.go +++ b/alicloud/provider.go @@ -916,6 +916,8 @@ func Provider() terraform.ResourceProvider { "alicloud_vpc_ipam_ipams": dataSourceAliCloudVpcIpamIpams(), }, ResourcesMap: map[string]*schema.Resource{ + "alicloud_realtime_compute_job": resourceAliCloudRealtimeComputeJob(), + "alicloud_realtime_compute_deployment": resourceAliCloudRealtimeComputeDeployment(), "alicloud_cloud_firewall_ai_traffic_analysis_status": resourceAliCloudCloudFirewallAiTrafficAnalysisStatus(), "alicloud_eflo_vpd_grant_rule": resourceAliCloudEfloVpdGrantRule(), "alicloud_wafv3_defense_resource_group": resourceAliCloudWafv3DefenseResourceGroup(), diff --git a/alicloud/resource_alicloud_realtime_compute_deployment.go 
// Package alicloud. This file is generated automatically. Please do not modify it manually, thank you!
package alicloud

import (
	"fmt"
	"log"
	"strings"
	"time"

	"github.com/PaesslerAG/jsonpath"
	"github.com/aliyun/terraform-provider-alicloud/alicloud/connectivity"
	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// resourceAliCloudRealtimeComputeDeployment declares the Terraform schema for the
// alicloud_realtime_compute_deployment resource (a Flink deployment inside an
// Alibaba Cloud Realtime Compute / Ververica namespace) and wires up its CRUD
// handlers. The resource ID is a composite "workspace:namespace:deploymentId"
// (see the Create handler), which is why plain ImportStatePassthrough works.
func resourceAliCloudRealtimeComputeDeployment() *schema.Resource {
	return &schema.Resource{
		Create: resourceAliCloudRealtimeComputeDeploymentCreate,
		Read:   resourceAliCloudRealtimeComputeDeploymentRead,
		Update: resourceAliCloudRealtimeComputeDeploymentUpdate,
		Delete: resourceAliCloudRealtimeComputeDeploymentDelete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},
		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(5 * time.Minute),
			Update: schema.DefaultTimeout(5 * time.Minute),
			Delete: schema.DefaultTimeout(5 * time.Minute),
		},
		Schema: map[string]*schema.Schema{
			// What is deployed. Exactly one of jar_artifact / sql_artifact /
			// python_artifact is expected to match "kind" (not enforced by the
			// schema itself; presumably validated server-side — TODO confirm).
			"artifact": {
				Type:     schema.TypeList,
				Required: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						// Artifact flavor; changing it forces re-creation.
						"kind": {
							Type:         schema.TypeString,
							Required:     true,
							ForceNew:     true,
							ValidateFunc: StringInSlice([]string{"SQLSCRIPT", "JAR", "PYTHON"}, false),
						},
						"jar_artifact": {
							Type:     schema.TypeList,
							Optional: true,
							MaxItems: 1,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"jar_uri": {
										Type:     schema.TypeString,
										Optional: true,
									},
									"main_args": {
										Type:     schema.TypeString,
										Optional: true,
									},
									"entry_class": {
										Type:     schema.TypeString,
										Optional: true,
									},
									"additional_dependencies": {
										Type:     schema.TypeList,
										Optional: true,
										Elem:     &schema.Schema{Type: schema.TypeString},
									},
								},
							},
						},
						"sql_artifact": {
							Type:     schema.TypeList,
							Optional: true,
							MaxItems: 1,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"sql_script": {
										Type:     schema.TypeString,
										Optional: true,
									},
									"additional_dependencies": {
										Type:     schema.TypeList,
										Optional: true,
										Elem:     &schema.Schema{Type: schema.TypeString},
									},
								},
							},
						},
						"python_artifact": {
							Type:     schema.TypeList,
							Optional: true,
							MaxItems: 1,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"entry_module": {
										Type:     schema.TypeString,
										Optional: true,
									},
									"main_args": {
										Type:     schema.TypeString,
										Optional: true,
									},
									"additional_python_archives": {
										Type:     schema.TypeList,
										Optional: true,
										Elem:     &schema.Schema{Type: schema.TypeString},
									},
									"additional_python_libraries": {
										Type:     schema.TypeList,
										Optional: true,
										Elem:     &schema.Schema{Type: schema.TypeString},
									},
									"additional_dependencies": {
										Type:     schema.TypeList,
										Optional: true,
										Elem:     &schema.Schema{Type: schema.TypeString},
									},
									"python_artifact_uri": {
										Type:     schema.TypeString,
										Optional: true,
									},
								},
							},
						},
					},
				},
			},
			// Resource sizing used when execution_mode is BATCH.
			"batch_resource_setting": {
				Type:     schema.TypeList,
				Optional: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"basic_resource_setting": {
							Type:     schema.TypeList,
							Optional: true,
							MaxItems: 1,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"taskmanager_resource_setting_spec": {
										Type:     schema.TypeList,
										Optional: true,
										MaxItems: 1,
										Elem: &schema.Resource{
											Schema: map[string]*schema.Schema{
												"memory": {
													Type:     schema.TypeString,
													Optional: true,
												},
												"cpu": {
													Type:     schema.TypeFloat,
													Optional: true,
												},
											},
										},
									},
									"parallelism": {
										Type:     schema.TypeInt,
										Optional: true,
									},
									"jobmanager_resource_setting_spec": {
										Type:     schema.TypeList,
										Optional: true,
										MaxItems: 1,
										Elem: &schema.Resource{
											Schema: map[string]*schema.Schema{
												"memory": {
													Type:     schema.TypeString,
													Optional: true,
												},
												"cpu": {
													Type:     schema.TypeFloat,
													Optional: true,
												},
											},
										},
									},
								},
							},
						},
						"max_slot": {
							Type:     schema.TypeInt,
							Optional: true,
						},
					},
				},
			},
			// Server-assigned deployment identifier (last segment of the composite ID).
			"deployment_id": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"deployment_name": {
				Type:     schema.TypeString,
				Required: true,
			},
			// Where the deployment runs (session cluster vs. per-job cluster).
			"deployment_target": {
				Type:     schema.TypeList,
				Required: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"mode": {
							Type:         schema.TypeString,
							Required:     true,
							ValidateFunc: StringInSlice([]string{"SESSION", "PER_JOB"}, false),
						},
						"name": {
							Type:     schema.TypeString,
							Required: true,
						},
					},
				},
			},
			"description": {
				Type:     schema.TypeString,
				Optional: true,
			},
			// Defaulted by the service when omitted, hence Computed.
			"engine_version": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},
			// Changing the execution mode forces re-creation.
			"execution_mode": {
				Type:         schema.TypeString,
				Required:     true,
				ForceNew:     true,
				ValidateFunc: StringInSlice([]string{"STREAMING", "BATCH"}, false),
			},
			"flink_conf": {
				Type:     schema.TypeMap,
				Optional: true,
				Computed: true,
			},
			"labels": {
				Type:     schema.TypeMap,
				Optional: true,
			},
			// Unordered name/value pairs, hence a set rather than a list.
			"local_variables": {
				Type:     schema.TypeSet,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"value": {
							Type:     schema.TypeString,
							Optional: true,
						},
						"name": {
							Type:     schema.TypeString,
							Optional: true,
						},
					},
				},
			},
			"logging": {
				Type:     schema.TypeList,
				Optional: true,
				Computed: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"log4j2_configuration_template": {
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
						},
						"logging_profile": {
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
						},
						"log4j_loggers": {
							Type:     schema.TypeSet,
							Optional: true,
							Computed: true,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"logger_name": {
										Type:     schema.TypeString,
										Optional: true,
										Computed: true,
									},
									"logger_level": {
										Type:     schema.TypeString,
										Optional: true,
										Computed: true,
									},
								},
							},
						},
						"log_reserve_policy": {
							Type:     schema.TypeList,
							Optional: true,
							Computed: true,
							MaxItems: 1,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"open_history": {
										Type:     schema.TypeBool,
										Optional: true,
										Computed: true,
									},
									"expiration_days": {
										Type:     schema.TypeInt,
										Optional: true,
										Computed: true,
									},
								},
							},
						},
					},
				},
			},
			// Namespace the deployment lives in; part of the REST path, so immutable.
			"namespace": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// Ververica workspace ID; sent as the "workspace" request header.
			"resource_id": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},
			// Resource sizing used when execution_mode is STREAMING.
			"streaming_resource_setting": {
				Type:     schema.TypeList,
				Optional: true,
				Computed: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"basic_resource_setting": {
							Type:     schema.TypeList,
							Optional: true,
							Computed: true,
							MaxItems: 1,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"taskmanager_resource_setting_spec": {
										Type:     schema.TypeList,
										Optional: true,
										Computed: true,
										MaxItems: 1,
										Elem: &schema.Resource{
											Schema: map[string]*schema.Schema{
												"memory": {
													Type:     schema.TypeString,
													Optional: true,
													Computed: true,
												},
												"cpu": {
													Type:     schema.TypeFloat,
													Optional: true,
													Computed: true,
												},
											},
										},
									},
									"parallelism": {
										Type:     schema.TypeInt,
										Optional: true,
										Computed: true,
									},
									"jobmanager_resource_setting_spec": {
										Type:     schema.TypeList,
										Optional: true,
										Computed: true,
										MaxItems: 1,
										Elem: &schema.Resource{
											Schema: map[string]*schema.Schema{
												"memory": {
													Type:     schema.TypeString,
													Optional: true,
													Computed: true,
												},
												"cpu": {
													Type:     schema.TypeFloat,
													Optional: true,
													Computed: true,
												},
											},
										},
									},
								},
							},
						},
						"resource_setting_mode": {
							Type:         schema.TypeString,
							Optional:     true,
							Computed:     true,
							ValidateFunc: StringInSlice([]string{"BASIC", "EXPERT"}, false),
						},
						"expert_resource_setting": {
							Type:     schema.TypeList,
							Optional: true,
							Computed: true,
							MaxItems: 1,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"resource_plan": {
										Type:     schema.TypeString,
										Optional: true,
									},
									"jobmanager_resource_setting_spec": {
										Type:     schema.TypeList,
										Optional: true,
										MaxItems: 1,
										Elem: &schema.Resource{
											Schema: map[string]*schema.Schema{
												"memory": {
													Type:     schema.TypeString,
													Optional: true,
												},
												"cpu": {
													Type:     schema.TypeFloat,
													Optional: true,
												},
											},
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}
}
// resourceAliCloudRealtimeComputeDeploymentCreate creates a Flink deployment via
// POST /api/v2/namespaces/{namespace}/deployments on the "ververica" ROA endpoint
// (API version 2022-07-18). It flattens the nested Terraform config blocks into
// the camelCase JSON body the API expects (via jsonpath lookups into the raw
// config values), retries on retryable errors within the create timeout, and
// sets the resource ID to "workspace:namespace:deploymentId" taken from the
// response before delegating to the Read handler.
func resourceAliCloudRealtimeComputeDeploymentCreate(d *schema.ResourceData, meta interface{}) error {

	client := meta.(*connectivity.AliyunClient)

	namespace := d.Get("namespace")
	action := fmt.Sprintf("/api/v2/namespaces/%s/deployments", namespace)
	var request map[string]interface{}
	var response map[string]interface{}
	header := make(map[string]*string)
	query := make(map[string]*string)
	body := make(map[string]interface{})
	var err error
	request = make(map[string]interface{})
	// NOTE(review): resource_id is Optional+Computed; when unset this sends an
	// empty "workspace" header — presumably the service then uses a default
	// workspace. Confirm against the API contract.
	header["workspace"] = StringPointer(d.Get("resource_id").(string))

	streamingResourceSetting := make(map[string]interface{})

	// Flatten streaming_resource_setting -> streamingResourceSetting.
	// Each leaf is read with a jsonpath into the raw config list and only
	// included when non-nil/non-empty, so absent sub-blocks are omitted
	// from the request body entirely.
	if v := d.Get("streaming_resource_setting"); !IsNil(v) {
		expertResourceSetting := make(map[string]interface{})
		jobmanagerResourceSettingSpec := make(map[string]interface{})
		cpu1, _ := jsonpath.Get("$[0].expert_resource_setting[0].jobmanager_resource_setting_spec[0].cpu", d.Get("streaming_resource_setting"))
		if cpu1 != nil && cpu1 != "" {
			jobmanagerResourceSettingSpec["cpu"] = cpu1
		}
		memory1, _ := jsonpath.Get("$[0].expert_resource_setting[0].jobmanager_resource_setting_spec[0].memory", d.Get("streaming_resource_setting"))
		if memory1 != nil && memory1 != "" {
			jobmanagerResourceSettingSpec["memory"] = memory1
		}

		if len(jobmanagerResourceSettingSpec) > 0 {
			expertResourceSetting["jobmanagerResourceSettingSpec"] = jobmanagerResourceSettingSpec
		}
		resourcePlan1, _ := jsonpath.Get("$[0].expert_resource_setting[0].resource_plan", d.Get("streaming_resource_setting"))
		if resourcePlan1 != nil && resourcePlan1 != "" {
			expertResourceSetting["resourcePlan"] = resourcePlan1
		}

		if len(expertResourceSetting) > 0 {
			streamingResourceSetting["expertResourceSetting"] = expertResourceSetting
		}
		resourceSettingMode1, _ := jsonpath.Get("$[0].resource_setting_mode", v)
		if resourceSettingMode1 != nil && resourceSettingMode1 != "" {
			streamingResourceSetting["resourceSettingMode"] = resourceSettingMode1
		}
		basicResourceSetting := make(map[string]interface{})
		taskmanagerResourceSettingSpec := make(map[string]interface{})
		memory3, _ := jsonpath.Get("$[0].basic_resource_setting[0].taskmanager_resource_setting_spec[0].memory", d.Get("streaming_resource_setting"))
		if memory3 != nil && memory3 != "" {
			taskmanagerResourceSettingSpec["memory"] = memory3
		}
		cpu3, _ := jsonpath.Get("$[0].basic_resource_setting[0].taskmanager_resource_setting_spec[0].cpu", d.Get("streaming_resource_setting"))
		if cpu3 != nil && cpu3 != "" {
			taskmanagerResourceSettingSpec["cpu"] = cpu3
		}

		if len(taskmanagerResourceSettingSpec) > 0 {
			basicResourceSetting["taskmanagerResourceSettingSpec"] = taskmanagerResourceSettingSpec
		}
		jobmanagerResourceSettingSpec1 := make(map[string]interface{})
		memory5, _ := jsonpath.Get("$[0].basic_resource_setting[0].jobmanager_resource_setting_spec[0].memory", d.Get("streaming_resource_setting"))
		if memory5 != nil && memory5 != "" {
			jobmanagerResourceSettingSpec1["memory"] = memory5
		}
		cpu5, _ := jsonpath.Get("$[0].basic_resource_setting[0].jobmanager_resource_setting_spec[0].cpu", d.Get("streaming_resource_setting"))
		if cpu5 != nil && cpu5 != "" {
			jobmanagerResourceSettingSpec1["cpu"] = cpu5
		}

		if len(jobmanagerResourceSettingSpec1) > 0 {
			basicResourceSetting["jobmanagerResourceSettingSpec"] = jobmanagerResourceSettingSpec1
		}
		parallelism1, _ := jsonpath.Get("$[0].basic_resource_setting[0].parallelism", d.Get("streaming_resource_setting"))
		if parallelism1 != nil && parallelism1 != "" {
			basicResourceSetting["parallelism"] = parallelism1
		}

		if len(basicResourceSetting) > 0 {
			streamingResourceSetting["basicResourceSetting"] = basicResourceSetting
		}

		request["streamingResourceSetting"] = streamingResourceSetting
	}

	if v, ok := d.GetOk("flink_conf"); ok {
		request["flinkConf"] = v
	}
	logging := make(map[string]interface{})

	// Flatten logging -> logging (log4j loggers, reserve policy, profile, template).
	if v := d.Get("logging"); !IsNil(v) {
		// NOTE(review): the inner `v, ok := d.GetOk(...)` shadows the outer v, and
		// `localData, err :=` shadows the outer err; both are intentional in this
		// generated pattern (the jsonpath error is only used to default localData).
		if v, ok := d.GetOk("logging"); ok {
			localData, err := jsonpath.Get("$[0].log4j_loggers", v)
			if err != nil {
				localData = make([]interface{}, 0)
			}
			localMaps := make([]interface{}, 0)
			for _, dataLoop := range convertToInterfaceArray(localData) {
				dataLoopTmp := make(map[string]interface{})
				if dataLoop != nil {
					dataLoopTmp = dataLoop.(map[string]interface{})
				}
				dataLoopMap := make(map[string]interface{})
				dataLoopMap["loggerLevel"] = dataLoopTmp["logger_level"]
				dataLoopMap["loggerName"] = dataLoopTmp["logger_name"]
				localMaps = append(localMaps, dataLoopMap)
			}
			logging["log4jLoggers"] = localMaps
		}

		logReservePolicy := make(map[string]interface{})
		openHistory1, _ := jsonpath.Get("$[0].log_reserve_policy[0].open_history", d.Get("logging"))
		if openHistory1 != nil && openHistory1 != "" {
			logReservePolicy["openHistory"] = openHistory1
		}
		expirationDays1, _ := jsonpath.Get("$[0].log_reserve_policy[0].expiration_days", d.Get("logging"))
		if expirationDays1 != nil && expirationDays1 != "" {
			logReservePolicy["expirationDays"] = expirationDays1
		}

		if len(logReservePolicy) > 0 {
			logging["logReservePolicy"] = logReservePolicy
		}
		loggingProfile1, _ := jsonpath.Get("$[0].logging_profile", v)
		if loggingProfile1 != nil && loggingProfile1 != "" {
			logging["loggingProfile"] = loggingProfile1
		}
		log4J2ConfigurationTemplate, _ := jsonpath.Get("$[0].log4j2_configuration_template", v)
		if log4J2ConfigurationTemplate != nil && log4J2ConfigurationTemplate != "" {
			logging["log4j2ConfigurationTemplate"] = log4J2ConfigurationTemplate
		}

		request["logging"] = logging
	}

	batchResourceSetting := make(map[string]interface{})

	// Flatten batch_resource_setting -> batchResourceSetting.
	if v := d.Get("batch_resource_setting"); !IsNil(v) {
		basicResourceSetting1 := make(map[string]interface{})
		taskmanagerResourceSettingSpec1 := make(map[string]interface{})
		cpu7, _ := jsonpath.Get("$[0].basic_resource_setting[0].taskmanager_resource_setting_spec[0].cpu", d.Get("batch_resource_setting"))
		if cpu7 != nil && cpu7 != "" {
			taskmanagerResourceSettingSpec1["cpu"] = cpu7
		}
		memory7, _ := jsonpath.Get("$[0].basic_resource_setting[0].taskmanager_resource_setting_spec[0].memory", d.Get("batch_resource_setting"))
		if memory7 != nil && memory7 != "" {
			taskmanagerResourceSettingSpec1["memory"] = memory7
		}

		if len(taskmanagerResourceSettingSpec1) > 0 {
			basicResourceSetting1["taskmanagerResourceSettingSpec"] = taskmanagerResourceSettingSpec1
		}
		parallelism3, _ := jsonpath.Get("$[0].basic_resource_setting[0].parallelism", d.Get("batch_resource_setting"))
		if parallelism3 != nil && parallelism3 != "" {
			basicResourceSetting1["parallelism"] = parallelism3
		}
		jobmanagerResourceSettingSpec2 := make(map[string]interface{})
		cpu9, _ := jsonpath.Get("$[0].basic_resource_setting[0].jobmanager_resource_setting_spec[0].cpu", d.Get("batch_resource_setting"))
		if cpu9 != nil && cpu9 != "" {
			jobmanagerResourceSettingSpec2["cpu"] = cpu9
		}
		memory9, _ := jsonpath.Get("$[0].basic_resource_setting[0].jobmanager_resource_setting_spec[0].memory", d.Get("batch_resource_setting"))
		if memory9 != nil && memory9 != "" {
			jobmanagerResourceSettingSpec2["memory"] = memory9
		}

		if len(jobmanagerResourceSettingSpec2) > 0 {
			basicResourceSetting1["jobmanagerResourceSettingSpec"] = jobmanagerResourceSettingSpec2
		}

		if len(basicResourceSetting1) > 0 {
			batchResourceSetting["basicResourceSetting"] = basicResourceSetting1
		}
		maxSlot1, _ := jsonpath.Get("$[0].max_slot", v)
		if maxSlot1 != nil && maxSlot1 != "" {
			batchResourceSetting["maxSlot"] = maxSlot1
		}

		request["batchResourceSetting"] = batchResourceSetting
	}

	if v, ok := d.GetOk("labels"); ok {
		request["labels"] = v
	}
	// local_variables is a set; convert each element to the API's {name,value} shape.
	if v, ok := d.GetOk("local_variables"); ok {
		localVariablesMapsArray := make([]interface{}, 0)
		for _, dataLoop1 := range convertToInterfaceArray(v) {
			dataLoop1Tmp := dataLoop1.(map[string]interface{})
			dataLoop1Map := make(map[string]interface{})
			dataLoop1Map["value"] = dataLoop1Tmp["value"]
			dataLoop1Map["name"] = dataLoop1Tmp["name"]
			localVariablesMapsArray = append(localVariablesMapsArray, dataLoop1Map)
		}
		request["localVariables"] = localVariablesMapsArray
	}

	if v, ok := d.GetOk("engine_version"); ok {
		request["engineVersion"] = v
	}
	deploymentTarget := make(map[string]interface{})

	if v := d.Get("deployment_target"); v != nil {
		name3, _ := jsonpath.Get("$[0].name", v)
		if name3 != nil && name3 != "" {
			deploymentTarget["name"] = name3
		}
		mode1, _ := jsonpath.Get("$[0].mode", v)
		if mode1 != nil && mode1 != "" {
			deploymentTarget["mode"] = mode1
		}

		request["deploymentTarget"] = deploymentTarget
	}

	request["executionMode"] = d.Get("execution_mode")
	artifact := make(map[string]interface{})

	// Flatten artifact -> artifact (python/jar/sql sub-artifacts plus kind).
	if v := d.Get("artifact"); v != nil {
		pythonArtifact := make(map[string]interface{})
		additionalDependencies1, _ := jsonpath.Get("$[0].python_artifact[0].additional_dependencies", d.Get("artifact"))
		if additionalDependencies1 != nil && additionalDependencies1 != "" {
			pythonArtifact["additionalDependencies"] = additionalDependencies1
		}
		additionalPythonArchives1, _ := jsonpath.Get("$[0].python_artifact[0].additional_python_archives", d.Get("artifact"))
		if additionalPythonArchives1 != nil && additionalPythonArchives1 != "" {
			pythonArtifact["additionalPythonArchives"] = additionalPythonArchives1
		}
		pythonArtifactUri1, _ := jsonpath.Get("$[0].python_artifact[0].python_artifact_uri", d.Get("artifact"))
		if pythonArtifactUri1 != nil && pythonArtifactUri1 != "" {
			pythonArtifact["pythonArtifactUri"] = pythonArtifactUri1
		}
		mainArgs1, _ := jsonpath.Get("$[0].python_artifact[0].main_args", d.Get("artifact"))
		if mainArgs1 != nil && mainArgs1 != "" {
			pythonArtifact["mainArgs"] = mainArgs1
		}
		additionalPythonLibraries1, _ := jsonpath.Get("$[0].python_artifact[0].additional_python_libraries", d.Get("artifact"))
		if additionalPythonLibraries1 != nil && additionalPythonLibraries1 != "" {
			pythonArtifact["additionalPythonLibraries"] = additionalPythonLibraries1
		}
		entryModule1, _ := jsonpath.Get("$[0].python_artifact[0].entry_module", d.Get("artifact"))
		if entryModule1 != nil && entryModule1 != "" {
			pythonArtifact["entryModule"] = entryModule1
		}

		if len(pythonArtifact) > 0 {
			artifact["pythonArtifact"] = pythonArtifact
		}
		jarArtifact := make(map[string]interface{})
		jarUri1, _ := jsonpath.Get("$[0].jar_artifact[0].jar_uri", d.Get("artifact"))
		if jarUri1 != nil && jarUri1 != "" {
			jarArtifact["jarUri"] = jarUri1
		}
		additionalDependencies3, _ := jsonpath.Get("$[0].jar_artifact[0].additional_dependencies", d.Get("artifact"))
		if additionalDependencies3 != nil && additionalDependencies3 != "" {
			jarArtifact["additionalDependencies"] = additionalDependencies3
		}
		mainArgs3, _ := jsonpath.Get("$[0].jar_artifact[0].main_args", d.Get("artifact"))
		if mainArgs3 != nil && mainArgs3 != "" {
			jarArtifact["mainArgs"] = mainArgs3
		}
		entryClass1, _ := jsonpath.Get("$[0].jar_artifact[0].entry_class", d.Get("artifact"))
		if entryClass1 != nil && entryClass1 != "" {
			jarArtifact["entryClass"] = entryClass1
		}

		if len(jarArtifact) > 0 {
			artifact["jarArtifact"] = jarArtifact
		}
		sqlArtifact := make(map[string]interface{})
		sqlScript1, _ := jsonpath.Get("$[0].sql_artifact[0].sql_script", d.Get("artifact"))
		if sqlScript1 != nil && sqlScript1 != "" {
			sqlArtifact["sqlScript"] = sqlScript1
		}
		additionalDependencies5, _ := jsonpath.Get("$[0].sql_artifact[0].additional_dependencies", d.Get("artifact"))
		if additionalDependencies5 != nil && additionalDependencies5 != "" {
			sqlArtifact["additionalDependencies"] = additionalDependencies5
		}

		if len(sqlArtifact) > 0 {
			artifact["sqlArtifact"] = sqlArtifact
		}
		kind1, _ := jsonpath.Get("$[0].kind", v)
		if kind1 != nil && kind1 != "" {
			artifact["kind"] = kind1
		}

		request["artifact"] = artifact
	}

	if v, ok := d.GetOk("description"); ok {
		request["description"] = v
	}
	request["name"] = d.Get("deployment_name")
	body = request
	// Retry the POST within the create timeout with incremental backoff;
	// only errors classified retryable by NeedRetry are retried.
	wait := incrementalWait(3*time.Second, 5*time.Second)
	err = resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError {
		response, err = client.RoaPost("ververica", "2022-07-18", action, query, header, body, true)
		if err != nil {
			if NeedRetry(err) {
				wait()
				return resource.RetryableError(err)
			}
			return resource.NonRetryableError(err)
		}
		return nil
	})
	addDebug(action, response, request)

	if err != nil {
		return WrapErrorf(err, DefaultErrorMsg, "alicloud_realtime_compute_deployment", action, AlibabaCloudSdkGoERROR)
	}

	// Composite resource ID: workspace:namespace:deploymentId (colon-separated),
	// taken from the create response.
	dataworkspaceVar, _ := jsonpath.Get("$.data.workspace", response)
	datanamespaceVar, _ := jsonpath.Get("$.data.namespace", response)
	datadeploymentIdVar, _ := jsonpath.Get("$.data.deploymentId", response)
	d.SetId(fmt.Sprintf("%v:%v:%v", dataworkspaceVar, datanamespaceVar, datadeploymentIdVar))

	return resourceAliCloudRealtimeComputeDeploymentRead(d, meta)
}
realtimeComputeServiceV2.DescribeRealtimeComputeDeployment(d.Id()) + if err != nil { + if !d.IsNewResource() && NotFoundError(err) { + log.Printf("[DEBUG] Resource alicloud_realtime_compute_deployment DescribeRealtimeComputeDeployment Failed!!! %s", err) + d.SetId("") + return nil + } + return WrapError(err) + } + + d.Set("deployment_name", objectRaw["name"]) + d.Set("description", objectRaw["description"]) + d.Set("engine_version", objectRaw["engineVersion"]) + d.Set("execution_mode", objectRaw["executionMode"]) + d.Set("flink_conf", objectRaw["flinkConf"]) + d.Set("labels", objectRaw["labels"]) + d.Set("deployment_id", objectRaw["deploymentId"]) + d.Set("namespace", objectRaw["namespace"]) + d.Set("resource_id", objectRaw["workspace"]) + + artifactMaps := make([]map[string]interface{}, 0) + artifactMap := make(map[string]interface{}) + artifactRaw := make(map[string]interface{}) + if objectRaw["artifact"] != nil { + artifactRaw = objectRaw["artifact"].(map[string]interface{}) + } + if len(artifactRaw) > 0 { + artifactMap["kind"] = artifactRaw["kind"] + + jarArtifactMaps := make([]map[string]interface{}, 0) + jarArtifactMap := make(map[string]interface{}) + jarArtifactRaw := make(map[string]interface{}) + if artifactRaw["jarArtifact"] != nil { + jarArtifactRaw = artifactRaw["jarArtifact"].(map[string]interface{}) + } + if len(jarArtifactRaw) > 0 { + jarArtifactMap["entry_class"] = jarArtifactRaw["entryClass"] + jarArtifactMap["jar_uri"] = jarArtifactRaw["jarUri"] + jarArtifactMap["main_args"] = jarArtifactRaw["mainArgs"] + + additionalDependenciesRaw := make([]interface{}, 0) + if jarArtifactRaw["additionalDependencies"] != nil { + additionalDependenciesRaw = convertToInterfaceArray(jarArtifactRaw["additionalDependencies"]) + } + + jarArtifactMap["additional_dependencies"] = additionalDependenciesRaw + jarArtifactMaps = append(jarArtifactMaps, jarArtifactMap) + } + artifactMap["jar_artifact"] = jarArtifactMaps + pythonArtifactMaps := make([]map[string]interface{}, 
0) + pythonArtifactMap := make(map[string]interface{}) + pythonArtifactRaw := make(map[string]interface{}) + if artifactRaw["pythonArtifact"] != nil { + pythonArtifactRaw = artifactRaw["pythonArtifact"].(map[string]interface{}) + } + if len(pythonArtifactRaw) > 0 { + pythonArtifactMap["entry_module"] = pythonArtifactRaw["entryModule"] + pythonArtifactMap["main_args"] = pythonArtifactRaw["mainArgs"] + pythonArtifactMap["python_artifact_uri"] = pythonArtifactRaw["pythonArtifactUri"] + + additionalDependenciesRaw := make([]interface{}, 0) + if pythonArtifactRaw["additionalDependencies"] != nil { + additionalDependenciesRaw = convertToInterfaceArray(pythonArtifactRaw["additionalDependencies"]) + } + + pythonArtifactMap["additional_dependencies"] = additionalDependenciesRaw + additionalPythonArchivesRaw := make([]interface{}, 0) + if pythonArtifactRaw["additionalPythonArchives"] != nil { + additionalPythonArchivesRaw = convertToInterfaceArray(pythonArtifactRaw["additionalPythonArchives"]) + } + + pythonArtifactMap["additional_python_archives"] = additionalPythonArchivesRaw + additionalPythonLibrariesRaw := make([]interface{}, 0) + if pythonArtifactRaw["additionalPythonLibraries"] != nil { + additionalPythonLibrariesRaw = convertToInterfaceArray(pythonArtifactRaw["additionalPythonLibraries"]) + } + + pythonArtifactMap["additional_python_libraries"] = additionalPythonLibrariesRaw + pythonArtifactMaps = append(pythonArtifactMaps, pythonArtifactMap) + } + artifactMap["python_artifact"] = pythonArtifactMaps + sqlArtifactMaps := make([]map[string]interface{}, 0) + sqlArtifactMap := make(map[string]interface{}) + sqlArtifactRaw := make(map[string]interface{}) + if artifactRaw["sqlArtifact"] != nil { + sqlArtifactRaw = artifactRaw["sqlArtifact"].(map[string]interface{}) + } + if len(sqlArtifactRaw) > 0 { + sqlArtifactMap["sql_script"] = sqlArtifactRaw["sqlScript"] + + additionalDependenciesRaw := make([]interface{}, 0) + if sqlArtifactRaw["additionalDependencies"] != nil { + 
additionalDependenciesRaw = convertToInterfaceArray(sqlArtifactRaw["additionalDependencies"]) + } + + sqlArtifactMap["additional_dependencies"] = additionalDependenciesRaw + sqlArtifactMaps = append(sqlArtifactMaps, sqlArtifactMap) + } + artifactMap["sql_artifact"] = sqlArtifactMaps + artifactMaps = append(artifactMaps, artifactMap) + } + if err := d.Set("artifact", artifactMaps); err != nil { + return err + } + batchResourceSettingMaps := make([]map[string]interface{}, 0) + batchResourceSettingMap := make(map[string]interface{}) + batchResourceSettingRaw := make(map[string]interface{}) + if objectRaw["batchResourceSetting"] != nil { + batchResourceSettingRaw = objectRaw["batchResourceSetting"].(map[string]interface{}) + } + if len(batchResourceSettingRaw) > 0 { + batchResourceSettingMap["max_slot"] = batchResourceSettingRaw["maxSlot"] + + basicResourceSettingMaps := make([]map[string]interface{}, 0) + basicResourceSettingMap := make(map[string]interface{}) + basicResourceSettingRaw := make(map[string]interface{}) + if batchResourceSettingRaw["basicResourceSetting"] != nil { + basicResourceSettingRaw = batchResourceSettingRaw["basicResourceSetting"].(map[string]interface{}) + } + if len(basicResourceSettingRaw) > 0 { + basicResourceSettingMap["parallelism"] = basicResourceSettingRaw["parallelism"] + + jobmanagerResourceSettingSpecMaps := make([]map[string]interface{}, 0) + jobmanagerResourceSettingSpecMap := make(map[string]interface{}) + jobmanagerResourceSettingSpecRaw := make(map[string]interface{}) + if basicResourceSettingRaw["jobmanagerResourceSettingSpec"] != nil { + jobmanagerResourceSettingSpecRaw = basicResourceSettingRaw["jobmanagerResourceSettingSpec"].(map[string]interface{}) + } + if len(jobmanagerResourceSettingSpecRaw) > 0 { + jobmanagerResourceSettingSpecMap["cpu"] = jobmanagerResourceSettingSpecRaw["cpu"] + jobmanagerResourceSettingSpecMap["memory"] = jobmanagerResourceSettingSpecRaw["memory"] + + jobmanagerResourceSettingSpecMaps = 
append(jobmanagerResourceSettingSpecMaps, jobmanagerResourceSettingSpecMap) + } + basicResourceSettingMap["jobmanager_resource_setting_spec"] = jobmanagerResourceSettingSpecMaps + taskmanagerResourceSettingSpecMaps := make([]map[string]interface{}, 0) + taskmanagerResourceSettingSpecMap := make(map[string]interface{}) + taskmanagerResourceSettingSpecRaw := make(map[string]interface{}) + if basicResourceSettingRaw["taskmanagerResourceSettingSpec"] != nil { + taskmanagerResourceSettingSpecRaw = basicResourceSettingRaw["taskmanagerResourceSettingSpec"].(map[string]interface{}) + } + if len(taskmanagerResourceSettingSpecRaw) > 0 { + taskmanagerResourceSettingSpecMap["cpu"] = taskmanagerResourceSettingSpecRaw["cpu"] + taskmanagerResourceSettingSpecMap["memory"] = taskmanagerResourceSettingSpecRaw["memory"] + + taskmanagerResourceSettingSpecMaps = append(taskmanagerResourceSettingSpecMaps, taskmanagerResourceSettingSpecMap) + } + basicResourceSettingMap["taskmanager_resource_setting_spec"] = taskmanagerResourceSettingSpecMaps + basicResourceSettingMaps = append(basicResourceSettingMaps, basicResourceSettingMap) + } + batchResourceSettingMap["basic_resource_setting"] = basicResourceSettingMaps + batchResourceSettingMaps = append(batchResourceSettingMaps, batchResourceSettingMap) + } + if err := d.Set("batch_resource_setting", batchResourceSettingMaps); err != nil { + return err + } + deploymentTargetMaps := make([]map[string]interface{}, 0) + deploymentTargetMap := make(map[string]interface{}) + deploymentTargetRaw := make(map[string]interface{}) + if objectRaw["deploymentTarget"] != nil { + deploymentTargetRaw = objectRaw["deploymentTarget"].(map[string]interface{}) + } + if len(deploymentTargetRaw) > 0 { + deploymentTargetMap["mode"] = deploymentTargetRaw["mode"] + deploymentTargetMap["name"] = deploymentTargetRaw["name"] + + deploymentTargetMaps = append(deploymentTargetMaps, deploymentTargetMap) + } + if err := d.Set("deployment_target", deploymentTargetMaps); err != 
nil { + return err + } + localVariablesRaw := objectRaw["localVariables"] + localVariablesMaps := make([]map[string]interface{}, 0) + if localVariablesRaw != nil { + for _, localVariablesChildRaw := range convertToInterfaceArray(localVariablesRaw) { + localVariablesMap := make(map[string]interface{}) + localVariablesChildRaw := localVariablesChildRaw.(map[string]interface{}) + localVariablesMap["name"] = localVariablesChildRaw["name"] + localVariablesMap["value"] = localVariablesChildRaw["value"] + + localVariablesMaps = append(localVariablesMaps, localVariablesMap) + } + } + if err := d.Set("local_variables", localVariablesMaps); err != nil { + return err + } + loggingMaps := make([]map[string]interface{}, 0) + loggingMap := make(map[string]interface{}) + loggingRaw := make(map[string]interface{}) + if objectRaw["logging"] != nil { + loggingRaw = objectRaw["logging"].(map[string]interface{}) + } + if len(loggingRaw) > 0 { + loggingMap["log4j2_configuration_template"] = loggingRaw["log4j2ConfigurationTemplate"] + loggingMap["logging_profile"] = loggingRaw["loggingProfile"] + + log4jLoggersRaw := loggingRaw["log4jLoggers"] + log4JLoggersMaps := make([]map[string]interface{}, 0) + if log4jLoggersRaw != nil { + for _, log4jLoggersChildRaw := range convertToInterfaceArray(log4jLoggersRaw) { + log4JLoggersMap := make(map[string]interface{}) + log4jLoggersChildRaw := log4jLoggersChildRaw.(map[string]interface{}) + log4JLoggersMap["logger_level"] = log4jLoggersChildRaw["loggerLevel"] + log4JLoggersMap["logger_name"] = log4jLoggersChildRaw["loggerName"] + + log4JLoggersMaps = append(log4JLoggersMaps, log4JLoggersMap) + } + } + loggingMap["log4j_loggers"] = log4JLoggersMaps + logReservePolicyMaps := make([]map[string]interface{}, 0) + logReservePolicyMap := make(map[string]interface{}) + logReservePolicyRaw := make(map[string]interface{}) + if loggingRaw["logReservePolicy"] != nil { + logReservePolicyRaw = loggingRaw["logReservePolicy"].(map[string]interface{}) + } + if 
len(logReservePolicyRaw) > 0 { + logReservePolicyMap["expiration_days"] = logReservePolicyRaw["expirationDays"] + logReservePolicyMap["open_history"] = logReservePolicyRaw["openHistory"] + + logReservePolicyMaps = append(logReservePolicyMaps, logReservePolicyMap) + } + loggingMap["log_reserve_policy"] = logReservePolicyMaps + loggingMaps = append(loggingMaps, loggingMap) + } + if err := d.Set("logging", loggingMaps); err != nil { + return err + } + streamingResourceSettingMaps := make([]map[string]interface{}, 0) + streamingResourceSettingMap := make(map[string]interface{}) + streamingResourceSettingRaw := make(map[string]interface{}) + if objectRaw["streamingResourceSetting"] != nil { + streamingResourceSettingRaw = objectRaw["streamingResourceSetting"].(map[string]interface{}) + } + if len(streamingResourceSettingRaw) > 0 { + streamingResourceSettingMap["resource_setting_mode"] = streamingResourceSettingRaw["resourceSettingMode"] + + basicResourceSettingMaps := make([]map[string]interface{}, 0) + basicResourceSettingMap := make(map[string]interface{}) + basicResourceSettingRaw := make(map[string]interface{}) + if streamingResourceSettingRaw["basicResourceSetting"] != nil { + basicResourceSettingRaw = streamingResourceSettingRaw["basicResourceSetting"].(map[string]interface{}) + } + if len(basicResourceSettingRaw) > 0 { + basicResourceSettingMap["parallelism"] = basicResourceSettingRaw["parallelism"] + + jobmanagerResourceSettingSpecMaps := make([]map[string]interface{}, 0) + jobmanagerResourceSettingSpecMap := make(map[string]interface{}) + jobmanagerResourceSettingSpecRaw := make(map[string]interface{}) + if basicResourceSettingRaw["jobmanagerResourceSettingSpec"] != nil { + jobmanagerResourceSettingSpecRaw = basicResourceSettingRaw["jobmanagerResourceSettingSpec"].(map[string]interface{}) + } + if len(jobmanagerResourceSettingSpecRaw) > 0 { + jobmanagerResourceSettingSpecMap["cpu"] = jobmanagerResourceSettingSpecRaw["cpu"] + 
jobmanagerResourceSettingSpecMap["memory"] = jobmanagerResourceSettingSpecRaw["memory"] + + jobmanagerResourceSettingSpecMaps = append(jobmanagerResourceSettingSpecMaps, jobmanagerResourceSettingSpecMap) + } + basicResourceSettingMap["jobmanager_resource_setting_spec"] = jobmanagerResourceSettingSpecMaps + taskmanagerResourceSettingSpecMaps := make([]map[string]interface{}, 0) + taskmanagerResourceSettingSpecMap := make(map[string]interface{}) + taskmanagerResourceSettingSpecRaw := make(map[string]interface{}) + if basicResourceSettingRaw["taskmanagerResourceSettingSpec"] != nil { + taskmanagerResourceSettingSpecRaw = basicResourceSettingRaw["taskmanagerResourceSettingSpec"].(map[string]interface{}) + } + if len(taskmanagerResourceSettingSpecRaw) > 0 { + taskmanagerResourceSettingSpecMap["cpu"] = taskmanagerResourceSettingSpecRaw["cpu"] + taskmanagerResourceSettingSpecMap["memory"] = taskmanagerResourceSettingSpecRaw["memory"] + + taskmanagerResourceSettingSpecMaps = append(taskmanagerResourceSettingSpecMaps, taskmanagerResourceSettingSpecMap) + } + basicResourceSettingMap["taskmanager_resource_setting_spec"] = taskmanagerResourceSettingSpecMaps + basicResourceSettingMaps = append(basicResourceSettingMaps, basicResourceSettingMap) + } + streamingResourceSettingMap["basic_resource_setting"] = basicResourceSettingMaps + expertResourceSettingMaps := make([]map[string]interface{}, 0) + expertResourceSettingMap := make(map[string]interface{}) + expertResourceSettingRaw := make(map[string]interface{}) + if streamingResourceSettingRaw["expertResourceSetting"] != nil { + expertResourceSettingRaw = streamingResourceSettingRaw["expertResourceSetting"].(map[string]interface{}) + } + if len(expertResourceSettingRaw) > 0 { + expertResourceSettingMap["resource_plan"] = expertResourceSettingRaw["resourcePlan"] + + jobmanagerResourceSettingSpecMaps := make([]map[string]interface{}, 0) + jobmanagerResourceSettingSpecMap := make(map[string]interface{}) + 
jobmanagerResourceSettingSpecRaw := make(map[string]interface{}) + if expertResourceSettingRaw["jobmanagerResourceSettingSpec"] != nil { + jobmanagerResourceSettingSpecRaw = expertResourceSettingRaw["jobmanagerResourceSettingSpec"].(map[string]interface{}) + } + if len(jobmanagerResourceSettingSpecRaw) > 0 { + jobmanagerResourceSettingSpecMap["cpu"] = jobmanagerResourceSettingSpecRaw["cpu"] + jobmanagerResourceSettingSpecMap["memory"] = jobmanagerResourceSettingSpecRaw["memory"] + + jobmanagerResourceSettingSpecMaps = append(jobmanagerResourceSettingSpecMaps, jobmanagerResourceSettingSpecMap) + } + expertResourceSettingMap["jobmanager_resource_setting_spec"] = jobmanagerResourceSettingSpecMaps + expertResourceSettingMaps = append(expertResourceSettingMaps, expertResourceSettingMap) + } + streamingResourceSettingMap["expert_resource_setting"] = expertResourceSettingMaps + streamingResourceSettingMaps = append(streamingResourceSettingMaps, streamingResourceSettingMap) + } + if err := d.Set("streaming_resource_setting", streamingResourceSettingMaps); err != nil { + return err + } + + return nil +} + +func resourceAliCloudRealtimeComputeDeploymentUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*connectivity.AliyunClient) + var request map[string]interface{} + var response map[string]interface{} + var header map[string]*string + var query map[string]*string + var body map[string]interface{} + update := false + + var err error + parts := strings.Split(d.Id(), ":") + namespace := parts[1] + deploymentId := parts[2] + action := fmt.Sprintf("/api/v2/namespaces/%s/deployments/%s", namespace, deploymentId) + request = make(map[string]interface{}) + query = make(map[string]*string) + header = make(map[string]*string) + body = make(map[string]interface{}) + header["workspace"] = StringPointer(parts[0]) + + if d.HasChange("streaming_resource_setting") { + update = true + } + streamingResourceSetting := make(map[string]interface{}) + + if v := 
d.Get("streaming_resource_setting"); !IsNil(v) || d.HasChange("streaming_resource_setting") { + expertResourceSetting := make(map[string]interface{}) + resourcePlan1, _ := jsonpath.Get("$[0].expert_resource_setting[0].resource_plan", d.Get("streaming_resource_setting")) + if resourcePlan1 != nil && (d.HasChange("streaming_resource_setting.0.expert_resource_setting.0.resource_plan") || resourcePlan1 != "") { + expertResourceSetting["resourcePlan"] = resourcePlan1 + } + jobmanagerResourceSettingSpec := make(map[string]interface{}) + memory1, _ := jsonpath.Get("$[0].expert_resource_setting[0].jobmanager_resource_setting_spec[0].memory", d.Get("streaming_resource_setting")) + if memory1 != nil && (d.HasChange("streaming_resource_setting.0.expert_resource_setting.0.jobmanager_resource_setting_spec.0.memory") || memory1 != "") { + jobmanagerResourceSettingSpec["memory"] = memory1 + } + cpu1, _ := jsonpath.Get("$[0].expert_resource_setting[0].jobmanager_resource_setting_spec[0].cpu", d.Get("streaming_resource_setting")) + if cpu1 != nil && (d.HasChange("streaming_resource_setting.0.expert_resource_setting.0.jobmanager_resource_setting_spec.0.cpu") || cpu1 != "") { + jobmanagerResourceSettingSpec["cpu"] = cpu1 + } + + if len(jobmanagerResourceSettingSpec) > 0 { + expertResourceSetting["jobmanagerResourceSettingSpec"] = jobmanagerResourceSettingSpec + } + + if len(expertResourceSetting) > 0 { + streamingResourceSetting["expertResourceSetting"] = expertResourceSetting + } + resourceSettingMode1, _ := jsonpath.Get("$[0].resource_setting_mode", v) + if resourceSettingMode1 != nil && (d.HasChange("streaming_resource_setting.0.resource_setting_mode") || resourceSettingMode1 != "") { + streamingResourceSetting["resourceSettingMode"] = resourceSettingMode1 + } + basicResourceSetting := make(map[string]interface{}) + taskmanagerResourceSettingSpec := make(map[string]interface{}) + memory3, _ := 
jsonpath.Get("$[0].basic_resource_setting[0].taskmanager_resource_setting_spec[0].memory", d.Get("streaming_resource_setting")) + if memory3 != nil && (d.HasChange("streaming_resource_setting.0.basic_resource_setting.0.taskmanager_resource_setting_spec.0.memory") || memory3 != "") { + taskmanagerResourceSettingSpec["memory"] = memory3 + } + cpu3, _ := jsonpath.Get("$[0].basic_resource_setting[0].taskmanager_resource_setting_spec[0].cpu", d.Get("streaming_resource_setting")) + if cpu3 != nil && (d.HasChange("streaming_resource_setting.0.basic_resource_setting.0.taskmanager_resource_setting_spec.0.cpu") || cpu3 != "") { + taskmanagerResourceSettingSpec["cpu"] = cpu3 + } + + if len(taskmanagerResourceSettingSpec) > 0 { + basicResourceSetting["taskmanagerResourceSettingSpec"] = taskmanagerResourceSettingSpec + } + jobmanagerResourceSettingSpec1 := make(map[string]interface{}) + cpu5, _ := jsonpath.Get("$[0].basic_resource_setting[0].jobmanager_resource_setting_spec[0].cpu", d.Get("streaming_resource_setting")) + if cpu5 != nil && (d.HasChange("streaming_resource_setting.0.basic_resource_setting.0.jobmanager_resource_setting_spec.0.cpu") || cpu5 != "") { + jobmanagerResourceSettingSpec1["cpu"] = cpu5 + } + memory5, _ := jsonpath.Get("$[0].basic_resource_setting[0].jobmanager_resource_setting_spec[0].memory", d.Get("streaming_resource_setting")) + if memory5 != nil && (d.HasChange("streaming_resource_setting.0.basic_resource_setting.0.jobmanager_resource_setting_spec.0.memory") || memory5 != "") { + jobmanagerResourceSettingSpec1["memory"] = memory5 + } + + if len(jobmanagerResourceSettingSpec1) > 0 { + basicResourceSetting["jobmanagerResourceSettingSpec"] = jobmanagerResourceSettingSpec1 + } + parallelism1, _ := jsonpath.Get("$[0].basic_resource_setting[0].parallelism", d.Get("streaming_resource_setting")) + if parallelism1 != nil && (d.HasChange("streaming_resource_setting.0.basic_resource_setting.0.parallelism") || parallelism1 != "") { + 
basicResourceSetting["parallelism"] = parallelism1 + } + + if len(basicResourceSetting) > 0 { + streamingResourceSetting["basicResourceSetting"] = basicResourceSetting + } + + request["streamingResourceSetting"] = streamingResourceSetting + } + + if d.HasChange("artifact") { + update = true + } + artifact := make(map[string]interface{}) + + if v := d.Get("artifact"); v != nil { + pythonArtifact := make(map[string]interface{}) + additionalDependencies1, _ := jsonpath.Get("$[0].python_artifact[0].additional_dependencies", d.Get("artifact")) + if additionalDependencies1 != nil && (d.HasChange("artifact.0.python_artifact.0.additional_dependencies") || additionalDependencies1 != "") { + pythonArtifact["additionalDependencies"] = additionalDependencies1 + } + additionalPythonArchives1, _ := jsonpath.Get("$[0].python_artifact[0].additional_python_archives", d.Get("artifact")) + if additionalPythonArchives1 != nil && (d.HasChange("artifact.0.python_artifact.0.additional_python_archives") || additionalPythonArchives1 != "") { + pythonArtifact["additionalPythonArchives"] = additionalPythonArchives1 + } + pythonArtifactUri1, _ := jsonpath.Get("$[0].python_artifact[0].python_artifact_uri", d.Get("artifact")) + if pythonArtifactUri1 != nil && (d.HasChange("artifact.0.python_artifact.0.python_artifact_uri") || pythonArtifactUri1 != "") { + pythonArtifact["pythonArtifactUri"] = pythonArtifactUri1 + } + mainArgs1, _ := jsonpath.Get("$[0].python_artifact[0].main_args", d.Get("artifact")) + if mainArgs1 != nil && (d.HasChange("artifact.0.python_artifact.0.main_args") || mainArgs1 != "") { + pythonArtifact["mainArgs"] = mainArgs1 + } + additionalPythonLibraries1, _ := jsonpath.Get("$[0].python_artifact[0].additional_python_libraries", d.Get("artifact")) + if additionalPythonLibraries1 != nil && (d.HasChange("artifact.0.python_artifact.0.additional_python_libraries") || additionalPythonLibraries1 != "") { + pythonArtifact["additionalPythonLibraries"] = additionalPythonLibraries1 + } + 
entryModule1, _ := jsonpath.Get("$[0].python_artifact[0].entry_module", d.Get("artifact")) + if entryModule1 != nil && (d.HasChange("artifact.0.python_artifact.0.entry_module") || entryModule1 != "") { + pythonArtifact["entryModule"] = entryModule1 + } + + if len(pythonArtifact) > 0 { + artifact["pythonArtifact"] = pythonArtifact + } + jarArtifact := make(map[string]interface{}) + jarUri1, _ := jsonpath.Get("$[0].jar_artifact[0].jar_uri", d.Get("artifact")) + if jarUri1 != nil && (d.HasChange("artifact.0.jar_artifact.0.jar_uri") || jarUri1 != "") { + jarArtifact["jarUri"] = jarUri1 + } + additionalDependencies3, _ := jsonpath.Get("$[0].jar_artifact[0].additional_dependencies", d.Get("artifact")) + if additionalDependencies3 != nil && (d.HasChange("artifact.0.jar_artifact.0.additional_dependencies") || additionalDependencies3 != "") { + jarArtifact["additionalDependencies"] = additionalDependencies3 + } + mainArgs3, _ := jsonpath.Get("$[0].jar_artifact[0].main_args", d.Get("artifact")) + if mainArgs3 != nil && (d.HasChange("artifact.0.jar_artifact.0.main_args") || mainArgs3 != "") { + jarArtifact["mainArgs"] = mainArgs3 + } + entryClass1, _ := jsonpath.Get("$[0].jar_artifact[0].entry_class", d.Get("artifact")) + if entryClass1 != nil && (d.HasChange("artifact.0.jar_artifact.0.entry_class") || entryClass1 != "") { + jarArtifact["entryClass"] = entryClass1 + } + + if len(jarArtifact) > 0 { + artifact["jarArtifact"] = jarArtifact + } + sqlArtifact := make(map[string]interface{}) + sqlScript1, _ := jsonpath.Get("$[0].sql_artifact[0].sql_script", d.Get("artifact")) + if sqlScript1 != nil && (d.HasChange("artifact.0.sql_artifact.0.sql_script") || sqlScript1 != "") { + sqlArtifact["sqlScript"] = sqlScript1 + } + additionalDependencies5, _ := jsonpath.Get("$[0].sql_artifact[0].additional_dependencies", d.Get("artifact")) + if additionalDependencies5 != nil && (d.HasChange("artifact.0.sql_artifact.0.additional_dependencies") || additionalDependencies5 != "") { + 
sqlArtifact["additionalDependencies"] = additionalDependencies5 + } + + if len(sqlArtifact) > 0 { + artifact["sqlArtifact"] = sqlArtifact + } + kind1, _ := jsonpath.Get("$[0].kind", v) + if kind1 != nil && (d.HasChange("artifact.0.kind") || kind1 != "") { + artifact["kind"] = kind1 + } + + request["artifact"] = artifact + } + + if d.HasChange("flink_conf") { + update = true + } + if v, ok := d.GetOk("flink_conf"); ok || d.HasChange("flink_conf") { + request["flinkConf"] = v + } + if d.HasChange("logging") { + update = true + } + logging := make(map[string]interface{}) + + if v := d.Get("logging"); !IsNil(v) || d.HasChange("logging") { + logReservePolicy := make(map[string]interface{}) + expirationDays1, _ := jsonpath.Get("$[0].log_reserve_policy[0].expiration_days", d.Get("logging")) + if expirationDays1 != nil && (d.HasChange("logging.0.log_reserve_policy.0.expiration_days") || expirationDays1 != "") { + logReservePolicy["expirationDays"] = expirationDays1 + } + openHistory1, _ := jsonpath.Get("$[0].log_reserve_policy[0].open_history", d.Get("logging")) + if openHistory1 != nil && (d.HasChange("logging.0.log_reserve_policy.0.open_history") || openHistory1 != "") { + logReservePolicy["openHistory"] = openHistory1 + } + + if len(logReservePolicy) > 0 { + logging["logReservePolicy"] = logReservePolicy + } + if v, ok := d.GetOk("logging"); ok { + localData, err := jsonpath.Get("$[0].log4j_loggers", v) + if err != nil { + localData = make([]interface{}, 0) + } + localMaps := make([]interface{}, 0) + for _, dataLoop := range convertToInterfaceArray(localData) { + dataLoopTmp := make(map[string]interface{}) + if dataLoop != nil { + dataLoopTmp = dataLoop.(map[string]interface{}) + } + dataLoopMap := make(map[string]interface{}) + dataLoopMap["loggerLevel"] = dataLoopTmp["logger_level"] + dataLoopMap["loggerName"] = dataLoopTmp["logger_name"] + localMaps = append(localMaps, dataLoopMap) + } + logging["log4jLoggers"] = localMaps + } + + loggingProfile1, _ := 
jsonpath.Get("$[0].logging_profile", v) + if loggingProfile1 != nil && (d.HasChange("logging.0.logging_profile") || loggingProfile1 != "") { + logging["loggingProfile"] = loggingProfile1 + } + log4J2ConfigurationTemplate, _ := jsonpath.Get("$[0].log4j2_configuration_template", v) + if log4J2ConfigurationTemplate != nil && (d.HasChange("logging.0.log4j2_configuration_template") || log4J2ConfigurationTemplate != "") { + logging["log4j2ConfigurationTemplate"] = log4J2ConfigurationTemplate + } + + request["logging"] = logging + } + + if d.HasChange("batch_resource_setting") { + update = true + } + batchResourceSetting := make(map[string]interface{}) + + if v := d.Get("batch_resource_setting"); !IsNil(v) || d.HasChange("batch_resource_setting") { + basicResourceSetting1 := make(map[string]interface{}) + taskmanagerResourceSettingSpec1 := make(map[string]interface{}) + cpu7, _ := jsonpath.Get("$[0].basic_resource_setting[0].taskmanager_resource_setting_spec[0].cpu", d.Get("batch_resource_setting")) + if cpu7 != nil && (d.HasChange("batch_resource_setting.0.basic_resource_setting.0.taskmanager_resource_setting_spec.0.cpu") || cpu7 != "") { + taskmanagerResourceSettingSpec1["cpu"] = cpu7 + } + memory7, _ := jsonpath.Get("$[0].basic_resource_setting[0].taskmanager_resource_setting_spec[0].memory", d.Get("batch_resource_setting")) + if memory7 != nil && (d.HasChange("batch_resource_setting.0.basic_resource_setting.0.taskmanager_resource_setting_spec.0.memory") || memory7 != "") { + taskmanagerResourceSettingSpec1["memory"] = memory7 + } + + if len(taskmanagerResourceSettingSpec1) > 0 { + basicResourceSetting1["taskmanagerResourceSettingSpec"] = taskmanagerResourceSettingSpec1 + } + parallelism3, _ := jsonpath.Get("$[0].basic_resource_setting[0].parallelism", d.Get("batch_resource_setting")) + if parallelism3 != nil && (d.HasChange("batch_resource_setting.0.basic_resource_setting.0.parallelism") || parallelism3 != "") { + basicResourceSetting1["parallelism"] = parallelism3 + 
} + jobmanagerResourceSettingSpec2 := make(map[string]interface{}) + cpu9, _ := jsonpath.Get("$[0].basic_resource_setting[0].jobmanager_resource_setting_spec[0].cpu", d.Get("batch_resource_setting")) + if cpu9 != nil && (d.HasChange("batch_resource_setting.0.basic_resource_setting.0.jobmanager_resource_setting_spec.0.cpu") || cpu9 != "") { + jobmanagerResourceSettingSpec2["cpu"] = cpu9 + } + memory9, _ := jsonpath.Get("$[0].basic_resource_setting[0].jobmanager_resource_setting_spec[0].memory", d.Get("batch_resource_setting")) + if memory9 != nil && (d.HasChange("batch_resource_setting.0.basic_resource_setting.0.jobmanager_resource_setting_spec.0.memory") || memory9 != "") { + jobmanagerResourceSettingSpec2["memory"] = memory9 + } + + if len(jobmanagerResourceSettingSpec2) > 0 { + basicResourceSetting1["jobmanagerResourceSettingSpec"] = jobmanagerResourceSettingSpec2 + } + + if len(basicResourceSetting1) > 0 { + batchResourceSetting["basicResourceSetting"] = basicResourceSetting1 + } + maxSlot1, _ := jsonpath.Get("$[0].max_slot", v) + if maxSlot1 != nil && (d.HasChange("batch_resource_setting.0.max_slot") || maxSlot1 != "") { + batchResourceSetting["maxSlot"] = maxSlot1 + } + + request["batchResourceSetting"] = batchResourceSetting + } + + if d.HasChange("labels") { + update = true + } + if v, ok := d.GetOk("labels"); ok || d.HasChange("labels") { + request["labels"] = v + } + if d.HasChange("deployment_target") { + update = true + } + deploymentTarget := make(map[string]interface{}) + + if v := d.Get("deployment_target"); v != nil { + mode1, _ := jsonpath.Get("$[0].mode", v) + if mode1 != nil && (d.HasChange("deployment_target.0.mode") || mode1 != "") { + deploymentTarget["mode"] = mode1 + } + name1, _ := jsonpath.Get("$[0].name", v) + if name1 != nil && (d.HasChange("deployment_target.0.name") || name1 != "") { + deploymentTarget["name"] = name1 + } + + request["deploymentTarget"] = deploymentTarget + } + + if d.HasChange("local_variables") { + update = true + } + 
if v, ok := d.GetOk("local_variables"); ok || d.HasChange("local_variables") { + localVariablesMapsArray := make([]interface{}, 0) + for _, dataLoop1 := range convertToInterfaceArray(v) { + dataLoop1Tmp := dataLoop1.(map[string]interface{}) + dataLoop1Map := make(map[string]interface{}) + dataLoop1Map["value"] = dataLoop1Tmp["value"] + dataLoop1Map["name"] = dataLoop1Tmp["name"] + localVariablesMapsArray = append(localVariablesMapsArray, dataLoop1Map) + } + request["localVariables"] = localVariablesMapsArray + } + + if d.HasChange("description") { + update = true + } + if v, ok := d.GetOk("description"); ok || d.HasChange("description") { + request["description"] = v + } + if d.HasChange("engine_version") { + update = true + } + if v, ok := d.GetOk("engine_version"); ok || d.HasChange("engine_version") { + request["engineVersion"] = v + } + if d.HasChange("deployment_name") { + update = true + } + request["name"] = d.Get("deployment_name") + body = request + if update { + wait := incrementalWait(3*time.Second, 5*time.Second) + err = resource.Retry(d.Timeout(schema.TimeoutUpdate), func() *resource.RetryError { + response, err = client.RoaPut("ververica", "2022-07-18", action, query, header, body, true) + if err != nil { + if NeedRetry(err) { + wait() + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + addDebug(action, response, request) + if err != nil { + return WrapErrorf(err, DefaultErrorMsg, d.Id(), action, AlibabaCloudSdkGoERROR) + } + } + + return resourceAliCloudRealtimeComputeDeploymentRead(d, meta) +} + +func resourceAliCloudRealtimeComputeDeploymentDelete(d *schema.ResourceData, meta interface{}) error { + + client := meta.(*connectivity.AliyunClient) + parts := strings.Split(d.Id(), ":") + namespace := parts[1] + deploymentId := parts[2] + action := fmt.Sprintf("/api/v2/namespaces/%s/deployments/%s", namespace, deploymentId) + var request map[string]interface{} + var response map[string]interface{} + 
header := make(map[string]*string) + query := make(map[string]*string) + var err error + request = make(map[string]interface{}) + header["workspace"] = StringPointer(parts[0]) + + wait := incrementalWait(3*time.Second, 5*time.Second) + err = resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { + response, err = client.RoaDelete("ververica", "2022-07-18", action, query, header, nil, true) + if err != nil { + if NeedRetry(err) { + wait() + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + addDebug(action, response, request) + + if err != nil { + if IsExpectedErrors(err, []string{"990301"}) || NotFoundError(err) { + return nil + } + return WrapErrorf(err, DefaultErrorMsg, d.Id(), action, AlibabaCloudSdkGoERROR) + } + + return nil +} + +func convertRealtimeComputeDeploymentflinkConfRequest(source interface{}) interface{} { + source = fmt.Sprint(source) + switch source { + case "flinkConf": + return "" + } + return source +} diff --git a/alicloud/resource_alicloud_realtime_compute_deployment_test.go b/alicloud/resource_alicloud_realtime_compute_deployment_test.go new file mode 100644 index 000000000000..61ec274ef3ea --- /dev/null +++ b/alicloud/resource_alicloud_realtime_compute_deployment_test.go @@ -0,0 +1,1243 @@ +// Package alicloud. This file is generated automatically. Please do not modify it manually, thank you! +package alicloud + +import ( + "fmt" + "testing" + + "github.com/aliyun/terraform-provider-alicloud/alicloud/connectivity" + "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" +) + +// Test RealtimeCompute Deployment. >>> Resource test cases, automatically generated. 
+// Case 测试MAP 11896 +func TestAccAliCloudRealtimeComputeDeployment_basic11896(t *testing.T) { + var v map[string]interface{} + resourceId := "alicloud_realtime_compute_deployment.default" + ra := resourceAttrInit(resourceId, AlicloudRealtimeComputeDeploymentMap11896) + rc := resourceCheckInitWithDescribeMethod(resourceId, &v, func() interface{} { + return &RealtimeComputeServiceV2{testAccProvider.Meta().(*connectivity.AliyunClient)} + }, "DescribeRealtimeComputeDeployment") + rac := resourceAttrCheckInit(rc, ra) + testAccCheck := rac.resourceAttrMapUpdateSet() + rand := acctest.RandIntRange(10000, 99999) + name := fmt.Sprintf("tfaccrealtimecompute%d", rand) + testAccConfig := resourceTestAccConfigFunc(resourceId, name, AlicloudRealtimeComputeDeploymentBasicDependence11896) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheckWithRegions(t, true, []connectivity.Region{"cn-beijing"}) + testAccPreCheck(t) + }, + IDRefreshName: resourceId, + Providers: testAccProviders, + CheckDestroy: rac.checkResourceDestroy(), + Steps: []resource.TestStep{ + { + Config: testAccConfig(map[string]interface{}{ + "logging": []map[string]interface{}{ + { + "logging_profile": "default", + "log4j_loggers": []map[string]interface{}{ + { + "logger_name": "", + "logger_level": "INFO", + }, + }, + "log_reserve_policy": []map[string]interface{}{ + { + "open_history": "true", + "expiration_days": "7", + }, + }, + }, + }, + "deployment_name": name, + "description": "This is a test deployment.", + "engine_version": "vvr-8.0.6-flink-1.17", + "local_variables": []map[string]interface{}{ + { + "value": "value", + "name": "name", + }, + }, + "execution_mode": "STREAMING", + "labels": map[string]interface{}{ + "\"vvp\"": "nb", + }, + "deployment_target": []map[string]interface{}{ + { + "mode": "PER_JOB", + "name": "default-queue", + }, + }, + "streaming_resource_setting": []map[string]interface{}{ + { + "basic_resource_setting": []map[string]interface{}{ + { + 
"taskmanager_resource_setting_spec": []map[string]interface{}{ + { + "memory": "1Gi", + "cpu": "1", + }, + }, + "parallelism": "1", + "jobmanager_resource_setting_spec": []map[string]interface{}{ + { + "memory": "1Gi", + "cpu": "1", + }, + }, + }, + }, + "resource_setting_mode": "BASIC", + }, + }, + "namespace": "code-test-tf-deployment-default", + "artifact": []map[string]interface{}{ + { + "kind": "SQLSCRIPT", + "sql_artifact": []map[string]interface{}{ + { + "sql_script": "create temporary table `datagen` ( id varchar, name varchar ) with ( 'connector' = 'datagen' ); create temporary table `blackhole` ( id varchar, name varchar ) with ( 'connector' = 'blackhole' ); insert into blackhole select * from datagen;", + "additional_dependencies": []string{ + "oss://bucket-name/a.jar"}, + }, + }, + }, + }, + "resource_id": "e7d4d4f4510947", + "flink_conf": map[string]interface{}{ + "\"execution.checkpointing.interval\"": "180s", + "\"execution.checkpointing.min-pause\"": "180s", + "\"restart-strategy\"": "fixed-delay", + "\"restart-strategy.fixed-delay.attempts\"": "2147483647", + "\"restart-strategy.fixed-delay.delay\"": "10 s", + "\"table.exec.state.ttl\"": "36 h", + }, + }), + Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{ + "deployment_name": name, + "description": "This is a test deployment.", + "engine_version": "vvr-8.0.6-flink-1.17", + "local_variables.#": "1", + "execution_mode": "STREAMING", + "namespace": CHECKSET, + "resource_id": CHECKSET, + }), + ), + }, + { + Config: testAccConfig(map[string]interface{}{ + "logging": []map[string]interface{}{ + { + "log4j2_configuration_template": "test-template", + "log4j_loggers": []map[string]interface{}{ + { + "logger_name": "StdOut", + "logger_level": "DEBUG", + }, + }, + "log_reserve_policy": []map[string]interface{}{ + { + "open_history": "false", + "expiration_days": "5", + }, + }, + }, + }, + "deployment_name": name + "_update", + "description": "This is a test deployment 2.", + 
"engine_version": "vvr-8.0.10-flink-1.17", + "local_variables": []map[string]interface{}{ + { + "value": "value1", + "name": "name1", + }, + { + "value": "value2", + "name": "name2", + }, + }, + "labels": map[string]interface{}{ + "\"vvp\"": "b", + }, + "streaming_resource_setting": []map[string]interface{}{ + { + "basic_resource_setting": []map[string]interface{}{ + { + "taskmanager_resource_setting_spec": []map[string]interface{}{ + { + "memory": "2Gi", + "cpu": "2", + }, + }, + "parallelism": "2", + "jobmanager_resource_setting_spec": []map[string]interface{}{ + { + "memory": "2Gi", + "cpu": "2", + }, + }, + }, + }, + "resource_setting_mode": "BASIC", + }, + }, + "artifact": []map[string]interface{}{ + { + "kind": "SQLSCRIPT", + "sql_artifact": []map[string]interface{}{ + { + "sql_script": "CREATE TABLE result_table (id BIGINT, name STRING) WITH ( 'connector' = 'jdbc', 'url' = 'jdbc:mysql://localhost:3306/test', 'table-name' = 'result_table' );", + "additional_dependencies": []string{ + "oss://bucket-name/a.jar", "oss://bucket-name/b.jar", "oss://bucket-name/c.jar"}, + }, + }, + }, + }, + "flink_conf": map[string]interface{}{ + "\"execution.checkpointing.interval\"": "180s", + "\"restart-strategy\"": "fixed-delay", + "\"restart-strategy.fixed-delay.attempts\"": "2147483647", + }, + }), + Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{ + "deployment_name": name + "_update", + "description": "This is a test deployment 2.", + "engine_version": "vvr-8.0.10-flink-1.17", + "local_variables.#": "2", + }), + ), + }, + { + Config: testAccConfig(map[string]interface{}{ + "streaming_resource_setting": []map[string]interface{}{ + { + "resource_setting_mode": "EXPERT", + "expert_resource_setting": []map[string]interface{}{ + { + "resource_plan": "{\\\\n \\\\\\\"ssgProfiles\\\\\\\": [\\\\n {\\\\n \\\\\\\"name\\\\\\\": \\\\\\\"default\\\\\\\",\\\\n \\\\\\\"cpu\\\\\\\": 0.26,\\\\n \\\\\\\"heap\\\\\\\": \\\\\\\"1 gb\\\\\\\",\\\\n \\\\\\\"offHeap\\\\\\\": 
\\\\\\\"32 mb\\\\\\\",\\\\n \\\\\\\"managed\\\\\\\": {},\\\\n \\\\\\\"extended\\\\\\\": {}\\\\n }\\\\n ],\\\\n \\\\\\\"nodes\\\\\\\": [\\\\n {\\\\n \\\\\\\"id\\\\\\\": 1,\\\\n \\\\\\\"type\\\\\\\": \\\\\\\"StreamExecTableSourceScan\\\\\\\",\\\\n \\\\\\\"desc\\\\\\\": \\\\\\\"Source: datagen_source[7]\\\\\\\",\\\\n \\\\\\\"profile\\\\\\\": {\\\\n \\\\\\\"group\\\\\\\": \\\\\\\"default\\\\\\\",\\\\n \\\\\\\"parallelism\\\\\\\": 1,\\\\n \\\\\\\"maxParallelism\\\\\\\": 32768,\\\\n \\\\\\\"minParallelism\\\\\\\": 1\\\\n }\\\\n },\\\\n {\\\\n \\\\\\\"id\\\\\\\": 2,\\\\n \\\\\\\"type\\\\\\\": \\\\\\\"StreamExecCalc\\\\\\\",\\\\n \\\\\\\"desc\\\\\\\": \\\\\\\"Calc[8]\\\\\\\",\\\\n \\\\\\\"profile\\\\\\\": {\\\\n \\\\\\\"group\\\\\\\": \\\\\\\"default\\\\\\\",\\\\n \\\\\\\"parallelism\\\\\\\": 1,\\\\n \\\\\\\"maxParallelism\\\\\\\": 32768,\\\\n \\\\\\\"minParallelism\\\\\\\": 1\\\\n }\\\\n },\\\\n {\\\\n \\\\\\\"id\\\\\\\": 3,\\\\n \\\\\\\"type\\\\\\\": \\\\\\\"StreamExecSink\\\\\\\",\\\\n \\\\\\\"desc\\\\\\\": \\\\\\\"ConstraintEnforcer[9]\\\\\\\",\\\\n \\\\\\\"profile\\\\\\\": {\\\\n \\\\\\\"group\\\\\\\": \\\\\\\"default\\\\\\\",\\\\n \\\\\\\"parallelism\\\\\\\": 1,\\\\n \\\\\\\"maxParallelism\\\\\\\": 32768,\\\\n \\\\\\\"minParallelism\\\\\\\": 1\\\\n }\\\\n },\\\\n {\\\\n \\\\\\\"id\\\\\\\": 4,\\\\n \\\\\\\"type\\\\\\\": \\\\\\\"StreamExecSink\\\\\\\",\\\\n \\\\\\\"desc\\\\\\\": \\\\\\\"Sink: vvptest[9]\\\\\\\",\\\\n \\\\\\\"profile\\\\\\\": {\\\\n \\\\\\\"group\\\\\\\": \\\\\\\"default\\\\\\\",\\\\n \\\\\\\"parallelism\\\\\\\": 1,\\\\n \\\\\\\"maxParallelism\\\\\\\": 32768,\\\\n \\\\\\\"minParallelism\\\\\\\": 1\\\\n }\\\\n }\\\\n ],\\\\n \\\\\\\"edges\\\\\\\": [\\\\n {\\\\n \\\\\\\"source\\\\\\\": 1,\\\\n \\\\\\\"target\\\\\\\": 2,\\\\n \\\\\\\"mode\\\\\\\": \\\\\\\"PIPELINED\\\\\\\",\\\\n \\\\\\\"strategy\\\\\\\": \\\\\\\"FORWARD\\\\\\\"\\\\n },\\\\n {\\\\n \\\\\\\"source\\\\\\\": 2,\\\\n \\\\\\\"target\\\\\\\": 3,\\\\n \\\\\\\"mode\\\\\\\": 
\\\\\\\"PIPELINED\\\\\\\",\\\\n \\\\\\\"strategy\\\\\\\": \\\\\\\"FORWARD\\\\\\\"\\\\n },\\\\n {\\\\n \\\\\\\"source\\\\\\\": 3,\\\\n \\\\\\\"target\\\\\\\": 4,\\\\n \\\\\\\"mode\\\\\\\": \\\\\\\"PIPELINED\\\\\\\",\\\\n \\\\\\\"strategy\\\\\\\": \\\\\\\"FORWARD\\\\\\\"\\\\n }\\\\n ],\\\\n \\\\\\\"vertices\\\\\\\": {\\\\n \\\\\\\"717c7b8afebbfb7137f6f0f99beb2a94\\\\\\\": [\\\\n 1,\\\\n 2,\\\\n 3,\\\\n 4\\\\n ]\\\\n }\\\\n}", + "jobmanager_resource_setting_spec": []map[string]interface{}{ + { + "memory": "2Gi", + "cpu": "2", + }, + }, + }, + }, + }, + }, + "artifact": []map[string]interface{}{ + { + "kind": "SQLSCRIPT", + "sql_artifact": []map[string]interface{}{ + { + "sql_script": "CREATE TABLE result_table (id BIGINT, name STRING) WITH ( 'connector' = 'jdbc', 'url' = 'jdbc:mysql://localhost:3306/test', 'table-name' = 'result_table' );", + "additional_dependencies": []string{}, + }, + }, + }, + }, + }), + Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{}), + ), + }, + { + ResourceName: resourceId, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{}, + }, + }, + }) +} + +var AlicloudRealtimeComputeDeploymentMap11896 = map[string]string{} + +func AlicloudRealtimeComputeDeploymentBasicDependence11896(name string) string { + return fmt.Sprintf(` +variable "name" { + default = "%s" +} + + +`, name) +} + +// Case 已重置副本 11895 +func TestAccAliCloudRealtimeComputeDeployment_basic11895(t *testing.T) { + var v map[string]interface{} + resourceId := "alicloud_realtime_compute_deployment.default" + ra := resourceAttrInit(resourceId, AlicloudRealtimeComputeDeploymentMap11895) + rc := resourceCheckInitWithDescribeMethod(resourceId, &v, func() interface{} { + return &RealtimeComputeServiceV2{testAccProvider.Meta().(*connectivity.AliyunClient)} + }, "DescribeRealtimeComputeDeployment") + rac := resourceAttrCheckInit(rc, ra) + testAccCheck := rac.resourceAttrMapUpdateSet() + rand := acctest.RandIntRange(10000, 99999) + name := 
fmt.Sprintf("tfaccrealtimecompute%d", rand) + testAccConfig := resourceTestAccConfigFunc(resourceId, name, AlicloudRealtimeComputeDeploymentBasicDependence11895) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheckWithRegions(t, true, []connectivity.Region{"cn-beijing"}) + testAccPreCheck(t) + }, + IDRefreshName: resourceId, + Providers: testAccProviders, + CheckDestroy: rac.checkResourceDestroy(), + Steps: []resource.TestStep{ + { + Config: testAccConfig(map[string]interface{}{ + "logging": []map[string]interface{}{ + { + "logging_profile": "default", + "log4j_loggers": []map[string]interface{}{ + { + "logger_name": "", + "logger_level": "INFO", + }, + }, + "log_reserve_policy": []map[string]interface{}{ + { + "open_history": "true", + "expiration_days": "7", + }, + }, + }, + }, + "deployment_name": name, + "description": "This is a test deployment.", + "engine_version": "vvr-8.0.6-flink-1.17", + "local_variables": []map[string]interface{}{ + { + "value": "value", + "name": "name", + }, + }, + "execution_mode": "STREAMING", + "labels": map[string]interface{}{ + "\"vvp\"": "nb", + }, + "deployment_target": []map[string]interface{}{ + { + "mode": "PER_JOB", + "name": "default-queue", + }, + }, + "streaming_resource_setting": []map[string]interface{}{ + { + "basic_resource_setting": []map[string]interface{}{ + { + "taskmanager_resource_setting_spec": []map[string]interface{}{ + { + "memory": "1Gi", + "cpu": "1", + }, + }, + "parallelism": "1", + "jobmanager_resource_setting_spec": []map[string]interface{}{ + { + "memory": "1Gi", + "cpu": "1", + }, + }, + }, + }, + "resource_setting_mode": "BASIC", + }, + }, + "namespace": "code-test-tf-deployment-default", + "artifact": []map[string]interface{}{ + { + "kind": "SQLSCRIPT", + "sql_artifact": []map[string]interface{}{ + { + "sql_script": "create temporary table `datagen` ( id varchar, name varchar ) with ( 'connector' = 'datagen' ); create temporary table `blackhole` ( id varchar, name varchar ) 
with ( 'connector' = 'blackhole' ); insert into blackhole select * from datagen;", + "additional_dependencies": []string{ + "oss://bucket-name/a.jar"}, + }, + }, + }, + }, + "resource_id": "e7d4d4f4510947", + "flink_conf": map[string]interface{}{ + "\"execution.checkpointing.interval\"": "180s", + "\"execution.checkpointing.min-pause\"": "180s", + "\"restart-strategy\"": "fixed-delay", + "\"restart-strategy.fixed-delay.attempts\"": "2147483647", + "\"restart-strategy.fixed-delay.delay\"": "10 s", + "\"table.exec.state.ttl\"": "36 h", + }, + }), + Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{ + "deployment_name": name, + "description": "This is a test deployment.", + "engine_version": "vvr-8.0.6-flink-1.17", + "local_variables.#": "1", + "execution_mode": "STREAMING", + "namespace": CHECKSET, + "resource_id": CHECKSET, + }), + ), + }, + { + Config: testAccConfig(map[string]interface{}{ + "logging": []map[string]interface{}{ + { + "log4j_loggers": []map[string]interface{}{ + { + "logger_name": "StdOut", + "logger_level": "DEBUG", + }, + }, + "log_reserve_policy": []map[string]interface{}{ + { + "open_history": "false", + "expiration_days": "5", + }, + }, + }, + }, + "deployment_name": name + "_update", + "description": "This is a test deployment 2.", + "engine_version": "vvr-8.0.10-flink-1.17", + "local_variables": []map[string]interface{}{ + { + "value": "value1", + "name": "name1", + }, + { + "value": "value2", + "name": "name2", + }, + }, + "labels": map[string]interface{}{ + "\"vvp\"": "b", + }, + "streaming_resource_setting": []map[string]interface{}{ + { + "basic_resource_setting": []map[string]interface{}{ + { + "taskmanager_resource_setting_spec": []map[string]interface{}{ + { + "memory": "2Gi", + "cpu": "2", + }, + }, + "parallelism": "2", + "jobmanager_resource_setting_spec": []map[string]interface{}{ + { + "memory": "2Gi", + "cpu": "2", + }, + }, + }, + }, + "resource_setting_mode": "BASIC", + }, + }, + "artifact": 
[]map[string]interface{}{ + { + "kind": "SQLSCRIPT", + "sql_artifact": []map[string]interface{}{ + { + "sql_script": "CREATE TABLE result_table (id BIGINT, name STRING) WITH ( 'connector' = 'jdbc', 'url' = 'jdbc:mysql://localhost:3306/test', 'table-name' = 'result_table' );", + "additional_dependencies": []string{ + "oss://bucket-name/a.jar", "oss://bucket-name/b.jar", "oss://bucket-name/c.jar"}, + }, + }, + }, + }, + "flink_conf": map[string]interface{}{ + "\"execution.checkpointing.interval\"": "180s", + "\"restart-strategy\"": "fixed-delay", + "\"restart-strategy.fixed-delay.attempts\"": "2147483647", + }, + }), + Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{ + "deployment_name": name + "_update", + "description": "This is a test deployment 2.", + "engine_version": "vvr-8.0.10-flink-1.17", + "local_variables.#": "2", + }), + ), + }, + { + Config: testAccConfig(map[string]interface{}{ + "streaming_resource_setting": []map[string]interface{}{ + { + "resource_setting_mode": "EXPERT", + "expert_resource_setting": []map[string]interface{}{ + { + "resource_plan": "{\\\\n \\\\\\\"ssgProfiles\\\\\\\": [\\\\n {\\\\n \\\\\\\"name\\\\\\\": \\\\\\\"default\\\\\\\",\\\\n \\\\\\\"cpu\\\\\\\": 0.26,\\\\n \\\\\\\"heap\\\\\\\": \\\\\\\"1 gb\\\\\\\",\\\\n \\\\\\\"offHeap\\\\\\\": \\\\\\\"32 mb\\\\\\\",\\\\n \\\\\\\"managed\\\\\\\": {},\\\\n \\\\\\\"extended\\\\\\\": {}\\\\n }\\\\n ],\\\\n \\\\\\\"nodes\\\\\\\": [\\\\n {\\\\n \\\\\\\"id\\\\\\\": 1,\\\\n \\\\\\\"type\\\\\\\": \\\\\\\"StreamExecTableSourceScan\\\\\\\",\\\\n \\\\\\\"desc\\\\\\\": \\\\\\\"Source: datagen_source[7]\\\\\\\",\\\\n \\\\\\\"profile\\\\\\\": {\\\\n \\\\\\\"group\\\\\\\": \\\\\\\"default\\\\\\\",\\\\n \\\\\\\"parallelism\\\\\\\": 1,\\\\n \\\\\\\"maxParallelism\\\\\\\": 32768,\\\\n \\\\\\\"minParallelism\\\\\\\": 1\\\\n }\\\\n },\\\\n {\\\\n \\\\\\\"id\\\\\\\": 2,\\\\n \\\\\\\"type\\\\\\\": \\\\\\\"StreamExecCalc\\\\\\\",\\\\n \\\\\\\"desc\\\\\\\": \\\\\\\"Calc[8]\\\\\\\",\\\\n 
\\\\\\\"profile\\\\\\\": {\\\\n \\\\\\\"group\\\\\\\": \\\\\\\"default\\\\\\\",\\\\n \\\\\\\"parallelism\\\\\\\": 1,\\\\n \\\\\\\"maxParallelism\\\\\\\": 32768,\\\\n \\\\\\\"minParallelism\\\\\\\": 1\\\\n }\\\\n },\\\\n {\\\\n \\\\\\\"id\\\\\\\": 3,\\\\n \\\\\\\"type\\\\\\\": \\\\\\\"StreamExecSink\\\\\\\",\\\\n \\\\\\\"desc\\\\\\\": \\\\\\\"ConstraintEnforcer[9]\\\\\\\",\\\\n \\\\\\\"profile\\\\\\\": {\\\\n \\\\\\\"group\\\\\\\": \\\\\\\"default\\\\\\\",\\\\n \\\\\\\"parallelism\\\\\\\": 1,\\\\n \\\\\\\"maxParallelism\\\\\\\": 32768,\\\\n \\\\\\\"minParallelism\\\\\\\": 1\\\\n }\\\\n },\\\\n {\\\\n \\\\\\\"id\\\\\\\": 4,\\\\n \\\\\\\"type\\\\\\\": \\\\\\\"StreamExecSink\\\\\\\",\\\\n \\\\\\\"desc\\\\\\\": \\\\\\\"Sink: vvptest[9]\\\\\\\",\\\\n \\\\\\\"profile\\\\\\\": {\\\\n \\\\\\\"group\\\\\\\": \\\\\\\"default\\\\\\\",\\\\n \\\\\\\"parallelism\\\\\\\": 1,\\\\n \\\\\\\"maxParallelism\\\\\\\": 32768,\\\\n \\\\\\\"minParallelism\\\\\\\": 1\\\\n }\\\\n }\\\\n ],\\\\n \\\\\\\"edges\\\\\\\": [\\\\n {\\\\n \\\\\\\"source\\\\\\\": 1,\\\\n \\\\\\\"target\\\\\\\": 2,\\\\n \\\\\\\"mode\\\\\\\": \\\\\\\"PIPELINED\\\\\\\",\\\\n \\\\\\\"strategy\\\\\\\": \\\\\\\"FORWARD\\\\\\\"\\\\n },\\\\n {\\\\n \\\\\\\"source\\\\\\\": 2,\\\\n \\\\\\\"target\\\\\\\": 3,\\\\n \\\\\\\"mode\\\\\\\": \\\\\\\"PIPELINED\\\\\\\",\\\\n \\\\\\\"strategy\\\\\\\": \\\\\\\"FORWARD\\\\\\\"\\\\n },\\\\n {\\\\n \\\\\\\"source\\\\\\\": 3,\\\\n \\\\\\\"target\\\\\\\": 4,\\\\n \\\\\\\"mode\\\\\\\": \\\\\\\"PIPELINED\\\\\\\",\\\\n \\\\\\\"strategy\\\\\\\": \\\\\\\"FORWARD\\\\\\\"\\\\n }\\\\n ],\\\\n \\\\\\\"vertices\\\\\\\": {\\\\n \\\\\\\"717c7b8afebbfb7137f6f0f99beb2a94\\\\\\\": [\\\\n 1,\\\\n 2,\\\\n 3,\\\\n 4\\\\n ]\\\\n }\\\\n}", + "jobmanager_resource_setting_spec": []map[string]interface{}{ + { + "memory": "2Gi", + "cpu": "2", + }, + }, + }, + }, + }, + }, + "artifact": []map[string]interface{}{ + { + "kind": "SQLSCRIPT", + "sql_artifact": []map[string]interface{}{ + { + "sql_script": "CREATE TABLE 
result_table (id BIGINT, name STRING) WITH ( 'connector' = 'jdbc', 'url' = 'jdbc:mysql://localhost:3306/test', 'table-name' = 'result_table' );", + "additional_dependencies": []string{}, + }, + }, + }, + }, + }), + Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{}), + ), + }, + { + ResourceName: resourceId, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{}, + }, + }, + }) +} + +var AlicloudRealtimeComputeDeploymentMap11895 = map[string]string{} + +func AlicloudRealtimeComputeDeploymentBasicDependence11895(name string) string { + return fmt.Sprintf(` +variable "name" { + default = "%s" +} + +`, name) +} + +// Case test_4 11872 +func TestAccAliCloudRealtimeComputeDeployment_basic11872(t *testing.T) { + var v map[string]interface{} + resourceId := "alicloud_realtime_compute_deployment.default" + ra := resourceAttrInit(resourceId, AlicloudRealtimeComputeDeploymentMap11872) + rc := resourceCheckInitWithDescribeMethod(resourceId, &v, func() interface{} { + return &RealtimeComputeServiceV2{testAccProvider.Meta().(*connectivity.AliyunClient)} + }, "DescribeRealtimeComputeDeployment") + rac := resourceAttrCheckInit(rc, ra) + testAccCheck := rac.resourceAttrMapUpdateSet() + rand := acctest.RandIntRange(10000, 99999) + name := fmt.Sprintf("tfaccrealtimecompute%d", rand) + testAccConfig := resourceTestAccConfigFunc(resourceId, name, AlicloudRealtimeComputeDeploymentBasicDependence11872) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheckWithRegions(t, true, []connectivity.Region{"cn-beijing"}) + testAccPreCheck(t) + }, + IDRefreshName: resourceId, + Providers: testAccProviders, + CheckDestroy: rac.checkResourceDestroy(), + Steps: []resource.TestStep{ + { + Config: testAccConfig(map[string]interface{}{ + "deployment_name": name, + "engine_version": "vvr-8.0.6-flink-1.17", + "resource_id": "e7d4d4f4510947", + "execution_mode": "STREAMING", + "deployment_target": []map[string]interface{}{ + { + 
"mode": "PER_JOB", + "name": "default-queue", + }, + }, + "namespace": "code-test-tf-deployment-default", + "artifact": []map[string]interface{}{ + { + "kind": "PYTHON", + "python_artifact": []map[string]interface{}{ + { + "entry_module": "test.py", + "main_args": "start from main", + "additional_python_archives": []string{ + "oss://bucket-name/c.jar"}, + "additional_python_libraries": []string{ + "oss://bucket-name/b.jar"}, + "additional_dependencies": []string{ + "oss://bucket-name/a.jar"}, + "python_artifact_uri": "oss://bucket-name/main.py", + }, + }, + }, + }, + }), + Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{ + "deployment_name": name, + "engine_version": "vvr-8.0.6-flink-1.17", + "execution_mode": "STREAMING", + }), + ), + }, + { + Config: testAccConfig(map[string]interface{}{ + "artifact": []map[string]interface{}{ + { + "kind": "PYTHON", + "python_artifact": []map[string]interface{}{ + { + "entry_module": "test1.py", + "main_args": "start from main1", + "additional_python_archives": []string{ + "oss://bucket-name/a2.jar", "oss://bucket-name/b2.jar", "oss://bucket-name/c2.jar"}, + "additional_python_libraries": []string{ + "oss://bucket-name/a1.jar", "oss://bucket-name/b1.jar", "oss://bucket-name/c1.jar"}, + "additional_dependencies": []string{ + "oss://bucket-name/a.jar", "oss://bucket-name/b.jar", "oss://bucket-name/c.jar"}, + "python_artifact_uri": "oss://bucket-name/main1.py", + }, + }, + }, + }, + }), + Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{}), + ), + }, + { + Config: testAccConfig(map[string]interface{}{ + "artifact": []map[string]interface{}{ + { + "kind": "PYTHON", + "python_artifact": []map[string]interface{}{ + { + "entry_module": "test1.py", + "main_args": "start from main1", + "additional_python_archives": []string{}, + "additional_python_libraries": []string{}, + "additional_dependencies": []string{}, + "python_artifact_uri": "oss://bucket-name/main.py", + }, + }, + }, + }, + }), + 
Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{}), + ), + }, + { + ResourceName: resourceId, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{}, + }, + }, + }) +} + +var AlicloudRealtimeComputeDeploymentMap11872 = map[string]string{} + +func AlicloudRealtimeComputeDeploymentBasicDependence11872(name string) string { + return fmt.Sprintf(` +variable "name" { + default = "%s" +} + +`, name) +} + +// Case test3 11873 +func TestAccAliCloudRealtimeComputeDeployment_basic11873(t *testing.T) { + var v map[string]interface{} + resourceId := "alicloud_realtime_compute_deployment.default" + ra := resourceAttrInit(resourceId, AlicloudRealtimeComputeDeploymentMap11873) + rc := resourceCheckInitWithDescribeMethod(resourceId, &v, func() interface{} { + return &RealtimeComputeServiceV2{testAccProvider.Meta().(*connectivity.AliyunClient)} + }, "DescribeRealtimeComputeDeployment") + rac := resourceAttrCheckInit(rc, ra) + testAccCheck := rac.resourceAttrMapUpdateSet() + rand := acctest.RandIntRange(10000, 99999) + name := fmt.Sprintf("tfaccrealtimecompute%d", rand) + testAccConfig := resourceTestAccConfigFunc(resourceId, name, AlicloudRealtimeComputeDeploymentBasicDependence11873) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheckWithRegions(t, true, []connectivity.Region{"cn-beijing"}) + testAccPreCheck(t) + }, + IDRefreshName: resourceId, + Providers: testAccProviders, + CheckDestroy: rac.checkResourceDestroy(), + Steps: []resource.TestStep{ + { + Config: testAccConfig(map[string]interface{}{ + "deployment_name": name, + "description": "This is a test deployment.", + "engine_version": "vvr-8.0.10-flink-1.17", + "resource_id": "e7d4d4f4510947", + "execution_mode": "STREAMING", + "deployment_target": []map[string]interface{}{ + { + "mode": "PER_JOB", + "name": "default-queue", + }, + }, + "streaming_resource_setting": []map[string]interface{}{ + { + "resource_setting_mode": "EXPERT", + 
"expert_resource_setting": []map[string]interface{}{ + { + "resource_plan": "{\\\\n \\\\\\\"ssgProfiles\\\\\\\": [\\\\n {\\\\n \\\\\\\"name\\\\\\\": \\\\\\\"default\\\\\\\",\\\\n \\\\\\\"cpu\\\\\\\": 0.26,\\\\n \\\\\\\"heap\\\\\\\": \\\\\\\"1 gb\\\\\\\",\\\\n \\\\\\\"offHeap\\\\\\\": \\\\\\\"32 mb\\\\\\\",\\\\n \\\\\\\"managed\\\\\\\": {},\\\\n \\\\\\\"extended\\\\\\\": {}\\\\n }\\\\n ],\\\\n \\\\\\\"nodes\\\\\\\": [\\\\n {\\\\n \\\\\\\"id\\\\\\\": 1,\\\\n \\\\\\\"type\\\\\\\": \\\\\\\"StreamExecTableSourceScan\\\\\\\",\\\\n \\\\\\\"desc\\\\\\\": \\\\\\\"Source: datagen_source[7]\\\\\\\",\\\\n \\\\\\\"profile\\\\\\\": {\\\\n \\\\\\\"group\\\\\\\": \\\\\\\"default\\\\\\\",\\\\n \\\\\\\"parallelism\\\\\\\": 1,\\\\n \\\\\\\"maxParallelism\\\\\\\": 32768,\\\\n \\\\\\\"minParallelism\\\\\\\": 1\\\\n }\\\\n },\\\\n {\\\\n \\\\\\\"id\\\\\\\": 2,\\\\n \\\\\\\"type\\\\\\\": \\\\\\\"StreamExecCalc\\\\\\\",\\\\n \\\\\\\"desc\\\\\\\": \\\\\\\"Calc[8]\\\\\\\",\\\\n \\\\\\\"profile\\\\\\\": {\\\\n \\\\\\\"group\\\\\\\": \\\\\\\"default\\\\\\\",\\\\n \\\\\\\"parallelism\\\\\\\": 1,\\\\n \\\\\\\"maxParallelism\\\\\\\": 32768,\\\\n \\\\\\\"minParallelism\\\\\\\": 1\\\\n }\\\\n },\\\\n {\\\\n \\\\\\\"id\\\\\\\": 3,\\\\n \\\\\\\"type\\\\\\\": \\\\\\\"StreamExecSink\\\\\\\",\\\\n \\\\\\\"desc\\\\\\\": \\\\\\\"ConstraintEnforcer[9]\\\\\\\",\\\\n \\\\\\\"profile\\\\\\\": {\\\\n \\\\\\\"group\\\\\\\": \\\\\\\"default\\\\\\\",\\\\n \\\\\\\"parallelism\\\\\\\": 1,\\\\n \\\\\\\"maxParallelism\\\\\\\": 32768,\\\\n \\\\\\\"minParallelism\\\\\\\": 1\\\\n }\\\\n },\\\\n {\\\\n \\\\\\\"id\\\\\\\": 4,\\\\n \\\\\\\"type\\\\\\\": \\\\\\\"StreamExecSink\\\\\\\",\\\\n \\\\\\\"desc\\\\\\\": \\\\\\\"Sink: vvptest[9]\\\\\\\",\\\\n \\\\\\\"profile\\\\\\\": {\\\\n \\\\\\\"group\\\\\\\": \\\\\\\"default\\\\\\\",\\\\n \\\\\\\"parallelism\\\\\\\": 1,\\\\n \\\\\\\"maxParallelism\\\\\\\": 32768,\\\\n \\\\\\\"minParallelism\\\\\\\": 1\\\\n }\\\\n }\\\\n ],\\\\n \\\\\\\"edges\\\\\\\": [\\\\n {\\\\n 
\\\\\\\"source\\\\\\\": 1,\\\\n \\\\\\\"target\\\\\\\": 2,\\\\n \\\\\\\"mode\\\\\\\": \\\\\\\"PIPELINED\\\\\\\",\\\\n \\\\\\\"strategy\\\\\\\": \\\\\\\"FORWARD\\\\\\\"\\\\n },\\\\n {\\\\n \\\\\\\"source\\\\\\\": 2,\\\\n \\\\\\\"target\\\\\\\": 3,\\\\n \\\\\\\"mode\\\\\\\": \\\\\\\"PIPELINED\\\\\\\",\\\\n \\\\\\\"strategy\\\\\\\": \\\\\\\"FORWARD\\\\\\\"\\\\n },\\\\n {\\\\n \\\\\\\"source\\\\\\\": 3,\\\\n \\\\\\\"target\\\\\\\": 4,\\\\n \\\\\\\"mode\\\\\\\": \\\\\\\"PIPELINED\\\\\\\",\\\\n \\\\\\\"strategy\\\\\\\": \\\\\\\"FORWARD\\\\\\\"\\\\n }\\\\n ],\\\\n \\\\\\\"vertices\\\\\\\": {\\\\n \\\\\\\"717c7b8afebbfb7137f6f0f99beb2a94\\\\\\\": [\\\\n 1,\\\\n 2,\\\\n 3,\\\\n 4\\\\n ]\\\\n }\\\\n}", + "jobmanager_resource_setting_spec": []map[string]interface{}{ + { + "memory": "2Gi", + "cpu": "2", + }, + }, + }, + }, + }, + }, + "namespace": "code-test-tf-deployment-default", + "artifact": []map[string]interface{}{ + { + "kind": "SQLSCRIPT", + "sql_artifact": []map[string]interface{}{ + { + "sql_script": "create temporary table `datagen` ( id varchar, name varchar ) with ( 'connector' = 'datagen' ); create temporary table `blackhole` ( id varchar, name varchar ) with ( 'connector' = 'blackhole' ); insert into blackhole select * from datagen;", + "additional_dependencies": []string{ + "oss://bucket-name/a.jar"}, + }, + }, + }, + }, + }), + Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{ + "deployment_name": name, + "description": "This is a test deployment.", + "engine_version": "vvr-8.0.10-flink-1.17", + "execution_mode": "STREAMING", + }), + ), + }, + { + Config: testAccConfig(map[string]interface{}{ + "deployment_target": []map[string]interface{}{ + { + "mode": "SESSION", + "name": "tf", + }, + }, + "streaming_resource_setting": []map[string]interface{}{ + { + "resource_setting_mode": "EXPERT", + "expert_resource_setting": []map[string]interface{}{ + { + "resource_plan": "{\\\\n \\\\\\\"ssgProfiles\\\\\\\": [\\\\n {\\\\n \\\\\\\"name\\\\\\\": 
\\\\\\\"default\\\\\\\",\\\\n \\\\\\\"cpu\\\\\\\": 0.26,\\\\n \\\\\\\"heap\\\\\\\": \\\\\\\"1 gb\\\\\\\",\\\\n \\\\\\\"offHeap\\\\\\\": \\\\\\\"32 mb\\\\\\\",\\\\n \\\\\\\"managed\\\\\\\": {},\\\\n \\\\\\\"extended\\\\\\\": {}\\\\n }\\\\n ],\\\\n \\\\\\\"nodes\\\\\\\": [\\\\n {\\\\n \\\\\\\"id\\\\\\\": 1,\\\\n \\\\\\\"type\\\\\\\": \\\\\\\"StreamExecTableSourceScan\\\\\\\",\\\\n \\\\\\\"desc\\\\\\\": \\\\\\\"Source: datagen_source[7]\\\\\\\",\\\\n \\\\\\\"profile\\\\\\\": {\\\\n \\\\\\\"group\\\\\\\": \\\\\\\"default\\\\\\\",\\\\n \\\\\\\"parallelism\\\\\\\": 1,\\\\n \\\\\\\"maxParallelism\\\\\\\": 32768,\\\\n \\\\\\\"minParallelism\\\\\\\": 1\\\\n }\\\\n },\\\\n {\\\\n \\\\\\\"id\\\\\\\": 2,\\\\n \\\\\\\"type\\\\\\\": \\\\\\\"StreamExecCalc\\\\\\\",\\\\n \\\\\\\"desc\\\\\\\": \\\\\\\"Calc[8]\\\\\\\",\\\\n \\\\\\\"profile\\\\\\\": {\\\\n \\\\\\\"group\\\\\\\": \\\\\\\"default\\\\\\\",\\\\n \\\\\\\"parallelism\\\\\\\": 1,\\\\n \\\\\\\"maxParallelism\\\\\\\": 32768,\\\\n \\\\\\\"minParallelism\\\\\\\": 1\\\\n }\\\\n },\\\\n {\\\\n \\\\\\\"id\\\\\\\": 3,\\\\n \\\\\\\"type\\\\\\\": \\\\\\\"StreamExecSink\\\\\\\",\\\\n \\\\\\\"desc\\\\\\\": \\\\\\\"ConstraintEnforcer[9]\\\\\\\",\\\\n \\\\\\\"profile\\\\\\\": {\\\\n \\\\\\\"group\\\\\\\": \\\\\\\"default\\\\\\\",\\\\n \\\\\\\"parallelism\\\\\\\": 1,\\\\n \\\\\\\"maxParallelism\\\\\\\": 32768,\\\\n \\\\\\\"minParallelism\\\\\\\": 1\\\\n }\\\\n },\\\\n {\\\\n \\\\\\\"id\\\\\\\": 4,\\\\n \\\\\\\"type\\\\\\\": \\\\\\\"StreamExecSink\\\\\\\",\\\\n \\\\\\\"desc\\\\\\\": \\\\\\\"Sink: vvptest[9]\\\\\\\",\\\\n \\\\\\\"profile\\\\\\\": {\\\\n \\\\\\\"group\\\\\\\": \\\\\\\"default\\\\\\\",\\\\n \\\\\\\"parallelism\\\\\\\": 2,\\\\n \\\\\\\"maxParallelism\\\\\\\": 32768,\\\\n \\\\\\\"minParallelism\\\\\\\": 1\\\\n }\\\\n }\\\\n ],\\\\n \\\\\\\"edges\\\\\\\": [\\\\n {\\\\n \\\\\\\"source\\\\\\\": 1,\\\\n \\\\\\\"target\\\\\\\": 2,\\\\n \\\\\\\"mode\\\\\\\": \\\\\\\"PIPELINED\\\\\\\",\\\\n \\\\\\\"strategy\\\\\\\": 
\\\\\\\"FORWARD\\\\\\\"\\\\n },\\\\n {\\\\n \\\\\\\"source\\\\\\\": 2,\\\\n \\\\\\\"target\\\\\\\": 3,\\\\n \\\\\\\"mode\\\\\\\": \\\\\\\"PIPELINED\\\\\\\",\\\\n \\\\\\\"strategy\\\\\\\": \\\\\\\"FORWARD\\\\\\\"\\\\n },\\\\n {\\\\n \\\\\\\"source\\\\\\\": 3,\\\\n \\\\\\\"target\\\\\\\": 4,\\\\n \\\\\\\"mode\\\\\\\": \\\\\\\"PIPELINED\\\\\\\",\\\\n \\\\\\\"strategy\\\\\\\": \\\\\\\"FORWARD\\\\\\\"\\\\n }\\\\n ],\\\\n \\\\\\\"vertices\\\\\\\": {\\\\n \\\\\\\"717c7b8afebbfb7137f6f0f99beb2a94\\\\\\\": [\\\\n 1,\\\\n 2,\\\\n 3,\\\\n 4\\\\n ]\\\\n }\\\\n}", + "jobmanager_resource_setting_spec": []map[string]interface{}{ + { + "memory": "1Gi", + "cpu": "1", + }, + }, + }, + }, + }, + }, + }), + Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{}), + ), + }, + { + ResourceName: resourceId, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{}, + }, + }, + }) +} + +var AlicloudRealtimeComputeDeploymentMap11873 = map[string]string{} + +func AlicloudRealtimeComputeDeploymentBasicDependence11873(name string) string { + return fmt.Sprintf(` +variable "name" { + default = "%s" +} + +`, name) +} + +// Case test_2 11870 +func TestAccAliCloudRealtimeComputeDeployment_basic11870(t *testing.T) { + var v map[string]interface{} + resourceId := "alicloud_realtime_compute_deployment.default" + ra := resourceAttrInit(resourceId, AlicloudRealtimeComputeDeploymentMap11870) + rc := resourceCheckInitWithDescribeMethod(resourceId, &v, func() interface{} { + return &RealtimeComputeServiceV2{testAccProvider.Meta().(*connectivity.AliyunClient)} + }, "DescribeRealtimeComputeDeployment") + rac := resourceAttrCheckInit(rc, ra) + testAccCheck := rac.resourceAttrMapUpdateSet() + rand := acctest.RandIntRange(10000, 99999) + name := fmt.Sprintf("tfaccrealtimecompute%d", rand) + testAccConfig := resourceTestAccConfigFunc(resourceId, name, AlicloudRealtimeComputeDeploymentBasicDependence11870) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + 
testAccPreCheckWithRegions(t, true, []connectivity.Region{"cn-beijing"}) + testAccPreCheck(t) + }, + IDRefreshName: resourceId, + Providers: testAccProviders, + CheckDestroy: rac.checkResourceDestroy(), + Steps: []resource.TestStep{ + { + Config: testAccConfig(map[string]interface{}{ + "deployment_name": name, + "description": "This is a test deployment.", + "engine_version": "vvr-8.0.6-flink-1.17", + "resource_id": "e7d4d4f4510947", + "batch_resource_setting": []map[string]interface{}{ + { + "basic_resource_setting": []map[string]interface{}{ + { + "taskmanager_resource_setting_spec": []map[string]interface{}{ + { + "memory": "1Gi", + "cpu": "1", + }, + }, + "parallelism": "1", + "jobmanager_resource_setting_spec": []map[string]interface{}{ + { + "memory": "1Gi", + "cpu": "1", + }, + }, + }, + }, + "max_slot": "1", + }, + }, + "execution_mode": "BATCH", + "deployment_target": []map[string]interface{}{ + { + "mode": "PER_JOB", + "name": "default-queue", + }, + }, + "namespace": "code-test-tf-deployment-default", + "artifact": []map[string]interface{}{ + { + "kind": "SQLSCRIPT", + "sql_artifact": []map[string]interface{}{ + { + "sql_script": "create temporary table `datagen` ( id varchar, name varchar ) with ( 'connector' = 'datagen' ); create temporary table `blackhole` ( id varchar, name varchar ) with ( 'connector' = 'blackhole' ); insert into blackhole select * from datagen;", + "additional_dependencies": []string{ + "oss://bucket-name/a.jar"}, + }, + }, + }, + }, + }), + Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{ + "deployment_name": name, + "description": "This is a test deployment.", + "engine_version": "vvr-8.0.6-flink-1.17", + "execution_mode": "BATCH", + }), + ), + }, + { + Config: testAccConfig(map[string]interface{}{ + "batch_resource_setting": []map[string]interface{}{ + { + "basic_resource_setting": []map[string]interface{}{ + { + "taskmanager_resource_setting_spec": []map[string]interface{}{ + { + "memory": "2Gi", + "cpu": 
"2", + }, + }, + "parallelism": "2", + "jobmanager_resource_setting_spec": []map[string]interface{}{ + { + "memory": "2Gi", + "cpu": "2", + }, + }, + }, + }, + "max_slot": "2", + }, + }, + }), + Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{}), + ), + }, + { + ResourceName: resourceId, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{}, + }, + }, + }) +} + +var AlicloudRealtimeComputeDeploymentMap11870 = map[string]string{} + +func AlicloudRealtimeComputeDeploymentBasicDependence11870(name string) string { + return fmt.Sprintf(` +variable "name" { + default = "%s" +} + +`, name) +} + +// Case test_1_副本1763709151937_副本1763709161121 11871 +func TestAccAliCloudRealtimeComputeDeployment_basic11871(t *testing.T) { + var v map[string]interface{} + resourceId := "alicloud_realtime_compute_deployment.default" + ra := resourceAttrInit(resourceId, AlicloudRealtimeComputeDeploymentMap11871) + rc := resourceCheckInitWithDescribeMethod(resourceId, &v, func() interface{} { + return &RealtimeComputeServiceV2{testAccProvider.Meta().(*connectivity.AliyunClient)} + }, "DescribeRealtimeComputeDeployment") + rac := resourceAttrCheckInit(rc, ra) + testAccCheck := rac.resourceAttrMapUpdateSet() + rand := acctest.RandIntRange(10000, 99999) + name := fmt.Sprintf("tfaccrealtimecompute%d", rand) + testAccConfig := resourceTestAccConfigFunc(resourceId, name, AlicloudRealtimeComputeDeploymentBasicDependence11871) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheckWithRegions(t, true, []connectivity.Region{"cn-beijing"}) + testAccPreCheck(t) + }, + IDRefreshName: resourceId, + Providers: testAccProviders, + CheckDestroy: rac.checkResourceDestroy(), + Steps: []resource.TestStep{ + { + Config: testAccConfig(map[string]interface{}{ + "logging": []map[string]interface{}{ + { + "log4j2_configuration_template": "\\\\n\\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n 
\\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n {%- for name, level in userConfiguredLoggers -%} \\\\n \\\\n {%- endfor -%}\\\\n \\\\n\\\\n \\\\n", + "log4j_loggers": []map[string]interface{}{ + { + "logger_name": "StdOut", + "logger_level": "DEBUG", + }, + { + "logger_name": "", + "logger_level": "INFO", + }, + }, + "log_reserve_policy": []map[string]interface{}{ + { + "open_history": "false", + "expiration_days": "5", + }, + }, + }, + }, + "deployment_name": name, + "engine_version": "vvr-8.0.6-flink-1.17", + "resource_id": "e7d4d4f4510947", + "execution_mode": "STREAMING", + "deployment_target": []map[string]interface{}{ + { + "mode": "PER_JOB", + "name": "default-queue", + }, + }, + "namespace": "code-test-tf-deployment-default", + "artifact": []map[string]interface{}{ + { + "kind": "JAR", + "jar_artifact": []map[string]interface{}{ + { + "jar_uri": "oss://bucket-name/main.jar", + "main_args": "start from main", + "entry_class": "org.apache.flink.test", + "additional_dependencies": []string{ + "oss://bucket-name/a.jar"}, + }, + }, + }, + }, + }), + Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{ + "deployment_name": name, + "engine_version": "vvr-8.0.6-flink-1.17", + "execution_mode": "STREAMING", + }), + ), + }, + { + Config: testAccConfig(map[string]interface{}{ + "artifact": []map[string]interface{}{ + { + "kind": "JAR", + "jar_artifact": []map[string]interface{}{ + { + "jar_uri": "oss://bucket-name/main.jar", + "main_args": "start from main1", + "entry_class": "org.apache.flink.test1", + "additional_dependencies": []string{}, + }, + }, + }, + }, + }), + Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{}), + ), + }, + { + Config: testAccConfig(map[string]interface{}{ + "logging": []map[string]interface{}{ + { + "logging_profile": "", + 
"log4j_loggers": []map[string]interface{}{ + { + "logger_level": "INFO", + }, + }, + "log_reserve_policy": []map[string]interface{}{ + { + "open_history": "true", + "expiration_days": "7", + }, + }, + }, + }, + "artifact": []map[string]interface{}{ + { + "kind": "JAR", + "jar_artifact": []map[string]interface{}{ + { + "jar_uri": "oss://bucket-name/main1.jar", + "main_args": "start from main1", + "entry_class": "org.apache.flink.test1", + "additional_dependencies": []string{ + "oss://bucket-name/a.jar", "oss://bucket-name/b.jar", "oss://bucket-name/c.jar"}, + }, + }, + }, + }, + }), + Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{}), + ), + }, + { + ResourceName: resourceId, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{}, + }, + }, + }) +} + +var AlicloudRealtimeComputeDeploymentMap11871 = map[string]string{} + +func AlicloudRealtimeComputeDeploymentBasicDependence11871(name string) string { + return fmt.Sprintf(` +variable "name" { + default = "%s" +} + +`, name) +} + +// Case test_1 11869 +func TestAccAliCloudRealtimeComputeDeployment_basic11869(t *testing.T) { + var v map[string]interface{} + resourceId := "alicloud_realtime_compute_deployment.default" + ra := resourceAttrInit(resourceId, AlicloudRealtimeComputeDeploymentMap11869) + rc := resourceCheckInitWithDescribeMethod(resourceId, &v, func() interface{} { + return &RealtimeComputeServiceV2{testAccProvider.Meta().(*connectivity.AliyunClient)} + }, "DescribeRealtimeComputeDeployment") + rac := resourceAttrCheckInit(rc, ra) + testAccCheck := rac.resourceAttrMapUpdateSet() + rand := acctest.RandIntRange(10000, 99999) + name := fmt.Sprintf("tfaccrealtimecompute%d", rand) + testAccConfig := resourceTestAccConfigFunc(resourceId, name, AlicloudRealtimeComputeDeploymentBasicDependence11869) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheckWithRegions(t, true, []connectivity.Region{"cn-beijing"}) + testAccPreCheck(t) + }, + 
IDRefreshName: resourceId, + Providers: testAccProviders, + CheckDestroy: rac.checkResourceDestroy(), + Steps: []resource.TestStep{ + { + Config: testAccConfig(map[string]interface{}{ + "logging": []map[string]interface{}{ + { + "logging_profile": "default", + "log4j2_configuration_template": "\\\\n\\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n \\\\n {%- for name, level in userConfiguredLoggers -%} \\\\n \\\\n {%- endfor -%}\\\\n \\\\n\\\\n \\\\n", + "log4j_loggers": []map[string]interface{}{ + { + "logger_name": "StdOut", + "logger_level": "DEBUG", + }, + { + "logger_name": "", + "logger_level": "INFO", + }, + }, + "log_reserve_policy": []map[string]interface{}{ + { + "open_history": "false", + "expiration_days": "5", + }, + }, + }, + }, + "deployment_name": name, + "engine_version": "vvr-8.0.6-flink-1.17", + "resource_id": "e7d4d4f4510947", + "execution_mode": "STREAMING", + "deployment_target": []map[string]interface{}{ + { + "mode": "PER_JOB", + "name": "default-queue", + }, + }, + "namespace": "code-test-tf-deployment-default", + "artifact": []map[string]interface{}{ + { + "kind": "JAR", + "jar_artifact": []map[string]interface{}{ + { + "jar_uri": "oss://bucket-name/main.jar", + "main_args": "start from main", + "entry_class": "org.apache.flink.test", + "additional_dependencies": []string{ + "oss://bucket-name/a.jar"}, + }, + }, + }, + }, + }), + Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{ + "deployment_name": name, + "engine_version": "vvr-8.0.6-flink-1.17", + "execution_mode": "STREAMING", + }), + ), + }, + { + Config: testAccConfig(map[string]interface{}{ + "logging": []map[string]interface{}{ + { + "logging_profile": "", + "log4j_loggers": []map[string]interface{}{ + { 
+ "logger_name": "", + "logger_level": "INFO", + }, + }, + "log_reserve_policy": []map[string]interface{}{ + { + "open_history": "true", + "expiration_days": "7", + }, + }, + }, + }, + "artifact": []map[string]interface{}{ + { + "kind": "JAR", + "jar_artifact": []map[string]interface{}{ + { + "jar_uri": "oss://bucket-name/main.jar", + "main_args": "start from main1", + "entry_class": "org.apache.flink.test1", + "additional_dependencies": []string{}, + }, + }, + }, + }, + }), + Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{}), + ), + }, + { + Config: testAccConfig(map[string]interface{}{ + "logging": []map[string]interface{}{ + { + "logging_profile": "", + "log4j_loggers": []map[string]interface{}{ + { + "logger_name": "", + "logger_level": "INFO", + }, + }, + "log_reserve_policy": []map[string]interface{}{ + { + "open_history": "true", + "expiration_days": "5", + }, + }, + }, + }, + "artifact": []map[string]interface{}{ + { + "kind": "JAR", + "jar_artifact": []map[string]interface{}{ + { + "jar_uri": "oss://bucket-name/main1.jar", + "main_args": "start from main1", + "entry_class": "org.apache.flink.test1", + "additional_dependencies": []string{ + "oss://bucket-name/a.jar", "oss://bucket-name/b.jar", "oss://bucket-name/c.jar"}, + }, + }, + }, + }, + }), + Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{}), + ), + }, + { + ResourceName: resourceId, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{}, + }, + }, + }) +} + +var AlicloudRealtimeComputeDeploymentMap11869 = map[string]string{} + +func AlicloudRealtimeComputeDeploymentBasicDependence11869(name string) string { + return fmt.Sprintf(` +variable "name" { + default = "%s" +} + +`, name) +} + +// Test RealtimeCompute Deployment. <<< Resource test cases, automatically generated. 
diff --git a/alicloud/resource_alicloud_realtime_compute_job.go b/alicloud/resource_alicloud_realtime_compute_job.go new file mode 100644 index 000000000000..cd826d3730d6 --- /dev/null +++ b/alicloud/resource_alicloud_realtime_compute_job.go @@ -0,0 +1,460 @@ +// Package alicloud. This file is generated automatically. Please do not modify it manually, thank you! +package alicloud + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/PaesslerAG/jsonpath" + "github.com/aliyun/terraform-provider-alicloud/alicloud/connectivity" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceAliCloudRealtimeComputeJob() *schema.Resource { + return &schema.Resource{ + Create: resourceAliCloudRealtimeComputeJobCreate, + Read: resourceAliCloudRealtimeComputeJobRead, + Update: resourceAliCloudRealtimeComputeJobUpdate, + Delete: resourceAliCloudRealtimeComputeJobDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(5 * time.Minute), + Update: schema.DefaultTimeout(5 * time.Minute), + Delete: schema.DefaultTimeout(5 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + "deployment_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "job_id": { + Type: schema.TypeString, + Computed: true, + }, + "local_variables": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "value": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + "namespace": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "resource_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "resource_queue_name": { + Type: schema.TypeString, + Optional: true, + }, + 
"restore_strategy": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "savepoint_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "kind": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: StringInSlice([]string{"NONE", "LATEST_SAVEPOINT", "FROM_SAVEPOINT", "LATEST_STATE"}, false), + }, + "allow_non_restored_state": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "job_start_time_in_ms": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + "status": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "health_score": { + Type: schema.TypeInt, + Computed: true, + }, + "current_job_status": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "running": { + Type: schema.TypeList, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "observed_flink_job_restarts": { + Type: schema.TypeInt, + Computed: true, + }, + "observed_flink_job_status": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "risk_level": { + Type: schema.TypeString, + Computed: true, + }, + "failure": { + Type: schema.TypeList, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message": { + Type: schema.TypeString, + Computed: true, + }, + "failed_at": { + Type: schema.TypeInt, + Computed: true, + }, + "reason": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "stop_strategy": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: StringInSlice([]string{"NONE", "STOP_WITH_SAVEPOINT", "STOP_WITH_DRAIN"}, false), + }, + }, + } +} + +func resourceAliCloudRealtimeComputeJobCreate(d *schema.ResourceData, meta interface{}) error { + + client := 
meta.(*connectivity.AliyunClient) + + namespace := d.Get("namespace") + action := fmt.Sprintf("/api/v2/namespaces/%s/jobs:start", namespace) + var request map[string]interface{} + var response map[string]interface{} + header := make(map[string]*string) + query := make(map[string]*string) + body := make(map[string]interface{}) + var err error + request = make(map[string]interface{}) + header["workspace"] = StringPointer(d.Get("resource_id").(string)) + + if v, ok := d.GetOk("local_variables"); ok { + localVariablesMapsArray := make([]interface{}, 0) + for _, dataLoop := range convertToInterfaceArray(v) { + dataLoopTmp := dataLoop.(map[string]interface{}) + dataLoopMap := make(map[string]interface{}) + dataLoopMap["value"] = dataLoopTmp["value"] + dataLoopMap["name"] = dataLoopTmp["name"] + localVariablesMapsArray = append(localVariablesMapsArray, dataLoopMap) + } + request["localVariables"] = localVariablesMapsArray + } + + restoreStrategy := make(map[string]interface{}) + + if v := d.Get("restore_strategy"); !IsNil(v) { + kind1, _ := jsonpath.Get("$[0].kind", v) + if kind1 != nil && kind1 != "" { + restoreStrategy["kind"] = kind1 + } + savepointId1, _ := jsonpath.Get("$[0].savepoint_id", v) + if savepointId1 != nil && savepointId1 != "" { + restoreStrategy["savepointId"] = savepointId1 + } + allowNonRestoredState1, _ := jsonpath.Get("$[0].allow_non_restored_state", v) + if allowNonRestoredState1 != nil && allowNonRestoredState1 != "" { + restoreStrategy["allowNonRestoredState"] = allowNonRestoredState1 + } + jobStartTimeInMs1, _ := jsonpath.Get("$[0].job_start_time_in_ms", v) + if jobStartTimeInMs1 != nil && jobStartTimeInMs1 != "" { + restoreStrategy["jobStartTimeInMs"] = jobStartTimeInMs1 + } + + request["restoreStrategy"] = restoreStrategy + } + + if v, ok := d.GetOk("deployment_id"); ok { + request["deploymentId"] = v + } + if v, ok := d.GetOk("resource_queue_name"); ok { + request["resourceQueueName"] = v + } + body = request + wait := 
incrementalWait(3*time.Second, 5*time.Second) + err = resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError { + response, err = client.RoaPost("ververica", "2022-07-18", action, query, header, body, true) + if err != nil { + if NeedRetry(err) { + wait() + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + addDebug(action, response, request) + + if err != nil { + return WrapErrorf(err, DefaultErrorMsg, "alicloud_realtime_compute_job", action, AlibabaCloudSdkGoERROR) + } + + dataworkspaceVar, _ := jsonpath.Get("$.data.workspace", response) + datanamespaceVar, _ := jsonpath.Get("$.data.namespace", response) + datajobIdVar, _ := jsonpath.Get("$.data.jobId", response) + d.SetId(fmt.Sprintf("%v:%v:%v", dataworkspaceVar, datanamespaceVar, datajobIdVar)) + + realtimeComputeServiceV2 := RealtimeComputeServiceV2{client} + stateConf := BuildStateConf([]string{}, []string{"RUNNING"}, d.Timeout(schema.TimeoutCreate), 15*time.Second, realtimeComputeServiceV2.RealtimeComputeJobStateRefreshFunc(d.Id(), "$.status.currentJobStatus", []string{})) + if _, err := stateConf.WaitForState(); err != nil { + return WrapErrorf(err, IdMsg, d.Id()) + } + + return resourceAliCloudRealtimeComputeJobUpdate(d, meta) +} + +func resourceAliCloudRealtimeComputeJobRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*connectivity.AliyunClient) + realtimeComputeServiceV2 := RealtimeComputeServiceV2{client} + + objectRaw, err := realtimeComputeServiceV2.DescribeRealtimeComputeJob(d.Id()) + if err != nil { + if !d.IsNewResource() && NotFoundError(err) { + log.Printf("[DEBUG] Resource alicloud_realtime_compute_job DescribeRealtimeComputeJob Failed!!! 
%s", err) + d.SetId("") + return nil + } + return WrapError(err) + } + + d.Set("deployment_id", objectRaw["deploymentId"]) + d.Set("job_id", objectRaw["jobId"]) + d.Set("namespace", objectRaw["namespace"]) + d.Set("resource_id", objectRaw["workspace"]) + + localVariablesRaw := objectRaw["localVariables"] + localVariablesMaps := make([]map[string]interface{}, 0) + if localVariablesRaw != nil { + for _, localVariablesChildRaw := range convertToInterfaceArray(localVariablesRaw) { + localVariablesMap := make(map[string]interface{}) + localVariablesChildRaw := localVariablesChildRaw.(map[string]interface{}) + localVariablesMap["name"] = localVariablesChildRaw["name"] + localVariablesMap["value"] = localVariablesChildRaw["value"] + + localVariablesMaps = append(localVariablesMaps, localVariablesMap) + } + } + if err := d.Set("local_variables", localVariablesMaps); err != nil { + return err + } + restoreStrategyMaps := make([]map[string]interface{}, 0) + restoreStrategyMap := make(map[string]interface{}) + restoreStrategyRaw := make(map[string]interface{}) + if objectRaw["restoreStrategy"] != nil { + restoreStrategyRaw = objectRaw["restoreStrategy"].(map[string]interface{}) + } + if len(restoreStrategyRaw) > 0 { + restoreStrategyMap["allow_non_restored_state"] = restoreStrategyRaw["allowNonRestoredState"] + restoreStrategyMap["job_start_time_in_ms"] = restoreStrategyRaw["jobStartTimeInMs"] + restoreStrategyMap["kind"] = restoreStrategyRaw["kind"] + restoreStrategyMap["savepoint_id"] = restoreStrategyRaw["savepointId"] + + restoreStrategyMaps = append(restoreStrategyMaps, restoreStrategyMap) + } + if err := d.Set("restore_strategy", restoreStrategyMaps); err != nil { + return err + } + statusMaps := make([]map[string]interface{}, 0) + statusMap := make(map[string]interface{}) + statusRaw := make(map[string]interface{}) + if objectRaw["status"] != nil { + statusRaw = objectRaw["status"].(map[string]interface{}) + } + if len(statusRaw) > 0 { + statusMap["current_job_status"] 
= statusRaw["currentJobStatus"] + statusMap["health_score"] = statusRaw["healthScore"] + statusMap["risk_level"] = statusRaw["riskLevel"] + + failureMaps := make([]map[string]interface{}, 0) + failureMap := make(map[string]interface{}) + failureRaw := make(map[string]interface{}) + if statusRaw["failure"] != nil { + failureRaw = statusRaw["failure"].(map[string]interface{}) + } + if len(failureRaw) > 0 { + failureMap["failed_at"] = failureRaw["failedAt"] + failureMap["message"] = failureRaw["message"] + failureMap["reason"] = failureRaw["reason"] + + failureMaps = append(failureMaps, failureMap) + } + statusMap["failure"] = failureMaps + runningMaps := make([]map[string]interface{}, 0) + runningMap := make(map[string]interface{}) + runningRaw := make(map[string]interface{}) + if statusRaw["running"] != nil { + runningRaw = statusRaw["running"].(map[string]interface{}) + } + if len(runningRaw) > 0 { + runningMap["observed_flink_job_restarts"] = runningRaw["observedFlinkJobRestarts"] + runningMap["observed_flink_job_status"] = runningRaw["observedFlinkJobStatus"] + + runningMaps = append(runningMaps, runningMap) + } + statusMap["running"] = runningMaps + statusMaps = append(statusMaps, statusMap) + } + if err := d.Set("status", statusMaps); err != nil { + return err + } + + return nil +} + +func resourceAliCloudRealtimeComputeJobUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*connectivity.AliyunClient) + var request map[string]interface{} + var response map[string]interface{} + var header map[string]*string + var query map[string]*string + var body map[string]interface{} + + realtimeComputeServiceV2 := RealtimeComputeServiceV2{client} + objectRaw, _ := realtimeComputeServiceV2.DescribeRealtimeComputeJob(d.Id()) + + if d.HasChange("status.0.current_job_status") { + var err error + target := d.Get("status.0.current_job_status").(string) + + currentStatus, err := jsonpath.Get("$.status.currentJobStatus", objectRaw) + if err != nil { + return 
WrapErrorf(err, FailedGetAttributeMsg, d.Id(), "$.status.currentJobStatus", objectRaw) + } + if fmt.Sprint(currentStatus) != target { + if target == "CANCELLED" { + parts := strings.Split(d.Id(), ":") + namespace := parts[1] + jobId := parts[2] + action := fmt.Sprintf("/api/v2/namespaces/%s/jobs/%s:stop", namespace, jobId) + request = make(map[string]interface{}) + query = make(map[string]*string) + header = make(map[string]*string) + body = make(map[string]interface{}) + header["workspace"] = StringPointer(parts[0]) + + request["stopStrategy"] = d.Get("stop_strategy") + body = request + wait := incrementalWait(3*time.Second, 5*time.Second) + err = resource.Retry(d.Timeout(schema.TimeoutUpdate), func() *resource.RetryError { + response, err = client.RoaPost("ververica", "2022-07-18", action, query, header, body, true) + if err != nil { + if NeedRetry(err) { + wait() + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + addDebug(action, response, request) + if err != nil { + return WrapErrorf(err, DefaultErrorMsg, d.Id(), action, AlibabaCloudSdkGoERROR) + } + realtimeComputeServiceV2 := RealtimeComputeServiceV2{client} + stateConf := BuildStateConf([]string{}, []string{"CANCELLED"}, d.Timeout(schema.TimeoutUpdate), 30*time.Second, realtimeComputeServiceV2.RealtimeComputeJobStateRefreshFunc(d.Id(), "$.status.currentJobStatus", []string{})) + if _, err := stateConf.WaitForState(); err != nil { + return WrapErrorf(err, IdMsg, d.Id()) + } + + } + } + } + + return resourceAliCloudRealtimeComputeJobRead(d, meta) +} + +func resourceAliCloudRealtimeComputeJobDelete(d *schema.ResourceData, meta interface{}) error { + + client := meta.(*connectivity.AliyunClient) + parts := strings.Split(d.Id(), ":") + namespace := parts[1] + jobId := parts[2] + action := fmt.Sprintf("/api/v2/namespaces/%s/jobs/%s", namespace, jobId) + var request map[string]interface{} + var response map[string]interface{} + header := 
make(map[string]*string) + query := make(map[string]*string) + var err error + request = make(map[string]interface{}) + header["workspace"] = StringPointer(parts[0]) + + wait := incrementalWait(3*time.Second, 5*time.Second) + err = resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { + response, err = client.RoaDelete("ververica", "2022-07-18", action, query, header, nil, true) + if err != nil { + if NeedRetry(err) { + wait() + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + addDebug(action, response, request) + + if err != nil { + if IsExpectedErrors(err, []string{"990301"}) || NotFoundError(err) { + return nil + } + return WrapErrorf(err, DefaultErrorMsg, d.Id(), action, AlibabaCloudSdkGoERROR) + } + + return nil +} diff --git a/alicloud/resource_alicloud_realtime_compute_job_test.go b/alicloud/resource_alicloud_realtime_compute_job_test.go new file mode 100644 index 000000000000..01b7456e0409 --- /dev/null +++ b/alicloud/resource_alicloud_realtime_compute_job_test.go @@ -0,0 +1,132 @@ +// Package alicloud. This file is generated automatically. Please do not modify it manually, thank you! +package alicloud + +import ( + "fmt" + "testing" + + "github.com/aliyun/terraform-provider-alicloud/alicloud/connectivity" + "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" +) + +// Test RealtimeCompute Job. >>> Resource test cases, automatically generated. 
+// Case fix_bug 11874 +func TestAccAliCloudRealtimeComputeJob_basic11874(t *testing.T) { + var v map[string]interface{} + resourceId := "alicloud_realtime_compute_job.default" + ra := resourceAttrInit(resourceId, AlicloudRealtimeComputeJobMap11874) + rc := resourceCheckInitWithDescribeMethod(resourceId, &v, func() interface{} { + return &RealtimeComputeServiceV2{testAccProvider.Meta().(*connectivity.AliyunClient)} + }, "DescribeRealtimeComputeJob") + rac := resourceAttrCheckInit(rc, ra) + testAccCheck := rac.resourceAttrMapUpdateSet() + rand := acctest.RandIntRange(10000, 99999) + name := fmt.Sprintf("tfaccrealtimecompute%d", rand) + testAccConfig := resourceTestAccConfigFunc(resourceId, name, AlicloudRealtimeComputeJobBasicDependence11874) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheckWithRegions(t, true, []connectivity.Region{"cn-beijing"}) + testAccPreCheck(t) + }, + IDRefreshName: resourceId, + Providers: testAccProviders, + CheckDestroy: rac.checkResourceDestroy(), + Steps: []resource.TestStep{ + { + Config: testAccConfig(map[string]interface{}{ + "resource_queue_name": "default-queue", + "resource_id": "e7d4d4f4510947", + "local_variables": []map[string]interface{}{ + { + "value": "qq", + "name": "tt", + }, + }, + "restore_strategy": []map[string]interface{}{ + { + "kind": "NONE", + "job_start_time_in_ms": "1763694521254", + }, + }, + "namespace": "code-test-tf-deployment-default", + "deployment_id": "${alicloud_realtime_compute_deployment.create_Deployment5.deployment_id}", + }), + Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{ + "resource_queue_name": "default-queue", + "resource_id": CHECKSET, + "local_variables.#": "1", + "namespace": CHECKSET, + }), + ), + }, + { + Config: testAccConfig(map[string]interface{}{ + "stop_strategy": "NONE", + }), + Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{ + "stop_strategy": "NONE", + }), + ), + }, + { + Config: 
testAccConfig(map[string]interface{}{ + "status": []map[string]interface{}{ + { + "current_job_status": "CANCELLED", + }, + }, + }), + Check: resource.ComposeTestCheckFunc( + testAccCheck(map[string]string{}), + ), + }, + { + ResourceName: resourceId, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"resource_queue_name", "stop_strategy"}, + }, + }, + }) +} + +var AlicloudRealtimeComputeJobMap11874 = map[string]string{ + "status.#": CHECKSET, +} + +func AlicloudRealtimeComputeJobBasicDependence11874(name string) string { + sqlScript := `create temporary table ` + "`datagen`" + ` ( id varchar, name varchar ) with ( 'connector' = 'datagen' ); +create temporary table ` + "`blackhole`" + ` ( id varchar, name varchar ) with ( 'connector' = 'blackhole' ); +insert into blackhole select * from datagen;` + + return fmt.Sprintf(` +variable "name" { + default = "%s" +} + +resource "alicloud_realtime_compute_deployment" "create_Deployment5" { + deployment_name = "tf-test-deployment-sql-24" + engine_version = "vvr-8.0.10-flink-1.17" + resource_id = "e7d4d4f4510947" + execution_mode = "STREAMING" + + deployment_target { + mode = "PER_JOB" + name = "default-queue" + } + + namespace = "code-test-tf-deployment-default" + + artifact { + kind = "SQLSCRIPT" + sql_artifact { + sql_script = %q + } + } +}`, name, sqlScript) +} + +// Test RealtimeCompute Job. <<< Resource test cases, automatically generated. diff --git a/alicloud/service_alicloud_realtime_compute_v2.go b/alicloud/service_alicloud_realtime_compute_v2.go index e59e7dd2fcff..b91de114019b 100644 --- a/alicloud/service_alicloud_realtime_compute_v2.go +++ b/alicloud/service_alicloud_realtime_compute_v2.go @@ -2,6 +2,7 @@ package alicloud import ( "fmt" + "strings" "time" "github.com/PaesslerAG/jsonpath" @@ -170,3 +171,173 @@ func (s *RealtimeComputeServiceV2) SetResourceTags(d *schema.ResourceData, resou } // SetResourceTags >>> tag function encapsulated. 
+ +// DescribeRealtimeComputeDeployment <<< Encapsulated get interface for RealtimeCompute Deployment. + +func (s *RealtimeComputeServiceV2) DescribeRealtimeComputeDeployment(id string) (object map[string]interface{}, err error) { + client := s.client + var request map[string]interface{} + var response map[string]interface{} + var query map[string]*string + var header map[string]*string + parts := strings.Split(id, ":") + if len(parts) != 3 { + err = WrapError(fmt.Errorf("invalid Resource Id %s. Expected parts' length %d, got %d", id, 3, len(parts))) + return nil, err + } + deploymentId := parts[2] + namespace := parts[1] + request = make(map[string]interface{}) + query = make(map[string]*string) + header = make(map[string]*string) + header["workspace"] = StringPointer(parts[0]) + + action := fmt.Sprintf("/api/v2/namespaces/%s/deployments/%s", namespace, deploymentId) + + wait := incrementalWait(3*time.Second, 5*time.Second) + err = resource.Retry(1*time.Minute, func() *resource.RetryError { + response, err = client.RoaGet("ververica", "2022-07-18", action, query, header, nil) + + if err != nil { + if NeedRetry(err) { + wait() + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + addDebug(action, response, request) + if err != nil { + if IsExpectedErrors(err, []string{"990301"}) { + return object, WrapErrorf(NotFoundErr("Deployment", id), NotFoundMsg, response) + } + return object, WrapErrorf(err, DefaultErrorMsg, id, action, AlibabaCloudSdkGoERROR) + } + + v, err := jsonpath.Get("$.data", response) + if err != nil { + return object, WrapErrorf(err, FailedGetAttributeMsg, id, "$.data", response) + } + + return v.(map[string]interface{}), nil +} + +func (s *RealtimeComputeServiceV2) RealtimeComputeDeploymentStateRefreshFunc(id string, field string, failStates []string) resource.StateRefreshFunc { + return s.RealtimeComputeDeploymentStateRefreshFuncWithApi(id, field, failStates, s.DescribeRealtimeComputeDeployment) 
+} + +func (s *RealtimeComputeServiceV2) RealtimeComputeDeploymentStateRefreshFuncWithApi(id string, field string, failStates []string, call func(id string) (map[string]interface{}, error)) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + object, err := call(id) + if err != nil { + if NotFoundError(err) { + return object, "", nil + } + return nil, "", WrapError(err) + } + v, err := jsonpath.Get(field, object) + currentStatus := fmt.Sprint(v) + + if strings.HasPrefix(field, "#") { + v, _ := jsonpath.Get(strings.TrimPrefix(field, "#"), object) + if v != nil { + currentStatus = "#CHECKSET" + } + } + + for _, failState := range failStates { + if currentStatus == failState { + return object, currentStatus, WrapError(Error(FailedToReachTargetStatus, currentStatus)) + } + } + return object, currentStatus, nil + } +} + +// DescribeRealtimeComputeDeployment >>> Encapsulated. + +// DescribeRealtimeComputeJob <<< Encapsulated get interface for RealtimeCompute Job. + +func (s *RealtimeComputeServiceV2) DescribeRealtimeComputeJob(id string) (object map[string]interface{}, err error) { + client := s.client + var request map[string]interface{} + var response map[string]interface{} + var query map[string]*string + var header map[string]*string + parts := strings.Split(id, ":") + if len(parts) != 3 { + err = WrapError(fmt.Errorf("invalid Resource Id %s. 
Expected parts' length %d, got %d", id, 3, len(parts))) + return nil, err + } + jobId := parts[2] + namespace := parts[1] + request = make(map[string]interface{}) + query = make(map[string]*string) + header = make(map[string]*string) + header["workspace"] = StringPointer(parts[0]) + + action := fmt.Sprintf("/api/v2/namespaces/%s/jobs/%s", namespace, jobId) + + wait := incrementalWait(3*time.Second, 5*time.Second) + err = resource.Retry(1*time.Minute, func() *resource.RetryError { + response, err = client.RoaGet("ververica", "2022-07-18", action, query, header, nil) + + if err != nil { + if NeedRetry(err) { + wait() + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + addDebug(action, response, request) + if err != nil { + if IsExpectedErrors(err, []string{"990301"}) { + return object, WrapErrorf(NotFoundErr("Job", id), NotFoundMsg, response) + } + return object, WrapErrorf(err, DefaultErrorMsg, id, action, AlibabaCloudSdkGoERROR) + } + + v, err := jsonpath.Get("$.data", response) + if err != nil { + return object, WrapErrorf(err, FailedGetAttributeMsg, id, "$.data", response) + } + + return v.(map[string]interface{}), nil +} + +func (s *RealtimeComputeServiceV2) RealtimeComputeJobStateRefreshFunc(id string, field string, failStates []string) resource.StateRefreshFunc { + return s.RealtimeComputeJobStateRefreshFuncWithApi(id, field, failStates, s.DescribeRealtimeComputeJob) +} + +func (s *RealtimeComputeServiceV2) RealtimeComputeJobStateRefreshFuncWithApi(id string, field string, failStates []string, call func(id string) (map[string]interface{}, error)) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + object, err := call(id) + if err != nil { + if NotFoundError(err) { + return object, "", nil + } + return nil, "", WrapError(err) + } + v, err := jsonpath.Get(field, object) + currentStatus := fmt.Sprint(v) + + if strings.HasPrefix(field, "#") { + v, _ := 
jsonpath.Get(strings.TrimPrefix(field, "#"), object) + if v != nil { + currentStatus = "#CHECKSET" + } + } + + for _, failState := range failStates { + if currentStatus == failState { + return object, currentStatus, WrapError(Error(FailedToReachTargetStatus, currentStatus)) + } + } + return object, currentStatus, nil + } +} + +// DescribeRealtimeComputeJob >>> Encapsulated. diff --git a/website/docs/r/realtime_compute_deployment.html.markdown b/website/docs/r/realtime_compute_deployment.html.markdown new file mode 100644 index 000000000000..8a8581d28272 --- /dev/null +++ b/website/docs/r/realtime_compute_deployment.html.markdown @@ -0,0 +1,283 @@ +--- +subcategory: "Realtime Compute" +layout: "alicloud" +page_title: "Alicloud: alicloud_realtime_compute_deployment" +description: |- + Provides a Alicloud Realtime Compute Deployment resource. +--- + +# alicloud_realtime_compute_deployment + +Provides a Realtime Compute Deployment resource. + +Deployment in the Realtime Compute console. + +For information about Realtime Compute Deployment and how to use it, see [What is Deployment](https://next.api.alibabacloud.com/document/ververica/2022-07-18/CreateDeployment). + +-> **NOTE:** Available since v1.264.0. 
+ +## Example Usage + +Basic Usage + +```terraform +variable "name" { + default = "terraform-example" +} + +provider "alicloud" { + region = "cn-hangzhou" +} + +resource "alicloud_vpc" "create_Vpc" { + is_default = false + cidr_block = "172.16.0.0/16" + vpc_name = "example-tf-vpc-deployment" +} + +resource "alicloud_vswitch" "create_Vswitch" { + is_default = false + vpc_id = alicloud_vpc.create_Vpc.id + zone_id = "cn-beijing-g" + cidr_block = "172.16.0.0/24" + vswitch_name = "example-tf-vSwitch-deployment" +} + +resource "alicloud_oss_bucket" "create_bucket" { +} + +resource "alicloud_realtime_compute_vvp_instance" "create_VvpInstance" { + vvp_instance_name = "code-example-tf-deployment" + storage { + oss { + bucket = alicloud_oss_bucket.create_bucket.id + } + } + vpc_id = alicloud_vpc.create_Vpc.id + vswitch_ids = ["${alicloud_vswitch.create_Vswitch.id}"] + resource_spec { + cpu = "4" + memory_gb = "16" + } + payment_type = "PayAsYouGo" +} + + +resource "alicloud_realtime_compute_deployment" "default" { + logging { + logging_profile = "default" + log4j_loggers { + logger_level = "INFO" + } + log_reserve_policy { + open_history = true + expiration_days = "7" + } + } + deployment_name = "tf-example-deployment-sql-74" + description = "This is a example deployment." 
+ engine_version = "vvr-8.0.6-flink-1.17" + local_variables { + value = "value" + name = "name" + } + execution_mode = "STREAMING" + labels { + } + deployment_target { + mode = "PER_JOB" + name = "default-queue" + } + streaming_resource_setting { + basic_resource_setting { + taskmanager_resource_setting_spec { + memory = "1Gi" + cpu = 1 + } + parallelism = "1" + jobmanager_resource_setting_spec { + memory = "1Gi" + cpu = 1 + } + } + resource_setting_mode = "BASIC" + } + namespace = "${alicloud_realtime_compute_vvp_instance.create_VvpInstance.vvp_instance_name}-default" + artifact { + kind = "SQLSCRIPT" + sql_artifact { + sql_script = "create temporary table `datagen` ( id varchar, name varchar ) with ( \\'connector\\' = \\'datagen\\' ); create temporary table `blackhole` ( id varchar, name varchar ) with ( \\'connector\\' = \\'blackhole\\' ); insert into blackhole select * from datagen;" + additional_dependencies = ["oss://bucket-name/a.jar"] + } + } + resource_id = alicloud_realtime_compute_vvp_instance.create_VvpInstance.resource_id + flink_conf { + } +} +``` + +## Argument Reference + +The following arguments are supported: +* `artifact` - (Required, List) The content of deployment See [`artifact`](#artifact) below. +* `batch_resource_setting` - (Optional, List) batch resource setting See [`batch_resource_setting`](#batch_resource_setting) below. +* `deployment_name` - (Required) Name of the deployment +* `deployment_target` - (Required, List) Deployment target See [`deployment_target`](#deployment_target) below. +* `description` - (Optional) The description of deployment +* `engine_version` - (Optional, Computed) The engine version of the deployment +* `execution_mode` - (Required, ForceNew) Execution mode,STREAMING/BATCH +* `flink_conf` - (Optional, Computed, Map) flink configurations +* `labels` - (Optional, Map) deployment label +* `local_variables` - (Optional, Set) Local variables See [`local_variables`](#local_variables) below. 
+* `logging` - (Optional, Computed, List) logging config See [`logging`](#logging) below. +* `namespace` - (Required, ForceNew) The name of vvpnamespace. +* `resource_id` - (Optional, ForceNew, Computed) ResourceId of vvpinstance +* `streaming_resource_setting` - (Optional, Computed, List) streaming resource setting See [`streaming_resource_setting`](#streaming_resource_setting) below. + +### `artifact` + +The artifact supports the following: +* `jar_artifact` - (Optional, List) Jar artifact See [`jar_artifact`](#artifact-jar_artifact) below. +* `kind` - (Required, ForceNew) Artifact kind +* `python_artifact` - (Optional, List) Python artifact See [`python_artifact`](#artifact-python_artifact) below. +* `sql_artifact` - (Optional, List) Sql artifact See [`sql_artifact`](#artifact-sql_artifact) below. + +### `artifact-jar_artifact` + +The artifact-jar_artifact supports the following: +* `additional_dependencies` - (Optional, List) The additional dependencies of jar +* `entry_class` - (Optional) entry class of jar +* `jar_uri` - (Optional) The url of JAR +* `main_args` - (Optional) Main args of jar + +### `artifact-python_artifact` + +The artifact-python_artifact supports the following: +* `additional_dependencies` - (Optional, List) The url of additional dependencies +* `additional_python_archives` - (Optional, List) The url of python archives +* `additional_python_libraries` - (Optional, List) The url of python lib +* `entry_module` - (Optional) Entry module +* `main_args` - (Optional) Main args +* `python_artifact_uri` - (Optional) The url of python artifact + +### `artifact-sql_artifact` + +The artifact-sql_artifact supports the following: +* `additional_dependencies` - (Optional, List) The url of additional dependencies +* `sql_script` - (Optional) Sql script + +### `batch_resource_setting` + +The batch_resource_setting supports the following: +* `basic_resource_setting` - (Optional, List) basic resource setting See 
[`basic_resource_setting`](#batch_resource_setting-basic_resource_setting) below. +* `max_slot` - (Optional, Int) max slot + +### `batch_resource_setting-basic_resource_setting` + +The batch_resource_setting-basic_resource_setting supports the following: +* `jobmanager_resource_setting_spec` - (Optional, List) JobManager resource setting See [`jobmanager_resource_setting_spec`](#batch_resource_setting-basic_resource_setting-jobmanager_resource_setting_spec) below. +* `parallelism` - (Optional, Int) Parallelism +* `taskmanager_resource_setting_spec` - (Optional, List) TaskManager resource setting See [`taskmanager_resource_setting_spec`](#batch_resource_setting-basic_resource_setting-taskmanager_resource_setting_spec) below. + +### `batch_resource_setting-basic_resource_setting-jobmanager_resource_setting_spec` + +The batch_resource_setting-basic_resource_setting-jobmanager_resource_setting_spec supports the following: +* `cpu` - (Optional, Float) CPU +* `memory` - (Optional) Memory + +### `batch_resource_setting-basic_resource_setting-taskmanager_resource_setting_spec` + +The batch_resource_setting-basic_resource_setting-taskmanager_resource_setting_spec supports the following: +* `cpu` - (Optional, Float) CPU +* `memory` - (Optional) Memory + +### `deployment_target` + +The deployment_target supports the following: +* `mode` - (Required) deployment mode +* `name` - (Required) target name + +### `local_variables` + +The local_variables supports the following: +* `name` - (Optional) Local variable name +* `value` - (Optional) Local variable value + +### `logging` + +The logging supports the following: +* `log4j2_configuration_template` - (Optional, Computed) Custom Log Template +* `log4j_loggers` - (Optional, Computed, Set) log4j config See [`log4j_loggers`](#logging-log4j_loggers) below. +* `log_reserve_policy` - (Optional, Computed, List) Log reserve policy See [`log_reserve_policy`](#logging-log_reserve_policy) below. 
+* `logging_profile` - (Optional, Computed) System log template + +### `logging-log4j_loggers` + +The logging-log4j_loggers supports the following: +* `logger_level` - (Optional, Computed) Logger level +* `logger_name` - (Optional, Computed) Class name of the output log + +### `logging-log_reserve_policy` + +The logging-log_reserve_policy supports the following: +* `expiration_days` - (Optional, Computed, Int) Expiration days +* `open_history` - (Optional, Computed) Enable log saving + +### `streaming_resource_setting` + +The streaming_resource_setting supports the following: +* `basic_resource_setting` - (Optional, Computed, List) resource setting in basic mode See [`basic_resource_setting`](#streaming_resource_setting-basic_resource_setting) below. +* `expert_resource_setting` - (Optional, Computed, List) expert resource setting See [`expert_resource_setting`](#streaming_resource_setting-expert_resource_setting) below. +* `resource_setting_mode` - (Optional, Computed) Resource setting mode + +### `streaming_resource_setting-basic_resource_setting` + +The streaming_resource_setting-basic_resource_setting supports the following: +* `jobmanager_resource_setting_spec` - (Optional, Computed, List) JobManager resource setting See [`jobmanager_resource_setting_spec`](#streaming_resource_setting-basic_resource_setting-jobmanager_resource_setting_spec) below. +* `parallelism` - (Optional, Computed, Int) Parallelism +* `taskmanager_resource_setting_spec` - (Optional, Computed, List) TaskManager basic resource setting See [`taskmanager_resource_setting_spec`](#streaming_resource_setting-basic_resource_setting-taskmanager_resource_setting_spec) below. 
+
+### `streaming_resource_setting-expert_resource_setting`
+
+The streaming_resource_setting-expert_resource_setting supports the following:
+* `jobmanager_resource_setting_spec` - (Optional, List) JobManager resource setting See [`jobmanager_resource_setting_spec`](#streaming_resource_setting-expert_resource_setting-jobmanager_resource_setting_spec) below.
+* `resource_plan` - (Optional) resource plan in expert mode
+
+### `streaming_resource_setting-expert_resource_setting-jobmanager_resource_setting_spec`
+
+The streaming_resource_setting-expert_resource_setting-jobmanager_resource_setting_spec supports the following:
+* `cpu` - (Optional, Float) CPU
+* `memory` - (Optional) Memory
+
+### `streaming_resource_setting-basic_resource_setting-jobmanager_resource_setting_spec`
+
+The streaming_resource_setting-basic_resource_setting-jobmanager_resource_setting_spec supports the following:
+* `cpu` - (Optional, Computed, Float) CPU
+* `memory` - (Optional, Computed) Memory
+
+### `streaming_resource_setting-basic_resource_setting-taskmanager_resource_setting_spec`
+
+The streaming_resource_setting-basic_resource_setting-taskmanager_resource_setting_spec supports the following:
+* `cpu` - (Optional, Computed, Float) CPU
+* `memory` - (Optional, Computed) Memory
+
+## Attributes Reference
+
+The following attributes are exported:
+* `id` - The ID of the resource supplied above. The value is formulated as `<resource_id>:<namespace>:<deployment_id>`.
+* `deployment_id` - The first ID of the resource
+
+## Timeouts
+
+The `timeouts` block allows you to specify [timeouts](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts) for certain actions:
+* `create` - (Defaults to 5 mins) Used when create the Deployment.
+* `delete` - (Defaults to 5 mins) Used when delete the Deployment.
+* `update` - (Defaults to 5 mins) Used when update the Deployment.
+
+## Import
+
+Realtime Compute Deployment can be imported using the id, e.g.
+
+```shell
+$ terraform import alicloud_realtime_compute_deployment.example <resource_id>:<namespace>:<deployment_id>
+```
\ No newline at end of file
diff --git a/website/docs/r/realtime_compute_job.html.markdown b/website/docs/r/realtime_compute_job.html.markdown
new file mode 100644
index 000000000000..a911463a124c
--- /dev/null
+++ b/website/docs/r/realtime_compute_job.html.markdown
@@ -0,0 +1,166 @@
+---
+subcategory: "Realtime Compute"
+layout: "alicloud"
+page_title: "Alicloud: alicloud_realtime_compute_job"
+description: |-
+  Provides a Alicloud Realtime Compute Job resource.
+---
+
+# alicloud_realtime_compute_job
+
+Provides a Realtime Compute Job resource.
+
+
+
+For information about Realtime Compute Job and how to use it, see [What is Job](https://next.api.alibabacloud.com/document/ververica/2022-07-18/StartJobWithParams).
+
+-> **NOTE:** Available since v1.264.0.
+
+## Example Usage
+
+Basic Usage
+
+```terraform
+variable "name" {
+  default = "terraform-example"
+}
+
+provider "alicloud" {
+  region = "cn-hangzhou"
+}
+
+resource "alicloud_vpc" "create_Vpc5" {
+  is_default = false
+  cidr_block = "172.16.0.0/16"
+  vpc_name   = "example-tf-vpc-deployment"
+}
+
+resource "alicloud_vswitch" "create_Vswitch5" {
+  is_default   = false
+  vpc_id       = alicloud_vpc.create_Vpc5.id
+  zone_id      = "cn-hangzhou-h"
+  cidr_block   = "172.16.0.0/24"
+  vswitch_name = "example-tf-vSwitch-deployment"
+}
+
+resource "alicloud_oss_bucket" "create_bucket5" {
+}
+
+resource "alicloud_realtime_compute_vvp_instance" "create_VvpInstance5" {
+  vvp_instance_name = "code-example-tf-deployment"
+  storage {
+    oss {
+      bucket = alicloud_oss_bucket.create_bucket5.id
+    }
+  }
+  vpc_id      = alicloud_vpc.create_Vpc5.id
+  vswitch_ids = [alicloud_vswitch.create_Vswitch5.id]
+  resource_spec {
+    cpu       = "4"
+    memory_gb = "16"
+  }
+  payment_type = "PayAsYouGo"
+}
+
+resource "alicloud_realtime_compute_deployment" "create_Deployment5" {
+  deployment_name = "tf-example-deployment-sql-24"
+  engine_version  = "vvr-8.0.10-flink-1.17"
+  resource_id     = alicloud_realtime_compute_vvp_instance.create_VvpInstance5.resource_id
+  execution_mode  = "STREAMING"
+  deployment_target {
+    mode = "PER_JOB"
+    name = "default-queue"
+  }
+  namespace = "${alicloud_realtime_compute_vvp_instance.create_VvpInstance5.vvp_instance_name}-default"
+  artifact {
+    kind = "SQLSCRIPT"
+    sql_artifact {
+      sql_script = "create temporary table `datagen` ( id varchar, name varchar ) with ( 'connector' = 'datagen' ); create temporary table `blackhole` ( id varchar, name varchar ) with ( 'connector' = 'blackhole' ); insert into blackhole select * from datagen;"
+    }
+  }
+}
+
+
+resource "alicloud_realtime_compute_job" "default" {
+  deployment_id       = alicloud_realtime_compute_deployment.create_Deployment5.deployment_id
+  resource_queue_name = "default-queue"
+  resource_id         = alicloud_realtime_compute_vvp_instance.create_VvpInstance5.resource_id
+  local_variables {
+    value = "qq"
+    name  = "tt"
+  }
+  restore_strategy {
+    kind                 = "NONE"
+    job_start_time_in_ms = "1763694521254"
+  }
+  namespace = "${alicloud_realtime_compute_vvp_instance.create_VvpInstance5.vvp_instance_name}-default"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+* `resource_id` - (Required, ForceNew) resourceId
+* `deployment_id` - (Optional, ForceNew) deploymentId
+* `local_variables` - (Optional, ForceNew, List) Local variables See [`local_variables`](#local_variables) below.
+* `namespace` - (Required, ForceNew) namespace
+* `resource_queue_name` - (Optional) Resource Queue for Job Run
+
+-> **NOTE:** The parameter is immutable after resource creation. It only applies during resource creation and has no effect when modified post-creation.
+
+* `restore_strategy` - (Optional, ForceNew, List) Restore strategy See [`restore_strategy`](#restore_strategy) below.
+* `status` - (Optional, Computed, List) job status See [`status`](#status) below.
+* `stop_strategy` - (Optional) Job Stop Policy
+
+-> **NOTE:** This parameter only applies during resource update. If modified in isolation without other property changes, Terraform will not trigger any action.
+
+
+### `local_variables`
+
+The local_variables supports the following:
+* `name` - (Optional, ForceNew) Local variables name
+* `value` - (Optional, ForceNew) Local variables value
+
+### `restore_strategy`
+
+The restore_strategy supports the following:
+* `allow_non_restored_state` - (Optional, ForceNew) Stateless startup
+* `job_start_time_in_ms` - (Optional, ForceNew, Int) Stateless start time. When stateless start is selected, you can set this parameter to enable all source tables that support startTime to read data from this time.
+* `kind` - (Optional, ForceNew) Restore type
+* `savepoint_id` - (Optional, ForceNew) SavepointId
+
+### `status`
+
+The status supports the following:
+* `current_job_status` - (Optional) Job current status
+
+## Attributes Reference
+
+The following attributes are exported:
+* `id` - The ID of the resource supplied above. The value is formulated as `<resource_id>:<namespace>:<job_id>`.
+* `job_id` - The first ID of the resource
+* `status` - job status
+  * `failure` - Job failure information
+    * `failed_at` - Job failure time
+    * `message` - Failure Information Details
+    * `reason` - Failure Reason
+  * `health_score` - Job Run Health Score
+  * `risk_level` - Risk level, which indicates the risk level of the operation status of the job.
+  * `running` - job running status, which has value when the job is Running.
+    * `observed_flink_job_restarts` - Number of job restarts
+    * `observed_flink_job_status` - Flink job status
+
+## Timeouts
+
+The `timeouts` block allows you to specify [timeouts](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts) for certain actions:
+* `create` - (Defaults to 5 mins) Used when create the Job.
+* `delete` - (Defaults to 5 mins) Used when delete the Job.
+* `update` - (Defaults to 5 mins) Used when update the Job.
+
+## Import
+
+Realtime Compute Job can be imported using the id, e.g.
+
+```shell
+$ terraform import alicloud_realtime_compute_job.example <resource_id>:<namespace>:<job_id>
+```
\ No newline at end of file