From 3e83f9453c50f0503953e5054c9e91b565aeb63b Mon Sep 17 00:00:00 2001 From: danaelhe Date: Tue, 10 Sep 2024 16:59:26 -0400 Subject: [PATCH 01/13] Databases: Add Logsink Support --- .../database/resource_database_logsink.go | 331 ++++++++++++++++++ digitalocean/provider.go | 1 + docs/resources/database_logsink.md | 82 +++++ examples/logsink/main.tf | 37 ++ examples/tutorial/main.tf | 22 ++ go.mod | 2 +- go.sum | 4 +- .../github.com/digitalocean/godo/CHANGELOG.md | 25 ++ vendor/github.com/digitalocean/godo/apps.go | 8 + .../github.com/digitalocean/godo/databases.go | 214 ++++++++++- vendor/github.com/digitalocean/godo/godo.go | 2 +- vendor/modules.txt | 2 +- 12 files changed, 723 insertions(+), 7 deletions(-) create mode 100644 digitalocean/database/resource_database_logsink.go create mode 100644 docs/resources/database_logsink.md create mode 100644 examples/logsink/main.tf create mode 100644 examples/tutorial/main.tf diff --git a/digitalocean/database/resource_database_logsink.go b/digitalocean/database/resource_database_logsink.go new file mode 100644 index 000000000..5bbeb4799 --- /dev/null +++ b/digitalocean/database/resource_database_logsink.go @@ -0,0 +1,331 @@ +package database + +import ( + "context" + "errors" + "fmt" + "log" + "strings" + "time" + + "github.com/digitalocean/godo" + "github.com/digitalocean/terraform-provider-digitalocean/digitalocean/config" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func ResourceDigitalOceanDatabaseLogsink() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceDigitalOceanDatabaseLogsinkCreate, + ReadContext: resourceDigitalOceanDatabaseLogsinkRead, + UpdateContext: resourceDigitalOceanDatabaseLogsinkUpdate, + DeleteContext: resourceDigitalOceanDatabaseLogsinkDelete, + Importer: &schema.ResourceImporter{ + State: resourceDigitalOceanDatabaseLogsinkImport, + }, + + Schema: map[string]*schema.Schema{ + "cluster_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.NoZeroValues, + }, + "sink_id": { + Type: schema.TypeString, + ForceNew: true, + Computed: true, + }, + "sink_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "sink_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "rsyslog", + "elasticsearch", + "opensearch", + }, false), + }, + "config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: false, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "server": { + Type: schema.TypeString, + Optional: true, + Description: "DNS name or IPv4 address of the rsyslog server. Required for rsyslog.", + }, + "port": { + Type: schema.TypeInt, + Optional: true, + Description: "The internal port on which the rsyslog server is listening. Required for rsyslog", + }, + "tls": { + Type: schema.TypeBool, + Optional: true, + Description: "Use TLS (as the messages are not filtered and may contain sensitive information, it is highly recommended to set this to true if the remote server supports it). 
Required for rsyslog.", + }, + "format": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "rfc5424", + "rfc3164", + "custom", + }, false), + Description: "Message format used by the server, this can be either rfc3164 (the old BSD style message format), rfc5424 (current syslog message format) or custom. Required for rsyslog.", + }, + "logline": { + Type: schema.TypeString, + Optional: true, + Description: "Conditional (required if format == custom). Syslog log line template for a custom format, supporting limited rsyslog style templating (using %tag%). Supported tags are: HOSTNAME, app-name, msg, msgid, pri, procid, structured-data, timestamp and timestamp:::date-rfc3339.", + }, + "sd": { + Type: schema.TypeString, + Optional: true, + Description: "Content of the structured data block of rfc5424 message", + }, + "ca": { + Type: schema.TypeString, + Optional: true, + Description: "PEM encoded CA certificate", + }, + "key": { + Type: schema.TypeString, + Optional: true, + Description: "(PEM format) client key if the server requires client authentication", + }, + "cert": { + Type: schema.TypeString, + Optional: true, + Description: "(PEM format) client cert to use", + }, + "index_days_max": { + Type: schema.TypeInt, + Optional: true, + Description: "Default 7 days. Maximum number of days of logs to keep", + }, + "url": { + Type: schema.TypeString, + Optional: true, + Description: "Connection URL. Required for Elasticsearch and Opensearch.", + }, + "index_prefix": { + Type: schema.TypeString, + Optional: true, + Description: "Index prefix. Required for Opensearch and Elasticsearch.", + }, + "timeout": { + Type: schema.TypeFloat, + Optional: true, + Description: "Default 10 days. Elasticsearch/Opensearch request timeout limit", + }, + }, + }, + }, + }, + } +} + +func resourceDigitalOceanDatabaseLogsinkCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + client := meta.(*config.CombinedConfig).GodoClient() + clusterID := d.Get("cluster_id").(string) + + opts := &godo.DatabaseCreateLogsinkRequest{ + Name: d.Get("sink_name").(string), + Type: d.Get("sink_type").(string), + } + + if v, ok := d.GetOk("config"); ok { + opts.Config = expandLogsinkConfig(v.([]interface{})) + } + + log.Printf("[DEBUG] Database logsink create configuration: %#v", opts) + logsink, _, err := client.Databases.CreateLogsink(context.Background(), clusterID, opts) + if err != nil { + return diag.Errorf("Error creating database logsink: %s", err) + } + + time.Sleep(30 * time.Second) + + log.Printf("[DEBUGGGG] Database LOGSINK NAMEE: %#v", logsink.Name) + + logsinkIDFormat := makeDatabaseLogsinkID(clusterID, logsink.ID) + log.Printf("[DEBUGGGG] Database logsink create configuration: %#v", logsinkIDFormat) + d.SetId(logsinkIDFormat) + + return resourceDigitalOceanDatabaseLogsinkRead(ctx, d, meta) +} + +func resourceDigitalOceanDatabaseLogsinkUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + client := meta.(*config.CombinedConfig).GodoClient() + clusterID := d.Get("cluster_id").(string) + opts := &godo.DatabaseUpdateLogsinkRequest{} + + if v, ok := d.GetOk("config"); ok { + opts.Config = expandLogsinkConfig(v.([]interface{})) + } + + log.Printf("[DEBUG] Database logsink update configuration: %#v", opts) + _, err := client.Databases.UpdateLogsink(context.Background(), clusterID, d.Id(), opts) + if err != nil { + return diag.Errorf("Error updating database logsink: %s", err) + } + + return 
resourceDigitalOceanDatabaseLogsinkRead(ctx, d, meta) +} + +func resourceDigitalOceanDatabaseLogsinkDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + client := meta.(*config.CombinedConfig).GodoClient() + clusterID := d.Get("cluster_id").(string) + + log.Printf("[INFO] Deleting logsink: %s", d.Id()) + _, err := client.Databases.DeleteLogsink(ctx, clusterID, d.Id()) + if err != nil { + return diag.Errorf("Error deleting logsink topic: %s", err) + } + + d.SetId("") + return nil +} + +func expandLogsinkConfig(config []interface{}) *godo.DatabaseLogsinkConfig { + logsinkConfigOpts := &godo.DatabaseLogsinkConfig{} + configMap := config[0].(map[string]interface{}) + + if v, ok := configMap["server"]; ok { + logsinkConfigOpts.Server = v.(string) + } + + if v, ok := configMap["port"]; ok { + logsinkConfigOpts.Port = v.(int) + } + + if v, ok := configMap["tls"]; ok { + logsinkConfigOpts.TLS = v.(bool) + } + + if v, ok := configMap["format"]; ok { + logsinkConfigOpts.Format = v.(string) + } + + if v, ok := configMap["logline"]; ok { + logsinkConfigOpts.Logline = v.(string) + } + + if v, ok := configMap["sd"]; ok { + logsinkConfigOpts.SD = v.(string) + } + + if v, ok := configMap["ca"]; ok { + logsinkConfigOpts.CA = v.(string) + } + + if v, ok := configMap["key"]; ok { + logsinkConfigOpts.Key = v.(string) + } + + if v, ok := configMap["cert"]; ok { + logsinkConfigOpts.Cert = v.(string) + } + + if v, ok := configMap["url"]; ok { + logsinkConfigOpts.URL = v.(string) + } + + if v, ok := configMap["index_prefix"]; ok { + logsinkConfigOpts.IndexPrefix = v.(string) + } + + if v, ok := configMap["index_days_max"]; ok { + logsinkConfigOpts.IndexDaysMax = v.(int) + } + + if v, ok := configMap["timeout"]; ok { + logsinkConfigOpts.Timeout = v.(float32) + } + + return logsinkConfigOpts +} + +func resourceDigitalOceanDatabaseLogsinkRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + client := meta.(*config.CombinedConfig).GodoClient() + clusterID := d.Get("cluster_id").(string) + logsinkID := d.Get("sink_id").(string) + + logsink, resp, err := client.Databases.GetLogsink(context.TODO(), clusterID, logsinkID) + if err != nil { + // If the logsink is somehow already destroyed, mark as + // successfully gone + if resp != nil && resp.StatusCode == 404 { + d.SetId("") + return nil + } + + return diag.Errorf("Error retrieving logsink: %s", err) + } + + d.Set("sink_name", logsink.Name) + d.Set("sink_type", logsink.Type) + + err = d.Set("config", flattenDatabaseLogsinkConfig(logsink.Config)) + if err != nil { + return diag.FromErr(err) + } else { + return nil + } +} + +func resourceDigitalOceanDatabaseLogsinkImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + log.Printf("[DEBUG44] Database logsink create configuration: %#v", d.Id()) + + if strings.Contains(d.Id(), ",") { + s := strings.Split(d.Id(), ",") + log.Printf("[DEBUG33] Database logsink create configuration: %#v", s) + + d.SetId(makeDatabaseLogsinkID(s[0], s[1])) + d.Set("cluster_id", s[0]) + d.Set("sink_id", s[1]) + } else { + return nil, errors.New("must use the ID of the source kafka cluster and the name of the topic joined with a comma (e.g. 
`id,name`)") + } + + return []*schema.ResourceData{d}, nil +} + +func makeDatabaseLogsinkID(clusterID string, logsinkID string) string { + return fmt.Sprintf("%s/logsink/%s", clusterID, logsinkID) +} + +func flattenDatabaseLogsinkConfig(config *godo.DatabaseLogsinkConfig) map[string]interface{} { + + if config != nil { + r := make(map[string]interface{}) + + r["server"] = (*config).Server + r["port"] = (*config).Port + r["tls"] = (*config).TLS + r["format"] = (*config).Format + r["logline"] = (*config).Logline + r["sd"] = (*config).SD + r["ca"] = (*config).CA + r["key"] = (*config).Key + r["cert"] = (*config).Cert + r["url"] = (*config).URL + r["index_prefix"] = (*config).IndexPrefix + r["index_days_max"] = (*config).IndexDaysMax + r["timeout"] = (*config).Timeout + + return r + } + + return nil +} diff --git a/digitalocean/provider.go b/digitalocean/provider.go index dc1019636..121103c9b 100644 --- a/digitalocean/provider.go +++ b/digitalocean/provider.go @@ -153,6 +153,7 @@ func Provider() *schema.Provider { "digitalocean_database_postgresql_config": database.ResourceDigitalOceanDatabasePostgreSQLConfig(), "digitalocean_database_mysql_config": database.ResourceDigitalOceanDatabaseMySQLConfig(), "digitalocean_database_kafka_topic": database.ResourceDigitalOceanDatabaseKafkaTopic(), + "digitalocean_database_logsink": database.ResourceDigitalOceanDatabaseLogsink(), "digitalocean_domain": domain.ResourceDigitalOceanDomain(), "digitalocean_droplet": droplet.ResourceDigitalOceanDroplet(), "digitalocean_droplet_snapshot": snapshot.ResourceDigitalOceanDropletSnapshot(), diff --git a/docs/resources/database_logsink.md b/docs/resources/database_logsink.md new file mode 100644 index 000000000..be9fc05bf --- /dev/null +++ b/docs/resources/database_logsink.md @@ -0,0 +1,82 @@ +--- +page_title: "DigitalOcean: digitalocean_database_logsink" +--- + +# digitalocean\_database\_logsink + +Provides a DigitalOcean database logsink capabilities. Can be configured with rsyslog, elasticsearch, and opensearch. + +## Example Usage + +```hcl +resource "digitalocean_database_logsink" "logsink-01" { + cluster_id = digitalocean_database_cluster.doby.id + sink_name = "sinkexample" + sink_type = "opensearch" + + + config { + url= "https://user:passwd@192.168.0.1:25060" + index_prefix= "opensearch-logs" + index_days_max= 5 + } +} + +resource "digitalocean_database_cluster" "doby" { + name = "dobydb" + engine = "pg" + version = "15" + size = "db-s-1vcpu-2gb" + region = "nyc1" + node_count = 1 + tags = ["production"] +} +``` + + +## Argument Reference + +The following arguments are supported. See the [DigitalOcean API documentation](https://docs.digitalocean.com/reference/api/api-reference/#operation/databases_create_logsink) +for additional details on each option. + +* `cluster_id` - (Required) The ID of the target MySQL cluster. +* `sink_name` - (Required) The name of the Logsink. +* `sink_type` - (Required) Sink type. Enum: `rsyslog` `elasticsearch` `opensearch` +* `config` - (Required) Logsink configurations. + - `rsyslog` configuration options: + - `server` - (Required) The DNS name or IPv4 address of the rsyslog server. + - `port` - (Required) An integer of the internal port on which the rsyslog server is listening. + - `tls` - (Required) A boolean to use TLS (as the messages are not filtered and may contain sensitive information, it is highly recommended to set this to true if the remote server supports it). 
+ - `format` - (Required) A message format used by the server, this can be either rfc3164 (the old BSD style message format), rfc5424 (current syslog message format) or custom. Enum: `rfc5424`, `rfc3164`, or `custom`. + - `logline` - (Optional) Only required if format == custom. Syslog log line template for a custom format, supporting limited rsyslog style templating (using %tag%). Supported tags are: HOSTNAME, app-name, msg, msgid, pri, procid, structured-data, timestamp and timestamp:::date-rfc3339. + - `sd` - (Optional) content of the structured data block of rfc5424 message. + - `ca` - (Optional) PEM encoded CA certificate. + - `key` - (Optional) (PEM format) client key if the server requires client authentication + - `cert` - (Optional) (PEM format) client cert to use + - `elasticsearch` configuration options: + - `url` - (Required) Elasticsearch connection URL. + - `index_prefix` - (Required) Elasticsearch index prefix. + - `index_days_max` - (Optional) Maximum number of days of logs to keep. + - `timeout` - (Optional) Elasticsearch request timeout limit. + - `ca` - (Optional) PEM encoded CA certificate. + - `opensearch` configuration options: + - `url` - (Required) Opensearch connection URL. + - `index_prefix` - (Required) Opensearch index prefix. + - `index_days_max` - (Optional) Maximum number of days of logs to keep. + - `timeout` - (Optional) Opensearch request timeout limit. + - `ca` - (Optional) PEM encoded CA certificate. + + + + +## Attributes Reference + +All above attributes are exported. If an attribute was set outside of Terraform, it will be computed. + +## Import + +A MySQL database cluster's configuration can be imported using the `id` the parent cluster, e.g. + +``` +terraform import digitalocean_database_mysql_config.example 4b62829a-9c42-465b-aaa3-84051048e712 +``` diff --git a/examples/logsink/main.tf b/examples/logsink/main.tf new file mode 100644 index 000000000..b0b487aff --- /dev/null +++ b/examples/logsink/main.tf @@ -0,0 +1,37 @@ +terraform { + required_providers { + digitalocean = { + source = "digitalocean/digitalocean" + version = ">= 2.8.0" + } + } +} + +provider "digitalocean" { + # You need to set this in your .bashrc + # export DIGITALOCEAN_TOKEN="Your API TOKEN" + # +} + +resource "digitalocean_database_logsink" "logsink-01" { + cluster_id = digitalocean_database_cluster.doby.id + sink_name = "fox2" + sink_type = "opensearch" + + + config { + url= "https://user:passwd@192.168.0.1:25060" + index_prefix= "opensearch-logs" + index_days_max= 5 + } +} + +resource "digitalocean_database_cluster" "doby" { + name = "dobydb" + engine = "pg" + version = "15" + size = "db-s-1vcpu-2gb" + region = "nyc1" + node_count = 1 + tags = ["production"] +} \ No newline at end of file diff --git a/examples/tutorial/main.tf b/examples/tutorial/main.tf new file mode 100644 index 000000000..f22d9b733 --- /dev/null +++ b/examples/tutorial/main.tf @@ -0,0 +1,22 @@ +terraform { + required_providers { + digitalocean = { + source = "digitalocean/digitalocean" + version = ">= 2.8.0" + } + } +} + +provider "digitalocean" { + # You need to set this in your .bashrc + # export DIGITALOCEAN_TOKEN="Your API TOKEN" + # +} + + +resource "digitalocean_droplet" "web" { + image = "ubuntu-20-04-x64" + name = "web2" + region = "nyc2" + size = "s-1vcpu-1gb" +} diff --git a/go.mod b/go.mod index 0999d190d..dd40c0cfc 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module github.com/digitalocean/terraform-provider-digitalocean require ( github.com/aws/aws-sdk-go v1.42.18 - 
github.com/digitalocean/godo v1.119.1-0.20240726213151-e56b8a3e1755 + github.com/digitalocean/godo v1.124.0 github.com/hashicorp/awspolicyequivalence v1.5.0 github.com/hashicorp/go-uuid v1.0.3 github.com/hashicorp/go-version v1.6.0 diff --git a/go.sum b/go.sum index 9bc39bf23..2bbd4ed38 100644 --- a/go.sum +++ b/go.sum @@ -25,8 +25,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/digitalocean/godo v1.119.1-0.20240726213151-e56b8a3e1755 h1:21uc6tNgFS/5MiYz+KzDhf5tVO38TN8FPO6803yNAjI= -github.com/digitalocean/godo v1.119.1-0.20240726213151-e56b8a3e1755/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY= +github.com/digitalocean/godo v1.124.0 h1:qroI1QdtcgnXF/pefq9blZRbXqBw1Ry/aHh2pnu/328= +github.com/digitalocean/godo v1.124.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY= github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= diff --git a/vendor/github.com/digitalocean/godo/CHANGELOG.md b/vendor/github.com/digitalocean/godo/CHANGELOG.md index 21c247094..3112c9ffb 100644 --- a/vendor/github.com/digitalocean/godo/CHANGELOG.md +++ b/vendor/github.com/digitalocean/godo/CHANGELOG.md @@ -1,5 +1,30 @@ # Change Log +## [v1.124.0] - 2024-09-10 + +- #721 - @vsharma6855 - [DBAAS] | Add API endpoint for applying cluster patches + +## [v1.123.0] - 2024-09-06 + +- #719 - @andrewsomething - apps: mark ListTiers and GetTier as deprecated + +## [v1.122.0] - 2024-09-04 + +- #717 - @danaelhe - DB: Fix Logsink Attribute Types +- #716 - @bhardwajRahul - Databases: Add support for OpenSearch ACL + +## [v1.121.0] - 2024-08-20 + +- #715 - @danaelhe - Databases: Bring back Logsink Support +- #710 - @bhardwajRahul - Update GODO to include new Openseach index crud changes +- #712 - @danaelhe - Database: Namespace logsink +- #711 - @danaelhe - Databases: Add Logsinks CRUD support + +## [v1.120.0] - 2024-08-08 + +- #708 - @markusthoemmes - APPS-9201 Add `UpdateAllSourceVersions` parameter to update app calls +- #706 - @andrewsomething - database: Add Size to DatabaseReplica struct + ## [v1.119.0] - 2024-07-24 - #704 - @ElanHasson - APPS-9133 - Add support for OPENSEARCH as a database engine option diff --git a/vendor/github.com/digitalocean/godo/apps.go b/vendor/github.com/digitalocean/godo/apps.go index cd72f7408..ac792658e 100644 --- a/vendor/github.com/digitalocean/godo/apps.go +++ b/vendor/github.com/digitalocean/godo/apps.go @@ -80,6 +80,8 @@ type AppLogs struct { // AppUpdateRequest represents a request to update an app. type AppUpdateRequest struct { Spec *AppSpec `json:"spec"` + // Whether or not to update the source versions (for example fetching a new commit or image digest) of all components. By default (when this is false) only newly added sources will be updated to avoid changes like updating the scale of a component from also updating the respective code. + UpdateAllSourceVersions bool `json:"update_all_source_versions"` } // DeploymentCreateRequest represents a request to create a deployment. 
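// A minimal usage sketch for the UpdateAllSourceVersions flag introduced in
// AppUpdateRequest above, assuming an authenticated godo client, a fetched
// *godo.App, and a context (all hypothetical, not part of this patch):
//
//	update := &godo.AppUpdateRequest{
//		Spec:                    app.Spec,
//		UpdateAllSourceVersions: true, // re-fetch the commit/image digest for every component
//	}
//	updated, _, err := client.Apps.Update(ctx, app.ID, update)
//	if err != nil {
//		log.Fatalf("updating app: %s", err)
//	}
//	_ = updated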
@@ -382,6 +384,9 @@ func (s *AppsServiceOp) ListRegions(ctx context.Context) ([]*AppRegion, *Respons } // ListTiers lists available app tiers. +// +// Deprecated: The '/v2/apps/tiers' endpoint has been deprecated as app tiers +// are no longer tied to instance sizes. The concept of tiers is being retired. func (s *AppsServiceOp) ListTiers(ctx context.Context) ([]*AppTier, *Response, error) { path := fmt.Sprintf("%s/tiers", appsBasePath) req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) @@ -397,6 +402,9 @@ func (s *AppsServiceOp) ListTiers(ctx context.Context) ([]*AppTier, *Response, e } // GetTier retrieves information about a specific app tier. +// +// Deprecated: The '/v2/apps/tiers/{slug}' endpoints have been deprecated as app +// tiers are no longer tied to instance sizes. The concept of tiers is being retired. func (s *AppsServiceOp) GetTier(ctx context.Context, slug string) (*AppTier, *Response, error) { path := fmt.Sprintf("%s/tiers/%s", appsBasePath, slug) req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) diff --git a/vendor/github.com/digitalocean/godo/databases.go b/vendor/github.com/digitalocean/godo/databases.go index 4e926b7c7..8305770c6 100644 --- a/vendor/github.com/digitalocean/godo/databases.go +++ b/vendor/github.com/digitalocean/godo/databases.go @@ -16,6 +16,7 @@ const ( databaseResizePath = databaseBasePath + "/%s/resize" databaseMigratePath = databaseBasePath + "/%s/migrate" databaseMaintenancePath = databaseBasePath + "/%s/maintenance" + databaseUpdateInstallationPath = databaseBasePath + "/%s/install_update" databaseBackupsPath = databaseBasePath + "/%s/backups" databaseUsersPath = databaseBasePath + "/%s/users" databaseUserPath = databaseBasePath + "/%s/users/%s" @@ -36,6 +37,10 @@ const ( databaseTopicsPath = databaseBasePath + "/%s/topics" databaseMetricsCredentialsPath = databaseBasePath + "/metrics/credentials" databaseEvents = databaseBasePath + "/%s/events" + databaseIndexesPath = databaseBasePath + "/%s/indexes" + databaseIndexPath = databaseBasePath + "/%s/indexes/%s" + databaseLogsinkPath = databaseBasePath + "/%s/logsink/%s" + databaseLogsinksPath = databaseBasePath + "/%s/logsink" ) // SQL Mode constants allow for MySQL-specific SQL flavor configuration. 
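// For orientation, the logsink path constants added above expand into the
// per-cluster REST endpoints. A sketch, assuming godo's existing
// databaseBasePath of "/v2/databases" (the IDs below are placeholders):
//
//	fmt.Sprintf(databaseLogsinksPath, "cluster-id")
//	// => /v2/databases/cluster-id/logsink
//	fmt.Sprintf(databaseLogsinkPath, "cluster-id", "sink-id")
//	// => /v2/databases/cluster-id/logsink/sink-id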
@@ -116,6 +121,7 @@ type DatabasesService interface { Resize(context.Context, string, *DatabaseResizeRequest) (*Response, error) Migrate(context.Context, string, *DatabaseMigrateRequest) (*Response, error) UpdateMaintenance(context.Context, string, *DatabaseUpdateMaintenanceRequest) (*Response, error) + InstallUpdate(context.Context, string) (*Response, error) ListBackups(context.Context, string, *ListOptions) ([]DatabaseBackup, *Response, error) GetUser(context.Context, string, string) (*DatabaseUser, *Response, error) ListUsers(context.Context, string, *ListOptions) ([]DatabaseUser, *Response, error) @@ -159,6 +165,13 @@ type DatabasesService interface { GetMetricsCredentials(context.Context) (*DatabaseMetricsCredentials, *Response, error) UpdateMetricsCredentials(context.Context, *DatabaseUpdateMetricsCredentialsRequest) (*Response, error) ListDatabaseEvents(context.Context, string, *ListOptions) ([]DatabaseEvent, *Response, error) + ListIndexes(context.Context, string, *ListOptions) ([]DatabaseIndex, *Response, error) + DeleteIndex(context.Context, string, string) (*Response, error) + CreateLogsink(ctx context.Context, databaseID string, createLogsink *DatabaseCreateLogsinkRequest) (*DatabaseLogsink, *Response, error) + GetLogsink(ctx context.Context, databaseID string, logsinkID string) (*DatabaseLogsink, *Response, error) + ListLogsinks(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseLogsink, *Response, error) + UpdateLogsink(ctx context.Context, databaseID string, logsinkID string, updateLogsink *DatabaseUpdateLogsinkRequest) (*Response, error) + DeleteLogsink(ctx context.Context, databaseID, logsinkID string) (*Response, error) } // DatabasesServiceOp handles communication with the Databases related methods @@ -241,9 +254,16 @@ type KafkaACL struct { Topic string `json:"topic,omitempty"` } -// DatabaseUserSettings contains Kafka-specific user settings +// OpenSearchACL contains OpenSearch specific user access control information +type OpenSearchACL struct { + Permission string `json:"permission,omitempty"` + Index string `json:"index,omitempty"` +} + +// DatabaseUserSettings contains user settings type DatabaseUserSettings struct { - ACL []*KafkaACL `json:"acl,omitempty"` + ACL []*KafkaACL `json:"acl,omitempty"` + OpenSearchACL []*OpenSearchACL `json:"opensearch_acl,omitempty"` } // DatabaseMySQLUserSettings contains MySQL-specific user settings @@ -323,6 +343,14 @@ type DatabaseTopic struct { Config *TopicConfig `json:"config,omitempty"` } +// DatabaseLogsink represents a logsink +type DatabaseLogsink struct { + ID string `json:"sink_id"` + Name string `json:"sink_name,omitempty"` + Type string `json:"sink_type,omitempty"` + Config *DatabaseLogsinkConfig `json:"config,omitempty"` +} + // TopicPartition represents the state of a Kafka topic partition type TopicPartition struct { EarliestOffset uint64 `json:"earliest_offset,omitempty"` @@ -472,6 +500,35 @@ type DatabaseFirewallRule struct { CreatedAt time.Time `json:"created_at"` } +// DatabaseCreateLogsinkRequest is used to create logsink for a database cluster +type DatabaseCreateLogsinkRequest struct { + Name string `json:"sink_name"` + Type string `json:"sink_type"` + Config *DatabaseLogsinkConfig `json:"config"` +} + +// DatabaseUpdateLogsinkRequest is used to update logsink for a database cluster +type DatabaseUpdateLogsinkRequest struct { + Config *DatabaseLogsinkConfig `json:"config"` +} + +// DatabaseLogsinkConfig represents one of the configurable options (rsyslog_logsink, elasticsearch_logsink, or 
opensearch_logsink) for a logsink. +type DatabaseLogsinkConfig struct { + URL string `json:"url,omitempty"` + IndexPrefix string `json:"index_prefix,omitempty"` + IndexDaysMax int `json:"index_days_max,omitempty"` + Timeout float32 `json:"timeout,omitempty"` + Server string `json:"server,omitempty"` + Port int `json:"port,omitempty"` + TLS bool `json:"tls,omitempty"` + Format string `json:"format,omitempty"` + Logline string `json:"logline,omitempty"` + SD string `json:"sd,omitempty"` + CA string `json:"ca,omitempty"` + Key string `json:"key,omitempty"` + Cert string `json:"cert,omitempty"` +} + // PostgreSQLConfig holds advanced configurations for PostgreSQL database clusters. type PostgreSQLConfig struct { AutovacuumFreezeMaxAge *int `json:"autovacuum_freeze_max_age,omitempty"` @@ -680,6 +737,10 @@ type databaseTopicsRoot struct { Topics []DatabaseTopic `json:"topics"` } +type databaseLogsinksRoot struct { + Sinks []DatabaseLogsink `json:"sinks"` +} + type databaseMetricsCredentialsRoot struct { Credentials *DatabaseMetricsCredentials `json:"credentials"` } @@ -733,6 +794,28 @@ type ListDatabaseEventsRoot struct { Events []DatabaseEvent `json:"events"` } +type DatabaseIndex struct { + IndexName string `json:"index_name"` + NumberofShards uint64 `json:"number_of_shards"` + NumberofReplicas uint64 `json:"number_of_replicas"` + Size int64 `json:"size,omitempty"` + Health string `json:"health,omitempty"` + Status string `json:"status,omitempty"` + Docs int64 `json:"docs,omitempty"` + CreateTime string `json:"create_time"` + Replication *IndexReplication `json:"replication,omitempty"` +} + +type IndexReplication struct { + LeaderIndex string `json:"leader_index,omitempty"` + LeaderProject string `json:"leader_project,omitempty"` + LeaderService string `json:"leader_service,omitempty"` +} + +type databaseIndexesRoot struct { + Indexes []DatabaseIndex `json:"indexes"` +} + // URN returns a URN identifier for the database func (d Database) URN() string { return ToURN("dbaas", d.ID) @@ -859,6 +942,20 @@ func (svc *DatabasesServiceOp) UpdateMaintenance(ctx context.Context, databaseID return resp, nil } +// InstallUpdate starts installation of updates +func (svc *DatabasesServiceOp) InstallUpdate(ctx context.Context, databaseID string) (*Response, error) { + path := fmt.Sprintf(databaseUpdateInstallationPath, databaseID) + req, err := svc.client.NewRequest(ctx, http.MethodPut, path, nil) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + // ListBackups returns a list of the current backups of a database func (svc *DatabasesServiceOp) ListBackups(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseBackup, *Response, error) { path := fmt.Sprintf(databaseBackupsPath, databaseID) @@ -1560,3 +1657,116 @@ func (svc *DatabasesServiceOp) ListDatabaseEvents(ctx context.Context, databaseI return root.Events, resp, nil } + +// ListIndexes returns all indexes for a given opensearch cluster +func (svc *DatabasesServiceOp) ListIndexes(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseIndex, *Response, error) { + path := fmt.Sprintf(databaseIndexesPath, databaseID) + path, err := addOptions(path, opts) + if err != nil { + return nil, nil, err + } + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(databaseIndexesRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + 
} + return root.Indexes, resp, nil +} + +// DeleteIndex will delete an existing opensearch index +func (svc *DatabasesServiceOp) DeleteIndex(ctx context.Context, databaseID, name string) (*Response, error) { + path := fmt.Sprintf(databaseIndexPath, databaseID, name) + req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + +// CreateLogsink creates a new logsink for a database +func (svc *DatabasesServiceOp) CreateLogsink(ctx context.Context, databaseID string, createLogsink *DatabaseCreateLogsinkRequest) (*DatabaseLogsink, *Response, error) { + path := fmt.Sprintf(databaseLogsinksPath, databaseID) + req, err := svc.client.NewRequest(ctx, http.MethodPost, path, createLogsink) + if err != nil { + return nil, nil, err + } + + root := new(DatabaseLogsink) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root, resp, nil +} + +// GetLogsink gets a logsink for a database +func (svc *DatabasesServiceOp) GetLogsink(ctx context.Context, databaseID string, logsinkID string) (*DatabaseLogsink, *Response, error) { + path := fmt.Sprintf(databaseLogsinkPath, databaseID, logsinkID) + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(DatabaseLogsink) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root, resp, nil +} + +// ListTopics returns all topics for a given kafka cluster +func (svc *DatabasesServiceOp) ListLogsinks(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseLogsink, *Response, error) { + path := fmt.Sprintf(databaseLogsinksPath, databaseID) + path, err := addOptions(path, opts) + if err != nil { + return nil, nil, err + } + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(databaseLogsinksRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Sinks, resp, nil +} + +// UpdateLogsink updates a logsink for a database cluster +func (svc *DatabasesServiceOp) UpdateLogsink(ctx context.Context, databaseID string, logsinkID string, updateLogsink *DatabaseUpdateLogsinkRequest) (*Response, error) { + path := fmt.Sprintf(databaseLogsinkPath, databaseID, logsinkID) + req, err := svc.client.NewRequest(ctx, http.MethodPut, path, updateLogsink) + if err != nil { + return nil, err + } + + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + +// DeleteLogsink deletes a logsink for a database cluster +func (svc *DatabasesServiceOp) DeleteLogsink(ctx context.Context, databaseID, logsinkID string) (*Response, error) { + path := fmt.Sprintf(databaseLogsinkPath, databaseID, logsinkID) + req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} diff --git a/vendor/github.com/digitalocean/godo/godo.go b/vendor/github.com/digitalocean/godo/godo.go index 2ba06e56e..adad738b6 100644 --- a/vendor/github.com/digitalocean/godo/godo.go +++ b/vendor/github.com/digitalocean/godo/godo.go @@ -21,7 +21,7 @@ import ( ) const ( - libraryVersion = "1.119.0" + libraryVersion = "1.124.0" defaultBaseURL = "https://api.digitalocean.com/" 
userAgent = "godo/" + libraryVersion mediaType = "application/json" diff --git a/vendor/modules.txt b/vendor/modules.txt index ba80fa7b5..b108a85a7 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -58,7 +58,7 @@ github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/digitalocean/godo v1.119.1-0.20240726213151-e56b8a3e1755 +# github.com/digitalocean/godo v1.124.0 ## explicit; go 1.20 github.com/digitalocean/godo github.com/digitalocean/godo/metrics From c6c46879552b4c5be9a7f66c9ef90c44c2c4e7fd Mon Sep 17 00:00:00 2001 From: Anna Lushnikova Date: Wed, 2 Oct 2024 15:39:03 -0400 Subject: [PATCH 02/13] fix indention --- docs/resources/database_logsink.md | 16 ++++++---------- examples/logsink/main.tf | 16 ++++++++-------- examples/tutorial/main.tf | 2 +- 3 files changed, 15 insertions(+), 19 deletions(-) diff --git a/docs/resources/database_logsink.md b/docs/resources/database_logsink.md index be9fc05bf..c4cc5540f 100644 --- a/docs/resources/database_logsink.md +++ b/docs/resources/database_logsink.md @@ -10,15 +10,15 @@ Provides a DigitalOcean database logsink capabilities. Can be configured with rs ```hcl resource "digitalocean_database_logsink" "logsink-01" { - cluster_id = digitalocean_database_cluster.doby.id - sink_name = "sinkexample" - sink_type = "opensearch" + cluster_id = digitalocean_database_cluster.doby.id + sink_name = "sinkexample" + sink_type = "opensearch" config { - url= "https://user:passwd@192.168.0.1:25060" - index_prefix= "opensearch-logs" - index_days_max= 5 + url = "https://user:passwd@192.168.0.1:25060" + index_prefix = "opensearch-logs" + index_days_max = 5 } } @@ -33,7 +33,6 @@ resource "digitalocean_database_cluster" "doby" { } ``` - ## Argument Reference The following arguments are supported. See the [DigitalOcean API documentation](https://docs.digitalocean.com/reference/api/api-reference/#operation/databases_create_logsink) @@ -66,9 +65,6 @@ for additional details on each option. - `timeout` - (Optional) Opensearch request timeout limit. - `ca` - (Optional) PEM encoded CA certificate. - - - ## Attributes Reference All above attributes are exported. If an attribute was set outside of Terraform, it will be computed. 
diff --git a/examples/logsink/main.tf b/examples/logsink/main.tf index b0b487aff..4090d7b61 100644 --- a/examples/logsink/main.tf +++ b/examples/logsink/main.tf @@ -1,7 +1,7 @@ terraform { required_providers { digitalocean = { - source = "digitalocean/digitalocean" + source = "digitalocean/digitalocean" version = ">= 2.8.0" } } @@ -14,15 +14,15 @@ provider "digitalocean" { } resource "digitalocean_database_logsink" "logsink-01" { - cluster_id = digitalocean_database_cluster.doby.id - sink_name = "fox2" - sink_type = "opensearch" + cluster_id = digitalocean_database_cluster.doby.id + sink_name = "fox2" + sink_type = "opensearch" config { - url= "https://user:passwd@192.168.0.1:25060" - index_prefix= "opensearch-logs" - index_days_max= 5 + url = "https://user:passwd@192.168.0.1:25060" + index_prefix = "opensearch-logs" + index_days_max = 5 } } @@ -34,4 +34,4 @@ resource "digitalocean_database_cluster" "doby" { region = "nyc1" node_count = 1 tags = ["production"] -} \ No newline at end of file +} diff --git a/examples/tutorial/main.tf b/examples/tutorial/main.tf index f22d9b733..306d75bc0 100644 --- a/examples/tutorial/main.tf +++ b/examples/tutorial/main.tf @@ -1,7 +1,7 @@ terraform { required_providers { digitalocean = { - source = "digitalocean/digitalocean" + source = "digitalocean/digitalocean" version = ">= 2.8.0" } } From 344ef5832a8e20c102b63d2912ee63c6e218c256 Mon Sep 17 00:00:00 2001 From: Anna Lushnikova Date: Thu, 3 Oct 2024 08:51:10 -0400 Subject: [PATCH 03/13] minor updates: fix timeout, update an error --- digitalocean/database/resource_database_logsink.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/digitalocean/database/resource_database_logsink.go b/digitalocean/database/resource_database_logsink.go index 5bbeb4799..e766b552a 100644 --- a/digitalocean/database/resource_database_logsink.go +++ b/digitalocean/database/resource_database_logsink.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "log" + "math" "strings" "time" @@ -124,9 +125,10 @@ func ResourceDigitalOceanDatabaseLogsink() *schema.Resource { Description: "Index prefix. Required for Opensearch and Elasticsearch.", }, "timeout": { - Type: schema.TypeFloat, - Optional: true, - Description: "Default 10 days. Elasticsearch/Opensearch request timeout limit", + Type: schema.TypeFloat, + Optional: true, + Description: "Default 10 days. Elasticsearch/Opensearch request timeout limit", + ValidateFunc: validation.FloatBetween(10, 120), }, }, }, @@ -250,7 +252,9 @@ func expandLogsinkConfig(config []interface{}) *godo.DatabaseLogsinkConfig { } if v, ok := configMap["timeout"]; ok { - logsinkConfigOpts.Timeout = v.(float32) + if v.(float64) > float64(math.SmallestNonzeroFloat32) || v.(float64) < float64(math.MaxFloat32) { + logsinkConfigOpts.Timeout = float32(v.(float64)) + } } return logsinkConfigOpts @@ -295,7 +299,7 @@ func resourceDigitalOceanDatabaseLogsinkImport(d *schema.ResourceData, meta inte d.Set("cluster_id", s[0]) d.Set("sink_id", s[1]) } else { - return nil, errors.New("must use the ID of the source kafka cluster and the name of the topic joined with a comma (e.g. `id,name`)") + return nil, errors.New("must use the ID of the source cluster and logsink id joined with a comma (e.g. `id,sink_id`)") } return []*schema.ResourceData{d}, nil From 9c1f3363e18057673eff47c141c214ac8f9291ce Mon Sep 17 00:00:00 2001 From: Anna Lushnikova Date: Fri, 4 Oct 2024 16:38:44 -0400 Subject: [PATCH 04/13] minor updates: new draft godo, set sink_id, add logs for debugging. 
--- .../database/resource_database_logsink.go | 27 ++++-- go.mod | 4 +- go.sum | 4 +- .../github.com/digitalocean/godo/CHANGELOG.md | 5 + .../github.com/digitalocean/godo/databases.go | 94 ++++++++++++++++++- vendor/github.com/digitalocean/godo/godo.go | 2 +- .../digitalocean/godo/kubernetes.go | 12 ++- vendor/modules.txt | 2 +- 8 files changed, 127 insertions(+), 23 deletions(-) diff --git a/digitalocean/database/resource_database_logsink.go b/digitalocean/database/resource_database_logsink.go index e766b552a..b9cf1a144 100644 --- a/digitalocean/database/resource_database_logsink.go +++ b/digitalocean/database/resource_database_logsink.go @@ -150,7 +150,6 @@ func resourceDigitalOceanDatabaseLogsinkCreate(ctx context.Context, d *schema.Re opts.Config = expandLogsinkConfig(v.([]interface{})) } - log.Printf("[DEBUG] Database logsink create configuration: %#v", opts) logsink, _, err := client.Databases.CreateLogsink(context.Background(), clusterID, opts) if err != nil { return diag.Errorf("Error creating database logsink: %s", err) @@ -163,6 +162,10 @@ func resourceDigitalOceanDatabaseLogsinkCreate(ctx context.Context, d *schema.Re logsinkIDFormat := makeDatabaseLogsinkID(clusterID, logsink.ID) log.Printf("[DEBUGGGG] Database logsink create configuration: %#v", logsinkIDFormat) d.SetId(logsinkIDFormat) + d.Set("sink_id", logsink.ID) + + log.Printf("[DEBUGGGG] Database LOGSINK - logsink.ID: %#v", logsink.ID) + log.Printf("[DEBUGGGG] Database LOGSINK - d sink_id: %#v", d.Get("sink_id").(string)) return resourceDigitalOceanDatabaseLogsinkRead(ctx, d, meta) } @@ -201,7 +204,7 @@ func resourceDigitalOceanDatabaseLogsinkDelete(ctx context.Context, d *schema.Re func expandLogsinkConfig(config []interface{}) *godo.DatabaseLogsinkConfig { logsinkConfigOpts := &godo.DatabaseLogsinkConfig{} - configMap := config[0].(map[string]interface{}) + configMap := config[0].(map[string]interface{}) // TODO: check out expandAppSpecServices if v, ok := configMap["server"]; ok { logsinkConfigOpts.Server = v.(string) @@ -265,7 +268,10 @@ func resourceDigitalOceanDatabaseLogsinkRead(ctx context.Context, d *schema.Reso clusterID := d.Get("cluster_id").(string) logsinkID := d.Get("sink_id").(string) - logsink, resp, err := client.Databases.GetLogsink(context.TODO(), clusterID, logsinkID) + logsink, resp, err := client.Databases.GetLogsink(ctx, clusterID, logsinkID) + log.Printf("[DEBUG] Read LOGSINK - logsink: %#v", logsink) + log.Printf("[DEBUG] Read LOGSINK - resp: %#v", resp) + log.Printf("[DEBUG] Read LOGSINK - err: %#v", err) if err != nil { // If the logsink is somehow already destroyed, mark as // successfully gone @@ -277,15 +283,18 @@ func resourceDigitalOceanDatabaseLogsinkRead(ctx context.Context, d *schema.Reso return diag.Errorf("Error retrieving logsink: %s", err) } - d.Set("sink_name", logsink.Name) + d.Set("sink_name", logsink.Name) // TODO: nil - fix d.Set("sink_type", logsink.Type) - err = d.Set("config", flattenDatabaseLogsinkConfig(logsink.Config)) - if err != nil { - return diag.FromErr(err) - } else { - return nil + log.Printf("[DEBUG] TRACE 1") + log.Printf("[DEBUG] logsink.Config: %v ", logsink.Config) + if err := d.Set("config", flattenDatabaseLogsinkConfig(logsink.Config)); err != nil { + log.Printf("[DEBUG] TRACE 2") + return diag.Errorf("Error setting logsink config: %#v", err) } + log.Printf("[DEBUG] TRACE 3") + + return nil } func resourceDigitalOceanDatabaseLogsinkImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { diff --git a/go.mod b/go.mod index 
c2440d8a1..12fe199d7 100644 --- a/go.mod +++ b/go.mod @@ -2,8 +2,9 @@ module github.com/digitalocean/terraform-provider-digitalocean require ( github.com/aws/aws-sdk-go v1.42.18 - github.com/digitalocean/godo v1.125.1-0.20240920194833-57fbfebd23d4 + github.com/digitalocean/godo v1.126.1-0.20241003201848-56a323c1ff6f github.com/hashicorp/awspolicyequivalence v1.5.0 + github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/go-uuid v1.0.3 github.com/hashicorp/go-version v1.6.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.26.1 @@ -25,7 +26,6 @@ require ( github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-plugin v1.4.8 // indirect diff --git a/go.sum b/go.sum index 633f76663..37527c5d3 100644 --- a/go.sum +++ b/go.sum @@ -25,8 +25,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/digitalocean/godo v1.125.1-0.20240920194833-57fbfebd23d4 h1:G/lf5YrNl4bDJyp3yJRld3J5BTFpQStYJHEnE6SxigY= -github.com/digitalocean/godo v1.125.1-0.20240920194833-57fbfebd23d4/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= +github.com/digitalocean/godo v1.126.1-0.20241003201848-56a323c1ff6f h1:TpEhmJ6LohoBcd8ACfZt+hfwbrKWOmZNiSKXgHtmkQ4= +github.com/digitalocean/godo v1.126.1-0.20241003201848-56a323c1ff6f/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= diff --git a/vendor/github.com/digitalocean/godo/CHANGELOG.md b/vendor/github.com/digitalocean/godo/CHANGELOG.md index b045687a9..371272169 100644 --- a/vendor/github.com/digitalocean/godo/CHANGELOG.md +++ b/vendor/github.com/digitalocean/godo/CHANGELOG.md @@ -1,5 +1,10 @@ # Change Log +## [v1.126.0] - 2024-09-25 + +- #732 - @gottwald - DOKS: add custom CIDR fields +- #727 - @loosla - [databases]: add support for Kafka advanced configuration + ## [v1.125.0] - 2024-09-17 - #726 - @loosla - [databases]: add support for MongoDB advanced configuration diff --git a/vendor/github.com/digitalocean/godo/databases.go b/vendor/github.com/digitalocean/godo/databases.go index e168186ff..0cd1e51cf 100644 --- a/vendor/github.com/digitalocean/godo/databases.go +++ b/vendor/github.com/digitalocean/godo/databases.go @@ -154,11 +154,13 @@ type DatabasesService interface { GetRedisConfig(context.Context, string) (*RedisConfig, *Response, error) GetMySQLConfig(context.Context, string) (*MySQLConfig, *Response, error) GetMongoDBConfig(context.Context, string) (*MongoDBConfig, *Response, error) + GetOpensearchConfig(context.Context, string) (*OpensearchConfig, *Response, error) GetKafkaConfig(context.Context, string) (*KafkaConfig, *Response, error) UpdatePostgreSQLConfig(context.Context, string, *PostgreSQLConfig) (*Response, error) UpdateRedisConfig(context.Context, string, *RedisConfig) 
(*Response, error) UpdateMySQLConfig(context.Context, string, *MySQLConfig) (*Response, error) UpdateMongoDBConfig(context.Context, string, *MongoDBConfig) (*Response, error) + UpdateOpensearchConfig(context.Context, string, *OpensearchConfig) (*Response, error) UpdateKafkaConfig(context.Context, string, *KafkaConfig) (*Response, error) ListOptions(todo context.Context) (*DatabaseOptions, *Response, error) UpgradeMajorVersion(context.Context, string, *UpgradeVersionRequest) (*Response, error) @@ -683,6 +685,48 @@ type KafkaConfig struct { AutoCreateTopicsEnable *bool `json:"auto_create_topics_enable,omitempty"` } +// OpensearchConfig holds advanced configurations for Opensearch database clusters. +type OpensearchConfig struct { + HttpMaxContentLengthBytes *int `json:"http_max_content_length_bytes,omitempty"` + HttpMaxHeaderSizeBytes *int `json:"http_max_header_size_bytes,omitempty"` + HttpMaxInitialLineLengthBytes *int `json:"http_max_initial_line_length_bytes,omitempty"` + IndicesQueryBoolMaxClauseCount *int `json:"indices_query_bool_max_clause_count,omitempty"` + IndicesFielddataCacheSizePercentage *int `json:"indices_fielddata_cache_size_percentage,omitempty"` + IndicesMemoryIndexBufferSizePercentage *int `json:"indices_memory_index_buffer_size_percentage,omitempty"` + IndicesMemoryMinIndexBufferSizeMb *int `json:"indices_memory_min_index_buffer_size_mb,omitempty"` + IndicesMemoryMaxIndexBufferSizeMb *int `json:"indices_memory_max_index_buffer_size_mb,omitempty"` + IndicesQueriesCacheSizePercentage *int `json:"indices_queries_cache_size_percentage,omitempty"` + IndicesRecoveryMaxMbPerSec *int `json:"indices_recovery_max_mb_per_sec,omitempty"` + IndicesRecoveryMaxConcurrentFileChunks *int `json:"indices_recovery_max_concurrent_file_chunks,omitempty"` + ThreadPoolSearchSize *int `json:"thread_pool_search_size,omitempty"` + ThreadPoolSearchThrottledSize *int `json:"thread_pool_search_throttled_size,omitempty"` + ThreadPoolGetSize *int `json:"thread_pool_get_size,omitempty"` + ThreadPoolAnalyzeSize *int `json:"thread_pool_analyze_size,omitempty"` + ThreadPoolWriteSize *int `json:"thread_pool_write_size,omitempty"` + ThreadPoolForceMergeSize *int `json:"thread_pool_force_merge_size,omitempty"` + ThreadPoolSearchQueueSize *int `json:"thread_pool_search_queue_size,omitempty"` + ThreadPoolSearchThrottledQueueSize *int `json:"thread_pool_search_throttled_queue_size,omitempty"` + ThreadPoolGetQueueSize *int `json:"thread_pool_get_queue_size,omitempty"` + ThreadPoolAnalyzeQueueSize *int `json:"thread_pool_analyze_queue_size,omitempty"` + ThreadPoolWriteQueueSize *int `json:"thread_pool_write_queue_size,omitempty"` + IsmEnabled *bool `json:"ism_enabled,omitempty"` + IsmHistoryEnabled *bool `json:"ism_history_enabled,omitempty"` + IsmHistoryMaxAgeHours *int `json:"ism_history_max_age_hours,omitempty"` + IsmHistoryMaxDocs *uint64 `json:"ism_history_max_docs,omitempty"` + IsmHistoryRolloverCheckPeriodHours *int `json:"ism_history_rollover_check_period_hours,omitempty"` + IsmHistoryRolloverRetentionPeriodDays *int `json:"ism_history_rollover_retention_period_days,omitempty"` + SearchMaxBuckets *int `json:"search_max_buckets,omitempty"` + ActionAutoCreateIndexEnabled *bool `json:"action_auto_create_index_enabled,omitempty"` + EnableSecurityAudit *bool `json:"enable_security_audit,omitempty"` + ActionDestructiveRequiresName *bool `json:"action_destructive_requires_name,omitempty"` + ClusterMaxShardsPerNode *int `json:"cluster_max_shards_per_node,omitempty"` + OverrideMainResponseVersion *bool 
`json:"override_main_response_version,omitempty"` + ScriptMaxCompilationsRate *string `json:"script_max_compilations_rate,omitempty"` + ClusterRoutingAllocationNodeConcurrentRecoveries *int `json:"cluster_routing_allocation_node_concurrent_recoveries,omitempty"` + ReindexRemoteWhitelist []string `json:"reindex_remote_whitelist,omitempty"` + PluginsAlertingFilterByBackendRolesEnabled *bool `json:"plugins_alerting_filter_by_backend_roles_enabled,omitempty"` +} + type databaseUserRoot struct { User *DatabaseUser `json:"user"` } @@ -727,6 +771,10 @@ type databaseMongoDBConfigRoot struct { Config *MongoDBConfig `json:"config"` } +type databaseOpensearchConfigRoot struct { + Config *OpensearchConfig `json:"config"` +} + type databaseKafkaConfigRoot struct { Config *KafkaConfig `json:"config"` } @@ -780,6 +828,10 @@ type databaseTopicsRoot struct { Topics []DatabaseTopic `json:"topics"` } +type databaseLogsinkRoot struct { + Sink *DatabaseLogsink `json:"sink"` +} + type databaseLogsinksRoot struct { Sinks []DatabaseLogsink `json:"sinks"` } @@ -1606,6 +1658,38 @@ func (svc *DatabasesServiceOp) UpdateKafkaConfig(ctx context.Context, databaseID return resp, nil } +// GetOpensearchConfig retrieves the config for a Opensearch database cluster. +func (svc *DatabasesServiceOp) GetOpensearchConfig(ctx context.Context, databaseID string) (*OpensearchConfig, *Response, error) { + path := fmt.Sprintf(databaseConfigPath, databaseID) + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(databaseOpensearchConfigRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Config, resp, nil +} + +// UpdateOpensearchConfig updates the config for a Opensearch database cluster. +func (svc *DatabasesServiceOp) UpdateOpensearchConfig(ctx context.Context, databaseID string, config *OpensearchConfig) (*Response, error) { + path := fmt.Sprintf(databaseConfigPath, databaseID) + root := &databaseOpensearchConfigRoot{ + Config: config, + } + req, err := svc.client.NewRequest(ctx, http.MethodPatch, path, root) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + // ListOptions gets the database options available. 
func (svc *DatabasesServiceOp) ListOptions(ctx context.Context) (*DatabaseOptions, *Response, error) { root := new(databaseOptionsRoot) @@ -1806,12 +1890,13 @@ func (svc *DatabasesServiceOp) CreateLogsink(ctx context.Context, databaseID str return nil, nil, err } - root := new(DatabaseLogsink) + root := new(databaseLogsinkRoot) + resp, err := svc.client.Do(ctx, req, root) if err != nil { return nil, resp, err } - return root, resp, nil + return root.Sink, resp, nil } // GetLogsink gets a logsink for a database @@ -1822,7 +1907,9 @@ func (svc *DatabasesServiceOp) GetLogsink(ctx context.Context, databaseID string return nil, nil, err } + // root := new(databaseLogsinkRoot) root := new(DatabaseLogsink) + resp, err := svc.client.Do(ctx, req, root) if err != nil { return nil, resp, err @@ -1857,7 +1944,8 @@ func (svc *DatabasesServiceOp) UpdateLogsink(ctx context.Context, databaseID str return nil, err } - resp, err := svc.client.Do(ctx, req, nil) + root := new(databaseLogsinkRoot) + resp, err := svc.client.Do(ctx, req, root) if err != nil { return resp, err } diff --git a/vendor/github.com/digitalocean/godo/godo.go b/vendor/github.com/digitalocean/godo/godo.go index 45c0f115f..3702ac1f7 100644 --- a/vendor/github.com/digitalocean/godo/godo.go +++ b/vendor/github.com/digitalocean/godo/godo.go @@ -21,7 +21,7 @@ import ( ) const ( - libraryVersion = "1.125.0" + libraryVersion = "1.126.0" defaultBaseURL = "https://api.digitalocean.com/" userAgent = "godo/" + libraryVersion mediaType = "application/json" diff --git a/vendor/github.com/digitalocean/godo/kubernetes.go b/vendor/github.com/digitalocean/godo/kubernetes.go index 8ef9d241e..9b3bcfa1a 100644 --- a/vendor/github.com/digitalocean/godo/kubernetes.go +++ b/vendor/github.com/digitalocean/godo/kubernetes.go @@ -65,11 +65,13 @@ type KubernetesServiceOp struct { // KubernetesClusterCreateRequest represents a request to create a Kubernetes cluster. 
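// Sketch of the custom CIDR fields added to the struct below; the subnet
// ranges and version slug are hypothetical, and both new fields are
// optional (omitted from the JSON body when empty):
//
//	cluster, _, err := client.Kubernetes.Create(ctx, &godo.KubernetesClusterCreateRequest{
//		Name:          "example-cluster",
//		RegionSlug:    "nyc1",
//		VersionSlug:   "1.31.1-do.0",
//		ClusterSubnet: "192.168.0.0/20",
//		ServiceSubnet: "192.168.16.0/22",
//		NodePools: []*godo.KubernetesNodePoolCreateRequest{
//			{Name: "default", Size: "s-2vcpu-4gb", Count: 2},
//		},
//	})
//	if err != nil {
//		log.Fatalf("creating cluster: %s", err)
//	}
//	_ = cluster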
type KubernetesClusterCreateRequest struct { - Name string `json:"name,omitempty"` - RegionSlug string `json:"region,omitempty"` - VersionSlug string `json:"version,omitempty"` - Tags []string `json:"tags,omitempty"` - VPCUUID string `json:"vpc_uuid,omitempty"` + Name string `json:"name,omitempty"` + RegionSlug string `json:"region,omitempty"` + VersionSlug string `json:"version,omitempty"` + Tags []string `json:"tags,omitempty"` + VPCUUID string `json:"vpc_uuid,omitempty"` + ClusterSubnet string `json:"cluster_subnet,omitempty"` + ServiceSubnet string `json:"service_subnet,omitempty"` // Create cluster with highly available control plane HA bool `json:"ha"` diff --git a/vendor/modules.txt b/vendor/modules.txt index 1ba5f61c4..ca97600a0 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -58,7 +58,7 @@ github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/digitalocean/godo v1.125.1-0.20240920194833-57fbfebd23d4 +# github.com/digitalocean/godo v1.126.1-0.20241003201848-56a323c1ff6f ## explicit; go 1.22 github.com/digitalocean/godo github.com/digitalocean/godo/metrics From 90afd10c1761c685d283bf0581606098bbee5ae4 Mon Sep 17 00:00:00 2001 From: Anna Lushnikova Date: Mon, 7 Oct 2024 15:57:26 -0400 Subject: [PATCH 05/13] use logsink config by type --- .../database/resource_database_logsink.go | 372 ++++++++++++------ go.mod | 2 +- go.sum | 4 +- .../github.com/digitalocean/godo/databases.go | 219 +++++++---- vendor/modules.txt | 2 +- 5 files changed, 415 insertions(+), 184 deletions(-) diff --git a/digitalocean/database/resource_database_logsink.go b/digitalocean/database/resource_database_logsink.go index b9cf1a144..41bf4eb39 100644 --- a/digitalocean/database/resource_database_logsink.go +++ b/digitalocean/database/resource_database_logsink.go @@ -4,10 +4,8 @@ import ( "context" "errors" "fmt" - "log" "math" "strings" - "time" "github.com/digitalocean/godo" "github.com/digitalocean/terraform-provider-digitalocean/digitalocean/config" @@ -52,37 +50,36 @@ func ResourceDigitalOceanDatabaseLogsink() *schema.Resource { "opensearch", }, false), }, - "config": { + "rsyslog_config": { Type: schema.TypeList, Optional: true, - Computed: true, - ForceNew: false, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "server": { Type: schema.TypeString, - Optional: true, - Description: "DNS name or IPv4 address of the rsyslog server. Required for rsyslog.", + Required: true, + Description: "DNS name or IPv4 address of the rsyslog server", }, "port": { Type: schema.TypeInt, - Optional: true, - Description: "The internal port on which the rsyslog server is listening. Required for rsyslog", + Required: true, + Description: "The internal port on which the rsyslog server is listening", }, "tls": { Type: schema.TypeBool, - Optional: true, - Description: "Use TLS (as the messages are not filtered and may contain sensitive information, it is highly recommended to set this to true if the remote server supports it). 
Required for rsyslog.", + Required: true, + Description: "Use TLS (as the messages are not filtered and may contain sensitive information, it is highly recommended to set this to true if the remote server supports it)", }, "format": { Type: schema.TypeString, - Optional: true, + Required: true, ValidateFunc: validation.StringInSlice([]string{ "rfc5424", "rfc3164", "custom", }, false), - Description: "Message format used by the server, this can be either rfc3164 (the old BSD style message format), rfc5424 (current syslog message format) or custom. Required for rsyslog.", + Description: "Message format used by the server, this can be either rfc3164 (the old BSD style message format), rfc5424 (current syslog message format) or custom", }, "logline": { Type: schema.TypeString, @@ -109,27 +106,76 @@ func ResourceDigitalOceanDatabaseLogsink() *schema.Resource { Optional: true, Description: "(PEM format) client cert to use", }, + }, + }, + }, + "elasticsearch_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "url": { + Type: schema.TypeString, + Required: true, + Description: "Connection URL. Required for Elasticsearch", + }, + "index_prefix": { + Type: schema.TypeString, + Required: true, + Description: "Index prefix. Required for Elasticsearch", + }, "index_days_max": { Type: schema.TypeInt, Optional: true, Description: "Default 7 days. Maximum number of days of logs to keep", }, - "url": { + "timeout": { + Type: schema.TypeFloat, + Required: true, + Description: "Default 10 days. Required for Elasticsearch", + ValidateFunc: validation.FloatBetween(10, 120), + }, + "ca": { Type: schema.TypeString, Optional: true, - Description: "Connection URL. Required for Elasticsearch and Opensearch.", + Description: "PEM encoded CA certificate", + }, + }, + }, + }, + "opensearch_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "url": { + Type: schema.TypeString, + Required: true, + Description: "Connection URL. Required for Opensearch", }, "index_prefix": { Type: schema.TypeString, + Required: true, + Description: "Index prefix. Required for Opensearch", + }, + "index_days_max": { + Type: schema.TypeInt, Optional: true, - Description: "Index prefix. Required for Opensearch and Elasticsearch.", + Description: "Default 7 days. Maximum number of days of logs to keep", }, "timeout": { Type: schema.TypeFloat, Optional: true, - Description: "Default 10 days. 
Elasticsearch/Opensearch request timeout limit", + Description: "Default 10 days", ValidateFunc: validation.FloatBetween(10, 120), }, + "ca": { + Type: schema.TypeString, + Optional: true, + Description: "PEM encoded CA certificate", + }, }, }, }, @@ -137,17 +183,132 @@ func ResourceDigitalOceanDatabaseLogsink() *schema.Resource { } } +func expandLogsinkRsyslogConfig(config []interface{}) *godo.RsyslogLogsinkConfig { + logsinkConfigOpts := &godo.RsyslogLogsinkConfig{} + if len(config) == 0 || config[0] == nil { + return logsinkConfigOpts + } + configMap := config[0].(map[string]interface{}) + if v, ok := configMap["server"]; ok { + logsinkConfigOpts.Server = v.(string) + } + if v, ok := configMap["port"]; ok { + logsinkConfigOpts.Port = v.(int) + } + if v, ok := configMap["tls"]; ok { + logsinkConfigOpts.TLS = v.(bool) + } + if v, ok := configMap["format"]; ok { + logsinkConfigOpts.Format = v.(string) + } + if v, ok := configMap["logline"]; ok { + logsinkConfigOpts.Logline = v.(string) + } + if v, ok := configMap["sd"]; ok { + logsinkConfigOpts.SD = v.(string) + } + if v, ok := configMap["ca"]; ok { + logsinkConfigOpts.CA = v.(string) + } + if v, ok := configMap["key"]; ok { + logsinkConfigOpts.Key = v.(string) + } + if v, ok := configMap["cert"]; ok { + logsinkConfigOpts.Cert = v.(string) + } + + return logsinkConfigOpts +} + +func expandLogsinkElasticsearchConfig(config []interface{}) *godo.ElasticsearchLogsinkConfig { + logsinkConfigOpts := &godo.ElasticsearchLogsinkConfig{} + if len(config) == 0 || config[0] == nil { + return logsinkConfigOpts + } + configMap := config[0].(map[string]interface{}) + if v, ok := configMap["url"]; ok { + logsinkConfigOpts.URL = v.(string) + } + if v, ok := configMap["index_prefix"]; ok { + logsinkConfigOpts.IndexPrefix = v.(string) + } + if v, ok := configMap["index_days_max"]; ok { + logsinkConfigOpts.IndexDaysMax = v.(int) + } + if v, ok := configMap["timeout"]; ok { + if v.(float64) > float64(math.SmallestNonzeroFloat32) || v.(float64) < float64(math.MaxFloat32) { + logsinkConfigOpts.Timeout = float32(v.(float64)) + } + } + if v, ok := configMap["ca"]; ok { + logsinkConfigOpts.CA = v.(string) + } + + return logsinkConfigOpts +} + +func expandLogsinkOpensearchConfig(config []interface{}) *godo.OpensearchLogsinkConfig { + logsinkConfigOpts := &godo.OpensearchLogsinkConfig{} + if len(config) == 0 || config[0] == nil { + return logsinkConfigOpts + } + configMap := config[0].(map[string]interface{}) + if v, ok := configMap["url"]; ok { + logsinkConfigOpts.URL = v.(string) + } + if v, ok := configMap["index_prefix"]; ok { + logsinkConfigOpts.IndexPrefix = v.(string) + } + if v, ok := configMap["index_days_max"]; ok { + logsinkConfigOpts.IndexDaysMax = v.(int) + } + if v, ok := configMap["timeout"]; ok { + if v.(float64) > float64(math.SmallestNonzeroFloat32) || v.(float64) < float64(math.MaxFloat32) { + logsinkConfigOpts.Timeout = float32(v.(float64)) + } + } + if v, ok := configMap["ca"]; ok { + logsinkConfigOpts.CA = v.(string) + } + + return logsinkConfigOpts +} + func resourceDigitalOceanDatabaseLogsinkCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*config.CombinedConfig).GodoClient() clusterID := d.Get("cluster_id").(string) - + sinkType := d.Get("sink_type").(string) opts := &godo.DatabaseCreateLogsinkRequest{ Name: d.Get("sink_name").(string), Type: d.Get("sink_type").(string), } - if v, ok := d.GetOk("config"); ok { - opts.Config = expandLogsinkConfig(v.([]interface{})) + var iCfg interface{} 
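For context on the `iCfg` declaration above: the godo request types at this stage declare `Config *interface{}`, so the provider stores the typed config in an `interface{}` and passes its address. `encoding/json` marshals a pointer to an interface the same way it marshals the value the interface wraps, so the wire format is unaffected. A standalone sketch with illustrative types (not the provider's exact structs):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Illustrative config type; the fields are assumptions for the demo.
type rsyslogConfig struct {
	Server string `json:"server"`
	Port   int    `json:"port"`
}

func main() {
	// Store the typed config in an interface{} and marshal its address,
	// the same shape a *interface{} request field produces.
	var cfg interface{} = &rsyslogConfig{Server: "192.0.2.10", Port: 514}

	direct, _ := json.Marshal(cfg)    // marshal the interface value
	indirect, _ := json.Marshal(&cfg) // marshal the *interface{}

	fmt.Println(string(direct))   // {"server":"192.0.2.10","port":514}
	fmt.Println(string(indirect)) // identical JSON
}
```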
+ + switch sinkType { + case "rsyslog": + if v, ok := d.GetOk("rsyslog_config"); ok { + iCfg = expandLogsinkRsyslogConfig(v.([]interface{})) + } else { + return diag.Errorf("Error creating database logsink: rsyslog_config is required when type is rsyslog") + } + case "elasticsearch": + if v, ok := d.GetOk("elasticsearch_config"); ok { + iCfg = expandLogsinkElasticsearchConfig(v.([]interface{})) + } else { + return diag.Errorf("Error creating database logsink: elasticsearch_config is required when type is elasticsearch") + } + case "opensearch": + if v, ok := d.GetOk("opensearch_config"); ok { + iCfg = expandLogsinkOpensearchConfig(v.([]interface{})) + } else { + return diag.Errorf("Error creating database logsink: opensearch_config is required when type is opensearch") + } + } + + opts.Config = &iCfg + if opts.Config == nil { + return diag.Errorf("Error creating database logsink: config is required") } logsink, _, err := client.Databases.CreateLogsink(context.Background(), clusterID, opts) @@ -155,18 +316,10 @@ func resourceDigitalOceanDatabaseLogsinkCreate(ctx context.Context, d *schema.Re return diag.Errorf("Error creating database logsink: %s", err) } - time.Sleep(30 * time.Second) - - log.Printf("[DEBUGGGG] Database LOGSINK NAMEE: %#v", logsink.Name) - logsinkIDFormat := makeDatabaseLogsinkID(clusterID, logsink.ID) - log.Printf("[DEBUGGGG] Database logsink create configuration: %#v", logsinkIDFormat) d.SetId(logsinkIDFormat) d.Set("sink_id", logsink.ID) - log.Printf("[DEBUGGGG] Database LOGSINK - logsink.ID: %#v", logsink.ID) - log.Printf("[DEBUGGGG] Database LOGSINK - d sink_id: %#v", d.Get("sink_id").(string)) - return resourceDigitalOceanDatabaseLogsinkRead(ctx, d, meta) } @@ -175,11 +328,33 @@ func resourceDigitalOceanDatabaseLogsinkUpdate(ctx context.Context, d *schema.Re clusterID := d.Get("cluster_id").(string) opts := &godo.DatabaseUpdateLogsinkRequest{} - if v, ok := d.GetOk("config"); ok { - opts.Config = expandLogsinkConfig(v.([]interface{})) + sinkType := d.Get("sink_type").(string) + + var iCfg interface{} + + switch sinkType { + case "rsyslog": + if v, ok := d.GetOk("rsyslog_config"); ok { + iCfg = expandLogsinkRsyslogConfig(v.([]interface{})) + } else { + return diag.Errorf("Error creating database logsink: rsyslog_config is required when type is rsyslog") + } + case "elasticsearch": + if v, ok := d.GetOk("elasticsearch_config"); ok { + iCfg = expandLogsinkElasticsearchConfig(v.([]interface{})) + } else { + return diag.Errorf("Error creating database logsink: elasticsearch_config is required when type is elasticsearch") + } + case "opensearch": + if v, ok := d.GetOk("opensearch_config"); ok { + iCfg = expandLogsinkOpensearchConfig(v.([]interface{})) + } else { + return diag.Errorf("Error creating database logsink: opensearch_config is required when type is opensearch") + } } - log.Printf("[DEBUG] Database logsink update configuration: %#v", opts) + opts.Config = &iCfg + _, err := client.Databases.UpdateLogsink(context.Background(), clusterID, d.Id(), opts) if err != nil { return diag.Errorf("Error updating database logsink: %s", err) @@ -191,9 +366,9 @@ func resourceDigitalOceanDatabaseLogsinkUpdate(ctx context.Context, d *schema.Re func resourceDigitalOceanDatabaseLogsinkDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*config.CombinedConfig).GodoClient() clusterID := d.Get("cluster_id").(string) + logsinkID := d.Get("sink_id").(string) - log.Printf("[INFO] Deleting logsink: %s", d.Id()) - _, err := 
client.Databases.DeleteLogsink(ctx, clusterID, d.Id()) + _, err := client.Databases.DeleteLogsink(ctx, clusterID, logsinkID) if err != nil { return diag.Errorf("Error deleting logsink topic: %s", err) } @@ -202,76 +377,12 @@ func resourceDigitalOceanDatabaseLogsinkDelete(ctx context.Context, d *schema.Re return nil } -func expandLogsinkConfig(config []interface{}) *godo.DatabaseLogsinkConfig { - logsinkConfigOpts := &godo.DatabaseLogsinkConfig{} - configMap := config[0].(map[string]interface{}) // TODO: check out expandAppSpecServices - - if v, ok := configMap["server"]; ok { - logsinkConfigOpts.Server = v.(string) - } - - if v, ok := configMap["port"]; ok { - logsinkConfigOpts.Port = v.(int) - } - - if v, ok := configMap["tls"]; ok { - logsinkConfigOpts.TLS = v.(bool) - } - - if v, ok := configMap["format"]; ok { - logsinkConfigOpts.Format = v.(string) - } - - if v, ok := configMap["logline"]; ok { - logsinkConfigOpts.Logline = v.(string) - } - - if v, ok := configMap["sd"]; ok { - logsinkConfigOpts.SD = v.(string) - } - - if v, ok := configMap["ca"]; ok { - logsinkConfigOpts.CA = v.(string) - } - - if v, ok := configMap["key"]; ok { - logsinkConfigOpts.Key = v.(string) - } - - if v, ok := configMap["cert"]; ok { - logsinkConfigOpts.Cert = v.(string) - } - - if v, ok := configMap["url"]; ok { - logsinkConfigOpts.URL = v.(string) - } - - if v, ok := configMap["index_prefix"]; ok { - logsinkConfigOpts.IndexPrefix = v.(string) - } - - if v, ok := configMap["index_days_max"]; ok { - logsinkConfigOpts.IndexDaysMax = v.(int) - } - - if v, ok := configMap["timeout"]; ok { - if v.(float64) > float64(math.SmallestNonzeroFloat32) || v.(float64) < float64(math.MaxFloat32) { - logsinkConfigOpts.Timeout = float32(v.(float64)) - } - } - - return logsinkConfigOpts -} - func resourceDigitalOceanDatabaseLogsinkRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*config.CombinedConfig).GodoClient() clusterID := d.Get("cluster_id").(string) logsinkID := d.Get("sink_id").(string) logsink, resp, err := client.Databases.GetLogsink(ctx, clusterID, logsinkID) - log.Printf("[DEBUG] Read LOGSINK - logsink: %#v", logsink) - log.Printf("[DEBUG] Read LOGSINK - resp: %#v", resp) - log.Printf("[DEBUG] Read LOGSINK - err: %#v", err) if err != nil { // If the logsink is somehow already destroyed, mark as // successfully gone @@ -283,27 +394,42 @@ func resourceDigitalOceanDatabaseLogsinkRead(ctx context.Context, d *schema.Reso return diag.Errorf("Error retrieving logsink: %s", err) } - d.Set("sink_name", logsink.Name) // TODO: nil - fix + d.Set("sink_name", logsink.Name) d.Set("sink_type", logsink.Type) - log.Printf("[DEBUG] TRACE 1") - log.Printf("[DEBUG] logsink.Config: %v ", logsink.Config) - if err := d.Set("config", flattenDatabaseLogsinkConfig(logsink.Config)); err != nil { - log.Printf("[DEBUG] TRACE 2") - return diag.Errorf("Error setting logsink config: %#v", err) + switch logsink.Type { + case "rsyslog": + if cfg, ok := (*logsink.Config).(*godo.RsyslogLogsinkConfig); ok { + if err := d.Set("config", flattenLogsinkRsyslogConfig(cfg)); err != nil { + return diag.Errorf("Error setting logsink config: %#v", err) + } + } else { + return diag.Errorf("Error asserting logsink config to RsyslogLogsinkConfig") + } + case "elasticsearch": + if cfg, ok := (*logsink.Config).(*godo.ElasticsearchLogsinkConfig); ok { + if err := d.Set("config", flattenLogsinkElasticsearchConfig(cfg)); err != nil { + return diag.Errorf("Error setting logsink config: %#v", err) + } + } else { + 
return diag.Errorf("Error asserting logsink config to ElasticsearchLogsinkConfig") + } + case "opensearch": + if cfg, ok := (*logsink.Config).(*godo.OpensearchLogsinkConfig); ok { + if err := d.Set("config", flattenLogsinkOpensearchConfig(cfg)); err != nil { + return diag.Errorf("Error setting logsink config: %#v", err) + } + } else { + return diag.Errorf("Error asserting logsink config to OpensearchLogsinkConfig") + } } - log.Printf("[DEBUG] TRACE 3") return nil } func resourceDigitalOceanDatabaseLogsinkImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - log.Printf("[DEBUG44] Database logsink create configuration: %#v", d.Id()) - if strings.Contains(d.Id(), ",") { s := strings.Split(d.Id(), ",") - log.Printf("[DEBUG33] Database logsink create configuration: %#v", s) - d.SetId(makeDatabaseLogsinkID(s[0], s[1])) d.Set("cluster_id", s[0]) d.Set("sink_id", s[1]) @@ -318,11 +444,9 @@ func makeDatabaseLogsinkID(clusterID string, logsinkID string) string { return fmt.Sprintf("%s/logsink/%s", clusterID, logsinkID) } -func flattenDatabaseLogsinkConfig(config *godo.DatabaseLogsinkConfig) map[string]interface{} { - +func flattenLogsinkRsyslogConfig(config *godo.RsyslogLogsinkConfig) map[string]interface{} { if config != nil { r := make(map[string]interface{}) - r["server"] = (*config).Server r["port"] = (*config).Port r["tls"] = (*config).TLS @@ -332,6 +456,32 @@ func flattenDatabaseLogsinkConfig(config *godo.DatabaseLogsinkConfig) map[string r["ca"] = (*config).CA r["key"] = (*config).Key r["cert"] = (*config).Cert + + return r + } + + return nil +} + +func flattenLogsinkElasticsearchConfig(config *godo.ElasticsearchLogsinkConfig) map[string]interface{} { + if config != nil { + r := make(map[string]interface{}) + r["ca"] = (*config).CA + r["url"] = (*config).URL + r["index_prefix"] = (*config).IndexPrefix + r["index_days_max"] = (*config).IndexDaysMax + r["timeout"] = (*config).Timeout + + return r + } + + return nil +} + +func flattenLogsinkOpensearchConfig(config *godo.OpensearchLogsinkConfig) map[string]interface{} { + if config != nil { + r := make(map[string]interface{}) + r["ca"] = (*config).CA r["url"] = (*config).URL r["index_prefix"] = (*config).IndexPrefix r["index_days_max"] = (*config).IndexDaysMax diff --git a/go.mod b/go.mod index 12fe199d7..5128db5be 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module github.com/digitalocean/terraform-provider-digitalocean require ( github.com/aws/aws-sdk-go v1.42.18 - github.com/digitalocean/godo v1.126.1-0.20241003201848-56a323c1ff6f + github.com/digitalocean/godo v1.126.1-0.20241007162943-72c4c32eb2e0 github.com/hashicorp/awspolicyequivalence v1.5.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/go-uuid v1.0.3 diff --git a/go.sum b/go.sum index 37527c5d3..abde222c8 100644 --- a/go.sum +++ b/go.sum @@ -25,8 +25,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/digitalocean/godo v1.126.1-0.20241003201848-56a323c1ff6f h1:TpEhmJ6LohoBcd8ACfZt+hfwbrKWOmZNiSKXgHtmkQ4= -github.com/digitalocean/godo v1.126.1-0.20241003201848-56a323c1ff6f/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= +github.com/digitalocean/godo v1.126.1-0.20241007162943-72c4c32eb2e0 
h1:k+8/hvab7mKkIn0T8o10qGOTDTCMH+R2SUkXHLtPs2Q= +github.com/digitalocean/godo v1.126.1-0.20241007162943-72c4c32eb2e0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= diff --git a/vendor/github.com/digitalocean/godo/databases.go b/vendor/github.com/digitalocean/godo/databases.go index 0cd1e51cf..b63d48b73 100644 --- a/vendor/github.com/digitalocean/godo/databases.go +++ b/vendor/github.com/digitalocean/godo/databases.go @@ -174,11 +174,21 @@ type DatabasesService interface { ListDatabaseEvents(context.Context, string, *ListOptions) ([]DatabaseEvent, *Response, error) ListIndexes(context.Context, string, *ListOptions) ([]DatabaseIndex, *Response, error) DeleteIndex(context.Context, string, string) (*Response, error) - CreateLogsink(ctx context.Context, databaseID string, createLogsink *DatabaseCreateLogsinkRequest) (*DatabaseLogsink, *Response, error) + // GetRsyslogLogsink(ctx context.Context, databaseID string, logsinkID string) (*DatabaseRsyslogLogsink, *Response, error) + // CreateRsyslogLogsink(ctx context.Context, databaseID string, createLogsink *DatabaseCreateRsyslogLogsinkRequest) (*DatabaseRsyslogLogsink, *Response, error) + // UpdateRsyslogLogsink(ctx context.Context, databaseID string, logsinkID string, updateLogsink *DatabaseUpdateRsyslogLogsinkRequest) (*Response, error) + // GetElasticsearchLogsink(ctx context.Context, databaseID string, logsinkID string) (*DatabaseElasticsearchLogsink, *Response, error) + // CreateElasticsearchLogsink(ctx context.Context, databaseID string, createLogsink *DatabaseCreateElasticsearchLogsinkRequest) (*DatabaseElasticsearchLogsink, *Response, error) + // UpdateElasticsearchLogsink(ctx context.Context, databaseID string, logsinkID string, updateLogsink *DatabaseUpdateElasticsearchLogsinkRequest) (*Response, error) + // GetOpensearchLogsink(ctx context.Context, databaseID string, logsinkID string) (*DatabaseOpensearchLogsink, *Response, error) + // CreateOpensearchLogsink(ctx context.Context, databaseID string, createLogsink *DatabaseCreateOpensearchLogsinkRequest) (*DatabaseOpensearchLogsink, *Response, error) + // UpdateOpensearchLogsink(ctx context.Context, databaseID string, logsinkID string, updateLogsink *DatabaseUpdateOpensearchLogsinkRequest) (*Response, error) + ListLogsinks(ctx context.Context, databaseID string, opts *ListOptions) ([]interface{}, *Response, error) + DeleteLogsink(ctx context.Context, databaseID, logsinkID string) (*Response, error) + GetLogsink(ctx context.Context, databaseID string, logsinkID string) (*DatabaseLogsink, *Response, error) - ListLogsinks(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseLogsink, *Response, error) + CreateLogsink(ctx context.Context, databaseID string, createLogsink *DatabaseCreateLogsinkRequest) (*DatabaseLogsink, *Response, error) UpdateLogsink(ctx context.Context, databaseID string, logsinkID string, updateLogsink *DatabaseUpdateLogsinkRequest) (*Response, error) - DeleteLogsink(ctx context.Context, databaseID, logsinkID string) (*Response, error) } // DatabasesServiceOp handles communication with the Databases related methods @@ -350,14 +360,6 @@ type DatabaseTopic struct { Config *TopicConfig `json:"config,omitempty"` } -// DatabaseLogsink represents a logsink -type DatabaseLogsink struct { - ID string 
`json:"sink_id"` - Name string `json:"sink_name,omitempty"` - Type string `json:"sink_type,omitempty"` - Config *DatabaseLogsinkConfig `json:"config,omitempty"` -} - // TopicPartition represents the state of a Kafka topic partition type TopicPartition struct { EarliestOffset uint64 `json:"earliest_offset,omitempty"` @@ -507,33 +509,115 @@ type DatabaseFirewallRule struct { CreatedAt time.Time `json:"created_at"` } -// DatabaseCreateLogsinkRequest is used to create logsink for a database cluster +// DatabaseLogsink represents a logsink. +type DatabaseLogsink struct { + ID string `json:"sink_id"` + Name string `json:"sink_name,required"` + Type string `json:"sink_type,required"` + Config *interface{} `json:"config,required"` +} + +// DatabaseCreateLogsinkRequest is used to create logsink for a database cluster. type DatabaseCreateLogsinkRequest struct { - Name string `json:"sink_name"` - Type string `json:"sink_type"` - Config *DatabaseLogsinkConfig `json:"config"` + Name string `json:"sink_name"` + Type string `json:"sink_type"` + Config *interface{} `json:"config"` } -// DatabaseUpdateLogsinkRequest is used to update logsink for a database cluster +// DatabaseUpdateLogsinkRequest is used to update logsink for a database cluster. type DatabaseUpdateLogsinkRequest struct { - Config *DatabaseLogsinkConfig `json:"config"` + Config *interface{} `json:"config"` +} + +// DatabaseRsyslogLogsink represents a rsyslog logsink. +type DatabaseRsyslogLogsink struct { + ID string `json:"sink_id"` + Name string `json:"sink_name,required"` + Type string `json:"sink_type,required"` + Config *RsyslogLogsinkConfig `json:"config,required"` +} + +// DatabaseCreateRsyslogLogsinkRequest is used to create rsyslog logsink for a database cluster. +type DatabaseCreateRsyslogLogsinkRequest struct { + Name string `json:"sink_name"` + Type string `json:"sink_type"` + Config *RsyslogLogsinkConfig `json:"config"` +} + +// DatabaseUpdateRsyslogLogsinkRequest is used to update rsyslog logsink for a database cluster. +type DatabaseUpdateRsyslogLogsinkRequest struct { + Config *RsyslogLogsinkConfig `json:"config"` +} + +// RsyslogLogsinkConfig represents rsyslog logsink configuration. +type RsyslogLogsinkConfig struct { + Server string `json:"server,required"` + Port int `json:"port,required"` + TLS bool `json:"tls,required"` + Format string `json:"format,required"` + Logline string `json:"logline,omitempty"` + SD string `json:"sd,omitempty"` + CA string `json:"ca,omitempty"` + Key string `json:"key,omitempty"` + Cert string `json:"cert,omitempty"` +} + +// DatabaseElasticsearchLogsink represents an elasticsearch logsink. +type DatabaseElasticsearchLogsink struct { + ID string `json:"sink_id"` + Name string `json:"sink_name,required"` + Type string `json:"sink_type,required"` + Config *ElasticsearchLogsinkConfig `json:"config,required"` +} + +// DatabaseCreateElasticsearchLogsinkRequest is used to create elasticsearch logsink for a database cluster. +type DatabaseCreateElasticsearchLogsinkRequest struct { + Name string `json:"sink_name"` + Type string `json:"sink_type"` + Config *ElasticsearchLogsinkConfig `json:"config"` } -// DatabaseLogsinkConfig represents one of the configurable options (rsyslog_logsink, elasticsearch_logsink, or opensearch_logsink) for a logsink. -type DatabaseLogsinkConfig struct { - URL string `json:"url,omitempty"` - IndexPrefix string `json:"index_prefix,omitempty"` +// DatabaseUpdateElasticsearchLogsinkRequest is used to update elasticsearch logsink for a database cluster. 
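One caveat on the struct tags above: `required` is not a tag option that `encoding/json` recognizes (only `omitempty`, `string`, and `-` carry meaning), so tags like `json:"sink_name,required"` behave exactly like `json:"sink_name"` and enforce nothing at encode or decode time. A quick demonstration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Unknown tag options such as "required" are silently ignored by
// encoding/json, so this field is always emitted, empty or not.
type sinkFields struct {
	Name string `json:"sink_name,required"`
}

func main() {
	out, err := json.Marshal(sinkFields{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"sink_name":""} - the zero value still appears
}
```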
+type DatabaseUpdateElasticsearchLogsinkRequest struct { + Config *ElasticsearchLogsinkConfig `json:"config"` +} + +// ElasticsearchLogsinkConfig represents elasticsearch logsink configuration. +type ElasticsearchLogsinkConfig struct { + URL string `json:"url,required"` + IndexPrefix string `json:"index_prefix,required"` + IndexDaysMax int `json:"index_days_max,omitempty"` + Timeout float32 `json:"timeout,omitempty"` + CA string `json:"ca,omitempty"` +} + +// DatabaseOpensearchLogsink represents an opensearch logsink. +type DatabaseOpensearchLogsink struct { + ID string `json:"sink_id"` + Name string `json:"sink_name,required"` + Type string `json:"sink_type,required"` + Config *OpensearchLogsinkConfig `json:"config,required"` +} + +// DatabaseCreateOpensearchLogsinkRequest is used to create opensearch logsink for a database cluster. +type DatabaseCreateOpensearchLogsinkRequest struct { + Name string `json:"sink_name"` + Type string `json:"sink_type"` + Config *OpensearchLogsinkConfig `json:"config"` +} + +// DatabaseUpdateOpensearchLogsinkRequest is used to update opensearch logsink for a database cluster. +type DatabaseUpdateOpensearchLogsinkRequest struct { + Config *OpensearchLogsinkConfig `json:"config"` +} + +// OpensearchLogsinkConfig represents opensearch logsink configuration. +type OpensearchLogsinkConfig struct { + URL string `json:"url,required"` + IndexPrefix string `json:"index_prefix,required"` IndexDaysMax int `json:"index_days_max,omitempty"` Timeout float32 `json:"timeout,omitempty"` - Server string `json:"server,omitempty"` - Port int `json:"port,omitempty"` - TLS bool `json:"tls,omitempty"` - Format string `json:"format,omitempty"` - Logline string `json:"logline,omitempty"` - SD string `json:"sd,omitempty"` CA string `json:"ca,omitempty"` - Key string `json:"key,omitempty"` - Cert string `json:"cert,omitempty"` } // PostgreSQLConfig holds advanced configurations for PostgreSQL database clusters. @@ -712,7 +796,7 @@ type OpensearchConfig struct { IsmEnabled *bool `json:"ism_enabled,omitempty"` IsmHistoryEnabled *bool `json:"ism_history_enabled,omitempty"` IsmHistoryMaxAgeHours *int `json:"ism_history_max_age_hours,omitempty"` - IsmHistoryMaxDocs *uint64 `json:"ism_history_max_docs,omitempty"` + IsmHistoryMaxDocs *int64 `json:"ism_history_max_docs,omitempty"` IsmHistoryRolloverCheckPeriodHours *int `json:"ism_history_rollover_check_period_hours,omitempty"` IsmHistoryRolloverRetentionPeriodDays *int `json:"ism_history_rollover_retention_period_days,omitempty"` SearchMaxBuckets *int `json:"search_max_buckets,omitempty"` @@ -829,11 +913,11 @@ type databaseTopicsRoot struct { } type databaseLogsinkRoot struct { - Sink *DatabaseLogsink `json:"sink"` + Sink DatabaseLogsink `json:"sink"` } type databaseLogsinksRoot struct { - Sinks []DatabaseLogsink `json:"sinks"` + Sinks []interface{} `json:"sinks"` } type databaseMetricsCredentialsRoot struct { @@ -1882,83 +1966,80 @@ func (svc *DatabasesServiceOp) DeleteIndex(ctx context.Context, databaseID, name return resp, nil } -// CreateLogsink creates a new logsink for a database -func (svc *DatabasesServiceOp) CreateLogsink(ctx context.Context, databaseID string, createLogsink *DatabaseCreateLogsinkRequest) (*DatabaseLogsink, *Response, error) { +// ListTopics returns all topics for a given kafka cluster. 
+func (svc *DatabasesServiceOp) ListLogsinks(ctx context.Context, databaseID string, opts *ListOptions) ([]interface{}, *Response, error) { path := fmt.Sprintf(databaseLogsinksPath, databaseID) - req, err := svc.client.NewRequest(ctx, http.MethodPost, path, createLogsink) + path, err := addOptions(path, opts) if err != nil { return nil, nil, err } - - root := new(databaseLogsinkRoot) - + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(databaseLogsinksRoot) resp, err := svc.client.Do(ctx, req, root) if err != nil { return nil, resp, err } - return root.Sink, resp, nil + return root.Sinks, resp, nil } -// GetLogsink gets a logsink for a database -func (svc *DatabasesServiceOp) GetLogsink(ctx context.Context, databaseID string, logsinkID string) (*DatabaseLogsink, *Response, error) { +// DeleteLogsink deletes a logsink for a database cluster. +func (svc *DatabasesServiceOp) DeleteLogsink(ctx context.Context, databaseID, logsinkID string) (*Response, error) { path := fmt.Sprintf(databaseLogsinkPath, databaseID, logsinkID) - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil) if err != nil { - return nil, nil, err + return nil, err } - - // root := new(databaseLogsinkRoot) - root := new(DatabaseLogsink) - - resp, err := svc.client.Do(ctx, req, root) + resp, err := svc.client.Do(ctx, req, nil) if err != nil { - return nil, resp, err + return resp, err } - return root, resp, nil + return resp, nil } -// ListTopics returns all topics for a given kafka cluster -func (svc *DatabasesServiceOp) ListLogsinks(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseLogsink, *Response, error) { - path := fmt.Sprintf(databaseLogsinksPath, databaseID) - path, err := addOptions(path, opts) - if err != nil { - return nil, nil, err - } +// GetLogsink gets a logsink for a database. +func (svc *DatabasesServiceOp) GetLogsink(ctx context.Context, databaseID string, logsinkID string) (*DatabaseLogsink, *Response, error) { + path := fmt.Sprintf(databaseLogsinkPath, databaseID, logsinkID) req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) if err != nil { return nil, nil, err } - root := new(databaseLogsinksRoot) + + root := new(databaseLogsinkRoot) resp, err := svc.client.Do(ctx, req, root) if err != nil { return nil, resp, err } - return root.Sinks, resp, nil + return &root.Sink, resp, nil } -// UpdateLogsink updates a logsink for a database cluster -func (svc *DatabasesServiceOp) UpdateLogsink(ctx context.Context, databaseID string, logsinkID string, updateLogsink *DatabaseUpdateLogsinkRequest) (*Response, error) { - path := fmt.Sprintf(databaseLogsinkPath, databaseID, logsinkID) - req, err := svc.client.NewRequest(ctx, http.MethodPut, path, updateLogsink) +// CreateLogsink creates a new logsink for a database. 
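Before the `CreateLogsink` implementation below, a caller-side sketch of the surface as it stands after this patch. The token, cluster ID, and config values are placeholders, and note that a later patch in this series narrows `Config` from `*interface{}` to a concrete type:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/digitalocean/godo"
)

func main() {
	// Placeholder credentials and cluster ID.
	client := godo.NewFromToken("dop_v1_example_token")
	ctx := context.Background()

	// At this point in the series Config is a *interface{}, so the typed
	// config is wrapped in an interface value first, as the provider does.
	var cfg interface{} = &godo.RsyslogLogsinkConfig{
		Server: "192.0.2.10",
		Port:   514,
		TLS:    true,
		Format: "rfc5424",
	}

	sink, _, err := client.Databases.CreateLogsink(ctx, "your-cluster-uuid", &godo.DatabaseCreateLogsinkRequest{
		Name:   "example-sink",
		Type:   "rsyslog",
		Config: &cfg,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sink.ID)
}
```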
+func (svc *DatabasesServiceOp) CreateLogsink(ctx context.Context, databaseID string, createLogsink *DatabaseCreateLogsinkRequest) (*DatabaseLogsink, *Response, error) { + path := fmt.Sprintf(databaseLogsinksPath, databaseID) + req, err := svc.client.NewRequest(ctx, http.MethodPost, path, createLogsink) if err != nil { - return nil, err + return nil, nil, err } root := new(databaseLogsinkRoot) resp, err := svc.client.Do(ctx, req, root) if err != nil { - return resp, err + return nil, resp, err } - return resp, nil + + return &root.Sink, resp, nil } -// DeleteLogsink deletes a logsink for a database cluster -func (svc *DatabasesServiceOp) DeleteLogsink(ctx context.Context, databaseID, logsinkID string) (*Response, error) { +// UpdateLogsink updates a logsink for a database cluster. +func (svc *DatabasesServiceOp) UpdateLogsink(ctx context.Context, databaseID string, logsinkID string, updateLogsink *DatabaseUpdateLogsinkRequest) (*Response, error) { path := fmt.Sprintf(databaseLogsinkPath, databaseID, logsinkID) - req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil) + req, err := svc.client.NewRequest(ctx, http.MethodPut, path, updateLogsink) if err != nil { return nil, err } + resp, err := svc.client.Do(ctx, req, nil) if err != nil { return resp, err diff --git a/vendor/modules.txt b/vendor/modules.txt index ca97600a0..e85f80af4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -58,7 +58,7 @@ github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/digitalocean/godo v1.126.1-0.20241003201848-56a323c1ff6f +# github.com/digitalocean/godo v1.126.1-0.20241007162943-72c4c32eb2e0 ## explicit; go 1.22 github.com/digitalocean/godo github.com/digitalocean/godo/metrics From ef8d8aeb03a6e84cb782314bb3b2fdd42038acf5 Mon Sep 17 00:00:00 2001 From: Anna Lushnikova Date: Mon, 7 Oct 2024 16:23:40 -0400 Subject: [PATCH 06/13] update error message --- digitalocean/database/resource_database_logsink.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/digitalocean/database/resource_database_logsink.go b/digitalocean/database/resource_database_logsink.go index 41bf4eb39..3fa05abfc 100644 --- a/digitalocean/database/resource_database_logsink.go +++ b/digitalocean/database/resource_database_logsink.go @@ -337,19 +337,19 @@ func resourceDigitalOceanDatabaseLogsinkUpdate(ctx context.Context, d *schema.Re if v, ok := d.GetOk("rsyslog_config"); ok { iCfg = expandLogsinkRsyslogConfig(v.([]interface{})) } else { - return diag.Errorf("Error creating database logsink: rsyslog_config is required when type is rsyslog") + return diag.Errorf("Error updating database logsink: rsyslog_config is required when type is rsyslog") } case "elasticsearch": if v, ok := d.GetOk("elasticsearch_config"); ok { iCfg = expandLogsinkElasticsearchConfig(v.([]interface{})) } else { - return diag.Errorf("Error creating database logsink: elasticsearch_config is required when type is elasticsearch") + return diag.Errorf("Error updating database logsink: elasticsearch_config is required when type is elasticsearch") } case "opensearch": if v, ok := d.GetOk("opensearch_config"); ok { iCfg = expandLogsinkOpensearchConfig(v.([]interface{})) } else { - return diag.Errorf("Error creating database logsink: opensearch_config is required when type is opensearch") + return diag.Errorf("Error updating database logsink: opensearch_config is required when type is opensearch") } } From 8cc4f948c0ed516ae37b623f1699e6d505aff3f6 Mon 
Sep 17 00:00:00 2001 From: Anna Lushnikova Date: Mon, 7 Oct 2024 18:15:36 -0400 Subject: [PATCH 07/13] update documentation --- docs/resources/database_logsink.md | 32 +++++++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/docs/resources/database_logsink.md b/docs/resources/database_logsink.md index c4cc5540f..65ccd49e5 100644 --- a/docs/resources/database_logsink.md +++ b/docs/resources/database_logsink.md @@ -11,17 +11,43 @@ Provides DigitalOcean database logsink capabilities. Can be configured with rs ```hcl resource "digitalocean_database_logsink" "logsink-01" { cluster_id = digitalocean_database_cluster.doby.id - sink_name = "sinkexample" + sink_name = "sinkexample1" sink_type = "opensearch" - - config { + opensearch_config { url = "https://user:passwd@192.168.0.1:25060" index_prefix = "opensearch-logs" index_days_max = 5 } } +resource "digitalocean_database_logsink" "logsink-02" { + cluster_id = digitalocean_database_cluster.dotest.id + sink_name = "sinkexample2" + sink_type = "rsyslog" + + rsyslog_config { + server = "192.168.10.2" + port = 514 + tls = true + format = "rfc5424" + logline = "msg" + sd = "test-2" + } +} + +resource "digitalocean_database_logsink" "logsink-03" { + cluster_id = digitalocean_database_cluster.doby.id + sink_name = "sinkexample3" + sink_type = "elasticsearch" + + elasticsearch_config { + url = "https://user:passwd@192.168.0.1:25060" + index_prefix = "elasticsearch-logs" + index_days_max = 3 + } +} + resource "digitalocean_database_cluster" "doby" { name = "dobydb" engine = "pg" From 50164706566e644d361584d1517c9a4d11a51a5f Mon Sep 17 00:00:00 2001 From: Anna Lushnikova Date: Wed, 9 Oct 2024 10:22:15 -0400 Subject: [PATCH 08/13] update godo version, add acceptance tests --- .../resource_database_logsink_test.go | 59 ++++++++ go.mod | 2 +- go.sum | 4 +- .../github.com/digitalocean/godo/databases.go | 138 +++++------------- vendor/modules.txt | 2 +- 5 files changed, 97 insertions(+), 108 deletions(-) create mode 100644 digitalocean/database/resource_database_logsink_test.go diff --git a/digitalocean/database/resource_database_logsink_test.go b/digitalocean/database/resource_database_logsink_test.go new file mode 100644 index 000000000..6c0015d6b --- /dev/null +++ b/digitalocean/database/resource_database_logsink_test.go @@ -0,0 +1,59 @@ +package database_test + +import ( + "fmt" + "testing" + + "github.com/digitalocean/terraform-provider-digitalocean/digitalocean/acceptance" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDigitalOceanDatabaseLogsink_Basic(t *testing.T) { + name := acceptance.RandomTestName() + dbConfig := fmt.Sprintf(testAccCheckDigitalOceanDatabaseClusterPostgreSQL, name, "15") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.TestAccPreCheck(t) }, + ProviderFactories: acceptance.TestAccProviderFactories, + CheckDestroy: testAccCheckDigitalOceanDatabaseClusterDestroy, + Steps: []resource.TestStep{ + { + Config: fmt.Sprintf(testAccCheckDigitalOceanDatabaseLogsinkBasic, dbConfig, "lname", "opensearch", "https://user:passwd@192.168.0.1:25060", "logs", 5), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("digitalocean_database_logsink.logsink", "opensearch_config.0.url", "https://user:passwd@192.168.0.1:25060"), + resource.TestCheckResourceAttr("digitalocean_database_logsink.logsink", "opensearch_config.0.index_prefix", "logs"), + resource.TestCheckResourceAttr("digitalocean_database_logsink.logsink",
"opensearch_config.0.index_days_max", "5"), + resource.TestCheckResourceAttr("digitalocean_database_logsink.logsink", "sink_type", "opensearch"), + resource.TestCheckResourceAttr("digitalocean_database_logsink.logsink", "sink_name", "lname"), + ), + }, + + { + Config: fmt.Sprintf(testAccCheckDigitalOceanDatabaseLogsinkBasic, dbConfig, "new-lname", "opensearch", "https://user:passwd@192.168.0.1:25060", "logs", 4), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("digitalocean_database_logsink.logsink", "sink_name", "new-lname"), + resource.TestCheckResourceAttr("digitalocean_database_logsink.logsink", "opensearch_config.0.index_days_max", "4"), + ), + }, + { + ResourceName: "digitalocean_database_logsink.logsink", + Destroy: true, + }, + }, + }) +} + +const testAccCheckDigitalOceanDatabaseLogsinkBasic = ` +%s + +resource "digitalocean_database_logsink" "logsink" { + cluster_id = digitalocean_database_cluster.foobar.id + sink_name = "%s" + sink_type = "%s" + + opensearch_config { + url = "%s" + index_prefix = "%s" + index_days_max = %d + } +}` diff --git a/go.mod b/go.mod index 5128db5be..a3a296124 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module github.com/digitalocean/terraform-provider-digitalocean require ( github.com/aws/aws-sdk-go v1.42.18 - github.com/digitalocean/godo v1.126.1-0.20241007162943-72c4c32eb2e0 + github.com/digitalocean/godo v1.126.1-0.20241008185245-efb15764f26a github.com/hashicorp/awspolicyequivalence v1.5.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/go-uuid v1.0.3 diff --git a/go.sum b/go.sum index abde222c8..f3cd2e14c 100644 --- a/go.sum +++ b/go.sum @@ -25,8 +25,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/digitalocean/godo v1.126.1-0.20241007162943-72c4c32eb2e0 h1:k+8/hvab7mKkIn0T8o10qGOTDTCMH+R2SUkXHLtPs2Q= -github.com/digitalocean/godo v1.126.1-0.20241007162943-72c4c32eb2e0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= +github.com/digitalocean/godo v1.126.1-0.20241008185245-efb15764f26a h1:o154fyvSL+l2xz94LPSXq5c55rAYjy6wfVuiIZDhmBY= +github.com/digitalocean/godo v1.126.1-0.20241008185245-efb15764f26a/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= diff --git a/vendor/github.com/digitalocean/godo/databases.go b/vendor/github.com/digitalocean/godo/databases.go index b63d48b73..7cb9e98ef 100644 --- a/vendor/github.com/digitalocean/godo/databases.go +++ b/vendor/github.com/digitalocean/godo/databases.go @@ -174,21 +174,11 @@ type DatabasesService interface { ListDatabaseEvents(context.Context, string, *ListOptions) ([]DatabaseEvent, *Response, error) ListIndexes(context.Context, string, *ListOptions) ([]DatabaseIndex, *Response, error) DeleteIndex(context.Context, string, string) (*Response, error) - // GetRsyslogLogsink(ctx context.Context, databaseID string, logsinkID string) (*DatabaseRsyslogLogsink, *Response, error) - // CreateRsyslogLogsink(ctx context.Context, databaseID string, createLogsink 
*DatabaseCreateRsyslogLogsinkRequest) (*DatabaseRsyslogLogsink, *Response, error) - // UpdateRsyslogLogsink(ctx context.Context, databaseID string, logsinkID string, updateLogsink *DatabaseUpdateRsyslogLogsinkRequest) (*Response, error) - // GetElasticsearchLogsink(ctx context.Context, databaseID string, logsinkID string) (*DatabaseElasticsearchLogsink, *Response, error) - // CreateElasticsearchLogsink(ctx context.Context, databaseID string, createLogsink *DatabaseCreateElasticsearchLogsinkRequest) (*DatabaseElasticsearchLogsink, *Response, error) - // UpdateElasticsearchLogsink(ctx context.Context, databaseID string, logsinkID string, updateLogsink *DatabaseUpdateElasticsearchLogsinkRequest) (*Response, error) - // GetOpensearchLogsink(ctx context.Context, databaseID string, logsinkID string) (*DatabaseOpensearchLogsink, *Response, error) - // CreateOpensearchLogsink(ctx context.Context, databaseID string, createLogsink *DatabaseCreateOpensearchLogsinkRequest) (*DatabaseOpensearchLogsink, *Response, error) - // UpdateOpensearchLogsink(ctx context.Context, databaseID string, logsinkID string, updateLogsink *DatabaseUpdateOpensearchLogsinkRequest) (*Response, error) - ListLogsinks(ctx context.Context, databaseID string, opts *ListOptions) ([]interface{}, *Response, error) - DeleteLogsink(ctx context.Context, databaseID, logsinkID string) (*Response, error) - - GetLogsink(ctx context.Context, databaseID string, logsinkID string) (*DatabaseLogsink, *Response, error) CreateLogsink(ctx context.Context, databaseID string, createLogsink *DatabaseCreateLogsinkRequest) (*DatabaseLogsink, *Response, error) + GetLogsink(ctx context.Context, databaseID string, logsinkID string) (*DatabaseLogsink, *Response, error) + ListLogsinks(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseLogsink, *Response, error) UpdateLogsink(ctx context.Context, databaseID string, logsinkID string, updateLogsink *DatabaseUpdateLogsinkRequest) (*Response, error) + DeleteLogsink(ctx context.Context, databaseID, logsinkID string) (*Response, error) } // DatabasesServiceOp handles communication with the Databases related methods @@ -529,26 +519,6 @@ type DatabaseUpdateLogsinkRequest struct { Config *interface{} `json:"config"` } -// DatabaseRsyslogLogsink represents a rsyslog logsink. -type DatabaseRsyslogLogsink struct { - ID string `json:"sink_id"` - Name string `json:"sink_name,required"` - Type string `json:"sink_type,required"` - Config *RsyslogLogsinkConfig `json:"config,required"` -} - -// DatabaseCreateRsyslogLogsinkRequest is used to create rsyslog logsink for a database cluster. -type DatabaseCreateRsyslogLogsinkRequest struct { - Name string `json:"sink_name"` - Type string `json:"sink_type"` - Config *RsyslogLogsinkConfig `json:"config"` -} - -// DatabaseUpdateRsyslogLogsinkRequest is used to update rsyslog logsink for a database cluster. -type DatabaseUpdateRsyslogLogsinkRequest struct { - Config *RsyslogLogsinkConfig `json:"config"` -} - // RsyslogLogsinkConfig represents rsyslog logsink configuration. type RsyslogLogsinkConfig struct { Server string `json:"server,required"` @@ -562,26 +532,6 @@ type RsyslogLogsinkConfig struct { Cert string `json:"cert,omitempty"` } -// DatabaseElasticsearchLogsink represents an elasticsearch logsink. 
-type DatabaseElasticsearchLogsink struct { - ID string `json:"sink_id"` - Name string `json:"sink_name,required"` - Type string `json:"sink_type,required"` - Config *ElasticsearchLogsinkConfig `json:"config,required"` -} - -// DatabaseCreateElasticsearchLogsinkRequest is used to create elasticsearch logsink for a database cluster. -type DatabaseCreateElasticsearchLogsinkRequest struct { - Name string `json:"sink_name"` - Type string `json:"sink_type"` - Config *ElasticsearchLogsinkConfig `json:"config"` -} - -// DatabaseUpdateElasticsearchLogsinkRequest is used to update elasticsearch logsink for a database cluster. -type DatabaseUpdateElasticsearchLogsinkRequest struct { - Config *ElasticsearchLogsinkConfig `json:"config"` -} - // ElasticsearchLogsinkConfig represents elasticsearch logsink configuration. type ElasticsearchLogsinkConfig struct { URL string `json:"url,required"` @@ -591,26 +541,6 @@ type ElasticsearchLogsinkConfig struct { CA string `json:"ca,omitempty"` } -// DatabaseOpensearchLogsink represents an opensearch logsink. -type DatabaseOpensearchLogsink struct { - ID string `json:"sink_id"` - Name string `json:"sink_name,required"` - Type string `json:"sink_type,required"` - Config *OpensearchLogsinkConfig `json:"config,required"` -} - -// DatabaseCreateOpensearchLogsinkRequest is used to create opensearch logsink for a database cluster. -type DatabaseCreateOpensearchLogsinkRequest struct { - Name string `json:"sink_name"` - Type string `json:"sink_type"` - Config *OpensearchLogsinkConfig `json:"config"` -} - -// DatabaseUpdateOpensearchLogsinkRequest is used to update opensearch logsink for a database cluster. -type DatabaseUpdateOpensearchLogsinkRequest struct { - Config *OpensearchLogsinkConfig `json:"config"` -} - // OpensearchLogsinkConfig represents opensearch logsink configuration. type OpensearchLogsinkConfig struct { URL string `json:"url,required"` @@ -917,7 +847,7 @@ type databaseLogsinkRoot struct { } type databaseLogsinksRoot struct { - Sinks []interface{} `json:"sinks"` + Sinks []DatabaseLogsink `json:"sinks"` } type databaseMetricsCredentialsRoot struct { @@ -1966,40 +1896,24 @@ func (svc *DatabasesServiceOp) DeleteIndex(ctx context.Context, databaseID, name return resp, nil } -// ListTopics returns all topics for a given kafka cluster. -func (svc *DatabasesServiceOp) ListLogsinks(ctx context.Context, databaseID string, opts *ListOptions) ([]interface{}, *Response, error) { +// CreateLogsink creates a new logsink for a database cluster. +func (svc *DatabasesServiceOp) CreateLogsink(ctx context.Context, databaseID string, createLogsink *DatabaseCreateLogsinkRequest) (*DatabaseLogsink, *Response, error) { path := fmt.Sprintf(databaseLogsinksPath, databaseID) - path, err := addOptions(path, opts) - if err != nil { - return nil, nil, err - } - req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + req, err := svc.client.NewRequest(ctx, http.MethodPost, path, createLogsink) if err != nil { return nil, nil, err } - root := new(databaseLogsinksRoot) + + root := new(databaseLogsinkRoot) resp, err := svc.client.Do(ctx, req, root) if err != nil { return nil, resp, err } - return root.Sinks, resp, nil -} -// DeleteLogsink deletes a logsink for a database cluster. 
-func (svc *DatabasesServiceOp) DeleteLogsink(ctx context.Context, databaseID, logsinkID string) (*Response, error) { - path := fmt.Sprintf(databaseLogsinkPath, databaseID, logsinkID) - req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil) - if err != nil { - return nil, err - } - resp, err := svc.client.Do(ctx, req, nil) - if err != nil { - return resp, err - } - return resp, nil + return &root.Sink, resp, nil } -// GetLogsink gets a logsink for a database. +// GetLogsink gets a logsink for a database cluster. func (svc *DatabasesServiceOp) GetLogsink(ctx context.Context, databaseID string, logsinkID string) (*DatabaseLogsink, *Response, error) { path := fmt.Sprintf(databaseLogsinkPath, databaseID, logsinkID) req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) @@ -2015,21 +1929,23 @@ func (svc *DatabasesServiceOp) GetLogsink(ctx context.Context, databaseID string return &root.Sink, resp, nil } -// CreateLogsink creates a new logsink for a database. -func (svc *DatabasesServiceOp) CreateLogsink(ctx context.Context, databaseID string, createLogsink *DatabaseCreateLogsinkRequest) (*DatabaseLogsink, *Response, error) { +// ListLogsinks returns all logsinks for a given database cluster. +func (svc *DatabasesServiceOp) ListLogsinks(ctx context.Context, databaseID string, opts *ListOptions) ([]DatabaseLogsink, *Response, error) { path := fmt.Sprintf(databaseLogsinksPath, databaseID) - req, err := svc.client.NewRequest(ctx, http.MethodPost, path, createLogsink) + path, err := addOptions(path, opts) if err != nil { return nil, nil, err } - - root := new(databaseLogsinkRoot) + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(databaseLogsinksRoot) resp, err := svc.client.Do(ctx, req, root) if err != nil { return nil, resp, err } - - return &root.Sink, resp, nil + return root.Sinks, resp, nil } // UpdateLogsink updates a logsink for a database cluster. @@ -2046,3 +1962,17 @@ func (svc *DatabasesServiceOp) UpdateLogsink(ctx context.Context, databaseID str return resp, nil } + +// DeleteLogsink deletes a logsink for a database cluster.
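Because `GetLogsink` returns a generic `Config *interface{}` at this stage, a consumer has to recover a typed view with a marshal/unmarshal round trip; the provider adopts exactly this pattern later in the series. The pattern in isolation, with illustrative types standing in for the godo config structs:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Illustrative typed view of an rsyslog config.
type rsyslogView struct {
	Server string `json:"server"`
	Port   int    `json:"port"`
	TLS    bool   `json:"tls"`
}

func main() {
	// Decoding JSON into interface{} yields map[string]interface{}, which
	// is what a generic *interface{} Config field holds after a GET.
	var generic interface{} = map[string]interface{}{
		"server": "192.0.2.10",
		"port":   514,
		"tls":    true,
	}

	// Marshal the generic value back to JSON, then unmarshal into the
	// typed struct to recover a usable view.
	raw, err := json.Marshal(generic)
	if err != nil {
		log.Fatal(err)
	}
	var typed rsyslogView
	if err := json.Unmarshal(raw, &typed); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", typed) // {Server:192.0.2.10 Port:514 TLS:true}
}
```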
+func (svc *DatabasesServiceOp) DeleteLogsink(ctx context.Context, databaseID, logsinkID string) (*Response, error) { + path := fmt.Sprintf(databaseLogsinkPath, databaseID, logsinkID) + req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index e85f80af4..e76680215 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -58,7 +58,7 @@ github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/digitalocean/godo v1.126.1-0.20241007162943-72c4c32eb2e0 +# github.com/digitalocean/godo v1.126.1-0.20241008185245-efb15764f26a ## explicit; go 1.22 github.com/digitalocean/godo github.com/digitalocean/godo/metrics From a46cabfc0869ddf3a9f00e78435dc45723fb2e5f Mon Sep 17 00:00:00 2001 From: Anna Lushnikova Date: Wed, 9 Oct 2024 10:54:12 -0400 Subject: [PATCH 09/13] clean up logsink acceptance tests --- digitalocean/database/resource_database_logsink_test.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/digitalocean/database/resource_database_logsink_test.go b/digitalocean/database/resource_database_logsink_test.go index 6c0015d6b..8623879bb 100644 --- a/digitalocean/database/resource_database_logsink_test.go +++ b/digitalocean/database/resource_database_logsink_test.go @@ -27,7 +27,6 @@ func TestAccDigitalOceanDatabaseLogsink_Basic(t *testing.T) { resource.TestCheckResourceAttr("digitalocean_database_logsink.logsink", "sink_name", "lname"), ), }, - { Config: fmt.Sprintf(testAccCheckDigitalOceanDatabaseLogsinkBasic, dbConfig, "new-lname", "opensearch", "https://user:passwd@192.168.0.1:25060", "logs", 4), Check: resource.ComposeAggregateTestCheckFunc( @@ -35,10 +34,6 @@ func TestAccDigitalOceanDatabaseLogsink_Basic(t *testing.T) { resource.TestCheckResourceAttr("digitalocean_database_logsink.logsink", "opensearch_config.0.index_days_max", "4"), ), }, - { - ResourceName: "digitalocean_database_logsink.logsink", - Destroy: true, - }, }, }) } From a0815f0b7e3ee97df9f53fc51679c190dccc2234 Mon Sep 17 00:00:00 2001 From: Anna Lushnikova Date: Wed, 9 Oct 2024 14:59:30 -0400 Subject: [PATCH 10/13] using an api call schema for read --- .../database/resource_database_logsink.go | 78 +++++++++++-------- .../resource_database_logsink_test.go | 1 + go.mod | 2 +- go.sum | 4 +- .../github.com/digitalocean/godo/databases.go | 4 +- vendor/modules.txt | 2 +- 6 files changed, 53 insertions(+), 38 deletions(-) diff --git a/digitalocean/database/resource_database_logsink.go b/digitalocean/database/resource_database_logsink.go index 3fa05abfc..f0d138044 100644 --- a/digitalocean/database/resource_database_logsink.go +++ b/digitalocean/database/resource_database_logsink.go @@ -2,6 +2,7 @@ package database import ( "context" + "encoding/json" "errors" "fmt" "math" @@ -326,6 +327,7 @@ func resourceDigitalOceanDatabaseLogsinkCreate(ctx context.Context, d *schema.Re func resourceDigitalOceanDatabaseLogsinkUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*config.CombinedConfig).GodoClient() clusterID := d.Get("cluster_id").(string) + sinkID := d.Get("sink_id").(string) opts := &godo.DatabaseUpdateLogsinkRequest{} sinkType := d.Get("sink_type").(string) @@ -355,7 +357,7 @@ func resourceDigitalOceanDatabaseLogsinkUpdate(ctx context.Context, d *schema.Re opts.Config = &iCfg 
- _, err := client.Databases.UpdateLogsink(context.Background(), clusterID, d.Id(), opts) + _, err := client.Databases.UpdateLogsink(context.Background(), clusterID, sinkID, opts) if err != nil { return diag.Errorf("Error updating database logsink: %s", err) } @@ -399,28 +401,40 @@ func resourceDigitalOceanDatabaseLogsinkRead(ctx context.Context, d *schema.Reso switch logsink.Type { case "rsyslog": - if cfg, ok := (*logsink.Config).(*godo.RsyslogLogsinkConfig); ok { - if err := d.Set("config", flattenLogsinkRsyslogConfig(cfg)); err != nil { - return diag.Errorf("Error setting logsink config: %#v", err) - } - } else { - return diag.Errorf("Error asserting logsink config to RsyslogLogsinkConfig") + jsonData, err := json.Marshal(*logsink.Config) + if err != nil { + return diag.Errorf("Error marshaling rsyslog logsink config: %#v", err) + } + var cfg *godo.RsyslogLogsinkConfig + if err = json.Unmarshal(jsonData, &cfg); err != nil { + return diag.Errorf("Error unmarshaling rsyslog logsink config: %#v", err) + } + if err := d.Set("rsyslog_config", flattenLogsinkRsyslogConfig(cfg)); err != nil { + return diag.Errorf("Error setting rsyslog logsink config: %#v", err) } case "elasticsearch": - if cfg, ok := (*logsink.Config).(*godo.ElasticsearchLogsinkConfig); ok { - if err := d.Set("config", flattenLogsinkElasticsearchConfig(cfg)); err != nil { - return diag.Errorf("Error setting logsink config: %#v", err) - } - } else { - return diag.Errorf("Error asserting logsink config to ElasticsearchLogsinkConfig") + jsonData, err := json.Marshal(*logsink.Config) + if err != nil { + return diag.Errorf("Error marshaling elasticsearch logsink config: %#v", err) + } + var cfg *godo.ElasticsearchLogsinkConfig + if err = json.Unmarshal(jsonData, &cfg); err != nil { + return diag.Errorf("Error unmarshaling elasticsearch logsink config: %#v", err) + } + if err := d.Set("elasticsearch_config", flattenLogsinkElasticsearchConfig(cfg)); err != nil { + return diag.Errorf("Error setting elasticsearch logsink config: %#v", err) } case "opensearch": - if cfg, ok := (*logsink.Config).(*godo.OpensearchLogsinkConfig); ok { - if err := d.Set("config", flattenLogsinkOpensearchConfig(cfg)); err != nil { - return diag.Errorf("Error setting logsink config: %#v", err) - } - } else { - return diag.Errorf("Error asserting logsink config to OpensearchLogsinkConfig") + jsonData, err := json.Marshal(*logsink.Config) + if err != nil { + return diag.Errorf("Error marshaling opensearch logsink config: %#v", err) + } + var cfg *godo.OpensearchLogsinkConfig + if err = json.Unmarshal(jsonData, &cfg); err != nil { + return diag.Errorf("Error unmarshaling opensearch logsink config: %#v", err) + } + if err := d.Set("opensearch_config", flattenLogsinkOpensearchConfig(cfg)); err != nil { + return diag.Errorf("Error setting opensearch logsink config: %#v", err) } } @@ -441,10 +455,11 @@ func resourceDigitalOceanDatabaseLogsinkImport(d *schema.ResourceData, meta inte } func makeDatabaseLogsinkID(clusterID string, logsinkID string) string { - return fmt.Sprintf("%s/logsink/%s", clusterID, logsinkID) + return fmt.Sprintf("%s/logsink/%s", clusterID, logsinkID) // TODO: maybe better use for godo? 
} -func flattenLogsinkRsyslogConfig(config *godo.RsyslogLogsinkConfig) map[string]interface{} { +func flattenLogsinkRsyslogConfig(config *godo.RsyslogLogsinkConfig) []interface{} { + result := make([]interface{}, 0) if config != nil { r := make(map[string]interface{}) r["server"] = (*config).Server @@ -456,14 +471,14 @@ func flattenLogsinkRsyslogConfig(config *godo.RsyslogLogsinkConfig) map[string]i r["ca"] = (*config).CA r["key"] = (*config).Key r["cert"] = (*config).Cert - - return r + result = append(result, r) } - return nil + return result } -func flattenLogsinkElasticsearchConfig(config *godo.ElasticsearchLogsinkConfig) map[string]interface{} { +func flattenLogsinkElasticsearchConfig(config *godo.ElasticsearchLogsinkConfig) []interface{} { + result := make([]interface{}, 0) if config != nil { r := make(map[string]interface{}) r["ca"] = (*config).CA @@ -471,14 +486,14 @@ func flattenLogsinkElasticsearchConfig(config *godo.ElasticsearchLogsinkConfig) r["index_prefix"] = (*config).IndexPrefix r["index_days_max"] = (*config).IndexDaysMax r["timeout"] = (*config).Timeout - - return r + result = append(result, r) } - return nil + return result } -func flattenLogsinkOpensearchConfig(config *godo.OpensearchLogsinkConfig) map[string]interface{} { +func flattenLogsinkOpensearchConfig(config *godo.OpensearchLogsinkConfig) []interface{} { + result := make([]interface{}, 0) if config != nil { r := make(map[string]interface{}) r["ca"] = (*config).CA @@ -486,9 +501,8 @@ func flattenLogsinkOpensearchConfig(config *godo.OpensearchLogsinkConfig) map[st r["index_prefix"] = (*config).IndexPrefix r["index_days_max"] = (*config).IndexDaysMax r["timeout"] = (*config).Timeout - - return r + result = append(result, r) } - return nil + return result } diff --git a/digitalocean/database/resource_database_logsink_test.go b/digitalocean/database/resource_database_logsink_test.go index 8623879bb..c5888fed2 100644 --- a/digitalocean/database/resource_database_logsink_test.go +++ b/digitalocean/database/resource_database_logsink_test.go @@ -50,5 +50,6 @@ resource "digitalocean_database_logsink" "logsink" { url = "%s" index_prefix = "%s" index_days_max = %d + timeout = 10 } }` diff --git a/go.mod b/go.mod index a3a296124..42b086bd2 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module github.com/digitalocean/terraform-provider-digitalocean require ( github.com/aws/aws-sdk-go v1.42.18 - github.com/digitalocean/godo v1.126.1-0.20241008185245-efb15764f26a + github.com/digitalocean/godo v1.126.1-0.20241009162050-0f32c4e3c663 github.com/hashicorp/awspolicyequivalence v1.5.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/go-uuid v1.0.3 diff --git a/go.sum b/go.sum index f3cd2e14c..e6eea570d 100644 --- a/go.sum +++ b/go.sum @@ -25,8 +25,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/digitalocean/godo v1.126.1-0.20241008185245-efb15764f26a h1:o154fyvSL+l2xz94LPSXq5c55rAYjy6wfVuiIZDhmBY= -github.com/digitalocean/godo v1.126.1-0.20241008185245-efb15764f26a/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= +github.com/digitalocean/godo v1.126.1-0.20241009162050-0f32c4e3c663 h1:IFFOFSxzQbzYHnC2w5EKhg3hVCVJaEij2TarBL7JjHo= +github.com/digitalocean/godo 
v1.126.1-0.20241009162050-0f32c4e3c663/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= diff --git a/vendor/github.com/digitalocean/godo/databases.go b/vendor/github.com/digitalocean/godo/databases.go index 7cb9e98ef..ec9fb5b09 100644 --- a/vendor/github.com/digitalocean/godo/databases.go +++ b/vendor/github.com/digitalocean/godo/databases.go @@ -1921,12 +1921,12 @@ func (svc *DatabasesServiceOp) GetLogsink(ctx context.Context, databaseID string return nil, nil, err } - root := new(databaseLogsinkRoot) + root := new(DatabaseLogsink) resp, err := svc.client.Do(ctx, req, root) if err != nil { return nil, resp, err } - return &root.Sink, resp, nil + return root, resp, nil } // ListLogsinks returns all logsinks for a given database cluster. diff --git a/vendor/modules.txt b/vendor/modules.txt index e76680215..ff683e91e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -58,7 +58,7 @@ github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/digitalocean/godo v1.126.1-0.20241008185245-efb15764f26a +# github.com/digitalocean/godo v1.126.1-0.20241009162050-0f32c4e3c663 ## explicit; go 1.22 github.com/digitalocean/godo github.com/digitalocean/godo/metrics From dc4c0421f07c0c0778bd3d666cf089f130c036e5 Mon Sep 17 00:00:00 2001 From: Anna Lushnikova Date: Wed, 9 Oct 2024 15:13:15 -0400 Subject: [PATCH 11/13] fix indentation in test --- digitalocean/database/resource_database_logsink_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/digitalocean/database/resource_database_logsink_test.go b/digitalocean/database/resource_database_logsink_test.go index c5888fed2..19cf0eebc 100644 --- a/digitalocean/database/resource_database_logsink_test.go +++ b/digitalocean/database/resource_database_logsink_test.go @@ -50,6 +50,6 @@ resource "digitalocean_database_logsink" "logsink" { url = "%s" index_prefix = "%s" index_days_max = %d - timeout = 10 + timeout = 10 } }` From 50f907bdaf917a228117e95f9868681be3838193 Mon Sep 17 00:00:00 2001 From: Anna Lushnikova Date: Wed, 9 Oct 2024 15:55:20 -0400 Subject: [PATCH 12/13] update logsink example to use the per-type config block name --- examples/logsink/main.tf | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/examples/logsink/main.tf b/examples/logsink/main.tf index 4090d7b61..7740de60a 100644 --- a/examples/logsink/main.tf +++ b/examples/logsink/main.tf @@ -18,8 +18,7 @@ resource "digitalocean_database_logsink" "logsink-01" { sink_name = "fox2" sink_type = "opensearch" - - config { + opensearch_config { url = "https://user:passwd@192.168.0.1:25060" index_prefix = "opensearch-logs" index_days_max = 5 From 021a9eeb1abd6d61f083e8dc470f3a7fe8e7d3f6 Mon Sep 17 00:00:00 2001 From: Anna Lushnikova Date: Tue, 15 Oct 2024 16:10:55 -0400 Subject: [PATCH 13/13] use DatabaseLogsinkConfig instead of interface{} --- .../database/resource_database_logsink.go | 32 +++++------ go.mod | 2 +- go.sum | 4 +- .../github.com/digitalocean/godo/databases.go | 54 +++++++------------ vendor/modules.txt | 2 +- 5 files changed, 40 insertions(+), 54 deletions(-) diff --git a/digitalocean/database/resource_database_logsink.go b/digitalocean/database/resource_database_logsink.go index f0d138044..2732bd64a 100644 ---
a/digitalocean/database/resource_database_logsink.go +++ b/digitalocean/database/resource_database_logsink.go @@ -184,8 +184,8 @@ func ResourceDigitalOceanDatabaseLogsink() *schema.Resource { } } -func expandLogsinkRsyslogConfig(config []interface{}) *godo.RsyslogLogsinkConfig { - logsinkConfigOpts := &godo.RsyslogLogsinkConfig{} +func expandLogsinkRsyslogConfig(config []interface{}) *godo.DatabaseLogsinkConfig { + logsinkConfigOpts := &godo.DatabaseLogsinkConfig{} if len(config) == 0 || config[0] == nil { return logsinkConfigOpts } @@ -221,8 +221,8 @@ func expandLogsinkRsyslogConfig(config []interface{}) *godo.RsyslogLogsinkConfig return logsinkConfigOpts } -func expandLogsinkElasticsearchConfig(config []interface{}) *godo.ElasticsearchLogsinkConfig { - logsinkConfigOpts := &godo.ElasticsearchLogsinkConfig{} +func expandLogsinkElasticsearchConfig(config []interface{}) *godo.DatabaseLogsinkConfig { + logsinkConfigOpts := &godo.DatabaseLogsinkConfig{} if len(config) == 0 || config[0] == nil { return logsinkConfigOpts } @@ -248,8 +248,8 @@ func expandLogsinkElasticsearchConfig(config []interface{}) *godo.ElasticsearchL return logsinkConfigOpts } -func expandLogsinkOpensearchConfig(config []interface{}) *godo.OpensearchLogsinkConfig { - logsinkConfigOpts := &godo.OpensearchLogsinkConfig{} +func expandLogsinkOpensearchConfig(config []interface{}) *godo.DatabaseLogsinkConfig { + logsinkConfigOpts := &godo.DatabaseLogsinkConfig{} if len(config) == 0 || config[0] == nil { return logsinkConfigOpts } @@ -284,7 +284,7 @@ func resourceDigitalOceanDatabaseLogsinkCreate(ctx context.Context, d *schema.Re Type: d.Get("sink_type").(string), } - var iCfg interface{} + var iCfg *godo.DatabaseLogsinkConfig switch sinkType { case "rsyslog": @@ -307,7 +307,7 @@ func resourceDigitalOceanDatabaseLogsinkCreate(ctx context.Context, d *schema.Re } } - opts.Config = &iCfg + opts.Config = iCfg if opts.Config == nil { return diag.Errorf("Error creating database logsink: config is required") } @@ -332,7 +332,7 @@ func resourceDigitalOceanDatabaseLogsinkUpdate(ctx context.Context, d *schema.Re sinkType := d.Get("sink_type").(string) - var iCfg interface{} + var iCfg *godo.DatabaseLogsinkConfig switch sinkType { case "rsyslog": @@ -355,7 +355,7 @@ func resourceDigitalOceanDatabaseLogsinkUpdate(ctx context.Context, d *schema.Re } } - opts.Config = &iCfg + opts.Config = iCfg _, err := client.Databases.UpdateLogsink(context.Background(), clusterID, sinkID, opts) if err != nil { @@ -405,7 +405,7 @@ func resourceDigitalOceanDatabaseLogsinkRead(ctx context.Context, d *schema.Reso if err != nil { return diag.Errorf("Error marshaling rsyslog logsink config: %#v", err) } - var cfg *godo.RsyslogLogsinkConfig + var cfg *godo.DatabaseLogsinkConfig if err = json.Unmarshal(jsonData, &cfg); err != nil { return diag.Errorf("Error unmarshaling rsyslog logsink config: %#v", err) } @@ -417,7 +417,7 @@ func resourceDigitalOceanDatabaseLogsinkRead(ctx context.Context, d *schema.Reso if err != nil { return diag.Errorf("Error marshaling elasticsearch logsink config: %#v", err) } - var cfg *godo.ElasticsearchLogsinkConfig + var cfg *godo.DatabaseLogsinkConfig if err = json.Unmarshal(jsonData, &cfg); err != nil { return diag.Errorf("Error unmarshaling elasticsearch logsink config: %#v", err) } @@ -429,7 +429,7 @@ func resourceDigitalOceanDatabaseLogsinkRead(ctx context.Context, d *schema.Reso if err != nil { return diag.Errorf("Error marshaling opensearch logsink config: %#v", err) } - var cfg *godo.OpensearchLogsinkConfig + var cfg 
*godo.DatabaseLogsinkConfig if err = json.Unmarshal(jsonData, &cfg); err != nil { return diag.Errorf("Error unmarshaling opensearch logsink config: %#v", err) } @@ -458,7 +458,7 @@ func makeDatabaseLogsinkID(clusterID string, logsinkID string) string { return fmt.Sprintf("%s/logsink/%s", clusterID, logsinkID) // TODO: maybe better use for godo? } -func flattenLogsinkRsyslogConfig(config *godo.RsyslogLogsinkConfig) []interface{} { +func flattenLogsinkRsyslogConfig(config *godo.DatabaseLogsinkConfig) []interface{} { result := make([]interface{}, 0) if config != nil { r := make(map[string]interface{}) @@ -477,7 +477,7 @@ func flattenLogsinkRsyslogConfig(config *godo.RsyslogLogsinkConfig) []interface{ return result } -func flattenLogsinkElasticsearchConfig(config *godo.ElasticsearchLogsinkConfig) []interface{} { +func flattenLogsinkElasticsearchConfig(config *godo.DatabaseLogsinkConfig) []interface{} { result := make([]interface{}, 0) if config != nil { r := make(map[string]interface{}) @@ -492,7 +492,7 @@ func flattenLogsinkElasticsearchConfig(config *godo.ElasticsearchLogsinkConfig) return result } -func flattenLogsinkOpensearchConfig(config *godo.OpensearchLogsinkConfig) []interface{} { +func flattenLogsinkOpensearchConfig(config *godo.DatabaseLogsinkConfig) []interface{} { result := make([]interface{}, 0) if config != nil { r := make(map[string]interface{}) diff --git a/go.mod b/go.mod index 42b086bd2..afc3b5e9e 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module github.com/digitalocean/terraform-provider-digitalocean require ( github.com/aws/aws-sdk-go v1.42.18 - github.com/digitalocean/godo v1.126.1-0.20241009162050-0f32c4e3c663 + github.com/digitalocean/godo v1.126.1-0.20241015182733-2c2e059f2ea4 github.com/hashicorp/awspolicyequivalence v1.5.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/go-uuid v1.0.3 diff --git a/go.sum b/go.sum index e6eea570d..832505af3 100644 --- a/go.sum +++ b/go.sum @@ -25,8 +25,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/digitalocean/godo v1.126.1-0.20241009162050-0f32c4e3c663 h1:IFFOFSxzQbzYHnC2w5EKhg3hVCVJaEij2TarBL7JjHo= -github.com/digitalocean/godo v1.126.1-0.20241009162050-0f32c4e3c663/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= +github.com/digitalocean/godo v1.126.1-0.20241015182733-2c2e059f2ea4 h1:7CBSVopixDaCNKGsRZjSvs9vPa/3IZ9gTwAIV4gWpO4= +github.com/digitalocean/godo v1.126.1-0.20241015182733-2c2e059f2ea4/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= diff --git a/vendor/github.com/digitalocean/godo/databases.go b/vendor/github.com/digitalocean/godo/databases.go index ec9fb5b09..bc38c25e1 100644 --- a/vendor/github.com/digitalocean/godo/databases.go +++ b/vendor/github.com/digitalocean/godo/databases.go @@ -501,53 +501,39 @@ type DatabaseFirewallRule struct { // DatabaseLogsink represents a logsink. 
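+// A single DatabaseLogsinkConfig is now shared by every sink type; only the
+// fields relevant to the sink's Type are expected to be populated.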
type DatabaseLogsink struct { - ID string `json:"sink_id"` - Name string `json:"sink_name,required"` - Type string `json:"sink_type,required"` - Config *interface{} `json:"config,required"` + ID string `json:"sink_id"` + Name string `json:"sink_name,required"` + Type string `json:"sink_type,required"` + Config *DatabaseLogsinkConfig `json:"config,required"` } // DatabaseCreateLogsinkRequest is used to create logsink for a database cluster. type DatabaseCreateLogsinkRequest struct { - Name string `json:"sink_name"` - Type string `json:"sink_type"` - Config *interface{} `json:"config"` + Name string `json:"sink_name"` + Type string `json:"sink_type"` + Config *DatabaseLogsinkConfig `json:"config"` } // DatabaseUpdateLogsinkRequest is used to update logsink for a database cluster. type DatabaseUpdateLogsinkRequest struct { - Config *interface{} `json:"config"` -} - -// RsyslogLogsinkConfig represents rsyslog logsink configuration. -type RsyslogLogsinkConfig struct { - Server string `json:"server,required"` - Port int `json:"port,required"` - TLS bool `json:"tls,required"` - Format string `json:"format,required"` - Logline string `json:"logline,omitempty"` - SD string `json:"sd,omitempty"` - CA string `json:"ca,omitempty"` - Key string `json:"key,omitempty"` - Cert string `json:"cert,omitempty"` -} - -// ElasticsearchLogsinkConfig represents elasticsearch logsink configuration. -type ElasticsearchLogsinkConfig struct { - URL string `json:"url,required"` - IndexPrefix string `json:"index_prefix,required"` - IndexDaysMax int `json:"index_days_max,omitempty"` - Timeout float32 `json:"timeout,omitempty"` - CA string `json:"ca,omitempty"` + Config *DatabaseLogsinkConfig `json:"config"` } -// OpensearchLogsinkConfig represents opensearch logsink configuration. -type OpensearchLogsinkConfig struct { - URL string `json:"url,required"` - IndexPrefix string `json:"index_prefix,required"` +// DatabaseLogsinkConfig represents one of the configurable options (rsyslog_logsink, elasticsearch_logsink, or opensearch_logsink) for a logsink. +type DatabaseLogsinkConfig struct { + URL string `json:"url,omitempty"` + IndexPrefix string `json:"index_prefix,omitempty"` IndexDaysMax int `json:"index_days_max,omitempty"` Timeout float32 `json:"timeout,omitempty"` + Server string `json:"server,omitempty"` + Port int `json:"port,omitempty"` + TLS bool `json:"tls,omitempty"` + Format string `json:"format,omitempty"` + Logline string `json:"logline,omitempty"` + SD string `json:"sd,omitempty"` CA string `json:"ca,omitempty"` + Key string `json:"key,omitempty"` + Cert string `json:"cert,omitempty"` } // PostgreSQLConfig holds advanced configurations for PostgreSQL database clusters. diff --git a/vendor/modules.txt b/vendor/modules.txt index ff683e91e..457392592 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -58,7 +58,7 @@ github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/digitalocean/godo v1.126.1-0.20241009162050-0f32c4e3c663 +# github.com/digitalocean/godo v1.126.1-0.20241015182733-2c2e059f2ea4 ## explicit; go 1.22 github.com/digitalocean/godo github.com/digitalocean/godo/metrics
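
For readers following along outside of Terraform, here is a minimal sketch of how the consolidated godo surface this series lands on fits together when creating an opensearch logsink directly. The token lookup and cluster ID are placeholder assumptions; the endpoint values mirror examples/logsink/main.tf, and DatabaseCreateLogsinkRequest, DatabaseLogsinkConfig, and Databases.CreateLogsink are the types and call shown in the diffs above.

package main

import (
	"context"
	"log"
	"os"

	"github.com/digitalocean/godo"
)

func main() {
	// Placeholder: read an API token from the environment.
	client := godo.NewFromToken(os.Getenv("DIGITALOCEAN_TOKEN"))

	// Union-style config: set only the fields that apply to this sink type.
	req := &godo.DatabaseCreateLogsinkRequest{
		Name: "fox2",
		Type: "opensearch",
		Config: &godo.DatabaseLogsinkConfig{
			URL:          "https://user:passwd@192.168.0.1:25060",
			IndexPrefix:  "opensearch-logs",
			IndexDaysMax: 5,
		},
	}

	// CreateLogsink returns the new sink, including its server-assigned sink_id.
	sink, _, err := client.Databases.CreateLogsink(context.Background(), "your-cluster-id", req)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created logsink %s (%s)", sink.ID, sink.Name)
}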