Skip to content

Commit

Permalink
Merge pull request #265 from digitalocean/database_inbound_sources
Browse files Browse the repository at this point in the history
add support for getting/setting firewall rules
  • Loading branch information
bentranter authored Oct 30, 2019
2 parents 97ac73b + a30aa47 commit 6e1df21
Show file tree
Hide file tree
Showing 2 changed files with 105 additions and 15 deletions.
72 changes: 57 additions & 15 deletions databases.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,21 +8,22 @@ import (
)

// API endpoint templates for the managed-databases service. Each %s is a
// cluster ID (and, where a second %s appears, a nested resource name).
const (
	databaseBasePath          = "/v2/databases"
	databaseSinglePath        = databaseBasePath + "/%s"
	databaseResizePath        = databaseBasePath + "/%s/resize"
	databaseMigratePath       = databaseBasePath + "/%s/migrate"
	databaseMaintenancePath   = databaseBasePath + "/%s/maintenance"
	databaseBackupsPath       = databaseBasePath + "/%s/backups"
	databaseUsersPath         = databaseBasePath + "/%s/users"
	databaseUserPath          = databaseBasePath + "/%s/users/%s"
	databaseDBPath            = databaseBasePath + "/%s/dbs/%s"
	databaseDBsPath           = databaseBasePath + "/%s/dbs"
	databasePoolPath          = databaseBasePath + "/%s/pools/%s"
	databasePoolsPath         = databaseBasePath + "/%s/pools"
	databaseReplicaPath       = databaseBasePath + "/%s/replicas/%s"
	databaseReplicasPath      = databaseBasePath + "/%s/replicas"
	evictionPolicyPath        = databaseBasePath + "/%s/eviction_policy"
	databaseFirewallRulesPath = databaseBasePath + "/%s/firewall"
)

// DatabasesService is an interface for interfacing with the databases endpoints
Expand Down Expand Up @@ -55,6 +56,8 @@ type DatabasesService interface {
DeleteReplica(context.Context, string, string) (*Response, error)
GetEvictionPolicy(context.Context, string) (string, *Response, error)
SetEvictionPolicy(context.Context, string, string) (*Response, error)
GetFirewallRules(context.Context, string) (*Response, error)
UpdateFirewallRules(context.Context, string, *DatabaseUpdateFirewallRulesRequest) (*Response, error)
}

// DatabasesServiceOp handles communication with the Databases related methods
Expand Down Expand Up @@ -211,6 +214,20 @@ type DatabaseCreateReplicaRequest struct {
Tags []string `json:"tags,omitempty"`
}

// DatabaseUpdateFirewallRulesRequest is used to set the firewall rules for a database.
// The rules given here replace the cluster's inbound sources when sent via
// UpdateFirewallRules; the slice is always serialized (no omitempty), so an
// empty slice is sent as "rules": [].
type DatabaseUpdateFirewallRulesRequest struct {
	// Rules is the full desired set of inbound-source rules for the cluster.
	Rules []*DatabaseFirewallRule `json:"rules"`
}

// DatabaseFirewallRule is a rule describing an inbound source to a database.
type DatabaseFirewallRule struct {
	// UUID identifies this rule; tests omit it on create, so it is
	// presumably assigned by the server — TODO confirm against API docs.
	UUID string `json:"uuid"`
	// ClusterUUID is the database cluster the rule belongs to.
	ClusterUUID string `json:"cluster_uuid"`
	// Type is the kind of inbound source (e.g. "ip_addr" as used in the
	// tests); the full set of types is not visible here — TODO confirm.
	Type string `json:"type"`
	// Value is the source identifier matching Type (an IP address for
	// "ip_addr").
	Value string `json:"value"`
	// CreatedAt is when the rule was created — presumably server-set.
	CreatedAt time.Time `json:"created_at"`
}

// databaseUserRoot wraps the "user" envelope used by single-user API responses.
type databaseUserRoot struct {
	User *DatabaseUser `json:"user"`
}
Expand Down Expand Up @@ -259,6 +276,10 @@ type evictionPolicyRoot struct {
EvictionPolicy string `json:"eviction_policy"`
}

// databaseFirewallRuleRoot wraps the "rules" envelope returned by the
// firewall endpoint; used as the decode target in GetFirewallRules.
type databaseFirewallRuleRoot struct {
	Rules []*DatabaseFirewallRule `json:"rules"`
}

// URN returns the uniform resource name for the database cluster,
// in the "dbaas" namespace.
func (d Database) URN() string {
	urn := ToURN("dbaas", d.ID)
	return urn
}
Expand Down Expand Up @@ -669,3 +690,24 @@ func (svc *DatabasesServiceOp) SetEvictionPolicy(ctx context.Context, databaseID
}
return resp, nil
}

// GetFirewallRules loads the inbound sources for a given cluster.
//
// NOTE(review): the response body is decoded into a databaseFirewallRuleRoot
// but the rules are never returned to the caller — only the *Response is
// surfaced. Exposing them would require changing the DatabasesService
// interface, so it is flagged here rather than changed.
func (svc *DatabasesServiceOp) GetFirewallRules(ctx context.Context, databaseID string) (*Response, error) {
	endpoint := fmt.Sprintf(databaseFirewallRulesPath, databaseID)
	req, err := svc.client.NewRequest(ctx, http.MethodGet, endpoint, nil)
	if err != nil {
		return nil, err
	}
	rules := new(databaseFirewallRuleRoot)
	return svc.client.Do(ctx, req, rules)
}

// UpdateFirewallRules sets the inbound sources for a given cluster.
// The request body replaces the cluster's rule set with firewallRulesReq.Rules.
func (svc *DatabasesServiceOp) UpdateFirewallRules(ctx context.Context, databaseID string, firewallRulesReq *DatabaseUpdateFirewallRulesRequest) (*Response, error) {
	endpoint := fmt.Sprintf(databaseFirewallRulesPath, databaseID)
	req, err := svc.client.NewRequest(ctx, http.MethodPut, endpoint, firewallRulesReq)
	if err != nil {
		return nil, err
	}
	// No decode target: the caller only needs the *Response.
	return svc.client.Do(ctx, req, nil)
}
48 changes: 48 additions & 0 deletions databases_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1183,3 +1183,51 @@ func TestDatabases_GetEvictionPolicy(t *testing.T) {
require.NoError(t, err)
require.Equal(t, want, got)
}

// TestDatabases_GetFirewallRules verifies that GetFirewallRules issues a GET
// to the cluster's firewall endpoint and returns no error for a well-formed
// response body.
func TestDatabases_GetFirewallRules(t *testing.T) {
	setup()
	defer teardown()

	clusterID := "deadbeef-dead-4aa5-beef-deadbeef347d"
	endpoint := fmt.Sprintf("/v2/databases/%s/firewall", clusterID)

	respBody := ` {"rules": [{
    "type": "ip_addr",
    "value": "192.168.1.1",
    "uuid": "deadbeef-dead-4aa5-beef-deadbeef347d",
    "cluster_uuid": "deadbeef-dead-4aa5-beef-deadbeef347d"
  }]} `

	mux.HandleFunc(endpoint, func(w http.ResponseWriter, r *http.Request) {
		testMethod(t, r, http.MethodGet)
		fmt.Fprint(w, respBody)
	})

	_, err := client.Databases.GetFirewallRules(ctx, clusterID)
	require.NoError(t, err)
}

// TestDatabases_UpdateFirewallRules verifies that UpdateFirewallRules issues
// a PUT to the cluster's firewall endpoint and returns no error.
func TestDatabases_UpdateFirewallRules(t *testing.T) {
	setup()
	defer teardown()

	clusterID := "deadbeef-dead-4aa5-beef-deadbeef347d"
	endpoint := fmt.Sprintf("/v2/databases/%s/firewall", clusterID)

	mux.HandleFunc(endpoint, func(w http.ResponseWriter, r *http.Request) {
		testMethod(t, r, http.MethodPut)
	})

	rulesReq := &DatabaseUpdateFirewallRulesRequest{
		Rules: []*DatabaseFirewallRule{{
			Type:  "ip_addr",
			Value: "192.168.1.1",
			UUID:  "deadbeef-dead-4aa5-beef-deadbeef347d",
		}},
	}

	_, err := client.Databases.UpdateFirewallRules(ctx, clusterID, rulesReq)
	require.NoError(t, err)
}

0 comments on commit 6e1df21

Please sign in to comment.