diff --git a/listen.go b/listen.go index e0d67c6ab1e..78ae8bf2d0b 100644 --- a/listen.go +++ b/listen.go @@ -167,7 +167,7 @@ func (sl *sharedListener) setDeadline() error { return err } -// Destruct is called by the UsagePool when the listener is +// Destruct is called by UsagePoolOf when the listener is // finally not being used anymore. It closes the socket. func (sl *sharedListener) Destruct() error { return sl.Listener.Close() diff --git a/listen_unix.go b/listen_unix.go index 8870da5e9c9..f96e46fc98f 100644 --- a/listen_unix.go +++ b/listen_unix.go @@ -105,7 +105,7 @@ func listenTCPOrUnix(ctx context.Context, lnKey string, network, address string, // whether to enforce shutdown delays, for example (see #5393). ln, err := config.Listen(ctx, network, address) if err == nil { - listenerPool.LoadOrStore(lnKey, nil) + listenerPool.LoadOrStore(lnKey, deleteListener{}) } // if new listener is a unix socket, make sure we can reuse it later @@ -171,3 +171,7 @@ func (dl deleteListener) Close() error { _, _ = listenerPool.Delete(dl.lnKey) return dl.Listener.Close() } + +func (dl deleteListener) Destruct() error { + return nil +} diff --git a/listeners.go b/listeners.go index 4cd00b21e79..f5bf28fe68a 100644 --- a/listeners.go +++ b/listeners.go @@ -787,7 +787,7 @@ type ListenerWrapper interface { } // listenerPool stores and allows reuse of active listeners. 
-var listenerPool = NewUsagePool() +var listenerPool = NewUsagePoolOf[string, Destructor]() const maxPortSpan = 65535 diff --git a/logging.go b/logging.go index 98b031c6a2a..43474cdae14 100644 --- a/logging.go +++ b/logging.go @@ -690,7 +690,12 @@ var ( defaultLoggerMu sync.RWMutex ) -var writers = NewUsagePool() +type DestructableWriter interface { + Destructor + io.Writer +} + +var writers = NewUsagePoolOf[string, DestructableWriter]() const DefaultLoggerName = "default" diff --git a/modules/caddyhttp/ip_range.go b/modules/caddyhttp/ip_range.go index b1db254752f..300ac050f5f 100644 --- a/modules/caddyhttp/ip_range.go +++ b/modules/caddyhttp/ip_range.go @@ -40,7 +40,7 @@ func init() { // so that it's ready to be used when requests start getting handled. // A read lock should probably be used to get the cached value if the // ranges can change at runtime (e.g. periodically refreshed). -// Using a `caddy.UsagePool` may be a good idea to avoid having refetch +// Using a `caddy.UsagePoolOf` may be a good idea to avoid having refetch // the values when a config reload occurs, which would waste time. 
// // If the list of IP ranges cannot be sourced, then provisioning SHOULD diff --git a/modules/caddyhttp/reverseproxy/admin.go b/modules/caddyhttp/reverseproxy/admin.go index f64d1ecf0aa..39cb6c4f85a 100644 --- a/modules/caddyhttp/reverseproxy/admin.go +++ b/modules/caddyhttp/reverseproxy/admin.go @@ -76,25 +76,7 @@ func (adminUpstreams) handleUpstreams(w http.ResponseWriter, r *http.Request) er // Iterate over the upstream pool (needs to be fast) var rangeErr error - hosts.Range(func(key, val any) bool { - address, ok := key.(string) - if !ok { - rangeErr = caddy.APIError{ - HTTPStatus: http.StatusInternalServerError, - Err: fmt.Errorf("could not type assert upstream address"), - } - return false - } - - upstream, ok := val.(*Host) - if !ok { - rangeErr = caddy.APIError{ - HTTPStatus: http.StatusInternalServerError, - Err: fmt.Errorf("could not type assert upstream struct"), - } - return false - } - + hosts.Range(func(address string, upstream *Host) bool { results = append(results, upstreamStatus{ Address: address, NumRequests: upstream.NumRequests(), diff --git a/modules/caddyhttp/reverseproxy/hosts.go b/modules/caddyhttp/reverseproxy/hosts.go index 83a39d80706..a23715ec6bc 100644 --- a/modules/caddyhttp/reverseproxy/hosts.go +++ b/modules/caddyhttp/reverseproxy/hosts.go @@ -128,7 +128,7 @@ func (u *Upstream) fillHost() { host := new(Host) existingHost, loaded := hosts.LoadOrStore(u.String(), host) if loaded { - host = existingHost.(*Host) + host = existingHost } u.Host = host } @@ -140,6 +140,10 @@ type Host struct { fails int64 } +func (h *Host) Destruct() error { + return nil +} + // NumRequests returns the number of active requests to the upstream. func (h *Host) NumRequests() int { return int(atomic.LoadInt64(&h.numRequests)) @@ -229,7 +233,7 @@ func GetDialInfo(ctx context.Context) (DialInfo, bool) { // currently in use by active configuration(s). This // allows the state of remote hosts to be preserved // through config reloads. 
-var hosts = caddy.NewUsagePool() +var hosts = caddy.NewUsagePoolOf[string, *Host]() // dialInfoVarKey is the key used for the variable that holds // the dial info for the upstream connection. diff --git a/modules/caddypki/acmeserver/acmeserver.go b/modules/caddypki/acmeserver/acmeserver.go index 454720bf36a..d01f4b6353a 100644 --- a/modules/caddypki/acmeserver/acmeserver.go +++ b/modules/caddypki/acmeserver/acmeserver.go @@ -241,7 +241,7 @@ func (ash Handler) openDatabase() (*db.AuthDB, error) { err := os.MkdirAll(dbFolder, 0o755) if err != nil { - return nil, fmt.Errorf("making folder for CA database: %v", err) + return databaseCloser{}, fmt.Errorf("making folder for CA database: %v", err) } dbConfig := &db.Config{ @@ -256,7 +256,7 @@ func (ash Handler) openDatabase() (*db.AuthDB, error) { ash.logger.Debug("loaded preexisting CA database", zap.String("db_key", key)) } - return database.(databaseCloser).DB, err + return database.DB, err } // makeClient creates an ACME client which will use a custom @@ -312,7 +312,7 @@ const defaultPathPrefix = "/acme/" var ( keyCleaner = regexp.MustCompile(`[^\w.-_]`) - databasePool = caddy.NewUsagePool() + databasePool = caddy.NewUsagePoolOf[string, databaseCloser]() ) type databaseCloser struct { diff --git a/modules/caddytls/connpolicy.go b/modules/caddytls/connpolicy.go index 74c83241107..decf0fa1671 100644 --- a/modules/caddytls/connpolicy.go +++ b/modules/caddytls/connpolicy.go @@ -20,7 +20,6 @@ import ( "encoding/base64" "encoding/json" "fmt" - "io" "os" "path/filepath" "strings" @@ -324,7 +323,7 @@ func (p *ConnectionPolicy) buildStandardTLSConfig(ctx caddy.Context) error { } ctx.OnCancel(func() { _, _ = secretsLogPool.Delete(filename) }) - cfg.KeyLogWriter = logFile.(io.Writer) + cfg.KeyLogWriter = logFile tlsApp.logger.Warn("TLS SECURITY COMPROMISED: secrets logging is enabled!", zap.String("log_filename", filename)) @@ -598,4 +597,4 @@ type destructableWriter struct{ *os.File } func (d destructableWriter) Destruct() 
error { return d.Close() } -var secretsLogPool = caddy.NewUsagePool() +var secretsLogPool = caddy.NewUsagePoolOf[string, destructableWriter]() diff --git a/usagepool.go b/usagepool.go index 7007849fb95..f49944102c1 100644 --- a/usagepool.go +++ b/usagepool.go @@ -20,7 +20,7 @@ import ( "sync/atomic" ) -// UsagePool is a thread-safe map that pools values +// UsagePoolOf is a thread-safe map that pools values // based on usage (reference counting). Values are // only inserted if they do not already exist. There // are two ways to add values to the pool: @@ -47,23 +47,24 @@ import ( // was stored. Deleting too many times will panic. // // The implementation does not use a sync.Pool because -// UsagePool needs additional atomicity to run the +// UsagePoolOf needs additional atomicity to run the // constructor functions when creating a new value when // LoadOrNew is used. (We could probably use sync.Pool // but we'd still have to layer our own additional locks // on top.) // -// An empty UsagePool is NOT safe to use; always call -// NewUsagePool() to make a new one. -type UsagePool struct { +// An empty UsagePoolOf is NOT safe to use; always call +// NewUsagePoolOf() to make a new one. +type UsagePoolOf[K comparable, V any] struct { sync.RWMutex - pool map[any]*usagePoolVal + pool map[K]*usagePoolVal[V] } -// NewUsagePool returns a new usage pool that is ready to use. -func NewUsagePool() *UsagePool { - return &UsagePool{ - pool: make(map[any]*usagePoolVal), +// NewUsagePoolOf returns a new usage pool with comparable key type K and Destructor +// interface type V that is ready to use. +func NewUsagePoolOf[K comparable, V any]() *UsagePoolOf[K, V] { + return &UsagePoolOf[K, V]{ + pool: make(map[K]*usagePoolVal[V]), } } @@ -74,8 +75,8 @@ func NewUsagePool() *UsagePool { // or constructed value is returned. The loaded return value is true // if the value already existed and was loaded, or false if it was // newly constructed. 
-func (up *UsagePool) LoadOrNew(key any, construct Constructor) (value any, loaded bool, err error) {
-	var upv *usagePoolVal
+func (up *UsagePoolOf[K, V]) LoadOrNew(key K, construct Constructor) (value V, loaded bool, err error) {
+	var upv *usagePoolVal[V]
 	up.Lock()
 	upv, loaded = up.pool[key]
 	if loaded {
@@ -86,11 +87,12 @@ func (up *UsagePool) LoadOrNew(key any, construct Constructor) (value any, loade
 		err = upv.err
 		upv.RUnlock()
 	} else {
-		upv = &usagePoolVal{refs: 1}
+		upv = &usagePoolVal[V]{refs: 1}
 		upv.Lock()
 		up.pool[key] = upv
 		up.Unlock()
-		value, err = construct()
+		destructable, cErr := construct()
+		value, err = destructable.(V), cErr
 		if err == nil {
 			upv.value = value
 		} else {
@@ -113,8 +115,8 @@
 // already exists, or stores it if it does not exist. It returns the
 // value that was either loaded or stored, and true if the value already
 // existed and was
-func (up *UsagePool) LoadOrStore(key, val any) (value any, loaded bool) {
-	var upv *usagePoolVal
+func (up *UsagePoolOf[K, V]) LoadOrStore(key K, val V) (value V, loaded bool) {
+	var upv *usagePoolVal[V]
 	up.Lock()
 	upv, loaded = up.pool[key]
 	if loaded {
@@ -129,7 +131,7 @@
 		}
 		upv.Unlock()
 	} else {
-		upv = &usagePoolVal{refs: 1, value: val}
+		upv = &usagePoolVal[V]{refs: 1, value: val}
 		up.pool[key] = upv
 		up.Unlock()
 		value = val
@@ -144,7 +146,7 @@
 // This method is somewhat naive and acquires a read lock on the
 // entire pool during iteration, so do your best to make f() really
 // fast, m'kay?
-func (up *UsagePool) Range(f func(key, value any) bool) { +func (up *UsagePoolOf[K, V]) Range(f func(key K, value V) bool) { up.RLock() defer up.RUnlock() for key, upv := range up.pool { @@ -166,7 +168,7 @@ func (up *UsagePool) Range(f func(key, value any) bool) { // true if the usage count reached 0 and the value was deleted. // It panics if the usage count drops below 0; always call // Delete precisely as many times as LoadOrStore. -func (up *UsagePool) Delete(key any) (deleted bool, err error) { +func (up *UsagePoolOf[K, V]) Delete(key K) (deleted bool, err error) { up.Lock() upv, ok := up.pool[key] if !ok { @@ -180,7 +182,7 @@ func (up *UsagePool) Delete(key any) (deleted bool, err error) { upv.RLock() val := upv.value upv.RUnlock() - if destructor, ok := val.(Destructor); ok { + if destructor, ok := any(val).(Destructor); ok { err = destructor.Destruct() } deleted = true @@ -196,13 +198,13 @@ func (up *UsagePool) Delete(key any) (deleted bool, err error) { // References returns the number of references (count of usages) to a // key in the pool, and true if the key exists, or false otherwise. -func (up *UsagePool) References(key any) (int, bool) { +func (up *UsagePoolOf[K, V]) References(key K) (int, bool) { up.RLock() upv, loaded := up.pool[key] up.RUnlock() if loaded { // I wonder if it'd be safer to read this value during - // our lock on the UsagePool... guess we'll see... + // our lock on the UsagePoolOf... guess we'll see... refs := atomic.LoadInt32(&upv.refs) return int(refs), true } @@ -219,9 +221,19 @@ type Destructor interface { Destruct() error } -type usagePoolVal struct { +type usagePoolVal[V any] struct { refs int32 // accessed atomically; must be 64-bit aligned for 32-bit systems - value any + value V err error sync.RWMutex } + +// UsagePool is DEPRECATED: Use UsagePoolOf instead. This type will likely be changed or removed in the future. +type UsagePool = UsagePoolOf[any, any] + +// NewUsagePool is DEPRECATED: Use NewUsagePoolOf instead. 
This function will likely be changed or removed in the future.
+func NewUsagePool() *UsagePool {
+	return &UsagePool{
+		pool: make(map[any]*usagePoolVal[any]),
+	}
+}