A type-safe, namespace-aware configuration library for Go with support for multiple storage backends, built-in resilience, and OpenTelemetry instrumentation.
- Multiple Storage Backends: Memory, PostgreSQL, MongoDB, SQLite, File, Redis, Kubernetes (ConfigMaps/Secrets)
- Version History: Retrieve and paginate historical versions of any config key
- Multi-Store: Combine stores for caching, fallback, or replication patterns
- Namespace Isolation: Organize configuration by environment, tenant, or service
- Built-in Resilience: Internal cache ensures app works during backend outages
- Type-safe Values: Strongly typed access with automatic conversion
- Codecs: JSON, YAML, TOML encoding support
- OpenTelemetry: Tracing and metrics instrumentation
- Struct Binding: Bind configuration to Go structs with validation
- Live Binding: Auto-reload structs on config changes via polling
This library is designed for use cases like feature flags, rate limits, and dynamic configuration where eventual consistency is acceptable. The key principle is: having some configuration (even slightly stale) is better than having no configuration at all.
The library maintains an internal in-memory cache that serves as a resilience layer:
- If the backend store becomes temporarily unavailable, cached values continue to be served
- Each application instance maintains its own local cache
- Cache is automatically invalidated via the store's native change stream (MongoDB Change Streams, PostgreSQL LISTEN/NOTIFY)
- No external dependencies like Redis required for caching
go get github.com/rbaliyan/config

package main
import (
"context"
"log"
"github.com/rbaliyan/config"
"github.com/rbaliyan/config/memory"
)
func main() {
ctx := context.Background()
// Create manager with memory store
mgr, err := config.New(
config.WithStore(memory.NewStore()),
)
if err != nil {
log.Fatal(err)
}
// Connect to backend
if err := mgr.Connect(ctx); err != nil {
log.Fatal(err)
}
defer mgr.Close(ctx)
// Get configuration for a namespace (use "" for default)
cfg := mgr.Namespace("production")
// Set a value
if err := cfg.Set(ctx, "app/timeout", 30); err != nil {
log.Fatal(err)
}
// Get a value
val, err := cfg.Get(ctx, "app/timeout")
if err != nil {
log.Fatal(err)
}
// Unmarshal into a typed variable
var timeout int
if err := val.Unmarshal(&timeout); err != nil {
log.Fatal(err)
}
log.Printf("Timeout: %d", timeout)
}

In-memory storage for testing and single-instance deployments.
import "github.com/rbaliyan/config/memory"
store := memory.NewStore()

Persistent storage with LISTEN/NOTIFY for real-time updates.
import (
"database/sql"
"github.com/lib/pq"
"github.com/rbaliyan/config/postgres"
)
db, _ := sql.Open("postgres", "postgres://localhost/mydb")
listener := pq.NewListener(dsn, 10*time.Second, time.Minute, nil)
store := postgres.NewStore(db, listener,
postgres.WithTable("config_entries"),
postgres.WithNotifyChannel("config_changes"),
)

Persistent storage with change streams for real-time updates.
import (
"go.mongodb.org/mongo-driver/v2/mongo"
"go.mongodb.org/mongo-driver/v2/mongo/options"
"github.com/rbaliyan/config/mongodb"
)
client, _ := mongo.Connect(options.Client().ApplyURI("mongodb://localhost:27017"))
store := mongodb.NewStore(client,
mongodb.WithDatabase("config"),
mongodb.WithCollection("entries"),
)

Lightweight persistent storage with no external dependencies.
import "github.com/rbaliyan/config/sqlite"
store := sqlite.NewStore("config.db",
sqlite.WithTable("config_entries"),
)

Load configuration from YAML, TOML, or JSON files on disk.
By default the file store is read-only: Set and Delete return config.ErrReadOnly and Watch returns config.ErrWatchNotSupported. Pass file.WithWritable() to enable writes, in which case writes are persisted to a sidecar file (default: {path}.writes.yaml) that overlays the base file, and Watch is served by polling both files at WithWatchInterval (default 2s).
import "github.com/rbaliyan/config/file"
// Read-only (default).
store := file.NewStore("config.yaml")
// Writable: Set/Delete persist to config.yaml.writes.yaml, Watch is polling-based.
writable := file.NewStore("config.yaml",
file.WithWritable(),
file.WithWatchInterval(5*time.Second),
)

Top-level keys in the file become namespaces; nested keys are flattened with / (configurable via WithKeySeparator). Top-level scalar values go into the namespace set by WithDefaultNamespace (default: "default").
Pass WithExpansion or WithAngleBracketExpander to substitute ${VAR} or <VAR> placeholders at load time — see Variable Expansion.
Persistent storage backed by Redis. Each namespace is stored as a single Redis hash ({keyPrefix}:{namespace}); change events are published on a single pub/sub channel ({keyPrefix}:changes). Writes and notifications are performed atomically via Lua scripts, so watchers never observe a write they cannot read back.
import "github.com/rbaliyan/config/redis"
store := redis.NewStore(
redis.WithAddress("redis.internal:6379"),
redis.WithKeyPrefix("cfg"), // hash names become "cfg:{namespace}"
redis.WithPassword("s3cret"),
)
// Or connect to a Redis Cluster.
cluster := redis.NewStore(
redis.WithCluster("redis-0:6379", "redis-1:6379", "redis-2:6379"),
)

Persistent storage backed by Kubernetes ConfigMaps and Secrets, with real-time Watch via informers. Config namespaces map to Kubernetes namespaces (or a single fixed namespace via WithK8sNamespace). Each config namespace is backed by one ConfigMap named config-{namespace}; keys prefixed with secret/ (configurable via WithSecretKeyPrefix) are routed to a Secret named config-secrets-{namespace} instead.
This store is in a separate Go module (github.com/rbaliyan/config/k8s) to keep client-go out of the core module.
go get github.com/rbaliyan/config/k8s

import (
"github.com/rbaliyan/config/k8s"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
clientConfig, _ := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
clientset, _ := kubernetes.NewForConfig(clientConfig)
store := k8s.NewStore(clientset,
k8s.WithK8sNamespace("my-app"), // restrict to one k8s namespace
k8s.WithSecretKeyPrefix("secret/"), // keys with this prefix -> Secrets
k8s.WithResyncPeriod(10*time.Minute),
)
if err := store.Connect(ctx); err != nil { // waits for informer cache sync
return err
}
defer store.Close(ctx)

The / characters in config keys are replaced with . to form valid Kubernetes data keys (e.g. app/timeout becomes app.timeout).
val, err := cfg.Get(ctx, "database/port")
if err != nil {
if config.IsNotFound(err) {
// Key doesn't exist
}
return err
}
// Type-safe access via Value interface
port, err := val.Int64()
str, err := val.String()
flag, err := val.Bool()
num, err := val.Float64()
// Unmarshal into any type
var dbConfig DatabaseConfig
if err := val.Unmarshal(&dbConfig); err != nil {
return err
}
// Access metadata
meta := val.Metadata()
version := meta.Version()
created := meta.CreatedAt()
updated := meta.UpdatedAt()

// Simple values
cfg.Set(ctx, "app/timeout", 30)
cfg.Set(ctx, "app/name", "myservice")
cfg.Set(ctx, "app/enabled", true)
// Complex values
cfg.Set(ctx, "app/servers", []string{"host1", "host2"})
cfg.Set(ctx, "app/limits", map[string]int{"max": 100, "min": 1})
// With options
cfg.Set(ctx, "app/config", value,
config.WithType(config.TypeCustom),
config.WithSetCodec(yamlCodec),
)

Control create/update behavior with conditional write options:
// Create only - fails if key already exists
err := cfg.Set(ctx, "feature/flag", true, config.WithIfNotExists())
if config.IsKeyExists(err) {
// Key already existed, value not changed
}
// Update only - fails if key doesn't exist
err = cfg.Set(ctx, "feature/flag", false, config.WithIfExists())
if config.IsNotFound(err) {
// Key didn't exist, nothing updated
}
// Default (upsert) - creates or updates
cfg.Set(ctx, "feature/flag", true) // Always succeeds

These options leverage atomic database operations:
- PostgreSQL: Uses ON CONFLICT DO NOTHING/UPDATE with row count checks
- MongoDB: Uses InsertOne/FindOneAndUpdate with upsert control
// List all keys with prefix using the Filter builder
limit := 100
page, err := cfg.Find(ctx, config.NewFilter().
WithPrefix("app/database").
WithLimit(limit).
Build())
for key, val := range page.Results() {
str, _ := val.String()
fmt.Printf("%s = %s\n", key, str)
}
// Pagination: check if len(results) < limit to determine if more pages exist
if len(page.Results()) == page.Limit() {
nextPage, _ := cfg.Find(ctx, config.NewFilter().
WithPrefix("app/database").
WithLimit(limit).
WithCursor(page.NextCursor()).
Build())
// Process nextPage...
}

Stores that implement VersionedStore retain historical versions of config entries. Use GetVersions to retrieve them:
// Check if versioning is available
if vr, ok := cfg.(config.VersionedReader); ok {
// Get a specific version
page, err := vr.GetVersions(ctx, "app/timeout",
config.NewVersionFilter().WithVersion(2).Build())
// List all versions (newest first) with pagination
page, err := vr.GetVersions(ctx, "app/timeout",
config.NewVersionFilter().WithLimit(10).Build())
for _, v := range page.Versions() {
fmt.Printf("v%d: %v (updated %s)\n",
v.Metadata().Version(), v, v.Metadata().UpdatedAt())
}
// Paginate through all versions
if len(page.Versions()) == page.Limit() {
nextPage, _ := vr.GetVersions(ctx, "app/timeout",
config.NewVersionFilter().
WithLimit(10).
WithCursor(page.NextCursor()).
Build())
// ...
}
}

The memory store supports versioning with an optional history cap:

store := memory.NewStore(memory.WithMaxHistory(100)) // Keep up to 100 versions per key

Namespaces provide isolation between different environments or tenants.
// Get configs for different namespaces
prodCfg := mgr.Namespace("production")
devCfg := mgr.Namespace("development")
// Same key, different values per namespace
prodCfg.Set(ctx, "timeout", 60)
devCfg.Set(ctx, "timeout", 5)
// Use "" for the default namespace
defaultCfg := mgr.Namespace("")

Namespace names may contain alphanumeric characters, underscores, dashes, dots, and colons (e.g. org.example:env.prod).
The internal: prefix is reserved for system namespaces used by infrastructure components such as key rotation and service discovery. Use IsSystemNamespace() to check whether a namespace is reserved:
config.IsSystemNamespace("internal:config:crypto") // true
config.IsSystemNamespace("production") // false

Server-side authorizers can use this to block client writes to system namespaces while allowing infrastructure components to manage them.
Access configuration from context without explicit dependency injection.
// Add manager to context
ctx = config.ContextWithManager(ctx, mgr)
ctx = config.ContextWithNamespace(ctx, "production")
// Use anywhere in your application
val, err := config.Get(ctx, "app/setting")
err = config.Set(ctx, "app/setting", "value")

Combine multiple stores for caching, fallback, or replication:
import "github.com/rbaliyan/config/multi"
// Cache + Backend pattern
cacheStore := memory.NewStore()
backendStore := postgres.NewStore(db, listener)
store := multi.NewStoreWithOptions(
[]config.Store{cacheStore, backendStore},
[]multi.Option{multi.WithStrategy(multi.StrategyReadThrough)},
)
// Primary + Backup pattern
primaryStore := postgres.NewStore(primaryDB, primaryListener)
backupStore := postgres.NewStore(backupDB, backupListener)
store := multi.NewStoreWithOptions(
[]config.Store{primaryStore, backupStore},
[]multi.Option{multi.WithStrategy(multi.StrategyFallback)},
)
mgr, _ := config.New(config.WithStore(store))

Strategies:
- StrategyFallback: Read from first available store, write to all stores
- StrategyReadThrough: Read through stores (cache miss populates earlier stores), write to all stores
- StrategyWriteThrough: Write to all stores, read from first available store
All strategies write to all stores to maintain consistency. The difference is in read behavior:
- Fallback/WriteThrough: Return first successful read
- ReadThrough: Try each store in order, populate earlier stores on cache miss
Multi-Store supports health checks and statistics when the underlying stores implement the optional interfaces:
// Health check (if underlying stores implement HealthChecker)
if err := store.Health(ctx); err != nil {
log.Printf("Store unhealthy: %v", err)
}
// Statistics (if underlying stores implement StatsProvider)
stats, err := store.Stats(ctx)
if err == nil {
log.Printf("Total entries: %d", stats.TotalEntries)
}

Wrap stores with tracing and metrics. Both are disabled by default and must be explicitly enabled.
import "github.com/rbaliyan/config/otel"
// Enable tracing and metrics explicitly
instrumentedStore, _ := otel.WrapStore(store,
otel.WithServiceName("my-service"),
otel.WithBackendName("postgres"),
otel.WithTracesEnabled(true), // Opt-in, disabled by default
otel.WithMetricsEnabled(true), // Opt-in, disabled by default
)
mgr, _ := config.New(config.WithStore(instrumentedStore))

Metrics exported:
- config.operations.total - Counter of all operations
- config.errors.total - Counter of errors by type
- config.operation.duration - Histogram of operation latency
Bind configuration to Go structs with validation. Struct fields are automatically mapped to hierarchical keys using the configured struct tag (default: json).
import "github.com/rbaliyan/config/bind"
type DatabaseConfig struct {
Host string `json:"host" validate:"required"`
Port int `json:"port" validate:"required,min=1,max=65535"`
}
binder := bind.New(cfg, bind.WithTagValidation())
bound := binder.Bind()
// Store a struct - creates keys: database/host, database/port
err := bound.SetStruct(ctx, "database", DatabaseConfig{Host: "localhost", Port: 5432})
// Retrieve a struct - reads keys: database/host, database/port
var dbConfig DatabaseConfig
err = bound.GetStruct(ctx, "database", &dbConfig)

Nested structs are also supported:
type AppConfig struct {
Name string `json:"name"`
Cache CacheConfig `json:"cache"`
}
type CacheConfig struct {
TTL int `json:"ttl"`
Enabled bool `json:"enabled"`
}
// SetStruct creates: app/name, app/cache/ttl, app/cache/enabled
err := bound.SetStruct(ctx, "app", AppConfig{
Name: "myapp",
Cache: CacheConfig{TTL: 300, Enabled: true},
})

Use nonrecursive to store a nested struct as a single JSON value instead of flattening:
type Credentials struct {
Username string `json:"username"`
Password string `json:"password"`
}
type AppConfig struct {
Name string `json:"name"`
Creds Credentials `json:"creds,nonrecursive"` // Store as single JSON value
}
// SetStruct creates: app/name, app/creds (not app/creds/username, app/creds/password)
// Useful when fields are tightly coupled and should be updated atomically

Keep a typed struct automatically synchronized with configuration using polling and atomic swap:
import "github.com/rbaliyan/config/live"
type DatabaseConfig struct {
Host string `json:"host"`
Port int `json:"port"`
}
ref, err := live.New[DatabaseConfig](ctx, cfg, "database",
live.PollInterval(10*time.Second),
live.OnChange(func(old, new DatabaseConfig) {
log.Printf("config changed: %s -> %s", old.Host, new.Host)
}),
live.OnError(func(err error) {
log.Printf("reload error: %v", err)
}),
)
if err != nil {
return err
}
defer ref.Close()
// Hot path: single atomic load, zero contention
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
snap := ref.Load()
fmt.Fprintf(w, "Host: %s, Port: %d", snap.Host, snap.Port)
})

Options:
- PollInterval(d) - Set polling interval (default: 30s)
- OnChange(fn) - Callback with old and new values on change
- OnError(fn) - Callback on reload error
Methods:
- Load() - Get current snapshot (atomic, zero-cost)
- Close() - Stop background polling
- ReloadNow(ctx) - Force immediate reload
- LastReload() - Get last reload timestamp
- LastError() - Get last error (nil if successful)
- ReloadCount() - Get total successful reload count
Multiple encoding formats are supported.
import "github.com/rbaliyan/config/codec"
// Available codecs
jsonCodec := codec.Get("json")
yamlCodec := codec.Get("yaml")
tomlCodec := codec.Get("toml")
// Use with manager
mgr, _ := config.New(
config.WithStore(store),
config.WithCodec(yamlCodec),
)

The library tracks value types for better type safety.
const (
TypeInt // int, int64
TypeFloat // float64
TypeString // string
TypeBool // bool
TypeMapStringInt // map[string]int
TypeMapStringFloat // map[string]float64
TypeMapStringString // map[string]string
TypeListInt // []int
TypeListFloat // []float64
TypeListString // []string
TypeCustom // any other type
)

val, err := cfg.Get(ctx, "key")
if err != nil {
switch {
case config.IsNotFound(err):
// Key doesn't exist
case config.IsTypeMismatch(err):
// Type conversion failed
case config.IsKeyExists(err):
// Key already exists (from WithIfNotExists)
default:
// Other error
}
}

Placeholder tokens in string values can be substituted at parse time (file store only) or at query time (any backend via expand.Store).
Placeholders are resolved once when the file is loaded. Changes to the underlying source (env vars, etc.) are not reflected until the store reloads.
// ${VAR} and ${VAR:-default} from environment variables.
store := file.NewStore("config.yaml",
file.WithExpansion(file.EnvExpander()),
)
// <secret-name> from a custom secrets provider.
store := file.NewStore("config.yaml",
file.WithAngleBracketExpander(func(name string) (string, bool) {
return vault.Get(name) // your lookup
}),
)
// Chain multiple sources: custom overrides first, env fallback.
store := file.NewStore("config.yaml",
file.WithExpansion(overridesFn),
file.WithExpansion(file.EnvExpander()),
)Config file syntax:
database:
host: ${DB_HOST} # replaced with env var
port: ${DB_PORT:-5432} # fallback to 5432 if unset
password: <db_password> # replaced via angle-bracket expander
literal: \${NOT_EXPANDED} # backslash disables substitution

expand.NewStore wraps any config.Store and expands placeholders on every Get and Find, so changes to the source are reflected immediately.
import "github.com/rbaliyan/config/expand"
inner := memory.NewStore()
_ = inner.Connect(ctx)
s, err := expand.NewStore(inner,
expand.WithDollarExpander(expand.EnvExpander()), // ${VAR}
expand.WithAngleExpander(secretsFn), // <VAR>
)
// Reads expand at call time; writes pass through to the inner store unchanged.
val, _ := s.Get(ctx, "app", "host")

config.Secret wraps a sensitive string and masks it in all output so secrets are never accidentally leaked into logs, error messages, or HTTP responses.
type AppConfig struct {
DBPassword config.Secret `yaml:"db_password"`
APIKey config.Secret `json:"api_key"`
}
var cfg AppConfig
// Decode from YAML/JSON/TOML: Secret.Value() holds the real string.
yaml.Unmarshal(data, &cfg)
fmt.Println(cfg.DBPassword) // ******
log.Info("config loaded", "cfg", cfg) // *** no leak ***
realPwd := cfg.DBPassword.Value() // "actual-password"
// Marshal back to YAML/JSON: non-zero Secret writes "******", zero writes "".
out, _ := yaml.Marshal(cfg)
// IsZero reports whether no value has been set.
if cfg.APIKey.IsZero() {
log.Warn("API key not configured")
}

UnmarshalText("******") produces a zero Secret (empty value), preventing a masked token from being treated as a real credential on round-trip.
mgr, err := config.New(
config.WithStore(store), // Required: storage backend
config.WithCodec(yamlCodec), // Optional: default codec (default: JSON)
config.WithLogger(slogLogger), // Optional: custom logger
)

MIT License