diff --git a/core/core.go b/core/core.go
index 28186ed..f8f4fc5 100644
--- a/core/core.go
+++ b/core/core.go
@@ -2,6 +2,8 @@ package core
 
 import (
 	"fmt"
+	"log"
+
 	"github.com/lukaszbudnik/migrator/config"
 	"github.com/lukaszbudnik/migrator/db"
 	"github.com/lukaszbudnik/migrator/loader"
@@ -9,7 +11,6 @@ import (
 	"github.com/lukaszbudnik/migrator/notifications"
 	"github.com/lukaszbudnik/migrator/types"
 	"github.com/lukaszbudnik/migrator/utils"
-	"log"
 )
 
 const (
@@ -45,7 +46,6 @@ type ExecuteFlags struct {
 func GetDiskMigrations(config *config.Config, createLoader func(*config.Config) loader.Loader) []types.Migration {
 	loader := createLoader(config)
 	diskMigrations := loader.GetDiskMigrations()
-	log.Printf("Read disk migrations: %d", len(diskMigrations))
 	return diskMigrations
 }
 
@@ -56,7 +56,6 @@ func GetDBTenants(config *config.Config, createConnector func(*config.Config) db
 	connector.Init()
 	defer connector.Dispose()
 	dbTenants := connector.GetTenants()
-	log.Printf("Read DB tenants: %d", len(dbTenants))
 	return dbTenants
 }
 
@@ -67,7 +66,6 @@ func GetDBMigrations(config *config.Config, createConnector func(*config.Config)
 	connector.Init()
 	defer connector.Dispose()
 	dbMigrations := connector.GetDBMigrations()
-	log.Printf("Read DB migrations: %d", len(dbMigrations))
 	return dbMigrations
 }
 
@@ -75,10 +73,14 @@ func GetDBMigrations(config *config.Config, createConnector func(*config.Config)
 // and using connector created by a function passed as second argument and disk loader created by a function passed as third argument
 func ApplyMigrations(config *config.Config, createConnector func(*config.Config) db.Connector, createLoader func(*config.Config) loader.Loader) []types.Migration {
 	diskMigrations := GetDiskMigrations(config, createLoader)
+	log.Printf("Read disk migrations: %d", len(diskMigrations))
+
 	dbMigrations := GetDBMigrations(config, createConnector)
-	migrationsToApply := migrations.ComputeMigrationsToApply(diskMigrations, dbMigrations)
+	log.Printf("Read DB migrations: %d", len(dbMigrations))
+
+	migrationsToApply := migrations.ComputeMigrationsToApply(diskMigrations, dbMigrations)
 	log.Printf("Found migrations to apply: %d", len(migrationsToApply))
+
 	doApplyMigrations(migrationsToApply, config, createConnector)
 
 	notifier := notifications.CreateNotifier(config)
@@ -98,12 +100,13 @@ func ApplyMigrations(config *config.Config, createConnector func(*config.Config)
 func AddTenant(tenant string, config *config.Config, createConnector func(*config.Config) db.Connector, createLoader func(*config.Config) loader.Loader) []types.Migration {
 
 	diskMigrations := GetDiskMigrations(config, createLoader)
+	log.Printf("Read disk migrations: %d", len(diskMigrations))
 
 	// filter only tenant schemas
 	// var migrationsToApply []types.Migration
 	migrationsToApply := migrations.FilterTenantMigrations(diskMigrations)
-	log.Printf("Found migrations to apply: %d", len(migrationsToApply))
+
 	doAddTenantAndApplyMigrations(tenant, migrationsToApply, config, createConnector)
 
 	notifier := notifications.CreateNotifier(config)
@@ -119,6 +122,14 @@ func AddTenant(tenant string, config *config.Config, createConnector func(*confi
 	return diskMigrations
 }
 
+// VerifyMigrations loads disk and db migrations and verifies their checksums
+// see migrations.VerifyCheckSums for more information
+func VerifyMigrations(config *config.Config, createConnector func(*config.Config) db.Connector, createLoader func(*config.Config) loader.Loader) (bool, []types.Migration) {
+	diskMigrations := GetDiskMigrations(config, createLoader)
+	dbMigrations := GetDBMigrations(config, createConnector)
+	return migrations.VerifyCheckSums(diskMigrations, dbMigrations)
+}
+
 // ExecuteMigrator is a function which executes actions on resources defined in config passed as first argument action defined as second argument
 // and using connector created by a function passed as third argument and disk loader created by a function passed as fourth argument
 func ExecuteMigrator(config *config.Config, executeFlags ExecuteFlags) {
@@ -136,20 +147,34 @@ func doExecuteMigrator(config *config.Config, executeFlags ExecuteFlags, createC
 		}
 	case GetDBMigrationsAction:
 		dbMigrations := GetDBMigrations(config, createConnector)
+		log.Printf("Read DB migrations: %d", len(dbMigrations))
 		if len(dbMigrations) > 0 {
 			log.Printf("List of db migrations\n%v", utils.MigrationDBArrayToString(dbMigrations))
 		}
 	case AddTenantAction:
-		AddTenant(executeFlags.Tenant, config, createConnector, createLoader)
+		verified, offendingMigrations := VerifyMigrations(config, createConnector, createLoader)
+		if !verified {
+			log.Printf("Checksum verification failed.")
+			log.Printf("List of offending disk migrations\n%v", utils.MigrationArrayToString(offendingMigrations))
+		} else {
+			AddTenant(executeFlags.Tenant, config, createConnector, createLoader)
+		}
 	case GetDBTenantsAction:
 		dbTenants := GetDBTenants(config, createConnector)
+		log.Printf("Read DB tenants: %d", len(dbTenants))
 		if len(dbTenants) > 0 {
 			log.Printf("List of db tenants\n%v", utils.TenantArrayToString(dbTenants))
 		}
 	case ApplyAction:
-		migrationsApplied := ApplyMigrations(config, createConnector, createLoader)
-		if len(migrationsApplied) > 0 {
-			log.Printf("List of migrations applied\n%v", utils.MigrationArrayToString(migrationsApplied))
+		verified, offendingMigrations := VerifyMigrations(config, createConnector, createLoader)
+		if !verified {
+			log.Printf("Checksum verification failed.")
+			log.Printf("List of offending disk migrations\n%v", utils.MigrationArrayToString(offendingMigrations))
+		} else {
+			migrationsApplied := ApplyMigrations(config, createConnector, createLoader)
+			if len(migrationsApplied) > 0 {
+				log.Printf("List of migrations applied\n%v", utils.MigrationArrayToString(migrationsApplied))
+			}
 		}
 	}
 }
diff --git a/core/core_mocks.go b/core/core_mocks.go
index 1d56b4d..f097958 100644
--- a/core/core_mocks.go
+++ b/core/core_mocks.go
@@ -1,26 +1,39 @@
 package core
 
 import (
+	"time"
+
 	"github.com/lukaszbudnik/migrator/config"
 	"github.com/lukaszbudnik/migrator/db"
 	"github.com/lukaszbudnik/migrator/loader"
 	"github.com/lukaszbudnik/migrator/types"
-	"time"
 )
 
 type mockedDiskLoader struct {
 }
 
 func (m *mockedDiskLoader) GetDiskMigrations() []types.Migration {
-	m1 := types.MigrationDefinition{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleSchema}
-	m2 := types.MigrationDefinition{Name: "201602220001.sql", SourceDir: "source", File: "source/201602220001.sql", MigrationType: types.MigrationTypeSingleSchema}
-	return []types.Migration{{MigrationDefinition: m1, Contents: "select abc"}, {MigrationDefinition: m2, Contents: "select def"}}
+	m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleSchema, Contents: "select abc", CheckSum: "abc"}
+	m2 := types.Migration{Name: "201602220001.sql", SourceDir: "source", File: "source/201602220001.sql", MigrationType: types.MigrationTypeSingleSchema, Contents: "select def", CheckSum: "def"}
+	return []types.Migration{m1, m2}
 }
 
 func createMockedDiskLoader(config *config.Config) loader.Loader {
 	return new(mockedDiskLoader)
 }
 
+type mockedBrokenCheckSumDiskLoader struct {
+}
+
+func (m *mockedBrokenCheckSumDiskLoader) GetDiskMigrations() []types.Migration {
+	m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleSchema, Contents: "select abc", CheckSum: "xxx"}
+	return []types.Migration{m1}
+}
+
+func createBrokenCheckSumMockedDiskLoader(config *config.Config) loader.Loader {
+	return new(mockedBrokenCheckSumDiskLoader)
+}
+
 type mockedConnector struct {
 }
 
@@ -54,9 +67,9 @@ func (m *mockedConnector) AddTenantAndApplyMigrations(string, []types.Migration)
 }
 
 func (m *mockedConnector) GetDBMigrations() []types.MigrationDB {
-	m1 := types.MigrationDefinition{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleSchema}
+	m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleSchema, CheckSum: "abc"}
 	d1 := time.Date(2016, 02, 22, 16, 41, 1, 123, time.UTC)
-	ms := []types.MigrationDB{{MigrationDefinition: m1, Schema: "source", Created: d1}}
+	ms := []types.MigrationDB{{Migration: m1, Schema: "source", Created: d1}}
 	return ms
 }
diff --git a/core/core_test.go b/core/core_test.go
index 25112e8..e6758b4 100644
--- a/core/core_test.go
+++ b/core/core_test.go
@@ -1,15 +1,10 @@
-// These are integration tests.
-// The following tests must be working in order to get this one working:
-// * config_test.go
-// * migrations_test.go
-// DB & Disk operations are mocked using xcli_mocks.go
-
 package core
 
 import (
+	"testing"
+
 	"github.com/lukaszbudnik/migrator/config"
 	"github.com/stretchr/testify/assert"
-	"testing"
 )
 
 const (
@@ -57,6 +52,14 @@ func TestApplyMigrations(t *testing.T) {
 	doExecuteMigrator(config, executeFlags, createMockedConnector, createMockedDiskLoader)
 }
 
+func TestApplyMigrationsVerificationFailed(t *testing.T) {
+	config, err := config.FromFile(configFile)
+	assert.Nil(t, err)
+	executeFlags := ExecuteFlags{}
+	executeFlags.Action = ApplyAction
+	doExecuteMigrator(config, executeFlags, createMockedConnector, createBrokenCheckSumMockedDiskLoader)
+}
+
 func TestAddTenant(t *testing.T) {
 	config, err := config.FromFile(configFile)
 	assert.Nil(t, err)
@@ -64,3 +67,11 @@ func TestAddTenant(t *testing.T) {
 	executeFlags.Action = AddTenantAction
 	doExecuteMigrator(config, executeFlags, createMockedConnector, createMockedDiskLoader)
 }
+
+func TestAddTenantVerificationFailed(t *testing.T) {
+	config, err := config.FromFile(configFile)
+	assert.Nil(t, err)
+	executeFlags := ExecuteFlags{}
+	executeFlags.Action = AddTenantAction
+	doExecuteMigrator(config, executeFlags, createMockedConnector, createBrokenCheckSumMockedDiskLoader)
+}
diff --git a/coverage.sh b/coverage.sh
index eb1d5cf..c0a79ca 100755
--- a/coverage.sh
+++ b/coverage.sh
@@ -17,5 +17,5 @@ do
     continue
   fi
   go test -race -covermode=atomic -coverprofile=coverage-$package.txt ./$package
-  cat coverage-$package.txt | sed '/^mode/d' >> coverage.txt
+  cat coverage-$package.txt | sed '/^mode/d' | sed '/_mocks.go/d' >> coverage.txt
 done
diff --git a/db/db.go b/db/db.go
index 8bf56ad..26517a2 100644
--- a/db/db.go
+++ b/db/db.go
@@ -138,12 +138,14 @@ func (bc *BaseConnector) GetDBMigrations() []types.MigrationDB {
 			migrationType types.MigrationType
 			schema        string
 			created       time.Time
+			contents      string
+			checksum      string
 		)
-		if err := rows.Scan(&name, &sourceDir, &filename, &migrationType, &schema, &created); err != nil {
+		if err := rows.Scan(&name, &sourceDir, &filename, &migrationType, &schema, &created, &contents, &checksum); err != nil {
 			log.Panicf("Could not read DB migration: %v", err)
 		}
-		mdef := types.MigrationDefinition{Name: name, SourceDir: sourceDir, File: filename, MigrationType: migrationType}
-		dbMigrations = append(dbMigrations, types.MigrationDB{MigrationDefinition: mdef, Schema: schema, Created: created})
+		mdef := types.Migration{Name: name, SourceDir: sourceDir, File: filename, MigrationType: migrationType, Contents: contents, CheckSum: checksum}
+		dbMigrations = append(dbMigrations, types.MigrationDB{Migration: mdef, Schema: schema, Created: created})
 	}
 
 	return dbMigrations
@@ -252,7 +254,7 @@ func (bc *BaseConnector) applyMigrationsInTx(tx *sql.Tx, tenants []string, migra
 				log.Panicf("SQL failed, transaction rollback was called: %v %v", err, contents)
 			}
 
-			_, err = tx.Stmt(insert).Exec(m.Name, m.SourceDir, m.File, m.MigrationType, s)
+			_, err = tx.Stmt(insert).Exec(m.Name, m.SourceDir, m.File, m.MigrationType, s, m.Contents, m.CheckSum)
 			if err != nil {
 				tx.Rollback()
 				log.Panicf("Failed to add migration entry, transaction rollback was called: %v", err)
diff --git a/db/db_dialect.go b/db/db_dialect.go
index 60f6c1c..f7195e4 100644
--- a/db/db_dialect.go
+++ b/db/db_dialect.go
@@ -2,8 +2,9 @@ package db
 
 import (
 	"fmt"
-	"github.com/lukaszbudnik/migrator/config"
 	"log"
+
+	"github.com/lukaszbudnik/migrator/config"
 )
 
 // Dialect returns SQL statements for given DB
@@ -22,7 +23,7 @@ type BaseDialect struct {
 }
 
 const (
-	selectMigrationsSQL      = "select name, source_dir as sd, filename, type, db_schema, created from %v.%v order by name, source_dir"
+	selectMigrationsSQL      = "select name, source_dir as sd, filename, type, db_schema, created, contents, checksum from %v.%v order by name, source_dir"
 	selectTenantsSQL         = "select name from %v.%v"
 	createMigrationsTableSQL = `
 create table if not exists %v.%v (
@@ -32,7 +33,9 @@ create table if not exists %v.%v (
   filename varchar(200) not null,
   type int not null,
   db_schema varchar(200) not null,
-  created timestamp default now()
+  created timestamp default now(),
+  contents text,
+  checksum varchar(64)
 )
 `
 	createTenantsTableSQL = `
diff --git a/db/db_mssql.go b/db/db_mssql.go
index df28aca..1885a48 100644
--- a/db/db_mssql.go
+++ b/db/db_mssql.go
@@ -11,7 +11,7 @@ type msSQLDialect struct {
 }
 
 const (
-	insertMigrationMSSQLDialectSQL    = "insert into %v.%v (name, source_dir, filename, type, db_schema) values (@p1, @p2, @p3, @p4, @p5)"
+	insertMigrationMSSQLDialectSQL    = "insert into %v.%v (name, source_dir, filename, type, db_schema, contents, checksum) values (@p1, @p2, @p3, @p4, @p5, @p6, @p7)"
 	insertTenantMSSQLDialectSQL       = "insert into %v.%v (name) values (@p1)"
 	createTenantsTableMSSQLDialectSQL = `
 IF NOT EXISTS (select * from information_schema.tables where table_schema = '%v' and table_name = '%v')
@@ -33,7 +33,9 @@ BEGIN
     filename varchar(200) not null,
     type int not null,
     db_schema varchar(200) not null,
-    created datetime default CURRENT_TIMESTAMP
+    created datetime default CURRENT_TIMESTAMP,
+    contents text,
+    checksum varchar(64)
   );
 END
 `
diff --git a/db/db_mysql.go b/db/db_mysql.go
index 5f57ab3..619643b 100644
--- a/db/db_mysql.go
+++ b/db/db_mysql.go
@@ -11,7 +11,7 @@ type mySQLDialect struct {
 }
 
 const (
-	insertMigrationMySQLDialectSQL = "insert into %v.%v (name, source_dir, filename, type, db_schema) values (?, ?, ?, ?, ?)"
+	insertMigrationMySQLDialectSQL = "insert into %v.%v (name, source_dir, filename, type, db_schema, contents, checksum) values (?, ?, ?, ?, ?, ?, ?)"
 	insertTenantMySQLDialectSQL    = "insert into %v.%v (name) values (?)"
 )
diff --git a/db/db_postgresql.go b/db/db_postgresql.go
index edf359a..5f78578 100644
--- a/db/db_postgresql.go
+++ b/db/db_postgresql.go
@@ -11,7 +11,7 @@ type postgreSQLDialect struct {
 }
 
 const (
-	insertMigrationPostgreSQLDialectSQL = "insert into %v.%v (name, source_dir, filename, type, db_schema) values ($1, $2, $3, $4, $5)"
+	insertMigrationPostgreSQLDialectSQL = "insert into %v.%v (name, source_dir, filename, type, db_schema, contents, checksum) values ($1, $2, $3, $4, $5, $6, $7)"
 	insertTenantPostgreSQLDialectSQL    = "insert into %v.%v (name) values ($1)"
 )
diff --git a/db/db_test.go b/db/db_test.go
index 9f7dc5a..ee80922 100644
--- a/db/db_test.go
+++ b/db/db_test.go
@@ -1,17 +1,14 @@
 package db
 
-// These are integration tests which talk to database.
-// These tests are almost self-contain
-// they only depended on config package (reading config from file)
-
 import (
 	"fmt"
-	"github.com/lukaszbudnik/migrator/config"
-	"github.com/lukaszbudnik/migrator/types"
-	"github.com/stretchr/testify/assert"
 	"strings"
 	"testing"
 	"time"
+
+	"github.com/lukaszbudnik/migrator/config"
+	"github.com/lukaszbudnik/migrator/types"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestDBCreateConnectorPanicUnknownDriver(t *testing.T) {
@@ -77,8 +74,8 @@ func TestDBApplyMigrationsPanicSQLSyntaxError(t *testing.T) {
 	connector := CreateConnector(config)
 	connector.Init()
 	defer connector.Dispose()
-	m := types.MigrationDefinition{Name: "201602220002.sql", SourceDir: "error", File: "error/201602220002.sql", MigrationType: types.MigrationTypeTenantSchema}
-	ms := []types.Migration{{MigrationDefinition: m, Contents: "createtablexyx ( idint primary key (id) )"}}
+	m := types.Migration{Name: "201602220002.sql", SourceDir: "error", File: "error/201602220002.sql", MigrationType: types.MigrationTypeTenantSchema, Contents: "createtablexyx ( idint primary key (id) )"}
+	ms := []types.Migration{m}
 
 	assert.Panics(t, func() {
 		connector.ApplyMigrations(ms)
@@ -124,19 +121,13 @@ func TestDBApplyMigrations(t *testing.T) {
 	t2 := time.Now().UnixNano()
 	t3 := time.Now().UnixNano()
 
-	publicdef1 := types.MigrationDefinition{Name: fmt.Sprintf("%v.sql", p1), SourceDir: "public", File: fmt.Sprintf("public/%v.sql", p1), MigrationType: types.MigrationTypeSingleSchema}
-	publicdef2 := types.MigrationDefinition{Name: fmt.Sprintf("%v.sql", p2), SourceDir: "public", File: fmt.Sprintf("public/%v.sql", p2), MigrationType: types.MigrationTypeSingleSchema}
-	publicdef3 := types.MigrationDefinition{Name: fmt.Sprintf("%v.sql", p3), SourceDir: "public", File: fmt.Sprintf("public/%v.sql", p3), MigrationType: types.MigrationTypeSingleSchema}
-	public1 := types.Migration{MigrationDefinition: publicdef1, Contents: "drop table if exists modules"}
-	public2 := types.Migration{MigrationDefinition: publicdef2, Contents: "create table modules ( k int, v text )"}
-	public3 := types.Migration{MigrationDefinition: publicdef3, Contents: "insert into modules values ( 123, '123' )"}
+	public1 := types.Migration{Name: fmt.Sprintf("%v.sql", p1), SourceDir: "public", File: fmt.Sprintf("public/%v.sql", p1), MigrationType: types.MigrationTypeSingleSchema, Contents: "drop table if exists modules"}
+	public2 := types.Migration{Name: fmt.Sprintf("%v.sql", p2), SourceDir: "public", File: fmt.Sprintf("public/%v.sql", p2), MigrationType: types.MigrationTypeSingleSchema, Contents: "create table modules ( k int, v text )"}
+	public3 := types.Migration{Name: fmt.Sprintf("%v.sql", p3), SourceDir: "public", File: fmt.Sprintf("public/%v.sql", p3), MigrationType: types.MigrationTypeSingleSchema, Contents: "insert into modules values ( 123, '123' )"}
 
-	tenantdef1 := types.MigrationDefinition{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantSchema}
-	tenantdef2 := types.MigrationDefinition{Name: fmt.Sprintf("%v.sql", t2), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t2), MigrationType: types.MigrationTypeTenantSchema}
-	tenantdef3 := types.MigrationDefinition{Name: fmt.Sprintf("%v.sql", t3), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t2), MigrationType: types.MigrationTypeTenantSchema}
-	tenant1 := types.Migration{MigrationDefinition: tenantdef1, Contents: "drop table if exists {schema}.settings"}
-	tenant2 := types.Migration{MigrationDefinition: tenantdef2, Contents: "create table {schema}.settings (k int, v text)"}
-	tenant3 := types.Migration{MigrationDefinition: tenantdef3, Contents: "insert into {schema}.settings values (456, '456') "}
+	tenant1 := types.Migration{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantSchema, Contents: "drop table if exists {schema}.settings"}
+	tenant2 := types.Migration{Name: fmt.Sprintf("%v.sql", t2), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t2), MigrationType: types.MigrationTypeTenantSchema, Contents: "create table {schema}.settings (k int, v text)"}
+	tenant3 := types.Migration{Name: fmt.Sprintf("%v.sql", t3), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t2), MigrationType: types.MigrationTypeTenantSchema, Contents: "insert into {schema}.settings values (456, '456') "}
 
 	migrationsToApply := []types.Migration{public1, public2, public3, tenant1, tenant2, tenant3}
 
@@ -234,12 +225,9 @@ func TestAddTenantAndApplyMigrations(t *testing.T) {
 	t2 := time.Now().UnixNano()
 	t3 := time.Now().UnixNano()
 
-	tenantdef1 := types.MigrationDefinition{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantSchema}
-	tenantdef2 := types.MigrationDefinition{Name: fmt.Sprintf("%v.sql", t2), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t2), MigrationType: types.MigrationTypeTenantSchema}
-	tenantdef3 := types.MigrationDefinition{Name: fmt.Sprintf("%v.sql", t3), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t3), MigrationType: types.MigrationTypeTenantSchema}
-	tenant1 := types.Migration{MigrationDefinition: tenantdef1, Contents: "drop table if exists {schema}.settings"}
-	tenant2 := types.Migration{MigrationDefinition: tenantdef2, Contents: "create table {schema}.settings (k int, v text) "}
-	tenant3 := types.Migration{MigrationDefinition: tenantdef3, Contents: "insert into {schema}.settings values (456, '456') "}
+	tenant1 := types.Migration{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantSchema, Contents: "drop table if exists {schema}.settings"}
+	tenant2 := types.Migration{Name: fmt.Sprintf("%v.sql", t2), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t2), MigrationType: types.MigrationTypeTenantSchema, Contents: "create table {schema}.settings (k int, v text)"}
+	tenant3 := types.Migration{Name: fmt.Sprintf("%v.sql", t3), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t3), MigrationType: types.MigrationTypeTenantSchema, Contents: "insert into {schema}.settings values (456, '456')"}
 
 	migrationsToApply := []types.Migration{tenant1, tenant2, tenant3}
 
@@ -263,7 +251,7 @@ func TestMySQLGetMigrationInsertSQL(t *testing.T) {
 
 	insertMigrationSQL := dialect.GetMigrationInsertSQL()
 
-	assert.Equal(t, "insert into migrator.migrator_migrations (name, source_dir, filename, type, db_schema) values (?, ?, ?, ?, ?)", insertMigrationSQL)
+	assert.Equal(t, "insert into migrator.migrator_migrations (name, source_dir, filename, type, db_schema, contents, checksum) values (?, ?, ?, ?, ?, ?, ?)", insertMigrationSQL)
 }
 
 func TestPostgreSQLGetMigrationInsertSQL(t *testing.T) {
@@ -276,7 +264,7 @@ func TestPostgreSQLGetMigrationInsertSQL(t *testing.T) {
 
 	insertMigrationSQL := dialect.GetMigrationInsertSQL()
 
-	assert.Equal(t, "insert into migrator.migrator_migrations (name, source_dir, filename, type, db_schema) values ($1, $2, $3, $4, $5)", insertMigrationSQL)
+	assert.Equal(t, "insert into migrator.migrator_migrations (name, source_dir, filename, type, db_schema, contents, checksum) values ($1, $2, $3, $4, $5, $6, $7)", insertMigrationSQL)
 }
 
 func TestMSSQLGetMigrationInsertSQL(t *testing.T) {
@@ -289,7 +277,7 @@ func TestMSSQLGetMigrationInsertSQL(t *testing.T) {
 
 	insertMigrationSQL := dialect.GetMigrationInsertSQL()
 
-	assert.Equal(t, "insert into migrator.migrator_migrations (name, source_dir, filename, type, db_schema) values (@p1, @p2, @p3, @p4, @p5)", insertMigrationSQL)
+	assert.Equal(t, "insert into migrator.migrator_migrations (name, source_dir, filename, type, db_schema, contents, checksum) values (@p1, @p2, @p3, @p4, @p5, @p6, @p7)", insertMigrationSQL)
 }
 
 func TestMySQLGetTenantInsertSQLDefault(t *testing.T) {
@@ -387,7 +375,9 @@ BEGIN
     filename varchar(200) not null,
     type int not null,
     db_schema varchar(200) not null,
-    created datetime default CURRENT_TIMESTAMP
+    created datetime default CURRENT_TIMESTAMP,
+    contents text,
+    checksum varchar(64)
   );
 END
 `
@@ -434,7 +424,9 @@ create table if not exists migrator.migrator_migrations (
   filename varchar(200) not null,
   type int not null,
   db_schema varchar(200) not null,
-  created timestamp default now()
+  created timestamp default now(),
+  contents text,
+  checksum varchar(64)
 )
 `
diff --git a/loader/disk_loader.go b/loader/disk_loader.go
index 8d9c30c..1c7f70f 100644
--- a/loader/disk_loader.go
+++ b/loader/disk_loader.go
@@ -1,14 +1,17 @@
 package loader
 
 import (
-	"github.com/lukaszbudnik/migrator/config"
-	"github.com/lukaszbudnik/migrator/types"
-	"github.com/lukaszbudnik/migrator/utils"
+	"crypto/sha256"
+	"encoding/hex"
 	"io/ioutil"
 	"log"
 	"os"
 	"path/filepath"
 	"sort"
+
+	"github.com/lukaszbudnik/migrator/config"
+	"github.com/lukaszbudnik/migrator/types"
+	"github.com/lukaszbudnik/migrator/utils"
 )
 
 // DiskLoader is struct used for implementing Loader interface for loading migrations from disk
@@ -69,12 +72,14 @@ func (dl *DiskLoader) readMigrationsFromSchemaDirs(migrations map[string][]types
 		}
 		for _, file := range files {
 			if !file.IsDir() {
-				mdef := types.MigrationDefinition{Name: file.Name(), SourceDir: sourceDir, File: filepath.Join(sourceDir, file.Name()), MigrationType: migrationType}
-				contents, err := ioutil.ReadFile(filepath.Join(dl.Config.BaseDir, mdef.File))
+				contents, err := ioutil.ReadFile(filepath.Join(dl.Config.BaseDir, sourceDir, file.Name()))
 				if err != nil {
 					log.Panicf("Could not read migration contents: %v", err)
 				}
-				m := types.Migration{MigrationDefinition: mdef, Contents: string(contents)}
+				hasher := sha256.New()
+				hasher.Write([]byte(contents))
+				m := types.Migration{Name: file.Name(), SourceDir: sourceDir, File: filepath.Join(sourceDir, file.Name()), MigrationType: migrationType, Contents: string(contents), CheckSum: hex.EncodeToString(hasher.Sum(nil))}
+
 				e, ok := migrations[m.Name]
 				if ok {
 					e = append(e, m)
diff --git a/loader/disk_loader_test.go b/loader/disk_loader_test.go
index a0f7621..f9517f7 100644
--- a/loader/disk_loader_test.go
+++ b/loader/disk_loader_test.go
@@ -1,9 +1,10 @@
 package loader
 
 import (
+	"testing"
+
 	"github.com/lukaszbudnik/migrator/config"
 	"github.com/stretchr/testify/assert"
-	"testing"
 )
 
 func TestDiskPanicReadDiskMigrationsNonExistingBaseDir(t *testing.T) {
@@ -23,19 +24,21 @@ func TestDiskGetDiskMigrations(t *testing.T) {
 	var config config.Config
 	config.BaseDir = "../test/migrations"
-	config.SingleSchemas = []string{"public", "ref"}
+	config.SingleSchemas = []string{"config", "ref"}
 	config.TenantSchemas = []string{"tenants"}
 
 	loader := CreateLoader(&config)
 	migrations := loader.GetDiskMigrations()
 
-	assert.Len(t, migrations, 6)
+	assert.Len(t, migrations, 8)
 
-	assert.Equal(t, "public/201602160001.sql", migrations[0].File)
-	assert.Equal(t, "tenants/201602160002.sql", migrations[1].File)
-	assert.Equal(t, "tenants/201602160003.sql", migrations[2].File)
-	assert.Equal(t, "public/201602160004.sql", migrations[3].File)
-	assert.Equal(t, "ref/201602160004.sql", migrations[4].File)
-	assert.Equal(t, "tenants/201602160004.sql", migrations[5].File)
+	assert.Equal(t, "config/201602160001.sql", migrations[0].File)
+	assert.Equal(t, "config/201602160002.sql", migrations[1].File)
+	assert.Equal(t, "tenants/201602160002.sql", migrations[2].File)
+	assert.Equal(t, "ref/201602160003.sql", migrations[3].File)
+	assert.Equal(t, "tenants/201602160003.sql", migrations[4].File)
+	assert.Equal(t, "ref/201602160004.sql", migrations[5].File)
+	assert.Equal(t, "tenants/201602160004.sql", migrations[6].File)
+	assert.Equal(t, "tenants/201602160005.sql", migrations[7].File)
 }
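The disk loader change above stores a hex-encoded SHA-256 of each migration file's raw contents in Migration.CheckSum. A minimal standalone sketch of that computation follows; the file path is illustrative only and the snippet is not part of the change itself.

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io/ioutil"
	"log"
)

func main() {
	// Illustrative path; any migration file under the configured BaseDir works the same way.
	contents, err := ioutil.ReadFile("test/migrations/tenants/201602160002.sql")
	if err != nil {
		log.Panicf("Could not read migration contents: %v", err)
	}

	// Same scheme as the loader: hex-encoded SHA-256 of the raw file contents.
	hasher := sha256.New()
	hasher.Write(contents)
	fmt.Println(hex.EncodeToString(hasher.Sum(nil)))
}
```

Because the checksum is derived solely from the file contents, editing an already applied migration file changes its checksum and will be caught by the verification introduced below.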
diff --git a/migrations/migrations.go b/migrations/migrations.go
index 197d274..b1a7b67 100644
--- a/migrations/migrations.go
+++ b/migrations/migrations.go
@@ -1,24 +1,25 @@
 package migrations
 
 import (
-	"github.com/lukaszbudnik/migrator/types"
 	"log"
+
+	"github.com/lukaszbudnik/migrator/types"
 )
 
-func flattenMigrationDBs(dbMigrations []types.MigrationDB) []types.MigrationDefinition {
-	var flattened []types.MigrationDefinition
-	var previousMigrationDefinition types.MigrationDefinition
+func flattenMigrationDBs(dbMigrations []types.MigrationDB) []types.Migration {
+	var flattened []types.Migration
+	var previousMigration types.Migration
 	for i, m := range dbMigrations {
-		if i == 0 || m.MigrationType == types.MigrationTypeSingleSchema || m.MigrationDefinition != previousMigrationDefinition {
-			flattened = append(flattened, m.MigrationDefinition)
-			previousMigrationDefinition = m.MigrationDefinition
+		if i == 0 || m.MigrationType == types.MigrationTypeSingleSchema || m.Migration != previousMigration {
+			flattened = append(flattened, m.Migration)
+			previousMigration = m.Migration
 		}
 	}
 	return flattened
 }
 
 // difference returns the elements on disk which are not yet in DB
-func difference(diskMigrations []types.Migration, flattenedMigrationDBs []types.MigrationDefinition) []types.Migration {
+func difference(diskMigrations []types.Migration, flattenedMigrationDBs []types.Migration) []types.Migration {
 	// key is Migration.File
 	existsInDB := map[string]bool{}
 	for _, m := range flattenedMigrationDBs {
@@ -33,6 +34,31 @@ func difference(diskMigrations []types.Migration, flattenedMigrationDBs []types.
 	return diff
 }
 
+// intersect returns the elements that are present both on disk and in DB
+func intersect(diskMigrations []types.Migration, flattenedMigrationDBs []types.Migration) []struct {
+	disk types.Migration
+	db   types.Migration
+} {
+	// key is Migration.File
+	existsInDB := map[string]types.Migration{}
+	for _, m := range flattenedMigrationDBs {
+		existsInDB[m.File] = m
+	}
+	intersect := []struct {
+		disk types.Migration
+		db   types.Migration
+	}{}
+	for _, m := range diskMigrations {
+		if db, ok := existsInDB[m.File]; ok {
+			intersect = append(intersect, struct {
+				disk types.Migration
+				db   types.Migration
+			}{m, db})
+		}
+	}
+	return intersect
+}
+
 // ComputeMigrationsToApply computes which disk migrations should be applied to DB based on migrations already present in DB
 func ComputeMigrationsToApply(diskMigrations []types.Migration, dbMigrations []types.MigrationDB) []types.Migration {
 	flattenedMigrationDBs := flattenMigrationDBs(dbMigrations)
@@ -59,3 +85,23 @@ func FilterTenantMigrations(diskMigrations []types.Migration) []types.Migration
 
 	return filteredTenantMigrations
 }
+
+// VerifyCheckSums verifies that the CheckSum of each disk migration matches the flattened DB migrations
+// it returns a bool indicating whether verification passed and a slice of offending (i.e., modified) disk migrations
+// if the bool is false the slice contains the offending migrations
+// if the bool is true the slice of offending migrations is empty
+func VerifyCheckSums(diskMigrations []types.Migration, dbMigrations []types.MigrationDB) (bool, []types.Migration) {
+
+	flattenedMigrationDBs := flattenMigrationDBs(dbMigrations)
+
+	intersect := intersect(diskMigrations, flattenedMigrationDBs)
+	var offendingMigrations []types.Migration
+	var result = true
+	for _, t := range intersect {
+		if t.disk.CheckSum != t.db.CheckSum {
+			offendingMigrations = append(offendingMigrations, t.disk)
+			result = false
+		}
+	}
+	return result, offendingMigrations
+}
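The new migrations.VerifyCheckSums above matches disk and DB migrations by File and compares the checksum recomputed from disk with the checksum recorded when the migration was applied. A small hedged usage sketch with made-up values; only the types and the function signature come from this change.

```go
package main

import (
	"fmt"
	"time"

	"github.com/lukaszbudnik/migrator/migrations"
	"github.com/lukaszbudnik/migrator/types"
)

func main() {
	// Migration as it exists on disk today, with the checksum recomputed by the loader.
	disk := types.Migration{Name: "201602160002.sql", SourceDir: "tenants", File: "tenants/201602160002.sql", MigrationType: types.MigrationTypeTenantSchema, Contents: "create table {schema}.settings (k int, v text)", CheckSum: "def"}

	// Migration as it was recorded in the DB when it was applied; the stored checksum differs,
	// which means the file was modified after it had been applied.
	applied := disk
	applied.CheckSum = "abc"
	dbMigrations := []types.MigrationDB{{Migration: applied, Schema: "abc", Created: time.Now()}}

	verified, offending := migrations.VerifyCheckSums([]types.Migration{disk}, dbMigrations)
	fmt.Println(verified)       // false
	fmt.Println(len(offending)) // 1 (the modified disk migration is reported)
}
```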
types.MigrationDefinition{Name: "003.sql", SourceDir: "ref", File: "ref/003.sql", MigrationType: types.MigrationTypeSingleSchema} - db4 := types.MigrationDB{MigrationDefinition: m4, Schema: "ref", Created: time.Now()} + m4 := types.Migration{Name: "003.sql", SourceDir: "ref", File: "ref/003.sql", MigrationType: types.MigrationTypeSingleSchema} + db4 := types.MigrationDB{Migration: m4, Schema: "ref", Created: time.Now()} dbs := []types.MigrationDB{db1, db2, db3, db4} migrations := flattenMigrationDBs(dbs) - assert.Equal(t, []types.MigrationDefinition{m1, m2, m4}, migrations) + assert.Equal(t, []types.Migration{m1, m2, m4}, migrations) } func TestMigrationsFlattenMigrationDBs2(t *testing.T) { - m2 := types.MigrationDefinition{Name: "002.sql", SourceDir: "tenants", File: "tenants/002.sql", MigrationType: types.MigrationTypeTenantSchema} - db2 := types.MigrationDB{MigrationDefinition: m2, Schema: "abc", Created: time.Now()} + m2 := types.Migration{Name: "002.sql", SourceDir: "tenants", File: "tenants/002.sql", MigrationType: types.MigrationTypeTenantSchema} + db2 := types.MigrationDB{Migration: m2, Schema: "abc", Created: time.Now()} - db3 := types.MigrationDB{MigrationDefinition: m2, Schema: "def", Created: time.Now()} + db3 := types.MigrationDB{Migration: m2, Schema: "def", Created: time.Now()} - m4 := types.MigrationDefinition{Name: "003.sql", SourceDir: "ref", File: "ref/003.sql", MigrationType: types.MigrationTypeSingleSchema} - db4 := types.MigrationDB{MigrationDefinition: m4, Schema: "ref", Created: time.Now()} + m4 := types.Migration{Name: "003.sql", SourceDir: "ref", File: "ref/003.sql", MigrationType: types.MigrationTypeSingleSchema} + db4 := types.MigrationDB{Migration: m4, Schema: "ref", Created: time.Now()} dbs := []types.MigrationDB{db2, db3, db4} migrations := flattenMigrationDBs(dbs) - assert.Equal(t, []types.MigrationDefinition{m2, m4}, migrations) + assert.Equal(t, []types.Migration{m2, m4}, migrations) } func TestComputeMigrationsToApply(t *testing.T) { - mdef1 := types.MigrationDefinition{Name: "a", SourceDir: "a", File: "a", MigrationType: types.MigrationTypeSingleSchema} - mdef2 := types.MigrationDefinition{Name: "b", SourceDir: "b", File: "b", MigrationType: types.MigrationTypeTenantSchema} - mdef3 := types.MigrationDefinition{Name: "c", SourceDir: "c", File: "c", MigrationType: types.MigrationTypeTenantSchema} - mdef4 := types.MigrationDefinition{Name: "d", SourceDir: "d", File: "d", MigrationType: types.MigrationTypeSingleSchema} + mdef1 := types.Migration{Name: "a", SourceDir: "a", File: "a", MigrationType: types.MigrationTypeSingleSchema} + mdef2 := types.Migration{Name: "b", SourceDir: "b", File: "b", MigrationType: types.MigrationTypeTenantSchema} + mdef3 := types.Migration{Name: "c", SourceDir: "c", File: "c", MigrationType: types.MigrationTypeTenantSchema} + mdef4 := types.Migration{Name: "d", SourceDir: "d", File: "d", MigrationType: types.MigrationTypeSingleSchema} - diskMigrations := []types.Migration{{MigrationDefinition: mdef1, Contents: ""}, {MigrationDefinition: mdef2, Contents: ""}, {MigrationDefinition: mdef3, Contents: ""}, {MigrationDefinition: mdef4, Contents: ""}} - dbMigrations := []types.MigrationDB{{MigrationDefinition: mdef1, Schema: "a", Created: time.Now()}, {MigrationDefinition: mdef2, Schema: "abc", Created: time.Now()}, {MigrationDefinition: mdef2, Schema: "def", Created: time.Now()}} + diskMigrations := []types.Migration{mdef1, mdef2, mdef3, mdef4} + dbMigrations := []types.MigrationDB{{Migration: mdef1, Schema: "a", Created: time.Now()}, 
{Migration: mdef2, Schema: "abc", Created: time.Now()}, {Migration: mdef2, Schema: "def", Created: time.Now()}} migrations := ComputeMigrationsToApply(diskMigrations, dbMigrations) assert.Len(t, migrations, 2) @@ -62,18 +63,18 @@ func TestComputeMigrationsToApply(t *testing.T) { } func TestFilterTenantMigrations(t *testing.T) { - mdef1 := types.MigrationDefinition{Name: "20181111", SourceDir: "tenants", File: "tenants/20181111", MigrationType: types.MigrationTypeTenantSchema} - mdef2 := types.MigrationDefinition{Name: "20181111", SourceDir: "public", File: "public/20181111", MigrationType: types.MigrationTypeSingleSchema} - mdef3 := types.MigrationDefinition{Name: "20181112", SourceDir: "public", File: "public/20181112", MigrationType: types.MigrationTypeSingleSchema} + mdef1 := types.Migration{Name: "20181111", SourceDir: "tenants", File: "tenants/20181111", MigrationType: types.MigrationTypeTenantSchema} + mdef2 := types.Migration{Name: "20181111", SourceDir: "public", File: "public/20181111", MigrationType: types.MigrationTypeSingleSchema} + mdef3 := types.Migration{Name: "20181112", SourceDir: "public", File: "public/20181112", MigrationType: types.MigrationTypeSingleSchema} - dev1 := types.MigrationDefinition{Name: "20181119", SourceDir: "tenants", File: "tenants/20181119", MigrationType: types.MigrationTypeTenantSchema} - dev1p1 := types.MigrationDefinition{Name: "201811190", SourceDir: "public", File: "public/201811190", MigrationType: types.MigrationTypeSingleSchema} - dev1p2 := types.MigrationDefinition{Name: "20181191", SourceDir: "public", File: "public/201811191", MigrationType: types.MigrationTypeSingleSchema} + dev1 := types.Migration{Name: "20181119", SourceDir: "tenants", File: "tenants/20181119", MigrationType: types.MigrationTypeTenantSchema} + dev1p1 := types.Migration{Name: "201811190", SourceDir: "public", File: "public/201811190", MigrationType: types.MigrationTypeSingleSchema} + dev1p2 := types.Migration{Name: "20181191", SourceDir: "public", File: "public/201811191", MigrationType: types.MigrationTypeSingleSchema} - dev2 := types.MigrationDefinition{Name: "20181120", SourceDir: "tenants", File: "tenants/20181120", MigrationType: types.MigrationTypeTenantSchema} - dev2p := types.MigrationDefinition{Name: "20181120", SourceDir: "public", File: "public/20181120", MigrationType: types.MigrationTypeSingleSchema} + dev2 := types.Migration{Name: "20181120", SourceDir: "tenants", File: "tenants/20181120", MigrationType: types.MigrationTypeTenantSchema} + dev2p := types.Migration{Name: "20181120", SourceDir: "public", File: "public/20181120", MigrationType: types.MigrationTypeSingleSchema} - diskMigrations := []types.Migration{{MigrationDefinition: mdef1, Contents: ""}, {MigrationDefinition: mdef2, Contents: ""}, {MigrationDefinition: mdef3, Contents: ""}, {MigrationDefinition: dev1, Contents: ""}, {MigrationDefinition: dev1p1, Contents: ""}, {MigrationDefinition: dev1p2, Contents: ""}, {MigrationDefinition: dev2, Contents: ""}, {MigrationDefinition: dev2p, Contents: ""}} + diskMigrations := []types.Migration{mdef1, mdef2, mdef3, dev1, dev1p1, dev1p2, dev2, dev2p} migrations := FilterTenantMigrations(diskMigrations) assert.Len(t, migrations, 3) @@ -96,19 +97,19 @@ func TestComputeMigrationsToApplyDifferentTimestamps(t *testing.T) { // migrator should detect dev1 migrations // previous implementation relied only on counts and such migration was not applied - mdef1 := types.MigrationDefinition{Name: "20181111", SourceDir: "tenants", File: "tenants/20181111", MigrationType: 
types.MigrationTypeTenantSchema} - mdef2 := types.MigrationDefinition{Name: "20181111", SourceDir: "public", File: "public/20181111", MigrationType: types.MigrationTypeSingleSchema} - mdef3 := types.MigrationDefinition{Name: "20181112", SourceDir: "public", File: "public/20181112", MigrationType: types.MigrationTypeSingleSchema} + mdef1 := types.Migration{Name: "20181111", SourceDir: "tenants", File: "tenants/20181111", MigrationType: types.MigrationTypeTenantSchema} + mdef2 := types.Migration{Name: "20181111", SourceDir: "public", File: "public/20181111", MigrationType: types.MigrationTypeSingleSchema} + mdef3 := types.Migration{Name: "20181112", SourceDir: "public", File: "public/20181112", MigrationType: types.MigrationTypeSingleSchema} - dev1 := types.MigrationDefinition{Name: "20181119", SourceDir: "tenants", File: "tenants/20181119", MigrationType: types.MigrationTypeTenantSchema} - dev1p1 := types.MigrationDefinition{Name: "201811190", SourceDir: "public", File: "public/201811190", MigrationType: types.MigrationTypeSingleSchema} - dev1p2 := types.MigrationDefinition{Name: "20181191", SourceDir: "public", File: "public/201811191", MigrationType: types.MigrationTypeSingleSchema} + dev1 := types.Migration{Name: "20181119", SourceDir: "tenants", File: "tenants/20181119", MigrationType: types.MigrationTypeTenantSchema} + dev1p1 := types.Migration{Name: "201811190", SourceDir: "public", File: "public/201811190", MigrationType: types.MigrationTypeSingleSchema} + dev1p2 := types.Migration{Name: "20181191", SourceDir: "public", File: "public/201811191", MigrationType: types.MigrationTypeSingleSchema} - dev2 := types.MigrationDefinition{Name: "20181120", SourceDir: "tenants", File: "tenants/20181120", MigrationType: types.MigrationTypeTenantSchema} - dev2p := types.MigrationDefinition{Name: "20181120", SourceDir: "public", File: "public/20181120", MigrationType: types.MigrationTypeSingleSchema} + dev2 := types.Migration{Name: "20181120", SourceDir: "tenants", File: "tenants/20181120", MigrationType: types.MigrationTypeTenantSchema} + dev2p := types.Migration{Name: "20181120", SourceDir: "public", File: "public/20181120", MigrationType: types.MigrationTypeSingleSchema} - diskMigrations := []types.Migration{{MigrationDefinition: mdef1, Contents: ""}, {MigrationDefinition: mdef2, Contents: ""}, {MigrationDefinition: mdef3, Contents: ""}, {MigrationDefinition: dev1, Contents: ""}, {MigrationDefinition: dev1p1, Contents: ""}, {MigrationDefinition: dev1p2, Contents: ""}, {MigrationDefinition: dev2, Contents: ""}, {MigrationDefinition: dev2p, Contents: ""}} - dbMigrations := []types.MigrationDB{{MigrationDefinition: mdef1, Schema: "abc", Created: time.Now()}, {MigrationDefinition: mdef1, Schema: "def", Created: time.Now()}, {MigrationDefinition: mdef2, Schema: "public", Created: time.Now()}, {MigrationDefinition: mdef3, Schema: "public", Created: time.Now()}, {MigrationDefinition: dev2, Schema: "abc", Created: time.Now()}, {MigrationDefinition: dev2, Schema: "def", Created: time.Now()}, {MigrationDefinition: dev2p, Schema: "public", Created: time.Now()}} + diskMigrations := []types.Migration{mdef1, mdef2, mdef3, dev1, dev1p1, dev1p2, dev2, dev2p} + dbMigrations := []types.MigrationDB{{Migration: mdef1, Schema: "abc", Created: time.Now()}, {Migration: mdef1, Schema: "def", Created: time.Now()}, {Migration: mdef2, Schema: "public", Created: time.Now()}, {Migration: mdef3, Schema: "public", Created: time.Now()}, {Migration: dev2, Schema: "abc", Created: time.Now()}, {Migration: dev2, Schema: "def", 
Created: time.Now()}, {Migration: dev2p, Schema: "public", Created: time.Now()}} migrations := ComputeMigrationsToApply(diskMigrations, dbMigrations) assert.Len(t, migrations, 3) @@ -117,3 +118,42 @@ func TestComputeMigrationsToApplyDifferentTimestamps(t *testing.T) { assert.Equal(t, dev1p1.File, migrations[1].File) assert.Equal(t, dev1p2.File, migrations[2].File) } + +func TestIntersect(t *testing.T) { + mdef1 := types.Migration{Name: "20181111", SourceDir: "tenants", File: "tenants/20181111", MigrationType: types.MigrationTypeTenantSchema} + mdef2 := types.Migration{Name: "20181111", SourceDir: "public", File: "public/20181111", MigrationType: types.MigrationTypeSingleSchema} + mdef3 := types.Migration{Name: "20181112", SourceDir: "public", File: "public/20181112", MigrationType: types.MigrationTypeSingleSchema} + + dev1 := types.Migration{Name: "20181119", SourceDir: "tenants", File: "tenants/20181119", MigrationType: types.MigrationTypeTenantSchema} + dev1p1 := types.Migration{Name: "201811190", SourceDir: "public", File: "public/201811190", MigrationType: types.MigrationTypeSingleSchema} + dev1p2 := types.Migration{Name: "20181191", SourceDir: "public", File: "public/201811191", MigrationType: types.MigrationTypeSingleSchema} + + dev2 := types.Migration{Name: "20181120", SourceDir: "tenants", File: "tenants/20181120", MigrationType: types.MigrationTypeTenantSchema} + dev2p := types.Migration{Name: "20181120", SourceDir: "public", File: "public/20181120", MigrationType: types.MigrationTypeSingleSchema} + + diskMigrations := []types.Migration{mdef1, mdef2, mdef3, dev1, dev1p1, dev1p2, dev2, dev2p} + dbMigrations := []types.Migration{mdef1, mdef2, mdef3, dev2, dev2p} + + intersect := intersect(diskMigrations, dbMigrations) + assert.Len(t, intersect, 5) + for i := range intersect { + assert.Equal(t, intersect[i].disk, intersect[i].db) + assert.Equal(t, intersect[i].disk, dbMigrations[i]) + } +} + +func TestVerifyCheckSumsOK(t *testing.T) { + mdef1 := types.Migration{Name: "20181111", SourceDir: "tenants", File: "tenants/20181111", MigrationType: types.MigrationTypeTenantSchema, CheckSum: "abc"} + mdef2 := types.Migration{Name: "20181111", SourceDir: "tenants", File: "tenants/20181111", MigrationType: types.MigrationTypeSingleSchema, CheckSum: "abc"} + verified, offendingMigrations := VerifyCheckSums([]types.Migration{mdef1}, []types.MigrationDB{{Migration: mdef2}}) + assert.True(t, verified) + assert.Empty(t, offendingMigrations) +} + +func TestVerifyCheckSumsKO(t *testing.T) { + mdef1 := types.Migration{Name: "20181111", SourceDir: "tenants", File: "tenants/20181111", MigrationType: types.MigrationTypeTenantSchema, CheckSum: "abc"} + mdef2 := types.Migration{Name: "20181111", SourceDir: "tenants", File: "tenants/20181111", MigrationType: types.MigrationTypeSingleSchema, CheckSum: "abcd"} + verified, offendingMigrations := VerifyCheckSums([]types.Migration{mdef1}, []types.MigrationDB{{Migration: mdef2}}) + assert.False(t, verified) + assert.Equal(t, mdef1, offendingMigrations[0]) +} diff --git a/server/server.go b/server/server.go index eae0a8e..d38df1a 100644 --- a/server/server.go +++ b/server/server.go @@ -3,14 +3,16 @@ package server import ( "encoding/json" "fmt" - "github.com/lukaszbudnik/migrator/config" - "github.com/lukaszbudnik/migrator/core" - "github.com/lukaszbudnik/migrator/db" - "github.com/lukaszbudnik/migrator/loader" "io/ioutil" "log" "net/http" "strings" + + "github.com/lukaszbudnik/migrator/config" + "github.com/lukaszbudnik/migrator/core" + 
"github.com/lukaszbudnik/migrator/db" + "github.com/lukaszbudnik/migrator/loader" + "github.com/lukaszbudnik/migrator/types" ) const ( @@ -28,6 +30,25 @@ func getPort(config *config.Config) string { return config.Port } +func errorResponse(w http.ResponseWriter, errorStatus int, response interface{}) { + w.WriteHeader(errorStatus) + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(response) +} + +func errorResponseStatusErrorMessage(w http.ResponseWriter, errorStatus int, errorMessage string) { + errorResponse(w, errorStatus, struct{ ErrorMessage string }{errorMessage}) +} + +func errorMethodNotAllowedResponse(w http.ResponseWriter) { + errorResponseStatusErrorMessage(w, http.StatusMethodNotAllowed, "405 method not allowed") +} + +func jsonResponse(w http.ResponseWriter, response interface{}) { + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(response) +} + func defaultHandler(w http.ResponseWriter, r *http.Request) { http.NotFound(w, r) } @@ -40,7 +61,7 @@ func makeHandler(handler func(w http.ResponseWriter, r *http.Request, config *co func configHandler(w http.ResponseWriter, r *http.Request, config *config.Config, createConnector func(*config.Config) db.Connector, createLoader func(*config.Config) loader.Loader) { if r.Method != http.MethodGet { - http.Error(w, "405 method not allowed", http.StatusMethodNotAllowed) + errorMethodNotAllowedResponse(w) return } w.Header().Set("Content-Type", "application/x-yaml") @@ -49,12 +70,11 @@ func configHandler(w http.ResponseWriter, r *http.Request, config *config.Config func diskMigrationsHandler(w http.ResponseWriter, r *http.Request, config *config.Config, createConnector func(*config.Config) db.Connector, createLoader func(*config.Config) loader.Loader) { if r.Method != http.MethodGet { - http.Error(w, "405 method not allowed", http.StatusMethodNotAllowed) + errorMethodNotAllowedResponse(w) return } diskMigrations := core.GetDiskMigrations(config, createLoader) - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(diskMigrations) + jsonResponse(w, diskMigrations) } func migrationsHandler(w http.ResponseWriter, r *http.Request, config *config.Config, createConnector func(*config.Config) db.Connector, createLoader func(*config.Config) loader.Loader) { @@ -62,14 +82,21 @@ func migrationsHandler(w http.ResponseWriter, r *http.Request, config *config.Co switch r.Method { case http.MethodGet: dbMigrations := core.GetDBMigrations(config, createConnector) - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(dbMigrations) + jsonResponse(w, dbMigrations) case http.MethodPost: - migrationsApplied := core.ApplyMigrations(config, createConnector, createLoader) - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(migrationsApplied) + verified, offendingMigrations := core.VerifyMigrations(config, createConnector, createLoader) + if !verified { + log.Printf("Checksum verification failed.") + errorResponse(w, http.StatusFailedDependency, struct { + ErrorMessage string + OffendingMigrations []types.Migration + }{"Checksum verification failed. 
Please review offending migrations.", offendingMigrations}) + } else { + migrationsApplied := core.ApplyMigrations(config, createConnector, createLoader) + jsonResponse(w, migrationsApplied) + } default: - http.Error(w, "405 method not allowed", http.StatusMethodNotAllowed) + errorMethodNotAllowedResponse(w) } } @@ -79,25 +106,32 @@ func tenantsHandler(w http.ResponseWriter, r *http.Request, config *config.Confi switch r.Method { case http.MethodGet: tenants := core.GetDBTenants(config, createConnector) - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(tenants) + jsonResponse(w, tenants) case http.MethodPost: body, err := ioutil.ReadAll(r.Body) if err != nil { - http.Error(w, "500 internal server error", http.StatusInternalServerError) + errorResponseStatusErrorMessage(w, http.StatusInternalServerError, "500 internal server error") return } var param tenantParam err = json.Unmarshal(body, ¶m) if err != nil || param.Name == "" { - http.Error(w, "400 bad request", http.StatusBadRequest) + errorResponseStatusErrorMessage(w, http.StatusBadRequest, "400 bad request") return } - migrationsApplied := core.AddTenant(param.Name, config, createConnector, createLoader) - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(migrationsApplied) + verified, offendingMigrations := core.VerifyMigrations(config, createConnector, createLoader) + if !verified { + log.Printf("Checksum verification failed.") + errorResponse(w, http.StatusFailedDependency, struct { + ErrorMessage string + OffendingMigrations []types.Migration + }{"Checksum verification failed. Please review offending migrations.", offendingMigrations}) + } else { + migrationsApplied := core.AddTenant(param.Name, config, createConnector, createLoader) + jsonResponse(w, migrationsApplied) + } default: - http.Error(w, "405 method not allowed", http.StatusMethodNotAllowed) + errorMethodNotAllowedResponse(w) } } diff --git a/server/server_mocks.go b/server/server_mocks.go index f6e17ca..fee5d6e 100644 --- a/server/server_mocks.go +++ b/server/server_mocks.go @@ -1,20 +1,33 @@ package server import ( + "time" + "github.com/lukaszbudnik/migrator/config" "github.com/lukaszbudnik/migrator/db" "github.com/lukaszbudnik/migrator/loader" "github.com/lukaszbudnik/migrator/types" - "time" ) type mockedDiskLoader struct { } func (m *mockedDiskLoader) GetDiskMigrations() []types.Migration { - m1 := types.MigrationDefinition{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleSchema} - m2 := types.MigrationDefinition{Name: "201602220001.sql", SourceDir: "source", File: "source/201602220001.sql", MigrationType: types.MigrationTypeSingleSchema} - return []types.Migration{{MigrationDefinition: m1, Contents: "select abc"}, {MigrationDefinition: m2, Contents: "select def"}} + m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleSchema, Contents: "select abc"} + m2 := types.Migration{Name: "201602220001.sql", SourceDir: "source", File: "source/201602220001.sql", MigrationType: types.MigrationTypeSingleSchema, Contents: "select def"} + return []types.Migration{m1, m2} +} + +type mockedBrokenCheckSumDiskLoader struct { +} + +func (m *mockedBrokenCheckSumDiskLoader) GetDiskMigrations() []types.Migration { + m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleSchema, 
Contents: "select abc", CheckSum: "xxx"} + return []types.Migration{m1} +} + +func createBrokenCheckSumMockedDiskLoader(config *config.Config) loader.Loader { + return new(mockedBrokenCheckSumDiskLoader) } func createMockedDiskLoader(config *config.Config) loader.Loader { @@ -54,9 +67,9 @@ func (m *mockedConnector) GetTenants() []string { } func (m *mockedConnector) GetDBMigrations() []types.MigrationDB { - m1 := types.MigrationDefinition{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleSchema} + m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleSchema} d1 := time.Date(2016, 02, 22, 16, 41, 1, 123, time.UTC) - ms := []types.MigrationDB{{MigrationDefinition: m1, Schema: "source", Created: d1}} + ms := []types.MigrationDB{{Migration: m1, Schema: "source", Created: d1}} return ms } diff --git a/server/server_test.go b/server/server_test.go index 661dbd6..aea6cdf 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -1,19 +1,14 @@ -// These are integration tests. -// The following tests must be working in order to get this one working: -// * config_test.go -// * migrations_test.go -// DB & Disk operations are mocked using xcli_mocks.go - package server import ( "bytes" - "github.com/lukaszbudnik/migrator/config" - "github.com/stretchr/testify/assert" "net/http" "net/http/httptest" "strings" "testing" + + "github.com/lukaszbudnik/migrator/config" + "github.com/stretchr/testify/assert" ) var ( @@ -91,7 +86,7 @@ func TestServerTenantsPost(t *testing.T) { assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "application/json", w.HeaderMap["Content-Type"][0]) - assert.Equal(t, `[{"Name":"201602220000.sql","SourceDir":"source","File":"source/201602220000.sql","MigrationType":1,"Contents":"select abc"},{"Name":"201602220001.sql","SourceDir":"source","File":"source/201602220001.sql","MigrationType":1,"Contents":"select def"}]`, strings.TrimSpace(w.Body.String())) + assert.Equal(t, `[{"Name":"201602220000.sql","SourceDir":"source","File":"source/201602220000.sql","MigrationType":1,"Contents":"select abc","CheckSum":""},{"Name":"201602220001.sql","SourceDir":"source","File":"source/201602220001.sql","MigrationType":1,"Contents":"select def","CheckSum":""}]`, strings.TrimSpace(w.Body.String())) } func TestServerTenantsPostBadRequest(t *testing.T) { @@ -109,6 +104,22 @@ func TestServerTenantsPostBadRequest(t *testing.T) { assert.Equal(t, http.StatusBadRequest, w.Code) } +func TestServerTenantsPostFailedDependency(t *testing.T) { + config, err := config.FromFile(configFile) + assert.Nil(t, err) + + json := []byte(`{"name": "new_tenant"}`) + req, _ := http.NewRequest(http.MethodPost, "http://example.com/tenants", bytes.NewBuffer(json)) + + w := httptest.NewRecorder() + handler := makeHandler(tenantsHandler, config, createMockedConnector, createBrokenCheckSumMockedDiskLoader) + handler(w, req) + + assert.Equal(t, http.StatusFailedDependency, w.Code) + assert.Equal(t, "application/json", w.HeaderMap["Content-Type"][0]) + assert.Equal(t, `{"ErrorMessage":"Checksum verification failed. 
Please review offending migrations.","OffendingMigrations":[{"Name":"201602220000.sql","SourceDir":"source","File":"source/201602220000.sql","MigrationType":1,"Contents":"select abc","CheckSum":"xxx"}]}`, strings.TrimSpace(w.Body.String())) +} + func TestServerDiskMigrationsGet(t *testing.T) { config, err := config.FromFile(configFile) assert.Nil(t, err) @@ -121,7 +132,7 @@ func TestServerDiskMigrationsGet(t *testing.T) { assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "application/json", w.HeaderMap["Content-Type"][0]) - assert.Equal(t, `[{"Name":"201602220000.sql","SourceDir":"source","File":"source/201602220000.sql","MigrationType":1,"Contents":"select abc"},{"Name":"201602220001.sql","SourceDir":"source","File":"source/201602220001.sql","MigrationType":1,"Contents":"select def"}]`, strings.TrimSpace(w.Body.String())) + assert.Equal(t, `[{"Name":"201602220000.sql","SourceDir":"source","File":"source/201602220000.sql","MigrationType":1,"Contents":"select abc","CheckSum":""},{"Name":"201602220001.sql","SourceDir":"source","File":"source/201602220001.sql","MigrationType":1,"Contents":"select def","CheckSum":""}]`, strings.TrimSpace(w.Body.String())) } func TestServerMigrationsGet(t *testing.T) { @@ -136,7 +147,7 @@ func TestServerMigrationsGet(t *testing.T) { assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "application/json", w.HeaderMap["Content-Type"][0]) - assert.Equal(t, `[{"Name":"201602220000.sql","SourceDir":"source","File":"source/201602220000.sql","MigrationType":1,"Schema":"source","Created":"2016-02-22T16:41:01.000000123Z"}]`, strings.TrimSpace(w.Body.String())) + assert.Equal(t, `[{"Name":"201602220000.sql","SourceDir":"source","File":"source/201602220000.sql","MigrationType":1,"Contents":"","CheckSum":"","Schema":"source","Created":"2016-02-22T16:41:01.000000123Z"}]`, strings.TrimSpace(w.Body.String())) } func TestServerMigrationsPost(t *testing.T) { @@ -151,7 +162,22 @@ func TestServerMigrationsPost(t *testing.T) { assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "application/json", w.HeaderMap["Content-Type"][0]) - assert.Equal(t, `[{"Name":"201602220001.sql","SourceDir":"source","File":"source/201602220001.sql","MigrationType":1,"Contents":"select def"}]`, strings.TrimSpace(w.Body.String())) + assert.Equal(t, `[{"Name":"201602220001.sql","SourceDir":"source","File":"source/201602220001.sql","MigrationType":1,"Contents":"select def","CheckSum":""}]`, strings.TrimSpace(w.Body.String())) +} + +func TestServerMigrationsPostFailedDependency(t *testing.T) { + config, err := config.FromFile(configFile) + assert.Nil(t, err) + + req, _ := http.NewRequest(http.MethodPost, "http://example.com/migrations", nil) + + w := httptest.NewRecorder() + handler := makeHandler(migrationsHandler, config, createMockedConnector, createBrokenCheckSumMockedDiskLoader) + handler(w, req) + + assert.Equal(t, http.StatusFailedDependency, w.Code) + assert.Equal(t, "application/json", w.HeaderMap["Content-Type"][0]) + assert.Equal(t, `{"ErrorMessage":"Checksum verification failed. 
diff --git a/test/docker/scripts/mysql-create-and-setup-container.sh b/test/docker/scripts/mysql-create-and-setup-container.sh
index 0048f6c..f64d45d 100644
--- a/test/docker/scripts/mysql-create-and-setup-container.sh
+++ b/test/docker/scripts/mysql-create-and-setup-container.sh
@@ -14,7 +14,7 @@ function mysql_start() {
     -P \
     "$flavour"
 
-  sleep 15
+  sleep 20
 
   running=$(docker inspect -f {{.State.Running}} "migrator-$name")
diff --git a/test/migrations/config/201602160001.sql b/test/migrations/config/201602160001.sql
new file mode 100644
index 0000000..1bd66fc
--- /dev/null
+++ b/test/migrations/config/201602160001.sql
@@ -0,0 +1 @@
+create schema config;
diff --git a/test/migrations/public/201602160001.sql b/test/migrations/config/201602160002.sql
similarity index 69%
rename from test/migrations/public/201602160001.sql
rename to test/migrations/config/201602160002.sql
index 8176f8c..e43d3fc 100644
--- a/test/migrations/public/201602160001.sql
+++ b/test/migrations/config/201602160002.sql
@@ -1,4 +1,4 @@
-create table config (
+create table {schema}.config (
   id integer,
   k varchar(100),
   v varchar(100),
diff --git a/test/migrations/public/201602160004.sql b/test/migrations/public/201602160004.sql
deleted file mode 100644
index ba1f1de..0000000
--- a/test/migrations/public/201602160004.sql
+++ /dev/null
@@ -1 +0,0 @@
-create schema ref;
diff --git a/test/migrations/ref/201602160003.sql b/test/migrations/ref/201602160003.sql
new file mode 100644
index 0000000..7ce8b4e
--- /dev/null
+++ b/test/migrations/ref/201602160003.sql
@@ -0,0 +1 @@
+create schema {schema};
diff --git a/test/migrations/ref/201602160004.sql b/test/migrations/ref/201602160004.sql
index 9a71ea1..75c92ef 100644
--- a/test/migrations/ref/201602160004.sql
+++ b/test/migrations/ref/201602160004.sql
@@ -1,2 +1 @@
-create table {schema}.roles (id integer, name varchar(100));
-alter table {schema}.roles add constraint pk_id primary key (id);
+create table {schema}.roles (id integer primary key, name varchar(100));
diff --git a/test/migrations/tenants/201602160002.sql b/test/migrations/tenants/201602160002.sql
index 20ac502..8fe0096 100644
--- a/test/migrations/tenants/201602160002.sql
+++ b/test/migrations/tenants/201602160002.sql
@@ -1,3 +1 @@
-create table {schema}.module (id integer, id_config integer);
-
-alter table {schema}.module add foreign key (id_config) references config(id);
+create table {schema}.module (id integer, id_config integer, foreign key (id_config) references config.config(id));
diff --git a/test/migrations/tenants/201602160004.sql b/test/migrations/tenants/201602160004.sql
index 69b806f..7af97b1 100644
--- a/test/migrations/tenants/201602160004.sql
+++ b/test/migrations/tenants/201602160004.sql
@@ -1,2 +1 @@
-alter table {schema}.users add column id_role integer;
-alter table {schema}.users add foreign key (id_role) references ref.roles(id);
+alter table {schema}.users add id_role integer;
diff --git a/test/migrations/tenants/201602160005.sql b/test/migrations/tenants/201602160005.sql
new file mode 100644
index 0000000..583c90d
--- /dev/null
+++ b/test/migrations/tenants/201602160005.sql
@@ -0,0 +1 @@
+alter table {schema}.users add foreign key (id_role) references ref.roles(id);
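
The reshuffled test migrations lean on the {schema} placeholder (see test/migrations/ref/201602160003.sql and tenants/201602160005.sql above), which migrator expands to the target schema name before running a statement. The sketch below assumes a plain string replacement; the actual substitution lives in the db connector and is not part of this diff.

    package main

    import (
        "fmt"
        "strings"
    )

    // expandSchema replaces every {schema} placeholder with the target schema name.
    func expandSchema(contents, schema string) string {
        return strings.Replace(contents, "{schema}", schema, -1)
    }

    func main() {
        stmt := "alter table {schema}.users add foreign key (id_role) references ref.roles(id);"
        for _, schema := range []string{"abc", "def"} {
            fmt.Println(expandSchema(stmt, schema))
        }
    }
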
diff --git a/types/types.go b/types/types.go
index 8706224..960047e 100644
--- a/types/types.go
+++ b/types/types.go
@@ -14,23 +14,19 @@ const (
 	MigrationTypeTenantSchema MigrationType = 2
 )
 
-// MigrationDefinition contains basic information about migration
-type MigrationDefinition struct {
+// Migration contains basic information about migration
+type Migration struct {
 	Name          string
 	SourceDir     string
 	File          string
 	MigrationType MigrationType
+	Contents      string
+	CheckSum      string
 }
 
-// Migration embeds MigrationDefinition and contains its contents
-type Migration struct {
-	MigrationDefinition
-	Contents string
-}
-
-// MigrationDB embeds MigrationDefinition and contain other DB properties
+// MigrationDB embeds Migration and adds DB-specific fields
 type MigrationDB struct {
-	MigrationDefinition
+	Migration
 	Schema  string
 	Created time.Time
 }
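
Flattening MigrationDefinition into Migration is what changes every JSON payload in the server tests: Contents and CheckSum are now plain fields of Migration, and because MigrationDB embeds Migration, those fields are promoted into the same flat JSON object. The snippet below reproduces that effect with local copies of the two structs from the hunk above (MigrationType simplified to int here).

    package main

    import (
        "encoding/json"
        "fmt"
        "time"
    )

    type Migration struct {
        Name          string
        SourceDir     string
        File          string
        MigrationType int
        Contents      string
        CheckSum      string
    }

    type MigrationDB struct {
        Migration
        Schema  string
        Created time.Time
    }

    func main() {
        m := MigrationDB{
            Migration: Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: 1},
            Schema:    "source",
            Created:   time.Date(2016, 2, 22, 16, 41, 1, 123, time.UTC),
        }
        out, _ := json.Marshal(m)
        // Embedded fields are promoted, so Contents and CheckSum show up (empty) alongside Schema and Created,
        // matching the expected body in TestServerMigrationsGet.
        fmt.Println(string(out))
    }
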
buffer.WriteString("+\n") + fmt.Fprintf(w, "%v \t %v \t %v \t %v \t %v \t %v \t %v", "SourceDir", "Name", "File", "Schema", "Created", "Type", "CheckSum") for _, m := range migrations { - buffer.WriteString(fmt.Sprintf("%v\n", MigrationDBToString(&m))) + formatMigrationDB(w, &m) } - buffer.WriteString("+") - buffer.WriteString(strings.Repeat("-", 111)) - buffer.WriteString("+") - + w.Flush() return buffer.String() } +func formatMigrationDB(w io.Writer, m *types.MigrationDB) { + fmt.Fprintf(w, "\n%v \t %v \t %v \t %v \t %v \t %v \t %v", m.SourceDir, m.Name, m.File, m.Schema, m.Created, m.MigrationType, m.CheckSum) +} + // TenantArrayToString creates a string representation of Tenant array func TenantArrayToString(dbTenants []string) string { var buffer bytes.Buffer - buffer.WriteString("+") - buffer.WriteString(strings.Repeat("-", 30)) - buffer.WriteString("+\n") - - buffer.WriteString(fmt.Sprintf("| %-28s |\n", "Name")) - - buffer.WriteString("+") - buffer.WriteString(strings.Repeat("-", 30)) - buffer.WriteString("+\n") + buffer.WriteString("Name") for _, t := range dbTenants { - buffer.WriteString(fmt.Sprintf("| %-28s |\n", t)) + buffer.WriteString("\n") + buffer.WriteString(t) } - buffer.WriteString("+") - buffer.WriteString(strings.Repeat("-", 30)) - buffer.WriteString("+") - return buffer.String() } diff --git a/utils/utils_test.go b/utils/utils_test.go index 75f3c03..4303ef7 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -2,10 +2,11 @@ package utils import ( "fmt" - "github.com/lukaszbudnik/migrator/types" - "github.com/stretchr/testify/assert" "testing" "time" + + "github.com/lukaszbudnik/migrator/types" + "github.com/stretchr/testify/assert" ) func TestContainsFound(t *testing.T) { @@ -31,13 +32,10 @@ func TestContainsNotFound(t *testing.T) { func TestTenantArrayToString(t *testing.T) { dbTenants := []string{"abcabc", "dedededededededededede", "opopopop"} - expected := `+------------------------------+ -| Name | -+------------------------------+ -| abcabc | -| dedededededededededede | -| opopopop | -+------------------------------+` + expected := `Name +abcabc +dedededededededededede +opopopop` actual := TenantArrayToString(dbTenants) @@ -46,18 +44,15 @@ func TestTenantArrayToString(t *testing.T) { func TestMigrationArrayToString(t *testing.T) { - m1 := types.MigrationDefinition{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleSchema} - m2 := types.MigrationDefinition{Name: "201602220001.sql", SourceDir: "tenants", File: "tenants/201602220001.sql", MigrationType: types.MigrationTypeTenantSchema} - m3 := types.MigrationDefinition{Name: "201602220002.sql", SourceDir: "tenants", File: "tenants/201602220002.sql", MigrationType: types.MigrationTypeTenantSchema} - var ms = []types.Migration{{MigrationDefinition: m1, Contents: ""}, {MigrationDefinition: m2, Contents: ""}, {MigrationDefinition: m3, Contents: ""}} - - expected := `+---------------------------------------------------------------------------+ -| SourceDir | Name | File | Type | -+---------------------------------------------------------------------------+ -| source | 201602220000.sql | source/201602220000.sql | 1 | -| tenants | 201602220001.sql | tenants/201602220001.sql | 2 | -| tenants | 201602220002.sql | tenants/201602220002.sql | 2 | -+---------------------------------------------------------------------------+` + m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: 
diff --git a/utils/utils_test.go b/utils/utils_test.go
index 75f3c03..4303ef7 100644
--- a/utils/utils_test.go
+++ b/utils/utils_test.go
@@ -2,10 +2,11 @@ package utils
 
 import (
 	"fmt"
-	"github.com/lukaszbudnik/migrator/types"
-	"github.com/stretchr/testify/assert"
 	"testing"
 	"time"
+
+	"github.com/lukaszbudnik/migrator/types"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestContainsFound(t *testing.T) {
@@ -31,13 +32,10 @@ func TestContainsNotFound(t *testing.T) {
 func TestTenantArrayToString(t *testing.T) {
 	dbTenants := []string{"abcabc", "dedededededededededede", "opopopop"}
 
-	expected := `+------------------------------+
-| Name                         |
-+------------------------------+
-| abcabc                       |
-| dedededededededededede       |
-| opopopop                     |
-+------------------------------+`
+	expected := `Name
+abcabc
+dedededededededededede
+opopopop`
 
 	actual := TenantArrayToString(dbTenants)
 
@@ -46,18 +44,15 @@ func TestTenantArrayToString(t *testing.T) {
 
 func TestMigrationArrayToString(t *testing.T) {
-	m1 := types.MigrationDefinition{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleSchema}
-	m2 := types.MigrationDefinition{Name: "201602220001.sql", SourceDir: "tenants", File: "tenants/201602220001.sql", MigrationType: types.MigrationTypeTenantSchema}
-	m3 := types.MigrationDefinition{Name: "201602220002.sql", SourceDir: "tenants", File: "tenants/201602220002.sql", MigrationType: types.MigrationTypeTenantSchema}
-	var ms = []types.Migration{{MigrationDefinition: m1, Contents: ""}, {MigrationDefinition: m2, Contents: ""}, {MigrationDefinition: m3, Contents: ""}}
-
-	expected := `+---------------------------------------------------------------------------+
-| SourceDir  | Name                 | File                           | Type |
-+---------------------------------------------------------------------------+
-| source     | 201602220000.sql     | source/201602220000.sql        |    1 |
-| tenants    | 201602220001.sql     | tenants/201602220001.sql       |    2 |
-| tenants    | 201602220002.sql     | tenants/201602220002.sql       |    2 |
-+---------------------------------------------------------------------------+`
+	m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleSchema, CheckSum: "abc...123..."}
+	m2 := types.Migration{Name: "201602220001.sql", SourceDir: "tenants", File: "tenants/201602220001.sql", MigrationType: types.MigrationTypeTenantSchema, CheckSum: "abc...123..."}
+	m3 := types.Migration{Name: "201602220002.sql", SourceDir: "tenants", File: "tenants/201602220002.sql", MigrationType: types.MigrationTypeTenantSchema, CheckSum: "abc...123..."}
+	var ms = []types.Migration{m1, m2, m3}
+
+	expected := `SourceDir | Name | File | Type | CheckSum
+source | 201602220000.sql | source/201602220000.sql | 1 | abc...123...
+tenants | 201602220001.sql | tenants/201602220001.sql | 2 | abc...123...
+tenants | 201602220002.sql | tenants/201602220002.sql | 2 | abc...123...`
 
 	actual := MigrationArrayToString(ms)
 
 	assert.Equal(t, expected, actual)
@@ -67,29 +62,23 @@ func TestMigrationArrayToStringEmpty(t *testing.T) {
 
 	var ms = []types.Migration{}
 
-	expected := `+---------------------------------------------------------------------------+
-| SourceDir  | Name                 | File                           | Type |
-+---------------------------------------------------------------------------+
-+---------------------------------------------------------------------------+`
+	expected := `SourceDir | Name | File | Type | CheckSum`
 
 	actual := MigrationArrayToString(ms)
 
 	assert.Equal(t, expected, actual)
 }
 
 func TestMigrationDBArrayToString(t *testing.T) {
-	m1 := types.MigrationDefinition{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleSchema}
-	m2 := types.MigrationDefinition{Name: "201602220001.sql", SourceDir: "tenants", File: "tenants/201602220001.sql", MigrationType: types.MigrationTypeTenantSchema}
+	m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleSchema, CheckSum: "abc"}
+	m2 := types.Migration{Name: "201602220001.sql", SourceDir: "tenants", File: "tenants/201602220001.sql", MigrationType: types.MigrationTypeTenantSchema, CheckSum: "def"}
 	d1 := time.Date(2016, 02, 22, 16, 41, 1, 123, time.UTC)
 	d2 := time.Date(2016, 02, 22, 16, 41, 2, 456, time.UTC)
-	var ms = []types.MigrationDB{{MigrationDefinition: m1, Schema: "source", Created: d1}, {MigrationDefinition: m2, Schema: "abc", Created: d2}, {MigrationDefinition: m2, Schema: "def", Created: d2}}
-
-	expected := `+---------------------------------------------------------------------------------------------------------------+
-| SourceDir  | Name                 | File                           | Schema     | Created              | Type |
-+---------------------------------------------------------------------------------------------------------------+
-| source     | 201602220000.sql     | source/201602220000.sql        | source     | 2016-02-22 16:41:01  |    1 |
-| tenants    | 201602220001.sql     | tenants/201602220001.sql       | abc        | 2016-02-22 16:41:02  |    2 |
-| tenants    | 201602220001.sql     | tenants/201602220001.sql       | def        | 2016-02-22 16:41:02  |    2 |
-+---------------------------------------------------------------------------------------------------------------+`
+	var ms = []types.MigrationDB{{Migration: m1, Schema: "source", Created: d1}, {Migration: m2, Schema: "abc", Created: d2}, {Migration: m2, Schema: "def", Created: d2}}
+
+	expected := `SourceDir | Name | File | Schema | Created | Type | CheckSum
+source | 201602220000.sql | source/201602220000.sql | source | 2016-02-22 16:41:01.000000123 +0000 UTC | 1 | abc
+tenants | 201602220001.sql | tenants/201602220001.sql | abc | 2016-02-22 16:41:02.000000456 +0000 UTC | 2 | def
+tenants | 201602220001.sql | tenants/201602220001.sql | def | 2016-02-22 16:41:02.000000456 +0000 UTC | 2 | def`
 
 	actual := MigrationDBArrayToString(ms)
 
 	assert.Equal(t, expected, actual)
@@ -98,10 +87,7 @@ func TestMigrationDBArrayToString(t *testing.T) {
 func TestMigrationDBArrayToStringEmpty(t *testing.T) {
 	var ms = []types.MigrationDB{}
 
-	expected := `+---------------------------------------------------------------------------------------------------------------+
-| SourceDir  | Name                 | File                           | Schema     | Created              | Type |
-+---------------------------------------------------------------------------------------------------------------+
-+---------------------------------------------------------------------------------------------------------------+`
+	expected := `SourceDir | Name | File | Schema | Created | Type | CheckSum`
 
 	actual := MigrationDBArrayToString(ms)
 
 	assert.Equal(t, expected, actual)