refactor: update db migrator

Steven
2024-08-29 08:21:23 +08:00
parent 784d91ab75
commit 85f5f03be9
19 changed files with 307 additions and 412 deletions

@@ -1,76 +0,0 @@
-- migration_history
CREATE TABLE migration_history (
version TEXT NOT NULL PRIMARY KEY,
created_ts BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW())
);
-- workspace_setting
CREATE TABLE workspace_setting (
key TEXT NOT NULL UNIQUE,
value TEXT NOT NULL
);
-- user
CREATE TABLE "user" (
id SERIAL PRIMARY KEY,
created_ts BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW()),
updated_ts BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW()),
row_status TEXT NOT NULL CHECK (row_status IN ('NORMAL', 'ARCHIVED')) DEFAULT 'NORMAL',
email TEXT NOT NULL UNIQUE,
nickname TEXT NOT NULL,
password_hash TEXT NOT NULL,
role TEXT NOT NULL CHECK (role IN ('ADMIN', 'USER')) DEFAULT 'USER'
);
CREATE INDEX idx_user_email ON "user"(email);
-- user_setting
CREATE TABLE user_setting (
user_id INTEGER REFERENCES "user"(id) NOT NULL,
key TEXT NOT NULL,
value TEXT NOT NULL,
PRIMARY KEY (user_id, key)
);
-- shortcut
CREATE TABLE shortcut (
id SERIAL PRIMARY KEY,
creator_id INTEGER REFERENCES "user"(id) NOT NULL,
created_ts BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW()),
updated_ts BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW()),
row_status TEXT NOT NULL CHECK (row_status IN ('NORMAL', 'ARCHIVED')) DEFAULT 'NORMAL',
name TEXT NOT NULL UNIQUE,
link TEXT NOT NULL,
title TEXT NOT NULL DEFAULT '',
description TEXT NOT NULL DEFAULT '',
visibility TEXT NOT NULL CHECK (visibility IN ('PRIVATE', 'WORKSPACE', 'PUBLIC')) DEFAULT 'PRIVATE',
tag TEXT NOT NULL DEFAULT '',
og_metadata TEXT NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_shortcut_name ON shortcut(name);
-- activity
CREATE TABLE activity (
id SERIAL PRIMARY KEY,
creator_id INTEGER NOT NULL,
created_ts BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW()),
type TEXT NOT NULL DEFAULT '',
level TEXT NOT NULL CHECK (level IN ('INFO', 'WARN', 'ERROR')) DEFAULT 'INFO',
payload TEXT NOT NULL DEFAULT '{}'
);
-- collection
CREATE TABLE collection (
id SERIAL PRIMARY KEY,
creator_id INTEGER REFERENCES "user"(id) NOT NULL,
created_ts BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW()),
updated_ts BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW()),
name TEXT NOT NULL UNIQUE,
title TEXT NOT NULL DEFAULT '',
description TEXT NOT NULL DEFAULT '',
shortcut_ids INTEGER ARRAY NOT NULL,
visibility TEXT NOT NULL CHECK (visibility IN ('PRIVATE', 'WORKSPACE', 'PUBLIC')) DEFAULT 'PRIVATE'
);
CREATE INDEX idx_collection_name ON collection(name);

@@ -1,76 +0,0 @@
-- migration_history
CREATE TABLE migration_history (
version TEXT NOT NULL PRIMARY KEY,
created_ts BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW())
);
-- workspace_setting
CREATE TABLE workspace_setting (
key TEXT NOT NULL UNIQUE,
value TEXT NOT NULL
);
-- user
CREATE TABLE "user" (
id SERIAL PRIMARY KEY,
created_ts BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW()),
updated_ts BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW()),
row_status TEXT NOT NULL CHECK (row_status IN ('NORMAL', 'ARCHIVED')) DEFAULT 'NORMAL',
email TEXT NOT NULL UNIQUE,
nickname TEXT NOT NULL,
password_hash TEXT NOT NULL,
role TEXT NOT NULL CHECK (role IN ('ADMIN', 'USER')) DEFAULT 'USER'
);
CREATE INDEX idx_user_email ON "user"(email);
-- user_setting
CREATE TABLE user_setting (
user_id INTEGER REFERENCES "user"(id) NOT NULL,
key TEXT NOT NULL,
value TEXT NOT NULL,
PRIMARY KEY (user_id, key)
);
-- shortcut
CREATE TABLE shortcut (
id SERIAL PRIMARY KEY,
creator_id INTEGER REFERENCES "user"(id) NOT NULL,
created_ts BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW()),
updated_ts BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW()),
row_status TEXT NOT NULL CHECK (row_status IN ('NORMAL', 'ARCHIVED')) DEFAULT 'NORMAL',
name TEXT NOT NULL UNIQUE,
link TEXT NOT NULL,
title TEXT NOT NULL DEFAULT '',
description TEXT NOT NULL DEFAULT '',
visibility TEXT NOT NULL CHECK (visibility IN ('PRIVATE', 'WORKSPACE', 'PUBLIC')) DEFAULT 'PRIVATE',
tag TEXT NOT NULL DEFAULT '',
og_metadata TEXT NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_shortcut_name ON shortcut(name);
-- activity
CREATE TABLE activity (
id SERIAL PRIMARY KEY,
creator_id INTEGER NOT NULL,
created_ts BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW()),
type TEXT NOT NULL DEFAULT '',
level TEXT NOT NULL CHECK (level IN ('INFO', 'WARN', 'ERROR')) DEFAULT 'INFO',
payload TEXT NOT NULL DEFAULT '{}'
);
-- collection
CREATE TABLE collection (
id SERIAL PRIMARY KEY,
creator_id INTEGER REFERENCES "user"(id) NOT NULL,
created_ts BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW()),
updated_ts BIGINT NOT NULL DEFAULT EXTRACT(EPOCH FROM NOW()),
name TEXT NOT NULL UNIQUE,
title TEXT NOT NULL DEFAULT '',
description TEXT NOT NULL DEFAULT '',
shortcut_ids INTEGER ARRAY NOT NULL,
visibility TEXT NOT NULL CHECK (visibility IN ('PRIVATE', 'WORKSPACE', 'PUBLIC')) DEFAULT 'PRIVATE'
);
CREATE INDEX idx_collection_name ON collection(name);

@@ -1,171 +0,0 @@
package postgres
import (
"context"
"embed"
"fmt"
"io/fs"
"regexp"
"sort"
"strings"
"github.com/pkg/errors"
"github.com/yourselfhosted/slash/server/common"
"github.com/yourselfhosted/slash/store"
)
const (
latestSchemaFileName = "LATEST.sql"
)
//go:embed migration
var migrationFS embed.FS
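// Migrate brings the database schema up to date for the current mode.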
func (d *DB) Migrate(ctx context.Context) error {
if d.profile.IsDev() {
return d.nonProdMigrate(ctx)
}
return d.prodMigrate(ctx)
}
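// nonProdMigrate applies the latest dev schema, but only when the database contains no tables yet.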
func (d *DB) nonProdMigrate(ctx context.Context) error {
rows, err := d.db.QueryContext(ctx, "SELECT tablename FROM pg_catalog.pg_tables WHERE schemaname != 'pg_catalog' AND schemaname != 'information_schema';")
if err != nil {
return errors.Errorf("failed to query database tables: %s", err)
}
defer rows.Close()
var tables []string
for rows.Next() {
var table string
if err := rows.Scan(&table); err != nil {
return errors.Errorf("failed to scan table name: %s", err)
}
tables = append(tables, table)
}
if err := rows.Err(); err != nil {
return errors.Errorf("failed to iterate database tables: %s", err)
}
if len(tables) != 0 {
return nil
}
buf, err := migrationFS.ReadFile("migration/dev/" + latestSchemaFileName)
if err != nil {
return errors.Errorf("failed to read latest schema file: %s", err)
}
stmt := string(buf)
if _, err := d.db.ExecContext(ctx, stmt); err != nil {
return errors.Errorf("failed to exec SQL %s: %s", stmt, err)
}
return nil
}
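// prodMigrate applies the full latest prod schema when there is no migration history, otherwise it applies any pending minor-version migrations and records them in migration_history.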
func (d *DB) prodMigrate(ctx context.Context) error {
currentVersion := common.GetCurrentVersion(d.profile.Mode)
migrationHistoryList, err := d.ListMigrationHistories(ctx, &store.FindMigrationHistory{})
// If the migration history cannot be read or is empty, apply the latest schema.
if err != nil || len(migrationHistoryList) == 0 {
latestSchemaBytes, err := migrationFS.ReadFile("migration/prod/" + latestSchemaFileName)
if err != nil {
return errors.Errorf("failed to read latest schema file: %s", err)
}
latestSchema := string(latestSchemaBytes)
if _, err := d.db.ExecContext(ctx, latestSchema); err != nil {
return errors.Errorf("failed to exec SQL %s: %s", latestSchema, err)
}
// After applying the latest schema, upsert the current version into migration_history.
if _, err := d.UpsertMigrationHistory(ctx, &store.UpsertMigrationHistory{
Version: currentVersion,
}); err != nil {
return errors.Wrap(err, "failed to upsert migration history")
}
return nil
}
migrationHistoryVersionList := []string{}
for _, migrationHistory := range migrationHistoryList {
migrationHistoryVersionList = append(migrationHistoryVersionList, migrationHistory.Version)
}
sort.Sort(common.SortVersion(migrationHistoryVersionList))
latestMigrationHistoryVersion := migrationHistoryVersionList[len(migrationHistoryVersionList)-1]
// If the latest migration history version is greater than or equal to the current schema version, there is nothing to migrate.
if !common.IsVersionGreaterThan(common.GetSchemaVersion(currentVersion), latestMigrationHistoryVersion) {
return nil
}
println("start migrate")
for _, minorVersion := range getMinorVersionList() {
normalizedVersion := minorVersion + ".0"
if common.IsVersionGreaterThan(normalizedVersion, latestMigrationHistoryVersion) && common.IsVersionGreaterOrEqualThan(currentVersion, normalizedVersion) {
println("applying migration for", normalizedVersion)
if err := d.applyMigrationForMinorVersion(ctx, minorVersion); err != nil {
return errors.Wrap(err, "failed to apply minor version migration")
}
}
}
println("end migrate")
return nil
}
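// applyMigrationForMinorVersion executes every SQL file under migration/prod/<minorVersion> in order and upserts <minorVersion>.0 into migration_history.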
func (d *DB) applyMigrationForMinorVersion(ctx context.Context, minorVersion string) error {
filenames, err := fs.Glob(migrationFS, fmt.Sprintf("migration/prod/%s/*.sql", minorVersion))
if err != nil {
return errors.Wrap(err, "failed to read ddl files")
}
sort.Strings(filenames)
// Loop over all migration files and execute them in order.
for _, filename := range filenames {
buf, err := migrationFS.ReadFile(filename)
if err != nil {
return errors.Wrapf(err, "failed to read minor version migration file, filename=%s", filename)
}
for _, stmt := range strings.Split(string(buf), ";") {
if strings.TrimSpace(stmt) == "" {
continue
}
if _, err := d.db.ExecContext(ctx, stmt); err != nil {
return errors.Wrapf(err, "migrate error: %s", stmt)
}
}
}
// Upsert the newest version to migration_history.
version := minorVersion + ".0"
if _, err = d.UpsertMigrationHistory(ctx, &store.UpsertMigrationHistory{Version: version}); err != nil {
return errors.Wrapf(err, "failed to upsert migration history with version: %s", version)
}
return nil
}
// minorDirRegexp is a regular expression for minor version directory.
var minorDirRegexp = regexp.MustCompile(`^migration/prod/[0-9]+\.[0-9]+$`)
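// getMinorVersionList returns the minor-version directories embedded under migration/prod, sorted ascending.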
func getMinorVersionList() []string {
minorVersionList := []string{}
if err := fs.WalkDir(migrationFS, "migration", func(path string, file fs.DirEntry, err error) error {
if err != nil {
return err
}
if file.IsDir() && minorDirRegexp.MatchString(path) {
minorVersionList = append(minorVersionList, file.Name())
}
return nil
}); err != nil {
panic(err)
}
sort.Sort(common.SortVersion(minorVersionList))
return minorVersionList
}

@@ -1,76 +0,0 @@
-- migration_history
CREATE TABLE migration_history (
version TEXT NOT NULL PRIMARY KEY,
created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now'))
);
-- workspace_setting
CREATE TABLE workspace_setting (
key TEXT NOT NULL UNIQUE,
value TEXT NOT NULL
);
-- user
CREATE TABLE user (
id INTEGER PRIMARY KEY AUTOINCREMENT,
created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
updated_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
row_status TEXT NOT NULL CHECK (row_status IN ('NORMAL', 'ARCHIVED')) DEFAULT 'NORMAL',
email TEXT NOT NULL UNIQUE,
nickname TEXT NOT NULL,
password_hash TEXT NOT NULL,
role TEXT NOT NULL CHECK (role IN ('ADMIN', 'USER')) DEFAULT 'USER'
);
CREATE INDEX idx_user_email ON user(email);
-- user_setting
CREATE TABLE user_setting (
user_id INTEGER NOT NULL,
key TEXT NOT NULL,
value TEXT NOT NULL,
UNIQUE(user_id, key)
);
-- shortcut
CREATE TABLE shortcut (
id INTEGER PRIMARY KEY AUTOINCREMENT,
creator_id INTEGER NOT NULL,
created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
updated_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
row_status TEXT NOT NULL CHECK (row_status IN ('NORMAL', 'ARCHIVED')) DEFAULT 'NORMAL',
name TEXT NOT NULL UNIQUE,
link TEXT NOT NULL,
title TEXT NOT NULL DEFAULT '',
description TEXT NOT NULL DEFAULT '',
visibility TEXT NOT NULL CHECK (visibility IN ('PRIVATE', 'WORKSPACE', 'PUBLIC')) DEFAULT 'PRIVATE',
tag TEXT NOT NULL DEFAULT '',
og_metadata TEXT NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_shortcut_name ON shortcut(name);
-- activity
CREATE TABLE activity (
id INTEGER PRIMARY KEY AUTOINCREMENT,
creator_id INTEGER NOT NULL,
created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
type TEXT NOT NULL DEFAULT '',
level TEXT NOT NULL CHECK (level IN ('INFO', 'WARN', 'ERROR')) DEFAULT 'INFO',
payload TEXT NOT NULL DEFAULT '{}'
);
-- collection
CREATE TABLE collection (
id INTEGER PRIMARY KEY AUTOINCREMENT,
creator_id INTEGER NOT NULL,
created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
updated_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
name TEXT NOT NULL UNIQUE,
title TEXT NOT NULL DEFAULT '',
description TEXT NOT NULL DEFAULT '',
shortcut_ids INTEGER[] NOT NULL,
visibility TEXT NOT NULL CHECK (visibility IN ('PRIVATE', 'WORKSPACE', 'PUBLIC')) DEFAULT 'PRIVATE'
);
CREATE INDEX idx_collection_name ON collection(name);

@@ -1,2 +0,0 @@
CREATE INDEX IF NOT EXISTS idx_user_email ON user(email);
CREATE INDEX IF NOT EXISTS idx_shortcut_name ON shortcut(name);

@@ -1 +0,0 @@
ALTER TABLE shortcut ADD COLUMN og_metadata TEXT NOT NULL DEFAULT '{}';

@@ -1 +0,0 @@
ALTER TABLE shortcut ADD COLUMN title TEXT NOT NULL DEFAULT '';

@@ -1 +0,0 @@
DROP TABLE IF EXISTS idp;

@@ -1,14 +0,0 @@
-- collection
CREATE TABLE collection (
id INTEGER PRIMARY KEY AUTOINCREMENT,
creator_id INTEGER NOT NULL,
created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
updated_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
name TEXT NOT NULL UNIQUE,
title TEXT NOT NULL DEFAULT '',
description TEXT NOT NULL DEFAULT '',
shortcut_ids INTEGER[] NOT NULL,
visibility TEXT NOT NULL CHECK (visibility IN ('PRIVATE', 'WORKSPACE', 'PUBLIC')) DEFAULT 'PRIVATE'
);
CREATE INDEX idx_collection_name ON collection(name);

@@ -1,76 +0,0 @@
-- migration_history
CREATE TABLE migration_history (
version TEXT NOT NULL PRIMARY KEY,
created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now'))
);
-- workspace_setting
CREATE TABLE workspace_setting (
key TEXT NOT NULL UNIQUE,
value TEXT NOT NULL
);
-- user
CREATE TABLE user (
id INTEGER PRIMARY KEY AUTOINCREMENT,
created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
updated_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
row_status TEXT NOT NULL CHECK (row_status IN ('NORMAL', 'ARCHIVED')) DEFAULT 'NORMAL',
email TEXT NOT NULL UNIQUE,
nickname TEXT NOT NULL,
password_hash TEXT NOT NULL,
role TEXT NOT NULL CHECK (role IN ('ADMIN', 'USER')) DEFAULT 'USER'
);
CREATE INDEX idx_user_email ON user(email);
-- user_setting
CREATE TABLE user_setting (
user_id INTEGER NOT NULL,
key TEXT NOT NULL,
value TEXT NOT NULL,
UNIQUE(user_id, key)
);
-- shortcut
CREATE TABLE shortcut (
id INTEGER PRIMARY KEY AUTOINCREMENT,
creator_id INTEGER NOT NULL,
created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
updated_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
row_status TEXT NOT NULL CHECK (row_status IN ('NORMAL', 'ARCHIVED')) DEFAULT 'NORMAL',
name TEXT NOT NULL UNIQUE,
link TEXT NOT NULL,
title TEXT NOT NULL DEFAULT '',
description TEXT NOT NULL DEFAULT '',
visibility TEXT NOT NULL CHECK (visibility IN ('PRIVATE', 'WORKSPACE', 'PUBLIC')) DEFAULT 'PRIVATE',
tag TEXT NOT NULL DEFAULT '',
og_metadata TEXT NOT NULL DEFAULT '{}'
);
CREATE INDEX idx_shortcut_name ON shortcut(name);
-- activity
CREATE TABLE activity (
id INTEGER PRIMARY KEY AUTOINCREMENT,
creator_id INTEGER NOT NULL,
created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
type TEXT NOT NULL DEFAULT '',
level TEXT NOT NULL CHECK (level IN ('INFO', 'WARN', 'ERROR')) DEFAULT 'INFO',
payload TEXT NOT NULL DEFAULT '{}'
);
-- collection
CREATE TABLE collection (
id INTEGER PRIMARY KEY AUTOINCREMENT,
creator_id INTEGER NOT NULL,
created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
updated_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
name TEXT NOT NULL UNIQUE,
title TEXT NOT NULL DEFAULT '',
description TEXT NOT NULL DEFAULT '',
shortcut_ids INTEGER[] NOT NULL,
visibility TEXT NOT NULL CHECK (visibility IN ('PRIVATE', 'WORKSPACE', 'PUBLIC')) DEFAULT 'PRIVATE'
);
CREATE INDEX idx_collection_name ON collection(name);

@@ -1,233 +0,0 @@
package sqlite
import (
"context"
"embed"
"fmt"
"io/fs"
"os"
"regexp"
"sort"
"time"
"github.com/pkg/errors"
"github.com/yourselfhosted/slash/server/common"
"github.com/yourselfhosted/slash/store"
)
//go:embed migration
var migrationFS embed.FS
//go:embed seed
var seedFS embed.FS
// Migrate applies the latest schema to the database.
func (d *DB) Migrate(ctx context.Context) error {
currentVersion := common.GetCurrentVersion(d.profile.Mode)
if d.profile.Mode == "prod" {
_, err := os.Stat(d.profile.DSN)
if err != nil {
// If the db file does not exist, create a new one with the latest schema.
if errors.Is(err, os.ErrNotExist) {
if err := d.applyLatestSchema(ctx); err != nil {
return errors.Wrap(err, "failed to apply latest schema")
}
// Upsert the newest version to migration_history.
if _, err := d.UpsertMigrationHistory(ctx, &store.UpsertMigrationHistory{
Version: currentVersion,
}); err != nil {
return errors.Wrap(err, "failed to upsert migration history")
}
} else {
return errors.Wrap(err, "failed to get db file stat")
}
} else {
// If db file exists, we should check if we need to migrate the database.
migrationHistoryList, err := d.ListMigrationHistories(ctx, &store.FindMigrationHistory{})
if err != nil {
return errors.Wrap(err, "failed to find migration history")
}
// If there is no migration history, apply the migration for the current minor version and upsert the migration history.
if len(migrationHistoryList) == 0 {
minorVersion := common.GetMinorVersion(currentVersion)
if err := d.applyMigrationForMinorVersion(ctx, minorVersion); err != nil {
return errors.Wrapf(err, "failed to apply version %s migration", minorVersion)
}
_, err := d.UpsertMigrationHistory(ctx, &store.UpsertMigrationHistory{
Version: currentVersion,
})
if err != nil {
return errors.Wrap(err, "failed to upsert migration history")
}
return nil
}
migrationHistoryVersionList := []string{}
for _, migrationHistory := range migrationHistoryList {
migrationHistoryVersionList = append(migrationHistoryVersionList, migrationHistory.Version)
}
sort.Sort(common.SortVersion(migrationHistoryVersionList))
latestMigrationHistoryVersion := migrationHistoryVersionList[len(migrationHistoryVersionList)-1]
if common.IsVersionGreaterThan(common.GetSchemaVersion(currentVersion), latestMigrationHistoryVersion) {
minorVersionList := getMinorVersionList()
// Back up the raw database file before migrating.
rawBytes, err := os.ReadFile(d.profile.DSN)
if err != nil {
return errors.Wrap(err, "failed to read raw database file")
}
backupDBFilePath := fmt.Sprintf("%s/slash_%s_%d_backup.db", d.profile.Data, d.profile.Version, time.Now().Unix())
if err := os.WriteFile(backupDBFilePath, rawBytes, 0644); err != nil {
return errors.Wrap(err, "failed to write raw database file")
}
println("succeed to copy a backup database file")
println("start migrate")
for _, minorVersion := range minorVersionList {
normalizedVersion := minorVersion + ".0"
if common.IsVersionGreaterThan(normalizedVersion, latestMigrationHistoryVersion) && common.IsVersionGreaterOrEqualThan(currentVersion, normalizedVersion) {
println("applying migration for", normalizedVersion)
if err := d.applyMigrationForMinorVersion(ctx, minorVersion); err != nil {
return errors.Wrap(err, "failed to apply minor version migration")
}
}
}
println("end migrate")
// Remove the backup db file after the migration succeeds.
if err := os.Remove(backupDBFilePath); err != nil {
println(fmt.Sprintf("Failed to remove temp database file, err %v", err))
}
}
}
} else {
// In non-prod mode, apply the latest schema directly when the db file does not exist.
if _, err := os.Stat(d.profile.DSN); errors.Is(err, os.ErrNotExist) {
if err := d.applyLatestSchema(ctx); err != nil {
return errors.Wrap(err, "failed to apply latest schema")
}
// In demo mode, we should seed the database.
if d.profile.Mode == "demo" {
if err := d.Seed(ctx); err != nil {
return errors.Wrap(err, "failed to seed")
}
}
}
}
return nil
}
const (
latestSchemaFileName = "LATEST.sql"
)
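// applyLatestSchema executes the embedded LATEST.sql for the current mode (dev or prod).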
func (d *DB) applyLatestSchema(ctx context.Context) error {
schemaMode := "dev"
if d.profile.Mode == "prod" {
schemaMode = "prod"
}
latestSchemaPath := fmt.Sprintf("migration/%s/%s", schemaMode, latestSchemaFileName)
buf, err := migrationFS.ReadFile(latestSchemaPath)
if err != nil {
return errors.Wrapf(err, "failed to read latest schema %q", latestSchemaPath)
}
stmt := string(buf)
if err := d.execute(ctx, stmt); err != nil {
return errors.Wrapf(err, "migrate error: %s", stmt)
}
return nil
}
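// applyMigrationForMinorVersion executes every SQL file under migration/prod/<minorVersion> in order and upserts <minorVersion>.0 into migration_history.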
func (d *DB) applyMigrationForMinorVersion(ctx context.Context, minorVersion string) error {
filenames, err := fs.Glob(migrationFS, fmt.Sprintf("%s/%s/*.sql", "migration/prod", minorVersion))
if err != nil {
return errors.Wrap(err, "failed to read ddl files")
}
sort.Strings(filenames)
migrationStmt := ""
// Loop over all migration files and execute them in order.
for _, filename := range filenames {
buf, err := migrationFS.ReadFile(filename)
if err != nil {
return errors.Wrapf(err, "failed to read minor version migration file, filename=%s", filename)
}
stmt := string(buf)
migrationStmt += stmt
if err := d.execute(ctx, stmt); err != nil {
return errors.Wrapf(err, "migrate error: %s", stmt)
}
}
// Upsert the newest version to migration_history.
version := minorVersion + ".0"
if _, err = d.UpsertMigrationHistory(ctx, &store.UpsertMigrationHistory{
Version: version,
}); err != nil {
return errors.Wrapf(err, "failed to upsert migration history with version: %s", version)
}
return nil
}
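// Seed executes the embedded seed SQL files in order; it is called in demo mode.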
func (d *DB) Seed(ctx context.Context) error {
filenames, err := fs.Glob(seedFS, fmt.Sprintf("%s/*.sql", "seed"))
if err != nil {
return errors.Wrap(err, "failed to read seed files")
}
sort.Strings(filenames)
// Loop over all seed files and execute them in order.
for _, filename := range filenames {
buf, err := seedFS.ReadFile(filename)
if err != nil {
return errors.Wrapf(err, "failed to read seed file, filename=%s", filename)
}
stmt := string(buf)
if err := d.execute(ctx, stmt); err != nil {
return errors.Wrapf(err, "seed error: %s", stmt)
}
}
return nil
}
// execute runs a single SQL statement within a transaction.
func (d *DB) execute(ctx context.Context, stmt string) error {
tx, err := d.db.Begin()
if err != nil {
return err
}
defer tx.Rollback()
if _, err := tx.ExecContext(ctx, stmt); err != nil {
return errors.Wrap(err, "failed to execute statement")
}
return tx.Commit()
}
// minorDirRegexp is a regular expression for minor version directory.
var minorDirRegexp = regexp.MustCompile(`^migration/prod/[0-9]+\.[0-9]+$`)
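// getMinorVersionList returns the minor-version directories embedded under migration/prod, sorted ascending.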
func getMinorVersionList() []string {
minorVersionList := []string{}
if err := fs.WalkDir(migrationFS, "migration", func(path string, file fs.DirEntry, err error) error {
if err != nil {
return err
}
if file.IsDir() && minorDirRegexp.MatchString(path) {
minorVersionList = append(minorVersionList, file.Name())
}
return nil
}); err != nil {
panic(err)
}
sort.Sort(common.SortVersion(minorVersionList))
return minorVersionList
}

@@ -1,34 +0,0 @@
INSERT INTO
user (
`id`,
`role`,
`email`,
`nickname`,
`password_hash`
)
VALUES
(
101,
'ADMIN',
'slash@yourselfhosted.com',
'Slasher',
'$2a$10$H8HBWGcG/hoePhFy5SiNKOHxMD6omIpyEEWbl/fIorFC814bXW.Ua'
);
INSERT INTO
user (
`id`,
`role`,
`email`,
`nickname`,
`password_hash`
)
VALUES
(
102,
'USER',
'steven@yourselfhosted.com',
'Steven',
-- raw password: secret
'$2a$14$ajq8Q7fbtFRQvXpdCq7Jcuy.Rx1h/L4J60Otx.gyNLbAYctGMJ9tK'
);

@@ -1,94 +0,0 @@
INSERT INTO
shortcut (
`id`,
`creator_id`,
`name`,
`link`,
`visibility`
)
VALUES
(
1,
101,
'discord',
'https://discord.gg/QZqUuUAhDV',
'PUBLIC'
);
INSERT INTO
shortcut (
`id`,
`creator_id`,
`name`,
`link`,
`visibility`,
`tag`,
`og_metadata`
)
VALUES
(
2,
101,
'ai-infra',
'https://star-history.com/blog/open-source-ai-infra-projects',
'PUBLIC',
'star-history ai',
'{"title":"Open Source AI Infra for Your Next Project","description":"Some open-source infra projects that can be directly used for your next project. 💡","image":"https://star-history.com/blog/assets/open-source-ai-infra-projects/banner.webp"}'
);
INSERT INTO
shortcut (
`id`,
`creator_id`,
`name`,
`link`,
`visibility`,
`tag`,
`og_metadata`
)
VALUES
(
3,
101,
'schema-change',
'https://www.bytebase.com/blog/how-to-handle-database-schema-change/#what-is-a-database-schema-change',
'PUBLIC',
'database article👍',
'{"title":"How to Handle Database Migration / Schema Change?","description":"A database schema is the structure of a database, which describes the relationships between the different tables and fields in the database. A database schema change, also known as schema migration, or simply migration refers to any alteration to this structure, such as adding a new table, modifying the data type of a field, or changing the relationships between tables.","image":"https://www.bytebase.com/_next/image/?url=%2Fcontent%2Fblog%2Fhow-to-handle-database-schema-change%2Fchange.webp\u0026w=2048\u0026q=75"}'
);
INSERT INTO
shortcut (
`id`,
`creator_id`,
`name`,
`link`,
`tag`,
`visibility`
)
VALUES
(
4,
101,
'sqlchat',
'https://www.sqlchat.ai',
'ai chatbot sql',
'WORKSPACE'
);
INSERT INTO
shortcut (
`id`,
`creator_id`,
`name`,
`link`,
`visibility`
)
VALUES
(
5,
102,
'stevenlgtm',
'https://github.com/boojack',
'PUBLIC'
);

@@ -1,20 +0,0 @@
INSERT INTO
collection (
`id`,
`creator_id`,
`name`,
`title`,
`description`,
`visibility`,
`shortcut_ids`
)
VALUES
(
1,
101,
'minecraft',
'Minecraft',
'My daily thoughts and ideas',
'PUBLIC',
'1,2,3,4,5'
);