feat: init project
store/db/db.go (new file, 258 lines)
@@ -0,0 +1,258 @@
package db

import (
	"context"
	"database/sql"
	"embed"
	"errors"
	"fmt"
	"io/fs"
	"os"
	"regexp"
	"sort"
	"time"

	"github.com/boojack/corgi/server/profile"
	"github.com/boojack/corgi/server/version"
)

//go:embed migration
var migrationFS embed.FS

//go:embed seed
var seedFS embed.FS

type DB struct {
	// sqlite db connection instance
	Db      *sql.DB
	profile *profile.Profile
}

// NewDB returns a new instance of DB associated with the given datasource name.
func NewDB(profile *profile.Profile) *DB {
	db := &DB{
		profile: profile,
	}
	return db
}

func (db *DB) Open(ctx context.Context) (err error) {
	// Ensure a DSN is set before attempting to open the database.
	if db.profile.DSN == "" {
		return fmt.Errorf("dsn required")
	}

	// Connect to the database.
	sqlDB, err := sql.Open("sqlite3", db.profile.DSN)
	if err != nil {
		return fmt.Errorf("failed to open db with dsn: %s, err: %w", db.profile.DSN, err)
	}

	db.Db = sqlDB
	// If mode is dev, we should migrate and seed the database.
	if db.profile.Mode == "dev" {
		if err := db.applyLatestSchema(ctx); err != nil {
			return fmt.Errorf("failed to apply latest schema: %w", err)
		}
		if err := db.seed(ctx); err != nil {
			return fmt.Errorf("failed to seed: %w", err)
		}
	} else {
		// If the db file does not exist, we should migrate the database.
		if _, err := os.Stat(db.profile.DSN); errors.Is(err, os.ErrNotExist) {
			err := db.applyLatestSchema(ctx)
			if err != nil {
				return fmt.Errorf("failed to apply latest schema: %w", err)
			}
		} else {
			err := db.createMigrationHistoryTable(ctx)
			if err != nil {
				return fmt.Errorf("failed to create migration_history table: %w", err)
			}

			currentVersion := version.GetCurrentVersion(db.profile.Mode)
			migrationHistory, err := db.FindMigrationHistory(ctx, &MigrationHistoryFind{})
			if err != nil {
				return err
			}
			if migrationHistory == nil {
				migrationHistory, err = db.UpsertMigrationHistory(ctx, &MigrationHistoryUpsert{
					Version: currentVersion,
				})
				if err != nil {
					return err
				}
			}

			if version.IsVersionGreaterThan(version.GetSchemaVersion(currentVersion), migrationHistory.Version) {
				minorVersionList := getMinorVersionList()

				// Back up the raw database file before migration.
				rawBytes, err := os.ReadFile(db.profile.DSN)
				if err != nil {
					return fmt.Errorf("failed to read raw database file, err: %w", err)
				}
				backupDBFilePath := fmt.Sprintf("%s/corgi_%s_%d_backup.db", db.profile.Data, db.profile.Version, time.Now().Unix())
				if err := os.WriteFile(backupDBFilePath, rawBytes, 0644); err != nil {
					return fmt.Errorf("failed to write raw database file, err: %w", err)
				}

				println("succeed to copy a backup database file")
				println("start migrate")
				for _, minorVersion := range minorVersionList {
					normalizedVersion := minorVersion + ".0"
					if version.IsVersionGreaterThan(normalizedVersion, migrationHistory.Version) && version.IsVersionGreaterOrEqualThan(currentVersion, normalizedVersion) {
						println("applying migration for", normalizedVersion)
						if err := db.applyMigrationForMinorVersion(ctx, minorVersion); err != nil {
							return fmt.Errorf("failed to apply minor version migration: %w", err)
						}
					}
				}

				println("end migrate")
				// Remove the backup db file after the migration succeeds.
				if err := os.Remove(backupDBFilePath); err != nil {
					println(fmt.Sprintf("Failed to remove temp database file, err %v", err))
				}
			}
		}
	}

	return err
}

const (
	latestSchemaFileName = "LATEST__SCHEMA.sql"
)

func (db *DB) applyLatestSchema(ctx context.Context) error {
	latestSchemaPath := fmt.Sprintf("%s/%s/%s", "migration", db.profile.Mode, latestSchemaFileName)
	buf, err := migrationFS.ReadFile(latestSchemaPath)
	if err != nil {
		return fmt.Errorf("failed to read latest schema %q, error %w", latestSchemaPath, err)
	}
	stmt := string(buf)
	if err := db.execute(ctx, stmt); err != nil {
		return fmt.Errorf("migrate error: statement:%s err=%w", stmt, err)
	}
	return nil
}

func (db *DB) applyMigrationForMinorVersion(ctx context.Context, minorVersion string) error {
	filenames, err := fs.Glob(migrationFS, fmt.Sprintf("%s/%s/*.sql", "migration/prod", minorVersion))
	if err != nil {
		return err
	}

	sort.Strings(filenames)
	migrationStmt := ""

	// Loop over all migration files and execute them in order.
	for _, filename := range filenames {
		buf, err := migrationFS.ReadFile(filename)
		if err != nil {
			return fmt.Errorf("failed to read minor version migration file, filename=%s err=%w", filename, err)
		}
		stmt := string(buf)
		migrationStmt += stmt
		if err := db.execute(ctx, stmt); err != nil {
			return fmt.Errorf("migrate error: statement:%s err=%w", stmt, err)
		}
	}

	tx, err := db.Db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback()

	// Upsert the newest version to migration_history.
	if _, err = upsertMigrationHistory(ctx, tx, &MigrationHistoryUpsert{
		Version: minorVersion + ".0",
	}); err != nil {
		return err
	}

	return tx.Commit()
}

func (db *DB) seed(ctx context.Context) error {
	filenames, err := fs.Glob(seedFS, fmt.Sprintf("%s/*.sql", "seed"))
	if err != nil {
		return err
	}

	sort.Strings(filenames)

	// Loop over all seed files and execute them in order.
	for _, filename := range filenames {
		buf, err := seedFS.ReadFile(filename)
		if err != nil {
			return fmt.Errorf("failed to read seed file, filename=%s err=%w", filename, err)
		}
		stmt := string(buf)
		if err := db.execute(ctx, stmt); err != nil {
			return fmt.Errorf("seed error: statement:%s err=%w", stmt, err)
		}
	}
	return nil
}

// execute runs a single SQL statement within a transaction.
func (db *DB) execute(ctx context.Context, stmt string) error {
	tx, err := db.Db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback()

	if _, err := tx.ExecContext(ctx, stmt); err != nil {
		return err
	}

	return tx.Commit()
}

// minorDirRegexp is a regular expression for minor version directories.
var minorDirRegexp = regexp.MustCompile(`^migration/prod/[0-9]+\.[0-9]+$`)

func getMinorVersionList() []string {
	minorVersionList := []string{}

	if err := fs.WalkDir(migrationFS, "migration", func(path string, file fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if file.IsDir() && minorDirRegexp.MatchString(path) {
			minorVersionList = append(minorVersionList, file.Name())
		}

		return nil
	}); err != nil {
		panic(err)
	}

	sort.Strings(minorVersionList)

	return minorVersionList
}

// createMigrationHistoryTable creates the migration_history table if it doesn't exist.
func (db *DB) createMigrationHistoryTable(ctx context.Context) error {
	tx, err := db.Db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback()

	if err := createTable(ctx, tx, `
		CREATE TABLE IF NOT EXISTS migration_history (
			version TEXT NOT NULL PRIMARY KEY,
			created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now'))
		);
	`); err != nil {
		return err
	}

	return tx.Commit()
}
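Not part of this commit, but for orientation: a minimal sketch of how this store would presumably be wired up at startup. It assumes the module path github.com/boojack/corgi, a profile.Profile exposing the Mode/DSN/Data fields referenced above, and a blank-imported SQLite driver (sql.Open("sqlite3", ...) needs a registered driver; mattn/go-sqlite3 is one common choice).

package main

import (
	"context"
	"log"

	"github.com/boojack/corgi/server/profile"
	"github.com/boojack/corgi/store/db" // assumed import path for the package above

	// A registered "sqlite3" driver is assumed; mattn/go-sqlite3 is one common choice.
	_ "github.com/mattn/go-sqlite3"
)

func main() {
	// Hypothetical profile values; the real Profile is resolved elsewhere in the project.
	p := &profile.Profile{
		Mode: "dev",
		DSN:  "corgi_dev.db",
		Data: ".",
	}

	ctx := context.Background()
	store := db.NewDB(p)
	if err := store.Open(ctx); err != nil {
		log.Fatalf("failed to open db: %v", err)
	}
	defer store.Db.Close()

	// In dev mode Open has applied migration/dev/LATEST__SCHEMA.sql and run the
	// seed scripts, so store.Db is ready for queries here.
}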
store/db/migration/dev/LATEST__SCHEMA.sql (new file, 159 lines)
@@ -0,0 +1,159 @@
-- drop all tables
DROP TABLE IF EXISTS `activity`;
DROP TABLE IF EXISTS `shortcut_organizer`;
DROP TABLE IF EXISTS `shortcut`;
DROP TABLE IF EXISTS `workspace_user`;
DROP TABLE IF EXISTS `user_setting`;
DROP TABLE IF EXISTS `user`;
DROP TABLE IF EXISTS `workspace_setting`;
DROP TABLE IF EXISTS `workspace`;

-- workspace
CREATE TABLE workspace (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  creator_id INTEGER NOT NULL,
  created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
  updated_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
  row_status TEXT NOT NULL CHECK (row_status IN ('NORMAL', 'ARCHIVED')) DEFAULT 'NORMAL',
  name TEXT NOT NULL UNIQUE,
  description TEXT NOT NULL DEFAULT '',
  FOREIGN KEY(creator_id) REFERENCES user(id) ON DELETE CASCADE
);

INSERT INTO
  sqlite_sequence (name, seq)
VALUES
  ('workspace', 10);

CREATE TRIGGER IF NOT EXISTS `trigger_update_workspace_modification_time`
AFTER
UPDATE
  ON `workspace` FOR EACH ROW BEGIN
UPDATE
  `workspace`
SET
  updated_ts = (strftime('%s', 'now'))
WHERE
  rowid = old.rowid;
END;

-- workspace_setting
CREATE TABLE workspace_setting (
  workspace_id INTEGER NOT NULL,
  key TEXT NOT NULL,
  value TEXT NOT NULL,
  FOREIGN KEY(workspace_id) REFERENCES workspace(id) ON DELETE CASCADE
);

CREATE UNIQUE INDEX workspace_setting_key_workspace_id_index ON workspace_setting(key, workspace_id);

-- user
CREATE TABLE user (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
  updated_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
  row_status TEXT NOT NULL CHECK (row_status IN ('NORMAL', 'ARCHIVED')) DEFAULT 'NORMAL',
  email TEXT NOT NULL UNIQUE,
  name TEXT NOT NULL,
  password_hash TEXT NOT NULL
);

INSERT INTO
  sqlite_sequence (name, seq)
VALUES
  ('user', 100);

CREATE TRIGGER IF NOT EXISTS `trigger_update_user_modification_time`
AFTER
UPDATE
  ON `user` FOR EACH ROW BEGIN
UPDATE
  `user`
SET
  updated_ts = (strftime('%s', 'now'))
WHERE
  rowid = old.rowid;
END;

-- user_setting
CREATE TABLE user_setting (
  user_id INTEGER NOT NULL,
  key TEXT NOT NULL,
  value TEXT NOT NULL,
  FOREIGN KEY(user_id) REFERENCES user(id) ON DELETE CASCADE
);

CREATE UNIQUE INDEX user_setting_key_user_id_index ON user_setting(key, user_id);

-- workspace_user
CREATE TABLE workspace_user (
  workspace_id INTEGER NOT NULL,
  user_id INTEGER NOT NULL,
  role TEXT NOT NULL CHECK (role IN ('ADMIN', 'USER')) DEFAULT 'USER',
  created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
  updated_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
  FOREIGN KEY(workspace_id) REFERENCES workspace(id) ON DELETE CASCADE,
  FOREIGN KEY(user_id) REFERENCES user(id) ON DELETE CASCADE
);

CREATE UNIQUE INDEX workspace_user_workspace_id_user_id_index ON workspace_user(workspace_id, user_id);

-- shortcut
CREATE TABLE shortcut (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  creator_id INTEGER NOT NULL,
  created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
  updated_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
  row_status TEXT NOT NULL CHECK (row_status IN ('NORMAL', 'ARCHIVED')) DEFAULT 'NORMAL',
  workspace_id INTEGER NOT NULL,
  name TEXT NOT NULL,
  link TEXT NOT NULL DEFAULT '',
  visibility TEXT NOT NULL CHECK (visibility IN ('PRIVATE', 'WORKSPACE')) DEFAULT 'PRIVATE',
  FOREIGN KEY(creator_id) REFERENCES user(id) ON DELETE CASCADE,
  FOREIGN KEY(workspace_id) REFERENCES workspace(id) ON DELETE CASCADE
);

CREATE UNIQUE INDEX shortcut_workspace_id_name_index ON shortcut(workspace_id, name);

INSERT INTO
  sqlite_sequence (name, seq)
VALUES
  ('shortcut', 1000);

CREATE TRIGGER IF NOT EXISTS `trigger_update_shortcut_modification_time`
AFTER
UPDATE
  ON `shortcut` FOR EACH ROW BEGIN
UPDATE
  `shortcut`
SET
  updated_ts = (strftime('%s', 'now'))
WHERE
  rowid = old.rowid;
END;

-- shortcut_organizer
CREATE TABLE shortcut_organizer (
  shortcut_id INTEGER NOT NULL,
  user_id INTEGER NOT NULL,
  pinned INTEGER NOT NULL CHECK (pinned IN (0, 1)) DEFAULT 0,
  FOREIGN KEY(shortcut_id) REFERENCES shortcut(id) ON DELETE CASCADE,
  FOREIGN KEY(user_id) REFERENCES user(id) ON DELETE CASCADE,
  UNIQUE(shortcut_id, user_id)
);

-- activity
CREATE TABLE activity (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  creator_id INTEGER NOT NULL,
  created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
  type TEXT NOT NULL,
  comment TEXT NOT NULL,
  payload TEXT NOT NULL,
  FOREIGN KEY(creator_id) REFERENCES user(id) ON DELETE CASCADE
);

INSERT INTO
  sqlite_sequence (name, seq)
VALUES
  ('activity', 10000);
store/db/migration/prod/LATEST__SCHEMA.sql (new file, 157 lines)
@@ -0,0 +1,157 @@
-- drop all tables
DROP TABLE IF EXISTS `activity`;
DROP TABLE IF EXISTS `shortcut_organizer`;
DROP TABLE IF EXISTS `shortcut`;
DROP TABLE IF EXISTS `workspace_user`;
DROP TABLE IF EXISTS `user_setting`;
DROP TABLE IF EXISTS `user`;
DROP TABLE IF EXISTS `workspace_setting`;
DROP TABLE IF EXISTS `workspace`;

-- workspace
CREATE TABLE workspace (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
  updated_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
  row_status TEXT NOT NULL CHECK (row_status IN ('NORMAL', 'ARCHIVED')) DEFAULT 'NORMAL',
  name TEXT NOT NULL UNIQUE,
  description TEXT NOT NULL DEFAULT ''
);

INSERT INTO
  sqlite_sequence (name, seq)
VALUES
  ('workspace', 10);

CREATE TRIGGER IF NOT EXISTS `trigger_update_workspace_modification_time`
AFTER
UPDATE
  ON `workspace` FOR EACH ROW BEGIN
UPDATE
  `workspace`
SET
  updated_ts = (strftime('%s', 'now'))
WHERE
  rowid = old.rowid;
END;

-- workspace_setting
CREATE TABLE workspace_setting (
  workspace_id INTEGER NOT NULL,
  key TEXT NOT NULL,
  value TEXT NOT NULL,
  FOREIGN KEY(workspace_id) REFERENCES workspace(id) ON DELETE CASCADE
);

CREATE UNIQUE INDEX workspace_setting_key_workspace_id_index ON workspace_setting(key, workspace_id);

-- user
CREATE TABLE user (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
  updated_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
  row_status TEXT NOT NULL CHECK (row_status IN ('NORMAL', 'ARCHIVED')) DEFAULT 'NORMAL',
  email TEXT NOT NULL UNIQUE,
  name TEXT NOT NULL,
  password_hash TEXT NOT NULL
);

INSERT INTO
  sqlite_sequence (name, seq)
VALUES
  ('user', 100);

CREATE TRIGGER IF NOT EXISTS `trigger_update_user_modification_time`
AFTER
UPDATE
  ON `user` FOR EACH ROW BEGIN
UPDATE
  `user`
SET
  updated_ts = (strftime('%s', 'now'))
WHERE
  rowid = old.rowid;
END;

-- user_setting
CREATE TABLE user_setting (
  user_id INTEGER NOT NULL,
  key TEXT NOT NULL,
  value TEXT NOT NULL,
  FOREIGN KEY(user_id) REFERENCES user(id) ON DELETE CASCADE
);

CREATE UNIQUE INDEX user_setting_key_user_id_index ON user_setting(key, user_id);

-- workspace_user
CREATE TABLE workspace_user (
  workspace_id INTEGER NOT NULL,
  user_id INTEGER NOT NULL,
  role TEXT NOT NULL CHECK (role IN ('ADMIN', 'USER')) DEFAULT 'USER',
  created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
  updated_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
  FOREIGN KEY(workspace_id) REFERENCES workspace(id) ON DELETE CASCADE,
  FOREIGN KEY(user_id) REFERENCES user(id) ON DELETE CASCADE
);

CREATE UNIQUE INDEX workspace_user_workspace_id_user_id_index ON workspace_user(workspace_id, user_id);

-- shortcut
CREATE TABLE shortcut (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  creator_id INTEGER NOT NULL,
  created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
  updated_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
  row_status TEXT NOT NULL CHECK (row_status IN ('NORMAL', 'ARCHIVED')) DEFAULT 'NORMAL',
  workspace_id INTEGER NOT NULL,
  name TEXT NOT NULL,
  link TEXT NOT NULL DEFAULT '',
  visibility TEXT NOT NULL CHECK (visibility IN ('PRIVATE', 'WORKSPACE')) DEFAULT 'PRIVATE',
  FOREIGN KEY(creator_id) REFERENCES user(id) ON DELETE CASCADE,
  FOREIGN KEY(workspace_id) REFERENCES workspace(id) ON DELETE CASCADE
);

CREATE UNIQUE INDEX shortcut_workspace_id_name_index ON shortcut(workspace_id, name);

INSERT INTO
  sqlite_sequence (name, seq)
VALUES
  ('shortcut', 1000);

CREATE TRIGGER IF NOT EXISTS `trigger_update_shortcut_modification_time`
AFTER
UPDATE
  ON `shortcut` FOR EACH ROW BEGIN
UPDATE
  `shortcut`
SET
  updated_ts = (strftime('%s', 'now'))
WHERE
  rowid = old.rowid;
END;

-- shortcut_organizer
CREATE TABLE shortcut_organizer (
  shortcut_id INTEGER NOT NULL,
  user_id INTEGER NOT NULL,
  pinned INTEGER NOT NULL CHECK (pinned IN (0, 1)) DEFAULT 0,
  FOREIGN KEY(shortcut_id) REFERENCES shortcut(id) ON DELETE CASCADE,
  FOREIGN KEY(user_id) REFERENCES user(id) ON DELETE CASCADE,
  UNIQUE(shortcut_id, user_id)
);

-- activity
CREATE TABLE activity (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  creator_id INTEGER NOT NULL,
  created_ts BIGINT NOT NULL DEFAULT (strftime('%s', 'now')),
  type TEXT NOT NULL,
  comment TEXT NOT NULL,
  payload TEXT NOT NULL,
  FOREIGN KEY(creator_id) REFERENCES user(id) ON DELETE CASCADE
);

INSERT INTO
  sqlite_sequence (name, seq)
VALUES
  ('activity', 10000);
store/db/migration_history.go (new file, 134 lines)
@@ -0,0 +1,134 @@
package db

import (
	"context"
	"database/sql"
	"strings"
)

type MigrationHistory struct {
	Version   string
	CreatedTs int64
}

type MigrationHistoryUpsert struct {
	Version string
}

type MigrationHistoryFind struct {
	Version *string
}

func (db *DB) FindMigrationHistory(ctx context.Context, find *MigrationHistoryFind) (*MigrationHistory, error) {
	tx, err := db.Db.Begin()
	if err != nil {
		return nil, err
	}
	defer tx.Rollback()

	list, err := findMigrationHistoryList(ctx, tx, find)
	if err != nil {
		return nil, err
	}

	if len(list) == 0 {
		return nil, nil
	}

	migrationHistory := list[0]
	return migrationHistory, nil
}

func (db *DB) UpsertMigrationHistory(ctx context.Context, upsert *MigrationHistoryUpsert) (*MigrationHistory, error) {
	tx, err := db.Db.Begin()
	if err != nil {
		return nil, err
	}
	defer tx.Rollback()

	migrationHistory, err := upsertMigrationHistory(ctx, tx, upsert)
	if err != nil {
		return nil, err
	}

	if err := tx.Commit(); err != nil {
		return nil, err
	}

	return migrationHistory, nil
}

func findMigrationHistoryList(ctx context.Context, tx *sql.Tx, find *MigrationHistoryFind) ([]*MigrationHistory, error) {
	where, args := []string{"1 = 1"}, []interface{}{}

	if v := find.Version; v != nil {
		where, args = append(where, "version = ?"), append(args, *v)
	}

	query := `
		SELECT
			version,
			created_ts
		FROM
			migration_history
		WHERE ` + strings.Join(where, " AND ") + `
		ORDER BY version DESC
	`
	rows, err := tx.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	migrationHistoryList := make([]*MigrationHistory, 0)
	for rows.Next() {
		var migrationHistory MigrationHistory
		if err := rows.Scan(
			&migrationHistory.Version,
			&migrationHistory.CreatedTs,
		); err != nil {
			return nil, err
		}

		migrationHistoryList = append(migrationHistoryList, &migrationHistory)
	}

	if err := rows.Err(); err != nil {
		return nil, err
	}

	return migrationHistoryList, nil
}

func upsertMigrationHistory(ctx context.Context, tx *sql.Tx, upsert *MigrationHistoryUpsert) (*MigrationHistory, error) {
	query := `
		INSERT INTO migration_history (
			version
		)
		VALUES (?)
		ON CONFLICT(version) DO UPDATE
		SET
			version=EXCLUDED.version
		RETURNING version, created_ts
	`
	row, err := tx.QueryContext(ctx, query, upsert.Version)
	if err != nil {
		return nil, err
	}
	defer row.Close()

	row.Next()
	var migrationHistory MigrationHistory
	if err := row.Scan(
		&migrationHistory.Version,
		&migrationHistory.CreatedTs,
	); err != nil {
		return nil, err
	}

	if err := row.Err(); err != nil {
		return nil, err
	}

	return &migrationHistory, nil
}
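A sketch of how the upsert behaves, under the assumption that the migration_history table already exists (Open creates it only when it reopens an existing database outside dev mode). Because the ON CONFLICT(version) DO UPDATE clause only rewrites the version column, re-recording a version keeps the original created_ts, and RETURNING hands back the stored row. The helper below and its package are hypothetical, not part of this commit.

package example

import (
	"context"
	"fmt"

	"github.com/boojack/corgi/store/db" // assumed import path
)

// recordVersion is a hypothetical helper demonstrating the upsert semantics.
func recordVersion(ctx context.Context, store *db.DB, version string) error {
	first, err := store.UpsertMigrationHistory(ctx, &db.MigrationHistoryUpsert{Version: version})
	if err != nil {
		return err
	}

	// Upserting the same version again is effectively a no-op: the row count
	// stays at one and the first insert's created_ts is preserved.
	again, err := store.UpsertMigrationHistory(ctx, &db.MigrationHistoryUpsert{Version: version})
	if err != nil {
		return err
	}
	fmt.Println(first.CreatedTs == again.CreatedTs) // true

	// An empty filter returns the head of the version-descending list, which
	// Open compares against the current schema version.
	latest, err := store.FindMigrationHistory(ctx, &db.MigrationHistoryFind{})
	if err != nil {
		return err
	}
	fmt.Println("latest recorded version:", latest.Version)
	return nil
}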
store/db/seed/10000__reset.sql (new file, 8 lines)
@@ -0,0 +1,8 @@
DELETE FROM activity;
DELETE FROM shortcut_organizer;
DELETE FROM shortcut;
DELETE FROM workspace_user;
DELETE FROM user_setting;
DELETE FROM user;
DELETE FROM workspace_setting;
DELETE FROM workspace;
store/db/table.go (new file, 65 lines)
@@ -0,0 +1,65 @@
package db

import (
	"context"
	"database/sql"
	"strings"
)

type Table struct {
	Name string
	SQL  string
}

//lint:ignore U1000 Ignore unused function temporarily for debugging
//nolint:all
func findTable(ctx context.Context, tx *sql.Tx, tableName string) (*Table, error) {
	where, args := []string{"1 = 1"}, []interface{}{}

	where, args = append(where, "type = ?"), append(args, "table")
	where, args = append(where, "name = ?"), append(args, tableName)

	query := `
		SELECT
			tbl_name,
			sql
		FROM sqlite_schema
		WHERE ` + strings.Join(where, " AND ")
	rows, err := tx.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	tableList := make([]*Table, 0)
	for rows.Next() {
		var table Table
		if err := rows.Scan(
			&table.Name,
			&table.SQL,
		); err != nil {
			return nil, err
		}

		tableList = append(tableList, &table)
	}

	if err := rows.Err(); err != nil {
		return nil, err
	}

	if len(tableList) == 0 {
		return nil, nil
	} else {
		return tableList[0], nil
	}
}

func createTable(ctx context.Context, tx *sql.Tx, stmt string) error {
	_, err := tx.ExecContext(ctx, stmt)
	if err != nil {
		return err
	}

	return nil
}
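findTable is currently marked unused and kept for debugging; it reads the sqlite_schema catalog (the modern alias for sqlite_master, available since SQLite 3.33). A rough sketch of the same idea from outside the package, assuming only an open *sql.DB; the helper name and package are illustrative, not part of this commit.

package example

import (
	"context"
	"database/sql"
)

// tableExists mirrors what findTable does internally: ask the sqlite_schema
// catalog whether a table with the given name is present.
func tableExists(ctx context.Context, sqlDB *sql.DB, name string) (bool, error) {
	var count int
	err := sqlDB.QueryRowContext(ctx,
		"SELECT COUNT(1) FROM sqlite_schema WHERE type = ? AND name = ?",
		"table", name,
	).Scan(&count)
	if err != nil {
		return false, err
	}
	return count > 0, nil
}

createTable, by contrast, is the helper that createMigrationHistoryTable in db.go uses to bootstrap the migration_history table.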