summaryrefslogtreecommitdiff
path: root/internal
diff options
context:
space:
mode:
authorGrailFinder <wohilas@gmail.com>2024-04-07 09:53:04 +0300
committerGrailFinder <wohilas@gmail.com>2024-04-07 09:53:04 +0300
commit4a42a4465c8ff5496229f8883b9d1532bf7c9cab (patch)
treee0cbccdfe6a770cbbdecee2e2176ee697997229e /internal
parentb662265234d70d648a7adce74f3d9cc245456d3a (diff)
Feat: db connection and migrations
Diffstat (limited to 'internal')
-rw-r--r--internal/database/migrations/001_init.down.sql4
-rw-r--r--internal/database/migrations/001_init.up.sql24
-rw-r--r--internal/database/migrations/init.go3
-rw-r--r--internal/database/migrations/migrations.bindata.go267
-rw-r--r--internal/database/repos/main.go15
-rw-r--r--internal/database/sql/main.go141
-rw-r--r--internal/handlers/main.go25
-rw-r--r--internal/models/models.go4
-rw-r--r--internal/server/main.go8
-rw-r--r--internal/service/main.go0
10 files changed, 477 insertions, 14 deletions
diff --git a/internal/database/migrations/001_init.down.sql b/internal/database/migrations/001_init.down.sql
new file mode 100644
index 0000000..4b46d47
--- /dev/null
+++ b/internal/database/migrations/001_init.down.sql
@@ -0,0 +1,4 @@
+BEGIN;
+-- Drop in reverse dependency order: action carries fk_user_score into
+-- user_score, so dropping user_score first would fail with a dependency
+-- error while action still exists.
+DROP TABLE IF EXISTS action;
+DROP TABLE IF EXISTS user_score;
+COMMIT;
diff --git a/internal/database/migrations/001_init.up.sql b/internal/database/migrations/001_init.up.sql
new file mode 100644
index 0000000..7edbd96
--- /dev/null
+++ b/internal/database/migrations/001_init.up.sql
@@ -0,0 +1,24 @@
+BEGIN;
+-- Per-user score; username is the natural key referenced by action.
+CREATE TABLE user_score (
+ id INT GENERATED BY DEFAULT AS IDENTITY,
+ username TEXT UNIQUE NOT NULL,
+ -- score "burns" one day after creation by default
+ burn_time TIMESTAMP NOT NULL DEFAULT NOW() + interval '1 day',
+ score SMALLINT NOT NULL,
+ created_at timestamp NOT NULL DEFAULT NOW()
+);
+
+-- Actions a user can log; (username, name) is unique per user.
+CREATE TABLE action (
+ id INT GENERATED BY DEFAULT AS IDENTITY,
+ name TEXT NOT NULL,
+ -- was SMALLSERIAL NOT NULL DEFAULT 1: SERIAL expands to its own
+ -- nextval() default, so an explicit DEFAULT raises "multiple default
+ -- values specified for column". A plain SMALLINT is the intent here.
+ magnitude SMALLINT NOT NULL DEFAULT 1,
+ repeatable BOOLEAN NOT NULL DEFAULT FALSE,
+ type TEXT NOT NULL,
+ done BOOLEAN NOT NULL DEFAULT FALSE,
+ username TEXT NOT NULL,
+ created_at timestamp NOT NULL DEFAULT NOW(),
+ UNIQUE(username, name),
+ CONSTRAINT fk_user_score
+ FOREIGN KEY(username)
+ REFERENCES user_score(username)
+);
+COMMIT;
diff --git a/internal/database/migrations/init.go b/internal/database/migrations/init.go
new file mode 100644
index 0000000..2b5a212
--- /dev/null
+++ b/internal/database/migrations/init.go
@@ -0,0 +1,3 @@
+// Package migrations embeds the SQL migration files via go-bindata
+// (see migrations.bindata.go, regenerated by the directive below).
+package migrations
+
+//go:generate go-bindata -o ./migrations.bindata.go -pkg migrations -ignore=\\*.go ./...
diff --git a/internal/database/migrations/migrations.bindata.go b/internal/database/migrations/migrations.bindata.go
new file mode 100644
index 0000000..8197c0a
--- /dev/null
+++ b/internal/database/migrations/migrations.bindata.go
@@ -0,0 +1,267 @@
+// Code generated for package migrations by go-bindata DO NOT EDIT. (@generated)
+// sources:
+// 001_init.down.sql
+// 001_init.up.sql
+package migrations
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+// bindataRead gunzips the embedded asset data and returns the raw bytes.
+// name is used only to label errors.
+func bindataRead(data []byte, name string) ([]byte, error) {
+ gz, err := gzip.NewReader(bytes.NewBuffer(data))
+ if err != nil {
+ return nil, fmt.Errorf("Read %q: %v", name, err)
+ }
+
+ var buf bytes.Buffer
+ _, err = io.Copy(&buf, gz)
+ clErr := gz.Close()
+
+ if err != nil {
+ return nil, fmt.Errorf("Read %q: %v", name, err)
+ }
+ if clErr != nil {
+ // BUG in the generated code: it returned `err` here, which is
+ // provably nil at this point, silently masking the close error.
+ // NOTE(review): this file is go-bindata output — the real fix is to
+ // regenerate with a fixed generator, or this edit is lost on regen.
+ return nil, clErr
+ }
+
+ return buf.Bytes(), nil
+}
+
+// asset pairs an embedded file's raw bytes with synthetic file metadata.
+type asset struct {
+ bytes []byte
+ info os.FileInfo
+}
+
+// bindataFileInfo implements os.FileInfo for an embedded asset.
+type bindataFileInfo struct {
+ name string
+ size int64
+ mode os.FileMode
+ modTime time.Time
+}
+
+// Name returns the file name.
+func (fi bindataFileInfo) Name() string {
+ return fi.name
+}
+
+// Size returns the file size in bytes.
+func (fi bindataFileInfo) Size() int64 {
+ return fi.size
+}
+
+// Mode returns the file mode bits.
+func (fi bindataFileInfo) Mode() os.FileMode {
+ return fi.mode
+}
+
+// ModTime returns the file modification time.
+func (fi bindataFileInfo) ModTime() time.Time {
+ return fi.modTime
+}
+
+// IsDir reports whether the entry describes a directory.
+func (fi bindataFileInfo) IsDir() bool {
+ return fi.mode&os.ModeDir != 0
+}
+
+// Sys returns the underlying data source; always nil for embedded assets.
+func (fi bindataFileInfo) Sys() interface{} {
+ return nil
+}
+
+var __001_initDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x72\x75\xf7\xf4\xb3\xe6\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xf0\x74\x53\x70\x8d\xf0\x0c\x0e\x09\x56\x28\x2d\x4e\x2d\x8a\x2f\x4e\xce\x2f\x4a\xc5\xa1\x20\x31\xb9\x24\x33\x3f\xcf\x9a\xcb\xd9\xdf\xd7\xd7\x33\xc4\x9a\x0b\x10\x00\x00\xff\xff\x8c\xa0\x7b\x43\x4d\x00\x00\x00")
+
+// _001_initDownSqlBytes decompresses and returns the contents of
+// 001_init.down.sql.
+func _001_initDownSqlBytes() ([]byte, error) {
+ return bindataRead(
+ __001_initDownSql,
+ "001_init.down.sql",
+ )
+}
+
+// _001_initDownSql wraps the down-migration bytes with synthetic
+// os.FileInfo metadata (size/mtime captured at generation time).
+func _001_initDownSql() (*asset, error) {
+ bytes, err := _001_initDownSqlBytes()
+ if err != nil {
+ return nil, err
+ }
+
+ info := bindataFileInfo{name: "001_init.down.sql", size: 77, mode: os.FileMode(420), modTime: time.Unix(1712472256, 0)}
+ a := &asset{bytes: bytes, info: info}
+ return a, nil
+}
+
+var __001_initUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x92\x31\x6f\xc2\x30\x10\x85\xf7\xfc\x8a\xb7\x01\x2a\x0b\x33\x93\x13\x2e\xc8\xaa\xe3\xb4\xb6\xa3\x96\x29\x32\xc4\xad\xa2\x42\x40\xc1\x54\xe2\xdf\x57\xc1\x14\x1a\xd1\x4a\x15\xf3\x7d\xf7\xee\xdd\xbd\x8b\x69\xce\xe5\x34\x4a\x14\x31\x43\x30\x2c\x16\x84\xc3\xde\xb5\xe5\x7e\xb5\x6d\x1d\x86\x11\x00\xd4\x15\xb8\x34\x98\x93\x24\xc5\x0c\xcd\x10\x2f\x30\xa3\x94\x15\xc2\x80\x69\xf0\x19\x49\xc3\xcd\x62\x7c\x82\xbb\xee\xc6\x6e\x1c\x0c\xbd\x1a\x14\x92\x3f\x17\x04\x99\x1b\xc8\x42\x88\x80\x2c\x0f\x6d\x53\xfa\xba\x63\x78\x46\xda\xb0\xec\xe9\x42\x5c\x84\x65\xfe\x32\x1c\xe1\x01\x75\xe3\x5d\xfb\x69\xd7\x18\x4c\x50\xd9\xe3\x20\x48\x04\x7b\x3a\x63\x42\x74\xd6\xfa\xfa\xab\xd6\x59\xef\xaa\xd2\x7a\x74\x53\xf6\xde\x6e\x76\x7f\x0c\x88\x46\xd3\xa8\xbf\xbd\x5d\xf9\x7a\xdb\xdc\xb3\xf9\x75\xeb\xbe\x9d\x8d\x7d\x6f\x6a\x7f\xa8\xce\x7e\x35\x29\xce\xc4\xad\x9f\x49\xa0\x5b\xb7\x73\xd6\xdb\xe5\xda\x21\xce\x73\x41\x4c\xde\xa2\x29\x13\x9a\x02\xee\x8f\xbb\x5f\x87\x56\xdb\xe6\x7f\x02\xfd\xbc\xee\x3e\x64\x68\x08\x71\x0f\xbf\x35\xc7\xa7\x9b\x9c\x6b\x49\x2e\xb5\x51\xac\xbb\xe7\xdb\x47\x79\x7d\xb2\x53\x11\x48\x73\x45\x7c\x2e\xf1\x48\x8b\x4b\xff\x08\xe7\xa2\xa2\x94\x14\xc9\x84\xf4\x8f\xef\xbc\x62\x5d\x8a\x49\x9e\x65\xdc\x4c\xa3\xaf\x00\x00\x00\xff\xff\x2f\x40\x52\xc4\xd2\x02\x00\x00")
+
+// _001_initUpSqlBytes decompresses and returns the contents of
+// 001_init.up.sql.
+func _001_initUpSqlBytes() ([]byte, error) {
+ return bindataRead(
+ __001_initUpSql,
+ "001_init.up.sql",
+ )
+}
+
+// _001_initUpSql wraps the up-migration bytes with synthetic
+// os.FileInfo metadata (size/mtime captured at generation time).
+func _001_initUpSql() (*asset, error) {
+ bytes, err := _001_initUpSqlBytes()
+ if err != nil {
+ return nil, err
+ }
+
+ info := bindataFileInfo{name: "001_init.up.sql", size: 722, mode: os.FileMode(420), modTime: time.Unix(1712472259, 0)}
+ a := &asset{bytes: bytes, info: info}
+ return a, nil
+}
+
+// Asset loads and returns the asset for the given name.
+// It returns an error if the asset could not be found or
+// could not be loaded.
+func Asset(name string) ([]byte, error) {
+ cannonicalName := strings.Replace(name, "\\", "/", -1) // normalize Windows path separators
+ if f, ok := _bindata[cannonicalName]; ok {
+ a, err := f()
+ if err != nil {
+ return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
+ }
+ return a.bytes, nil
+ }
+ return nil, fmt.Errorf("Asset %s not found", name)
+}
+
+// MustAsset is like Asset but panics when Asset would return an error.
+// It simplifies safe initialization of global variables.
+func MustAsset(name string) []byte {
+ a, err := Asset(name)
+ if err != nil {
+ panic("asset: Asset(" + name + "): " + err.Error())
+ }
+
+ return a
+}
+
+// AssetInfo loads and returns the asset info for the given name.
+// It returns an error if the asset could not be found or
+// could not be loaded.
+func AssetInfo(name string) (os.FileInfo, error) {
+ cannonicalName := strings.Replace(name, "\\", "/", -1)
+ if f, ok := _bindata[cannonicalName]; ok {
+ a, err := f()
+ if err != nil {
+ return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
+ }
+ return a.info, nil
+ }
+ return nil, fmt.Errorf("AssetInfo %s not found", name)
+}
+
+// AssetNames returns the names of the assets.
+// Note: map iteration order is random, so the result is unordered.
+func AssetNames() []string {
+ names := make([]string, 0, len(_bindata))
+ for name := range _bindata {
+ names = append(names, name)
+ }
+ return names
+}
+
+// _bindata is a table, holding each asset generator, mapped to its name.
+var _bindata = map[string]func() (*asset, error){
+ "001_init.down.sql": _001_initDownSql,
+ "001_init.up.sql": _001_initUpSql,
+}
+
+// AssetDir returns the file names below a certain
+// directory embedded in the file by go-bindata.
+// For example if you run go-bindata on data/... and data contains the
+// following hierarchy:
+// data/
+// foo.txt
+// img/
+// a.png
+// b.png
+// then AssetDir("data") would return []string{"foo.txt", "img"}
+// AssetDir("data/img") would return []string{"a.png", "b.png"}
+// AssetDir("foo.txt") and AssetDir("notexist") would return an error
+// AssetDir("") will return []string{"data"}.
+func AssetDir(name string) ([]string, error) {
+ node := _bintree
+ if len(name) != 0 {
+ cannonicalName := strings.Replace(name, "\\", "/", -1) // normalize Windows path separators
+ pathList := strings.Split(cannonicalName, "/")
+ for _, p := range pathList {
+ node = node.Children[p]
+ if node == nil {
+ return nil, fmt.Errorf("Asset %s not found", name)
+ }
+ }
+ }
+ // A non-nil Func means `name` resolved to a file, not a directory.
+ if node.Func != nil {
+ return nil, fmt.Errorf("Asset %s not found", name)
+ }
+ rv := make([]string, 0, len(node.Children))
+ for childName := range node.Children {
+ rv = append(rv, childName)
+ }
+ return rv, nil
+}
+
+// bintree is a node of the embedded-asset directory tree: either a file
+// (Func non-nil) or a directory (Children populated).
+type bintree struct {
+ Func func() (*asset, error)
+ Children map[string]*bintree
+}
+
+// _bintree is the root of the asset tree; both migrations live at top level.
+var _bintree = &bintree{nil, map[string]*bintree{
+ "001_init.down.sql": &bintree{_001_initDownSql, map[string]*bintree{}},
+ "001_init.up.sql": &bintree{_001_initUpSql, map[string]*bintree{}},
+}}
+
+// RestoreAsset restores an asset under the given directory,
+// recreating its mode and modification time from the embedded metadata.
+func RestoreAsset(dir, name string) error {
+ data, err := Asset(name)
+ if err != nil {
+ return err
+ }
+ info, err := AssetInfo(name)
+ if err != nil {
+ return err
+ }
+ err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
+ if err != nil {
+ return err
+ }
+ // NOTE(review): ioutil.WriteFile is deprecated in modern Go
+ // (os.WriteFile) — left as-is because this file is generated.
+ err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
+ if err != nil {
+ return err
+ }
+ err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// RestoreAssets restores an asset under the given directory recursively.
+func RestoreAssets(dir, name string) error {
+ children, err := AssetDir(name)
+ // File: AssetDir errors on file names, so fall back to a single restore.
+ if err != nil {
+ return RestoreAsset(dir, name)
+ }
+ // Dir: recurse into each child.
+ for _, child := range children {
+ err = RestoreAssets(dir, filepath.Join(name, child))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// _filePath joins dir and the slash-normalized asset name into an OS path.
+func _filePath(dir, name string) string {
+ cannonicalName := strings.Replace(name, "\\", "/", -1)
+ return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
+}
diff --git a/internal/database/repos/main.go b/internal/database/repos/main.go
new file mode 100644
index 0000000..e6f50be
--- /dev/null
+++ b/internal/database/repos/main.go
@@ -0,0 +1,15 @@
+// Package repos holds the database repository layer.
+package repos
+
+import (
+ "github.com/jmoiron/sqlx"
+)
+
+// Provider is the data-access entry point; repository methods hang off it
+// and share the single sqlx connection pool.
+type Provider struct {
+ db *sqlx.DB
+}
+
+// NewProvider wraps an already-opened sqlx connection in a Provider.
+// The caller retains ownership of conn and is responsible for closing it.
+func NewProvider(conn *sqlx.DB) *Provider {
+ return &Provider{
+ db: conn,
+ }
+}
diff --git a/internal/database/sql/main.go b/internal/database/sql/main.go
new file mode 100644
index 0000000..80d5f5c
--- /dev/null
+++ b/internal/database/sql/main.go
@@ -0,0 +1,141 @@
+package database
+
+import (
+ "apjournal/internal/database/migrations"
+ "os"
+ "time"
+
+ "github.com/jmoiron/sqlx"
+
+ // driver postgres for migrations
+ "log/slog"
+
+ "github.com/golang-migrate/migrate"
+ _ "github.com/golang-migrate/migrate/database/postgres"
+ bindata "github.com/golang-migrate/migrate/source/go_bindata"
+ _ "github.com/jackc/pgx/v5/stdlib" // register pgx driver
+ "github.com/pkg/errors"
+)
+
+// log is the package-level structured logger (JSON to stdout).
+var log = slog.New(slog.NewJSONHandler(os.Stdout, nil))
+
+// DB bundles the live sqlx connection with the URI it was opened from,
+// so PingRoutine can re-establish the connection after a failure.
+type DB struct {
+ Conn *sqlx.DB
+ URI string
+}
+
+// CloseAll closes every connection owned by the wrapper, stopping at the
+// first error. (The slice currently holds only d.Conn, but keeps the loop
+// shape for future additional pools.)
+func (d *DB) CloseAll() error {
+ for _, conn := range []*sqlx.DB{d.Conn} {
+ if err := closeConn(conn); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// closeConn closes a single connection. A nil connection is a no-op so
+// CloseAll is safe to call even when Init never succeeded.
+func closeConn(conn *sqlx.DB) error {
+ if conn == nil {
+ return nil
+ }
+ return conn.Close()
+}
+
+func Init(DBURI string) (*DB, error) {
+ var result DB
+ var err error
+ result.Conn, err = openDBConnection(DBURI, "pgx")
+ if err != nil {
+ return nil, err
+ }
+ result.URI = DBURI
+
+ if err := testConnection(result.Conn); err != nil {
+ return nil, err
+ }
+ return &result, nil
+}
+
+func InitWithMigrate(DBURI string, up bool) (*DB, error) {
+ var (
+ result DB
+ err error
+ )
+ result.Conn, err = openDBConnection(DBURI, "pgx")
+ if err != nil {
+ return nil, err
+ }
+ result.URI = DBURI
+
+ if err = testConnection(result.Conn); err != nil {
+ return nil, err
+ }
+ if err = result.Migrate(DBURI, up); err != nil {
+ return nil, err
+ }
+ return &result, nil
+}
+
+// openDBConnection opens a sqlx handle for the given driver and URI.
+// Note: sqlx.Open validates arguments but does not dial the database;
+// callers use testConnection to actually verify reachability.
+func openDBConnection(dbURI, driver string) (*sqlx.DB, error) {
+ return sqlx.Open(driver, dbURI)
+}
+
+// testConnection verifies that the database behind conn is reachable.
+func testConnection(conn *sqlx.DB) error {
+ if err := conn.Ping(); err != nil {
+ return errors.Wrap(err, "can't ping database")
+ }
+ return nil
+}
+
+func (db *DB) Migrate(url string, up bool) error {
+ source := bindata.Resource(migrations.AssetNames(), migrations.Asset)
+ driver, err := bindata.WithInstance(source)
+ if err != nil {
+ return errors.WithStack(errors.WithMessage(err,
+ "unable to instantiate driver from bindata"))
+ }
+ migration, err := migrate.NewWithSourceInstance("go-bindata",
+ driver, url)
+ if err != nil {
+ return errors.WithStack(errors.WithMessage(err,
+ "unable to start migration"))
+ }
+ if up {
+ if err = migration.Up(); err != nil && err.Error() != "no change" {
+ return errors.WithStack(errors.WithMessage(err,
+ "unable to migrate up"))
+ }
+ } else {
+ if err = migration.Down(); err != nil &&
+ err.Error() != "no change" {
+ return errors.WithStack(errors.WithMessage(err,
+ "unable to migrate down"))
+ }
+ }
+ return nil
+}
+
+// PingRoutine pings the database every interval and, on failure, closes
+// the stale handle and reconnects. It blocks the calling goroutine.
+// NOTE(review): nothing ever writes to `done`, so the loop currently runs
+// forever; a context.Context parameter would be the proper cancellation
+// mechanism, but that changes the signature — flagged, not changed.
+func (d *DB) PingRoutine(interval time.Duration) {
+ ticker := time.NewTicker(interval)
+ // Release the ticker if the routine ever returns.
+ defer ticker.Stop()
+ done := make(chan bool)
+
+ for {
+ select {
+ case <-done:
+ return
+ case t := <-ticker.C:
+ if err := testConnection(d.Conn); err != nil {
+ // typo fixed: "postrges" -> "postgres"
+ log.Error("failed to ping postgres db", "error", err, "ping_at", t)
+ // reconnect: drop the dead handle, then open a fresh one
+ if err := closeConn(d.Conn); err != nil {
+ log.Error("failed to close db connection", "error", err, "ping_at", t)
+ }
+ d.Conn, err = openDBConnection(d.URI, "pgx")
+ if err != nil {
+ log.Error("failed to reconnect", "error", err, "ping_at", t)
+ }
+ }
+ }
+ }
+}
diff --git a/internal/handlers/main.go b/internal/handlers/main.go
index dc8ba2b..efafcde 100644
--- a/internal/handlers/main.go
+++ b/internal/handlers/main.go
@@ -2,33 +2,37 @@ package handlers
import (
"apjournal/config"
+ "apjournal/internal/database/repos"
"apjournal/internal/models"
"html/template"
"log/slog"
"net/http"
"os"
"strconv"
+ "time"
+
+ "github.com/jmoiron/sqlx"
)
// Handlers structure
type Handlers struct {
- cfg config.Config
- // s *service.Service
- log *slog.Logger
+ cfg config.Config
+ log *slog.Logger
+ repo *repos.Provider
}
// NewHandlers constructor
func NewHandlers(
// cfg config.Config, s *service.Service, l *slog.Logger,
- cfg config.Config, l *slog.Logger,
+ cfg config.Config, l *slog.Logger, conn *sqlx.DB,
) *Handlers {
if l == nil {
l = slog.New(slog.NewJSONHandler(os.Stdout, nil))
}
h := &Handlers{
- cfg: cfg,
- // s: s,
- log: l,
+ cfg: cfg,
+ log: l,
+ repo: repos.NewProvider(conn),
}
return h
}
@@ -49,7 +53,8 @@ func (h *Handlers) MainPage(w http.ResponseWriter, r *http.Request) {
panic(err)
}
// tmpl.Execute(w, us)
- us.ID = "test"
+ us.Username = "test"
+ us.BurnTime = time.Now().Add(time.Duration(24) * time.Hour)
tmpl.ExecuteTemplate(w, "main", us)
}
@@ -88,16 +93,18 @@ func (h *Handlers) HandleForm(w http.ResponseWriter, r *http.Request) {
Type: at,
Repeatable: repeat,
}
+ // TODO: check that name + userid key is unique
us.Actions = append(us.Actions, act)
tmpl := template.Must(template.ParseGlob("components/*.html"))
// tmpl := template.Must(template.ParseFiles("components/index.html"))
// tmpl.Execute(w, us)
+ // TODO: redirect to the main page instead
tmpl.ExecuteTemplate(w, "main", us)
}
func (h *Handlers) HandleDoneAction(w http.ResponseWriter, r *http.Request) {
r.ParseForm()
- h.log.Info("got postform request", "payload", r.PostForm)
+ h.log.Info("got done request", "payload", r.PostForm)
actionName := r.PostFormValue("name")
h.log.Info("got postform request", "name", actionName)
// change counter of user score
diff --git a/internal/models/models.go b/internal/models/models.go
index 5d6dfa6..58191a2 100644
--- a/internal/models/models.go
+++ b/internal/models/models.go
@@ -11,16 +11,18 @@ const (
type (
UserScore struct {
- ID string
+ Username string
Actions []Action
BurnTime time.Time
Score int8
}
Action struct {
+ ID uint32
Name string
Magnitude uint8
Repeatable bool
Type ActionType
Done bool
+ Username string
}
)
diff --git a/internal/server/main.go b/internal/server/main.go
index 45f3d41..95d0369 100644
--- a/internal/server/main.go
+++ b/internal/server/main.go
@@ -9,6 +9,8 @@ import (
"os"
"os/signal"
"syscall"
+
+ "github.com/jmoiron/sqlx"
)
// Server interface
@@ -39,11 +41,9 @@ func (srv *server) stopOnSignal(close context.CancelFunc) {
os.Exit(0)
}
-func NewServer(cfg config.Config, log *slog.Logger) Server {
+func NewServer(cfg config.Config, log *slog.Logger, conn *sqlx.DB) Server {
ctx, close := context.WithCancel(context.Background())
- // s := service.NewService(ctx, cfg, store.MemCache)
- actions := handlers.NewHandlers(cfg, log)
-
+ actions := handlers.NewHandlers(cfg, log, conn)
return &server{
config: cfg,
actions: actions,
diff --git a/internal/service/main.go b/internal/service/main.go
deleted file mode 100644
index e69de29..0000000
--- a/internal/service/main.go
+++ /dev/null