fix project validation + improve CLI + hide debug logs by default + refactor deployers init

This commit is contained in:
rmanach 2025-04-09 15:25:21 +02:00
parent 480d2875c2
commit ab1f5a79c3
9 changed files with 283 additions and 159 deletions

View File

@ -124,6 +124,7 @@ linters:
- unparam
- unused
- errname
- reassign
# Disable specific linter
# https://golangci-lint.run/usage/linters/#disabled-by-default
disable:
@ -222,7 +223,8 @@ linters:
# All available settings of specific linters.
settings:
# See the dedicated "linters.settings" documentation section.
option: value
govet:
enable-all: true
# Defines a set of rules to ignore issues.
# It does not skip the analysis, and so does not ignore "typecheck" errors.
exclusions:
@ -297,9 +299,9 @@ formatters:
- goimports
- golines
# Formatters settings.
settings:
# See the dedicated "formatters.settings" documentation section.
option: value
# settings:
# # See the dedicated "formatters.settings" documentation section.
# option: value
# exclusions:
# # Mode of the generated files analysis.
# #
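Enabling `enable-all: true` under `govet` turns on every analyzer, including `fieldalignment`; that is why several structs later in this commit get their fields reordered or carry a `//nolint:govet` escape. A minimal standalone sketch (hypothetical struct names) of what that analyzer complains about:

```go
package main

import (
	"fmt"
	"unsafe"
)

// padded is what fieldalignment flags: on 64-bit platforms each bool
// forces padding around the int64, so the struct weighs 24 bytes.
type padded struct {
	a bool  // 1 byte + 7 bytes padding
	b int64 // 8 bytes
	c bool  // 1 byte + 7 bytes trailing padding
}

// packed orders fields largest-first, as the analyzer suggests: 16 bytes.
type packed struct {
	b int64 // 8 bytes
	a bool  // 1 byte
	c bool  // 1 byte + 6 bytes trailing padding
}

func main() {
	fmt.Println(unsafe.Sizeof(padded{}), unsafe.Sizeof(packed{})) // 24 16
}
```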

View File

@ -130,7 +130,7 @@ hmdeploy --path /path/my-project --destroy
```
## Next steps
* Improve the CLI arguments
* ~~Improve the CLI arguments~~
* ~~Destroy~~
* Post-install script
* Deal with bugs
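The CLI gains `--no-nginx` and `--debug` flags in this commit (see main.go below); a deployment that skips Nginx while showing debug logs would look like:

```
hmdeploy --path /path/my-project --no-nginx --debug
```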

View File

@ -15,8 +15,8 @@ import (
)
type SSHConn struct {
addr string
client *ssh.Client
addr string
}
var _ IConnection = (*SSHConn)(nil)
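The blank-identifier assertion above pins `SSHConn` to `IConnection` at compile time. The interface itself is not shown in this diff; judging from the calls the swarm deployer makes later (`conn.Execute`, `conn.CopyFile`), it presumably looks roughly like this (signatures guessed from the call sites):

```go
// Hedged reconstruction from call sites elsewhere in this commit;
// the real interface may declare more methods or other signatures.
type IConnection interface {
	Execute(cmd string) (string, error)
	CopyFile(srcPath, destPath string) error
}
```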

View File

@ -39,16 +39,16 @@ const (
// - `Deploy() error`: run shell command to deploy the archive remotely
// - `Build() error`: build the archive
// - `Clear() error`: clean all the resources locally and remotely
type deployer struct {
type deployer struct { //nolint:govet // keep logical field order
ctx context.Context
fnCancel context.CancelFunc
type_ DeployerType
project *models.Project
processing atomic.Bool
chDone chan struct{}
processing atomic.Bool
type_ DeployerType
errFlag error
project *models.Project
}
func newDeployer(ctx context.Context, type_ DeployerType, project *models.Project) *deployer {
@ -107,7 +107,7 @@ func (d *deployer) Done() <-chan struct{} {
for {
select {
case <-d.ctx.Done():
log.Warn().Str("deployer", string(d.type_)).Msg("context done catch")
log.Debug().Str("deployer", string(d.type_)).Msg("context done catch")
timeout := time.NewTicker(GracefulTimeout)
tick := time.NewTicker(time.Second)
@ -132,7 +132,7 @@ func (d *deployer) Done() <-chan struct{} {
}
}
case <-d.chDone:
log.Info().Str("deployer", string(d.type_)).Msg("terminated")
log.Debug().Str("deployer", string(d.type_)).Msg("terminated")
chDone <- struct{}{}
return
}
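The hunks above only show fragments of `Done()`; as a hedged sketch, the graceful-shutdown pattern they implement (assuming `processing` flips to false once the in-flight job finishes, and omitting the log calls) is roughly:

```go
func (d *deployer) Done() <-chan struct{} {
	chDone := make(chan struct{})
	go func() {
		select {
		case <-d.ctx.Done():
			// Context cancelled: wait up to GracefulTimeout for the
			// in-flight job to finish, polling once per second.
			timeout := time.NewTicker(GracefulTimeout)
			tick := time.NewTicker(time.Second)
			defer timeout.Stop()
			defer tick.Stop()
			for {
				select {
				case <-timeout.C:
					chDone <- struct{}{}
					return
				case <-tick.C:
					if !d.processing.Load() {
						chDone <- struct{}{}
						return
					}
				}
			}
		case <-d.chDone:
			chDone <- struct{}{}
			return
		}
	}()
	return chDone
}
```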

View File

@ -56,7 +56,7 @@ func (sd *SwarmDeployer) close() error {
}
func (sd *SwarmDeployer) clean() (err error) {
if err := os.Remove(sd.archivePath); err != nil {
if err = os.Remove(sd.archivePath); err != nil {
log.Err(err).Str("archive", sd.archivePath).Msg("unable to clean local swarm archive file")
}
_, err = sd.conn.Execute(
@ -109,11 +109,11 @@ func (sd *SwarmDeployer) Build() error {
defer os.Remove(tarFile) //nolint: errcheck // defered
// copy the file directly instead of adding it in the tar archive
log.Info().Str("image", tarFile).Msg("Transferring image...")
if err := sd.conn.CopyFile(tarFile, filepath.Base(tarFile)); err != nil {
return err
}
log.Info().Str("image", sd.project.ImageNames[idx]).Msg("image added to archive")
log.Info().Str("image", tarFile).Msg("image transferred with success")
}
if envFilePath := sd.project.Deps.EnvFile; envFilePath != "" {
@ -171,7 +171,7 @@ func (sd *SwarmDeployer) Deploy() error {
archiveDestPath := filepath.Base(sd.archivePath)
log.Info().
Str("archive", sd.archivePath).
Msg("archive built with success, tranfering to swarm for deployment...")
Msg("archive built with success, tranferring to swarm for deployment...")
if err := sd.conn.CopyFile(sd.archivePath, archiveDestPath); err != nil {
sd.setDone(err)
return err

main.go
View File

@ -15,6 +15,7 @@ import (
"gitea.thegux.fr/hmdeploy/docker"
"gitea.thegux.fr/hmdeploy/models"
"gitea.thegux.fr/hmdeploy/scheduler"
"gitea.thegux.fr/hmdeploy/utils"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
@ -37,8 +38,90 @@ var (
ErrGenerateTasksTree = errors.New("unable to generate tasks tree")
)
type Deployers struct {
nd *deployers.NginxDeployer
sd deployers.SwarmDeployer
destroy bool
}
// generateTasksTree returns a list of linked `Task` to submit.
//
// It's here that all tasks are linked to each other to define the deployment ordering.
func (d *Deployers) generateTasksTree() scheduler.Tasks {
tasks := []*scheduler.Task{}
if d.destroy {
swarmDestroy := scheduler.NewTask("swarm-destroy", d.sd.Destroy)
if d.nd != nil {
destroyTask := scheduler.NewTask("nginx-destroy", d.nd.Destroy, swarmDestroy)
tasks = append(tasks, destroyTask)
return tasks
}
tasks = append(tasks, swarmDestroy)
return tasks
}
var swarmTask *scheduler.Task
if d.nd != nil {
deployNginx := scheduler.NewTask("nginx-deploy", d.nd.Deploy)
swarmTask = scheduler.NewTask("swarm-deploy", d.sd.Deploy, deployNginx)
} else {
swarmTask = scheduler.NewTask("swarm-deploy", d.sd.Deploy)
}
swarmTask = scheduler.NewTask("swarm-build", d.sd.Build, swarmTask)
tasks = append(tasks, swarmTask)
if d.nd != nil {
tasks = append(tasks, scheduler.NewTask("nginx-build", d.nd.Build))
}
return tasks
}
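For the common deploy path with an Nginx config present, the chaining above yields swarm-build → swarm-deploy → nginx-deploy, with nginx-build as a second independent root. A hypothetical standalone equivalent using no-op jobs:

```go
noop := func() error { return nil }

deployNginx := scheduler.NewTask("nginx-deploy", noop)              // layer 3
deploySwarm := scheduler.NewTask("swarm-deploy", noop, deployNginx) // layer 2
buildSwarm := scheduler.NewTask("swarm-build", noop, deploySwarm)   // layer 1
buildNginx := scheduler.NewTask("nginx-build", noop)                // layer 1

// Only the roots are returned; the Next links drive layers 2 and 3.
tasks := scheduler.Tasks{buildSwarm, buildNginx}
_ = tasks
```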
// waitForCompletion waits for all deployers to complete.
//
// After completion, the deployers' `Clear` methods are executed to clean up all resources.
// Then the scheduler is stopped to terminate the engine.
func (d *Deployers) waitForCompletion(s *scheduler.Scheduler) error {
var wg sync.WaitGroup
deps := []deployers.IDeployer{&d.sd}
if d.nd != nil {
deps = append(deps, d.nd)
}
for idx := range deps {
if dep := deps[idx]; dep != nil {
wg.Add(1)
go func() {
defer wg.Done()
<-dep.Done()
}()
}
}
wg.Wait()
var errs []error
for idx := range deps {
if dep := deps[idx]; dep != nil {
errs = append(errs, dep.Error())
if !d.destroy {
s.Submit(scheduler.NewTask(string(dep.Type()), dep.Clear)) //nolint: errcheck // TODO
}
}
}
s.Stop()
<-s.Done()
return errors.Join(errs...)
}
type Option struct {
fnCancel context.CancelFunc
destroy bool
noNginx bool
}
type InitOption func(o *Option)
@ -49,8 +132,24 @@ func WithGlobalCancellation(fnCancel context.CancelFunc) InitOption {
}
}
func initLogger() {
func WithNoNginx() InitOption {
return func(o *Option) {
o.noNginx = true
}
}
func WithDestroy() InitOption {
return func(o *Option) {
o.destroy = true
}
}
func initLogger(debug bool) {
zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
zerolog.SetGlobalLevel(zerolog.InfoLevel)
if debug {
zerolog.SetGlobalLevel(zerolog.DebugLevel)
}
log.Logger = log.With().Caller().Logger().Output(zerolog.ConsoleWriter{Out: os.Stderr})
}
@ -89,137 +188,72 @@ func initDeployers(
hmmap *models.HMMap,
project *models.Project,
options ...InitOption,
) ([]deployers.IDeployer, error) {
) (Deployers, error) {
var opt Option
for _, o := range options {
o(&opt)
}
deps := Deployers{
destroy: opt.destroy,
}
swarmNet := hmmap.GetSwarmNetInfo()
if swarmNet == nil {
return nil, fmt.Errorf("%w, swarm net info does not exist", ErrNetInfoNotFound)
return deps, fmt.Errorf("%w, swarm net info does not exist", ErrNetInfoNotFound)
}
dcli := docker.NewClient()
sd, err := deployers.NewSwarmDeployer(ctx, project, swarmNet, &dcli)
if err != nil {
return nil, fmt.Errorf("%w, unable to init swarm deployer, err=%v", ErrDeployerInit, err)
return deps, fmt.Errorf("%w, unable to init swarm deployer, err=%v", ErrDeployerInit, err)
}
deps.sd = sd
var nd deployers.NginxDeployer
if project.Deps.NginxFile != "" {
if !opt.noNginx && project.Deps.NginxFile != "" {
nginxNet := hmmap.GetNginxNetInfo()
if nginxNet == nil {
return nil, fmt.Errorf("%w, nginx net info does not exist", ErrNetInfoNotFound)
return deps, fmt.Errorf("%w, nginx net info does not exist", ErrNetInfoNotFound)
}
d, err := deployers.NewNginxDeployer(ctx, project, nginxNet)
if err != nil {
return nil, fmt.Errorf(
return deps, fmt.Errorf(
"%w, unable to init nginx deployer, err=%v",
ErrDeployerInit,
err,
)
}
nd = d
}
var opt Option
for _, o := range options {
o(&opt)
deps.nd = &d
}
if opt.fnCancel != nil {
sd.SetCancellationFunc(opt.fnCancel)
nd.SetCancellationFunc(opt.fnCancel)
}
return []deployers.IDeployer{&sd, &nd}, nil
}
// generateTasksTree returns a list of linked `Task` to submit.
//
// It's here that all tasks are linked each other to provide the deployment ordering.
func generateTasksTree(deployers []deployers.IDeployer, destroy bool) ([]*scheduler.Task, error) {
if len(deployers) != MaxDeployers {
return nil, fmt.Errorf("%w, deployers len should be equals to 2", ErrGenerateTasksTree)
}
sd := deployers[0]
nd := deployers[1]
tasks := []*scheduler.Task{}
if destroy {
swarmDestroy := scheduler.NewTask("swarm-destroy", sd.Destroy)
destroyTask := scheduler.NewTask("nginx-destroy", nd.Destroy, swarmDestroy)
tasks = append(tasks, destroyTask)
return tasks, nil
}
var swarmTask *scheduler.Task
if nd != nil {
deployNginx := scheduler.NewTask("nginx-deploy", nd.Deploy)
swarmTask = scheduler.NewTask("swarm-deploy", sd.Deploy, deployNginx)
} else {
swarmTask = scheduler.NewTask("swarm-deploy", sd.Deploy)
}
swarmTask = scheduler.NewTask("swarm-build", sd.Build, swarmTask)
tasks = append(tasks, swarmTask, scheduler.NewTask("nginx-build", nd.Build))
return tasks, nil
}
// waitForCompletion waits for all deployers to complete.
//
// After the completion, deployers `Clear` methods are executed to clean all ressources.
// Then the scheduler is stopped to terminate the engine.
func waitForCompletion(
deployers []deployers.IDeployer,
s *scheduler.Scheduler,
destroy bool,
) error {
var wg sync.WaitGroup
for idx := range deployers {
if d := deployers[idx]; d != nil {
wg.Add(1)
go func() {
defer wg.Done()
<-d.Done()
}()
if deps.nd != nil {
deps.nd.SetCancellationFunc(opt.fnCancel)
}
}
wg.Wait()
var errs []error
for idx := range deployers {
if d := deployers[idx]; d != nil {
errs = append(errs, d.Error())
if !destroy {
s.Submit(scheduler.NewTask(string(d.Type()), d.Clear)) //nolint: errcheck // TODO
}
}
}
s.Stop()
<-s.Done()
return errors.Join(errs...)
return deps, nil
}
func main() {
func main() { //nolint: all // long CLI entrypoint
ctx, fnCancel := signal.NotifyContext(
context.Background(),
os.Interrupt,
os.Kill,
)
initLogger()
log.Info().Msg("hmdeploy started")
projectDir := flag.String("path", ".", "define the .homeserver project root dir")
destroy := flag.Bool("destroy", false, "delete the deployed project")
noNginx := flag.Bool("no-nginx", false, "no Nginx deployment")
debug := flag.Bool("debug", false, "show debug logs")
flag.Parse()
initLogger(*debug)
log.Info().Msg("hmdeploy started")
hmmap, err := loadHMMap()
if err != nil {
log.Fatal().Err(err).Msg("failed to load conf")
@ -234,14 +268,24 @@ func main() {
Str("name", project.Name).
Msg("project initialized with success")
deployers, err := initDeployers(ctx, &hmmap, &project, WithGlobalCancellation(fnCancel))
initOptions := []InitOption{WithGlobalCancellation(fnCancel)}
if *noNginx {
initOptions = append(initOptions, WithNoNginx())
}
if *destroy {
initOptions = append(initOptions, WithDestroy())
}
deps, err := initDeployers(ctx, &hmmap, &project, initOptions...)
if err != nil {
log.Fatal().Err(err).Msg("unable to init deployers")
}
tasks, err := generateTasksTree(deployers, *destroy)
if err != nil {
log.Fatal().Err(err).Msg("unable to generate tasks tree")
tasks := deps.generateTasksTree()
tasks.Display()
if err := utils.Confirm(ctx, *destroy); err != nil {
log.Fatal().Err(err).Msg("error while confirming execution")
}
s := scheduler.NewScheduler(
@ -251,7 +295,7 @@ func main() {
tasks...,
)
if err := waitForCompletion(deployers, s, *destroy); err != nil {
if err := deps.waitForCompletion(s); err != nil {
log.Fatal().
Err(err).
Str("name", project.Name).

View File

@ -4,7 +4,6 @@ import (
"encoding/json"
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
@ -23,15 +22,13 @@ const (
var ErrProjectConfFile = errors.New("project error")
func getFileInfo(baseDir, filePath string) (string, fs.FileInfo, error) {
var fInf fs.FileInfo
func getFilepath(baseDir, filePath string) (string, error) {
filePath = filepath.Join(baseDir, filePath)
if !filepath.IsAbs(filePath) {
filePath = filepath.Join(baseDir, filePath)
filePath, err := filepath.Abs(filePath)
filePath, err := filepath.Abs(filePath) //nolint: govet
if err != nil {
return filePath, fInf, fmt.Errorf(
return filePath, fmt.Errorf(
"%w, file=%s, err=%v",
ErrProjectConfFile,
filePath,
@ -40,61 +37,66 @@ func getFileInfo(baseDir, filePath string) (string, fs.FileInfo, error) {
}
}
fInf, err := os.Stat(filePath)
fileInfo, err := os.Stat(filePath)
if err != nil {
return filePath, fInf, fmt.Errorf(
"%w, unable to stat file=%s, err=%v",
return filePath, fmt.Errorf(
"%w, file=%s, err=%v",
ErrProjectConfFile,
filePath,
err,
)
}
return filePath, fInf, nil
if fileInfo.IsDir() {
return filePath, fmt.Errorf(
"%w, file=%s, err=%s",
ErrProjectConfFile,
filePath,
"must be a file",
)
}
return filePath, nil
}
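A hypothetical call, assuming the compose file exists at the joined path:

```go
// Resolves a relative dependency path against the project dir and
// fails if the result is missing or is a directory.
path, err := getFilepath("/home/user/my-project", "docker-compose.yml")
if err != nil {
	log.Fatal().Err(err).Msg("invalid project file")
}
fmt.Println(path) // /home/user/my-project/docker-compose.yml
```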
// Project holds the details and file information of your project.
type Project struct {
Name string `json:"name"`
Dir string
ImageNames []string `json:"images"`
Deps struct {
Name string `json:"name"`
Dir string
Deps struct {
EnvFile string `json:"env"`
EnvFileInfo fs.FileInfo
ComposeFile string `json:"compose"`
ComposeFileInfo fs.FileInfo
NginxFile string `json:"nginx"`
NginxFileInfo fs.FileInfo
ComposeFile string `json:"compose"`
NginxFile string `json:"nginx"`
} `json:"dependencies"`
ImageNames []string `json:"images"`
}
func (p *Project) validate() error {
cpath, cfs, err := getFileInfo(p.Dir, p.Deps.ComposeFile)
cpath, err := getFilepath(p.Dir, p.Deps.ComposeFile)
if err != nil {
return err
}
p.Deps.ComposeFileInfo = cfs
p.Deps.ComposeFile = cpath
if p.Deps.EnvFile != "" {
epath, efs, err := getFileInfo(p.Dir, p.Deps.EnvFile)
epath, err := getFilepath(p.Dir, p.Deps.EnvFile)
if err != nil {
return err
}
p.Deps.EnvFileInfo = efs
p.Deps.EnvFile = epath
} else {
log.Warn().Msg("no .env file provided, hoping one it's set elsewhere...")
}
npath, nfs, err := getFileInfo(p.Dir, p.Deps.NginxFile)
if err != nil {
return err
if p.Deps.NginxFile != "" {
npath, err := getFilepath(p.Dir, p.Deps.NginxFile)
if err != nil {
return err
}
p.Deps.NginxFile = npath
} else {
log.Warn().Msg("no Nginx conf file provided, Nginx deployment discarded")
}
p.Deps.NginxFileInfo = nfs
p.Deps.NginxFile = npath
return nil
}
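From the struct tags above, the project file presumably looks like the JSON below (values are hypothetical). A small round-trip sketch using an anonymous mirror of the struct:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// sample mirrors the Project struct's JSON shape shown above.
const sample = `{
  "name": "my-project",
  "images": ["my-project-api:latest"],
  "dependencies": {
    "env": ".env",
    "compose": "docker-compose.yml",
    "nginx": "nginx.conf"
  }
}`

func main() {
	var p struct {
		Name string `json:"name"`
		Deps struct {
			EnvFile     string `json:"env"`
			ComposeFile string `json:"compose"`
			NginxFile   string `json:"nginx"`
		} `json:"dependencies"`
		ImageNames []string `json:"images"`
	}
	if err := json.Unmarshal([]byte(sample), &p); err != nil {
		panic(err)
	}
	fmt.Println(p.Name, p.Deps.ComposeFile) // my-project docker-compose.yml
}
```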

View File

@ -3,6 +3,7 @@ package scheduler
import (
"context"
"errors"
"fmt"
"sync"
"sync/atomic"
@ -27,8 +28,8 @@ type FnJob func() error
// taskStore is a thread-safe `Task` store.
type taskStore struct {
l sync.RWMutex
tasks map[string]*Task
l sync.RWMutex
}
func newTaskStore() taskStore {
@ -49,7 +50,7 @@ func (ts *taskStore) setStatus(task *Task, status TaskStatus) {
defer ts.l.Unlock()
if _, ok := ts.tasks[task.Name]; !ok {
log.Warn().Str("name", task.Name).Msg("unable to update task status, does not exist")
log.Debug().Str("name", task.Name).Msg("unable to update task status, does not exist")
return
}
@ -63,6 +64,53 @@ func (ts *taskStore) len() int {
return len(ts.tasks)
}
type Tasks []*Task
type tasksOptions func(*options)
type options struct {
layer int
}
func withLayer(layer int) tasksOptions {
return func(o *options) {
o.layer = layer
}
}
// Display prints the Tasks tree execution layers to stdout.
// Each layer contains the tasks the scheduler will execute at that stage.
// TODO: display dependencies
func (ts Tasks) Display() {
fmt.Println("> Tasks execution layers")
ts.display()
}
func (ts Tasks) display(opts ...tasksOptions) {
var opt options
for _, o := range opts {
o(&opt)
}
if opt.layer == 0 {
opt.layer = 1
}
if len(ts) == 0 {
return
}
fmt.Printf("------ layer %d ------\n", opt.layer)
nextTasks := Tasks{}
for idx := range ts {
fmt.Print(ts[idx].Name + " ")
nextTasks = append(nextTasks, ts[idx].Next...)
}
fmt.Println("")
opt.layer += 1
nextTasks.display(withLayer(opt.layer))
}
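Given the deploy tree produced by `generateTasksTree` in main.go (roots swarm-build and nginx-build), `Display` would print something like:

```
> Tasks execution layers
------ layer 1 ------
swarm-build nginx-build
------ layer 2 ------
swarm-deploy
------ layer 3 ------
nginx-deploy
```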
// Task represents an execution unit handled by the scheduler.
//
// Next field links to next executable tasks (tree kind).
@ -84,16 +132,16 @@ func NewTask(name string, job FnJob, next ...*Task) *Task {
// Scheduler is a simple scheduler.
// It handles tasks and executes them, that's all.
type Scheduler struct {
ctx context.Context
fnCancel context.CancelFunc
wg sync.WaitGroup
type Scheduler struct { //nolint: govet // keep logical field order
capacity atomic.Uint32
workers uint8
chTasks chan *Task
wg sync.WaitGroup
chTasks chan *Task
tasks taskStore
ctx context.Context
fnCancel context.CancelFunc
tasks taskStore
}
// NewScheduler instantiates a new `Scheduler`.
@ -134,7 +182,7 @@ func (s *Scheduler) run() {
s.tasks.setStatus(t, Running)
if err := t.Job(); err != nil {
log.Err(err).Str("task", t.Name).Msg("error executing task")
log.Debug().Err(err).Str("task", t.Name).Msg("error executing task")
s.tasks.setStatus(t, Failed)
continue
}
@ -145,7 +193,7 @@ func (s *Scheduler) run() {
s.Submit(nt) //nolint: errcheck // TODO
}
case <-s.ctx.Done():
log.Warn().Msg("context done, stopping worker...")
log.Debug().Msg("context done, stopping worker...")
return
}
}
@ -160,7 +208,7 @@ func (s *Scheduler) Stop() {
func (s *Scheduler) Submit(task *Task) error {
select {
case <-s.ctx.Done():
log.Error().Msg("unable to submit new task, scheduler is stopping...")
log.Debug().Msg("unable to submit new task, scheduler is stopping...")
return ErrSchedulerContextDone
default:
}
@ -181,7 +229,7 @@ func (s *Scheduler) Done() <-chan struct{} {
for { //nolint: staticcheck // no
select {
case <-s.ctx.Done():
log.Info().Msg("waiting for scheduler task completion...")
log.Debug().Msg("waiting for scheduler task completion...")
s.wg.Wait()
chDone <- struct{}{}
return

View File

@ -3,14 +3,19 @@ package utils
import (
"archive/tar"
"compress/gzip"
"context"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
"github.com/rs/zerolog/log"
)
const confirmChar = "Y"
func addToArchive(tw *tar.Writer, filename string) error {
file, err := os.Open(filename)
if err != nil {
@ -29,7 +34,7 @@ func addToArchive(tw *tar.Writer, filename string) error {
}
header.Name = filepath.Base(file.Name())
if err := tw.WriteHeader(header); err != nil {
if err = tw.WriteHeader(header); err != nil {
return err
}
@ -70,3 +75,26 @@ func CreateArchive(destDir, name string, files ...string) (string, error) {
return archivePath, nil
}
func Confirm(ctx context.Context, destroy bool) error {
logMsg := "deploy"
if destroy {
logMsg = "undeploy"
}
log.Warn().Msgf("Confirm %s? Y to confirm", logMsg)
var text string
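// Pressing Enter with no input makes Fscanf fail with an "unexpected
// newline" error; tolerate it so an empty answer falls through to the
// confirmation check below and counts as a "no".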
if _, err := fmt.Fscanf(os.Stdin, "%s", &text); err != nil {
if !strings.Contains(err.Error(), "newline") {
return err
}
}
if !strings.EqualFold(text, confirmChar) {
log.Info().Msg("Ok, bye !")
os.Exit(0)
}
return nil
}