fix project validation + improve CLI + hide debug logs by default + refactor deployers init

rmanach 2025-04-09 15:25:21 +02:00
parent 480d2875c2
commit ab1f5a79c3
9 changed files with 283 additions and 159 deletions

@@ -124,6 +124,7 @@ linters:
     - unparam
     - unused
     - errname
+    - reassign
   # Disable specific linter
   # https://golangci-lint.run/usage/linters/#disabled-by-default
   disable:
@@ -222,7 +223,8 @@ linters:
   # All available settings of specific linters.
   settings:
     # See the dedicated "linters.settings" documentation section.
-    option: value
+    govet:
+      enable-all: true
   # Defines a set of rules to ignore issues.
   # It does not skip the analysis, and so does not ignore "typecheck" errors.
   exclusions:
@@ -297,9 +299,9 @@ formatters:
     - goimports
     - golines
   # Formatters settings.
-  settings:
-    # See the dedicated "formatters.settings" documentation section.
-    option: value
+  # settings:
+  #   # See the dedicated "formatters.settings" documentation section.
+  #   option: value
   # exclusions:
   #   # Mode of the generated files analysis.
   #   #
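Enabling `govet` with `enable-all: true` turns on analyzers that are off by default, notably `fieldalignment`, which appears to be what drives the struct field reorderings (and the `//nolint:govet` escapes) in the Go files below. A minimal, self-contained sketch of what `fieldalignment` optimizes, with hypothetical struct names:

```go
package main

import (
	"fmt"
	"unsafe"
)

// padded needs 7 bytes of padding after each bool so the
// int64 stays 8-byte aligned (on a 64-bit platform).
type padded struct {
	a bool  // 1 byte + 7 padding
	b int64 // 8 bytes
	c bool  // 1 byte + 7 padding
}

// packed orders fields large-to-small, so padding only
// appears once, at the end of the struct.
type packed struct {
	b int64 // 8 bytes
	a bool  // 1 byte
	c bool  // 1 byte + 6 padding
}

func main() {
	fmt.Println(unsafe.Sizeof(padded{})) // 24
	fmt.Println(unsafe.Sizeof(packed{})) // 16
}
```

The analyzer also reports "pointer bytes" and prefers pointer-carrying fields early, which would explain why `addr string` moves after `client *ssh.Client` below even though both orders are the same size.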

@@ -130,7 +130,7 @@ hmdeploy --path /path/my-project --destroy
 ```
 
 ## Next steps
-* Improve the CLI arguments
+* ~~Improve the CLI arguments~~
 * ~~Destroy~~
 * Post-install script
 * Deals with bugs

@@ -15,8 +15,8 @@ import (
 )
 
 type SSHConn struct {
-	addr   string
 	client *ssh.Client
+	addr   string
 }
 
 var _ IConnection = (*SSHConn)(nil)

@@ -39,16 +39,16 @@ const (
 // - `Deploy() error`: run shell command to deploy the archive remotly
 // - `Build() error`: build the archive
 // - `Clear() error`: clean all the ressources locally and remotly
-type deployer struct {
+type deployer struct { //nolint:govet // ll
 	ctx        context.Context
 	fnCancel   context.CancelFunc
-	type_      DeployerType
-	project    *models.Project
-	processing atomic.Bool
 	chDone     chan struct{}
+	processing atomic.Bool
+	type_      DeployerType
 	errFlag    error
+	project    *models.Project
 }
 
 func newDeployer(ctx context.Context, type_ DeployerType, project *models.Project) *deployer {
@@ -107,7 +107,7 @@ func (d *deployer) Done() <-chan struct{} {
 	for {
 		select {
 		case <-d.ctx.Done():
-			log.Warn().Str("deployer", string(d.type_)).Msg("context done catch")
+			log.Debug().Str("deployer", string(d.type_)).Msg("context done catch")
 
 			timeout := time.NewTicker(GracefulTimeout)
 			tick := time.NewTicker(time.Second)
@@ -132,7 +132,7 @@ func (d *deployer) Done() <-chan struct{} {
 			}
 		}
 	case <-d.chDone:
-		log.Info().Str("deployer", string(d.type_)).Msg("terminated")
+		log.Debug().Str("deployer", string(d.type_)).Msg("terminated")
 		chDone <- struct{}{}
 		return
 	}
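Demoting these messages to Debug matches the new `-debug` flag added in main.go: shutdown chatter only shows when asked for. The surrounding loop gives an in-flight deployment a chance to finish, polling every second until `GracefulTimeout` fires. A stripped-down, standalone sketch of that pattern (not the deployer's actual code):

```go
package main

import (
	"fmt"
	"time"
)

// waitGraceful polls busy() every second and returns when the work
// reports done, or when the grace period expires, whichever is first.
func waitGraceful(busy func() bool, grace time.Duration) {
	timeout := time.NewTimer(grace)
	defer timeout.Stop()
	tick := time.NewTicker(time.Second)
	defer tick.Stop()
	for {
		select {
		case <-timeout.C:
			fmt.Println("grace period exhausted, giving up")
			return
		case <-tick.C:
			if !busy() {
				fmt.Println("work finished cleanly")
				return
			}
		}
	}
}

func main() {
	deadline := time.Now().Add(2 * time.Second)
	waitGraceful(func() bool { return time.Now().Before(deadline) }, 5*time.Second)
}
```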

@@ -56,7 +56,7 @@ func (sd *SwarmDeployer) close() error {
 }
 
 func (sd *SwarmDeployer) clean() (err error) {
-	if err := os.Remove(sd.archivePath); err != nil {
+	if err = os.Remove(sd.archivePath); err != nil {
 		log.Err(err).Str("archive", sd.archivePath).Msg("unable to clean local swarm archive file")
 	}
 	_, err = sd.conn.Execute(
@@ -109,11 +109,11 @@ func (sd *SwarmDeployer) Build() error {
 		defer os.Remove(tarFile) //nolint: errcheck // defered
 
 		// copy the file directly instead of adding it in the tar archive
+		log.Info().Str("image", tarFile).Msg("Transferring image...")
 		if err := sd.conn.CopyFile(tarFile, filepath.Base(tarFile)); err != nil {
 			return err
 		}
 		log.Info().Str("image", tarFile).Msg("image transferred with success")
-		log.Info().Str("image", sd.project.ImageNames[idx]).Msg("image added to archive")
 	}
 
 	if envFilePath := sd.project.Deps.EnvFile; envFilePath != "" {
@@ -171,7 +171,7 @@ func (sd *SwarmDeployer) Deploy() error {
 	archiveDestPath := filepath.Base(sd.archivePath)
 	log.Info().
 		Str("archive", sd.archivePath).
-		Msg("archive built with success, tranfering to swarm for deployment...")
+		Msg("archive built with success, tranferring to swarm for deployment...")
 	if err := sd.conn.CopyFile(sd.archivePath, archiveDestPath); err != nil {
 		sd.setDone(err)
 		return err
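The `clean()` tweak from `err :=` to `err =` matters because `clean` has a named return value: `:=` inside the `if` declares a fresh `err` that shadows the named one, exactly the pattern govet's now-enabled shadow analyzer flags. A self-contained illustration with hypothetical helpers:

```go
package main

import (
	"errors"
	"fmt"
)

var errBoom = errors.New("boom")

func failingCall() error { return errBoom }

// shadowed: := inside the if declares a NEW err, so the named
// return value is never assigned and the caller sees nil.
func shadowed() (err error) {
	if err := failingCall(); err != nil {
		// handled locally only; the outer err stays nil
	}
	return
}

// fixed: plain assignment targets the named return value,
// so the error propagates to the caller.
func fixed() (err error) {
	if err = failingCall(); err != nil {
		// err will be returned below
	}
	return
}

func main() {
	fmt.Println(shadowed()) // <nil>
	fmt.Println(fixed())    // boom
}
```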

main.go

@@ -15,6 +15,7 @@ import (
 	"gitea.thegux.fr/hmdeploy/docker"
 	"gitea.thegux.fr/hmdeploy/models"
 	"gitea.thegux.fr/hmdeploy/scheduler"
+	"gitea.thegux.fr/hmdeploy/utils"
 	"github.com/rs/zerolog"
 	"github.com/rs/zerolog/log"
 )
@@ -37,8 +38,90 @@ var (
 	ErrGenerateTasksTree = errors.New("unable to generate tasks tree")
 )
 
+type Deployers struct {
+	nd      *deployers.NginxDeployer
+	sd      deployers.SwarmDeployer
+	destroy bool
+}
+
+// generateTasksTree returns a list of linked `Task` to submit.
+//
+// It's here that all tasks are linked each other to provide the deployment ordering.
+func (d *Deployers) generateTasksTree() scheduler.Tasks {
+	tasks := []*scheduler.Task{}
+	if d.destroy {
+		swarmDestroy := scheduler.NewTask("swarm-destroy", d.sd.Destroy)
+		if d.nd != nil {
+			destroyTask := scheduler.NewTask("nginx-destroy", d.nd.Destroy, swarmDestroy)
+			tasks = append(tasks, destroyTask)
+			return tasks
+		}
+		tasks = append(tasks, swarmDestroy)
+		return tasks
+	}
+
+	var swarmTask *scheduler.Task
+	if d.nd != nil {
+		deployNginx := scheduler.NewTask("nginx-deploy", d.nd.Deploy)
+		swarmTask = scheduler.NewTask("swarm-deploy", d.sd.Deploy, deployNginx)
+	} else {
+		swarmTask = scheduler.NewTask("swarm-deploy", d.sd.Deploy)
+	}
+	swarmTask = scheduler.NewTask("swarm-build", d.sd.Build, swarmTask)
+	tasks = append(tasks, swarmTask)
+
+	if d.nd != nil {
+		tasks = append(tasks, scheduler.NewTask("nginx-build", d.nd.Build))
+	}
+	return tasks
+}
+
+// waitForCompletion waits for all deployers to complete.
+//
+// After the completion, deployers `Clear` methods are executed to clean all ressources.
+// Then the scheduler is stopped to terminate the engine.
+func (d *Deployers) waitForCompletion(s *scheduler.Scheduler) error {
+	var wg sync.WaitGroup
+	deps := []deployers.IDeployer{&d.sd}
+	if d.nd != nil {
+		deps = append(deps, d.nd)
+	}
+	for idx := range deps {
+		if d := deps[idx]; d != nil {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				<-d.Done()
+			}()
+		}
+	}
+	wg.Wait()
+
+	var errs []error
+	for idx := range deps {
+		if dep := deps[idx]; d != nil {
+			errs = append(errs, dep.Error())
+			if !d.destroy {
+				s.Submit(scheduler.NewTask(string(dep.Type()), dep.Clear)) //nolint: errcheck // TODO
+			}
+		}
+	}
+	s.Stop()
+	<-s.Done()
+	return errors.Join(errs...)
+}
+
 type Option struct {
 	fnCancel context.CancelFunc
+	destroy  bool
+	noNginx  bool
 }
 
 type InitOption func(o *Option)
@@ -49,8 +132,24 @@ func WithGlobalCancellation(fnCancel context.CancelFunc) InitOption {
 	}
 }
 
-func initLogger() {
+func WithNoNginx() InitOption {
+	return func(o *Option) {
+		o.noNginx = true
+	}
+}
+
+func WithDestroy() InitOption {
+	return func(o *Option) {
+		o.destroy = true
+	}
+}
+
+func initLogger(debug bool) {
 	zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
+	zerolog.SetGlobalLevel(zerolog.InfoLevel)
+	if debug {
+		zerolog.SetGlobalLevel(zerolog.DebugLevel)
+	}
 	log.Logger = log.With().Caller().Logger().Output(zerolog.ConsoleWriter{Out: os.Stderr})
 }
@@ -89,137 +188,72 @@ func initDeployers(
 	hmmap *models.HMMap,
 	project *models.Project,
 	options ...InitOption,
-) ([]deployers.IDeployer, error) {
+) (Deployers, error) {
+	var opt Option
+	for _, o := range options {
+		o(&opt)
+	}
+	deps := Deployers{
+		destroy: opt.destroy,
+	}
+
 	swarmNet := hmmap.GetSwarmNetInfo()
 	if swarmNet == nil {
-		return nil, fmt.Errorf("%w, swarm net info does not exist", ErrNetInfoNotFound)
+		return deps, fmt.Errorf("%w, swarm net info does not exist", ErrNetInfoNotFound)
 	}
 
 	dcli := docker.NewClient()
 	sd, err := deployers.NewSwarmDeployer(ctx, project, swarmNet, &dcli)
 	if err != nil {
-		return nil, fmt.Errorf("%w, unable to init swarm deployer, err=%v", ErrDeployerInit, err)
+		return deps, fmt.Errorf("%w, unable to init swarm deployer, err=%v", ErrDeployerInit, err)
 	}
+	deps.sd = sd
 
-	var nd deployers.NginxDeployer
-	if project.Deps.NginxFile != "" {
+	if !opt.noNginx && project.Deps.NginxFile != "" {
 		nginxNet := hmmap.GetNginxNetInfo()
 		if nginxNet == nil {
-			return nil, fmt.Errorf("%w, nginx net info does not exist", ErrNetInfoNotFound)
+			return deps, fmt.Errorf("%w, nginx net info does not exist", ErrNetInfoNotFound)
 		}
 		d, err := deployers.NewNginxDeployer(ctx, project, nginxNet)
 		if err != nil {
-			return nil, fmt.Errorf(
+			return deps, fmt.Errorf(
 				"%w, unable to init nginx deployer, err=%v",
 				ErrDeployerInit,
 				err,
 			)
 		}
-		nd = d
+		deps.nd = &d
 	}
 
-	var opt Option
-	for _, o := range options {
-		o(&opt)
-	}
 	if opt.fnCancel != nil {
 		sd.SetCancellationFunc(opt.fnCancel)
-		nd.SetCancellationFunc(opt.fnCancel)
+		if deps.nd != nil {
+			deps.nd.SetCancellationFunc(opt.fnCancel)
+		}
 	}
 
-	return []deployers.IDeployer{&sd, &nd}, nil
+	return deps, nil
 }
 
-// generateTasksTree returns a list of linked `Task` to submit.
-//
-// It's here that all tasks are linked each other to provide the deployment ordering.
-func generateTasksTree(deployers []deployers.IDeployer, destroy bool) ([]*scheduler.Task, error) {
-	if len(deployers) != MaxDeployers {
-		return nil, fmt.Errorf("%w, deployers len should be equals to 2", ErrGenerateTasksTree)
-	}
-	sd := deployers[0]
-	nd := deployers[1]
-	tasks := []*scheduler.Task{}
-	if destroy {
-		swarmDestroy := scheduler.NewTask("swarm-destroy", sd.Destroy)
-		destroyTask := scheduler.NewTask("nginx-destroy", nd.Destroy, swarmDestroy)
-		tasks = append(tasks, destroyTask)
-		return tasks, nil
-	}
-	var swarmTask *scheduler.Task
-	if nd != nil {
-		deployNginx := scheduler.NewTask("nginx-deploy", nd.Deploy)
-		swarmTask = scheduler.NewTask("swarm-deploy", sd.Deploy, deployNginx)
-	} else {
-		swarmTask = scheduler.NewTask("swarm-deploy", sd.Deploy)
-	}
-	swarmTask = scheduler.NewTask("swarm-build", sd.Build, swarmTask)
-	tasks = append(tasks, swarmTask, scheduler.NewTask("nginx-build", nd.Build))
-	return tasks, nil
-}
-
-// waitForCompletion waits for all deployers to complete.
-//
-// After the completion, deployers `Clear` methods are executed to clean all ressources.
-// Then the scheduler is stopped to terminate the engine.
-func waitForCompletion(
-	deployers []deployers.IDeployer,
-	s *scheduler.Scheduler,
-	destroy bool,
-) error {
-	var wg sync.WaitGroup
-	for idx := range deployers {
-		if d := deployers[idx]; d != nil {
-			wg.Add(1)
-			go func() {
-				defer wg.Done()
-				<-d.Done()
-			}()
-		}
-	}
-	wg.Wait()
-	var errs []error
-	for idx := range deployers {
-		if d := deployers[idx]; d != nil {
-			errs = append(errs, d.Error())
-			if !destroy {
-				s.Submit(scheduler.NewTask(string(d.Type()), d.Clear)) //nolint: errcheck // TODO
-			}
-		}
-	}
-	s.Stop()
-	<-s.Done()
-	return errors.Join(errs...)
-}
-
-func main() {
+func main() { //nolint: all //hjf
 	ctx, fnCancel := signal.NotifyContext(
 		context.Background(),
 		os.Interrupt,
 		os.Kill,
 	)
-	initLogger()
-	log.Info().Msg("hmdeploy started")
 
 	projectDir := flag.String("path", ".", "define the .homeserver project root dir")
 	destroy := flag.Bool("destroy", false, "delete the deployed project")
+	noNginx := flag.Bool("no-nginx", false, "no Nginx deployment")
+	debug := flag.Bool("debug", false, "show debug logs")
 	flag.Parse()
 
+	initLogger(*debug)
+	log.Info().Msg("hmdeploy started")
+
 	hmmap, err := loadHMMap()
 	if err != nil {
 		log.Fatal().Err(err).Msg("failed to load conf")
@@ -234,14 +268,24 @@ func main() {
 		Str("name", project.Name).
 		Msg("project initialized with success")
 
-	deployers, err := initDeployers(ctx, &hmmap, &project, WithGlobalCancellation(fnCancel))
+	initOptions := []InitOption{WithGlobalCancellation(fnCancel)}
+	if *noNginx {
+		initOptions = append(initOptions, WithNoNginx())
+	}
+	if *destroy {
+		initOptions = append(initOptions, WithDestroy())
+	}
+
+	deps, err := initDeployers(ctx, &hmmap, &project, initOptions...)
 	if err != nil {
 		log.Fatal().Err(err).Msg("unable to init deployers")
 	}
 
-	tasks, err := generateTasksTree(deployers, *destroy)
-	if err != nil {
-		log.Fatal().Err(err).Msg("unable to generate tasks tree")
-	}
+	tasks := deps.generateTasksTree()
+	tasks.Display()
+
+	if err := utils.Confirm(ctx, *destroy); err != nil {
+		log.Fatal().Err(err).Msg("error while confirming execution")
+	}
@@ -251,7 +295,7 @@ func main() {
 		tasks...,
 	)
 
-	if err := waitForCompletion(deployers, s, *destroy); err != nil {
+	if err := deps.waitForCompletion(s); err != nil {
 		log.Fatal().
 			Err(err).
 			Str("name", project.Name).
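With Nginx enabled and no `-destroy`, `generateTasksTree` produces two roots: `swarm-build` (chained to `swarm-deploy`, then `nginx-deploy`) plus a free-standing `nginx-build`. A toy reproduction of that linking and of the layer walk that `scheduler.Tasks.Display` performs (simplified stand-ins, not the real scheduler types):

```go
package main

import "fmt"

// Task mirrors the scheduler's shape: a name plus the tasks to run after it.
type Task struct {
	Name string
	Next []*Task
}

func NewTask(name string, next ...*Task) *Task {
	return &Task{Name: name, Next: next}
}

func main() {
	// Mirrors Deployers.generateTasksTree for destroy=false with Nginx enabled.
	deployNginx := NewTask("nginx-deploy")
	swarmDeploy := NewTask("swarm-deploy", deployNginx)
	swarmBuild := NewTask("swarm-build", swarmDeploy)
	roots := []*Task{swarmBuild, NewTask("nginx-build")}

	// Walk the layers the way scheduler.Tasks.Display does.
	for layer := 1; len(roots) > 0; layer++ {
		fmt.Printf("layer %d:", layer)
		next := []*Task{}
		for _, t := range roots {
			fmt.Print(" " + t.Name)
			next = append(next, t.Next...)
		}
		fmt.Println()
		roots = next
	}
	// Output:
	// layer 1: swarm-build nginx-build
	// layer 2: swarm-deploy
	// layer 3: nginx-deploy
}
```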

@@ -4,7 +4,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"io/fs"
 	"os"
 	"path/filepath"
 
@@ -23,15 +22,13 @@ const (
 var ErrProjectConfFile = errors.New("project error")
 
-func getFileInfo(baseDir, filePath string) (string, fs.FileInfo, error) {
-	var fInf fs.FileInfo
-	if !filepath.IsAbs(filePath) {
-		filePath = filepath.Join(baseDir, filePath)
-		filePath, err := filepath.Abs(filePath)
+func getFilepath(baseDir, filePath string) (string, error) {
+	filePath = filepath.Join(baseDir, filePath)
+	if !filepath.IsAbs(filePath) {
+		filePath, err := filepath.Abs(filePath) //nolint: govet
 		if err != nil {
-			return filePath, fInf, fmt.Errorf(
+			return filePath, fmt.Errorf(
 				"%w, file=%s, err=%v",
 				ErrProjectConfFile,
 				filePath,
@@ -40,61 +37,66 @@ func getFileInfo(baseDir, filePath string) (string, fs.FileInfo, error) {
 		}
 	}
 
-	fInf, err := os.Stat(filePath)
+	fileInfo, err := os.Stat(filePath)
 	if err != nil {
-		return filePath, fInf, fmt.Errorf(
-			"%w, unable to stat file=%s, err=%v",
+		return filePath, fmt.Errorf(
+			"%w, file=%s, err=%v",
 			ErrProjectConfFile,
 			filePath,
 			err,
 		)
 	}
 
-	return filePath, fInf, nil
+	if fileInfo.IsDir() {
+		return filePath, fmt.Errorf(
+			"%w, file=%s, err=%s",
+			ErrProjectConfFile,
+			filePath,
+			"must be a file",
+		)
+	}
+	return filePath, nil
 }
 
 // Project handles the details and file informations of your project.
 type Project struct {
 	Name string `json:"name"`
 	Dir  string
+	ImageNames []string `json:"images"`
 	Deps struct {
 		EnvFile     string `json:"env"`
-		EnvFileInfo fs.FileInfo
 		ComposeFile string `json:"compose"`
-		ComposeFileInfo fs.FileInfo
 		NginxFile   string `json:"nginx"`
-		NginxFileInfo fs.FileInfo
 	} `json:"dependencies"`
-	ImageNames []string `json:"images"`
 }
 
 func (p *Project) validate() error {
-	cpath, cfs, err := getFileInfo(p.Dir, p.Deps.ComposeFile)
+	cpath, err := getFilepath(p.Dir, p.Deps.ComposeFile)
 	if err != nil {
 		return err
 	}
-	p.Deps.ComposeFileInfo = cfs
 	p.Deps.ComposeFile = cpath
 
 	if p.Deps.EnvFile != "" {
-		epath, efs, err := getFileInfo(p.Dir, p.Deps.EnvFile)
+		epath, err := getFilepath(p.Dir, p.Deps.EnvFile)
 		if err != nil {
 			return err
 		}
-		p.Deps.EnvFileInfo = efs
 		p.Deps.EnvFile = epath
 	} else {
 		log.Warn().Msg("no .env file provided, hoping one it's set elsewhere...")
 	}
 
-	npath, nfs, err := getFileInfo(p.Dir, p.Deps.NginxFile)
+	if p.Deps.NginxFile != "" {
+		npath, err := getFilepath(p.Dir, p.Deps.NginxFile)
 		if err != nil {
 			return err
 		}
-		p.Deps.NginxFileInfo = nfs
 		p.Deps.NginxFile = npath
+	} else {
+		log.Warn().Msg("no Nginx conf file provided, Nginx deployment discarded")
+	}
 
 	return nil
 }
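Given the struct tags above, a project config this `Project` struct can decode looks like the following; the values (and the enclosing file's name, which this diff doesn't show) are illustrative only:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Project mirrors the JSON-visible part of the struct in this file,
// tags included (Dir has no tag and is filled from the CLI -path flag).
type Project struct {
	Name       string   `json:"name"`
	ImageNames []string `json:"images"`
	Deps       struct {
		EnvFile     string `json:"env"`
		ComposeFile string `json:"compose"`
		NginxFile   string `json:"nginx"`
	} `json:"dependencies"`
}

func main() {
	// Illustrative config; only the keys come from the struct tags.
	raw := `{
		"name": "my-project",
		"images": ["my-project-api:latest"],
		"dependencies": {
			"env": ".env",
			"compose": "docker-compose.yml",
			"nginx": "nginx.conf"
		}
	}`
	var p Project
	if err := json.Unmarshal([]byte(raw), &p); err != nil {
		panic(err)
	}
	fmt.Printf("%s deploys %v (nginx: %s)\n", p.Name, p.ImageNames, p.Deps.NginxFile)
}
```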

@@ -3,6 +3,7 @@ package scheduler
 import (
 	"context"
 	"errors"
+	"fmt"
 	"sync"
 	"sync/atomic"
 
@@ -27,8 +28,8 @@ type FnJob func() error
 
 // taskStore is a thread safe `Task` store.
 type taskStore struct {
-	l     sync.RWMutex
 	tasks map[string]*Task
+	l     sync.RWMutex
 }
 
 func newTaskStore() taskStore {
@@ -49,7 +50,7 @@ func (ts *taskStore) setStatus(task *Task, status TaskStatus) {
 	defer ts.l.Unlock()
 
 	if _, ok := ts.tasks[task.Name]; !ok {
-		log.Warn().Str("name", task.Name).Msg("unable to update task status, does not exist")
+		log.Debug().Str("name", task.Name).Msg("unable to update task status, does not exist")
 		return
 	}
@@ -63,6 +64,53 @@ func (ts *taskStore) len() int {
 	return len(ts.tasks)
 }
 
+type Tasks []*Task
+
+type tasksOptions func(*options)
+
+type options struct {
+	layer int
+}
+
+func withLayer(layer int) tasksOptions {
+	return func(o *options) {
+		o.layer = layer
+	}
+}
+
+// Display displays on stdout the Tasks tree execution layers.
+// Each layer represents the tasks going to be executed by the scheduler.
+// TODO: display dependencies
+func (ts Tasks) Display() {
+	fmt.Println("> Tasks execution layers")
+	ts.display()
+}
+
+func (ts Tasks) display(opts ...tasksOptions) {
+	var opt options
+	for _, o := range opts {
+		o(&opt)
+	}
+	if opt.layer == 0 {
+		opt.layer = 1
+	}
+
+	if len(ts) == 0 {
+		return
+	}
+
+	fmt.Println(fmt.Sprintf("------ layer %d ------", opt.layer))
+	nextTasks := Tasks{}
+	for idx := range ts {
+		fmt.Print(ts[idx].Name + " ")
+		nextTasks = append(nextTasks, ts[idx].Next...)
+	}
+	fmt.Println("")
+
+	opt.layer += 1
+	nextTasks.display(withLayer(opt.layer))
+}
+
 // Task represents an execution unit handle by the scheduler.
 //
 // Next field links to next executable tasks (tree kind).
@@ -84,15 +132,15 @@ func NewTask(name string, job FnJob, next ...*Task) *Task {
 
 // Scheduler is a simple scheduler.
 // Handling tasks and executes them, that's all.
-type Scheduler struct {
-	ctx      context.Context
-	fnCancel context.CancelFunc
-	wg       sync.WaitGroup
+type Scheduler struct { //nolint: govet // ll
 	capacity atomic.Uint32
 	workers  uint8
 	chTasks  chan *Task
+	wg       sync.WaitGroup
+	ctx      context.Context
+	fnCancel context.CancelFunc
 	tasks    taskStore
 }
@@ -134,7 +182,7 @@ func (s *Scheduler) run() {
 			s.tasks.setStatus(t, Running)
 			if err := t.Job(); err != nil {
-				log.Err(err).Str("task", t.Name).Msg("error executing task")
+				log.Debug().Err(err).Str("task", t.Name).Msg("error executing task")
 				s.tasks.setStatus(t, Failed)
 				continue
 			}
@@ -145,7 +193,7 @@ func (s *Scheduler) run() {
 				s.Submit(nt) //nolint: errcheck // TODO
 			}
 		case <-s.ctx.Done():
-			log.Warn().Msg("context done, stopping worker...")
+			log.Debug().Msg("context done, stopping worker...")
 			return
 		}
 	}
@@ -160,7 +208,7 @@ func (s *Scheduler) Stop() {
 func (s *Scheduler) Submit(task *Task) error {
 	select {
 	case <-s.ctx.Done():
-		log.Error().Msg("unable to submit new task, scheduler is stopping...")
+		log.Debug().Msg("unable to submit new task, scheduler is stopping...")
 		return ErrSchedulerContextDone
 	default:
 	}
@@ -181,7 +229,7 @@ func (s *Scheduler) Done() <-chan struct{} {
 		for { //nolint: staticcheck // no
 			select {
 			case <-s.ctx.Done():
-				log.Info().Msg("waiting for scheduler task completion...")
+				log.Debug().Msg("waiting for scheduler task completion...")
 				s.wg.Wait()
 				chDone <- struct{}{}
 				return
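`withLayer` here, like `WithNoNginx`/`WithDestroy` in main.go, follows the functional-options pattern: an option is a function that mutates a private config struct, so defaults live in one place and call sites only name what they override. A generic sketch with hypothetical names:

```go
package main

import "fmt"

type config struct {
	layer   int
	verbose bool
}

type option func(*config)

func withLayer(layer int) option {
	return func(c *config) { c.layer = layer }
}

func withVerbose() option {
	return func(c *config) { c.verbose = true }
}

func render(opts ...option) {
	cfg := config{layer: 1} // defaults live here, in one place
	for _, o := range opts {
		o(&cfg)
	}
	fmt.Printf("layer=%d verbose=%v\n", cfg.layer, cfg.verbose)
}

func main() {
	render()                            // layer=1 verbose=false
	render(withLayer(3), withVerbose()) // layer=3 verbose=true
}
```

The payoff is that adding a new knob later never breaks existing call sites, which is why both the CLI layer and the scheduler can keep growing options independently.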

@@ -3,14 +3,19 @@ package utils
 import (
 	"archive/tar"
 	"compress/gzip"
+	"context"
 	"fmt"
 	"io"
 	"os"
 	"path/filepath"
 	"strings"
 	"time"
+
+	"github.com/rs/zerolog/log"
 )
 
+const confirmChar = "Y"
+
 func addToArchive(tw *tar.Writer, filename string) error {
 	file, err := os.Open(filename)
 	if err != nil {
@@ -29,7 +34,7 @@ func addToArchive(tw *tar.Writer, filename string) error {
 	}
 	header.Name = filepath.Base(file.Name())
 
-	if err := tw.WriteHeader(header); err != nil {
+	if err = tw.WriteHeader(header); err != nil {
 		return err
 	}
@@ -70,3 +75,26 @@ func CreateArchive(destDir, name string, files ...string) (string, error) {
 
 	return archivePath, nil
 }
+
+func Confirm(ctx context.Context, destroy bool) error {
+	logMsg := "deploy"
+	if destroy {
+		logMsg = "undeploy"
+	}
+	log.Warn().Msg(fmt.Sprintf("Confirm to %s ? Y to confirm", logMsg))
+
+	var text string
+	if _, err := fmt.Fscanf(os.Stdin, "%s", &text); err != nil {
+		if !strings.Contains(err.Error(), "newline") {
+			return err
+		}
+	}
+
+	if !strings.EqualFold(text, confirmChar) {
+		log.Info().Msg("Ok, bye !")
+		os.Exit(0)
+	}
+
+	return nil
+}
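`Confirm` tolerates the scan error that `fmt.Fscanf` returns when the user just presses Enter: with nothing for the `%s` verb to match, `fmt` reports an error whose text mentions "newline", which is what the `strings.Contains` guard checks for. A small demonstration of that behavior, reading from a string instead of os.Stdin:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	var text string

	// Simulates the user typing "Y<Enter>": %s matches "Y".
	n, err := fmt.Fscanf(strings.NewReader("Y\n"), "%s", &text)
	fmt.Println(n, text, err) // 1 Y <nil>

	// Simulates a bare Enter: %s has nothing to read, so fmt
	// returns an error mentioning "newline", which Confirm ignores.
	text = ""
	n, err = fmt.Fscanf(strings.NewReader("\n"), "%s", &text)
	fmt.Println(n, err) // 0, error mentioning "newline"
}
```

Matching on the error string is fragile (fmt exports no sentinel error for this case), but it keeps the prompt working when the user declines by just hitting Enter.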