rework task dependencies
This commit is contained in:
parent 3ccfcfc451
commit 0dae6ae400
@@ -30,16 +30,28 @@ var _ IDeployer = (*SwarmDeployer)(nil)
 func NewSwarmDeployer(
 	ctx context.Context,
 	project *models.Project,
+	netInfo *models.HMNetInfo,
 	dloc docker.IClient,
 	drem *docker.RemoteClient,
-) SwarmDeployer {
+) (SwarmDeployer, error) {
 	var sd SwarmDeployer
+
+	conn, err := connection.NewSSHConn(
+		netInfo.IP.String(),
+		netInfo.SSH.User,
+		netInfo.SSH.Port,
+		netInfo.SSH.PrivKey,
+	)
+	if err != nil {
+		return sd, err
+	}
+
+	sd.conn = &conn
 	sd.dloc = dloc
 	sd.drem = drem
 	sd.deployer = newDeployer(ctx, Swarm, project)
 
-	return sd
+	return sd, nil
 }
 
 func (sd *SwarmDeployer) close() error {
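Note: NewSwarmDeployer now returns an error alongside the value. A minimal call-site sketch (swarmNet is assumed to come from the HMMap, by analogy with GetNginxNetInfo in main.go below):

	sd, err := deployers.NewSwarmDeployer(ctx, project, swarmNet, &dloc, &drem)
	if err != nil {
		return fmt.Errorf("unable to init swarm deployer: %w", err)
	}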
@@ -275,6 +275,7 @@ func (c *RemoteClient) checkState(
 	for {
 		select {
 		case <-ticker.C:
+			ticker.Stop()
 			log.Info().
 				Str("project", projectName).
 				Str("state", string(target)).
@@ -299,6 +300,8 @@ func (c *RemoteClient) checkState(
 			if ready {
 				return
 			}
+
+			ticker.Reset(stateTickDuration)
 		case <-ctxTimeout.Done():
 			msg := "swarm deployment skipped"
 			if errors.Is(ctxTimeout.Err(), context.DeadlineExceeded) {
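Note: the Stop/Reset pair pauses the ticker while the state check runs, so ticks cannot accumulate behind a slow remote query. The pattern in isolation (checkOnce is a placeholder for the body of this case):

	case <-ticker.C:
		ticker.Stop() // pause while the (possibly slow) check runs
		if ready := checkOnce(); ready {
			return
		}
		ticker.Reset(stateTickDuration) // resume ticking for the next round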
main.go
@@ -44,42 +44,89 @@ var (
 
 type Deployers struct {
 	nd      *deployers.NginxDeployer
-	sd      deployers.SwarmDeployer
+	sd      *deployers.SwarmDeployer
 	destroy bool
 }
 
+func (d *Deployers) generateDestroyTasks() scheduler.Tasks {
+	tasks := []*scheduler.Task{}
+
+	// only nginx deployer
+	if d.nd != nil && d.sd == nil {
+		rootTask := scheduler.NewTask("nginx-destroy", d.nd.Destroy)
+		tasks = append(tasks, rootTask)
+		return tasks
+	}
+
+	// both deployers enabled
+	if d.nd != nil && d.sd != nil {
+		rootTask := scheduler.NewTask("nginx-destroy", d.nd.Destroy)
+		swarmDestroy := scheduler.NewTask("swarm-destroy", d.sd.Destroy)
+		rootTask.AddNext(swarmDestroy)
+
+		tasks = append(tasks, rootTask)
+		return tasks
+	}
+
+	// only swarm deployer
+	if d.sd != nil && d.nd == nil {
+		rootTask := scheduler.NewTask("swarm-destroy", d.sd.Destroy)
+		tasks = append(tasks, rootTask)
+	}
+
+	return tasks
+}
+
+func (d *Deployers) generateDeployTasks() scheduler.Tasks {
+	tasks := []*scheduler.Task{}
+
+	// only nginx deployer
+	if d.nd != nil && d.sd == nil {
+		rootTask := scheduler.NewTask("nginx-build", d.nd.Build)
+		rootTask.AddNext(scheduler.NewTask("nginx-deploy", d.nd.Deploy))
+
+		tasks = append(tasks, rootTask)
+		return tasks
+	}
+
+	// both deployers enabled
+	if d.nd != nil && d.sd != nil {
+		nginxDeploy := scheduler.NewTask("nginx-deploy", d.nd.Deploy)
+
+		swarmDeploy := scheduler.NewTask("swarm-deploy", d.sd.Deploy)
+		swarmDeploy.AddNext(nginxDeploy)
+
+		nginxBuild := scheduler.NewTask("nginx-build", d.nd.Build)
+		nginxDeploy.AddParent(nginxBuild) // nginx deployment depends on the build success
+		swarmBuild := scheduler.NewTask("swarm-build", d.sd.Build)
+		swarmBuild.AddNext(swarmDeploy)
+
+		tasks = append(tasks, nginxBuild, swarmBuild)
+
+		return tasks
+	}
+
+	// only swarm deployer
+	if d.sd != nil && d.nd == nil {
+		rootTask := scheduler.NewTask("swarm-build", d.sd.Build)
+		rootTask.AddNext(scheduler.NewTask("swarm-deploy", d.sd.Deploy))
+
+		tasks = append(tasks, rootTask)
+		return tasks
+	}
+
+	return tasks
+}
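Note: for the both-deployers branch above, the resulting graph is two roots feeding one chain; a sketch of the intended ordering:

	swarm-build --AddNext--> swarm-deploy --AddNext--> nginx-deploy
	nginx-build -------------AddParent---------------> nginx-deploy

nginx-deploy is only submitted once swarm-deploy completes, and only runs once nginx-build has reported Success.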
 
 // generateTasksTree returns a list of linked `Task` to submit.
 //
 // It's here that all tasks are linked to each other to provide the deployment ordering.
 func (d *Deployers) generateTasksTree() scheduler.Tasks {
-	tasks := []*scheduler.Task{}
-
 	if d.destroy {
-		swarmDestroy := scheduler.NewTask("swarm-destroy", d.sd.Destroy)
-		if d.nd != nil {
-			destroyTask := scheduler.NewTask("nginx-destroy", d.nd.Destroy, swarmDestroy)
-			tasks = append(tasks, destroyTask)
-			return tasks
-		}
-		tasks = append(tasks, swarmDestroy)
-		return tasks
+		return d.generateDestroyTasks()
 	}
 
-	var swarmTask *scheduler.Task
-	if d.nd != nil {
-		deployNginx := scheduler.NewTask("nginx-deploy", d.nd.Deploy)
-		swarmTask = scheduler.NewTask("swarm-deploy", d.sd.Deploy, deployNginx)
-	} else {
-		swarmTask = scheduler.NewTask("swarm-deploy", d.sd.Deploy)
-	}
-
-	swarmTask = scheduler.NewTask("swarm-build", d.sd.Build, swarmTask)
-	tasks = append(tasks, swarmTask)
-	if d.nd != nil {
-		tasks = append(tasks, scheduler.NewTask("nginx-build", d.nd.Build))
-	}
-
-	return tasks
+	return d.generateDeployTasks()
 }
 
 // waitForCompletion waits for all deployers to complete.
@@ -89,7 +136,7 @@ func (d *Deployers) generateTasksTree() scheduler.Tasks {
 func (d *Deployers) waitForCompletion(s *scheduler.Scheduler) error {
 	var wg sync.WaitGroup
 
-	deps := []deployers.IDeployer{&d.sd}
+	deps := []deployers.IDeployer{d.sd}
 	if d.nd != nil {
 		deps = append(deps, d.nd)
 	}
@@ -128,6 +175,7 @@ type Option struct {
 	fnCancel context.CancelFunc
 	destroy  bool
 	noNginx  bool
+	noSwarm  bool
 }
 
 type InitOption func(o *Option)
@@ -144,6 +192,12 @@ func WithNoNginx() InitOption {
 	}
 }
 
+func WithNoSwarm() InitOption {
+	return func(o *Option) {
+		o.noSwarm = true
+	}
+}
+
 func WithDestroy() InitOption {
 	return func(o *Option) {
 		o.destroy = true
@@ -188,6 +242,10 @@ func loadHMMap() (models.HMMap, error) {
 //
 // You can provide as an optional arg:
 // - WithGlobalCancellation(fnCancel context.CancelFunc): close the global context, notifying all deployers to stop
+// - WithNoSwarm(): disable Swarm deployment
 // - WithNoNginx(): disable Nginx deployment
 //
+//nolint:funlen // not that much...
 func initDeployers(
 	ctx context.Context,
 	hmmap *models.HMMap,
@@ -214,8 +272,21 @@ func initDeployers(
 		return deps, err
 	}
 
-	sd := deployers.NewSwarmDeployer(ctx, project, &dloc, &drem)
-	deps.sd = sd
+	if !opt.noSwarm {
+		sd, err := deployers.NewSwarmDeployer(ctx, project, swarmNet, &dloc, &drem)
+		if err != nil {
+			return deps, fmt.Errorf(
+				"%w, unable to init swarm deployer, err=%v",
+				ErrDeployerInit,
+				err,
+			)
+		}
+		deps.sd = &sd
+
+		if opt.fnCancel != nil {
+			sd.SetCancellationFunc(opt.fnCancel)
+		}
+	}
 
 	if !opt.noNginx && project.Deps.NginxFile != "" {
 		nginxNet := hmmap.GetNginxNetInfo()
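Note: the wrapping here keeps the sentinel matchable while flattening the cause into the message (cause is a placeholder):

	wrapped := fmt.Errorf("%w, unable to init swarm deployer, err=%v", ErrDeployerInit, cause)
	errors.Is(wrapped, ErrDeployerInit) // true: only the %w operand is wrapped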
@@ -233,12 +304,8 @@ func initDeployers(
 		}
 
 		deps.nd = &d
-	}
 
-	if opt.fnCancel != nil {
-		sd.SetCancellationFunc(opt.fnCancel)
-		if deps.nd != nil {
-			deps.nd.SetCancellationFunc(opt.fnCancel)
+		if opt.fnCancel != nil {
+			d.SetCancellationFunc(opt.fnCancel)
 		}
 	}
 
@@ -345,6 +412,7 @@ func main() { //nolint: funlen //TODO: to reduce
 	projectDir := flag.String("path", ".", "define the .homeserver project root dir")
 	destroy := flag.Bool("destroy", false, "delete the deployed project")
 	noNginx := flag.Bool("no-nginx", false, "no Nginx deployment")
+	noSwarm := flag.Bool("no-swarm", false, "no Swarm deployment")
 	debug := flag.Bool("debug", false, "show debug logs")
 	details := flag.Bool("details", false, "extract swarm details and return")
 	version := flag.Bool("version", false, "show the version and return")
@@ -387,6 +455,9 @@ func main() { //nolint: funlen //TODO: to reduce
 	if *noNginx {
 		initOptions = append(initOptions, WithNoNginx())
 	}
+	if *noSwarm {
+		initOptions = append(initOptions, WithNoSwarm())
+	}
 	if *destroy {
 		initOptions = append(initOptions, WithDestroy())
 	}
@@ -397,6 +468,10 @@ func main() { //nolint: funlen //TODO: to reduce
 	}
 
 	tasks := deps.generateTasksTree()
+	if len(tasks) == 0 {
+		log.Info().Msg("nothing to do... bye!")
+		os.Exit(0)
+	}
 	tasks.Display()
 
 	if !*confirm {
@@ -13,6 +13,10 @@ import (
 var (
 	ErrSchedulerMaxCapacityReached = errors.New("unable to add new task, max capacity reached")
 	ErrSchedulerContextDone        = errors.New("context done, scheduler stopped")
+
+	ErrTaskDoesNotExist       = errors.New("task does not exist")
+	ErrParentTaskDoesNotExist = errors.New("parent task does not exist")
+	ErrParentTaskFailed       = errors.New("parent task failed")
 )
 
 type TaskStatus string
@@ -22,6 +26,7 @@ const (
 	Running TaskStatus = "running"
 	Success            = "success"
 	Failed             = "failed"
+	Unknown            = "unknown"
 )
 
 type FnJob func() error
@@ -57,6 +62,44 @@ func (ts *taskStore) setStatus(task *Task, status TaskStatus) {
 	ts.tasks[task.Name].Status = status
 }
 
+func (ts *taskStore) isReady(task *Task) (bool, error) {
+	ts.l.RLock()
+	defer ts.l.RUnlock()
+
+	t, ok := ts.tasks[task.Name]
+	if !ok {
+		log.Debug().Str("name", task.Name).Msg("unable to get task, does not exist")
+		return false, ErrTaskDoesNotExist
+	}
+
+	if t.Status != Pending {
+		return false, fmt.Errorf("bad status to be run: %s", t.Status)
+	}
+
+	if t.Parents == nil {
+		return true, nil
+	}
+
+	isReady := true
+	for idx := range t.Parents {
+		pt, ok := ts.tasks[t.Parents[idx].Name]
+		if !ok {
+			log.Debug().
+				Str("name", t.Parents[idx].Name).
+				Msg("unable to get parent task, does not exist")
+			return false, ErrParentTaskDoesNotExist
+		}
+
+		if pt.Status == Failed {
+			return false, ErrParentTaskFailed
+		}
+
+		isReady = isReady && pt.Status == Success
+	}
+
+	return isReady, nil
+}
+
 func (ts *taskStore) len() int {
 	ts.l.RLock()
 	defer ts.l.RUnlock()
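Note: with the parent loop above, readiness requires every parent to have succeeded. A summary of the semantics for a task with parents A and B (hypothetical statuses):

	A=Success, B=Success -> (true, nil): ready to run
	A=Success, B=Pending -> (false, nil): re-queued by the scheduler
	A=Failed,  B=any     -> (false, ErrParentTaskFailed): task is marked Failed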
@@ -115,10 +158,11 @@ func (ts Tasks) display(opts ...tasksOptions) {
 //
 // Next field links to next executable tasks (tree kind).
 type Task struct {
-	Name   string
-	Job    FnJob
-	Status TaskStatus
-	Next   []*Task
+	Name    string
+	Job     FnJob
+	Status  TaskStatus
+	Next    []*Task
+	Parents []*Task
 }
 
 func NewTask(name string, job FnJob, next ...*Task) *Task {
@@ -130,6 +174,17 @@ func NewTask(name string, job FnJob, next ...*Task) *Task {
 	}
 }
 
+func (t *Task) AddNext(next ...*Task) {
+	t.Next = append(t.Next, next...)
+}
+
+func (t *Task) AddParent(parents ...*Task) {
+	if t.Parents == nil {
+		t.Parents = []*Task{}
+	}
+	t.Parents = append(t.Parents, parents...)
+}
+
 // Scheduler is a simple scheduler.
 // It handles tasks and executes them, that's all.
 type Scheduler struct { //nolint: govet // ll
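Note: a minimal sketch of how the two link kinds differ, mirroring generateDeployTasks above (nd is a placeholder deployer):

	build := scheduler.NewTask("nginx-build", nd.Build)
	deploy := scheduler.NewTask("nginx-deploy", nd.Deploy)
	build.AddNext(deploy)   // Next: presumably submitted by the scheduler after build completes
	deploy.AddParent(build) // Parent: deploy stays Pending until build reports Success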
@@ -179,6 +234,19 @@ func (s *Scheduler) run() {
 	for {
 		select {
 		case t := <-s.chTasks:
+			ok, err := s.tasks.isReady(t)
+			if err != nil {
+				log.Debug().Err(err).Str("task", t.Name).Msg("error checking task status")
+				s.tasks.setStatus(t, Failed)
+				continue
+			}
+
+			if !ok {
+				log.Debug().Str("task", t.Name).Msg("task not ready yet, re-scheduling...")
+				s.Submit(t)
+				continue
+			}
+
 			s.tasks.setStatus(t, Running)
 
 			if err := t.Job(); err != nil {
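Note: the run loop now re-queues tasks whose parents have not finished, which is what lets a flat task list behave like a dependency graph. A rough end-to-end sketch (the Scheduler constructor and Next handling are not shown in this diff):

	tasks := deps.generateTasksTree()
	for _, t := range tasks {
		s.Submit(t) // roots only; Next tasks are presumably submitted as predecessors complete
	}
	if err := deps.waitForCompletion(s); err != nil {
		log.Fatal().Err(err).Msg("deployment failed") // hypothetical handling
	}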