package main

import (
	"context"
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	"os"
	"os/signal"
	"path"
	"strconv"
	"strings"
	"sync"

	"gitea.thegux.fr/hmdeploy/deployers"
	"gitea.thegux.fr/hmdeploy/docker"
	"gitea.thegux.fr/hmdeploy/models"
	"gitea.thegux.fr/hmdeploy/scheduler"
	"gitea.thegux.fr/hmdeploy/utils"
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

const (
	HMDeployDirname string = ".homeserver"
	NetworkFilename string = "map.json"

	SchedulerNbWorkers     uint8  = 4
	SchedulerQueueCapacity uint32 = 30

	MaxDeployers int = 2
)

var Version string

var HOME_PATH = os.Getenv("HOME")

var (
	ErrNetInfoNotFound   = errors.New("unable to get net info")
	ErrDeployerInit      = errors.New("unable to initialize deployer")
	ErrGenerateTasksTree = errors.New("unable to generate tasks tree")
)

// Deployers groups the deployers enabled for a run; a nil field means the
// corresponding deployer is disabled.
type Deployers struct {
	nd      *deployers.NginxDeployer
	sd      *deployers.SwarmDeployer
	destroy bool
}
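
// A hypothetical usage sketch (construction shown for illustration only; in
// practice instances are built by initDeployers below):
//
//	deps := Deployers{nd: &nginxDeployer, sd: &swarmDeployer}
//	tasks := deps.generateTasksTree()
//	tasks.Display()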

// generateDestroyTasks builds the destroy tasks chain: nginx is torn down
// first, then swarm (when both deployers are enabled).
func (d *Deployers) generateDestroyTasks() scheduler.Tasks {
	tasks := []*scheduler.Task{}

	// only nginx deployer
	if d.nd != nil && d.sd == nil {
		rootTask := scheduler.NewTask("nginx-destroy", d.nd.Destroy)
		tasks = append(tasks, rootTask)
		return tasks
	}

	// both deployers enabled
	if d.nd != nil && d.sd != nil {
		rootTask := scheduler.NewTask("nginx-destroy", d.nd.Destroy)
		swarmDestroy := scheduler.NewTask("swarm-destroy", d.sd.Destroy)
		rootTask.AddNext(swarmDestroy)

		tasks = append(tasks, rootTask)
		return tasks
	}

	// only swarm deployer
	if d.sd != nil && d.nd == nil {
		rootTask := scheduler.NewTask("swarm-destroy", d.sd.Destroy)
		tasks = append(tasks, rootTask)
	}

	return tasks
}
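
// For illustration, a destroy run with both deployers enabled produces the
// chain below (derived from the linkage above):
//
//	nginx-destroy -> swarm-destroy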

// generateDeployTasks builds the deploy tasks tree: each deployment depends
// on its build, and the nginx deployment also waits for the swarm deployment.
func (d *Deployers) generateDeployTasks() scheduler.Tasks {
	tasks := []*scheduler.Task{}

	// only nginx deployer
	if d.nd != nil && d.sd == nil {
		rootTask := scheduler.NewTask("nginx-build", d.nd.Build)
		rootTask.AddNext(scheduler.NewTask("nginx-deploy", d.nd.Deploy))

		tasks = append(tasks, rootTask)
		return tasks
	}

	// both deployers enabled
	if d.nd != nil && d.sd != nil {
		nginxDeploy := scheduler.NewTask("nginx-deploy", d.nd.Deploy)

		swarmDeploy := scheduler.NewTask("swarm-deploy", d.sd.Deploy)
		swarmDeploy.AddNext(nginxDeploy)

		nginxBuild := scheduler.NewTask("nginx-build", d.nd.Build)
		nginxDeploy.AddParent(nginxBuild) // nginx deployment depends on the build success
		swarmBuild := scheduler.NewTask("swarm-build", d.sd.Build)
		swarmBuild.AddNext(swarmDeploy)

		tasks = append(tasks, nginxBuild, swarmBuild)

		return tasks
	}

	// only swarm deployer
	if d.sd != nil && d.nd == nil {
		rootTask := scheduler.NewTask("swarm-build", d.sd.Build)
		rootTask.AddNext(scheduler.NewTask("swarm-deploy", d.sd.Deploy))

		tasks = append(tasks, rootTask)
		return tasks
	}

	return tasks
}
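
// For illustration, with both deployers enabled the resulting graph is
// (derived from the linkage above; nginx-deploy waits for both parents):
//
//	swarm-build -> swarm-deploy -> nginx-deploy
//	nginx-build -----------------> nginx-deploy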

// generateTasksTree returns a list of linked `Task` to submit.
//
// It is here that all tasks are linked to each other to provide the deployment ordering.
func (d *Deployers) generateTasksTree() scheduler.Tasks {
	if d.destroy {
		return d.generateDestroyTasks()
	}

	return d.generateDeployTasks()
}

// waitForCompletion waits for all deployers to complete.
//
// After the completion, the deployers' `Clear` methods are executed to clean all resources.
// Then the scheduler is stopped to terminate the engine.
func (d *Deployers) waitForCompletion(s *scheduler.Scheduler) error {
	var wg sync.WaitGroup

	deps := []deployers.IDeployer{}
	if d.nd != nil {
		deps = append(deps, d.nd)
	}
	if d.sd != nil {
		deps = append(deps, d.sd)
	}

	for idx := range deps {
		if dep := deps[idx]; dep != nil {
			wg.Add(1)
			go func() {
				defer wg.Done()
				<-dep.Done()
			}()
		}
	}

	wg.Wait()

	var errs []error
	for idx := range deps {
		if dep := deps[idx]; dep != nil {
			errs = append(errs, dep.Error())
			if !d.destroy {
				s.Submit(
					scheduler.NewTask(string(dep.Type()), dep.Clear),
				) //nolint: errcheck // TODO
			}
		}
	}

	s.Stop()
	<-s.Done()

	return errors.Join(errs...)
}
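
// Hypothetical call sequence (mirrors main below; the log message is an
// assumption for the sketch):
//
//	s := scheduler.NewScheduler(context.Background(), SchedulerQueueCapacity, SchedulerNbWorkers, tasks...)
//	if err := deps.waitForCompletion(s); err != nil {
//		log.Fatal().Err(err).Msg("deployment failed")
//	}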

// Option gathers the optional settings applied by initDeployers.
type Option struct {
	fnCancel context.CancelFunc
	destroy  bool
	noNginx  bool
	noSwarm  bool
}

// InitOption mutates an Option; see the With* constructors below.
type InitOption func(o *Option)

// WithGlobalCancellation registers a cancellation function used to notify
// all deployers to stop.
func WithGlobalCancellation(fnCancel context.CancelFunc) InitOption {
	return func(o *Option) {
		o.fnCancel = fnCancel
	}
}

// WithNoNginx disables the Nginx deployment.
func WithNoNginx() InitOption {
	return func(o *Option) {
		o.noNginx = true
	}
}

// WithNoSwarm disables the Swarm deployment.
func WithNoSwarm() InitOption {
	return func(o *Option) {
		o.noSwarm = true
	}
}

// WithDestroy switches the run to destroy mode.
func WithDestroy() InitOption {
	return func(o *Option) {
		o.destroy = true
	}
}
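
// For illustration, the functional options compose like this (sketch;
// mirrors main below):
//
//	deps, err := initDeployers(ctx, &hmmap, &project,
//		WithGlobalCancellation(fnCancel),
//		WithNoNginx(),
//		WithDestroy(),
//	)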

// initLogger configures the global zerolog logger: Unix timestamps, console
// output on stderr, Info level by default, Debug level with caller info when
// debug is enabled.
func initLogger(debug bool) {
	zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
	zerolog.SetGlobalLevel(zerolog.InfoLevel)
	ctx := log.With()
	if debug {
		zerolog.SetGlobalLevel(zerolog.DebugLevel)
		ctx = ctx.Caller()
	}
	log.Logger = ctx.Logger().Output(zerolog.ConsoleWriter{Out: os.Stderr})
}

// loadHMMap loads the instance configuration map from the given base
// directory (by default `$HOME/.homeserver`).
func loadHMMap(baseDir string) (models.HMMap, error) {
	var hmmap models.HMMap

	hmmapPath := path.Join(baseDir, NetworkFilename)
	c, err := os.ReadFile(hmmapPath)
	if err != nil {
		return hmmap, fmt.Errorf(
			"unable to load configuration from src=%s, err=%v",
			hmmapPath,
			err,
		)
	}

	if err := json.Unmarshal(c, &hmmap); err != nil {
		return hmmap, fmt.Errorf(
			"unable to parse configuration from src=%s, err=%v",
			hmmapPath,
			err,
		)
	}

	return hmmap, nil
}
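
// Hypothetical call (mirrors the defaults used in main below):
//
//	hmmap, err := loadHMMap(path.Join(os.Getenv("HOME"), HMDeployDirname))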

// initSwarmDeployer builds a SwarmDeployer backed by a local and a remote
// Docker client, optionally wiring the global cancellation function.
func initSwarmDeployer(
	ctx context.Context,
	project *models.Project,
	swarmNet *models.HMNetInfo,
	fnCancel context.CancelFunc,
) (deployers.SwarmDeployer, error) {
	dloc := docker.NewLocalClient()
	drem, err := docker.NewRemoteClient(swarmNet)
	if err != nil {
		return deployers.SwarmDeployer{}, err
	}

	sd, err := deployers.NewSwarmDeployer(ctx, project, swarmNet, &dloc, &drem)
	if err != nil {
		return deployers.SwarmDeployer{}, fmt.Errorf(
			"%w, unable to init swarm deployer, err=%v",
			ErrDeployerInit,
			err,
		)
	}

	if fnCancel != nil {
		sd.SetCancellationFunc(fnCancel)
	}

	return sd, nil
}

// initDeployers instantiates the needed deployers from `Project` and `HMMap`
// and returns them.
//
// You can provide as an optional arg:
//   - WithGlobalCancellation(fnCancel context.CancelFunc): close the global context, notifying all deployers to stop
//   - WithNoSwarm(): disable Swarm deployment
//   - WithNoNginx(): disable Nginx deployment
//   - WithDestroy(): switch the run to destroy mode
func initDeployers(
	ctx context.Context,
	hmmap *models.HMMap,
	project *models.Project,
	options ...InitOption,
) (Deployers, error) {
	var opt Option
	for _, o := range options {
		o(&opt)
	}

	deps := Deployers{
		destroy: opt.destroy,
	}

	if !opt.noSwarm && project.GetComposePath() != "" {
		swarmNet := hmmap.GetSwarmNetInfo()
		if swarmNet == nil {
			return deps, fmt.Errorf("%w, swarm net info does not exist", ErrNetInfoNotFound)
		}

		sd, err := initSwarmDeployer(ctx, project, swarmNet, opt.fnCancel)
		if err != nil {
			return deps, err
		}

		deps.sd = &sd
	}

	if !opt.noNginx && project.GetNginxConfPath() != "" {
		nginxNet := hmmap.GetNginxNetInfo()
		if nginxNet == nil {
			return deps, fmt.Errorf("%w, nginx net info does not exist", ErrNetInfoNotFound)
		}

		d, err := deployers.NewNginxDeployer(ctx, project, nginxNet)
		if err != nil {
			return deps, fmt.Errorf(
				"%w, unable to init nginx deployer, err=%v",
				ErrDeployerInit,
				err,
			)
		}

		deps.nd = &d
		if opt.fnCancel != nil {
			d.SetCancellationFunc(opt.fnCancel)
		}
	}

	return deps, nil
}
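
// For illustration (sketch): with WithNoSwarm(), or a project without a
// compose file, deps.sd stays nil and generateTasksTree yields only the
// nginx tasks:
//
//	deps, err := initDeployers(ctx, &hmmap, &project, WithNoSwarm())
//	// deps.sd == nil here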

// getSwarmServicesDetails renders a table of the swarm services and the
// state of their replicas.
//
//nolint:funlen,mnd // TODO(rmanach): could be split
func getSwarmServicesDetails(hm *models.HMMap) error {
	swarmNet := hm.GetSwarmNetInfo()
	if swarmNet == nil {
		return fmt.Errorf("%w, swarm net info does not exist", ErrNetInfoNotFound)
	}

	cli, err := docker.NewRemoteClient(swarmNet)
	if err != nil {
		return err
	}

	services, err := cli.ExtractServicesDetails()
	if err != nil {
		return err
	}

	tb := utils.NewTable(
		utils.WithColSeparator(" | "),
		utils.WithHeaderBorderStyle("*"),
		utils.WithRowSeparator("-"),
		utils.WithHeader("App", 15),
		utils.WithHeader("Name", 15),
		utils.WithHeader("Image", 25),
		utils.WithHeader("Tag", 10),
		utils.WithHeader("Target->Published", 30),
		utils.WithHeader("Networks", 20),
		utils.WithHeader("Replicas", 10),
		utils.WithHeader("Status", 10),
		utils.WithHeader("Error", 20),
	)

	for idx := range services {
		columns := []utils.Column{}
		columns = append(
			columns,
			utils.NewColumn("app", services[idx].App),
			utils.NewColumn("name", services[idx].Name),
			utils.NewColumn("image", services[idx].Image.Name),
			utils.NewColumn("tag", services[idx].Image.Tag),
			utils.NewColumn("networks", strings.Join(services[idx].Networks, ", ")),
		)

		ports := []string{}
		for idy := range services[idx].Ports {
			ports = append(
				ports,
				fmt.Sprintf(
					"%d->%d",
					services[idx].Ports[idy].Target,
					services[idx].Ports[idy].Published,
				),
			)
		}

		columns = append(columns, utils.NewColumn("target->published", strings.Join(ports, ", ")))

		// The first replica goes on the main row; the others each get a sub-line.
		colSubLines := []utils.Column{}
		for idy := range services[idx].Replicas {
			nbCol := utils.NewColumn("replicas", strconv.Itoa(services[idx].Replicas[idy].Pos))
			statusCol := utils.NewColumn("status", string(services[idx].Replicas[idy].State))
			errorCol := utils.NewColumn("error", services[idx].Replicas[idy].Error)

			if idy == 0 {
				columns = append(columns, nbCol, statusCol, errorCol)
				continue
			}

			colSubLines = append(colSubLines, nbCol, statusCol, errorCol)
		}

		mainRow, err := tb.AddRow(columns...)
		if err != nil {
			return err
		}

		// Only add a sub-row when there are extra replicas to display,
		// avoiding an empty row per service.
		if len(colSubLines) > 0 {
			subRow, err := tb.AddRow(colSubLines...)
			if err != nil {
				return err
			}
			mainRow.AddNext(subRow)
		}
	}

	tb.Render()
	return nil
}
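
// For illustration, the rendered header line is (sketch, derived from the
// WithHeader declarations above; actual spacing depends on utils.Table):
//
//	App | Name | Image | Tag | Target->Published | Networks | Replicas | Status | Error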

func main() { //nolint: funlen // TODO: to reduce
	ctx, fnCancel := signal.NotifyContext(
		context.Background(),
		os.Interrupt,
		os.Kill,
	)
	defer fnCancel()

	projectDir := flag.String("path", ".", "define the .homeserver project root dir")
	configDir := flag.String(
		"config",
		path.Join(HOME_PATH, HMDeployDirname),
		"define the configuration directory",
	)
	destroy := flag.Bool("destroy", false, "delete the deployed project")
	noNginx := flag.Bool("no-nginx", false, "no Nginx deployment")
	noSwarm := flag.Bool("no-swarm", false, "no Swarm deployment")
	debug := flag.Bool("debug", false, "show debug logs")
	details := flag.Bool("details", false, "extract swarm details and return")
	version := flag.Bool("version", false, "show program version and return")
	confirm := flag.Bool(
		"confirm",
		false,
		"do not ask for confirmation, you're the best, you don't need confirmation",
	)
	flag.Parse()

	if *version {
		fmt.Println("hmdeploy version: v" + Version)
		os.Exit(0)
	}

	initLogger(*debug)

	hmmap, err := loadHMMap(*configDir)
	if err != nil {
		log.Fatal().Err(err).Msg("failed to load conf")
	}

	if *details {
		if err = getSwarmServicesDetails(&hmmap); err != nil {
			log.Fatal().Err(err).Msg("unable to extract swarm services details")
		}
		return
	}

	project, err := models.ProjectFromDir(*projectDir)
	if err != nil {
		log.Fatal().Str("dir", *projectDir).Err(err).Msg("unable to init project from directory")
	}
	log.Info().
		Str("dir", project.Dir).
		Str("name", project.Name).
		Msg("project initialized with success")

	initOptions := []InitOption{WithGlobalCancellation(fnCancel)}
	if *noNginx {
		initOptions = append(initOptions, WithNoNginx())
	}
	if *noSwarm {
		initOptions = append(initOptions, WithNoSwarm())
	}
	if *destroy {
		initOptions = append(initOptions, WithDestroy())
	}

	deps, err := initDeployers(ctx, &hmmap, &project, initOptions...)
	if err != nil {
		log.Fatal().Err(err).Msg("unable to init deployers")
	}

	tasks := deps.generateTasksTree()
	if len(tasks) == 0 {
		log.Info().Msg("nothing to do... bye!")
		os.Exit(0)
	}
	tasks.Display()

	if !*confirm {
		if err := utils.Confirm(ctx, *destroy); err != nil {
			log.Fatal().Err(err).Msg("error while confirming execution")
		}
	}

	s := scheduler.NewScheduler(
		context.Background(),
		SchedulerQueueCapacity,
		SchedulerNbWorkers,
		tasks...,
	)

	if err := deps.waitForCompletion(s); err != nil {
		log.Fatal().
			Err(err).
			Str("name", project.Name).
			Msg("unable to deploy project, see logs for details")
	}

	msg := "project deployed successfully"
	if *destroy {
		msg = "project undeployed successfully"
	}
	log.Info().Str("name", project.Name).Msg(msg)
}