422 lines
		
	
	
		
			9.8 KiB
		
	
	
	
		
			Go
		
	
	
	
	
	
			
		
		
	
	
			422 lines
		
	
	
		
			9.8 KiB
		
	
	
	
		
			Go
		
	
	
	
	
	
| package main
 | |
| 
 | |
| import (
 | |
| 	"context"
 | |
| 	"encoding/json"
 | |
| 	"errors"
 | |
| 	"flag"
 | |
| 	"fmt"
 | |
| 	"os"
 | |
| 	"os/signal"
 | |
| 	"path"
 | |
| 	"strconv"
 | |
| 	"strings"
 | |
| 	"sync"
 | |
| 
 | |
| 	"gitea.thegux.fr/hmdeploy/deployers"
 | |
| 	"gitea.thegux.fr/hmdeploy/docker"
 | |
| 	"gitea.thegux.fr/hmdeploy/models"
 | |
| 	"gitea.thegux.fr/hmdeploy/scheduler"
 | |
| 	"gitea.thegux.fr/hmdeploy/utils"
 | |
| 	"github.com/rs/zerolog"
 | |
| 	"github.com/rs/zerolog/log"
 | |
| )
 | |
| 
 | |
const (
	// HMDeployDirname is the directory under $HOME holding hmdeploy configuration.
	HMDeployDirname string = ".homeserver"
	// NetworkFilename is the instance map file read by loadHMMap.
	NetworkFilename string = "map.json"

	// SchedulerNbWorkers and SchedulerQueueCapacity size the task scheduler
	// created in main.
	SchedulerNbWorkers     uint8  = 4
	SchedulerQueueCapacity uint32 = 30

	// MaxDeployers caps the number of deployers — not referenced in this file;
	// presumably used by another file of the package (TODO confirm).
	MaxDeployers int = 2
)
 | |
| 
 | |
// Version is the release string printed by the -version flag; expected to be
// injected at build time (e.g. via -ldflags "-X main.Version=...").
var Version string

// HOME_PATH caches the user's home directory, root of the hmdeploy config tree.
var HOME_PATH = os.Getenv("HOME")
 | |
| 
 | |
// Sentinel errors wrapped (with %w) by the helpers below so callers can
// classify failures with errors.Is.
var (
	ErrNetInfoNotFound   = errors.New("unable to get net info")
	ErrDeployerInit      = errors.New("unable to initialize deployer")
	ErrGenerateTasksTree = errors.New("unable to generate tasks tree")
)
 | |
| 
 | |
// Deployers bundles the concrete deployers driving one deployment run.
type Deployers struct {
	// nd is optional: nil when nginx deployment is disabled or not configured.
	nd      *deployers.NginxDeployer
	// sd is the mandatory swarm deployer (held by value).
	sd      deployers.SwarmDeployer
	// destroy switches the generated task tree from deploy to destroy mode.
	destroy bool
}
 | |
| 
 | |
| // generateTasksTree returns a list of linked `Task` to submit.
 | |
| //
 | |
| // It's here that all tasks are linked each other to provide the deployment ordering.
 | |
| func (d *Deployers) generateTasksTree() scheduler.Tasks {
 | |
| 	tasks := []*scheduler.Task{}
 | |
| 
 | |
| 	if d.destroy {
 | |
| 		swarmDestroy := scheduler.NewTask("swarm-destroy", d.sd.Destroy)
 | |
| 		if d.nd != nil {
 | |
| 			destroyTask := scheduler.NewTask("nginx-destroy", d.nd.Destroy, swarmDestroy)
 | |
| 			tasks = append(tasks, destroyTask)
 | |
| 			return tasks
 | |
| 		}
 | |
| 		tasks = append(tasks, swarmDestroy)
 | |
| 		return tasks
 | |
| 	}
 | |
| 
 | |
| 	var swarmTask *scheduler.Task
 | |
| 	if d.nd != nil {
 | |
| 		deployNginx := scheduler.NewTask("nginx-deploy", d.nd.Deploy)
 | |
| 		swarmTask = scheduler.NewTask("swarm-deploy", d.sd.Deploy, deployNginx)
 | |
| 	} else {
 | |
| 		swarmTask = scheduler.NewTask("swarm-deploy", d.sd.Deploy)
 | |
| 	}
 | |
| 
 | |
| 	swarmTask = scheduler.NewTask("swarm-build", d.sd.Build, swarmTask)
 | |
| 	tasks = append(tasks, swarmTask)
 | |
| 	if d.nd != nil {
 | |
| 		tasks = append(tasks, scheduler.NewTask("nginx-build", d.nd.Build))
 | |
| 	}
 | |
| 
 | |
| 	return tasks
 | |
| }
 | |
| 
 | |
| // waitForCompletion waits for all deployers to complete.
 | |
| //
 | |
| // After the completion, deployers `Clear` methods are executed to clean all ressources.
 | |
| // Then the scheduler is stopped to terminate the engine.
 | |
| func (d *Deployers) waitForCompletion(s *scheduler.Scheduler) error {
 | |
| 	var wg sync.WaitGroup
 | |
| 
 | |
| 	deps := []deployers.IDeployer{&d.sd}
 | |
| 	if d.nd != nil {
 | |
| 		deps = append(deps, d.nd)
 | |
| 	}
 | |
| 
 | |
| 	for idx := range deps {
 | |
| 		if d := deps[idx]; d != nil {
 | |
| 			wg.Add(1)
 | |
| 			go func() {
 | |
| 				defer wg.Done()
 | |
| 				<-d.Done()
 | |
| 			}()
 | |
| 		}
 | |
| 	}
 | |
| 
 | |
| 	wg.Wait()
 | |
| 
 | |
| 	var errs []error
 | |
| 	for idx := range deps {
 | |
| 		if dep := deps[idx]; d != nil {
 | |
| 			errs = append(errs, dep.Error())
 | |
| 			if !d.destroy {
 | |
| 				s.Submit(
 | |
| 					scheduler.NewTask(string(dep.Type()), dep.Clear),
 | |
| 				) //nolint: errcheck // TODO
 | |
| 			}
 | |
| 		}
 | |
| 	}
 | |
| 
 | |
| 	s.Stop()
 | |
| 	<-s.Done()
 | |
| 
 | |
| 	return errors.Join(errs...)
 | |
| }
 | |
| 
 | |
// Option gathers the optional settings applied by InitOption functions
// when building the deployers in initDeployers.
type Option struct {
	// fnCancel, when set, is propagated to deployers so they can cancel
	// the global context.
	fnCancel context.CancelFunc
	// destroy enables destroy mode (tear down instead of deploy).
	destroy  bool
	// noNginx disables the nginx deployer even when the project defines one.
	noNginx  bool
}

// InitOption mutates an Option; pass them as trailing args to initDeployers.
type InitOption func(o *Option)
 | |
| 
 | |
| func WithGlobalCancellation(fnCancel context.CancelFunc) InitOption {
 | |
| 	return func(o *Option) {
 | |
| 		o.fnCancel = fnCancel
 | |
| 	}
 | |
| }
 | |
| 
 | |
| func WithNoNginx() InitOption {
 | |
| 	return func(o *Option) {
 | |
| 		o.noNginx = true
 | |
| 	}
 | |
| }
 | |
| 
 | |
| func WithDestroy() InitOption {
 | |
| 	return func(o *Option) {
 | |
| 		o.destroy = true
 | |
| 	}
 | |
| }
 | |
| 
 | |
| func initLogger(debug bool) {
 | |
| 	zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
 | |
| 	zerolog.SetGlobalLevel(zerolog.InfoLevel)
 | |
| 	if debug {
 | |
| 		zerolog.SetGlobalLevel(zerolog.DebugLevel)
 | |
| 	}
 | |
| 	log.Logger = log.With().Caller().Logger().Output(zerolog.ConsoleWriter{Out: os.Stderr})
 | |
| }
 | |
| 
 | |
| // loadHMMap loads your instance configuration map from `$HOME/.homeserver` dir.
 | |
| func loadHMMap() (models.HMMap, error) {
 | |
| 	var hmmap models.HMMap
 | |
| 
 | |
| 	hmmap_path := path.Join(HOME_PATH, HMDeployDirname, NetworkFilename)
 | |
| 	c, err := os.ReadFile(hmmap_path)
 | |
| 	if err != nil {
 | |
| 		return hmmap, fmt.Errorf(
 | |
| 			"unable to load configuration from src=%s, err=%v",
 | |
| 			hmmap_path,
 | |
| 			err,
 | |
| 		)
 | |
| 	}
 | |
| 
 | |
| 	if err := json.Unmarshal(c, &hmmap); err != nil {
 | |
| 		return hmmap, fmt.Errorf(
 | |
| 			"unable to parse configuration from src=%s, err=%v",
 | |
| 			hmmap_path,
 | |
| 			err,
 | |
| 		)
 | |
| 	}
 | |
| 
 | |
| 	return hmmap, nil
 | |
| }
 | |
| 
 | |
| // initDeployers instanciates from `Project` and `HMMap` needed deployers and returns them.
 | |
| //
 | |
| // You can provide as an optional arg:
 | |
| //   - WithGlobalCancellation(fnCancel context.CancelFunc): close the global context, notifying all deployers to stop
 | |
| func initDeployers(
 | |
| 	ctx context.Context,
 | |
| 	hmmap *models.HMMap,
 | |
| 	project *models.Project,
 | |
| 	options ...InitOption,
 | |
| ) (Deployers, error) {
 | |
| 	var opt Option
 | |
| 	for _, o := range options {
 | |
| 		o(&opt)
 | |
| 	}
 | |
| 
 | |
| 	deps := Deployers{
 | |
| 		destroy: opt.destroy,
 | |
| 	}
 | |
| 
 | |
| 	swarmNet := hmmap.GetSwarmNetInfo()
 | |
| 	if swarmNet == nil {
 | |
| 		return deps, fmt.Errorf("%w, swarm net info does not exist", ErrNetInfoNotFound)
 | |
| 	}
 | |
| 
 | |
| 	dcli := docker.NewLocalClient()
 | |
| 	sd, err := deployers.NewSwarmDeployer(ctx, project, swarmNet, &dcli)
 | |
| 	if err != nil {
 | |
| 		return deps, fmt.Errorf("%w, unable to init swarm deployer, err=%v", ErrDeployerInit, err)
 | |
| 	}
 | |
| 	deps.sd = sd
 | |
| 
 | |
| 	if !opt.noNginx && project.Deps.NginxFile != "" {
 | |
| 		nginxNet := hmmap.GetNginxNetInfo()
 | |
| 		if nginxNet == nil {
 | |
| 			return deps, fmt.Errorf("%w, nginx net info does not exist", ErrNetInfoNotFound)
 | |
| 		}
 | |
| 
 | |
| 		d, err := deployers.NewNginxDeployer(ctx, project, nginxNet)
 | |
| 		if err != nil {
 | |
| 			return deps, fmt.Errorf(
 | |
| 				"%w, unable to init nginx deployer, err=%v",
 | |
| 				ErrDeployerInit,
 | |
| 				err,
 | |
| 			)
 | |
| 		}
 | |
| 
 | |
| 		deps.nd = &d
 | |
| 	}
 | |
| 
 | |
| 	if opt.fnCancel != nil {
 | |
| 		sd.SetCancellationFunc(opt.fnCancel)
 | |
| 		if deps.nd != nil {
 | |
| 			deps.nd.SetCancellationFunc(opt.fnCancel)
 | |
| 		}
 | |
| 	}
 | |
| 
 | |
| 	return deps, nil
 | |
| }
 | |
| 
 | |
| //nolint:funlen,mnd // TODO(rmanach): could be splitted
 | |
| func getSwarmServicesDetails(hm *models.HMMap) error {
 | |
| 	swarmNet := hm.GetSwarmNetInfo()
 | |
| 	if swarmNet == nil {
 | |
| 		return fmt.Errorf("%w, swarm net info does not exist", ErrNetInfoNotFound)
 | |
| 	}
 | |
| 
 | |
| 	cli, err := docker.NewRemoteClient(swarmNet)
 | |
| 	if err != nil {
 | |
| 		return err
 | |
| 	}
 | |
| 
 | |
| 	services, err := cli.ExtractServicesDetails()
 | |
| 	if err != nil {
 | |
| 		return err
 | |
| 	}
 | |
| 
 | |
| 	tb := utils.NewTable(
 | |
| 		utils.WithColSeparator(" | "),
 | |
| 		utils.WithHeaderBorderStyle("*"),
 | |
| 		utils.WithRowSeparator("-"),
 | |
| 		utils.WithHeader("App", 15),
 | |
| 		utils.WithHeader("Name", 15),
 | |
| 		utils.WithHeader("Image", 25),
 | |
| 		utils.WithHeader("Tag", 10),
 | |
| 		utils.WithHeader("Target->Published", 30),
 | |
| 		utils.WithHeader("Networks", 20),
 | |
| 		utils.WithHeader("Replicas", 10),
 | |
| 		utils.WithHeader("Status", 10),
 | |
| 		utils.WithHeader("Error", 20),
 | |
| 	)
 | |
| 
 | |
| 	for idx := range services {
 | |
| 		columns := []utils.Column{}
 | |
| 		columns = append(
 | |
| 			columns,
 | |
| 			utils.NewColumn("app", services[idx].App),
 | |
| 			utils.NewColumn("name", services[idx].Name),
 | |
| 			utils.NewColumn("image", services[idx].Image.Name),
 | |
| 			utils.NewColumn("tag", services[idx].Image.Tag),
 | |
| 			utils.NewColumn("networks", strings.Join(services[idx].Networks, ", ")),
 | |
| 		)
 | |
| 
 | |
| 		ports := []string{}
 | |
| 		for idy := range services[idx].Ports {
 | |
| 			ports = append(
 | |
| 				ports,
 | |
| 				fmt.Sprintf(
 | |
| 					"%d->%d",
 | |
| 					services[idx].Ports[idy].Target,
 | |
| 					services[idx].Ports[idy].Published,
 | |
| 				),
 | |
| 			)
 | |
| 		}
 | |
| 
 | |
| 		columns = append(columns, utils.NewColumn("target->published", strings.Join(ports, ", ")))
 | |
| 
 | |
| 		colSubLines := []utils.Column{}
 | |
| 		for idy := range services[idx].Replicas {
 | |
| 			nbCol := utils.NewColumn("replicas", strconv.Itoa(services[idx].Replicas[idy].Pos))
 | |
| 			statusCol := utils.NewColumn("status", string(services[idx].Replicas[idy].State))
 | |
| 			errorCol := utils.NewColumn("error", services[idx].Replicas[idy].Error)
 | |
| 
 | |
| 			if idy == 0 {
 | |
| 				columns = append(columns, nbCol, statusCol, errorCol)
 | |
| 				continue
 | |
| 			}
 | |
| 
 | |
| 			colSubLines = append(colSubLines, nbCol, statusCol, errorCol)
 | |
| 		}
 | |
| 
 | |
| 		mainRow, err := tb.AddRow(columns...)
 | |
| 		if err != nil {
 | |
| 			return err
 | |
| 		}
 | |
| 
 | |
| 		subRow, err := tb.AddRow(colSubLines...)
 | |
| 		if err != nil {
 | |
| 			return err
 | |
| 		}
 | |
| 
 | |
| 		if len(colSubLines) > 0 {
 | |
| 			mainRow.AddNext(subRow)
 | |
| 		}
 | |
| 	}
 | |
| 
 | |
| 	tb.Render()
 | |
| 	return nil
 | |
| }
 | |
| 
 | |
| func main() { //nolint: funlen //TODO: to reduce
 | |
| 	ctx, fnCancel := signal.NotifyContext(
 | |
| 		context.Background(),
 | |
| 		os.Interrupt,
 | |
| 		os.Kill,
 | |
| 	)
 | |
| 
 | |
| 	projectDir := flag.String("path", ".", "define the .homeserver project root dir")
 | |
| 	destroy := flag.Bool("destroy", false, "delete the deployed project")
 | |
| 	noNginx := flag.Bool("no-nginx", false, "no Nginx deployment")
 | |
| 	debug := flag.Bool("debug", false, "show debug logs")
 | |
| 	details := flag.Bool("details", false, "extract swarm details and return")
 | |
| 	version := flag.Bool("version", false, "extract swarm details and return")
 | |
| 	confirm := flag.Bool(
 | |
| 		"confirm",
 | |
| 		false,
 | |
| 		"do not ask for confirmation, you're the best, you don't need confirmation",
 | |
| 	)
 | |
| 	flag.Parse()
 | |
| 
 | |
| 	if *version {
 | |
| 		fmt.Println("hmdeploy version: v" + Version)
 | |
| 		os.Exit(0)
 | |
| 	}
 | |
| 
 | |
| 	initLogger(*debug)
 | |
| 
 | |
| 	hmmap, err := loadHMMap()
 | |
| 	if err != nil {
 | |
| 		log.Fatal().Err(err).Msg("failed to load conf")
 | |
| 	}
 | |
| 
 | |
| 	if *details {
 | |
| 		if err = getSwarmServicesDetails(&hmmap); err != nil {
 | |
| 			log.Fatal().Err(err).Msg("unable to extract swarm services details")
 | |
| 		}
 | |
| 		return
 | |
| 	}
 | |
| 
 | |
| 	project, err := models.ProjectFromDir(*projectDir)
 | |
| 	if err != nil {
 | |
| 		log.Fatal().Str("dir", *projectDir).Err(err).Msg("unable to init project from directory")
 | |
| 	}
 | |
| 	log.Info().
 | |
| 		Str("dir", project.Dir).
 | |
| 		Str("name", project.Name).
 | |
| 		Msg("project initialized with success")
 | |
| 
 | |
| 	initOptions := []InitOption{WithGlobalCancellation(fnCancel)}
 | |
| 	if *noNginx {
 | |
| 		initOptions = append(initOptions, WithNoNginx())
 | |
| 	}
 | |
| 	if *destroy {
 | |
| 		initOptions = append(initOptions, WithDestroy())
 | |
| 	}
 | |
| 
 | |
| 	deps, err := initDeployers(ctx, &hmmap, &project, initOptions...)
 | |
| 	if err != nil {
 | |
| 		log.Fatal().Err(err).Msg("unable to init deployers")
 | |
| 	}
 | |
| 
 | |
| 	tasks := deps.generateTasksTree()
 | |
| 	tasks.Display()
 | |
| 
 | |
| 	if !*confirm {
 | |
| 		if err := utils.Confirm(ctx, *destroy); err != nil {
 | |
| 			log.Fatal().Err(err).Msg("error while confirming execution")
 | |
| 		}
 | |
| 	}
 | |
| 
 | |
| 	s := scheduler.NewScheduler(
 | |
| 		context.Background(),
 | |
| 		SchedulerQueueCapacity,
 | |
| 		SchedulerNbWorkers,
 | |
| 		tasks...,
 | |
| 	)
 | |
| 
 | |
| 	if err := deps.waitForCompletion(s); err != nil {
 | |
| 		log.Fatal().
 | |
| 			Err(err).
 | |
| 			Str("name", project.Name).
 | |
| 			Msg("unable to deploy project, see logs for details")
 | |
| 	}
 | |
| 
 | |
| 	log.Info().Str("name", project.Name).Msg("project deployed successfully")
 | |
| }
 | 
