package main

import (
	"context"
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	"os"
	"os/signal"
	"path"
	"sync"
	"syscall"

	"gitea.thegux.fr/hmdeploy/deployers"
	"gitea.thegux.fr/hmdeploy/docker"
	"gitea.thegux.fr/hmdeploy/models"
	"gitea.thegux.fr/hmdeploy/scheduler"
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

const (
	HMDeployDirname        string = ".homeserver"
	NetworkFilename        string = "map.json"
	SchedulerNbWorkers     uint8  = 4
	SchedulerQueueCapacity uint32 = 30
	MaxDeployers           int    = 2
)

var HOME_PATH = os.Getenv("HOME")

var (
	ErrNetInfoNotFound   = errors.New("unable to get net info")
	ErrDeployerInit      = errors.New("unable to initialize deployer")
	ErrGenerateTasksTree = errors.New("unable to generate tasks tree")
)

// Option holds the optional settings accepted by initDeployers.
type Option struct {
	fnCancel context.CancelFunc
}

type InitOption func(o *Option)

// WithGlobalCancellation registers fnCancel on every deployer so any of them
// can cancel the global context and notify the others to stop.
func WithGlobalCancellation(fnCancel context.CancelFunc) InitOption {
	return func(o *Option) {
		o.fnCancel = fnCancel
	}
}

func initLogger() {
	zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
	log.Logger = log.With().Caller().Logger().Output(zerolog.ConsoleWriter{Out: os.Stderr})
}

// loadHMMap loads the instance configuration map from the `$HOME/.homeserver` dir.
func loadHMMap() (models.HMMap, error) {
	var hmmap models.HMMap

	hmmapPath := path.Join(HOME_PATH, HMDeployDirname, NetworkFilename)
	c, err := os.ReadFile(hmmapPath)
	if err != nil {
		return hmmap, fmt.Errorf(
			"unable to load configuration from src=%s, err=%v",
			hmmapPath,
			err,
		)
	}
	if err := json.Unmarshal(c, &hmmap); err != nil {
		return hmmap, fmt.Errorf(
			"unable to parse configuration from src=%s, err=%v",
			hmmapPath,
			err,
		)
	}

	log.Info().Str("conf", hmmapPath).Msg("hmmap loaded successfully")
	return hmmap, nil
}

// initDeployers instantiates the deployers required by `Project` from the
// `HMMap` network info and returns them.
//
// Optional arguments:
//   - WithGlobalCancellation(fnCancel context.CancelFunc): close the global context, notifying all deployers to stop
func initDeployers(
	ctx context.Context,
	hmmap *models.HMMap,
	project *models.Project,
	options ...InitOption,
) ([]deployers.IDeployer, error) {
	swarmNet := hmmap.GetSwarmNetInfo()
	if swarmNet == nil {
		return nil, fmt.Errorf("%w, swarm net info does not exist", ErrNetInfoNotFound)
	}

	dcli := docker.NewClient()
	sd, err := deployers.NewSwarmDeployer(ctx, project, swarmNet, &dcli)
	if err != nil {
		return nil, fmt.Errorf("%w, unable to init swarm deployer, err=%v", ErrDeployerInit, err)
	}

	// The nginx deployer is only created when the project ships an nginx
	// file; a nil pointer marks it as absent.
	var nd *deployers.NginxDeployer
	if project.Deps.NginxFile != "" {
		nginxNet := hmmap.GetNginxNetInfo()
		if nginxNet == nil {
			return nil, fmt.Errorf("%w, nginx net info does not exist", ErrNetInfoNotFound)
		}
		d, err := deployers.NewNginxDeployer(ctx, project, nginxNet)
		if err != nil {
			return nil, fmt.Errorf(
				"%w, unable to init nginx deployer, err=%v",
				ErrDeployerInit,
				err,
			)
		}
		nd = &d
	}

	var opt Option
	for _, o := range options {
		o(&opt)
	}
	if opt.fnCancel != nil {
		sd.SetCancellationFunc(opt.fnCancel)
		if nd != nil {
			nd.SetCancellationFunc(opt.fnCancel)
		}
	}

	// Keep a fixed layout (swarm first, nginx second) so generateTasksTree can
	// address each deployer by index. The nginx slot stays an untyped nil when
	// no nginx file is configured: storing the nil *NginxDeployer directly
	// would produce a non-nil interface value and defeat the `!= nil` checks
	// performed downstream.
	ds := []deployers.IDeployer{&sd, nil}
	if nd != nil {
		ds[1] = nd
	}
	return ds, nil
}
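
// For orientation, a sketch of the two task chains generateTasksTree wires up
// below, assuming the trailing *Task arguments of scheduler.NewTask are
// follow-up tasks run once the named task completes (which is how the links
// in the function read):
//
//	deploy:  swarm-build -> swarm-deploy -> nginx-deploy
//	         nginx-build                 (independent root task)
//	destroy: nginx-destroy -> swarm-destroy
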
// generateTasksTree returns a list of linked `Task` to submit.
//
// This is where tasks are linked to each other to define the deployment ordering.
func generateTasksTree(deployers []deployers.IDeployer, destroy bool) ([]*scheduler.Task, error) {
	if len(deployers) != MaxDeployers {
		return nil, fmt.Errorf("%w, expected exactly %d deployers", ErrGenerateTasksTree, MaxDeployers)
	}

	sd := deployers[0]
	nd := deployers[1] // nil interface when the project has no nginx file

	tasks := []*scheduler.Task{}
	if destroy {
		// Tear down in reverse order: remove the nginx frontend first, then
		// the swarm services behind it.
		swarmDestroy := scheduler.NewTask("swarm-destroy", sd.Destroy)
		if nd != nil {
			tasks = append(tasks, scheduler.NewTask("nginx-destroy", nd.Destroy, swarmDestroy))
		} else {
			tasks = append(tasks, swarmDestroy)
		}
		return tasks, nil
	}

	var swarmTask *scheduler.Task
	if nd != nil {
		deployNginx := scheduler.NewTask("nginx-deploy", nd.Deploy)
		swarmTask = scheduler.NewTask("swarm-deploy", sd.Deploy, deployNginx)
	} else {
		swarmTask = scheduler.NewTask("swarm-deploy", sd.Deploy)
	}
	swarmTask = scheduler.NewTask("swarm-build", sd.Build, swarmTask)

	tasks = append(tasks, swarmTask)
	if nd != nil {
		tasks = append(tasks, scheduler.NewTask("nginx-build", nd.Build))
	}
	return tasks, nil
}

// waitForCompletion waits for all deployers to complete.
//
// After completion, each deployer's `Clear` method is executed to clean up
// all resources, then the scheduler is stopped to terminate the engine.
func waitForCompletion(
	deployers []deployers.IDeployer,
	s *scheduler.Scheduler,
	destroy bool,
) error {
	// Wait for every active deployer to signal completion.
	var wg sync.WaitGroup
	for idx := range deployers {
		if d := deployers[idx]; d != nil {
			wg.Add(1)
			go func() {
				defer wg.Done()
				<-d.Done()
			}()
		}
	}
	wg.Wait()

	var errs []error
	for idx := range deployers {
		if d := deployers[idx]; d != nil {
			errs = append(errs, d.Error())
			if !destroy {
				s.Submit(scheduler.NewTask(string(d.Type()), d.Clear)) //nolint: errcheck // TODO
			}
		}
	}

	s.Stop()
	<-s.Done()
	return errors.Join(errs...)
}

func main() {
	// os.Kill (SIGKILL) cannot be caught, so listen for SIGTERM instead.
	ctx, fnCancel := signal.NotifyContext(
		context.Background(),
		os.Interrupt,
		syscall.SIGTERM,
	)

	initLogger()
	log.Info().Msg("hmdeploy started")

	projectDir := flag.String("path", ".", "define the .homeserver project root dir")
	destroy := flag.Bool("destroy", false, "delete the deployed project")
	flag.Parse()

	hmmap, err := loadHMMap()
	if err != nil {
		log.Fatal().Err(err).Msg("failed to load conf")
	}

	project, err := models.ProjectFromDir(*projectDir)
	if err != nil {
		log.Fatal().Str("dir", *projectDir).Err(err).Msg("unable to init project from directory")
	}
	log.Info().
		Str("dir", project.Dir).
		Str("name", project.Name).
		Msg("project initialized successfully")

	deployers, err := initDeployers(ctx, &hmmap, &project, WithGlobalCancellation(fnCancel))
	if err != nil {
		log.Fatal().Err(err).Msg("unable to init deployers")
	}

	tasks, err := generateTasksTree(deployers, *destroy)
	if err != nil {
		log.Fatal().Err(err).Msg("unable to generate tasks tree")
	}

	// The scheduler gets a fresh background context rather than ctx, so the
	// `Clear` tasks submitted by waitForCompletion can still run after a
	// cancellation of the global context.
	s := scheduler.NewScheduler(
		context.Background(),
		SchedulerQueueCapacity,
		SchedulerNbWorkers,
		tasks...,
	)

	if err := waitForCompletion(deployers, s, *destroy); err != nil {
		log.Fatal().
			Err(err).
			Str("name", project.Name).
			Msg("unable to deploy project, see logs for details")
	}

	if *destroy {
		log.Info().Str("name", project.Name).Msg("project destroyed successfully")
		return
	}
	log.Info().Str("name", project.Name).Msg("project deployed successfully")
}
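
// Example invocation (a sketch: the binary name `hmdeploy` is assumed from the
// module path and the "hmdeploy started" log line; the flags are the ones
// defined in main above):
//
//	hmdeploy -path ./myproject            # build and deploy the project
//	hmdeploy -path ./myproject -destroy   # tear the deployment down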