hmdeploy/deployers/swarm.go
2025-04-30 13:46:13 +02:00

298 lines
7.1 KiB
Go

package deployers
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"sync"
"time"
"gitea.thegux.fr/hmdeploy/connection"
"gitea.thegux.fr/hmdeploy/docker"
"gitea.thegux.fr/hmdeploy/models"
"gitea.thegux.fr/hmdeploy/utils"
"github.com/rs/zerolog/log"
)
// stateTickDuration is the polling interval used by checkState while
// waiting for the project's services to reach a target state.
const stateTickDuration = 4 * time.Second
// ErrSwarmDeployerNoArchive is returned by Deploy when Build has not been
// run first (i.e. no local archive path is set).
var ErrSwarmDeployerNoArchive = errors.New("no archive found to be deployed")
// SwarmDeployer handles the deployment of a Docker service on the swarm instance.
type SwarmDeployer struct {
	// deployer carries the shared deployment state used by the methods
	// below (ctx, project, processing flag, setDone).
	*deployer
	// conn is the SSH connection to the swarm node; used to copy files
	// and execute remote docker/tar commands.
	conn connection.IConnection
	// dloc is the local Docker client, used to save images to tarballs.
	dloc docker.IClient
	// drem is the remote Docker client, used by checkState to inspect
	// service/replica states.
	drem *docker.RemoteClient
	// archivePath is the local path of the archive produced by Build;
	// empty until Build succeeds.
	archivePath string
}

// Compile-time check that *SwarmDeployer satisfies IDeployer.
var _ IDeployer = (*SwarmDeployer)(nil)
// NewSwarmDeployer builds a SwarmDeployer for the given project, opening an
// SSH connection to the swarm node described by netInfo. The local and
// remote Docker clients are injected by the caller.
func NewSwarmDeployer(
	ctx context.Context,
	project *models.Project,
	netInfo *models.HMNetInfo,
	dloc docker.IClient,
	drem *docker.RemoteClient,
) (SwarmDeployer, error) {
	sshConn, err := connection.NewSSHConn(
		netInfo.IP.String(),
		netInfo.SSH.User,
		netInfo.SSH.Port,
		netInfo.SSH.PrivKey,
	)
	if err != nil {
		return SwarmDeployer{}, err
	}
	return SwarmDeployer{
		deployer: newDeployer(ctx, Swarm, project),
		conn:     &sshConn,
		dloc:     dloc,
		drem:     drem,
	}, nil
}
// close terminates the SSH connection to the swarm node.
func (sd *SwarmDeployer) close() error {
	err := sd.conn.Close()
	return err
}
// clean removes the local archive file and the deployment artifacts
// (compose file, env file, image tarballs) from the remote host.
// A failure to remove the local archive is only logged; the returned
// error reflects the remote cleanup command.
func (sd *SwarmDeployer) clean() error {
	if rmErr := os.Remove(sd.archivePath); rmErr != nil {
		log.Err(rmErr).Str("archive", sd.archivePath).Msg("unable to clean local swarm archive file")
	}
	remoteCleanCmd := fmt.Sprintf("rm -f %s %s *.tar.gz *.tar", models.ComposeFile, models.EnvFile)
	_, execErr := sd.conn.Execute(remoteCleanCmd)
	return execErr
}
// Clear tears down the deployment session: it removes the local and remote
// artifacts, then closes the SSH connection. Failures are logged but never
// propagated, so Clear always returns nil (best-effort cleanup).
func (sd *SwarmDeployer) Clear() error {
	log.Debug().Msg("clearing swarm deployment...")
	if err := sd.clean(); err != nil {
		// fixed typo: "remotly" -> "remotely"
		log.Err(err).Msg("unable to clean swarm conf remotely")
	}
	if err := sd.close(); err != nil {
		log.Err(err).Msg("unable to close swarm conn")
	}
	log.Debug().Msg("clear swarm deployment done")
	return nil
}
// Build builds the archive with mandatory files to deploy a swarm service.
//
// After the build, the path of the local archive built is set in
// the `archivePath` field.
// Build builds the archive with mandatory files to deploy a swarm service.
//
// Each project image is saved to a local tarball and transferred to the
// remote host directly (instead of being packed into the archive). The
// compose file — and the .env file when the project declares one — are
// bundled into an archive whose local path is stored in the
// `archivePath` field.
func (sd *SwarmDeployer) Build() error {
	sd.processing.Store(true)
	defer sd.processing.Store(false)
	// Bail out early (non-blocking check) if the deployment was cancelled.
	select {
	case <-sd.ctx.Done():
		sd.setDone(nil)
		return fmt.Errorf("%w, swarm project build skipped", ErrContextDone)
	default:
	}
	log.Info().Msg("building swarm archive for deployment...")
	var filesToArchive []string
	for idx := range sd.project.ImageNames {
		tarFile, err := sd.dloc.Save(sd.project.ImageNames[idx], sd.project.Dir)
		if err != nil {
			sd.setDone(err)
			return err
		}
		// NOTE: deferred removals accumulate until Build returns; that is
		// acceptable here since the tarballs are only needed transiently.
		defer os.Remove(tarFile) //nolint: errcheck // defered
		// copy the file directly instead of adding it in the tar archive
		log.Info().Str("image", tarFile).Msg("Transferring image...")
		if err := sd.conn.CopyFile(tarFile, filepath.Base(tarFile)); err != nil {
			// Mark the deployment as failed, consistently with every other
			// error path in this method (was previously missing).
			sd.setDone(err)
			return err
		}
		log.Info().Str("image", tarFile).Msg("image transferred with success")
	}
	if envFilePath := sd.project.Deps.EnvFile; envFilePath != "" {
		filesToArchive = append(filesToArchive, envFilePath)
		log.Info().Msg(".env file added to the archive for deployment")
	}
	filesToArchive = append(filesToArchive, sd.project.Deps.ComposeFile)
	archivePath, err := utils.CreateArchive(
		sd.project.Dir,
		fmt.Sprintf("%s-%s", sd.project.Name, "swarm"),
		filesToArchive...)
	if err != nil {
		sd.setDone(err)
		return err
	}
	sd.archivePath = archivePath
	log.Info().Str("archive", archivePath).Msg("swarm archive built")
	return nil
}
// Deploy transfers the built archive to the swarm node, loads the project
// images, extracts the deployment files and runs `docker stack deploy`,
// then waits (via checkState) for the services to reach the Running state.
//
// Build must have been called first: Deploy fails with
// ErrSwarmDeployerNoArchive when no archive path is set.
func (sd *SwarmDeployer) Deploy() error {
	sd.processing.Store(true)
	defer sd.processing.Store(false)
	// Bail out early (non-blocking check) if the deployment was cancelled.
	select {
	case <-sd.ctx.Done():
		sd.setDone(nil)
		return fmt.Errorf("%w, swarm deployment skipped", ErrContextDone)
	default:
	}
	if sd.archivePath == "" {
		sd.setDone(ErrSwarmDeployerNoArchive)
		return ErrSwarmDeployerNoArchive
	}
	log.Info().Str("archive", sd.archivePath).Msg("deploying archive to swarm...")
	// Load the image tarballs previously transferred by Build.
	for idx := range sd.project.ImageNames {
		if _, err := sd.conn.Execute("docker image load -i " + sd.project.ImageNames[idx] + ".tar"); err != nil {
			// Mark the deployment as failed, consistently with every other
			// error path in this method (was previously missing).
			sd.setDone(err)
			return err
		}
	}
	archiveDestPath := filepath.Base(sd.archivePath)
	log.Info().
		Str("archive", sd.archivePath).
		// fixed typo: "tranferring" -> "transferring"
		Msg("archive built with success, transferring to swarm for deployment...")
	if err := sd.conn.CopyFile(sd.archivePath, archiveDestPath); err != nil {
		sd.setDone(err)
		return err
	}
	if _, err := sd.conn.Execute(fmt.Sprintf("tar xzvf %s", archiveDestPath)); err != nil {
		sd.setDone(err)
		return err
	}
	log.Info().Str("project", sd.project.Name).Msg("deploying swarm project...")
	composeFileBase := filepath.Base(sd.project.Deps.ComposeFile)
	if _, err := sd.conn.Execute(fmt.Sprintf("docker stack deploy -c %s %s --with-registry-auth", composeFileBase, sd.project.Name)); err != nil {
		sd.setDone(err)
		return err
	}
	if err := sd.checkState(docker.Running); err != nil {
		sd.setDone(err)
		return err
	}
	log.Info().Msg("swarm deployment done with success")
	sd.setDone(nil)
	return nil
}
// checkState checks the state of the deployment.
// It loops over all the services deployed for the project (replicas included) and
// checks if the `target` state matches the services states.
//
// There's a timeout (default: 30s) that you can set with the options: `WithTimeout`.
func (sd *SwarmDeployer) checkState(target docker.ServiceStatus, options ...fnStateOption) error {
	var opts checkStateOption
	for _, opt := range options {
		opt(&opts)
	}
	var checkErr error
	var wg sync.WaitGroup
	wg.Add(1)
	// The goroutine is waited on immediately below, so checkState is
	// effectively synchronous; checkErr is safely read after wg.Wait().
	go func() {
		defer wg.Done()
		timeoutDuration := DefaultStateTimeout
		if opts.timeout != nil {
			timeoutDuration = *opts.timeout
		}
		ticker := time.NewTicker(stateTickDuration)
		// Stop the ticker to release its resources (it was previously
		// never stopped — a leak per the time.Ticker documentation).
		defer ticker.Stop()
		ctx, fnCancel := context.WithDeadline(sd.ctx, time.Now().UTC().Add(timeoutDuration))
		defer fnCancel()
		for {
			select {
			case <-ticker.C:
				log.Info().
					Str("project", sd.project.Name).
					Str("state", string(target)).
					Msg("checking project state...")
				srvs, err := sd.drem.ExtractServicesDetails(docker.WithName(sd.project.Name))
				if err != nil {
					checkErr = err
					return
				}
				ready := true
			mainloop:
				for idx := range srvs {
					for idy := range srvs[idx].Replicas {
						// target already has type docker.ServiceStatus;
						// the previous conversion was redundant.
						if srvs[idx].Replicas[idy].State != target {
							log.Info().Dur("retry (ms)", stateTickDuration).Msg("project not in good state yet, retrying...")
							ready = false
							break mainloop
						}
					}
				}
				if ready {
					return
				}
			case <-ctx.Done():
				msg := "swarm deployment skipped"
				if errors.Is(ctx.Err(), context.DeadlineExceeded) {
					msg = "swarm check state timeout"
				}
				checkErr = fmt.Errorf("%w, %s", ErrContextDone, msg)
				return
			}
		}
	}()
	wg.Wait()
	return checkErr
}
// Destroy removes the project's swarm stack from the remote node and waits
// (via checkState) until the services reach the Shutdown state.
func (sd *SwarmDeployer) Destroy() error {
	sd.processing.Store(true)
	defer sd.processing.Store(false)
	log.Info().Str("project", sd.project.Name).Msg("destroying swarm project...")
	rmCmd := fmt.Sprintf("docker stack rm %s", sd.project.Name)
	if _, err := sd.conn.Execute(rmCmd); err != nil {
		sd.setDone(err)
		return err
	}
	if err := sd.checkState(docker.Shutdown); err != nil {
		sd.setDone(err)
		return err
	}
	log.Info().Msg("swarm undeployment done with success")
	sd.setDone(nil)
	return nil
}