move docker commands into client
Commit d0bd0fb9f5 (parent 15f1c5445f)
@@ -29,22 +29,8 @@ const (
 	Swarm DeployerType = "swarm"
 
 	GracefulTimeout = 10 * time.Second
-
-	DefaultStateTimeout = 30 * time.Second
 )
-
-type checkStateOption struct {
-	timeout *time.Duration
-}
-
-type fnStateOption func(c *checkStateOption)
-
-func WithTimeout(duration time.Duration) fnStateOption {
-	return func(c *checkStateOption) {
-		c.timeout = &duration
-	}
-}
 
 // Base struct of the deployers.
 // It handles the main information needed to build a deployer.
 //
@@ -6,8 +6,6 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
-	"sync"
-	"time"
 
 	"gitea.thegux.fr/hmdeploy/connection"
 	"gitea.thegux.fr/hmdeploy/docker"
@@ -16,8 +14,6 @@ import (
 	"github.com/rs/zerolog/log"
 )
 
-const stateTickDuration = 4 * time.Second
-
 var ErrSwarmDeployerNoArchive = errors.New("no archive found to be deployed")
 
 // SwarmDeployer handles the deployment of a Docker service on the swarm instance.
@@ -34,28 +30,16 @@ var _ IDeployer = (*SwarmDeployer)(nil)
 func NewSwarmDeployer(
 	ctx context.Context,
 	project *models.Project,
-	netInfo *models.HMNetInfo,
 	dloc docker.IClient,
 	drem *docker.RemoteClient,
-) (SwarmDeployer, error) {
+) SwarmDeployer {
 	var sd SwarmDeployer
 
-	conn, err := connection.NewSSHConn(
-		netInfo.IP.String(),
-		netInfo.SSH.User,
-		netInfo.SSH.Port,
-		netInfo.SSH.PrivKey,
-	)
-	if err != nil {
-		return sd, err
-	}
-
-	sd.conn = &conn
 	sd.dloc = dloc
 	sd.drem = drem
 	sd.deployer = newDeployer(ctx, Swarm, project)
 
-	return sd, nil
+	return sd
 }
 
 func (sd *SwarmDeployer) close() error {
@@ -169,10 +153,9 @@ func (sd *SwarmDeployer) Deploy() error {
 
 	log.Info().Str("archive", sd.archivePath).Msg("deploying archive to swarm...")
 
-	for idx := range sd.project.ImageNames {
-		if _, err := sd.conn.Execute("docker image load -i " + sd.project.ImageNames[idx] + ".tar"); err != nil {
-			return err
-		}
-	}
+	if err := sd.drem.LoadImages(sd.project.ImageNames...); err != nil {
+		sd.setDone(err)
+		return err
+	}
 
 	archiveDestPath := filepath.Base(sd.archivePath)
@@ -191,12 +174,7 @@ func (sd *SwarmDeployer) Deploy() error {
 
 	log.Info().Str("project", sd.project.Name).Msg("deploying swarm project...")
 	composeFileBase := filepath.Base(sd.project.Deps.ComposeFile)
-	if _, err := sd.conn.Execute(fmt.Sprintf("docker stack deploy -c %s %s --with-registry-auth", composeFileBase, sd.project.Name)); err != nil {
-		sd.setDone(err)
-		return err
-	}
-
-	if err := sd.checkState(docker.Running); err != nil {
+	if err := sd.drem.DeployStack(sd.ctx, sd.project.Name, composeFileBase, docker.WithCheckState()); err != nil {
 		sd.setDone(err)
 		return err
 	}
@@ -207,85 +185,12 @@ func (sd *SwarmDeployer) Deploy() error {
 	return nil
 }
 
-// checkState checks the state of the deployment.
-// It loops over all the services deployed for the project (replicas included) and
-// checks if the `target` state match the services states.
-//
-// There's a timeout (default: 30s) that you can set with the options: `WithTimeout`.
-func (sd *SwarmDeployer) checkState(target docker.ServiceStatus, options ...fnStateOption) error {
-	var opts checkStateOption
-	for _, opt := range options {
-		opt(&opts)
-	}
-
-	var checkErr error
-	var wg sync.WaitGroup
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-
-		timeoutDuration := DefaultStateTimeout
-		if opts.timeout != nil {
-			timeoutDuration = *opts.timeout
-		}
-
-		ticker := time.NewTicker(stateTickDuration)
-		ctx, fnCancel := context.WithDeadline(sd.ctx, time.Now().UTC().Add(timeoutDuration))
-		defer fnCancel()
-
-		for {
-			select {
-			case <-ticker.C:
-				log.Info().
-					Str("project", sd.project.Name).
-					Str("state", string(target)).
-					Msg("checking project state...")
-				srvs, err := sd.drem.ExtractServicesDetails(docker.WithName(sd.project.Name))
-				if err != nil {
-					checkErr = err
-					return
-				}
-
-				ready := true
-			mainloop:
-				for idx := range srvs {
-					for idy := range srvs[idx].Replicas {
-						if srvs[idx].Replicas[idy].State != docker.ServiceStatus(target) {
-							log.Info().Dur("retry (ms)", stateTickDuration).Msg("project not in good state yet, retrying...")
-							ready = false
-							break mainloop
-						}
-					}
-				}
-				if ready {
-					return
-				}
-			case <-ctx.Done():
-				msg := "swarm deployment skipped"
-				if errors.Is(ctx.Err(), context.DeadlineExceeded) {
-					msg = "swarm check state timeout"
-				}
-				checkErr = fmt.Errorf("%w, %s", ErrContextDone, msg)
-				return
-			}
-		}
-	}()
-
-	wg.Wait()
-	return checkErr
-}
-
 func (sd *SwarmDeployer) Destroy() error {
 	sd.processing.Store(true)
 	defer sd.processing.Store(false)
 
 	log.Info().Str("project", sd.project.Name).Msg("destroying swarm project...")
-	if _, err := sd.conn.Execute(fmt.Sprintf("docker stack rm %s", sd.project.Name)); err != nil {
-		sd.setDone(err)
-		return err
-	}
-
-	if err := sd.checkState(docker.Shutdown); err != nil {
+	if err := sd.drem.DestroyStack(sd.ctx, sd.project.Name, docker.WithCheckState()); err != nil {
 		sd.setDone(err)
 		return err
 	}
docker/client.go (136 changed lines)
@@ -1,15 +1,24 @@
 package docker
 
 import (
+	"context"
 	"encoding/json"
 	"errors"
+	"fmt"
 	"os"
 	"os/exec"
 	"path/filepath"
+	"sync"
+	"time"
 
 	"gitea.thegux.fr/hmdeploy/connection"
 	"gitea.thegux.fr/hmdeploy/models"
 	"github.com/rs/zerolog/log"
 )
 
+const (
+	stateTickDuration   = 4 * time.Second
+	defaultStateTimeout = 30 * time.Second
+)
+
 var (
@@ -17,8 +26,22 @@ var (
 
 	ErrDockerClientExtractServicesInputLength = errors.New("bad input length")
 	ErrDockerClientExtractServicesParse       = errors.New("parse error")
+
+	ErrContextDone = errors.New("unable to execute, context done")
 )
 
+type stackOption struct {
+	checkState bool
+}
+
+type fnStackOption func(s *stackOption)
+
+func WithCheckState() fnStackOption {
+	return func(s *stackOption) {
+		s.checkState = true
+	}
+}
+
 func parseIDs(cmdOutput string) []string {
 	ids := []string{}
 	bufLine := []rune{}
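The stackOption/WithCheckState block above is the functional-options pattern: a command accepts zero or more fnStackOption values and folds them into a flags struct before acting on it. A minimal standalone sketch of the same pattern (every name below is illustrative, not part of this repo):

package main

import "fmt"

// option mirrors stackOption: a plain struct of flags.
type option struct {
	checkState bool
}

// fnOption mirrors fnStackOption: a function that mutates the flags struct.
type fnOption func(o *option)

// withCheckState mirrors WithCheckState: it returns a closure that flips one flag.
func withCheckState() fnOption {
	return func(o *option) { o.checkState = true }
}

func deploy(name string, options ...fnOption) {
	var opts option
	for _, opt := range options {
		opt(&opts) // fold every option into the struct
	}
	fmt.Printf("deploy %s (checkState=%v)\n", name, opts.checkState)
}

func main() {
	deploy("api")                   // defaults: no state check
	deploy("api", withCheckState()) // opt in to the post-deploy check
}

The payoff is visible in DeployStack and DestroyStack below: new behaviors can be added per call site without touching the method signatures.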
@@ -164,6 +187,57 @@ func (c *RemoteClient) extractServicesDetails(ids ...string) (Services, error) {
 	return services, nil
 }
 
+func (c *RemoteClient) LoadImages(imageNames ...string) error {
+	for idx := range imageNames {
+		if _, err := c.conn.Execute("docker image load -i " + imageNames[idx] + ".tar"); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (c *RemoteClient) DeployStack(
+	ctx context.Context,
+	projectName, composeFilepath string,
+	options ...fnStackOption,
+) error {
+	if _, err := c.conn.Execute(fmt.Sprintf("docker stack deploy -c %s %s --with-registry-auth", composeFilepath, projectName)); err != nil {
+		return err
+	}
+
+	var opts stackOption
+	for _, opt := range options {
+		opt(&opts)
+	}
+
+	if opts.checkState {
+		return c.checkState(ctx, projectName, Running)
+	}
+
+	return nil
+}
+
+func (c *RemoteClient) DestroyStack(
+	ctx context.Context,
+	projectName string,
+	options ...fnStackOption,
+) error {
+	if _, err := c.conn.Execute(fmt.Sprintf("docker stack rm %s", projectName)); err != nil {
+		return err
+	}
+
+	var opts stackOption
+	for _, opt := range options {
+		opt(&opts)
+	}
+
+	if opts.checkState {
+		return c.checkState(ctx, projectName, Shutdown)
+	}
+
+	return nil
+}
+
 func (c *RemoteClient) ExtractServicesDetails(options ...fnExtractOption) (Services, error) {
 	var opts extractOption
 	for _, opt := range options {
@@ -177,3 +251,65 @@ func (c *RemoteClient) ExtractServicesDetails(options ...fnExtractOption) (Services, error) {
 
 	return c.extractServicesDetails(ids...)
 }
+
+// checkState checks the state of the deployment.
+// It loops over all the services deployed for the project (replicas included) and
+// checks whether the `target` state matches the services' states.
+//
+// The check is bounded by a fixed timeout (`defaultStateTimeout`, 30s).
+func (c *RemoteClient) checkState(
+	ctx context.Context,
+	projectName string,
+	target ServiceStatus,
+) error {
+	var checkErr error
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+
+		ticker := time.NewTicker(stateTickDuration)
+		ctxTimeout, fnCancel := context.WithDeadline(ctx, time.Now().UTC().Add(defaultStateTimeout))
+		defer fnCancel()
+
+		for {
+			select {
+			case <-ticker.C:
+				log.Info().
+					Str("project", projectName).
+					Str("state", string(target)).
+					Msg("checking project state...")
+				srvs, err := c.ExtractServicesDetails(WithName(projectName))
+				if err != nil {
+					checkErr = err
+					return
+				}
+
+				ready := true
+			mainloop:
+				for idx := range srvs {
+					for idy := range srvs[idx].Replicas {
+						if srvs[idx].Replicas[idy].State != target {
+							log.Info().Dur("retry (ms)", stateTickDuration).Msg("project not in good state yet, retrying...")
+							ready = false
+							break mainloop
+						}
+					}
+				}
+				if ready {
+					return
+				}
+			case <-ctxTimeout.Done():
+				msg := "swarm deployment skipped"
+				if errors.Is(ctxTimeout.Err(), context.DeadlineExceeded) {
+					msg = "swarm check state timeout"
+				}
+				checkErr = fmt.Errorf("%w, %s", ErrContextDone, msg)
+				return
+			}
+		}
+	}()
+
+	wg.Wait()
+	return checkErr
+}
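The moved checkState is a poll-until-deadline loop: a ticker fires every stateTickDuration, each tick lists the project's services, and a deadline derived from defaultStateTimeout bounds the whole wait. Note that the goroutine plus wg.Wait() is functionally the same as running the loop inline, since the caller blocks immediately. A stripped-down sketch of that tick/deadline skeleton, with a stand-in probe function instead of ExtractServicesDetails (the sketch also stops the ticker, which the code above leaves running until collected):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitFor polls probe() every tick until it reports ready or the timeout expires.
// Same skeleton as RemoteClient.checkState, minus the service-listing details.
func waitFor(ctx context.Context, tick, timeout time.Duration, probe func() (bool, error)) error {
	ticker := time.NewTicker(tick)
	defer ticker.Stop()
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	for {
		select {
		case <-ticker.C:
			ready, err := probe()
			if err != nil {
				return err
			}
			if ready {
				return nil
			}
		case <-ctx.Done():
			if errors.Is(ctx.Err(), context.DeadlineExceeded) {
				return fmt.Errorf("check state timeout")
			}
			return ctx.Err()
		}
	}
}

func main() {
	start := time.Now()
	err := waitFor(context.Background(), 200*time.Millisecond, time.Second, func() (bool, error) {
		// pretend the service converges after 500ms
		return time.Since(start) > 500*time.Millisecond, nil
	})
	fmt.Println("result:", err)
}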
main.go (5 changed lines)
@@ -214,10 +214,7 @@ func initDeployers(
 		return deps, err
 	}
 
-	sd, err := deployers.NewSwarmDeployer(ctx, project, swarmNet, &dloc, &drem)
-	if err != nil {
-		return deps, fmt.Errorf("%w, unable to init swarm deployer, err=%v", ErrDeployerInit, err)
-	}
+	sd := deployers.NewSwarmDeployer(ctx, project, &dloc, &drem)
 	deps.sd = sd
 
 	if !opt.noNginx && project.Deps.NginxFile != "" {
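With the commit applied, call sites reduce to handing the project to the remote client and opting into the state check per command. A hypothetical caller against the new RemoteClient API (deployProject and its parameters are invented for illustration, and it assumes an already-connected client):

package deploy

import (
	"context"

	"gitea.thegux.fr/hmdeploy/docker"
)

// deployProject loads the project's image tarballs on the remote host, then
// deploys the stack and polls until every replica is Running (or ~30s elapse).
func deployProject(ctx context.Context, drem *docker.RemoteClient, name, composeFile string, images []string) error {
	if err := drem.LoadImages(images...); err != nil {
		return err
	}
	return drem.DeployStack(ctx, name, composeFile, docker.WithCheckState())
}

// teardown removes the stack and waits for every replica to reach Shutdown.
func teardown(ctx context.Context, drem *docker.RemoteClient, name string) error {
	return drem.DestroyStack(ctx, name, docker.WithCheckState())
}

Centralising the docker command strings in RemoteClient keeps the deployers free of shell syntax, lets every stack command share one state-check implementation, and removes the SSH setup (and its error path) from NewSwarmDeployer.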