package docker

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"time"

	"gitea.sonak.fr/hmdeploy/connection"
	"gitea.sonak.fr/hmdeploy/models"
	"github.com/rs/zerolog/log"
)

const (
	stateTickDuration   = 4 * time.Second
	defaultStateTimeout = 10 * time.Minute
)

var (
	ErrDockerClientSave = errors.New("unable to save image into tar")

	ErrDockerClientExtractServicesInputLength = errors.New("bad input length")
	ErrDockerClientExtractServicesParse       = errors.New("parse error")

	ErrContextDone = errors.New("unable to execute, context done")
)

type stackOption struct {
	baseDir    string
	checkState bool
}

type fnStackOption func(s *stackOption)

// WithCheckState makes DeployStack/DestroyStack block until every service
// replica of the stack reaches the expected state (or the check times out).
func WithCheckState() fnStackOption {
	return func(s *stackOption) {
		s.checkState = true
	}
}

// WithBaseDir prepends dir to the compose file path used by DeployStack.
func WithBaseDir(dir string) fnStackOption {
	return func(s *stackOption) {
		s.baseDir = dir
	}
}

// parseIDs splits newline-terminated command output into one ID per line.
// A trailing fragment without a final '\n' is dropped, which is fine for
// `docker service ls -q` since its output ends with a newline.
func parseIDs(cmdOutput string) []string {
	ids := []string{}
	bufLine := []rune{}
	for _, c := range cmdOutput {
		if c == '\n' {
			ids = append(ids, string(bufLine))
			bufLine = bufLine[:0]

			continue
		}
		bufLine = append(bufLine, c)
	}
	return ids
}
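
// A quick illustration of parseIDs on typical `docker service ls -q` output
// (the IDs here are hypothetical):
//
//	parseIDs("k2x\np9q\n") // -> []string{"k2x", "p9q"}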

type IClient interface {
	Save(imageName, dest string) (string, error)
}

// LocalClient is a simple Docker client wrapping the local Docker daemon.
// It does not use the Docker API; instead it shells out to the docker CLI
// and collects the output.
//
// NOTE: for now this is fine, it only needs one command, so there is no need
// to pull in a heavy dedicated library exposing the full Docker client API.
type LocalClient struct{}

var _ IClient = (*LocalClient)(nil)

func NewLocalClient() LocalClient {
	return LocalClient{}
}

// Save saves the image `imageName` (tag included) as a tar archive in the
// target directory `dest`. The `dest` directory must already exist with the
// correct permissions. It returns the path of the written tar file.
func (c *LocalClient) Save(imageName, dest string) (string, error) {
	destInfo, err := os.Stat(dest)
	if err != nil {
		return "", fmt.Errorf("unable to stat file, dir=%s, err=%v", dest, err)
	}

	if !destInfo.IsDir() {
		return "", fmt.Errorf("dest must be a directory, dir=%s", dest)
	}

	tarFile := fmt.Sprintf("%s.tar", imageName)

	cmd := exec.Command("docker", "save", "-o", tarFile, imageName)
	cmd.Dir = dest
	if _, err := cmd.Output(); err != nil {
		return "", fmt.Errorf(
			"%w, dir=%s, image=%s, err=%v",
			ErrDockerClientSave,
			dest,
			imageName,
			err,
		)
	}

	return filepath.Join(dest, tarFile), nil
}
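
// Example usage of Save (a minimal sketch; the image name and destination
// directory are hypothetical, and the directory must already exist):
//
//	c := NewLocalClient()
//	tarPath, err := c.Save("myapp:latest", "/tmp/images")
//	if err != nil {
//		// handle the error
//	}
//	// tarPath == "/tmp/images/myapp:latest.tar"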

// RemoteClient is a simple Docker client for a remote daemon.
// It does not use the Docker API; instead it runs shell commands over an SSH
// connection and collects the output.
//
// NOTE: for now this is fine, it only needs a handful of commands, so there is
// no need to pull in a heavy dedicated library exposing the full Docker client API.
type RemoteClient struct {
	conn connection.SSHConn
}

func NewRemoteClient(netInfo *models.HMNetInfo) (RemoteClient, error) {
	var rc RemoteClient
	conn, err := connection.NewSSHConn(
		netInfo.IP.String(),
		netInfo.SSH.User,
		netInfo.SSH.Port,
		netInfo.SSH.PrivKey,
	)
	if err != nil {
		// Propagate the connection error instead of silently returning a
		// zero-value client.
		return rc, err
	}

	rc.conn = conn
	return rc, nil
}
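
// Example construction of a RemoteClient (a sketch; the netInfo value and its
// fields are hypothetical and normally come from the hmdeploy models package):
//
//	rc, err := NewRemoteClient(&models.HMNetInfo{ /* IP, SSH user, port, key... */ })
//	if err != nil {
//		// handle the error
//	}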

type extractOption struct {
	filter string
}

type fnExtractOption func(*extractOption)

// WithName filters the extracted services by name.
func WithName(name string) fnExtractOption {
	return func(o *extractOption) {
		o.filter = name
	}
}

func (c *RemoteClient) getIDs(name string) ([]string, error) {
	cmd := "docker service ls -q"
	if name != "" {
		cmd += " --filter name=" + name
	}

	output, err := c.conn.Execute(cmd)
	if err != nil {
		return nil, err
	}

	return parseIDs(output), nil
}

// getServiceDetails gathers everything known about the service `id` in one
// round trip: `docker service ps --format json` prints one JSON object per
// line, so the shell one-liner collapses the lines and swaps the separating
// spaces for commas (via tr) to build a "services" JSON array, then embeds
// the `docker service inspect` output under "details".
func (c *RemoteClient) getServiceDetails(id string) (Service, error) {
	output, err := c.conn.Execute(
		fmt.Sprintf(
			`echo "{\"services\": [$(echo $(cmd=$(docker service ps %s --format json); cmd=$(echo $cmd | tr '} {' '},{'); echo $cmd))], \"details\": $(docker service inspect %s --format=json)}"`,
			id,
			id,
		),
	)
	if err != nil {
		return Service{}, err
	}

	sc := Service{}
	if err := json.Unmarshal([]byte(output), &sc); err != nil {
		return sc, err
	}

	return sc, nil
}

func (c *RemoteClient) extractServicesDetails(ids ...string) (Services, error) {
	services := Services{}
	for _, id := range ids {
		srv, err := c.getServiceDetails(id)
		if err != nil {
			return nil, err
		}
		services = append(services, srv)
	}

	return services, nil
}

// LoadImages loads the given images on the remote daemon. Each name is
// resolved to a `<name>.tar` archive, as produced by LocalClient.Save.
func (c *RemoteClient) LoadImages(imageNames ...string) error {
	for idx := range imageNames {
		if _, err := c.conn.Execute("docker image load -i " + imageNames[idx] + ".tar"); err != nil {
			return err
		}
	}
	return nil
}

// DeployStack deploys the compose file as a swarm stack named projectName.
// With WithCheckState, it blocks until every service replica reaches the
// Running state (or the check times out).
func (c *RemoteClient) DeployStack(
	ctx context.Context,
	projectName, composeFilepath string,
	options ...fnStackOption,
) error {
	var opts stackOption
	for _, opt := range options {
		opt(&opts)
	}

	if bd := opts.baseDir; bd != "" {
		composeFilepath = filepath.Join(bd, composeFilepath)
	}

	if _, err := c.conn.Execute(fmt.Sprintf("docker stack deploy -c %s %s --with-registry-auth", composeFilepath, projectName)); err != nil {
		return err
	}

	if opts.checkState {
		return c.checkState(ctx, projectName, Running)
	}

	return nil
}
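
// Example usage of DeployStack (a sketch; rc, the project name, and the paths
// are hypothetical):
//
//	err := rc.DeployStack(
//		context.Background(),
//		"myapp",
//		"docker-compose.yml",
//		WithBaseDir("/opt/myapp"), // compose file resolved to /opt/myapp/docker-compose.yml
//		WithCheckState(),          // block until all replicas are Running
//	)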

// DestroyStack removes the swarm stack named projectName. With WithCheckState,
// it blocks until every service replica reaches the Shutdown state.
func (c *RemoteClient) DestroyStack(
	ctx context.Context,
	projectName string,
	options ...fnStackOption,
) error {
	if _, err := c.conn.Execute(fmt.Sprintf("docker stack rm %s", projectName)); err != nil {
		return err
	}

	var opts stackOption
	for _, opt := range options {
		opt(&opts)
	}

	if opts.checkState {
		return c.checkState(ctx, projectName, Shutdown)
	}

	return nil
}

// ExtractServicesDetails lists the services running on the remote daemon and
// collects their details, optionally filtered by name with WithName.
func (c *RemoteClient) ExtractServicesDetails(options ...fnExtractOption) (Services, error) {
	var opts extractOption
	for _, opt := range options {
		opt(&opts)
	}

	ids, err := c.getIDs(opts.filter)
	if err != nil {
		return nil, err
	}

	return c.extractServicesDetails(ids...)
}
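
// Example usage of ExtractServicesDetails (a sketch; rc is a hypothetical
// connected RemoteClient):
//
//	srvs, err := rc.ExtractServicesDetails(WithName("myapp"))
//	if err != nil {
//		// handle the error
//	}
//	for _, s := range srvs {
//		log.Info().Str("app", s.App).Msg("service found")
//	}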

// checkState checks the state of the deployment.
// It loops over all the services deployed for the project (replicas included)
// and checks whether every service state matches the `target` state.
//
// The check polls every stateTickDuration (4s) and gives up after
// defaultStateTimeout (10 minutes); neither is configurable through an
// option yet.
func (c *RemoteClient) checkState(
	ctx context.Context,
	projectName string,
	target ServiceStatus,
) error {
	ticker := time.NewTicker(stateTickDuration)
	defer ticker.Stop()

	ctxTimeout, fnCancel := context.WithTimeout(ctx, defaultStateTimeout)
	defer fnCancel()

	for {
		select {
		case <-ticker.C:
			// Pause the ticker while the (potentially slow) remote check runs.
			ticker.Stop()
			log.Info().
				Str("project", projectName).
				Str("state", string(target)).
				Msg("checking project state...")
			srvs, err := c.ExtractServicesDetails(WithName(projectName))
			if err != nil {
				return err
			}

			ready := true
		mainloop:
			for idx := range srvs {
				// ensure app name is the right one
				if srvs[idx].App != projectName {
					continue
				}
				for idy := range srvs[idx].Replicas {
					if srvs[idx].Replicas[idy].State != target {
						log.Info().Dur("retry (ms)", stateTickDuration).Msg("project not in the expected state yet, retrying...")
						ready = false
						break mainloop
					}
				}
			}
			if ready {
				return nil
			}

			ticker.Reset(stateTickDuration)
		case <-ctxTimeout.Done():
			msg := "swarm deployment skipped"
			if errors.Is(ctxTimeout.Err(), context.DeadlineExceeded) {
				msg = "swarm check state timeout"
			}
			return fmt.Errorf("%w, %s", ErrContextDone, msg)
		}
	}
}
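
// End-to-end sketch tying LocalClient and RemoteClient together (hypothetical
// names and paths; transferring the tar archive to the remote host, e.g. via
// scp, and error handling are elided):
//
//	local := NewLocalClient()
//	tarPath, _ := local.Save("myapp:latest", "/tmp/images")
//	// ...copy tarPath to the remote host...
//	rc, _ := NewRemoteClient(netInfo) // netInfo is a *models.HMNetInfo
//	_ = rc.LoadImages("myapp:latest") // loads myapp:latest.tar on the remote
//	// then deploy with rc.DeployStack, as shown above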