rework graceful shutdown + rework golangci lint

parent 541a671bc2
commit 35ac9a21ef

391  .golangci.yml
@@ -1,33 +1,372 @@
run:
timeout: 5m
modules-download-mode: readonly
# Defines the configuration version.
# The only possible value is "2".
version: "2"

linters:
# Default set of linters.
# The value can be: `standard`, `all`, `none`, or `fast`.
# Default: standard
default: all
# Enable specific linter.
# https://golangci-lint.run/usage/linters/#enabled-by-default
enable:
- gofmt
- govet
- goimports
- asasalint
- asciicheck
- bidichk
- bodyclose
- canonicalheader
- containedctx
- contextcheck
- copyloopvar
- cyclop
- decorder
- depguard
- dogsled
- dupl
- dupword
- durationcheck
- err113
- errcheck
- staticcheck
- unused
- gosimple
- errchkjson
- errname
- errorlint
- exhaustive
- exhaustruct
- exptostd
- fatcontext
- forbidigo
- forcetypeassert
- funlen
- ginkgolinter
- gocheckcompilerdirectives
- gochecknoglobals
- gochecknoinits
- gochecksumtype
- gocognit
- goconst
- gocritic
- gocyclo
- godot
- godox
- goheader
- gomoddirectives
- gomodguard
- goprintffuncname
- gosec
- gosmopolitan
- govet
- grouper
- iface
- importas
- inamedparam
- ineffassign
- typecheck
- interfacebloat
- intrange
- ireturn
- lll
- loggercheck
- maintidx
- makezero
- mirror
- misspell
- mnd
- musttag
- nakedret
- nestif
- nilerr
- nilnesserr
- nilnil
- nlreturn
- noctx
- nolintlint
- nonamedreturns
- nosprintfhostport
- paralleltest
- perfsprint
- prealloc
- predeclared
- promlinter
- protogetter
- reassign
- recvcheck
- revive
- rowserrcheck
- sloglint
- spancheck
- sqlclosecheck
- staticcheck
- tagalign
- tagliatelle
- testableexamples
- testifylint
- testpackage
- thelper
- tparallel
- unconvert
- unparam
- unused
- usestdlibvars
- usetesting
- varnamelen
- wastedassign
- whitespace
- wrapcheck
- wsl
- zerologlint
- misspell
- mnd
- funlen
- goconst
- gocritic
- gocyclo
- gosec
- govet
- unparam
- unused
- errname
# Disable specific linter
# https://golangci-lint.run/usage/linters/#disabled-by-default
disable:
- varnamelen
- asasalint
- asciicheck
- bidichk
- bodyclose
- canonicalheader
- containedctx
- contextcheck
- copyloopvar
- cyclop
- decorder
- depguard
- dogsled
- dupl
- dupword
- durationcheck
- err113
- errcheck
- errchkjson
- errorlint
- exhaustive
- exhaustruct
- exptostd
- fatcontext
- forbidigo
- forcetypeassert
- ginkgolinter
- gocheckcompilerdirectives
- gochecknoglobals
- gochecknoinits
- gochecksumtype
- gocognit
- godot
- godox
- gosmopolitan
- goheader
- gomoddirectives
- gomodguard
- goprintffuncname
- grouper
- iface
- importas
- inamedparam
- ineffassign
- interfacebloat
- intrange
- ireturn
- lll
- loggercheck
- maintidx
- makezero
- mirror
- musttag
- nakedret
- nestif
- nilerr
- nilnesserr
- nilnil
- nlreturn
- noctx
- nolintlint
- nonamedreturns
- nosprintfhostport
- paralleltest
- perfsprint
- prealloc
- predeclared
- promlinter
- protogetter
- reassign
- recvcheck
- revive
- rowserrcheck
- sloglint
- spancheck
- sqlclosecheck
- staticcheck
- tagalign
- tagliatelle
- testableexamples
- testifylint
- testpackage
- thelper
- tparallel
- unconvert
- usestdlibvars
- usetesting
- wastedassign
- whitespace
- wrapcheck
- wsl
- zerologlint
# All available settings of specific linters.
settings:
# See the dedicated "linters.settings" documentation section.
option: value
# Defines a set of rules to ignore issues.
# It does not skip the analysis, and so does not ignore "typecheck" errors.
exclusions:
# Mode of the generated files analysis.
#
# - `strict`: sources are excluded by strictly following the Go generated file convention.
# Source files that have lines matching only the following regular expression will be excluded: `^// Code generated .* DO NOT EDIT\.$`
# This line must appear before the first non-comment, non-blank text in the file.
# https://go.dev/s/generatedcode
# - `lax`: sources are excluded if they contain lines like `autogenerated file`, `code generated`, `do not edit`, etc.
# - `disable`: disable the generated files exclusion.
#
# Default: lax
generated: strict
# Log a warning if an exclusion rule is unused.
# Default: false
warn-unused: true
# Predefined exclusion rules.
# Default: []
presets:
- comments
- std-error-handling
- common-false-positives
- legacy
# Excluding configuration per-path, per-linter, per-text and per-source.
# rules:
# # Exclude some linters from running on tests files.
# - path: _test\.go
# linters:
# - gocyclo
# - errcheck
# - dupl
# - gosec
# # Run some linter only for test files by excluding its issues for everything else.
# - path-except: _test\.go
# linters:
# - forbidigo
# # Exclude known linters from partially hard-vendored code,
# # which is impossible to exclude via `nolint` comments.
# # `/` will be replaced by the current OS file path separator to properly work on Windows.
# - path: internal/hmac/
# text: "weak cryptographic primitive"
# linters:
# - gosec
# # Exclude some `staticcheck` messages.
# - linters:
# - staticcheck
# text: "SA9003:"
# # Exclude `lll` issues for long lines with `go:generate`.
# - linters:
# - lll
# source: "^//go:generate "
# # Which file paths to exclude: they will be analyzed, but issues from them won't be reported.
# # "/" will be replaced by the current OS file path separator to properly work on Windows.
# # Default: []
# paths:
# - ".*\\.my\\.go$"
# - lib/bad.go
# # Which file paths to not exclude.
# # Default: []
# paths-except:
# - ".*\\.my\\.go$"
# - lib/bad.go

linters-settings:
gofmt:
simplify: true
goimports:
local-prefixes: gitea.thegux.fr
formatters:
# Enable specific formatter.
# Default: [] (uses standard Go formatting)
enable:
- gci
- gofmt
- gofumpt
- goimports
- golines
# Formatters settings.
settings:
# See the dedicated "formatters.settings" documentation section.
option: value
# exclusions:
# # Mode of the generated files analysis.
# #
# # - `strict`: sources are excluded by strictly following the Go generated file convention.
# # Source files that have lines matching only the following regular expression will be excluded: `^// Code generated .* DO NOT EDIT\.$`
# # This line must appear before the first non-comment, non-blank text in the file.
# # https://go.dev/s/generatedcode
# # - `lax`: sources are excluded if they contain lines like `autogenerated file`, `code generated`, `do not edit`, etc.
# # - `disable`: disable the generated files exclusion.
# #
# # Default: lax
# generated: strict
# # Which file paths to exclude.
# # Default: []
# paths:
# - ".*\\.my\\.go$"
# - lib/bad.go

issues:
exclude-rules:
- path: _test\.go
linters:
- errcheck
- staticcheck
exclude-dirs:
- ..

service:
golangci-lint-version: 1.54.x
# Options for analysis running.
run:
# Timeout for total work, e.g. 30s, 5m, 5m30s.
# If the value is lower or equal to 0, the timeout is disabled.
# Default: 0 (disabled)
timeout: 5m
# The mode used to evaluate relative paths.
# It's used by exclusions, Go plugins, and some linters.
# The value can be:
# - `gomod`: the paths will be relative to the directory of the `go.mod` file.
# - `gitroot`: the paths will be relative to the git root (the parent directory of `.git`).
# - `cfg`: the paths will be relative to the configuration file.
# - `wd` (NOT recommended): the paths will be relative to the place where golangci-lint is run.
# Default: cfg
relative-path-mode: gomod
# Exit code when at least one issue was found.
# Default: 1
issues-exit-code: 2
# Include test files or not.
# Default: true
tests: false
# # List of build tags, all linters use it.
# # Default: []
# build-tags:
# - mytag
# If set, we pass it to "go list -mod={option}". From "go help modules":
# If invoked with -mod=readonly, the go command is disallowed from the implicit
# automatic updating of go.mod described above. Instead, it fails when any changes
# to go.mod are needed. This setting is most useful to check that go.mod does
# not need updates, such as in a continuous integration and testing system.
# If invoked with -mod=vendor, the go command assumes that the vendor
# directory holds the correct copies of dependencies and ignores
# the dependency descriptions in go.mod.
#
# Allowed values: readonly|vendor|mod
# Default: ""
modules-download-mode: readonly
# Allow multiple parallel golangci-lint instances running.
# If false, golangci-lint acquires file lock on start.
# Default: false
allow-parallel-runners: true
# Allow multiple golangci-lint instances running, but serialize them around a lock.
# If false, golangci-lint exits with an error if it fails to acquire file lock on start.
# Default: false
allow-serial-runners: true
# Define the Go version limit.
# Default: use Go version from the go.mod file, fallback on the env var `GOVERSION`, fallback on 1.22.
go: '1.23'
# Number of operating system threads (`GOMAXPROCS`) that can execute golangci-lint simultaneously.
# Default: 0 (automatically set to match Linux container CPU quota and
# fall back to the number of logical CPUs in the machine)
concurrency: 4
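Most of the Go changes below exist to satisfy this configuration, largely by adding `//nolint` directives. As a point of reference (a hypothetical snippet, not part of this commit), a directive that names the silenced linter and carries a short justification is the form that keeps `nolintlint` satisfied when it is configured to require both:

```go
package example

import (
	"io"
	"os"
)

// readConfig illustrates the //nolint form used throughout this commit:
// the directive names the silenced linter and adds a brief justification.
func readConfig(path string) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close() //nolint:errcheck // close error on a read-only file is not actionable

	return io.ReadAll(f)
}
```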
7  Makefile
@@ -1,8 +1,9 @@
run: lint
go run main.go
@go run main.go

build: lint
go build -o hmdeploy main.go
@go build -o hmdeploy main.go

lint:
golangci-lint run ./...
@golangci-lint fmt ./...
@golangci-lint run ./...
@@ -54,7 +54,7 @@ func NewSSHConn(addr, user string, port int, privkey string) (SSHConn, error) {

sshConfig := ssh.ClientConfig{
User: user,
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
HostKeyCallback: ssh.InsecureIgnoreHostKey(), //nolint:gosec // no need
Auth: []ssh.AuthMethod{
ssh.PublicKeys(sshPrivKey),
},
@@ -80,7 +80,7 @@ func (c *SSHConn) CopyFile(src, dest string) error {
if err != nil {
return fmt.Errorf("%w, addr=%s, err=%v", ErrSSHSession, c.addr, err)
}
defer sshSession.Close()
defer sshSession.Close() //nolint: errcheck // defered

fileInfo, err := os.Stat(src)
if err != nil {
@@ -91,24 +91,44 @@ func (c *SSHConn) CopyFile(src, dest string) error {
if err != nil {
return fmt.Errorf("unable to open scp source file src=%s, err=%v", src, err)
}
defer file.Close()
defer file.Close() //nolint: errcheck // defered

go func() {
w, _ := sshSession.StdinPipe()
defer w.Close()
defer w.Close() //nolint: errcheck // defered

fmt.Fprintf(w, "C0644 %d %s\n", fileInfo.Size(), filepath.Base(dest))
if _, err := fmt.Fprintf(w, "C0644 %d %s\n", fileInfo.Size(), filepath.Base(dest)); err != nil {
log.Debug().
Err(err).
Str("src", src).
Str("dest", dest).
Msg("unable to write file info to scp")
return
}

if _, err := io.Copy(w, file); err != nil {
log.Debug().Err(err).Str("src", src).Str("dest", dest).Msg("unable to scp src to dest")
return
}

fmt.Fprint(w, "\x00")
if _, err := fmt.Fprint(w, "\x00"); err != nil {
log.Debug().
Err(err).
Str("src", src).
Str("dest", dest).
Msg("unable to write scp termination string")
}
}()

if err := sshSession.Run(fmt.Sprintf("scp -t %s", dest)); err != nil {
return fmt.Errorf("%w, addr=%s, src=%s, dest=%s, err=%v", ErrSShCopy, c.addr, src, dest, err)
return fmt.Errorf(
"%w, addr=%s, src=%s, dest=%s, err=%v",
ErrSShCopy,
c.addr,
src,
dest,
err,
)
}

return nil
@@ -119,7 +139,7 @@ func (c *SSHConn) Execute(cmd string) (string, error) {
if err != nil {
return "", fmt.Errorf("%w, addr=%s, err=%v", ErrSSHSession, c.addr, err)
}
defer sshSession.Close()
defer sshSession.Close() //nolint: errcheck // defered

var buf bytes.Buffer
sshSession.Stdout = &buf

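For context on the CopyFile changes above: the remote `scp -t` sink expects a header of the form `C<mode> <size> <name>\n`, then exactly `<size>` bytes of file content, then a single zero byte, which is why the goroutine writes those three pieces in that order while the session runs `scp -t`. A minimal standalone sketch of that write sequence (names are illustrative; the writer stands in for the session's stdin pipe):

```go
package example

import (
	"fmt"
	"io"
)

// writeSCPFile sends one file to an "scp -t" sink: header, payload, zero byte.
func writeSCPFile(w io.Writer, name string, size int64, content io.Reader) error {
	if _, err := fmt.Fprintf(w, "C0644 %d %s\n", size, name); err != nil {
		return fmt.Errorf("scp header: %w", err)
	}
	if _, err := io.CopyN(w, content, size); err != nil {
		return fmt.Errorf("scp payload: %w", err)
	}
	if _, err := w.Write([]byte{0x00}); err != nil {
		return fmt.Errorf("scp terminator: %w", err)
	}
	return nil
}
```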
@@ -1,10 +1,11 @@
package deployers

var ErrContextDone = "unable to execute, context done"
import "errors"

var ErrContextDone = errors.New("unable to execute, context done")

type IDeployer interface {
Deploy() error
Build() error
Clear() error
Done() <-chan struct{}
}

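Turning ErrContextDone from a plain string into an errors.New sentinel is what makes the %w wrapping used by the deployers below meaningful: callers can now detect a shutdown-induced skip with errors.Is. A small illustrative sketch (not part of the commit):

```go
package main

import (
	"errors"
	"fmt"
)

var ErrContextDone = errors.New("unable to execute, context done")

// build mimics how Build()/Deploy() wrap the sentinel with %w.
func build() error {
	return fmt.Errorf("%w, build nginx archive skipped", ErrContextDone)
}

func main() {
	if err := build(); errors.Is(err, ErrContextDone) {
		fmt.Println("shutdown requested, step skipped:", err)
	}
}
```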
@@ -4,11 +4,12 @@ import (
"context"
"fmt"
"path/filepath"

"github.com/rs/zerolog/log"
"sync/atomic"
"time"

"gitea.thegux.fr/hmdeploy/connection"
"gitea.thegux.fr/hmdeploy/models"
"github.com/rs/zerolog/log"
)

type NginxDeployer struct {
@@ -17,25 +18,38 @@ type NginxDeployer struct {
conn connection.IConnection
project *models.Project

chDone chan struct{}
processing atomic.Bool
chDone chan struct{}
errFlag error
}

var _ IDeployer = (*NginxDeployer)(nil)

func NewNginxDeployer(ctx context.Context, netInfo *models.HMNetInfo, project *models.Project) (NginxDeployer, error) {
func NewNginxDeployer(
ctx context.Context,
netInfo *models.HMNetInfo,
project *models.Project,
) (*NginxDeployer, error) {
var nd NginxDeployer

conn, err := connection.NewSSHConn(netInfo.IP.String(), netInfo.SSH.User, netInfo.SSH.Port, netInfo.SSH.PrivKey)
conn, err := connection.NewSSHConn(
netInfo.IP.String(),
netInfo.SSH.User,
netInfo.SSH.Port,
netInfo.SSH.PrivKey,
)
if err != nil {
return nd, err
return &nd, err
}

nd.conn = &conn
nd.project = project
nd.chDone = make(chan struct{}, 5)
nd.ctx = ctx
nd.processing = atomic.Bool{}
nd.processing.Store(false)
nd.chDone = make(chan struct{}, 1)

return nd, nil
return &nd, nil
}

func (nd *NginxDeployer) close() error {
@@ -47,19 +61,46 @@ func (nd *NginxDeployer) clean() (err error) {
return
}

func (nd *NginxDeployer) setDone() {
func (nd *NginxDeployer) setDone(err error) {
nd.chDone <- struct{}{}
nd.errFlag = err
}

func (nd *NginxDeployer) Error() error {
return nd.errFlag
}

func (nd *NginxDeployer) Done() <-chan struct{} {
chDone := make(chan struct{})
go func() {
defer func() {
close(chDone)
}()

for {
select {
case <-nd.chDone:
chDone <- struct{}{}
return
case <-nd.ctx.Done():
log.Warn().Str("deployer", "swarm").Msg("context done catch")

timeout := time.NewTicker(10 * time.Second) //nolint:mnd //TODO: to refactor
tick := time.NewTicker(time.Second)
for {
select {
case <-timeout.C:
log.Error().
Msg("timeout while waiting for graceful swarm deployer shutdown")
chDone <- struct{}{}
return
case <-tick.C:
if !nd.processing.Load() {
chDone <- struct{}{}
return
}
tick.Reset(1 * time.Second)
}
}
case <-nd.chDone:
log.Info().Str("deployer", "nginx").Msg("terminated")
chDone <- struct{}{}
return
}
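This Done() implementation is the heart of the graceful-shutdown rework: a completed deployment is signalled through chDone, while a cancelled context makes the goroutine poll the atomic processing flag once per second and give up after ten seconds, so an in-flight SSH step gets a chance to finish. Stripped of the deployer specifics, the wait looks roughly like this (a sketch, not the committed code):

```go
package example

import (
	"context"
	"sync/atomic"
	"time"
)

// waitIdle returns when done fires, or, after ctx is cancelled, once the
// processing flag drops to false or the grace period runs out. It mirrors
// the select loop inside Done() above.
func waitIdle(ctx context.Context, processing *atomic.Bool, done <-chan struct{}, grace time.Duration) {
	select {
	case <-done:
		return
	case <-ctx.Done():
		deadline := time.NewTimer(grace)
		defer deadline.Stop()
		tick := time.NewTicker(time.Second)
		defer tick.Stop()
		for {
			select {
			case <-deadline.C:
				return // grace period exhausted, shut down anyway
			case <-tick.C:
				if !processing.Load() {
					return // in-flight work has finished
				}
			}
		}
	}
}
```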
@@ -84,9 +125,12 @@ func (nd *NginxDeployer) Clear() error {
}

func (nd *NginxDeployer) Build() error {
nd.processing.Store(true)
defer nd.processing.Store(false)

select {
case <-nd.ctx.Done():
nd.setDone()
nd.errFlag = ErrContextDone
return fmt.Errorf("%w, build nginx archive skipped", ErrContextDone)
default:
}
@@ -94,22 +138,24 @@ func (nd *NginxDeployer) Build() error {
nginxPath := filepath.Join(nd.project.Dir, filepath.Base(nd.project.Deps.NginxFile))
nginxConf := nd.project.Name + ".conf"

log.Info().Str("nginx", nginxConf).Msg("transfering nginx conf...")
log.Info().Str("nginx", nginxConf).Msg("transferring nginx conf...")

if err := nd.conn.CopyFile(nginxPath, nginxConf); err != nil {
nd.setDone()
nd.setDone(err)
return err
}

log.Info().Str("nginx", nginxConf).Msg("nginx conf transfered with success")
log.Info().Str("nginx", nginxConf).Msg("nginx conf transferred with success")
return nil
}

func (nd *NginxDeployer) Deploy() (err error) {
defer nd.setDone()
nd.processing.Store(true)
defer nd.processing.Store(false)

select {
case <-nd.ctx.Done():
nd.errFlag = ErrContextDone
return fmt.Errorf("%w, nginx deployment skipped", ErrContextDone)
default:
}
@@ -121,10 +167,16 @@ func (nd *NginxDeployer) Deploy() (err error) {
_, err = nd.conn.Execute(
fmt.Sprintf(
"cp %s /etc/nginx/sites-available && ln -sf /etc/nginx/sites-available/%s /etc/nginx/sites-enabled/%s",
nginxConf, nginxConf, nginxConf,
nginxConf,
nginxConf,
nginxConf,
),
)
nd.setDone(err)

if err == nil {
log.Info().Str("nginx", nginxConf).Msg("nginx conf successfully deployed")
}

log.Info().Str("nginx", nginxConf).Msg("nginx conf successfully deployed")
return err
}

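Build() and Deploy() now both open with a non-blocking select on ctx.Done(): if shutdown has already been requested the step records ErrContextDone and returns, otherwise the default branch falls through immediately and the work proceeds. In isolation the check is just this (a sketch, written as if it lived in package deployers):

```go
package deployers

import (
	"context"
	"fmt"
)

// skipIfCancelled is the non-blocking context check used at the top of
// Build() and Deploy(); it only errors when ctx is already done.
func skipIfCancelled(ctx context.Context, step string) error {
	select {
	case <-ctx.Done():
		return fmt.Errorf("%w, %s skipped", ErrContextDone, step)
	default:
		return nil
	}
}
```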
@@ -2,18 +2,22 @@ package deployers

import (
"context"
"errors"
"fmt"
"os"
"path/filepath"

"github.com/rs/zerolog/log"
"sync/atomic"
"time"

"gitea.thegux.fr/hmdeploy/connection"
"gitea.thegux.fr/hmdeploy/docker"
"gitea.thegux.fr/hmdeploy/models"
"gitea.thegux.fr/hmdeploy/utils"
"github.com/rs/zerolog/log"
)

var ErrSwarmDeployerNoArchive = errors.New("no archive found to be deployed")

type SwarmDeployer struct {
ctx context.Context

@@ -23,26 +27,40 @@ type SwarmDeployer struct {
project *models.Project
archivePath string

chDone chan struct{}
processing atomic.Bool
chDone chan struct{}
errFlag error
}

var _ IDeployer = (*SwarmDeployer)(nil)

func NewSwarmDeployer(ctx context.Context, dockerClient docker.IClient, netInfo *models.HMNetInfo, project *models.Project) (SwarmDeployer, error) {
func NewSwarmDeployer(
ctx context.Context,
dockerClient docker.IClient,
netInfo *models.HMNetInfo,
project *models.Project,
) (*SwarmDeployer, error) {
var sd SwarmDeployer

conn, err := connection.NewSSHConn(netInfo.IP.String(), netInfo.SSH.User, netInfo.SSH.Port, netInfo.SSH.PrivKey)
conn, err := connection.NewSSHConn(
netInfo.IP.String(),
netInfo.SSH.User,
netInfo.SSH.Port,
netInfo.SSH.PrivKey,
)
if err != nil {
return sd, err
return &sd, err
}

sd.ctx = ctx
sd.conn = &conn
sd.dcli = dockerClient
sd.project = project
sd.chDone = make(chan struct{}, 5)
sd.processing = atomic.Bool{}
sd.processing.Store(false)
sd.chDone = make(chan struct{}, 1)

return sd, nil
return &sd, nil
}

func (sd *SwarmDeployer) close() error {
@@ -50,24 +68,52 @@ func (sd *SwarmDeployer) close() error {
}

func (sd *SwarmDeployer) clean() (err error) {
defer os.Remove(sd.archivePath)
_, err = sd.conn.Execute(fmt.Sprintf("rm -f %s %s *.tar.gz *.tar", models.ComposeFile, models.EnvFile))
defer os.Remove(sd.archivePath) //nolint: errcheck // defered
_, err = sd.conn.Execute(
fmt.Sprintf("rm -f %s %s *.tar.gz *.tar", models.ComposeFile, models.EnvFile),
)
return
}

func (sd *SwarmDeployer) setDone() {
func (sd *SwarmDeployer) setDone(err error) {
sd.chDone <- struct{}{}
sd.errFlag = err
}

func (sd *SwarmDeployer) Error() error {
return sd.errFlag
}

func (sd *SwarmDeployer) Done() <-chan struct{} {
chDone := make(chan struct{})
go func() {
defer func() {
close(chDone)
}()
for {
select {
case <-sd.chDone:
chDone <- struct{}{}
return
case <-sd.ctx.Done():
log.Warn().Str("deployer", "swarm").Msg("context done catch")

timeout := time.NewTicker(10 * time.Second) //nolint:mnd //TODO: to refactor
tick := time.NewTicker(time.Second)
for {
select {
case <-timeout.C:
log.Error().
Msg("timeout while waiting for graceful swarm deployer shutdown")
chDone <- struct{}{}
return
case <-tick.C:
if !sd.processing.Load() {
chDone <- struct{}{}
return
}
tick.Reset(1 * time.Second)
}
}
case <-sd.chDone:
log.Info().Str("deployer", "swarm").Msg("terminated")
chDone <- struct{}{}
return
}
@@ -93,9 +139,12 @@ func (sd *SwarmDeployer) Clear() error {
}

func (sd *SwarmDeployer) Build() error {
sd.processing.Store(true)
defer sd.processing.Store(false)

select {
case <-sd.ctx.Done():
sd.setDone()
sd.errFlag = ErrContextDone
return fmt.Errorf("%w, swarm project build skipped", ErrContextDone)
default:
}
@@ -106,11 +155,11 @@ func (sd *SwarmDeployer) Build() error {
if imageName := sd.project.ImageName; imageName != "" {
tarFile, err := sd.dcli.Save(imageName, sd.project.Dir)
if err != nil {
sd.setDone()
sd.setDone(err)
return err
}

defer os.Remove(tarFile)
defer os.Remove(tarFile) //nolint: errcheck // defered

filesToArchive = append(filesToArchive, tarFile)

@@ -118,16 +167,22 @@ func (sd *SwarmDeployer) Build() error {
}

if envFilePath := sd.project.Deps.EnvFile; envFilePath != "" {
filesToArchive = append(filesToArchive, filepath.Join(sd.project.Dir, filepath.Base(envFilePath)))
filesToArchive = append(
filesToArchive,
filepath.Join(sd.project.Dir, filepath.Base(envFilePath)),
)
log.Info().Msg(".env file added to the archive for deployment")
}

composeFileBase := filepath.Base(sd.project.Deps.ComposeFile)
filesToArchive = append(filesToArchive, filepath.Join(sd.project.Dir, composeFileBase))

archivePath, err := utils.CreateArchive(sd.project.Dir, fmt.Sprintf("%s-%s", sd.project.Name, "swarm"), filesToArchive...)
archivePath, err := utils.CreateArchive(
sd.project.Dir,
fmt.Sprintf("%s-%s", sd.project.Name, "swarm"),
filesToArchive...)
if err != nil {
sd.setDone()
sd.setDone(err)
return err
}

@@ -138,36 +193,46 @@ func (sd *SwarmDeployer) Build() error {
}

func (sd *SwarmDeployer) Deploy() error {
defer sd.setDone()
sd.processing.Store(true)
defer sd.processing.Store(false)

select {
case <-sd.ctx.Done():
sd.errFlag = ErrContextDone
return fmt.Errorf("%w, swarm deployment skipped", ErrContextDone)
default:
}

if sd.archivePath == "" {
return fmt.Errorf("unable to deploy, no archive to deploy")
sd.setDone(ErrSwarmDeployerNoArchive)
return ErrSwarmDeployerNoArchive
}

log.Info().Str("archive", sd.archivePath).Msg("deploying archive to swarm...")

archiveDestPath := filepath.Base(sd.archivePath)
log.Info().Str("archive", sd.archivePath).Msg("archive built with success, tranfering to swarm for deployment...")
log.Info().
Str("archive", sd.archivePath).
Msg("archive built with success, tranfering to swarm for deployment...")
if err := sd.conn.CopyFile(sd.archivePath, archiveDestPath); err != nil {
sd.setDone(err)
return err
}

if _, err := sd.conn.Execute(fmt.Sprintf("tar xzvf %s", archiveDestPath)); err != nil {
sd.setDone(err)
return err
}

log.Info().Str("project", sd.project.Name).Msg("deploying swarm project...")
composeFileBase := filepath.Base(sd.project.Deps.ComposeFile)
if _, err := sd.conn.Execute(fmt.Sprintf("docker stack deploy -c %s %s", composeFileBase, sd.project.Name)); err != nil {
sd.setDone(err)
return err
}

log.Info().Msg("swarm deployment done with success")

sd.setDone(nil)
return nil
}

@@ -37,7 +37,13 @@ func (c *Client) Save(imageName, dest string) (string, error) {
cmd := exec.Command("docker", "save", "-o", tarFile, imageName)
cmd.Dir = dest
if _, err := cmd.Output(); err != nil {
return "", fmt.Errorf("%w, dir=%s, image=%s, err=%v", ErrDockerClientSave, dest, imageName, err)
return "", fmt.Errorf(
"%w, dir=%s, image=%s, err=%v",
ErrDockerClientSave,
dest,
imageName,
err,
)
}

return filepath.Join(dest, tarFile), nil

62  main.go
@@ -8,17 +8,21 @@ import (
"os/signal"
"path"

"github.com/rs/zerolog"
"github.com/rs/zerolog/log"

"gitea.thegux.fr/hmdeploy/deployers"
"gitea.thegux.fr/hmdeploy/docker"
"gitea.thegux.fr/hmdeploy/models"
"gitea.thegux.fr/hmdeploy/scheduler"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)

const HMDEPLOY_DIRNAME = ".homeserver"
const NETWORK_FILENAME = "map.json"
const (
HMDeployDirname string = ".homeserver"
NetworkFilename string = "map.json"

SchedulerNbWorkers uint8 = 4
SchedulerQueueCapacity uint32 = 30
)

var HOME_PATH = os.Getenv("HOME")

@@ -27,13 +31,12 @@ func initLogger() {
log.Logger = log.With().Caller().Logger().Output(zerolog.ConsoleWriter{Out: os.Stderr})
}

func main() {
ctx, stop := signal.NotifyContext(
func main() { //nolint: funlen // TODO: to rework
ctx, _ := signal.NotifyContext(
context.Background(),
os.Interrupt,
os.Kill,
)
defer stop()

initLogger()
log.Info().Msg("hmdeploy started")
@@ -41,7 +44,7 @@ func main() {
projectDir := flag.String("path", ".", "define the .homeserver project root dir")
flag.Parse()

hmmap_path := path.Join(HOME_PATH, HMDEPLOY_DIRNAME, NETWORK_FILENAME)
hmmap_path := path.Join(HOME_PATH, HMDeployDirname, NetworkFilename)
c, err := os.ReadFile(hmmap_path)
if err != nil {
log.Fatal().Err(err).Str("conf", hmmap_path).Msg("unable to load configuration")
@@ -57,7 +60,10 @@ func main() {
if err != nil {
log.Fatal().Str("dir", *projectDir).Err(err).Msg("unable to init project from directory")
}
log.Info().Str("dir", project.Dir).Str("name", project.Name).Msg("project initialized with success")
log.Info().
Str("dir", project.Dir).
Str("name", project.Name).
Msg("project initialized with success")

swarmNet := hmmap.GetSwarmNetInfo()
if swarmNet == nil {
@@ -70,7 +76,7 @@ func main() {
log.Fatal().Err(err).Msg("unable to init swarm deployer")
}

var nd deployers.IDeployer
var nd *deployers.NginxDeployer
if project.Deps.NginxFile != "" {
nginxNet := hmmap.GetNginxNetInfo()
if nginxNet == nil {
@@ -84,24 +90,40 @@ func main() {
return
}

nd = &d
nd = d
}

deployNginx := scheduler.NewTask("nginx-deploy", nd.Deploy)
deploySwarm := scheduler.NewTask("swarm-deploy", sd.Deploy, deployNginx)
var deploySwarm *scheduler.Task
if nd != nil {
deployNginx := scheduler.NewTask("nginx-deploy", nd.Deploy)
deploySwarm = scheduler.NewTask("swarm-deploy", sd.Deploy, deployNginx)
} else {
deploySwarm = scheduler.NewTask("swarm-deploy", sd.Deploy)
}

s := scheduler.NewScheduler(context.Background(), 30, 4)
s.Submit(scheduler.NewTask("swarm-build", sd.Build, deploySwarm))
s.Submit(scheduler.NewTask("nginx-build", nd.Build))
s := scheduler.NewScheduler(
context.Background(),
SchedulerQueueCapacity,
SchedulerNbWorkers,
)
s.Submit(scheduler.NewTask("swarm-build", sd.Build, deploySwarm)) //nolint: errcheck // TODO
if nd != nil {
s.Submit(scheduler.NewTask("nginx-build", nd.Build)) //nolint: errcheck // TODO
}

<-nd.Done()
<-sd.Done()
<-nd.Done()

s.Submit(scheduler.NewTask("nginx-clear", nd.Clear))
s.Submit(scheduler.NewTask("swarm-clear", sd.Clear))
s.Submit(scheduler.NewTask("nginx-clear", nd.Clear)) //nolint: errcheck // TODO
s.Submit(scheduler.NewTask("swarm-clear", sd.Clear)) //nolint: errcheck // TODO

s.Stop()
<-s.Done()

if sd.Error() != nil || nd.Error() != nil {
log.Error().Str("name", project.Name).Msg("unable to deploy project, see logs for details")
return
}

log.Info().Str("name", project.Name).Msg("project deployed successfully")
}

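main now derives its context from signal.NotifyContext, so an interrupt cancels the same ctx that the deployers and the scheduler watch, and main simply waits on each deployer's Done() channel before clearing up. A minimal sketch of that shutdown path (illustrative only; the goroutine stands in for a deployer step):

```go
package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"time"
)

func main() {
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill)
	defer stop()

	done := make(chan struct{})
	go func() { // stands in for a deployer's Build/Deploy step
		defer close(done)
		select {
		case <-time.After(5 * time.Second):
			fmt.Println("step finished")
		case <-ctx.Done():
			fmt.Println("interrupted, remaining work skipped")
		}
	}()

	<-done // main blocks on the Done() channels in the same way
}
```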
@@ -14,8 +14,10 @@ type HMNetInfo struct {
} `json:"ssh,omitempty"`
}

type HMVM map[string]*HMNetInfo
type HMLXC map[string]*HMNetInfo
type (
HMVM map[string]*HMNetInfo
HMLXC map[string]*HMNetInfo
)

type HMMap struct {
*HMNetInfo

@@ -15,7 +15,7 @@ const (
MainDir string = ".homeserver"

ComposeFile string = "docker-compose.deploy.yml"
EnvFile = ".env"
EnvFile string = ".env"
NginxFile = "nginx.conf"

ConfFile = "hmdeploy.json"
@@ -36,7 +36,12 @@ func getFileInfo(baseDir, filePath string) (fs.FileInfo, error) {

fInf, err = os.Stat(fileAbsPath)
if err != nil {
return fInf, fmt.Errorf("%w, unable to stat file=%s, err=%v", ErrProjectConfFile, fileAbsPath, err)
return fInf, fmt.Errorf(
"%w, unable to stat file=%s, err=%v",
ErrProjectConfFile,
fileAbsPath,
err,
)
}

return fInf, nil
@@ -92,15 +97,31 @@ func ProjectFromDir(dir string) (Project, error) {

content, err := os.ReadFile(filepath.Join(dir, ConfFile))
if err != nil {
return p, fmt.Errorf("%w, unable to read conf file=%s, err=%v", ErrProjectConfFile, ConfFile, err)
return p, fmt.Errorf(
"%w, unable to read conf file=%s, err=%v",
ErrProjectConfFile,
ConfFile,
err,
)
}

if err := json.Unmarshal(content, &p); err != nil {
return p, fmt.Errorf("%w, unable to parse conf file=%s, err=%v", ErrProjectConfFile, ConfFile, err)
return p, fmt.Errorf(
"%w, unable to parse conf file=%s, err=%v",
ErrProjectConfFile,
ConfFile,
err,
)
}

if err := p.validate(); err != nil {
return p, fmt.Errorf("%w, unable to validate project, name=%s, dir=%s, err=%v", ErrProjectConfFile, p.Name, p.Dir, err)
return p, fmt.Errorf(
"%w, unable to validate project, name=%s, dir=%s, err=%v",
ErrProjectConfFile,
p.Name,
p.Dir,
err,
)
}

return p, nil

@@ -84,13 +84,13 @@ type Scheduler struct {
wg sync.WaitGroup

capacity atomic.Uint32
workers int
workers uint8

chTasks chan *Task
tasks taskStore
}

func NewScheduler(ctx context.Context, capacity, workers int) *Scheduler {
func NewScheduler(ctx context.Context, capacity uint32, workers uint8) *Scheduler {
ctxChild, fnCancel := context.WithCancel(ctx)
s := Scheduler{
ctx: ctxChild,
@@ -101,14 +101,14 @@ func NewScheduler(ctx context.Context, capacity, workers int) *Scheduler {
tasks: newTaskStore(),
wg: sync.WaitGroup{},
}
s.capacity.Add(uint32(capacity))
s.capacity.Add(capacity)
s.run()

return &s
}

func (s *Scheduler) run() {
for i := 0; i < s.workers; i++ {
for i := 0; i < int(s.workers); i++ {
s.wg.Add(1)
go func() {
defer s.wg.Done()
@@ -126,7 +126,7 @@ func (s *Scheduler) run() {
s.tasks.setStatus(t, Success)

for _, nt := range t.Next {
s.Submit(nt)
s.Submit(nt) //nolint: errcheck // TODO
}
case <-s.ctx.Done():
log.Warn().Msg("context done, stopping worker...")
@@ -162,7 +162,7 @@ func (s *Scheduler) Submit(task *Task) error {
func (s *Scheduler) Done() <-chan struct{} {
chDone := make(chan struct{})
go func() {
for {
for { //nolint: staticcheck // no
select {
case <-s.ctx.Done():
log.Info().Msg("waiting for scheduler task completion...")

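With the signature change above, NewScheduler takes an explicit uint32 queue capacity and uint8 worker count, which is what the SchedulerQueueCapacity and SchedulerNbWorkers constants in main.go now feed it. A usage sketch based on the calls shown in main.go (written as if added alongside it; task chaining goes through the variadic next arguments of NewTask):

```go
package main

import (
	"context"

	"gitea.thegux.fr/hmdeploy/scheduler"
)

// queueDeployment mirrors the reworked NewScheduler/Submit call shape in main.go.
func queueDeployment(build, deploy func() error) (*scheduler.Scheduler, error) {
	s := scheduler.NewScheduler(context.Background(), SchedulerQueueCapacity, SchedulerNbWorkers)

	// "swarm-deploy" is chained after "swarm-build" through the task's Next list.
	deployTask := scheduler.NewTask("swarm-deploy", deploy)
	if err := s.Submit(scheduler.NewTask("swarm-build", build, deployTask)); err != nil {
		return nil, err
	}

	return s, nil
}
```

main then waits on each deployer's Done() channel before submitting the clear tasks and finishing with s.Stop() and <-s.Done().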
@@ -16,7 +16,7 @@ func addToArchive(tw *tar.Writer, filename string) error {
if err != nil {
return err
}
defer file.Close()
defer file.Close() //nolint: errcheck // defered

info, err := file.Stat()
if err != nil {
@@ -39,23 +39,31 @@ func addToArchive(tw *tar.Writer, filename string) error {

func CreateArchive(destDir, name string, files ...string) (string, error) {
now := time.Now().UTC()
archivePath := filepath.Join(destDir, fmt.Sprintf("%s-%s.tar.gz", name, strings.Replace(now.Format(time.RFC3339), ":", "-", -1)))
archivePath := filepath.Join(
destDir,
fmt.Sprintf("%s-%s.tar.gz", name, strings.ReplaceAll(now.Format(time.RFC3339), ":", "-")),
)

file, err := os.Create(archivePath)
if err != nil {
return "", fmt.Errorf("unable to create archive=%s, err=%v", archivePath, err)
}
defer file.Close()
defer file.Close() //nolint: errcheck // defered

gw := gzip.NewWriter(file)
defer gw.Close()
defer gw.Close() //nolint: errcheck // defered

tw := tar.NewWriter(gw)
defer tw.Close()
defer tw.Close() //nolint: errcheck // defered

for _, f := range files {
if err := addToArchive(tw, f); err != nil {
return "", fmt.Errorf("unable to add file=%s to archive=%s, err=%v", f, archivePath, err)
return "", fmt.Errorf(
"unable to add file=%s to archive=%s, err=%v",
f,
archivePath,
err,
)
}
}

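CreateArchive, shown above, writes a timestamped .tar.gz into destDir; replacing the colons in the RFC 3339 stamp keeps the file name portable. A usage sketch matching the SwarmDeployer.Build call (illustrative helper, written as if added alongside main.go):

```go
package main

import (
	"fmt"

	"gitea.thegux.fr/hmdeploy/utils"
)

// buildArchive bundles the deployment files, producing something like
// <projectDir>/<projectName>-swarm-2024-01-02T15-04-05Z.tar.gz
func buildArchive(projectDir, projectName string, files []string) (string, error) {
	return utils.CreateArchive(projectDir, fmt.Sprintf("%s-%s", projectName, "swarm"), files...)
}
```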