fix depends-on behavior and simplify some of its logic (#908)

* fix depends-on behavior and simplify some of its logic

* fix comments
Simon Aronsson 2021-04-18 18:37:35 +02:00 committed by GitHub
parent 4142f7966a
commit 3de202a965
5 changed files with 83 additions and 29 deletions


@@ -2,28 +2,47 @@ package actions
import (
    "fmt"
    "github.com/containrrr/watchtower/pkg/types"
    "sort"
    "time"
    "github.com/containrrr/watchtower/pkg/filters"
    "github.com/containrrr/watchtower/pkg/sorter"
    "github.com/sirupsen/logrus"
    log "github.com/sirupsen/logrus"
    "github.com/containrrr/watchtower/pkg/container"
)

// CheckForSanity makes sure everything is sane before starting
func CheckForSanity(client container.Client, filter types.Filter, rollingRestarts bool) error {
    log.Debug("Making sure everything is sane before starting")
    if rollingRestarts {
        containers, err := client.ListContainers(filter)
        if err != nil {
            return err
        }
        for _, c := range containers {
            if len(c.Links()) > 0 {
                return fmt.Errorf(
                    "%q is depending on at least one other container. This is not compatible with rolling restarts",
                    c.Name(),
                )
            }
        }
    }
    return nil
}

// CheckForMultipleWatchtowerInstances will ensure that there are not multiple instances of the
// watchtower running simultaneously. If multiple watchtower containers are detected, this function
// will stop and remove all but the most recently started container. This behaviour can be bypassed
// if a scope UID is defined.
func CheckForMultipleWatchtowerInstances(client container.Client, cleanup bool, scope string) error {
    awaitDockerClient()
    containers, err := client.ListContainers(filters.FilterByScope(scope, filters.WatchtowerContainersFilter))
    if err != nil {
        log.Fatal(err)
        return err
    }
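
The new CheckForSanity guard refuses to combine rolling restarts with linked (depends-on) containers. A minimal standalone sketch of the same pre-flight rule, using an illustrative stand-in instead of watchtower's container.Client (fakeContainer, checkRollingRestartSanity and the sample data are assumptions, not part of the commit):

package main

import "fmt"

// fakeContainer is a simplified stand-in for watchtower's container type,
// carrying only the fields this check needs.
type fakeContainer struct {
    name  string
    links []string // names of containers this one depends on
}

// checkRollingRestartSanity mirrors the rule shown above: rolling restarts
// are rejected as soon as any container declares a dependency link.
func checkRollingRestartSanity(containers []fakeContainer, rollingRestarts bool) error {
    if !rollingRestarts {
        return nil
    }
    for _, c := range containers {
        if len(c.links) > 0 {
            return fmt.Errorf(
                "%q is depending on at least one other container. This is not compatible with rolling restarts",
                c.name,
            )
        }
    }
    return nil
}

func main() {
    containers := []fakeContainer{
        {name: "db"},
        {name: "app", links: []string{"db"}},
    }
    if err := checkRollingRestartSanity(containers, true); err != nil {
        fmt.Println("refusing to start:", err) // "app" depends on "db"
    }
}
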
@@ -45,14 +64,14 @@ func cleanupExcessWatchtowers(containers []container.Container, client container
    for _, c := range allContainersExceptLast {
        if err := client.StopContainer(c, 10*time.Minute); err != nil {
            // logging the original here as we're just returning a count
            logrus.WithError(err).Error("Could not stop a previous watchtower instance.")
            log.WithError(err).Error("Could not stop a previous watchtower instance.")
            stopErrors++
            continue
        }
        if cleanup {
            if err := client.RemoveImageByID(c.ImageID()); err != nil {
                logrus.WithError(err).Warning("Could not cleanup watchtower images, possibly because of other watchtowers instances in other scopes.")
                log.WithError(err).Warning("Could not cleanup watchtower images, possibly because of other watchtowers instances in other scopes.")
            }
        }
    }
@@ -63,8 +82,3 @@ func cleanupExcessWatchtowers(containers []container.Container, client container
    return nil
}

func awaitDockerClient() {
    log.Debug("Sleeping for a second to ensure the docker api client has been properly initialized.")
    time.Sleep(1 * time.Second)
}
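
CheckForMultipleWatchtowerInstances and cleanupExcessWatchtowers together keep only the most recently started watchtower, stopping the rest and counting stop failures instead of aborting on the first one. A rough self-contained sketch of that pattern follows (instance, stopAllButNewest and the sample data are illustrative assumptions; the real code sorts with pkg/sorter and uses a 10-minute stop timeout):

package main

import (
    "fmt"
    "sort"
    "time"
)

// instance is an illustrative stand-in for a running watchtower container.
type instance struct {
    name    string
    created time.Time
}

// stopAllButNewest sorts by creation time, keeps the most recently started
// instance, and tries to stop the rest, counting failures rather than
// returning on the first error.
func stopAllButNewest(instances []instance, stop func(instance) error) error {
    if len(instances) < 2 {
        return nil // nothing to clean up
    }
    sort.Slice(instances, func(i, j int) bool {
        return instances[i].created.Before(instances[j].created)
    })
    stopErrors := 0
    for _, inst := range instances[:len(instances)-1] {
        if err := stop(inst); err != nil {
            stopErrors++
        }
    }
    if stopErrors > 0 {
        return fmt.Errorf("%d errors while stopping excess instances", stopErrors)
    }
    return nil
}

func main() {
    now := time.Now()
    running := []instance{
        {name: "watchtower-old", created: now.Add(-2 * time.Hour)},
        {name: "watchtower-new", created: now},
    }
    err := stopAllButNewest(running, func(i instance) error {
        fmt.Println("stopping", i.name) // only watchtower-old is stopped
        return nil
    })
    fmt.Println("err:", err)
}
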


@@ -50,6 +50,7 @@ func Update(client container.Client, params types.UpdateParams) (*metrics2.Metri
    }
    containers, err = sorter.SortByDependencies(containers)
    metric.Scanned = len(containers)
    if err != nil {
        return nil, err
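
The Update path records metric.Scanned right after sorter.SortByDependencies. The sorter itself is not part of this commit; a simplified dependency-first ordering over a name-to-links map could look like the sketch below (service, sortByDependencies and the example data are illustrative; watchtower's real sorter also reports circular references, which this sketch does not handle):

package main

import "fmt"

// service is an illustrative stand-in: a container name plus the names it links to.
type service struct {
    name  string
    links []string
}

// sortByDependencies returns the services so that every service appears after
// the services it links to (a simple depth-first topological ordering).
func sortByDependencies(services []service) []service {
    byName := make(map[string]service, len(services))
    for _, s := range services {
        byName[s.name] = s
    }
    visited := make(map[string]bool)
    var ordered []service

    var visit func(s service)
    visit = func(s service) {
        if visited[s.name] {
            return
        }
        visited[s.name] = true
        for _, link := range s.links {
            if dep, ok := byName[link]; ok {
                visit(dep)
            }
        }
        ordered = append(ordered, s)
    }
    for _, s := range services {
        visit(s)
    }
    return ordered
}

func main() {
    services := []service{
        {name: "app", links: []string{"db", "cache"}},
        {name: "cache"},
        {name: "db"},
    }
    for _, s := range sortByDependencies(services) {
        fmt.Println(s.name) // dependencies first: db, cache, then app
    }
}
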
@@ -57,11 +58,11 @@ func Update(client container.Client, params types.UpdateParams) (*metrics2.Metri
    checkDependencies(containers)
    containersToUpdate := []container.Container{}
    var containersToUpdate []container.Container
    if !params.MonitorOnly {
        for i := len(containers) - 1; i >= 0; i-- {
            if !containers[i].IsMonitorOnly() {
                containersToUpdate = append(containersToUpdate, containers[i])
        for _, c := range containers {
            if !c.IsMonitorOnly() {
                containersToUpdate = append(containersToUpdate, c)
            }
        }
    }
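
This hunk rebuilds the list of containers to update by walking the (already dependency-sorted) scan result and skipping anything marked monitor-only; when the whole run is monitor-only the list simply stays empty. A compact sketch of that selection, with target and selectForUpdate as assumed stand-in names:

package main

import "fmt"

// target is an illustrative stand-in for a scanned container.
type target struct {
    name        string
    monitorOnly bool
}

// selectForUpdate keeps the scan order (already dependency-sorted upstream)
// and drops anything flagged as monitor-only.
func selectForUpdate(all []target, monitorOnlyRun bool) []target {
    if monitorOnlyRun {
        return nil // a monitor-only run never updates anything
    }
    var toUpdate []target
    for _, t := range all {
        if !t.monitorOnly {
            toUpdate = append(toUpdate, t)
        }
    }
    return toUpdate
}

func main() {
    all := []target{
        {name: "db"},
        {name: "metrics", monitorOnly: true},
        {name: "app"},
    }
    fmt.Println(selectForUpdate(all, false)) // [{db false} {app false}]
}
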
@@ -86,7 +87,7 @@ func performRollingRestart(containers []container.Container, client container.Cl
    failed := 0
    for i := len(containers) - 1; i >= 0; i-- {
        if containers[i].Stale {
        if containers[i].ToRestart() {
            if err := stopStaleContainer(containers[i], client, params); err != nil {
                failed++
            }
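
From here on the commit consistently replaces checks on the Stale flag with ToRestart(). The method body is not shown in this diff; judging from the LinkedToRestarting field introduced in checkDependencies below, it plausibly means "stale, or linked to something that will restart", which the following stand-in illustrates (the unit type and its field names are assumptions, not watchtower's API):

package main

import "fmt"

// unit is an illustrative stand-in for watchtower's container type; the
// toRestart body is an assumption based on how LinkedToRestarting is used.
type unit struct {
    name               string
    stale              bool // a newer image is available
    linkedToRestarting bool // depends on a container that will restart
}

func (u unit) toRestart() bool {
    return u.stale || u.linkedToRestarting
}

func main() {
    units := []unit{
        {name: "db", stale: true},
        {name: "app", linkedToRestarting: true},
        {name: "cache"},
    }
    for _, u := range units {
        fmt.Printf("%s restart=%v\n", u.name, u.toRestart())
    }
    // db restart=true, app restart=true, cache restart=false
}
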
@@ -119,7 +120,7 @@ func stopStaleContainer(container container.Container, client container.Client,
        return nil
    }
    if !container.Stale {
    if !container.ToRestart() {
        return nil
    }
    if params.LifecycleHooks {
@@ -143,7 +144,7 @@ func restartContainersInSortedOrder(containers []container.Container, client con
    failed := 0
    for _, c := range containers {
        if !c.Stale {
        if !c.ToRestart() {
            continue
        }
        if err := restartStaleContainer(c, client, params); err != nil {
@@ -183,7 +184,7 @@ func restartStaleContainer(container container.Container, client container.Clien
        if newContainerID, err := client.StartContainer(container); err != nil {
            log.Error(err)
            return err
        } else if container.Stale && params.LifecycleHooks {
        } else if container.ToRestart() && params.LifecycleHooks {
            lifecycle.ExecutePostUpdateCommand(client, newContainerID)
        }
    }
@@ -192,16 +193,19 @@ func restartStaleContainer(container container.Container, client container.Clien
func checkDependencies(containers []container.Container) {
    for i, parent := range containers {
        if parent.ToRestart() {
    for _, c := range containers {
        if c.ToRestart() {
            continue
        }
    LinkLoop:
        for _, linkName := range parent.Links() {
            for _, child := range containers {
                if child.Name() == linkName && child.ToRestart() {
                    containers[i].Linked = true
        for _, linkName := range c.Links() {
            for _, candidate := range containers {
                if candidate.Name() != linkName {
                    continue
                }
                if candidate.ToRestart() {
                    c.LinkedToRestarting = true
                    break LinkLoop
                }
            }
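
The rewritten checkDependencies walks every container that is not already due for a restart, looks up each of its links, and flags it with LinkedToRestarting when a link target will restart; combined with ToRestart() this is what drags dependent containers into the update. A self-contained sketch of that marking pass (item, markDependents and the sample data are illustrative stand-ins; the flag is written through the slice index so the change persists on value types):

package main

import "fmt"

// item is an illustrative stand-in for watchtower's container type.
type item struct {
    name               string
    links              []string // names this container depends on
    stale              bool
    linkedToRestarting bool
}

func (c item) toRestart() bool { return c.stale || c.linkedToRestarting }

// markDependents flags every container whose link target is going to restart,
// mirroring the rewritten checkDependencies shown above. Containers already
// due for a restart are skipped.
func markDependents(containers []item) {
    for i, c := range containers {
        if c.toRestart() {
            continue
        }
    LinkLoop:
        for _, linkName := range c.links {
            for _, candidate := range containers {
                if candidate.name != linkName {
                    continue
                }
                if candidate.toRestart() {
                    containers[i].linkedToRestarting = true
                    break LinkLoop
                }
            }
        }
    }
}

func main() {
    containers := []item{
        {name: "db", stale: true},            // will be updated
        {name: "app", links: []string{"db"}}, // must restart along with db
        {name: "cache"},                      // neither stale nor linked
    }
    markDependents(containers)
    for _, c := range containers {
        fmt.Printf("%s restart=%v\n", c.name, c.toRestart())
    }
    // db restart=true, app restart=true, cache restart=false
}
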