Merge branch 'main' into fix-container-downtime

nils måsén 2021-04-25 10:55:17 +02:00
commit 24276cfbc6
85 changed files with 3816 additions and 1098 deletions

View file

@@ -1,10 +1,11 @@
package actions_test
import (
"github.com/containrrr/watchtower/internal/actions"
"testing"
"time"
"github.com/containrrr/watchtower/internal/actions"
"github.com/containrrr/watchtower/pkg/container"
"github.com/containrrr/watchtower/pkg/container/mocks"

View file

@@ -1,32 +1,48 @@
package actions
import (
"errors"
"fmt"
"github.com/containrrr/watchtower/pkg/types"
"sort"
"strings"
"time"
"github.com/containrrr/watchtower/pkg/filters"
"github.com/containrrr/watchtower/pkg/sorter"
"github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus"
log "github.com/sirupsen/logrus"
"github.com/containrrr/watchtower/pkg/container"
)
// CheckForSanity makes sure everything is sane before starting
func CheckForSanity(client container.Client, filter types.Filter, rollingRestarts bool) error {
log.Debug("Making sure everything is sane before starting")
if rollingRestarts {
containers, err := client.ListContainers(filter)
if err != nil {
return err
}
for _, c := range containers {
if len(c.Links()) > 0 {
return fmt.Errorf(
"%q is depending on at least one other container. This is not compatible with rolling restarts",
c.Name(),
)
}
}
}
return nil
}
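A minimal usage sketch of the sanity check (the call site below is assumed, not part of this diff): it runs before the main update loop and aborts startup when rolling restarts are combined with linked containers.

```go
// Hypothetical call site: fail fast before entering the update loop.
if err := actions.CheckForSanity(client, filter, rollingRestarts); err != nil {
	log.Fatal(err)
}
```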
// CheckForMultipleWatchtowerInstances will ensure that there are not multiple instances of the
// watchtower running simultaneously. If multiple watchtower containers are detected, this function
// will stop and remove all but the most recently started container. This behaviour can be bypassed
// if a scope UID is defined.
func CheckForMultipleWatchtowerInstances(client container.Client, cleanup bool, scope string) error {
awaitDockerClient()
containers, err := client.ListContainers(filters.FilterByScope(scope, filters.WatchtowerContainersFilter))
if err != nil {
log.Fatal(err)
return err
}
@@ -40,7 +56,6 @@ func CheckForMultipleWatchtowerInstances(client container.Client, cleanup bool,
}
func cleanupExcessWatchtowers(containers []container.Container, client container.Client, cleanup bool) error {
var cleanupErrors int
var stopErrors int
sort.Sort(sorter.ByCreated(containers))
@@ -49,40 +64,21 @@ func cleanupExcessWatchtowers(containers []container.Container, client container
for _, c := range allContainersExceptLast {
if err := client.StopContainer(c, 10*time.Minute); err != nil {
// logging the original here as we're just returning a count
logrus.Error(err)
log.WithError(err).Error("Could not stop a previous watchtower instance.")
stopErrors++
continue
}
if cleanup {
if err := client.RemoveImageByID(c.ImageID()); err != nil {
// logging the original here as we're just returning a count
logrus.Error(err)
cleanupErrors++
log.WithError(err).Warning("Could not cleanup watchtower images, possibly because of other watchtowers instances in other scopes.")
}
}
}
return createErrorIfAnyHaveOccurred(stopErrors, cleanupErrors)
}
func createErrorIfAnyHaveOccurred(c int, i int) error {
if c == 0 && i == 0 {
return nil
if stopErrors > 0 {
return fmt.Errorf("%d errors while stopping watchtower containers", stopErrors)
}
var output strings.Builder
if c > 0 {
output.WriteString(fmt.Sprintf("%d errors while stopping containers", c))
}
if i > 0 {
output.WriteString(fmt.Sprintf("%d errors while cleaning up images", c))
}
return errors.New(output.String())
}
func awaitDockerClient() {
log.Debug("Sleeping for a second to ensure the docker api client has been properly initialized.")
time.Sleep(1 * time.Second)
return nil
}

View file

@@ -86,3 +86,8 @@ func (client MockClient) ExecuteCommand(containerID string, command string, time
func (client MockClient) IsContainerStale(c container.Container) (bool, error) {
return true, nil
}
// WarnOnHeadPullFailed is always true for the mock client
func (client MockClient) WarnOnHeadPullFailed(c container.Container) bool {
return true
}
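A sketch of how a caller might consume this flag; the gating below is an assumption about the real client, and the log messages are illustrative:

```go
// Hypothetical caller: pick the log level for a failed HEAD request
// based on the client's warning policy.
if client.WarnOnHeadPullFailed(c) {
	log.Warning("HEAD request failed, falling back to regular pull")
} else {
	log.Debug("HEAD request failed, falling back to regular pull")
}
```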

View file

@@ -4,6 +4,7 @@ import (
"github.com/containrrr/watchtower/pkg/container"
"github.com/docker/docker/api/types"
container2 "github.com/docker/docker/api/types/container"
"github.com/docker/go-connections/nat"
"time"
)
@@ -15,9 +16,14 @@ func CreateMockContainer(id string, name string, image string, created time.Time
Image: image,
Name: name,
Created: created.String(),
HostConfig: &container2.HostConfig{
PortBindings: map[nat.Port][]nat.PortBinding{},
},
},
Config: &container2.Config{
Labels: make(map[string]string),
Image: image,
Labels: make(map[string]string),
ExposedPorts: map[nat.Port]struct{}{},
},
}
dependencyString := ""
@@ -33,10 +39,40 @@ func CreateMockContainer(id string, name string, image string, created time.Time
&content,
&types.ImageInspect{
ID: image,
RepoDigests: []string{
image,
},
},
)
}
// CreateMockContainerWithImageInfo should only be used for testing
func CreateMockContainerWithImageInfo(id string, name string, image string, created time.Time, imageInfo types.ImageInspect) container.Container {
content := types.ContainerJSON{
ContainerJSONBase: &types.ContainerJSONBase{
ID: id,
Image: image,
Name: name,
Created: created.String(),
},
Config: &container2.Config{
Image: image,
Labels: make(map[string]string),
},
}
return *container.NewContainer(
&content,
&imageInfo,
)
}
// CreateMockContainerWithDigest should only be used for testing
func CreateMockContainerWithDigest(id string, name string, image string, created time.Time, digest string) container.Container {
c := CreateMockContainer(id, name, image, created)
c.ImageInfo().RepoDigests = []string{digest}
return c
}
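A test sketch using the new helper; the image reference and digest are illustrative placeholders, not values from the real suite:

```go
// Hypothetical usage: inject a known digest into a mock container.
c := mocks.CreateMockContainerWithDigest(
	"test-id",          // hypothetical container ID
	"/test-container",  // hypothetical name
	"org/image:latest", // hypothetical image reference
	time.Now(),
	"org/image@sha256:0123456789abcdef", // hypothetical digest
)
// c.ImageInfo().RepoDigests now contains only the injected digest.
```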
// CreateMockContainerWithConfig creates a container substitute valid for testing
func CreateMockContainerWithConfig(id string, name string, image string, created time.Time, config *container2.Config) container.Container {
content := types.ContainerJSON{

View file

@@ -1,174 +1,174 @@
package actions
import (
"errors"
"github.com/containrrr/watchtower/internal/util"
"github.com/containrrr/watchtower/pkg/container"
"github.com/containrrr/watchtower/pkg/lifecycle"
metrics2 "github.com/containrrr/watchtower/pkg/metrics"
"github.com/containrrr/watchtower/pkg/sorter"
"github.com/containrrr/watchtower/pkg/types"
log "github.com/sirupsen/logrus"
)
// CreateUndirectedLinks creates a map of undirected links
// Key: Name of a container
// Value: List of containers that are linked to the container
// i.e. if container A depends on B, undirectedNodes['A'] will initially contain B.
// This function adds 'A' into undirectedNodes['B'] to make the link undirected.
func CreateUndirectedLinks(containers []container.Container) map[string][]string {
// Update looks at the running Docker containers to see if any of the images
// used to start those containers have been updated. If a change is detected in
// any of the images, the associated containers are stopped and restarted with
// the new image.
func Update(client container.Client, params types.UpdateParams) (*metrics2.Metric, error) {
log.Debug("Checking containers for updated images")
metric := &metrics2.Metric{}
staleCount := 0
undirectedNodes := make(map[string][]string)
for i := 0; i < len(containers); i++ {
undirectedNodes[containers[i].Name()] = containers[i].Links()
if params.LifecycleHooks {
lifecycle.ExecutePreChecks(client, params)
}
for i := 0; i < len(containers); i++ {
for j := 0; j < len(containers[i].Links()); j++ {
undirectedNodes[containers[i].Links()[j]] = append(undirectedNodes[containers[i].Links()[j]], containers[i].Name())
}
}
return undirectedNodes
}
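To illustrate the doc comment above, with placeholder names "A" and "B":

```go
// If container "A" declares a link to "B", both directions are present
// after the call:
undirectedNodes := CreateUndirectedLinks(containers)
// undirectedNodes["A"] == []string{"B"} // initial pass, from A's Links()
// undirectedNodes["B"] == []string{"A"} // added to make the link undirected
```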
// PrepareContainerList prepares a dependency-sorted list of lists of containers.
// Each inner list contains containers that are related by links.
// This method checks for staleness, checks dependencies, sorts the containers and returns the final
// [][]container.Container
func PrepareContainerList(client container.Client, params types.UpdateParams) ([]container.Container, error) {
containers, err := client.ListContainers(params.Filter)
if err != nil {
return nil, err
}
staleCheckFailed := 0
for i, targetContainer := range containers {
stale, err := client.IsContainerStale(targetContainer)
if stale && !params.NoRestart && !params.MonitorOnly && !targetContainer.IsMonitorOnly() && !targetContainer.HasImageInfo() {
err = errors.New("no available image info")
shouldUpdate := stale && !params.NoRestart && !params.MonitorOnly && !targetContainer.IsMonitorOnly()
if err == nil && shouldUpdate {
// Check to make sure we have all the necessary information for recreating the container
err = targetContainer.VerifyConfiguration()
// If the image information is incomplete and trace logging is enabled, log it for further diagnosis
if err != nil && log.IsLevelEnabled(log.TraceLevel) {
imageInfo := targetContainer.ImageInfo()
log.Tracef("Image info: %#v", imageInfo)
log.Tracef("Container info: %#v", targetContainer.ContainerInfo())
if imageInfo != nil {
log.Tracef("Image config: %#v", imageInfo.Config)
}
}
}
if err != nil {
log.Infof("Unable to update container %q: %v. Proceeding to next.", containers[i].Name(), err)
log.Infof("Unable to update container %q: %v. Proceeding to next.", targetContainer.Name(), err)
stale = false
staleCheckFailed++
metric.Failed++
}
containers[i].Stale = stale
if stale {
staleCount++
}
}
containers, err = sorter.SortByDependencies(containers)
metric.Scanned = len(containers)
if err != nil {
return nil, err
}
checkDependencies(containers)
return containers, nil
}
// Update looks at the running Docker containers to see if any of the images
// used to start those containers have been updated. If a change is detected in
// any of the images, the associated containers are stopped and restarted with
// the new image.
func Update(client container.Client, params types.UpdateParams) error {
log.Debug("Checking containers for updated images")
if params.LifecycleHooks {
lifecycle.ExecutePreChecks(client, params)
}
containers, err := PrepareContainerList(client, params)
if err != nil {
return err
}
containersToUpdate := []container.Container{}
var containersToUpdate []container.Container
if !params.MonitorOnly {
for i := 0; i < len(containers); i++ {
if !containers[i].IsMonitorOnly() {
containersToUpdate = append(containersToUpdate, containers[i])
for _, c := range containers {
if !c.IsMonitorOnly() {
containersToUpdate = append(containersToUpdate, c)
}
}
}
// Shared map for independent and linked updates
imageIDs := make(map[string]bool)
if params.RollingRestart {
performRollingRestart(containersToUpdate, client, params)
metric.Failed += performRollingRestart(containersToUpdate, client, params)
} else {
var dependencySortedGraphs [][]container.Container
undirectedNodes := CreateUndirectedLinks(containersToUpdate)
dependencySortedGraphs, err := sorter.SortByDependencies(containersToUpdate, undirectedNodes)
if err != nil {
return err
}
// Use an ordered stop and restart for each independent set of containers
for _, dependencyGraph := range dependencySortedGraphs {
stopContainersInReversedOrder(dependencyGraph, client, params)
restartContainersInSortedOrder(dependencyGraph, client, params, imageIDs)
}
// Clean up after the containers have been updated
if params.Cleanup {
cleanupImages(client, imageIDs)
}
metric.Failed += stopContainersInReversedOrder(containersToUpdate, client, params)
metric.Failed += restartContainersInSortedOrder(containersToUpdate, client, params)
}
metric.Updated = staleCount - (metric.Failed - staleCheckFailed)
if params.LifecycleHooks {
lifecycle.ExecutePostChecks(client, params)
}
return nil
return metric, nil
}
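Since Update now returns a *metrics2.Metric alongside the error, a caller can record the session outcome; the call site below is a sketch, not code from this diff:

```go
// Hypothetical caller of the new signature.
metric, err := actions.Update(client, params)
if err != nil {
	log.Error(err)
} else {
	log.Infof("Session done: %d scanned, %d updated, %d failed",
		metric.Scanned, metric.Updated, metric.Failed)
}
```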
func performRollingRestart(containers []container.Container, client container.Client, params types.UpdateParams) {
func performRollingRestart(containers []container.Container, client container.Client, params types.UpdateParams) int {
cleanupImageIDs := make(map[string]bool)
failed := 0
for i := len(containers) - 1; i >= 0; i-- {
if containers[i].Stale {
stopStaleContainer(containers[i], client, params)
restartStaleContainer(containers[i], client, params)
if containers[i].ToRestart() {
if err := stopStaleContainer(containers[i], client, params); err != nil {
failed++
}
if err := restartStaleContainer(containers[i], client, params); err != nil {
failed++
}
cleanupImageIDs[containers[i].ImageID()] = true
}
}
if params.Cleanup {
cleanupImages(client, cleanupImageIDs)
}
return failed
}
func stopContainersInReversedOrder(containers []container.Container, client container.Client, params types.UpdateParams) {
func stopContainersInReversedOrder(containers []container.Container, client container.Client, params types.UpdateParams) int {
failed := 0
for i := len(containers) - 1; i >= 0; i-- {
stopStaleContainer(containers[i], client, params)
if err := stopStaleContainer(containers[i], client, params); err != nil {
failed++
}
}
return failed
}
func stopStaleContainer(container container.Container, client container.Client, params types.UpdateParams) {
func stopStaleContainer(container container.Container, client container.Client, params types.UpdateParams) error {
if container.IsWatchtower() {
log.Debugf("This is the watchtower container %s", container.Name())
return
return nil
}
if !container.Stale {
return
if !container.ToRestart() {
return nil
}
if params.LifecycleHooks {
if err := lifecycle.ExecutePreUpdateCommand(client, container); err != nil {
log.Error(err)
log.Info("Skipping container as the pre-update command failed")
return
return err
}
}
if err := client.StopContainer(container, params.Timeout); err != nil {
log.Error(err)
return err
}
return nil
}
func restartContainersInSortedOrder(containers []container.Container, client container.Client, params types.UpdateParams, imageIDs map[string]bool) {
for _, container := range containers {
if !container.Stale {
func restartContainersInSortedOrder(containers []container.Container, client container.Client, params types.UpdateParams) int {
imageIDs := make(map[string]bool)
failed := 0
for _, c := range containers {
if !c.ToRestart() {
continue
}
restartStaleContainer(container, client, params)
imageIDs[container.ImageID()] = true
if err := restartStaleContainer(c, client, params); err != nil {
failed++
}
imageIDs[c.ImageID()] = true
}
if params.Cleanup {
cleanupImages(client, imageIDs)
}
return failed
}
func cleanupImages(client container.Client, imageIDs map[string]bool) {
@@ -179,7 +179,7 @@ func cleanupImages(client container.Client, imageIDs map[string]bool) {
}
}
func restartStaleContainer(container container.Container, client container.Client, params types.UpdateParams) {
func restartStaleContainer(container container.Container, client container.Client, params types.UpdateParams) error {
// Since we can't shut down a watchtower container immediately, we need to
// start the new one while the old one is still running. This prevents us
// from re-using the same container name so we first rename the current
@@ -187,34 +187,39 @@ func restartStaleContainer(container container.Container, client container.Clien
if container.IsWatchtower() {
if err := client.RenameContainer(container, util.RandName()); err != nil {
log.Error(err)
return
return nil
}
}
if !params.NoRestart {
if newContainerID, err := client.StartContainer(container); err != nil {
log.Error(err)
} else if container.Stale && params.LifecycleHooks {
return err
} else if container.ToRestart() && params.LifecycleHooks {
lifecycle.ExecutePostUpdateCommand(client, newContainerID)
}
}
return nil
}
func checkDependencies(containers []container.Container) {
for i, parent := range containers {
if parent.ToRestart() {
for i, c := range containers {
if c.ToRestart() {
continue
}
LinkLoop:
for _, linkName := range parent.Links() {
for _, child := range containers {
if child.Name() == linkName && child.ToRestart() {
containers[i].Linked = true
for _, linkName := range c.Links() {
for _, candidate := range containers {
if candidate.Name() != linkName {
continue
}
if candidate.ToRestart() {
containers[i].LinkedToRestarting = true
break LinkLoop
}
}
}
}
}
}
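A worked example of the dependency check, with hypothetical container names; this assumes ToRestart() reports true when LinkedToRestarting is set:

```go
// "app" links to "db"; "db" has a stale image, "app" does not.
checkDependencies(containers)
// "app" is now marked LinkedToRestarting, so both containers are
// stopped and restarted together, preserving the link.
```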

View file

@@ -62,7 +62,7 @@ var _ = Describe("the update action", func() {
When("there are multiple containers using the same image", func() {
It("should only try to remove the image once", func() {
err := actions.Update(client, types.UpdateParams{Cleanup: true})
_, err := actions.Update(client, types.UpdateParams{Cleanup: true})
Expect(err).NotTo(HaveOccurred())
Expect(client.TestData.TriedToRemoveImageCount).To(Equal(1))
})
@@ -80,7 +80,7 @@ var _ = Describe("the update action", func() {
nil,
),
)
err := actions.Update(client, types.UpdateParams{Cleanup: true})
_, err := actions.Update(client, types.UpdateParams{Cleanup: true})
Expect(err).NotTo(HaveOccurred())
Expect(client.TestData.TriedToRemoveImageCount).To(Equal(2))
})
@@ -230,6 +230,14 @@ var _ = Describe("the update action", func() {
Expect(client.TestData.RestartOrder).To(Equal(ExpectedRestartOutput))
})
})
When("performing a rolling restart update", func() {
It("should try to remove the image once", func() {
_, err := actions.Update(client, types.UpdateParams{Cleanup: true, RollingRestart: true})
Expect(err).NotTo(HaveOccurred())
Expect(client.TestData.TriedToRemoveImageCount).To(Equal(1))
})
})
})
When("watchtower has been instructed to monitor only", func() {
@@ -264,7 +272,7 @@
})
It("should not update those containers", func() {
err := actions.Update(client, types.UpdateParams{Cleanup: true})
_, err := actions.Update(client, types.UpdateParams{Cleanup: true})
Expect(err).NotTo(HaveOccurred())
Expect(client.TestData.TriedToRemoveImageCount).To(Equal(1))
})
@@ -296,7 +304,7 @@
})
It("should not update any containers", func() {
err := actions.Update(client, types.UpdateParams{MonitorOnly: true})
_, err := actions.Update(client, types.UpdateParams{MonitorOnly: true})
Expect(err).NotTo(HaveOccurred())
Expect(client.TestData.TriedToRemoveImageCount).To(Equal(0))
})

View file

@@ -105,6 +105,12 @@ func RegisterSystemFlags(rootCmd *cobra.Command) {
viper.GetBool("WATCHTOWER_RUN_ONCE"),
"Run once now and exit")
flags.BoolP(
"include-restarting",
"",
viper.GetBool("WATCHTOWER_INCLUDE_RESTARTING"),
"Will also include restarting containers")
flags.BoolP(
"include-stopped",
"S",
@@ -130,10 +136,15 @@ func RegisterSystemFlags(rootCmd *cobra.Command) {
"Restart containers one at a time")
flags.BoolP(
"http-api",
"http-api-update",
"",
viper.GetBool("WATCHTOWER_HTTP_API"),
viper.GetBool("WATCHTOWER_HTTP_API_UPDATE"),
"Runs Watchtower in HTTP API mode, so that image updates must to be triggered by a request")
flags.BoolP(
"http-api-metrics",
"",
viper.GetBool("WATCHTOWER_HTTP_API_METRICS"),
"Runs Watchtower with the Prometheus metrics API enabled")
flags.StringP(
"http-api-token",
@@ -296,14 +307,21 @@ Should only be used for testing.`)
"",
viper.GetStringSlice("WATCHTOWER_NOTIFICATION_URL"),
"The shoutrrr URL to send notifications to")
flags.String(
"warn-on-head-failure",
viper.GetString("WATCHTOWER_WARN_ON_HEAD_FAILURE"),
"When to warn about HEAD pull requests failing. Possible values: always, auto or never")
}
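A sketch of how the renamed and added settings resolve through viper (the read sites are assumed, not shown in this diff):

```go
apiUpdates := viper.GetBool("WATCHTOWER_HTTP_API_UPDATE") // was WATCHTOWER_HTTP_API
apiMetrics := viper.GetBool("WATCHTOWER_HTTP_API_METRICS")
warnPolicy := viper.GetString("WATCHTOWER_WARN_ON_HEAD_FAILURE") // always, auto or never
```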
// SetDefaults provides default values for environment variables
func SetDefaults() {
day := (time.Hour * 24).Seconds()
viper.AutomaticEnv()
viper.SetDefault("DOCKER_HOST", "unix:///var/run/docker.sock")
viper.SetDefault("DOCKER_API_VERSION", DockerAPIMinVersion)
viper.SetDefault("WATCHTOWER_POLL_INTERVAL", 300)
viper.SetDefault("WATCHTOWER_POLL_INTERVAL", day)
viper.SetDefault("WATCHTOWER_TIMEOUT", time.Second*10)
viper.SetDefault("WATCHTOWER_NOTIFICATIONS", []string{})
viper.SetDefault("WATCHTOWER_NOTIFICATIONS_LEVEL", "info")