diff --git a/.all-contributorsrc b/.all-contributorsrc
index 65b6a43..266a339 100644
--- a/.all-contributorsrc
+++ b/.all-contributorsrc
@@ -627,8 +627,63 @@
"avatar_url": "https://avatars1.githubusercontent.com/u/2992895?v=4",
"profile": "https://github.com/bugficks",
"contributions": [
+ "code",
"doc"
]
+ },
+ {
+ "login": "MichaelSp",
+ "name": "Michael",
+ "avatar_url": "https://avatars0.githubusercontent.com/u/448282?v=4",
+ "profile": "https://github.com/MichaelSp",
+ "contributions": [
+ "code"
+ ]
+ },
+ {
+ "login": "x-jokay",
+ "name": "D. Domig",
+ "avatar_url": "https://avatars0.githubusercontent.com/u/18613935?v=4",
+ "profile": "https://github.com/x-jokay",
+ "contributions": [
+ "doc"
+ ]
+ },
+ {
+ "login": "osheroff",
+ "name": "Ben Osheroff",
+ "avatar_url": "https://avatars1.githubusercontent.com/u/260084?v=4",
+ "profile": "https://maxwells-daemon.io",
+ "contributions": [
+ "code"
+ ]
+ },
+ {
+ "login": "dhet",
+ "name": "David H.",
+ "avatar_url": "https://avatars3.githubusercontent.com/u/2668621?v=4",
+ "profile": "https://github.com/dhet",
+ "contributions": [
+ "code"
+ ]
+ },
+ {
+ "login": "chander",
+ "name": "Chander Ganesan",
+ "avatar_url": "https://avatars1.githubusercontent.com/u/671887?v=4",
+ "profile": "http://www.gridgeo.com",
+ "contributions": [
+ "doc"
+ ]
+ },
+ {
+ "login": "yrien30",
+ "name": "yrien30",
+ "avatar_url": "https://avatars1.githubusercontent.com/u/26816162?v=4",
+ "profile": "https://github.com/yrien30",
+ "contributions": [
+ "code"
+ ]
}
],
"contributorsPerLine": 7,
diff --git a/README.md b/README.md
index bf304be..e7d78d8 100644
--- a/README.md
+++ b/README.md
@@ -32,9 +32,6 @@
-
-
-
@@ -45,7 +42,7 @@
> ### ⚠️ Help needed
>
-> We're finding it a bit hard to keep up with all issues and pull requests. Interested in helping out with triage, troubleshooting and issue handling? Let us know on gitter!
+> We're finding it a bit hard to keep up with all issues and pull requests. Interested in helping out with triage, troubleshooting and issue handling? Let us know in the ["Discussions"](https://github.com/containrrr/watchtower/discussions) tab!
## Quick Start
@@ -155,7 +152,15 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
 Pierre Grimaud 📖 |
 Matt Doran 📖 |
 MihailITPlace 💻 |
-  bugficks 📖 |
+  bugficks 💻 📖 |
+  Michael 💻 |
+  D. Domig 📖 |
+  Ben Osheroff 💻 |
+
+
+  David H. 💻 |
+  Chander Ganesan 📖 |
+  yrien30 💻 |
diff --git a/cmd/root.go b/cmd/root.go
index bef694c..1e61308 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -30,6 +30,8 @@ var (
notifier *notifications.Notifier
timeout time.Duration
lifecycleHooks bool
+ rollingRestart bool
+ scope string
)
var rootCmd = &cobra.Command{
@@ -61,6 +63,17 @@ func Execute() {
func PreRun(cmd *cobra.Command, args []string) {
f := cmd.PersistentFlags()
+ if enabled, _ := f.GetBool("no-color"); enabled {
+ log.SetFormatter(&log.TextFormatter{
+ DisableColors: true,
+ })
+ } else {
+ // enable logrus built-in support for https://bixense.com/clicolors/
+ log.SetFormatter(&log.TextFormatter{
+ EnvironmentOverrideColors: true,
+ })
+ }
+
if enabled, _ := f.GetBool("debug"); enabled {
log.SetLevel(log.DebugLevel)
}
@@ -90,6 +103,10 @@ func PreRun(cmd *cobra.Command, args []string) {
enableLabel, _ = f.GetBool("label-enable")
lifecycleHooks, _ = f.GetBool("enable-lifecycle-hooks")
+ rollingRestart, _ = f.GetBool("rolling-restart")
+ scope, _ = f.GetString("scope")
+
+ log.Debug(scope)
// configure environment vars for client
err := flags.EnvConfig(cmd)
@@ -99,14 +116,20 @@ func PreRun(cmd *cobra.Command, args []string) {
noPull, _ := f.GetBool("no-pull")
includeStopped, _ := f.GetBool("include-stopped")
+ includeRestarting, _ := f.GetBool("include-restarting")
reviveStopped, _ := f.GetBool("revive-stopped")
removeVolumes, _ := f.GetBool("remove-volumes")
+ if monitorOnly && noPull {
+ log.Warn("Using `WATCHTOWER_NO_PULL` and `WATCHTOWER_MONITOR_ONLY` simultaneously might lead to no action being taken at all. If this is intentional, you may safely ignore this message.")
+ }
+
client = container.NewClient(
!noPull,
includeStopped,
reviveStopped,
removeVolumes,
+ includeRestarting,
)
notifier = notifications.NewNotifier(cmd)
@@ -114,10 +137,24 @@ func PreRun(cmd *cobra.Command, args []string) {
// Run is the main execution flow of the command
func Run(c *cobra.Command, names []string) {
- filter := filters.BuildFilter(names, enableLabel)
+ filter := filters.BuildFilter(names, enableLabel, scope)
runOnce, _ := c.PersistentFlags().GetBool("run-once")
httpAPI, _ := c.PersistentFlags().GetBool("http-api")
+ if runOnce {
+ if noStartupMessage, _ := c.PersistentFlags().GetBool("no-startup-message"); !noStartupMessage {
+ log.Info("Running a one time update.")
+ }
+ runUpdatesWithNotifications(filter)
+ notifier.Close()
+ os.Exit(0)
+ return
+ }
+
+ if err := actions.CheckForMultipleWatchtowerInstances(client, cleanup, scope); err != nil {
+ log.Fatal(err)
+ }
+
if httpAPI {
apiToken, _ := c.PersistentFlags().GetString("http-api-token")
@@ -129,19 +166,6 @@ func Run(c *cobra.Command, names []string) {
api.WaitForHTTPUpdates()
}
- if runOnce {
- if noStartupMessage, _ := c.PersistentFlags().GetBool("no-startup-message"); !noStartupMessage {
- log.Info("Running a one time update.")
- }
- runUpdatesWithNotifications(filter)
- os.Exit(0)
- return
- }
-
- if err := actions.CheckForMultipleWatchtowerInstances(client, cleanup); err != nil {
- log.Fatal(err)
- }
-
if err := runUpgradesOnSchedule(c, filter); err != nil {
log.Error(err)
}
@@ -202,6 +226,7 @@ func runUpdatesWithNotifications(filter t.Filter) {
Timeout: timeout,
MonitorOnly: monitorOnly,
LifecycleHooks: lifecycleHooks,
+ RollingRestart: rollingRestart,
}
err := actions.Update(client, updateParams)
if err != nil {
diff --git a/docs/arguments.md b/docs/arguments.md
index b22aa79..b80257a 100644
--- a/docs/arguments.md
+++ b/docs/arguments.md
@@ -88,6 +88,16 @@ Environment Variable: WATCHTOWER_TRACE
Default: false
```
+## ANSI colors
+Disable ANSI color escape codes in log output.
+
+```
+ Argument: --no-color
+Environment Variable: NO_COLOR
+ Type: Boolean
+ Default: false
+```
+
## Docker host
Docker daemon socket to connect to. Can be pointed at a remote Docker host by specifying a TCP endpoint as "tcp://hostname:port".
@@ -116,7 +126,7 @@ Will also include created and exited containers.
Environment Variable: WATCHTOWER_INCLUDE_STOPPED
Type: Boolean
Default: false
-```
+```
## Revive stopped
Start any stopped containers that have had their image updated. This argument is only usable with the `--include-stopped` argument.
@@ -126,7 +136,7 @@ Start any stopped containers that have had their image updated. This argument is
Environment Variable: WATCHTOWER_REVIVE_STOPPED
Type: Boolean
Default: false
-```
+```
## Poll interval
Poll interval (in seconds). This value controls how frequently watchtower will poll for new images. Either `--schedule` or a poll interval can be defined, but not both.
@@ -136,7 +146,7 @@ Poll interval (in seconds). This value controls how frequently watchtower will p
Environment Variable: WATCHTOWER_POLL_INTERVAL
Type: Integer
Default: 300
-```
+```
## Filter by enable label
Update containers that have a `com.centurylinklabs.watchtower.enable` label set to true.
@@ -146,13 +156,13 @@ Update containers that have a `com.centurylinklabs.watchtower.enable` label set
Environment Variable: WATCHTOWER_LABEL_ENABLE
Type: Boolean
Default: false
-```
+```
## Filter by disable label
**Do not** update containers that have `com.centurylinklabs.watchtower.enable` label set to false and no `--label-enable` argument is passed. Note that only one or the other (targeting by enable label) can be used at the same time to target containers.
## Without updating containers
-Will only monitor for new images, not update the containers.
+Will only monitor for new images, send notifications and invoke the [pre-check/post-check hooks](https://containrrr.dev/watchtower/lifecycle-hooks/), but will **not** update the containers.
> ### ⚠️ Please note
>
@@ -163,7 +173,9 @@ Will only monitor for new images, not update the containers.
Environment Variable: WATCHTOWER_MONITOR_ONLY
Type: Boolean
Default: false
-```
+```
+
+Note that monitor-only can also be specified on a per-container basis with the `com.centurylinklabs.watchtower.monitor-only` label set on those containers.
## Without restarting containers
Do not restart containers after updating. This option can be useful when the start of the containers
@@ -186,10 +198,10 @@ them to a registry.
Environment Variable: WATCHTOWER_NO_PULL
Type: Boolean
Default: false
-```
+```
## Without sending a startup message
-Do not send a message after watchtower started. Otherwise there will be an info-level notification.
+Do not send a message after watchtower started. Otherwise there will be an info-level notification.
```
Argument: --no-startup-message
@@ -216,7 +228,7 @@ Runs Watchtower in HTTP API mode, only allowing image updates to be triggered by
Environment Variable: WATCHTOWER_HTTP_API
Type: Boolean
Default: false
-```
+```
## HTTP API Token
Sets an authentication token to HTTP API requests.
@@ -226,10 +238,20 @@ Sets an authentication token to HTTP API requests.
Environment Variable: WATCHTOWER_HTTP_API_TOKEN
Type: String
Default: -
+```
+
+## Filter by scope
+Update containers that have a `com.centurylinklabs.watchtower.scope` label set with the same value as the given argument. This enables [running multiple instances](https://containrrr.github.io/watchtower/running-multiple-instances).
+
+```
+ Argument: --scope
+Environment Variable: WATCHTOWER_SCOPE
+ Type: String
+ Default: -
```
## Scheduling
-[Cron expression](https://pkg.go.dev/github.com/robfig/cron@v1.2.0?tab=doc#hdr-CRON_Expression_Format) in 6 fields (rather than the traditional 5) which defines when and how often to check for new images. Either `--interval` or the schedule expression
+[Cron expression](https://pkg.go.dev/github.com/robfig/cron@v1.2.0?tab=doc#hdr-CRON_Expression_Format) in 6 fields (rather than the traditional 5) which defines when and how often to check for new images. Either `--interval` or the schedule expression
can be defined, but not both. An example: `--schedule "0 0 4 * * *"`
```
@@ -237,7 +259,18 @@ can be defined, but not both. An example: `--schedule "0 0 4 * * *"`
Environment Variable: WATCHTOWER_SCHEDULE
Type: String
Default: -
-```
+```
+
+## Rolling restart
+Restart one image at a time instead of stopping and starting all at once. Useful in conjunction with lifecycle hooks
+to implement zero-downtime deployments.
+
+```
+ Argument: --rolling-restart
+Environment Variable: WATCHTOWER_ROLLING_RESTART
+ Type: Boolean
+ Default: false
+```
## Wait until timeout
Timeout before the container is forcefully stopped. When set, this option will change the default (`10s`) wait time to the given value. An example: `--stop-timeout 30s` will set the timeout to 30 seconds.
@@ -247,7 +280,7 @@ Timeout before the container is forcefully stopped. When set, this option will c
Environment Variable: WATCHTOWER_TIMEOUT
Type: Duration
Default: 10s
-```
+```
## TLS Verification
Use TLS when connecting to the Docker socket and verify the server's certificate. See below for options used to configure notifications.
diff --git a/docs/container-selection.md b/docs/container-selection.md
index eea0c03..799091f 100644
--- a/docs/container-selection.md
+++ b/docs/container-selection.md
@@ -1,5 +1,12 @@
By default, watchtower will watch all containers. However, sometimes only some containers should be updated.
+There are two options:
+
+- **Fully exclude**: You can choose to exclude containers entirely from being watched by watchtower.
+- **Monitor only**: In this mode, watchtower checks for container updates, sends notifications and invokes the [pre-check/post-check hooks](https://containrrr.dev/watchtower/lifecycle-hooks/) on the containers but does **not** perform the update.
+
+## Full Exclude
+
If you need to exclude some containers, set the _com.centurylinklabs.watchtower.enable_ label to `false`.
```docker
@@ -23,3 +30,29 @@ Or, it can be specified as part of the `docker run` command line:
```bash
docker run -d --label=com.centurylinklabs.watchtower.enable=true someimage
```
+
+If you wish to create a monitoring scope, you will need to [run multiple instances and set a scope for each of them](https://containrrr.github.io/watchtower/running-multiple-instances).
+
+Watchtower filters running containers by testing them against each configured criterion. A container is monitored if all criteria are met. For example:
+- If a container's name is on the monitoring name list (not empty `--name` argument) but it is not enabled (_com.centurylinklabs.watchtower.enable=false_), it won't be monitored;
+- If a container's name is not on the monitoring name list (not empty `--name` argument), even if it is enabled (_com.centurylinklabs.watchtower.enable=true_ and `--label-enable` flag is set), it won't be monitored;
+
+## Monitor Only
+
+Individual containers can be marked to only be monitored (without being updated).
+
+To do so, set the *com.centurylinklabs.watchtower.monitor-only* label to `true` on that container.
+
+```docker
+LABEL com.centurylinklabs.watchtower.monitor-only="true"
+```
+
+Or, it can be specified as part of the `docker run` command line:
+
+```bash
+docker run -d --label=com.centurylinklabs.watchtower.monitor-only=true someimage
+```
+
+When the label is specified on a container, watchtower treats that container exactly as if [`WATCHTOWER_MONITOR_ONLY`](https://containrrr.dev/watchtower/arguments/#without-updating-containers) was set, but the effect is limited to the individual container.
+
+
diff --git a/docs/private-registries.md b/docs/private-registries.md
index 147e307..535b3e8 100644
--- a/docs/private-registries.md
+++ b/docs/private-registries.md
@@ -59,14 +59,14 @@ docker run [...] -v /.docker/config.json:/config.json containr
When creating the watchtower container via docker-compose, use the following lines:
```yaml
-version: "3"
-[...]
-watchtower:
- image: index.docker.io/containrrr/watchtower:latest
- volumes:
+version: "3.4"
+services:
+ watchtower:
+ image: index.docker.io/containrrr/watchtower:latest
+ volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /.docker/config.json:/config.json
-[...]
+ ...
```
#### Docker Config path
@@ -74,10 +74,13 @@ By default, watchtower will look for the `config.json` file in `/`, but this can
Example usage:
```yaml
-watchtower:
- image: containrrr/watchtower
- environment:
- DOCKER_CONFIG: /config
+version: "3.4"
+
+services:
+ watchtower:
+ image: containrrr/watchtower
+ environment:
+ DOCKER_CONFIG: /config
volumes:
- /etc/watchtower/config/:/config/
- /var/run/docker.sock:/var/run/docker.sock
@@ -94,6 +97,12 @@ helper in a separate container and mount it using volumes.
### Example
Example implementation for use with [amazon-ecr-credential-helper](https://github.com/awslabs/amazon-ecr-credential-helper):
+
+Use the Dockerfile below to build the [amazon-ecr-credential-helper](https://github.com/awslabs/amazon-ecr-credential-helper)
+in a volume that may be mounted onto your watchtower container.
+
+1. Create the Dockerfile (contents below):
+
```Dockerfile
FROM golang:latest
@@ -111,43 +120,68 @@ RUN go build \
WORKDIR /go/bin/
```
+2. Use the following commands to build the aws-ecr-dock-cred-helper and store its output in a volume:
+
+```shell script
+# Create a volume to store the command (once built)
+docker volume create helper
+
+# Build the container
+docker build -t aws-ecr-dock-cred-helper .
+
+# Build the command and store it in the new volume in the /go/bin directory.
+docker run -d --rm --name aws-cred-helper --volume helper:/go/bin aws-ecr-dock-cred-helper
+
+```
+
+3. Create a configuration file for docker, and store it in $HOME/.docker/config.json (replace the
+ placeholders with your AWS Account ID):
+
+```json
+{
+ "credsStore" : "ecr-login",
+ "HttpHeaders" : {
+ "User-Agent" : "Docker-Client/19.03.1 (XXXXXX)"
+ },
+ "auths" : {
+ ".dkr.ecr.us-west-1.amazonaws.com" : {}
+ },
+ "credHelpers": {
+ ".dkr.ecr.us-west-1.amazonaws.com" : "ecr-login"
+ }
+}
+```
+
+4. Create a docker-compose file (as an example) to help launch the container:
+
and the docker-compose definition:
```yaml
-version: "3"
-
+version: "3.4"
services:
+ # Check for new images and restart things if a new image exists
+ # for any of our containers.
watchtower:
- image: index.docker.io/containrrr/watchtower:latest
+ image: containrrr/watchtower:latest
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- - /.docker/config.json:/config.json
+ - .docker/config.json:/config.json
- helper:/go/bin
environment:
- HOME=/
- PATH=$PATH:/go/bin
- - AWS_REGION=
- - AWS_ACCESS_KEY_ID=
- - AWS_SECRET_ACCESS_KEY=
+ - AWS_REGION=us-west-1
volumes:
- helper: {}
+ helper:
+ external: true
```
-and for `/.docker/config.json`:
-```json
- {
- "HttpHeaders" : {
- "User-Agent" : "Docker-Client/19.03.1 (XXXXXX)"
- },
- "credsStore" : "osxkeychain",
- "auths" : {
- "xyzxyzxyz.dkr.ecr.eu-north-1.amazonaws.com" : {},
- "https://index.docker.io/v1/": {}
- },
- "credHelpers": {
- "xyzxyzxyz.dkr.ecr.eu-north-1.amazonaws.com" : "ecr-login",
- "index.docker.io": "osxkeychain"
- }
- }
-```
+A few additional notes:
-*Note:* `osxkeychain` can be changed to your preferred credentials helper.
+1. With docker-compose the volume (helper, in this case) MUST be set to `external: true`, otherwise docker-compose
+ will prefix it with the directory name.
+2. Note that `"credsStore" : "ecr-login"` is needed - and in theory, if you have that, you can remove the
+ `credHelpers` section.
+3. I have this running on an EC2 instance that has credentials assigned to it - so no keys are needed; however,
+ you may need to include the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables as well.
+4. An alternative to adding the various variables is to create a ~/.aws/config and ~/.aws/credentials files and
+ place the settings there, then mount the ~/.aws directory to / in the container.
diff --git a/docs/running-multiple-instances.md b/docs/running-multiple-instances.md
new file mode 100644
index 0000000..82cd955
--- /dev/null
+++ b/docs/running-multiple-instances.md
@@ -0,0 +1,27 @@
+By default, Watchtower will clean up other instances and won't allow multiple instances running on the same Docker host or swarm. It is possible to override this behavior by defining a [scope](https://containrrr.github.io/watchtower/arguments/#filter-by-scope) for each running instance.
+
+Notice that:
+- Multiple instances can't run with the same scope;
+- An instance without a scope will clean up other running instances, even if they have a defined scope;
+
+To define an instance monitoring scope, use the `--scope` argument or the `WATCHTOWER_SCOPE` environment variable on startup and set the _com.centurylinklabs.watchtower.scope_ label with the same value for the containers you want to include in this instance's scope (including the instance itself).
+
+For example, in a Docker Compose config file:
+
+```json
+version: '3'
+
+services:
+ app-monitored-by-watchtower:
+ image: myapps/monitored-by-watchtower
+ labels:
+ - "com.centurylinklabs.watchtower.scope=myscope"
+
+ watchtower:
+ image: containrrr/watchtower
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ command: --interval 30 --scope myscope
+ labels:
+ - "com.centurylinklabs.watchtower.scope=myscope"
+```
\ No newline at end of file
diff --git a/internal/actions/actions_suite_test.go b/internal/actions/actions_suite_test.go
index e966fd8..4d8c0cd 100644
--- a/internal/actions/actions_suite_test.go
+++ b/internal/actions/actions_suite_test.go
@@ -46,7 +46,7 @@ var _ = Describe("the actions package", func() {
When("given an empty array", func() {
It("should not do anything", func() {
client.TestData.Containers = []container.Container{}
- err := actions.CheckForMultipleWatchtowerInstances(client, false)
+ err := actions.CheckForMultipleWatchtowerInstances(client, false, "")
Expect(err).NotTo(HaveOccurred())
})
})
@@ -60,7 +60,7 @@ var _ = Describe("the actions package", func() {
time.Now(),
make([]string,0)),
}
- err := actions.CheckForMultipleWatchtowerInstances(client, false)
+ err := actions.CheckForMultipleWatchtowerInstances(client, false, "")
Expect(err).NotTo(HaveOccurred())
})
})
@@ -93,7 +93,7 @@ var _ = Describe("the actions package", func() {
})
It("should stop all but the latest one", func() {
- err := actions.CheckForMultipleWatchtowerInstances(client, false)
+ err := actions.CheckForMultipleWatchtowerInstances(client, false, "")
Expect(err).NotTo(HaveOccurred())
})
})
@@ -125,12 +125,12 @@ var _ = Describe("the actions package", func() {
)
})
It("should try to delete the image if the cleanup flag is true", func() {
- err := actions.CheckForMultipleWatchtowerInstances(client, true)
+ err := actions.CheckForMultipleWatchtowerInstances(client, true, "")
Expect(err).NotTo(HaveOccurred())
Expect(client.TestData.TriedToRemoveImage()).To(BeTrue())
})
It("should not try to delete the image if the cleanup flag is false", func() {
- err := actions.CheckForMultipleWatchtowerInstances(client, false)
+ err := actions.CheckForMultipleWatchtowerInstances(client, false, "")
Expect(err).NotTo(HaveOccurred())
Expect(client.TestData.TriedToRemoveImage()).To(BeFalse())
})
diff --git a/internal/actions/check.go b/internal/actions/check.go
index c6d5c12..56a9fc4 100644
--- a/internal/actions/check.go
+++ b/internal/actions/check.go
@@ -19,10 +19,11 @@ import (
// CheckForMultipleWatchtowerInstances will ensure that there are not multiple instances of the
// watchtower running simultaneously. If multiple watchtower containers are detected, this function
-// will stop and remove all but the most recently started container.
-func CheckForMultipleWatchtowerInstances(client container.Client, cleanup bool) error {
+// will stop and remove all but the most recently started container. This behaviour can be bypassed
+// if a scope UID is defined.
+func CheckForMultipleWatchtowerInstances(client container.Client, cleanup bool, scope string) error {
awaitDockerClient()
- containers, err := client.ListContainers(filters.WatchtowerContainersFilter)
+ containers, err := client.ListContainers(filters.FilterByScope(scope, filters.WatchtowerContainersFilter))
if err != nil {
log.Fatal(err)
diff --git a/internal/actions/mocks/container.go b/internal/actions/mocks/container.go
index 060a0a0..92dd0b8 100644
--- a/internal/actions/mocks/container.go
+++ b/internal/actions/mocks/container.go
@@ -36,3 +36,22 @@ func CreateMockContainer(id string, name string, image string, created time.Time
},
)
}
+
+// CreateMockContainerWithConfig creates a container substitute valid for testing
+func CreateMockContainerWithConfig(id string, name string, image string, created time.Time, config *container2.Config) container.Container {
+ content := types.ContainerJSON{
+ ContainerJSONBase: &types.ContainerJSONBase{
+ ID: id,
+ Image: image,
+ Name: name,
+ Created: created.String(),
+ },
+ Config: config,
+ }
+ return *container.NewContainer(
+ &content,
+ &types.ImageInspect{
+ ID: image,
+ },
+ )
+}
diff --git a/internal/actions/update.go b/internal/actions/update.go
index c1774d9..eeb5033 100644
--- a/internal/actions/update.go
+++ b/internal/actions/update.go
@@ -1,6 +1,7 @@
package actions
import (
+ "errors"
"github.com/containrrr/watchtower/internal/util"
"github.com/containrrr/watchtower/pkg/container"
"github.com/containrrr/watchtower/pkg/lifecycle"
@@ -41,11 +42,13 @@ func PrepareContainerList(client container.Client, params types.UpdateParams) ([
return nil, err
}
- for i, container := range containers {
- stale, err := client.IsContainerStale(container)
+ for i, targetContainer := range containers {
+ stale, err := client.IsContainerStale(targetContainer)
+ if stale && !params.NoRestart && !params.MonitorOnly && !targetContainer.IsMonitorOnly() && !targetContainer.HasImageInfo() {
+ err = errors.New("no available image info")
+ }
if err != nil {
- log.Infof("Unable to update container %s. Proceeding to next.", containers[i].Name())
- log.Debug(err)
+ log.Infof("Unable to update container %q: %v. Proceeding to next.", containers[i].Name(), err)
stale = false
}
containers[i].Stale = stale
@@ -76,11 +79,13 @@ func Update(client container.Client, params types.UpdateParams) error {
lifecycle.ExecutePreChecks(client, params)
}
- if params.MonitorOnly {
- if params.LifecycleHooks {
- lifecycle.ExecutePostChecks(client, params)
+ containersToUpdate := []container.Container{}
+ if !params.MonitorOnly {
+ for i := len(containers) - 1; i >= 0; i-- {
+ if !containers[i].IsMonitorOnly() {
+ containersToUpdate = append(containersToUpdate, containers[i])
+ }
}
- return nil
}
//shared map for independent and linked update
@@ -113,6 +118,21 @@ func Update(client container.Client, params types.UpdateParams) error {
return nil
}
+func performRollingRestart(containers []container.Container, client container.Client, params types.UpdateParams) {
+ cleanupImageIDs := make(map[string]bool)
+
+ for i := len(containers) - 1; i >= 0; i-- {
+ if containers[i].Stale {
+ stopStaleContainer(containers[i], client, params)
+ restartStaleContainer(containers[i], client, params)
+ }
+ }
+
+ if params.Cleanup {
+ cleanupImages(client, cleanupImageIDs)
+ }
+}
+
func stopContainersInReversedOrder(containers []container.Container, client container.Client, params types.UpdateParams) {
for i := len(containers) - 1; i >= 0; i-- {
stopStaleContainer(containers[i], client, params)
@@ -146,8 +166,8 @@ func restartContainersInSortedOrder(containers []container.Container, client con
if !container.Stale {
continue
}
- restartStaleContainer(container, client, params)
- imageIDs[container.ImageID()] = true
+ restartStaleContainer(staleContainer, client, params)
+ imageIDs[staleContainer.ImageID()] = true
}
}
diff --git a/internal/actions/update_test.go b/internal/actions/update_test.go
index 315c1c7..de38520 100644
--- a/internal/actions/update_test.go
+++ b/internal/actions/update_test.go
@@ -5,6 +5,7 @@ import (
"github.com/containrrr/watchtower/pkg/container"
"github.com/containrrr/watchtower/pkg/container/mocks"
"github.com/containrrr/watchtower/pkg/types"
+ container2 "github.com/docker/docker/api/types/container"
cli "github.com/docker/docker/client"
"time"
@@ -228,4 +229,73 @@ var _ = Describe("the update action", func() {
})
})
})
+
+ When("watchtower has been instructed to monitor only", func() {
+ When("certain containers are set to monitor only", func() {
+ BeforeEach(func() {
+ client = CreateMockClient(
+ &TestData{
+ NameOfContainerToKeep: "test-container-02",
+ Containers: []container.Container{
+ CreateMockContainer(
+ "test-container-01",
+ "test-container-01",
+ "fake-image1:latest",
+ time.Now()),
+ CreateMockContainerWithConfig(
+ "test-container-02",
+ "test-container-02",
+ "fake-image2:latest",
+ time.Now(),
+ &container2.Config{
+ Labels: map[string]string{
+ "com.centurylinklabs.watchtower.monitor-only": "true",
+ },
+ }),
+ },
+ },
+ dockerClient,
+ false,
+ false,
+ )
+ })
+
+ It("should not update those containers", func() {
+ err := actions.Update(client, types.UpdateParams{Cleanup: true})
+ Expect(err).NotTo(HaveOccurred())
+ Expect(client.TestData.TriedToRemoveImageCount).To(Equal(1))
+ })
+ })
+
+ When("monitor only is set globally", func() {
+ BeforeEach(func() {
+ client = CreateMockClient(
+ &TestData{
+ Containers: []container.Container{
+ CreateMockContainer(
+ "test-container-01",
+ "test-container-01",
+ "fake-image:latest",
+ time.Now()),
+ CreateMockContainer(
+ "test-container-02",
+ "test-container-02",
+ "fake-image:latest",
+ time.Now()),
+ },
+ },
+ dockerClient,
+ false,
+ false,
+ )
+ })
+
+ It("should not update any containers", func() {
+ err := actions.Update(client, types.UpdateParams{MonitorOnly: true})
+ Expect(err).NotTo(HaveOccurred())
+ Expect(client.TestData.TriedToRemoveImageCount).To(Equal(0))
+ })
+ })
+
+ })
})
diff --git a/internal/flags/flags.go b/internal/flags/flags.go
index 8d8d2ab..c7c98b1 100644
--- a/internal/flags/flags.go
+++ b/internal/flags/flags.go
@@ -123,6 +123,12 @@ func RegisterSystemFlags(rootCmd *cobra.Command) {
viper.GetBool("WATCHTOWER_LIFECYCLE_HOOKS"),
"Enable the execution of commands triggered by pre- and post-update lifecycle hooks")
+ flags.BoolP(
+ "rolling-restart",
+ "",
+ viper.GetBool("WATCHTOWER_ROLLING_RESTART"),
+ "Restart containers one at a time")
+
flags.BoolP(
"http-api",
"",
@@ -134,6 +140,17 @@ func RegisterSystemFlags(rootCmd *cobra.Command) {
"",
viper.GetString("WATCHTOWER_HTTP_API_TOKEN"),
"Sets an authentication token to HTTP API requests.")
+ // https://no-color.org/
+ flags.BoolP(
+ "no-color",
+ "",
+ viper.IsSet("NO_COLOR"),
+ "Disable ANSI color escape codes in log output")
+ flags.StringP(
+ "scope",
+ "",
+ viper.GetString("WATCHTOWER_SCOPE"),
+ "Defines a monitoring scope for the Watchtower instance.")
}
// RegisterNotificationFlags that are used by watchtower to send notifications
diff --git a/mkdocs.yml b/mkdocs.yml
index 645c1cc..696f87d 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -20,5 +20,6 @@ nav:
- 'Secure connections': 'secure-connections.md'
- 'Stop signals': 'stop-signals.md'
- 'Lifecycle hooks': 'lifecycle-hooks.md'
+ - 'Running multiple instances': 'running-multiple-instances.md'
plugins:
- search
diff --git a/pkg/container/client.go b/pkg/container/client.go
index 2306812..a333ea5 100644
--- a/pkg/container/client.go
+++ b/pkg/container/client.go
@@ -3,11 +3,12 @@ package container
import (
"bytes"
"fmt"
- "github.com/containrrr/watchtower/pkg/registry"
"io/ioutil"
"strings"
"time"
+ "github.com/containrrr/watchtower/pkg/registry"
+
t "github.com/containrrr/watchtower/pkg/types"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
@@ -39,7 +40,7 @@ type Client interface {
// * DOCKER_HOST the docker-engine host to send api requests to
// * DOCKER_TLS_VERIFY whether to verify tls certificates
// * DOCKER_API_VERSION the minimum docker api version to work with
-func NewClient(pullImages bool, includeStopped bool, reviveStopped bool, removeVolumes bool) Client {
+func NewClient(pullImages bool, includeStopped bool, reviveStopped bool, removeVolumes bool, includeRestarting bool) Client {
cli, err := sdkClient.NewClientWithOpts(sdkClient.FromEnv)
if err != nil {
@@ -47,28 +48,34 @@ func NewClient(pullImages bool, includeStopped bool, reviveStopped bool, removeV
}
return dockerClient{
- api: cli,
- pullImages: pullImages,
- removeVolumes: removeVolumes,
- includeStopped: includeStopped,
- reviveStopped: reviveStopped,
+ api: cli,
+ pullImages: pullImages,
+ removeVolumes: removeVolumes,
+ includeStopped: includeStopped,
+ reviveStopped: reviveStopped,
+ includeRestarting: includeRestarting,
}
}
type dockerClient struct {
- api sdkClient.CommonAPIClient
- pullImages bool
- removeVolumes bool
- includeStopped bool
- reviveStopped bool
+ api sdkClient.CommonAPIClient
+ pullImages bool
+ removeVolumes bool
+ includeStopped bool
+ reviveStopped bool
+ includeRestarting bool
}
func (client dockerClient) ListContainers(fn t.Filter) ([]Container, error) {
cs := []Container{}
bg := context.Background()
- if client.includeStopped {
- log.Debug("Retrieving containers including stopped and exited")
+ if client.includeStopped && client.includeRestarting {
+ log.Debug("Retrieving running, stopped, restarting and exited containers")
+ } else if client.includeStopped {
+ log.Debug("Retrieving running, stopped and exited containers")
+ } else if client.includeRestarting {
+ log.Debug("Retrieving running and restarting containers")
} else {
log.Debug("Retrieving running containers")
}
@@ -108,6 +115,10 @@ func (client dockerClient) createListFilter() filters.Args {
filterArgs.Add("status", "exited")
}
+ if client.includeRestarting {
+ filterArgs.Add("status", "restarting")
+ }
+
return filterArgs
}
@@ -121,11 +132,11 @@ func (client dockerClient) GetContainer(containerID string) (Container, error) {
imageInfo, _, err := client.api.ImageInspectWithRaw(bg, containerInfo.Image)
if err != nil {
- return Container{}, err
+ log.Warnf("Failed to retrieve container image info: %v", err)
+ return Container{containerInfo: &containerInfo, imageInfo: nil}, nil
}
- container := Container{containerInfo: &containerInfo, imageInfo: &imageInfo}
- return container, nil
+ return Container{containerInfo: &containerInfo, imageInfo: &imageInfo}, nil
}
func (client dockerClient) StopContainer(c Container, timeout time.Duration) error {
diff --git a/pkg/container/container.go b/pkg/container/container.go
index bc2f600..9e339c3 100644
--- a/pkg/container/container.go
+++ b/pkg/container/container.go
@@ -90,6 +90,33 @@ func (c Container) Enabled() (bool, bool) {
return parsedBool, true
}
+// IsMonitorOnly returns the value of the monitor-only label. If the label
+// is not set then false is returned.
+func (c Container) IsMonitorOnly() bool {
+ rawBool, ok := c.getLabelValue(monitorOnlyLabel)
+ if !ok {
+ return false
+ }
+
+ parsedBool, err := strconv.ParseBool(rawBool)
+ if err != nil {
+ return false
+ }
+
+ return parsedBool
+}
+
+// Scope returns the value of the scope UID label and whether the
+// label was set.
+func (c Container) Scope() (string, bool) {
+ rawString, ok := c.getLabelValue(scope)
+ if !ok {
+ return "", false
+ }
+
+ return rawString, true
+}
+
// Links returns a list containing the names of all the containers to which
// this container is linked.
func (c Container) Links() []string {
@@ -221,3 +248,8 @@ func (c Container) hostConfig() *dockercontainer.HostConfig {
return hostConfig
}
+
+// HasImageInfo returns whether image information could be retrieved for the container
+func (c Container) HasImageInfo() bool {
+ return c.imageInfo != nil
+}
diff --git a/pkg/container/container_test.go b/pkg/container/container_test.go
index 4f0f544..16b8922 100644
--- a/pkg/container/container_test.go
+++ b/pkg/container/container_test.go
@@ -1,6 +1,8 @@
package container
import (
+ "testing"
+
"github.com/containrrr/watchtower/pkg/container/mocks"
"github.com/containrrr/watchtower/pkg/filters"
"github.com/docker/docker/api/types"
@@ -8,7 +10,6 @@ import (
cli "github.com/docker/docker/client"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
- "testing"
)
func TestContainer(t *testing.T) {
@@ -68,6 +69,44 @@ var _ = Describe("the container", func() {
Expect(len(containers) > 0).To(BeTrue())
})
})
+ When(`listing containers with the "include restart" option`, func() {
+ It("should return stopped, restarting and running containers", func() {
+ client = dockerClient{
+ api: docker,
+ pullImages: false,
+ includeRestarting: true,
+ }
+ containers, err := client.ListContainers(filters.NoFilter)
+ Expect(err).NotTo(HaveOccurred())
+ RestartingContainerFound := false
+ for _, ContainerRunning := range containers {
+ if ContainerRunning.containerInfo.State.Restarting {
+ RestartingContainerFound = true
+ }
+ }
+ Expect(RestartingContainerFound).To(BeTrue())
+ Expect(RestartingContainerFound).NotTo(BeFalse())
+ })
+ })
+ When(`listing containers without restarting ones`, func() {
+ It("should not return restarting containers", func() {
+ client = dockerClient{
+ api: docker,
+ pullImages: false,
+ includeRestarting: false,
+ }
+ containers, err := client.ListContainers(filters.NoFilter)
+ Expect(err).NotTo(HaveOccurred())
+ RestartingContainerFound := false
+ for _, ContainerRunning := range containers {
+ if ContainerRunning.containerInfo.State.Restarting {
+ RestartingContainerFound = true
+ }
+ }
+ Expect(RestartingContainerFound).To(BeFalse())
+ Expect(RestartingContainerFound).NotTo(BeTrue())
+ })
+ })
})
When("asked for metadata", func() {
var c *Container
diff --git a/pkg/container/metadata.go b/pkg/container/metadata.go
index 2c1b933..215cccb 100644
--- a/pkg/container/metadata.go
+++ b/pkg/container/metadata.go
@@ -4,8 +4,10 @@ const (
watchtowerLabel = "com.centurylinklabs.watchtower"
signalLabel = "com.centurylinklabs.watchtower.stop-signal"
enableLabel = "com.centurylinklabs.watchtower.enable"
+ monitorOnlyLabel = "com.centurylinklabs.watchtower.monitor-only"
dependsOnLabel = "com.centurylinklabs.watchtower.depends-on"
zodiacLabel = "com.centurylinklabs.zodiac.original-image"
+ scope = "com.centurylinklabs.watchtower.scope"
preCheckLabel = "com.centurylinklabs.watchtower.lifecycle.pre-check"
postCheckLabel = "com.centurylinklabs.watchtower.lifecycle.post-check"
preUpdateLabel = "com.centurylinklabs.watchtower.lifecycle.pre-update"
diff --git a/pkg/container/mocks/ApiServer.go b/pkg/container/mocks/ApiServer.go
index 82e05de..35b52e2 100644
--- a/pkg/container/mocks/ApiServer.go
+++ b/pkg/container/mocks/ApiServer.go
@@ -1,13 +1,16 @@
package mocks
import (
+ "encoding/json"
"fmt"
- "github.com/sirupsen/logrus"
"io/ioutil"
"net/http"
"net/http/httptest"
"path/filepath"
"strings"
+
+ "github.com/docker/docker/api/types"
+ "github.com/sirupsen/logrus"
)
// NewMockAPIServer returns a mocked docker api server that responds to some fixed requests
@@ -18,16 +21,36 @@ func NewMockAPIServer() *httptest.Server {
logrus.Debug("Mock server has received a HTTP call on ", r.URL)
var response = ""
- if isRequestFor("filters=%7B%22status%22%3A%7B%22running%22%3Atrue%7D%7D&limit=0", r) {
- response = getMockJSONFromDisk("./mocks/data/containers.json")
- } else if isRequestFor("filters=%7B%22status%22%3A%7B%22created%22%3Atrue%2C%22exited%22%3Atrue%2C%22running%22%3Atrue%7D%7D&limit=0", r) {
+ if isRequestFor("filters=", r) {
+
+ Filters := r.URL.Query().Get("filters")
+ var result map[string]interface{}
+ json.Unmarshal([]byte(Filters), &result)
+ status := result["status"].(map[string]interface{})
+
response = getMockJSONFromDisk("./mocks/data/containers.json")
+ var x2 []types.Container
+ var containers []types.Container
+ json.Unmarshal([]byte(response), &containers)
+ for _, v := range containers {
+ for key := range status {
+ if v.State == key {
+ x2 = append(x2, v)
+ }
+ }
+ }
+
+ b, _ := json.Marshal(x2)
+ response = string(b)
+
} else if isRequestFor("containers/json?limit=0", r) {
response = getMockJSONFromDisk("./mocks/data/containers.json")
} else if isRequestFor("ae8964ba86c7cd7522cf84e09781343d88e0e3543281c747d88b27e246578b65", r) {
response = getMockJSONFromDisk("./mocks/data/container_stopped.json")
} else if isRequestFor("b978af0b858aa8855cce46b628817d4ed58e58f2c4f66c9b9c5449134ed4c008", r) {
response = getMockJSONFromDisk("./mocks/data/container_running.json")
+ } else if isRequestFor("ae8964ba86c7cd7522cf84e09781343d88e0e3543281c747d88b27e246578b67", r) {
+ response = getMockJSONFromDisk("./mocks/data/container_restarting.json")
} else if isRequestFor("sha256:19d07168491a3f9e2798a9bed96544e34d57ddc4757a4ac5bb199dea896c87fd", r) {
response = getMockJSONFromDisk("./mocks/data/image01.json")
} else if isRequestFor("sha256:4dbc5f9c07028a985e14d1393e849ea07f68804c4293050d5a641b138db72daa", r) {
diff --git a/pkg/container/mocks/FilterableContainer.go b/pkg/container/mocks/FilterableContainer.go
index 508bd7c..1ae8125 100644
--- a/pkg/container/mocks/FilterableContainer.go
+++ b/pkg/container/mocks/FilterableContainer.go
@@ -55,3 +55,26 @@ func (_m *FilterableContainer) Name() string {
return r0
}
+
+// Scope provides a mock function with given fields:
+func (_m *FilterableContainer) Scope() (string, bool) {
+ ret := _m.Called()
+
+ var r0 string
+
+ if rf, ok := ret.Get(0).(func() string); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(string)
+ }
+
+ var r1 bool
+
+ if rf, ok := ret.Get(1).(func() bool); ok {
+ r1 = rf()
+ } else {
+ r1 = ret.Get(1).(bool)
+ }
+
+ return r0, r1
+}
diff --git a/pkg/container/mocks/data/container_restarting.json b/pkg/container/mocks/data/container_restarting.json
new file mode 100644
index 0000000..4eae912
--- /dev/null
+++ b/pkg/container/mocks/data/container_restarting.json
@@ -0,0 +1,205 @@
+{
+ "Id": "ae8964ba86c7cd7522cf84e09781343d88e0e3543281c747d88b27e246578b67",
+ "Created": "2019-04-10T19:51:22.245041005Z",
+ "Path": "/watchtower",
+ "Args": [],
+ "State": {
+ "Status": "restarting",
+ "Running": false,
+ "Paused": false,
+ "Restarting": true,
+ "OOMKilled": false,
+ "Dead": false,
+ "Pid": 0,
+ "ExitCode": 1,
+ "Error": "",
+ "StartedAt": "2019-04-10T19:51:22.918972606Z",
+ "FinishedAt": "2019-04-10T19:52:14.265091583Z"
+ },
+ "Image": "sha256:4dbc5f9c07028a985e14d1393e849ea07f68804c4293050d5a641b138db72daa",
+ "ResolvConfPath": "/var/lib/docker/containers/ae8964ba86c7cd7522cf84e09781343d88e0e3543281c747d88b27e246578b65/resolv.conf",
+ "HostnamePath": "/var/lib/docker/containers/ae8964ba86c7cd7522cf84e09781343d88e0e3543281c747d88b27e246578b65/hostname",
+ "HostsPath": "/var/lib/docker/containers/ae8964ba86c7cd7522cf84e09781343d88e0e3543281c747d88b27e246578b65/hosts",
+ "LogPath": "/var/lib/docker/containers/ae8964ba86c7cd7522cf84e09781343d88e0e3543281c747d88b27e246578b65/ae8964ba86c7cd7522cf84e09781343d88e0e3543281c747d88b27e246578b65-json.log",
+ "Name": "/watchtower-test",
+ "RestartCount": 0,
+ "Driver": "overlay2",
+ "Platform": "linux",
+ "MountLabel": "",
+ "ProcessLabel": "",
+ "AppArmorProfile": "",
+ "ExecIDs": null,
+ "HostConfig": {
+ "Binds": [
+ "/var/run/docker.sock:/var/run/docker.sock"
+ ],
+ "ContainerIDFile": "",
+ "LogConfig": {
+ "Type": "json-file",
+ "Config": {}
+ },
+ "NetworkMode": "default",
+ "PortBindings": {},
+ "RestartPolicy": {
+ "Name": "no",
+ "MaximumRetryCount": 0
+ },
+ "AutoRemove": false,
+ "VolumeDriver": "",
+ "VolumesFrom": null,
+ "CapAdd": null,
+ "CapDrop": null,
+ "Dns": [],
+ "DnsOptions": [],
+ "DnsSearch": [],
+ "ExtraHosts": null,
+ "GroupAdd": null,
+ "IpcMode": "shareable",
+ "Cgroup": "",
+ "Links": null,
+ "OomScoreAdj": 0,
+ "PidMode": "",
+ "Privileged": false,
+ "PublishAllPorts": false,
+ "ReadonlyRootfs": false,
+ "SecurityOpt": null,
+ "UTSMode": "",
+ "UsernsMode": "",
+ "ShmSize": 67108864,
+ "Runtime": "runc",
+ "ConsoleSize": [
+ 0,
+ 0
+ ],
+ "Isolation": "",
+ "CpuShares": 0,
+ "Memory": 0,
+ "NanoCpus": 0,
+ "CgroupParent": "",
+ "BlkioWeight": 0,
+ "BlkioWeightDevice": [],
+ "BlkioDeviceReadBps": null,
+ "BlkioDeviceWriteBps": null,
+ "BlkioDeviceReadIOps": null,
+ "BlkioDeviceWriteIOps": null,
+ "CpuPeriod": 0,
+ "CpuQuota": 0,
+ "CpuRealtimePeriod": 0,
+ "CpuRealtimeRuntime": 0,
+ "CpusetCpus": "",
+ "CpusetMems": "",
+ "Devices": [],
+ "DeviceCgroupRules": null,
+ "DiskQuota": 0,
+ "KernelMemory": 0,
+ "MemoryReservation": 0,
+ "MemorySwap": 0,
+ "MemorySwappiness": null,
+ "OomKillDisable": false,
+ "PidsLimit": 0,
+ "Ulimits": null,
+ "CpuCount": 0,
+ "CpuPercent": 0,
+ "IOMaximumIOps": 0,
+ "IOMaximumBandwidth": 0,
+ "MaskedPaths": [
+ "/proc/asound",
+ "/proc/acpi",
+ "/proc/kcore",
+ "/proc/keys",
+ "/proc/latency_stats",
+ "/proc/timer_list",
+ "/proc/timer_stats",
+ "/proc/sched_debug",
+ "/proc/scsi",
+ "/sys/firmware"
+ ],
+ "ReadonlyPaths": [
+ "/proc/bus",
+ "/proc/fs",
+ "/proc/irq",
+ "/proc/sys",
+ "/proc/sysrq-trigger"
+ ]
+ },
+ "GraphDriver": {
+ "Data": {
+ "LowerDir": "/var/lib/docker/overlay2/9f6b91ea6e142835035d91123bbc7a05224dfa2abd4d020eac42f2ab420ccddc-init/diff:/var/lib/docker/overlay2/cdf82f50bc49177d0c17c24f3eaa29eba607b70cc6a081f77781b21c59a13eb8/diff:/var/lib/docker/overlay2/8108325ee844603c9b08d2772cf6e65dccf31dd5171f265078e5ed79a0ba3c0f/diff:/var/lib/docker/overlay2/e5e0cce6bf91b829a308424d99d7e56a33be3a11414ff5cdc48e762a1342b20f/diff",
+ "MergedDir": "/var/lib/docker/overlay2/9f6b91ea6e142835035d91123bbc7a05224dfa2abd4d020eac42f2ab420ccddc/merged",
+ "UpperDir": "/var/lib/docker/overlay2/9f6b91ea6e142835035d91123bbc7a05224dfa2abd4d020eac42f2ab420ccddc/diff",
+ "WorkDir": "/var/lib/docker/overlay2/9f6b91ea6e142835035d91123bbc7a05224dfa2abd4d020eac42f2ab420ccddc/work"
+ },
+ "Name": "overlay2"
+ },
+ "Mounts": [
+ {
+ "Type": "bind",
+ "Source": "/var/run/docker.sock",
+ "Destination": "/var/run/docker.sock",
+ "Mode": "",
+ "RW": true,
+ "Propagation": "rprivate"
+ }
+ ],
+ "Config": {
+ "Hostname": "ae8964ba86c7",
+ "Domainname": "",
+ "User": "",
+ "AttachStdin": false,
+ "AttachStdout": true,
+ "AttachStderr": true,
+ "Tty": false,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "Cmd": null,
+ "Image": "containrrr/watchtower:latest",
+ "Volumes": null,
+ "WorkingDir": "",
+ "Entrypoint": [
+ "/watchtower"
+ ],
+ "OnBuild": null,
+ "Labels": {
+ "com.centurylinklabs.watchtower": "true"
+ }
+ },
+ "NetworkSettings": {
+ "Bridge": "",
+ "SandboxID": "05627d36c08ed994eebc44a2a8c9365a511756b55c500fb03fd5a14477cd4bf3",
+ "HairpinMode": false,
+ "LinkLocalIPv6Address": "",
+ "LinkLocalIPv6PrefixLen": 0,
+ "Ports": {},
+ "SandboxKey": "/var/run/docker/netns/05627d36c08e",
+ "SecondaryIPAddresses": null,
+ "SecondaryIPv6Addresses": null,
+ "EndpointID": "",
+ "Gateway": "",
+ "GlobalIPv6Address": "",
+ "GlobalIPv6PrefixLen": 0,
+ "IPAddress": "",
+ "IPPrefixLen": 0,
+ "IPv6Gateway": "",
+ "MacAddress": "",
+ "Networks": {
+ "bridge": {
+ "IPAMConfig": null,
+ "Links": null,
+ "Aliases": null,
+ "NetworkID": "8fcfd56fa9203bafa98510abb08bff66ad05bef5b6e97d158cbae3397e1e065e",
+ "EndpointID": "",
+ "Gateway": "",
+ "IPAddress": "",
+ "IPPrefixLen": 0,
+ "IPv6Gateway": "",
+ "GlobalIPv6Address": "",
+ "GlobalIPv6PrefixLen": 0,
+ "MacAddress": "",
+ "DriverOpts": null
+ }
+ }
+ }
+}
diff --git a/pkg/container/mocks/data/containers.json b/pkg/container/mocks/data/containers.json
index e2507bf..4acd7e2 100644
--- a/pkg/container/mocks/data/containers.json
+++ b/pkg/container/mocks/data/containers.json
@@ -109,5 +109,68 @@
"Propagation": "rprivate"
}
]
+ },
+ {
+ "Id": "ae8964ba86c7cd7522cf84e09781343d88e0e3543281c747d88b27e246578b67",
+ "Names": [
+ "/portainer"
+ ],
+ "Image": "portainer/portainer:latest",
+ "ImageID": "sha256:19d07168491a3f9e2798a9bed96544e34d57ddc4757a4ac5bb199dea896c87fd",
+ "Command": "/portainer",
+ "Created": 1554409712,
+ "Ports": [
+ {
+ "IP": "0.0.0.0",
+ "PrivatePort": 9000,
+ "PublicPort": 9000,
+ "Type": "tcp"
+ }
+ ],
+ "Labels": {},
+ "State": "restarting",
+ "Status": "Restarting (0) 35 seconds ago",
+ "HostConfig": {
+ "NetworkMode": "default"
+ },
+ "NetworkSettings": {
+ "Networks": {
+ "bridge": {
+ "IPAMConfig": null,
+ "Links": null,
+ "Aliases": null,
+ "NetworkID": "9352796e0330dcf31ce3d44fae4b719304b8b3fd97b02ade3aefb8737251682b",
+ "EndpointID": "a8bcd737f27edb4d2955f7bce0c777bb2990b792a6b335b0727387624abe0702",
+ "Gateway": "172.17.0.1",
+ "IPAddress": "172.17.0.2",
+ "IPPrefixLen": 16,
+ "IPv6Gateway": "",
+ "GlobalIPv6Address": "",
+ "GlobalIPv6PrefixLen": 0,
+ "MacAddress": "02:42:ac:11:00:02",
+ "DriverOpts": null
+ }
+ }
+ },
+ "Mounts": [
+ {
+ "Type": "volume",
+ "Name": "portainer_data",
+ "Source": "/var/lib/docker/volumes/portainer_data/_data",
+ "Destination": "/data",
+ "Driver": "local",
+ "Mode": "z",
+ "RW": true,
+ "Propagation": ""
+ },
+ {
+ "Type": "bind",
+ "Source": "/var/run/docker.sock",
+ "Destination": "/var/run/docker.sock",
+ "Mode": "",
+ "RW": true,
+ "Propagation": "rprivate"
+ }
+ ]
}
]
diff --git a/pkg/filters/filters.go b/pkg/filters/filters.go
index b923745..0e37885 100644
--- a/pkg/filters/filters.go
+++ b/pkg/filters/filters.go
@@ -51,8 +51,24 @@ func FilterByDisabledLabel(baseFilter t.Filter) t.Filter {
}
}
+// FilterByScope returns all containers that belong to a specific scope
+func FilterByScope(scope string, baseFilter t.Filter) t.Filter {
+ if scope == "" {
+ return baseFilter
+ }
+
+ return func(c t.FilterableContainer) bool {
+ containerScope, ok := c.Scope()
+ if ok && containerScope == scope {
+ return baseFilter(c)
+ }
+
+ return false
+ }
+}
+
// BuildFilter creates the needed filter of containers
-func BuildFilter(names []string, enableLabel bool) t.Filter {
+func BuildFilter(names []string, enableLabel bool, scope string) t.Filter {
filter := NoFilter
filter = FilterByNames(names, filter)
if enableLabel {
@@ -60,6 +76,11 @@ func BuildFilter(names []string, enableLabel bool) t.Filter {
// if the label is specifically set.
filter = FilterByEnableLabel(filter)
}
+ if scope != "" {
+ // If a scope has been defined, containers should only be considered
+ // if the scope is specifically set.
+ filter = FilterByScope(scope, filter)
+ }
filter = FilterByDisabledLabel(filter)
return filter
}
diff --git a/pkg/filters/filters_test.go b/pkg/filters/filters_test.go
index d24b186..5766b64 100644
--- a/pkg/filters/filters_test.go
+++ b/pkg/filters/filters_test.go
@@ -67,6 +67,29 @@ func TestFilterByEnableLabel(t *testing.T) {
container.AssertExpectations(t)
}
+func TestFilterByScope(t *testing.T) {
+ var scope string
+ scope = "testscope"
+
+ filter := FilterByScope(scope, NoFilter)
+ assert.NotNil(t, filter)
+
+ container := new(mocks.FilterableContainer)
+ container.On("Scope").Return("testscope", true)
+ assert.True(t, filter(container))
+ container.AssertExpectations(t)
+
+ container = new(mocks.FilterableContainer)
+ container.On("Scope").Return("nottestscope", true)
+ assert.False(t, filter(container))
+ container.AssertExpectations(t)
+
+ container = new(mocks.FilterableContainer)
+ container.On("Scope").Return("", false)
+ assert.False(t, filter(container))
+ container.AssertExpectations(t)
+}
+
func TestFilterByDisabledLabel(t *testing.T) {
filter := FilterByDisabledLabel(NoFilter)
assert.NotNil(t, filter)
@@ -91,7 +114,7 @@ func TestBuildFilter(t *testing.T) {
var names []string
names = append(names, "test")
- filter := BuildFilter(names, false)
+ filter := BuildFilter(names, false, "")
container := new(mocks.FilterableContainer)
container.On("Name").Return("Invalid")
@@ -127,7 +150,7 @@ func TestBuildFilterEnableLabel(t *testing.T) {
var names []string
names = append(names, "test")
- filter := BuildFilter(names, true)
+ filter := BuildFilter(names, true, "")
container := new(mocks.FilterableContainer)
container.On("Enabled").Return(false, false)
diff --git a/pkg/lifecycle/lifecycle.go b/pkg/lifecycle/lifecycle.go
index 2d2d50c..df639d7 100644
--- a/pkg/lifecycle/lifecycle.go
+++ b/pkg/lifecycle/lifecycle.go
@@ -36,7 +36,7 @@ func ExecutePreCheckCommand(client container.Client, container container.Contain
return
}
- log.Info("Executing pre-check command.")
+ log.Debug("Executing pre-check command.")
if err := client.ExecuteCommand(container.ID(), command, 1); err != nil {
log.Error(err)
}
@@ -50,7 +50,7 @@ func ExecutePostCheckCommand(client container.Client, container container.Contai
return
}
- log.Info("Executing post-check command.")
+ log.Debug("Executing post-check command.")
if err := client.ExecuteCommand(container.ID(), command, 1); err != nil {
log.Error(err)
}
@@ -65,7 +65,7 @@ func ExecutePreUpdateCommand(client container.Client, container container.Contai
return nil
}
- log.Info("Executing pre-update command.")
+ log.Debug("Executing pre-update command.")
return client.ExecuteCommand(container.ID(), command, timeout)
}
@@ -83,7 +83,7 @@ func ExecutePostUpdateCommand(client container.Client, newContainerID string) {
return
}
- log.Info("Executing post-update command.")
+ log.Debug("Executing post-update command.")
if err := client.ExecuteCommand(newContainerID, command, 1); err != nil {
log.Error(err)
}
diff --git a/pkg/notifications/email.go b/pkg/notifications/email.go
index c4ee56b..6079de7 100644
--- a/pkg/notifications/email.go
+++ b/pkg/notifications/email.go
@@ -153,3 +153,5 @@ func (e *emailTypeNotifier) Fire(entry *log.Entry) error {
}
return nil
}
+
+func (e *emailTypeNotifier) Close() {}
diff --git a/pkg/notifications/gotify.go b/pkg/notifications/gotify.go
index a065ac0..789f778 100644
--- a/pkg/notifications/gotify.go
+++ b/pkg/notifications/gotify.go
@@ -59,6 +59,8 @@ func (n *gotifyTypeNotifier) StartNotification() {}
func (n *gotifyTypeNotifier) SendNotification() {}
+func (n *gotifyTypeNotifier) Close() {}
+
func (n *gotifyTypeNotifier) Levels() []log.Level {
return n.logLevels
}
diff --git a/pkg/notifications/msteams.go b/pkg/notifications/msteams.go
index b356814..ab33966 100644
--- a/pkg/notifications/msteams.go
+++ b/pkg/notifications/msteams.go
@@ -47,6 +47,8 @@ func (n *msTeamsTypeNotifier) StartNotification() {}
func (n *msTeamsTypeNotifier) SendNotification() {}
+func (n *msTeamsTypeNotifier) Close() {}
+
func (n *msTeamsTypeNotifier) Levels() []log.Level {
return n.levels
}
diff --git a/pkg/notifications/notifier.go b/pkg/notifications/notifier.go
index 6595b22..dedb21a 100644
--- a/pkg/notifications/notifier.go
+++ b/pkg/notifications/notifier.go
@@ -66,3 +66,10 @@ func (n *Notifier) SendNotification() {
t.SendNotification()
}
}
+
+// Close closes all notifiers.
+func (n *Notifier) Close() {
+ for _, t := range n.types {
+ t.Close()
+ }
+}
diff --git a/pkg/notifications/shoutrrr.go b/pkg/notifications/shoutrrr.go
index 9a7cd62..d16808d 100644
--- a/pkg/notifications/shoutrrr.go
+++ b/pkg/notifications/shoutrrr.go
@@ -3,11 +3,11 @@ package notifications
import (
"bytes"
"fmt"
+ "github.com/containrrr/shoutrrr/pkg/types"
+ "strings"
"text/template"
- "strings"
"github.com/containrrr/shoutrrr"
- "github.com/containrrr/shoutrrr/pkg/router"
t "github.com/containrrr/watchtower/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@@ -18,13 +18,19 @@ const (
shoutrrrType = "shoutrrr"
)
+type router interface {
+ Send(message string, params *types.Params) []error
+}
+
// Implements Notifier, logrus.Hook
type shoutrrrTypeNotifier struct {
Urls []string
- Router *router.ServiceRouter
+ Router router
entries []*log.Entry
logLevels []log.Level
template *template.Template
+ messages chan string
+ done chan bool
}
func newShoutrrrNotifier(c *cobra.Command, acceptedLogLevels []log.Level) t.Notifier {
@@ -41,13 +47,33 @@ func newShoutrrrNotifier(c *cobra.Command, acceptedLogLevels []log.Level) t.Noti
Router: r,
logLevels: acceptedLogLevels,
template: getShoutrrrTemplate(c),
+ messages: make(chan string, 1),
+ done: make(chan bool),
}
log.AddHook(n)
+ // Do the sending in a separate goroutine so we don't block the main process.
+ go sendNotifications(n)
+
return n
}
+func sendNotifications(n *shoutrrrTypeNotifier) {
+ for msg := range n.messages {
+ errs := n.Router.Send(msg, nil)
+
+ for i, err := range errs {
+ if err != nil {
+ // Use fmt so it doesn't trigger another notification.
+ fmt.Println("Failed to send notification via shoutrrr (url="+n.Urls[i]+"): ", err)
+ }
+ }
+ }
+
+ n.done <- true
+}
+
func (e *shoutrrrTypeNotifier) buildMessage(entries []*log.Entry) string {
var body bytes.Buffer
if err := e.template.Execute(&body, entries); err != nil {
@@ -58,20 +84,8 @@ func (e *shoutrrrTypeNotifier) buildMessage(entries []*log.Entry) string {
}
func (e *shoutrrrTypeNotifier) sendEntries(entries []*log.Entry) {
-
msg := e.buildMessage(entries)
-
- // Do the sending in a separate goroutine so we don't block the main process.
- go func() {
- errs := e.Router.Send(msg, nil)
-
- for i, err := range errs {
- if err != nil {
- // Use fmt so it doesn't trigger another notification.
- fmt.Println("Failed to send notification via shoutrrr (url="+e.Urls[i]+"): ", err)
- }
- }
- }()
+ e.messages <- msg
}
func (e *shoutrrrTypeNotifier) StartNotification() {
@@ -89,6 +103,15 @@ func (e *shoutrrrTypeNotifier) SendNotification() {
e.entries = nil
}
+func (e *shoutrrrTypeNotifier) Close() {
+ close(e.messages)
+
+ // Use fmt so it doesn't trigger another notification.
+ fmt.Println("Waiting for the notification goroutine to finish")
+
+ _ = <-e.done
+}
+
func (e *shoutrrrTypeNotifier) Levels() []log.Level {
return e.logLevels
}
@@ -113,7 +136,7 @@ func getShoutrrrTemplate(c *cobra.Command) *template.Template {
funcs := template.FuncMap{
"ToUpper": strings.ToUpper,
"ToLower": strings.ToLower,
- "Title": strings.Title,
+ "Title": strings.Title,
}
// If we succeed in getting a non-empty template configuration
diff --git a/pkg/notifications/shoutrrr_test.go b/pkg/notifications/shoutrrr_test.go
index 5db7473..47334af 100644
--- a/pkg/notifications/shoutrrr_test.go
+++ b/pkg/notifications/shoutrrr_test.go
@@ -1,6 +1,7 @@
package notifications
import (
+ "github.com/containrrr/shoutrrr/pkg/types"
"testing"
"text/template"
@@ -74,7 +75,6 @@ func TestShoutrrrStringFunctions(t *testing.T) {
require.Equal(t, "INFO: foo bar Foo Bar\n", s)
}
-
func TestShoutrrrInvalidTemplateUsesTemplate(t *testing.T) {
cmd := new(cobra.Command)
@@ -102,3 +102,69 @@ func TestShoutrrrInvalidTemplateUsesTemplate(t *testing.T) {
require.Equal(t, sd, s)
}
+
+type blockingRouter struct {
+ unlock chan bool
+ sent chan bool
+}
+
+func (b blockingRouter) Send(message string, params *types.Params) []error {
+ _ = <-b.unlock
+ b.sent <- true
+ return nil
+}
+
+func TestSlowNotificationNotSent(t *testing.T) {
+ _, blockingRouter := sendNotificationsWithBlockingRouter()
+
+ notifSent := false
+ select {
+ case notifSent = <-blockingRouter.sent:
+ default:
+ }
+
+ require.Equal(t, false, notifSent)
+}
+
+func TestSlowNotificationSent(t *testing.T) {
+ shoutrrr, blockingRouter := sendNotificationsWithBlockingRouter()
+
+ blockingRouter.unlock <- true
+ shoutrrr.Close()
+
+ notifSent := false
+ select {
+ case notifSent = <-blockingRouter.sent:
+ default:
+ }
+ require.Equal(t, true, notifSent)
+}
+
+func sendNotificationsWithBlockingRouter() (*shoutrrrTypeNotifier, *blockingRouter) {
+ cmd := new(cobra.Command)
+
+ router := &blockingRouter{
+ unlock: make(chan bool, 1),
+ sent: make(chan bool, 1),
+ }
+
+ shoutrrr := &shoutrrrTypeNotifier{
+ template: getShoutrrrTemplate(cmd),
+ messages: make(chan string, 1),
+ done: make(chan bool),
+ Router: router,
+ }
+
+ entry := &log.Entry{
+ Message: "foo bar",
+ }
+
+ go sendNotifications(shoutrrr)
+
+ shoutrrr.StartNotification()
+ shoutrrr.Fire(entry)
+
+ shoutrrr.SendNotification()
+
+ return shoutrrr, router
+}
diff --git a/pkg/notifications/slack.go b/pkg/notifications/slack.go
index 42b7915..5f96390 100644
--- a/pkg/notifications/slack.go
+++ b/pkg/notifications/slack.go
@@ -42,3 +42,5 @@ func newSlackNotifier(c *cobra.Command, acceptedLogLevels []log.Level) t.Notifie
func (s *slackTypeNotifier) StartNotification() {}
func (s *slackTypeNotifier) SendNotification() {}
+
+func (s *slackTypeNotifier) Close() {}
diff --git a/pkg/types/filterable_container.go b/pkg/types/filterable_container.go
index d89b910..3c46295 100644
--- a/pkg/types/filterable_container.go
+++ b/pkg/types/filterable_container.go
@@ -6,4 +6,5 @@ type FilterableContainer interface {
Name() string
IsWatchtower() bool
Enabled() (bool, bool)
+ Scope() (string, bool)
}
diff --git a/pkg/types/notifier.go b/pkg/types/notifier.go
index c8d07d0..27dc483 100644
--- a/pkg/types/notifier.go
+++ b/pkg/types/notifier.go
@@ -4,4 +4,5 @@ package types
type Notifier interface {
StartNotification()
SendNotification()
+ Close()
}
diff --git a/pkg/types/update_params.go b/pkg/types/update_params.go
index 8c6fea7..611cc70 100644
--- a/pkg/types/update_params.go
+++ b/pkg/types/update_params.go
@@ -12,4 +12,5 @@ type UpdateParams struct {
Timeout time.Duration
MonitorOnly bool
LifecycleHooks bool
+ RollingRestart bool
}