Session report collection and report templates (#981)
* wip: notification stats
* make report notifications optional
* linting/documentation fixes
* linting/documentation fixes
* merge types.Container and container.Interface
* smaller naming/format fixes
* use typed image/container IDs
* simplify notifier and update tests
* add missed doc comments
* lint fixes
* remove unused constructors
* rename old/new current/latest
parent d0ecc23d72
commit e3dd8d688a
32 changed files with 853 additions and 598 deletions
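At its core, the change turns the integer counters that actions.Update used to return into a full session report, which is then consumed by both the notifier and the Prometheus metrics. Condensed from the cmd/root.go hunk below (error handling as in the diff):

    result, err := actions.Update(client, updateParams) // result is now a types.Report
    if err != nil {
        log.Error(err)
    }
    notifier.SendNotification(result)          // report-aware notifications
    metricResults := metrics.NewMetric(result) // counters derived from the same report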
13 cmd/root.go
@@ -34,7 +34,7 @@ var (
 noRestart bool
 monitorOnly bool
 enableLabel bool
-notifier *notifications.Notifier
+notifier t.Notifier
 timeout time.Duration
 lifecycleHooks bool
 rollingRestart bool
@@ -268,9 +268,9 @@ func writeStartupMessage(c *cobra.Command, sched time.Time, filtering string) {
 }
 
 notifs := "Using no notifications"
-notifList := notifier.String()
-if len(notifList) > 0 {
-notifs = "Using notifications: " + notifList
+notifierNames := notifier.GetNames()
+if len(notifierNames) > 0 {
+notifs = "Using notifications: " + strings.Join(notifierNames, ", ")
 }
 
 log.Info("Watchtower ", meta.Version, "\n", notifs, "\n", filtering, "\n", schedMessage)
@@ -338,11 +338,12 @@ func runUpdatesWithNotifications(filter t.Filter) *metrics.Metric {
 LifecycleHooks: lifecycleHooks,
 RollingRestart: rollingRestart,
 }
-metricResults, err := actions.Update(client, updateParams)
+result, err := actions.Update(client, updateParams)
 if err != nil {
 log.Error(err)
 }
-notifier.SendNotification()
+notifier.SendNotification(result)
+metricResults := metrics.NewMetric(result)
 log.Debugf("Session done: %v scanned, %v updated, %v failed",
 metricResults.Scanned, metricResults.Updated, metricResults.Failed)
 return metricResults
@@ -41,12 +41,12 @@ func CreateMockClient(data *TestData, api cli.CommonAPIClient, pullImages bool,
 }
 
 // ListContainers is a mock method returning the provided container testdata
-func (client MockClient) ListContainers(f t.Filter) ([]container.Container, error) {
+func (client MockClient) ListContainers(_ t.Filter) ([]container.Container, error) {
 return client.TestData.Containers, nil
 }
 
 // StopContainer is a mock method
-func (client MockClient) StopContainer(c container.Container, d time.Duration) error {
+func (client MockClient) StopContainer(c container.Container, _ time.Duration) error {
 if c.Name() == client.TestData.NameOfContainerToKeep {
 return errors.New("tried to stop the instance we want to keep")
 }
@@ -54,28 +54,28 @@ func (client MockClient) StopContainer(c container.Container, d time.Duration) e
 }
 
 // StartContainer is a mock method
-func (client MockClient) StartContainer(c container.Container) (string, error) {
+func (client MockClient) StartContainer(_ container.Container) (t.ContainerID, error) {
 return "", nil
 }
 
 // RenameContainer is a mock method
-func (client MockClient) RenameContainer(c container.Container, s string) error {
+func (client MockClient) RenameContainer(_ container.Container, _ string) error {
 return nil
 }
 
 // RemoveImageByID increments the TriedToRemoveImageCount on being called
-func (client MockClient) RemoveImageByID(id string) error {
+func (client MockClient) RemoveImageByID(_ t.ImageID) error {
 client.TestData.TriedToRemoveImageCount++
 return nil
 }
 
 // GetContainer is a mock method
-func (client MockClient) GetContainer(containerID string) (container.Container, error) {
+func (client MockClient) GetContainer(_ t.ContainerID) (container.Container, error) {
 return client.TestData.Containers[0], nil
 }
 
 // ExecuteCommand is a mock method
-func (client MockClient) ExecuteCommand(containerID string, command string, timeout int) (SkipUpdate bool, err error) {
+func (client MockClient) ExecuteCommand(_ t.ContainerID, command string, _ int) (SkipUpdate bool, err error) {
 switch command {
 case "/PreUpdateReturn0.sh":
 return false, nil
@@ -89,11 +89,11 @@ func (client MockClient) ExecuteCommand(containerID string, command string, time
 }
 
 // IsContainerStale is always true for the mock client
-func (client MockClient) IsContainerStale(c container.Container) (bool, error) {
-return true, nil
+func (client MockClient) IsContainerStale(_ container.Container) (bool, t.ImageID, error) {
+return true, "", nil
 }
 
 // WarnOnHeadPullFailed is always true for the mock client
-func (client MockClient) WarnOnHeadPullFailed(c container.Container) bool {
+func (client MockClient) WarnOnHeadPullFailed(_ container.Container) bool {
 return true
 }
@@ -1,10 +1,14 @@
 package mocks
 
 import (
+"fmt"
 "github.com/containrrr/watchtower/pkg/container"
+wt "github.com/containrrr/watchtower/pkg/types"
 "github.com/docker/docker/api/types"
-container2 "github.com/docker/docker/api/types/container"
+dockerContainer "github.com/docker/docker/api/types/container"
 "github.com/docker/go-connections/nat"
+"strconv"
+"strings"
 "time"
 )
 
@@ -16,11 +20,11 @@ func CreateMockContainer(id string, name string, image string, created time.Time
 Image: image,
 Name: name,
 Created: created.String(),
-HostConfig: &container2.HostConfig{
+HostConfig: &dockerContainer.HostConfig{
 PortBindings: map[nat.Port][]nat.PortBinding{},
 },
 },
-Config: &container2.Config{
+Config: &dockerContainer.Config{
 Image: image,
 Labels: make(map[string]string),
 ExposedPorts: map[nat.Port]struct{}{},
@@ -46,7 +50,7 @@ func CreateMockContainerWithImageInfo(id string, name string, image string, crea
 Name: name,
 Created: created.String(),
 },
-Config: &container2.Config{
+Config: &dockerContainer.Config{
 Image: image,
 Labels: make(map[string]string),
 },
@@ -65,18 +69,18 @@ func CreateMockContainerWithDigest(id string, name string, image string, created
 }
 
 // CreateMockContainerWithConfig creates a container substitute valid for testing
-func CreateMockContainerWithConfig(id string, name string, image string, running bool, restarting bool, created time.Time, config *container2.Config) container.Container {
+func CreateMockContainerWithConfig(id string, name string, image string, running bool, restarting bool, created time.Time, config *dockerContainer.Config) container.Container {
 content := types.ContainerJSON{
 ContainerJSONBase: &types.ContainerJSONBase{
 ID: id,
 Image: image,
 Name: name,
 State: &types.ContainerState{
 Running: running,
+Restarting: restarting,
 },
 Created: created.String(),
-HostConfig: &container2.HostConfig{
+HostConfig: &dockerContainer.HostConfig{
 PortBindings: map[nat.Port][]nat.PortBinding{},
 },
 },
@@ -89,3 +93,19 @@ func CreateMockContainerWithConfig(id string, name string, image string, running
 },
 )
 }
+
+// CreateContainerForProgress creates a container substitute for tracking session/update progress
+func CreateContainerForProgress(index int, idPrefix int, nameFormat string) (container.Container, wt.ImageID) {
+indexStr := strconv.Itoa(idPrefix + index)
+mockID := indexStr + strings.Repeat("0", 61-len(indexStr))
+contID := "c79" + mockID
+contName := fmt.Sprintf(nameFormat, index+1)
+oldImgID := "01d" + mockID
+newImgID := "d0a" + mockID
+imageName := fmt.Sprintf("mock/%s:latest", contName)
+config := &dockerContainer.Config{
+Image: imageName,
+}
+c := CreateMockContainerWithConfig(contID, contName, oldImgID, true, false, time.Now(), config)
+return c, wt.ImageID(newImgID)
+}
46 internal/actions/mocks/progress.go (new file)
@@ -0,0 +1,46 @@
+package mocks
+
+import (
+"errors"
+"github.com/containrrr/watchtower/pkg/session"
+wt "github.com/containrrr/watchtower/pkg/types"
+)
+
+// CreateMockProgressReport creates a mock report from a given set of container states
+// All containers will be given a unique ID and name based on its state and index
+func CreateMockProgressReport(states ...session.State) wt.Report {
+
+stateNums := make(map[session.State]int)
+progress := session.Progress{}
+failed := make(map[wt.ContainerID]error)
+
+for _, state := range states {
+index := stateNums[state]
+
+switch state {
+case session.SkippedState:
+c, _ := CreateContainerForProgress(index, 41, "skip%d")
+progress.AddSkipped(c, errors.New("unpossible"))
+break
+case session.FreshState:
+c, _ := CreateContainerForProgress(index, 31, "frsh%d")
+progress.AddScanned(c, c.ImageID())
+break
+case session.UpdatedState:
+c, newImage := CreateContainerForProgress(index, 11, "updt%d")
+progress.AddScanned(c, newImage)
+progress.MarkForUpdate(c.ID())
+break
+case session.FailedState:
+c, newImage := CreateContainerForProgress(index, 21, "fail%d")
+progress.AddScanned(c, newImage)
+failed[c.ID()] = errors.New("accidentally the whole container")
+}
+
+stateNums[state] = index + 1
+}
+progress.UpdateFailed(failed)
+
+return progress.Report()
+
+}
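A quick usage sketch for the helper above (hypothetical test fragment; the Report accessors match the ones used by the metrics and template code later in this commit):

    // Hypothetical Ginkgo test using the new mock helpers.
    report := mocks.CreateMockProgressReport(
        session.UpdatedState,
        session.FreshState,
        session.FailedState,
    )

    // One container was fed into the progress tracker per requested state.
    Expect(report.Updated()).To(HaveLen(1))
    Expect(report.Fresh()).To(HaveLen(1))
    Expect(report.Failed()).To(HaveLen(1))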
@@ -5,7 +5,7 @@ import (
 "github.com/containrrr/watchtower/internal/util"
 "github.com/containrrr/watchtower/pkg/container"
 "github.com/containrrr/watchtower/pkg/lifecycle"
-metrics2 "github.com/containrrr/watchtower/pkg/metrics"
+"github.com/containrrr/watchtower/pkg/session"
 "github.com/containrrr/watchtower/pkg/sorter"
 "github.com/containrrr/watchtower/pkg/types"
 log "github.com/sirupsen/logrus"
@@ -15,9 +15,9 @@ import (
 // used to start those containers have been updated. If a change is detected in
 // any of the images, the associated containers are stopped and restarted with
 // the new image.
-func Update(client container.Client, params types.UpdateParams) (*metrics2.Metric, error) {
+func Update(client container.Client, params types.UpdateParams) (types.Report, error) {
 log.Debug("Checking containers for updated images")
-metric := &metrics2.Metric{}
+progress := &session.Progress{}
 staleCount := 0
 
 if params.LifecycleHooks {
@@ -32,7 +32,7 @@ func Update(client container.Client, params types.UpdateParams) (*metrics2.Metri
 staleCheckFailed := 0
 
 for i, targetContainer := range containers {
-stale, err := client.IsContainerStale(targetContainer)
+stale, newestImage, err := client.IsContainerStale(targetContainer)
 shouldUpdate := stale && !params.NoRestart && !params.MonitorOnly && !targetContainer.IsMonitorOnly()
 if err == nil && shouldUpdate {
 // Check to make sure we have all the necessary information for recreating the container
@@ -52,7 +52,9 @@ func Update(client container.Client, params types.UpdateParams) (*metrics2.Metri
 log.Infof("Unable to update container %q: %v. Proceeding to next.", targetContainer.Name(), err)
 stale = false
 staleCheckFailed++
-metric.Failed++
+progress.AddSkipped(targetContainer, err)
+} else {
+progress.AddScanned(targetContainer, newestImage)
 }
 containers[i].Stale = stale
 
@@ -62,8 +64,6 @@ func Update(client container.Client, params types.UpdateParams) (*metrics2.Metri
 }
 
 containers, err = sorter.SortByDependencies(containers)
-
-metric.Scanned = len(containers)
 if err != nil {
 return nil, err
 }
@@ -75,38 +75,38 @@ func Update(client container.Client, params types.UpdateParams) (*metrics2.Metri
 for _, c := range containers {
 if !c.IsMonitorOnly() {
 containersToUpdate = append(containersToUpdate, c)
+progress.MarkForUpdate(c.ID())
 }
 }
 }
 
 if params.RollingRestart {
-metric.Failed += performRollingRestart(containersToUpdate, client, params)
+progress.UpdateFailed(performRollingRestart(containersToUpdate, client, params))
 } else {
-imageIDsOfStoppedContainers := make(map[string]bool)
-metric.Failed, imageIDsOfStoppedContainers = stopContainersInReversedOrder(containersToUpdate, client, params)
-metric.Failed += restartContainersInSortedOrder(containersToUpdate, client, params, imageIDsOfStoppedContainers)
+failedStop, stoppedImages := stopContainersInReversedOrder(containersToUpdate, client, params)
+progress.UpdateFailed(failedStop)
+failedStart := restartContainersInSortedOrder(containersToUpdate, client, params, stoppedImages)
+progress.UpdateFailed(failedStart)
 }
 
-metric.Updated = staleCount - (metric.Failed - staleCheckFailed)
-
 if params.LifecycleHooks {
 lifecycle.ExecutePostChecks(client, params)
 }
-return metric, nil
+return progress.Report(), nil
 }
 
-func performRollingRestart(containers []container.Container, client container.Client, params types.UpdateParams) int {
-cleanupImageIDs := make(map[string]bool)
-failed := 0
+func performRollingRestart(containers []container.Container, client container.Client, params types.UpdateParams) map[types.ContainerID]error {
+cleanupImageIDs := make(map[types.ImageID]bool, len(containers))
+failed := make(map[types.ContainerID]error, len(containers))
 
 for i := len(containers) - 1; i >= 0; i-- {
 if containers[i].ToRestart() {
 err := stopStaleContainer(containers[i], client, params)
 if err != nil {
-failed++
+failed[containers[i].ID()] = err
 } else {
 if err := restartStaleContainer(containers[i], client, params); err != nil {
-failed++
+failed[containers[i].ID()] = err
 }
 cleanupImageIDs[containers[i].ImageID()] = true
 }
@@ -119,18 +119,18 @@ func performRollingRestart(containers []container.Container, client container.Cl
 return failed
 }
 
-func stopContainersInReversedOrder(containers []container.Container, client container.Client, params types.UpdateParams) (int, map[string]bool) {
-imageIDsOfStoppedContainers := make(map[string]bool)
-failed := 0
+func stopContainersInReversedOrder(containers []container.Container, client container.Client, params types.UpdateParams) (failed map[types.ContainerID]error, stopped map[types.ImageID]bool) {
+failed = make(map[types.ContainerID]error, len(containers))
+stopped = make(map[types.ImageID]bool, len(containers))
 for i := len(containers) - 1; i >= 0; i-- {
 if err := stopStaleContainer(containers[i], client, params); err != nil {
-failed++
+failed[containers[i].ID()] = err
 } else {
-imageIDsOfStoppedContainers[containers[i].ImageID()] = true
+stopped[containers[i].ImageID()] = true
 }
 
 }
-return failed, imageIDsOfStoppedContainers
+return
 }
 
 func stopStaleContainer(container container.Container, client container.Client, params types.UpdateParams) error {
@@ -143,15 +143,15 @@ func stopStaleContainer(container container.Container, client container.Client,
 return nil
 }
 if params.LifecycleHooks {
-SkipUpdate, err := lifecycle.ExecutePreUpdateCommand(client, container)
+skipUpdate, err := lifecycle.ExecutePreUpdateCommand(client, container)
 if err != nil {
 log.Error(err)
 log.Info("Skipping container as the pre-update command failed")
 return err
 }
-if SkipUpdate {
+if skipUpdate {
 log.Debug("Skipping container as the pre-update command returned exit code 75 (EX_TEMPFAIL)")
-return errors.New("Skipping container as the pre-update command returned exit code 75 (EX_TEMPFAIL)")
+return errors.New("skipping container as the pre-update command returned exit code 75 (EX_TEMPFAIL)")
 }
 }
 
@@ -162,31 +162,30 @@ func stopStaleContainer(container container.Container, client container.Client,
 return nil
 }
 
-func restartContainersInSortedOrder(containers []container.Container, client container.Client, params types.UpdateParams, imageIDsOfStoppedContainers map[string]bool) int {
-imageIDs := make(map[string]bool)
-
-failed := 0
+func restartContainersInSortedOrder(containers []container.Container, client container.Client, params types.UpdateParams, stoppedImages map[types.ImageID]bool) map[types.ContainerID]error {
+cleanupImageIDs := make(map[types.ImageID]bool, len(containers))
+failed := make(map[types.ContainerID]error, len(containers))
 
 for _, c := range containers {
 if !c.ToRestart() {
 continue
 }
-if imageIDsOfStoppedContainers[c.ImageID()] {
+if stoppedImages[c.ImageID()] {
 if err := restartStaleContainer(c, client, params); err != nil {
-failed++
+failed[c.ID()] = err
 }
-imageIDs[c.ImageID()] = true
+cleanupImageIDs[c.ImageID()] = true
 }
 }
 
 if params.Cleanup {
-cleanupImages(client, imageIDs)
+cleanupImages(client, cleanupImageIDs)
 }
 
 return failed
 }
 
-func cleanupImages(client container.Client, imageIDs map[string]bool) {
+func cleanupImages(client container.Client, imageIDs map[types.ImageID]bool) {
 for imageID := range imageIDs {
 if err := client.RemoveImageByID(imageID); err != nil {
 log.Error(err)
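pkg/session itself is not shown in this commit view; judging from the call sites in Update above, the Progress type exposes roughly the following surface (an inferred sketch, not the actual definition):

    // Inferred from actions/update.go call sites only; the method names are real,
    // the receiver layout is an assumption.
    type Progress struct {
        // per-container state keyed by types.ContainerID (assumed)
    }

    func (m *Progress) AddSkipped(c container.Container, err error)            {}
    func (m *Progress) AddScanned(c container.Container, latest types.ImageID) {}
    func (m *Progress) MarkForUpdate(id types.ContainerID)                     {}
    func (m *Progress) UpdateFailed(failures map[types.ContainerID]error)      {}
    func (m *Progress) Report() types.Report                                   { return nil }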
@@ -5,7 +5,7 @@ import (
 "github.com/containrrr/watchtower/pkg/container"
 "github.com/containrrr/watchtower/pkg/container/mocks"
 "github.com/containrrr/watchtower/pkg/types"
-container2 "github.com/docker/docker/api/types/container"
+dockerContainer "github.com/docker/docker/api/types/container"
 cli "github.com/docker/docker/client"
 "github.com/docker/go-connections/nat"
 "time"
@@ -110,7 +110,7 @@ var _ = Describe("the update action", func() {
 false,
 false,
 time.Now(),
-&container2.Config{
+&dockerContainer.Config{
 Labels: map[string]string{
 "com.centurylinklabs.watchtower.monitor-only": "true",
 },
@@ -177,7 +177,7 @@ var _ = Describe("the update action", func() {
 true,
 false,
 time.Now(),
-&container2.Config{
+&dockerContainer.Config{
 Labels: map[string]string{
 "com.centurylinklabs.watchtower.lifecycle.pre-update-timeout": "190",
 "com.centurylinklabs.watchtower.lifecycle.pre-update": "/PreUpdateReturn1.sh",
@@ -213,7 +213,7 @@ var _ = Describe("the update action", func() {
 true,
 false,
 time.Now(),
-&container2.Config{
+&dockerContainer.Config{
 Labels: map[string]string{
 "com.centurylinklabs.watchtower.lifecycle.pre-update-timeout": "190",
 "com.centurylinklabs.watchtower.lifecycle.pre-update": "/PreUpdateReturn75.sh",
@@ -249,7 +249,7 @@ var _ = Describe("the update action", func() {
 true,
 false,
 time.Now(),
-&container2.Config{
+&dockerContainer.Config{
 Labels: map[string]string{
 "com.centurylinklabs.watchtower.lifecycle.pre-update-timeout": "190",
 "com.centurylinklabs.watchtower.lifecycle.pre-update": "/PreUpdateReturn0.sh",
@@ -284,7 +284,7 @@ var _ = Describe("the update action", func() {
 false,
 false,
 time.Now(),
-&container2.Config{
+&dockerContainer.Config{
 Labels: map[string]string{
 "com.centurylinklabs.watchtower.lifecycle.pre-update-timeout": "190",
 "com.centurylinklabs.watchtower.lifecycle.pre-update": "/PreUpdateReturn1.sh",
@@ -320,7 +320,7 @@ var _ = Describe("the update action", func() {
 false,
 true,
 time.Now(),
-&container2.Config{
+&dockerContainer.Config{
 Labels: map[string]string{
 "com.centurylinklabs.watchtower.lifecycle.pre-update-timeout": "190",
 "com.centurylinklabs.watchtower.lifecycle.pre-update": "/PreUpdateReturn1.sh",
@@ -179,9 +179,8 @@ func RegisterNotificationFlags(rootCmd *cobra.Command) {
 viper.GetStringSlice("WATCHTOWER_NOTIFICATIONS"),
 " notification types to send (valid: email, slack, msteams, gotify, shoutrrr)")
 
-flags.StringP(
+flags.String(
 "notifications-level",
-"",
 viper.GetString("WATCHTOWER_NOTIFICATIONS_LEVEL"),
 "The log level used for sending notifications. Possible values: panic, fatal, error, warn, info or debug")
 
@@ -307,18 +306,20 @@ Should only be used for testing.`)
 `Controls whether watchtower verifies the Gotify server's certificate chain and host name.
 Should only be used for testing.`)
 
-flags.StringP(
+flags.String(
 "notification-template",
-"",
 viper.GetString("WATCHTOWER_NOTIFICATION_TEMPLATE"),
 "The shoutrrr text/template for the messages")
 
-flags.StringArrayP(
+flags.StringArray(
 "notification-url",
-"",
 viper.GetStringSlice("WATCHTOWER_NOTIFICATION_URL"),
 "The shoutrrr URL to send notifications to")
 
+flags.Bool("notification-report",
+viper.GetBool("WATCHTOWER_NOTIFICATION_REPORT"),
+"Use the session report as the notification template data")
+
 flags.String(
 "warn-on-head-failure",
 viper.GetString("WATCHTOWER_WARN_ON_HEAD_FAILURE"),
@@ -26,13 +26,13 @@ const defaultStopSignal = "SIGTERM"
 // Docker API.
 type Client interface {
 ListContainers(t.Filter) ([]Container, error)
-GetContainer(containerID string) (Container, error)
+GetContainer(containerID t.ContainerID) (Container, error)
 StopContainer(Container, time.Duration) error
-StartContainer(Container) (string, error)
+StartContainer(Container) (t.ContainerID, error)
 RenameContainer(Container, string) error
-IsContainerStale(Container) (bool, error)
-ExecuteCommand(containerID string, command string, timeout int) (SkipUpdate bool, err error)
-RemoveImageByID(string) error
+IsContainerStale(Container) (stale bool, latestImage t.ImageID, err error)
+ExecuteCommand(containerID t.ContainerID, command string, timeout int) (SkipUpdate bool, err error)
+RemoveImageByID(t.ImageID) error
 WarnOnHeadPullFailed(container Container) bool
 }
 
@@ -108,7 +108,7 @@ func (client dockerClient) ListContainers(fn t.Filter) ([]Container, error) {
 
 for _, runningContainer := range containers {
 
-c, err := client.GetContainer(runningContainer.ID)
+c, err := client.GetContainer(t.ContainerID(runningContainer.ID))
 if err != nil {
 return nil, err
 }
@@ -137,10 +137,10 @@ func (client dockerClient) createListFilter() filters.Args {
 return filterArgs
 }
 
-func (client dockerClient) GetContainer(containerID string) (Container, error) {
+func (client dockerClient) GetContainer(containerID t.ContainerID) (Container, error) {
 bg := context.Background()
 
-containerInfo, err := client.api.ContainerInspect(bg, containerID)
+containerInfo, err := client.api.ContainerInspect(bg, string(containerID))
 if err != nil {
 return Container{}, err
 }
@@ -161,11 +161,12 @@ func (client dockerClient) StopContainer(c Container, timeout time.Duration) err
 signal = defaultStopSignal
 }
 
-shortID := ShortID(c.ID())
+idStr := string(c.ID())
+shortID := c.ID().ShortID()
 
 if c.IsRunning() {
 log.Infof("Stopping %s (%s) with %s", c.Name(), shortID, signal)
-if err := client.api.ContainerKill(bg, c.ID(), signal); err != nil {
+if err := client.api.ContainerKill(bg, idStr, signal); err != nil {
 return err
 }
 }
@@ -178,7 +179,7 @@ func (client dockerClient) StopContainer(c Container, timeout time.Duration) err
 } else {
 log.Debugf("Removing container %s", shortID)
 
-if err := client.api.ContainerRemove(bg, c.ID(), types.ContainerRemoveOptions{Force: true, RemoveVolumes: client.removeVolumes}); err != nil {
+if err := client.api.ContainerRemove(bg, idStr, types.ContainerRemoveOptions{Force: true, RemoveVolumes: client.removeVolumes}); err != nil {
 return err
 }
 }
@@ -191,7 +192,7 @@ func (client dockerClient) StopContainer(c Container, timeout time.Duration) err
 return nil
 }
 
-func (client dockerClient) StartContainer(c Container) (string, error) {
+func (client dockerClient) StartContainer(c Container) (t.ContainerID, error) {
 bg := context.Background()
 config := c.runtimeConfig()
 hostConfig := c.hostConfig()
@@ -234,18 +235,19 @@ func (client dockerClient) StartContainer(c Container) (string, error) {
 
 }
 
+createdContainerID := t.ContainerID(createdContainer.ID)
 if !c.IsRunning() && !client.reviveStopped {
-return createdContainer.ID, nil
+return createdContainerID, nil
 }
 
-return createdContainer.ID, client.doStartContainer(bg, c, createdContainer)
+return createdContainerID, client.doStartContainer(bg, c, createdContainer)
 
 }
 
 func (client dockerClient) doStartContainer(bg context.Context, c Container, creation container.ContainerCreateCreatedBody) error {
 name := c.Name()
 
-log.Debugf("Starting container %s (%s)", name, ShortID(creation.ID))
+log.Debugf("Starting container %s (%s)", name, t.ContainerID(creation.ID).ShortID())
 err := client.api.ContainerStart(bg, creation.ID, types.ContainerStartOptions{})
 if err != nil {
 return err
@@ -255,38 +257,39 @@ func (client dockerClient) doStartContainer(bg context.Context, c Container, cre
 
 func (client dockerClient) RenameContainer(c Container, newName string) error {
 bg := context.Background()
-log.Debugf("Renaming container %s (%s) to %s", c.Name(), ShortID(c.ID()), newName)
-return client.api.ContainerRename(bg, c.ID(), newName)
+log.Debugf("Renaming container %s (%s) to %s", c.Name(), c.ID().ShortID(), newName)
+return client.api.ContainerRename(bg, string(c.ID()), newName)
 }
 
-func (client dockerClient) IsContainerStale(container Container) (bool, error) {
+func (client dockerClient) IsContainerStale(container Container) (stale bool, latestImage t.ImageID, err error) {
 ctx := context.Background()
 
 if !client.pullImages {
 log.Debugf("Skipping image pull.")
 } else if err := client.PullImage(ctx, container); err != nil {
-return false, err
+return false, container.SafeImageID(), err
 }
 
 return client.HasNewImage(ctx, container)
 }
 
-func (client dockerClient) HasNewImage(ctx context.Context, container Container) (bool, error) {
-oldImageID := container.containerInfo.ContainerJSONBase.Image
+func (client dockerClient) HasNewImage(ctx context.Context, container Container) (hasNew bool, latestImage t.ImageID, err error) {
+currentImageID := t.ImageID(container.containerInfo.ContainerJSONBase.Image)
 imageName := container.ImageName()
 
 newImageInfo, _, err := client.api.ImageInspectWithRaw(ctx, imageName)
 if err != nil {
-return false, err
+return false, currentImageID, err
 }
 
-if newImageInfo.ID == oldImageID {
+newImageID := t.ImageID(newImageInfo.ID)
+if newImageID == currentImageID {
 log.Debugf("No new images found for %s", container.Name())
-return false, nil
+return false, currentImageID, nil
 }
 
-log.Infof("Found new %s image (%s)", imageName, ShortID(newImageInfo.ID))
-return true, nil
+log.Infof("Found new %s image (%s)", imageName, newImageID.ShortID())
+return true, newImageID, nil
 }
 
 // PullImage pulls the latest image for the supplied container, optionally skipping if it's digest can be confirmed
@@ -343,12 +346,12 @@ func (client dockerClient) PullImage(ctx context.Context, container Container) e
 return nil
 }
 
-func (client dockerClient) RemoveImageByID(id string) error {
-log.Infof("Removing image %s", ShortID(id))
+func (client dockerClient) RemoveImageByID(id t.ImageID) error {
+log.Infof("Removing image %s", id.ShortID())
 
 _, err := client.api.ImageRemove(
 context.Background(),
-id,
+string(id),
 types.ImageRemoveOptions{
 Force: true,
 })
@@ -356,7 +359,7 @@ func (client dockerClient) RemoveImageByID(id string) error {
 return err
 }
 
-func (client dockerClient) ExecuteCommand(containerID string, command string, timeout int) (SkipUpdate bool, err error) {
+func (client dockerClient) ExecuteCommand(containerID t.ContainerID, command string, timeout int) (SkipUpdate bool, err error) {
 bg := context.Background()
 
 // Create the exec
@@ -366,7 +369,7 @@ func (client dockerClient) ExecuteCommand(containerID string, command string, ti
 Cmd: []string{"sh", "-c", command},
 }
 
-exec, err := client.api.ContainerExecCreate(bg, containerID, execConfig)
+exec, err := client.api.ContainerExecCreate(bg, string(containerID), execConfig)
 if err != nil {
 return false, err
 }
@@ -462,7 +465,7 @@ func (client dockerClient) waitForStopOrTimeout(c Container, waitTime time.Durat
 case <-timeout:
 return nil
 default:
-if ci, err := client.api.ContainerInspect(bg, c.ID()); err != nil {
+if ci, err := client.api.ContainerInspect(bg, string(c.ID())); err != nil {
 return err
 } else if !ci.State.Running {
 return nil
@@ -6,6 +6,7 @@ import (
 "strings"
 
 "github.com/containrrr/watchtower/internal/util"
+wt "github.com/containrrr/watchtower/pkg/types"
 
 "github.com/docker/docker/api/types"
 dockercontainer "github.com/docker/docker/api/types/container"
@@ -35,8 +36,8 @@ func (c Container) ContainerInfo() *types.ContainerJSON {
 }
 
 // ID returns the Docker container ID.
-func (c Container) ID() string {
-return c.containerInfo.ID
+func (c Container) ID() wt.ContainerID {
+return wt.ContainerID(c.containerInfo.ID)
 }
 
 // IsRunning returns a boolean flag indicating whether or not the current
@@ -59,9 +60,18 @@ func (c Container) Name() string {
 }
 
 // ImageID returns the ID of the Docker image that was used to start the
-// container.
-func (c Container) ImageID() string {
-return c.imageInfo.ID
+// container. May cause nil dereference if imageInfo is not set!
+func (c Container) ImageID() wt.ImageID {
+return wt.ImageID(c.imageInfo.ID)
+}
+
+// SafeImageID returns the ID of the Docker image that was used to start the container if available,
+// otherwise returns an empty string
+func (c Container) SafeImageID() wt.ImageID {
+if c.imageInfo == nil {
+return ""
+}
+return wt.ImageID(c.imageInfo.ID)
 }
 
 // ImageName returns the name of the Docker image that was used to start the
@@ -204,8 +204,8 @@ var _ = Describe("the container", func() {
 It("should return its ID on calls to .ID()", func() {
 id := c.ID()
 
-Expect(id).To(Equal("container_id"))
-Expect(id).NotTo(Equal("wrong-id"))
+Expect(id).To(BeEquivalentTo("container_id"))
+Expect(id).NotTo(BeEquivalentTo("wrong-id"))
 })
 It("should return true, true if enabled on calls to .Enabled()", func() {
 enabled, exists := c.Enabled()
@@ -25,13 +25,13 @@ func NewMockAPIServer() *httptest.Server {
 
 Filters := r.URL.Query().Get("filters")
 var result map[string]interface{}
-json.Unmarshal([]byte(Filters), &result)
+_ = json.Unmarshal([]byte(Filters), &result)
 status := result["status"].(map[string]interface{})
 
 response = getMockJSONFromDisk("./mocks/data/containers.json")
-var x2 []types.Container
-json.Unmarshal([]byte(response), &containers)
+var containers []types.Container
+_ = json.Unmarshal([]byte(response), &containers)
 for _, v := range containers {
 for key := range status {
 if v.State == key {
@@ -56,7 +56,7 @@ func NewMockAPIServer() *httptest.Server {
 } else if isRequestFor("sha256:4dbc5f9c07028a985e14d1393e849ea07f68804c4293050d5a641b138db72daa", r) {
 response = getMockJSONFromDisk("./mocks/data/image02.json")
 }
-fmt.Fprintln(w, response)
+_, _ = fmt.Fprintln(w, response)
 },
 ))
 }
@@ -67,10 +67,9 @@ func isRequestFor(urlPart string, r *http.Request) bool {
 
 func getMockJSONFromDisk(relPath string) string {
 absPath, _ := filepath.Abs(relPath)
-logrus.Error(absPath)
 buf, err := ioutil.ReadFile(absPath)
 if err != nil {
-logrus.Error(err)
+logrus.WithError(err).WithField("file", absPath).Error(err)
 return ""
 }
 return string(buf)
@@ -1,23 +0,0 @@
-package container
-
-import "strings"
-
-// ShortID returns the 12-character (hex) short version of an image ID hash, removing any "sha256:" prefix if present
-func ShortID(imageID string) (short string) {
-prefixSep := strings.IndexRune(imageID, ':')
-offset := 0
-length := 12
-if prefixSep >= 0 {
-if imageID[0:prefixSep] == "sha256" {
-offset = prefixSep + 1
-} else {
-length += prefixSep + 1
-}
-}
-
-if len(imageID) >= offset+length {
-return imageID[offset : offset+length]
-}
-
-return imageID
-}
@@ -1,10 +1,9 @@
 package container_test
 
 import (
+wt "github.com/containrrr/watchtower/pkg/types"
 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
-
-. "github.com/containrrr/watchtower/pkg/container"
 )
 
 var _ = Describe("container utils", func() {
@@ -12,13 +11,13 @@ var _ = Describe("container utils", func() {
 When("given a normal image ID", func() {
 When("it contains a sha256 prefix", func() {
 It("should return that ID in short version", func() {
-actual := ShortID("sha256:0123456789abcd00000000001111111111222222222233333333334444444444")
+actual := shortID("sha256:0123456789abcd00000000001111111111222222222233333333334444444444")
 Expect(actual).To(Equal("0123456789ab"))
 })
 })
 When("it doesn't contain a prefix", func() {
 It("should return that ID in short version", func() {
-actual := ShortID("0123456789abcd00000000001111111111222222222233333333334444444444")
+actual := shortID("0123456789abcd00000000001111111111222222222233333333334444444444")
 Expect(actual).To(Equal("0123456789ab"))
 })
 })
@@ -26,21 +25,26 @@ var _ = Describe("container utils", func() {
 When("given a short image ID", func() {
 When("it contains no prefix", func() {
 It("should return the same string", func() {
-Expect(ShortID("0123456789ab")).To(Equal("0123456789ab"))
+Expect(shortID("0123456789ab")).To(Equal("0123456789ab"))
 })
 })
 When("it contains a the sha256 prefix", func() {
 It("should return the ID without the prefix", func() {
-Expect(ShortID("sha256:0123456789ab")).To(Equal("0123456789ab"))
+Expect(shortID("sha256:0123456789ab")).To(Equal("0123456789ab"))
 })
 })
 })
 When("given an ID with an unknown prefix", func() {
 It("should return a short version of that ID including the prefix", func() {
-Expect(ShortID("md5:0123456789ab")).To(Equal("md5:0123456789ab"))
-Expect(ShortID("md5:0123456789abcdefg")).To(Equal("md5:0123456789ab"))
-Expect(ShortID("md5:01")).To(Equal("md5:01"))
+Expect(shortID("md5:0123456789ab")).To(Equal("md5:0123456789ab"))
+Expect(shortID("md5:0123456789abcdefg")).To(Equal("md5:0123456789ab"))
+Expect(shortID("md5:01")).To(Equal("md5:01"))
 })
 })
 })
 })
+
+func shortID(id string) string {
+// Proxy to the types implementation, relocated due to package dependency resolution
+return wt.ImageID(id).ShortID()
+}
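The deleted ShortID helper above resurfaces as a method on the typed IDs in pkg/types (the test now proxies to wt.ImageID(id).ShortID()). A sketch of the relocated method, assuming it mirrors the removed implementation:

    // Assumed port of the removed container.ShortID to pkg/types; shared by
    // ImageID and ContainerID via the same 12-character truncation rules.
    // Requires the "strings" import.
    func (id ImageID) ShortID() string     { return shortID(string(id)) }
    func (id ContainerID) ShortID() string { return shortID(string(id)) }

    func shortID(id string) string {
        prefixSep := strings.IndexRune(id, ':')
        offset := 0
        length := 12
        if prefixSep >= 0 {
            if id[0:prefixSep] == "sha256" {
                offset = prefixSep + 1
            } else {
                length += prefixSep + 1
            }
        }
        if len(id) >= offset+length {
            return id[offset : offset+length]
        }
        return id
    }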
@@ -12,8 +12,8 @@ func ExecutePreChecks(client container.Client, params types.UpdateParams) {
 if err != nil {
 return
 }
-for _, container := range containers {
-ExecutePreCheckCommand(client, container)
+for _, currentContainer := range containers {
+ExecutePreCheckCommand(client, currentContainer)
 }
 }
 
@@ -23,8 +23,8 @@ func ExecutePostChecks(client container.Client, params types.UpdateParams) {
 if err != nil {
 return
 }
-for _, container := range containers {
-ExecutePostCheckCommand(client, container)
+for _, currentContainer := range containers {
+ExecutePostCheckCommand(client, currentContainer)
 }
 }
 
@@ -37,8 +37,8 @@ func ExecutePreCheckCommand(client container.Client, container container.Contain
 }
 
 log.Debug("Executing pre-check command.")
-_,err := client.ExecuteCommand(container.ID(), command, 1);
+_, err := client.ExecuteCommand(container.ID(), command, 1)
 if err != nil {
 log.Error(err)
 }
 }
@@ -52,24 +52,24 @@ func ExecutePostCheckCommand(client container.Client, container container.Contai
 }
 
 log.Debug("Executing post-check command.")
-_,err := client.ExecuteCommand(container.ID(), command, 1);
+_, err := client.ExecuteCommand(container.ID(), command, 1)
 if err != nil {
 log.Error(err)
 }
 }
 
 // ExecutePreUpdateCommand tries to run the pre-update lifecycle hook for a single container.
-func ExecutePreUpdateCommand(client container.Client, container container.Container) (SkipUpdate bool,err error) {
+func ExecutePreUpdateCommand(client container.Client, container container.Container) (SkipUpdate bool, err error) {
 timeout := container.PreUpdateTimeout()
 command := container.GetLifecyclePreUpdateCommand()
 if len(command) == 0 {
 log.Debug("No pre-update command supplied. Skipping")
-return false,nil
+return false, nil
 }
 
 if !container.IsRunning() || container.IsRestarting() {
 log.Debug("Container is not running. Skipping pre-update command.")
-return false,nil
+return false, nil
 }
 
 log.Debug("Executing pre-update command.")
@@ -77,7 +77,7 @@ func ExecutePreUpdateCommand(client container.Client, container container.Contai
 }
 
 // ExecutePostUpdateCommand tries to run the post-update lifecycle hook for a single container.
-func ExecutePostUpdateCommand(client container.Client, newContainerID string) {
+func ExecutePostUpdateCommand(client container.Client, newContainerID types.ContainerID) {
 newContainer, err := client.GetContainer(newContainerID)
 if err != nil {
 log.Error(err)
@@ -91,9 +91,9 @@ func ExecutePostUpdateCommand(client container.Client, newContainerID string) {
 }
 
 log.Debug("Executing post-update command.")
-_,err = client.ExecuteCommand(newContainerID, command, 1);
+_, err = client.ExecuteCommand(newContainerID, command, 1)
 
 if err != nil {
 log.Error(err)
 }
 }
@@ -1,6 +1,7 @@
 package metrics
 
 import (
+"github.com/containrrr/watchtower/pkg/types"
 "github.com/prometheus/client_golang/prometheus"
 "github.com/prometheus/client_golang/prometheus/promauto"
 )
@@ -24,6 +25,16 @@ type Metrics struct {
 skipped prometheus.Counter
 }
 
+// NewMetric returns a Metric with the counts taken from the appropriate types.Report fields
+func NewMetric(report types.Report) *Metric {
+return &Metric{
+Scanned: len(report.Scanned()),
+// Note: This is for backwards compatibility. ideally, stale containers should be counted separately
+Updated: len(report.Updated()) + len(report.Stale()),
+Failed: len(report.Failed()),
+}
+}
+
 // QueueIsEmpty checks whether any messages are enqueued in the channel
 func (metrics *Metrics) QueueIsEmpty() bool {
 return len(metrics.channel) == 0
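For a concrete sense of the backwards-compatible counting in NewMetric above: a session that scanned four containers, updated one, left one stale, and failed one would come out as (illustrative values, hypothetical report):

    m := NewMetric(report)
    // m.Scanned == 4
    // m.Updated == 2  // updated + stale are folded together for compatibility
    // m.Failed  == 1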
|
@ -25,11 +25,6 @@ type emailTypeNotifier struct {
|
|||
delay time.Duration
|
||||
}
|
||||
|
||||
// NewEmailNotifier is a factory method creating a new email notifier instance
|
||||
func NewEmailNotifier(c *cobra.Command, acceptedLogLevels []log.Level) t.ConvertibleNotifier {
|
||||
return newEmailNotifier(c, acceptedLogLevels)
|
||||
}
|
||||
|
||||
func newEmailNotifier(c *cobra.Command, acceptedLogLevels []log.Level) t.ConvertibleNotifier {
|
||||
flags := c.PersistentFlags()
|
||||
|
||||
|
|
|
@ -22,11 +22,6 @@ type gotifyTypeNotifier struct {
|
|||
logLevels []log.Level
|
||||
}
|
||||
|
||||
// NewGotifyNotifier is a factory method creating a new gotify notifier instance
|
||||
func NewGotifyNotifier(c *cobra.Command, levels []log.Level) t.ConvertibleNotifier {
|
||||
return newGotifyNotifier(c, levels)
|
||||
}
|
||||
|
||||
func newGotifyNotifier(c *cobra.Command, levels []log.Level) t.ConvertibleNotifier {
|
||||
flags := c.PersistentFlags()
|
||||
|
||||
|
|
|
@ -18,11 +18,6 @@ type msTeamsTypeNotifier struct {
|
|||
data bool
|
||||
}
|
||||
|
||||
// NewMsTeamsNotifier is a factory method creating a new teams notifier instance
|
||||
func NewMsTeamsNotifier(cmd *cobra.Command, acceptedLogLevels []log.Level) t.ConvertibleNotifier {
|
||||
return newMsTeamsNotifier(cmd, acceptedLogLevels)
|
||||
}
|
||||
|
||||
func newMsTeamsNotifier(cmd *cobra.Command, acceptedLogLevels []log.Level) t.ConvertibleNotifier {
|
||||
|
||||
flags := cmd.PersistentFlags()
|
||||
|
|
13 pkg/notifications/notifications_suite_test.go (new file)
@@ -0,0 +1,13 @@
+package notifications_test
+
+import (
+"testing"
+
+. "github.com/onsi/ginkgo"
+. "github.com/onsi/gomega"
+)
+
+func TestNotifications(t *testing.T) {
+RegisterFailHandler(Fail)
+RunSpecs(t, "Notifications Suite")
+}
@@ -6,18 +6,10 @@ import (
 log "github.com/sirupsen/logrus"
 "github.com/spf13/cobra"
 "os"
-"strings"
 )
 
-// Notifier can send log output as notification to admins, with optional batching.
-type Notifier struct {
-types []ty.Notifier
-}
-
 // NewNotifier creates and returns a new Notifier, using global configuration.
-func NewNotifier(c *cobra.Command) *Notifier {
-n := &Notifier{}
-
+func NewNotifier(c *cobra.Command) ty.Notifier {
 f := c.PersistentFlags()
 
 level, _ := f.GetString("notifications-level")
@@ -32,54 +24,26 @@ func NewNotifier(c *cobra.Command) *Notifier {
 log.Fatalf("Unsupported notification log level provided: %s", level)
 }
 
+reportTemplate, _ := f.GetBool("notification-report")
+tplString, _ := f.GetString("notification-template")
+urls, _ := f.GetStringArray("notification-url")
+
+urls = AppendLegacyUrls(urls, c)
+
+return newShoutrrrNotifier(tplString, acceptedLogLevels, !reportTemplate, urls...)
+}
+
+// AppendLegacyUrls creates shoutrrr equivalent URLs from legacy notification flags
+func AppendLegacyUrls(urls []string, cmd *cobra.Command) []string {
+
 // Parse types and create notifiers.
-types, err := f.GetStringSlice("notifications")
+types, err := cmd.Flags().GetStringSlice("notifications")
 if err != nil {
-log.WithField("could not read notifications argument", log.Fields{"Error": err}).Fatal()
+log.WithError(err).Fatal("could not read notifications argument")
 }
 
-n.types = n.getNotificationTypes(c, acceptedLogLevels, types)
-
-return n
-}
-
-func (n *Notifier) String() string {
-if len(n.types) < 1 {
-return ""
-}
-
-sb := strings.Builder{}
-for _, notif := range n.types {
-for _, name := range notif.GetNames() {
-sb.WriteString(name)
-sb.WriteString(", ")
-}
-}
-
-if sb.Len() < 2 {
-// No notification services are configured, return early as the separator strip is not applicable
-return "none"
-}
-
-names := sb.String()
-
-// remove the last separator
-names = names[:len(names)-2]
-
-return names
-}
-
-// getNotificationTypes produces an array of notifiers from a list of types
-func (n *Notifier) getNotificationTypes(cmd *cobra.Command, levels []log.Level, types []string) []ty.Notifier {
-output := make([]ty.Notifier, 0)
-
 for _, t := range types {
 
-if t == shoutrrrType {
-output = append(output, newShoutrrrNotifier(cmd, levels))
-continue
-}
-
 var legacyNotifier ty.ConvertibleNotifier
 var err error
 
@@ -89,9 +53,11 @@ func (n *Notifier) getNotificationTypes(cmd *cobra.Command, levels []log.Level,
 case slackType:
 legacyNotifier = newSlackNotifier(cmd, []log.Level{})
 case msTeamsType:
-legacyNotifier = newMsTeamsNotifier(cmd, levels)
+legacyNotifier = newMsTeamsNotifier(cmd, []log.Level{})
 case gotifyType:
 legacyNotifier = newGotifyNotifier(cmd, []log.Level{})
+case shoutrrrType:
+continue
 default:
 log.Fatalf("Unknown notification type %q", t)
 // Not really needed, used for nil checking static analysis
@@ -102,40 +68,11 @@ func (n *Notifier) getNotificationTypes(cmd *cobra.Command, levels []log.Level,
 if err != nil {
 log.Fatal("failed to create notification config:", err)
 }
+urls = append(urls, shoutrrrURL)
+
+log.WithField("URL", shoutrrrURL).Trace("created Shoutrrr URL from legacy notifier")
 
-notifier := newShoutrrrNotifierFromURL(
-cmd,
-shoutrrrURL,
-levels,
-)
-
-output = append(output, notifier)
 }
 
-return output
-}
-
-// StartNotification starts a log batch. Notifications will be accumulated after this point and only sent when SendNotification() is called.
-func (n *Notifier) StartNotification() {
-for _, t := range n.types {
-t.StartNotification()
-}
-}
-
-// SendNotification sends any notifications accumulated since StartNotification() was called.
-func (n *Notifier) SendNotification() {
-for _, t := range n.types {
-t.SendNotification()
-}
-}
-
-// Close closes all notifiers.
-func (n *Notifier) Close() {
-for _, t := range n.types {
-t.Close()
-}
+return urls
 }
 
 // GetTitle returns a common notification title with hostname appended
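The legacy notifier types are now used purely as URL builders inside AppendLegacyUrls. Judging from the legacyNotifier.GetURL call sites here and in the tests below, the conversion contract is roughly the following (an inferred sketch; the real interface lives in pkg/types, which this excerpt does not include):

    // Inferred from usage; not the verbatim definition.
    type ConvertibleNotifier interface {
        GetURL(c *cobra.Command) (string, error)
    }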
@@ -4,24 +4,14 @@ import (
 "fmt"
 "net/url"
 "os"
-"testing"
-
 "github.com/containrrr/watchtower/cmd"
 "github.com/containrrr/watchtower/internal/flags"
 "github.com/containrrr/watchtower/pkg/notifications"
-"github.com/containrrr/watchtower/pkg/types"
 
 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
-log "github.com/sirupsen/logrus"
-"github.com/spf13/cobra"
 )
 
-func TestActions(t *testing.T) {
-RegisterFailHandler(Fail)
-RunSpecs(t, "Notifier Suite")
-}
-
 var _ = Describe("notifications", func() {
 Describe("the notifier", func() {
 When("only empty notifier types are provided", func() {
@@ -36,11 +26,11 @@ var _ = Describe("notifications", func() {
 Expect(err).NotTo(HaveOccurred())
 notif := notifications.NewNotifier(command)
 
-Expect(notif.String()).To(Equal("none"))
+Expect(notif.GetNames()).To(BeEmpty())
 })
 })
 Describe("the slack notifier", func() {
-builderFn := notifications.NewSlackNotifier
+// builderFn := notifications.NewSlackNotifier
 
 When("passing a discord url to the slack notifier", func() {
 command := cmd.NewRootCommand()
@@ -62,11 +52,11 @@ var _ = Describe("notifications", func() {
 
 It("should return a discord url when using a hook url with the domain discord.com", func() {
 hookURL := fmt.Sprintf("https://%s/api/webhooks/%s/%s/slack", "discord.com", channel, token)
-testURL(builderFn, buildArgs(hookURL), expected)
+testURL(buildArgs(hookURL), expected)
 })
 It("should return a discord url when using a hook url with the domain discordapp.com", func() {
 hookURL := fmt.Sprintf("https://%s/api/webhooks/%s/%s/slack", "discordapp.com", channel, token)
-testURL(builderFn, buildArgs(hookURL), expected)
+testURL(buildArgs(hookURL), expected)
 })
 })
 When("converting a slack service config into a shoutrrr url", func() {
@@ -86,21 +76,21 @@ var _ = Describe("notifications", func() {
 expectedOutput := fmt.Sprintf("slack://%s@%s/%s/%s?color=%s&title=%s", username, tokenA, tokenB, tokenC, color, title)
 
 args := []string{
 "--notifications",
 "slack",
 "--notification-slack-hook-url",
 hookURL,
 "--notification-slack-identifier",
 username,
 }
 
-testURL(builderFn, args, expectedOutput)
+testURL(args, expectedOutput)
 })
 })
 })
 
 Describe("the gotify notifier", func() {
 When("converting a gotify service config into a shoutrrr url", func() {
-builderFn := notifications.NewGotifyNotifier
 
 It("should return the expected URL", func() {
 command := cmd.NewRootCommand()
 flags.RegisterNotificationFlags(command)
@@ -112,21 +102,21 @@ var _ = Describe("notifications", func() {
 expectedOutput := fmt.Sprintf("gotify://%s/%s?title=%s", host, token, title)
 
 args := []string{
 "--notifications",
 "gotify",
 "--notification-gotify-url",
 fmt.Sprintf("https://%s", host),
 "--notification-gotify-token",
 token,
 }
 
-testURL(builderFn, args, expectedOutput)
+testURL(args, expectedOutput)
 })
 })
 })
 
 Describe("the teams notifier", func() {
 When("converting a teams service config into a shoutrrr url", func() {
-builderFn := notifications.NewMsTeamsNotifier
 
 It("should return the expected URL", func() {
 command := cmd.NewRootCommand()
 flags.RegisterNotificationFlags(command)
@@ -141,24 +131,25 @@ var _ = Describe("notifications", func() {
 expectedOutput := fmt.Sprintf("teams://%s/%s/%s?color=%s&title=%s", tokenA, tokenB, tokenC, color, title)
 
 args := []string{
 "--notifications",
 "msteams",
 "--notification-msteams-hook",
 hookURL,
 }
 
-testURL(builderFn, args, expectedOutput)
+testURL(args, expectedOutput)
 })
 })
 })
 
 Describe("the email notifier", func() {
 
-builderFn := notifications.NewEmailNotifier
-
 When("converting an email service config into a shoutrrr url", func() {
 It("should set the from address in the URL", func() {
 fromAddress := "lala@example.com"
 expectedOutput := buildExpectedURL("containrrrbot", "secret-password", "mail.containrrr.dev", 25, fromAddress, "mail@example.com", "Plain")
 args := []string{
 "--notifications",
 "email",
 "--notification-email-from",
 fromAddress,
 "--notification-email-to",
@@ -170,7 +161,7 @@ var _ = Describe("notifications", func() {
 "--notification-email-server",
 "mail.containrrr.dev",
 }
-testURL(builderFn, args, expectedOutput)
+testURL(args, expectedOutput)
 })
 
 It("should return the expected URL", func() {
@@ -180,6 +171,8 @@ var _ = Describe("notifications", func() {
 expectedOutput := buildExpectedURL("containrrrbot", "secret-password", "mail.containrrr.dev", 25, fromAddress, toAddress, "Plain")
 
 args := []string{
 "--notifications",
 "email",
 "--notification-email-from",
 fromAddress,
 "--notification-email-to",
@@ -192,7 +185,7 @@ var _ = Describe("notifications", func() {
 "mail.containrrr.dev",
 }
 
-testURL(builderFn, args, expectedOutput)
+testURL(args, expectedOutput)
 })
 })
 })
@@ -214,9 +207,7 @@ func buildExpectedURL(username string, password string, host string, port int, f
 url.QueryEscape(to))
 }
 
-type builderFn = func(c *cobra.Command, acceptedLogLevels []log.Level) types.ConvertibleNotifier
-
-func testURL(builder builderFn, args []string, expectedURL string) {
+func testURL(args []string, expectedURL string) {
 
 command := cmd.NewRootCommand()
 flags.RegisterNotificationFlags(command)
@@ -224,10 +215,9 @@ func testURL(builder builderFn, args []string, expectedURL string) {
 err := command.ParseFlags(args)
 Expect(err).NotTo(HaveOccurred())
 
-notifier := builder(command, []log.Level{})
-actualURL, err := notifier.GetURL(command)
+urls := notifications.AppendLegacyUrls([]string{}, command)
 
 Expect(err).NotTo(HaveOccurred())
 
-Expect(actualURL).To(Equal(expectedURL))
+Expect(urls).To(ContainElement(expectedURL))
 }
|
|
@ -11,12 +11,26 @@ import (
|
|||
"github.com/containrrr/shoutrrr/pkg/types"
|
||||
t "github.com/containrrr/watchtower/pkg/types"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
const (
|
||||
shoutrrrDefaultTemplate = "{{range .}}{{.Message}}{{println}}{{end}}"
|
||||
shoutrrrType = "shoutrrr"
|
||||
shoutrrrDefaultLegacyTemplate = "{{range .}}{{.Message}}{{println}}{{end}}"
|
||||
shoutrrrDefaultTemplate = `{{- with .Report -}}
|
||||
{{len .Scanned}} Scanned, {{len .Updated}} Updated, {{len .Failed}} Failed
|
||||
{{range .Updated -}}
|
||||
- {{.Name}} ({{.ImageName}}): {{.CurrentImageID.ShortID}} updated to {{.LatestImageID.ShortID}}
|
||||
{{end -}}
|
||||
{{range .Fresh -}}
|
||||
- {{.Name}} ({{.ImageName}}): {{.State}}
|
||||
{{end -}}
|
||||
{{range .Skipped -}}
|
||||
- {{.Name}} ({{.ImageName}}): {{.State}}: {{.Error}}
|
||||
{{end -}}
|
||||
{{range .Failed -}}
|
||||
- {{.Name}} ({{.ImageName}}): {{.State}}: {{.Error}}
|
||||
{{end -}}
|
||||
{{end -}}`
|
||||
shoutrrrType = "shoutrrr"
|
||||
)
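For reference, a minimal standalone sketch (standard library plus logrus; the entries are invented) of what the legacy default template above renders when executed over a slice of log entries:

package main

import (
	"os"
	"text/template"

	log "github.com/sirupsen/logrus"
)

func main() {
	// Same template string as shoutrrrDefaultLegacyTemplate above.
	tpl := template.Must(template.New("").Parse("{{range .}}{{.Message}}{{println}}{{end}}"))

	entries := []*log.Entry{
		{Message: "Found new image for /app"},
		{Message: "Stopping /app"},
	}

	// Prints each entry's message on its own line: the pre-report notification body.
	_ = tpl.Execute(os.Stdout, entries)
}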
|
||||
|
||||
type router interface {
|
||||
|
@ -25,41 +39,49 @@ type router interface {
|
|||
|
||||
// Implements Notifier, logrus.Hook
|
||||
type shoutrrrTypeNotifier struct {
|
||||
Urls []string
|
||||
Router router
|
||||
entries []*log.Entry
|
||||
logLevels []log.Level
|
||||
template *template.Template
|
||||
messages chan string
|
||||
done chan bool
|
||||
Urls []string
|
||||
Router router
|
||||
entries []*log.Entry
|
||||
logLevels []log.Level
|
||||
template *template.Template
|
||||
messages chan string
|
||||
done chan bool
|
||||
legacyTemplate bool
|
||||
}
|
||||
|
||||
// GetScheme returns the scheme part of a Shoutrrr URL
|
||||
func GetScheme(url string) string {
|
||||
schemeEnd := strings.Index(url, ":")
|
||||
if schemeEnd <= 0 {
|
||||
return "invalid"
|
||||
}
|
||||
return url[:schemeEnd]
|
||||
}
|
||||
|
||||
func (n *shoutrrrTypeNotifier) GetNames() []string {
|
||||
names := make([]string, len(n.Urls))
|
||||
for i, u := range n.Urls {
|
||||
schemeEnd := strings.Index(u, ":")
|
||||
if schemeEnd <= 0 {
|
||||
names[i] = "invalid"
|
||||
continue
|
||||
}
|
||||
names[i] = u[:schemeEnd]
|
||||
names[i] = GetScheme(u)
|
||||
}
|
||||
return names
|
||||
}
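As a quick illustration of the scheme extraction above, a standalone sketch (getScheme here is a local copy of the GetScheme helper, kept separate so the snippet compiles on its own):

package main

import (
	"fmt"
	"strings"
)

// getScheme mirrors GetScheme above: everything before the first colon is
// treated as the notification service name.
func getScheme(url string) string {
	schemeEnd := strings.Index(url, ":")
	if schemeEnd <= 0 {
		return "invalid"
	}
	return url[:schemeEnd]
}

func main() {
	fmt.Println(getScheme("discord://token@channel"))              // discord
	fmt.Println(getScheme("smtp://user:pass@mail.example.com:25")) // smtp
	fmt.Println(getScheme("not-a-url"))                            // invalid
}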
|
||||
|
||||
func newShoutrrrNotifier(c *cobra.Command, acceptedLogLevels []log.Level) t.Notifier {
|
||||
flags := c.PersistentFlags()
|
||||
urls, _ := flags.GetStringArray("notification-url")
|
||||
tpl := getShoutrrrTemplate(c)
|
||||
return createSender(urls, acceptedLogLevels, tpl)
|
||||
func newShoutrrrNotifier(tplString string, acceptedLogLevels []log.Level, legacy bool, urls ...string) t.Notifier {
|
||||
|
||||
notifier := createNotifier(urls, acceptedLogLevels, tplString, legacy)
|
||||
log.AddHook(notifier)
|
||||
|
||||
// Do the sending in a separate goroutine so we don't block the main process.
|
||||
go sendNotifications(notifier)
|
||||
|
||||
return notifier
|
||||
}
|
||||
|
||||
func newShoutrrrNotifierFromURL(c *cobra.Command, url string, levels []log.Level) t.Notifier {
|
||||
tpl := getShoutrrrTemplate(c)
|
||||
return createSender([]string{url}, levels, tpl)
|
||||
}
|
||||
|
||||
func createSender(urls []string, levels []log.Level, template *template.Template) t.Notifier {
|
||||
func createNotifier(urls []string, levels []log.Level, tplString string, legacy bool) *shoutrrrTypeNotifier {
|
||||
tpl, err := getShoutrrrTemplate(tplString, legacy)
|
||||
if err != nil {
|
||||
log.Errorf("Could not use configured notification template: %s. Using default template", err)
|
||||
}
|
||||
|
||||
traceWriter := log.StandardLogger().WriterLevel(log.TraceLevel)
|
||||
r, err := shoutrrr.NewSender(stdlog.New(traceWriter, "Shoutrrr: ", 0), urls...)
|
||||
|
@ -67,21 +89,15 @@ func createSender(urls []string, levels []log.Level, template *template.Template
|
|||
log.Fatalf("Failed to initialize Shoutrrr notifications: %s\n", err.Error())
|
||||
}
|
||||
|
||||
n := &shoutrrrTypeNotifier{
|
||||
Urls: urls,
|
||||
Router: r,
|
||||
messages: make(chan string, 1),
|
||||
done: make(chan bool),
|
||||
logLevels: levels,
|
||||
template: template,
|
||||
return &shoutrrrTypeNotifier{
|
||||
Urls: urls,
|
||||
Router: r,
|
||||
messages: make(chan string, 1),
|
||||
done: make(chan bool),
|
||||
logLevels: levels,
|
||||
template: tpl,
|
||||
legacyTemplate: legacy,
|
||||
}
|
||||
|
||||
log.AddHook(n)
|
||||
|
||||
// Do the sending in a separate goroutine so we don't block the main process.
|
||||
go sendNotifications(n)
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
func sendNotifications(n *shoutrrrTypeNotifier) {
|
||||
|
@ -90,8 +106,9 @@ func sendNotifications(n *shoutrrrTypeNotifier) {
|
|||
|
||||
for i, err := range errs {
|
||||
if err != nil {
|
||||
scheme := GetScheme(n.Urls[i])
|
||||
// Use fmt so it doesn't trigger another notification.
|
||||
fmt.Println("Failed to send notification via shoutrrr (url="+n.Urls[i]+"): ", err)
|
||||
fmt.Printf("Failed to send shoutrrr notification (#%d, %s): %v\n", i, scheme, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -99,17 +116,21 @@ func sendNotifications(n *shoutrrrTypeNotifier) {
|
|||
n.done <- true
|
||||
}
|
||||
|
||||
func (n *shoutrrrTypeNotifier) buildMessage(entries []*log.Entry) string {
|
||||
func (n *shoutrrrTypeNotifier) buildMessage(data Data) string {
|
||||
var body bytes.Buffer
|
||||
if err := n.template.Execute(&body, entries); err != nil {
|
||||
var templateData interface{} = data
|
||||
if n.legacyTemplate {
|
||||
templateData = data.Entries
|
||||
}
|
||||
if err := n.template.Execute(&body, templateData); err != nil {
|
||||
fmt.Printf("Failed to execute Shoutrrr template: %s\n", err.Error())
|
||||
}
|
||||
|
||||
return body.String()
|
||||
}
|
||||
|
||||
func (n *shoutrrrTypeNotifier) sendEntries(entries []*log.Entry) {
|
||||
msg := n.buildMessage(entries)
|
||||
func (n *shoutrrrTypeNotifier) sendEntries(entries []*log.Entry, report t.Report) {
|
||||
msg := n.buildMessage(Data{entries, report})
|
||||
n.messages <- msg
|
||||
}
|
||||
|
||||
|
@ -119,12 +140,12 @@ func (n *shoutrrrTypeNotifier) StartNotification() {
|
|||
}
|
||||
}
|
||||
|
||||
func (n *shoutrrrTypeNotifier) SendNotification() {
|
||||
if n.entries == nil || len(n.entries) <= 0 {
|
||||
return
|
||||
}
|
||||
func (n *shoutrrrTypeNotifier) SendNotification(report t.Report) {
|
||||
//if n.entries == nil || len(n.entries) <= 0 {
|
||||
// return
|
||||
//}
|
||||
|
||||
n.sendEntries(n.entries)
|
||||
n.sendEntries(n.entries, report)
|
||||
n.entries = nil
|
||||
}
|
||||
|
||||
|
@ -146,36 +167,23 @@ func (n *shoutrrrTypeNotifier) Fire(entry *log.Entry) error {
|
|||
n.entries = append(n.entries, entry)
|
||||
} else {
|
||||
// Log output generated outside a cycle is sent immediately.
|
||||
n.sendEntries([]*log.Entry{entry})
|
||||
n.sendEntries([]*log.Entry{entry}, nil)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getShoutrrrTemplate(c *cobra.Command) *template.Template {
|
||||
var tpl *template.Template
|
||||
|
||||
flags := c.PersistentFlags()
|
||||
|
||||
tplString, err := flags.GetString("notification-template")
|
||||
|
||||
func getShoutrrrTemplate(tplString string, legacy bool) (tpl *template.Template, err error) {
|
||||
funcs := template.FuncMap{
|
||||
"ToUpper": strings.ToUpper,
|
||||
"ToLower": strings.ToLower,
|
||||
"Title": strings.Title,
|
||||
}
|
||||
tplBase := template.New("").Funcs(funcs)
|
||||
|
||||
// If we succeed in getting a non-empty template configuration
|
||||
// try to parse the template string.
|
||||
if tplString != "" && err == nil {
|
||||
tpl, err = template.New("").Funcs(funcs).Parse(tplString)
|
||||
}
|
||||
|
||||
// In case of errors (either from parsing the template string
|
||||
// or from getting the template configuration) log an error
|
||||
// message about this and the fact that we'll use the default
|
||||
// template instead.
|
||||
if err != nil {
|
||||
log.Errorf("Could not use configured notification template: %s. Using default template", err)
|
||||
if tplString != "" {
|
||||
tpl, err = tplBase.Parse(tplString)
|
||||
}
|
||||
|
||||
// If we had an error (either from parsing the template string
|
||||
|
@ -183,8 +191,19 @@ func getShoutrrrTemplate(c *cobra.Command) *template.Template {
|
|||
// template wasn't configured (the empty template string)
|
||||
// fallback to using the default template.
|
||||
if err != nil || tplString == "" {
|
||||
tpl = template.Must(template.New("").Funcs(funcs).Parse(shoutrrrDefaultTemplate))
|
||||
defaultTemplate := shoutrrrDefaultTemplate
|
||||
if legacy {
|
||||
defaultTemplate = shoutrrrDefaultLegacyTemplate
|
||||
}
|
||||
|
||||
tpl = template.Must(tplBase.Parse(defaultTemplate))
|
||||
}
|
||||
|
||||
return tpl
|
||||
return
|
||||
}
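To make the registered string helpers concrete, a small standalone sketch (standard library plus logrus; the entry is invented) using the same FuncMap in a custom notification template:

package main

import (
	"os"
	"strings"
	"text/template"

	log "github.com/sirupsen/logrus"
)

func main() {
	// The same helpers that getShoutrrrTemplate registers for notification templates.
	funcs := template.FuncMap{
		"ToUpper": strings.ToUpper,
		"ToLower": strings.ToLower,
		"Title":   strings.Title,
	}

	tpl := template.Must(template.New("").Funcs(funcs).Parse(
		`{{range .}}{{.Level | printf "%v" | ToUpper}}: {{.Message | ToLower}}{{println}}{{end}}`))

	entries := []*log.Entry{{Level: log.InfoLevel, Message: "Session DONE"}}

	// Prints "INFO: session done"
	_ = tpl.Execute(os.Stdout, entries)
}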

// Data is the notification template data model
type Data struct {
	Entries []*log.Entry
	Report  t.Report
}
|
||||
|
|
|
@ -2,169 +2,226 @@ package notifications
|
|||
|
||||
import (
|
||||
"github.com/containrrr/shoutrrr/pkg/types"
|
||||
"testing"
|
||||
"text/template"
|
||||
|
||||
"github.com/containrrr/watchtower/internal/actions/mocks"
|
||||
"github.com/containrrr/watchtower/internal/flags"
|
||||
log "github.com/sirupsen/logrus"
|
||||
s "github.com/containrrr/watchtower/pkg/session"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/onsi/gomega/gbytes"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestShoutrrrDefaultTemplate(t *testing.T) {
|
||||
cmd := new(cobra.Command)
|
||||
|
||||
shoutrrr := &shoutrrrTypeNotifier{
|
||||
template: getShoutrrrTemplate(cmd),
|
||||
}
|
||||
|
||||
entries := []*log.Entry{
|
||||
var legacyMockData = Data{
|
||||
Entries: []*logrus.Entry{
|
||||
{
|
||||
Message: "foo bar",
|
||||
},
|
||||
}
|
||||
|
||||
s := shoutrrr.buildMessage(entries)
|
||||
|
||||
require.Equal(t, "foo bar\n", s)
|
||||
}
|
||||
|
||||
func TestShoutrrrTemplate(t *testing.T) {
|
||||
cmd := new(cobra.Command)
|
||||
flags.RegisterNotificationFlags(cmd)
|
||||
err := cmd.ParseFlags([]string{"--notification-template={{range .}}{{.Level}}: {{.Message}}{{println}}{{end}}"})
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
shoutrrr := &shoutrrrTypeNotifier{
|
||||
template: getShoutrrrTemplate(cmd),
|
||||
}
|
||||
|
||||
entries := []*log.Entry{
|
||||
{
|
||||
Level: log.InfoLevel,
|
||||
Message: "foo bar",
|
||||
},
|
||||
}
|
||||
|
||||
s := shoutrrr.buildMessage(entries)
|
||||
|
||||
require.Equal(t, "info: foo bar\n", s)
|
||||
}
|
||||
|
||||
func TestShoutrrrStringFunctions(t *testing.T) {
|
||||
cmd := new(cobra.Command)
|
||||
flags.RegisterNotificationFlags(cmd)
|
||||
err := cmd.ParseFlags([]string{"--notification-template={{range .}}{{.Level | printf \"%v\" | ToUpper }}: {{.Message | ToLower }} {{.Message | Title }}{{println}}{{end}}"})
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
shoutrrr := &shoutrrrTypeNotifier{
|
||||
template: getShoutrrrTemplate(cmd),
|
||||
}
|
||||
|
||||
entries := []*log.Entry{
|
||||
{
|
||||
Level: log.InfoLevel,
|
||||
Level: logrus.InfoLevel,
|
||||
Message: "foo Bar",
|
||||
},
|
||||
}
|
||||
|
||||
s := shoutrrr.buildMessage(entries)
|
||||
|
||||
require.Equal(t, "INFO: foo bar Foo Bar\n", s)
|
||||
},
|
||||
}
|
||||
|
||||
func TestShoutrrrInvalidTemplateUsesTemplate(t *testing.T) {
|
||||
cmd := new(cobra.Command)
|
||||
|
||||
flags.RegisterNotificationFlags(cmd)
|
||||
err := cmd.ParseFlags([]string{"--notification-template={{"})
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
shoutrrr := &shoutrrrTypeNotifier{
|
||||
template: getShoutrrrTemplate(cmd),
|
||||
func mockDataFromStates(states ...s.State) Data {
|
||||
return Data{
|
||||
Entries: legacyMockData.Entries,
|
||||
Report: mocks.CreateMockProgressReport(states...),
|
||||
}
|
||||
|
||||
shoutrrrDefault := &shoutrrrTypeNotifier{
|
||||
template: template.Must(template.New("").Parse(shoutrrrDefaultTemplate)),
|
||||
}
|
||||
|
||||
entries := []*log.Entry{
|
||||
{
|
||||
Message: "foo bar",
|
||||
},
|
||||
}
|
||||
|
||||
s := shoutrrr.buildMessage(entries)
|
||||
sd := shoutrrrDefault.buildMessage(entries)
|
||||
|
||||
require.Equal(t, sd, s)
|
||||
}
|
||||
|
||||
var _ = Describe("Shoutrrr", func() {
|
||||
var logBuffer *gbytes.Buffer
|
||||
|
||||
BeforeEach(func() {
|
||||
logBuffer = gbytes.NewBuffer()
|
||||
logrus.SetOutput(logBuffer)
|
||||
logrus.SetFormatter(&logrus.TextFormatter{
|
||||
DisableColors: true,
|
||||
DisableTimestamp: true,
|
||||
})
|
||||
})
|
||||
|
||||
When("using legacy templates", func() {
|
||||
|
||||
When("no custom template is provided", func() {
|
||||
It("should format the messages using the default template", func() {
|
||||
cmd := new(cobra.Command)
|
||||
flags.RegisterNotificationFlags(cmd)
|
||||
|
||||
shoutrrr := createNotifier([]string{}, logrus.AllLevels, "", true)
|
||||
|
||||
entries := []*logrus.Entry{
|
||||
{
|
||||
Message: "foo bar",
|
||||
},
|
||||
}
|
||||
|
||||
s := shoutrrr.buildMessage(Data{Entries: entries})
|
||||
|
||||
Expect(s).To(Equal("foo bar\n"))
|
||||
})
|
||||
})
|
||||
When("given a valid custom template", func() {
|
||||
It("should format the messages using the custom template", func() {
|
||||
|
||||
tplString := `{{range .}}{{.Level}}: {{.Message}}{{println}}{{end}}`
|
||||
tpl, err := getShoutrrrTemplate(tplString, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
shoutrrr := &shoutrrrTypeNotifier{
|
||||
template: tpl,
|
||||
legacyTemplate: true,
|
||||
}
|
||||
|
||||
entries := []*logrus.Entry{
|
||||
{
|
||||
Level: logrus.InfoLevel,
|
||||
Message: "foo bar",
|
||||
},
|
||||
}
|
||||
|
||||
s := shoutrrr.buildMessage(Data{Entries: entries})
|
||||
|
||||
Expect(s).To(Equal("info: foo bar\n"))
|
||||
})
|
||||
})
|
||||
|
||||
When("given an invalid custom template", func() {
|
||||
It("should format the messages using the default template", func() {
|
||||
invNotif, err := createNotifierWithTemplate(`{{ intentionalSyntaxError`, true)
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
defNotif, err := createNotifierWithTemplate(``, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(invNotif.buildMessage(legacyMockData)).To(Equal(defNotif.buildMessage(legacyMockData)))
|
||||
})
|
||||
})
|
||||
|
||||
When("given a template that is using ToUpper function", func() {
|
||||
It("should return the text in UPPER CASE", func() {
|
||||
tplString := `{{range .}}{{ .Message | ToUpper }}{{end}}`
|
||||
Expect(getTemplatedResult(tplString, true, legacyMockData)).To(Equal("FOO BAR"))
|
||||
})
|
||||
})
|
||||
|
||||
When("given a template that is using ToLower function", func() {
|
||||
It("should return the text in lower case", func() {
|
||||
tplString := `{{range .}}{{ .Message | ToLower }}{{end}}`
|
||||
Expect(getTemplatedResult(tplString, true, legacyMockData)).To(Equal("foo bar"))
|
||||
})
|
||||
})
|
||||
|
||||
When("given a template that is using Title function", func() {
|
||||
It("should return the text in Title Case", func() {
|
||||
tplString := `{{range .}}{{ .Message | Title }}{{end}}`
|
||||
Expect(getTemplatedResult(tplString, true, legacyMockData)).To(Equal("Foo Bar"))
|
||||
})
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
When("using report templates", func() {
|
||||
|
||||
When("no custom template is provided", func() {
|
||||
It("should format the messages using the default template", func() {
|
||||
expected := `4 Scanned, 2 Updated, 1 Failed
|
||||
- updt1 (mock/updt1:latest): 01d110000000 updated to d0a110000000
|
||||
- updt2 (mock/updt2:latest): 01d120000000 updated to d0a120000000
|
||||
- frsh1 (mock/frsh1:latest): Fresh
|
||||
- skip1 (mock/skip1:latest): Skipped: unpossible
|
||||
- fail1 (mock/fail1:latest): Failed: accidentally the whole container
|
||||
`
|
||||
data := mockDataFromStates(s.UpdatedState, s.FreshState, s.FailedState, s.SkippedState, s.UpdatedState)
|
||||
Expect(getTemplatedResult(``, false, data)).To(Equal(expected))
|
||||
})
|
||||
|
||||
It("should format the messages using the default template", func() {
|
||||
expected := `1 Scanned, 0 Updated, 0 Failed
|
||||
- frsh1 (mock/frsh1:latest): Fresh
|
||||
`
|
||||
data := mockDataFromStates(s.FreshState)
|
||||
Expect(getTemplatedResult(``, false, data)).To(Equal(expected))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
When("sending notifications", func() {
|
||||
|
||||
It("SlowNotificationNotSent", func() {
|
||||
_, blockingRouter := sendNotificationsWithBlockingRouter(true)
|
||||
|
||||
Eventually(blockingRouter.sent).Should(Not(Receive()))
|
||||
|
||||
})
|
||||
|
||||
It("SlowNotificationSent", func() {
|
||||
shoutrrr, blockingRouter := sendNotificationsWithBlockingRouter(true)
|
||||
|
||||
blockingRouter.unlock <- true
|
||||
shoutrrr.Close()
|
||||
|
||||
Eventually(blockingRouter.sent).Should(Receive(BeTrue()))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
type blockingRouter struct {
|
||||
unlock chan bool
|
||||
sent chan bool
|
||||
}
|
||||
|
||||
func (b blockingRouter) Send(message string, params *types.Params) []error {
|
||||
func (b blockingRouter) Send(_ string, _ *types.Params) []error {
|
||||
_ = <-b.unlock
|
||||
b.sent <- true
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestSlowNotificationNotSent(t *testing.T) {
|
||||
_, blockingRouter := sendNotificationsWithBlockingRouter()
|
||||
|
||||
notifSent := false
|
||||
select {
|
||||
case notifSent = <-blockingRouter.sent:
|
||||
default:
|
||||
}
|
||||
|
||||
require.Equal(t, false, notifSent)
|
||||
}
|
||||
|
||||
func TestSlowNotificationSent(t *testing.T) {
|
||||
shoutrrr, blockingRouter := sendNotificationsWithBlockingRouter()
|
||||
|
||||
blockingRouter.unlock <- true
|
||||
shoutrrr.Close()
|
||||
|
||||
notifSent := false
|
||||
select {
|
||||
case notifSent = <-blockingRouter.sent:
|
||||
default:
|
||||
}
|
||||
require.Equal(t, true, notifSent)
|
||||
}
|
||||
|
||||
func sendNotificationsWithBlockingRouter() (*shoutrrrTypeNotifier, *blockingRouter) {
|
||||
cmd := new(cobra.Command)
|
||||
func sendNotificationsWithBlockingRouter(legacy bool) (*shoutrrrTypeNotifier, *blockingRouter) {
|
||||
|
||||
router := &blockingRouter{
|
||||
unlock: make(chan bool, 1),
|
||||
sent: make(chan bool, 1),
|
||||
}
|
||||
|
||||
tpl, err := getShoutrrrTemplate("", legacy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
shoutrrr := &shoutrrrTypeNotifier{
|
||||
template: getShoutrrrTemplate(cmd),
|
||||
messages: make(chan string, 1),
|
||||
done: make(chan bool),
|
||||
Router: router,
|
||||
template: tpl,
|
||||
messages: make(chan string, 1),
|
||||
done: make(chan bool),
|
||||
Router: router,
|
||||
legacyTemplate: legacy,
|
||||
}
|
||||
|
||||
entry := &log.Entry{
|
||||
entry := &logrus.Entry{
|
||||
Message: "foo bar",
|
||||
}
|
||||
|
||||
go sendNotifications(shoutrrr)
|
||||
|
||||
shoutrrr.StartNotification()
|
||||
shoutrrr.Fire(entry)
|
||||
_ = shoutrrr.Fire(entry)
|
||||
|
||||
shoutrrr.SendNotification()
|
||||
shoutrrr.SendNotification(nil)
|
||||
|
||||
return shoutrrr, router
|
||||
}
|
||||
|
||||
func createNotifierWithTemplate(tplString string, legacy bool) (*shoutrrrTypeNotifier, error) {
|
||||
tpl, err := getShoutrrrTemplate(tplString, legacy)
|
||||
|
||||
return &shoutrrrTypeNotifier{
|
||||
template: tpl,
|
||||
legacyTemplate: legacy,
|
||||
}, err
|
||||
}
|
||||
|
||||
func getTemplatedResult(tplString string, legacy bool, data Data) (string, error) {
|
||||
notifier, err := createNotifierWithTemplate(tplString, legacy)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return notifier.buildMessage(data), err
|
||||
}
|
||||
|
|
|
@ -19,11 +19,6 @@ type slackTypeNotifier struct {
|
|||
slackrus.SlackrusHook
|
||||
}
|
||||
|
||||
// NewSlackNotifier is a factory function used to generate new instance of the slack notifier type
|
||||
func NewSlackNotifier(c *cobra.Command, acceptedLogLevels []log.Level) t.ConvertibleNotifier {
|
||||
return newSlackNotifier(c, acceptedLogLevels)
|
||||
}
|
||||
|
||||
func newSlackNotifier(c *cobra.Command, acceptedLogLevels []log.Level) t.ConvertibleNotifier {
|
||||
flags := c.PersistentFlags()
|
||||
|
||||
|
|
|
@ -1,77 +0,0 @@
|
|||
// Package notifications ...
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license.
|
||||
package notifications
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"net/smtp"
|
||||
)
|
||||
|
||||
// SendMail connects to the server at addr, switches to TLS if
|
||||
// possible, authenticates with the optional mechanism a if possible,
|
||||
// and then sends an email from address from, to addresses to, with
|
||||
// message msg.
|
||||
// The addr must include a port, as in "mail.example.com:smtp".
|
||||
//
|
||||
// The addresses in the to parameter are the SMTP RCPT addresses.
|
||||
//
|
||||
// The msg parameter should be an RFC 822-style email with headers
|
||||
// first, a blank line, and then the message body. The lines of msg
|
||||
// should be CRLF terminated. The msg headers should usually include
|
||||
// fields such as "From", "To", "Subject", and "Cc". Sending "Bcc"
|
||||
// messages is accomplished by including an email address in the to
|
||||
// parameter but not including it in the msg headers.
|
||||
//
|
||||
// The SendMail function and the net/smtp package are low-level
|
||||
// mechanisms and provide no support for DKIM signing, MIME
|
||||
// attachments (see the mime/multipart package), or other mail
|
||||
// functionality. Higher-level packages exist outside of the standard
|
||||
// library.
|
||||
func SendMail(addr string, insecureSkipVerify bool, a smtp.Auth, from string, to []string, msg []byte) error {
|
||||
c, err := smtp.Dial(addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer c.Close()
|
||||
if err = c.Hello("localHost"); err != nil {
|
||||
return err
|
||||
}
|
||||
if ok, _ := c.Extension("STARTTLS"); ok {
|
||||
serverName, _, _ := net.SplitHostPort(addr)
|
||||
config := &tls.Config{ServerName: serverName, InsecureSkipVerify: insecureSkipVerify}
|
||||
if err = c.StartTLS(config); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if a != nil {
|
||||
if ok, _ := c.Extension("AUTH"); ok {
|
||||
if err = c.Auth(a); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if err = c.Mail(from); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, addr := range to {
|
||||
if err = c.Rcpt(addr); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w, err := c.Data()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = w.Write(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = w.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.Quit()
|
||||
}
|
|
@ -1,24 +0,0 @@
|
|||
package notifications
|
||||
|
||||
import "bytes"
|
||||
|
||||
// SplitSubN splits a string into a list of string with each having
|
||||
// a maximum number of characters n
|
||||
func SplitSubN(s string, n int) []string {
|
||||
sub := ""
|
||||
subs := []string{}
|
||||
|
||||
runes := bytes.Runes([]byte(s))
|
||||
l := len(runes)
|
||||
for i, r := range runes {
|
||||
sub = sub + string(r)
|
||||
if (i+1)%n == 0 {
|
||||
subs = append(subs, sub)
|
||||
sub = ""
|
||||
} else if (i + 1) == l {
|
||||
subs = append(subs, sub)
|
||||
}
|
||||
}
|
||||
|
||||
return subs
|
||||
}
|
82
pkg/session/container_status.go
Normal file
|
@ -0,0 +1,82 @@
|
|||
package session
|
||||
|
||||
import wt "github.com/containrrr/watchtower/pkg/types"
|
||||
|
||||
// State indicates what the current state is of the container
|
||||
type State int
|
||||
|
||||
// State enum values
|
||||
const (
|
||||
// UnknownState is only used to represent an uninitialized State value
|
||||
UnknownState State = iota
|
||||
SkippedState
|
||||
ScannedState
|
||||
UpdatedState
|
||||
FailedState
|
||||
FreshState
|
||||
StaleState
|
||||
)
|
||||
|
||||
// ContainerStatus contains the container state during a session
|
||||
type ContainerStatus struct {
|
||||
containerID wt.ContainerID
|
||||
oldImage wt.ImageID
|
||||
newImage wt.ImageID
|
||||
containerName string
|
||||
imageName string
|
||||
error
|
||||
state State
|
||||
}
|
||||
|
||||
// ID returns the container ID
|
||||
func (u *ContainerStatus) ID() wt.ContainerID {
|
||||
return u.containerID
|
||||
}
|
||||
|
||||
// Name returns the container name
|
||||
func (u *ContainerStatus) Name() string {
|
||||
return u.containerName
|
||||
}
|
||||
|
||||
// CurrentImageID returns the image ID that the container used when the session started
|
||||
func (u *ContainerStatus) CurrentImageID() wt.ImageID {
|
||||
return u.oldImage
|
||||
}
|
||||
|
||||
// LatestImageID returns the newest image ID found during the session
|
||||
func (u *ContainerStatus) LatestImageID() wt.ImageID {
|
||||
return u.newImage
|
||||
}
|
||||
|
||||
// ImageName returns the name:tag that the container uses
|
||||
func (u *ContainerStatus) ImageName() string {
|
||||
return u.imageName
|
||||
}
|
||||
|
||||
// Error returns the error (if any) that was encountered for the container during a session
|
||||
func (u *ContainerStatus) Error() string {
|
||||
if u.error == nil {
|
||||
return ""
|
||||
}
|
||||
return u.error.Error()
|
||||
}
|
||||
|
||||
// State returns the current State that the container is in
|
||||
func (u *ContainerStatus) State() string {
|
||||
switch u.state {
|
||||
case SkippedState:
|
||||
return "Skipped"
|
||||
case ScannedState:
|
||||
return "Scanned"
|
||||
case UpdatedState:
|
||||
return "Updated"
|
||||
case FailedState:
|
||||
return "Failed"
|
||||
case FreshState:
|
||||
return "Fresh"
|
||||
case StaleState:
|
||||
return "Stale"
|
||||
default:
|
||||
return "Unknown"
|
||||
}
|
||||
}
|
56
pkg/session/progress.go
Normal file
|
@ -0,0 +1,56 @@

package session

import (
	"github.com/containrrr/watchtower/pkg/types"
)

// Progress contains the current session container status
type Progress map[types.ContainerID]*ContainerStatus

// UpdateFromContainer sets various status fields from their corresponding container equivalents
func UpdateFromContainer(cont types.Container, newImage types.ImageID, state State) *ContainerStatus {
	return &ContainerStatus{
		containerID:   cont.ID(),
		containerName: cont.Name(),
		imageName:     cont.ImageName(),
		oldImage:      cont.SafeImageID(),
		newImage:      newImage,
		state:         state,
	}
}

// AddSkipped adds a container to the Progress with the state set as skipped
func (m Progress) AddSkipped(cont types.Container, err error) {
	update := UpdateFromContainer(cont, cont.SafeImageID(), SkippedState)
	update.error = err
	m.Add(update)
}

// AddScanned adds a container to the Progress with the state set as scanned
func (m Progress) AddScanned(cont types.Container, newImage types.ImageID) {
	m.Add(UpdateFromContainer(cont, newImage, ScannedState))
}

// UpdateFailed updates the containers passed, setting their state as failed with the supplied error
func (m Progress) UpdateFailed(failures map[types.ContainerID]error) {
	for id, err := range failures {
		update := m[id]
		update.error = err
		update.state = FailedState
	}
}

// Add a container to the map using container ID as the key
func (m Progress) Add(update *ContainerStatus) {
	m[update.containerID] = update
}

// MarkForUpdate marks the container identified by containerID for update
func (m Progress) MarkForUpdate(containerID types.ContainerID) {
	m[containerID].state = UpdatedState
}

// Report creates a new Report from a Progress instance
func (m Progress) Report() types.Report {
	return NewReport(m)
}
|
90
pkg/session/report.go
Normal file
|
@ -0,0 +1,90 @@
|
|||
package session
|
||||
|
||||
import (
|
||||
"github.com/containrrr/watchtower/pkg/types"
|
||||
"sort"
|
||||
)
|
||||
|
||||
type report struct {
|
||||
scanned []types.ContainerReport
|
||||
updated []types.ContainerReport
|
||||
failed []types.ContainerReport
|
||||
skipped []types.ContainerReport
|
||||
stale []types.ContainerReport
|
||||
fresh []types.ContainerReport
|
||||
}
|
||||
|
||||
func (r *report) Scanned() []types.ContainerReport {
|
||||
return r.scanned
|
||||
}
|
||||
func (r *report) Updated() []types.ContainerReport {
|
||||
return r.updated
|
||||
}
|
||||
func (r *report) Failed() []types.ContainerReport {
|
||||
return r.failed
|
||||
}
|
||||
func (r *report) Skipped() []types.ContainerReport {
|
||||
return r.skipped
|
||||
}
|
||||
func (r *report) Stale() []types.ContainerReport {
|
||||
return r.stale
|
||||
}
|
||||
func (r *report) Fresh() []types.ContainerReport {
|
||||
return r.fresh
|
||||
}
|
||||
|
||||
// NewReport creates a types.Report from the supplied Progress
|
||||
func NewReport(progress Progress) types.Report {
|
||||
report := &report{
|
||||
scanned: []types.ContainerReport{},
|
||||
updated: []types.ContainerReport{},
|
||||
failed: []types.ContainerReport{},
|
||||
skipped: []types.ContainerReport{},
|
||||
stale: []types.ContainerReport{},
|
||||
fresh: []types.ContainerReport{},
|
||||
}
|
||||
|
||||
for _, update := range progress {
|
||||
if update.state == SkippedState {
|
||||
report.skipped = append(report.skipped, update)
|
||||
continue
|
||||
}
|
||||
|
||||
report.scanned = append(report.scanned, update)
|
||||
if update.newImage == update.oldImage {
|
||||
update.state = FreshState
|
||||
report.fresh = append(report.fresh, update)
|
||||
continue
|
||||
}
|
||||
|
||||
switch update.state {
|
||||
case UpdatedState:
|
||||
report.updated = append(report.updated, update)
|
||||
case FailedState:
|
||||
report.failed = append(report.failed, update)
|
||||
default:
|
||||
update.state = StaleState
|
||||
report.stale = append(report.stale, update)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(sortableContainers(report.scanned))
|
||||
sort.Sort(sortableContainers(report.updated))
|
||||
sort.Sort(sortableContainers(report.failed))
|
||||
sort.Sort(sortableContainers(report.skipped))
|
||||
sort.Sort(sortableContainers(report.stale))
|
||||
sort.Sort(sortableContainers(report.fresh))
|
||||
|
||||
return report
|
||||
}
|
||||
|
||||
type sortableContainers []types.ContainerReport
|
||||
|
||||
// Len implements sort.Interface.Len
|
||||
func (s sortableContainers) Len() int { return len(s) }
|
||||
|
||||
// Less implements sort.Interface.Less
|
||||
func (s sortableContainers) Less(i, j int) bool { return s[i].ID() < s[j].ID() }
|
||||
|
||||
// Swap implements sort.Interface.Swap
|
||||
func (s sortableContainers) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
|
@ -1,14 +1,53 @@
|
|||
package types
|
||||
|
||||
import "github.com/docker/docker/api/types"
|
||||
import (
|
||||
"github.com/docker/docker/api/types"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ImageID is a hash string representing a container image
|
||||
type ImageID string
|
||||
|
||||
// ContainerID is a hash string representing a container instance
|
||||
type ContainerID string
|
||||
|
||||
// ShortID returns the 12-character (hex) short version of an image ID hash, removing any "sha256:" prefix if present
|
||||
func (id ImageID) ShortID() (short string) {
|
||||
return shortID(string(id))
|
||||
}
|
||||
|
||||
// ShortID returns the 12-character (hex) short version of a container ID hash, removing any "sha256:" prefix if present
|
||||
func (id ContainerID) ShortID() (short string) {
|
||||
return shortID(string(id))
|
||||
}
|
||||
|
||||
func shortID(longID string) string {
|
||||
prefixSep := strings.IndexRune(longID, ':')
|
||||
offset := 0
|
||||
length := 12
|
||||
if prefixSep >= 0 {
|
||||
if longID[0:prefixSep] == "sha256" {
|
||||
offset = prefixSep + 1
|
||||
} else {
|
||||
length += prefixSep + 1
|
||||
}
|
||||
}
|
||||
|
||||
if len(longID) >= offset+length {
|
||||
return longID[offset : offset+length]
|
||||
}
|
||||
|
||||
return longID
|
||||
}
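A short sketch of how the new ShortID helpers behave (the import alias matches the one used elsewhere in this change; the ID values are invented):

package main

import (
	"fmt"

	wt "github.com/containrrr/watchtower/pkg/types"
)

func main() {
	// A sha256-prefixed image ID: the prefix is dropped and the first 12 hex characters are kept.
	img := wt.ImageID("sha256:d0a110000000a1b2c3d4e5f60718293a4b5c6d7e8f90")
	fmt.Println(img.ShortID()) // d0a110000000

	// IDs without a prefix that are already 12 characters (or shorter) are returned unchanged.
	fmt.Println(wt.ContainerID("01d110000000").ShortID()) // 01d110000000
}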
|
||||
|
||||
// Container is a docker container running an image
|
||||
type Container interface {
|
||||
ContainerInfo() *types.ContainerJSON
|
||||
ID() string
|
||||
ID() ContainerID
|
||||
IsRunning() bool
|
||||
Name() string
|
||||
ImageID() string
|
||||
ImageID() ImageID
|
||||
SafeImageID() ImageID
|
||||
ImageName() string
|
||||
Enabled() (bool, bool)
|
||||
IsMonitorOnly() bool
|
||||
|
|
|
@ -3,7 +3,7 @@ package types

// Notifier is the interface that all notification services have in common
type Notifier interface {
	StartNotification()
	SendNotification()
	SendNotification(Report)
	GetNames() []string
	Close()
}
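To show the shape of the updated interface, a hypothetical minimal implementation (consoleNotifier is invented for illustration and is not part of this change):

package main

import (
	"fmt"

	wt "github.com/containrrr/watchtower/pkg/types"
)

// consoleNotifier only exists to demonstrate the new SendNotification(Report) signature.
type consoleNotifier struct{}

func (consoleNotifier) StartNotification() {}

func (consoleNotifier) SendNotification(report wt.Report) {
	if report == nil {
		return
	}
	fmt.Printf("%d scanned, %d updated, %d failed\n",
		len(report.Scanned()), len(report.Updated()), len(report.Failed()))
}

func (consoleNotifier) GetNames() []string { return []string{"console"} }

func (consoleNotifier) Close() {}

func main() {
	var n wt.Notifier = consoleNotifier{}
	n.StartNotification()
	n.SendNotification(nil) // a real session report would be passed here
	n.Close()
}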
|
||||
|
|
22
pkg/types/report.go
Normal file
|
@ -0,0 +1,22 @@

package types

// Report contains reports for all the containers processed during a session
type Report interface {
	Scanned() []ContainerReport
	Updated() []ContainerReport
	Failed() []ContainerReport
	Skipped() []ContainerReport
	Stale() []ContainerReport
	Fresh() []ContainerReport
}

// ContainerReport represents a container that was included in a watchtower session
type ContainerReport interface {
	ID() ContainerID
	Name() string
	CurrentImageID() ImageID
	LatestImageID() ImageID
	ImageName() string
	Error() string
	State() string
}
|