Skip to content

Commit

Permalink
Merge remote-tracking branch 'origin/main' into feat/add-docusaurus
Browse files Browse the repository at this point in the history
  • Loading branch information
jonathan-mayer committed Oct 23, 2024
2 parents f42ef6f + f2e95b5 commit a9ce751
Show file tree
Hide file tree
Showing 48 changed files with 963 additions and 929 deletions.
File renamed without changes.
36 changes: 16 additions & 20 deletions .github/workflows/check_version.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,8 @@ jobs:
outputs:
version_change: ${{ steps.check_for_version_change.outputs.version_change }}
app_version_change: ${{ steps.check_for_appVersion_change.outputs.app_version_change }}
version: ${{ steps.chart_version.outputs.version }}
app_version: ${{ steps.app_version.outputs.app_version }}

steps:
- name: Checkout code
Expand All @@ -32,6 +34,18 @@ jobs:
version_change=$(git diff main HEAD~1 -- deployments/chart/Chart.yaml | grep -qe "^[+-]version: " && echo "version changed" || echo "version didn't change")
echo "version_change=$version_change" >> $GITHUB_OUTPUT
- name: Extract Chart Version
id: chart_version
run: |
version=$(yq e '.version' ./deployments/chart/Chart.yaml)
echo "version=$version" >> $GITHUB_OUTPUT
- name: Extract App Version
id: app_version
run: |
app_version=$(yq e '.appVersion' ./deployments/chart/Chart.yaml)
echo "app_version=$app_version" >> $GITHUB_OUTPUT
build_new_chart:
runs-on: ubuntu-latest
needs: check_versions
Expand All @@ -40,22 +54,13 @@ jobs:
contents: write

steps:
- name: Checkout code
uses: actions/checkout@v4

- name: Extract Chart Version
id: chart_version
run: |
version=$(yq e '.version' ./deployments/chart/Chart.yaml)
echo "version=$version" >> $GITHUB_ENV
- name: Dispatch Event to build new helm chart
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.GITHUB_TOKEN }}
repository: caas-team/GoKubeDownscaler
event-type: build-new-chart
client-payload: '{"version": "${{ env.version }}"}'
client-payload: '{"version": "${{ needs.check_versions.outputs.version }}", "appVersion": "${{ needs.check_versions.outputs.app_version }}"}'

release_new_version:
runs-on: ubuntu-latest
Expand All @@ -65,19 +70,10 @@ jobs:
contents: write

steps:
- name: Checkout code
uses: actions/checkout@v4

- name: Extract App Version
id: app_version
run: |
app_version=$(yq e '.appVersion' ./deployments/chart/Chart.yaml)
echo "app_version=$app_version" >> $GITHUB_ENV
- name: Dispatch Event to create new release
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.GITHUB_TOKEN }}
repository: caas-team/GoKubeDownscaler
event-type: release-new-version
client-payload: '{"appVersion": "${{ env.app_version }}"}'
client-payload: '{"appVersion": "${{ needs.check_versions.outputs.app_version }}", "setLatest": true}'
28 changes: 28 additions & 0 deletions .github/workflows/create_dev_build.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
# Manually-triggered workflow that builds a dev image and dev helm chart by
# dispatching the same repository events the release workflows listen for
# (docker_build.yaml and helm_build.yaml both accept "create-dev-build").
name: Create dev build

on:
  workflow_dispatch:

jobs:
  create_dev_build:
    runs-on: ubuntu-latest
    permissions:
      contents: write

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      # Read the chart version from the canonical chart location.
      # NOTE: was "./test-chart/Chart.yaml" — a leftover test path; every other
      # workflow in this repo reads "./deployments/chart/Chart.yaml".
      - name: Extract Chart Version
        id: chart_version
        run: |
          version=$(yq e '.version' ./deployments/chart/Chart.yaml)
          echo "version=$version" >> $GITHUB_OUTPUT

      # Fire the repository_dispatch event. The "-dev" suffix marks the chart
      # as a dev build; setLatest=false keeps the "latest" image tag untouched.
      - name: Dispatch event to create dev build
        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          repository: caas-team/GoKubeDownscaler
          event-type: create-dev-build
          client-payload: '{"version": "${{ steps.chart_version.outputs.version }}-dev","appVersion": "dev", "setLatest": false}'
4 changes: 2 additions & 2 deletions .github/workflows/docker_build.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ name: Build and push Image

on:
repository_dispatch:
types: [release-new-version]
types: [release-new-version, create-dev-build]

jobs:
build:
Expand All @@ -19,7 +19,7 @@ jobs:
mtr.devops.telekom.de/caas/go-kube-downscaler
ghcr.io/caas-team/gokubedownscaler
tags: |
latest
${{ github.event.client_payload.setLatest && 'latest' || '' }}
${{ github.event.client_payload.appVersion }}
- name: Install Cosign
Expand Down
4 changes: 2 additions & 2 deletions .github/workflows/helm_build.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ name: Build and push helm chart

on:
repository_dispatch:
types: [build-new-chart]
types: [build-new-chart, create-dev-build]

jobs:
build_and_push:
Expand All @@ -29,7 +29,7 @@ jobs:
run: |
cd deployments/chart
helm lint .
helm package .
helm package . --version ${{ github.event.client_payload.version }} --app-version ${{ github.event.client_payload.appVersion }}
helm push $(ls *.tgz | head -1) oci://ghcr.io/caas-team/charts
helm push $(ls *.tgz | head -1) oci://${MTR}/${REPO}/charts
env:
Expand Down
2 changes: 1 addition & 1 deletion Dockerfile
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
FROM golang:1.22.5 AS build
FROM golang:1.23.1 AS build

WORKDIR /tmp/kubedownscaler

Expand Down
39 changes: 27 additions & 12 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,9 @@ This is a golang port of the popular [(py-)kube-downscaler](github.com/caas-team
- [PodDisruptionBudgets](#poddisruptionbudgets)
- [ScaledObjects](#scaledobjects)
- [StatefulSets](#statefulsets)
- [Rollouts](#rollouts)
- [Stacks](#stacks)
- [Prometheuses](#prometheuses)
- [Installation](#installation)
- [Configuration](#configuration)
- [Annotations](#annotations)
Expand All @@ -41,6 +44,7 @@ This is a golang port of the popular [(py-)kube-downscaler](github.com/caas-team
- [Edge Cases](#edge-cases)
- [Differences to py-kube-downscaler](#differences-to-py-kube-downscaler)
- [Missing Features](#missing-features)
- [Troubleshooting](#troubleshooting)
- [Developing](#developing)
- [Cloning the Repository](#cloning-the-repository)
- [Setting up Pre-Commit](#setting-up-pre-commit)
Expand Down Expand Up @@ -68,6 +72,12 @@ These are the resources the Downscaler can scale:
- sets the paused replicas annotation to the [downscale replicas](#downscale-replicas)
- <span id="statefulsets">StatefulSets</span>:
- sets the replica count to the [downscale replicas](#downscale-replicas)
- <span id="rollouts">Rollouts</span>:
- sets the replica count to the [downscale replicas](#downscale-replicas)
- <span id="stacks">Stacks</span>:
- sets the replica count to the [downscale replicas](#downscale-replicas)
- <span id="prometheuses">Prometheuses</span>:
- sets the replica count to the [downscale replicas](#downscale-replicas)

## Installation

Expand Down Expand Up @@ -212,12 +222,12 @@ See [RFC3339 Timestamps](https://datatracker.ietf.org/doc/html/rfc3339) for more
example:

```text
Mon-Fri 08:00-20:00 Europe/Berlin # From Monday to Friday: from 08:00 to 20:00
Sat-Sun 00:00-24:00 UTC # On The Weekend: the entire day
Mon-Fri 20:00-08:00 PST # From Monday to Friday: from Midnight to 08:00 and from 20:00 until end of day
Mon-Sun 00:00-00:00 America/New_York # The timespan never matches, this would not do anything
Mon-Tue 20:00-24:00 CEST # On Monday and Tuesday: from 20:00 to midnight
Mon-Tue 20:00-00:00 Europe/Amsterdam # On Monday and Tuesday: from 20:00 to midnight
Mon-Fri 08:00-20:00 Asia/Tokyo # From Monday to Friday: from 08:00 to 20:00
Sat-Sun 00:00-24:00 UTC # On The Weekend: the entire day
Mon-Fri 20:00-08:00 Australia/Sydney # From Monday to Friday: from Midnight to 08:00 and from 20:00 until end of day
Mon-Sun 00:00-00:00 America/New_York # The timespan never matches, this would not do anything
Mon-Tue 20:00-24:00 Africa/Johannesburg # On Monday and Tuesday: from 20:00 to midnight
Mon-Tue 20:00-00:00 Europe/Amsterdam # On Monday and Tuesday: from 20:00 to midnight
```

Valid Values:
Expand Down Expand Up @@ -256,6 +266,7 @@ OR with optional spaces:
```

The timespans can be absolute, relative or mixed.

Example: downscale over the weekend and at night:

```
Expand All @@ -275,7 +286,7 @@ Or by a duration string:

```text
"1h30m" # 1 hour and 30 minutes
"1.5h" # 1 ½ hours (1 hour and 30 minutes)
"1.5h" # 1 hour and 30 minutes
"2m" # 2 minutes
"10s" # 10 seconds
"300s" # 300 seconds
Expand Down Expand Up @@ -361,7 +372,7 @@ Workload will be scaled according to the downtime schedule on the cli layer
--- Layers
Workload: uptime="Mon-Fri 08:00-16:00 Europe/Berlin"
Namespace: force-downtime=true
CLI: downtime="Mon-Fri 20:00-08:00 PST"
CLI: downtime="Mon-Fri 20:00-08:00 America/Los_Angeles"
ENV: (no env vars)
--- Process:
Exclusion not set on any layer (...)
Expand All @@ -374,7 +385,7 @@ Workload will be forced into a down-scaled state
--- Layers
Workload: uptime="Mon-Fri 08:00-16:00 Europe/Berlin"
Namespace: force-downtime=true
CLI: downtime="Mon-Fri 20:00-08:00 PST"
CLI: downtime="Mon-Fri 20:00-08:00 America/Los_Angeles"
ENV: (no env vars)
--- Process:
Exclusion not set on any layer (...)
Expand Down Expand Up @@ -465,7 +476,7 @@ Some cases where this might be needed include:

<span id="diff-duration-units">Duration units</span>:

- instead of integers representing seconds you can also use duration strings. See [Duration](#duration) for more information
- instead of integers representing seconds you can also use [duration strings](#duration)
- backwards compatible: fully compatible, integer seconds are still supported

<span id="diff-layer-system">Layer system</span>:
Expand All @@ -485,8 +496,8 @@ Some cases where this might be needed include:

<span id="diff-uniform-timestamp">Uniform timestamp</span>:

- all timestamps are [RFC3339 Timestamps](https://datatracker.ietf.org/doc/html/rfc3339) this is more optimized for golang, more consistent and also used by kubernetes itself
- backwards compatible: mostly, unless you used a short form of ISO 8601 (`2023-08-12`, `2023-233`) or `2023-W34-1` it should be totally fine to not change anything
- all timestamps are [RFC3339 Timestamps](https://datatracker.ietf.org/doc/html/rfc3339); this is more optimized for golang, more consistent, and also used by Kubernetes itself
- backwards compatible: mostly; unless you used a short form of ISO 8601 (`2023-08-12`, `2023-233`, or `2023-W34-1`), it should be totally fine not to change anything

<span id="diff-overlapping-days">Overlapping [relative timespans](#configuration-of-a-relative-timespan) into next day</span>:

Expand Down Expand Up @@ -517,6 +528,10 @@ Some cases where this might be needed include:

Currently the GoKubeDownscaler is still a WIP. This means that there are still some features missing. You can find a list of the known-missing features [here](/../../labels/missing%20feature). If you think that any other features are missing or you have an idea for a new feature, feel free to open an [Issue](/../../issues/)

## Troubleshooting

See [troubleshooting](docs/troubleshooting.md)

## Developing

Please read the [contribution manifest](./CONTRIBUTING.md)
Expand Down
29 changes: 18 additions & 11 deletions cmd/kubedownscaler/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,8 @@ import (
"sync"
"time"

_ "time/tzdata"

"github.com/caas-team/gokubedownscaler/internal/api/kubernetes"
"github.com/caas-team/gokubedownscaler/internal/pkg/scalable"
"github.com/caas-team/gokubedownscaler/internal/pkg/values"
Expand All @@ -26,11 +28,11 @@ var (
// if the scan should only run once
once = false
// how long to wait between scans
interval = values.Duration(30 * time.Second)
interval = 30 * time.Second
// list of namespaces to restrict the downscaler to
includeNamespaces values.StringList
includeNamespaces []string
// list of resources to restrict the downscaler to
includeResources = values.StringList{"deployments"}
includeResources = []string{"deployments"}
// list of namespaces to ignore while downscaling
excludeNamespaces = values.RegexList{regexp.MustCompile("kube-system"), regexp.MustCompile("kube-downscaler")}
// list of workload names to ignore while downscaling
Expand All @@ -45,24 +47,25 @@ var (

func init() {
// set defaults for layers
layerCli.GracePeriod = values.Duration(15 * time.Minute)
layerCli.GracePeriod = 15 * time.Minute
layerCli.DownscaleReplicas = 0

// cli layer values
flag.Var(&layerCli.DownscalePeriod, "downscale-period", "period to scale down in (default: never, incompatible: UpscaleTime, DownscaleTime)")
flag.Var(&layerCli.DownTime, "default-downtime", "timespans where workloads will be scaled down, outside of them they will be scaled up (default: never, incompatible: UpscalePeriod, DownscalePeriod)")
flag.Var(&layerCli.UpscalePeriod, "upscale-period", "periods to scale up in (default: never, incompatible: UpscaleTime, DownscaleTime)")
flag.Var(&layerCli.UpTime, "default-uptime", "timespans where workloads will be scaled up, outside of them they will be scaled down (default: never, incompatible: UpscalePeriod, DownscalePeriod)")
flag.Var(&layerCli.Exclude, "explicit-include", "sets exclude on cli layer to true, makes it so namespaces or deployments have to specify downscaler/exclude=false (default: false)")
flag.IntVar(&layerCli.DownscaleReplicas, "downtime-replicas", 0, "the replicas to scale down to (default: 0)")
flag.Var(&layerCli.GracePeriod, "grace-period", "the grace period between creation of workload until first downscale (default: 15min)")
flag.Var((*values.Int32Value)(&layerCli.DownscaleReplicas), "downtime-replicas", "the replicas to scale down to (default: 0)")
flag.Var((*values.DurationValue)(&layerCli.GracePeriod), "grace-period", "the grace period between creation of workload until first downscale (default: 15min)")

// cli runtime configuration
flag.BoolVar(&dryRun, "dry-run", false, "print actions instead of doing them. enables debug logs (default: false)")
flag.BoolVar(&debug, "debug", false, "print more debug information (default: false)")
flag.BoolVar(&once, "once", false, "run scan only once (default: false)")
flag.Var(&interval, "interval", "time between scans (default: 30s)")
flag.Var(&includeNamespaces, "namespace", "restrict the downscaler to the specified namespaces (default: all)")
flag.Var(&includeResources, "include-resources", "restricts the downscaler to the specified resource types (default: deployments)")
flag.Var((*values.DurationValue)(&interval), "interval", "time between scans (default: 30s)")
flag.Var((*values.StringListValue)(&includeNamespaces), "namespace", "restrict the downscaler to the specified namespaces (default: all)")
flag.Var((*values.StringListValue)(&includeResources), "include-resources", "restricts the downscaler to the specified resource types (default: deployments)")
flag.Var(&excludeNamespaces, "exclude-namespaces", "exclude namespaces from being scaled (default: kube-system,kube-downscaler)")
flag.Var(&excludeWorkloads, "exclude-deployments", "exclude deployments from being scaled (optional)")
flag.Var(&includeLabels, "matching-labels", "restricts the downscaler to workloads with these labels (default: all)")
Expand Down Expand Up @@ -96,21 +99,24 @@ func main() {
}
ctx := context.Background()

slog.Debug("getting client for kubernetes")
client, err := kubernetes.NewClient(kubeconfig, dryRun)
if err != nil {
slog.Error("failed to create new kubernetes client", "error", err)
slog.Error("failed to create new Kubernetes client", "error", err)
os.Exit(1)
}

slog.Info("started downscaler")
for {
slog.Debug("scanning workloads")
slog.Info("scanning workloads")

workloads, err := client.GetWorkloads(includeNamespaces, includeResources, ctx)
if err != nil {
slog.Error("failed to get workloads", "error", err)
os.Exit(1)
}
workloads = scalable.FilterExcluded(workloads, includeLabels, excludeNamespaces, excludeWorkloads)
slog.Info("scanning over workloads matching filters", "amount", len(workloads))

var wg sync.WaitGroup
for _, workload := range workloads {
Expand All @@ -129,6 +135,7 @@ func main() {
}()
}
wg.Wait()
slog.Info("successfully scanned all workloads")

if once {
slog.Debug("once is set to true, exiting")
Expand Down
6 changes: 3 additions & 3 deletions cmd/kubedownscaler/main_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ func (m *MockClient) GetNamespaceAnnotations(namespace string, ctx context.Conte
return args.Get(0).(map[string]string), args.Error(1)
}

func (m *MockClient) DownscaleWorkload(replicas int, workload scalable.Workload, ctx context.Context) error {
func (m *MockClient) DownscaleWorkload(replicas int32, workload scalable.Workload, ctx context.Context) error {
args := m.Called(replicas, workload, ctx)
return args.Error(0)
}
Expand Down Expand Up @@ -65,7 +65,7 @@ func TestScanWorkload(t *testing.T) {
layerEnv := values.NewLayer()

layerCli.DownscaleReplicas = 0
layerCli.GracePeriod = values.Duration(15 * time.Minute)
layerCli.GracePeriod = 15 * time.Minute

mockClient := new(MockClient)
mockWorkload := new(MockWorkload)
Expand All @@ -78,7 +78,7 @@ func TestScanWorkload(t *testing.T) {
})

mockClient.On("GetNamespaceAnnotations", "test-namespace", ctx).Return(map[string]string{}, nil)
mockClient.On("DownscaleWorkload", 0, mockWorkload, ctx).Return(nil)
mockClient.On("DownscaleWorkload", int32(0), mockWorkload, ctx).Return(nil)

err := scanWorkload(mockWorkload, mockClient, ctx, layerCli, layerEnv)

Expand Down
1 change: 1 addition & 0 deletions deployments/chart/.helmignore
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
.git/
.gitignore
icon.svg
Loading

0 comments on commit a9ce751

Please sign in to comment.