Compare commits

9 Commits

Author            SHA1        Message                                          Date
                  35ca4bbe4a  Add renovate.json                                2024-09-11 14:26:08 +00:00
                  7a8e163973  Use https for argocd                             2024-07-24 18:07:42 +02:00
                  d8faab209c  Use latest builder                               2024-07-24 18:03:49 +02:00
                  3f5c97bf35  Fix deployment                                   2024-07-24 17:58:29 +02:00
                  79dd9a18d4  Update the app namespace                         2024-07-11 11:28:23 +00:00
                  59a0287b5e  Trigger build                                    2024-07-11 13:18:21 +02:00
                  f9457940be  Trigger build                                    2024-07-11 12:35:12 +02:00
Nikolai Rodionov  33bce94b2b  Fix woodpecker                                   2024-07-11 12:32:44 +02:00
                  5414085273  Start using ingress instead of virtual service   2024-05-28 22:30:11 +02:00
17 changed files with 256 additions and 264 deletions

.woodpecker.yml (new file, +93 lines)

@@ -0,0 +1,93 @@
---
when:
  event:
    - push
steps:
  - image: alpine/helm
    name: Publish the Helm chart
    commands:
      - helm plugin install https://github.com/chartmuseum/helm-push
      - helm package chart -d chart-package
      - helm repo add --username allanger --password $GITEA_TOKEN badhouseplants-net https://git.badhouseplants.net/api/packages/badhouseplants/helm
      - helm cm-push "./chart-package/$(ls chart-package)" badhouseplants-net
    secrets:
      - gitea_token
  - name: Test a build
    image: git.badhouseplants.net/badhouseplants/hugo-container
    commands:
      - hugo -s ./src
  - name: Build and push the docker image
    image: git.badhouseplants.net/badhouseplants/badhouseplants-builder:latest
    privileged: true
    depends_on:
      - Test a build
    secrets:
      - gitea_token
    environment:
      BUILDER_COMMIT: 2449b73b13a62ae916c6703778d096e5290157b3
    commands:
      - rm -rf $DRONE_WORKSPACE/src/assets/
      - ./scripts/build-container.pl
    backend_options:
      kubernetes:
        resources:
          requests:
            memory: 500Mi
            cpu: 200m
          limits:
            memory: 1000Mi
            cpu: 1000m
        securityContext:
          privileged: true
  - name: Sync pictures from lfs to Minio
    image: git.badhouseplants.net/badhouseplants/badhouseplants-builder:latest
    depends_on:
      - Test a build
    secrets:
      - rclone_config_content
    environment:
      RCLONE_CONFIG: /tmp/rclone.conf
    commands:
      - echo "$RCLONE_CONFIG_CONTENT" > $RCLONE_CONFIG
      - ./scripts/upload-media.pl
  - name: Deploy the application
    image: git.badhouseplants.net/badhouseplants/badhouseplants-builder:latest
    depends_on:
      - Build and push the docker image
      - Sync pictures from lfs to Minio
    secrets:
      - gitea_token
      - argocd_auth_token
      - argo_github_oauth_key
      - argo_google_oauth_key
    environment:
      ARGOCD_SERVER: https://argo.badhouseplants.net:443
    commands:
      - ./scripts/deploy-app.pl
  - name: Cleanup everything
    image: git.badhouseplants.net/badhouseplants/badhouseplants-builder:latest
    depends_on:
      - Deploy the application
    secrets:
      - gitea_token
      - argocd_auth_token
      - rclone_config_content
    environment:
      ARGOCD_SERVER: argo.badhouseplants.net:443
      RCLONE_CONFIG: /tmp/rclone.conf
    commands:
      - echo "$RCLONE_CONFIG_CONTENT" > $RCLONE_CONFIG
      - ./scripts/cleanup.pl
  - name: Spell-Checker
    failure: ignore
    image: node
    commands:
      - npm i markdown-spellcheck -g
      - mdspell "src/content/**/*.md" -n -r

@@ -2,10 +2,10 @@ apiVersion: v2
name: badhouseplants-net
description: A Helm chart for Kubernetes
type: application
version: 0.8.6
version: 0.9.0
appVersion: "4.20.0"
dependencies:
  - name: remark42
    version: 0.5.5
    version: 0.7.0
    repository: https://groundhog2k.github.io/helm-charts/
    condition: remark42.enabled

@ -1,57 +0,0 @@
{{- if .Values.istio.enabled -}}
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
name: {{ include "badhouseplants-net.fullname" . }}
labels:
{{- include "badhouseplants-net.labels" . | nindent 4 }}
{{- with .Values.istio.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
gateways:
- istio-system/badhouseplants-net
hosts:
{{- range .Values.istio.hosts}}
- {{ . }}
{{- end }}
http:
- match:
- uri:
prefix: {{ .Values.istio.prefix }}
route:
- destination:
host: {{ include "badhouseplants-net.fullname" . }}
port:
number: {{ .Values.service.port }}
{{- end }}
---
{{- if .Values.remark42.istio.enabled -}}
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
name: {{ include "remark42.fullname" . }}-remark42
labels:
{{- include "badhouseplants-net.labels" . | nindent 4 }}
{{- with .Values.remark42.istio.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
gateways:
- istio-system/badhouseplants-net
hosts:
{{- range .Values.remark42.istio.hosts}}
- {{ . }}
{{- end }}
http:
- match:
- uri:
prefix: {{ .Values.remark42.istio.prefix }}
route:
- destination:
host: {{ .Release.Name }}-remark42
port:
number: {{ .Values.remark42.service.port }}
{{- end }}

@@ -39,14 +39,6 @@ hugo:
  env:
    HUGO_PARAMS_GITBRANCH: main
istio:
  annotations: {}
  enabled: true
  hosts:
    - badhouseplants.net
    - www.badhouseplants.net
  prefix: /
volumes:
  # ----------------------------------------------
  # -- An emptydir volume where hugo should

@@ -6,10 +6,10 @@ metadata:
    branch: $ARGO_APP_BRANCH
    commit_sha: $ARGO_APP_IMAGE_TAG
  name: badhouseplants-$ARGO_APP_BRANCH
  namespace: argo-system
  namespace: platform
spec:
  destination:
    namespace: badhouseplants-$ARGO_APP_NAMESPACE
    namespace: $ARGO_APP_NAMESPACE
    server: https://kubernetes.default.svc
  project: badhouseplants
  source:

@@ -2,7 +2,7 @@ apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: badhouseplants
  namespace: argo-system
  namespace: platform
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
@@ -11,14 +11,12 @@ spec:
    - '*'
  destinations:
    - namespace: badhouseplants-*
    - namespace: development
      server: https://kubernetes.default.svc
      name: in-cluster
    - namespace: production
      server: https://kubernetes.default.svc
      name: in-cluster
  # Deny all cluster-scoped resources from being created, except for Namespace
  clusterResourceWhitelist:
    - group: ''
      kind: Namespace
  # Allow all namespaced-scoped resources to be created, except for ResourceQuota, LimitRange, NetworkPolicy
  namespaceResourceBlacklist:
@@ -31,4 +29,4 @@ spec:
  # Enables namespace orphaned resource monitoring.
  orphanedResources:
    warn: false
    warn: false

@@ -5,11 +5,33 @@ values: |
      tag: $ARGO_APP_IMAGE_TAG
    env:
      HUGO_PARAMS_GITCOMMIT: $ARGO_APP_IMAGE_TAG
  istio:
    annotations:
  ingress:
    enabled: true
    className: ~
    annotations:
      kubernetes.io/ingress.class: traefik
      kubernetes.io/tls-acme: "true"
      kubernetes.io/ingress.allow-http: "false"
      kubernetes.io/ingress.global-static-ip-name: ""
      cert-manager.io/cluster-issuer: badhouseplants-issuer-http01
      link.argocd.argoproj.io/env: https://badhouseplants.net/
      link.argocd.argoproj.io/build: $DRONE_BUILD_LINK
      link.argocd.argoproj.io/remark42: https://remark42.badhouseplants.net/web
    pathtype: ImplementationSpecific
    hosts:
      - host: badhouseplants.net
        paths:
          - path: /
            pathType: ImplementationSpecific
      - host: www.badhouseplants.net
        paths:
          - path: /
            pathType: ImplementationSpecific
    tls:
      - secretName: badhp-tls
        hosts:
          - badhouseplants.net
          - www.badhouseplants.net
  remark42:
    settings:
      secret: $ARGO_REMARK_SECRET
@@ -26,3 +48,25 @@ values: |
        secret: $ARGO_GOOGLE_OAUTH_KEY
    storage:
      requestedSize: 300Mi
    ingress:
      enabled: true
      className: ~
      annotations:
        kubernetes.io/ingress.class: traefik
        kubernetes.io/tls-acme: "true"
        kubernetes.io/ingress.allow-http: "false"
        kubernetes.io/ingress.global-static-ip-name: ""
        cert-manager.io/cluster-issuer: badhouseplants-issuer-http01
        link.argocd.argoproj.io/remark42: https://remark42.badhouseplants.net/web
      ## Hosts
      hosts:
        - host: remark42.badhouseplants.net
          paths:
            - path: /
              pathType: ImplementationSpecific
      tls:
        secretName: chart-example-tls
        hosts:
          - remark42.badhouseplants.net

@@ -1,12 +1,27 @@
---
values: |
  istio:
    hosts:
      - $ARGO_APP_HOSTNAME
    annotations:
  ingress:
    enabled: true
    className: ~
    annotations:
      kubernetes.io/ingress.class: traefik
      kubernetes.io/tls-acme: "true"
      kubernetes.io/ingress.allow-http: "false"
      kubernetes.io/ingress.global-static-ip-name: ""
      cert-manager.io/cluster-issuer: badhouseplants-issuer-http01
      link.argocd.argoproj.io/env: https://$ARGO_APP_HOSTNAME/
      link.argocd.argoproj.io/remark42: https://remark42-$ARGO_APP_HOSTNAME/web
      link.argocd.argoproj.io/build: $DRONE_BUILD_LINK
    pathtype: ImplementationSpecific
    hosts:
      - host: $ARGO_APP_HOSTNAME
        paths:
          - path: /
            pathType: ImplementationSpecific
    tls:
      - secretName: badhp-$ARGO_APP_BRANCH-tls
        hosts:
          - $ARGO_APP_HOSTNAME
  hugo:
    image:
      tag: $ARGO_APP_IMAGE_TAG
@@ -17,13 +32,31 @@ values: |
      HUGO_PARAMS_COMMENTS_REMARK42_HOST: https://remark42-$ARGO_APP_HOSTNAME
      HUGO_PARAMS_GITCOMMIT: $ARGO_APP_IMAGE_TAG
  remark42:
    istio:
      hosts:
        - remark42-$ARGO_APP_HOSTNAME
    settings:
      url: https://remark42-$ARGO_APP_HOSTNAME/
      auth:
        anonymous: true
        secretKey: $ARGO_REMARK_SECRET
    ingress:
      enabled: true
      className: ~
      annotations:
        kubernetes.io/ingress.class: traefik
        kubernetes.io/tls-acme: "true"
        kubernetes.io/ingress.allow-http: "false"
        kubernetes.io/ingress.global-static-ip-name: ""
        cert-manager.io/cluster-issuer: badhouseplants-issuer-http01
        link.argocd.argoproj.io/remark42: https://remark42-$ARGO_APP_HOSTNAME/
      ## Hosts
      hosts:
        - host: remark42.badhouseplants.net
          paths:
            - path: /
              pathType: ImplementationSpecific
      tls:
        secretName: remark-$ARGO_APP_BRANCH-tls
        hosts:
          - remark42-$ARGO_APP_HOSTNAME
  rclone:
    command: 'rclone copy -P badhouseplants-public:/badhouseplants-net/$ARGO_APP_IMAGE_TAG /static'

renovate.json (new file, +3 lines)

@@ -0,0 +1,3 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json"
}

@@ -18,9 +18,9 @@ chomp($remark_secret);
$ENV{'ARGO_APP_CHART_VERSION'} = $chart_version;
$ENV{'ARGO_APP_BRANCH'} = $git_branch;
if ($git_branch eq $main_branch) {
  $ENV{'ARGO_APP_NAMESPACE'} = $git_branch;
  $ENV{'ARGO_APP_NAMESPACE'} = "production";
} else {
  $ENV{'ARGO_APP_NAMESPACE'} = "preview"
  $ENV{'ARGO_APP_NAMESPACE'} = "development"
}
$ENV{'ARGO_APP_HOSTNAME'} = "$git_branch-dev.badhouseplants.net";
$ENV{'ARGO_APP_IMAGE_TAG'} = $git_commit_sha;
@@ -72,4 +72,3 @@ foreach my $app (@all_applications) {
    }
  }
}

@@ -1,12 +1,13 @@
---
title: "Dynamic Environment Per Branch with ArgoCD"
date: 2023-02-25T14:00:00+01:00
image: "/posts/argocd-dynamic-environment-per-branch-part-1/cover.png"
draft: false
categories:
- "Kubernetes"
- "CI-CD"
---
+++
title = "Dynamic Environment Per Branch with ArgoCD"
date = 2023-02-25T14:00:00+01:00
image = "/posts/argocd-dynamic-environment-per-branch-part-1/cover.png"
draft = false
categories = [
"Kubernetes",
"CI-CD"
]
+++
[Do you remember?]({{< ref "dont-use-argocd-for-infrastructure" >}})
> And using `helmfile`, I will install `ArgoCD` to my clusters, of course, because it's an awesome tool, without any doubt. But don't manage your infrastructure with it, because it's a part of your infrastructure, and it's a service that you provide to other teams. I'll talk about that in one of the next posts.
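As a concrete illustration of that workflow, installing `ArgoCD` itself from `helmfile` can be as small as the sketch below; the argo-helm repository URL is the publicly documented one, while the release name and namespace are assumptions, not taken from this repo:

```yaml
# Sketch: installing ArgoCD via helmfile (release name/namespace assumed).
repositories:
  - name: argo
    url: https://argoproj.github.io/argo-helm

releases:
  - name: argocd
    namespace: argocd
    chart: argo/argo-cd
```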

@@ -1,12 +1,13 @@
---
title: "ArgoCD vs Helmfile: Applications"
date: 2023-02-13T12:14:09+01:00
image: "/posts/argocd-vs-helmfile/cover-applications.png"
draft: false
categories:
- "Kubernetes"
- "CI-CD"
---
+++
title = "ArgoCD vs Helmfile: Applications"
date = 2023-02-13T12:14:09+01:00
image = "/posts/argocd-vs-helmfile/cover-applications.png"
draft = false
categories = [
"Kubernetes",
"CI-CD"
]
+++
> So, as promised in [the previous ArgoCD post]({{< ref "dont-use-argocd-for-infrastructure" >}}), I'll try to show a simple example of Pull Requests for different kinds of setups. This is the first part; putting everything in a single post seemed like too much.

@@ -1,12 +1,13 @@
---
title: 'ArgoCD vs Helmfile: ApplicationSet'
date: 2023-02-15T10:14:09+01:00
image: "/posts/argocd-vs-helmfile/cover-applicationset.png"
draft: false
categories:
- "Kubernetes"
- "CI-CD"
---
+++
title = 'ArgoCD vs Helmfile: ApplicationSet'
date = 2023-02-15T10:14:09+01:00
image = "/posts/argocd-vs-helmfile/cover-applicationset.png"
draft = false
categories = [
"Kubernetes",
"CI-CD"
]
+++
This is the second post about *"argocding"* your infrastructure. [The first one can be found here]({{< ref "argocd-vs-helmfile-application" >}}).

@@ -1,11 +1,12 @@
---
title: "Argocd vs Helmfile: Helmfile"
date: 2023-02-17T12:48:51+01:00
draft: false
categories:
- "Kubernetes"
- "CI-CD"
---
+++
title = "Argocd vs Helmfile: Helmfile"
date = 2023-02-17T12:48:51+01:00
draft = false
categories = [
"Kubernetes",
"CI-CD"
]
+++
In the two previous posts, I described how to install a couple of applications with [`Applications`]({{< relref "/post/allanger/argocd-vs-helmfile-application" >}}) and [`ApplicationSets`]({{< relref "/post/allanger/argocd-vs-helmfile-applicationset" >}}); this one is the last in the row. Here I'm going to install the same applications (`VPA` and `Goldilocks`) with helmfile, and explain why I think it's better than `ArgoCD`.
So let's start. You can find the [initial config](https://git.badhouseplants.net/allanger/helmfile-vs-argo/src/branch/helmfile-main) here. Let's see what we've got:
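For orientation, a minimal `helmfile.yaml` for these two charts could look roughly like the sketch below. This is my assumption of the shape (Fairwinds chart repository and chart names), not the exact file from the linked repo:

```yaml
# Sketch: install VPA and Goldilocks via helmfile (assumed chart sources).
repositories:
  - name: fairwinds-stable
    url: https://charts.fairwinds.com/stable

releases:
  - name: vpa
    namespace: vpa-system
    chart: fairwinds-stable/vpa
  - name: goldilocks
    namespace: vpa-system
    chart: fairwinds-stable/goldilocks
```

With a file like this, `helmfile apply` installs or upgrades both releases in one go, which is the workflow the post compares against `ArgoCD`.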
@@ -450,7 +451,7 @@ vpa-system, goldilocks-dashboard, ServiceAccount (v1) has been added:
hook[prepare] logs | diff -u -N /var/folders/w1/27ptcr29547f0g8732kmffwm0000gn/T/LIVE-4051758900/apiextensions.k8s.io.v1.CustomResourceDefinition..verticalpodautoscalercheckpoints.autoscaling.k8s.io /var/folders/w1/27ptcr29547f0g8732kmffwm0000gn/T/MERGED-3664876659/apiextensions.k8s.io.v1.CustomResourceDefinition..verticalpodautoscalercheckpoints.autoscaling.k8s.io
hook[prepare] logs | --- /var/folders/w1/27ptcr29547f0g8732kmffwm0000gn/T/LIVE-4051758900/apiextensions.k8s.io.v1.CustomResourceDefinition..verticalpodautoscalercheckpoints.autoscaling.k8s.io 2023-02-17 13:15:29
hook[prepare] logs | --- /var/folders/w1/27ptcr29547f0g8732kmffwm0000gn/T/MERGED-3664876659/apiextensions.k8s.io.v1.CustomResourceDefinition..verticalpodautoscalercheckpoints.autoscaling.k8s.io 2023-02-17 13:15:29
hook[prepare] logs | +++ /var/folders/w1/27ptcr29547f0g8732kmffwm0000gn/T/MERGED-3664876659/apiextensions.k8s.io.v1.CustomResourceDefinition..verticalpodautoscalercheckpoints.autoscaling.k8s.io 2023-02-17 13:15:29
hook[prepare] logs | @@ -0,0 +1,216 @@
hook[prepare] logs | +apiVersion: apiextensions.k8s.io/v1
hook[prepare] logs | +kind: CustomResourceDefinition
@@ -670,7 +671,7 @@ hook[prepare] logs | + storedVersions:
hook[prepare] logs | + - v1
hook[prepare] logs | diff -u -N /var/folders/w1/27ptcr29547f0g8732kmffwm0000gn/T/LIVE-4051758900/apiextensions.k8s.io.v1.CustomResourceDefinition..verticalpodautoscalers.autoscaling.k8s.io /var/folders/w1/27ptcr29547f0g8732kmffwm0000gn/T/MERGED-3664876659/apiextensions.k8s.io.v1.CustomResourceDefinition..verticalpodautoscalers.autoscaling.k8s.io
hook[prepare] logs | --- /var/folders/w1/27ptcr29547f0g8732kmffwm0000gn/T/LIVE-4051758900/apiextensions.k8s.io.v1.CustomResourceDefinition..verticalpodautoscalers.autoscaling.k8s.io 2023-02-17 13:15:29
hook[prepare] logs | --- /var/folders/w1/27ptcr29547f0g8732kmffwm0000gn/T/MERGED-3664876659/apiextensions.k8s.io.v1.CustomResourceDefinition..verticalpodautoscalers.autoscaling.k8s.io 2023-02-17 13:15:29
hook[prepare] logs | +++ /var/folders/w1/27ptcr29547f0g8732kmffwm0000gn/T/MERGED-3664876659/apiextensions.k8s.io.v1.CustomResourceDefinition..verticalpodautoscalers.autoscaling.k8s.io 2023-02-17 13:15:29
hook[prepare] logs | @@ -0,0 +1,550 @@
hook[prepare] logs | +apiVersion: apiextensions.k8s.io/v1
hook[prepare] logs | +kind: CustomResourceDefinition

@@ -1,11 +1,12 @@
---
title: 'Do we really need Continuous Reconciliation after all?'
date: 2024-02-13T15:04:44+01:00
draft: true
categories:
- "Kubernetes"
- "CI-CD"
---
+++
title = 'Do we really need Continuous Reconciliation after all?'
date = 2024-02-13T15:04:44+01:00
draft = true
categories = [
"Kubernetes",
"CI-CD"
]
+++
> Well, alright, I guess it depends

@@ -1,13 +1,13 @@
---
title: "Don't use ArgoCD for your infrastructure"
date: 2023-02-09T12:47:32+01:00
draft: false
image: /posts/dont-use-argocd-for-infrastructure/cover.png
categories:
- "Kubernetes"
- "CI-CD"
---
+++
title = "Don't use ArgoCD for your infrastructure"
date = 2023-02-09T12:47:32+01:00
draft = false
image = '/posts/dont-use-argocd-for-infrastructure/cover.png'
categories = [
"Kubernetes",
"CI-CD"
]
+++
> Of course, it's just a clickbait title. Use whatever works for you. I'll just describe why I wouldn't use `ArgoCD` for your infrastructure.

@ -1,118 +0,0 @@
---
title: "Testing External Snapshooter"
description: Trying to use the external-snapshooter
date: 2024-05-14T15:37:59+02:00
image:
math:
hidden: false
comments: true
draft: true
---
# Intro
# Installing
I've created a new, empty k3s cluster and installed `coreDNS`, `cilium`, and Rancher's `local-path-provisioner` into it.
```shell
$ kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system cilium-operator-9c465d6d8-jlbc9 1/1 Running 0 72s
kube-system cilium-gzwlp 1/1 Running 0 72s
kube-system local-path-provisioner-6896b5f8c-7mpfl 1/1 Running 0 50s
kube-system coredns-7db6d4f6d7-vqhpc 1/1 Running 0 61s
```
Now let's install the external-snapshotter. The project's source code can be found at <https://github.com/kubernetes-csi/external-snapshotter>, but it doesn't ship a Helm chart, so I'll install it with this one instead: <https://github.com/piraeusdatastore/helm-charts/tree/main/charts/snapshot-controller>
```shell
$ helm repo add piraeus-charts https://piraeus.io/helm-charts/
$ helm install snapshot-controller piraeus-charts/snapshot-controller -n kube-system
$ kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system cilium-operator-9c465d6d8-jlbc9 1/1 Running 0 3m35s
kube-system cilium-gzwlp 1/1 Running 0 3m35s
kube-system local-path-provisioner-6896b5f8c-7mpfl 1/1 Running 0 3m13s
kube-system coredns-7db6d4f6d7-vqhpc 1/1 Running 0 3m24s
kube-system snapshot-controller-5fd4df575-2vmhl 1/1 Running 0 16s
kube-system snapshot-validation-webhook-79f9c6bb5f-p6hqx 1/1 Running 0 16s
$ kubectl get crd
NAME CREATED AT
...
volumegroupsnapshotclasses.groupsnapshot.storage.k8s.io 2024-05-14T13:52:17Z
volumegroupsnapshotcontents.groupsnapshot.storage.k8s.io 2024-05-14T13:52:17Z
volumegroupsnapshots.groupsnapshot.storage.k8s.io 2024-05-14T13:52:17Z
volumesnapshotclasses.snapshot.storage.k8s.io 2024-05-14T13:52:17Z
volumesnapshotcontents.snapshot.storage.k8s.io 2024-05-14T13:52:18Z
volumesnapshots.snapshot.storage.k8s.io 2024-05-14T13:52:18Z
```
Let's create a dummy workload that writes something to a PVC
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test
spec:
  storageClassName: local-path
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: test
spec:
  volumes:
    - name: test
      persistentVolumeClaim:
        claimName: test
  containers:
    - name: test
      image: alpine
      volumeMounts:
        - mountPath: /src
          name: test
      command:
        - sh
      args:
        - -c
        - sleep 1000
```
```shell
$ kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
test Bound pvc-4924e25f-84ae-4640-8199-0156659cb167 1Gi RWO local-path 2m7s
```
```shell
$ kubectl exec -it test -- sh
# -- Inside the container
$ echo 1 > /src/test
$ cat /src/test
1
```
Now let's try creating a snapshot of that PVC
```yaml
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: test-snapshot
spec:
  source:
    # To snapshot an existing PVC, reference it by name;
    # volumeSnapshotContentName is only for pre-provisioned snapshots.
    persistentVolumeClaimName: test
```
```shell
$ kubectl get volumesnapshot
```
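Note that for a `VolumeSnapshot` to be provisioned dynamically, the cluster also needs a `VolumeSnapshotClass` backed by a CSI driver that implements snapshots; Rancher's `local-path` provisioner is not a CSI driver, so the sketch below uses the example CSI hostpath driver as a placeholder:

```yaml
# Sketch: a VolumeSnapshotClass; replace the driver with a CSI driver
# that actually supports volume snapshots in your cluster.
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: test-snapshot-class
driver: hostpath.csi.k8s.io
deletionPolicy: Delete
```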