The "new" cluster got all the configurations set on the "old" cluster.
Certificates already provisioned. Part 6 complete.
parent c206bb1e5b
commit 19d8748741
@@ -3,6 +3,8 @@ kind: DestinationRule
metadata:
  name: filebrowser
  namespace: external
  labels:
    app: filebrowser
spec:
  host: filebrowser.external.svc.cluster.local
  trafficPolicy:

@@ -3,6 +3,8 @@ kind: ServiceEntry
metadata:
  name: filebrowser-se
  namespace: external
  labels:
    app: filebrowser
spec:
  hosts:
    - filebrowser.external.svc.cluster.local

@@ -3,6 +3,8 @@ kind: VirtualService
metadata:
  name: filebrowser-vs
  namespace: external
  labels:
    app: filebrowser
spec:
  hosts:
    - "filebrowser.filter.home"

@@ -3,6 +3,8 @@ kind: DestinationRule
metadata:
  name: gitea
  namespace: external
  labels:
    app: gitea
spec:
  host: gitea.external.svc.cluster.local
  trafficPolicy:

@@ -3,6 +3,8 @@ kind: ServiceEntry
metadata:
  name: gitea-se
  namespace: external
  labels:
    app: gitea
spec:
  hosts:
    - gitea.external.svc.cluster.local

@@ -3,6 +3,8 @@ kind: VirtualService
metadata:
  name: gitea-vs
  namespace: external
  labels:
    app: gitea
spec:
  hosts:
    - "gitea.filter.home"

@@ -3,6 +3,8 @@ kind: VirtualService
metadata:
  name: jelly-vs
  namespace: external
  labels:
    app: jellyfin
spec:
  hosts:
    - "jelly.filter.home"

@@ -3,6 +3,8 @@ kind: DestinationRule
metadata:
  name: tube
  namespace: external
  labels:
    app: tube
spec:
  host: tube.external.svc.cluster.local
  trafficPolicy:

@@ -3,6 +3,8 @@ kind: ServiceEntry
metadata:
  name: tube-se
  namespace: external
  labels:
    app: tube
spec:
  hosts:
    - tube.external.svc.cluster.local

@@ -3,6 +3,8 @@ kind: VirtualService
metadata:
  name: tube-vs
  namespace: external
  labels:
    app: tube
spec:
  hosts:
    - "tube.filter.home"

@@ -0,0 +1,14 @@
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
metadata:
  namespace: istio-system
  name: istio-config
  labels:
    last-update: 2023-07-16
spec:
  profile: minimal
  meshConfig:
    accessLogFile: /dev/stdout
    enableTracing: true
    ingressService: istio-public-ingress
    ingressSelector: public-ingress

@@ -0,0 +1,21 @@
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
metadata:
  namespace: istio-system
  name: egress
  labels:
    last-update: 2023-07-16
spec:
  profile: empty
  components:
    egressGateways:
      - namespace: istio-system
        name: egress-gw
        enabled: true
        label:
          istio: egress-gw
          app: istio-egress-gw
        k8s:
          service:
            type: LoadBalancer
            loadBalancerIP: 192.168.1.39

@@ -0,0 +1,21 @@
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
metadata:
  namespace: istio-system
  name: local-ingress
  labels:
    last-update: 2023-07-16
spec:
  profile: empty
  components:
    ingressGateways:
      - namespace: istio-system
        name: istio-local-ingress
        enabled: true
        label:
          istio: local-ingress
          app: istio-local-ingress
        k8s:
          service:
            type: LoadBalancer
            loadBalancerIP: 192.168.1.21

@@ -0,0 +1,21 @@
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
metadata:
  namespace: istio-system
  name: public-ingress
  labels:
    last-update: 2023-07-16
spec:
  profile: empty
  components:
    ingressGateways:
      - namespace: istio-system
        name: istio-public-ingress
        enabled: true
        label:
          istio: public-ingress
          app: istio-public-ingress
        k8s:
          service:
            type: LoadBalancer
            loadBalancerIP: 192.168.1.20

Migrations/Forget_Traefik_2023/P6_Redeployment/MetalLB.yaml (new file, 29 lines)

@@ -0,0 +1,29 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: cherrypick
  namespace: metallb-system
spec:
  addresses:
    - 192.168.1.20-192.168.1.39
  autoAssign: false
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: flex
  namespace: metallb-system
spec:
  addresses:
    - 192.168.1.41 - 192.168.1.60
  autoAssign: true
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: l2-advert
  namespace: metallb-system
spec:
  ipAddressPools:
    - cherrypick
    - flex

@@ -0,0 +1,64 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-public
  namespace: istio-system
spec:
  acme:
    # The ACME server URL
    # server: https://acme-staging-v02.api.letsencrypt.org/directory # Testing
    server: https://acme-v02.api.letsencrypt.org/directory # Prod
    # Email address used for ACME registration
    email: filter.oriol@gmail.com
    # Name of a secret used to store the ACME account private key
    privateKeySecretRef:
      name: letsencrypt-public
    # Enable the HTTP-01 challenge provider
    solvers:
      - http01:
          ingress:
            class: istio
            podTemplate:
              metadata:
                annotations:
                  sidecar.istio.io/inject: "true"
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: filterhome-domain-cert-public
  namespace: istio-system
spec:
  secretName: filterhome-domain-cert-public
  duration: 720h # 30d
  renewBefore: 24h # 1d
  # duration: 2160h # 90d
  # renewBefore: 360h # 15d
  isCA: false
  privateKey:
    algorithm: RSA
    encoding: PKCS1
    size: 4096
    rotationPolicy: Always
  usages:
    - server auth
    - client auth
  dnsNames:
    ## - "*.filterhome.xyz"

    # Gitea
    - "gitea.filterhome.xyz"

    # Jellyfin
    - "jelly.filterhome.xyz"

    # Filebrowser
    - "filebrowser.filterhome.xyz"

    # Tube
    - "tube.filterhome.xyz"

  issuerRef:
    name: letsencrypt-public
    kind: ClusterIssuer
    group: cert-manager.io

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
  name: external
  labels:
    istio-injection: "enabled"

@@ -0,0 +1,15 @@
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  name: filebrowser
  namespace: external
  labels:
    app: filebrowser
spec:
  host: filebrowser.external.svc.cluster.local
  trafficPolicy:
    tls:
      mode: SIMPLE
    connectionPool:
      http:
        h2UpgradePolicy: UPGRADE

@@ -0,0 +1,19 @@
apiVersion: networking.istio.io/v1alpha3
kind: ServiceEntry
metadata:
  name: filebrowser-se
  namespace: external
  labels:
    app: filebrowser
spec:
  hosts:
    - filebrowser.external.svc.cluster.local
  location: MESH_INTERNAL
  ports:
    - number: 443
      name: https
      protocol: HTTPS
  resolution: NONE
  workloadSelector:
    labels:
      host: srv

@@ -0,0 +1,21 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: filebrowser-vs
  namespace: external
  labels:
    app: filebrowser
spec:
  hosts:
    - "filebrowser.filter.home"
    - "filebrowser.filterhome.xyz"
    - "filebrowser.filterhome.duckdns.org"
  gateways:
    - default/public-gateway
    - default/local-gateway
  http:
    - route:
        - destination:
            host: filebrowser.external.svc.cluster.local
            port:
              number: 443

@@ -0,0 +1,40 @@
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: http-to-https-public
  namespace: default
spec:
  selector:
    istio: public-ingress
  servers:
    - port:
        number: 80
        name: http2
        protocol: HTTP2
      hosts:
        - "*"
      tls:
        httpsRedirect: true
---
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: public-gateway
  namespace: default
spec:
  selector:
    istio: public-ingress
  servers:
    - port:
        number: 443
        name: https
        protocol: HTTPS
      hosts:
        - "*.filterhome.xyz"
        - "filterhome.xyz"

        # - "filterhome.duckdns.org"
        # - "*.filterhome.duckdns.org"
      tls:
        mode: SIMPLE
        credentialName: filterhome-domain-cert-public

@@ -0,0 +1,15 @@
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  name: gitea
  namespace: external
  labels:
    app: gitea
spec:
  host: gitea.external.svc.cluster.local
  trafficPolicy:
    tls:
      mode: SIMPLE
    connectionPool:
      http:
        h2UpgradePolicy: UPGRADE

@@ -0,0 +1,19 @@
apiVersion: networking.istio.io/v1alpha3
kind: ServiceEntry
metadata:
  name: gitea-se
  namespace: external
  labels:
    app: gitea
spec:
  hosts:
    - gitea.external.svc.cluster.local
  location: MESH_INTERNAL
  ports:
    - number: 443
      name: https
      protocol: HTTPS
  resolution: NONE
  workloadSelector:
    labels:
      host: srv

@@ -0,0 +1,21 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: gitea-vs
  namespace: external
  labels:
    app: gitea
spec:
  hosts:
    - "gitea.filter.home"
    - "gitea.filterhome.xyz"
    - "gitea.filterhome.duckdns.org"
  gateways:
    - default/public-gateway
    - default/local-gateway
  http:
    - route:
        - destination:
            host: gitea.external.svc.cluster.local
            port:
              number: 443

@@ -0,0 +1,9 @@
apiVersion: networking.istio.io/v1alpha3
kind: WorkloadEntry
metadata:
  name: srv-host
  namespace: external
spec:
  address: 192.168.1.3
  labels:
    host: srv

@@ -0,0 +1,16 @@
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  name: jelly
  namespace: external
  labels:
    app: jellyfin
spec:
  host: jelly.external.svc.cluster.local
  trafficPolicy:
    tls:
      mode: SIMPLE
    connectionPool:
      http:
        h2UpgradePolicy: DO_NOT_UPGRADE
        # h2UpgradePolicy: UPGRADE

@@ -0,0 +1,19 @@
apiVersion: networking.istio.io/v1alpha3
kind: ServiceEntry
metadata:
  name: jelly-se
  namespace: external
  labels:
    app: jellyfin
spec:
  hosts:
    - jelly.external.svc.cluster.local
  location: MESH_INTERNAL
  ports:
    - number: 443
      name: https
      protocol: HTTPS
  resolution: NONE
  workloadSelector:
    labels:
      host: srv

@@ -0,0 +1,21 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: jelly-vs
  namespace: external
  labels:
    app: jellyfin
spec:
  hosts:
    - "jelly.filter.home"
    - "jelly.filterhome.xyz"
    - "jelly.filterhome.duckdns.org"
  gateways:
    - default/public-gateway
    - default/local-gateway
  http:
    - route:
        - destination:
            host: jelly.external.svc.cluster.local
            port:
              number: 443

@@ -0,0 +1,15 @@
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  name: tube
  namespace: external
  labels:
    app: tube
spec:
  host: tube.external.svc.cluster.local
  trafficPolicy:
    tls:
      mode: SIMPLE
    connectionPool:
      http:
        h2UpgradePolicy: UPGRADE

@@ -0,0 +1,19 @@
apiVersion: networking.istio.io/v1alpha3
kind: ServiceEntry
metadata:
  name: tube-se
  namespace: external
  labels:
    app: tube
spec:
  hosts:
    - tube.external.svc.cluster.local
  location: MESH_INTERNAL
  ports:
    - number: 443
      name: https
      protocol: HTTPS
  resolution: NONE
  workloadSelector:
    labels:
      host: srv

@@ -0,0 +1,21 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: tube-vs
  namespace: external
  labels:
    app: tube
spec:
  hosts:
    - "tube.filter.home"
    - "tube.filterhome.xyz"
    - "tube.filterhome.duckdns.org"
  gateways:
    - default/public-gateway
    - default/local-gateway
  http:
    - route:
        - destination:
            host: tube.external.svc.cluster.local
            port:
              number: 443

@@ -0,0 +1,42 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: ca-issuer
  namespace: cert-manager
spec:
  ca:
    secretName: local-ca
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: local-wildcard-certificate
  namespace: istio-system
spec:
  secretName: domain-cert-local
  privateKey:
    rotationPolicy: Always
    algorithm: RSA
    encoding: PKCS1
    size: 4096
  duration: 720h # 30d
  renewBefore: 24h # 1d
  subject:
    organizations:
      - FilterHome
  commonName: filterhome
  isCA: false
  usages:
    - server auth
    - client auth
  dnsNames:
    # - demoapi.default
    # - demoapi.default.svc
    # - demoapi.default.svc.cluster
    # - demoapi.default.svc.cluster.local
    - "filter.home"
    - "*.filter.home"
    # - jelly.filter.home
  issuerRef:
    name: ca-issuer
    kind: ClusterIssuer

@@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
  name: local-ca
  namespace: cert-manager
data:
  tls.crt:
  tls.key:

@@ -0,0 +1,37 @@
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: http-to-https-local
  namespace: default
spec:
  selector:
    istio: public-ingress
  servers:
    - port:
        number: 80
        name: http
        protocol: HTTP
      hosts:
        - "*"
      tls:
        httpsRedirect: true
---
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: local-gateway
  namespace: default
spec:
  selector:
    istio: local-ingress
  servers:
    - port:
        number: 443
        name: https
        protocol: HTTPS
      hosts:
        - "filter.home"
        - "*.filter.home"
      tls:
        mode: SIMPLE
        credentialName: domain-cert-local

@@ -141,7 +141,7 @@ Current Issue? For X and y, I need to wait for a while for the DNS provider to r

- [x] Deploy an Ingress LB for local thingies.

> **Note:**\
> **Note:**
> - https://istio.io/latest/docs/tasks/traffic-management/egress/
> - https://istio.io/latest/docs/tasks/traffic-management/egress/egress-kubernetes-services/
> - https://istio.io/latest/docs/reference/config/istio.operator.v1alpha1/

@@ -167,7 +167,7 @@ Current Issue? For X and y, I need to wait for a while for the DNS provider to r
- [x] Deploy configurations to route some services through the deployed Istio `istio-local-ingress` Load Balancer.

> **Note**:\
> Regarding Let's Encrypt certificate provisioning: for testing purposes the `staging` environment should be used; nevertheless, in my scenario I am running directly against the production environment. Why?\
> Regarding Let's Encrypt certificate provisioning: for testing purposes the `staging` environment should be used; nevertheless, in my scenario I am running directly against the production environment. Why?
> - `Staging` and `Production` behave differently, therefore one can get the certificates verified on `Staging` and not on `Production`.
> - I ran into some issues regarding the sentence above, so there was some back and forth; this topic is mentioned [here at the end.](#2x1-able-to-get-staging-le-certs-but-not-the-production-one-when-using-custom-istio-selector-and-only-able-to-get-production-le-certs-when-using-the-default-istio-ingressgateway-selector)
> - Since there was "back and forth" I sort of cheated and set this as it is.

@@ -182,7 +182,7 @@ Current Issue? For X and y, I need to wait for a while for the DNS provider to r

### Part 4

> Completed 27/July/2023\
> Completed 27/July/2023

- [x] Deploy locally a Certificate Authorization Service (on the SRV host.)

@@ -208,6 +208,8 @@ Current Issue? For X and y, I need to wait for a while for the DNS provider to r

### Part 5

> Completed 01/August/2023

- [x] Explore Pi4 Storage options.

- [x] Consider Storage options for the OrangePi5.

@@ -218,8 +220,10 @@ Current Issue? For X and y, I need to wait for a while for the DNS provider to r

### Part 6

> Completed 02/August/2023

- [x] ~~Wipe~~ (**don't wipe**, just use a different drive) and recreate the current `Kluster`, this time using the Pi4 as a _master_, and the 2 Orange Pi5 as _slaves_ (this will require updating the DNS/DHCP local services).
- [ ] Deploy all the services from the previous Kubernetes cluster to the new one.
- [x] Deploy all the services from the previous Kubernetes cluster to the new one.

> **Note**:\
> I can make a new cluster on the Pi4 and remove the taint that prevents scheduling pods on that node. Deploy everything inside (as well as a LB with the exact same IP as the current one, and proceed to stop the Orange Pi 5), then "reformat" the OPi5s with a new distro, install stuff etc., and join them to the cluster running on the Pi4.

@@ -262,13 +266,10 @@ Current Issue? For X and y, I need to wait for a while for the DNS provider to r

- Set wildcard certificates through the `ACME DNS01` challenge.

### Extras?

#### Horizontal Pod Autoscaling for the Istio LBs.

# Execution

## Part 1

### Transfer local network dependencies services from Pi4 to SRV.

#### Install ZIP on `Pi4`

@@ -1757,17 +1758,6 @@ PLAY RECAP *********************************************************************

#### Check cluster status

First I will copy the kubeconfig file to a place of my own.

```shell
cp ksetup/Exported/kubeconfig.conf ~/kubeconfig.conf -v
```

```text
'ksetup/Exported/kubeconfig.conf' -> '/home/savagebidoof/kubeconfig.conf'
```

Pods are deployed correctly

```shell
kubectl get pods --kubeconfig ~/kubeconfig.conf -A -owide

@@ -1791,11 +1781,343 @@ metallb-system speaker-5zptn 1/1 Running 2
metallb-system speaker-whw4n 1/1 Running 2 (22m ago) 26m 192.168.1.11 slave02.filter.home <none> <none>
```

#### Kubeconfig

##### Backup `Kubeconfig` file

I will back up the `kubeconfig.conf` file to a directory of my own.

```shell
cp ksetup/Exported/kubeconfig.conf ~/kubeconfig.conf -v
```

```text
'ksetup/Exported/kubeconfig.conf' -> '/home/savagebidoof/kubeconfig.conf'
```

##### Configure new `Kubeconfig` location

```shell
export KUBECONFIG="/home/savagebidoof/kubeconfig.conf"
```

##### Confirm `Kubeconfig` is selected properly

```shell
kubectl get nodes
```

```text
NAME                  STATUS   ROLES           AGE   VERSION
pi4.filter.home       Ready    control-plane   18h   v1.27.4
slave02.filter.home   Ready    <none>          17h   v1.27.4
```

#### Move workloads

Well, it's time to move everything over.

During the couple of days spent waiting for the NVMe I ordered, I considered changing the IPs of the deployed LBs.

Instead of using:

```yaml
.80 -> public LB
.81 -> local LB
.90 -> egress LB
```

I will be using:

```yaml
.20 -> public LB
.21 -> local LB
.39 -> egress LB
```

##### Deploy lacking CRDs

I already have `MetalLB` and `Calico` installed.

I still lack the Cert-manager CRDs.

```shell
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.12.0/cert-manager.yaml
```

```text
namespace/cert-manager created
customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io created
customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io created
customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io created
customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io created
...
```
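
Before creating any Issuer or Certificate it's worth waiting until the cert-manager deployments are actually ready; something like this should do the trick:

```shell
kubectl wait --for=condition=Available deployment --all -n cert-manager --timeout=180s
```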

##### MetalLB Config

I am setting up 2 Address Pools.

`cherrypick`, for the things where I want to pick the IP myself:

```yaml
kind: IPAddressPool
...
name: cherrypick
- 192.168.1.20-192.168.1.39
...
autoAssign: false
```

And `flex`, to assign IPs dynamically:

```yaml
kind: IPAddressPool
...
name: flex
- 192.168.1.41-192.168.1.60
...
autoAssign: true
```

###### Deploy MetalLB configuration

```shell
kubectl create -f P6_Redeployment/MetalLB.yaml
```

```text
ipaddresspool.metallb.io/cherrypick created
ipaddresspool.metallb.io/flex created
l2advertisement.metallb.io/l2-advert created
```
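
To double-check that both pools and the L2 advertisement got registered, listing the MetalLB resources should show them:

```shell
kubectl get ipaddresspools.metallb.io,l2advertisements.metallb.io -n metallb-system
```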

> **Note**:\
> I should still add labels to the SE (ServiceEntry) resources, etc.

##### Deploy Istio Config and Load Balancers

###### IstioOperator_IstioConfig.yaml

```shell
istioctl install -y -f P6_Redeployment/Istio_Config/IstioOperator_IstioConfig.yaml
```

<pre>This will install the Istio 1.18.2 minimal profile with ["Istio core" "Istiod"] components into the cluster. Proceed? (y/N) y
<span style="color:#7F3FBF">✔</span> Istio core installed
<span style="color:#7F3FBF">✔</span> Istiod installed
<span style="color:#7F3FBF">✔</span> Installation complete
Making this installation the default for injection and validation.</pre>
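
Before moving on to the gateways, a quick look at the control plane namespace should show `istiod` up and running:

```shell
kubectl get pods -n istio-system
```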

###### IstioOperator_IstioEgress.yaml

```shell
istioctl install -y -f P6_Redeployment/Istio_Config/IstioOperator_IstioEgress.yaml
```

<pre><span style="color:#7F3FBF">✔</span> Egress gateways installed
<span style="color:#7F3FBF">✔</span> Installation complete </pre>

> **Note**:\
> The egress gateway doesn't work right off the bat; it requires some additional configuration that I don't think will be covered in this walkthrough.

###### IstioOperator_LocalIngress.yaml

```shell
istioctl install -y -f P6_Redeployment/Istio_Config/IstioOperator_LocalIngress.yaml
```

<pre><span style="color:#7F3FBF">✔</span> Ingress gateways installed
<span style="color:#7F3FBF">✔</span> Installation complete</pre>

###### IstioOperator_PublicIngress.yaml

```shell
istioctl install -y -f P6_Redeployment/Istio_Config/IstioOperator_PublicIngress.yaml
```

<pre><span style="color:#7F3FBF">✔</span> Ingress gateways installed
<span style="color:#7F3FBF">✔</span> Installation complete </pre>

###### Check Service IP provisioning

```shell
kubectl get svc -n istio-system | grep LoadBalancer
```

<pre>egress-gw              <span style="color:#FF7F7F"><b>LoadBalancer</b></span>   10.106.41.20    192.168.1.39   80:31322/TCP,443:30559/TCP                   138m
istio-local-ingress    <span style="color:#FF7F7F"><b>LoadBalancer</b></span>   10.97.14.59     192.168.1.21   15021:30005/TCP,80:30168/TCP,443:32103/TCP   50m
istio-public-ingress   <span style="color:#FF7F7F"><b>LoadBalancer</b></span>   10.100.53.247   192.168.1.20   15021:31249/TCP,80:30427/TCP,443:30411/TCP   50m</pre>

#### Prepare Secrets

##### Placeholder folder for God knows what

```shell
mkdir tmp
```

##### Local Certs

```shell
openssl req -x509 -newkey rsa:4096 -sha512 -days 365 -nodes \
  -keyout tmp/ca.filter.home.key -out tmp/ca.filter.home.cer \
  -subj /C=ES/ST=BAR/O=FilterHome/CN=ca.filter.home \
  -extensions ext \
  -config <(cat <<EOF
[req]
distinguished_name=req
[ext]
keyUsage=critical,keyCertSign,cRLSign
basicConstraints=critical,CA:true,pathlen:1
subjectAltName=DNS:ca.filter.home
EOF
)
```

The shell simply echoes the heredoc lines back:

```text
cmdsubst heredoc> [req]
cmdsubst heredoc> distinguished_name=req
cmdsubst heredoc> [ext]
cmdsubst heredoc> keyUsage=critical,keyCertSign,cRLSign
cmdsubst heredoc> basicConstraints=critical,CA:true,pathlen:1
cmdsubst heredoc> subjectAltName=DNS:ca.filter.home
cmdsubst heredoc> EOF
cmdsubst> )
```
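
A quick way to double-check the subject and validity dates of the freshly minted CA (optional):

```shell
openssl x509 -in tmp/ca.filter.home.cer -noout -subject -dates
```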

Export the `.key` and `.cer` contents as base64:

```shell
cat tmp/ca.filter.home.cer | base64 | tr -d '\n'
```

```shell
cat tmp/ca.filter.home.key | base64 | tr -d '\n'
```

Add the base64 outputs to the secrets file `P6_Redeployment/non_Istio_Config/Local_Certs/Secret.yaml`:

```shell
nano P6_Redeployment/non_Istio_Config/Local_Certs/Secret.yaml
```
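
For reference, the populated secret ends up looking roughly like this; the two values below are placeholders for the base64 blobs printed by the commands above:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: local-ca
  namespace: cert-manager
data:
  tls.crt: <base64 of tmp/ca.filter.home.cer>  # placeholder
  tls.key: <base64 of tmp/ca.filter.home.key>  # placeholder
```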

##### Modify Public Certificate duration

Changed the duration to something more "reasonable".

```shell
nano P6_Redeployment/non_Istio_Config/Certificate_Manager/Issuer.yaml
```
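
The relevant fields of the public `Certificate` now read as follows (the 90d/15d values are left commented out):

```yaml
duration: 720h # 30d
renewBefore: 24h # 1d
# duration: 2160h # 90d
# renewBefore: 360h # 15d
```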

##### Modify Local Certificate duration

Changed the duration to something more "reasonable".

```shell
nano P6_Redeployment/non_Istio_Config/Local_CA/Issuer.yaml
```

###### Set up HTTP to HTTPS in the local gateway.

Added the following Gateway and modified the previously existing one to remove plain `HTTP` access through port 80.

```yaml
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: http-to-https-local
  namespace: default
spec:
  selector:
    istio: public-ingress
  servers:
    - port:
        number: 80
        name: http
        protocol: HTTP
      hosts:
        - "*"
      tls:
        httpsRedirect: true
```
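
Once everything is deployed (next step), the redirect can be checked with a plain HTTP request against any routed hostname; it should come back as a 301 towards the HTTPS URL (the hostname below is just an example):

```shell
curl -sI http://jelly.filter.home
```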

##### Deploy EVERYTHING not IstioOperator

Well, it's time to press the create button. This will print a bunch of text; just make sure everything shows as "created".

```shell
kubectl create -f P6_Redeployment/non_Istio_Config -R
```

```text
clusterissuer.cert-manager.io/letsencrypt-public created
certificate.cert-manager.io/filterhome-domain-cert-public created
namespace/external created
destinationrule.networking.istio.io/filebrowser created
serviceentry.networking.istio.io/filebrowser-se created
virtualservice.networking.istio.io/filebrowser-vs created
gateway.networking.istio.io/http-to-https-public created
gateway.networking.istio.io/public-gateway created
destinationrule.networking.istio.io/gitea created
serviceentry.networking.istio.io/gitea-se created
virtualservice.networking.istio.io/gitea-vs created
workloadentry.networking.istio.io/srv-host created
destinationrule.networking.istio.io/jelly created
serviceentry.networking.istio.io/jelly-se created
virtualservice.networking.istio.io/jelly-vs created
destinationrule.networking.istio.io/tube created
serviceentry.networking.istio.io/tube-se created
virtualservice.networking.istio.io/tube-vs created
clusterissuer.cert-manager.io/ca-issuer created
certificate.cert-manager.io/local-wildcard-certificate created
secret/local-ca created
gateway.networking.istio.io/http-to-https-local created
gateway.networking.istio.io/local-gateway created
```

##### Remove ./tmp folder

We no longer need the `./tmp` folder, therefore we can delete it.

```shell
rm ./tmp/* -v
```

```text
zsh: sure you want to delete all 4 files in /home/savagebidoof/IdeaProjects/home_shit/Migrations/Forget_Traefik_2023/./tmp [yn]? y
removed './tmp/ca.filter.home.cer'
removed './tmp/ca.filter.home.key'
```

##### Update the Router to point towards the "new" Ingress Load Balancer

Changed from `192.168.1.80` to `192.168.1.20`.

##### Update Local DNS

I updated the local DNS records so the local hostnames now resolve to the new ingress IPs.
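
For illustration, assuming the local DNS is served by dnsmasq on the SRV host, the relevant entry would be a single wildcard pointing `filter.home` (and all its subdomains) at the local ingress LB:

```text
# /etc/dnsmasq.conf (sketch, assuming dnsmasq is the local DNS service)
address=/filter.home/192.168.1.21
```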

##### Monitor Public Cert Provisioning

```shell
kubectl get events -n istio-system --field-selector involvedObject.name=filterhome-domain-cert-public,involvedObject.kind=Certificate --sort-by=.metadata.creationTimestamp --watch
```

```text
LAST SEEN   TYPE     REASON      OBJECT                                       MESSAGE
13m         Normal   Issuing     certificate/filterhome-domain-cert-public   Issuing certificate as Secret does not exist
12m         Normal   Generated   certificate/filterhome-domain-cert-public   Stored new private key in temporary Secret resource "filterhome-domain-cert-public-2vdxk"
12m         Normal   Requested   certificate/filterhome-domain-cert-public   Created new CertificateRequest resource "filterhome-domain-cert-public-js69j"
8m46s       Normal   Issuing     certificate/filterhome-domain-cert-public   Issuing certificate as Secret does not exist
8m35s       Normal   Generated   certificate/filterhome-domain-cert-public   Stored new private key in temporary Secret resource "filterhome-domain-cert-public-n8w8s"
8m35s       Normal   Requested   certificate/filterhome-domain-cert-public   Created new CertificateRequest resource "filterhome-domain-cert-public-cb8ws"
103s        Normal   Issuing     certificate/filterhome-domain-cert-public   The certificate has been successfully issued
```
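
Once the certificate is issued, the TLS material should show up in the secret referenced by the public gateway; a quick check:

```shell
kubectl get secret filterhome-domain-cert-public -n istio-system
```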