Dashboard added

Justine Pelletreau 2022-08-18 16:21:55 +02:00
parent d2564bc6f1
commit a5644f9f13
23 changed files with 442 additions and 586 deletions

1
.gitignore vendored Normal file

@ -0,0 +1 @@
certmanager

5
dashboard/README.md Normal file

@ -0,0 +1,5 @@
# Getting the token
Once everything is deployed, run this on the master node:
```
sudo k3s kubectl -n kubernetes-dashboard create token admin-user
```
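To sign in, browse to the host configured in the dashboard ingress (k8s.sq.lan in this commit) and paste the token. A minimal rollout sketch, assuming the manifests are applied from the `dashboard/` directory; the ingress file name below is a placeholder, since it is not shown in this diff:
```
# Deploy the Dashboard, the admin ServiceAccount and the ingress
sudo k3s kubectl apply -f dashboard/recommended.yaml
sudo k3s kubectl apply -f dashboard/account.yaml
sudo k3s kubectl apply -f dashboard/ingress.yaml   # placeholder name, not shown in this diff
# Then generate the login token for the admin-user ServiceAccount
sudo k3s kubectl -n kubernetes-dashboard create token admin-user
```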

20
dashboard/account.yaml Normal file

@ -0,0 +1,20 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard


@ -0,0 +1,21 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kubernetes-dashboard-ingr
  namespace: kubernetes-dashboard
  annotations:
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
  ingressClassName: nginx
  rules:
  - host: k8s.sq.lan
    http:
      paths:
      - pathType: Prefix
        backend:
          service:
            name: kubernetes-dashboard
            port:
              number: 443
        path: /

306
dashboard/recommended.yaml Normal file

@ -0,0 +1,306 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
  # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.5.0
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
            # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.7
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
            - mountPath: /tmp
              name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}


@ -1,129 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    run: blackbox-exporter
  name: blackbox-exporter
  namespace: monitoring
spec:
  replicas: 1
  selector:
    matchLabels:
      run: blackbox-exporter
  template:
    metadata:
      labels:
        run: blackbox-exporter
    spec:
      containers:
      - image: prom/blackbox-exporter:master
        name: blackbox-exporter
        volumeMounts:
        - mountPath: /etc/blackbox_exporter
          name: blackbox-exporter
          readOnly: true
      restartPolicy: Always
      volumes:
      - name: blackbox-exporter
        persistentVolumeClaim:
          claimName: blackbox-exporter
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    run: grafana
  name: grafana
  namespace: monitoring
spec:
  replicas: 1
  selector:
    matchLabels:
      run: grafana
  template:
    metadata:
      labels:
        run: grafana
    spec:
      initContainers:
      - name: volume-mount-hack
        image: busybox:1.28
        command: ["sh", "-c", "chown -R 472:472 /etc/grafana && chown -R 472:472 /var/lib/grafana"]
        volumeMounts:
        - mountPath: /var/lib/grafana
          readOnly: false
          name: grafana-data
        - mountPath: /etc/grafana
          readOnly: false
          name: grafana-conf
      volumes:
      - name: grafana-data
        persistentVolumeClaim:
          claimName: grafana-data
      - name: grafana-conf
        persistentVolumeClaim:
          claimName: grafana-conf
      containers:
      - image: grafana/grafana
        name: grafana
        volumeMounts:
        - mountPath: /var/lib/grafana
          readOnly: false
          name: grafana-data
        - mountPath: /etc/grafana
          readOnly: false
          name: grafana-conf
      restartPolicy: Always
      volumes:
      - name: grafana-data
        persistentVolumeClaim:
          claimName: grafana-data
      - name: grafana-conf
        persistentVolumeClaim:
          claimName: grafana-conf
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    run: prometheus
  name: prometheus
  namespace: monitoring
spec:
  replicas: 1
  selector:
    matchLabels:
      run: prometheus
  template:
    metadata:
      labels:
        run: prometheus
    spec:
      containers:
      - args:
        - --storage.tsdb.retention.time=1y
        - --config.file=/etc/prometheus/prometheus.yml
        image: prom/prometheus
        name: prometheus
        ports:
        - containerPort: 9090
        volumeMounts:
        - mountPath: /etc/prometheus
          readOnly: true
          name: prometheus
        - mountPath: /prometheus/data
          readOnly: false
          name: prometheus-data
      restartPolicy: Always
      volumes:
      - name: prometheus
        persistentVolumeClaim:
          claimName: prometheus
      - name: prometheus-data
        persistentVolumeClaim:
          claimName: prometheus-data


@ -1,34 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: prometheus
  namespace: monitoring
rules:
- apiGroups: [""]
  resources:
  - nodes
  - nodes/proxy
  - services
  - endpoints
  - pods
  verbs: ["get", "list", "watch"]
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: prometheus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus
subjects:
- kind: ServiceAccount
  name: default
  namespace: monitoring


@ -1,49 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    run: grafana
  name: grafana
  namespace: monitoring
spec:
  ports:
  - name: "grafweb"
    port: 80
    targetPort: 3000
  selector:
    run: grafana
---
apiVersion: v1
kind: Service
metadata:
  labels:
    run: prometheus
  name: prometheus
  namespace: monitoring
spec:
  ports:
  - name: "prom"
    port: 9090
    targetPort: 9090
  selector:
    run: prometheus
  type: LoadBalancer
---
apiVersion: v1
kind: Service
metadata:
  labels:
    run: blackbox-exporter
  name: blackbox-exporter
  namespace: monitoring
spec:
  ports:
  - name: "bbox"
    port: 9115
    targetPort: 9115
  selector:
    run: blackbox-exporter


@ -1,75 +0,0 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: blackbox-exporter
  namespace: monitoring
spec:
  accessModes:
  - ReadWriteMany
  storageClassName: ""
  resources:
    requests:
      storage: 10Gi
  volumeName: blackbox-exporter
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: grafana-data
  namespace: monitoring
spec:
  accessModes:
  - ReadWriteMany
  storageClassName: ""
  resources:
    requests:
      storage: 50Gi
  volumeName: grafana-data
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: grafana-conf
  namespace: monitoring
spec:
  accessModes:
  - ReadWriteMany
  storageClassName: ""
  resources:
    requests:
      storage: 10Gi
  volumeName: grafana-conf
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: prometheus
  namespace: monitoring
spec:
  accessModes:
  - ReadWriteMany
  storageClassName: ""
  resources:
    requests:
      storage: 10Gi
  volumeName: prometheus
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: prometheus-data
  namespace: monitoring
spec:
  accessModes:
  - ReadWriteMany
  storageClassName: ""
  resources:
    requests:
      storage: 10Gi
  volumeName: prometheus-data


@ -1,85 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: blackbox-exporter
  namespace: monitoring
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteMany
  nfs:
    server: nas.sq.lan
    path: "/swarmdata/grafana/blackbox"
  mountOptions:
  - nfsvers=4.2
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: grafana-data
  namespace: monitoring
spec:
  capacity:
    storage: 50Gi
  accessModes:
  - ReadWriteMany
  nfs:
    server: nas.sq.lan
    path: "/swarmdata/grafana/grafana-data"
  mountOptions:
  - nfsvers=4.2
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: grafana-conf
  namespace: monitoring
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteMany
  nfs:
    server: nas.sq.lan
    path: "/swarmdata/grafana/grafana-conf"
  mountOptions:
  - nfsvers=4.2
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: prometheus
  namespace: monitoring
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteMany
  nfs:
    server: nas.sq.lan
    path: "/swarmdata/grafana/prometheus-conf"
  mountOptions:
  - nfsvers=4.2
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: prometheus-data
  namespace: monitoring
spec:
  capacity:
    storage: 30Gi
  accessModes:
  - ReadWriteMany
  nfs:
    server: nas.sq.lan
    path: "/swarmdata/grafana/prometheus-data"
  mountOptions:
  - nfsvers=4.2


@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: monitoring


@ -1,109 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/component: exporter
    app.kubernetes.io/name: kube-state-metrics
    app.kubernetes.io/version: 2.3.0
  name: kube-state-metrics
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - secrets
  - nodes
  - pods
  - services
  - resourcequotas
  - replicationcontrollers
  - limitranges
  - persistentvolumeclaims
  - persistentvolumes
  - namespaces
  - endpoints
  verbs:
  - list
  - watch
- apiGroups:
  - apps
  resources:
  - statefulsets
  - daemonsets
  - deployments
  - replicasets
  verbs:
  - list
  - watch
- apiGroups:
  - batch
  resources:
  - cronjobs
  - jobs
  verbs:
  - list
  - watch
- apiGroups:
  - autoscaling
  resources:
  - horizontalpodautoscalers
  verbs:
  - list
  - watch
- apiGroups:
  - authentication.k8s.io
  resources:
  - tokenreviews
  verbs:
  - create
- apiGroups:
  - authorization.k8s.io
  resources:
  - subjectaccessreviews
  verbs:
  - create
- apiGroups:
  - policy
  resources:
  - poddisruptionbudgets
  verbs:
  - list
  - watch
- apiGroups:
  - certificates.k8s.io
  resources:
  - certificatesigningrequests
  verbs:
  - list
  - watch
- apiGroups:
  - storage.k8s.io
  resources:
  - storageclasses
  - volumeattachments
  verbs:
  - list
  - watch
- apiGroups:
  - admissionregistration.k8s.io
  resources:
  - mutatingwebhookconfigurations
  - validatingwebhookconfigurations
  verbs:
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - networkpolicies
  - ingresses
  verbs:
  - list
  - watch
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - list
  - watch


@ -1,50 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/component: exporter
    app.kubernetes.io/name: kube-state-metrics
    app.kubernetes.io/version: 2.3.0
  name: kube-state-metrics
  namespace: monitoring
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: kube-state-metrics
  template:
    metadata:
      labels:
        app.kubernetes.io/component: exporter
        app.kubernetes.io/name: kube-state-metrics
        app.kubernetes.io/version: 2.3.0
    spec:
      automountServiceAccountToken: true
      containers:
      - image: k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.3.0
        imagePullPolicy: Always
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
          initialDelaySeconds: 5
          timeoutSeconds: 5
        name: kube-state-metrics
        ports:
        - containerPort: 8080
          name: http-metrics
        - containerPort: 8081
          name: telemetry
        readinessProbe:
          httpGet:
            path: /
            port: 8081
          initialDelaySeconds: 5
          timeoutSeconds: 5
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          runAsUser: 65534
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: kube-state-metrics


@ -1,16 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/component: exporter
    app.kubernetes.io/name: kube-state-metrics
    app.kubernetes.io/version: 2.3.0
  name: kube-state-metrics
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kube-state-metrics
subjects:
- kind: ServiceAccount
  name: kube-state-metrics
  namespace: monitoring


@ -1,20 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/component: exporter
    app.kubernetes.io/name: kube-state-metrics
    app.kubernetes.io/version: 2.3.0
  name: kube-state-metrics
  namespace: monitoring
spec:
  type: LoadBalancer
  ports:
  - name: http-metrics
    port: 8080
    targetPort: http-metrics
  - name: telemetry
    port: 8081
    targetPort: telemetry
  selector:
    app.kubernetes.io/name: kube-state-metrics


@ -1,10 +0,0 @@
apiVersion: v1
automountServiceAccountToken: false
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/component: exporter
    app.kubernetes.io/name: kube-state-metrics
    app.kubernetes.io/version: 2.3.0
  name: kube-state-metrics
  namespace: monitoring


@ -0,0 +1,28 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    run: site
  name: site
spec:
  replicas: 1
  selector:
    matchLabels:
      run: site
  template:
    metadata:
      labels:
        run: site
    spec:
      containers:
      - image: nginx:latest
        name: site
        imagePullPolicy: "Always"
        volumeMounts:
        - mountPath: /usr/share/nginx/html
          name: site
      restartPolicy: Always
      volumes:
      - name: site
        persistentVolumeClaim:
          claimName: site


@ -1,18 +1,17 @@
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
-  name: grafana-ingr
-  namespace: monitoring
+  name: site-ingr
 spec:
   ingressClassName: nginx
   rules:
-  - host: graf.squi.fr
+  - host: justinepelletreau.com
     http:
       paths:
       - pathType: Prefix
         backend:
           service:
-            name: grafana
+            name: site
             port:
               number: 80
         path: /


@ -0,0 +1,15 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: site
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteMany
  nfs:
    server: nas.sq.lan
    path: "/swarmdata/personnalsite"
  mountOptions:
  - nfsvers=4.2

13
persosite/site-pvc.yaml Normal file

@ -0,0 +1,13 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: site
spec:
  accessModes:
  - ReadWriteMany
  storageClassName: ""
  resources:
    requests:
      storage: 10Gi
  volumeName: site


@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    run: site
  name: site
spec:
  ports:
  - name: "site"
    port: 80
    targetPort: 80
  selector:
    run: site


@ -2,8 +2,9 @@ apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
   name: notes-ingr
+  annotations:
+    kubernetes.io/ingress.class: "nginx"
 spec:
-  ingressClassName: nginx
   rules:
   - host: notes.squi.fr
     http:

15
testpod/testpod-depl.yaml Normal file

@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
  name: dnsutils
  namespace: default
spec:
  nodeName: swarm2
  containers:
  - name: dnsutils
    image: k8s.gcr.io/e2e-test-images/jessie-dnsutils:1.3
    command:
    - sleep
    - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always