一、概述
Metrics-Server组件目的:获取集群中pod、节点等负载信息;
hpa资源目的:通过metrics-server获取的pod负载信息,自动伸缩创建pod;
二、安装部署 Metrics-Server组件
安装目的,就是给k8s集群安装top命令
1、下载Metrics-Server资源清单
wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/high-availability-1.21+.yaml
下载好的 high-availability-1.21+.yaml文件
# ServiceAccount the metrics-server pods run as.
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
# Aggregated ClusterRole: lets admin/edit/view roles read metrics.k8s.io resources.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
# Permissions metrics-server itself needs: scrape node metrics, list pods/nodes.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - nodes/metrics
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
# Allows metrics-server to read the extension-apiserver authentication config.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
# Allows metrics-server to delegate auth decisions to the kube-apiserver.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
# Binds the system:metrics-server ClusterRole to the ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
# Service fronting the metrics-server pods; the APIService below targets it.
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    k8s-app: metrics-server
---
# High-availability Deployment: 2 replicas spread across nodes via pod anti-affinity.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  replicas: 2
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      affinity:
        podAntiAffinity:
          # Never schedule two metrics-server replicas on the same node.
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                k8s-app: metrics-server
            namespaces:
            - kube-system
            topologyKey: kubernetes.io/hostname
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=10250
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        image: registry.k8s.io/metrics-server/metrics-server:v0.7.1
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
        name: metrics-server
        ports:
        - containerPort: 10250
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          initialDelaySeconds: 20
          periodSeconds: 10
        resources:
          requests:
            cpu: 100m
            memory: 200Mi
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - ALL
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
          seccompProfile:
            type: RuntimeDefault
        volumeMounts:
        # Writable scratch dir (root FS is read-only); holds the self-signed certs.
        - mountPath: /tmp
          name: tmp-dir
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
      - emptyDir: {}
        name: tmp-dir
---
# Keep at least one replica available during voluntary disruptions (e.g. node drain).
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  minAvailable: 1
  selector:
    matchLabels:
      k8s-app: metrics-server
---
# Registers metrics.k8s.io/v1beta1 with the aggregation layer, backed by the Service above.
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100
2、编辑Metrics-Server的资源清单
....
      topologyKey: kubernetes.io/hostname
      containers:
      - args:
        # 允许使用不安全的证书(跳过 kubelet 证书校验)
        - --kubelet-insecure-tls
        - --cert-dir=/tmp
        - --secure-port=10250
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        # 原镜像国内无法拉取,替换为阿里云镜像
        #image: registry.k8s.io/metrics-server/metrics-server:v0.7.1
        image: registry.aliyuncs.com/google_containers/metrics-server:v0.6.3
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
....
3、创建Metrics-Server资源
[root@master study-demo]# kubectl apply -f high-availability-1.21+.yaml
serviceaccount/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
service/metrics-server created
deployment.apps/metrics-server created
poddisruptionbudget.policy/metrics-server created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
4、查看安装结果
4.1 查看pod
[root@master study-demo]# kubectl get pod --show-labels -A | grep metrics
4.2 kubectl top node 命令查看节点信息
4.3 kubectl top pods -A 命令查看pod信息
三、hpa资源实现pod水平伸缩(自动扩缩容)
- 当资源使用超过一定的范围,会自动扩容,但是扩容数量不会超过最大pod数量
- 扩容时无延迟,只要监控资源超过阈值,则会自动创建pod
- 当资源使用率恢复到阈值以下时,需要等待一段时间才会释放,大概5分钟
3.1 编写deployment资源清单
[root@master hpa-demo]# cat deployment.yaml
# Test Deployment used as the HPA scale target.
# The container just tails /etc/hosts so it stays running with near-zero CPU
# until we generate load with `stress`.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dm-hpa
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s: dolphin
  template:
    metadata:
      labels:
        k8s: dolphin
    spec:
      containers:
      - name: c1
        image: centos:7
        command:
        - tail
        - -f
        - /etc/hosts
        # CPU request/limit are required: the HPA utilization target is
        # computed as a percentage of the container's CPU *request*.
        resources:
          requests:
            cpu: "50m"
          limits:
            cpu: "150m"
3.2 编写hpa资源清单绑定deployment
[root@master hpa-demo]# cat hpa.yaml
# HorizontalPodAutoscaler bound to the dm-hpa Deployment above.
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: hpa-tools
spec:
  maxReplicas: 10          # upper limit when scaling out
  minReplicas: 2           # lower limit when scaling in
  scaleTargetRef:          # the workload to scale
    apiVersion: "apps/v1"
    kind: Deployment
    name: dm-hpa
  # Scale out/in around 95% average CPU utilization (relative to the pod's CPU request).
  targetCPUUtilizationPercentage: 95
3.3 创建上面两个资源
[root@master hpa-demo]# kubectl apply -f deployment.yaml
[root@master hpa-demo]# kubectl apply -f hpa.yaml
3.4 查看hpa资源
可以看到我们deployment.yaml文件中的副本数是1,创建hpa并绑定该deployment后,会按照hpa的配置调整副本数,这里minReplicas配置的是2,所以自动补齐到了2个pod。
四、压测测试
1,进入pod,安装stress工具
· 进入pod容器
[root@master hpa]# kubectl exec dm-hpa-5bb4dd448d-ks2rt -it -- sh
· 安装阿里云源和epel源
sh-4.2# yum -y install wget
sh-4.2# wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
sh-4.2# wget -O /etc/yum.repos.d/epel.repo https://mirrors.aliyun.com/repo/epel-7.repo
· 安装压测工具
sh-4.2# yum -y install stress
2,开始使用命令压测pod
sh-4.2# stress --cpu 8 --io 4 --vm 2 --vm-bytes 128M --timeout 20m
3,查看hpa资源的负载情况
[root@master ~]# kubectl get hpa -o wide
可以看到:
1,我们创建的deploy资源只有一个副本;
2,我们创建的hpa资源之后,设置最小值是2,最大值是10 ;
3,我们在查看pod,可以看见,pod变成了2个;
4,我们进入容器,开始压测,将负载压测到超过95%;
5,再次查看pod,发现变成了3个,自动创建了一个;
6,关闭压测,约5分钟后,pod又回归到了2个;
7,至此,hpa的pod自动伸缩,测试完毕;