[toc]
kubernetes 操作记录三
存储卷
emptyDir
# vim pod-vol-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
  namespace: default
  labels:
    app: myapp
    tier: frontend
  annotations:
    ssjinyao.com/create-by: "cluster admin"
spec:
  containers:
  # Both containers mount the same emptyDir volume "html":
  # busybox appends timestamps, myapp (httpd) serves them.
  - name: myapp
    image: ikubernetes/myapp:v1
    imagePullPolicy: IfNotPresent
    ports:
    - name: http
      containerPort: 80
    volumeMounts:
    - name: html
      mountPath: /data/www/html
    command: ["/bin/sh"]
    args: ["-c", "httpd -h /data/www/html && sleep 300000"]
  - name: busybox
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - name: html
      mountPath: /data/
    command: ["/bin/sh"]
    args: ["-c", "while sleep 2 ; do echo $(date) >> /data/index.html; done"]
  volumes:
  # emptyDir lives as long as the Pod and is shared by its containers.
  - name: html
    emptyDir: {}
# kubectl exec -it pod-demo -c busybox -- /bin/sh
/ # echo $(date) >> /data/index.html
/ # cat /data/index.html
Fri May 17 06:59:59 UTC 2019
# kubectl exec -it pod-demo -c myapp -- /bin/sh
/ # ls /data/www/html/index.html
/data/www/html/index.html
/ # cat /data/www/html/index.html
Fri May 17 06:59:59 UTC 2019
# curl 10.244.2.183
Fri May 17 07:33:24 UTC 2019
Fri May 17 07:33:26 UTC 2019
Fri May 17 07:33:28 UTC 2019
Fri May 17 07:33:30 UTC 2019
Fri May 17 07:33:32 UTC 2019
Fri May 17 07:33:34 UTC 2019
Fri May 17 07:33:36 UTC 2019
Fri May 17 07:33:38 UTC 2019
Fri May 17 07:33:40 UTC 2019
Fri May 17 07:33:42 UTC 2019
Fri May 17 07:33:44 UTC 2019
hostPath挂载使用
# vim pod-hostpath-vol.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-vol-hostpath
  namespace: default
spec:
  containers:
  - name: myapp
    # fixed typo: "ikubernets" -> "ikubernetes"
    image: ikubernetes/myapp:v1
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html/
  volumes:
  - name: html
    hostPath:
      path: /data/pod/volume1
      # DirectoryOrCreate: create the directory on the node if it is missing.
      type: DirectoryOrCreate
# kubectl apply -f pod-hostpath-vol.yaml
node1,node2,node3 分别创建以下目录
# mkdir -p /data/pod/volume1/ #注三个节点都要执行
# echo "node1.ssjinyao.com" >> /data/pod/volume1/index.html
# echo "node2.ssjinyao.com" >> /data/pod/volume1/index.html
# echo "node3.ssjinyao.com" >> /data/pod/volume1/index.html
# curl 10.244.1.155
node1.ssjinyao.com
可以看出当前运行在node1节点上
nfs 卷挂载使用
选择一台服务器,安装并开启nfs 服务
# mkdir -pv /data/volumes
# yum -y install nfs-utils
# vim /etc/exports
/data/volumes 10.1.87.83/24(rw,no_root_squash)
# systemctl start nfs
注: 其它节点也需要安装 nfs-utils 不然pod驱动不了
在node02上手动测试挂载
# mount -t nfs node03:/data/volumes/ /mnt
在kubernetes 集群中使用nfs volumes
apiVersion: v1
kind: Pod
metadata:
  name: pod-vol-nfs
  namespace: default
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html/
  volumes:
  - name: html
    # Every node that may run this Pod needs nfs-utils installed,
    # otherwise the kubelet cannot perform the mount.
    nfs:
      path: /data/volumes
      server: node03
# kubectl apply -f pod-vol-nfs.yaml
在nfs服务器上写入数据
# echo "<h1>nfs.ssjinyao.com</h1>" >> /data/volumes/index.html
尝试访问
# curl 10.244.2.185
<h1>nfs.ssjinyao.com</h1>
pv, pvc 的使用
nfs 服务器上创建多个目录
# mkdir /data/volumes/v{1,2,3,4,5}
# vim /etc/exports
/data/volumes/v1 10.1.87.83/24(rw,no_root_squash)
/data/volumes/v2 10.1.87.83/24(rw,no_root_squash)
/data/volumes/v3 10.1.87.83/24(rw,no_root_squash)
/data/volumes/v4 10.1.87.83/24(rw,no_root_squash)
/data/volumes/v5 10.1.87.83/24(rw,no_root_squash)
# exportfs -arv
exporting 10.1.87.83/24:/data/volumes/v5
exporting 10.1.87.83/24:/data/volumes/v4
exporting 10.1.87.83/24:/data/volumes/v3
exporting 10.1.87.83/24:/data/volumes/v2
exporting 10.1.87.83/24:/data/volumes/v1
# systemctl restart nfs
# vim pv-demo.yaml
# Five PersistentVolumes backed by the NFS exports on node03.
# PVs are cluster-scoped, so no namespace is set.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv01
  labels:
    name: pv001
spec:
  nfs:
    path: /data/volumes/v1
    server: node03
  accessModes: ["ReadWriteMany", "ReadWriteOnce"]
  capacity:
    storage: 5Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv02
  labels:
    name: pv002
spec:
  nfs:
    path: /data/volumes/v2
    server: node03
  accessModes: ["ReadWriteOnce"]
  capacity:
    storage: 10Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv03
  labels:
    name: pv003
spec:
  nfs:
    path: /data/volumes/v3
    server: node03
  accessModes: ["ReadWriteMany", "ReadWriteOnce"]
  capacity:
    storage: 20Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv04
  labels:
    name: pv004
spec:
  nfs:
    path: /data/volumes/v4
    server: node03
  accessModes: ["ReadWriteMany", "ReadWriteOnce"]
  capacity:
    storage: 10Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv05
  labels:
    name: pv005
spec:
  nfs:
    path: /data/volumes/v5
    server: node03
  accessModes: ["ReadWriteMany", "ReadWriteOnce"]
  capacity:
    storage: 10Gi
# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pv01 5Gi RWO,RWX Retain Available 74s
pv02 10Gi RWO Retain Available 74s
pv03 20Gi RWO,RWX Retain Available 74s
pv04 10Gi RWO,RWX Retain Available 74s
pv05 10Gi RWO,RWX Retain Available 74s
pvc 绑定 pv
# cat pod-vol-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mypvc
  namespace: default
spec:
  accessModes: ["ReadWriteMany"]
  resources:
    requests:
      # 11Gi + ReadWriteMany: only pv03 (20Gi, RWX) satisfies this,
      # which is why pv03 ends up Bound below.
      storage: 11Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-vol-pvc
  namespace: default
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html/
  volumes:
  - name: html
    persistentVolumeClaim:
      claimName: mypvc
# kubectl apply -f pod-vol-pvc.yaml
# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pv01 5Gi RWO,RWX Retain Available 40m
pv02 10Gi RWO Retain Available 40m
pv03 20Gi RWO,RWX Retain Bound default/mypvc 40m
pv04 10Gi RWO,RWX Retain Available 40m
pv05 10Gi RWO,RWX Retain Available 40m
# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
mypvc Bound pv03 20Gi RWO,RWX 8s
configmap 的使用
# kubectl create configmap nginx-config --from-literal=nginx_port=80 --from-literal=server_name=myapp.ssjinyao.com
configmap/nginx-config created
# kubectl get cm
NAME DATA AGE
nginx-config 2 25s
# kubectl describe cm nginx-config
Name: nginx-config
Namespace: default
Labels: <none>
Annotations: <none>
Data
====
nginx_port:
----
80
server_name:
----
myapp.ssjinyao.com
Events: <none>
# mkdir configmap
# cd configmap/
# vim www.conf
server {
    server_name myapp.ssjinyao.com;
    listen 80;
    root /data/web/html/;
}
# kubectl create configmap nginx-www --from-file=./www.conf
# kubectl get cm
NAME DATA AGE
nginx-config 2 4m4s
nginx-www 1 3s
# kubectl get cm nginx-www -o yaml
apiVersion: v1
data:
www.conf: "server {\n\tserver_name myapp.ssjinyao.com;\n listen 80;\n root
/data/web/html/;\n\n}\n"
kind: ConfigMap
metadata:
creationTimestamp: "2019-05-20T08:17:37Z"
name: nginx-www
namespace: default
resourceVersion: "3462936"
selfLink: /api/v1/namespaces/default/configmaps/nginx-www
uid: ba1625a7-7ad7-11e9-8902-525400c45563
Pod 引用 configmap ,环境变量方式
# vim pod-configmap.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-cm-1
  namespace: default
  labels:
    app: myapp
    tier: frontend
  annotations:
    ssjinyao.com/create-by: "cluster admin"
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    ports:
    - name: http
      containerPort: 80
    # Inject ConfigMap keys as environment variables.
    # Env vars are resolved once at container start and are NOT
    # updated when the ConfigMap changes (demonstrated below).
    env:
    - name: NGINX_SERVER_PORT
      valueFrom:
        configMapKeyRef:
          name: nginx-config
          key: nginx_port
    - name: NGINX_SERVER_NAME
      valueFrom:
        configMapKeyRef:
          name: nginx-config
          key: server_name
# kubectl apply -f pod-configmap.yaml
pod/pod-cm-1 created
# kubectl exec pod-cm-1 -it -- /bin/sh
/ # printenv | grep NGINX_SERVER
NGINX_SERVER_PORT=80
NGINX_SERVER_NAME=myapp.ssjinyao.com
当环境变量获取时,容器的变量不是实时更新的
# kubectl edit cm nginx-config
configmap/nginx-config edited
# 将nginx_port: "80" 改变 nginx_port: "8080"
# 可以看到configmap 是实时生效的
# kubectl describe cm nginx-config
Name: nginx-config
Namespace: default
Labels: <none>
Annotations: <none>
Data
====
nginx_port:
----
8080
server_name:
----
myapp.ssjinyao.com
Events: <none>
# 而容器中则不会时实更新变量
# kubectl exec pod-cm-1 -it -- /bin/sh
/ # printenv | grep NGINX_SERVER
NGINX_SERVER_PORT=80
NGINX_SERVER_NAME=myapp.ssjinyao.com
存储卷挂载方式
# vim pod-configmap2.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-cm-2
  namespace: default
  labels:
    app: myapp
    tier: frontend
  annotations:
    ssjinyao.com/create-by: "cluster admin"
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    ports:
    - name: http
      containerPort: 80
    # Mounting the ConfigMap as a volume: each key becomes a file,
    # and changes to the ConfigMap propagate into the container.
    volumeMounts:
    - name: nginxconf
      mountPath: /etc/nginx/config.d/
      readOnly: true
  volumes:
  - name: nginxconf
    configMap:
      name: nginx-config
# kubectl apply -f pod-configmap2.yaml
pod/pod-cm-2 created
# kubectl exec pod-cm-2 -it -- /bin/sh
/ # cd /etc/nginx/conf
conf.d/ config.d/
/ # cd /etc/nginx/config.d/
/etc/nginx/config.d # ls
nginx_port server_name
/etc/nginx/config.d # cat nginx_port
8080/etc/nginx/config.d #
/etc/nginx/config.d # cat server_name
myapp.ssjinyao.com/etc/nginx/config.d #
# kubectl edit cm nginx-config
configmap/nginx-config edited
# 将nginx_port: "8080" 改为 nginx_port: "8088"
# 稍等片刻后,接入容器发现nginx_port值实时更新
# kubectl edit cm nginx-config
Edit cancelled, no changes made.
[root@bj-zb-vm-ops-test5 configmap]# kubectl exec pod-cm-2 -it -- /bin/sh
/ # cd /etc/nginx/config.d/
/etc/nginx/config.d # cat nginx_port
8088/etc/nginx/config.d #
案例,配置文件焙进镜像
# vim pod-configmap3.yaml
# kubectl apply -f pod-configmap3.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-cm-3
  namespace: default
  labels:
    app: myapp
    tier: frontend
  annotations:
    ssjinyao.com/create-by: "cluster admin"
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    ports:
    - name: http
      containerPort: 80
    volumeMounts:
    - name: nginxconf
      # Mounted over nginx's real conf.d, so nginx-www's www.conf
      # becomes the live server configuration.
      mountPath: /etc/nginx/conf.d/
      readOnly: true
  volumes:
  - name: nginxconf
    configMap:
      name: nginx-www
# kubectl exec pod-cm-3 -it -- /bin/sh
/ # cd /etc/nginx/conf.d/
/etc/nginx/conf.d # cat www.conf
server {
server_name myapp.ssjinyao.com;
listen 80;
root /data/web/html/;
}
# kubectl edit cm nginx-www
configmap/nginx-www edited
# 将 listen 80 改为 listen 8080,稍等片刻后接入容器查看配置
# kubectl exec pod-cm-3 -it -- /bin/sh
/ # cd /etc/nginx/conf.d/
/etc/nginx/conf.d # cat www.conf
server {
server_name myapp.ssjinyao.com;
listen 8080;
root /data/web/html/;
}
# echo "<h1> Nginx Server Configured by CM </h1>" > /data/web/html/index.html
# 说明配置文件焙进镜像的方式是可以实时更新的;当然配置文件生效,需要nginx -s reload
# vim /etc/hosts 加入本地解析至Pod ip
10.244.1.158 myapp.ssjinyao.com
# curl http://myapp.ssjinyao.com:8080
<h1> Nginx Server Configured by CM </h1>
secret 的使用
configmap 都是明文存数据的,私钥和证书要放在 secret 中,密码要创建为 generic secret 而非 configmap
# kubectl create secret generic mysql-root-password --from-literal=passwod=H@ndih3
secret/mysql-root-password created
# kubectl get secret
NAME TYPE DATA AGE
default-token-2sgn5 kubernetes.io/service-account-token 3 24d
mysql-root-password Opaque 1 45s
tomcat-ingress-secret kubernetes.io/tls 2 4d
# kubectl describe secret mysql-root-password
Name: mysql-root-password
Namespace: default
Labels: <none>
Annotations: <none>
Type: Opaque
Data
====
passwod: 7 bytes
# 对比configmap 的Data数据,secret 值是不显示的
# kubectl describe configmap nginx-www
Name: nginx-www
Namespace: default
Labels: <none>
Annotations: <none>
Data
====
www.conf:
----
server {
server_name myapp.ssjinyao.com;
listen 8080;
root /data/web/html/;
}
Events: <none>
# 而 configmap Data的值是显示的
# 我们也可以这样看
# kubectl get secret mysql-root-password -o yaml
apiVersion: v1
data:
passwod: SEBuZGloMw==
kind: Secret
metadata:
creationTimestamp: "2019-05-20T09:20:53Z"
name: mysql-root-password
namespace: default
resourceVersion: "3469600"
selfLink: /api/v1/namespaces/default/secrets/mysql-root-password
uid: 90cd9aaa-7ae0-11e9-8902-525400c45563
type: Opaque
可以看出数据还是有的,只是 base64 编码而非加密,因此安全性并没有那么好
可以直接用 base64 -d 进行解密
# echo SEBuZGloMw== | base64 -d
H@ndih3
# vim pod-secret-1.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-secret-1
  namespace: default
  labels:
    app: myapp
    tier: frontend
  annotations:
    ssjinyao.com/create-by: "cluster admin"
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    ports:
    - name: http
      containerPort: 80
    env:
    - name: MYSQL_ROOT_PASSWORD
      valueFrom:
        secretKeyRef:
          name: mysql-root-password
          # NOTE(review): "passwod" is misspelled, but it matches the key
          # actually created by the kubectl create secret command above —
          # do not "fix" it here alone or the lookup will fail.
          key: passwod
# kubectl exec pod-secret-1 -- printenv | grep MYSQL
MYSQL_ROOT_PASSWORD=H@ndih3
statefulset 的使用
nfs服务器还是在node3 上,配置信息如下
# cat /etc/exports
/data/volumes/v1 10.1.87.83/24(rw,no_root_squash)
/data/volumes/v2 10.1.87.83/24(rw,no_root_squash)
/data/volumes/v3 10.1.87.83/24(rw,no_root_squash)
/data/volumes/v4 10.1.87.83/24(rw,no_root_squash)
/data/volumes/v5 10.1.87.83/24(rw,no_root_squash)
注:确保三台node服务器都安装了 nfs-utils
pv改动及创建
# cat volumes/pv-demo.yaml
# Recreated PVs for the StatefulSet demo: v1-v3 shrunk to 5Gi so that
# the 5Gi volumeClaimTemplates below can bind them.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv01
  labels:
    name: pv001
spec:
  nfs:
    path: /data/volumes/v1
    server: node03
  accessModes: ["ReadWriteMany", "ReadWriteOnce"]
  capacity:
    storage: 5Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv02
  labels:
    name: pv002
spec:
  nfs:
    path: /data/volumes/v2
    server: node03
  accessModes: ["ReadWriteOnce"]
  capacity:
    storage: 5Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv03
  labels:
    name: pv003
spec:
  nfs:
    path: /data/volumes/v3
    server: node03
  accessModes: ["ReadWriteMany", "ReadWriteOnce"]
  capacity:
    storage: 5Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv04
  labels:
    name: pv004
spec:
  nfs:
    path: /data/volumes/v4
    server: node03
  accessModes: ["ReadWriteMany", "ReadWriteOnce"]
  capacity:
    storage: 10Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv05
  labels:
    name: pv005
spec:
  nfs:
    path: /data/volumes/v5
    server: node03
  accessModes: ["ReadWriteMany", "ReadWriteOnce"]
  capacity:
    storage: 10Gi
# kubectl apply -f volumes/pv-demo.yaml
# vim stateful-demo.yaml
# Headless Service (clusterIP: None) gives each StatefulSet Pod a stable
# DNS name: <pod>.<service>.<namespace>.svc.cluster.local.
apiVersion: v1
kind: Service
metadata:
  name: myapp
  labels:
    app: myapp
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: myapp-pod
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: myapp
spec:
  serviceName: myapp
  replicas: 3
  selector:
    matchLabels:
      app: myapp-pod
  template:
    metadata:
      labels:
        app: myapp-pod
    spec:
      containers:
      - name: myapp
        image: ikubernetes/myapp:v1
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: myappdata
          mountPath: /usr/share/nginx/html
  # One PVC per replica (myappdata-myapp-0, -1, -2), each binding a
  # 5Gi ReadWriteOnce PV from the set created above.
  volumeClaimTemplates:
  - metadata:
      name: myappdata
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 5Gi
# kubectl get pods
NAME READY STATUS RESTARTS AGE
client 1/1 Running 0 23d
myapp-0 1/1 Running 0 7m10s
myapp-1 1/1 Running 0 7m5s
myapp-2 1/1 Running 0 7m1s
# kubectl get sts
NAME READY AGE
myapp 3/3 7m32s
# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pv01 5Gi RWO,RWX Retain Bound default/myappdata-myapp-1 8m12s
pv02 5Gi RWO Retain Bound default/myappdata-myapp-0 8m12s
pv03 5Gi RWO,RWX Retain Bound default/myappdata-myapp-2 8m12s
pv04 10Gi RWO,RWX Retain Available 8m12s
pv05 10Gi RWO,RWX Retain Available 8m12s
# kubectl exec -it myapp-0 -- /bin/sh
/ # nslookup myapp-0.myapp.default.svc.cluster.local
nslookup: can't resolve '(null)': Name does not resolve
Name: myapp-0.myapp.default.svc.cluster.local
Address 1: 10.244.1.162 myapp-0.myapp.default.svc.cluster.local
/ # nslookup myapp-1.myapp.default.svc.cluster.local
nslookup: can't resolve '(null)': Name does not resolve
Name: myapp-1.myapp.default.svc.cluster.local
Address 1: 10.244.2.192 myapp-1.myapp.default.svc.cluster.local
/ # nslookup myapp-2.myapp.default.svc.cluster.local
nslookup: can't resolve '(null)': Name does not resolve
Name: myapp-2.myapp.default.svc.cluster.local
Address 1: 10.244.3.172 myapp-2.myapp.default.svc.cluster.local
pod 数量扩展
# kubectl scale sts myapp --replicas=5
statefulset.apps/myapp scaled
# kubectl patch sts myapp -p '{"spec":{"replicas":2}}'
statefulset.apps/myapp patched
# kubectl patch sts myapp -p '{"spec":{"updateStrategy":{"rollingUpdate":{"partition":4}}}}'
statefulset.apps/myapp patched
# kubectl set image sts/myapp myapp=ikubernetes/myapp:v2
statefulset.apps/myapp image updated
# kubectl get sts -o wide
NAME READY AGE CONTAINERS IMAGES
myapp 2/2 53m myapp ikubernetes/myapp:v2