云原生第四课作业

一、基于Deployment控制器运行pod

1.1yaml文件

root@master1:~/manifests/deployment# cat deployment-nginx

apiVersion: apps/v1

kind: Deployment

metadata:

  name: deployment-nginx

spec:

  replicas: 2

  selector:

    matchLabels:

      app: nginx

      tired: frented

  template:

    metadata:

      labels:

        app: nginx

        tired: frented

    spec:

      containers:

      - name: nginx-c

        image: nginx

        ports:

        - name: http

          containerPort: 80

1.2生成pod

root@master1:~/manifests/deployment# kubectl apply -f deployment-nginx --dry-run=client

deployment.apps/deployment-nginx created (dry run)

root@master1:~/manifests/deployment# kubectl apply -f deployment-nginx

deployment.apps/deployment-nginx created

root@master1:~/manifests/deployment# kubectl get pods -w -o wide

NAME                                READY  STATUS              RESTARTS  AGE  IP      NODE        NOMINATED NODE  READINESS GATES

deployment-nginx-69998b8865-7bscd  0/1    ContainerCreating  0          67s  <none>  10.0.0.24  <none>          <none>

deployment-nginx-69998b8865-z78gm  0/1    ContainerCreating  0          67s  <none>  10.0.0.25  <none>          <none>

deployment-nginx-69998b8865-z78gm  0/1    ErrImagePull        0          79s  172.200.104.3  10.0.0.25  <none>          <none>

deployment-nginx-69998b8865-7bscd  1/1    Running            0          80s  172.200.166.131  10.0.0.24  <none>          <none>

deployment-nginx-69998b8865-z78gm  0/1    ImagePullBackOff    0          94s  172.200.104.3    10.0.0.25  <none>          <none>

deployment-nginx-69998b8865-z78gm  1/1    Running            0          2m17s  172.200.104.3    10.0.0.25  <none>          <none>

二、总结nodeport+svc的访问流程

client->nodeport->nginx.svc->nginx-pod->tomcat.svc->tomcat-pod

三、使用nfs为pod提供数据持久化及数据共享

3.1、yaml文件

root@master1:~/manifests/deployment# cat deployment-nfs

apiVersion: apps/v1

kind: Deployment

metadata:

  name: deployment-nginx

spec:

  replicas: 2

  selector:

    matchLabels:

      app: nginx

      tired: frented

  template:

    metadata:

      labels:

        app: nginx

        tired: frented

    spec:

      containers:

      - name: nginx-c

        image: nginx

        ports:

        - name: http

          containerPort: 80

        volumeMounts:

        - name: nfs-test

          mountPath: /usr/share/nginx/html

      volumes:

      - name: nfs-test

        nfs:

          server: 10.0.0.20

          path: /nfs

---

apiVersion: v1

kind: Service

metadata:

  name: service-nfs

spec:

  ports:

  - name: http

    port: 81

    targetPort: 80

    nodePort: 30080

    protocol: TCP

  type: NodePort

  selector:

    app: nginx

    tired: frented

3.2NFS SERVER 配置

[root@haproxy1 ~]# cat /etc/exports

/nfs          10.0.0.0/24(rw,no_root_squash)  # no_root_squash是必须的

[root@haproxy1 ~]# ls /nfs

index.html

[root@haproxy1 ~]# cat /nfs/index.html

test-nfs

3.3启动pod然后修改pod中的/etc/nginx/nginx.conf文件

root@master1:~/manifests/deployment# kubectl apply -f deployment-nfs

deployment.apps/deployment-nginx created

service/service-nfs created

root@master1:~/manifests/deployment# kubectl get pods

NAME                                READY  STATUS    RESTARTS  AGE

deployment-nginx-85c7ff4d88-4n4d4  1/1    Running  0          3h15m

deployment-nginx-85c7ff4d88-hr4hw  1/1    Running  0          3h15m

root@deployment-nginx-85c7ff4d88-4n4d4:/# cat /etc/nginx/nginx.conf

user  root;                 #必须改为root

worker_processes  auto;


root@deployment-nginx-85c7ff4d88-4n4d4:/# nginx -s reload

2022/01/20 10:46:56 [notice] 508#508: signal process started

3.4测试

root@master1:~/manifests/deployment# kubectl get svc

NAME          TYPE        CLUSTER-IP      EXTERNAL-IP  PORT(S)        AGE

kubernetes    ClusterIP  10.100.0.1      <none>        443/TCP        5d1h

service-nfs  NodePort    10.100.148.236  <none>        81:30080/TCP  3h19m

root@master1:~/manifests/deployment# curl 10.100.148.236:81

test-nfs

四、使用configmap为pod提供配置

4.1configmap的配置文件

root@master1:~/manifests/configmap# cat nginx-configmap.yaml

apiVersion: v1

kind: ConfigMap

metadata:

  name: nginx-configmap

  namespace: default

data:

  default: |

      server {

          listen        80;

          server_name    www.mysite.com;

          index          index.html;

          location / {

              root  /data/nginx/html;

              if (!-e $request_filename) {

                  rewrite ^/(.*) /index.html last;

              }

          }

      }

4.2其它配置文件

root@master1:~/manifests/configmap# cat configmap.yaml

apiVersion: apps/v1

kind: Deployment

metadata:

  name: deployment-nginx

spec:

  replicas: 2

  selector:

    matchLabels:

      app: nginx

      tired: frented

  template:

    metadata:

      labels:

        app: nginx

        tired: frented

    spec:

      containers:

      - name: nginx-c

        image: nginx

        ports:

        - name: http

          containerPort: 80

        volumeMounts:

        - name: nfs-test

          mountPath: /data/nginx/html

        - name: cfg-test

          mountPath: /etc/nginx/conf.n60

      volumes:

      - name: nfs-test

        nfs:

          server: 10.0.0.20

          path: /nfs

      - name: cfg-test

        configMap:

          items:

          - key: default

            path: mysite.conf

          name: nginx-configmap

---

apiVersion: v1

kind: Service

metadata:

  name: service-nfs

spec:

  ports:

  - name: http

    port: 81

    targetPort: 80

    nodePort: 30080

    protocol: TCP

  type: NodePort

  selector:

    app: nginx

    tired: frented

4.3启动

root@master1:~/manifests/configmap# kubectl apply -f nginx-configmap.yaml

configmap/nginx-configmap configured

root@master1:~/manifests/configmap# kubectl apply -f configmap.yaml

deployment.apps/deployment-nginx created

root@master1:~/manifests/configmap# kubectl get configmap

NAME              DATA  AGE

kube-root-ca.crt  1      5d3h

nginx-configmap    1      77m

root@master1:~/manifests/configmap# kubectl get pods

NAME                                READY  STATUS    RESTARTS  AGE

deployment-nginx-75d5547b74-5gjdn  1/1    Running  0          56m

deployment-nginx-75d5547b74-7tr7z  1/1    Running  0          56m

service/service-nfs created

修改nginx的主配置文件,启动用户改为root,加载configmap导入的配置文件

configmap加载的配置

root@deployment-nginx-75d5547b74-5gjdn:/# cat /etc/nginx/conf.n60/mysite.conf

server { listen        80; server_name    www.mysite.com; index          index.html;

location / { root  /data/nginx/html; if (!-e $request_filename) { rewrite ^/(.*) /index.html last; } } }

nfs挂载

root@deployment-nginx-75d5547b74-5gjdn:/# df -h

Filesystem                        Size  Used Avail Use% Mounted on

overlay                            29G  9.0G  19G  33% /

tmpfs                              64M    0  64M  0% /dev

tmpfs                              2.0G    0  2.0G  0% /sys/fs/cgroup

/dev/mapper/ubuntu--vg-ubuntu--lv  29G  9.0G  19G  33% /etc/hosts

shm                                64M    0  64M  0% /dev/shm

10.0.0.20:/nfs                      38G  1.7G  36G  5% /data/nginx/html

tmpfs                              3.6G  12K  3.6G  1% /run/secrets/kubernetes.io/serviceaccount

tmpfs                              2.0G    0  2.0G  0% /proc/acpi

tmpfs                              2.0G    0  2.0G  0% /proc/scsi

tmpfs                              2.0G    0  2.0G  0% /sys/firmware

root@deployment-nginx-75d5547b74-5gjdn:/# cat /data/nginx/html/index.html

test-nfs

nginx的主配置文件/etc/nginx/nginx.conf

user root;  #改为root用户

include /etc/nginx/conf.n60/*.conf;  # 包含configmap导入的配置文件

修改客户端的host文件解析

root@master1:~/manifests/configmap# cat /etc/hosts

127.0.0.1 localhost

127.0.1.1 cncf-k8s-md

# The following lines are desirable for IPv6 capable host

10.0.0.24  www.mysite.com

4.4访问测试

root@master1:~/manifests/configmap# curl www.mysite.com:30080

test-nfs

五、使用存活探针和就绪探针对pod服务进行探测

5.1readiness探针

错误的探测路径设定

^Croot@master1:~/manifests/Probes# cat probe.yaml

apiVersion: apps/v1

kind: Deployment

metadata:

  name: probe-test

spec:

  replicas: 2

  selector:

    matchLabels:

      app: nginx-probe

      tired: frented-probe

  template:

    metadata:

      labels:

        app: nginx-probe

        tired: frented-probe

    spec:

      containers:

      - name: nginx-probe

        image: nginx

        ports:

        - name: http

          containerPort: 80

        volumeMounts:

        - name: nfs-test

          mountPath: /usr/share/nginx/html

        - name: cfg-test

          mountPath: /etc/nginx/conf.n60

        readinessProbe:

          initialDelaySeconds: 5

          periodSeconds: 3

          timeoutSeconds: 5

          successThreshold: 1

          failureThreshold: 3

          httpGet:

            path: /healthz1            #正确的地址是/healthz

            port: 80

      volumes:

      - name: nfs-test

        nfs:

          server: 10.0.0.20

          path: /nfs

      - name: cfg-test

        configMap:

          items:

          - key: default

            path: mysite.conf

          name: nginx-configmap

---

apiVersion: v1

kind: Service

metadata:

  name: service-nfs-probe

spec:

  ports:

  - name: http

    port: 81

    targetPort: 80

    nodePort: 30081

    protocol: TCP

  type: NodePort

  selector:

    app: nginx-probe

    tired: frented-probe

应用配置

root@master1:~/manifests/Probes# kubectl apply -f probe.yaml

deployment.apps/probe-test configured

service/service-nfs-probe unchanged

root@master1:~/manifests/Probes# kubectl get pods

NAME                          READY  STATUS              RESTARTS  AGE

probe-test-764bc564c7-2m47c  0/1    ContainerCreating  0          11s

probe-test-764bc564c7-h8vrl  0/1    ContainerCreating  0          12s

ready一直为0

日志:

root@master1:~/manifests/deployment# kubectl logs probe-test-7bd9f8cdb8-4mmmr

/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration

/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/

/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh

10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf

10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf

/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh

/docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh

/docker-entrypoint.sh: Configuration complete; ready for start up

2022/01/21 00:38:33 [notice] 1#1: using the "epoll" event method

2022/01/21 00:38:33 [notice] 1#1: nginx/1.21.5

2022/01/21 00:38:33 [notice] 1#1: built by gcc 10.2.1 20210110 (Debian 10.2.1-6)

2022/01/21 00:38:33 [notice] 1#1: OS: Linux 5.4.0-94-generic

2022/01/21 00:38:33 [notice] 1#1: getrlimit(RLIMIT_NOFILE): 1048576:1048576

2022/01/21 00:38:33 [notice] 1#1: start worker processes

2022/01/21 00:38:33 [notice] 1#1: start worker process 30

2022/01/21 00:38:33 [notice] 1#1: start worker process 31

2022/01/21 00:38:40 [error] 31#31: *1 open() "/usr/share/nginx/html/healthz1" failed (2: No such file or directory), client: 10.0.0.25, server: localhost, request: "GET /healthz1 HTTP/1.1", host: "172.200.104.26:80"

10.0.0.25 - - [21/Jan/2022:00:38:40 +0000] "GET /healthz1 HTTP/1.1" 404 153 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:38:43 +0000] "GET /healthz1 HTTP/1.1" 404 153 "-" "kube-probe/1.22" "-"

2022/01/21 00:38:43 [error] 31#31: *2 open() "/usr/share/nginx/html/healthz1" failed (2: No such file or directory), client: 10.0.0.25, server: localhost, request: "GET /healthz1 HTTP/1.1", host: "172.200.104.26:80"

10.0.0.25 - - [21/Jan/2022:00:38:46 +0000] "GET /healthz1 HTTP/1.1" 404 153 "-" "kube-probe/1.22" "-"

2022/01/21 00:38:46 [error] 31#31: *3 open() "/usr/share/nginx/html/healthz1" failed (2: No such file or directory), client: 10.0.0.25, server: localhost, request: "GET /healthz1 HTTP/1.1", host: "172.200.104.26:80"

10.0.0.25 - - [21/Jan/2022:00:38:49 +0000] "GET /healthz1 HTTP/1.1" 404 153 "-" "kube-probe/1.22" "-"

2022/01/21 00:38:49 [error] 31#31: *4 open() "/usr/share/nginx/html/healthz1" failed (2: No such file or directory), client: 10.0.0.25, server: localhost, request: "GET /healthz1 HTTP/1.1", host: "172.200.104.26:80"

2022/01/21 00:38:52 [error] 31#31: *5 open() "/usr/share/nginx/html/healthz1" failed (2: No such file or directory), client: 10.0.0.25, server: localhost, request: "GET /healthz1 HTTP/1.1", host: "172.200.104.26:80"

10.0.0.25 - - [21/Jan/2022:00:38:52 +0000] "GET /healthz1 HTTP/1.1" 404 153 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:38:55 +0000] "GET /healthz1 HTTP/1.1" 404 153 "-" "kube-probe/1.22" "-"

2022/01/21 00:38:55 [error] 31#31: *6 open() "/usr/share/nginx/html/healthz1" failed (2: No such file or directory), client: 10.0.0.25, server: localhost, request: "GET /healthz1 HTTP/1.1", host: "172.200.104.26:80"

修改为正确的路径

httpGet:

            path: /healthz

应用配置

root@master1:~/manifests/Probes# kubectl apply -f probe.yaml

deployment.apps/probe-test configured

service/service-nfs-probe unchanged

root@master1:~/manifests/Probes# kubectl get pods

NAME                          READY  STATUS    RESTARTS  AGE

probe-test-764bc564c7-n6f9v  1/1    Running  0          96s

probe-test-764bc564c7-wnn2q  1/1    Running  0          72s

root@master1:~/manifests/deployment# kubectl logs probe-test-764bc564c7-n6f9v

/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration

/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/

/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh

10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf

10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf

/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh

/docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh

/docker-entrypoint.sh: Configuration complete; ready for start up

2022/01/21 00:47:37 [notice] 1#1: using the "epoll" event method

2022/01/21 00:47:37 [notice] 1#1: nginx/1.21.5

2022/01/21 00:47:37 [notice] 1#1: built by gcc 10.2.1 20210110 (Debian 10.2.1-6)

2022/01/21 00:47:37 [notice] 1#1: OS: Linux 5.4.0-94-generic

2022/01/21 00:47:37 [notice] 1#1: getrlimit(RLIMIT_NOFILE): 1048576:1048576

2022/01/21 00:47:37 [notice] 1#1: start worker processes

2022/01/21 00:47:37 [notice] 1#1: start worker process 30

2022/01/21 00:47:37 [notice] 1#1: start worker process 31

10.0.0.25 - - [21/Jan/2022:00:47:44 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:47:47 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:47:50 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:47:53 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:47:56 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:47:59 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:48:02 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:48:05 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:48:08 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:48:11 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:48:14 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:48:17 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:48:20 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:48:23 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:48:26 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:48:29 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:48:32 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:48:35 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:48:38 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:48:41 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:48:44 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:48:47 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:48:50 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:48:53 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:48:56 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:48:59 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:49:02 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:49:05 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:49:08 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:49:11 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:49:14 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:49:17 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:49:20 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:49:23 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:49:26 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:49:29 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:49:32 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:00:49:35 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

5.2存活探针

错误的配置

root@master1:~/manifests/Probes# cat probe.yaml

apiVersion: apps/v1

kind: Deployment

metadata:

  name: probe-test

spec:

  replicas: 2

  selector:

    matchLabels:

      app: nginx-probe

      tired: frented-probe

  template:

    metadata:

      labels:

        app: nginx-probe

        tired: frented-probe

    spec:

      containers:

      - name: nginx-probe

        image: nginx

        ports:

        - name: http

          containerPort: 80

        volumeMounts:

        - name: nfs-test

          mountPath: /usr/share/nginx/html

        - name: cfg-test

          mountPath: /etc/nginx/conf.n60

        readinessProbe:

          initialDelaySeconds: 5

          periodSeconds: 3

          timeoutSeconds: 5

          successThreshold: 1

          failureThreshold: 3

          httpGet:

            path: /healthz

            port: 80

        livenessProbe:

          initialDelaySeconds: 5

          periodSeconds: 3

          timeoutSeconds: 5

          successThreshold: 1

          failureThreshold: 3

          httpGet:

            path: /index.html1            #正确的是index.html

            port: 80

      volumes:

      - name: nfs-test

        nfs:

          server: 10.0.0.20

          path: /nfs

      - name: cfg-test

        configMap:

          items:

          - key: default

            path: mysite.conf

          name: nginx-configmap

---

apiVersion: v1

kind: Service

metadata:

  name: service-nfs-probe

spec:

  ports:

  - name: http

    port: 81

    targetPort: 80

    nodePort: 30081

    protocol: TCP

  type: NodePort

  selector:

    app: nginx-probe

    tired: frented-probe

应用配置

root@master1:~/manifests/Probes# kubectl apply -f probe.yaml

deployment.apps/probe-test configured

service/service-nfs-probe unchanged

root@master1:~/manifests/Probes# kubectl get pods -w

NAME                          READY  STATUS              RESTARTS  AGE

probe-test-5f5b66794c-2g98q  0/1    ContainerCreating  0          7s

probe-test-5f5b66794c-9zv2w  1/1    Running            0          31s

probe-test-764bc564c7-wnn2q  1/1    Running            0          19m

probe-test-5f5b66794c-9zv2w  0/1    Running            0          39s

probe-test-5f5b66794c-2g98q  0/1    Running            0          18s

probe-test-5f5b66794c-9zv2w  0/1    Running            1 (17s ago)  47s

probe-test-5f5b66794c-2g98q  1/1    Running            0            25s

probe-test-5f5b66794c-9zv2w  1/1    Running            1 (21s ago)  51s

probe-test-764bc564c7-wnn2q  1/1    Terminating        0            19m

probe-test-764bc564c7-wnn2q  0/1    Terminating        0            19m

probe-test-764bc564c7-wnn2q  0/1    Terminating        0            19m

probe-test-764bc564c7-wnn2q  0/1    Terminating        0            19m

probe-test-5f5b66794c-2g98q  0/1    Running            0            40s

probe-test-5f5b66794c-9zv2w  0/1    Running            1 (36s ago)  66s

probe-test-5f5b66794c-2g98q  0/1    Running            1 (16s ago)  47s

probe-test-5f5b66794c-9zv2w  0/1    Running            2 (17s ago)  74s

probe-test-5f5b66794c-9zv2w  1/1    Running            2 (21s ago)  78s

probe-test-5f5b66794c-2g98q  1/1    Running            1 (24s ago)  55s

probe-test-5f5b66794c-9zv2w  0/1    Running            2 (36s ago)  93s

probe-test-5f5b66794c-2g98q  0/1    Running            1 (39s ago)  70s

probe-test-5f5b66794c-2g98q  0/1    Running            2 (16s ago)  77s

probe-test-5f5b66794c-9zv2w  0/1    Running            3 (17s ago)  101s

probe-test-5f5b66794c-9zv2w  1/1    Running            3 (21s ago)  105s

probe-test-5f5b66794c-2g98q  1/1    Running            2 (24s ago)  85s

probe-test-5f5b66794c-9zv2w  0/1    Running            3 (36s ago)  2m

probe-test-5f5b66794c-2g98q  0/1    Running            2 (39s ago)  100s

probe-test-5f5b66794c-9zv2w  0/1    Running            4 (16s ago)  2m7s

probe-test-5f5b66794c-2g98q  0/1    Running            3 (16s ago)  107s

probe-test-5f5b66794c-9zv2w  1/1    Running            4 (21s ago)  2m12s

probe-test-5f5b66794c-9zv2w  0/1    CrashLoopBackOff    4 (0s ago)    2m18s

probe-test-5f5b66794c-2g98q  1/1    Running            3 (24s ago)  115s

probe-test-5f5b66794c-2g98q  0/1    Running            3 (39s ago)  2m10s


一直重启


改为正确的配置

httpGet:

            path: /index.html

root@master1:~/manifests/Probes# kubectl apply -f probe.yaml

deployment.apps/probe-test configured

service/service-nfs-probe unchanged

root@master1:~/manifests/Probes# kubectl get pods

NAME                          READY  STATUS    RESTARTS  AGE

probe-test-54b567f858-l6b4k  1/1    Running  0          50s

probe-test-54b567f858-zfwbh  1/1    Running  0          26s

root@master1:~/manifests/Probes# kubectl logs probe-test-54b567f858-l6b4k

/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration

/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/

/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh

10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf

10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf

/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh

/docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh

/docker-entrypoint.sh: Configuration complete; ready for start up

2022/01/21 01:10:26 [notice] 1#1: using the "epoll" event method

2022/01/21 01:10:26 [notice] 1#1: nginx/1.21.5

2022/01/21 01:10:26 [notice] 1#1: built by gcc 10.2.1 20210110 (Debian 10.2.1-6)

2022/01/21 01:10:26 [notice] 1#1: OS: Linux 5.4.0-94-generic

2022/01/21 01:10:26 [notice] 1#1: getrlimit(RLIMIT_NOFILE): 1048576:1048576

2022/01/21 01:10:26 [notice] 1#1: start worker processes

2022/01/21 01:10:26 [notice] 1#1: start worker process 30

2022/01/21 01:10:26 [notice] 1#1: start worker process 31

10.0.0.25 - - [21/Jan/2022:01:10:33 +0000] "GET /index.html HTTP/1.1" 200 9 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:10:33 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:10:36 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:10:36 +0000] "GET /index.html HTTP/1.1" 200 9 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:10:39 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:10:39 +0000] "GET /index.html HTTP/1.1" 200 9 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:10:42 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:10:42 +0000] "GET /index.html HTTP/1.1" 200 9 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:10:45 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:10:45 +0000] "GET /index.html HTTP/1.1" 200 9 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:10:48 +0000] "GET /index.html HTTP/1.1" 200 9 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:10:48 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:10:51 +0000] "GET /index.html HTTP/1.1" 200 9 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:10:51 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:10:54 +0000] "GET /index.html HTTP/1.1" 200 9 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:10:54 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:10:57 +0000] "GET /index.html HTTP/1.1" 200 9 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:10:57 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:00 +0000] "GET /index.html HTTP/1.1" 200 9 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:00 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:03 +0000] "GET /index.html HTTP/1.1" 200 9 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:03 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:06 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:06 +0000] "GET /index.html HTTP/1.1" 200 9 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:09 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:09 +0000] "GET /index.html HTTP/1.1" 200 9 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:12 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:12 +0000] "GET /index.html HTTP/1.1" 200 9 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:15 +0000] "GET /index.html HTTP/1.1" 200 9 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:15 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:18 +0000] "GET /index.html HTTP/1.1" 200 9 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:18 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:21 +0000] "GET /index.html HTTP/1.1" 200 9 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:21 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:24 +0000] "GET /index.html HTTP/1.1" 200 9 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:24 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:27 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:27 +0000] "GET /index.html HTTP/1.1" 200 9 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:30 +0000] "GET /index.html HTTP/1.1" 200 9 "-" "kube-probe/1.22" "-"

10.0.0.25 - - [21/Jan/2022:01:11:30 +0000] "GET /healthz HTTP/1.1" 200 13 "-" "kube-probe/1.22" "-"

©著作权归作者所有,转载或内容合作请联系作者
  • 序言:七十年代末,一起剥皮案震惊了整个滨河市,随后出现的几起案子,更是在滨河造成了极大的恐慌,老刑警刘岩,带你破解...
    沈念sama阅读 225,165评论 6 523
  • 序言:滨河连续发生了三起死亡事件,死亡现场离奇诡异,居然都是意外死亡,警方通过查阅死者的电脑和手机,发现死者居然都...
    沈念sama阅读 96,476评论 3 405
  • 文/潘晓璐 我一进店门,熙熙楼的掌柜王于贵愁眉苦脸地迎上来,“玉大人,你说我怎么就摊上这事。” “怎么了?”我有些...
    开封第一讲书人阅读 172,446评论 0 368
  • 文/不坏的土叔 我叫张陵,是天一观的道长。 经常有香客问我,道长,这世上最难降的妖魔是什么? 我笑而不...
    开封第一讲书人阅读 61,157评论 1 301
  • 正文 为了忘掉前任,我火速办了婚礼,结果婚礼上,老公的妹妹穿的比我还像新娘。我一直安慰自己,他们只是感情好,可当我...
    茶点故事阅读 70,164评论 6 400
  • 文/花漫 我一把揭开白布。 她就那样静静地躺着,像睡着了一般。 火红的嫁衣衬着肌肤如雪。 梳的纹丝不乱的头发上,一...
    开封第一讲书人阅读 53,615评论 1 316
  • 那天,我揣着相机与录音,去河边找鬼。 笑死,一个胖子当着我的面吹牛,可吹牛的内容都是我干的。 我是一名探鬼主播,决...
    沈念sama阅读 41,969评论 3 430
  • 文/苍兰香墨 我猛地睁开眼,长吁一口气:“原来是场噩梦啊……” “哼!你这毒妇竟也来了?” 一声冷哼从身侧响起,我...
    开封第一讲书人阅读 40,959评论 0 279
  • 序言:老挝万荣一对情侣失踪,失踪者是张志新(化名)和其女友刘颖,没想到半个月后,有当地人在树林里发现了一具尸体,经...
    沈念sama阅读 47,495评论 1 324
  • 正文 独居荒郊野岭守林人离奇死亡,尸身上长有42处带血的脓包…… 初始之章·张勋 以下内容为张勋视角 年9月15日...
    茶点故事阅读 39,529评论 3 347
  • 正文 我和宋清朗相恋三年,在试婚纱的时候发现自己被绿了。 大学时的朋友给我发了我未婚夫和他白月光在一起吃饭的照片。...
    茶点故事阅读 41,641评论 1 355
  • 序言:一个原本活蹦乱跳的男人离奇死亡,死状恐怖,灵堂内的尸体忽然破棺而出,到底是诈尸还是另有隐情,我是刑警宁泽,带...
    沈念sama阅读 37,233评论 5 351
  • 正文 年R本政府宣布,位于F岛的核电站,受9级特大地震影响,放射性物质发生泄漏。R本人自食恶果不足惜,却给世界环境...
    茶点故事阅读 42,976评论 3 340
  • 文/蒙蒙 一、第九天 我趴在偏房一处隐蔽的房顶上张望。 院中可真热闹,春花似锦、人声如沸。这庄子的主人今日做“春日...
    开封第一讲书人阅读 33,407评论 0 25
  • 文/苍兰香墨 我抬头看了看天上的太阳。三九已至,却和暖如春,着一层夹袄步出监牢的瞬间,已是汗流浃背。 一阵脚步声响...
    开封第一讲书人阅读 34,552评论 1 277
  • 我被黑心中介骗来泰国打工, 没想到刚下飞机就差点儿被人妖公主榨干…… 1. 我叫王不留,地道东北人。 一个月前我还...
    沈念sama阅读 50,218评论 3 381
  • 正文 我出身青楼,却偏偏与公主长得像,于是被迫代替她去往敌国和亲。 传闻我的和亲对象是个残疾皇子,可洞房花烛夜当晚...
    茶点故事阅读 46,715评论 2 366

推荐阅读更多精彩内容