Kubernetes 常用命令

  • 创建命名空间
root@master:/home/ljy/桌面# kubectl create namespace cloud
namespace/cloud created
  • 查看集群信息
    kubectl cluster-info
root@master:/home/ljy/桌面# kubectl cluster-info
Kubernetes master is running at https://10.0.2.15:6443
KubeDNS is running at https://10.0.2.15:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

  • 查看各个组件状态
    kubectl get cs
root@master:/home/ljy/桌面# kubectl  get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE             ERROR
etcd-0               Healthy   {"health":"true"}   
scheduler            Healthy   ok                  
controller-manager   Healthy   ok  
  • 查看服务器节点
    kubectl get nodes
root@master:/home/ljy/桌面# kubectl get nodes
NAME     STATUS   ROLES    AGE    VERSION
master   Ready    master   5d3h   v1.19.4

  • 查看服务器节点详情
    kubectl get nodes -o wide
root@master:/home/ljy/桌面# kubectl get nodes -o wide
NAME     STATUS   ROLES    AGE    VERSION   INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION     CONTAINER-RUNTIME
master   Ready    master   5d3h   v1.19.4   10.0.2.15     <none>        Ubuntu 20.04.1 LTS   5.4.0-56-generic   docker://19.3.14

  • 节点打标签
kubectl label nodes 节点名称 labelName=标签名称
  • 查看节点标签
kubectl get node --show-labels
  • 删除节点标签
kubectl label  node 节点名称  labelName-
  • 查看命名空间
    kubectl get namespaces
root@master:/home/ljy/桌面# kubectl get namespaces
NAME              STATUS   AGE
default           Active   5d3h
kube-node-lease   Active   5d3h
kube-public       Active   5d3h
kube-system       Active   5d3h
  • 查看pod节点
    kubectl get pod -n 命名空间名称
root@master:/home/ljy/桌面# kubectl get pod -n kube-system
NAME                                      READY   STATUS    RESTARTS   AGE
cloud-communal-service-5d867f6dd4-gd4rq   1/1     Running   0          52m
coredns-f9fd979d6-ffr4w                   1/1     Running   1          5d3h
coredns-f9fd979d6-sr2rq                   1/1     Running   1          5d3h
etcd-master                               1/1     Running   1          5d3h
kube-apiserver-master                     1/1     Running   1          5d3h
kube-controller-manager-master            0/1     Error     3          5d3h
kube-flannel-ds-29jbd                     1/1     Running   0          53m
kube-proxy-n7hjf                          1/1     Running   1          5d3h
kube-scheduler-master                     0/1     Running   3          5d3h

  • 查看pod节点详情
    kubectl get pod -n 命名空间名称 -o wide
root@master:/home/ljy/桌面# kubectl get pod -n kube-system -o wide
NAME                                      READY   STATUS             RESTARTS   AGE    IP           NODE     NOMINATED NODE   READINESS GATES
cloud-communal-service-5d867f6dd4-gd4rq   1/1     Running            0          63m    10.244.0.9   master   <none>           <none>
coredns-f9fd979d6-ffr4w                   1/1     Running            1          5d3h   10.244.0.8   master   <none>           <none>
coredns-f9fd979d6-sr2rq                   1/1     Running            1          5d3h   10.244.0.7   master   <none>           <none>
etcd-master                               1/1     Running            1          5d3h   10.0.2.15    master   <none>           <none>
kube-apiserver-master                     1/1     Running            1          5d3h   10.0.2.15    master   <none>           <none>
kube-controller-manager-master            0/1     CrashLoopBackOff   5          5d3h   10.0.2.15    master   <none>           <none>
kube-flannel-ds-29jbd                     1/1     Running            0          64m    10.0.2.15    master   <none>           <none>
kube-proxy-n7hjf                          1/1     Running            1          5d3h   10.0.2.15    master   <none>           <none>
kube-scheduler-master                     0/1     Running            5          5d3h   10.0.2.15    master   <none>           <none>

  • 查看运行中的pod节点
    kubectl get pods -n 命名空间名称 | grep Running
root@master:/home/ljy/桌面# kubectl get pod -n kube-system | grep Running
cloud-communal-service-5d867f6dd4-gd4rq   1/1     Running            0          65m
coredns-f9fd979d6-ffr4w                   1/1     Running            1          5d3h
coredns-f9fd979d6-sr2rq                   1/1     Running            1          5d3h
etcd-master                               1/1     Running            1          5d3h
kube-apiserver-master                     1/1     Running            1          5d3h
kube-flannel-ds-29jbd                     1/1     Running            0          66m
kube-proxy-n7hjf                          1/1     Running            1          5d3h

  • 查看异常的pod节点
    kubectl get pods -n 命名空间名称 | grep Error
root@master:/home/ljy/桌面# kubectl get pod -n kube-system | grep Error
kube-scheduler-master                     0/1     Error              6          5d3h

  • 查看pod节点详情与事件（排查异常时使用；查看应用日志请用下文的 kubectl logs）
    kubectl describe pod pod名称 -n 命名空间名称
root@master:/home/ljy/桌面# kubectl describe pod kube-scheduler-master -n kube-system
Name:                 kube-scheduler-master
Namespace:            kube-system
Priority:             2000001000
Priority Class Name:  system-node-critical
Node:                 master/10.0.2.15
Start Time:           Thu, 10 Dec 2020 11:30:24 +0800
Labels:               component=kube-scheduler
                      tier=control-plane
Annotations:          kubernetes.io/config.hash: eacd4884e052077eeb923552f174ef74
                      kubernetes.io/config.mirror: eacd4884e052077eeb923552f174ef74
                      kubernetes.io/config.seen: 2020-12-10T11:30:15.783223384+08:00
                      kubernetes.io/config.source: file
Status:               Running
IP:                   10.0.2.15
IPs:
  IP:           10.0.2.15
Controlled By:  Node/master
Containers:
  kube-scheduler:
    Container ID:  docker://fbb4f14a570612f44851c4efce1156cf908671229ee8eb5713d318e98585e8bd
    Image:         k8s.gcr.io/kube-scheduler:v1.19.4
    Image ID:      docker://sha256:14cd22f7abe78e59b77c30819906920b3c5677596ef8967b649b87c13e8e65f4
    Port:          <none>
    Host Port:     <none>
    Command:
      kube-scheduler
      --authentication-kubeconfig=/etc/kubernetes/scheduler.conf
      --authorization-kubeconfig=/etc/kubernetes/scheduler.conf
      --bind-address=127.0.0.1
      --kubeconfig=/etc/kubernetes/scheduler.conf
      --leader-elect=true
    State:          Waiting
      Reason:       CrashLoopBackOff
    Last State:     Terminated
      Reason:       Error
      Exit Code:    255
      Started:      Tue, 15 Dec 2020 15:16:05 +0800
      Finished:     Tue, 15 Dec 2020 15:17:08 +0800
    Ready:          False
    Restart Count:  7
    Requests:
      cpu:        100m
    Liveness:     http-get https://127.0.0.1:10259/healthz delay=10s timeout=15s period=10s #success=1 #failure=8
    Startup:      http-get https://127.0.0.1:10259/healthz delay=10s timeout=15s period=10s #success=1 #failure=24
    Environment:  <none>
    Mounts:
      /etc/kubernetes/scheduler.conf from kubeconfig (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  kubeconfig:
    Type:          HostPath (bare host directory volume)
    Path:          /etc/kubernetes/scheduler.conf
    HostPathType:  FileOrCreate
QoS Class:         Burstable
Node-Selectors:    <none>
Tolerations:       :NoExecute op=Exists
Events:
  Type     Reason     Age                  From     Message
  ----     ------     ----                 ----     -------
  Warning  Unhealthy  26m                  kubelet  Liveness probe failed: Get "https://127.0.0.1:10259/healthz": write tcp 127.0.0.1:45170->127.0.0.1:10259: write: broken pipe
  Warning  Unhealthy  24m                  kubelet  Liveness probe failed: Get "https://127.0.0.1:10259/healthz": write tcp 127.0.0.1:45632->127.0.0.1:10259: write: connection reset by peer
  Warning  Unhealthy  22m                  kubelet  Liveness probe failed: Get "https://127.0.0.1:10259/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
  Warning  Unhealthy  21m (x11 over 25m)   kubelet  Liveness probe failed: Get "https://127.0.0.1:10259/healthz": dial tcp 127.0.0.1:10259: connect: connection refused
  Normal   Started    21m (x4 over 98m)    kubelet  Started container kube-scheduler
  Warning  Unhealthy  18m                  kubelet  Liveness probe failed: Get "https://127.0.0.1:10259/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
  Normal   Killing    16m                  kubelet  Container kube-scheduler failed liveness probe, will be restarted
  Normal   Pulled     15m (x5 over 98m)    kubelet  Container image "k8s.gcr.io/kube-scheduler:v1.19.4" already present on machine
  Normal   Created    15m (x5 over 98m)    kubelet  Created container kube-scheduler
  Warning  BackOff    5m46s (x8 over 11m)  kubelet  Back-off restarting failed container

  • 根据yaml创建pod
    kubectl apply -f yaml后缀的文件名
root@master:/home/ljy/桌面#  kubectl  apply -f kube-flannel.yml 
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
  • 重启pod
kubectl replace --force -f xxxx.yaml 
  • 让pod优雅滚动重启
kubectl rollout restart deployment 你的deployment名称
# 使用 kubectl get deployment -n 命名空间 -o wide 命令查看 deployment名称
root@master:/home/ljy/桌面#  kubectl get deployment -n cloud -o wide
NAME                     READY   UP-TO-DATE   AVAILABLE   AGE     CONTAINERS               IMAGES                                                             SELECTOR
cloud-communal-service   1/1     1            1           2d22h   cloud-communal-service   registry.cn-chengdu.aliyuncs.com/lbyjwwyqt/cloud-communal:v1.0.0   app=cloud-communal-service

root@master:/home/ljy/桌面# kubectl rollout restart deployment cloud-communal-service -n cloud
deployment.apps/cloud-communal-service restarted

  • 删除pod节点
kubectl delete pod pod名称 -n 命名空间名称
kubectl get pod -n kube-system

如果发现pod还在 则使用下面方法删除试试

kubectl get deployment -n kube-system
NAME               READY   UP-TO-DATE   AVAILABLE   AGE
nginx-deployment   1/1     1            1           39m

kubectl delete deployment -n kube-system nginx-deployment
deployment.apps "nginx-deployment" deleted

  • 根据yaml 删除
kubectl delete -f ingress.yaml
root@master:/home/ljy/桌面# kubectl delete -f ingress.yaml
Warning: extensions/v1beta1 Ingress is deprecated in v1.14+, unavailable in v1.22+; use networking.k8s.io/v1 Ingress
ingress.extensions "api-ingress" deleted

  • 进入pod容器内部
    kubectl exec -it pod名称 -n 命名空间名称 -- /bin/bash
kubectl exec -it nginx-service-777878686f-wx567 -- /bin/bash

  • 查看pod应用服务日志
    持续输出日志
kubectl logs -f pod名称 -n 命名空间名称

输出最后多少条

kubectl logs --tail=100 pod名称 -n 命名空间名称
  • 查看svc服务
    kubectl get svc
root@master:/home/ljy/桌面# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   5d4h

  • 查看service
root@master:/home/ljy/桌面# kubectl get service -o wide -n kube-system
NAME       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE    SELECTOR
kube-dns   ClusterIP   10.96.0.10   <none>        53/UDP,53/TCP,9153/TCP   6d5h   k8s-app=kube-dns

  • 获取端点
kubectl get endpoints --all-namespaces
root@master:/home/ljy/桌面# kubectl get endpoints --all-namespaces
NAMESPACE       NAME                                 ENDPOINTS                                                  AGE
default         kubernetes                           10.0.2.15:6443                                             10d
ingress-nginx   ingress-nginx-controller             10.0.2.15:443,10.0.2.15:80                                 2d19h
ingress-nginx   ingress-nginx-controller-admission   10.0.2.15:8443                                             2d19h
kube-system     kube-controller-manager              <none>                                                     10d
kube-system     kube-dns                             10.244.0.44:53,10.244.0.45:53,10.244.0.44:53 + 3 more...   10d
kube-system     kube-scheduler                       <none>                                                     10d
  • 查看所有 pod
kubectl get pod -A -o wide
root@master:/home/ljy/桌面# kubectl get pod -A -o wide
NAMESPACE       NAME                                       READY   STATUS    RESTARTS   AGE     IP            NODE     NOMINATED NODE   READINESS GATES
ingress-nginx   ingress-nginx-controller-9f64489f5-7pvwf   1/1     Running   2          2d19h   10.0.2.15     master   <none>           <none>
kube-system     cloud-communal-service-5d867f6dd4-gd4rq    1/1     Running   67         5d20h   10.244.0.43   master   <none>           <none>
kube-system     coredns-f9fd979d6-ffr4w                    1/1     Running   9          10d     10.244.0.45   master   <none>           <none>
kube-system     coredns-f9fd979d6-sr2rq                    1/1     Running   9          10d     10.244.0.44   master   <none>           <none>
kube-system     etcd-master                                1/1     Running   9          10d     10.0.2.15     master   <none>           <none>
kube-system     kube-apiserver-master                      1/1     Running   9          10d     10.0.2.15     master   <none>           <none>
kube-system     kube-controller-manager-master             1/1     Running   35         10d     10.0.2.15     master   <none>           <none>
kube-system     kube-flannel-ds-29jbd                      1/1     Running   12         5d20h   10.0.2.15     master   <none>           <none>
kube-system     kube-proxy-n7hjf                           1/1     Running   9          10d     10.0.2.15     master   <none>           <none>
kube-system     kube-scheduler-master                      1/1     Running   35         10d     10.0.2.15     master   <none>           <none>
  • 为Deployment设置新的镜像
    kubectl set image deployment/自己的deployment名称 -n 命名空间 镜像名称=镜像
kubectl set image deployment/nginx-deployment  -n nginx nginx=nginx:1.9.1
  • 查看Deployment部署的历史记录
    kubectl rollout history deployment/自己的deployment名称 -n 命名空间
kubectl rollout history deployment/nginx-deployment -n nginx
  • 撤销本次发布回滚到上一个部署版本
    kubectl rollout undo deployment 自己的deployment名称
kubectl rollout undo deployment/nginx-deployment  -n nginx
  • 回滚到指定版本
    kubectl rollout undo deployment/自己的deployment名称 --to-revision=版本号
kubectl rollout undo deployment/nginx-deployment --to-revision=2 -n nginx
  • 查看 回滚操作状态
    kubectl rollout status deployment 自己的deployment名称 -n 命名空间
kubectl rollout status deployment nginx-deployment -n nginx
最后编辑于
©著作权归作者所有,转载或内容合作请联系作者
  • 序言:七十年代末,一起剥皮案震惊了整个滨河市,随后出现的几起案子,更是在滨河造成了极大的恐慌,老刑警刘岩,带你破解...
    沈念sama阅读 206,013评论 6 481
  • 序言:滨河连续发生了三起死亡事件,死亡现场离奇诡异,居然都是意外死亡,警方通过查阅死者的电脑和手机,发现死者居然都...
    沈念sama阅读 88,205评论 2 382
  • 文/潘晓璐 我一进店门,熙熙楼的掌柜王于贵愁眉苦脸地迎上来,“玉大人,你说我怎么就摊上这事。” “怎么了?”我有些...
    开封第一讲书人阅读 152,370评论 0 342
  • 文/不坏的土叔 我叫张陵,是天一观的道长。 经常有香客问我,道长,这世上最难降的妖魔是什么? 我笑而不...
    开封第一讲书人阅读 55,168评论 1 278
  • 正文 为了忘掉前任,我火速办了婚礼,结果婚礼上,老公的妹妹穿的比我还像新娘。我一直安慰自己,他们只是感情好,可当我...
    茶点故事阅读 64,153评论 5 371
  • 文/花漫 我一把揭开白布。 她就那样静静地躺着,像睡着了一般。 火红的嫁衣衬着肌肤如雪。 梳的纹丝不乱的头发上,一...
    开封第一讲书人阅读 48,954评论 1 283
  • 那天,我揣着相机与录音,去河边找鬼。 笑死,一个胖子当着我的面吹牛,可吹牛的内容都是我干的。 我是一名探鬼主播,决...
    沈念sama阅读 38,271评论 3 399
  • 文/苍兰香墨 我猛地睁开眼,长吁一口气:“原来是场噩梦啊……” “哼!你这毒妇竟也来了?” 一声冷哼从身侧响起,我...
    开封第一讲书人阅读 36,916评论 0 259
  • 序言:老挝万荣一对情侣失踪,失踪者是张志新(化名)和其女友刘颖,没想到半个月后,有当地人在树林里发现了一具尸体,经...
    沈念sama阅读 43,382评论 1 300
  • 正文 独居荒郊野岭守林人离奇死亡,尸身上长有42处带血的脓包…… 初始之章·张勋 以下内容为张勋视角 年9月15日...
    茶点故事阅读 35,877评论 2 323
  • 正文 我和宋清朗相恋三年,在试婚纱的时候发现自己被绿了。 大学时的朋友给我发了我未婚夫和他白月光在一起吃饭的照片。...
    茶点故事阅读 37,989评论 1 333
  • 序言:一个原本活蹦乱跳的男人离奇死亡,死状恐怖,灵堂内的尸体忽然破棺而出,到底是诈尸还是另有隐情,我是刑警宁泽,带...
    沈念sama阅读 33,624评论 4 322
  • 正文 年R本政府宣布,位于F岛的核电站,受9级特大地震影响,放射性物质发生泄漏。R本人自食恶果不足惜,却给世界环境...
    茶点故事阅读 39,209评论 3 307
  • 文/蒙蒙 一、第九天 我趴在偏房一处隐蔽的房顶上张望。 院中可真热闹,春花似锦、人声如沸。这庄子的主人今日做“春日...
    开封第一讲书人阅读 30,199评论 0 19
  • 文/苍兰香墨 我抬头看了看天上的太阳。三九已至,却和暖如春,着一层夹袄步出监牢的瞬间,已是汗流浃背。 一阵脚步声响...
    开封第一讲书人阅读 31,418评论 1 260
  • 我被黑心中介骗来泰国打工, 没想到刚下飞机就差点儿被人妖公主榨干…… 1. 我叫王不留,地道东北人。 一个月前我还...
    沈念sama阅读 45,401评论 2 352
  • 正文 我出身青楼,却偏偏与公主长得像,于是被迫代替她去往敌国和亲。 传闻我的和亲对象是个残疾皇子,可洞房花烛夜当晚...
    茶点故事阅读 42,700评论 2 345

推荐阅读更多精彩内容