IP地址设置
hostnamectl set-hostname k8s-node03
echo "nameserver 8.8.8.8" > /etc/resolv.conf
nmcli connection modify eth0 ipv4.addresses 172.16.60.65/24 ipv4.method manual ipv4.gateway 172.16.60.254 autoconnect yes

cat > /etc/hosts << 'EOF'
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
172.16.60.60 k8s-master01.niceyh.com k8s-master01
172.16.60.61 k8s-master02.niceyh.com k8s-master02
172.16.60.62 k8s-master03.niceyh.com k8s-master03
172.16.60.63 k8s-node01.niceyh.com k8s-node01
172.16.60.64 k8s-node02.niceyh.com k8s-node02
172.16.60.65 k8s-node03.niceyh.com k8s-node03
172.16.20.29 havip.niceyh.com havip
172.16.20.26 ngvip.niceyh.com ngvip
EOF

## dns 配置
cat > /etc/resolv.conf << EOF
nameserver 172.16.20.20
nameserver 172.16.20.21
search niceyh.com
options rotate
options timeout:2
options attempts:1
EOF

# 配置节点互信
ssh-keygen -t rsa
ssh-copy-id root@k8s-master01
ssh-copy-id root@k8s-master02
ssh-copy-id root@k8s-master03
ssh-copy-id root@k8s-node01
ssh-copy-id root@k8s-node02
ssh-copy-id root@k8s-node03

# 查看主机的uuid号,不能一样
cat /sys/class/dmi/id/product_uuid
## base源
rm -rf /etc/yum.repos.d/*
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-vault-8.5.2111.repo
sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo

## epel源
yum install -y https://mirrors.aliyun.com/epel/epel-release-latest-8.noarch.rpm
sed -i 's|^#baseurl=https://download.example/pub|baseurl=https://mirrors.aliyun.com|' /etc/yum.repos.d/epel*
sed -i 's|^metalink|#metalink|' /etc/yum.repos.d/epel*
# 允许iptables检查bridge流量(3节点配置)
modprobe overlay
modprobe br_netfilter
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
#modprobe nf_conntrack_ipv4
modprobe br_netfilter

# 常规的 bridge/转发 内核参数
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system

# 如需代理下载,在代理客户端的 设置-->参数设置 里查看本地监听端口
#export http=172.16.50.200:10808
#export htt2=172.16.50.200:10808

## 下载containerd
mkdir -p /soft/containerd && cd /soft/containerd
curl -LO https://github.com/containerd/containerd/releases/download/v1.7.3/cri-containerd-1.7.3-linux-amd64.tar.gz

## 将文件推送到所有节点
all_node="k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03"
for i in $all_node
do
  ssh root@$i "mkdir -p /soft/containerd"
  ssh root@$i "yum install iproute-tc -y"
  scp cri-containerd-1.7.3-linux-amd64.tar.gz root@$i:/soft/containerd/
  ssh root@$i "tar -zxf /soft/containerd/cri-containerd-1.7.3-linux-amd64.tar.gz -C / "
done

# ubuntu 系统下配置 crictl
sudo bash -c "cat > /etc/crictl.yaml" << EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF

# 生成containerd默认配置文件
mkdir -p /etc/containerd
containerd config default > /etc/containerd/config.toml

# 修改Containerd的配置文件
sed -i "s#SystemdCgroup\ \=\ false#SystemdCgroup\ \=\ true#g" /etc/containerd/config.toml
cat /etc/containerd/config.toml | grep SystemdCgroup

# 配置阿里云镜像源
sed -i "s#registry.k8s.io#registry.aliyuncs.com/google_containers#g" /etc/containerd/config.toml
sed -i "s#pause:3.8#pause:3.9#g" /etc/containerd/config.toml
cat /etc/containerd/config.toml | grep sandbox_image

## 修改镜像仓库源
# 修改镜像源为本地
[plugins."io.containerd.grpc.v1.cri".registry]
  config_path = ""
  [plugins."io.containerd.grpc.v1.cri".registry.auths]
  [plugins."io.containerd.grpc.v1.cri".registry.configs]
    # 添加自己的第一个仓库为信任的
    [plugins."io.containerd.grpc.v1.cri".registry.configs."img01.niceyh.com".tls]
      insecure_skip_verify = true
    # 添加自己的仓库用户名与密码
    [plugins."io.containerd.grpc.v1.cri".registry.configs."img01.niceyh.com".auth]
      username = "user01"
      password = "Aa123456"
    # 添加自己的第二个仓库为信任的
    [plugins."io.containerd.grpc.v1.cri".registry.configs."img02.niceyh.com".tls]
      insecure_skip_verify = true
    # 添加自己的仓库用户名与密码
    [plugins."io.containerd.grpc.v1.cri".registry.configs."img02.niceyh.com".auth]
      username = "user01"
      password = "Aa123456"
  [plugins."io.containerd.grpc.v1.cri".registry.headers]
  [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
    # 添加其它大陆区镜像源
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."img01.niceyh.com"]
      endpoint = ["https://img01.kube.niceyh.com"]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."img02.niceyh.com"]
      endpoint = ["https://img02.kube.niceyh.com"]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
      endpoint = ["https://docker.mirrors.ustc.edu.cn","https://hub-mirror.c.163.com","https://yrrmmhwn.mirror.aliyuncs.com","https://registry.docker-cn.com","https://registry-1.docker.io"]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8s.gcr.io"]
      endpoint = ["https://docker.mirrors.ustc.edu.cn","https://hub-mirror.c.163.com","https://yrrmmhwn.mirror.aliyuncs.com","https://registry.docker-cn.com","https://registry-1.docker.io"]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
      endpoint = ["https://docker.mirrors.ustc.edu.cn","https://hub-mirror.c.163.com","https://yrrmmhwn.mirror.aliyuncs.com","https://registry.docker-cn.com","https://registry-1.docker.io"]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."ghcr.io"]
      endpoint = ["https://docker.mirrors.ustc.edu.cn","https://hub-mirror.c.163.com","https://yrrmmhwn.mirror.aliyuncs.com","https://registry.docker-cn.com","https://registry-1.docker.io"]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
endpoint=["https://docker.mirrors.ustc.edu.cn","https://hub-mirror.c.163.com","https://yrrmmhwn.mirror.aliyuncs.com","https://registry.docker-cn.com","https://registry-1.docker.io"] # ------------------------------------------可不用加 [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming] tls_cert_file = "" tls_key_file = "" # 所有节点都一致 master_node="k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03" for i in $master_node do #scp cri-containerd-1.7.3-linux-amd64.tar.gz root@$i:~/ #ssh root@$i "tar zxvf ~/cri-containerd-1.7.3-linux-amd64.tar.gz -C /" ssh root@$i "mkdir -p /etc/containerd" ssh root@$i "swapoff -a" scp /etc/containerd/config.toml root@$i:/etc/containerd/config.toml ssh root@$i "systemctl daemon-reload " ssh root@$i "systemctl restart containerd" ssh root@$i "systemctl enable containerd" echo "################node $i transfer finish###################" ssh root@$i "systemctl status containerd |grep -i active" echo "################node $i service status###################" ssh root@$i "crictl info| egrep 'SystemdCgroup|sandboxImage'" done ## 检查配置 crictl info| grep sandboxImage crictl info| grep SystemdCgroup # 测试配置文件是否生效 # 如果有内部,测试拉内部镜像 crictl pull img01.niceyh.com/project01/nginx crictl pull img01.kube.niceyh.com/project01/nginx:latest crictl images# ubuntu 系统下配置 crictl rmi img01.kube.niceyh.com/project01/nginx crictl pull img02.kube.niceyh.com/project01/nginx:latest crictl images crictl rmi img02.kube.niceyh.com/project01/nginx:latest
你需要在每台机器上安装以下的软件包:

kubeadm:用来初始化集群的指令。
kubelet:在集群中的每个节点上用来启动 Pod 和容器等。
kubectl:用来与集群通信的命令行工具。

kubeadm 不能帮你安装或者管理 kubelet 或 kubectl,所以你需要确保它们与通过 kubeadm 安装的控制平面的版本相匹配。如果不这样做,则存在发生版本偏差的风险,可能会导致一些预料之外的错误和问题。

# 所有节点均执行如下操作
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.tuna.tsinghua.edu.cn/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF

yum install -y kubelet kubeadm kubectl
systemctl enable --now kubelet
# kubelet 现在每隔几秒就会重启,因为它陷入了一个等待 kubeadm 指令的死循环
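在执行 kubeadm init 之前,可以先确认 kubelet 确实处于上述等待状态(以下仅为排查示例命令):

systemctl status kubelet
journalctl -xeu kubelet --no-pager | tail -n 20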
kubeadm init \
  --apiserver-advertise-address=172.16.60.60 \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.28.0 \
  --service-cidr=10.254.0.0/16 \
  --pod-network-cidr=172.24.0.0/16
#另外一种高效的方法是通过定义好的配置文件来初始化k8s集群。使用 kubeadm config print init-defaults --component-configs KubeletConfiguration 可以打印集群初始化默认使用的配置。
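可以先把默认配置导出到文件再按需修改,文件名 kubeadm.yaml 与下文 --config 参数对应(示例命令):

kubeadm config print init-defaults --component-configs KubeletConfiguration > kubeadm.yaml
# 按下文说明修改 imageRepository、criSocket、cgroupDriver、mode 等字段后再使用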
[root@k8s-master01 ~]# kubeadm config print init-defaults --component-configs KubeletConfiguration
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.16.60.60
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  taints:
  - effect: PreferNoSchedule
    key: node-role.kubernetes.io/master
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: 1.26.1
imageRepository: registry.aliyuncs.com/google_containers
networking:
  podSubnet: 172.24.0.0/16
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
failSwapOn: false
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs

#这里将imageRepository修改为阿里云的registry,避免因为神秘原因无法访问gcr,无法直接拉取镜像。criSocket设置了容器运行时为containerd。同时设置kubelet的cgroupDriver为systemd,设置kube-proxy代理模式为ipvs。

在开始初始化集群之前,可以使用 kubeadm config images pull --config kubeadm.yaml 预先在各个服务器节点上拉取k8s所需要的容器镜像。

[root@master ~]# kubeadm config images pull --config kubeadm.yaml
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-apiserver:v1.26.1
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-controller-manager:v1.26.1
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-scheduler:v1.26.1
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-proxy:v1.26.1
[config/images] Pulled registry.aliyuncs.com/google_containers/pause:3.9
[config/images] Pulled registry.aliyuncs.com/google_containers/etcd:3.5.6-0
[config/images] Pulled registry.aliyuncs.com/google_containers/coredns:v1.9.3

## 查看bootstrap信息
kubectl get secret -n kube-system
kubectl get secret bootstrap-token-pupzf3 -n kube-system -oyaml
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# 设置kubectl 命令自动补全,设置完重新登录
master_node="k8s-master01 k8s-master02 k8s-master03"
for i in $master_node
do
  ssh root@$i 'yum -y install bash-completion &>/dev/null'
  ssh root@$i 'echo "source <(kubectl completion bash)" >> ~/.bashrc'
done
kubeadm join 172.16.60.60:6443 --token pupzf3.7a452ul6g3qltqjr \
  --discovery-token-ca-cert-hash sha256:eb594a167ef4d354abdb32e0ea45d710a4a341a6f6f5d66ffab18bc128ce6656
[root@k8s-master01 /soft/containerd]# kubectl get csr
NAME        AGE   SIGNERNAME                                    REQUESTOR                 REQUESTEDDURATION   CONDITION
csr-fxqm4   9s    kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:pupzf3                       Approved,Issued
csr-jdwt9   11s   kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:pupzf3                       Approved,Issued
csr-vp48d   15s   kubernetes.io/kube-apiserver-client-kubelet   system:bootstrap:pupzf3                       Approved,Issued
kubectl get nodes
kubectl describe node k8s-node01
# 安装网络组件
mkdir /soft/helm && cd /soft/helm
curl -O https://get.helm.sh/helm-v3.10.3-linux-amd64.tar.gz
tar -zxvf helm-v3.10.3-linux-amd64.tar.gz
mv linux-amd64/helm /usr/local/bin/

curl -LO https://github.com/projectcalico/calico/releases/download/v3.24.5/tigera-operator-v3.24.5.tgz
helm install calico tigera-operator-v3.24.5.tgz -n kube-system
# 创建一个yaml模板文件
kubectl create deployment web --image=nginx -oyaml --dry-run=client > web.yaml

# 用现有的资源创建一个模板
[root@k8s-master01 ~]# kubectl get deployments.apps -o=yaml > web01.yaml

# 查看yaml文件的内容
##---------------------------------控制器定义-----------------------
apiVersion: apps/v1           ## API版本
kind: Deployment              ## 资源类型
metadata:                     ## 资源元数据
  creationTimestamp: null
  labels:
    app: web
  name: web
spec:                         ## 资源规格
  replicas: 1                 ## 副本数量
  selector:                   ## 标签选择器
    matchLabels:
      app: web
  strategy: {}
##----------------------------------被控制的对象-------------------------
  template:                   ## pod模板
    metadata:                 ## pod元数据
      creationTimestamp: null
      labels:
        app: web
    spec:                     ## pod配置
      nodeSelector:           ## 调度策略
        env_role: dev         ## 调度到带 env_role=dev 标签的节点
      containers:             ## 容器配置
      - image: nginx          ## 镜像名称
        name: nginx
        resources: {}
status: {}

# 用文件创建一个deployment
[root@k8s-master01 ~]# kubectl apply -f web.yaml
# 创建完之后修改配置信息
[root@k8s-master01 ~]# kubectl edit deployments.apps web
[root@k8s-master01 ~]# kubectl api-versions
[root@k8s-master01 ~]# kubectl api-resources
kubectl create deployment nginx-deployment --image=nginx

#查看pod
[root@master ~]# kubectl get pods
#查看deployment
[root@master ~]# kubectl get deployment
#查看deployment更加详细的信息
kubectl describe deployment nginx-deployment
#查看replicaset
[root@master ~]# kubectl get replicaset
#查看replicaset更详细的信息
[root@master ~]# kubectl describe replicaset
#你也可以继续使用下面命令了解到pod是被replicaset控制而创建的
[root@master ~]# kubectl describe pod

所以上述的过程就是:
(1)你通过kubectl创建了deployment(nginx-deployment)
(2)deployment创建了replicaset(nginx-deployment-55888b446c)
(3)replicaset创建了pod(nginx-deployment-55888b446c-dklsc和nginx-deployment-55888b446c-khsx8)
从名字上也可以看出来对象的命名方式是:"父对象名字+随机字符串或数字"

# 删除deployment部署的容器,执行 kubectl delete deployment nginx-deployment 即可。
# 指定nginx的一个版本为1.14
kubectl create deployment web --image=nginx:1.14 -o yaml --dry-run=client > test.yaml
kubectl apply -f test.yaml

# 更新到1.15版本
[root@k8s-master01 ~]# kubectl set image deployment web nginx=nginx:1.15
[root@k8s-master01 ~]# kubectl get pods
NAME                   READY   STATUS              RESTARTS   AGE
web-649c448b77-5cg99   1/1     Running             0          4m37s
web-649c448b77-l8pws   1/1     Running             0          6m41s
web-7c7b7878c6-qt6jd   0/1     ContainerCreating   0          33s

## 查看升级的状态
[root@k8s-master01 ~]# kubectl rollout status deployment web
deployment "web" successfully rolled out

### 也可以看容器里的信息
kubectl describe pod web-7c7b7878c6-qt6jd |grep Image
## 查看升级的次数
kubectl rollout history deployment web
deployment.apps/web
REVISION  CHANGE-CAUSE
1
2

## 回退到上一版本
kubectl rollout undo deployment web
kubectl rollout undo deployment web --to-revision=2
kubectl scale deployment web --replicas=6
在kubernetes中,pod是应用程序的载体,我们可以通过pod的ip来访问应用程序,但是pod的ip地址不是固定的,这也就意味着不方便直接采用pod的ip对服务进行访问。为了解决这个问题,kubernetes提供了Service资源,Service会对提供同一个服务的多个pod进行聚合,并且提供一个统一的入口地址。通过访问Service的入口地址就能访问到后面的pod服务。

Kubernetes 中Service有以下4种类型:
1.ClusterIP:默认类型,自动分配一个仅Cluster内部可以访问的虚拟IP
2.NodePort:通过每个 Node 上的 IP 和静态端口(NodePort)暴露服务。以ClusterIP为基础,NodePort 服务会路由到 ClusterIP 服务。通过请求 <NodeIP>:<NodePort>,可以从集群的外部访问一个集群内部的 NodePort 服务。
3.LoadBalancer:使用云提供商的负载均衡器,可以向外部暴露服务。外部的负载均衡器可以路由到 NodePort 服务和 ClusterIP 服务。
4.ExternalName:通过返回 CNAME 和它的值,可以将服务映射到 externalName 字段的内容(例如,foo.bar.example.com)。没有任何类型代理被创建。
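作为补充,下面是一个 NodePort 类型 Service 的最小示例清单(名称 web-nodeport、标签 app: web、端口 30080 均为假设值,仅作演示):

apiVersion: v1
kind: Service
metadata:
  name: web-nodeport        # 假设的Service名称
spec:
  type: NodePort
  selector:
    app: web                # 选择带有 app=web 标签的pod
  ports:
  - port: 80                # Service在集群内暴露的端口
    targetPort: 80          # 后端pod的端口
    nodePort: 30080         # 节点上暴露的静态端口(范围30000-32767)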
# 创建一个 nginx 的 deployment
kubectl create deployment web --image=nginx -o yaml --dry-run=client > test.yaml
kubectl apply -f test.yaml

# 发布一个deployment应用到主机接口
kubectl expose deployment web --type=NodePort --port=80 --target-port=80 --name=web

# 写成yaml的方式
kubectl expose deployment web --type=NodePort --port=80 --target-port=80 --name=web2 -o yaml > web2.yaml
部署一个有状态应用,通过 DNS 的方式访问应用,前端无需关心 IP
关键参数有
1. clusterIP: None
2. kind: StatefulSet

############下面示例
apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx
  name: nginx1
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: nginx-statefulset
  namespace: default
spec:
  serviceName: nginx1
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - containerPort: 80

## 每个pod都有一个唯一的名称
## 对应的headless service的clusterIP值是None
## * 根据主机名 + 按照一定规则生成域名。
## * 唯一的域名格式: Pod名称.Service名称.命名空间.svc.cluster.local
#示例: nginx-statefulset-0.nginx1.default.svc.cluster.local
#.部署守护进程DaemonSet,用于收集容器日志等信息
kind: DaemonSet

#.一次性的任务
kind: Job
# 查看 job: kubectl get jobs
# 删除: kubectl delete -f job.yaml

#.定时任务
kind: CronJob
# 查看: kubectl get cronjobs
schedule: "*/1 * * * *"    # 每分钟执行一次
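下面是一个按上述 schedule 每分钟执行一次的 CronJob 最小示例(名称 hello 与 busybox 镜像为假设值,仅作演示):

apiVersion: batch/v1
kind: CronJob
metadata:
  name: hello                 # 假设的任务名称
spec:
  schedule: "*/1 * * * *"     # 每分钟执行一次
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: busybox
            command: ["/bin/sh", "-c", "date; echo Hello from CronJob"]
          restartPolicy: OnFailure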
## 以admin示例
[root@k8s-master01 ~]# echo -n "admin" |base64
YWRtaW4=
[root@k8s-master01 ~]# echo -n "password" |base64
cGFzc3dvcmQ=

##创建secret加密数据
apiVersion: v1
kind: Secret
metadata:
  name: myseceret
type: Opaque
data:
  username: YWRtaW4=
  password: cGFzc3dvcmQ=

## 创建与查看
[root@k8s-master01 ~]# kubectl apply -f mysecret.yaml
secret/myseceret created
[root@k8s-master01 ~]# kubectl get secret
NAME        TYPE     DATA   AGE
myseceret   Opaque   2      11s

## 调用secret中的数据
#### 1. 以变量的方式调用(secretKeyRef的name需与上面创建的secret名称 myseceret 一致)
apiVersion: v1
kind: Pod
metadata:
  name: mypod
spec:
  containers:
  - name: nginx
    image: nginx
    env:
    - name: SECRET_USERNAME
      valueFrom:
        secretKeyRef:
          name: myseceret
          key: username
    - name: SECRET_PASSWORD
      valueFrom:
        secretKeyRef:
          name: myseceret
          key: password

#### 2. 以volume的方式调用
apiVersion: v1
kind: Pod
metadata:
  name: mypod
spec:
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: foo
      mountPath: "/etc/foo"
      readOnly: true
  volumes:
  - name: foo
    secret:
      secretName: myseceret
作用: 存储不加密的数据,让pod以变量或volume挂载到容器中
场景: 配置文件

# 1. 创建配置文件,定义需要用到的配置信息
cat > read.config.properties << EOF
hostname: read01
ipaddress: 1.1.1.1
username: admin
EOF

# 2.创建configmap
kubectl create configmap readis-config --from-file=read.config.properties

# 3.查看configmap信息
kubectl get cm
kubectl describe cm readis-config

## 同样支持volume与变量的方式挂载。
tee > myconfig << 'EOF'
apiVersion: v1
kind: ConfigMap
metadata:
  name: myconfig
  namespace: default
data:
  special.level: info
  special.type: hello
EOF

# 创建 cm
kubectl apply -f myconfig

# pod变量调用
tee > mypod.yaml << "EOF"
apiVersion: v1
kind: Pod
metadata:
  name: mypod
spec:
  containers:
  - name: busybox
    image: busybox
    command: ["/bin/sh", "-c", "echo $(LEVEL) $(TYPE)"]
    env:
    - name: LEVEL
      valueFrom:
        configMapKeyRef:
          name: myconfig
          key: special.level
    - name: TYPE
      valueFrom:
        configMapKeyRef:
          name: myconfig
          key: special.type
  restartPolicy: Never
EOF
kubectl apply -f mypod.yaml
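上面只演示了变量方式,下面补充一个以 volume 方式挂载同一个 myconfig 的示例(pod名称 mypod-volume 与挂载路径 /etc/config 为假设值):

apiVersion: v1
kind: Pod
metadata:
  name: mypod-volume           # 假设的pod名称
spec:
  containers:
  - name: busybox
    image: busybox
    command: ["/bin/sh", "-c", "ls /etc/config && cat /etc/config/special.level"]
    volumeMounts:
    - name: config-volume
      mountPath: /etc/config   # configmap中的每个key会成为该目录下的一个文件
  volumes:
  - name: config-volume
    configMap:
      name: myconfig
  restartPolicy: Never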
访问集群,需要经过三个步骤完成
第一步: 认证
第二步: 授权(鉴权)
第三步: 准入控制

1. 进行访问的时候,过程都需要经过apiserver,apiserver做统一协调。访问过程中需要证书、token或者用户名与密码;如果访问pod,需要serviceAccount帐号。

角色分类
1.Role: 特定命名空间访问权限
2.ClusterRole: 所有命名空间访问权限

角色绑定
RoleBinding: 角色绑定到主体
ClusterRoleBinding: 集群角色绑定到主体

主体
User: 用户
Group: 用户组
ServiceAccount: 服务帐号,一般供pod使用
# 1.创建一个命名空间roledemo
kubectl create ns roledemo

# 2.在该命名空间创建一个pod用于测试
kubectl run nginx --image=nginx -n roledemo

# 3.创建一个角色
cat > ./pod-reader.yaml <<"EOF"
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: roledemo
  name: pod-reader
rules:
- apiGroups: [""] # "" indicates the core API group
  resources: ["pods"]
  verbs: ["get", "watch", "list"]
EOF
kubectl apply -f pod-reader.yaml

# 4.角色绑定
cat > ./pod-RoleBinding.yaml <<"EOF"
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: read-pods
  namespace: roledemo
subjects:
- kind: User
  name: mary
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: pod-reader
  apiGroup: rbac.authorization.k8s.io
EOF
kubectl apply -f pod-RoleBinding.yaml

# 查看绑定
[root@k8s-master01 ~]# kubectl get rolebindings.rbac.authorization.k8s.io -n roledemo
NAME        ROLE              AGE
read-pods   Role/pod-reader   45s

# 证书来识别角色
cat > mary-csr.json <
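绑定完成后,可以用 kubectl auth can-i 结合 --as 模拟用户,先行验证授权是否符合预期(仅作验证示例,结果取决于上面的Role与RoleBinding):

kubectl auth can-i get pods -n roledemo --as mary        # 期望输出 yes
kubectl auth can-i create pods -n roledemo --as mary     # 期望输出 no
kubectl auth can-i get pods -n default --as mary         # 期望输出 no(Role仅限roledemo命名空间)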
8.Ingress 部署
8.1.安装ingress
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
cd /usr/local/src/
helm search repo ingress-nginx
helm pull ingress-nginx/ingress-nginx
tar -zxvf ingress-nginx-4.7.1.tgz
cd ingress-nginx

# 下载下来的 chart 包,需要修改一下资源清单配置文件,修改 values.yaml 文件如下:
# 修改 ingress-nginx-controller 的镜像仓库地址,默认是 k8s.gcr.io 国内无法访问,这里用到github上一个同步 ingress-nginx-controller 的仓库 docker.io/willdockerhub/ingress-nginx-controller
## 修改 hostNetwork: true
## dnsPolicy 的值改为: ClusterFirstWithHostNet
## nodeSelector 添加标签: ingress: "true"
## kind 类型更改为: DaemonSet
## kube-webhook-certgen 的镜像地址改为国内仓库地址 registry.aliyuncs.com/google_containers/kube-webhook-certgen

kubectl create ns ingress-nginx
cd /usr/local/src/ingress-nginx
helm install ingress-nginx -n ingress-nginx .
kubectl label node k8s-master01 ingress=true
kubectl get all -n ingress-nginx

## 应用与查看
[root@k8s-master01 ~]# kubectl get pods -n ingress-nginx
8.2.创建ingress规则
## 查看你的网络插件的版本
kubectl api-versions |grep networking.k8s.io

cat > ingress-web.yaml << "EOF"
apiVersion: apps/v1
kind: Deployment
metadata:
  name: svc-demo
spec:
  replicas: 2
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      containers:
      - image: nginx:1.18.0
        name: svc-demo
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: web-ingress
spec:
  selector:
    app: myapp        # 需与上面Deployment的pod标签一致
  ports:
  - targetPort: 80    # 后端Pod的端口
    port: 8080        # 服务要暴露的端口
EOF

## 应用配置
kubectl apply -f ingress-web.yaml

### 域名访问
# 在主机host中添加域名就可以访问了
# 查看
kubectl get svc
kubectl get ing
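上面的清单只包含 Deployment 和 Service;要让 kubectl get ing 有结果,还需要一条 Ingress 规则。下面是一个指向 web-ingress 服务的最小示例(名称 web-ingress-rule 与域名 web.niceyh.com 为假设值,需在 hosts 中解析到 ingress 所在节点):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: web-ingress-rule            # 假设的Ingress名称
spec:
  ingressClassName: nginx           # 与 ingress-nginx controller 对应
  rules:
  - host: web.niceyh.com            # 假设的访问域名
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: web-ingress       # 上面创建的Service名称
            port:
              number: 8080          # Service暴露的端口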
8.3.使用ingress暴露端口
kubectl create deployment web --image=nginx
kubectl expose deployment web --port=80 --target-port=80 --type=NodePort
kubectl get svc
9. helm使用
1.Helm: 一个命令行客户端工具,主要用于kubernetes应用chart的创建、打包、发布和管理
2.Chart: 用于描述一系列k8s资源相关文件的集合
3.Release: 基于Chart的部署实体,一个chart被helm运行后将会生成对应的一个release,并在k8s中创建真实运行的资源对象。
9.1.安装
mkdir /soft/helm && cd /soft/helm
curl -O https://get.helm.sh/helm-v3.10.3-linux-amd64.tar.gz
tar -zxvf helm-v3.10.3-linux-amd64.tar.gz
mv linux-amd64/helm /usr/local/bin/
9.2.配置helm仓库
helm repo add 仓库名 http://地址
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
# helm repo add stable https://mirror.azure.cn/kubernetes/charts  # 微软

# 查看
helm repo list
helm status

# 删除
helm repo remove 仓库名

# 更新
helm repo update

# 查找
helm search repo ingress

9.3.使用helm
主要介绍三个命令:
chart install
chart upgrade
chart rollback

# 查找
[root@k8s-master01 ~]# helm search repo weave
NAME                 CHART VERSION   APP VERSION   DESCRIPTION
stable/weave-cloud   0.3.9           1.4.0         DEPRECATED - Weave Cloud is a add-on to Kuberne...
stable/weave-scope   1.1.12          1.12.0        DEPRECATED - A Helm chart for the Weave Scope c...

# 安装
helm install ui stable/weave-scope

# 安装之后查看
helm list
helm status ui
9.4.创建chart
#创建一个自己定义的chart_name
helm create chart_name

## 目录里重点关注以下三个文件/目录:
Chart.yaml : 当前chart属性配置信息
templates/ : 编写的yaml资源文件
values.yaml: yaml文件的全局变量
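helm create 生成的标准目录结构大致如下(以 helm create mychart 为例):

mychart/
├── Chart.yaml          # chart的名称、版本等属性信息
├── charts/             # 依赖的子chart
├── templates/          # 资源模板目录(deployment.yaml、service.yaml等)
├── values.yaml         # 模板中引用的全局变量默认值
└── .helmignore         # 打包时忽略的文件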
9.5.应用升级
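应用升级可以通过 helm upgrade 完成,下面是一个简单的操作示例(release 名 web2 与 chart 目录 mychart/ 为假设值,变量名与 9.6 节的 values.yaml 对应):

# 修改 values.yaml 或用 --set 指定新值后升级
helm upgrade web2 mychart/ --set image=nginx,tag=1.17

# 查看发布历史
helm history web2

# 如有问题回滚到指定版本
helm rollback web2 1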
9.6.变量使用
#在yaml文件中通常只有几个值不一样,其它都一样;如果同时部署多个应用的话,可以采用变量方式
1.image
2.tag
3.label
4.port
5.replicas

# 在values.yaml定义变量值
image: nginx
tag: 1.16
label: nginx
port: 80
replicas: 3

# 在template中使用表达式引用变量
{{ .Values.变量名称 }}

## 内置对象 .Release.Name 表示当前release的名称,可以用它生成唯一的资源名称
## 例如执行 helm install web2 mychart/ 时:
#{{ .Release.Name }} = web2
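下面是 templates/ 中引用这些变量的一个简化 deployment 模板示例(仅演示变量替换,非完整模板):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Release.Name }}-deploy      # 用release名称拼出唯一名称
  labels:
    app: {{ .Values.label }}
spec:
  replicas: {{ .Values.replicas }}
  selector:
    matchLabels:
      app: {{ .Values.label }}
  template:
    metadata:
      labels:
        app: {{ .Values.label }}
    spec:
      containers:
      - name: {{ .Values.label }}
        image: "{{ .Values.image }}:{{ .Values.tag }}"
        ports:
        - containerPort: {{ .Values.port }}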