春风十里不如你 —— Taozi - docker https://www.xiongan.host/index.php/tag/docker/

基于Kubernetes集群的监控网络服务 (Monitoring network services on a Kubernetes cluster)
https://www.xiongan.host/index.php/archives/226/ 2023-10-30T01:41:00+08:00

Introduction

This setup needs the following environment: a Kubernetes cluster, the Blackbox tool, and Grafana plus Prometheus for monitoring. The idea: deploy Blackbox in the K8s cluster to probe services and check network availability, and deploy Grafana and Prometheus as the visualization layer, so network connectivity is easier to see and can be alerted on and analyzed. This article is assembled from two posts on Rehiy's blog: 【Kubernetes 集群上安装 Blackbox 监控网站状态】 and 【Kubernetes 集群上安装 Grafana 和 Prometheus】.

Deploy the Kubernetes cluster (Ubuntu/Debian)

Make sure the master node and the worker nodes all have Docker installed, preferably the same version.

Master node:

# Install Docker with the one-line installer (skip if already installed)
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
# Start Docker and enable it at boot
systemctl start docker && systemctl enable docker

apt update
apt install -y wireguard
echo "net.ipv4.ip_forward = 1" >/etc/sysctl.d/ip_forward.conf
sysctl -p /etc/sysctl.d/ip_forward.conf

# Save the token below; it can be any string
export SERVER_TOKEN=r83nui54eg8wihyiteshuo3o43gbf7u9er63o43gbf7uitujg8wihyitr6

export PUBLIC_IP=$(curl -Ls http://metadata.tencentyun.com/latest/meta-data/public-ipv4)
export PRIVATE_IP=$(curl -Ls http://metadata.tencentyun.com/latest/meta-data/local-ipv4)

export INSTALL_K3S_SKIP_DOWNLOAD=true
export DOWNLOAD_K3S_BIN_URL=https://github.com/k3s-io/k3s/releases/download/v1.28.2%2Bk3s1/k3s

if [ $(curl -Ls http://ipip.rehi.org/country_code) == "CN" ]; then
  DOWNLOAD_K3S_BIN_URL=https://ghproxy.com/${DOWNLOAD_K3S_BIN_URL}
fi

curl -Lo /usr/local/bin/k3s $DOWNLOAD_K3S_BIN_URL
chmod a+x /usr/local/bin/k3s

curl -Ls https://get.k3s.io | sh -s - server \
  --cluster-init \
  --token $SERVER_TOKEN \
  --node-ip $PRIVATE_IP \
  --node-external-ip $PUBLIC_IP \
  --advertise-address $PRIVATE_IP \
  --service-node-port-range 5432-9876 \
  --flannel-backend wireguard-native \
  --flannel-external-ip

Worker node:

# Install Docker with the one-line installer (skip if already installed)
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
# Start Docker and enable it at boot
systemctl start docker && systemctl enable docker

# Worker node script
apt update
apt install -y wireguard
echo "net.ipv4.ip_forward = 1" >/etc/sysctl.d/ip_forward.conf
sysctl -p /etc/sysctl.d/ip_forward.conf

export SERVER_IP=43.129.195.33   # replace with your master node address
export SERVER_TOKEN=r83nui54eg8wihyiteshuo3o43gbf7u9er63o43gbf7uitujg8wihyitr6

export PUBLIC_IP=$(curl -Ls http://metadata.tencentyun.com/latest/meta-data/public-ipv4)
export PRIVATE_IP=$(curl -Ls http://metadata.tencentyun.com/latest/meta-data/local-ipv4)

export INSTALL_K3S_SKIP_DOWNLOAD=true
export DOWNLOAD_K3S_BIN_URL=https://github.com/k3s-io/k3s/releases/download/v1.28.2%2Bk3s1/k3s

if [ $(curl -Ls http://ipip.rehi.org/country_code) == "CN" ]; then
  DOWNLOAD_K3S_BIN_URL=https://ghproxy.com/${DOWNLOAD_K3S_BIN_URL}
fi

curl -Lo /usr/local/bin/k3s $DOWNLOAD_K3S_BIN_URL
chmod a+x /usr/local/bin/k3s

curl -Ls https://get.k3s.io | sh -s - agent \
  --server https://$SERVER_IP:6443 \
  --token $SERVER_TOKEN \
  --node-ip $PRIVATE_IP \
  --node-external-ip $PUBLIC_IP

Deploy the Blackbox tool (a cluster deployment is also possible)

# Pull the image
docker pull rehiy/blackbox

# Start it with a single command
docker run -d \
  --name blackbox \
  --restart always \
  --publish 9115:9115 \
  --env "NODE_NAME=guangzhou-taozi" \
  --env "NODE_OWNER=Taozi" \
  --env "NODE_REGION=广州" \
  --env "NODE_ISP=TencentCloud" \
  --env "NODE_BANNER=From Taozii-www.xiongan.host" \
  rehiy/blackbox

# Watch the logs until registration completes
docker logs -f blackbox
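Once the container is up you can sanity-check the probe endpoint directly before wiring it into Prometheus. A quick check, assuming you run it on the host that published port 9115 and using an example target URL (the exporter's standard /probe interface, which the Prometheus jobs later in this post also use):

# "probe_success 1" in the output means the HTTP check passed
curl "http://127.0.0.1:9115/probe?target=https://www.example.org&module=http_2xx"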
Deploy Grafana and Prometheus

On the master node create a directory (any name), then create two files inside it: grafpro.yaml and grafpro.sh.

grafpro.yaml:

kind: Deployment
apiVersion: apps/v1
metadata:
  name: &name grafpro
  labels:
    app: *name
spec:
  selector:
    matchLabels:
      app: *name
  template:
    metadata:
      labels:
        app: *name
    spec:
      initContainers:
        - name: busybox
          image: busybox
          command:
            - sh
            - -c
            - |
              if [ ! -f /etc/prometheus/prometheus.yml ]; then
              cat <<EOF >/etc/prometheus/prometheus.yml
              global:
                scrape_timeout: 25s
                scrape_interval: 1m
                evaluation_interval: 1m
              scrape_configs:
                - job_name: prometheus
                  static_configs:
                    - targets:
                        - 127.0.0.1:9090
              EOF
              fi
          volumeMounts:
            - name: *name
              subPath: etc
              mountPath: /etc/prometheus
      containers:
        - name: grafana
          image: grafana/grafana
          securityContext:
            runAsUser: 0
          ports:
            - containerPort: 3000
          volumeMounts:
            - name: *name
              subPath: grafana
              mountPath: /var/lib/grafana
        - name: prometheus
          image: prom/prometheus
          securityContext:
            runAsUser: 0
          ports:
            - containerPort: 9090
          volumeMounts:
            - name: *name
              subPath: etc
              mountPath: /etc/prometheus
            - name: *name
              subPath: prometheus
              mountPath: /prometheus
      volumes:
        - name: *name
          hostPath:
            path: /srv/grafpro
            type: DirectoryOrCreate
---
kind: Service
apiVersion: v1
metadata:
  name: &name grafpro
  labels:
    app: *name
spec:
  selector:
    app: *name
  ports:
    - name: grafana
      port: 3000
      targetPort: 3000
    - name: prometheus
      port: 9090
      targetPort: 9090
---
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
  name: &name grafpro
  annotations:
    traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
spec:
  rules:
    - host: grafana.example.org
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: *name
                port:
                  name: grafana
    - host: prometheus.example.org
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: *name
                port:
                  name: prometheus
  tls:
    - secretName: default

grafpro.sh:

# Warning: adjust the storage path and the access domains before running
# Storage path
export GRAFPRO_STORAGE=${GRAFPRO_STORAGE:-"/srv/grafpro"}
# Access domains
export GRAFANA_DOMAIN=${GRAFANA_DOMAIN:-"grafana.example.org"}
export PROMETHEUS_DOMAIN=${PROMETHEUS_DOMAIN:-"prometheus.example.org"}

# Substitute the parameters and deploy
cat grafpro.yaml \
  | sed "s#/srv/grafpro#$GRAFPRO_STORAGE#g" \
  | sed "s#grafana.example.org#$GRAFANA_DOMAIN#g" \
  | sed "s#prometheus.example.org#$PROMETHEUS_DOMAIN#g" \
  | kubectl apply -f -

Deploy:

chmod +x grafpro.sh
./grafpro.sh
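Before moving on to the browser tests it is worth confirming the workload actually came up. A quick sketch, assuming everything was applied to the default namespace (the Deployment and Service carry the app=grafpro label from the manifest above, and the Ingress shares the same name):

kubectl get deployment,service -l app=grafpro
kubectl get ingress grafpro
kubectl get pod -l app=grafpro -o wide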
Testing

Note: ports 9115 and 9090 need to be open. Open http://grafana.example.org in a browser; the username and password are both admin. On first login you are prompted to change the password, after which you land on the console. Then open http://grafana.example.org/connections/datasources/, pick the first entry, edit the URL to http://127.0.0.1:9090 and save. Select the Prometheus data source you just created and import a dashboard. Open http://prometheus.example.org to check the Prometheus side.

Configure Prometheus jobs

# Go back to /srv/grafpro/etc on the master node
# Back up the existing prometheus.yml, then create a new one
mv prometheus.yml prometheus00.yml

The new prometheus.yml is shown below (if the Blackbox workload was deployed under a different name, such as blackbox-exporter, adjust the configuration accordingly):

global:
  scrape_timeout: 15s
  scrape_interval: 1m
  evaluation_interval: 1m

scrape_configs:
  # prometheus
  - job_name: prometheus
    static_configs:
      - targets:
          - 127.0.0.1:9090

  # blackbox_all
  - job_name: blackbox_all
    static_configs:
      - targets:
          - blackbox-gz:9115
        labels:
          region: '广州,腾讯云'

  # http_status_gz
  - job_name: http_status_gz
    metrics_path: /probe
    params:
      module: [http_2xx]   # probe with an HTTP GET
    static_configs:
      - targets:
          - https://www.example.com
        labels:
          project: 测试1
          desc: 测试网站描述1
      - targets:
          - https://www.example.org
        labels:
          project: 测试2
          desc: 测试网站描述2
    basic_auth:
      username: ******
      password: ******
    relabel_configs:
      - target_label: region
        replacement: '广州,腾讯云'
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: blackbox-gz:9115

Then restart the workload so Prometheus picks up the new configuration: list the pods, delete the grafpro pod (the one running Grafana and Prometheus), and wait a few minutes for it to be recreated.

kubectl get pod
kubectl delete pod <grafpro-pod-name>

Import the Grafana dashboard

Download the attached JSON file and import it in Grafana. Once imported, the monitoring dashboard starts up and displays the probe data.

Debian和Ubuntu安装k8s (Installing k8s on Debian and Ubuntu)
https://www.xiongan.host/index.php/archives/225/ 2023-10-20T18:44:00+08:00

# Master node script
apt update
apt install -y wireguard
echo "net.ipv4.ip_forward = 1" >/etc/sysctl.d/ip_forward.conf
sysctl -p /etc/sysctl.d/ip_forward.conf

export SERVER_TOKEN=r83nui54eg8wihyiteshuo3o43gbf7u9er63o43gbf7uitujg8wihyitr6

export PUBLIC_IP=$(curl -Ls http://metadata.tencentyun.com/latest/meta-data/public-ipv4)
export PRIVATE_IP=$(curl -Ls http://metadata.tencentyun.com/latest/meta-data/local-ipv4)

export INSTALL_K3S_SKIP_DOWNLOAD=true
export DOWNLOAD_K3S_BIN_URL=https://github.com/k3s-io/k3s/releases/download/v1.28.2%2Bk3s1/k3s

if [ $(curl -Ls http://ipip.rehi.org/country_code) == "CN" ]; then
  DOWNLOAD_K3S_BIN_URL=https://ghproxy.com/${DOWNLOAD_K3S_BIN_URL}
fi

curl -Lo /usr/local/bin/k3s $DOWNLOAD_K3S_BIN_URL
chmod a+x /usr/local/bin/k3s

curl -Ls https://get.k3s.io | sh -s - server \
  --cluster-init \
  --token $SERVER_TOKEN \
  --node-ip $PRIVATE_IP \
  --node-external-ip $PUBLIC_IP \
  --advertise-address $PRIVATE_IP \
  --service-node-port-range 5432-9876 \
  --flannel-backend wireguard-native \
  --flannel-external-ip

# Worker node script
apt update
apt install -y wireguard
echo "net.ipv4.ip_forward = 1" >/etc/sysctl.d/ip_forward.conf
sysctl -p /etc/sysctl.d/ip_forward.conf

export SERVER_IP=43.129.195.33
export SERVER_TOKEN=r83nui54eg8wihyiteshuo3o43gbf7u9er63o43gbf7uitujg8wihyitr6

export PUBLIC_IP=$(curl -Ls http://metadata.tencentyun.com/latest/meta-data/public-ipv4)
export PRIVATE_IP=$(curl -Ls http://metadata.tencentyun.com/latest/meta-data/local-ipv4)

export INSTALL_K3S_SKIP_DOWNLOAD=true
export DOWNLOAD_K3S_BIN_URL=https://github.com/k3s-io/k3s/releases/download/v1.28.2%2Bk3s1/k3s

if [ $(curl -Ls http://ipip.rehi.org/country_code) == "CN" ]; then
  DOWNLOAD_K3S_BIN_URL=https://ghproxy.com/${DOWNLOAD_K3S_BIN_URL}
fi

curl -Lo /usr/local/bin/k3s $DOWNLOAD_K3S_BIN_URL
chmod a+x /usr/local/bin/k3s

curl -Ls https://get.k3s.io | sh -s - agent \
  --server https://$SERVER_IP:6443 \
  --token $SERVER_TOKEN \
  --node-ip $PRIVATE_IP \
  --node-external-ip $PUBLIC_IP

# Configure a Docker registry mirror
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://wml59v5w.mirror.aliyuncs.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker

# Install the Kuboard cluster management console on a machine outside the cluster
sudo docker run -d \
  --restart=unless-stopped \
  --name=kuboard \
  -p 80:80/tcp \
  -p 10081:10081/udp \
  -p 10081:10081/tcp \
  -e KUBOARD_ENDPOINT="http://kuboard.my-company.com:80" \
  -e KUBOARD_AGENT_SERVER_UDP_PORT="10081" \
  -e KUBOARD_AGENT_SERVER_TCP_PORT="10081" \
  -v /root/kuboard-data:/data \
  eipwork/kuboard:v3.1.7.1
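Once the server and agent scripts above have both finished, it is worth confirming the agent actually registered before layering anything on top. A minimal check on the master node (k3s bundles its own kubectl):

# Both the master and the worker should show up as Ready
k3s kubectl get nodes -o wide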
【k8s】将Go服务上传到k8s ([k8s] Deploying a Go service to Kubernetes)
https://www.xiongan.host/index.php/archives/215/ 2023-06-07T22:56:55+08:00

Publish a Go service to the k8s cluster.

Install the Go environment

Download the Go tar.gz package; the appropriate version is available on the Go page of the Aliyun mirror site:
https://mirrors.aliyun.com/golang/?spm=a2c6h.13651104.mirror-free-trial.1.75b41e57BOxyw5

Download it onto the VM and extract it into /usr/local/src:

# Extract the Go package
[root@master ~]# tar -zxf go1.18.10.linux-amd64.tar.gz -C /usr/local/src

# Add the environment variables
[root@master src]# vim /etc/profile
# Append the following:
export GOROOT=/usr/local/src/go
export PATH=$PATH:$GOROOT/bin

# Save, exit and reload the profile
source /etc/profile
# Verify the installation
go version

Create the source file

[root@master ~]# mkdir 0607tz
[root@master ~]# cd 0607tz/
[root@master 0607tz]# vim main.go

main.go:

package main

import (
      "net/http"

      "github.com/gin-gonic/gin"
)

func statusOKHandler(c *gin.Context) {
      c.JSON(http.StatusOK, gin.H{"status": "success~welcome to study"})
}

func versionHandler(c *gin.Context) {
      c.JSON(http.StatusOK, gin.H{"version": "v1.1版本"})
}

func main() {
      router := gin.New()
      router.Use(gin.Recovery())
      router.GET("/", statusOKHandler)
      router.GET("/version", versionHandler)
      router.Run(":8080")
}

Initialize the project with Go modules

[root@master 0607tz]# go mod init 0607tz
go: creating new go.mod: module 0607tz
go: to add module requirements and sums:
      go mod tidy
# Initialization succeeded

# Set the module proxy
[root@master 0607tz]# go env -w GOPROXY=https://goproxy.cn,direct
[root@master 0607tz]# go mod tidy

# Build the source
[root@master 0607tz]# CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o k8s-demo main.go

Build the image

Write the Dockerfile:

[root@master 0607tz]# vim Dockerfile

FROM alpine
ADD k8s-demo /data/app/
WORKDIR /data/app/
CMD ["/bin/sh","-c","./k8s-demo"]

Build the image:

[root@master 0607tz]# docker build -t taozheng/k8sdemo:v1 .

Package the image and copy it to the k8s worker node:

[root@master 0607tz]# docker save -o k8sdemo.tar.gz taozheng/k8sdemo:v1
[root@master 0607tz]# scp k8sdemo.tar.gz node:/root/
k8sdemo.tar.gz                               100%   16MB 68.0MB/s   00:00

Then load the image from the archive on the node, as shown in the sketch below.
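The post only notes that the image archive should be unpacked on the node; a minimal way to do that (run on the worker node, assuming the archive landed in /root as in the scp step above):

# Load the transferred image into the node's local Docker image store
docker load -i /root/k8sdemo.tar.gz
# Confirm it is present, so the Deployment's imagePullPolicy: IfNotPresent can use it
docker images | grep k8sdemo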
Create the Deployment YAML

[root@master 0607tz]# vim k8s.yaml

k8s.yaml:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: k8s-demo
  namespace: default
  labels:
    app: k8s-demo
    cy: taozheng
spec:
  selector:
    matchLabels:
      app: k8s-demo
  replicas: 4
  template:
    metadata:
      labels:
        app: k8s-demo
    spec:
      containers:
      - image: taozheng/k8sdemo:v1
        imagePullPolicy: IfNotPresent
        name: k8s-demo
        ports:
        - containerPort: 8080
          protocol: TCP
        resources:
          limits:
            cpu: 100m
            memory: 100Mi
          requests:
            cpu: 50m
            memory: 50Mi
        livenessProbe:
          tcpSocket:
            port: 8080
          initialDelaySeconds: 10
          timeoutSeconds: 3
        readinessProbe:
          httpGet:
            path: /
            port: 8080
          initialDelaySeconds: 10
          timeoutSeconds: 2

Create the Service for the Go app:

[root@master 0607tz]# vim gosvc.yaml

kind: Service
apiVersion: v1
metadata:
  name: k8s-demo-svc
  namespace: default
  labels:
    app: k8s-demo
    cy: taozheng
spec:
  ports:
    - name: api
      port: 8080
      protocol: TCP
      targetPort: 8080
  selector:
    app: k8s-demo

Deploy to k8s and check the pod and Service information, then change the Service type to NodePort:

[root@master 0607tz]# kubectl edit svc k8s-demo-svc

After saving, check again to confirm the change took effect, check the Service labels, and test access from a browser.

【Docker】k8s健康检查 ([Docker] Kubernetes health checks)
https://www.xiongan.host/index.php/archives/212/ 2023-05-28T12:05:24+08:00

Health checks

Liveness probes

Create a pod YAML that uses an exec-action liveness probe; the probed path (/tmp/healthy) must be created inside the container. The pod starts successfully, and by continuously watching the pod status you can see it restart repeatedly once the probe fails. Using the describe command to inspect the detailed pod information shows it was created normally.

Create a pod YAML that uses an HTTP liveness probe: create the YAML, run it, check its status, and review the detailed events.

Create a pod YAML that uses a TCP liveness probe, based on the httpd container image: create the YAML file, run it, make a change inside the container, check the pod's RESTARTS count, and review the records of the pod failing its earlier liveness checks.

Readiness probes

Create a YAML file for an httpd Deployment that configures a readiness probe. Run the Deployment and use describe to inspect the endpoints of the httpd Service: four addresses are listed. Enter one container, delete its index.html, and run describe on the endpoints again: the deleted pod's address has been removed from the endpoint list. The pod's detailed information shows it failed the probe, and the pod listing shows it in a NotReady state.
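The probe manifests in this walkthrough are not reproduced in the text. For reference only, a minimal exec-based liveness probe of the kind described above might look like this; the pod name and the busybox image are illustrative assumptions, not the author's exact file:

apiVersion: v1
kind: Pod
metadata:
  name: liveness-exec        # illustrative name
spec:
  containers:
  - name: liveness
    image: busybox
    args:
    - /bin/sh
    - -c
    # create the probed path, keep it for 30s, then remove it so the probe starts failing
    - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
    livenessProbe:
      exec:
        command:
        - cat
        - /tmp/healthy
      initialDelaySeconds: 5
      periodSeconds: 5

Once /tmp/healthy disappears the probe fails and the kubelet restarts the container, which is exactly the repeated-restart behaviour described above.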
【k8s】service服务和job服务 ([k8s] Services and Jobs)
https://www.xiongan.host/index.php/archives/208/ 2023-05-17T13:35:50+08:00

Service discovery

Using a Service

Go to the directory used for this lab and create the backend httpd Deployment:

[root@master servicefile]# vim httpd-dy.yaml

kind: Deployment
apiVersion: apps/v1
metadata:
  name: httpd
spec:
  replicas: 3
  selector:
    matchLabels:
      app: httpd
  template:
    metadata:
      labels:
        app: httpd
    spec:
      containers:
      - name: httpd
        image: httpd
        ports:
        - containerPort: 80

Deploy the Deployment and check its information, then create the httpd-service.yaml file:

[root@master servicefile]# vim httpd-service.yaml
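The contents of httpd-service.yaml are not shown in the text. Judging from the later test, which curls the cluster IP on port 8080, a matching ClusterIP Service would look roughly like this; it is a sketch, not the author's exact manifest, and the name httpd-svc is an assumption:

kind: Service
apiVersion: v1
metadata:
  name: httpd-svc
spec:
  selector:
    app: httpd
  ports:
  - protocol: TCP
    port: 8080      # the port used by the curl test below
    targetPort: 80  # httpd container port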
Create the Service and check its information (the kubernetes service in the listing is a system service). Test that the Service works with curl:

[root@master servicefile]# curl 10.102.124.67:8080

The Service just created can then be deleted. Next create httpd-expose.yaml and deploy it:

[root@master servicefile]# vim httpd-expose.yaml

kind: Service
apiVersion: v1
metadata:
  name: httpd-svc
spec:
  type: NodePort
  selector:
    app: httpd
  ports:
  - protocol: TCP
    port: 8080
    targetPort: 80
    nodePort: 30144

Open a browser on the jump host and visit the node IP plus the node port.

Using DNS

Create client.yaml, a client pod for testing DNS resolution:

[root@master servicefile]# vim client.yaml

kind: Pod
apiVersion: v1
metadata:
  name: clientpod
spec:
  containers:
    - name: clientpod
      image: busybox:1.28.3
      args:
      - /bin/sh
      - -c
      - sleep 30000

Create the pod and enter its shell:

[root@master servicefile]# kubectl apply -f client.yaml

Inside the pod, use nslookup to resolve the service domain name and wget to access the service through that name.

Lab tasks

- Create deployment1: 2 replicas, httpd image.
- Create deployment2: 3 replicas, httpd image.
- Create service1, whose backend is all pods of deployment1 and deployment2.
- Create service2, whose backend is the first pod of deployment1 and the first pod of deployment2; label those two pods tz=httpd01.

Check the containers' details and the port information; the pods behind svc2 are exactly the ones required.

DaemonSet and Job

Using a DaemonSet

Create a DaemonSet YAML file and run it:

[root@master servicefile]# vim DS-nginx.yaml

kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: nginx-daemonset
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.7.9
        ports:
        - containerPort: 80

Check where the DaemonSet pods are placed, then delete a pod to observe the DaemonSet's self-healing: the pod is recreated automatically.

Using a Job

Create the Job YAML file:

[root@master servicefile]# vim pi-job.yaml

kind: Job
apiVersion: batch/v1
metadata:
  name: pi
spec:
  template:
    spec:
      containers:
      - name: pi
        image: perl
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never
  backoffLimit: 4

Create the Job and watch its status: it completes and shuts down automatically, and you can then inspect its output.

Using a CronJob

Create a CronJob YAML that runs once a minute and prints a hello message:

[root@master servicefile]# vim CJ-hello.yaml

kind: CronJob
apiVersion: batch/v1beta1
metadata:
  name: hello
spec:
  schedule: "*/1 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: busybox
            args:
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernets cluster-tz123
          restartPolicy: OnFailure

Run the CronJob and watch it: the pods reach Completed, and a new pod is created every minute, each printing the message.

Lab tasks

- Create a DaemonSet containing two pods, image nginx.
- Create a Job that prints helloworld.
- Create a CronJob that prints helloworld at a given hour and minute every day.
- Delete the DaemonSet, Job and CronJob created in this lab.

【k8s】标签Label与Label Selector ([k8s] Labels and label selectors)
https://www.xiongan.host/index.php/archives/207/ 2023-05-16T20:43:42+08:00

Labels

Go to the directory where the lab files are kept and create a YAML that uses several labels:

[root@master tz123]# cd /root/tz123/labfile/labelfile
[root@master labelfile]# vim labelpod.yaml

kind: Pod
apiVersion: v1
metadata:
  name: labelpod
  labels:
    app: busybox
    version: new
spec:
  containers:
    - name: labelpod
      image: busybox
      args:
      - /bin/sh
      - -c
      - sleep 30000

Create the pod and check its labels:

[root@master labelfile]# kubectl apply -f labelpod.yaml
pod/labelpod created
[root@master labelfile]# kubectl get pod --show-labels
NAME       READY   STATUS    RESTARTS   AGE   LABELS
labelpod   1/1     Running   0          11s   app=busybox,version=new

Add a new label to the pod:

[root@master labelfile]# kubectl label pod labelpod time=2019
pod/labelpod labeled
[root@master labelfile]# kubectl get pod --show-labels
NAME       READY   STATUS    RESTARTS   AGE   LABELS
labelpod   1/1     Running   0          69s   app=busybox,time=2019,version=new

Label selectors

Create a new YAML:

[root@master labelfile]# vim labelpod2.yaml

kind: Pod
apiVersion: v1
metadata:
  name: labelpod2
  labels:
    app: httpd
    version: new
spec:
  containers:
    - name: httpd
      image: httpd

Create it and check the new labelpod2:

[root@master labelfile]# kubectl apply -f labelpod2.yaml
[root@master labelfile]# kubectl get pod --show-labels
NAME        READY   STATUS              RESTARTS   AGE   LABELS
labelpod    1/1     Running             0          12m   app=busybox,time=2019,version=new
labelpod2   0/1     ContainerCreating   0          23s   app=httpd,version=new

Use an equality-based label selector:

[root@master labelfile]# kubectl get pod -l app=httpd
NAME        READY   STATUS    RESTARTS   AGE
labelpod2   1/1     Running   0          100s
or
[root@master labelfile]# kubectl get pod -l app==httpd
NAME        READY   STATUS    RESTARTS   AGE
labelpod2   1/1     Running   0          114s

Use an inequality-based selector, and list each pod's value for a given label key:

[root@master labelfile]# kubectl get pod -l app!=httpd
NAME       READY   STATUS    RESTARTS   AGE
labelpod   1/1     Running   0          14m
[root@master labelfile]# kubectl get pod -L app
NAME        READY   STATUS    RESTARTS   AGE    APP
labelpod    1/1     Running   0          15m    busybox
labelpod2   1/1     Running   0          3m5s   httpd

Scheduling with label selectors

Label node 1 and check:

[root@master labelfile]# kubectl label node node env=test
node/node labeled
[root@master labelfile]# kubectl get node -L env
NAME     STATUS   ROLES                  AGE   VERSION   ENV
master   Ready    control-plane,master   91d   v1.20.6
node     Ready    <none>                 91d   v1.20.6   test

Use a nodeSelector to steer scheduling; create a new YAML file:

[root@master labelfile]# vim nsdeploy.yaml

kind: Deployment
apiVersion: apps/v1
metadata:
  name: nginx-dy
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.7.9
        ports:
        - containerPort: 80
      nodeSelector:
        env: test

Check where the Deployment's pods landed:

[root@master labelfile]# kubectl get pod -o wide
NAME                        READY   STATUS    RESTARTS   AGE     IP               NODE   NOMINATED NODE   READINESS GATES
labelpod                    1/1     Running   0          28m     10.244.167.145   node   <none>           <none>
labelpod2                   1/1     Running   0          15m     10.244.167.146   node   <none>           <none>
nginx-dy-6dd6c76bcb-667ss   1/1     Running   0          5m19s   10.244.167.148   node   <none>           <none>
nginx-dy-6dd6c76bcb-q8tqh   1/1     Running   0          5m19s   10.244.167.149   node   <none>           <none>
nginx-dy-6dd6c76bcb-xc9h7   1/1     Running   0          5m19s   10.244.167.147   node   <none>           <none>

Use node affinity for scheduling; create a new YAML file nadeploy2.yaml:

[root@master labelfile]# vim nadeploy2.yaml

kind: Deployment
apiVersion: apps/v1
metadata:
  name: httpd-dy
  labels:
    app: httpd
spec:
  replicas: 3
  selector:
    matchLabels:
      app: httpd
  template:
    metadata:
      labels:
        app: httpd
    spec:
      containers:
      - name: httpd
        image: httpd
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: env
                operator: In
                values:
                - test

Create the Deployment and check where its pods run; all three are on node:

[root@master labelfile]# kubectl apply -f nadeploy2.yaml
deployment.apps/httpd-dy created
[root@master labelfile]# kubectl get pod -o wide
NAME                        READY   STATUS              RESTARTS   AGE   IP               NODE   NOMINATED NODE   READINESS GATES
httpd-dy-5b4bb9646-g4jzb    1/1     Running             0          33s   10.244.167.150   node   <none>           <none>
httpd-dy-5b4bb9646-lb876    1/1     Running             0          33s   10.244.167.151   node   <none>           <none>
httpd-dy-5b4bb9646-q7zcm    0/1     ContainerCreating   0          33s   <none>           node   <none>           <none>
labelpod                    1/1     Running             0          38m   10.244.167.145   node   <none>           <none>
labelpod2                   1/1     Running             0          26m   10.244.167.146   node   <none>           <none>
nginx-dy-6dd6c76bcb-667ss   1/1     Running             0          15m   10.244.167.148   node   <none>           <none>
nginx-dy-6dd6c76bcb-q8tqh   1/1     Running             0          15m   10.244.167.149   node   <none>           <none>
nginx-dy-6dd6c76bcb-xc9h7   1/1     Running             0          15m   10.244.167.147   node   <none>           <none>

Lab tasks

Create a deployment using the nginx image, with 5 replicas, whose pods must not land on node "node":

[root@master labelfile]# vim shixun01.yaml

kind: Deployment
apiVersion: apps/v1
metadata:
  name: nginx-dy
  labels:
    app: nginx
spec:
  replicas: 5
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: env
                operator: NotIn
                values:
                - node

Find a way to search out the pods that provide core-dns, kube-proxy and dashboard on the Kubernetes system. First label the core-dns, kube-proxy and dashboard pods:

[root@master labelfile]# kubectl label -n kube-system pod kube-proxy-kj8j5 app=kubeproxy
[root@master labelfile]# kubectl label -n kube-system pod coredns-7f89b7bc75-n224r app=coredns

Then search for the pods by keyword, and likewise search for the dashboard pod. Finally, using labels and a label selector, delete the nginx on node2 with a single command.
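That single delete command is not shown in the post. One possible way to do it, as a sketch: it assumes the nginx pods carry the app=nginx label used throughout this post and that the node is literally named node2, and it combines a label selector with a field selector:

# Delete only the nginx pods scheduled on node2
kubectl delete pod -l app=nginx --field-selector spec.nodeName=node2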
【K8s】下的kubectl的Deployment部署Nginx ([K8s] Deploying Nginx with a kubectl Deployment)
https://www.xiongan.host/index.php/archives/205/ 2023-05-10T21:00:59+08:00

Deploying the Nginx service

Overview: use a Deployment and manage it through rolling updates.

Create the Deployment

On the master node, create the labfile/deplofile directory to hold the configuration files; the Deployment YAML files created later are kept there.

[root@master ~]# mkdir labfile
[root@master ~]# cd labfile/
[root@master labfile]# mkdir deplofile
[root@master labfile]# cd deplofile/
[root@master deplofile]# vim nginx-dy.yaml

The Deployment file:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-dy
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.7.9
        ports:
        - containerPort: 80

Deploy nginx-dy:

[root@master deplofile]# kubectl apply -f nginx-dy.yaml
deployment.apps/nginx-dy created

Check the details and the creation result; the ReplicaSet has been created.

Scaling the Deployment

Edit the nginx-dy.yaml created earlier, change the replica count to 5, and apply the changed file:

[root@master deplofile]# kubectl apply -f nginx-dy.yaml
deployment.apps/nginx-dy configured
[root@master deplofile]# kubectl get pod

Rolling upgrade of the Deployment

Copy the original file into two new versions:

[root@master deplofile]# cp nginx-dy.yaml nginx-dy-v2.yaml
[root@master deplofile]# cp nginx-dy.yaml nginx-dy-v3.yaml

Perform the rolling update:

[root@master deplofile]# kubectl apply -f nginx-dy-v2.yaml --record

Check the update status, then check the ReplicaSets: a new one now holds the 5 pods and the old pods are gone. Check the Deployment's update events, then update to v3:

[root@master deplofile]# kubectl apply -f nginx-dy-v3.yaml --record

Check the Deployment's rollout history:

[root@master deplofile]# kubectl rollout history deployment nginx-dy

Check the details of revision 2:

[root@master deplofile]# kubectl rollout history deployment nginx-dy --revision=2

Roll back to revision 2:

[root@master deplofile]# kubectl rollout undo deployment nginx-dy --to-revision=2

The Deployment is now back on revision 2. Delete the Deployment:

[root@master deplofile]# kubectl delete deployment nginx-dy

Lab

Check the Deployment information, then build httpd from a YAML file: create a Deployment with these requirements: image httpd:2.4, 4 replicas.

[root@master deplofile]# vim httpd-v1.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpd-dy
  labels:
    app: httpd
spec:
  replicas: 4
  selector:
    matchLabels:
      app: httpd
  template:
    metadata:
      labels:
        app: httpd
    spec:
      containers:
      - name: httpd
        image: httpd:2.4
        ports:
        - containerPort: 8080

Create it. Then remove one pod from this Deployment, going to 3 replicas, by editing the YAML file and applying the update. Upgrade the Deployment's image version to latest: copy the v1 YAML as a v2 version, change the image tag, apply the upgrade, and confirm the version is now latest. Finally, find the node each pod of this Deployment runs on and the Deployment's creation timestamp (Creation Timestamp).
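The commands for that last task are not shown; one possible approach, as a sketch assuming the httpd-dy Deployment above in the default namespace:

# Which node each pod runs on
kubectl get pod -l app=httpd -o wide
# The Deployment's creation timestamp
kubectl get deployment httpd-dy -o jsonpath='{.metadata.creationTimestamp}{"\n"}'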
kubeadm方式部署k8s集群 (Deploying a k8s cluster with kubeadm)
https://www.xiongan.host/index.php/archives/186/ 2022-12-07T08:58:00+08:00

Environment preparation

Node 1 (master): 192.168.123.200  master
Node 2 (worker): 192.168.123.201  slave

The files used below need to be downloaded separately (cloud drive link in the original post).

Configure /etc/hosts name resolution (both nodes):

[root@master ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.123.200 master-tz
192.168.123.201 slave01-tz

Disable the firewall, SELinux and swap (both nodes):

1. systemctl disable firewalld --now
2. setenforce 0
3. In /etc/selinux/config, set SELINUX=disabled
4. swapoff -a
5. Comment out the swap line in /etc/fstab: #/dev/mapper/centos-swap swap swap defaults 0 0

Configure kernel parameters so bridged traffic passes through iptables/netfilter (both nodes):

[root@master ~]# modprobe br_netfilter
[root@master ~]# echo "modprobe br_netfilter" >> /etc/profile
[root@master ~]# cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
[root@master ~]# sysctl -p /etc/sysctl.d/k8s.conf

Install the basic packages; the VMs must be able to reach the Internet (both nodes):

yum -y install wget vim ntpdate get

Configure time synchronization:

ntpdate ntp1.aliyun.com

Configure the yum repositories (both nodes):

[root@master ~]# rm -rf /etc/yum.repos.d/*
[root@master ~]# wget -O /etc/yum.repos.d/epel.repo https://mirrors.aliyun.com/repo/epel-7.repo
[root@master ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
[root@master ~]# wget -O kubernetes.sh https://www.xiongan.host/sh/kubernetes.sh && sh kubernetes.sh

Enable IPVS (both nodes). Upload ipvs.modules to /etc/sysconfig/modules/, then:

chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep ip_vs

Install kubeadm and the related packages (both nodes):

yum install -y kubelet-1.20.6 kubeadm-1.20.6 kubectl-1.20.6
systemctl enable kubelet

Note: the Docker version is 20.10.8.

What each package does:
kubeadm: the tool used to initialize the k8s cluster.
kubelet: installed on every node of the cluster; it starts the pods.
kubectl: used to deploy and manage applications, inspect resources, and create, delete and update components.

Initialize the k8s cluster with kubeadm. Upload k8simage-1-20-6.tar.gz to both nodes and load it (both nodes):

docker load -i k8simage-1-20-6.tar.gz

Run kubeadm init on the master node:

[root@master ~]# kubeadm init --kubernetes-version=1.20.6 --apiserver-advertise-address=192.168.123.200 --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=10.244.0.0/16 --ignore-preflight-errors=SystemVerification

--kubernetes-version sets the k8s version.
--apiserver-advertise-address must be specified if the master node has more than one NIC.
--pod-network-cidr sets the pod network range.
--image-repository registry.aliyuncs.com/google_containers manually points image pulls at registry.aliyuncs.com/google_containers; kubeadm pulls from k8s.gcr.io by default, which is unreachable here, so the Aliyun mirror repository is specified instead.

Configure the kubectl config file, which effectively authorizes kubectl with a certificate so it can manage the k8s cluster:

[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

On the slave node, run the join command printed by kubeadm init:

[root@slave01-tz ~]# kubeadm join 192.168.123.200:6443 --token d32tmx.utjgdkqxhy9sk517 \
> --discovery-token-ca-cert-hash sha256:d6a0bb61368c23be10444d7a18eab071b750c97c45186020980714fd57b13bdd

Check the nodes again on the master:

[root@master-tz ~]# kubectl get nodes
NAME         STATUS     ROLES                  AGE     VERSION
master-tz    NotReady   control-plane,master   8m31s   v1.20.6
slave01-tz   NotReady   <none>                 12s     v1.20.6

The cluster is still NotReady because no network plugin has been installed yet. To add more nodes later, run kubeadm token create --print-join-command on the master and execute its output on the new node.

Install the Calico network component (master node)

Upload calico.yaml to the master node:

[root@master ~]# kubectl apply -f calico.yaml

Running kubectl get nodes again now shows the nodes as Ready.

Install the dashboard (master node)

Upload dashboard_2_0_0.tar.gz and metrics-scrapter-1-0-1.tar.gz to both nodes, and kubernetes-dashboard.yaml to the master node.

[root@master ~]# docker load -i dashboard_2_0_0.tar.gz
[root@master ~]# docker load -i metrics-scrapter-1-0-1.tar.gz
[root@master ~]# kubectl apply -f kubernetes-dashboard.yaml
[root@master-tz ~]# kubectl get pods -n kubernetes-dashboard
NAME                                         READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-7445d59dfd-p572g   1/1     Running   0          10s
kubernetes-dashboard-54f5b6dc4b-5zxpm        1/1     Running   0          10s

The dashboard installed successfully. Check its Service:

[root@master-tz ~]# kubectl get svc -n kubernetes-dashboard
NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
dashboard-metrics-scraper   ClusterIP   10.101.56.238   <none>        8000/TCP   97s
kubernetes-dashboard        ClusterIP   10.97.126.230   <none>        443/TCP    97s

Change the Service type to NodePort:

[root@master ~]# kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
[root@master ~]# kubectl get svc -n kubernetes-dashboard

Then access it from a browser: https://192.168.123.200:30245

Log in to the dashboard with a token (master node)

Create an administrator token with permission to view any namespace and manage all resource objects:

[root@master-tz ~]# kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard
clusterrolebinding.rbac.authorization.k8s.io/dashboard-cluster-admin created
[root@master-tz ~]# kubectl get secret -n kubernetes-dashboard
NAME                               TYPE                                  DATA   AGE
default-token-scvqs                kubernetes.io/service-account-token   3      14m
kubernetes-dashboard-certs         Opaque                                0      14m
kubernetes-dashboard-csrf          Opaque                                1      14m
kubernetes-dashboard-key-holder    Opaque                                2      14m
kubernetes-dashboard-token-bs98s   kubernetes.io/service-account-token   3      14m
[root@master-tz ~]# kubectl describe secret kubernetes-dashboard-token-bs98s -n kubernetes-dashboard
Name:         kubernetes-dashboard-token-bs98s
Namespace:    kubernetes-dashboard
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: kubernetes-dashboard
              kubernetes.io/service-account.uid: d0842b14-e79e-4129-b6e3-bfd3c7039334
Type:  kubernetes.io/service-account-token
Data
====
ca.crt:     1066 bytes
namespace:  20 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IkdHTy1lQ2tndl9qQ29INUtEMEREMW1iUWhWeENOODB1Q2lOOERSYnN6OTQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1iczk4cyIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImQwODQyYjE0LWU3OWUtNDEyOS1iNmUzLWJmZDNjNzAzOTMzNCIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.f6iGY-QbB5YQFuaTkU6qR9UBTbFiIcDbpgT40E_ceQZGh3kdWyKzeTB-pWUkrJV1gWFaQt3Er7_brB-T7juO8eywunXkE6Xd_xH7XzaiWbNYFYfr3gMMXI8SmbnpqDKHclqw_tUIgun37ao7YYY_22_mYDdcTSIVFvx9XehK48eJWVfdyy-snuZiTKoR2pKMH0Rau3oXKlw7is8bV7yezeucZnaMPa60N-1KIMAvRM7gXlMX9m_BKiqvxEoru-2FDEoOkiCFXV-juGclxM_Qtn70i9R2JVjPgE5VX_gP7RFHDoXIEwykyjJqOg2fguE9Vy8nKnrfOo0c99aGXxnW_g

Log in with this token value.

Test: create an nginx service

Pull the nginx image:

docker pull nginx

Create the nginx application:

kubectl create deployment ngix-deployment1 --image nginx --port=80 --replicas=2

Create the Service:

kubectl expose deployment ngix-deployment1 --name=nginx --port=80 --target-port=80 --type=NodePort

Access the nginx service:

[root@master-tz ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP        109m
nginx        NodePort    10.110.242.218   <none>        80:31079/TCP   9s
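As a final check, the service should answer on any node's IP at the assigned node port; a quick sketch using the NodePort shown in the kubectl get svc output above:

# 31079 is the NodePort assigned to the nginx service
curl http://192.168.123.200:31079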
【docker】完成跨主机通信 ([docker] Cross-host container communication)
https://www.xiongan.host/index.php/archives/176/ 2022-11-16T10:58:00+08:00

Lab goal

Learn how to connect containers across hosts with Docker.

Prerequisites

The lab hosts must be able to reach the Internet, have Docker correctly installed, and have the firewall and SELinux disabled. The host configuration is shown in Table 1-1.

Table 1-1 Host configuration
Hostname   IP address/netmask    Container name
node1      192.168.123.88/24     Centos
node2      192.168.123.99/24     Centos

Lab steps

Step 1: Create containers on both hosts and test connectivity between them.
Step 2: Set up name resolution.
Step 3: Install the etcd and flannel services. node01 needs both services (etcd and flannel); node02 only needs flannel.
Step 4: Edit the etcd configuration file and start the etcd service on node01.
Notes:
1. ETCD_DATA_DIR is the etcd data directory.
2. ETCD_LISTEN_CLIENT_URLS is the client listen address.
3. ETCD_NAME is the node name.
4. ETCD_ADVERTISE_CLIENT_URLS is the address advertised to etcd clients.
Step 5: Test the etcd service (node01).
Step 6: Set the network segment (node01).
Note: to check a container's IP, run docker inspect <container name/id> | grep IPAddress
Step 7: Configure and restart the flannel service.
Step 8: Wire up the network between flannel0 and docker0 (both nodes).
Step 9: Configure the firewall (both nodes).
Step 10: Start containers and test connectivity (both nodes).

【swarm】docker环境下的集群服务 ([swarm] Cluster services with Docker Swarm)
https://www.xiongan.host/index.php/archives/139/ 2022-11-09T11:38:00+08:00

Introduction

Docker Swarm is Docker's native clustering tool, so no additional orchestration software is needed to create or manage a cluster. Docker Swarm is simpler to deploy and suits small application environments, especially simple and fast development.

Docker Client: the client
Swarm Manager: manager node
Scheduler: the scheduler
Discovery Service: service discovery
Swarm Node: worker node
Docker Containers: the containers

Basic commands

Cluster management commands:

docker swarm ca: display and rotate the root CA.
docker swarm init: initialize a cluster.
docker swarm join: join a cluster as a node.
docker swarm join-token: manage the tokens used to join the cluster.
docker swarm leave: leave the cluster.
docker swarm unlock: unlock the cluster.
docker swarm unlock-key: manage the unlock key.
docker swarm update: update the cluster.

Node management commands:

docker node demote: demote one or more manager nodes to worker nodes.
docker node inspect: show detailed information about one or more nodes.
docker node ls: list the nodes in the Swarm cluster.
docker node promote: promote one or more nodes to manager nodes.
docker node ps: list the tasks running on one or more nodes (the current node by default).
docker node rm: remove one or more nodes from the Swarm cluster.
docker node update: update a node's options, such as availability, labels or role.

Hands-on

Hostname   IP                 Role
Manager    192.168.123.100    control node
Worker01   192.168.123.101    worker 01
Worker02   192.168.123.102    worker 02

Initialize the Swarm cluster

Run the following command on the manager node; it initializes the cluster and prints the command (including the token) that other nodes use to join:

docker swarm init --advertise-addr 192.168.123.100

Add the nodes

Copy the join command printed by the previous step onto the worker01 and worker02 hosts.

Check the cluster information:

docker node list
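The join command itself comes from the docker swarm init output; its general shape, plus a quick service test on the manager, looks roughly like this. The token below is a placeholder, the manager IP is the one from the table above, and the web service name and port mapping are illustrative assumptions:

# Run on worker01 and worker02 (replace the placeholder token with the real one)
docker swarm join --token <worker-join-token> 192.168.123.100:2377

# Back on the manager: confirm all three nodes are listed, then start a test service
docker node ls
docker service create --name web --replicas 2 -p 8080:80 nginx
docker service ps web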