k8s Deployment Manual - v04
创始人
2024-03-16 18:17:40

I. Basic Configuration

1. Set the hostnames (run each command on the corresponding node)

hostnamectl set-hostname k8s-master01
hostnamectl set-hostname k8s-master02
hostnamectl set-hostname k8s-master03
hostnamectl set-hostname k8s-node01
hostnamectl set-hostname k8s-node02

2. Add hostname-to-IP resolution

cat > /etc/hosts <<EOF
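The hosts entries themselves were lost when this document was exported. A minimal sketch, assuming the IP-to-hostname mapping used later in the sealos init command (append rather than overwrite so the existing localhost entries are kept):

cat >> /etc/hosts <<EOF
192.168.1.60 k8s-master01
192.168.1.61 k8s-master02
192.168.1.62 k8s-master03
192.168.1.63 k8s-node01
192.168.1.64 k8s-node02
EOF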

3. DNS, time synchronization, firewall, swap, SELinux, and kernel parameters

# Add DNS so the host can reach the internet
cat > /etc/resolv.conf <<EOF
# (the nameserver entries and the start of the ntpdate time-sync cron line were truncated in the
#  original; only this fragment survived: /dev/null 2>&1'>/var/spool/cron/root && crontab -l)

# Set the firewall to iptables with empty rules
systemctl stop firewalld && systemctl disable firewalld
yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save

# Disable swap and SELinux
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

# Tune kernel parameters for K8S
cat > /etc/sysctl.d/kubernetes.conf <<EOF
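The sysctl values themselves were also lost. A commonly used baseline for Kubernetes nodes is sketched below; these values are assumptions, not the author's exact file:

cat > /etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
vm.overcommit_memory = 1
fs.inotify.max_user_watches = 89100
EOF
modprobe br_netfilter                       # required for the bridge-nf settings to exist
sysctl -p /etc/sysctl.d/kubernetes.conf     # load the settings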

4. Upgrade the kernel and reboot the server

rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
yum --enablerepo="elrepo-kernel" -y install kernel-lt.x86_64
awk -F \' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg
grub2-set-default "CentOS Linux (5.4.225-1.el7.elrepo.x86_64) 7 (Core)"
#grub2-set-default 'CentOS Linux (4.4.222-1.el7.elrepo.x86_64) 7 (Core)'
# Reboot the server
reboot
################################

II. Deploy k8s v1.19 with sealos

1. Install sealos 3.3


# Add DNS so the host can reach the internet
# (the heredoc body below, and the sealos 3.3 download/install commands of this step, were truncated in the original)
cat > /etc/resolv.conf <<EOF
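The nameserver lines did not survive. A minimal resolv.conf sketch with commonly used public resolvers; the actual servers the author used are an assumption:

cat > /etc/resolv.conf <<EOF
nameserver 223.5.5.5
nameserver 114.114.114.114
EOF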

2. Offline install of k8s 1.19

Link: https://pan.baidu.com/s/1F9sZoHBX1K1ihBP9rZSHBQ?pwd=jood
Extraction code: jood

# Install
sealos init --passwd 1qaz@WSX \
  --master 192.168.1.60 \
  --master 192.168.1.61 \
  --master 192.168.1.62 \
  --node 192.168.1.63 \
  --node 192.168.1.64 \
  --pkg-url /root/kube1.19.16.tar.gz \
  --version v1.19.16

3. Verify the cluster

kubectl get nodes
kubectl get pod -A

# Configure kubectl auto-completion
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> /etc/profile

# View taints
kubectl describe node | grep -i taints

# Remove taints (if pods should also be scheduled onto the masters)
#kubectl taint node k8s-master02 node-role.kubernetes.io/master:NoSchedule-
#kubectl taint node k8s-master03 node-role.kubernetes.io/master:NoSchedule-

4. Common sealos 3.3 commands

# Add node(s):
sealos join --node 192.168.1.63,192.168.1.64

# Add master(s):
sealos join --master 192.168.1.61,192.168.1.62

# Remove node(s):
sealos clean --node 192.168.1.63,192.168.1.64

# Remove master(s):
sealos clean --master 192.168.1.61,192.168.1.62

# Reset the whole cluster
sealos clean --all -f

5. Install the kubectl top command

# (the manifest body was truncated in the original)
cat > /root/top.yaml <<EOF
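Assuming /root/top.yaml holds a metrics-server style manifest (its original content is missing), applying it and checking resource usage would look like this sketch; the pod name filter is an assumption:

kubectl apply -f /root/top.yaml
kubectl get pod -n kube-system | grep -i metrics   # wait until the metrics pod is Running
kubectl top nodes
kubectl top pods -A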

III. Deploy NFS

1. Server side

# Add DNS so the host can reach the internet
cat > /etc/resolv.conf <<EOF
# (the rest of this step was truncated in the original; it ended with "... > /etc/exports",
#  i.e. writing the export entry for the shared directory; a hedged example follows below)

# Restart the services
systemctl restart rpcbind.service
systemctl restart nfs-utils.service
systemctl restart nfs-server.service

# Enable the NFS server at boot
systemctl enable rpcbind.service
systemctl enable nfs-utils.service
systemctl enable nfs-server.service

# Verify the NFS server is reachable
#showmount -e 192.168.1.60
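A minimal sketch of the server-side commands whose text was lost above, assuming the share is /nfs_dir; the export options are an assumption:

yum install -y nfs-utils rpcbind
mkdir -p /nfs_dir
echo '/nfs_dir *(rw,sync,no_root_squash)' > /etc/exports
showmount -e 192.168.1.60    # should list /nfs_dir after the services are restarted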

2. Client side

# Run on every server that needs the mount
mkdir /nfs_dir
yum install nfs-utils -y

# Mount
mount 192.168.1.60:/nfs_dir /nfs_dir

# Mount at boot
echo "mount 192.168.1.60:/nfs_dir /nfs_dir" >> /etc/rc.local
cat /etc/rc.local

IV. Deploy a StorageClass

1. Create nfs-sc.yaml

# (the manifest body was truncated in the original; a hedged sketch follows below)
cat > /root/nfs-sc.yaml <<EOF
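A minimal sketch of the StorageClass part of nfs-sc.yaml, using the nfs-boge name referenced later by Kuboard and KubeSphere. The provisioner name is an assumption, and the full original file would also contain the ServiceAccount, RBAC, and nfs-client-provisioner Deployment pointing at 192.168.1.60:/nfs_dir:

cat > /root/nfs-sc.yaml <<EOF
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-boge
provisioner: nfs-provisioner-01    # must match the PROVISIONER_NAME env of the nfs-client-provisioner
reclaimPolicy: Retain
EOF
kubectl apply -f /root/nfs-sc.yaml && kubectl get sc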

V. Set up a Harbor registry

1. Install

 
# Upload docker-compose and harbor-offline-installer-v2.4.1.tgz to /root
mv /root/docker-compose /usr/local/bin/
chmod a+x /usr/local/bin/docker-compose
ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose

tar -zxvf harbor-offline-installer-v2.4.1.tgz
mv harbor /usr/local/
cd /usr/local/harbor/
cp harbor.yml.tmpl harbor.yml
sed -i 's/hostname: reg.mydomain.com/hostname: 192.168.1.77/g' harbor.yml
sed -i 's/https/#https/g' harbor.yml
sed -i 's/certificate/#certificate/g' harbor.yml
sed -i 's/private_key/#private_key/g' harbor.yml

# Data directory
mkdir /data

cat /etc/docker/daemon.json
{
  "registry-mirrors": ["https://nr240upq.mirror.aliyuncs.com", "https://registry.docker-cn.com", "https://docker.mirrors.ustc.edu.cn", "https://dockerhub.azk8s.cn", "http://hub-mirror.c.163.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {"max-size": "100m"},
  "insecure-registries": ["192.168.1.77:80"]
}

systemctl daemon-reload && systemctl restart docker

# Install
./install.sh

## Restart harbor
cd /usr/local/harbor/
docker-compose down -v
docker-compose up -d
docker ps | grep harbor
netstat -ntlp

2. On other nodes that need to access the registry, add the following to daemon.json


##-------------------
vim /etc/docker/daemon.json

"registry-mirrors": ["https://nr240upq.mirror.aliyuncs.com", "https://registry.docker-cn.com", "https://docker.mirrors.ustc.edu.cn", "https://dockerhub.azk8s.cn"],
"insecure-registries": ["192.168.1.77:80"],
##-------------------

# Restart docker
systemctl daemon-reload && systemctl restart docker

3. Using the registry from a node

 
# Log in to the registry
docker login -u admin -p Harbor12345 192.168.1.77:80

# Pull an image
docker pull daocloud.io/library/nginx:1.9.1

# Tag the image for the registry
docker tag daocloud.io/library/nginx:1.9.1 192.168.1.77:80/library/nginx:1.9.1

# Push the image
docker push 192.168.1.77:80/library/nginx:1.9.1

# Remove the image
docker rmi 192.168.1.77:80/library/nginx:1.9.1

# Save an image to a local tar file
docker save k8s.gcr.io/coredns:1.7.0 > /root/coredns-v1.7.0.tar

# Load the tar file
docker load -i /root/coredns-v1.7.0.tar

4. Batch tag and push images to Harbor


cd /root

# List the image names on this server
docker images | awk 'NR!=1{print $1":"$2}' > 01-image-old.txt && cat 01-image-old.txt

# Replace / with - in the names
rm -rf 02-image-sed.txt && cp 01-image-old.txt 02-image-sed.txt && sed -i "s|/|-|g" 02-image-sed.txt && cat /root/02-image-sed.txt

# Script that tags the images for the Harbor registry
vim /root/03-tar-image.sh
#####################################################
#!/bin/sh
old=/root/01-image-old.txt
new=/root/02-image-sed.txt
l=$(cat /root/01-image-old.txt| wc -l)
for ((i=1 ; i<=$l ; i++))
do
a=$(sed -n "$i"p $old)
b=$(sed -n "$i"p $new)
#echo "update xxxx  set uid='$a' where uid='$b';"
docker tag $a 192.168.1.77:80/library/$b
done
######################################################

# Run the tagging script
bash /root/03-tar-image.sh
docker images | grep library

# List the tagged Harbor image names
docker images | grep 192.168.1.77 | awk '{print $1":"$2}' > 04-tar-image.txt && cat 04-tar-image.txt

# Push to the Harbor registry
for h in `cat 04-tar-image.txt`; do docker push $h; done

# Remove the tagged images
for d in `cat 04-tar-image.txt`; do docker rmi $d; done
docker images | grep library

# Remove the generated files
rm -rf /root/0*txt 03-tar-image.sh

VI. Kuboard management UI

1. Download

curl -o kuboard-v3.yaml https://addons.kuboard.cn/kuboard/kuboard-v3-storage-class.yaml

2. Edit the YAML

# In kuboard-v3.yaml one setting must be changed: storageClassName
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      # Fill in a valid StorageClass name
      storageClassName: nfs-boge
      accessModes: [ "ReadWriteMany" ]
      resources:
        requests:
          storage: 5Gi

3. Apply

kubectl create -f kuboard-v3.yaml
kubectl get pod -n kuboard

#############################################
# Access
http://192.168.1.60:30080/
# Log in with the initial credentials
# Username: admin
# Password: Kuboard123
#############################################

# Troubleshooting
journalctl -f -u kubelet.service

VII. Install Helm 3

1. Helm package download

 wget https://get.helm.sh/helm-v3.6.1-linux-amd64.tar.gz

2. Install helm

# Unpack and move the binary to /usr/bin
tar -xvf helm-v3.6.1-linux-amd64.tar.gz && cd linux-amd64/ && mv helm /usr/bin

# Check the version
helm version

3. Configure repositories

# Add public repositories
helm repo add incubator https://charts.helm.sh/incubator
helm repo add bitnami https://charts.bitnami.com/bitnami

# Microsoft mirror
helm repo add stable http://mirror.azure.cn/kubernetes/charts

# Aliyun mirrors
helm repo add aliyun https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
helm repo add stable https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
helm repo add google https://kubernetes-charts.storage.googleapis.com
helm repo add jetstack https://charts.jetstack.io

# List repositories
helm repo list

# Update repositories
helm repo update

# Remove a repository
#helm repo remove aliyun

# helm list

VIII. haproxy + keepalived + ingress

1. Deploy the Aliyun ingress-nginx

mkdir -p /data/k8s/
cd /data/k8s/
# (the manifest body was truncated in the original)
cat > /data/k8s/aliyun-ingress-nginx.yaml <<EOF
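Once the manifest is in place, applying and verifying it would look like this sketch (the controller pod names inside the Aliyun manifest are assumptions):

kubectl apply -f /data/k8s/aliyun-ingress-nginx.yaml
kubectl get pod -A -o wide | grep ingress   # controller pods should run on the nodes labelled in the next step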

2. Label the nodes

# Label the nodes that are allowed to run the ingress controller
kubectl label node k8s-master01 boge/ingress-controller-ready=true
kubectl label node k8s-master02 boge/ingress-controller-ready=true
kubectl label node k8s-master03 boge/ingress-controller-ready=true

# To remove the label again (note the trailing "-"):
#kubectl label node k8s-master01 boge/ingress-controller-ready-
#kubectl label node k8s-master02 boge/ingress-controller-ready-
#kubectl label node k8s-master03 boge/ingress-controller-ready-

3. Deploy haproxy + keepalived

3.0 Install

yum install haproxy keepalived -y

# Restart the services
systemctl restart haproxy.service
systemctl restart keepalived.service

# Check status
systemctl status haproxy.service
systemctl status keepalived.service

# Enable at boot
systemctl enable keepalived.service
systemctl enable haproxy.service

3.1 Configure haproxy

vim /etc/haproxy/haproxy.cfg
###################################################
listen ingress-http
    bind 0.0.0.0:80
    mode tcp
    option tcplog
    option dontlognull
    option dontlog-normal
    balance roundrobin
    server 192.168.1.60 192.168.1.60:80 check inter 2000 fall 2 rise 2 weight 1
    server 192.168.1.61 192.168.1.61:80 check inter 2000 fall 2 rise 2 weight 1
    server 192.168.1.62 192.168.1.62:80 check inter 2000 fall 2 rise 2 weight 1

listen ingress-https
    bind 0.0.0.0:443
    mode tcp
    option tcplog
    option dontlognull
    option dontlog-normal
    balance roundrobin
    server 192.168.1.60 192.168.1.60:443 check inter 2000 fall 2 rise 2 weight 1
    server 192.168.1.61 192.168.1.61:443 check inter 2000 fall 2 rise 2 weight 1
    server 192.168.1.62 192.168.1.62:443 check inter 2000 fall 2 rise 2 weight 1

3.2 keepalived configuration on machine A

cat > /etc/keepalived/keepalived.conf <<EOF
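The keepalived.conf body was lost above. A minimal sketch for machine A (MASTER), assuming the VIP 192.168.1.100 used later for nginx.boge.com; the interface name, router id, priority and password are assumptions:

cat > /etc/keepalived/keepalived.conf <<EOF
global_defs {
    router_id lb-master
}
vrrp_script check_haproxy {
    script "killall -0 haproxy"    # succeeds while haproxy is running
    interval 2
    weight -30
}
vrrp_instance VI_1 {
    state MASTER
    interface ens192               # change to this host's NIC name
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass boge123
    }
    virtual_ipaddress {
        192.168.1.100
    }
    track_script {
        check_haproxy
    }
}
EOF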

3.3 keepalived configuration on machine B

cat > /etc/keepalived/keepalived.conf <<EOF
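Machine B would use the same sketch as above with the VRRP role swapped, e.g. (again an assumption, not the author's exact file):

# starting from the machine A sketch:
sed -i 's/state MASTER/state BACKUP/; s/priority 100/priority 80/' /etc/keepalived/keepalived.conf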

3.4 Restart


# Restart the services
systemctl restart haproxy.service
systemctl restart keepalived.service

# Check status
systemctl status haproxy.service
systemctl status keepalived.service

4. Deploy nginx-ingress

# (the manifest body was truncated in the original; a hedged sketch follows below)
cat > /root/nginx-ingress.yaml <<EOF
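A minimal sketch of what /root/nginx-ingress.yaml likely contained, based on the test below (host nginx.boge.com); the image, ports and the v1beta1 Ingress apiVersion used on k8s 1.19 are assumptions:

cat > /root/nginx-ingress.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels: {app: nginx}
  template:
    metadata:
      labels: {app: nginx}
    spec:
      containers:
      - name: nginx
        image: nginx:1.19
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  selector: {app: nginx}
  ports:
  - port: 80
    targetPort: 80
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: nginx-ingress
spec:
  rules:
  - host: nginx.boge.com
    http:
      paths:
      - path: /
        backend:
          serviceName: nginx
          servicePort: 80
EOF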

5. Test nginx-ingress

  
kubectl apply -f /root/nginx-ingress.yaml

# Check the created ingress resources
kubectl get ingress -A

# Add a hosts entry on this server
echo "192.168.1.100 nginx.boge.com" >> /etc/hosts

# On other nodes, add the same local hosts entry to test the effect
20.6.1.226 nginx.boge.com

# Test
curl nginx.boge.com

IX. ELK log monitoring

1. Create a test tomcat

# (the manifest body was truncated in the original; a hedged sketch follows below)
cat > 01-tomcat-test.yaml <<EOF
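A minimal sketch of a test Deployment whose stdout log-pilot ships to the tomcat-access index used later in Kibana; the namespace, image tag and the exact aliyun_logs_ env name are assumptions to check against your log-pilot configuration:

cat > 01-tomcat-test.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tomcat-test
  namespace: logging
spec:
  replicas: 1
  selector:
    matchLabels: {app: tomcat-test}
  template:
    metadata:
      labels: {app: tomcat-test}
    spec:
      containers:
      - name: tomcat
        image: tomcat:8.5
        env:
        # log-pilot picks up env vars named aliyun_logs_<index> and ships the
        # matching log stream to Elasticsearch under that index name
        - name: aliyun_logs_tomcat-access
          value: "stdout"
        ports:
        - containerPort: 8080
EOF
kubectl apply -f 01-tomcat-test.yaml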

2. Deploy elasticsearch

cat > 02-elasticsearch.6.8.13-statefulset.yaml <<EOF
# (the beginning of the StatefulSet manifest was truncated in the original;
#  only the tail of its initContainers/volumes section survived, reformatted below)
        securityContext:
          privileged: true
      - name: fix-permissions
        image: busybox
        command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"]
        securityContext:
          privileged: true
        volumeMounts:
        - name: elasticsearch-logging
          mountPath: /usr/share/elasticsearch/data
      volumes:
      - name: elasticsearch-logging
        hostPath:
          path: /esdata
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: elasticsearch-logging
  name: elasticsearch
  namespace: logging
spec:
  ports:
  - port: 9200
    protocol: TCP
    targetPort: db
  selector:
    k8s-app: elasticsearch-logging
  type: ClusterIP
EOF

kubectl apply -f 02-elasticsearch.6.8.13-statefulset.yaml

3. Deploy kibana

# (the manifest body was truncated in the original)
cat > 03-kibana.6.8.13.yaml <<EOF

4. Deploy log-pilot

# (the manifest body was truncated in the original)
cat > 04-log-pilot.yml <<EOF

5. Configure the Kibana UI

Management > Index Patterns > Create index pattern

# Create the index pattern
Create index pattern > Index pattern (tomcat-access*) > Next step

# Choose the time field
Time Filter field name (@timestamp) > Create index pattern

# View the logs
Discover > tomcat-access*

X. Prometheus monitoring

1. Import the offline image packages

Link: https://pan.baidu.com/s/1DyMJPT8r_TUpI8Dr31SVew?pwd=m1bk
Extraction code: m1bk

# Load the uploaded tar packages
sudo docker load -i alertmanager-v0.21.0.tar
sudo docker load -i grafana-7.3.4.tar
sudo docker load -i k8s-prometheus-adapter-v0.8.2.tar
sudo docker load -i kube-rbac-proxy-v0.8.0.tar
sudo docker load -i kube-state-metrics-v1.9.7.tar
sudo docker load -i node-exporter-v1.0.1.tar
sudo docker load -i prometheus-config-reloader-v0.43.2.tar
sudo docker load -i prometheus_demo_service.tar
sudo docker load -i prometheus-operator-v0.43.2.tar
sudo docker load -i prometheus-v2.22.1.tar

2. Create the resources on the master node


# Unpack the downloaded code bundle
sudo unzip kube-prometheus-master.zip
sudo rm -f kube-prometheus-master.zip && cd kube-prometheus-master

# List the required images first, so the offline docker images can be collected
# in advance on a node with fast downloads
find ./ -type f | xargs grep 'image: ' | sort | uniq | awk '{print $3}' | grep ^[a-zA-Z] | grep -Evw 'error|kubeRbacProxy' | sort -rn | uniq

kubectl create -f manifests/setup
kubectl create -f manifests/

# Check the result after a while:
kubectl -n monitoring get all

# To wipe all the Prometheus services deployed above:
# kubectl delete --ignore-not-found=true -f manifests/ -f manifests/setup

3. Access the Prometheus UI

# Change the prometheus UI service to NodePort so it can be reached from outside
# kubectl -n monitoring patch svc prometheus-k8s -p '{"spec":{"type":"NodePort"}}'
service/prometheus-k8s patched

# kubectl -n monitoring get svc prometheus-k8s
NAME             TYPE       CLUSTER-IP    EXTERNAL-IP   PORT(S)          AGE
prometheus-k8s   NodePort   10.68.23.79   <none>        9090:22129/TCP   7m43s

3.1 Adjust the ClusterRole permissions

#   kubectl edit clusterrole prometheus-k8s
#------ original rules -------
rules:
- apiGroups:
  - ""
  resources:
  - nodes/metrics
  verbs:
  - get
- nonResourceURLs:
  - /metrics
  verbs:
  - get
#---------------------------

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: prometheus-k8s
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  - services
  - endpoints
  - pods
  - nodes/proxy
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - configmaps
  - nodes/metrics
  verbs:
  - get
- nonResourceURLs:
  - /metrics
  verbs:
  - get

4. Monitor ingress-nginx

# (the manifest body was truncated in the original; a hedged sketch follows below)
cat > servicemonitor.yaml <<EOF
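A minimal ServiceMonitor sketch that scrapes the ingress-nginx metrics Service; the namespace, labels and metrics port name are assumptions and must match your ingress-nginx deployment:

cat > servicemonitor.yaml <<EOF
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: nginx-ingress-scraping
  namespace: ingress-nginx
spec:
  selector:
    matchLabels:
      app: ingress-nginx          # labels on the ingress-nginx metrics Service
  namespaceSelector:
    matchNames:
    - ingress-nginx
  endpoints:
  - port: metrics                 # name of the metrics port exposed by the Service
    interval: 30s
EOF
kubectl apply -f servicemonitor.yaml
kubectl -n ingress-nginx get servicemonitor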

XI. Install KubeSphere 3.3

Official reference documentation

https://kubesphere.com.cn/docs/v3.3/pluggable-components/alerting/

1. Deploying KubeSphere requires a default StorageClass

kubectl edit sc nfs-boge

  metadata:
    annotations:
      storageclass.beta.kubernetes.io/is-default-class: "true"
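Equivalently (a sketch, not taken from the original), the same default-class annotation can be set without opening an editor:

kubectl patch storageclass nfs-boge -p '{"metadata":{"annotations":{"storageclass.beta.kubernetes.io/is-default-class":"true"}}}'
kubectl get sc   # the default StorageClass is shown with "(default)" next to its name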

2. Download the YAML

wget https://github.com/kubesphere/ks-installer/releases/download/v3.3.0/kubesphere-installer.yaml
wget https://github.com/kubesphere/ks-installer/releases/download/v3.3.0/cluster-configuration.yaml

# Edit cluster-configuration.yaml
# Under etcd, change endpointIps to the private IP address of your master node.
# endpointIps: XX.X.X.X

3. Apply the YAML

kubectl apply -f kubesphere-installer.yaml
kubectl apply -f cluster-configuration.yaml

4. Watch the installer logs

kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f

# Then open port 30880 on any node
# Username: admin
# Password: P@88w0rd

5. Fix the missing etcd monitoring certificates

kubectl -n kubesphere-monitoring-system create secret generic kube-etcd-client-certs  \
--from-file=etcd-client-ca.crt=/etc/kubernetes/pki/etcd/ca.crt  \
--from-file=etcd-client.crt=/etc/kubernetes/pki/etcd/healthcheck-client.crt  \
--from-file=etcd-client.key=/etc/kubernetes/pki/etcd/healthcheck-client.key

6. Enable alerting after installation

In cluster-configuration.yaml, search for alerting and change enabled from false to true to enable the alerting system, then save the file.

alerting:
  enabled: true # Change "false" to "true".

# Apply
kubectl apply -f kubesphere-installer.yaml
kubectl apply -f cluster-configuration.yaml

6.0 Configure DingTalk alerts

6.1 DingTalk custom robot configuration

Add a custom robot; under its security settings, tick **Sign (加签)**.

6.2 Steps

Top left > Platform Management > Platform Settings > Notification Management > Notification Configuration > DingTalk > Chatbot Configuration
Toggle on "Enabled"
Fill in your Webhook URL
Fill in your secret (from "Sign")
Send a test message
Confirm

Check the DingTalk group to verify that the test message arrived.

7. Enable the App Store after installation

In the same YAML file, search for openpitrix and change enabled from false to true. When done, click OK at the bottom right to save the configuration.

openpitrix:
  store:
    enabled: true # Change "false" to "true".

# Apply
kubectl apply -f kubesphere-installer.yaml
kubectl apply -f cluster-configuration.yaml

8. Enable the service mesh (Istio) after installation

In the configuration file, search for servicemesh and change enabled from false to true. When done, click OK at the bottom right to save the configuration.

servicemesh:
  enabled: true # Change "false" to "true".
  istio: # Customizing the istio installation configuration, refer to https://istio.io/latest/docs/setup/additional-setup/customize-installation/
    components:
      ingressGateways:
      - name: istio-ingressgateway # Exposes services outside the mesh. Disabled by default.
        enabled: false
      cni:
        enabled: false # When enabled, Istio pod traffic forwarding is set up during the network setup phase of the pod lifecycle.

9. Enable DevOps before installation

In the YAML file, search for devops and change enabled from false to true. When done, click OK at the bottom right to save the configuration.

devops:
  enabled: true # Change "false" to "true".

10. Uninstall


kubectl delete -f cluster-configuration.yaml --force
kubectl delete -f kubesphere-installer.yaml --force

# Remove leftover resources with a cleanup script
vi del.sh

#!/usr/bin/env bash

function delete_sure(){
  cat << eof
$(echo -e "\033[1;36mNote:\033[0m")
Delete the KubeSphere cluster, including the module kubesphere-system kubesphere-devops-system kubesphere-devops-worker kubesphere-monitoring-system kubesphere-logging-system openpitrix-system.
eof
  read -p "Please reconfirm that you want to delete the KubeSphere cluster.  (yes/no) " ans
  while [[ "x"$ans != "xyes" && "x"$ans != "xno" ]]; do
    read -p "Please reconfirm that you want to delete the KubeSphere cluster.  (yes/no) " ans
  done

  if [[ "x"$ans == "xno" ]]; then
    exit
  fi
}

delete_sure

# delete ks-installer
kubectl delete deploy ks-installer -n kubesphere-system 2>/dev/null

# delete helm
for namespaces in kubesphere-system kubesphere-devops-system kubesphere-monitoring-system kubesphere-logging-system openpitrix-system kubesphere-monitoring-federated
do
  helm list -n $namespaces | grep -v NAME | awk '{print $1}' | sort -u | xargs -r -L1 helm uninstall -n $namespaces 2>/dev/null
done

# delete kubefed
kubectl get cc -n kubesphere-system ks-installer -o jsonpath="{.status.multicluster}" | grep enable
if [[ $? -eq 0 ]]; then
  # delete kubefed types resources
  for kubefed in `kubectl api-resources --namespaced=true --api-group=types.kubefed.io -o name`
  do
    kubectl delete -n kube-federation-system $kubefed --all 2>/dev/null
  done
  for kubefed in `kubectl api-resources --namespaced=false --api-group=types.kubefed.io -o name`
  do
    kubectl delete $kubefed --all 2>/dev/null
  done
  # delete kubefed core resources
  for kubefed in `kubectl api-resources --namespaced=true --api-group=core.kubefed.io -o name`
  do
    kubectl delete -n kube-federation-system $kubefed --all 2>/dev/null
  done
  for kubefed in `kubectl api-resources --namespaced=false --api-group=core.kubefed.io -o name`
  do
    kubectl delete $kubefed --all 2>/dev/null
  done
  # uninstall kubefed chart
  helm uninstall -n kube-federation-system kubefed 2>/dev/null
fi

helm uninstall -n kube-system snapshot-controller 2>/dev/null

# delete kubesphere deployment & statefulset
kubectl delete deployment -n kubesphere-system `kubectl get deployment -n kubesphere-system -o jsonpath="{.items[*].metadata.name}"` 2>/dev/null
kubectl delete statefulset -n kubesphere-system `kubectl get statefulset -n kubesphere-system -o jsonpath="{.items[*].metadata.name}"` 2>/dev/null

# delete monitor resources
kubectl delete prometheus -n kubesphere-monitoring-system k8s 2>/dev/null
kubectl delete Alertmanager -n kubesphere-monitoring-system main 2>/dev/null
kubectl delete DaemonSet -n kubesphere-monitoring-system node-exporter 2>/dev/null
kubectl delete statefulset -n kubesphere-monitoring-system `kubectl get statefulset -n kubesphere-monitoring-system -o jsonpath="{.items[*].metadata.name}"` 2>/dev/null

# delete grafana
kubectl delete deployment -n kubesphere-monitoring-system grafana 2>/dev/null
kubectl --no-headers=true get pvc -n kubesphere-monitoring-system -o custom-columns=:metadata.namespace,:metadata.name | grep -E kubesphere-monitoring-system | xargs -n2 kubectl delete pvc -n 2>/dev/null

# delete pvc
pvcs="kubesphere-system|openpitrix-system|kubesphere-devops-system|kubesphere-logging-system"
kubectl --no-headers=true get pvc --all-namespaces -o custom-columns=:metadata.namespace,:metadata.name | grep -E $pvcs | xargs -n2 kubectl delete pvc -n 2>/dev/null

# delete rolebindings
delete_role_bindings() {
  for rolebinding in `kubectl -n $1 get rolebindings -l iam.kubesphere.io/user-ref -o jsonpath="{.items[*].metadata.name}"`
  do
    kubectl -n $1 delete rolebinding $rolebinding 2>/dev/null
  done
}

# delete roles
delete_roles() {
  kubectl -n $1 delete role admin 2>/dev/null
  kubectl -n $1 delete role operator 2>/dev/null
  kubectl -n $1 delete role viewer 2>/dev/null
  for role in `kubectl -n $1 get roles -l iam.kubesphere.io/role-template -o jsonpath="{.items[*].metadata.name}"`
  do
    kubectl -n $1 delete role $role 2>/dev/null
  done
}

# remove useless labels and finalizers
for ns in `kubectl get ns -o jsonpath="{.items[*].metadata.name}"`
do
  kubectl label ns $ns kubesphere.io/workspace-
  kubectl label ns $ns kubesphere.io/namespace-
  kubectl patch ns $ns -p '{"metadata":{"finalizers":null,"ownerReferences":null}}'
  delete_role_bindings $ns
  delete_roles $ns
done

# delete clusterroles
delete_cluster_roles() {
  for role in `kubectl get clusterrole -l iam.kubesphere.io/role-template -o jsonpath="{.items[*].metadata.name}"`
  do
    kubectl delete clusterrole $role 2>/dev/null
  done
  for role in `kubectl get clusterroles | grep "kubesphere" | awk '{print $1}'| paste -sd " "`
  do
    kubectl delete clusterrole $role 2>/dev/null
  done
}
delete_cluster_roles

# delete clusterrolebindings
delete_cluster_role_bindings() {
  for rolebinding in `kubectl get clusterrolebindings -l iam.kubesphere.io/role-template -o jsonpath="{.items[*].metadata.name}"`
  do
    kubectl delete clusterrolebindings $rolebinding 2>/dev/null
  done
  for rolebinding in `kubectl get clusterrolebindings | grep "kubesphere" | awk '{print $1}'| paste -sd " "`
  do
    kubectl delete clusterrolebindings $rolebinding 2>/dev/null
  done
}
delete_cluster_role_bindings

# delete clusters
for cluster in `kubectl get clusters -o jsonpath="{.items[*].metadata.name}"`
do
  kubectl patch cluster $cluster -p '{"metadata":{"finalizers":null}}' --type=merge
done
kubectl delete clusters --all 2>/dev/null

# delete workspaces
for ws in `kubectl get workspaces -o jsonpath="{.items[*].metadata.name}"`
do
  kubectl patch workspace $ws -p '{"metadata":{"finalizers":null}}' --type=merge
done
kubectl delete workspaces --all 2>/dev/null

# make DevOps CRs deletable
for devops_crd in $(kubectl get crd -o=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep "devops.kubesphere.io"); do
  for ns in $(kubectl get ns -ojsonpath='{.items..metadata.name}'); do
    for devops_res in $(kubectl get $devops_crd -n $ns -oname); do
      kubectl patch $devops_res -n $ns -p '{"metadata":{"finalizers":[]}}' --type=merge
    done
  done
done

# delete validatingwebhookconfigurations
for webhook in ks-events-admission-validate users.iam.kubesphere.io network.kubesphere.io validating-webhook-configuration resourcesquotas.quota.kubesphere.io
do
  kubectl delete validatingwebhookconfigurations.admissionregistration.k8s.io $webhook 2>/dev/null
done

# delete mutatingwebhookconfigurations
for webhook in ks-events-admission-mutate logsidecar-injector-admission-mutate mutating-webhook-configuration
do
  kubectl delete mutatingwebhookconfigurations.admissionregistration.k8s.io $webhook 2>/dev/null
done

# delete users
for user in `kubectl get users -o jsonpath="{.items[*].metadata.name}"`
do
  kubectl patch user $user -p '{"metadata":{"finalizers":null}}' --type=merge
done
kubectl delete users --all 2>/dev/null

# delete helm resources
for resource_type in `echo helmcategories helmapplications helmapplicationversions helmrepos helmreleases`; do
  for resource_name in `kubectl get ${resource_type}.application.kubesphere.io -o jsonpath="{.items[*].metadata.name}"`; do
    kubectl patch ${resource_type}.application.kubesphere.io ${resource_name} -p '{"metadata":{"finalizers":null}}' --type=merge
  done
  kubectl delete ${resource_type}.application.kubesphere.io --all 2>/dev/null
done

# delete workspacetemplates
for workspacetemplate in `kubectl get workspacetemplates.tenant.kubesphere.io -o jsonpath="{.items[*].metadata.name}"`
do
  kubectl patch workspacetemplates.tenant.kubesphere.io $workspacetemplate -p '{"metadata":{"finalizers":null}}' --type=merge
done
kubectl delete workspacetemplates.tenant.kubesphere.io --all 2>/dev/null

# delete federatednamespaces in namespace kubesphere-monitoring-federated
for resource in $(kubectl get federatednamespaces.types.kubefed.io -n kubesphere-monitoring-federated -oname); do
  kubectl patch "${resource}" -p '{"metadata":{"finalizers":null}}' --type=merge -n kubesphere-monitoring-federated
done

# delete crds
for crd in `kubectl get crds -o jsonpath="{.items[*].metadata.name}"`
do
  if [[ $crd == *kubesphere.io ]] || [[ $crd == *kubefed.io ]] ; then kubectl delete crd $crd 2>/dev/null; fi
done

# delete relevance ns
for ns in kube-federation-system kubesphere-alerting-system kubesphere-controls-system kubesphere-devops-system kubesphere-devops-worker kubesphere-logging-system kubesphere-monitoring-system kubesphere-monitoring-federated openpitrix-system kubesphere-system
do
  kubectl delete ns $ns 2>/dev/null
done

# Run the cleanup
sh del.sh

XII. Install GitLab

1. Prepare a separate server and install with Docker

docker search gitlab
docker pull gitlab/gitlab-ce

2. Prepare the docker-compose.yml file


mkdir -p /data/git
vim /data/git/docker-compose.yml

version: '3.1'
services:
  gitlab:
    image: 'gitlab/gitlab-ce:latest'
    container_name: gitlab
    restart: always
    environment:
      GITLAB_OMNIBUS_CONFIG: |
        external_url 'http://10.1.100.225:8929'   # IP of the server where GitLab is installed
        gitlab_rails['gitlab_shell_ssh_port'] = 2224
    ports:
      - '8929:8929'
      - '2224:2224'
    volumes:
      - './config:/etc/gitlab'
      - './logs:/var/log/gitlab'
      - './data:/var/opt/gitlab'

3. Start the container (this takes quite a while ...)

cd /data/git
docker-compose up -d

4. Open the GitLab home page

http://10.1.100.225:8929

5. Look up the initial root password

docker exec -it gitlab cat /etc/gitlab/initial_root_password

6. On first login, change the password

**Top right >>** Administrator > Preferences > Password

XIII. DevOps environment initialization

1. Install Jenkins, JDK, and Maven on a Linux host

1. Download locations

JDK download:
https://www.oracle.com/java/technologies/downloads/

Maven download:
https://maven.apache.org/download.cgi

2. Install the JDK and Maven

tar -zxvf jdk-8*.tar.gz -C /usr/local/
tar -zxvf apache-maven-*.tar.gz -C /usr/local/

cd /usr/local
mv apache-maven*/ maven
mv jdk1.8*/ jdk

2.1 Edit the Maven configuration

vim /usr/local/maven/conf/settings.xml

 

<!-- mirror: use the Aliyun Nexus mirror for the central repository -->
<mirror>
  <id>nexus-aliyun</id>
  <mirrorOf>central</mirrorOf>
  <name>Nexus aliyun</name>
  <url>http://maven.aliyun.com/nexus/content/groups/public</url>
</mirror>

<!-- profile: compile with JDK 1.8 by default -->
<profile>
  <id>jdk1.8</id>
  <activation>
    <activeByDefault>true</activeByDefault>
    <jdk>1.8</jdk>
  </activation>
  <properties>
    <maven.compiler.source>1.8</maven.compiler.source>
    <maven.compiler.target>1.8</maven.compiler.target>
    <maven.compiler.compilerVersion>1.8</maven.compiler.compilerVersion>
  </properties>
</profile>

<activeProfiles>
  <activeProfile>jdk1.8</activeProfile>
</activeProfiles>

3. Install Jenkins

3.1 Pull the image

docker pull jenkins/jenkins:2.319.1-lts

3.2 Create the compose file

mkdir -p /data/jenkins/
cd /data/jenkins/
vim /data/jenkins/docker-compose.yml

version: "3.1"
services:jenkins:image: jenkins/jenkinscontainer_name: jenkinsports:- 8080:8080- 50000:50000volumes:- ./data/:/var/jenkins_home/- /var/run/docker.sock:/var/run/docker.sock- /usr/bin/docker:/usr/bin/docker- /etc/docker/daemon.json:/etc/docker/daemon.json

3.3 Start Jenkins

 
# Give the Jenkins user access to the Docker socket
cd /var/run
chown root:root docker.sock
# Give other users read/write permission
chmod o+rw docker.sock

cd /data/jenkins/
docker-compose up -d

# Grant permissions on the data directory
chmod 777 /data/jenkins/data/
cat /data/jenkins/data/hudson.model.UpdateCenter.xml
# After the Jenkins container starts it has to download a lot of content, and the default
# update site is slow, so switch it to a mirror: edit hudson.model.UpdateCenter.xml in the
# data volume and replace the URL with http://mirror.esuni.jp/jenkins/updates/update-center.json
# The Tsinghua mirror also works:
# https://mirrors.tuna.tsinghua.edu.cn/jenkins/updates/update-center.json

# Restart
docker-compose restart
# View the logs
docker logs -f jenkins

3.4 Open the web UI and install plugins

http://10.1.100.225:8080
1. Enter the initial admin password  2. Select plugins to install  3. Click Install

4. Install Jenkins plugins

Chinese UI: 系统管理 > 插件管理 > 可选插件 > search for the plugin
English UI: Manage Jenkins > Manage Plugins > Available > search for the plugin
Locale
Localization
Git Parameter
Publish Over SSH

5. Configure Jenkins

mv /usr/local/maven/ /data/jenkins/data/
mv /usr/local/jdk/ /data/jenkins/data/

5.1 Use the local JDK

Dashboard > Manage Jenkins > Global Tool Configuration > Add JDK > untick "Install automatically"

NAME:       jdk8
JAVA_HOME:  /var/jenkins_home/jdk/

5.2 Use the local Maven

Dashboard > Manage Jenkins > Global Tool Configuration > Add Maven > untick "Install automatically"

NAME:        maven
MAVEN_HOME:  /var/jenkins_home/maven/

Save and Apply.

# Test Maven
mvn help:system

6. Jenkins deployment test (Publish over SSH)

Manage Jenkins > Configure System > Publish over SSH > SSH Servers > Add

# Name of the entry
Name:              test

# Host IP
Hostname:          10.1.100.25

# Host user
Username:          root

# Remote directory to publish into
Remote Directory:  /data/work/mytest

Click "Advanced"
Tick "Use password authentication, or use a different key"

# Server password
Passphrase / Password:  xxxx

Click "Test Configuration", then Save and Apply.

7. Set up passwordless SSH from the Jenkins server to the k8s-master server

# On the Jenkins server, enter the jenkins container
docker exec -it jenkins bash

# Inside the container, generate a key pair (press Enter at the prompts)
ssh-keygen -t rsa

# Inside the container, print the public key
cat /var/jenkins_home/.ssh/id_rsa.pub

# On the k8s-master server, append the Jenkins public key to authorized_keys
echo "xxxxxx" >> /root/.ssh/authorized_keys

XIV. Development environment: IDEA

Tool download:

Link: https://pan.baidu.com/s/1Jkyh_kgrT2o388Xiujbdeg?pwd=b7rx
Extraction code: b7rx

1. Configure Maven and the JDK on Windows

https://blog.csdn.net/weixin_46565024/article/details/122758111

2. Create a simple project in IDEA

File > New > Project > Spring Initializr > Next > Type (select Maven), Java Version (select 8) > Next > Web > tick Spring Web > Next > Finish

政策力挺消费!这波逢低布局机会... 板块轮动已经成为A股常态。被看作“长坡厚雪”的消费板块,已经走过了一段漫长的调整路,估值也跌回了历史...