跳至主要內容

负载均衡器OpenELB

分类:知识库 / 容器技术 / Kubernetes建设 · 标签:Kubernetes / KubeSphere · 阅读时长:大约 2 分钟

1、OpenELB 介绍

官网地址:https://openelb.io/

GitHub 地址:https://github.com/openelb/openelb

OpenELB 是由 KubeSphere 开发团队设计、已进入 CNCF 沙箱(Sandbox)的一款负载均衡插件。

它可以在裸金属服务器、边缘以及虚拟化的Kubernetes环境中使用 LoadBalancer 类型的 Service 对外暴露服务。

2、安装OpenELB

## Method 1: install with Helm (recommended)
# Add the KubeSphere test chart repo and install OpenELB into the
# kube-devops namespace (created on the fly if missing).
helm repo add test https://charts.kubesphere.io/test
helm repo update
helm install openelb test/openelb --namespace=kube-devops --create-namespace


## Method 2: install directly with kubectl
# Apply the upstream manifest, then verify the CRDs and the workloads
# (pods / deployment / services / admission jobs) in openelb-system.
# NOTE(review): method 1 installs into kube-devops while the checks below
# target openelb-system — they match method 2 only; confirm which namespace
# your installation actually uses.
kubectl apply -f https://raw.githubusercontent.com/openelb/openelb/master/deploy/openelb.yaml
kubectl get crd | grep network.kubesphere.io
kubectl get pod -n openelb-system
kubectl get deployment -n openelb-system
kubectl get svc -n openelb-system
kubectl get jobs.batch -n openelb-system

3、配置地址池

## Step 1: change kube-proxy's strictARP setting from false to true
# NOTE(review): per the OpenELB docs this is required for layer2 mode so
# that kube-proxy does not answer ARP for addresses it does not own —
# confirm against the docs for your OpenELB version.
kubectl get configmap -n kube-system kube-proxy -o yaml | grep strictARP
# Alternative: edit the configmap interactively instead of piping through sed
#kubectl edit configmap kube-proxy -n kube-system
# Dry run: rewrite strictARP in the streamed manifest and show only the diff
kubectl get configmap kube-proxy -n kube-system -o yaml | sed -e "s/strictARP: false/strictARP: true/" | kubectl diff -f - -n kube-system
# Once the diff looks right, apply the same rewrite for real
kubectl get configmap kube-proxy -n kube-system -o yaml | sed -e "s/strictARP: false/strictARP: true/" | kubectl apply -f - -n kube-system
# Restart kube-proxy so the new setting takes effect
kubectl rollout restart daemonset kube-proxy -n kube-system

## Step 2: create the Eip address pool OpenELB allocates VIPs from.
# Fix: use ">" (truncate) instead of ">>" (append) — re-running the original
# snippet appended a second copy of the manifest to eip-pool.yaml, producing
# an invalid file. The heredoc delimiter is quoted ('EOF') so the shell never
# expands anything inside the manifest.
cat > eip-pool.yaml <<'EOF'
apiVersion: network.kubesphere.io/v1alpha2
kind: Eip
metadata:
  name: eip-pool
  annotations:
    ## 默认的地址池
    eip.openelb.kubesphere.io/is-default-eip: "true"
spec:
  # Eip 对象的地址池
  address: 192.168.60.100-192.168.60.105
  # OpenELB 在其上侦听 ARP/NDP 请求的网卡。该字段仅在 protocol 设置为 layer2 时有效。
  interface: eth0
  # openELB的运行模式,默认为bgp
  protocol: layer2
  # 指定是否禁用Eip对象: false表示可以继续分配 | true表示不再继续分配
  disable: false
EOF
# Apply the pool and inspect its status (usage/used counters etc.).
kubectl apply -f eip-pool.yaml
kubectl get eip -o yaml

4、测试负载均衡器

# Create a test Deployment plus a LoadBalancer Service.
# Fix: ">" (truncate) instead of ">>" (append) — re-running the original
# snippet stacked duplicate documents into nginx.yaml. The quoted 'EOF'
# delimiter prevents any shell expansion inside the manifest.
cat > nginx.yaml << 'EOF'
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  namespace: default
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - name: http
          containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
  namespace: default
spec:
  allocateLoadBalancerNodePorts: false
  externalTrafficPolicy: Cluster
  internalTrafficPolicy: Cluster
  selector:
    app: nginx
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
  type: LoadBalancer
EOF
kubectl apply -f nginx.yaml
# Confirm the service keeps allocateLoadBalancerNodePorts: false
# (i.e. no NodePort was allocated alongside the LoadBalancer VIP).
kubectl get svc -n default nginx -o yaml | grep "allocateLoadBalancerNodePorts:"

## Pin a specific VIP: annotations select OpenELB/layer2/eip-pool, and
## loadBalancerIP requests a fixed address from that pool.
# Fix 1: ">" (truncate) instead of ">>" so reruns overwrite service.yaml
# rather than appending a duplicate manifest.
# Fix 2: the original created the file but never applied it — apply added below.
cat > service.yaml << 'EOF'
apiVersion: v1
kind: Service
metadata:
  name: nginx2
  namespace: default
  annotations:
    lb.kubesphere.io/v1alpha1: openelb
    protocol.openelb.kubesphere.io/v1alpha1: layer2
    eip.openelb.kubesphere.io/v1alpha2: eip-pool
spec:
  allocateLoadBalancerNodePorts: false
  externalTrafficPolicy: Cluster
  internalTrafficPolicy: Cluster
  selector:
    app: nginx
  ports:
  - name: http-80
    port: 80
    protocol: TCP
    targetPort: 80
  type: LoadBalancer
  loadBalancerIP: 192.168.60.101
EOF
kubectl apply -f service.yaml

5、OpenELB 高可用

## Multi-NIC nodes: tell OpenELB which address/NIC to announce on.
# Show existing node annotations first.
kubectl describe node | grep -A5 Annotations
# Pin the layer2 announce address for node master1.
# NOTE(review): 192.168.60.91 must be an address actually configured on that
# node's NIC — verify before applying.
kubectl annotate nodes master1 layer2.openelb.kubesphere.io/v1alpha1="192.168.60.91"
# Inspect the ARP neighbour table for interface ens192.
ip neigh | grep ens192

## Label the nodes that are allowed to run openelb-manager.
# NOTE(review): the original text says "three nodes" but only node01 and
# node02 are labelled here — confirm the intended node list.
kubectl label --overwrite nodes node01 node02 lb.kubesphere.io/v1alpha1=openelb
## Show node labels to confirm the label landed.
kubectl get nodes -o wide --show-labels=true | grep openelb

## Scale openelb-manager down to 0 before changing its scheduling rules.
kubectl scale deployment openelb-manager --replicas=0 -n openelb-system
## Edit the deployment and add the nodeSelector shown below so the pods are
## pinned to the labelled nodes.
# Fix: the original used "kubectl get ... -o yaml", which only prints the
# manifest; "kubectl edit" actually opens it for modification and applies it.
kubectl edit deployment openelb-manager -n openelb-system
#     nodeSelector:
#       kubernetes.io/os: linux
#       lb.kubesphere.io/v1alpha1: openelb
## Scale back up to 3 replicas for HA.
kubectl scale deployment openelb-manager --replicas=3 -n openelb-system
## Verify the pods are spread across the labelled nodes.
kubectl get pod -n openelb-system -o wide