DPVS-3: Two-Arm Load Balancing Test

Published: 2025-02-25

Test Topology

Two-arm mode uses two NICs: one facing the external network (client side) and one facing the internal network (RS side).

The client host is a physical machine and the RS hosts are virtual machines.
The LB host is a physical machine with two ConnectX-5 (CX5) NICs, one in each subnet.
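Pieced together from the addresses used below, the layout is:

client 10.0.0.192 ------- 10.0.0.0/24 ------- dpdk0 (VIP 10.0.0.100)
                                              DPVS LB host (r750-132)
3 x RS 192.168.100.3-5 -- 192.168.100.0/24 -- dpdk1 (LADDR 192.168.100.200)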

Configuration File

dpvs.conf.sample is used as the two-arm configuration file. Compared with the single-NIC setup, it adds a second device block, and each worker gets an extra set of rx/tx queues for that interface.

netif_defs {
    <init> pktpool_size     1048575
    <init> pktpool_cache    256
    <init> fdir_mode        perfect
    # two interfaces: dpdk0 and dpdk1
    <init> device dpdk0 {
        rx {
            queue_number        8
            descriptor_number   1024
            rss                 all
        }
        tx {
            queue_number        8
            descriptor_number   1024
        }
        ! mtu                   1500
        ! promisc_mode
        ! allmulticast
        kni_name                dpdk0.kni
    }

    <init> device dpdk1 {
        rx {
            queue_number        8
            descriptor_number   1024
            rss                 all
        }
        tx {
            queue_number        8
            descriptor_number   1024
        }
        ! mtu                   1500
        ! promisc_mode
        ! allmulticast
        kni_name                dpdk1.kni
    }
}
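Two details worth noting in netif_defs: rss all hashes incoming flows across the 8 rx queues, while fdir_mode perfect enables the per-queue Flow Director filters that FullNAT's local-address pool relies on to steer packets destined to a laddr:port back to the worker that owns the connection.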


worker_defs {
    <init> worker cpu0 {
        type    master
        cpu_id  0
    }
    # each worker also gets rx/tx queues on the second interface
    <init> worker cpu1 {
        type    slave
        cpu_id  1
        port    dpdk0 {
            rx_queue_ids     0
            tx_queue_ids     0
            ! isol_rx_cpu_ids  9
            ! isol_rxq_ring_sz 1048576
        }
        port    dpdk1 {
            rx_queue_ids     0
            tx_queue_ids     0
            ! isol_rx_cpu_ids  9
            ! isol_rxq_ring_sz 1048576
        }
    }
    # workers cpu2 through cpu8 are configured the same way, taking queue
    # indexes 1 through 7 (see the sketch after this block)
}
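For reference, a sketch of the cpu2 entry following that pattern, consistent with the NETIF startup log below (each subsequent worker takes the next queue index on both ports):

    <init> worker cpu2 {
        type    slave
        cpu_id  2
        port    dpdk0 {
            rx_queue_ids     1
            tx_queue_ids     1
        }
        port    dpdk1 {
            rx_queue_ids     1
            tx_queue_ids     1
        }
    }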

Starting DPVS

On the command line, the EAL options -a 98:00.0 -a 98:00.1 (after the -- separator) allow-list the two NICs by PCI address; -l 0-8 assigns lcores 0 through 8, matching the one master plus eight slave workers in the config.

./dpvs -c ../conf/dpvs.conf.sample -- -a 98:00.0 -a 98:00.1 -l 0-8
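If startup aborts with memory-allocation errors, make sure hugepages are reserved first. A quick check, assuming the standard procfs/sysfs layout (the log below shows 2 MB pages in use and no 1 GB pages):

grep Huge /proc/meminfo
# reserve 2 MB pages if needed, e.g.:
echo 8192 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages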

root@r750-132:~/dpvs/bin# ./dpvs -c ../conf/dpvs.conf.sample -- -a 98:00.0 -a 98:00.1 -l 0-8
current thread affinity is set to FFFFFFFF
EAL: Detected 32 lcore(s)
EAL: Detected 2 NUMA nodes
EAL: Detected static linkage of DPDK
EAL: Multi-process socket /var/run/dpdk/rte/mp_socket
EAL: Selected IOVA mode 'PA'
EAL: No available hugepages reported in hugepages-1048576kB
EAL: Probing VFIO support...
EAL: VFIO support initialized
EAL: Probe PCI driver: mlx5_pci (15b3:1017) device: 0000:98:00.0 (socket 1)
EAL: Probe PCI driver: mlx5_pci (15b3:1017) device: 0000:98:00.1 (socket 1)
EAL: No legacy callbacks, legacy socket not created
DPVS: dpvs version: 1.9-8, build on 2025.02.20.15:37:58
DPVS: dpvs-conf-file: ../conf/dpvs.conf.sample
DPVS: dpvs-pid-file: /var/run/dpvs.pid
DPVS: dpvs-ipc-file: /var/run/dpvs.ipc
CFG_FILE: Opening configuration file '../conf/dpvs.conf.sample'.
CFG_FILE: log_level = WARNING
NETIF: dpdk0:rx_queue_number = 8
NETIF: dpdk1:rx_queue_number = 8
NETIF: worker cpu1:dpdk0 rx_queue_id += 0
NETIF: worker cpu1:dpdk0 tx_queue_id += 0
NETIF: worker cpu1:dpdk1 rx_queue_id += 0
NETIF: worker cpu1:dpdk1 tx_queue_id += 0
NETIF: worker cpu2:dpdk0 rx_queue_id += 1
NETIF: worker cpu2:dpdk0 tx_queue_id += 1
NETIF: worker cpu2:dpdk1 rx_queue_id += 1
NETIF: worker cpu2:dpdk1 tx_queue_id += 1
NETIF: worker cpu3:dpdk0 rx_queue_id += 2
NETIF: worker cpu3:dpdk0 tx_queue_id += 2
NETIF: worker cpu3:dpdk1 rx_queue_id += 2
NETIF: worker cpu3:dpdk1 tx_queue_id += 2
NETIF: worker cpu4:dpdk0 rx_queue_id += 3
NETIF: worker cpu4:dpdk0 tx_queue_id += 3
NETIF: worker cpu4:dpdk1 rx_queue_id += 3
NETIF: worker cpu4:dpdk1 tx_queue_id += 3
NETIF: worker cpu5:dpdk0 rx_queue_id += 4
NETIF: worker cpu5:dpdk0 tx_queue_id += 4
NETIF: worker cpu5:dpdk1 rx_queue_id += 4
NETIF: worker cpu5:dpdk1 tx_queue_id += 4
NETIF: worker cpu6:dpdk0 rx_queue_id += 5
NETIF: worker cpu6:dpdk0 tx_queue_id += 5
NETIF: worker cpu6:dpdk1 rx_queue_id += 5
NETIF: worker cpu6:dpdk1 tx_queue_id += 5
NETIF: worker cpu7:dpdk0 rx_queue_id += 6
NETIF: worker cpu7:dpdk0 tx_queue_id += 6
NETIF: worker cpu7:dpdk1 rx_queue_id += 6
NETIF: worker cpu7:dpdk1 tx_queue_id += 6
NETIF: worker cpu8:dpdk0 rx_queue_id += 7
NETIF: worker cpu8:dpdk0 tx_queue_id += 7
NETIF: worker cpu8:dpdk1 rx_queue_id += 7
NETIF: worker cpu8:dpdk1 tx_queue_id += 7
SAPOOL: sapool_filter_enable = on
NETIF: Ethdev port_id=0 invalid tx_offload: 0x1000e, valid value: 0xc96af
NETIF: Ethdev port_id=1 invalid tx_offload: 0x1000e, valid value: 0xc96af
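The two "invalid tx_offload" lines are warnings that not every requested tx offload bit is supported by these mlx5 ports; startup continues regardless. Once DPVS is up, the ports can be inspected with dpip (part of the DPVS toolset):

./dpip link show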

Load Balancer Configuration

# Add the VIP
./dpip addr add 10.0.0.100/32 dev dpdk0

# Add the load-balancing service, round-robin scheduling
./ipvsadm -A -t 10.0.0.100:80 -s rr

# Add 3 RSs in FULLNAT mode (-b)
./ipvsadm -a -t 10.0.0.100:80 -r 192.168.100.3:80 -b
./ipvsadm -a -t 10.0.0.100:80 -r 192.168.100.4:80 -b
./ipvsadm -a -t 10.0.0.100:80 -r 192.168.100.5:80 -b

# Add a LOCAL IP on dpdk1 for the service 10.0.0.100:80
./ipvsadm --add-laddr -z 192.168.100.200 -t 10.0.0.100:80 -F dpdk1

# Add routes for the two subnets
./dpip route add 10.0.0.0/16 dev dpdk0
./dpip route add 192.168.100.0/24 dev dpdk1
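On the RS side, FullNAT needs no special configuration; each RS simply sees TCP connections from the local address 192.168.100.200. The web server actually running on the RSs is not shown in this post; a minimal stand-in that would produce the responses seen in the curl test below (hypothetical, with the matching index per RS):

# on one RS, serving as "Server 0", for example
mkdir -p /tmp/www && echo 'This is Server 0 !' > /tmp/www/index.html
cd /tmp/www && python3 -m http.server 80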

Verifying the Configuration

Service configuration

root@r750-132:~/dpvs/bin# ./ipvsadm -L -n
IP Virtual Server version 1.9.8 (size=0)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.0.0.100:80 rr
  -> 192.168.100.3:0              FullNat 1      0          0         
  -> 192.168.100.4:0              FullNat 1      0          0         
  -> 192.168.100.5:0              FullNat 1      0          0      
root@r750-132:~/dpvs/bin# ./ipvsadm  -G
VIP:VPORT            TOTAL    SNAT_IP              CONFLICTS  CONNS     
10.0.0.100:80        1        
                              192.168.100.200      0          0   

Interface and route configuration

root@r750-132:~/dpvs/bin# ./dpip addr show
inet 10.0.0.100/32 scope global dpdk0
     valid_lft forever preferred_lft forever
inet6 fe80::eaeb:d3ff:fea3:8377/64 scope link dpdk1
     valid_lft forever preferred_lft forever
inet 192.168.100.200/32 scope global dpdk1
     valid_lft forever preferred_lft forever
inet6 fe80::eaeb:d3ff:fea3:8376/64 scope link dpdk0
     valid_lft forever preferred_lft forever
root@r750-132:~/dpvs/bin# 
root@r750-132:~/dpvs/bin# ./dpip route show
inet 10.0.0.100/32 via 0.0.0.0 src 0.0.0.0 dev dpdk0 mtu 1500 tos 0 scope host metric 0 proto auto 
inet 192.168.100.200/32 via 0.0.0.0 src 0.0.0.0 dev dpdk1 mtu 1500 tos 0 scope host metric 0 proto auto 
inet 192.168.100.0/24 via 0.0.0.0 src 0.0.0.0 dev dpdk1 mtu 1500 tos 0 scope link metric 0 proto auto 
inet 10.0.0.0/16 via 0.0.0.0 src 0.0.0.0 dev dpdk0 mtu 1500 tos 0 scope link metric 0 proto auto 
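Note that only the /16 and /24 link-scope routes were added by hand; the two /32 host routes appeared automatically when the addresses were created with dpip addr add.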

Checking Connectivity

Client pings the VIP on dpdk0

# Client ping dpdk0
[root@dkdp192 ~]# ifconfig enp1s0f0np0 10.0.0.192/24
[root@dkdp192 ~]# 
[root@dkdp192 ~]# ping 10.0.0.100
PING 10.0.0.100 (10.0.0.100) 56(84) bytes of data.
64 bytes from 10.0.0.100: icmp_seq=1 ttl=64 time=0.153 ms
64 bytes from 10.0.0.100: icmp_seq=2 ttl=64 time=0.068 ms
64 bytes from 10.0.0.100: icmp_seq=3 ttl=64 time=0.080 ms
^C
--- 10.0.0.100 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2063ms
rtt min/avg/max/mdev = 0.068/0.100/0.153/0.038 ms

RS pings the LOCAL IP on dpdk1

root@ubuntu22:~# ping 192.168.100.200
PING 192.168.100.200 (192.168.100.200) 56(84) bytes of data.
64 bytes from 192.168.100.200: icmp_seq=1 ttl=64 time=0.328 ms
64 bytes from 192.168.100.200: icmp_seq=2 ttl=64 time=0.118 ms
64 bytes from 192.168.100.200: icmp_seq=3 ttl=64 time=0.085 ms
^C
--- 192.168.100.200 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2042ms
rtt min/avg/max/mdev = 0.085/0.177/0.328/0.107 ms

curl Test

The test succeeds: 18 requests are spread roughly evenly across the three RSs. The responses do not come back in a strict 0, 1, 2 rotation; with eight worker lcores, each worker most likely keeps its own round-robin position, so the global order interleaves.

[root@dkdp192 ~]# curl 10.0.0.100:80
This is Server 2 !
[root@dkdp192 ~]# curl 10.0.0.100:80
This is Server 1 !
[root@dkdp192 ~]# curl 10.0.0.100:80
This is Server 2 !
[root@dkdp192 ~]# curl 10.0.0.100:80
This is Server 0 !
[root@dkdp192 ~]# curl 10.0.0.100:80
This is Server 0 !
[root@dkdp192 ~]# curl 10.0.0.100:80
This is Server 0 !
[root@dkdp192 ~]# curl 10.0.0.100:80
This is Server 1 !
[root@dkdp192 ~]# curl 10.0.0.100:80
This is Server 2 !
[root@dkdp192 ~]# curl 10.0.0.100:80
This is Server 1 !
[root@dkdp192 ~]# curl 10.0.0.100:80
This is Server 0 !
[root@dkdp192 ~]# curl 10.0.0.100:80
This is Server 1 !
[root@dkdp192 ~]# curl 10.0.0.100:80
This is Server 2 !
[root@dkdp192 ~]# curl 10.0.0.100:80
This is Server 2 !
[root@dkdp192 ~]# curl 10.0.0.100:80
This is Server 2 !
[root@dkdp192 ~]# curl 10.0.0.100:80
This is Server 0 !
[root@dkdp192 ~]# curl 10.0.0.100:80
This is Server 2 !
[root@dkdp192 ~]# curl 10.0.0.100:80
This is Server 1 !
[root@dkdp192 ~]# curl 10.0.0.100:80
This is Server 1 !
[root@dkdp192 ~]#
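To double-check the distribution, the per-RS connection counters can also be inspected on the LB (assuming --stats behaves as in standard ipvsadm):

./ipvsadm -L -n --stats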