I. What Is Dual-NIC Bonding
NIC bonding combines two or more physical NICs into a single virtual interface. Its main benefits are higher aggregate bandwidth, load sharing across the member NICs, and redundancy: if one link fails, traffic continues over the remaining NICs. Together this improves network throughput, availability, and the overall reliability of the services behind it.
II. Bonding Modes
There are seven bonding modes, bond mode 0 through 6:
Mode | Meaning |
---|---|
mode=0 | balance-rr, round-robin load balancing |
mode=1 | active-backup, active/standby failover |
mode=2 | balance-xor, XOR-based transmit policy |
mode=3 | broadcast, transmit on all slaves |
mode=4 | 802.3ad, dynamic link aggregation (LACP) |
mode=5 | balance-tlb, adaptive transmit load balancing |
mode=6 | balance-alb, adaptive load balancing |
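How the mode is applied depends on the tool used later in this article (module options, BONDING_OPTS, or nmcli). On a host where a bond already exists, the active mode can be read back from sysfs; a small sketch, assuming the bond device is named bond0 as in the examples below:
# Show the mode of an existing bond (prints the mode name and number, e.g. "active-backup 1")
cat /sys/class/net/bond0/bonding/mode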
III. Configuration Methods in Different RHEL Versions
Before RHEL 7, NIC bonding was normally done with the bonding kernel module. Starting with RHEL 7, team is also supported for link aggregation, but bonding remains available in RHEL 7.
IV. RHEL 6: NIC Bonding with the bonding Module
1. Load and verify the bonding module
# Load the bonding module
[root@work1 ~]# modprobe bonding
[root@work1 ~]# lsmod | grep bonding
bonding 152656 0
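Before setting any options it can help to list the parameters the module accepts (mode, miimon, primary, and so on). A quick sketch; the exact list varies with the kernel version, so the output is omitted:
# List the parameters supported by the bonding module
modinfo bonding | grep '^parm'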
2. Configure the NICs
# Check the newly added NICs
[root@work1 ~]# ls /sys/class/net/
bond0 bonding_masters eth0 eth1 eth2 lo
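bond0 already appears here because loading the module creates one bond device by default (max_bonds=1). If it is missing, or an additional bond is needed, one can be created through the bonding_masters sysfs file; this is a sketch and not part of the session above, with bond1 as a purely illustrative name:
# Create (or remove) an extra bond device by hand
echo +bond1 > /sys/class/net/bonding_masters
echo -bond1 > /sys/class/net/bonding_masters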
# Configure the bond interface
[root@work1 network-scripts]# cat ifcfg-bond0
TYPE=Bond
BOOTPROTO=none
DEVICE=bond0
NAME=bond0
ONBOOT=yes
USERCTL=no  # non-root users may not control this interface
IPADDR=10.0.0.110
NETMASK=255.255.255.0
GATEWAY=10.0.0.254
DNS1=223.5.5.5
# Configure the two newly added NICs
[root@work1 network-scripts]# cat ifcfg-eth1
TYPE=Ethernet
BOOTPROTO=none
NAME=eth1
DEVICE=eth1
ONBOOT=yes
DEFROUTE=yes  # use this connection for the default route
IPV4_FAILURE_FATAL=no  # do not disable the device if IPv4 configuration fails
MASTER=bond0  # name of the bond (master) interface
SLAVE=yes  # run this NIC as a slave of the bond
[root@work1 network-scripts]# cat ifcfg-eth2
TYPE=Ethernet
BOOTPROTO=none
NAME=eth2
DEVICE=eth2
ONBOOT=yes
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
MASTER=bond0
SLAVE=yes
3. Edit the modprobe configuration so the bonding module loads with the desired options
# Use mode 1; the virtual interface exposed to the network is bond0
[root@work1 ~]# cat /etc/modprobe.d/bonding.conf
alias bond0 bonding
options bonding mode=1 miimon=200  # miimon=200: check the link state every 200 ms
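Files under /etc/modprobe.d/ are only read when the module is loaded, so if bonding was already loaded in step 1 the new mode and miimon values will not take effect until the module is reloaded or the host is rebooted. A rough way to apply them immediately, assuming no interface is actively using the bond at that moment:
# Reload the module so the new options are picked up (this tears down any existing bond)
modprobe -r bonding
modprobe bonding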
4. Restart the network service and check the bond
The network restarts successfully and the bond comes up.
[root@work1 parted]# service network restart
[root@work1 ~]# cat /proc/net/bonding/bond0
Ethernet Channel Bonding Driver: v3.7.1 (April 27, 2011)
Bonding Mode: load balancing (round-robin)
MII Status: up
MII Polling Interval (ms): 0
Up Delay (ms): 0
Down Delay (ms): 0
Slave Interface: eth1
MII Status: up
Speed: 1000 Mbps
Duplex: full
Link Failure Count: 0
Permanent HW addr: 00:0c:29:a1:35:3c
Slave queue ID: 0
Slave Interface: eth2
MII Status: up
Speed: 1000 Mbps
Duplex: full
Link Failure Count: 0
Permanent HW addr: 00:0c:29:a1:35:46
Slave queue ID: 0
5. Failover test
Take eth2 down; the remote host can still ping the bond address.
[root@work1 ~]# ifdown eth2
[root@work1 ~]# cat /proc/net/bonding/bond0
Ethernet Channel Bonding Driver: v3.7.1 (April 27, 2011)
Bonding Mode: load balancing (round-robin)
MII Status: up
MII Polling Interval (ms): 0
Up Delay (ms): 0
Down Delay (ms): 0
Slave Interface: eth1
MII Status: up
Speed: 1000 Mbps
Duplex: full
Link Failure Count: 0
Permanent HW addr: 00:0c:29:a1:35:3c
Slave queue ID: 0
[root@node1 ~]# ping 10.0.0.110
PING 10.0.0.110 (10.0.0.110) 56(84) bytes of data.
64 bytes from 10.0.0.110: icmp_seq=1 ttl=64 time=0.990 ms
64 bytes from 10.0.0.110: icmp_seq=2 ttl=64 time=0.697 ms
64 bytes from 10.0.0.110: icmp_seq=3 ttl=64 time=0.664 ms
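To finish the test, the downed NIC can be brought back and the bond checked again; a short sketch, output omitted:
# Re-enslave eth2 and confirm both slave interfaces are listed again
ifup eth2
cat /proc/net/bonding/bond0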
V. RHEL 7: NIC Bonding with the bonding Module
You can bond NICs in much the same way as on RHEL 6 by editing the ifcfg files directly, or you can configure the bond through NetworkManager. Starting with RHEL 7, configuring bonding through /etc/modprobe.d/bonding.conf or /etc/modprobe.conf is deprecated; the module options go into the ifcfg file (BONDING_OPTS) or the NetworkManager connection instead.
Load and verify the bonding module
[root@node1 ~]# modprobe bonding
[root@node1 ~]# lsmod | grep bonding
bonding 152656 0
Method 1: bond the NICs by editing the ifcfg files
1. Disable the NetworkManager service
# Prevent NetworkManager from interfering with or resetting the configuration
[root@node1 ~]# systemctl stop NetworkManager
[root@node1 ~]# systemctl disable NetworkManager
2. Configure the NICs
# Configure the bond interface
[root@work1 network-scripts]# cat ifcfg-bond0
TYPE=Bond
BOOTPROTO=none
DEVICE=bond0
NAME=bond0
ONBOOT=yes
USERCTL=no
IPADDR=10.0.0.110
NETMASK=255.255.255.0
GATEWAY=10.0.0.254
DNS1=223.5.5.5
BONDING_OPTS="miimon=100 mode=1"
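BONDING_OPTS takes the same parameters that were passed through modprobe on RHEL 6, written as space-separated key=value pairs. As an illustration only (primary=eth1 is a hypothetical value, not part of this setup), a mode 1 bond could also pin a preferred slave:
# Example BONDING_OPTS with a preferred slave (illustrative, not used below)
BONDING_OPTS="mode=1 miimon=100 primary=eth1"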
# Configure the two newly added NICs
[root@work1 network-scripts]# cat ifcfg-eth1
TYPE=Ethernet
BOOTPROTO=none
NAME=eth1
DEVICE=eth1
ONBOOT=yes
DEFROUTE=yes  # use this connection for the default route
IPV4_FAILURE_FATAL=no  # do not disable the device if IPv4 configuration fails
MASTER=bond0  # name of the bond (master) interface
SLAVE=yes  # run this NIC as a slave of the bond
[root@work1 network-scripts]# cat ifcfg-eth2
TYPE=Ethernet
BOOTPROTO=none
NAME=eth2
DEVICE=eth2
ONBOOT=yes
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
MASTER=bond0
SLAVE=yes
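With NetworkManager stopped, the classic network service applies these files. Restart it and verify the bond the same way as on RHEL 6; output omitted here:
# Apply the configuration and check the bond
systemctl restart network
cat /proc/net/bonding/bond0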
Method 2: bond the NICs with the network management tool nmcli
1. Enable the NetworkManager service
[root@node1 ~]# systemctl start NetworkManager
[root@node1 ~]# systemctl enable NetworkManager
2. Configure the NICs
# Create the bond interface bond0 and set its IP address
[root@node1 ~]# nmcli conn add type bond ifname bond0 con-name bond0 miimon 100 mode active-backup primary eth0 ipv4.method manual ipv4.addresses 10.0.0.102/24 ipv4.gateway 10.0.0.254
# Create the slave connections for bond0
[root@node1 ~]# nmcli conn add type bond-slave ifname eth0 con-name eth0 master bond0
[root@node1 ~]# nmcli conn add type bond-slave ifname eth1 con-name eth1 master bond0
# Activate the connections
[root@node1 ~]# nmcli conn up eth0
[root@node1 ~]# nmcli conn up eth1
[root@node1 ~]# nmcli conn up bond0
# Check the connection status
[root@node1 ~]# nmcli conn show
NAME UUID TYPE DEVICE
bond0 918d4c0c-821a-4f09-bff3-9e3117492cbc bond bond0
eth0 9b144e8b-8bb2-4f12-b3e2-dfeec4f735ec ethernet eth0
eth1 17f4cdfe-8c60-47a6-9248-8a499929bff9 ethernet eth1
3. Check the bond0 status
[root@node1 ~]# cat /proc/net/bonding/bond0
Ethernet Channel Bonding Driver: v3.7.1 (April 27, 2011)
Bonding Mode: fault-tolerance (active-backup)
Primary Slave: eth0 (primary_reselect always)
Currently Active Slave: eth0
MII Status: up
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0
Slave Interface: eth0
MII Status: up
Speed: 1000 Mbps
Duplex: full
Link Failure Count: 2
Permanent HW addr: 00:0c:29:9a:fa:da
Slave queue ID: 0
Slave Interface: eth1
MII Status: up
Speed: 1000 Mbps
Duplex: full
Link Failure Count: 2
Permanent HW addr: 00:0c:29:9a:fa:e4
Slave queue ID: 0
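A quick failover check, similar to the RHEL 6 test, can be driven from nmcli; a sketch with the output omitted:
# Fail the active slave, confirm eth1 takes over, then restore eth0
nmcli device disconnect eth0
cat /proc/net/bonding/bond0
nmcli device connect eth0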
Method 3: bond the NICs with the text UI tool nmtui
[root@node1 ~]# nmtui
VI. RHEL 7: NIC Teaming with team
1. Installation
[root@node1 ~]# yum install -y teamd
2. Configure the team
# Create the team interface; settings can be changed later with "nmcli conn modify cteam0"
[root@node1 ~]# nmcli conn add type team ifname iteam0 con-name cteam0 ipv4.addresses "10.0.0.120/24" ipv4.method manual ipv4.gateway "10.0.0.254" config '{"runner":{"name":"activebackup"}}'
# Create the port connections for cteam0
[root@node1 ~]# nmcli conn add type team-slave ifname eth2 con-name cteam-port1 master cteam0
[root@node1 ~]# nmcli conn add type team-slave ifname eth3 con-name cteam-port2 master cteam0
# Activate the connections
[root@node1 ~]# nmcli conn up cteam-port1
[root@node1 ~]# nmcli conn up cteam-port2
[root@node1 ~]# nmcli conn up cteam0
# Check the connection status
[root@node1 ~]# nmcli conn show
NAME UUID TYPE DEVICE
cteam0 8c8e5a6e-34ff-46a8-a545-04d3e7aa1302 team iteam0
cteam-port1 b1bdb127-6abf-4868-9919-da89aedb3460 ethernet eth2
cteam-port2 8b546d75-9b59-485f-94f2-2714e4ab9897 ethernet eth3
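The runner can be changed later with the "nmcli conn modify" command mentioned above; team.config is the property that holds the JSON, and the loadbalance runner below is only an example value:
# Switch the team to the loadbalance runner and re-activate it (example only)
nmcli conn modify cteam0 team.config '{"runner":{"name":"loadbalance"}}'
nmcli conn up cteam0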
3. Check the team state
[root@node1 ~]# teamdctl iteam0 state view
setup:
runner: activebackup
ports:
eth2
link watches:
link summary: up
instance[link_watch_0]:
name: ethtool
link: up
down count: 4
eth3
link watches:
link summary: up
instance[link_watch_0]:
name: ethtool
link: up
down count: 4
runner:
active port: eth2
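As with the bond, failover can be verified by failing the active port and watching the runner move traffic to the other port; a sketch, output omitted:
# Fail the active port, confirm eth3 takes over, then restore eth2
nmcli device disconnect eth2
teamdctl iteam0 state view
nmcli device connect eth2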