Thursday, December 11, 2014

Linux NIC Teaming or NIC Bonding


[root@node1 ~]# cat /etc/modprobe.d/modprobe.conf
alias eth0 e1000
alias eth1 e1000
alias eth2 e1000
alias eth3 e1000
alias bond0 bonding
options bond0 max_bonds=2 miimon=100 mode=1
alias bond1 bonding
options bond1 max_bonds=2 miimon=100 mode=1
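Side note: on RHEL 6 and later, the bonding options can instead be set in the bond's ifcfg file, making the options lines above unnecessary. A minimal sketch, adding one line to ifcfg-bond0:

BONDING_OPTS="mode=1 miimon=100"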
[root@node1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-bond0
DEVICE=bond0
IPADDR=192.168.1.11
NETMASK=255.255.255.0
BOOTPROTO=static
ONBOOT=yes
USERCTL=no
IPV6INIT=no
[root@node1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-bond1
DEVICE=bond1
IPADDR=172.24.1.11
NETMASK=255.255.0.0
BOOTPROTO=static
ONBOOT=yes
USERCTL=no
IPV6INIT=no
[root@node1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0
# Intel Corporation 82540EM Gigabit Ethernet Controller
DEVICE=eth0
BOOTPROTO=static
HWADDR=08:00:27:EA:1C:EC
MASTER=bond0
SLAVE=yes
ONBOOT=yes
USERCTL=no
[root@node1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth1
# Intel Corporation 82540EM Gigabit Ethernet Controller
DEVICE=eth1
BOOTPROTO=static
HWADDR=08:00:27:12:A1:28
MASTER=bond1
SLAVE=yes
ONBOOT=yes
USERCTL=no
[root@node1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth2
# Intel Corporation 82540EM Gigabit Ethernet Controller
DEVICE=eth2
BOOTPROTO=static
HWADDR=08:00:27:32:CD:DE
MASTER=bond0
SLAVE=yes
ONBOOT=yes
USERCTL=no
[root@node1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth3
# Intel Corporation 82540EM Gigabit Ethernet Controller
DEVICE=eth3
BOOTPROTO=static
HWADDR=08:00:27:EE:C4:38
MASTER=bond1
SLAVE=yes
ONBOOT=yes
USERCTL=no
[root@node1 ~]# mii-tool
eth0: no autonegotiation, 100baseTx-FD, link ok
eth1: no autonegotiation, 100baseTx-FD, link ok
eth2: no autonegotiation, 100baseTx-FD, link ok
eth3: no autonegotiation, 100baseTx-FD, link ok
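Note that mii-tool predates gigabit Ethernet and can misreport gigabit links; ethtool shows the same link, speed, and duplex details. For example:

[root@node1 ~]# ethtool eth0 | grep -E 'Speed|Duplex|Link detected'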
[root@node1 ~]# ifconfig -a
bond0     Link encap:Ethernet  HWaddr 08:00:27:EA:1C:EC
          inet addr:192.168.1.11  Bcast:192.168.1.255  Mask:255.255.255.0
          inet6 addr: fe80::a00:27ff:feea:1cec/64 Scope:Link
          UP BROADCAST RUNNING MASTER MULTICAST  MTU:1500  Metric:1
          RX packets:2473 errors:0 dropped:0 overruns:0 frame:0
          TX packets:1645 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:236857 (231.3 KiB)  TX bytes:219985 (214.8 KiB)

bond1     Link encap:Ethernet  HWaddr 08:00:27:12:A1:28
          inet addr:172.24.1.11  Bcast:172.24.255.255  Mask:255.255.0.0
          inet6 addr: fe80::a00:27ff:fe12:a128/64 Scope:Link
          UP BROADCAST RUNNING MASTER MULTICAST  MTU:1500  Metric:1
          RX packets:581 errors:0 dropped:0 overruns:0 frame:0
          TX packets:53 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:60072 (58.6 KiB)  TX bytes:7215 (7.0 KiB)

eth0      Link encap:Ethernet  HWaddr 08:00:27:EA:1C:EC
          UP BROADCAST RUNNING SLAVE MULTICAST  MTU:1500  Metric:1
          RX packets:2243 errors:0 dropped:0 overruns:0 frame:0
          TX packets:1663 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:215657 (210.6 KiB)  TX bytes:222773 (217.5 KiB)

eth1      Link encap:Ethernet  HWaddr 08:00:27:12:A1:28
          UP BROADCAST RUNNING SLAVE MULTICAST  MTU:1500  Metric:1
          RX packets:346 errors:0 dropped:0 overruns:0 frame:0
          TX packets:53 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:38572 (37.6 KiB)  TX bytes:7215 (7.0 KiB)

eth2      Link encap:Ethernet  HWaddr 08:00:27:EA:1C:EC
          UP BROADCAST RUNNING SLAVE MULTICAST  MTU:1500  Metric:1
          RX packets:235 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:21500 (20.9 KiB)  TX bytes:0 (0.0 b)

eth3      Link encap:Ethernet  HWaddr 08:00:27:12:A1:28
          UP BROADCAST RUNNING SLAVE MULTICAST  MTU:1500  Metric:1
          RX packets:235 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:21500 (20.9 KiB)  TX bytes:0 (0.0 b)

lo        Link encap:Local Loopback
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:16436  Metric:1
          RX packets:1770 errors:0 dropped:0 overruns:0 frame:0
          TX packets:1770 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:2020795 (1.9 MiB)  TX bytes:2020795 (1.9 MiB)

sit0      Link encap:IPv6-in-IPv4
          NOARP  MTU:1480  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:0 (0.0 b)  TX bytes:0 (0.0 b)

[root@node1 ~]# modprobe bonding
[root@node1 ~]# service network restart
[root@node1 ~]# cat /proc/net/bonding/bond0
Ethernet Channel Bonding Driver: v3.4.0 (October 7, 2008)

Bonding Mode: fault-tolerance (active-backup)
Primary Slave: None
Currently Active Slave: eth0
MII Status: up
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0

Slave Interface: eth0
MII Status: up
Link Failure Count: 0
Permanent HW addr: 08:00:27:ea:1c:ec

Slave Interface: eth2
MII Status: up
Link Failure Count: 0
Permanent HW addr: 08:00:27:32:cd:de
[root@node1 ~]# cat /proc/net/bonding/bond1
Ethernet Channel Bonding Driver: v3.4.0 (October 7, 2008)

Bonding Mode: fault-tolerance (active-backup)
Primary Slave: None
Currently Active Slave: eth1
MII Status: up
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0

Slave Interface: eth1
MII Status: up
Link Failure Count: 0
Permanent HW addr: 08:00:27:12:a1:28

Slave Interface: eth3
MII Status: up
Link Failure Count: 0
Permanent HW addr: 08:00:27:ee:c4:38
[root@node1 ~]#
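To confirm that failover actually works, a simple test sketch (run it from the console, not over the link you are about to break):

[root@node1 ~]# ifconfig eth0 down             # simulate a link failure on the active slave
[root@node1 ~]# cat /proc/net/bonding/bond0    # "Currently Active Slave" should now read eth2
[root@node1 ~]# ifconfig eth0 up               # restore the slave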



Optionally, add the following tuning lines to /etc/sysctl.conf. (Some of the vm.* keys below, such as vm.pagecache and vm.inactive_clean_percent, only exist on older kernels; skip any key sysctl reports as unknown.)
#vim /etc/sysctl.conf
# Gigabit tuning
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
# net.core.wmem_max = 8388608
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 65535 16777216

net.ipv4.tcp_mem = 98304 131072 196608
net.core.netdev_max_backlog = 250000
net.ipv4.tcp_timestamps = 1
net.ipv4.ip_local_port_range = 1025 61000

# VM pressure fixes
vm.swappiness = 100
vm.inactive_clean_percent = 100

vm.pagecache = 200 10 20
vm.dirty_ratio = 10
vm.dirty_background_ratio = 5


# Security tweaks
net.ipv4.tcp_synack_retries = 3
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_max_syn_backlog = 10240
net.ipv4.tcp_fin_timeout = 30

net.ipv4.tcp_keepalive_time = 1200
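After saving the file, apply the settings without a reboot and spot-check a value:

#sysctl -p
#sysctl net.ipv4.tcp_rmem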


What is bonding?
Bonding is the same as port trunking. In the following I will use the word bonding because, practically, we bond interfaces together into one.

But still...what is bonding?
Bonding allows you to aggregate multiple ports into a single group, effectively combining the bandwidth into a single connection. Bonding also allows you to create multi-gigabit pipes to transport traffic through the highest-traffic areas of your network. For example, you can aggregate three 1-megabit ports into a single 3-megabit trunk port, which is equivalent to having one interface with 3-megabit speed.

Where should I use bonding?
You can use it wherever you need redundant links, fault tolerance, or load balancing in your network. It is one of the best ways to build a highly available network segment. Bonding is also very useful in combination with 802.1q VLAN support (your network equipment must implement the 802.1q protocol); a sketch of that follows.
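A minimal sketch of a tagged VLAN on top of bond0, assuming VLAN ID 10, the 8021q module loaded, and an illustrative address:

# /etc/sysconfig/network-scripts/ifcfg-bond0.10
DEVICE=bond0.10
VLAN=yes
BOOTPROTO=static
IPADDR=10.0.10.11
NETMASK=255.255.255.0
ONBOOT=yes
USERCTL=no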

The best documentation is on the Linux Channel Bonding Project page; I strongly recommend reading it for more details.

Credits: Linux Channel Bonding Project page, Thea

This small howto will try to cover the most used bonding types. The following script (the gray area) will configure a bond interface (bond0) using two Ethernet interfaces (eth0 and eth1). You can place it in your own file and run it at boot time (a sketch of that follows the script).

#!/bin/bash

modprobe bonding mode=0 miimon=100 # load the bonding module in round-robin mode

ifconfig eth0 down # take the eth0 interface down
ifconfig eth1 down # take the eth1 interface down

ifconfig bond0 hw ether 00:11:22:33:44:55 # change the MAC address of the bond0 interface
ifconfig bond0 192.168.55.55 up # bond0 must be up with an IP before the ethX interfaces can be enslaved

ifenslave bond0 eth0 # put the eth0 interface in slave mode for bond0
ifenslave bond0 eth1 # put the eth1 interface in slave mode for bond0
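To run the script at boot, one option is to call it from /etc/rc.d/rc.local (the path and file name here are illustrative):

chmod +x /usr/local/sbin/bond-up.sh
echo '/usr/local/sbin/bond-up.sh' >> /etc/rc.d/rc.local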

You can set up your bond interface according to your needs. By changing one parameter (mode=X) you can have the following bonding types:
mode=0 (balance-rr)
Round-robin policy: Transmit packets in sequential order from the first available slave through the last. This mode provides load balancing and fault tolerance.

mode=1 (active-backup)
Active-backup policy: Only one slave in the bond is active. A different slave becomes active if, and only if, the active slave fails. The bond's MAC address is externally visible on only one port (network adapter) to avoid confusing the switch. This mode provides fault tolerance. The primary option affects the behavior of this mode.
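For example, a sketch that prefers eth0 whenever its link is up (primary= is the option referred to above; the interface name is illustrative):

modprobe bonding mode=1 miimon=100 primary=eth0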

mode=2 (balance-xor)
XOR policy: Transmit based on [(source MAC address XOR'd with destination MAC address) modulo slave count]. This selects the same slave for each destination MAC address. This mode provides load balancing and fault tolerance.
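A quick worked example with two slaves, taking just the last byte of each MAC for brevity: 0xEC XOR 0x01 = 0xED = 237, and 237 modulo 2 = 1, so every frame for that destination MAC leaves on the second slave (index 1).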

mode=3 (broadcast)
Broadcast policy: transmits everything on all slave interfaces. This mode provides fault tolerance.

mode=4 (802.3ad)
IEEE 802.3ad Dynamic link aggregation. Creates aggregation groups that share the same speed and duplex settings. Utilizes all slaves in the active aggregator according to the 802.3ad specification.

Prerequisites:
1. Ethtool support in the base drivers for retrieving the speed and duplex of each slave.
2. A switch that supports IEEE 802.3ad Dynamic link aggregation. Most switches will require some type of configuration to enable 802.3ad mode.
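For example, a sketch for loading the driver in 802.3ad mode (lacp_rate=1 requests fast LACPDUs and is optional):

modprobe bonding mode=4 miimon=100 lacp_rate=1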

mode=5 (balance-tlb)
Adaptive transmit load balancing: channel bonding that does not require any special switch support. The outgoing traffic is distributed according to the current load (computed relative to the speed) on each slave. Incoming traffic is received by the current slave. If the receiving slave fails, another slave takes over the MAC address of the failed receiving slave.

Prerequisite:
Ethtool support in the base drivers for retrieving the speed of each slave.

mode=6 (balance-alb)
Adaptive load balancing: includes balance-tlb plus receive load balancing (rlb) for IPv4 traffic, and does not require any special switch support. The receive load balancing is achieved by ARP negotiation. The bonding driver intercepts the ARP Replies sent by the local system on their way out and overwrites the source hardware address with the unique hardware address of one of the slaves in the bond, such that different peers use different hardware addresses for the server.

The first four modes are the most commonly used.

You can also use multiple bond interfaces, but for that you must load the bonding module once per interface. Presuming that you want two bond interfaces, you must configure /etc/modules.conf as follows:

alias bond0 bonding
options bond0 -o bond0 mode=0 miimon=100
alias bond1 bonding
options bond1 -o bond1 mode=1 miimon=100
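The -o flag belongs to the older modutils; on 2.6 kernels with module-init-tools the same effect comes from max_bonds, as in the modprobe.conf shown at the top of this post. A sketch:

alias bond0 bonding
alias bond1 bonding
options bonding max_bonds=2 mode=1 miimon=100

Note that with max_bonds all bonds share the same options; to give each bond its own mode on newer kernels, write it through sysfs (/sys/class/net/bondX/bonding/mode).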

Notes:

* To restore your slaves' MAC addresses, you need to detach them from the bond (`ifenslave -d bond0 eth0'). The bonding driver will then restore the MAC addresses that the slaves had before they were enslaved.
* The bond MAC address will be taken from its first slave device.
* Promiscuous mode: depending on your bond type, when you put the bond interface in promiscuous mode the setting is propagated to the slave devices as follows (a small sketch follows these notes):
o for mode=0, 2, 3 and 4, the promiscuous mode setting is propagated to all slaves.
o for mode=1, 5 and 6, the promiscuous mode setting is propagated only to the active slave.
For balance-tlb mode the active slave is the slave currently receiving inbound traffic; for balance-alb mode the active slave is the slave used as the "primary". For the active-backup, balance-tlb and balance-alb modes, when the active slave changes (e.g., due to a link failure), the promiscuous setting is propagated to the new active slave.
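A sketch of toggling it by hand and watching the propagation:

ifconfig bond0 promisc             # enable promiscuous mode on the bond
ifconfig eth0 | grep -o PROMISC    # the flag appears on slaves per the rules above
ifconfig bond0 -promisc            # turn it back off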
