Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

BFD Tx, Rx interval support for Vnet Monitored routes. #3335

Open
wants to merge 6 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
68 changes: 65 additions & 3 deletions orchagent/vnetorch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1080,6 +1080,7 @@ bool VNetRouteOrch::doRouteTask<VNetVrfObject>(const string& vnet, IpPrefix& ipP
NextHopGroupKey& nexthops, string& op, string& profile,
const string& monitoring, NextHopGroupKey& nexthops_secondary,
const IpPrefix& adv_prefix,
bool check_directly_connected,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This check will be added later, right?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

yes

const map<NextHopKey, IpAddress>& monitors)
{
SWSS_LOG_ENTER();
Expand Down Expand Up @@ -1829,7 +1830,8 @@ void VNetRouteOrch::delRoute(const IpPrefix& ipPrefix)
syncd_routes_.erase(route_itr);
}

void VNetRouteOrch::createBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& monitor_addr)
void VNetRouteOrch::createBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& monitor_addr,
bool has_monitor_interval, u_int64_t tx_monitor_interval, u_int64_t rx_monitor_interval)
{
SWSS_LOG_ENTER();

Expand Down Expand Up @@ -1857,6 +1859,13 @@ void VNetRouteOrch::createBfdSession(const string& vnet, const NextHopKey& endpo
// when the device goes into TSA. The following parameter ensures that these session are
// brought down while transitioning to TSA and brought back up when transitioning to TSB.
data.emplace_back("shutdown_bfd_during_tsa", "true");
if (has_monitor_interval)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please add a note that without tx/rx interval in the DB, BFD session will use the default timers BFD_SESSION_DEFAULT_TX_INTERVAL and BFD_SESSION_DEFAULT_RX_INTERVAL

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'll add it.

{
FieldValueTuple fvTuple1("tx_interval", to_string(tx_monitor_interval));
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please include sonic-mgmt tests with Rx/Tx timer to check if these are supported by vendor SAI for BFD hardware offload.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'll add those tests after I have made the changes for directly connected nexthops.

data.push_back(fvTuple1);
FieldValueTuple fvTuple2("rx_interval", to_string(rx_monitor_interval));
data.push_back(fvTuple2);
}
bfd_session_producer_.set(key, data);
bfd_sessions_[monitor_addr].bfd_state = SAI_BFD_SESSION_STATE_DOWN;
}
Expand Down Expand Up @@ -1963,7 +1972,19 @@ void VNetRouteOrch::removeMonitoringSession(const string& vnet, const NextHopKey
void VNetRouteOrch::setEndpointMonitor(const string& vnet, const map<NextHopKey, IpAddress>& monitors, NextHopGroupKey& nexthops, const string& monitoring, IpPrefix& ipPrefix)
{
SWSS_LOG_ENTER();
bool has_monitor_internvals = false;
uint64_t tx_monitor_interval = 0;
uint64_t rx_monitor_interval = 0;

if (monitoring != "custom")
{
if (prefix_to_monitor_intervals_.find(ipPrefix) != prefix_to_monitor_intervals_.end())
{
has_monitor_internvals = true;
tx_monitor_interval = prefix_to_monitor_intervals_[ipPrefix].tx_monitor_timer;
rx_monitor_interval = prefix_to_monitor_intervals_[ipPrefix].rx_monitor_timer;
}
}
for (auto monitor : monitors)
{
NextHopKey nh = monitor.first;
Expand All @@ -1989,7 +2010,7 @@ void VNetRouteOrch::setEndpointMonitor(const string& vnet, const map<NextHopKey,
{
if (nexthop_info_[vnet].find(nh.ip_address) == nexthop_info_[vnet].end())
{
createBfdSession(vnet, nh, monitor_ip);
createBfdSession(vnet, nh, monitor_ip, has_monitor_internvals, tx_monitor_interval, rx_monitor_interval);
}
nexthop_info_[vnet][nh.ip_address].ref_count++;
}
Expand Down Expand Up @@ -2620,6 +2641,10 @@ bool VNetRouteOrch::handleTunnel(const Request& request)
swss::IpPrefix adv_prefix;
bool has_priority_ep = false;
bool has_adv_pfx = false;
bool has_monitor_intervals = false;
uint64_t rx_monitor_timer = 0;
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

these must be initialized to defaults instead of 0

uint64_t tx_monitor_timer = 0;
bool check_directly_connected = false;
for (const auto& name: request.getAttrFieldNames())
{
if (name == "endpoint")
Expand Down Expand Up @@ -2657,6 +2682,20 @@ bool VNetRouteOrch::handleTunnel(const Request& request)
adv_prefix = request.getAttrIpPrefix(name);
has_adv_pfx = true;
}
else if (name == "rx_monitor_timer")
{
rx_monitor_timer = request.getAttrUint(name);
has_monitor_intervals = true;
}
else if (name == "tx_monitor_timer")
{
tx_monitor_timer = request.getAttrUint(name);
has_monitor_intervals = true;
}
else if (name == "check_directly_connected")
{
check_directly_connected = request.getAttrBool(name);
}
else
{
SWSS_LOG_INFO("Unknown attribute: %s", name.c_str());
Expand Down Expand Up @@ -2746,9 +2785,32 @@ bool VNetRouteOrch::handleTunnel(const Request& request)
{
adv_prefix = ip_pfx;
}
if (has_monitor_intervals)
{
struct VnetRouteMonitorIntervals intervals;
intervals.rx_monitor_timer = rx_monitor_timer;
intervals.tx_monitor_timer = tx_monitor_timer;
prefix_to_monitor_intervals_[ip_pfx] = intervals;
}
if ( op == DEL_COMMAND)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

extra space

{
if (prefix_to_monitor_intervals_.find(ip_pfx) != prefix_to_monitor_intervals_.end())
{
prefix_to_monitor_intervals_.erase(ip_pfx);
}
}
if (vnet_orch_->isVnetExecVrf())
{
return doRouteTask<VNetVrfObject>(vnet_name, ip_pfx, (has_priority_ep == true) ? nhg_primary : nhg, op, profile, monitoring, nhg_secondary, adv_prefix, monitors);
return doRouteTask<VNetVrfObject>(vnet_name,
ip_pfx,
(has_priority_ep == true) ? nhg_primary : nhg,
op,
profile,
monitoring,
nhg_secondary,
adv_prefix,
check_directly_connected,
monitors);
}

return true;
Expand Down
36 changes: 25 additions & 11 deletions orchagent/vnetorch.h
Original file line number Diff line number Diff line change
Expand Up @@ -298,16 +298,20 @@ class VNetOrch : public Orch2
const request_description_t vnet_route_description = {
{ REQ_T_STRING, REQ_T_IP_PREFIX },
{
{ "endpoint", REQ_T_IP_LIST },
{ "ifname", REQ_T_STRING },
{ "nexthop", REQ_T_STRING },
{ "vni", REQ_T_STRING },
{ "mac_address", REQ_T_STRING },
{ "endpoint_monitor", REQ_T_IP_LIST },
{ "profile", REQ_T_STRING },
{ "primary", REQ_T_IP_LIST },
{ "monitoring", REQ_T_STRING },
{ "adv_prefix", REQ_T_IP_PREFIX },
{ "endpoint", REQ_T_IP_LIST },
{ "ifname", REQ_T_STRING },
{ "nexthop", REQ_T_STRING },
{ "vni", REQ_T_STRING },
{ "mac_address", REQ_T_STRING },
{ "endpoint_monitor", REQ_T_IP_LIST },
{ "profile", REQ_T_STRING },
{ "primary", REQ_T_IP_LIST },
{ "monitoring", REQ_T_STRING },
{ "adv_prefix", REQ_T_IP_PREFIX },
{ "rx_monitor_timer", REQ_T_UINT },
{ "tx_monitor_timer", REQ_T_UINT },
{ "check_directly_connected", REQ_T_BOOL },

},
{ }
};
Expand Down Expand Up @@ -404,6 +408,13 @@ struct VNetTunnelRouteEntry
NextHopGroupKey secondary;
};

struct VnetRouteMonitorIntervals
{
// The interval in milliseconds at which the BFD session should be monitored
uint64_t rx_monitor_timer;
uint64_t tx_monitor_timer;
};

typedef std::map<NextHopGroupKey, NextHopGroupInfo> VNetNextHopGroupInfoTable;
typedef std::map<IpPrefix, VNetTunnelRouteEntry> VNetTunnelRouteTable;
typedef std::map<IpAddress, BfdSessionInfo> BfdSessionTable;
Expand Down Expand Up @@ -448,7 +459,8 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer
VNetVrfObject *vrf_obj, NextHopGroupKey&,
const std::map<NextHopKey,IpAddress>& monitors=std::map<NextHopKey, IpAddress>());

void createBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr);
void createBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr,
bool has_monitor_interval, u_int64_t tx_monitor_interval, u_int64_t rx_monitor_interval);
void removeBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr);
void createMonitoringSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr, IpPrefix& ipPrefix);
void removeMonitoringSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr, IpPrefix& ipPrefix);
Expand All @@ -469,6 +481,7 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer
template<typename T>
bool doRouteTask(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op, string& profile,
const string& monitoring, NextHopGroupKey& nexthops_secondary, const IpPrefix& adv_prefix,
bool check_directly_connected,
const std::map<NextHopKey, IpAddress>& monitors=std::map<NextHopKey, IpAddress>());

template<typename T>
Expand All @@ -487,6 +500,7 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer
std::map<std::string, VNetEndpointInfoTable> nexthop_info_;
std::map<IpPrefix, IpPrefix> prefix_to_adv_prefix_;
std::map<IpPrefix, int> adv_prefix_refcount_;
std::map<IpPrefix, VnetRouteMonitorIntervals> prefix_to_monitor_intervals_;
std::set<IpPrefix> subnet_decap_terms_created_;
ProducerStateTable bfd_session_producer_;
ProducerStateTable app_tunnel_decap_term_producer_;
Expand Down
163 changes: 159 additions & 4 deletions tests/test_vnet.py
Original file line number Diff line number Diff line change
Expand Up @@ -141,17 +141,30 @@ def delete_vnet_local_routes(dvs, prefix, vnet_name):
time.sleep(2)


def create_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile="", primary="", monitoring="", adv_prefix=""):
set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac=mac, vni=vni, ep_monitor=ep_monitor, profile=profile, primary=primary, monitoring=monitoring, adv_prefix=adv_prefix)
def create_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile="",
primary="", monitoring="", adv_prefix="", rx_timer="", tx_timer="", check_directly_connected=False):

set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac=mac, vni=vni, ep_monitor=ep_monitor,
profile=profile, primary=primary, monitoring=monitoring, adv_prefix=adv_prefix,
rx_timer=rx_timer, tx_timer=tx_timer, check_directly_connected=check_directly_connected)

def set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile="", primary="", monitoring="", adv_prefix=""):

def set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile="", primary="",
monitoring="", adv_prefix="", rx_timer="", tx_timer="",
check_directly_connected=False):
conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0)

attrs = [
("endpoint", endpoint),
]

if rx_timer:
attrs.append(('rx_monitor_timer', rx_timer))
if tx_timer:
attrs.append(('tx_monitor_timer', tx_timer))
if check_directly_connected:
attrs.append(('check_directly_connected', 'true'))

if vni:
attrs.append(('vni', vni))

Expand Down Expand Up @@ -480,6 +493,21 @@ def get_bfd_session_id(dvs, addr):

return None

def get_bfd_tx_rx_interval(dvs, addr):
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)
tbl = swsscommon.Table(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION")
entries = set(tbl.getKeys())
for entry in entries:
status, fvs = tbl.get(entry)
fvs = dict(fvs)
assert status, "Got an error when get a key"
if fvs["SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS"] == addr and fvs["SAI_BFD_SESSION_ATTR_MULTIHOP"] == "true":
rx_interval = fvs["SAI_BFD_SESSION_ATTR_MIN_RX"]
tx_interval = fvs["SAI_BFD_SESSION_ATTR_MIN_TX"]
return (tx_interval, rx_interval)

return None, None


def check_del_bfd_session(dvs, addrs):
for addr in addrs:
Expand Down Expand Up @@ -3687,7 +3715,6 @@ def test_vnet_orch_26(self, dvs, setup_subnet_decap):
"src_ip_v6": "20c1:ba8::/64"
}
setup_subnet_decap(subnet_decap_config)

vnet_obj = self.get_vnet_obj()
vnet_obj.fetch_exist_entries(dvs)

Expand Down Expand Up @@ -3848,6 +3875,134 @@ def test_vnet_orch_27(self, dvs, setup_subnet_decap):
delete_subnet_decap_tunnel(dvs, "IPINIP_SUBNET_V6")
vnet_obj.check_del_ipinip_tunnel(dvs, "IPINIP_SUBNET_V6")

'''
Test 28 - Test for monitor Tx and Rx interval parameters.
'''
def test_vnet_orch_28(self, dvs, testlog):
vnet_obj = self.get_vnet_obj()
tunnel_name = 'tunnel_28'
vnet_name = 'Vnet28'

vnet_obj.fetch_exist_entries(dvs)

create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9')
create_vnet_entry(dvs, vnet_name, tunnel_name, '10028', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66")

vnet_obj.check_vnet_entry(dvs, vnet_name)
vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10028')

vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9')

vnet_obj.fetch_exist_entries(dvs)
tx_timer = '1234'
rx_timer = '5678'
create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.1.0.1,9.1.0.2', ep_monitor='9.1.0.1,9.1.0.2',
profile="Test_profile", adv_prefix='100.100.1.0/24',tx_timer=tx_timer, rx_timer=rx_timer)

# default monitor session status is down, route should not be programmed in this status
vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"])
check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", [])

# check the bfd session is created and the interval is set
tx_val, rx_val = get_bfd_tx_rx_interval(dvs, '9.1.0.1')
assert tx_val == (tx_timer+'000')
assert rx_val == (rx_timer+'000')

tx_val, rx_val = get_bfd_tx_rx_interval(dvs, '9.1.0.2')
assert tx_val == (tx_timer+'000')
assert rx_val == (rx_timer+'000')

# change the values of tx and rx timers in a route
tx_timer = '2345'
rx_timer = '6789'
set_vnet_routes(dvs,"100.100.1.1/32", vnet_name,'9.1.0.1,9.1.0.2,9.1.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3',
profile="Test_profile", adv_prefix='100.100.1.0/24',tx_timer=tx_timer, rx_timer=rx_timer)

# check the bfd session is created for new nexthop with updated values
tx_val, rx_val = get_bfd_tx_rx_interval(dvs, '9.1.0.3')
assert tx_val == (tx_timer+'000')
assert rx_val == (rx_timer+'000')

# Remove tunnel route 1
delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name)

#ensure bfd session is removed
tx_val, rx_val = get_bfd_tx_rx_interval(dvs, '9.1.0.1')
assert tx_val == None and rx_val == None

tx_val, rx_val = get_bfd_tx_rx_interval(dvs, '9.1.0.2')
assert tx_val ==None and rx_val == None

tx_val, rx_val = get_bfd_tx_rx_interval(dvs, '9.1.0.3')
assert tx_val ==None and rx_val == None


vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"])
check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32")

create_vnet_routes(dvs, "fd:10:10::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::3',
ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3', profile="test_profile",
tx_timer=tx_timer, rx_timer=rx_timer,check_directly_connected=True)

# default bfd status is down, route should not be programmed in this status
vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["fd:10:10::1/128"])
check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", [])
check_remove_routes_advertisement(dvs, "fd:10:10::1/128")

# check the bfd session is created and the interval is set
tx_val, rx_val = get_bfd_tx_rx_interval(dvs, 'fd:10:2::1')
assert tx_val == (tx_timer+'000')
assert rx_val == (rx_timer+'000')

tx_val, rx_val = get_bfd_tx_rx_interval(dvs, 'fd:10:2::2')
assert tx_val == (tx_timer+'000')
assert rx_val == (rx_timer+'000')

tx_val, rx_val = get_bfd_tx_rx_interval(dvs, 'fd:10:2::3')
assert tx_val == (tx_timer+'000')
assert rx_val == (rx_timer+'000')

tx_timer = '5432'
rx_timer = '9876'
set_vnet_routes(dvs, "fd:10:10::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::3,fd:10:1::4',
ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3,fd:10:2::4', profile="test_profile",
tx_timer=tx_timer, rx_timer=rx_timer,check_directly_connected=False)

# check the bfd session is created for new nexthop with updated values
tx_val, rx_val = get_bfd_tx_rx_interval(dvs, 'fd:10:2::4')
assert tx_val == (tx_timer+'000')
assert rx_val == (rx_timer+'000')

#remove first and re-add the nexthop to change its BFD interval values
set_vnet_routes(dvs, "fd:10:10::1/128", vnet_name, 'fd:10:1::2,fd:10:1::3,fd:10:1::4',
ep_monitor='fd:10:2::2,fd:10:2::3,fd:10:1::4', profile="test_profile",
tx_timer=tx_timer, rx_timer=rx_timer,check_directly_connected=False)

set_vnet_routes(dvs, "fd:10:10::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::3,fd:10:1::4',
ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3,fd:10:1::4', profile="test_profile",
tx_timer=tx_timer, rx_timer=rx_timer,check_directly_connected=False)

# check the bfd session is created for new nexthop with updated values
tx_val, rx_val = get_bfd_tx_rx_interval(dvs, 'fd:10:2::1')
assert tx_val == (tx_timer+'000')
assert rx_val == (rx_timer+'000')

# Remove tunnel route 2
delete_vnet_routes(dvs, "fd:10:10::1/128", vnet_name)
#ensure bfd session is removed
tx_val, rx_val = get_bfd_tx_rx_interval(dvs, 'fd:10:2::1')
assert tx_val == None and rx_val == None

tx_val, rx_val = get_bfd_tx_rx_interval(dvs, 'fd:10:2::2')
assert tx_val ==None and rx_val == None

tx_val, rx_val = get_bfd_tx_rx_interval(dvs, 'fd:10:2::3')
assert tx_val ==None and rx_val == None

delete_vnet_entry(dvs, vnet_name)
vnet_obj.check_del_vnet_entry(dvs, vnet_name)
delete_vxlan_tunnel(dvs, tunnel_name)

# Add Dummy always-pass test at end as workaroud
# for issue when Flaky fail on final test it invokes module tear-down before retrying
def test_nonflaky_dummy():
Expand Down
Loading