Add modular fib lookup framework.

This change introduces a framework that allows dynamically
 attaching or detaching longest prefix match (lpm) lookup algorithms
 to speed up datapath route table lookups.

Framework takes care of handling initial synchronisation,
 route subscription, nhop/nhop groups reference and indexing,
 dataplane attachments and fib instance algorithm setup/teardown.
Framework features automatic algorithm selection, allowing for
 picking the best matching algorithm on-the-fly based on the
 amount of routes in the routing table.

Currently framework code is guarded under FIB_ALGO config option.
The plan is to enable it by default in the next couple of weeks.

The following algorithms are provided by default:
IPv4:
* bsearch4 (lockless binary search in a special IP array), tailored for
  small-fib (<16 routes)
* radix4_lockless (lockless immutable radix, re-created on every rtable change),
  tailored for small-fib (<1000 routes)
* radix4 (base system radix backend)
* dpdk_lpm4 (DPDK DIR24-8-based lookups), lockless datastructure, optimized
  for large-fib (D27412)
IPv6:
* radix6_lockless (lockless immutable radix, re-created on every rtable change),
  tailored for small-fib (<1000 routes)
* radix6 (base system radix backend)
* dpdk_lpm6 (DPDK DIR24-8-based lookups), lockless datastructure, optimized
  for large-fib (D27412)

Performance changes:
Micro benchmarks (I7-7660U, single-core lookups, 2048k dst, code in D27604):
IPv4:
8 routes:
  radix4: ~20mpps
  radix4_lockless: ~24.8mpps
  bsearch4: ~69mpps
  dpdk_lpm4: ~67 mpps
700k routes:
  radix4_lockless: 3.3mpps
  dpdk_lpm4: 46mpps

IPv6:
8 routes:
  radix6_lockless: ~20mpps
  dpdk_lpm6: ~70mpps
100k routes:
  radix6_lockless: 13.9mpps
  dpdk_lpm6: 57mpps

Forwarding benchmarks:
+ 10-15% IPv4 forwarding performance (small-fib, bsearch4)
+ 25% IPv4 forwarding performance (full-view, dpdk_lpm4)
+ 20% IPv6 forwarding performance (full-view, dpdk_lpm6)

Control:
Framework adds the following runtime sysctls:

List algos
* net.route.algo.inet.algo_list: bsearch4, radix4_lockless, radix4
* net.route.algo.inet6.algo_list: radix6_lockless, radix6, dpdk_lpm6
Debug level (7=LOG_DEBUG, per-route)
net.route.algo.debug_level: 5
Algo selection (currently only for fib 0):
net.route.algo.inet.algo: bsearch4
net.route.algo.inet6.algo: radix6_lockless

Support for manually changing algos in non-default fib will be added
soon. Some sysctl names will be changed in the near future.

Differential Revision: https://reviews.freebsd.org/D27401
This commit is contained in:
Alexander V. Chernikov 2020-12-25 10:39:52 +00:00
parent 760dbe84ab
commit f5baf8bb12
12 changed files with 2962 additions and 6 deletions

View file

@ -4178,6 +4178,7 @@ net/route/nhgrp_ctl.c optional route_mpath
net/route/nhop.c standard
net/route/nhop_ctl.c standard
net/route/nhop_utils.c standard
net/route/fib_algo.c optional fib_algo
net/route/route_ctl.c standard
net/route/route_ddb.c optional ddb
net/route/route_helpers.c standard
@ -4329,6 +4330,7 @@ netinet/in_debug.c optional inet ddb
netinet/in_kdtrace.c optional inet | inet6
netinet/ip_carp.c optional inet carp | inet6 carp
netinet/in_fib.c optional inet
netinet/in_fib_algo.c optional inet fib_algo
netinet/in_gif.c optional gif inet | netgraph_gif inet
netinet/ip_gre.c optional gre inet
netinet/ip_id.c optional inet
@ -4405,6 +4407,7 @@ netinet6/icmp6.c optional inet6
netinet6/in6.c optional inet6
netinet6/in6_cksum.c optional inet6
netinet6/in6_fib.c optional inet6
netinet6/in6_fib_algo.c optional inet6 fib_algo
netinet6/in6_gif.c optional gif inet6 | netgraph_gif inet6
netinet6/in6_ifattach.c optional inet6
netinet6/in6_jail.c optional inet6

View file

@ -454,6 +454,7 @@ PCBGROUP opt_pcbgroup.h
PF_DEFAULT_TO_DROP opt_pf.h
ROUTE_MPATH opt_route.h
ROUTETABLES opt_route.h
FIB_ALGO opt_route.h
RSS opt_rss.h
SLIP_IFF_OPTS opt_slip.h
TCPDEBUG

View file

@ -151,6 +151,14 @@ void
rt_table_destroy(struct rib_head *rh)
{
RIB_WLOCK(rh);
rh->rib_dying = true;
RIB_WUNLOCK(rh);
#ifdef FIB_ALGO
fib_destroy_rib(rh);
#endif
tmproutes_destroy(rh);
rn_walktree(&rh->rmhead.head, rt_freeentry, &rh->rmhead.head);

1608
sys/net/route/fib_algo.c Normal file

File diff suppressed because it is too large Load diff

109
sys/net/route/fib_algo.h Normal file
View file

@ -0,0 +1,109 @@
/*-
* Copyright (c) 2020
* Alexander V. Chernikov <melifaro@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/* Opaque per-instance framework state, defined in fib_algo.c */
struct fib_data;
struct fib_dp;

/* Result codes returned by algorithm callbacks to the framework. */
enum flm_op_result {
	FLM_SUCCESS,	/* No errors, operation successful */
	FLM_REBUILD,	/* Operation cannot be completed, schedule algorithm rebuild */
	FLM_ERROR,	/* Operation failed, this algo cannot be used */
};

/* Route table counters, used to pick the most suitable algorithm. */
struct rib_rtable_info {
	uint32_t num_prefixes;
	uint32_t num_nhops;
	uint32_t num_nhgrp;
};

/* Per-family lookup key handed to the datapath lookup function. */
struct flm_lookup_key {
	union {
		const struct in6_addr *addr6;	/* IPv6 destination */
		struct in_addr addr4;		/* IPv4 destination */
	};
};

/* Datapath lookup: maps (key, scopeid) to a nexthop or NULL. */
typedef struct nhop_object *flm_lookup_t(void *algo_data,
    const struct flm_lookup_key key, uint32_t scopeid);
/* Creates an algo instance for @fibnum; new state is returned via @new_data. */
typedef enum flm_op_result flm_init_t (uint32_t fibnum, struct fib_data *fd,
    void *_old_data, void **new_data);
/* Frees all state allocated by the init/dump callbacks. */
typedef void flm_destroy_t(void *data);
/* Called once per route during the initial rtable dump. */
typedef enum flm_op_result flm_dump_t(struct rtentry *rt, void *data);
/* Called after the dump completes; fills @dp with the datapath lookup hook. */
typedef enum flm_op_result flm_dump_end_t(void *data, struct fib_dp *dp);
/* Called on each rtable change after the initial synchronisation. */
typedef enum flm_op_result flm_change_t(struct rib_head *rnh,
    struct rib_cmd_info *rc, void *data);
/*
 * Returns algorithm preference for a table of the given size.
 * NOTE(review): implementations here return values in 1..255 and larger
 * appears to mean "more preferred" — confirm against fib_algo.c selection.
 */
typedef uint8_t flm_get_pref_t(const struct rib_rtable_info *rinfo);

/* Lookup algorithm descriptor, registered via fib_module_register(). */
struct fib_lookup_module {
	char		*flm_name;		/* algo name */
	int		flm_family;		/* address family this module supports */
	int		flm_refcount;		/* # of references */
	uint32_t	flm_flags;		/* flags */
	uint8_t		flm_index;		/* internal algo index */
	flm_init_t	*flm_init_cb;		/* instance init */
	flm_destroy_t	*flm_destroy_cb;	/* destroy instance */
	flm_change_t	*flm_change_rib_item_cb;/* routing table change hook */
	flm_dump_t	*flm_dump_rib_item_cb;	/* routing table dump cb */
	flm_dump_end_t	*flm_dump_end_cb;	/* end of dump */
	flm_lookup_t	*flm_lookup;		/* lookup function */
	flm_get_pref_t	*flm_get_pref;		/* get algo preference */
	TAILQ_ENTRY(fib_lookup_module)	entries;
};

/* Datapath lookup data */
struct fib_dp {
	flm_lookup_t	*f;	/* lookup function */
	void		*arg;	/* algo-private state passed to f */
};

/* Per-vnet arrays of per-fib datapath entries, indexed by fibnum. */
VNET_DECLARE(struct fib_dp *, inet_dp);
#define	V_inet_dp	VNET(inet_dp)
VNET_DECLARE(struct fib_dp *, inet6_dp);
#define	V_inet6_dp	VNET(inet6_dp)

/* Debug print helper; @_l follows syslog-style priority levels. */
#define	FIB_PRINTF(_l, _fd, _fmt, ...)	fib_printf(_l, _fd, __func__, _fmt, ##__VA_ARGS__)

void fib_printf(int level, struct fib_data *fd, const char *func, char *fmt, ...);
int fib_module_init(struct fib_lookup_module *flm, uint32_t fibnum,
    int family);
int fib_module_clone(const struct fib_lookup_module *flm_orig,
    struct fib_lookup_module *flm, bool waitok);
int fib_module_dumptree(struct fib_lookup_module *flm,
    enum rib_subscription_type subscription_type);
int fib_module_register(struct fib_lookup_module *flm);
int fib_module_unregister(struct fib_lookup_module *flm);

/* Helpers available to algorithm implementations. */
uint32_t fib_get_nhop_idx(struct fib_data *fd, struct nhop_object *nh);
struct nhop_object **fib_get_nhop_array(struct fib_data *fd);
void fib_get_rtable_info(struct rib_head *rh, struct rib_rtable_info *rinfo);
struct rib_head *fib_get_rh(struct fib_data *fd);

View file

@ -171,7 +171,7 @@ static void
grow_rtables(uint32_t num_tables)
{
struct domain *dom;
struct rib_head **prnh;
struct rib_head **prnh, *rh;
struct rib_head **new_rt_tables, **old_rt_tables;
int family;
@ -188,6 +188,10 @@ grow_rtables(uint32_t num_tables)
"by default. Consider tuning %s if needed\n",
"net.add_addr_allfibs");
#ifdef FIB_ALGO
fib_grow_rtables(num_tables);
#endif
/*
* Current rt_tables layout:
* fib0[af0, af1, af2, .., AF_MAX]fib1[af0, af1, af2, .., Af_MAX]..
@ -206,10 +210,18 @@ grow_rtables(uint32_t num_tables)
prnh = &new_rt_tables[i * (AF_MAX + 1) + family];
if (*prnh != NULL)
continue;
*prnh = dom->dom_rtattach(i);
if (*prnh == NULL)
log(LOG_ERR, "unable to create routing tables for domain %d\n",
dom->dom_family);
rh = dom->dom_rtattach(i);
if (rh == NULL)
log(LOG_ERR, "unable to create routing table for %d.%d\n",
dom->dom_family, i);
#ifdef FIB_ALGO
if (fib_select_algo_initial(rh) != 0) {
log(LOG_ERR, "unable to select algo for table %d.%d\n",
dom->dom_family, i);
// TODO: detach table
}
#endif
*prnh = rh;
}
}
@ -246,7 +258,9 @@ vnet_rtables_init(const void *unused __unused)
V_rt_numfibs = 1;
vnet_rtzone_init();
#ifdef FIB_ALGO
vnet_fib_init();
#endif
RTABLES_LOCK_INIT();
RTABLES_LOCK();
@ -288,6 +302,9 @@ rtables_destroy(const void *unused __unused)
free(V_rt_tables, M_RTABLE);
vnet_rtzone_destroy();
#ifdef FIB_ALGO
vnet_fib_destroy();
#endif
}
VNET_SYSUNINIT(rtables_destroy, SI_SUB_PROTO_DOMAIN, SI_ORDER_FIRST,
rtables_destroy, 0);

View file

@ -71,6 +71,8 @@ struct rib_head {
struct callout expire_callout; /* Callout for expiring dynamic routes */
time_t next_expire; /* Next expire run ts */
uint32_t rnh_prefixes; /* Number of prefixes */
uint32_t rib_dying:1; /* rib is detaching */
uint32_t rib_algo_fixed:1;/* fixed algorithm */
struct nh_control *nh_control; /* nexthop subsystem data */
CK_STAILQ_HEAD(, rib_subscription) rnh_subscribers;/* notification subscribers */
};
@ -303,6 +305,14 @@ int nhgrp_get_addition_group(struct rib_head *rnh,
void nhgrp_ref_object(struct nhgrp_object *nhg);
uint32_t nhgrp_get_idx(const struct nhgrp_object *nhg);
void nhgrp_free(struct nhgrp_object *nhg);
uint32_t nhgrp_get_idx(const struct nhgrp_object *nhg);
/* lookup_framework.c */
void fib_grow_rtables(uint32_t new_num_tables);
int fib_select_algo_initial(struct rib_head *rh);
void fib_destroy_rib(struct rib_head *rh);
void vnet_fib_init(void);
void vnet_fib_destroy(void);
/* Entropy data used for outbound hashing */
#define MPATH_ENTROPY_KEY_LEN 40

View file

@ -49,6 +49,7 @@ __FBSDID("$FreeBSD$");
#include <net/route.h>
#include <net/route/route_ctl.h>
#include <net/route/route_var.h>
#include <net/route/fib_algo.h>
#include <net/route/nhop.h>
#include <net/toeplitz.h>
#include <net/vnet.h>
@ -63,6 +64,10 @@ __FBSDID("$FreeBSD$");
/* Assert 'struct route_in' is compatible with 'struct route' */
CHK_STRUCT_ROUTE_COMPAT(struct route_in, ro_dst4);
#ifdef FIB_ALGO
VNET_DEFINE(struct fib_dp *, inet_dp);
#endif
#ifdef ROUTE_MPATH
struct _hash_5tuple_ipv4 {
struct in_addr src;
@ -103,6 +108,29 @@ fib4_calc_software_hash(struct in_addr src, struct in_addr dst,
* one needs to pass NHR_REF as a flag. This will return referenced
* nexthop.
*/
#ifdef FIB_ALGO
struct nhop_object *
fib4_lookup(uint32_t fibnum, struct in_addr dst, uint32_t scopeid,
uint32_t flags, uint32_t flowid)
{
struct nhop_object *nh;
struct fib_dp *dp = &V_inet_dp[fibnum];
struct flm_lookup_key key = {.addr4 = dst };
nh = dp->f(dp->arg, key, scopeid);
if (nh != NULL) {
nh = nhop_select(nh, flowid);
/* Ensure route & ifp is UP */
if (RT_LINK_IS_UP(nh->nh_ifp)) {
if (flags & NHR_REF)
nhop_ref_object(nh);
return (nh);
}
}
RTSTAT_INC(rts_unreach);
return (NULL);
}
#else
struct nhop_object *
fib4_lookup(uint32_t fibnum, struct in_addr dst, uint32_t scopeid,
uint32_t flags, uint32_t flowid)
@ -142,6 +170,7 @@ fib4_lookup(uint32_t fibnum, struct in_addr dst, uint32_t scopeid,
RTSTAT_INC(rts_unreach);
return (NULL);
}
#endif
inline static int
check_urpf_nhop(const struct nhop_object *nh, uint32_t flags,
@ -180,6 +209,7 @@ check_urpf(struct nhop_object *nh, uint32_t flags,
return (check_urpf_nhop(nh, flags, src_if));
}
#ifndef FIB_ALGO
static struct nhop_object *
lookup_nhop(uint32_t fibnum, struct in_addr dst, uint32_t scopeid)
{
@ -208,6 +238,7 @@ lookup_nhop(uint32_t fibnum, struct in_addr dst, uint32_t scopeid)
return (nh);
}
#endif
/*
* Performs reverse path forwarding lookup.
@ -223,8 +254,14 @@ fib4_check_urpf(uint32_t fibnum, struct in_addr dst, uint32_t scopeid,
uint32_t flags, const struct ifnet *src_if)
{
struct nhop_object *nh;
#ifdef FIB_ALGO
struct fib_dp *dp = &V_inet_dp[fibnum];
struct flm_lookup_key key = {.addr4 = dst };
nh = dp->f(dp->arg, key, scopeid);
#else
nh = lookup_nhop(fibnum, dst, scopeid);
#endif
if (nh != NULL)
return (check_urpf(nh, flags, src_if));

765
sys/netinet/in_fib_algo.c Normal file
View file

@ -0,0 +1,765 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2020 Alexander V. Chernikov
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/priv.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/vnet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <net/route.h>
#include <net/route/nhop.h>
#include <net/route/route_ctl.h>
#include <net/route/route_var.h>
#include <net/route/fib_algo.h>
/*
 * Binary search lookup algo.
 *
 * Compiles route table into a sorted array.
 * Used with small amount of routes (< 16).
 * As array is immutable, it is rebuild on each rtable change.
 *
 * Example:
 *
 * 0.0.0.0/0 -> nh1
 * 10.0.0.0/24 -> nh2
 * 10.0.0.1/32 -> nh3
 *
 * gets compiled to:
 *
 * 0.0.0.0 -> nh1
 * 10.0.0.0 -> nh2
 * 10.0.0.1 -> nh3
 * 10.0.0.2 -> nh2
 * 10.0.1.0 -> nh1
 *
 */

/* Single lookup-array element; addr4/mask4 are kept in host byte order. */
struct bsearch4_record {
	uint32_t		addr4;
	uint32_t		mask4;
	struct nhop_object	*nh;
};

/* Algo instance state; the datapath reads only num_items and br[]. */
struct bsearch4_data {
	struct fib_data		*fd;		/* framework instance handle */
	uint32_t		alloc_items;	/* capacity of br[] / rr[] */
	uint32_t		num_items;	/* used entries */
	void			*mem;		/* unaligned allocation backing this struct */
	struct bsearch4_record	*rr;		/* temp rtable dump array (M_TEMP) */
	/* C99 flexible array member (was GNU zero-length array br[0]) */
	struct bsearch4_record	br[];
};
/*
 * Main IPv4 address lookup function.
 *
 * Finds array record with maximum index that is <= provided key.
 * Assumes 0.0.0.0/0 always exists (may be with NULL nhop)
 */
static struct nhop_object *
bsearch4_lookup(void *algo_data, const struct flm_lookup_key key, uint32_t scopeid)
{
	const struct bsearch4_data *bd = (const struct bsearch4_data *)algo_data;
	const struct bsearch4_record *br;
	/* Convert key to host order to match array records */
	uint32_t addr4 = ntohl(key.addr4.s_addr);

	int start = 0;
	int end = bd->num_items;

	int i = (start + end) / 2;
	while (start + 1 < end) {
		i = (start + end) / 2;
		br = &bd->br[i];
		if (addr4 < br->addr4) {
			/* key < average, reduce right boundary */
			end = i;
			continue;
		} else if (addr4 > br->addr4) {
			/* key > average, increase left boundary */
			start = i;
			continue;
		} else {
			/* direct match */
			return (br->nh);
		}
	}
	/* start + 1 == end: best match is the record at @start */
	return (bd->br[start].nh);
}
/*
* Preference function.
* Assume ideal for < 10 (typical single-interface setup has 5)
* Then gradually degrade.
* Assume 30 prefixes is at least 60 records, so it will require 8 lookup,
* which is even worse than radix.
*/
static uint8_t
bsearch4_get_pref(const struct rib_rtable_info *rinfo)
{
if (rinfo->num_prefixes < 10)
return (253);
else if (rinfo->num_prefixes < 30)
return (255 - rinfo->num_prefixes * 8);
else
return (1);
}
/*
 * Instance init callback: allocates the cache-aligned lookup array and a
 * temporary rtable-dump array sized from the current prefix count.
 * Returns FLM_REBUILD on allocation failure so the framework retries later.
 */
static enum flm_op_result
bsearch4_init(uint32_t fibnum, struct fib_data *fd, void *_old_data, void **_data)
{
	struct bsearch4_data *bd;
	struct rib_rtable_info rinfo;
	uint32_t count;
	size_t sz;
	void *mem;

	fib_get_rtable_info(fib_get_rh(fd), &rinfo);
	/* 10% headroom + 64 spare slots (covers synthesised records) */
	count = rinfo.num_prefixes * 11 / 10 + 64;

	sz = sizeof(struct bsearch4_data) + sizeof(struct bsearch4_record) * count;
	/* add cache line sz to ease alignment */
	sz += CACHE_LINE_SIZE;
	mem = malloc(sz, M_RTABLE, M_NOWAIT | M_ZERO);
	if (mem == NULL)
		return (FLM_REBUILD);
	/* Align datapath-usable structure to cache line boundary */
	bd = (struct bsearch4_data *)roundup2((uintptr_t)mem, CACHE_LINE_SIZE);
	bd->mem = mem;	/* keep raw pointer for the final free() */
	bd->alloc_items = count;
	bd->fd = fd;

	/* Publish state first so destroy cb can clean up on later failure */
	*_data = bd;

	/*
	 * Allocate temporary array to store all rtable data.
	 * This step is required to provide the required prefix iteration order.
	 */
	bd->rr = mallocarray(count, sizeof(struct bsearch4_record), M_TEMP, M_NOWAIT | M_ZERO);
	if (bd->rr == NULL)
		return (FLM_REBUILD);

	return (FLM_SUCCESS);
}
/*
 * Instance destroy callback: releases the temporary dump array (if the
 * build never completed) and the backing allocation of the lookup array.
 */
static void
bsearch4_destroy(void *_data)
{
	struct bsearch4_data *bd = _data;

	if (bd->rr != NULL)
		free(bd->rr, M_TEMP);
	free(bd->mem, M_RTABLE);
}
/*
 * Callback storing converted rtable prefixes in the temporary array.
 * Addresses are converted to a host order.
 * Returns FLM_REBUILD if the pre-sized array overflows (table grew
 * during the dump).
 */
static enum flm_op_result
bsearch4_add_route_cb(struct rtentry *rt, void *_data)
{
	struct bsearch4_data *bd = (struct bsearch4_data *)_data;
	struct bsearch4_record *rr;
	struct in_addr addr4, mask4;
	uint32_t scopeid;

	if (bd->num_items >= bd->alloc_items)
		return (FLM_REBUILD);

	rr = &bd->rr[bd->num_items++];
	rt_get_inet_prefix_pmask(rt, &addr4, &mask4, &scopeid);
	rr->addr4 = ntohl(addr4.s_addr);
	rr->mask4 = ntohl(mask4.s_addr);
	rr->nh = rt_get_raw_nhop(rt);

	return (FLM_SUCCESS);
}
/*
 * qsort(3) comparator establishing the dump iteration order:
 * 10.0.0.0/24 < 10.0.0.0/25 <- less specific wins
 * 10.0.0.0/25 < 10.0.0.1/32 <- bigger base wins
 */
static int
rr_cmp(const void *_rec1, const void *_rec2)
{
	const struct bsearch4_record *a = _rec1;
	const struct bsearch4_record *b = _rec2;

	/* Primary key: base address, ascending */
	if (a->addr4 != b->addr4)
		return ((a->addr4 < b->addr4) ? -1 : 1);

	/*
	 * Same base address: a numerically smaller (wider) mask sorts
	 * first, so less-specific prefixes precede more-specific ones.
	 */
	if (a->mask4 != b->mask4)
		return ((a->mask4 < b->mask4) ? -1 : 1);
	return (0);
}
/* Bounded view over an array of prefix records used during the build. */
struct bsearch4_array {
	uint32_t		alloc_items;	/* capacity of arr */
	uint32_t		num_items;	/* used entries */
	struct bsearch4_record	*arr;
};
/* Appends *br_new to @ba; returns false when the array is full. */
static bool
add_array_entry(struct bsearch4_array *ba, struct bsearch4_record *br_new)
{
	if (ba->num_items >= ba->alloc_items)
		return (false);

	ba->arr[ba->num_items++] = *br_new;
	return (true);
}
/*
 * Returns pointer to the most recently added entry of @ba.
 * Caller must ensure ba->num_items > 0.
 */
static struct bsearch4_record *
get_last_entry(struct bsearch4_array *ba)
{

	return (&ba->arr[ba->num_items - 1]);
}
/*
 * Pops the top covering-prefix entry from @stack. If that prefix covers
 * address space beyond the last record already emitted to @dst_array,
 * emits the remaining tail so lookups past the last record fall back to
 * the covering route's nexthop.
 *
 * Example:
 * stack: 10.0.1.0/24,nh=3 array: 10.0.1.0/25,nh=4 -> ++10.0.1.128/24,nh=3
 *
 * Returns false when @dst_array is out of space.
 */
static bool
pop_stack_entry(struct bsearch4_array *dst_array, struct bsearch4_array *stack)
{
	uint32_t last_stack_addr, last_array_addr;

	struct bsearch4_record *br_prev = get_last_entry(dst_array);
	struct bsearch4_record *pstack = get_last_entry(stack);

	/* Regardless of the result, pop stack entry */
	stack->num_items--;

	/* Prefix last address for the last entry in lookup array */
	last_array_addr = (br_prev->addr4 | ~br_prev->mask4);
	/* Prefix last address for the stack record entry */
	last_stack_addr = (pstack->addr4 | ~pstack->mask4);

	if (last_stack_addr > last_array_addr) {
		/*
		 * Stack record covers > address space than
		 * the last entry in the lookup array.
		 * Add the remaining parts of a stack record to
		 * the lookup array.
		 */
		struct bsearch4_record br_new = {
			.addr4 = last_array_addr + 1,
			.mask4 = pstack->mask4,
			.nh = pstack->nh,
		};
		return (add_array_entry(dst_array, &br_new));
	}

	return (true);
}
/*
 * Updates resulting array @dst_array with a rib entry @rib_entry.
 * @stack holds the chain of less-specific prefixes covering @rib_entry.
 * Returns false when @dst_array runs out of space.
 */
static bool
bsearch4_process_record(struct bsearch4_array *dst_array,
    struct bsearch4_array *stack, struct bsearch4_record *rib_entry)
{

	/*
	 * Maintain invariant: current rib_entry is always contained
	 * in the top stack entry.
	 * Note we always have 0.0.0.0/0.
	 */
	while (stack->num_items > 0) {
		struct bsearch4_record *pst = get_last_entry(stack);

		/*
		 * Check if we need to pop stack.
		 * Rely on the ordering - larger prefixes comes up first
		 * Pop any entry that doesn't contain current prefix.
		 */
		if (pst->addr4 == (rib_entry->addr4 & pst->mask4))
			break;

		if (!pop_stack_entry(dst_array, stack))
			return (false);
	}

	if (dst_array->num_items > 0) {

		/*
		 * Check if there is a gap between previous entry and a
		 * current entry. Code above guarantees that both previous
		 * and current entry are contained in the top stack entry.
		 *
		 * Example: last: 10.0.0.1(/32,nh=3) cur: 10.0.0.3(/32,nh=4),
		 * stack: 10.0.0.0/24,nh=2.
		 * Cover a gap between previous and current by adding stack
		 * nexthop.
		 */
		struct bsearch4_record *br_tmp = get_last_entry(dst_array);
		uint32_t last_declared_addr = br_tmp->addr4 | ~br_tmp->mask4;
		if (last_declared_addr < rib_entry->addr4 - 1) {
			/* Cover a hole */
			struct bsearch4_record *pst = get_last_entry(stack);
			struct bsearch4_record new_entry = {
				.addr4 = last_declared_addr + 1,
				.mask4 = pst->mask4,
				.nh = pst->nh,
			};
			if (!add_array_entry(dst_array, &new_entry))
				return (false);
		}
	}

	if (!add_array_entry(dst_array, rib_entry))
		return (false);
	/*
	 * NOTE(review): push result is ignored. The stack is allocated with
	 * 32 slots, but up to 33 prefixes (/0 .. /32) can nest over a single
	 * address — confirm capacity / overflow handling in the caller.
	 */
	add_array_entry(stack, rib_entry);

	return (true);
}
static enum flm_op_result
bsearch4_build_array(struct bsearch4_array *dst_array, struct bsearch4_array *src_array)
{
/*
* During iteration, we keep track of all prefixes in rtable
* we currently match, by maintaining stack. As there can be only
* 32 prefixes for a single address, pre-allocate stack of size 32.
*/
struct bsearch4_array stack = {
.alloc_items = 32,
.arr = mallocarray(32, sizeof(struct bsearch4_record), M_TEMP, M_NOWAIT | M_ZERO),
};
if (stack.arr == NULL)
return (FLM_REBUILD);
for (int i = 0; i < src_array->num_items; i++) {
struct bsearch4_record *rib_entry = &src_array->arr[i];
if (!bsearch4_process_record(dst_array, &stack, rib_entry)) {
free(stack.arr, M_TEMP);
return (FLM_REBUILD);
}
}
/*
* We know that last record is contained in the top stack entry.
*/
while (stack.num_items > 0) {
if (!pop_stack_entry(dst_array, &stack))
return (FLM_REBUILD);
}
free(stack.arr, M_TEMP);
return (FLM_SUCCESS);
}
/*
 * Compiles the dumped prefix set (bd->rr) into the final lookup array
 * (bd->br): ensures a default route exists, sorts the prefixes and
 * expands them into flat address ranges. Frees the temporary array.
 */
static enum flm_op_result
bsearch4_build(struct bsearch4_data *bd)
{
	enum flm_op_result ret;

	struct bsearch4_array prefixes_array = {
		.alloc_items = bd->alloc_items,
		.num_items = bd->num_items,
		.arr = bd->rr,
	};

	/* Add default route if not exists */
	bool default_found = false;
	for (int i = 0; i < prefixes_array.num_items; i++) {
		if (prefixes_array.arr[i].mask4 == 0) {
			default_found = true;
			break;
		}
	}
	if (!default_found) {
		/* Add default route with NULL nhop */
		struct bsearch4_record default_entry = {};
		if (!add_array_entry(&prefixes_array, &default_entry))
			return (FLM_REBUILD);
	}

	/* Sort prefixes: less-specific/lower-base first (see rr_cmp) */
	qsort(prefixes_array.arr, prefixes_array.num_items, sizeof(struct bsearch4_record), rr_cmp);

	struct bsearch4_array dst_array = {
		.alloc_items = bd->alloc_items,
		.arr = bd->br,
	};

	ret = bsearch4_build_array(&dst_array, &prefixes_array);

	/* Publish final count and drop the temporary dump array */
	bd->num_items = dst_array.num_items;
	free(bd->rr, M_TEMP);
	bd->rr = NULL;
	return (ret);
}
/*
 * End-of-dump callback: compiles the collected prefixes and, on success,
 * publishes the lookup function and its state to the datapath.
 */
static enum flm_op_result
bsearch4_end_dump(void *_data, struct fib_dp *dp)
{
	struct bsearch4_data *bd = _data;
	enum flm_op_result ret;

	ret = bsearch4_build(bd);
	if (ret != FLM_SUCCESS)
		return (ret);

	dp->arg = bd;
	dp->f = bsearch4_lookup;

	return (FLM_SUCCESS);
}
/*
 * Rtable change callback: the compiled array is immutable, so any
 * route change requires a full instance rebuild.
 */
static enum flm_op_result
bsearch4_change_cb(struct rib_head *rnh, struct rib_cmd_info *rc,
    void *_data)
{

	return (FLM_REBUILD);
}
/* Module descriptor for the small-fib binary-search algorithm. */
struct fib_lookup_module flm_bsearch4 = {
	.flm_name = "bsearch4",
	.flm_family = AF_INET,
	.flm_init_cb = bsearch4_init,
	.flm_destroy_cb = bsearch4_destroy,
	.flm_dump_rib_item_cb = bsearch4_add_route_cb,
	.flm_dump_end_cb = bsearch4_end_dump,
	.flm_change_rib_item_cb = bsearch4_change_cb,
	.flm_get_pref = bsearch4_get_pref,
};
/*
 * Lockless radix lookup algo.
 *
 * Compiles immutable radix from the current routing table.
 * Used with small amount of routes (<1000).
 * As datastructure is immutable, it gets rebuild on each rtable change.
 *
 * Lookups are slightly faster as shorter lookup keys are used
 * (4 bytes instead of 8 in stock radix).
 */

/* Key length/offset covering only sin_addr, shortening radix keys */
#define KEY_LEN_INET	(offsetof(struct sockaddr_in, sin_addr) + sizeof(in_addr_t))
#define OFF_LEN_INET	(8 * offsetof(struct sockaddr_in, sin_addr))
struct radix4_addr_entry {
	struct radix_node	rn[2];	/* radix glue; kept first so radix_node* casts back */
	struct sockaddr_in	addr;	/* prefix base address (key) */
	struct nhop_object	*nhop;
};
/* Per-entry slab size, rounded up for cache-friendly placement */
#define LRADIX4_ITEM_SZ	roundup2(sizeof(struct radix4_addr_entry), 64)

/* Algo instance state; datapath uses only rnh. */
struct lradix4_data {
	struct radix_node_head	*rnh;		/* immutable radix head */
	struct fib_data		*fd;		/* framework instance handle */
	void			*mem;		/* backing allocation for entries */
	char			*rt_base;	/* cache-aligned start of entry slabs */
	uint32_t		alloc_items;	/* capacity */
	uint32_t		num_items;	/* used entries */
};
/*
 * Datapath lookup: longest-prefix match in the immutable radix.
 * Lockless by construction — the tree is never modified after build.
 */
static struct nhop_object *
lradix4_lookup(void *algo_data, const struct flm_lookup_key key, uint32_t scopeid)
{
	struct radix_node_head *rnh = (struct radix_node_head *)algo_data;
	struct radix4_addr_entry *ent;
	/* Short 8-byte key: only sin_len + sin_addr are meaningful */
	struct sockaddr_in addr4 = {
		.sin_len = KEY_LEN_INET,
		.sin_addr = key.addr4,
	};

	ent = (struct radix4_addr_entry *)(rnh->rnh_matchaddr(&addr4, &rnh->rh));
	if (ent != NULL)
		return (ent->nhop);
	return (NULL);
}
/*
* Preference function.
* Assume close-to-ideal of < 10 routes (though worse than bsearch), then
* gradually degrade until 1000 routes are reached.
*/
static uint8_t
lradix4_get_pref(const struct rib_rtable_info *rinfo)
{
if (rinfo->num_prefixes < 10)
return (250);
else if (rinfo->num_prefixes < 1000)
return (254 - rinfo->num_prefixes / 4);
else
return (1);
}
/*
 * Instance init callback: allocates the instance state, the radix head
 * and a cache-aligned slab for all prefix entries.
 *
 * Fix: previously @lr (and its radix head) leaked on the rn_inithead()
 * and slab-allocation failure paths — *_data was never set on failure,
 * so the framework had no pointer to pass to the destroy callback.
 */
static enum flm_op_result
lradix4_init(uint32_t fibnum, struct fib_data *fd, void *_old_data, void **_data)
{
	struct lradix4_data *lr;
	struct rib_rtable_info rinfo;
	uint32_t count;
	size_t sz;

	lr = malloc(sizeof(struct lradix4_data), M_RTABLE, M_NOWAIT | M_ZERO);
	if (lr == NULL)
		return (FLM_REBUILD);
	if (!rn_inithead((void **)&lr->rnh, OFF_LEN_INET)) {
		free(lr, M_RTABLE);
		return (FLM_REBUILD);
	}

	fib_get_rtable_info(fib_get_rh(fd), &rinfo);

	/* 10% growth headroom over the current prefix count */
	count = rinfo.num_prefixes * 11 / 10;
	sz = count * LRADIX4_ITEM_SZ + CACHE_LINE_SIZE;
	lr->mem = malloc(sz, M_RTABLE, M_NOWAIT | M_ZERO);
	if (lr->mem == NULL) {
		rn_detachhead((void **)&lr->rnh);
		free(lr, M_RTABLE);
		return (FLM_REBUILD);
	}
	/* Align all rtentries to a cacheline boundary */
	lr->rt_base = (char *)roundup2((uintptr_t)lr->mem, CACHE_LINE_SIZE);
	lr->alloc_items = count;
	lr->fd = fd;

	*_data = lr;

	return (FLM_SUCCESS);
}
/* Instance destroy callback: tears down the radix head and all entries. */
static void
lradix4_destroy(void *_data)
{
	struct lradix4_data *lr = _data;

	if (lr->rnh != NULL)
		rn_detachhead((void **)&lr->rnh);
	if (lr->mem != NULL)
		free(lr->mem, M_RTABLE);
	free(lr, M_RTABLE);
}
/*
 * Dump callback: carves the next entry out of the pre-allocated slab and
 * inserts the route's prefix into the radix. Returns FLM_REBUILD when the
 * slab is exhausted or insertion fails (e.g. duplicate key).
 */
static enum flm_op_result
lradix4_add_route_cb(struct rtentry *rt, void *_data)
{
	struct lradix4_data *lr = (struct lradix4_data *)_data;
	struct radix4_addr_entry *ae;
	struct sockaddr_in mask;
	struct sockaddr *rt_mask = NULL;
	struct radix_node *rn;
	struct in_addr addr4, mask4;
	uint32_t scopeid;

	if (lr->num_items >= lr->alloc_items)
		return (FLM_REBUILD);

	ae = (struct radix4_addr_entry *)(lr->rt_base + lr->num_items * LRADIX4_ITEM_SZ);
	lr->num_items++;

	ae->nhop = rt_get_raw_nhop(rt);

	rt_get_inet_prefix_pmask(rt, &addr4, &mask4, &scopeid);
	ae->addr.sin_len = KEY_LEN_INET;
	ae->addr.sin_addr = addr4;

	/* Host routes (INADDR_ANY mask meaning "no mask") use NULL rt_mask */
	if (mask4.s_addr != INADDR_ANY) {
		bzero(&mask, sizeof(mask));
		mask.sin_len = KEY_LEN_INET;
		mask.sin_addr = mask4;
		rt_mask = (struct sockaddr *)&mask;
	}

	rn = lr->rnh->rnh_addaddr((struct sockaddr *)&ae->addr, rt_mask,
	    &lr->rnh->rh, ae->rn);
	if (rn == NULL)
		return (FLM_REBUILD);

	return (FLM_SUCCESS);
}
/* End-of-dump callback: publishes the immutable radix to the datapath. */
static enum flm_op_result
lradix4_end_dump(void *_data, struct fib_dp *dp)
{
	struct lradix4_data *lr = _data;

	dp->arg = lr->rnh;
	dp->f = lradix4_lookup;

	return (FLM_SUCCESS);
}
/*
 * Rtable change callback: the radix is immutable, so every change
 * triggers a full rebuild.
 */
static enum flm_op_result
lradix4_change_cb(struct rib_head *rnh, struct rib_cmd_info *rc,
    void *_data)
{

	return (FLM_REBUILD);
}
/* Module descriptor for the lockless immutable-radix algorithm. */
struct fib_lookup_module flm_radix4_lockless = {
	.flm_name = "radix4_lockless",
	.flm_family = AF_INET,
	.flm_init_cb = lradix4_init,
	.flm_destroy_cb = lradix4_destroy,
	.flm_dump_rib_item_cb = lradix4_add_route_cb,
	.flm_dump_end_cb = lradix4_end_dump,
	.flm_change_rib_item_cb = lradix4_change_cb,
	.flm_get_pref = lradix4_get_pref,
};
/*
 * Fallback lookup algorithm.
 * This is a simple wrapper around system radix.
 */

struct radix4_data {
	struct fib_data *fd;	/* framework instance handle */
	struct rib_head *rh;	/* live system rtable used for lookups */
};
/*
 * Datapath lookup via the live system radix, under the rib read lock.
 * Slower than the lockless variants but requires no rebuilds.
 */
static struct nhop_object *
radix4_lookup(void *algo_data, const struct flm_lookup_key key, uint32_t scopeid)
{
	RIB_RLOCK_TRACKER;
	struct rib_head *rh = (struct rib_head *)algo_data;
	struct radix_node *rn;
	struct nhop_object *nh;

	/* Prepare lookup key */
	struct sockaddr_in sin4 = {
		.sin_family = AF_INET,
		.sin_len = sizeof(struct sockaddr_in),
		.sin_addr = key.addr4,
	};

	nh = NULL;
	RIB_RLOCK(rh);
	rn = rh->rnh_matchaddr((void *)&sin4, &rh->head);
	/* Skip the tree's internal RNF_ROOT sentinel nodes */
	if (rn != NULL && ((rn->rn_flags & RNF_ROOT) == 0))
		nh = (RNTORT(rn))->rt_nhop;
	RIB_RUNLOCK(rh);

	return (nh);
}
/*
 * Preference callback: constant mid-range value regardless of table
 * size, making radix4 the always-available fallback.
 */
static uint8_t
radix4_get_pref(const struct rib_rtable_info *rinfo)
{

	return (50);
}
/* Instance init callback: records the rtable to forward lookups to. */
static enum flm_op_result
radix4_init(uint32_t fibnum, struct fib_data *fd, void *_old_data, void **_data)
{
	struct radix4_data *r4;

	r4 = malloc(sizeof(*r4), M_RTABLE, M_NOWAIT | M_ZERO);
	if (r4 == NULL)
		return (FLM_REBUILD);

	r4->fd = fd;
	r4->rh = fib_get_rh(fd);
	*_data = r4;

	return (FLM_SUCCESS);
}
/* Instance destroy callback: only the wrapper state is owned here. */
static void
radix4_destroy(void *_data)
{
	struct radix4_data *r4 = _data;

	free(r4, M_RTABLE);
}
/* Dump callback: no-op — lookups read the live rtable directly. */
static enum flm_op_result
radix4_add_route_cb(struct rtentry *rt, void *_data)
{

	return (FLM_SUCCESS);
}
/* End-of-dump callback: point the datapath at the live rtable. */
static enum flm_op_result
radix4_end_dump(void *_data, struct fib_dp *dp)
{
	struct radix4_data *r4 = _data;

	dp->arg = r4->rh;
	dp->f = radix4_lookup;

	return (FLM_SUCCESS);
}
/*
 * Rtable change callback: nothing to do — lookups always see the
 * live rtable, so no rebuild is ever required.
 */
static enum flm_op_result
radix4_change_cb(struct rib_head *rnh, struct rib_cmd_info *rc,
    void *_data)
{

	return (FLM_SUCCESS);
}
/* Module descriptor for the system-radix fallback algorithm. */
struct fib_lookup_module flm_radix4 = {
	.flm_name = "radix4",
	.flm_family = AF_INET,
	.flm_init_cb = radix4_init,
	.flm_destroy_cb = radix4_destroy,
	.flm_dump_rib_item_cb = radix4_add_route_cb,
	.flm_dump_end_cb = radix4_end_dump,
	.flm_change_rib_item_cb = radix4_change_cb,
	.flm_get_pref = radix4_get_pref,
};
/* Registers all IPv4 lookup algorithm modules with the framework. */
static void
fib4_algo_init(void)
{

	fib_module_register(&flm_bsearch4);
	fib_module_register(&flm_radix4_lockless);
	fib_module_register(&flm_radix4);
}
/* NOTE(review): SI_ORDER_THIRD assumed to run after framework init — confirm */
SYSINIT(fib4_algo_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, fib4_algo_init, NULL);

View file

@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$");
#include <net/route.h>
#include <net/route/route_ctl.h>
#include <net/route/route_var.h>
#include <net/route/fib_algo.h>
#include <net/route/nhop.h>
#include <net/toeplitz.h>
#include <net/vnet.h>
@ -69,6 +70,10 @@ __FBSDID("$FreeBSD$");
CHK_STRUCT_ROUTE_COMPAT(struct route_in6, ro_dst);
#ifdef FIB_ALGO
VNET_DEFINE(struct fib_dp *, inet6_dp);
#endif
#ifdef ROUTE_MPATH
struct _hash_5tuple_ipv6 {
struct in6_addr src;
@ -111,6 +116,29 @@ fib6_calc_software_hash(const struct in6_addr *src, const struct in6_addr *dst,
* one needs to pass NHR_REF as a flag. This will return referenced
* nexthop.
*/
#ifdef FIB_ALGO
/*
 * FIB_ALGO datapath lookup: resolves @dst6/@scopeid in fib @fibnum via
 * the currently attached lookup algorithm (dp->f).
 * Returns an unreferenced nexthop unless NHR_REF is set in @flags, in
 * which case the nexthop is returned referenced.
 * Returns NULL (and bumps rts_unreach) if no usable route exists.
 */
struct nhop_object *
fib6_lookup(uint32_t fibnum, const struct in6_addr *dst6,
    uint32_t scopeid, uint32_t flags, uint32_t flowid)
{
	struct nhop_object *nh;
	struct fib_dp *dp = &V_inet6_dp[fibnum];
	struct flm_lookup_key key = {.addr6 = dst6 };

	nh = dp->f(dp->arg, key, scopeid);
	if (nh != NULL) {
		/* presumably picks a member of a multipath group by flowid - see nhop_select() */
		nh = nhop_select(nh, flowid);
		/* Ensure route & ifp is UP */
		if (RT_LINK_IS_UP(nh->nh_ifp)) {
			if (flags & NHR_REF)
				nhop_ref_object(nh);
			return (nh);
		}
	}
	RTSTAT_INC(rts_unreach);
	return (NULL);
}
#else
struct nhop_object *
fib6_lookup(uint32_t fibnum, const struct in6_addr *dst6,
uint32_t scopeid, uint32_t flags, uint32_t flowid)
@ -151,6 +179,7 @@ fib6_lookup(uint32_t fibnum, const struct in6_addr *dst6,
RTSTAT_INC(rts_unreach);
return (NULL);
}
#endif
inline static int
check_urpf_nhop(const struct nhop_object *nh, uint32_t flags,
@ -237,8 +266,14 @@ fib6_check_urpf(uint32_t fibnum, const struct in6_addr *dst6,
uint32_t scopeid, uint32_t flags, const struct ifnet *src_if)
{
struct nhop_object *nh;
#ifdef FIB_ALGO
struct fib_dp *dp = &V_inet6_dp[fibnum];
struct flm_lookup_key key = {.addr6 = dst6 };
nh = dp->f(dp->arg, key, scopeid);
#else
nh = lookup_nhop(fibnum, dst6, scopeid);
#endif
if (nh != NULL)
return (check_urpf(nh, flags, src_if));
return (0);

View file

@ -44,6 +44,8 @@ struct rtentry *fib6_lookup_rt(uint32_t fibnum, const struct in6_addr *dst6,
uint32_t scopeid, uint32_t flags, struct route_nhop_data *rnd);
struct nhop_object *fib6_lookup_debugnet(uint32_t fibnum,
const struct in6_addr *dst6, uint32_t scopeid, uint32_t flags);
struct nhop_object *fib6_radix_lookup_nh(uint32_t fibnum,
const struct in6_addr *dst6, uint32_t scopeid);
uint32_t fib6_calc_software_hash(const struct in6_addr *src,
const struct in6_addr *dst, unsigned short src_port, unsigned short dst_port,
char proto, uint32_t *phashtype);

361
sys/netinet6/in6_fib_algo.c Normal file
View file

@ -0,0 +1,361 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2020 Alexander V. Chernikov
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet6.h"
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <net/vnet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_fib.h>
#include <net/route.h>
#include <net/route/nhop.h>
#include <net/route/route_ctl.h>
#include <net/route/route_var.h>
#include <net/route/fib_algo.h>
/*
* Lockless radix lookup algo.
*
* Compiles immutable radix from the current routing table.
* Used with small amount of routes (<1000).
* As the datastructure is immutable, it gets rebuilt on each rtable change.
*
*/
/*
 * Radix key geometry: the compared key is the length/family/pad header
 * plus the IPv6 address; bit offset of the address starts the lookup.
 */
#define KEY_LEN_INET6 (offsetof(struct sa_in6, sin6_addr) + sizeof(struct in6_addr))
#define OFF_LEN_INET6 (8 * offsetof(struct sa_in6, sin6_addr))

/* Compact sockaddr-like radix key; pad keeps sin6_addr 8-byte aligned. */
struct sa_in6 {
	uint8_t sin6_len;
	uint8_t sin6_family;
	uint8_t pad[6];
	struct in6_addr sin6_addr;
};

/* One preallocated radix entry: radix nodes, key and cached nexthop. */
struct radix6_addr_entry {
	struct radix_node rn[2];
	struct sa_in6 addr;
	struct nhop_object *nhop;
};
#define LRADIX6_ITEM_SZ roundup2(sizeof(struct radix6_addr_entry), CACHE_LINE_SIZE)

/* Per-instance state of the lockless immutable radix algorithm. */
struct lradix6_data {
	struct radix_node_head *rnh;	/* radix head lookups run against */
	struct fib_data *fd;		/* framework instance backpointer */
	void *mem; // raw radix_mem pointer to free
	void *radix_mem;		/* cache-line-aligned entry array */
	uint32_t alloc_items;		/* entries available in radix_mem */
	uint32_t num_items;		/* entries currently consumed */
};
/*
 * Datapath lookup in the immutable radix instance.
 * Builds a compact sa_in6 key from @key/@scopeid and returns the cached
 * nexthop of the longest-prefix match, or NULL if nothing matched.
 */
static struct nhop_object *
lradix6_lookup(void *algo_data, const struct flm_lookup_key key, uint32_t scopeid)
{
	struct radix_node_head *rnh;
	struct radix6_addr_entry *entry;
	struct sa_in6 dst = {
		.sin6_len = KEY_LEN_INET6,
		.sin6_addr = *key.addr6,
	};

	/* Link-local destinations carry the zone in the embedded scope word. */
	if (IN6_IS_SCOPE_LINKLOCAL(key.addr6))
		dst.sin6_addr.s6_addr16[1] = htons(scopeid & 0xffff);

	rnh = (struct radix_node_head *)algo_data;
	entry = (struct radix6_addr_entry *)rnh->rnh_matchaddr(&dst, &rnh->rh);
	if (entry == NULL)
		return (NULL);

	return (entry->nhop);
}
/*
 * Reports the preference of this algorithm for the given rtable:
 * near-maximal for tiny tables, linearly decaying up to 100k prefixes,
 * and minimal beyond that (rebuild cost dominates).
 */
static uint8_t
lradix6_get_pref(const struct rib_rtable_info *rinfo)
{

	if (rinfo->num_prefixes >= 100000)
		return (1);
	if (rinfo->num_prefixes < 10)
		return (255);

	/* Scale 255..2 across the 10..100k range. */
	return (255 - rinfo->num_prefixes / 394);
}
/*
 * Instantiates a new lockless-radix instance: allocates the state
 * structure, a fresh radix head and a contiguous, cache-line-aligned
 * array of entries sized to the current prefix count plus ~10% slack.
 * Entries are filled in later by lradix6_add_route_cb().
 *
 * On any allocation failure all partially-allocated state is released
 * before returning FLM_REBUILD (the original code leaked @lr and the
 * radix head here, as *_data was never set for destroy_cb to find).
 */
static enum flm_op_result
lradix6_init(uint32_t fibnum, struct fib_data *fd, void *_old_data, void **_data)
{
	struct lradix6_data *lr;
	struct rib_rtable_info rinfo;
	uint32_t count;
	void *mem;

	lr = malloc(sizeof(struct lradix6_data), M_RTABLE, M_NOWAIT | M_ZERO);
	if (lr == NULL)
		return (FLM_REBUILD);
	if (!rn_inithead((void **)&lr->rnh, OFF_LEN_INET6)) {
		free(lr, M_RTABLE);
		return (FLM_REBUILD);
	}

	fib_get_rtable_info(fib_get_rh(fd), &rinfo);
	/* ~10% headroom for routes added while the dump is in progress. */
	count = rinfo.num_prefixes * 11 / 10;
	// count+1 adds at least 1 cache line
	mem = malloc((count + 1) * LRADIX6_ITEM_SZ, M_RTABLE, M_NOWAIT | M_ZERO);
	if (mem == NULL) {
		rn_detachhead((void **)&lr->rnh);
		free(lr, M_RTABLE);
		return (FLM_REBUILD);
	}
	/* Keep the raw pointer for free(); hand out the aligned one. */
	lr->mem = mem;
	lr->radix_mem = (void *)roundup2((uintptr_t)mem, CACHE_LINE_SIZE);
	lr->alloc_items = count;
	lr->fd = fd;

	*_data = lr;

	return (FLM_SUCCESS);
}
/*
 * Releases everything allocated by lradix6_init(): the radix head,
 * the entry array and the state structure itself.
 */
static void
lradix6_destroy(void *_data)
{
	struct lradix6_data *lr6;

	lr6 = (struct lradix6_data *)_data;
	if (lr6->rnh != NULL)
		rn_detachhead((void **)&lr6->rnh);
	if (lr6->mem != NULL)
		free(lr6->mem, M_RTABLE);
	free(lr6, M_RTABLE);
}
/*
 * Initial-dump callback: inserts rtentry @rt into the immutable radix
 * being built. Entries are carved out of the preallocated radix_mem
 * array; running out of entries (or an addaddr failure, e.g. a
 * duplicate key) requests a full rebuild from the framework.
 */
static enum flm_op_result
lradix6_add_route_cb(struct rtentry *rt, void *_data)
{
	struct lradix6_data *lr = (struct lradix6_data *)_data;
	struct radix6_addr_entry *ae;
	struct sockaddr_in6 *rt_dst, *rt_mask;
	struct sa_in6 mask;
	struct radix_node *rn;
	struct nhop_object *nh;

	nh = rt_get_raw_nhop(rt);

	if (lr->num_items >= lr->alloc_items)
		return (FLM_REBUILD);
	/* Next free slot in the cache-line-sized entry array. */
	ae = (struct radix6_addr_entry *)((char *)lr->radix_mem + lr->num_items * LRADIX6_ITEM_SZ);
	lr->num_items++;

	ae->nhop = nh;
	rt_dst = (struct sockaddr_in6 *)rt_key(rt);
	rt_mask = (struct sockaddr_in6 *)rt_mask(rt);

	/* Convert dst/mask to the compact sa_in6 key format. */
	ae->addr.sin6_len = KEY_LEN_INET6;
	ae->addr.sin6_addr = rt_dst->sin6_addr;
	if (rt_mask != NULL) {
		bzero(&mask, sizeof(mask));
		mask.sin6_len = KEY_LEN_INET6;
		mask.sin6_addr = rt_mask->sin6_addr;
		rt_mask = (struct sockaddr_in6 *)&mask;
	}

	/* NULL mask here is treated as a host route by the radix code. */
	rn = lr->rnh->rnh_addaddr((struct sockaddr *)&ae->addr,
	    (struct sockaddr *)rt_mask, &lr->rnh->rh, ae->rn);
	if (rn == NULL)
		return (FLM_REBUILD);

	return (FLM_SUCCESS);
}
/*
 * Finalizes the initial synchronisation and exports the datapath
 * function pointer/argument pair used for lookups.
 */
static enum flm_op_result
lradix6_end_dump(void *_data, struct fib_dp *dp)
{
	struct lradix6_data *lr6;

	lr6 = (struct lradix6_data *)_data;
	dp->arg = lr6->rnh;
	dp->f = lradix6_lookup;

	return (FLM_SUCCESS);
}
/*
 * Rtable change callback.
 * The compiled radix is immutable, so any rtable change invalidates it:
 * always request a full rebuild from the framework.
 */
static enum flm_op_result
lradix6_change_cb(struct rib_head *rnh, struct rib_cmd_info *rc,
    void *_data)
{

	return (FLM_REBUILD);
}
/*
 * Lookup module descriptor for the IPv6 lockless immutable radix,
 * tailored for small tables. Registered at SYSINIT time.
 */
struct fib_lookup_module flm_radix6_lockless = {
	.flm_name = "radix6_lockless",
	.flm_family = AF_INET6,
	.flm_init_cb = lradix6_init,
	.flm_destroy_cb = lradix6_destroy,
	.flm_dump_rib_item_cb = lradix6_add_route_cb,
	.flm_dump_end_cb = lradix6_end_dump,
	.flm_change_rib_item_cb = lradix6_change_cb,
	.flm_get_pref = lradix6_get_pref,
};
/*
* Fallback lookup algorithm.
* This is a simple wrapper around system radix.
*/
/* State of the fallback algorithm: a thin wrapper around the system radix. */
struct radix6_data {
	struct fib_data *fd;	/* framework instance backpointer */
	struct rib_head *rh;	/* rib the lookups run against */
};
/*
 * Lookup helper for the radix6 wrapper: performs an lpm lookup in the
 * system radix under the rib read lock and returns the matching
 * (unreferenced) nexthop, or NULL if no route matched.
 */
static struct nhop_object *
radix6_lookup(void *algo_data, const struct flm_lookup_key key, uint32_t scopeid)
{
	RIB_RLOCK_TRACKER;
	struct rib_head *rh = (struct rib_head *)algo_data;
	struct radix_node *rn;
	struct nhop_object *nh;

	/* Prepare lookup key */
	struct sockaddr_in6 sin6 = {
		.sin6_family = AF_INET6,
		.sin6_len = sizeof(struct sockaddr_in6),
		.sin6_addr = *key.addr6,
	};
	/* Embed the zone for link-local destinations (scoped-address convention). */
	if (IN6_IS_SCOPE_LINKLOCAL(key.addr6))
		sin6.sin6_addr.s6_addr16[1] = htons(scopeid & 0xffff);

	nh = NULL;
	RIB_RLOCK(rh);
	rn = rh->rnh_matchaddr((void *)&sin6, &rh->head);
	/* Skip the RNF_ROOT sentinel nodes of the radix tree. */
	if (rn != NULL && ((rn->rn_flags & RNF_ROOT) == 0))
		nh = (RNTORT(rn))->rt_nhop;
	RIB_RUNLOCK(rh);

	return (nh);
}
/*
 * Performs an IPv6 lpm lookup in fib @fibnum directly via the system
 * radix, bypassing any attached lookup algorithm.
 * Returns the matching (unreferenced) nexthop or NULL.
 *
 * Note: dropped the original's redundant "rh = rh = ..." double
 * assignment in the initializer.
 */
struct nhop_object *
fib6_radix_lookup_nh(uint32_t fibnum, const struct in6_addr *dst6, uint32_t scopeid)
{
	struct rib_head *rh = rt_tables_get_rnh(fibnum, AF_INET6);
	const struct flm_lookup_key key = { .addr6 = dst6 };

	if (rh == NULL)
		return (NULL);

	return (radix6_lookup(rh, key, scopeid));
}
/*
 * Reports the preference of this algorithm for the given rtable.
 * The system radix works for any table size but is rarely the fastest
 * option, hence a fixed mid-range preference.
 */
static uint8_t
radix6_get_pref(const struct rib_rtable_info *rinfo)
{

	/* Constant preference, independent of the number of routes. */
	return (50);
}
/*
 * Instantiates the radix6 wrapper: allocates per-instance state and
 * caches the rib head used for lookups.
 * Returns FLM_REBUILD on allocation failure so the framework retries.
 */
static enum flm_op_result
radix6_init(uint32_t fibnum, struct fib_data *fd, void *_old_data, void **_data)
{
	struct radix6_data *data;

	data = malloc(sizeof(*data), M_RTABLE, M_NOWAIT | M_ZERO);
	if (data == NULL)
		return (FLM_REBUILD);
	data->rh = fib_get_rh(fd);
	data->fd = fd;

	*_data = data;

	return (FLM_SUCCESS);
}
/*
 * Frees the per-instance state allocated by radix6_init().
 */
static void
radix6_destroy(void *_data)
{
	struct radix6_data *r6 = (struct radix6_data *)_data;

	free(r6, M_RTABLE);
}
/*
 * Initial-dump callback for the radix6 wrapper.
 * No-op: this algorithm performs lookups directly in the system radix
 * (see radix6_end_dump()), so there is no private datastructure to
 * populate from the dump.
 */
static enum flm_op_result
radix6_add_route_cb(struct rtentry *rt, void *_data)
{

	return (FLM_SUCCESS);
}
/*
 * Finalizes the initial synchronisation and exports the datapath
 * function pointer/argument pair used for lookups.
 */
static enum flm_op_result
radix6_end_dump(void *_data, struct fib_dp *dp)
{
	struct radix6_data *r6;

	r6 = (struct radix6_data *)_data;
	dp->arg = r6->rh;
	dp->f = radix6_lookup;

	return (FLM_SUCCESS);
}
/*
 * Rtable change callback.
 * No-op: the datapath reads the live system radix, which the routing
 * code already keeps up to date, so no resync is required here.
 */
static enum flm_op_result
radix6_change_cb(struct rib_head *rnh, struct rib_cmd_info *rc,
    void *_data)
{

	return (FLM_SUCCESS);
}
/*
 * Lookup module descriptor for the IPv6 system-radix fallback backend.
 * Registered with the fib_algo framework at SYSINIT time.
 */
struct fib_lookup_module flm_radix6 = {
	.flm_name = "radix6",
	.flm_family = AF_INET6,
	.flm_init_cb = radix6_init,
	.flm_destroy_cb = radix6_destroy,
	.flm_dump_rib_item_cb = radix6_add_route_cb,
	.flm_dump_end_cb = radix6_end_dump,
	.flm_change_rib_item_cb = radix6_change_cb,
	.flm_get_pref = radix6_get_pref,
};
/*
 * Registers all IPv6 lookup modules provided by this file with the
 * fib_algo framework.
 */
static void
fib6_algo_init(void)
{

	fib_module_register(&flm_radix6_lockless);
	fib_module_register(&flm_radix6);
}
SYSINIT(fib6_algo_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, fib6_algo_init, NULL);