pf: backport OpenBSD syntax of "scrub" option for "match" and "pass" rules

Introduce the OpenBSD syntax of the "scrub" option for "match" and "pass"
rules and the "set reassemble" flag. The patch is backward-compatible:
pf.conf can still be written in FreeBSD style.
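
For example, with this change a ruleset like the following (values are
illustrative) parses in addition to the traditional scrub rules:

	set reassemble yes no-df
	match in all scrub (no-df, random-id, max-mss 1440)
	pass in proto tcp to port 80 scrub (min-ttl 64, reassemble tcp)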

Obtained from:	OpenBSD
MFC after:	never
Sponsored by:	InnoGames GmbH
Differential Revision:	https://reviews.freebsd.org/D38025
This commit is contained in:
Kajetan Staszkiewicz 2023-04-13 18:12:59 +02:00 committed by Kristof Provost
parent 8935a39932
commit 39282ef356
15 changed files with 673 additions and 214 deletions

View file

@ -225,6 +225,7 @@ pfctl_get_status(int dev)
status->states = nvlist_get_number(nvl, "states");
status->src_nodes = nvlist_get_number(nvl, "src_nodes");
status->syncookies_active = nvlist_get_bool(nvl, "syncookies_active");
status->reass = nvlist_get_number(nvl, "reass");
strlcpy(status->ifname, nvlist_get_string(nvl, "ifname"),
IFNAMSIZ);

View file

@ -58,6 +58,7 @@ struct pfctl_status {
char ifname[IFNAMSIZ];
uint8_t pf_chksum[PF_MD5_DIGEST_LENGTH];
bool syncookies_active;
uint32_t reass;
struct pfctl_status_counters counters;
struct pfctl_status_counters lcounters;
@ -347,7 +348,7 @@ struct pfctl_state {
uint32_t creation;
uint32_t expire;
uint32_t pfsync_time;
uint8_t state_flags;
uint16_t state_flags;
uint32_t sync_flags;
};
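
A minimal sketch of a libpfctl consumer reading the new field (assumes the
usual pfctl_get_status()/pfctl_free_status() pairing from libpfctl):

	struct pfctl_status *status;

	if ((status = pfctl_get_status(dev)) == NULL)
		err(1, "pfctl_get_status");
	if (status->reass & PF_REASS_ENABLED)
		printf("reassembly on%s\n",
		    (status->reass & PF_REASS_NODF) ? " (no-df)" : "");
	pfctl_free_status(status);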

View file

@ -225,13 +225,21 @@ struct node_qassign {
static struct filter_opts {
int marker;
#define FOM_FLAGS 0x01
#define FOM_ICMP 0x02
#define FOM_TOS 0x04
#define FOM_KEEP 0x08
#define FOM_SRCTRACK 0x10
#define FOM_FLAGS 0x0001
#define FOM_ICMP 0x0002
#define FOM_TOS 0x0004
#define FOM_KEEP 0x0008
#define FOM_SRCTRACK 0x0010
#define FOM_MINTTL 0x0020
#define FOM_MAXMSS 0x0040
#define FOM_AFTO 0x0080 /* not yet implemented */
#define FOM_SETTOS 0x0100
#define FOM_SCRUB_TCP 0x0200
#define FOM_SETPRIO 0x0400
#define FOM_ONCE 0x1000 /* not yet implemented */
#define FOM_PRIO 0x2000
#define FOM_SETDELAY 0x4000
#define FOM_FRAGCACHE 0x8000 /* does not exist in OpenBSD */
struct node_uid *uid;
struct node_gid *gid;
struct {
@ -266,6 +274,12 @@ static struct filter_opts {
struct node_host *addr;
u_int16_t port;
} divert;
/* new-style scrub opts */
int nodf;
int minttl;
int settos;
int randomid;
int max_mss;
} filter_opts;
static struct antispoof_opts {
@ -277,10 +291,6 @@ static struct antispoof_opts {
static struct scrub_opts {
int marker;
#define SOM_MINTTL 0x01
#define SOM_MAXMSS 0x02
#define SOM_FRAGCACHE 0x04
#define SOM_SETTOS 0x08
int nodf;
int minttl;
int maxmss;
@ -511,7 +521,7 @@ int parseport(char *, struct range *r, int);
%token <v.i> PORTBINARY
%type <v.interface> interface if_list if_item_not if_item
%type <v.number> number icmptype icmp6type uid gid
%type <v.number> tos not yesno
%type <v.number> tos not yesno optnodf
%type <v.probability> probability
%type <v.i> no dir af fragcache optimizer syncookie_val
%type <v.i> sourcetrack flush unaryop statelock
@ -631,7 +641,16 @@ optimizer : string {
}
;
option : SET OPTIMIZATION STRING {
optnodf : /* empty */ { $$ = 0; }
| NODF { $$ = 1; }
;
option : SET REASSEMBLE yesno optnodf {
if (check_rulestate(PFCTL_STATE_OPTION))
YYERROR;
pfctl_set_reassembly(pf, $3, $4);
}
| SET OPTIMIZATION STRING {
if (check_rulestate(PFCTL_STATE_OPTION)) {
free($3);
YYERROR;
@ -1408,7 +1427,7 @@ scrubrule : scrubaction dir logquick interface af proto fromto scrub_opts
r.min_ttl = $8.minttl;
if ($8.maxmss)
r.max_mss = $8.maxmss;
if ($8.marker & SOM_SETTOS) {
if ($8.marker & FOM_SETTOS) {
r.rule_flag |= PFRULE_SET_TOS;
r.set_tos = $8.settos;
}
@ -1443,7 +1462,7 @@ scrub_opts : {
}
;
scrub_opts_l : scrub_opts_l scrub_opt
scrub_opts_l : scrub_opts_l comma scrub_opt
| scrub_opt
;
@ -1455,7 +1474,7 @@ scrub_opt : NODF {
scrub_opts.nodf = 1;
}
| MINTTL NUMBER {
if (scrub_opts.marker & SOM_MINTTL) {
if (scrub_opts.marker & FOM_MINTTL) {
yyerror("min-ttl cannot be respecified");
YYERROR;
}
@ -1463,11 +1482,11 @@ scrub_opt : NODF {
yyerror("illegal min-ttl value %d", $2);
YYERROR;
}
scrub_opts.marker |= SOM_MINTTL;
scrub_opts.marker |= FOM_MINTTL;
scrub_opts.minttl = $2;
}
| MAXMSS NUMBER {
if (scrub_opts.marker & SOM_MAXMSS) {
if (scrub_opts.marker & FOM_MAXMSS) {
yyerror("max-mss cannot be respecified");
YYERROR;
}
@ -1475,23 +1494,23 @@ scrub_opt : NODF {
yyerror("illegal max-mss value %d", $2);
YYERROR;
}
scrub_opts.marker |= SOM_MAXMSS;
scrub_opts.marker |= FOM_MAXMSS;
scrub_opts.maxmss = $2;
}
| SETTOS tos {
if (scrub_opts.marker & SOM_SETTOS) {
if (scrub_opts.marker & FOM_SETTOS) {
yyerror("set-tos cannot be respecified");
YYERROR;
}
scrub_opts.marker |= SOM_SETTOS;
scrub_opts.marker |= FOM_SETTOS;
scrub_opts.settos = $2;
}
| fragcache {
if (scrub_opts.marker & SOM_FRAGCACHE) {
if (scrub_opts.marker & FOM_FRAGCACHE) {
yyerror("fragcache cannot be respecified");
YYERROR;
}
scrub_opts.marker |= SOM_FRAGCACHE;
scrub_opts.marker |= FOM_FRAGCACHE;
scrub_opts.fragcache = $1;
}
| REASSEMBLE STRING {
@ -2351,6 +2370,21 @@ pfrule : action dir logquick interface route af proto fromto
r.prob = $9.prob;
r.rtableid = $9.rtableid;
if ($9.nodf)
r.scrub_flags |= PFSTATE_NODF;
if ($9.randomid)
r.scrub_flags |= PFSTATE_RANDOMID;
if ($9.minttl)
r.min_ttl = $9.minttl;
if ($9.max_mss)
r.max_mss = $9.max_mss;
if ($9.marker & FOM_SETTOS) {
r.scrub_flags |= PFSTATE_SETTOS;
r.set_tos = $9.settos;
}
if ($9.marker & FOM_SCRUB_TCP)
r.scrub_flags |= PFSTATE_SCRUB_TCP;
if ($9.marker & FOM_PRIO) {
if ($9.prio == 0)
r.prio = PF_PRIO_ZERO;
@ -2933,6 +2967,24 @@ filter_opt : USER uids {
filter_opts.divert.port = 1; /* some random value */
#endif
}
| SCRUB '(' scrub_opts ')' {
filter_opts.nodf = $3.nodf;
filter_opts.minttl = $3.minttl;
if ($3.marker & FOM_SETTOS) {
/* Old-style rules are "scrub set-tos 0x42".
* New-style rules are "set tos 0x42 scrub (...)".
* What is inside "scrub (...)" is unfortunately the
* original scrub syntax, so it would overwrite
* "set tos" of a pass/match rule.
*/
filter_opts.settos = $3.settos;
}
filter_opts.randomid = $3.randomid;
filter_opts.max_mss = $3.maxmss;
if ($3.reassemble_tcp)
filter_opts.marker |= FOM_SCRUB_TCP;
filter_opts.marker |= $3.marker;
}
| filter_sets
;
@ -2953,6 +3005,14 @@ filter_set : prio {
filter_opts.set_prio[0] = $1.b1;
filter_opts.set_prio[1] = $1.b2;
}
| TOS tos {
if (filter_opts.marker & FOM_SETTOS) {
yyerror("tos cannot be respecified");
YYERROR;
}
filter_opts.marker |= FOM_SETTOS;
filter_opts.settos = $2;
}
prio : PRIO NUMBER {
if ($2 < 0 || $2 > PF_PRIO_MAX) {
yyerror("prio must be 0 - %u", PF_PRIO_MAX);
@ -5170,6 +5230,7 @@ rule_consistent(struct pfctl_rule *r, int anchor_call)
switch (r->action) {
case PF_PASS:
case PF_MATCH:
case PF_DROP:
case PF_SCRUB:
case PF_NOSCRUB:
@ -5240,8 +5301,8 @@ filter_consistent(struct pfctl_rule *r, int anchor_call)
yyerror("max-src-nodes requires 'source-track rule'");
problems++;
}
if (r->action == PF_DROP && r->keep_state) {
yyerror("keep state on block rules doesn't make sense");
if (r->action != PF_PASS && r->keep_state) {
yyerror("keep state is great, but only for pass rules");
problems++;
}
if (r->rule_flag & PFRULE_STATESLOPPY &&
@ -5251,6 +5312,18 @@ filter_consistent(struct pfctl_rule *r, int anchor_call)
"synproxy state or modulate state");
problems++;
}
/* match rules */
if (r->action == PF_MATCH) {
if (r->divert.port) {
yyerror("divert is not supported on match rules");
problems++;
}
if (r->rt) {
yyerror("route-to, reply-to, dup-to and fastroute "
"must not be used on match rules");
problems++;
}
}
return (-problems);
}
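
Given the filter_consistent() checks above, rules like these are now
rejected when the ruleset is loaded (a sketch; addresses are illustrative):

	# "keep state is great, but only for pass rules"
	match in all keep state
	# "route-to, reply-to, dup-to and fastroute must not be used on match rules"
	match out route-to (em0 203.0.113.1) all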

View file

@ -339,8 +339,24 @@ print_state(struct pfctl_state *s, int opts)
printf(", anchor %u", s->anchor);
if (s->rule != -1)
printf(", rule %u", s->rule);
if (s->state_flags & PFSTATE_ALLOWOPTS)
printf(", allow-opts");
if (s->state_flags & PFSTATE_SLOPPY)
printf(", sloppy");
if (s->state_flags & PFSTATE_NOSYNC)
printf(", no-sync");
if (s->state_flags & PFSTATE_ACK)
printf(", psync-ack");
if (s->state_flags & PFSTATE_NODF)
printf(", no-df");
if (s->state_flags & PFSTATE_SETTOS)
printf(", set-tos");
if (s->state_flags & PFSTATE_RANDOMID)
printf(", random-id");
if (s->state_flags & PFSTATE_SCRUB_TCP)
printf(", scrub-tcp");
if (s->state_flags & PFSTATE_SETPRIO)
printf(", set-prio");
if (s->sync_flags & PFSYNC_FLAG_SRCNODE)
printf(", source-track");
if (s->sync_flags & PFSYNC_FLAG_NATSRCNODE)

View file

@ -94,6 +94,7 @@ int pfctl_load_timeout(struct pfctl *, unsigned int, unsigned int);
int pfctl_load_debug(struct pfctl *, unsigned int);
int pfctl_load_logif(struct pfctl *, char *);
int pfctl_load_hostid(struct pfctl *, u_int32_t);
int pfctl_load_reassembly(struct pfctl *, u_int32_t);
int pfctl_load_syncookies(struct pfctl *, u_int8_t);
int pfctl_get_pool(int, struct pfctl_pool *, u_int32_t, u_int32_t, int,
char *);
@ -2258,6 +2259,7 @@ pfctl_init_options(struct pfctl *pf)
pf->limit[PF_LIMIT_TABLE_ENTRIES] = PFR_KENTRY_HIWAT;
pf->debug = PF_DEBUG_URGENT;
pf->reassemble = 0;
pf->syncookies = false;
pf->syncookieswat[0] = PF_SYNCOOKIES_LOWATPCT;
@ -2318,6 +2320,11 @@ pfctl_load_options(struct pfctl *pf)
if (pfctl_load_hostid(pf, pf->hostid))
error = 1;
/* load reassembly settings */
if (!(pf->opts & PF_OPT_MERGE) || pf->reass_set)
if (pfctl_load_reassembly(pf, pf->reassemble))
error = 1;
/* load keepcounters */
if (pfctl_set_keepcounters(pf->dev, pf->keep_counters))
error = 1;
@ -2414,6 +2421,28 @@ pfctl_load_timeout(struct pfctl *pf, unsigned int timeout, unsigned int seconds)
return (0);
}
int
pfctl_set_reassembly(struct pfctl *pf, int on, int nodf)
{
if ((loadopt & PFCTL_FLAG_OPTION) == 0)
return (0);
pf->reass_set = 1;
if (on) {
pf->reassemble = PF_REASS_ENABLED;
if (nodf)
pf->reassemble |= PF_REASS_NODF;
} else {
pf->reassemble = 0;
}
if (pf->opts & PF_OPT_VERBOSE)
printf("set reassemble %s %s\n", on ? "yes" : "no",
nodf ? "no-df" : "");
return (0);
}
int
pfctl_set_optimization(struct pfctl *pf, const char *opt)
{
@ -2512,6 +2541,16 @@ pfctl_load_hostid(struct pfctl *pf, u_int32_t hostid)
return (0);
}
int
pfctl_load_reassembly(struct pfctl *pf, u_int32_t reassembly)
{
if (ioctl(dev, DIOCSETREASS, &reassembly)) {
warnx("DIOCSETREASS");
return (1);
}
return (0);
}
int
pfctl_load_syncookies(struct pfctl *pf, u_int8_t val)
{

View file

@ -429,6 +429,7 @@ print_pool(struct pfctl_pool *pool, u_int16_t p1, u_int16_t p2,
print_addr(&pooladdr->addr, af, 0);
break;
case PF_PASS:
case PF_MATCH:
if (PF_AZERO(&pooladdr->addr.v.a.addr, af))
printf("%s", pooladdr->ifname);
else {
@ -624,6 +625,10 @@ print_status(struct pfctl_status *s, struct pfctl_syncookies *cookies, int opts)
PFCTL_SYNCOOKIES_MODE_NAMES[cookies->mode]);
printf(" %-25s %s\n", "active",
s->syncookies_active ? "active" : "inactive");
printf("Reassemble %24s %s\n",
s->reass & PF_REASS_ENABLED ? "yes" : "no",
s->reass & PF_REASS_NODF ? "no-df" : ""
);
}
}
@ -685,6 +690,7 @@ print_src_node(struct pf_src_node *sn, int opts)
printf(", rdr rule %u", sn->rule.nr);
break;
case PF_PASS:
case PF_MATCH:
if (sn->rule.nr != -1)
printf(", filter rule %u", sn->rule.nr);
break;
@ -810,7 +816,8 @@ void
print_rule(struct pfctl_rule *r, const char *anchor_call, int verbose, int numeric)
{
static const char *actiontypes[] = { "pass", "block", "scrub",
"no scrub", "nat", "no nat", "binat", "no binat", "rdr", "no rdr" };
"no scrub", "nat", "no nat", "binat", "no binat", "rdr", "no rdr",
"", "", "match"};
static const char *anchortypes[] = { "anchor", "anchor", "anchor",
"anchor", "nat-anchor", "nat-anchor", "binat-anchor",
"binat-anchor", "rdr-anchor", "rdr-anchor" };
@ -946,7 +953,7 @@ print_rule(struct pfctl_rule *r, const char *anchor_call, int verbose, int numer
print_flags(r->flags);
printf("/");
print_flags(r->flagset);
} else if (r->action == PF_PASS &&
} else if ((r->action == PF_PASS || r->action == PF_MATCH) &&
(!r->proto || r->proto == IPPROTO_TCP) &&
!(r->rule_flag & PFRULE_FRAGMENT) &&
!anchor_call[0] && r->keep_state)
@ -988,6 +995,10 @@ print_rule(struct pfctl_rule *r, const char *anchor_call, int verbose, int numer
r->set_prio[1]);
comma = ",";
}
if (r->scrub_flags & PFSTATE_SETTOS) {
printf("%s tos 0x%2.2x", comma, r->set_tos);
comma = ",";
}
printf(" )");
}
if (!r->keep_state && r->action == PF_PASS && !anchor_call[0])
@ -1113,26 +1124,43 @@ print_rule(struct pfctl_rule *r, const char *anchor_call, int verbose, int numer
}
printf(")");
}
if (r->rule_flag & PFRULE_FRAGMENT)
printf(" fragment");
if (r->rule_flag & PFRULE_NODF)
printf(" no-df");
if (r->rule_flag & PFRULE_RANDOMID)
printf(" random-id");
if (r->min_ttl)
printf(" min-ttl %d", r->min_ttl);
if (r->max_mss)
printf(" max-mss %d", r->max_mss);
if (r->rule_flag & PFRULE_SET_TOS)
printf(" set-tos 0x%2.2x", r->set_tos);
if (r->allow_opts)
printf(" allow-opts");
if (r->rule_flag & PFRULE_FRAGMENT)
printf(" fragment");
if (r->action == PF_SCRUB) {
/* Scrub flags for old-style scrub. */
if (r->rule_flag & PFRULE_NODF)
printf(" no-df");
if (r->rule_flag & PFRULE_RANDOMID)
printf(" random-id");
if (r->min_ttl)
printf(" min-ttl %d", r->min_ttl);
if (r->max_mss)
printf(" max-mss %d", r->max_mss);
if (r->rule_flag & PFRULE_SET_TOS)
printf(" set-tos 0x%2.2x", r->set_tos);
if (r->rule_flag & PFRULE_REASSEMBLE_TCP)
printf(" reassemble tcp");
/* PFRULE_FRAGMENT_NOREASS is set on all rules by default! */
printf(" fragment %sreassemble",
r->rule_flag & PFRULE_FRAGMENT_NOREASS ? "no " : "");
} else if (r->scrub_flags & PFSTATE_SCRUBMASK || r->min_ttl || r->max_mss) {
/* Scrub actions on normal rules. */
printf(" scrub(");
if (r->scrub_flags & PFSTATE_NODF)
printf(" no-df");
if (r->scrub_flags & PFSTATE_RANDOMID)
printf(" random-id");
if (r->min_ttl)
printf(" min-ttl %d", r->min_ttl);
if (r->scrub_flags & PFSTATE_SETTOS)
printf(" set-tos 0x%2.2x", r->set_tos);
if (r->scrub_flags & PFSTATE_SCRUB_TCP)
printf(" reassemble tcp");
if (r->max_mss)
printf(" max-mss %d", r->max_mss);
printf(")");
}
i = 0;
while (r->label[i][0])
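
Based on the printers above, the new output looks roughly like this
(column widths and values are illustrative):

	$ pfctl -s info
	...
	Reassemble                        yes no-df

	$ pfctl -s rules
	pass in all scrub( no-df random-id min-ttl 64 max-mss 1440)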

View file

@ -101,6 +101,7 @@ struct pfctl {
u_int32_t limit[PF_LIMIT_MAX];
u_int32_t debug;
u_int32_t hostid;
u_int32_t reassemble;
char *ifname;
bool keep_counters;
u_int8_t syncookies;
@ -112,6 +113,7 @@ struct pfctl {
u_int8_t debug_set;
u_int8_t hostid_set;
u_int8_t ifname_set;
u_int8_t reass_set;
};
struct node_if {
@ -285,6 +287,7 @@ void pfctl_move_pool(struct pfctl_pool *, struct pfctl_pool *);
void pfctl_clear_pool(struct pfctl_pool *);
int pfctl_set_timeout(struct pfctl *, const char *, int, int);
int pfctl_set_reassembly(struct pfctl *, int, int);
int pfctl_set_optimization(struct pfctl *, const char *);
int pfctl_set_limit(struct pfctl *, const char *, unsigned int);
int pfctl_set_logif(struct pfctl *, char *);

View file

@ -1489,6 +1489,15 @@ The packet is passed;
state is created unless the
.Ar no state
option is specified.
.It Ar match
The action is unaltered; the action of the previously matched rule still
applies.
Match rules apply queue and rtable assignments for every matched packet;
subsequent matching pass or match rules can overwrite the assignment.
If they don't specify a queue or an rtable, respectively, the previously
set value remains.
Additionally, match rules can contain log statements; the logging is done
for each and every matching match rule, so it is possible to log a single
packet multiple times.
.El
.Pp
By default
@ -3172,7 +3181,7 @@ schedulers = ( cbq-def | priq-def | hfsc-def )
bandwidth-spec = "number" ( "b" | "Kb" | "Mb" | "Gb" | "%" )
etheraction = "pass" | "block"
action = "pass" | "block" [ return ] | [ "no" ] "scrub"
action = "pass" | "match" | "block" [ return ] | [ "no" ] "scrub"
return = "drop" | "return" | "return-rst" [ "( ttl" number ")" ] |
"return-icmp" [ "(" icmpcode [ [ "," ] icmp6code ] ")" ] |
"return-icmp6" [ "(" icmp6code ")" ]

View file

@ -310,6 +310,7 @@ _Static_assert(sizeof(time_t) == 4 || sizeof(time_t) == 8, "unexpected time_t si
SYSCTL_DECL(_net_pf);
MALLOC_DECLARE(M_PFHASH);
MALLOC_DECLARE(M_PF_RULE_ITEM);
SDT_PROVIDER_DECLARE(pf);
@ -593,8 +594,13 @@ struct pf_kpool {
};
struct pf_rule_actions {
int rtableid;
uint16_t qid;
uint16_t pqid;
uint16_t max_mss;
uint8_t log;
uint8_t set_tos;
uint8_t min_ttl;
uint16_t dnpipe;
uint16_t dnrpipe; /* Reverse direction pipe */
uint32_t flags;
@ -811,10 +817,18 @@ struct pf_krule {
#endif
};
struct pf_krule_item {
SLIST_ENTRY(pf_krule_item) entry;
struct pf_krule *r;
};
SLIST_HEAD(pf_krule_slist, pf_krule_item);
struct pf_ksrc_node {
LIST_ENTRY(pf_ksrc_node) entry;
struct pf_addr addr;
struct pf_addr raddr;
struct pf_krule_slist match_rules;
union pf_krule_ptr rule;
struct pfi_kkif *kif;
counter_u64_t bytes[2];
@ -892,16 +906,6 @@ struct pf_state_cmp {
u_int8_t pad[3];
};
#define PFSTATE_ALLOWOPTS 0x01
#define PFSTATE_SLOPPY 0x02
/* was PFSTATE_PFLOW 0x04 */
#define PFSTATE_NOSYNC 0x08
#define PFSTATE_ACK 0x10
#define PFRULE_DN_IS_PIPE 0x40
#define PFRULE_DN_IS_QUEUE 0x80
#define PFSTATE_SETPRIO 0x0200
#define PFSTATE_SETMASK (PFSTATE_SETPRIO)
struct pf_state_scrub_export {
uint16_t pfss_flags;
uint8_t pfss_ttl; /* stashed TTL */
@ -952,12 +956,13 @@ struct pf_state_export {
uint8_t proto;
uint8_t direction;
uint8_t log;
uint8_t state_flags;
uint8_t state_flags_compat;
uint8_t timeout;
uint8_t sync_flags;
uint8_t updates;
uint16_t state_flags;
uint8_t spare[112];
uint8_t spare[110];
};
_Static_assert(sizeof(struct pf_state_export) == 384, "size incorrect");
@ -974,7 +979,7 @@ struct pf_kstate {
* end of the area
*/
u_int8_t state_flags;
u_int16_t state_flags;
u_int8_t timeout;
u_int8_t sync_state; /* PFSYNC_S_x */
u_int8_t sync_updates; /* XXX */
@ -985,6 +990,7 @@ struct pf_kstate {
LIST_ENTRY(pf_kstate) entry;
struct pf_state_peer src;
struct pf_state_peer dst;
struct pf_krule_slist match_rules;
union pf_krule_ptr rule;
union pf_krule_ptr anchor;
union pf_krule_ptr nat_rule;
@ -1000,18 +1006,22 @@ struct pf_kstate {
u_int32_t creation;
u_int32_t expire;
u_int32_t pfsync_time;
u_int16_t qid;
u_int16_t pqid;
u_int16_t qid;
u_int16_t pqid;
u_int16_t dnpipe;
u_int16_t dnrpipe;
u_int16_t tag;
u_int8_t log;
int rtableid;
u_int8_t min_ttl;
u_int8_t set_tos;
u_int16_t max_mss;
};
/*
* Size <= fits 13 objects per page on LP64. Try to not grow the struct beyond that.
* Size <= fits 12 objects per page on LP64. Try to not grow the struct beyond that.
*/
_Static_assert(sizeof(struct pf_kstate) <= 312, "pf_kstate size crosses 312 bytes");
_Static_assert(sizeof(struct pf_kstate) <= 336, "pf_kstate size crosses 336 bytes");
#endif
/*
@ -1061,9 +1071,9 @@ struct pfsync_state {
sa_family_t af;
u_int8_t proto;
u_int8_t direction;
u_int8_t __spare[2];
u_int16_t state_flags;
u_int8_t log;
u_int8_t state_flags;
u_int8_t state_flags_compat;
u_int8_t timeout;
u_int8_t sync_flags;
u_int8_t updates;
@ -1545,6 +1555,7 @@ struct pf_kstatus {
bool syncookies_active;
uint64_t syncookies_inflight[2];
uint32_t states_halfopen;
uint32_t reass;
};
#endif
@ -1897,6 +1908,7 @@ struct pfioc_iface {
#define DIOCGETETHRULES _IOWR('D', 99, struct pfioc_nv)
#define DIOCGETETHRULESETS _IOWR('D', 100, struct pfioc_nv)
#define DIOCGETETHRULESET _IOWR('D', 101, struct pfioc_nv)
#define DIOCSETREASS _IOWR('D', 102, u_int32_t)
struct pf_ifspeed_v0 {
char ifname[IFNAMSIZ];
@ -2249,12 +2261,12 @@ struct mbuf *pf_build_tcp(const struct pf_krule *, sa_family_t,
const struct pf_addr *, const struct pf_addr *,
u_int16_t, u_int16_t, u_int32_t, u_int32_t,
u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
u_int16_t);
u_int16_t, int);
void pf_send_tcp(const struct pf_krule *, sa_family_t,
const struct pf_addr *, const struct pf_addr *,
u_int16_t, u_int16_t, u_int32_t, u_int32_t,
u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
u_int16_t);
u_int16_t, int);
void pf_syncookies_init(void);
void pf_syncookies_cleanup(void);
@ -2368,6 +2380,16 @@ struct pf_state_key *pf_state_key_setup(struct pf_pdesc *, struct pf_addr *,
struct pf_addr *, u_int16_t, u_int16_t);
struct pf_state_key *pf_state_key_clone(struct pf_state_key *);
int pf_normalize_mss(struct mbuf *m, int off,
struct pf_pdesc *pd, u_int16_t maxmss);
u_int16_t pf_rule_to_scrub_flags(u_int32_t);
#ifdef INET
void pf_scrub_ip(struct mbuf **, uint32_t, uint8_t, uint8_t);
#endif /* INET */
#ifdef INET6
void pf_scrub_ip6(struct mbuf **, uint32_t, uint8_t, uint8_t);
#endif /* INET6 */
struct pfi_kkif *pf_kkif_create(int);
void pf_kkif_free(struct pfi_kkif *);
void pf_kkif_zero(struct pfi_kkif *);
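
The new pf_rule_to_scrub_flags() helper declared above is implemented in
pf_norm.c (outside this excerpt); given the flag definitions it plausibly
reduces to a straight mapping along these lines (a sketch, not the verbatim
implementation):

	u_int16_t
	pf_rule_to_scrub_flags(u_int32_t rule_flags)
	{
		u_int16_t scrub_flags = 0;

		if (rule_flags & PFRULE_NODF)
			scrub_flags |= PFSTATE_NODF;
		if (rule_flags & PFRULE_RANDOMID)
			scrub_flags |= PFSTATE_RANDOMID;
		if (rule_flags & PFRULE_SET_TOS)
			scrub_flags |= PFSTATE_SETTOS;
		if (rule_flags & PFRULE_REASSEMBLE_TCP)
			scrub_flags |= PFSTATE_SCRUB_TCP;

		return (scrub_flags);
	}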

View file

@ -581,7 +581,30 @@ pfsync_state_import(struct pfsync_state *sp, int flags)
st->direction = sp->direction;
st->log = sp->log;
st->timeout = sp->timeout;
st->state_flags = sp->state_flags;
/* 8 bits from old peers, 16 bits from new peers */
st->state_flags = sp->state_flags_compat | ntohs(sp->state_flags);
if (r == &V_pf_default_rule) {
/* ToS and Prio are not sent over struct pfsync_state */
st->state_flags &= ~PFSTATE_SETMASK;
} else {
/* Most actions are applied from the state, not from the rule. Until
* pfsync can forward all those actions and their parameters we
* must rely on restoring them from the found rule.
* It's a copy of pf_rule_to_actions(). */
st->qid = r->qid;
st->pqid = r->pqid;
st->rtableid = r->rtableid;
if (r->scrub_flags & PFSTATE_SETTOS)
st->set_tos = r->set_tos;
st->min_ttl = r->min_ttl;
st->max_mss = r->max_mss;
st->state_flags |= (r->scrub_flags & (PFSTATE_NODF|PFSTATE_RANDOMID|
PFSTATE_SETTOS|PFSTATE_SCRUB_TCP|PFSTATE_SETPRIO));
st->dnpipe = r->dnpipe;
st->dnrpipe = r->dnrpipe;
/* FIXME: dnflags are not part of state, can't update them */
}
st->id = sp->id;
st->creatorid = sp->creatorid;

View file

@ -268,7 +268,7 @@ static void pf_change_icmp(struct pf_addr *, u_int16_t *,
u_int16_t *, u_int16_t *, u_int16_t *,
u_int16_t *, u_int8_t, sa_family_t);
static void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
sa_family_t, struct pf_krule *);
sa_family_t, struct pf_krule *, int);
static void pf_detach_state(struct pf_kstate *);
static int pf_state_key_attach(struct pf_state_key *,
struct pf_state_key *, struct pf_kstate *);
@ -294,7 +294,7 @@ static int pf_create_state(struct pf_krule *, struct pf_krule *,
struct pf_state_key *, struct mbuf *, int,
u_int16_t, u_int16_t, int *, struct pfi_kkif *,
struct pf_kstate **, int, u_int16_t, u_int16_t,
int);
int, struct pf_krule_slist *);
static int pf_test_fragment(struct pf_krule **, int,
struct pfi_kkif *, struct mbuf *, void *,
struct pf_pdesc *, struct pf_krule **,
@ -386,6 +386,7 @@ VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);
#define STATE_INC_COUNTERS(s) \
do { \
struct pf_krule_item *mrm; \
counter_u64_add(s->rule.ptr->states_cur, 1); \
counter_u64_add(s->rule.ptr->states_tot, 1); \
if (s->anchor.ptr != NULL) { \
@ -396,18 +397,26 @@ VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);
counter_u64_add(s->nat_rule.ptr->states_cur, 1);\
counter_u64_add(s->nat_rule.ptr->states_tot, 1);\
} \
SLIST_FOREACH(mrm, &s->match_rules, entry) { \
counter_u64_add(mrm->r->states_cur, 1); \
counter_u64_add(mrm->r->states_tot, 1); \
} \
} while (0)
#define STATE_DEC_COUNTERS(s) \
do { \
struct pf_krule_item *mrm; \
if (s->nat_rule.ptr != NULL) \
counter_u64_add(s->nat_rule.ptr->states_cur, -1);\
if (s->anchor.ptr != NULL) \
counter_u64_add(s->anchor.ptr->states_cur, -1); \
counter_u64_add(s->rule.ptr->states_cur, -1); \
SLIST_FOREACH(mrm, &s->match_rules, entry) \
counter_u64_add(mrm->r->states_cur, -1); \
} while (0)
MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
MALLOC_DEFINE(M_PF_RULE_ITEM, "pf_krule_item", "pf(4) rule items");
VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
VNET_DEFINE(struct pf_idhash *, pf_idhash);
VNET_DEFINE(struct pf_srchash *, pf_srchash);
@ -2031,7 +2040,7 @@ pf_unlink_state(struct pf_kstate *s)
s->key[PF_SK_WIRE]->port[1],
s->key[PF_SK_WIRE]->port[0],
s->src.seqhi, s->src.seqlo + 1,
TH_RST|TH_ACK, 0, 0, 0, 1, s->tag);
TH_RST|TH_ACK, 0, 0, 0, 1, s->tag, s->rtableid);
}
LIST_REMOVE(s, entry);
@ -2066,11 +2075,17 @@ pf_alloc_state(int flags)
void
pf_free_state(struct pf_kstate *cur)
{
struct pf_krule_item *ri;
KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,
cur->timeout));
while ((ri = SLIST_FIRST(&cur->match_rules))) {
SLIST_REMOVE_HEAD(&cur->match_rules, entry);
free(ri, M_PF_RULE_ITEM);
}
pf_normalize_tcp_cleanup(cur);
uma_zfree(V_pf_state_z, cur);
pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_REMOVALS], 1);
@ -2084,6 +2099,7 @@ pf_purge_expired_states(u_int i, int maxcheck)
{
struct pf_idhash *ih;
struct pf_kstate *s;
struct pf_krule_item *mrm;
V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
@ -2109,6 +2125,8 @@ pf_purge_expired_states(u_int i, int maxcheck)
if (s->anchor.ptr != NULL)
s->anchor.ptr->rule_ref |= PFRULE_REFS;
s->kif->pfik_flags |= PFI_IFLAG_REFS;
SLIST_FOREACH(mrm, &s->match_rules, entry)
mrm->r->rule_ref |= PFRULE_REFS;
if (s->rt_kif)
s->rt_kif->pfik_flags |= PFI_IFLAG_REFS;
}
@ -2772,7 +2790,7 @@ pf_build_tcp(const struct pf_krule *r, sa_family_t af,
const struct pf_addr *saddr, const struct pf_addr *daddr,
u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
u_int16_t rtag)
u_int16_t rtag, int rtableid)
{
struct mbuf *m;
int len, tlen;
@ -2824,8 +2842,8 @@ pf_build_tcp(const struct pf_krule *r, sa_family_t af,
m->m_flags |= M_SKIP_FIREWALL;
pf_mtag->tag = rtag;
if (r != NULL && r->rtableid >= 0)
M_SETFIB(m, r->rtableid);
if (rtableid >= 0)
M_SETFIB(m, rtableid);
#ifdef ALTQ
if (r != NULL && r->qid) {
@ -2923,13 +2941,13 @@ pf_send_tcp(const struct pf_krule *r, sa_family_t af,
const struct pf_addr *saddr, const struct pf_addr *daddr,
u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
u_int16_t rtag)
u_int16_t rtag, int rtableid)
{
struct pf_send_entry *pfse;
struct mbuf *m;
m = pf_build_tcp(r, af, saddr, daddr, sport, dport, seq, ack, flags,
win, mss, ttl, tag, rtag);
win, mss, ttl, tag, rtag, rtableid);
if (m == NULL)
return;
@ -2961,7 +2979,7 @@ static void
pf_return(struct pf_krule *r, struct pf_krule *nr, struct pf_pdesc *pd,
struct pf_state_key *sk, int off, struct mbuf *m, struct tcphdr *th,
struct pfi_kkif *kif, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen,
u_short *reason)
u_short *reason, int rtableid)
{
struct pf_addr * const saddr = pd->src;
struct pf_addr * const daddr = pd->dst;
@ -3019,16 +3037,16 @@ pf_return(struct pf_krule *r, struct pf_krule *nr, struct pf_pdesc *pd,
pf_send_tcp(r, af, pd->dst,
pd->src, th->th_dport, th->th_sport,
ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
r->return_ttl, 1, 0);
r->return_ttl, 1, 0, rtableid);
}
} else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
r->return_icmp)
pf_send_icmp(m, r->return_icmp >> 8,
r->return_icmp & 255, af, r);
r->return_icmp & 255, af, r, rtableid);
else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
r->return_icmp6)
pf_send_icmp(m, r->return_icmp6 >> 8,
r->return_icmp6 & 255, af, r);
r->return_icmp6 & 255, af, r, rtableid);
}
static int
@ -3067,7 +3085,7 @@ pf_icmp_to_bandlim(uint8_t type)
static void
pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
struct pf_krule *r)
struct pf_krule *r, int rtableid)
{
struct pf_send_entry *pfse;
struct mbuf *m0;
@ -3104,8 +3122,8 @@ pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
/* XXX: revisit */
m0->m_flags |= M_SKIP_FIREWALL;
if (r->rtableid >= 0)
M_SETFIB(m0, r->rtableid);
if (rtableid >= 0)
M_SETFIB(m0, rtableid);
#ifdef ALTQ
if (r->qid) {
@ -3567,15 +3585,26 @@ pf_rule_to_actions(struct pf_krule *r, struct pf_rule_actions *a)
a->qid = r->qid;
if (r->pqid)
a->pqid = r->pqid;
if (r->rtableid >= 0)
a->rtableid = r->rtableid;
a->log |= r->log;
if (r->scrub_flags & PFSTATE_SETTOS)
a->set_tos = r->set_tos;
if (r->min_ttl)
a->min_ttl = r->min_ttl;
if (r->max_mss)
a->max_mss = r->max_mss;
a->flags |= (r->scrub_flags & (PFSTATE_NODF|PFSTATE_RANDOMID|
PFSTATE_SETTOS|PFSTATE_SCRUB_TCP|PFSTATE_SETPRIO));
if (r->dnpipe)
a->dnpipe = r->dnpipe;
if (r->dnrpipe)
a->dnrpipe = r->dnrpipe;
if (r->dnpipe || r->dnrpipe) {
if (r->free_flags & PFRULE_DN_IS_PIPE)
a->flags |= PFRULE_DN_IS_PIPE;
a->flags |= PFSTATE_DN_IS_PIPE;
else
a->flags &= ~PFRULE_DN_IS_PIPE;
a->flags &= ~PFSTATE_DN_IS_PIPE;
}
}
@ -4140,12 +4169,14 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm, int direction,
sa_family_t af = pd->af;
struct pf_krule *r, *a = NULL;
struct pf_kruleset *ruleset = NULL;
struct pf_krule_slist match_rules;
struct pf_krule_item *ri;
struct pf_ksrc_node *nsn = NULL;
struct tcphdr *th = &pd->hdr.tcp;
struct pf_state_key *sk = NULL, *nk = NULL;
u_short reason;
int rewrite = 0, hdrlen = 0;
int tag = -1, rtableid = -1;
int tag = -1;
int asd = 0;
int match = 0;
int state_icmp = 0;
@ -4347,6 +4378,7 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm, int direction,
pd->nat_rule = nr;
}
SLIST_INIT(&match_rules);
while (r != NULL) {
pf_counter_u64_add(&r->evaluations, 1);
if (pfi_kkif_match(r->kif, kif) == r->ifnot)
@ -4413,10 +4445,15 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm, int direction,
else {
if (r->tag)
tag = r->tag;
if (r->rtableid >= 0)
rtableid = r->rtableid;
if (r->anchor == NULL) {
if (r->action == PF_MATCH) {
ri = malloc(sizeof(struct pf_krule_item), M_PF_RULE_ITEM, M_NOWAIT | M_ZERO);
if (ri == NULL) {
REASON_SET(&reason, PFRES_MEMORY);
goto cleanup;
}
ri->r = r;
SLIST_INSERT_HEAD(&match_rules, ri, entry);
pf_counter_u64_critical_enter();
pf_counter_u64_add_protected(&r->packets[direction == PF_OUT], 1);
pf_counter_u64_add_protected(&r->bytes[direction == PF_OUT], pd->tot_len);
@ -4465,7 +4502,7 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm, int direction,
(r->rule_flag & PFRULE_RETURNICMP) ||
(r->rule_flag & PFRULE_RETURN))) {
pf_return(r, nr, pd, sk, off, m, th, kif, bproto_sum,
bip_sum, hdrlen, &reason);
bip_sum, hdrlen, &reason, r->rtableid);
}
if (r->action == PF_DROP)
@ -4475,20 +4512,21 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm, int direction,
REASON_SET(&reason, PFRES_MEMORY);
goto cleanup;
}
if (rtableid >= 0)
M_SETFIB(m, rtableid);
if (pd->act.rtableid >= 0)
M_SETFIB(m, pd->act.rtableid);
if (!state_icmp && (r->keep_state || nr != NULL ||
(pd->flags & PFDESC_TCP_NORM))) {
int action;
action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off,
sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum,
hdrlen);
hdrlen, &match_rules);
if (action != PF_PASS) {
if (action == PF_DROP &&
(r->rule_flag & PFRULE_RETURN))
pf_return(r, nr, pd, sk, off, m, th, kif,
bproto_sum, bip_sum, hdrlen, &reason);
bproto_sum, bip_sum, hdrlen, &reason,
pd->act.rtableid);
return (action);
}
} else {
@ -4516,6 +4554,11 @@ pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm, int direction,
return (PF_PASS);
cleanup:
while ((ri = SLIST_FIRST(&match_rules))) {
SLIST_REMOVE_HEAD(&match_rules, entry);
free(ri, M_PF_RULE_ITEM);
}
if (sk != NULL)
uma_zfree(V_pf_state_key_z, sk);
if (nk != NULL)
@ -4528,7 +4571,8 @@ pf_create_state(struct pf_krule *r, struct pf_krule *nr, struct pf_krule *a,
struct pf_pdesc *pd, struct pf_ksrc_node *nsn, struct pf_state_key *nk,
struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport,
u_int16_t dport, int *rewrite, struct pfi_kkif *kif, struct pf_kstate **sm,
int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen)
int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen,
struct pf_krule_slist *match_rules)
{
struct pf_kstate *s = NULL;
struct pf_ksrc_node *sn = NULL;
@ -4564,12 +4608,21 @@ pf_create_state(struct pf_krule *r, struct pf_krule *nr, struct pf_krule *a,
s->rule.ptr = r;
s->nat_rule.ptr = nr;
s->anchor.ptr = a;
bcopy(match_rules, &s->match_rules, sizeof(s->match_rules));
STATE_INC_COUNTERS(s);
if (r->allow_opts)
s->state_flags |= PFSTATE_ALLOWOPTS;
if (r->rule_flag & PFRULE_STATESLOPPY)
s->state_flags |= PFSTATE_SLOPPY;
s->log = r->log & PF_LOG_ALL;
if (pd->flags & PFDESC_TCP_NORM) /* Set by old-style scrub rules */
s->state_flags |= PFSTATE_SCRUB_TCP;
s->log = pd->act.log & PF_LOG_ALL;
s->qid = pd->act.qid;
s->pqid = pd->act.pqid;
s->rtableid = pd->act.rtableid;
s->min_ttl = pd->act.min_ttl;
s->set_tos = pd->act.set_tos;
s->max_mss = pd->act.max_mss;
s->sync_state = PFSYNC_S_NONE;
s->qid = pd->act.qid;
s->pqid = pd->act.pqid;
@ -4655,8 +4708,8 @@ pf_create_state(struct pf_krule *r, struct pf_krule *nr, struct pf_krule *a,
s->nat_src_node = nsn;
}
if (pd->proto == IPPROTO_TCP) {
if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
off, pd, th, &s->src, &s->dst)) {
if (s->state_flags & PFSTATE_SCRUB_TCP &&
pf_normalize_tcp_init(m, off, pd, th, &s->src, &s->dst)) {
REASON_SET(&reason, PFRES_MEMORY);
pf_src_tree_remove_state(s);
s->timeout = PFTM_UNLINKED;
@ -4664,7 +4717,7 @@ pf_create_state(struct pf_krule *r, struct pf_krule *nr, struct pf_krule *a,
pf_free_state(s);
return (PF_DROP);
}
if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
if (s->state_flags & PFSTATE_SCRUB_TCP && s->src.scrub &&
pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
&s->src, &s->dst, rewrite)) {
/* This really shouldn't happen!!! */
@ -4738,7 +4791,7 @@ pf_create_state(struct pf_krule *r, struct pf_krule *nr, struct pf_krule *a,
s->src.mss = mss;
pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport,
th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0);
TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, pd->act.rtableid);
REASON_SET(&reason, PFRES_SYNPROXY);
return (PF_SYNPROXY_DROP);
}
@ -4789,6 +4842,8 @@ pf_test_fragment(struct pf_krule **rm, int direction, struct pfi_kkif *kif,
{
struct pf_krule *r, *a = NULL;
struct pf_kruleset *ruleset = NULL;
struct pf_krule_slist match_rules;
struct pf_krule_item *ri;
sa_family_t af = pd->af;
u_short reason;
int tag = -1;
@ -4799,6 +4854,7 @@ pf_test_fragment(struct pf_krule **rm, int direction, struct pfi_kkif *kif,
PF_RULES_RASSERT();
r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
SLIST_INIT(&match_rules);
while (r != NULL) {
pf_counter_u64_add(&r->evaluations, 1);
if (pfi_kkif_match(r->kif, kif) == r->ifnot)
@ -4841,6 +4897,13 @@ pf_test_fragment(struct pf_krule **rm, int direction, struct pfi_kkif *kif,
else {
if (r->anchor == NULL) {
if (r->action == PF_MATCH) {
ri = malloc(sizeof(struct pf_krule_item), M_PF_RULE_ITEM, M_NOWAIT | M_ZERO);
if (ri == NULL) {
REASON_SET(&reason, PFRES_MEMORY);
goto cleanup;
}
ri->r = r;
SLIST_INSERT_HEAD(&match_rules, ri, entry);
pf_counter_u64_critical_enter();
pf_counter_u64_add_protected(&r->packets[direction == PF_OUT], 1);
pf_counter_u64_add_protected(&r->bytes[direction == PF_OUT], pd->tot_len);
@ -4878,18 +4941,26 @@ pf_test_fragment(struct pf_krule **rm, int direction, struct pfi_kkif *kif,
pf_rule_to_actions(r, &pd->act);
if (r->log)
PFLOG_PACKET(kif, m, af, direction, reason, r, a, ruleset, pd,
1);
PFLOG_PACKET(kif, m, af, direction, reason, r, a,
ruleset, pd, 1);
if (r->action != PF_PASS)
return (PF_DROP);
if (tag > 0 && pf_tag_packet(m, pd, tag)) {
REASON_SET(&reason, PFRES_MEMORY);
return (PF_DROP);
goto cleanup;
}
return (PF_PASS);
cleanup:
while ((ri = SLIST_FIRST(&match_rules))) {
SLIST_REMOVE_HEAD(&match_rules, entry);
free(ri, M_PF_RULE_ITEM);
}
return (PF_DROP);
}
static int
@ -4932,7 +5003,7 @@ pf_tcp_track_full(struct pf_kstate **state, struct pfi_kkif *kif,
if (src->seqlo == 0) {
/* First packet from this end. Set its state */
if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
if (((*state)->state_flags & PFSTATE_SCRUB_TCP || dst->scrub) &&
src->scrub == NULL) {
if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
REASON_SET(reason, PFRES_MEMORY);
@ -5201,7 +5272,8 @@ pf_tcp_track_full(struct pf_kstate **state, struct pfi_kkif *kif,
pd->dst, pd->src, th->th_dport,
th->th_sport, ntohl(th->th_ack), 0,
TH_RST, 0, 0,
(*state)->rule.ptr->return_ttl, 1, 0);
(*state)->rule.ptr->return_ttl, 1, 0,
(*state)->rtableid);
src->seqlo = 0;
src->seqhi = 1;
src->max_win = 1;
@ -5337,7 +5409,8 @@ pf_synproxy(struct pf_pdesc *pd, struct pf_kstate **state, u_short *reason)
pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
pd->src, th->th_dport, th->th_sport,
(*state)->src.seqhi, ntohl(th->th_seq) + 1,
TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 0);
TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 0,
(*state)->rtableid);
REASON_SET(reason, PFRES_SYNPROXY);
return (PF_SYNPROXY_DROP);
} else if ((th->th_flags & (TH_ACK|TH_RST|TH_FIN)) != TH_ACK ||
@ -5368,7 +5441,8 @@ pf_synproxy(struct pf_pdesc *pd, struct pf_kstate **state, u_short *reason)
&sk->addr[pd->sidx], &sk->addr[pd->didx],
sk->port[pd->sidx], sk->port[pd->didx],
(*state)->dst.seqhi, 0, TH_SYN, 0,
(*state)->src.mss, 0, 0, (*state)->tag);
(*state)->src.mss, 0, 0, (*state)->tag,
(*state)->rtableid);
REASON_SET(reason, PFRES_SYNPROXY);
return (PF_SYNPROXY_DROP);
} else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
@ -5383,12 +5457,13 @@ pf_synproxy(struct pf_pdesc *pd, struct pf_kstate **state, u_short *reason)
pd->src, th->th_dport, th->th_sport,
ntohl(th->th_ack), ntohl(th->th_seq) + 1,
TH_ACK, (*state)->src.max_win, 0, 0, 0,
(*state)->tag);
(*state)->tag, (*state)->rtableid);
pf_send_tcp((*state)->rule.ptr, pd->af,
&sk->addr[pd->sidx], &sk->addr[pd->didx],
sk->port[pd->sidx], sk->port[pd->didx],
(*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
TH_ACK, (*state)->dst.max_win, 0, 0, 1, 0);
TH_ACK, (*state)->dst.max_win, 0, 0, 1, 0,
(*state)->rtableid);
(*state)->src.seqdiff = (*state)->dst.seqhi -
(*state)->src.seqlo;
(*state)->dst.seqdiff = (*state)->src.seqhi -
@ -6888,7 +6963,7 @@ pf_pdesc_to_dnflow(int dir, const struct pf_pdesc *pd,
}
dnflow->rule.info |= IPFW_IS_DUMMYNET;
if (r->free_flags & PFRULE_DN_IS_PIPE || pd->act.flags & PFRULE_DN_IS_PIPE)
if (r->free_flags & PFRULE_DN_IS_PIPE || pd->act.flags & PFSTATE_DN_IS_PIPE)
dnflow->rule.info |= IPFW_IS_PIPE;
dnflow->f_id.proto = pd->proto;
@ -7118,7 +7193,7 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *
if (off < (int)sizeof(struct ip)) {
action = PF_DROP;
REASON_SET(&reason, PFRES_SHORT);
log = 1;
log = PF_LOG_FORCE;
goto done;
}
@ -7134,6 +7209,7 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *
pd.af = AF_INET;
pd.tos = h->ip_tos & ~IPTOS_ECN_MASK;
pd.tot_len = ntohs(h->ip_len);
pd.act.rtableid = -1;
/* handle fragments that didn't get reassembled by normalization */
if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
@ -7146,7 +7222,8 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *
case IPPROTO_TCP: {
if (!pf_pull_hdr(m, off, &pd.hdr.tcp, sizeof(pd.hdr.tcp),
&action, &reason, AF_INET)) {
log = action != PF_PASS;
if (action != PF_PASS)
log = PF_LOG_FORCE;
goto done;
}
pd.p_len = pd.tot_len - off - (pd.hdr.tcp.th_off << 2);
@ -7219,13 +7296,19 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *
&pd, &a, &ruleset, inp);
}
}
if (s) {
if (s->max_mss)
pf_normalize_mss(m, off, &pd, s->max_mss);
} else if (r->max_mss)
pf_normalize_mss(m, off, &pd, r->max_mss);
break;
}
case IPPROTO_UDP: {
if (!pf_pull_hdr(m, off, &pd.hdr.udp, sizeof(pd.hdr.udp),
&action, &reason, AF_INET)) {
log = action != PF_PASS;
if (action != PF_PASS)
log = PF_LOG_FORCE;
goto done;
}
pd.sport = &pd.hdr.udp.uh_sport;
@ -7253,7 +7336,8 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *
case IPPROTO_ICMP: {
if (!pf_pull_hdr(m, off, &pd.hdr.icmp, ICMP_MINLEN,
&action, &reason, AF_INET)) {
log = action != PF_PASS;
if (action != PF_PASS)
log = PF_LOG_FORCE;
goto done;
}
action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
@ -7299,17 +7383,37 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *
!((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
action = PF_DROP;
REASON_SET(&reason, PFRES_IPOPTIONS);
log = r->log;
log = PF_LOG_FORCE;
DPFPRINTF(PF_DEBUG_MISC,
("pf: dropping packet with ip options\n"));
}
if (s) {
pf_scrub_ip(&m, s->state_flags, s->min_ttl, s->set_tos);
if (s->rtableid >= 0)
M_SETFIB(m, s->rtableid);
#ifdef ALTQ
if (s->qid) {
pd.act.pqid = s->pqid;
pd.act.qid = s->qid;
}
#endif
} else {
pf_scrub_ip(&m, r->scrub_flags, r->min_ttl, r->set_tos);
if (r->rtableid >= 0)
M_SETFIB(m, r->rtableid);
#ifdef ALTQ
if (r->qid) {
pd.act.pqid = r->pqid;
pd.act.qid = r->qid;
}
#endif
}
if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
action = PF_DROP;
REASON_SET(&reason, PFRES_MEMORY);
}
if (r->rtableid >= 0)
M_SETFIB(m, r->rtableid);
if (r->scrub_flags & PFSTATE_SETPRIO) {
if (pd.tos & IPTOS_LOWDELAY)
@ -7317,20 +7421,13 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *
if (vlan_set_pcp(m, r->set_prio[pqid])) {
action = PF_DROP;
REASON_SET(&reason, PFRES_MEMORY);
log = 1;
log = PF_LOG_FORCE;
DPFPRINTF(PF_DEBUG_MISC,
("pf: failed to allocate 802.1q mtag\n"));
}
}
#ifdef ALTQ
if (s && s->qid) {
pd.act.pqid = s->pqid;
pd.act.qid = s->qid;
} else if (r->qid) {
pd.act.pqid = r->pqid;
pd.act.qid = r->qid;
}
if (action == PF_PASS && pd.act.qid) {
if (pd.pf_mtag == NULL &&
((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
@ -7379,7 +7476,7 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *
((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
action = PF_DROP;
REASON_SET(&reason, PFRES_MEMORY);
log = 1;
log = PF_LOG_FORCE;
DPFPRINTF(PF_DEBUG_MISC,
("pf: failed to allocate tag\n"));
} else {
@ -7396,22 +7493,31 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *
/* XXX: ipfw has the same behaviour! */
action = PF_DROP;
REASON_SET(&reason, PFRES_MEMORY);
log = 1;
log = PF_LOG_FORCE;
DPFPRINTF(PF_DEBUG_MISC,
("pf: failed to allocate divert tag\n"));
}
}
if (log) {
struct pf_krule *lr;
struct pf_krule *lr;
struct pf_krule_item *ri;
if (s != NULL && s->nat_rule.ptr != NULL &&
s->nat_rule.ptr->log & PF_LOG_ALL)
lr = s->nat_rule.ptr;
else
lr = r;
PFLOG_PACKET(kif, m, AF_INET, dir, reason, lr, a, ruleset, &pd,
(s == NULL));
if (log & PF_LOG_FORCE || lr->log & PF_LOG_ALL)
PFLOG_PACKET(kif, m, AF_INET, dir, reason, lr, a,
ruleset, &pd, (s == NULL));
if (s) {
SLIST_FOREACH(ri, &s->match_rules, entry)
if (ri->r->log & PF_LOG_ALL)
PFLOG_PACKET(kif, m, AF_INET, dir,
reason, ri->r, a, ruleset, &pd, 0);
}
}
pf_counter_u64_critical_enter();
@ -7431,6 +7537,8 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *
pf_counter_u64_add_protected(&a->bytes[dirndx], pd.tot_len);
}
if (s != NULL) {
struct pf_krule_item *ri;
if (s->nat_rule.ptr != NULL) {
pf_counter_u64_add_protected(&s->nat_rule.ptr->packets[dirndx],
1);
@ -7452,6 +7560,10 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *
dirndx = (dir == s->direction) ? 0 : 1;
s->packets[dirndx]++;
s->bytes[dirndx] += pd.tot_len;
SLIST_FOREACH(ri, &s->match_rules, entry) {
pf_counter_u64_add_protected(&ri->r->packets[dirndx], 1);
pf_counter_u64_add_protected(&ri->r->bytes[dirndx], pd.tot_len);
}
}
tr = r;
nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
@ -7613,6 +7725,7 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
pd.af = AF_INET6;
pd.tos = IPV6_DSCP(h);
pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
pd.act.rtableid = -1;
off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
pd.proto = h->ip6_nxt;
@ -7632,7 +7745,7 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
("pf: IPv6 more than one rthdr\n"));
action = PF_DROP;
REASON_SET(&reason, PFRES_IPOPTIONS);
log = 1;
log = PF_LOG_FORCE;
goto done;
}
if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
@ -7641,7 +7754,7 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
("pf: IPv6 short rthdr\n"));
action = PF_DROP;
REASON_SET(&reason, PFRES_SHORT);
log = 1;
log = PF_LOG_FORCE;
goto done;
}
if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
@ -7649,7 +7762,7 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
("pf: IPv6 rthdr0\n"));
action = PF_DROP;
REASON_SET(&reason, PFRES_IPOPTIONS);
log = 1;
log = PF_LOG_FORCE;
goto done;
}
/* FALLTHROUGH */
@ -7665,7 +7778,7 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
DPFPRINTF(PF_DEBUG_MISC,
("pf: IPv6 short opt\n"));
action = PF_DROP;
log = 1;
log = PF_LOG_FORCE;
goto done;
}
if (pd.proto == IPPROTO_AH)
@ -7690,7 +7803,8 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
case IPPROTO_TCP: {
if (!pf_pull_hdr(m, off, &pd.hdr.tcp, sizeof(pd.hdr.tcp),
&action, &reason, AF_INET6)) {
log = action != PF_PASS;
if (action != PF_PASS)
log |= PF_LOG_FORCE;
goto done;
}
pd.p_len = pd.tot_len - off - (pd.hdr.tcp.th_off << 2);
@ -7710,13 +7824,19 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
} else if (s == NULL)
action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
&a, &ruleset, inp);
if (s) {
if (s->max_mss)
pf_normalize_mss(m, off, &pd, s->max_mss);
} else if (r->max_mss)
pf_normalize_mss(m, off, &pd, r->max_mss);
break;
}
case IPPROTO_UDP: {
if (!pf_pull_hdr(m, off, &pd.hdr.udp, sizeof(pd.hdr.udp),
&action, &reason, AF_INET6)) {
log = action != PF_PASS;
if (action != PF_PASS)
log |= PF_LOG_FORCE;
goto done;
}
pd.sport = &pd.hdr.udp.uh_sport;
@ -7751,7 +7871,8 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
case IPPROTO_ICMPV6: {
if (!pf_pull_hdr(m, off, &pd.hdr.icmp6, sizeof(pd.hdr.icmp6),
&action, &reason, AF_INET6)) {
log = action != PF_PASS;
if (action != PF_PASS)
log |= PF_LOG_FORCE;
goto done;
}
action = pf_test_state_icmp(&s, dir, kif,
@ -7803,8 +7924,28 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
action = PF_DROP;
REASON_SET(&reason, PFRES_MEMORY);
}
if (r->rtableid >= 0)
M_SETFIB(m, r->rtableid);
if (s) {
pf_scrub_ip6(&m, s->state_flags, s->min_ttl, s->set_tos);
if (s->rtableid >= 0)
M_SETFIB(m, s->rtableid);
#ifdef ALTQ
if (s->qid) {
pd.act.pqid = s->pqid;
pd.act.qid = s->qid;
}
#endif
} else {
pf_scrub_ip6(&m, r->scrub_flags, r->min_ttl, r->set_tos);
if (r->rtableid >= 0)
M_SETFIB(m, r->rtableid);
#ifdef ALTQ
if (r->qid) {
pd.act.pqid = r->pqid;
pd.act.qid = r->qid;
}
#endif
}
if (r->scrub_flags & PFSTATE_SETPRIO) {
if (pd.tos & IPTOS_LOWDELAY)
@ -7812,20 +7953,13 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
if (vlan_set_pcp(m, r->set_prio[pqid])) {
action = PF_DROP;
REASON_SET(&reason, PFRES_MEMORY);
log = 1;
log = PF_LOG_FORCE;
DPFPRINTF(PF_DEBUG_MISC,
("pf: failed to allocate 802.1q mtag\n"));
}
}
#ifdef ALTQ
if (s && s->qid) {
pd.act.pqid = s->pqid;
pd.act.qid = s->qid;
} else if (r->qid) {
pd.act.pqid = r->pqid;
pd.act.qid = r->qid;
}
if (action == PF_PASS && pd.act.qid) {
if (pd.pf_mtag == NULL &&
((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
@ -7856,15 +7990,24 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
printf("pf: divert(9) is not supported for IPv6\n");
if (log) {
struct pf_krule *lr;
struct pf_krule *lr;
struct pf_krule_item *ri;
if (s != NULL && s->nat_rule.ptr != NULL &&
s->nat_rule.ptr->log & PF_LOG_ALL)
lr = s->nat_rule.ptr;
else
lr = r;
PFLOG_PACKET(kif, m, AF_INET6, dir, reason, lr, a, ruleset,
&pd, (s == NULL));
if (log & PF_LOG_FORCE || lr->log & PF_LOG_ALL)
PFLOG_PACKET(kif, m, AF_INET6, dir, reason, lr, a,
ruleset, &pd, (s == NULL));
if (s) {
SLIST_FOREACH(ri, &s->match_rules, entry)
if (ri->r->log & PF_LOG_ALL)
PFLOG_PACKET(kif, m, AF_INET6, dir,
reason, ri->r, a, ruleset, &pd, 0);
}
}
pf_counter_u64_critical_enter();

View file

@ -113,6 +113,7 @@ enum { PF_ADDR_ADDRMASK, PF_ADDR_NOROUTE, PF_ADDR_DYNIFTL,
#define PF_LOG 0x01
#define PF_LOG_ALL 0x02
#define PF_LOG_SOCKET_LOOKUP 0x04
#define PF_LOG_FORCE 0x08
/* Reasons code for passing/dropping a packet */
#define PFRES_MATCH 0 /* Explicit match of a rule */
@ -264,6 +265,9 @@ struct pf_status {
uint8_t pf_chksum[PF_MD5_DIGEST_LENGTH];
};
#define PF_REASS_ENABLED 0x01
#define PF_REASS_NODF 0x02
struct pf_addr {
union {
struct in_addr v4;
@ -582,30 +586,49 @@ struct pf_rule {
uint64_t u_src_nodes;
};
/* rule flags */
#define PFRULE_DROP 0x0000
#define PFRULE_RETURNRST 0x0001
#define PFRULE_FRAGMENT 0x0002
#define PFRULE_RETURNICMP 0x0004
#define PFRULE_RETURN 0x0008
#define PFRULE_NOSYNC 0x0010
#define PFRULE_SRCTRACK 0x0020 /* track source states */
#define PFRULE_RULESRCTRACK 0x0040 /* per rule */
/* pf_krule->rule_flag and old-style scrub flags */
#define PFRULE_DROP 0x00000000
#define PFRULE_RETURNRST 0x00000001
#define PFRULE_FRAGMENT 0x00000002
#define PFRULE_RETURNICMP 0x00000004
#define PFRULE_RETURN 0x00000008
#define PFRULE_NOSYNC 0x00000010
#define PFRULE_SRCTRACK 0x00000020 /* track source states */
#define PFRULE_RULESRCTRACK 0x00000040 /* per rule */
#define PFRULE_NODF 0x00000100
#define PFRULE_FRAGMENT_NOREASS 0x00000200
#define PFRULE_RANDOMID 0x00000800
#define PFRULE_REASSEMBLE_TCP 0x00001000
#define PFRULE_SET_TOS 0x00002000
#define PFRULE_IFBOUND 0x00010000 /* if-bound */
#define PFRULE_STATESLOPPY 0x00020000 /* sloppy state tracking */
#ifdef _KERNEL
#define PFRULE_REFS 0x0080 /* rule has references */
#endif
/* scrub flags */
#define PFRULE_NODF 0x0100
#define PFRULE_FRAGMENT_NOREASS 0x0200
#define PFRULE_RANDOMID 0x0800
#define PFRULE_REASSEMBLE_TCP 0x1000
#define PFRULE_SET_TOS 0x2000
/* pf_rule_actions->dnflags */
#define PFRULE_DN_IS_PIPE 0x0040
#define PFRULE_DN_IS_QUEUE 0x0080
/* rule flags again */
#define PFRULE_IFBOUND 0x00010000 /* if-bound */
#define PFRULE_STATESLOPPY 0x00020000 /* sloppy state tracking */
/* pf_state->state_flags, pf_rule_actions->flags, pf_krule->scrub_flags */
#define PFSTATE_ALLOWOPTS 0x0001
#define PFSTATE_SLOPPY 0x0002
/* was PFSTATE_PFLOW 0x0004 */
#define PFSTATE_NOSYNC 0x0008
#define PFSTATE_ACK 0x0010
#define PFSTATE_NODF 0x0020
#define PFSTATE_SETTOS 0x0040
#define PFSTATE_RANDOMID 0x0080
#define PFSTATE_SCRUB_TCP 0x0100
#define PFSTATE_SETPRIO 0x0200
/* was PFSTATE_INP_UNLINKED 0x0400 */
/* FreeBSD-specific flags are added from the end to keep space for porting
* flags from OpenBSD */
#define PFSTATE_DN_IS_PIPE 0x4000
#define PFSTATE_DN_IS_QUEUE 0x8000
#define PFSTATE_SCRUBMASK (PFSTATE_NODF|PFSTATE_RANDOMID|PFSTATE_SCRUB_TCP)
#define PFSTATE_SETMASK (PFSTATE_SETTOS|PFSTATE_SETPRIO)
#define PFSTATE_HIWAT 100000 /* default state table size */
#define PFSTATE_ADAPT_START 60000 /* default adaptive timeout start */
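
A quick sketch of why this layout matters for the pfsync compat path: an
old peer only carries the low byte of state_flags, so everything from
PFSTATE_SCRUB_TCP (0x0100) upwards is truncated and has to be restored
from the matching rule on import (see pfsync_state_import() above):

	u_int16_t state_flags = PFSTATE_SLOPPY | PFSTATE_SCRUB_TCP;
	/* keeps PFSTATE_SLOPPY (0x0002), drops PFSTATE_SCRUB_TCP (0x0100) */
	u_int8_t state_flags_compat = (u_int8_t)state_flags;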

View file

@ -384,6 +384,14 @@ pfattach_vnet(void)
my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
V_pf_status.debug = PF_DEBUG_URGENT;
/*
 * XXX This is different from OpenBSD, where reassembly is enabled by
 * default. In FreeBSD we expect people to still use scrub rules and
 * switch to the new syntax later. Only when they switch must they
 * explicitly enable reassembly. We could change the default once the
 * scrub rule functionality is hopefully removed some day in the future.
*/
V_pf_status.reass = 0;
V_pf_pfil_hooked = false;
V_pf_pfil_eth_hooked = false;
@ -1305,6 +1313,9 @@ pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
PF_MD5_UPD(rule, allow_opts);
PF_MD5_UPD(rule, rt);
PF_MD5_UPD(rule, tos);
PF_MD5_UPD(rule, scrub_flags);
PF_MD5_UPD(rule, min_ttl);
PF_MD5_UPD(rule, set_tos);
if (rule->anchor != NULL)
PF_MD5_UPD_STR(rule, anchor->path);
}
@ -5625,6 +5636,17 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
break;
}
case DIOCSETREASS: {
u_int32_t *reass = (u_int32_t *)addr;
V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
/* Removing the DF flag without reassembly enabled is not a
 * valid combination. Disable reassembly in such a case. */
if (!(V_pf_status.reass & PF_REASS_ENABLED))
V_pf_status.reass = 0;
break;
}
default:
error = ENODEV;
break;
@ -5669,7 +5691,8 @@ pfsync_state_export(struct pfsync_state *sp, struct pf_kstate *st)
sp->direction = st->direction;
sp->log = st->log;
sp->timeout = st->timeout;
sp->state_flags = st->state_flags;
sp->state_flags_compat = st->state_flags;
sp->state_flags = htons(st->state_flags);
if (st->src_node)
sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
if (st->nat_src_node)
@ -5733,6 +5756,8 @@ pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
sp->direction = st->direction;
sp->log = st->log;
sp->timeout = st->timeout;
/* 8 bits for old peers, 16 bits for new peers */
sp->state_flags_compat = st->state_flags;
sp->state_flags = st->state_flags;
if (st->src_node)
sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
@ -5828,6 +5853,7 @@ pf_getstatus(struct pfioc_nv *nv)
nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
nvlist_add_number(nvl, "states", V_pf_status.states);
nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
nvlist_add_number(nvl, "reass", V_pf_status.reass);
nvlist_add_bool(nvl, "syncookies_active",
V_pf_status.syncookies_active);
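
The ioctl mirrors what pfctl_load_reassembly() does; a minimal userland
sketch (assumes an open /dev/pf descriptor in "dev"):

	u_int32_t reass = PF_REASS_ENABLED | PF_REASS_NODF;

	if (ioctl(dev, DIOCSETREASS, &reass) == -1)
		err(1, "DIOCSETREASS");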

View file

@ -135,8 +135,7 @@ static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static void pf_flush_fragments(void);
static void pf_free_fragment(struct pf_fragment *);
static void pf_remove_fragment(struct pf_fragment *);
static int pf_normalize_tcpopt(struct pf_krule *, struct mbuf *,
struct tcphdr *, int, sa_family_t);
static struct pf_frent *pf_create_fragment(u_short *);
static int pf_frent_holes(struct pf_frent *frent);
static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
@ -152,13 +151,11 @@ static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
struct pf_frent *, u_short *);
static struct mbuf *pf_join_fragment(struct pf_fragment *);
#ifdef INET
static void pf_scrub_ip(struct mbuf **, uint32_t, uint8_t, uint8_t);
static int pf_reassemble(struct mbuf **, struct ip *, int, u_short *);
#endif /* INET */
#ifdef INET6
static int pf_reassemble6(struct mbuf **, struct ip6_hdr *,
struct ip6_frag *, uint16_t, uint16_t, u_short *);
static void pf_scrub_ip6(struct mbuf **, uint32_t, uint8_t, uint8_t);
#endif /* INET6 */
#define DPFPRINTF(x) do { \
@ -1046,10 +1043,14 @@ pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kkif *kif, u_short *reason
int ip_len;
int tag = -1;
int verdict;
int srs;
PF_RULES_RASSERT();
r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
/* Check if there are any scrub rules. Lack of scrub rules means enforced
* packet normalization, just like in OpenBSD. */
srs = (r != NULL);
while (r != NULL) {
pf_counter_u64_add(&r->evaluations, 1);
if (pfi_kkif_match(r->kif, kif) == r->ifnot)
@ -1075,13 +1076,23 @@ pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kkif *kif, u_short *reason
break;
}
if (r == NULL || r->action == PF_NOSCRUB)
return (PF_PASS);
if (srs) {
/* With scrub rules present IPv4 normalization happens only
* if one of the rules has matched and it's not a "no scrub" rule */
if (r == NULL || r->action == PF_NOSCRUB)
return (PF_PASS);
pf_counter_u64_critical_enter();
pf_counter_u64_add_protected(&r->packets[dir == PF_OUT], 1);
pf_counter_u64_add_protected(&r->bytes[dir == PF_OUT], pd->tot_len);
pf_counter_u64_critical_exit();
pf_counter_u64_critical_enter();
pf_counter_u64_add_protected(&r->packets[dir == PF_OUT], 1);
pf_counter_u64_add_protected(&r->bytes[dir == PF_OUT], pd->tot_len);
pf_counter_u64_critical_exit();
} else if ((!V_pf_status.reass && (h->ip_off & htons(IP_MF | IP_OFFMASK)))) {
/* With no scrub rules IPv4 fragment reassembly depends on the
* global switch. Fragments can be dropped early if reassembly
* is disabled. */
REASON_SET(reason, PFRES_NORM);
goto drop;
}
/* Check for illegal packets */
if (hlen < (int)sizeof(struct ip)) {
@ -1094,8 +1105,10 @@ pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kkif *kif, u_short *reason
goto drop;
}
/* Clear IP_DF if the rule uses the no-df option */
if (r->rule_flag & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
/* Clear IP_DF if the rule uses the no-df option or we're in no-df mode */
if ((((r && r->rule_flag & PFRULE_NODF) ||
(V_pf_status.reass & PF_REASS_NODF)) && h->ip_off & htons(IP_DF)
)) {
u_int16_t ip_off = h->ip_off;
h->ip_off &= htons(~IP_DF);
@ -1129,7 +1142,7 @@ pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kkif *kif, u_short *reason
goto bad;
}
if (! (r->rule_flag & PFRULE_FRAGMENT_NOREASS)) {
if (r == NULL || !(r->rule_flag & PFRULE_FRAGMENT_NOREASS)) {
max = fragoff + ip_len;
/* Fully buffer all of the fragments
@ -1157,8 +1170,10 @@ pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kkif *kif, u_short *reason
h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
}
}
pf_scrub_ip(&m, r->rule_flag, r->min_ttl, r->set_tos);
if (r != NULL) {
int scrub_flags = pf_rule_to_scrub_flags(r->rule_flag);
pf_scrub_ip(&m, scrub_flags, r->min_ttl, r->set_tos);
}
return (PF_PASS);
@ -1192,10 +1207,14 @@ pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kkif *kif,
int ooff;
u_int8_t proto;
int terminal;
int srs;
PF_RULES_RASSERT();
r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
/* Check if there are any scrub rules. Lack of scrub rules means enforced
* packet normalization, just like in OpenBSD. */
srs = (r != NULL);
while (r != NULL) {
pf_counter_u64_add(&r->evaluations, 1);
if (pfi_kkif_match(r->kif, kif) == r->ifnot)
@ -1220,13 +1239,17 @@ pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kkif *kif,
break;
}
if (r == NULL || r->action == PF_NOSCRUB)
return (PF_PASS);
if (srs) {
/* With scrub rules present, IPv6 normalization happens only
 * if one of the rules has matched and it is not a "no scrub" rule */
if (r == NULL || r->action == PF_NOSCRUB)
return (PF_PASS);
pf_counter_u64_critical_enter();
pf_counter_u64_add_protected(&r->packets[dir == PF_OUT], 1);
pf_counter_u64_add_protected(&r->bytes[dir == PF_OUT], pd->tot_len);
pf_counter_u64_critical_exit();
pf_counter_u64_critical_enter();
pf_counter_u64_add_protected(&r->packets[dir == PF_OUT], 1);
pf_counter_u64_add_protected(&r->bytes[dir == PF_OUT], pd->tot_len);
pf_counter_u64_critical_exit();
}
/* Check for illegal packets */
if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
@ -1297,7 +1320,10 @@ pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kkif *kif,
if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
goto shortpkt;
pf_scrub_ip6(&m, r->rule_flag, r->min_ttl, r->set_tos);
if (r != NULL) {
int scrub_flags = pf_rule_to_scrub_flags(r->rule_flag);
pf_scrub_ip6(&m, scrub_flags, r->min_ttl, r->set_tos);
}
return (PF_PASS);
@ -1347,10 +1373,14 @@ pf_normalize_tcp(int dir, struct pfi_kkif *kif, struct mbuf *m, int ipoff,
u_short reason;
u_int8_t flags;
sa_family_t af = pd->af;
int srs;
PF_RULES_RASSERT();
r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
/* Check if there are any scrub rules. Lack of scrub rules means enforced
 * packet normalization, just like in OpenBSD. */
srs = (r != NULL);
while (r != NULL) {
pf_counter_u64_add(&r->evaluations, 1);
if (pfi_kkif_match(r->kif, kif) == r->ifnot)
@ -1383,15 +1413,19 @@ pf_normalize_tcp(int dir, struct pfi_kkif *kif, struct mbuf *m, int ipoff,
}
}
if (rm == NULL || rm->action == PF_NOSCRUB)
return (PF_PASS);
if (srs) {
/* With scrub rules present, TCP normalization happens only
 * if one of the rules has matched and it is not a "no scrub" rule */
if (rm == NULL || rm->action == PF_NOSCRUB)
return (PF_PASS);
pf_counter_u64_critical_enter();
pf_counter_u64_add_protected(&r->packets[dir == PF_OUT], 1);
pf_counter_u64_add_protected(&r->bytes[dir == PF_OUT], pd->tot_len);
pf_counter_u64_critical_exit();
pf_counter_u64_critical_enter();
pf_counter_u64_add_protected(&r->packets[dir == PF_OUT], 1);
pf_counter_u64_add_protected(&r->bytes[dir == PF_OUT], pd->tot_len);
pf_counter_u64_critical_exit();
}
if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
if (rm && rm->rule_flag & PFRULE_REASSEMBLE_TCP)
pd->flags |= PFDESC_TCP_NORM;
flags = th->th_flags;
@ -1439,9 +1473,10 @@ pf_normalize_tcp(int dir, struct pfi_kkif *kif, struct mbuf *m, int ipoff,
rewrite = 1;
}
/* Process options */
if (r->max_mss && pf_normalize_tcpopt(r, m, th, off, pd->af))
rewrite = 1;
/* Set MSS for old-style scrub rules.
* The function performs its own copyback. */
if (rm != NULL && rm->max_mss)
pf_normalize_mss(m, off, pd, rm->max_mss);
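/*
 * Worked example with illustrative numbers (not from this commit):
 * for a peer reached through a 1400-byte MTU tunnel,
 *
 *     max_mss = tunnel_mtu - sizeof(struct ip) - sizeof(struct tcphdr)
 *             = 1400 - 20 - 20 = 1360
 *
 * so a SYN advertising the Ethernet-sized MSS 1460 is rewritten to 1360
 * and the TCP checksum is patched via pf_patch_16_unaligned().
 */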
/* copy back packet headers if we sanitized */
if (rewrite)
@ -1944,14 +1979,13 @@ pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
return (0);
}
static int
pf_normalize_tcpopt(struct pf_krule *r, struct mbuf *m, struct tcphdr *th,
int off, sa_family_t af)
int
pf_normalize_mss(struct mbuf *m, int off, struct pf_pdesc *pd, u_int16_t maxmss)
{
struct tcphdr *th = &pd->hdr.tcp;
u_int16_t *mss;
int thoff;
int opt, cnt, optlen = 0;
int rewrite = 0;
u_char opts[TCP_MAXOLEN];
u_char *optp = opts;
size_t startoff;
@ -1960,8 +1994,8 @@ pf_normalize_tcpopt(struct pf_krule *r, struct mbuf *m, struct tcphdr *th,
cnt = thoff - sizeof(struct tcphdr);
if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt,
NULL, NULL, af))
return (rewrite);
NULL, NULL, pd->af))
return (0);
for (; cnt > 0; cnt -= optlen, optp += optlen) {
startoff = optp - opts;
@ -1980,13 +2014,15 @@ pf_normalize_tcpopt(struct pf_krule *r, struct mbuf *m, struct tcphdr *th,
switch (opt) {
case TCPOPT_MAXSEG:
mss = (u_int16_t *)(optp + 2);
if ((ntohs(*mss)) > r->max_mss) {
if ((ntohs(*mss)) > maxmss) {
pf_patch_16_unaligned(m,
&th->th_sum,
mss, htons(r->max_mss),
mss, htons(maxmss),
PF_ALGNMNT(startoff),
0);
rewrite = 1;
m_copyback(m, off + sizeof(*th),
thoff - sizeof(*th), opts);
m_copyback(m, off, sizeof(*th), (caddr_t)th);
}
break;
default:
@ -1994,21 +2030,37 @@ pf_normalize_tcpopt(struct pf_krule *r, struct mbuf *m, struct tcphdr *th,
}
}
if (rewrite)
m_copyback(m, off + sizeof(*th), thoff - sizeof(*th), opts);
return (rewrite);
return (0);
}
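/*
 * Design note: pf_normalize_tcpopt() deferred m_copyback() until the
 * caller saw a non-zero "rewrite"; pf_normalize_mss() instead copies the
 * options and the TCP header back immediately, which is why its call
 * site above says "The function performs its own copyback".
 */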
u_int16_t
pf_rule_to_scrub_flags(u_int32_t rule_flags)
{
/*
 * Translate pf_krule->rule_flag to pf_krule->scrub_flags.
 * The pf_scrub_ip functions have been adapted to the new style of pass
 * rules but may still be called when old-style scrub rules are in use.
 */
int scrub_flags = 0;
if (rule_flags & PFRULE_SET_TOS)
scrub_flags |= PFSTATE_SETTOS;
if (rule_flags & PFRULE_RANDOMID)
scrub_flags |= PFSTATE_RANDOMID;
return (scrub_flags);
}
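/*
 * The translation is deliberately partial: min_ttl and set_tos travel as
 * separate arguments, and PFRULE_NODF is tested directly in
 * pf_normalize_ip() above. A call site for old-style rules therefore
 * pairs the two helpers, as already shown in this diff:
 *
 *	if (r != NULL) {
 *		int scrub_flags = pf_rule_to_scrub_flags(r->rule_flag);
 *		pf_scrub_ip(&m, scrub_flags, r->min_ttl, r->set_tos);
 *	}
 */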
#ifdef INET
static void
void
pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
{
struct mbuf *m = *m0;
struct ip *h = mtod(m, struct ip *);
/* Clear IP_DF if no-df was requested */
if (flags & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
if (flags & PFSTATE_NODF && h->ip_off & htons(IP_DF)) {
u_int16_t ip_off = h->ip_off;
h->ip_off &= htons(~IP_DF);
@ -2024,7 +2076,7 @@ pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
}
/* Enforce tos */
if (flags & PFRULE_SET_TOS) {
if (flags & PFSTATE_SETTOS) {
u_int16_t ov, nv;
ov = *(u_int16_t *)h;
@ -2035,7 +2087,7 @@ pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
}
/* random-id, but not for fragments */
if (flags & PFRULE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
if (flags & PFSTATE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
uint16_t ip_id = h->ip_id;
ip_fillid(h);
@ -2045,7 +2097,7 @@ pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
#endif /* INET */
#ifdef INET6
static void
void
pf_scrub_ip6(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
{
struct mbuf *m = *m0;
@ -2056,7 +2108,7 @@ pf_scrub_ip6(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
h->ip6_hlim = min_ttl;
/* Enforce tos. Set traffic class bits */
if (flags & PFRULE_SET_TOS) {
if (flags & PFSTATE_SETTOS) {
h->ip6_flow &= IPV6_FLOWLABEL_MASK | IPV6_VERSION_MASK;
h->ip6_flow |= htonl((tos | IPV6_ECN(h)) << 20);
}
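/*
 * For reference, the ip6_flow word this manipulates (bit order after
 * ntohl()):
 *   bits 31..28  version        (kept by IPV6_VERSION_MASK)
 *   bits 27..20  traffic class  (replaced: (tos | IPV6_ECN(h)) << 20)
 *   bits 19..0   flow label     (kept by IPV6_FLOWLABEL_MASK)
 */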

@ -300,7 +300,7 @@ pf_syncookie_send(struct mbuf *m, int off, struct pf_pdesc *pd)
iss = pf_syncookie_generate(m, off, pd, mss);
pf_send_tcp(NULL, pd->af, pd->dst, pd->src, *pd->dport, *pd->sport,
iss, ntohl(pd->hdr.tcp.th_seq) + 1, TH_SYN|TH_ACK, 0, mss,
0, 1, 0);
0, 1, 0, pd->act.rtableid);
counter_u64_add(V_pf_status.lcounters[KLCNT_SYNCOOKIES_SENT], 1);
/* XXX Maybe only in adaptive mode? */
atomic_add_64(&V_pf_status.syncookies_inflight[V_pf_syncookie_status.oddeven],
@ -519,5 +519,5 @@ pf_syncookie_recreate_syn(uint8_t ttl, int off, struct pf_pdesc *pd)
return (pf_build_tcp(NULL, pd->af, pd->src, pd->dst, *pd->sport,
*pd->dport, seq, 0, TH_SYN, wscale, mss, ttl, 0,
PF_TAG_SYNCOOKIE_RECREATED));
PF_TAG_SYNCOOKIE_RECREATED, pd->act.rtableid));
}
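/*
 * Note: pd->act.rtableid is now threaded through both syncookie paths, so
 * the generated SYN+ACK and the later recreated SYN can be routed in the
 * same rtable as the rule that matched (an inference from the signature
 * change; the diff itself does not state the motivation).
 */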