pf: Simplify rule actions logic

In the case of stateless firewalling, the actions applied to a processed
packet come from a rule; in the case of stateful firewalling, they come
from a state. The state obtains its actions from the rule that creates
it, or from pfsync. The logic for deciding whether actions come from a
rule or from a state is spread across many places in pf.

There already is a struct pf_rule_actions inside struct pf_pdesc, so it
can serve as the central place for storing actions and their parameters.
OpenBSD does something similar: they also store the actions in struct
pf_pdesc and keep no action variables in pf_test(), but they use
separate variables instead of a single structure. Using struct
pf_rule_actions lets us simplify the code even further. Actions are
applied *only* in pf_rule_to_actions(), whether they come from legacy
scrub rules or from normal match / pass rules. The choice between rule
and state actions is made only once, in pf_test(), by copying the whole
struct.

Reviewed by:	kp
Sponsored by:	InnoGames GmbH
Differential Revision:	https://reviews.freebsd.org/D41009
commit 6b4ed16d74
parent 9d843ba324
Author:	Kajetan Staszkiewicz
Date:	2023-07-13 07:08:24 +02:00
Committer:	Kristof Provost
5 changed files with 139 additions and 265 deletions
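
The net effect is a single pattern: actions are accumulated once in
pd->act, snapshotted into the state at creation time, and copied back
from the state when a packet matches one. The sketch below is a reduced
model of that pattern, not the kernel code — struct and function names
are shortened, the field list mirrors only the members removed from
struct pf_kstate in the first hunk, and locking, counters and error
handling are omitted.

#include <stdint.h>
#include <string.h>

/*
 * Reduced model of struct pf_rule_actions: one container for every
 * per-packet action parameter.  The field set mirrors the members
 * removed from struct pf_kstate below.
 */
struct rule_actions {
	uint16_t qid, pqid;		/* ALTQ queue ids */
	uint16_t dnpipe, dnrpipe;	/* dummynet pipes */
	int32_t	 rtableid;		/* routing table, -1 when unset */
	uint8_t	 log, min_ttl, set_tos;
	uint16_t max_mss;
	uint8_t	 set_prio[2];		/* 802.1q priorities */
	uint16_t flags;			/* PFSTATE_* action flags */
};

struct pdesc { struct rule_actions act; /* ... */ };
struct state { struct rule_actions act; /* ... */ };

/*
 * State creation: snapshot the actions accumulated during ruleset
 * evaluation, so the stateful path never re-derives them from rules.
 */
static void
create_state(struct state *s, const struct pdesc *pd)
{
	memcpy(&s->act, &pd->act, sizeof(struct rule_actions));
}

/*
 * The single rule-vs-state decision point near the end of pf_test():
 * a matching state overrides whatever ruleset evaluation left in
 * pd->act; every later consumer (scrub, MSS clamping, queueing,
 * logging) reads pd->act only.
 */
static void
resolve_actions(struct pdesc *pd, const struct state *s)
{
	if (s != NULL)
		memcpy(&pd->act, &s->act, sizeof(struct rule_actions));
}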


@@ -1061,18 +1061,9 @@ struct pf_kstate {
u_int32_t creation;
u_int32_t expire;
u_int32_t pfsync_time;
u_int16_t qid;
u_int16_t pqid;
u_int16_t dnpipe;
u_int16_t dnrpipe;
struct pf_rule_actions act;
u_int16_t tag;
u_int8_t log;
int32_t rtableid;
u_int8_t min_ttl;
u_int8_t set_tos;
u_int16_t max_mss;
u_int8_t rt;
u_int8_t set_prio[2];
};
/*
@@ -2480,15 +2471,15 @@ struct pf_krule *pf_get_translation(struct pf_pdesc *, struct mbuf *,
struct pf_state_key *pf_state_key_setup(struct pf_pdesc *, struct pf_addr *,
struct pf_addr *, u_int16_t, u_int16_t);
struct pf_state_key *pf_state_key_clone(struct pf_state_key *);
void pf_rule_to_actions(struct pf_krule *,
struct pf_rule_actions *);
int pf_normalize_mss(struct mbuf *m, int off,
struct pf_pdesc *pd, u_int16_t maxmss);
u_int16_t pf_rule_to_scrub_flags(u_int32_t);
struct pf_pdesc *pd);
#ifdef INET
void pf_scrub_ip(struct mbuf **, uint32_t, uint8_t, uint8_t);
void pf_scrub_ip(struct mbuf **, struct pf_pdesc *);
#endif /* INET */
#ifdef INET6
void pf_scrub_ip6(struct mbuf **, uint32_t, uint8_t, uint8_t);
void pf_scrub_ip6(struct mbuf **, struct pf_pdesc *);
#endif /* INET6 */
struct pfi_kkif *pf_kkif_create(int);


@@ -618,7 +618,7 @@ pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
}
st->direction = sp->pfs_1301.direction;
st->log = sp->pfs_1301.log;
st->act.log = sp->pfs_1301.log;
st->timeout = sp->pfs_1301.timeout;
switch (msg_version) {
@@ -639,13 +639,13 @@ pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
* from multiple "match" rules as only rule
* creating the state is send over pfsync.
*/
st->qid = r->qid;
st->pqid = r->pqid;
st->rtableid = r->rtableid;
st->act.qid = r->qid;
st->act.pqid = r->pqid;
st->act.rtableid = r->rtableid;
if (r->scrub_flags & PFSTATE_SETTOS)
st->set_tos = r->set_tos;
st->min_ttl = r->min_ttl;
st->max_mss = r->max_mss;
st->act.set_tos = r->set_tos;
st->act.min_ttl = r->min_ttl;
st->act.max_mss = r->max_mss;
st->state_flags |= (r->scrub_flags &
(PFSTATE_NODF|PFSTATE_RANDOMID|
PFSTATE_SETTOS|PFSTATE_SCRUB_TCP|
@@ -656,22 +656,22 @@ pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
else
st->state_flags &= ~PFSTATE_DN_IS_PIPE;
}
st->dnpipe = r->dnpipe;
st->dnrpipe = r->dnrpipe;
st->act.dnpipe = r->dnpipe;
st->act.dnrpipe = r->dnrpipe;
}
break;
case PFSYNC_MSG_VERSION_1400:
st->state_flags = ntohs(sp->pfs_1400.state_flags);
st->qid = ntohs(sp->pfs_1400.qid);
st->pqid = ntohs(sp->pfs_1400.pqid);
st->dnpipe = ntohs(sp->pfs_1400.dnpipe);
st->dnrpipe = ntohs(sp->pfs_1400.dnrpipe);
st->rtableid = ntohl(sp->pfs_1400.rtableid);
st->min_ttl = sp->pfs_1400.min_ttl;
st->set_tos = sp->pfs_1400.set_tos;
st->max_mss = ntohs(sp->pfs_1400.max_mss);
st->set_prio[0] = sp->pfs_1400.set_prio[0];
st->set_prio[1] = sp->pfs_1400.set_prio[1];
st->act.qid = ntohs(sp->pfs_1400.qid);
st->act.pqid = ntohs(sp->pfs_1400.pqid);
st->act.dnpipe = ntohs(sp->pfs_1400.dnpipe);
st->act.dnrpipe = ntohs(sp->pfs_1400.dnrpipe);
st->act.rtableid = ntohl(sp->pfs_1400.rtableid);
st->act.min_ttl = sp->pfs_1400.min_ttl;
st->act.set_tos = sp->pfs_1400.set_tos;
st->act.max_mss = ntohs(sp->pfs_1400.max_mss);
st->act.set_prio[0] = sp->pfs_1400.set_prio[0];
st->act.set_prio[1] = sp->pfs_1400.set_prio[1];
st->rt = sp->pfs_1400.rt;
if (st->rt && (st->rt_kif = pfi_kkif_find(sp->pfs_1400.rt_ifname)) == NULL) {
if (V_pf_status.debug >= PF_DEBUG_MISC)


@@ -275,8 +275,6 @@ static int pf_state_key_attach(struct pf_state_key *,
static void pf_state_key_detach(struct pf_kstate *, int);
static int pf_state_key_ctor(void *, int, void *, int);
static u_int32_t pf_tcp_iss(struct pf_pdesc *);
void pf_rule_to_actions(struct pf_krule *,
struct pf_rule_actions *);
static int pf_dummynet(struct pf_pdesc *, struct pf_kstate *,
struct pf_krule *, struct mbuf **);
static int pf_dummynet_route(struct pf_pdesc *,
@@ -2048,7 +2046,7 @@ pf_unlink_state(struct pf_kstate *s)
s->key[PF_SK_WIRE]->port[1],
s->key[PF_SK_WIRE]->port[0],
s->src.seqhi, s->src.seqlo + 1,
TH_RST|TH_ACK, 0, 0, 0, true, s->tag, 0, s->rtableid);
TH_RST|TH_ACK, 0, 0, 0, true, s->tag, 0, s->act.rtableid);
}
LIST_REMOVE(s, entry);
@@ -3590,8 +3588,22 @@ pf_addr_inc(struct pf_addr *addr, sa_family_t af)
void
pf_rule_to_actions(struct pf_krule *r, struct pf_rule_actions *a)
{
/*
* Modern rules use the same flags in rules as they do in states.
*/
a->flags |= (r->scrub_flags & (PFSTATE_NODF|PFSTATE_RANDOMID|
PFSTATE_SETTOS|PFSTATE_SCRUB_TCP|PFSTATE_SETPRIO));
PFSTATE_SCRUB_TCP|PFSTATE_SETPRIO));
/*
* Old-style scrub rules have different flags which need to be translated.
*/
if (r->rule_flag & PFRULE_RANDOMID)
a->flags |= PFSTATE_RANDOMID;
if (r->scrub_flags & PFSTATE_SETTOS || r->rule_flag & PFRULE_SET_TOS ) {
a->flags |= PFSTATE_SETTOS;
a->set_tos = r->set_tos;
}
if (r->qid)
a->qid = r->qid;
if (r->pqid)
@@ -3599,8 +3611,6 @@ pf_rule_to_actions(struct pf_krule *r, struct pf_rule_actions *a)
if (r->rtableid >= 0)
a->rtableid = r->rtableid;
a->log |= r->log;
if (a->flags & PFSTATE_SETTOS)
a->set_tos = r->set_tos;
if (r->min_ttl)
a->min_ttl = r->min_ttl;
if (r->max_mss)
@@ -3615,7 +3625,7 @@ pf_rule_to_actions(struct pf_krule *r, struct pf_rule_actions *a)
else
a->flags &= ~PFSTATE_DN_IS_PIPE;
}
if (a->flags & PFSTATE_SETPRIO) {
if (r->scrub_flags & PFSTATE_SETPRIO) {
a->set_prio[0] = r->set_prio[0];
a->set_prio[1] = r->set_prio[1];
}
@@ -4620,6 +4630,8 @@ pf_create_state(struct pf_krule *r, struct pf_krule *nr, struct pf_krule *a,
s->nat_rule.ptr = nr;
s->anchor.ptr = a;
bcopy(match_rules, &s->match_rules, sizeof(s->match_rules));
memcpy(&s->act, &pd->act, sizeof(struct pf_rule_actions));
STATE_INC_COUNTERS(s);
if (r->allow_opts)
s->state_flags |= PFSTATE_ALLOWOPTS;
@@ -4627,23 +4639,13 @@ pf_create_state(struct pf_krule *r, struct pf_krule *nr, struct pf_krule *a,
s->state_flags |= PFSTATE_SLOPPY;
if (pd->flags & PFDESC_TCP_NORM) /* Set by old-style scrub rules */
s->state_flags |= PFSTATE_SCRUB_TCP;
s->log = pd->act.log & PF_LOG_ALL;
s->qid = pd->act.qid;
s->pqid = pd->act.pqid;
s->rtableid = pd->act.rtableid;
s->min_ttl = pd->act.min_ttl;
s->set_tos = pd->act.set_tos;
s->max_mss = pd->act.max_mss;
s->act.log = pd->act.log & PF_LOG_ALL;
s->sync_state = PFSYNC_S_NONE;
s->qid = pd->act.qid;
s->pqid = pd->act.pqid;
s->dnpipe = pd->act.dnpipe;
s->dnrpipe = pd->act.dnrpipe;
s->set_prio[0] = pd->act.set_prio[0];
s->set_prio[1] = pd->act.set_prio[1];
s->state_flags |= pd->act.flags;
s->state_flags |= pd->act.flags; /* Only needed for pfsync and state export */
if (nr != NULL)
s->log |= nr->log & PF_LOG_ALL;
s->act.log |= nr->log & PF_LOG_ALL;
switch (pd->proto) {
case IPPROTO_TCP:
s->src.seqlo = ntohl(th->th_seq);
@@ -5280,7 +5282,7 @@ pf_tcp_track_full(struct pf_kstate **state, struct pfi_kkif *kif,
th->th_sport, ntohl(th->th_ack), 0,
TH_RST, 0, 0,
(*state)->rule.ptr->return_ttl, true, 0, 0,
(*state)->rtableid);
(*state)->act.rtableid);
src->seqlo = 0;
src->seqhi = 1;
src->max_win = 1;
@@ -5417,7 +5419,7 @@ pf_synproxy(struct pf_pdesc *pd, struct pf_kstate **state, u_short *reason)
pd->src, th->th_dport, th->th_sport,
(*state)->src.seqhi, ntohl(th->th_seq) + 1,
TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, true, 0, 0,
(*state)->rtableid);
(*state)->act.rtableid);
REASON_SET(reason, PFRES_SYNPROXY);
return (PF_SYNPROXY_DROP);
} else if ((th->th_flags & (TH_ACK|TH_RST|TH_FIN)) != TH_ACK ||
@@ -5449,7 +5451,7 @@ pf_synproxy(struct pf_pdesc *pd, struct pf_kstate **state, u_short *reason)
sk->port[pd->sidx], sk->port[pd->didx],
(*state)->dst.seqhi, 0, TH_SYN, 0,
(*state)->src.mss, 0, false, (*state)->tag, 0,
(*state)->rtableid);
(*state)->act.rtableid);
REASON_SET(reason, PFRES_SYNPROXY);
return (PF_SYNPROXY_DROP);
} else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
@@ -5464,13 +5466,13 @@ pf_synproxy(struct pf_pdesc *pd, struct pf_kstate **state, u_short *reason)
pd->src, th->th_dport, th->th_sport,
ntohl(th->th_ack), ntohl(th->th_seq) + 1,
TH_ACK, (*state)->src.max_win, 0, 0, false,
(*state)->tag, 0, (*state)->rtableid);
(*state)->tag, 0, (*state)->act.rtableid);
pf_send_tcp((*state)->rule.ptr, pd->af,
&sk->addr[pd->sidx], &sk->addr[pd->didx],
sk->port[pd->sidx], sk->port[pd->didx],
(*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
TH_ACK, (*state)->dst.max_win, 0, 0, true, 0, 0,
(*state)->rtableid);
(*state)->act.rtableid);
(*state)->src.seqdiff = (*state)->dst.seqhi -
(*state)->src.seqlo;
(*state)->dst.seqdiff = (*state)->src.seqhi -
@@ -7061,15 +7063,6 @@ pf_dummynet_route(struct pf_pdesc *pd, struct pf_kstate *s,
{
NET_EPOCH_ASSERT();
if (s && (s->dnpipe || s->dnrpipe)) {
pd->act.dnpipe = s->dnpipe;
pd->act.dnrpipe = s->dnrpipe;
pd->act.flags = s->state_flags;
} else if (r->dnpipe || r->dnrpipe) {
pd->act.dnpipe = r->dnpipe;
pd->act.dnrpipe = r->dnrpipe;
pd->act.flags = r->free_flags;
}
if (pd->act.dnpipe || pd->act.dnrpipe) {
struct ip_fw_args dnflow;
if (ip_dn_io_ptr == NULL) {
@@ -7120,7 +7113,7 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
struct inpcb *inp, struct pf_rule_actions *default_actions)
{
struct pfi_kkif *kif;
u_short action, reason = 0, log = 0;
u_short action, reason = 0;
struct mbuf *m = *m0;
struct ip *h = NULL;
struct m_tag *ipfwtag;
@@ -7128,18 +7121,9 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
struct pf_kstate *s = NULL;
struct pf_kruleset *ruleset = NULL;
struct pf_pdesc pd;
int off, dirndx;
uint16_t scrub_flags;
#ifdef ALTQ
uint16_t qid;
#endif
uint16_t pqid;
int off, dirndx, use_2nd_queue = 0;
uint16_t tag;
int32_t rtableid;
uint8_t min_ttl;
uint8_t set_tos;
uint8_t rt;
uint8_t set_prio[2];
PF_RULES_RLOCK_TRACKER;
KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: bad direction %d\n", __func__, dir));
@@ -7245,7 +7229,7 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
if (off < (int)sizeof(struct ip)) {
action = PF_DROP;
REASON_SET(&reason, PFRES_SHORT);
log = PF_LOG_FORCE;
pd.act.log = PF_LOG_FORCE;
goto done;
}
@@ -7267,7 +7251,7 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
if (!pf_pull_hdr(m, off, &pd.hdr.tcp, sizeof(pd.hdr.tcp),
&action, &reason, AF_INET)) {
if (action != PF_PASS)
log = PF_LOG_FORCE;
pd.act.log = PF_LOG_FORCE;
goto done;
}
pd.p_len = pd.tot_len - off - (pd.hdr.tcp.th_off << 2);
@@ -7284,7 +7268,7 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
}
if ((pd.hdr.tcp.th_flags & TH_ACK) && pd.p_len == 0)
pqid = 1;
use_2nd_queue = 1;
action = pf_normalize_tcp(kif, m, 0, off, h, &pd);
if (action == PF_DROP)
goto done;
@@ -7294,7 +7278,6 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
V_pfsync_update_state_ptr(s);
r = s->rule.ptr;
a = s->anchor.ptr;
log = s->log;
} else if (s == NULL) {
/* Validate remote SYN|ACK, re-create original SYN if
* valid. */
@@ -7339,11 +7322,6 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
&pd, &a, &ruleset, inp);
}
}
if (s) {
if (s->max_mss)
pf_normalize_mss(m, off, &pd, s->max_mss);
} else if (r->max_mss)
pf_normalize_mss(m, off, &pd, r->max_mss);
break;
}
@@ -7351,7 +7329,7 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
if (!pf_pull_hdr(m, off, &pd.hdr.udp, sizeof(pd.hdr.udp),
&action, &reason, AF_INET)) {
if (action != PF_PASS)
log = PF_LOG_FORCE;
pd.act.log = PF_LOG_FORCE;
goto done;
}
pd.sport = &pd.hdr.udp.uh_sport;
@@ -7369,7 +7347,6 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
V_pfsync_update_state_ptr(s);
r = s->rule.ptr;
a = s->anchor.ptr;
log = s->log;
} else if (s == NULL)
action = pf_test_rule(&r, &s, kif, m, off, &pd,
&a, &ruleset, inp);
@@ -7380,7 +7357,7 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
if (!pf_pull_hdr(m, off, &pd.hdr.icmp, ICMP_MINLEN,
&action, &reason, AF_INET)) {
if (action != PF_PASS)
log = PF_LOG_FORCE;
pd.act.log = PF_LOG_FORCE;
goto done;
}
action = pf_test_state_icmp(&s, kif, m, off, h, &pd, &reason);
@@ -7389,7 +7366,6 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
V_pfsync_update_state_ptr(s);
r = s->rule.ptr;
a = s->anchor.ptr;
log = s->log;
} else if (s == NULL)
action = pf_test_rule(&r, &s, kif, m, off, &pd,
&a, &ruleset, inp);
@@ -7412,7 +7388,6 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
V_pfsync_update_state_ptr(s);
r = s->rule.ptr;
a = s->anchor.ptr;
log = s->log;
} else if (s == NULL)
action = pf_test_rule(&r, &s, kif, m, off, &pd,
&a, &ruleset, inp);
@@ -7425,37 +7400,18 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
!((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
action = PF_DROP;
REASON_SET(&reason, PFRES_IPOPTIONS);
log = PF_LOG_FORCE;
pd.act.log = PF_LOG_FORCE;
DPFPRINTF(PF_DEBUG_MISC,
("pf: dropping packet with ip options\n"));
}
if (s) {
scrub_flags = s->state_flags;
min_ttl = s->min_ttl;
set_tos = s->set_tos;
rtableid = s->rtableid;
pqid = s->pqid;
#ifdef ALTQ
qid = s->qid;
#endif
memcpy(&pd.act, &s->act, sizeof(struct pf_rule_actions));
tag = s->tag;
rt = s->rt;
set_prio[0] = s->set_prio[0];
set_prio[1] = s->set_prio[1];
} else {
scrub_flags = r->scrub_flags;
min_ttl = r->min_ttl;
set_tos = r->set_tos;
rtableid = r->rtableid;
pqid = r->pqid;
#ifdef ALTQ
qid = r->qid;
#endif
tag = r->tag;
rt = r->rt;
set_prio[0] = r->set_prio[0];
set_prio[1] = r->set_prio[1];
}
if (tag > 0 && pf_tag_packet(m, &pd, tag)) {
@@ -7463,29 +7419,26 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
REASON_SET(&reason, PFRES_MEMORY);
}
pf_scrub_ip(&m, scrub_flags, min_ttl, set_tos);
pf_scrub_ip(&m, &pd);
if (pd.proto == IPPROTO_TCP && pd.act.max_mss)
pf_normalize_mss(m, off, &pd);
if (rtableid >= 0)
M_SETFIB(m, rtableid);
if (pd.act.rtableid >= 0)
M_SETFIB(m, pd.act.rtableid);
if (scrub_flags & PFSTATE_SETPRIO) {
if (pd.act.flags & PFSTATE_SETPRIO) {
if (pd.tos & IPTOS_LOWDELAY)
pqid = 1;
if (vlan_set_pcp(m, set_prio[pqid])) {
use_2nd_queue = 1;
if (vlan_set_pcp(m, pd.act.set_prio[use_2nd_queue])) {
action = PF_DROP;
REASON_SET(&reason, PFRES_MEMORY);
log = PF_LOG_FORCE;
pd.act.log = PF_LOG_FORCE;
DPFPRINTF(PF_DEBUG_MISC,
("pf: failed to allocate 802.1q mtag\n"));
}
}
#ifdef ALTQ
if (qid) {
pd.act.pqid = pqid;
pd.act.qid = qid;
}
if (action == PF_PASS && pd.act.qid) {
if (pd.pf_mtag == NULL &&
((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
@@ -7494,7 +7447,7 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
} else {
if (s != NULL)
pd.pf_mtag->qid_hash = pf_state_hash(s);
if (pqid || (pd.tos & IPTOS_LOWDELAY))
if (use_2nd_queue || (pd.tos & IPTOS_LOWDELAY))
pd.pf_mtag->qid = pd.act.pqid;
else
pd.pf_mtag->qid = pd.act.qid;
@@ -7534,7 +7487,7 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
action = PF_DROP;
REASON_SET(&reason, PFRES_MEMORY);
log = PF_LOG_FORCE;
pd.act.log = PF_LOG_FORCE;
DPFPRINTF(PF_DEBUG_MISC,
("pf: failed to allocate tag\n"));
} else {
@@ -7551,13 +7504,13 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
/* XXX: ipfw has the same behaviour! */
action = PF_DROP;
REASON_SET(&reason, PFRES_MEMORY);
log = PF_LOG_FORCE;
pd.act.log = PF_LOG_FORCE;
DPFPRINTF(PF_DEBUG_MISC,
("pf: failed to allocate divert tag\n"));
}
}
if (log) {
if (pd.act.log) {
struct pf_krule *lr;
struct pf_krule_item *ri;
@@ -7567,7 +7520,7 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
else
lr = r;
if (log & PF_LOG_FORCE || lr->log & PF_LOG_ALL)
if (pd.act.log & PF_LOG_FORCE || lr->log & PF_LOG_ALL)
PFLOG_PACKET(kif, m, AF_INET, reason, lr, a, ruleset,
&pd, (s == NULL));
if (s) {
@@ -7683,7 +7636,7 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
struct pf_rule_actions *default_actions)
{
struct pfi_kkif *kif;
u_short action, reason = 0, log = 0;
u_short action, reason = 0;
struct mbuf *m = *m0, *n = NULL;
struct m_tag *mtag;
struct ip6_hdr *h = NULL;
@@ -7691,18 +7644,9 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
struct pf_kstate *s = NULL;
struct pf_kruleset *ruleset = NULL;
struct pf_pdesc pd;
int off, terminal = 0, dirndx, rh_cnt = 0;
uint16_t scrub_flags;
#ifdef ALTQ
uint16_t qid;
#endif
uint16_t pqid;
int off, terminal = 0, dirndx, rh_cnt = 0, use_2nd_queue = 0;
uint16_t tag;
int32_t rtableid;
uint8_t min_ttl;
uint8_t set_tos;
uint8_t rt;
uint8_t set_prio[2];
PF_RULES_RLOCK_TRACKER;
KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: bad direction %d\n", __func__, dir));
@@ -7818,7 +7762,7 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
("pf: IPv6 more than one rthdr\n"));
action = PF_DROP;
REASON_SET(&reason, PFRES_IPOPTIONS);
log = PF_LOG_FORCE;
pd.act.log = PF_LOG_FORCE;
goto done;
}
if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
@@ -7827,7 +7771,7 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
("pf: IPv6 short rthdr\n"));
action = PF_DROP;
REASON_SET(&reason, PFRES_SHORT);
log = PF_LOG_FORCE;
pd.act.log = PF_LOG_FORCE;
goto done;
}
if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
@@ -7835,7 +7779,7 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
("pf: IPv6 rthdr0\n"));
action = PF_DROP;
REASON_SET(&reason, PFRES_IPOPTIONS);
log = PF_LOG_FORCE;
pd.act.log = PF_LOG_FORCE;
goto done;
}
/* FALLTHROUGH */
@@ -7851,7 +7795,7 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
DPFPRINTF(PF_DEBUG_MISC,
("pf: IPv6 short opt\n"));
action = PF_DROP;
log = PF_LOG_FORCE;
pd.act.log = PF_LOG_FORCE;
goto done;
}
if (pd.proto == IPPROTO_AH)
@@ -7877,7 +7821,7 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
if (!pf_pull_hdr(m, off, &pd.hdr.tcp, sizeof(pd.hdr.tcp),
&action, &reason, AF_INET6)) {
if (action != PF_PASS)
log |= PF_LOG_FORCE;
pd.act.log |= PF_LOG_FORCE;
goto done;
}
pd.p_len = pd.tot_len - off - (pd.hdr.tcp.th_off << 2);
@@ -7892,15 +7836,9 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
V_pfsync_update_state_ptr(s);
r = s->rule.ptr;
a = s->anchor.ptr;
log = s->log;
} else if (s == NULL)
action = pf_test_rule(&r, &s, kif, m, off, &pd,
&a, &ruleset, inp);
if (s) {
if (s->max_mss)
pf_normalize_mss(m, off, &pd, s->max_mss);
} else if (r->max_mss)
pf_normalize_mss(m, off, &pd, r->max_mss);
break;
}
@@ -7908,7 +7846,7 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
if (!pf_pull_hdr(m, off, &pd.hdr.udp, sizeof(pd.hdr.udp),
&action, &reason, AF_INET6)) {
if (action != PF_PASS)
log |= PF_LOG_FORCE;
pd.act.log |= PF_LOG_FORCE;
goto done;
}
pd.sport = &pd.hdr.udp.uh_sport;
@@ -7926,7 +7864,6 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
V_pfsync_update_state_ptr(s);
r = s->rule.ptr;
a = s->anchor.ptr;
log = s->log;
} else if (s == NULL)
action = pf_test_rule(&r, &s, kif, m, off, &pd,
&a, &ruleset, inp);
@@ -7944,7 +7881,7 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
if (!pf_pull_hdr(m, off, &pd.hdr.icmp6, sizeof(pd.hdr.icmp6),
&action, &reason, AF_INET6)) {
if (action != PF_PASS)
log |= PF_LOG_FORCE;
pd.act.log |= PF_LOG_FORCE;
goto done;
}
action = pf_test_state_icmp(&s, kif, m, off, h, &pd, &reason);
@@ -7953,7 +7890,6 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
V_pfsync_update_state_ptr(s);
r = s->rule.ptr;
a = s->anchor.ptr;
log = s->log;
} else if (s == NULL)
action = pf_test_rule(&r, &s, kif, m, off, &pd,
&a, &ruleset, inp);
@@ -7967,7 +7903,6 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
V_pfsync_update_state_ptr(s);
r = s->rule.ptr;
a = s->anchor.ptr;
log = s->log;
} else if (s == NULL)
action = pf_test_rule(&r, &s, kif, m, off, &pd,
&a, &ruleset, inp);
@@ -7986,37 +7921,18 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
!((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
action = PF_DROP;
REASON_SET(&reason, PFRES_IPOPTIONS);
log = r->log;
pd.act.log = r->log;
DPFPRINTF(PF_DEBUG_MISC,
("pf: dropping packet with dangerous v6 headers\n"));
}
if (s) {
scrub_flags = s->state_flags;
min_ttl = s->min_ttl;
set_tos = s->set_tos;
rtableid = s->rtableid;
pqid = s->pqid;
#ifdef ALTQ
qid = s->qid;
#endif
memcpy(&pd.act, &s->act, sizeof(struct pf_rule_actions));
tag = s->tag;
rt = s->rt;
set_prio[0] = s->set_prio[0];
set_prio[1] = s->set_prio[1];
} else {
scrub_flags = r->scrub_flags;
min_ttl = r->min_ttl;
set_tos = r->set_tos;
rtableid = r->rtableid;
pqid = r->pqid;
#ifdef ALTQ
qid = r->qid;
#endif
tag = r->tag;
rt = r->rt;
set_prio[0] = r->set_prio[0];
set_prio[1] = r->set_prio[1];
}
if (tag > 0 && pf_tag_packet(m, &pd, tag)) {
@@ -8024,29 +7940,26 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
REASON_SET(&reason, PFRES_MEMORY);
}
pf_scrub_ip6(&m, scrub_flags, min_ttl, set_tos);
pf_scrub_ip6(&m, &pd);
if (pd.proto == IPPROTO_TCP && pd.act.max_mss)
pf_normalize_mss(m, off, &pd);
if (rtableid >= 0)
M_SETFIB(m, rtableid);
if (pd.act.rtableid >= 0)
M_SETFIB(m, pd.act.rtableid);
if (scrub_flags & PFSTATE_SETPRIO) {
if (pd.act.flags & PFSTATE_SETPRIO) {
if (pd.tos & IPTOS_LOWDELAY)
pqid = 1;
if (vlan_set_pcp(m, set_prio[pqid])) {
use_2nd_queue = 1;
if (vlan_set_pcp(m, pd.act.set_prio[use_2nd_queue])) {
action = PF_DROP;
REASON_SET(&reason, PFRES_MEMORY);
log = PF_LOG_FORCE;
pd.act.log = PF_LOG_FORCE;
DPFPRINTF(PF_DEBUG_MISC,
("pf: failed to allocate 802.1q mtag\n"));
}
}
#ifdef ALTQ
if (qid) {
pd.act.pqid = pqid;
pd.act.qid = qid;
}
if (action == PF_PASS && pd.act.qid) {
if (pd.pf_mtag == NULL &&
((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
@@ -8076,7 +7989,7 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
if (r->divert.port)
printf("pf: divert(9) is not supported for IPv6\n");
if (log) {
if (pd.act.log) {
struct pf_krule *lr;
struct pf_krule_item *ri;
@@ -8086,7 +7999,7 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
else
lr = r;
if (log & PF_LOG_FORCE || lr->log & PF_LOG_ALL)
if (pd.act.log & PF_LOG_FORCE || lr->log & PF_LOG_ALL)
PFLOG_PACKET(kif, m, AF_INET6, reason, lr, a, ruleset,
&pd, (s == NULL));
if (s) {


@@ -5706,7 +5706,7 @@ pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_
sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime);
sp->pfs_1301.direction = st->direction;
sp->pfs_1301.log = st->log;
sp->pfs_1301.log = st->act.log;
sp->pfs_1301.timeout = st->timeout;
switch (msg_version) {
@@ -5715,16 +5715,16 @@ pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_
break;
case PFSYNC_MSG_VERSION_1400:
sp->pfs_1400.state_flags = htons(st->state_flags);
sp->pfs_1400.qid = htons(st->qid);
sp->pfs_1400.pqid = htons(st->pqid);
sp->pfs_1400.dnpipe = htons(st->dnpipe);
sp->pfs_1400.dnrpipe = htons(st->dnrpipe);
sp->pfs_1400.rtableid = htonl(st->rtableid);
sp->pfs_1400.min_ttl = st->min_ttl;
sp->pfs_1400.set_tos = st->set_tos;
sp->pfs_1400.max_mss = htons(st->max_mss);
sp->pfs_1400.set_prio[0] = st->set_prio[0];
sp->pfs_1400.set_prio[1] = st->set_prio[1];
sp->pfs_1400.qid = htons(st->act.qid);
sp->pfs_1400.pqid = htons(st->act.pqid);
sp->pfs_1400.dnpipe = htons(st->act.dnpipe);
sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe);
sp->pfs_1400.rtableid = htonl(st->act.rtableid);
sp->pfs_1400.min_ttl = st->act.min_ttl;
sp->pfs_1400.set_tos = st->act.set_tos;
sp->pfs_1400.max_mss = htons(st->act.max_mss);
sp->pfs_1400.set_prio[0] = st->act.set_prio[0];
sp->pfs_1400.set_prio[1] = st->act.set_prio[1];
sp->pfs_1400.rt = st->rt;
if (st->rt_kif)
strlcpy(sp->pfs_1400.rt_ifname,
@@ -5797,7 +5797,7 @@ pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
sp->expire = htonl(sp->expire - time_uptime);
sp->direction = st->direction;
sp->log = st->log;
sp->log = st->act.log;
sp->timeout = st->timeout;
/* 8 bits for the old libpfctl, 16 bits for the new libpfctl */
sp->state_flags_compat = st->state_flags;
@@ -5830,20 +5830,20 @@ pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
sp->bytes[0] = st->bytes[0];
sp->bytes[1] = st->bytes[1];
sp->qid = htons(st->qid);
sp->pqid = htons(st->pqid);
sp->dnpipe = htons(st->dnpipe);
sp->dnrpipe = htons(st->dnrpipe);
sp->rtableid = htonl(st->rtableid);
sp->min_ttl = st->min_ttl;
sp->set_tos = st->set_tos;
sp->max_mss = htons(st->max_mss);
sp->qid = htons(st->act.qid);
sp->pqid = htons(st->act.pqid);
sp->dnpipe = htons(st->act.dnpipe);
sp->dnrpipe = htons(st->act.dnrpipe);
sp->rtableid = htonl(st->act.rtableid);
sp->min_ttl = st->act.min_ttl;
sp->set_tos = st->act.set_tos;
sp->max_mss = htons(st->act.max_mss);
sp->rt = st->rt;
if (st->rt_kif)
strlcpy(sp->rt_ifname, st->rt_kif->pfik_name,
sizeof(sp->rt_ifname));
sp->set_prio[0] = st->set_prio[0];
sp->set_prio[1] = st->set_prio[1];
sp->set_prio[0] = st->act.set_prio[0];
sp->set_prio[1] = st->act.set_prio[1];
}


@@ -1086,6 +1086,7 @@ pf_normalize_ip(struct mbuf **m0, struct pfi_kkif *kif, u_short *reason,
pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
pf_counter_u64_critical_exit();
pf_rule_to_actions(r, &pd->act);
} else if ((!V_pf_status.reass && (h->ip_off & htons(IP_MF | IP_OFFMASK)))) {
/* With no scrub rules IPv4 fragment reassembly depends on the
* global switch. Fragments can be dropped early if reassembly
@@ -1170,10 +1171,6 @@ pf_normalize_ip(struct mbuf **m0, struct pfi_kkif *kif, u_short *reason,
h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
}
}
if (r != NULL) {
int scrub_flags = pf_rule_to_scrub_flags(r->rule_flag);
pf_scrub_ip(&m, scrub_flags, r->min_ttl, r->set_tos);
}
return (PF_PASS);
@@ -1248,6 +1245,7 @@ pf_normalize_ip6(struct mbuf **m0, struct pfi_kkif *kif,
pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
pf_counter_u64_critical_exit();
pf_rule_to_actions(r, &pd->act);
}
/* Check for illegal packets */
@@ -1319,11 +1317,6 @@ pf_normalize_ip6(struct mbuf **m0, struct pfi_kkif *kif,
if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
goto shortpkt;
if (r != NULL) {
int scrub_flags = pf_rule_to_scrub_flags(r->rule_flag);
pf_scrub_ip6(&m, scrub_flags, r->min_ttl, r->set_tos);
}
return (PF_PASS);
fragment:
@@ -1420,6 +1413,7 @@ pf_normalize_tcp(struct pfi_kkif *kif, struct mbuf *m, int ipoff,
pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
pf_counter_u64_critical_exit();
pf_rule_to_actions(rm, &pd->act);
}
if (rm && rm->rule_flag & PFRULE_REASSEMBLE_TCP)
@@ -1470,11 +1464,6 @@ pf_normalize_tcp(struct pfi_kkif *kif, struct mbuf *m, int ipoff,
rewrite = 1;
}
/* Set MSS for old-style scrub rules.
* The function performs its own copyback. */
if (rm != NULL && rm->max_mss)
pf_normalize_mss(m, off, pd, rm->max_mss);
/* copy back packet headers if we sanitized */
if (rewrite)
m_copyback(m, off, sizeof(*th), (caddr_t)th);
@@ -1974,7 +1963,7 @@ pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
}
int
pf_normalize_mss(struct mbuf *m, int off, struct pf_pdesc *pd, u_int16_t maxmss)
pf_normalize_mss(struct mbuf *m, int off, struct pf_pdesc *pd)
{
struct tcphdr *th = &pd->hdr.tcp;
u_int16_t *mss;
@@ -2008,10 +1997,10 @@ pf_normalize_mss(struct mbuf *m, int off, struct pf_pdesc *pd, u_int16_t maxmss)
switch (opt) {
case TCPOPT_MAXSEG:
mss = (u_int16_t *)(optp + 2);
if ((ntohs(*mss)) > maxmss) {
if ((ntohs(*mss)) > pd->act.max_mss) {
pf_patch_16_unaligned(m,
&th->th_sum,
mss, htons(maxmss),
mss, htons(pd->act.max_mss),
PF_ALGNMNT(startoff),
0);
m_copyback(m, off + sizeof(*th),
@@ -2027,34 +2016,15 @@ pf_normalize_mss(struct mbuf *m, int off, struct pf_pdesc *pd, u_int16_t maxmss)
return (0);
}
u_int16_t
pf_rule_to_scrub_flags(u_int32_t rule_flags)
{
/*
* Translate pf_krule->rule_flag to pf_krule->scrub_flags.
* The pf_scrub_ip functions have been adapted to the new style of pass
* rules but they might get called if old scrub rules are used.
*/
int scrub_flags = 0;
if (rule_flags & PFRULE_SET_TOS) {
scrub_flags |= PFSTATE_SETTOS;
}
if (rule_flags & PFRULE_RANDOMID)
scrub_flags |= PFSTATE_RANDOMID;
return scrub_flags;
}
#ifdef INET
void
pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
pf_scrub_ip(struct mbuf **m0, struct pf_pdesc *pd)
{
struct mbuf *m = *m0;
struct ip *h = mtod(m, struct ip *);
/* Clear IP_DF if no-df was requested */
if (flags & PFSTATE_NODF && h->ip_off & htons(IP_DF)) {
if (pd->act.flags & PFSTATE_NODF && h->ip_off & htons(IP_DF)) {
u_int16_t ip_off = h->ip_off;
h->ip_off &= htons(~IP_DF);
@@ -2062,26 +2032,26 @@ pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
}
/* Enforce a minimum ttl, may cause endless packet loops */
if (min_ttl && h->ip_ttl < min_ttl) {
if (pd->act.min_ttl && h->ip_ttl < pd->act.min_ttl) {
u_int16_t ip_ttl = h->ip_ttl;
h->ip_ttl = min_ttl;
h->ip_ttl = pd->act.min_ttl;
h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
}
/* Enforce tos */
if (flags & PFSTATE_SETTOS) {
if (pd->act.flags & PFSTATE_SETTOS) {
u_int16_t ov, nv;
ov = *(u_int16_t *)h;
h->ip_tos = tos | (h->ip_tos & IPTOS_ECN_MASK);
h->ip_tos = pd->act.set_tos | (h->ip_tos & IPTOS_ECN_MASK);
nv = *(u_int16_t *)h;
h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
}
/* random-id, but not for fragments */
if (flags & PFSTATE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
if (pd->act.flags & PFSTATE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
uint16_t ip_id = h->ip_id;
ip_fillid(h);
@@ -2092,19 +2062,19 @@ pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
#ifdef INET6
void
pf_scrub_ip6(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
pf_scrub_ip6(struct mbuf **m0, struct pf_pdesc *pd)
{
struct mbuf *m = *m0;
struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
/* Enforce a minimum ttl, may cause endless packet loops */
if (min_ttl && h->ip6_hlim < min_ttl)
h->ip6_hlim = min_ttl;
if (pd->act.min_ttl && h->ip6_hlim < pd->act.min_ttl)
h->ip6_hlim = pd->act.min_ttl;
/* Enforce tos. Set traffic class bits */
if (flags & PFSTATE_SETTOS) {
if (pd->act.flags & PFSTATE_SETTOS) {
h->ip6_flow &= IPV6_FLOWLABEL_MASK | IPV6_VERSION_MASK;
h->ip6_flow |= htonl((tos | IPV6_ECN(h)) << 20);
h->ip6_flow |= htonl((pd->act.set_tos | IPV6_ECN(h)) << 20);
}
}
#endif
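
For reference, the flag handling that pf_rule_to_actions() absorbs from
the deleted pf_rule_to_scrub_flags() reduces to the sketch below. The
flag values and reduced structs are stand-ins for illustration, not the
pfvar.h definitions, and all other rule fields handled by
pf_rule_to_actions() are elided.

#include <stdint.h>

/* Stand-in flag values for illustration; the real ones live in pfvar.h. */
#define PFSTATE_NODF		0x0001
#define PFSTATE_RANDOMID	0x0002
#define PFSTATE_SETTOS		0x0004
#define PFSTATE_SCRUB_TCP	0x0008
#define PFSTATE_SETPRIO		0x0010
#define PFRULE_RANDOMID		0x0040	/* old-style scrub rule flag */
#define PFRULE_SET_TOS		0x0080	/* old-style scrub rule flag */

struct rule { uint32_t rule_flag; uint16_t scrub_flags; uint8_t set_tos; };
struct actions { uint16_t flags; uint8_t set_tos; };

/*
 * Modern match/pass rules already carry PFSTATE_* flags and are copied
 * through; legacy scrub rules carry PFRULE_* flags and are translated,
 * so pf_scrub_ip()/pf_scrub_ip6() only ever consult pd->act.
 */
static void
rule_to_actions(const struct rule *r, struct actions *a)
{
	a->flags |= r->scrub_flags & (PFSTATE_NODF | PFSTATE_RANDOMID |
	    PFSTATE_SETTOS | PFSTATE_SCRUB_TCP | PFSTATE_SETPRIO);

	if (r->rule_flag & PFRULE_RANDOMID)
		a->flags |= PFSTATE_RANDOMID;
	if ((r->scrub_flags & PFSTATE_SETTOS) ||
	    (r->rule_flag & PFRULE_SET_TOS)) {
		a->flags |= PFSTATE_SETTOS;
		a->set_tos = r->set_tos;
	}
}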