tcp: provide macros to access inpcb and socket from a tcpcb

There should be no functional changes with this commit.

Reviewed by:		rscheff
Differential Revision:	https://reviews.freebsd.org/D37123
This commit is contained in:
Gleb Smirnoff 2022-11-08 10:24:40 -08:00
parent f71cb9f748
commit 9eb0e8326d
32 changed files with 430 additions and 471 deletions

View file

@ -306,7 +306,7 @@ static void
assign_rxopt(struct tcpcb *tp, uint16_t opt)
{
struct toepcb *toep = tp->t_toe;
struct inpcb *inp = tp->t_inpcb;
struct inpcb *inp = tptoinpcb(tp);
struct adapter *sc = td_adapter(toep->td);
INP_LOCK_ASSERT(inp);
@ -442,7 +442,7 @@ void
t4_rcvd_locked(struct toedev *tod, struct tcpcb *tp)
{
struct adapter *sc = tod->tod_softc;
struct inpcb *inp = tp->t_inpcb;
struct inpcb *inp = tptoinpcb(tp);
struct socket *so = inp->inp_socket;
struct sockbuf *sb = &so->so_rcv;
struct toepcb *toep = tp->t_toe;
@ -466,7 +466,7 @@ t4_rcvd_locked(struct toedev *tod, struct tcpcb *tp)
void
t4_rcvd(struct toedev *tod, struct tcpcb *tp)
{
struct inpcb *inp = tp->t_inpcb;
struct inpcb *inp = tptoinpcb(tp);
struct socket *so = inp->inp_socket;
struct sockbuf *sb = &so->so_rcv;
@ -1276,7 +1276,7 @@ t4_tod_output(struct toedev *tod, struct tcpcb *tp)
{
struct adapter *sc = tod->tod_softc;
#ifdef INVARIANTS
struct inpcb *inp = tp->t_inpcb;
struct inpcb *inp = tptoinpcb(tp);
#endif
struct toepcb *toep = tp->t_toe;
@ -1295,7 +1295,7 @@ t4_send_fin(struct toedev *tod, struct tcpcb *tp)
{
struct adapter *sc = tod->tod_softc;
#ifdef INVARIANTS
struct inpcb *inp = tp->t_inpcb;
struct inpcb *inp = tptoinpcb(tp);
#endif
struct toepcb *toep = tp->t_toe;
@ -1316,7 +1316,7 @@ t4_send_rst(struct toedev *tod, struct tcpcb *tp)
{
struct adapter *sc = tod->tod_softc;
#if defined(INVARIANTS)
struct inpcb *inp = tp->t_inpcb;
struct inpcb *inp = tptoinpcb(tp);
#endif
struct toepcb *toep = tp->t_toe;

View file

@ -519,7 +519,7 @@ t4_listen_start(struct toedev *tod, struct tcpcb *tp)
struct adapter *sc = tod->tod_softc;
struct vi_info *vi;
struct port_info *pi;
struct inpcb *inp = tp->t_inpcb;
struct inpcb *inp = tptoinpcb(tp);
struct listen_ctx *lctx;
int i, rc, v;
struct offload_settings settings;
@ -615,7 +615,7 @@ t4_listen_stop(struct toedev *tod, struct tcpcb *tp)
{
struct listen_ctx *lctx;
struct adapter *sc = tod->tod_softc;
struct inpcb *inp = tp->t_inpcb;
struct inpcb *inp = tptoinpcb(tp);
INP_WLOCK_ASSERT(inp);

View file

@ -366,7 +366,7 @@ static void
t4_pcb_detach(struct toedev *tod __unused, struct tcpcb *tp)
{
#if defined(KTR) || defined(INVARIANTS)
struct inpcb *inp = tp->t_inpcb;
struct inpcb *inp = tptoinpcb(tp);
#endif
struct toepcb *toep = tp->t_toe;
@ -820,7 +820,7 @@ t4_tcp_info(struct toedev *tod, struct tcpcb *tp, struct tcp_info *ti)
struct adapter *sc = tod->tod_softc;
struct toepcb *toep = tp->t_toe;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
MPASS(ti != NULL);
fill_tcp_info(sc, toep->tid, ti);
@ -833,7 +833,7 @@ t4_alloc_tls_session(struct toedev *tod, struct tcpcb *tp,
{
struct toepcb *toep = tp->t_toe;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
MPASS(tls != NULL);
return (tls_alloc_ktls(toep, tls, direction));
@ -918,7 +918,7 @@ t4_pmtu_update(struct toedev *tod, struct tcpcb *tp, tcp_seq seq, int mtu)
struct ulp_txpkt *ulpmc;
int idx, len;
struct wrq_cookie cookie;
struct inpcb *inp = tp->t_inpcb;
struct inpcb *inp = tptoinpcb(tp);
struct toepcb *toep = tp->t_toe;
struct adapter *sc = td_adapter(toep->td);
unsigned short *mtus = &sc->params.mtus[0];

View file

@ -3225,7 +3225,7 @@ ktls_disable_ifnet(void *arg)
struct ktls_session *tls;
tp = arg;
inp = tp->t_inpcb;
inp = tptoinpcb(tp);
INP_WLOCK_ASSERT(inp);
so = inp->inp_socket;
SOCK_LOCK(so);

View file

@ -297,7 +297,7 @@ cdg_cb_init(struct cc_var *ccv, void *ptr)
{
struct cdg *cdg_data;
INP_WLOCK_ASSERT(ccv->ccvc.tcp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(ccv->ccvc.tcp));
if (ptr == NULL) {
cdg_data = malloc(sizeof(struct cdg), M_CC_MEM, M_NOWAIT);
if (cdg_data == NULL)

View file

@ -324,7 +324,7 @@ chd_cb_init(struct cc_var *ccv, void *ptr)
{
struct chd *chd_data;
INP_WLOCK_ASSERT(ccv->ccvc.tcp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(ccv->ccvc.tcp));
if (ptr == NULL) {
chd_data = malloc(sizeof(struct chd), M_CC_MEM, M_NOWAIT);
if (chd_data == NULL)

View file

@ -150,8 +150,8 @@ cubic_log_hystart_event(struct cc_var *ccv, struct cubic *cubicd, uint8_t mod, u
log.u_bbr.delivered = cubicd->css_lowrtt_fas;
log.u_bbr.pkt_epoch = ccv->flags;
TCP_LOG_EVENTP(tp, NULL,
&tp->t_inpcb->inp_socket->so_rcv,
&tp->t_inpcb->inp_socket->so_snd,
&tptosocket(tp)->so_rcv,
&tptosocket(tp)->so_snd,
TCP_HYSTART, 0,
0, &log, false, &tv);
}
@ -387,7 +387,7 @@ cubic_cb_init(struct cc_var *ccv, void *ptr)
{
struct cubic *cubic_data;
INP_WLOCK_ASSERT(ccv->ccvc.tcp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(ccv->ccvc.tcp));
if (ptr == NULL) {
cubic_data = malloc(sizeof(struct cubic), M_CC_MEM, M_NOWAIT|M_ZERO);
if (cubic_data == NULL)

View file

@ -204,7 +204,7 @@ dctcp_cb_init(struct cc_var *ccv, void *ptr)
{
struct dctcp *dctcp_data;
INP_WLOCK_ASSERT(ccv->ccvc.tcp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(ccv->ccvc.tcp));
if (ptr == NULL) {
dctcp_data = malloc(sizeof(struct dctcp), M_CC_MEM, M_NOWAIT|M_ZERO);
if (dctcp_data == NULL)

View file

@ -256,7 +256,7 @@ htcp_cb_init(struct cc_var *ccv, void *ptr)
{
struct htcp *htcp_data;
INP_WLOCK_ASSERT(ccv->ccvc.tcp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(ccv->ccvc.tcp));
if (ptr == NULL) {
htcp_data = malloc(sizeof(struct htcp), M_CC_MEM, M_NOWAIT);
if (htcp_data == NULL)

View file

@ -160,8 +160,8 @@ newreno_log_hystart_event(struct cc_var *ccv, struct newreno *nreno, uint8_t mod
log.u_bbr.delivered = nreno->css_lowrtt_fas;
log.u_bbr.pkt_epoch = ccv->flags;
TCP_LOG_EVENTP(tp, NULL,
&tp->t_inpcb->inp_socket->so_rcv,
&tp->t_inpcb->inp_socket->so_snd,
&tptosocket(tp)->so_rcv,
&tptosocket(tp)->so_snd,
TCP_HYSTART, 0,
0, &log, false, &tv);
}
@ -178,7 +178,7 @@ newreno_cb_init(struct cc_var *ccv, void *ptr)
{
struct newreno *nreno;
INP_WLOCK_ASSERT(ccv->ccvc.tcp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(ccv->ccvc.tcp));
if (ptr == NULL) {
ccv->cc_data = malloc(sizeof(struct newreno), M_CC_MEM, M_NOWAIT);
if (ccv->cc_data == NULL)

View file

@ -187,7 +187,7 @@ vegas_cb_init(struct cc_var *ccv, void *ptr)
{
struct vegas *vegas_data;
INP_WLOCK_ASSERT(ccv->ccvc.tcp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(ccv->ccvc.tcp));
if (ptr == NULL) {
vegas_data = malloc(sizeof(struct vegas), M_CC_MEM, M_NOWAIT);
if (vegas_data == NULL)

View file

@ -219,7 +219,7 @@ ertt_packet_measurement_hook(int hhook_type, int hhook_id, void *udata,
measurenext = measurenext_len = multiack = rts = rtt_bytes_adjust = 0;
acked = th->th_ack - tp->snd_una;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
/* Packet has provided new acknowledgements. */
if (acked > 0 || new_sacked_bytes) {
@ -452,7 +452,7 @@ ertt_add_tx_segment_info_hook(int hhook_type, int hhook_id, void *udata,
len = thdp->len;
tso = thdp->tso;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
if (len > 0) {
txsi = uma_zalloc(txseginfo_zone, M_NOWAIT);

View file

@ -867,7 +867,7 @@ sysctl_net_inet_tcp_fastopen_client_enable(SYSCTL_HANDLER_ARGS)
void
tcp_fastopen_connect(struct tcpcb *tp)
{
struct inpcb *inp;
struct inpcb *inp = tptoinpcb(tp);
struct tcp_fastopen_ccache_bucket *ccb;
struct tcp_fastopen_ccache_entry *cce;
sbintime_t now;
@ -875,7 +875,6 @@ tcp_fastopen_connect(struct tcpcb *tp)
uint64_t psk_cookie;
psk_cookie = 0;
inp = tp->t_inpcb;
cce = tcp_fastopen_ccache_lookup(&inp->inp_inc, &ccb);
if (cce) {
if (cce->disable_time == 0) {
@ -955,7 +954,7 @@ tcp_fastopen_connect(struct tcpcb *tp)
void
tcp_fastopen_disable_path(struct tcpcb *tp)
{
struct in_conninfo *inc = &tp->t_inpcb->inp_inc;
struct in_conninfo *inc = &tptoinpcb(tp)->inp_inc;
struct tcp_fastopen_ccache_bucket *ccb;
struct tcp_fastopen_ccache_entry *cce;
@ -981,7 +980,7 @@ void
tcp_fastopen_update_cache(struct tcpcb *tp, uint16_t mss,
uint8_t cookie_len, uint8_t *cookie)
{
struct in_conninfo *inc = &tp->t_inpcb->inp_inc;
struct in_conninfo *inc = &tptoinpcb(tp)->inp_inc;
struct tcp_fastopen_ccache_bucket *ccb;
struct tcp_fastopen_ccache_entry *cce;

View file

@ -463,8 +463,8 @@ tcp_hpts_log(struct tcp_hpts_entry *hpts, struct tcpcb *tp, struct timeval *tv,
log.u_bbr.pkt_epoch = hpts->p_runningslot;
log.u_bbr.use_lt_bw = 1;
TCP_LOG_EVENTP(tp, NULL,
&tp->t_inpcb->inp_socket->so_rcv,
&tp->t_inpcb->inp_socket->so_snd,
&tptosocket(tp)->so_rcv,
&tptosocket(tp)->so_snd,
BBR_LOG_HPTSDIAG, 0,
0, &log, false, tv);
}

View file

@ -299,7 +299,7 @@ cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t nsegs,
int32_t gput;
#endif
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
tp->ccv->nsegs = nsegs;
tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
@ -366,11 +366,11 @@ void
cc_conn_init(struct tcpcb *tp)
{
struct hc_metrics_lite metrics;
struct inpcb *inp = tp->t_inpcb;
struct inpcb *inp = tptoinpcb(tp);
u_int maxseg;
int rtt;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(inp);
tcp_hc_get(&inp->inp_inc, &metrics);
maxseg = tcp_maxseg(tp);
@ -421,7 +421,7 @@ cc_conn_init(struct tcpcb *tp)
void inline
cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
{
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
#ifdef STATS
stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type);
@ -482,7 +482,7 @@ cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
void inline
cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
{
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
/* XXXLAS: KASSERT that we're in recovery? */
@ -514,7 +514,7 @@ cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
void inline
cc_ecnpkt_handler_flags(struct tcpcb *tp, uint16_t flags, uint8_t iptos)
{
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
if (CC_ALGO(tp)->ecnpkt_handler != NULL) {
switch (iptos & IPTOS_ECN_MASK) {
@ -1501,10 +1501,10 @@ static void
tcp_handle_wakeup(struct tcpcb *tp)
{
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
if (tp->t_flags & TF_WAKESOR) {
struct socket *so = tp->t_inpcb->inp_socket;
struct socket *so = tptosocket(tp);
tp->t_flags &= ~TF_WAKESOR;
SOCKBUF_LOCK_ASSERT(&so->so_rcv);
@ -1522,7 +1522,8 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
uint32_t tiwin;
uint16_t nsegs;
char *s;
struct in_conninfo *inc;
struct inpcb *inp = tptoinpcb(tp);
struct in_conninfo *inc = &inp->inp_inc;
struct mbuf *mfree;
struct tcpopt to;
int tfo_syn;
@ -1538,13 +1539,12 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
short ostate = 0;
#endif
thflags = tcp_get_flags(th);
inc = &tp->t_inpcb->inp_inc;
tp->sackhint.last_sack_ack = 0;
sack_changed = 0;
nsegs = max(1, m->m_pkthdr.lro_nsegs);
NET_EPOCH_ASSERT();
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(inp);
KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
__func__));
KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
@ -1683,7 +1683,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (to.to_flags & TOF_MSS)
mss = to.to_mss;
else
if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
if ((inp->inp_vflag & INP_IPV6) != 0)
mss = TCP6_MSS;
else
mss = TCP_MSS;
@ -2848,7 +2848,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
}
process_ACK:
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(inp);
/*
* Adjust for the SYN bit in sequence space,
@ -3043,7 +3043,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
}
step6:
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(inp);
/*
* Update window information.
@ -3128,7 +3128,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
tp->rcv_up = tp->rcv_nxt;
}
dodata: /* XXX */
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(inp);
/*
* Process the segment text, merging it into the TCP sequencing queue,
@ -3313,13 +3313,13 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
(void) tcp_output(tp);
check_delack:
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(inp);
if (tp->t_flags & TF_DELACK) {
tp->t_flags &= ~TF_DELACK;
tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
}
INP_WUNLOCK(tp->t_inpcb);
INP_WUNLOCK(inp);
return;
dropafterack:
@ -3353,14 +3353,14 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
TCP_PROBE3(debug__input, tp, th, m);
tp->t_flags |= TF_ACKNOW;
(void) tcp_output(tp);
INP_WUNLOCK(tp->t_inpcb);
INP_WUNLOCK(inp);
m_freem(m);
return;
dropwithreset:
if (tp != NULL) {
tcp_dropwithreset(m, th, tp, tlen, rstreason);
INP_WUNLOCK(tp->t_inpcb);
INP_WUNLOCK(inp);
} else
tcp_dropwithreset(m, th, NULL, tlen, rstreason);
return;
@ -3370,13 +3370,13 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
* Drop space held by incoming segment and return.
*/
#ifdef TCPDEBUG
if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen,
&tcp_savetcp, 0);
#endif
TCP_PROBE3(debug__input, tp, th, m);
if (tp != NULL) {
INP_WUNLOCK(tp->t_inpcb);
INP_WUNLOCK(inp);
}
m_freem(m);
}
@ -3398,7 +3398,7 @@ tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp,
#endif
if (tp != NULL) {
INP_LOCK_ASSERT(tp->t_inpcb);
INP_LOCK_ASSERT(tptoinpcb(tp));
}
/* Don't bother if destination was broadcast/multicast. */
@ -3570,7 +3570,7 @@ tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
char *cp = mtod(m, caddr_t) + cnt;
struct tcpcb *tp = sototcpcb(so);
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
tp->t_iobc = *cp;
tp->t_oobflags |= TCPOOB_HAVEDATA;
@ -3597,7 +3597,7 @@ tcp_xmit_timer(struct tcpcb *tp, int rtt)
{
int delta;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
TCPSTAT_INC(tcps_rttupdated);
tp->t_rttupdated++;
@ -3700,7 +3700,7 @@ tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer,
{
int mss = 0;
uint32_t maxmtu = 0;
struct inpcb *inp = tp->t_inpcb;
struct inpcb *inp = tptoinpcb(tp);
struct hc_metrics_lite metrics;
#ifdef INET6
int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0;
@ -3711,7 +3711,7 @@ tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer,
size_t min_protoh = sizeof(struct tcpiphdr);
#endif
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(inp);
if (tp->t_port)
min_protoh += V_tcp_udp_tunneling_overhead;
@ -3848,7 +3848,7 @@ tcp_mss(struct tcpcb *tp, int offer)
{
int mss;
uint32_t bufsize;
struct inpcb *inp;
struct inpcb *inp = tptoinpcb(tp);
struct socket *so;
struct hc_metrics_lite metrics;
struct tcp_ifcap cap;
@ -3859,7 +3859,6 @@ tcp_mss(struct tcpcb *tp, int offer)
tcp_mss_update(tp, offer, -1, &metrics, &cap);
mss = tp->t_maxseg;
inp = tp->t_inpcb;
/*
* If there's a pipesize, change the socket buffer to that size,
@ -3965,7 +3964,7 @@ tcp_do_prr_ack(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
int snd_cnt = 0, limit = 0, del_data = 0, pipe = 0;
int maxseg = tcp_maxseg(tp);
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
/*
* Compute the amount of data that this ACK is indicating
@ -4042,7 +4041,7 @@ tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
uint32_t ocwnd = tp->snd_cwnd;
u_int maxseg = tcp_maxseg(tp);
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
tcp_timer_activate(tp, TT_REXMT, 0);
tp->t_rtttime = 0;

View file

@ -499,7 +499,7 @@ static void
tcp_log_grow_tlb(char *tlb_id, struct tcpcb *tp)
{
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
#ifdef STATS
if (V_tcp_perconn_stats_enable == 2 && tp->t_stats == NULL)
@ -522,20 +522,21 @@ tcp_log_increment_reqcnt(struct tcp_log_id_bucket *tlb)
int
tcp_log_set_tag(struct tcpcb *tp, char *tag)
{
struct inpcb *inp = tptoinpcb(tp);
struct tcp_log_id_bucket *tlb;
int tree_locked;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(inp);
tree_locked = TREE_UNLOCKED;
tlb = tp->t_lib;
if (tlb == NULL) {
INP_WUNLOCK(tp->t_inpcb);
INP_WUNLOCK(inp);
return (EOPNOTSUPP);
}
TCPID_BUCKET_REF(tlb);
INP_WUNLOCK(tp->t_inpcb);
INP_WUNLOCK(inp);
TCPID_BUCKET_LOCK(tlb);
strlcpy(tlb->tlb_tag, tag, TCP_LOG_TAG_LEN);
if (!tcp_log_unref_bucket(tlb, &tree_locked, NULL))
@ -562,13 +563,12 @@ tcp_log_set_id(struct tcpcb *tp, char *id)
{
struct tcp_log_id_bucket *tlb, *tmp_tlb;
struct tcp_log_id_node *tln;
struct inpcb *inp;
struct inpcb *inp = tptoinpcb(tp);
int tree_locked, rv;
bool bucket_locked;
tlb = NULL;
tln = NULL;
inp = tp->t_inpcb;
tree_locked = TREE_UNLOCKED;
bucket_locked = false;
@ -922,7 +922,7 @@ tcp_log_get_id(struct tcpcb *tp, char *buf)
{
size_t len;
INP_LOCK_ASSERT(tp->t_inpcb);
INP_LOCK_ASSERT(tptoinpcb(tp));
if (tp->t_lib != NULL) {
len = strlcpy(buf, tp->t_lib->tlb_id, TCP_LOG_ID_LEN);
KASSERT(len < TCP_LOG_ID_LEN,
@ -944,18 +944,19 @@ tcp_log_get_id(struct tcpcb *tp, char *buf)
size_t
tcp_log_get_tag(struct tcpcb *tp, char *buf)
{
struct inpcb *inp = tptoinpcb(tp);
struct tcp_log_id_bucket *tlb;
size_t len;
int tree_locked;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(inp);
tree_locked = TREE_UNLOCKED;
tlb = tp->t_lib;
if (tlb != NULL) {
TCPID_BUCKET_REF(tlb);
INP_WUNLOCK(tp->t_inpcb);
INP_WUNLOCK(inp);
TCPID_BUCKET_LOCK(tlb);
len = strlcpy(buf, tlb->tlb_tag, TCP_LOG_TAG_LEN);
KASSERT(len < TCP_LOG_TAG_LEN,
@ -973,7 +974,7 @@ tcp_log_get_tag(struct tcpcb *tp, char *buf)
} else
TCPID_TREE_UNLOCK_ASSERT();
} else {
INP_WUNLOCK(tp->t_inpcb);
INP_WUNLOCK(inp);
*buf = '\0';
len = 0;
}
@ -990,7 +991,7 @@ u_int
tcp_log_get_id_cnt(struct tcpcb *tp)
{
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
return ((tp->t_lib == NULL) ? 0 : tp->t_lib->tlb_refcnt);
}
@ -1298,11 +1299,12 @@ tcp_log_expire(void *unused __unused)
static void
tcp_log_move_tp_to_node(struct tcpcb *tp, struct tcp_log_id_node *tln)
{
struct inpcb *inp = tptoinpcb(tp);
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(inp);
tln->tln_ie = tp->t_inpcb->inp_inc.inc_ie;
if (tp->t_inpcb->inp_inc.inc_flags & INC_ISIPV6)
tln->tln_ie = inp->inp_inc.inc_ie;
if (inp->inp_inc.inc_flags & INC_ISIPV6)
tln->tln_af = AF_INET6;
else
tln->tln_af = AF_INET;
@ -1323,7 +1325,7 @@ tcp_log_tcpcbfini(struct tcpcb *tp)
struct tcp_log_mem *log_entry;
sbintime_t callouttime;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
TCP_LOG_EVENT(tp, NULL, NULL, NULL, TCP_LOG_CONNEND, 0, 0, NULL, false);
@ -1383,11 +1385,13 @@ tcp_log_tcpcbfini(struct tcpcb *tp)
*/
if (tp->t_lin != NULL) {
struct inpcb *inp = tptoinpcb(tp);
/* Copy the relevant information to the log entry. */
tln = tp->t_lin;
KASSERT(tln->tln_inp == tp->t_inpcb,
("%s: Mismatched inp (tln->tln_inp=%p, tp->t_inpcb=%p)",
__func__, tln->tln_inp, tp->t_inpcb));
KASSERT(tln->tln_inp == inp,
("%s: Mismatched inp (tln->tln_inp=%p, tp inpcb=%p)",
__func__, tln->tln_inp, inp));
tcp_log_move_tp_to_node(tp, tln);
/* Clear information from the PCB. */
@ -1401,7 +1405,7 @@ tcp_log_tcpcbfini(struct tcpcb *tp)
* racing to lock this node when we move it to the expire
* queue.
*/
in_pcbref(tp->t_inpcb);
in_pcbref(inp);
/*
* Store the entry on the expiry list. The exact behavior
@ -1496,10 +1500,8 @@ static void
tcp_log_purge_tp_logbuf(struct tcpcb *tp)
{
struct tcp_log_mem *log_entry;
struct inpcb *inp __diagused;
inp = tp->t_inpcb;
INP_WLOCK_ASSERT(inp);
INP_WLOCK_ASSERT(tptoinpcb(tp));
if (tp->t_lognum == 0)
return;
@ -1533,7 +1535,7 @@ tcp_log_event_(struct tcpcb *tp, struct tcphdr *th, struct sockbuf *rxbuf,
("%s called with inconsistent func (%p) and line (%d) arguments",
__func__, func, line));
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
if (tcp_disable_all_bb_logs) {
/*
* The global shutdown logging
@ -1748,7 +1750,7 @@ tcp_log_state_change(struct tcpcb *tp, int state)
{
struct tcp_log_mem *log_entry;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
switch(state) {
case TCP_LOG_STATE_CLEAR:
while ((log_entry = STAILQ_FIRST(&tp->t_logs)) != NULL)
@ -1786,7 +1788,7 @@ tcp_log_drain(struct tcpcb *tp)
struct tcp_log_mem *log_entry, *next;
int target, skip;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
if ((target = tp->t_lognum / 2) == 0)
return;
@ -1930,12 +1932,11 @@ tcp_log_getlogbuf(struct sockopt *sopt, struct tcpcb *tp)
struct tcp_log_stailq log_tailq;
struct tcp_log_mem *log_entry, *log_next;
struct tcp_log_buffer *out_entry;
struct inpcb *inp;
struct inpcb *inp = tptoinpcb(tp);
size_t outsize, entrysize;
int error, outnum;
INP_WLOCK_ASSERT(tp->t_inpcb);
inp = tp->t_inpcb;
INP_WLOCK_ASSERT(inp);
/*
* Determine which log entries will fit in the buffer. As an
@ -2153,12 +2154,11 @@ int
tcp_log_dump_tp_logbuf(struct tcpcb *tp, char *reason, int how, bool force)
{
struct tcp_log_dev_log_queue *entry;
struct inpcb *inp;
struct inpcb *inp = tptoinpcb(tp);
#ifdef TCPLOG_DEBUG_COUNTERS
int num_entries;
#endif
inp = tp->t_inpcb;
INP_WLOCK_ASSERT(inp);
/* If there are no log entries, there is nothing to do. */
@ -2586,11 +2586,12 @@ tcp_log_dumpbucketlogs(struct tcp_log_id_bucket *tlb, char *reason)
void
tcp_log_dump_tp_bucket_logbufs(struct tcpcb *tp, char *reason)
{
struct inpcb *inp = tptoinpcb(tp);
struct tcp_log_id_bucket *tlb;
int tree_locked;
/* Figure out our bucket and lock it. */
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(inp);
tlb = tp->t_lib;
if (tlb == NULL) {
/*
@ -2598,11 +2599,11 @@ tcp_log_dump_tp_bucket_logbufs(struct tcpcb *tp, char *reason)
* session's traces.
*/
(void)tcp_log_dump_tp_logbuf(tp, reason, M_WAITOK, true);
INP_WUNLOCK(tp->t_inpcb);
INP_WUNLOCK(inp);
return;
}
TCPID_BUCKET_REF(tlb);
INP_WUNLOCK(tp->t_inpcb);
INP_WUNLOCK(inp);
TCPID_BUCKET_LOCK(tlb);
/* If we are the last reference, we have nothing more to do here. */
@ -2632,7 +2633,7 @@ void
tcp_log_flowend(struct tcpcb *tp)
{
if (tp->t_logstate != TCP_LOG_STATE_OFF) {
struct socket *so = tp->t_inpcb->inp_socket;
struct socket *so = tptosocket(tp);
TCP_LOG_EVENT(tp, NULL, &so->so_rcv, &so->so_snd,
TCP_LOG_FLOWEND, 0, 0, NULL, false);
}

View file

@ -718,11 +718,9 @@ tcp_lro_log(struct tcpcb *tp, const struct lro_ctrl *lc,
log.u_bbr.inhpts = 1;
else
log.u_bbr.inhpts = 0;
TCP_LOG_EVENTP(tp, NULL,
&tp->t_inpcb->inp_socket->so_rcv,
&tp->t_inpcb->inp_socket->so_snd,
TCP_LOG_LRO, 0,
0, &log, false, &tv);
TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv,
&tptosocket(tp)->so_snd,
TCP_LOG_LRO, 0, 0, &log, false, &tv);
}
}
#endif

View file

@ -114,7 +114,7 @@ void
tcp_offload_listen_start(struct tcpcb *tp)
{
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
EVENTHANDLER_INVOKE(tcp_offload_listen_start, tp);
}
@ -123,7 +123,7 @@ void
tcp_offload_listen_stop(struct tcpcb *tp)
{
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
EVENTHANDLER_INVOKE(tcp_offload_listen_stop, tp);
}
@ -134,7 +134,7 @@ tcp_offload_input(struct tcpcb *tp, struct mbuf *m)
struct toedev *tod = tp->tod;
KASSERT(tod != NULL, ("%s: tp->tod is NULL, tp %p", __func__, tp));
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
tod->tod_input(tod, tp, m);
}
@ -146,7 +146,7 @@ tcp_offload_output(struct tcpcb *tp)
int error, flags;
KASSERT(tod != NULL, ("%s: tp->tod is NULL, tp %p", __func__, tp));
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
flags = tcp_outflags[tp->t_state];
@ -170,7 +170,7 @@ tcp_offload_rcvd(struct tcpcb *tp)
struct toedev *tod = tp->tod;
KASSERT(tod != NULL, ("%s: tp->tod is NULL, tp %p", __func__, tp));
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
tod->tod_rcvd(tod, tp);
}
@ -181,7 +181,7 @@ tcp_offload_ctloutput(struct tcpcb *tp, int sopt_dir, int sopt_name)
struct toedev *tod = tp->tod;
KASSERT(tod != NULL, ("%s: tp->tod is NULL, tp %p", __func__, tp));
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
tod->tod_ctloutput(tod, tp, sopt_dir, sopt_name);
}
@ -192,7 +192,7 @@ tcp_offload_tcp_info(struct tcpcb *tp, struct tcp_info *ti)
struct toedev *tod = tp->tod;
KASSERT(tod != NULL, ("%s: tp->tod is NULL, tp %p", __func__, tp));
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
tod->tod_tcp_info(tod, tp, ti);
}
@ -204,7 +204,7 @@ tcp_offload_alloc_tls_session(struct tcpcb *tp, struct ktls_session *tls,
struct toedev *tod = tp->tod;
KASSERT(tod != NULL, ("%s: tp->tod is NULL, tp %p", __func__, tp));
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
return (tod->tod_alloc_tls_session(tod, tp, tls, direction));
}
@ -215,7 +215,7 @@ tcp_offload_detach(struct tcpcb *tp)
struct toedev *tod = tp->tod;
KASSERT(tod != NULL, ("%s: tp->tod is NULL, tp %p", __func__, tp));
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
tod->tod_pcb_detach(tod, tp);
}
@ -226,7 +226,7 @@ tcp_offload_pmtu_update(struct tcpcb *tp, tcp_seq seq, int mtu)
struct toedev *tod = tp->tod;
KASSERT(tod != NULL, ("%s: tp->tod is NULL, tp %p", __func__, tp));
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
tod->tod_pmtu_update(tod, tp, seq, mtu);
}

View file

@ -184,7 +184,7 @@ hhook_run_tcp_est_out(struct tcpcb *tp, struct tcphdr *th,
void
cc_after_idle(struct tcpcb *tp)
{
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
if (CC_ALGO(tp)->after_idle != NULL)
CC_ALGO(tp)->after_idle(tp->ccv);
@ -196,7 +196,8 @@ cc_after_idle(struct tcpcb *tp)
int
tcp_default_output(struct tcpcb *tp)
{
struct socket *so = tp->t_inpcb->inp_socket;
struct socket *so = tptosocket(tp);
struct inpcb *inp = tptoinpcb(tp);
int32_t len;
uint32_t recwin, sendwin;
uint16_t flags;
@ -230,7 +231,7 @@ tcp_default_output(struct tcpcb *tp)
struct ip6_hdr *ip6 = NULL;
int isipv6;
isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif
#ifdef KERN_TLS
const bool hw_tls = (so->so_snd.sb_flags & SB_TLS_IFNET) != 0;
@ -239,7 +240,7 @@ tcp_default_output(struct tcpcb *tp)
#endif
NET_EPOCH_ASSERT();
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(inp);
#ifdef TCP_OFFLOAD
if (tp->t_flags & TF_TOE)
@ -542,23 +543,23 @@ tcp_default_output(struct tcpcb *tp)
*/
#ifdef INET6
if (isipv6 && IPSEC_ENABLED(ipv6))
ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb);
ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp);
#ifdef INET
else
#endif
#endif /* INET6 */
#ifdef INET
if (IPSEC_ENABLED(ipv4))
ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb);
ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp);
#endif /* INET */
#endif /* IPSEC */
#ifdef INET6
if (isipv6)
ipoptlen = ip6_optlen(tp->t_inpcb);
ipoptlen = ip6_optlen(inp);
else
#endif
if (tp->t_inpcb->inp_options)
ipoptlen = tp->t_inpcb->inp_options->m_len -
if (inp->inp_options)
ipoptlen = inp->inp_options->m_len -
offsetof(struct ipoption, ipopt_list);
else
ipoptlen = 0;
@ -809,7 +810,7 @@ tcp_default_output(struct tcpcb *tp)
if ((tp->t_flags & TF_NOOPT) == 0) {
/* Maximum segment size. */
if (flags & TH_SYN) {
to.to_mss = tcp_mssopt(&tp->t_inpcb->inp_inc);
to.to_mss = tcp_mssopt(&inp->inp_inc);
if (tp->t_port)
to.to_mss -= V_tcp_udp_tunneling_overhead;
to.to_flags |= TOF_MSS;
@ -1154,7 +1155,7 @@ tcp_default_output(struct tcpcb *tp)
SOCKBUF_UNLOCK_ASSERT(&so->so_snd);
m->m_pkthdr.rcvif = (struct ifnet *)0;
#ifdef MAC
mac_inpcb_create_mbuf(tp->t_inpcb, m);
mac_inpcb_create_mbuf(inp, m);
#endif
#ifdef INET6
if (isipv6) {
@ -1169,7 +1170,7 @@ tcp_default_output(struct tcpcb *tp)
} else {
th = (struct tcphdr *)(ip6 + 1);
}
tcpip_fillheaders(tp->t_inpcb, tp->t_port, ip6, th);
tcpip_fillheaders(inp, tp->t_port, ip6, th);
} else
#endif /* INET6 */
{
@ -1186,7 +1187,7 @@ tcp_default_output(struct tcpcb *tp)
th = (struct tcphdr *)(udp + 1);
} else
th = (struct tcphdr *)(ip + 1);
tcpip_fillheaders(tp->t_inpcb, tp->t_port, ip, th);
tcpip_fillheaders(inp, tp->t_port, ip, th);
}
/*
@ -1467,7 +1468,7 @@ tcp_default_output(struct tcpcb *tp)
* Also, desired default hop limit might be changed via
* Neighbor Discovery.
*/
ip6->ip6_hlim = in6_selecthlim(tp->t_inpcb, NULL);
ip6->ip6_hlim = in6_selecthlim(inp, NULL);
/*
* Set the packet size here for the benefit of DTrace probes.
@ -1492,13 +1493,12 @@ tcp_default_output(struct tcpcb *tp)
#endif
/* TODO: IPv6 IP6TOS_ECT bit on */
error = ip6_output(m, tp->t_inpcb->in6p_outputopts,
&tp->t_inpcb->inp_route6,
error = ip6_output(m, inp->in6p_outputopts, &inp->inp_route6,
((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
NULL, NULL, tp->t_inpcb);
NULL, NULL, inp);
if (error == EMSGSIZE && tp->t_inpcb->inp_route6.ro_nh != NULL)
mtu = tp->t_inpcb->inp_route6.ro_nh->nh_mtu;
if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
mtu = inp->inp_route6.ro_nh->nh_mtu;
}
#endif /* INET6 */
#if defined(INET) && defined(INET6)
@ -1508,8 +1508,8 @@ tcp_default_output(struct tcpcb *tp)
{
ip->ip_len = htons(m->m_pkthdr.len);
#ifdef INET6
if (tp->t_inpcb->inp_vflag & INP_IPV6PROTO)
ip->ip_ttl = in6_selecthlim(tp->t_inpcb, NULL);
if (inp->inp_vflag & INP_IPV6PROTO)
ip->ip_ttl = in6_selecthlim(inp, NULL);
#endif /* INET6 */
/*
* If we do path MTU discovery, then we set DF on every packet.
@ -1538,12 +1538,11 @@ tcp_default_output(struct tcpcb *tp)
tcp_pcap_add(th, m, &(tp->t_outpkts));
#endif
error = ip_output(m, tp->t_inpcb->inp_options, &tp->t_inpcb->inp_route,
((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 0,
tp->t_inpcb);
error = ip_output(m, inp->inp_options, &inp->inp_route,
((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 0, inp);
if (error == EMSGSIZE && tp->t_inpcb->inp_route.ro_nh != NULL)
mtu = tp->t_inpcb->inp_route.ro_nh->nh_mtu;
if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
mtu = inp->inp_route.ro_nh->nh_mtu;
}
#endif /* INET */

View file

@ -1317,14 +1317,15 @@ const struct tcp_hwrate_limit_table *
tcp_set_pacing_rate(struct tcpcb *tp, struct ifnet *ifp,
uint64_t bytes_per_sec, int flags, int *error, uint64_t *lower_rate)
{
struct inpcb *inp = tptoinpcb(tp);
const struct tcp_hwrate_limit_table *rte;
#ifdef KERN_TLS
struct ktls_session *tls;
#endif
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(inp);
if (tp->t_inpcb->inp_snd_tag == NULL) {
if (inp->inp_snd_tag == NULL) {
/*
* We are setting up a rate for the first time.
*/
@ -1336,8 +1337,8 @@ tcp_set_pacing_rate(struct tcpcb *tp, struct ifnet *ifp,
}
#ifdef KERN_TLS
tls = NULL;
if (tp->t_inpcb->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) {
tls = tp->t_inpcb->inp_socket->so_snd.sb_tls_info;
if (tptosocket(tp)->so_snd.sb_flags & SB_TLS_IFNET) {
tls = tptosocket(tp)->so_snd.sb_tls_info;
if ((ifp->if_capenable & IFCAP_TXTLS_RTLMT) == 0 ||
tls->mode != TCP_TLS_MODE_IFNET) {
@ -1347,7 +1348,7 @@ tcp_set_pacing_rate(struct tcpcb *tp, struct ifnet *ifp,
}
}
#endif
rte = rt_setup_rate(tp->t_inpcb, ifp, bytes_per_sec, flags, error, lower_rate);
rte = rt_setup_rate(inp, ifp, bytes_per_sec, flags, error, lower_rate);
if (rte)
rl_increment_using(rte);
#ifdef KERN_TLS
@ -1358,7 +1359,7 @@ tcp_set_pacing_rate(struct tcpcb *tp, struct ifnet *ifp,
* tag to a TLS ratelimit tag.
*/
MPASS(tls->snd_tag->sw->type == IF_SND_TAG_TYPE_TLS);
ktls_output_eagain(tp->t_inpcb, tls);
ktls_output_eagain(inp, tls);
}
#endif
} else {
@ -1381,6 +1382,7 @@ tcp_chg_pacing_rate(const struct tcp_hwrate_limit_table *crte,
struct tcpcb *tp, struct ifnet *ifp,
uint64_t bytes_per_sec, int flags, int *error, uint64_t *lower_rate)
{
struct inpcb *inp = tptoinpcb(tp);
const struct tcp_hwrate_limit_table *nrte;
const struct tcp_rate_set *rs;
#ifdef KERN_TLS
@ -1388,7 +1390,7 @@ tcp_chg_pacing_rate(const struct tcp_hwrate_limit_table *crte,
#endif
int err;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(inp);
if (crte == NULL) {
/* Wrong interface */
@ -1398,8 +1400,8 @@ tcp_chg_pacing_rate(const struct tcp_hwrate_limit_table *crte,
}
#ifdef KERN_TLS
if (tp->t_inpcb->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) {
tls = tp->t_inpcb->inp_socket->so_snd.sb_tls_info;
if (tptosocket(tp)->so_snd.sb_flags & SB_TLS_IFNET) {
tls = tptosocket(tp)->so_snd.sb_tls_info;
if (tls->mode != TCP_TLS_MODE_IFNET)
tls = NULL;
else if (tls->snd_tag != NULL &&
@ -1427,7 +1429,7 @@ tcp_chg_pacing_rate(const struct tcp_hwrate_limit_table *crte,
}
}
#endif
if (tp->t_inpcb->inp_snd_tag == NULL) {
if (inp->inp_snd_tag == NULL) {
/* Wrong interface */
tcp_rel_pacing_rate(crte, tp);
if (error)
@ -1466,7 +1468,7 @@ tcp_chg_pacing_rate(const struct tcp_hwrate_limit_table *crte,
err = ktls_modify_txrtlmt(tls, nrte->rate);
else
#endif
err = in_pcbmodify_txrtlmt(tp->t_inpcb, nrte->rate);
err = in_pcbmodify_txrtlmt(inp, nrte->rate);
if (err) {
struct tcp_rate_set *lrs;
uint64_t pre;
@ -1475,8 +1477,8 @@ tcp_chg_pacing_rate(const struct tcp_hwrate_limit_table *crte,
lrs = __DECONST(struct tcp_rate_set *, rs);
pre = atomic_fetchadd_64(&lrs->rs_flows_using, -1);
/* Do we still have a snd-tag attached? */
if (tp->t_inpcb->inp_snd_tag)
in_pcbdetach_txrtlmt(tp->t_inpcb);
if (inp->inp_snd_tag)
in_pcbdetach_txrtlmt(inp);
if (pre == 1) {
struct epoch_tracker et;
@ -1508,11 +1510,12 @@ tcp_chg_pacing_rate(const struct tcp_hwrate_limit_table *crte,
void
tcp_rel_pacing_rate(const struct tcp_hwrate_limit_table *crte, struct tcpcb *tp)
{
struct inpcb *inp = tptoinpcb(tp);
const struct tcp_rate_set *crs;
struct tcp_rate_set *rs;
uint64_t pre;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(inp);
tp->t_pacing_rate = -1;
crs = crte->ptbl;
@ -1543,7 +1546,7 @@ tcp_rel_pacing_rate(const struct tcp_hwrate_limit_table *crte, struct tcpcb *tp)
* ktls_output_eagain() to reset the send tag to a plain
* TLS tag?
*/
in_pcbdetach_txrtlmt(tp->t_inpcb);
in_pcbdetach_txrtlmt(inp);
}
#define ONE_POINT_TWO_MEG 150000 /* 1.2 megabits in bytes */
@ -1573,8 +1576,8 @@ tcp_log_pacing_size(struct tcpcb *tp, uint64_t bw, uint32_t segsiz, uint32_t new
log.u_bbr.cur_del_rate = bw;
log.u_bbr.delRate = hw_rate;
TCP_LOG_EVENTP(tp, NULL,
&tp->t_inpcb->inp_socket->so_rcv,
&tp->t_inpcb->inp_socket->so_snd,
&tptosocket(tp)->so_rcv,
&tptosocket(tp)->so_snd,
TCP_HDWR_PACE_SIZE, 0,
0, &log, false, &tv);
}

View file

@ -203,6 +203,7 @@ static void
tcp_log_reassm(struct tcpcb *tp, struct tseg_qent *q, struct tseg_qent *p,
tcp_seq seq, int len, uint8_t action, int instance)
{
struct socket *so = tptosocket(tp);
uint32_t cts;
struct timeval tv;
@ -230,9 +231,7 @@ tcp_log_reassm(struct tcpcb *tp, struct tseg_qent *q, struct tseg_qent *p,
log.u_bbr.flex7 = instance;
log.u_bbr.flex8 = action;
log.u_bbr.timeStamp = cts;
TCP_LOG_EVENTP(tp, NULL,
&tp->t_inpcb->inp_socket->so_rcv,
&tp->t_inpcb->inp_socket->so_snd,
TCP_LOG_EVENTP(tp, NULL, &so->so_rcv, &so->so_snd,
TCP_LOG_REASS, 0,
len, &log, false, &tv);
}
@ -305,7 +304,7 @@ tcp_reass_flush(struct tcpcb *tp)
{
struct tseg_qent *qe;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
while ((qe = TAILQ_FIRST(&tp->t_segq)) != NULL) {
TAILQ_REMOVE(&tp->t_segq, qe, tqe_q);
@ -530,12 +529,13 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
struct tseg_qent *nq = NULL;
struct tseg_qent *te = NULL;
struct mbuf *mlast = NULL;
struct sockbuf *sb;
struct socket *so = tp->t_inpcb->inp_socket;
struct inpcb *inp = tptoinpcb(tp);
struct socket *so = tptosocket(tp);
struct sockbuf *sb = &so->so_rcv;
char *s = NULL;
int flags, i, lenofoh;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(inp);
/*
* XXX: tcp_reass() is rather inefficient with its data structures
* and should be rewritten (see NetBSD for optimizations).
@ -597,7 +597,6 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
* Will it fit?
*/
lenofoh = tcp_reass_overhead_of_chain(m, &mlast);
sb = &tp->t_inpcb->inp_socket->so_rcv;
if ((th->th_seq != tp->rcv_nxt || !TCPS_HAVEESTABLISHED(tp->t_state)) &&
(sb->sb_mbcnt + tp->t_segqmbuflen + lenofoh) > sb->sb_mbmax) {
/* No room */
@ -608,7 +607,7 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
#ifdef TCP_REASS_LOGGING
tcp_log_reassm(tp, NULL, NULL, th->th_seq, lenofoh, TCP_R_LOG_LIMIT_REACHED, 0);
#endif
if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) {
if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) {
log(LOG_DEBUG, "%s; %s: mbuf count limit reached, "
"segment dropped\n", s, __func__);
free(s, M_TCPLOG);
@ -987,7 +986,7 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
*/
TCPSTAT_INC(tcps_rcvreassfull);
*tlenp = 0;
if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) {
if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) {
log(LOG_DEBUG, "%s; %s: queue limit reached, "
"segment dropped\n", s, __func__);
free(s, M_TCPLOG);
@ -1003,7 +1002,7 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
tcp_reass_maxqueuelen)) {
TCPSTAT_INC(tcps_rcvreassfull);
*tlenp = 0;
if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) {
if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) {
log(LOG_DEBUG, "%s; %s: queue limit reached, "
"segment dropped\n", s, __func__);
free(s, M_TCPLOG);
@ -1024,8 +1023,7 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
TCPSTAT_INC(tcps_rcvmemdrop);
m_freem(m);
*tlenp = 0;
if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL,
NULL))) {
if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) {
log(LOG_DEBUG, "%s; %s: global zone limit "
"reached, segment dropped\n", s, __func__);
free(s, M_TCPLOG);

View file

@ -178,7 +178,7 @@ tcp_update_dsack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end)
int i, j, n, identical;
tcp_seq start, end;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
KASSERT(SEQ_LT(rcv_start, rcv_end), ("rcv_start < rcv_end"));
@ -279,7 +279,7 @@ tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end)
struct sackblk head_blk, saved_blks[MAX_SACK_BLKS];
int num_head, num_saved, i;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
/* Check arguments. */
KASSERT(SEQ_LEQ(rcv_start, rcv_end), ("rcv_start <= rcv_end"));
@ -410,7 +410,7 @@ tcp_clean_dsack_blocks(struct tcpcb *tp)
struct sackblk saved_blks[MAX_SACK_BLKS];
int num_saved, i;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
/*
* Clean up any DSACK blocks that
* are in our queue of sack blocks.
@ -451,7 +451,7 @@ tcp_clean_sackreport(struct tcpcb *tp)
{
int i;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
tp->rcv_numsacks = 0;
for (i = 0; i < MAX_SACK_BLKS; i++)
tp->sackblks[i].start = tp->sackblks[i].end=0;
@ -561,7 +561,7 @@ tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, tcp_seq th_ack)
int i, j, num_sack_blks, sack_changed;
int delivered_data, left_edge_delta;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
num_sack_blks = 0;
sack_changed = 0;
@ -830,7 +830,7 @@ tcp_free_sackholes(struct tcpcb *tp)
{
struct sackhole *q;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
while ((q = TAILQ_FIRST(&tp->snd_holes)) != NULL)
tcp_sackhole_remove(tp, q);
tp->sackhint.sack_bytes_rexmit = 0;
@ -854,7 +854,7 @@ tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th)
int num_segs = 1;
u_int maxseg = tcp_maxseg(tp);
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
tcp_timer_activate(tp, TT_REXMT, 0);
tp->t_rtttime = 0;
/* Send one or 2 segments based on how much new data was acked. */
@ -914,7 +914,7 @@ tcp_sack_output_debug(struct tcpcb *tp, int *sack_bytes_rexmt)
{
struct sackhole *p;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
*sack_bytes_rexmt = 0;
TAILQ_FOREACH(p, &tp->snd_holes, scblink) {
if (SEQ_LT(p->rxmit, p->end)) {
@ -952,7 +952,7 @@ tcp_sack_output(struct tcpcb *tp, int *sack_bytes_rexmt)
{
struct sackhole *hole = NULL;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
*sack_bytes_rexmt = tp->sackhint.sack_bytes_rexmit;
hole = tp->sackhint.nexthole;
if (hole == NULL)
@ -995,7 +995,7 @@ tcp_sack_adjust(struct tcpcb *tp)
{
struct sackhole *p, *cur = TAILQ_FIRST(&tp->snd_holes);
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
if (cur == NULL)
return; /* No holes */
if (SEQ_GEQ(tp->snd_nxt, tp->snd_fack))

View file

@ -584,7 +584,8 @@ bbr_timer_start(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts)
(tp->t_state < TCPS_ESTABLISHED)) {
/* Nothing on the send map */
activate_rxt:
if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) {
if (SEQ_LT(tp->snd_una, tp->snd_max) ||
sbavail(&tptosocket(tp)->so_snd)) {
uint64_t tov;
time_since_sent = 0;
@ -734,7 +735,7 @@ bbr_minseg(struct tcp_bbr *bbr)
static void
bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_t frm, int32_t slot, uint32_t tot_len)
{
struct inpcb *inp;
struct inpcb *inp = tptoinpcb(tp);
struct hpts_diag diag;
uint32_t delayed_ack = 0;
uint32_t left = 0;
@ -743,7 +744,6 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
int32_t delay_calc = 0;
uint32_t prev_delay = 0;
inp = tp->t_inpcb;
if (tcp_in_hpts(inp)) {
/* A previous call is already set up */
return;
@ -909,14 +909,14 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
bbr->rc_pacer_started = cts;
(void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(slot),
(void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(slot),
__LINE__, &diag);
bbr->rc_timer_first = 0;
bbr->bbr_timer_src = frm;
bbr_log_to_start(bbr, cts, hpts_timeout, slot, 1);
bbr_log_hpts_diag(bbr, cts, &diag);
} else if (hpts_timeout) {
(void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout),
(void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(hpts_timeout),
__LINE__, &diag);
/*
* We add the flag here as well if the slot is set,
@ -3607,11 +3607,12 @@ static void
bbr_ack_received(struct tcpcb *tp, struct tcp_bbr *bbr, struct tcphdr *th, uint32_t bytes_this_ack,
uint32_t sack_changed, uint32_t prev_acked, int32_t line, uint32_t losses)
{
INP_WLOCK_ASSERT(tp->t_inpcb);
uint64_t bw;
uint32_t cwnd, target_cwnd, saved_bytes, maxseg;
int32_t meth;
INP_WLOCK_ASSERT(tptoinpcb(tp));
#ifdef STATS
if ((tp->t_flags & TF_GPUTINPROG) &&
SEQ_GEQ(th->th_ack, tp->gput_ack)) {
@ -3762,7 +3763,7 @@ tcp_bbr_partialack(struct tcpcb *tp)
struct tcp_bbr *bbr;
bbr = (struct tcp_bbr *)tp->t_fb_ptr;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
if (ctf_flight_size(tp,
(bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)) <=
tp->snd_cwnd) {
@ -3776,7 +3777,7 @@ bbr_post_recovery(struct tcpcb *tp)
struct tcp_bbr *bbr;
uint32_t flight;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
bbr = (struct tcp_bbr *)tp->t_fb_ptr;
/*
* Here we just exit recovery.
@ -3931,7 +3932,7 @@ bbr_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type, struct bbr_s
{
struct tcp_bbr *bbr;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
#ifdef STATS
stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type);
#endif
@ -4589,7 +4590,7 @@ bbr_timeout_tlp(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts)
* A TLP timer has expired. We have been idle for 2 rtts. So we now
* need to figure out how to force a full MSS segment out.
*/
so = tp->t_inpcb->inp_socket;
so = tptosocket(tp);
avail = sbavail(&so->so_snd);
out = ctf_outstanding(tp);
if (out > tp->snd_wnd) {
@ -4753,8 +4754,7 @@ bbr_timeout_persist(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts)
}
if (bbr->rc_in_persist == 0)
return (0);
KASSERT(tp->t_inpcb != NULL,
("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
/*
* Persistence timer into zero window. Force a byte to be output, if
* possible.
@ -4825,13 +4825,12 @@ static int
bbr_timeout_keepalive(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts)
{
struct tcptemp *t_template;
struct inpcb *inp;
struct inpcb *inp = tptoinpcb(tp);
if (bbr->rc_all_timers_stopped) {
return (1);
}
bbr->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP;
inp = tp->t_inpcb;
bbr_log_to_event(bbr, cts, BBR_TO_FRM_KEEP);
/*
* Keep-alive timer went off; send something or drop connection if
@ -4969,6 +4968,7 @@ bbr_remxt_tmr(struct tcpcb *tp)
static int
bbr_timeout_rxt(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts)
{
struct inpcb *inp = tptoinpcb(tp);
int32_t rexmt;
int32_t retval = 0;
bool isipv6;
@ -5059,7 +5059,7 @@ bbr_timeout_rxt(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts)
* catch ESTABLISHED state.
*/
#ifdef INET6
isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? true : false;
isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false;
#else
isipv6 = false;
#endif
@ -5171,10 +5171,10 @@ bbr_timeout_rxt(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts)
if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
#ifdef INET6
if (bbr->r_is_v6)
in6_losing(tp->t_inpcb);
in6_losing(inp);
else
#endif
in_losing(tp->t_inpcb);
in_losing(inp);
tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
tp->t_srtt = 0;
}
@ -5221,7 +5221,7 @@ bbr_process_timers(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts, uint8_t
left = bbr->r_ctl.rc_timer_exp - cts;
ret = -3;
bbr_log_to_processing(bbr, cts, ret, left, hpts_calling);
tcp_hpts_insert(tp->t_inpcb, HPTS_USEC_TO_SLOTS(left));
tcp_hpts_insert(tptoinpcb(tp), HPTS_USEC_TO_SLOTS(left));
return (1);
}
bbr->rc_tmr_stopped = 0;
@ -5928,7 +5928,7 @@ bbr_log_output(struct tcp_bbr *bbr, struct tcpcb *tp, struct tcpopt *to, int32_t
* slot (11).
*
*/
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
if (err) {
/*
* We don't log errors -- we could but snd_max does not
@ -7333,7 +7333,7 @@ bbr_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th,
uint32_t cts, acked, ack_point, sack_changed = 0;
uint32_t p_maxseg, maxseg, p_acked = 0;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
if (tcp_get_flags(th) & TH_RST) {
/* We don't log resets */
return (0);
@ -7791,7 +7791,7 @@ bbr_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
/* Send window already scaled. */
}
}
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
acked = BYTES_THIS_ACK(tp, th);
KMOD_TCPSTAT_ADD(tcps_rcvackpack, (int)nsegs);
@ -7839,7 +7839,7 @@ bbr_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
/* Nothing left outstanding */
nothing_left:
bbr_log_progress_event(bbr, tp, ticks, PROGRESS_CLEAR, __LINE__);
if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
if (sbavail(&so->so_snd) == 0)
bbr->rc_tp->t_acktime = 0;
if ((sbused(&so->so_snd) == 0) &&
(tp->t_flags & TF_SENTFIN)) {
@ -8150,7 +8150,7 @@ bbr_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
struct tcp_bbr *bbr;
bbr = (struct tcp_bbr *)tp->t_fb_ptr;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
nsegs = max(1, m->m_pkthdr.lro_nsegs);
if ((thflags & TH_ACK) &&
(SEQ_LT(tp->snd_wl1, th->th_seq) ||
@ -8196,8 +8196,8 @@ bbr_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
(tp->snd_wnd < min((bbr->r_ctl.rc_high_rwnd/2), bbr_minseg(bbr))) &&
TCPS_HAVEESTABLISHED(tp->t_state) &&
(tp->snd_max == tp->snd_una) &&
sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
(sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
sbavail(&so->so_snd) &&
(sbavail(&so->so_snd) > tp->snd_wnd)) {
/* No send window.. we must enter persist */
bbr_enter_persist(tp, bbr, bbr->r_ctl.rc_rcvtime, __LINE__);
}
@ -8212,7 +8212,6 @@ bbr_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
* is surprised.
*/
tp->rcv_up = tp->rcv_nxt;
INP_WLOCK_ASSERT(tp->t_inpcb);
/*
* Process the segment text, merging it into the TCP sequencing
@ -8406,7 +8405,6 @@ bbr_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
bbr->rc_timer_first = 1;
bbr_timer_cancel(bbr,
__LINE__, bbr->r_ctl.rc_rcvtime);
INP_WLOCK_ASSERT(tp->t_inpcb);
tcp_twstart(tp);
return (1);
}
@ -8418,7 +8416,6 @@ bbr_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
(sbavail(&so->so_snd) > ctf_outstanding(tp))) {
bbr->r_wanted_output = 1;
}
INP_WLOCK_ASSERT(tp->t_inpcb);
return (0);
}
@ -8663,8 +8660,8 @@ bbr_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
(tp->snd_wnd < min((bbr->r_ctl.rc_high_rwnd/2), bbr_minseg(bbr))) &&
TCPS_HAVEESTABLISHED(tp->t_state) &&
(tp->snd_max == tp->snd_una) &&
sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
(sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
sbavail(&so->so_snd) &&
(sbavail(&so->so_snd) > tp->snd_wnd)) {
/* No send window.. we must enter persist */
bbr_enter_persist(tp, bbr, bbr->r_ctl.rc_rcvtime, __LINE__);
}
@ -8746,7 +8743,7 @@ bbr_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (tp->snd_una == tp->snd_max) {
/* Nothing left outstanding */
bbr_log_progress_event(bbr, tp, ticks, PROGRESS_CLEAR, __LINE__);
if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
if (sbavail(&so->so_snd) == 0)
bbr->rc_tp->t_acktime = 0;
bbr_timer_cancel(bbr, __LINE__, bbr->r_ctl.rc_rcvtime);
if (bbr->rc_in_persist == 0) {
@ -8782,6 +8779,8 @@ bbr_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
struct tcp_bbr *bbr;
int32_t ret_val = 0;
INP_WLOCK_ASSERT(tptoinpcb(tp));
bbr = (struct tcp_bbr *)tp->t_fb_ptr;
ctf_calc_rwin(so, tp);
/*
@ -8904,7 +8903,6 @@ bbr_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN);
tcp_state_change(tp, TCPS_SYN_RECEIVED);
}
INP_WLOCK_ASSERT(tp->t_inpcb);
/*
* Advance th->th_seq to correspond to first data byte. If data,
* trim to stay within window, dropping FIN if necessary.
@ -8991,6 +8989,8 @@ bbr_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
int32_t ret_val;
struct tcp_bbr *bbr;
INP_WLOCK_ASSERT(tptoinpcb(tp));
bbr = (struct tcp_bbr *)tp->t_fb_ptr;
ctf_calc_rwin(so, tp);
if ((thflags & TH_ACK) &&
@ -9048,7 +9048,6 @@ bbr_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen);
return (1);
}
INP_WLOCK_ASSERT(tp->t_inpcb);
if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
return (ret_val);
}
@ -9210,6 +9209,8 @@ bbr_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
struct tcp_bbr *bbr;
int32_t ret_val;
INP_WLOCK_ASSERT(tptoinpcb(tp));
/*
* Header prediction: check for the two common cases of a
* uni-directional data xfer. If the packet has no control flags,
@ -9271,7 +9272,6 @@ bbr_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
return (ret_val);
}
INP_WLOCK_ASSERT(tp->t_inpcb);
if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
return (ret_val);
}
@ -9345,6 +9345,8 @@ bbr_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
struct tcp_bbr *bbr;
int32_t ret_val;
INP_WLOCK_ASSERT(tptoinpcb(tp));
bbr = (struct tcp_bbr *)tp->t_fb_ptr;
ctf_calc_rwin(so, tp);
if ((thflags & TH_RST) ||
@ -9367,7 +9369,6 @@ bbr_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
return (ret_val);
}
INP_WLOCK_ASSERT(tp->t_inpcb);
if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
return (ret_val);
}
@ -9466,6 +9467,8 @@ bbr_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
int32_t ret_val;
struct tcp_bbr *bbr;
INP_WLOCK_ASSERT(tptoinpcb(tp));
bbr = (struct tcp_bbr *)tp->t_fb_ptr;
ctf_calc_rwin(so, tp);
if ((thflags & TH_RST) ||
@ -9488,7 +9491,6 @@ bbr_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
return (ret_val);
}
INP_WLOCK_ASSERT(tp->t_inpcb);
if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
return (ret_val);
}
@ -9590,6 +9592,8 @@ bbr_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
int32_t ret_val;
struct tcp_bbr *bbr;
INP_WLOCK_ASSERT(tptoinpcb(tp));
bbr = (struct tcp_bbr *)tp->t_fb_ptr;
ctf_calc_rwin(so, tp);
if ((thflags & TH_RST) ||
@ -9612,7 +9616,6 @@ bbr_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
return (ret_val);
}
INP_WLOCK_ASSERT(tp->t_inpcb);
if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
return (ret_val);
}
@ -9700,6 +9703,8 @@ bbr_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
int32_t ret_val;
struct tcp_bbr *bbr;
INP_WLOCK_ASSERT(tptoinpcb(tp));
bbr = (struct tcp_bbr *)tp->t_fb_ptr;
ctf_calc_rwin(so, tp);
if ((thflags & TH_RST) ||
@ -9722,7 +9727,6 @@ bbr_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
return (ret_val);
}
INP_WLOCK_ASSERT(tp->t_inpcb);
if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
return (ret_val);
}
@ -9810,6 +9814,8 @@ bbr_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
int32_t ret_val;
struct tcp_bbr *bbr;
INP_WLOCK_ASSERT(tptoinpcb(tp));
bbr = (struct tcp_bbr *)tp->t_fb_ptr;
ctf_calc_rwin(so, tp);
/* Reset receive buffer auto scaling when not in bulk receive mode. */
@ -9825,7 +9831,6 @@ bbr_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
ctf_challenge_ack(m, th, tp, iptos, &ret_val);
return (ret_val);
}
INP_WLOCK_ASSERT(tp->t_inpcb);
/*
* RFC 1323 PAWS: If we have a timestamp reply on this segment and
* it's less than ts_recent, drop it.
@ -9835,7 +9840,6 @@ bbr_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
return (ret_val);
}
INP_WLOCK_ASSERT(tp->t_inpcb);
if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) {
return (ret_val);
}
@ -9863,7 +9867,6 @@ bbr_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
* p.869. In such cases, we can still calculate the RTT correctly
* when RCV.NXT == Last.ACK.Sent.
*/
INP_WLOCK_ASSERT(tp->t_inpcb);
if ((to->to_flags & TOF_TS) != 0 &&
SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
@ -9892,7 +9895,6 @@ bbr_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
/*
* Ack processing.
*/
INP_WLOCK_ASSERT(tp->t_inpcb);
if (bbr_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
return (ret_val);
}
@ -9903,7 +9905,6 @@ bbr_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so,
return (1);
}
}
INP_WLOCK_ASSERT(tp->t_inpcb);
return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen,
tiwin, thflags, nxt_pkt));
}
@ -9992,8 +9993,8 @@ bbr_google_mode_off(struct tcp_bbr *bbr)
static int
bbr_init(struct tcpcb *tp)
{
struct inpcb *inp = tptoinpcb(tp);
struct tcp_bbr *bbr = NULL;
struct inpcb *inp;
uint32_t cts;
tp->t_fb_ptr = uma_zalloc(bbr_pcb_zone, (M_NOWAIT | M_ZERO));
@ -10008,16 +10009,13 @@ bbr_init(struct tcpcb *tp)
}
bbr = (struct tcp_bbr *)tp->t_fb_ptr;
bbr->rtt_valid = 0;
inp = tp->t_inpcb;
inp->inp_flags2 |= INP_CANNOT_DO_ECN;
inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
TAILQ_INIT(&bbr->r_ctl.rc_map);
TAILQ_INIT(&bbr->r_ctl.rc_free);
TAILQ_INIT(&bbr->r_ctl.rc_tmap);
bbr->rc_tp = tp;
if (tp->t_inpcb) {
bbr->rc_inp = tp->t_inpcb;
}
bbr->rc_inp = inp;
cts = tcp_get_usecs(&bbr->rc_tv);
tp->t_acktime = 0;
bbr->rc_allow_data_af_clo = bbr_ignore_data_after_close;
@ -10238,6 +10236,7 @@ static void
bbr_fini(struct tcpcb *tp, int32_t tcb_is_purged)
{
if (tp->t_fb_ptr) {
struct inpcb *inp = tptoinpcb(tp);
uint32_t calc;
struct tcp_bbr *bbr;
struct bbr_sendmap *rsm;
@ -10247,12 +10246,10 @@ bbr_fini(struct tcpcb *tp, int32_t tcb_is_purged)
tcp_rel_pacing_rate(bbr->r_ctl.crte, bbr->rc_tp);
bbr_log_flowend(bbr);
bbr->rc_tp = NULL;
if (tp->t_inpcb) {
/* Backout any flags2 we applied */
tp->t_inpcb->inp_flags2 &= ~INP_CANNOT_DO_ECN;
tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY;
}
/* Backout any flags2 we applied */
inp->inp_flags2 &= ~INP_CANNOT_DO_ECN;
inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY;
if (bbr->bbr_hdrw_pacing)
counter_u64_add(bbr_flows_whdwr_pacing, -1);
else
@ -11331,6 +11328,7 @@ bbr_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos,
int32_t nxt_pkt, struct timeval *tv)
{
struct inpcb *inp = tptoinpcb(tp);
int32_t thflags, retval;
uint32_t cts, lcts;
uint32_t tiwin;
@ -11356,7 +11354,7 @@ bbr_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
* caller may have unnecessarily acquired a write lock due to a
* race.
*/
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
__func__));
KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
@ -11437,7 +11435,7 @@ bbr_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
* this is traditional behavior, may need to be cleaned up.
*/
if (bbr->rc_inp == NULL) {
bbr->rc_inp = tp->t_inpcb;
bbr->rc_inp = inp;
}
/*
* We need to init rc_inp here since its not init'd when
@ -11474,7 +11472,7 @@ bbr_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (to.to_flags & TOF_MSS)
mss = to.to_mss;
else
if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
if ((inp->inp_vflag & INP_IPV6) != 0)
mss = TCP6_MSS;
else
mss = TCP_MSS;
@ -11498,8 +11496,8 @@ bbr_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
return (1);
}
/* Set the flag */
bbr->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
tcp_set_hpts(tp->t_inpcb);
bbr->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0;
tcp_set_hpts(inp);
sack_filter_clear(&bbr->r_ctl.bbr_sf, th->th_ack);
}
if (thflags & TH_ACK) {
@ -11560,13 +11558,6 @@ bbr_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
}
if (tiwin > bbr->r_ctl.rc_high_rwnd)
bbr->r_ctl.rc_high_rwnd = tiwin;
#ifdef BBR_INVARIANTS
if ((tp->t_inpcb->inp_flags & INP_DROPPED) ||
(tp->t_inpcb->inp_flags2 & INP_FREED)) {
panic("tp:%p bbr:%p given a dropped inp:%p",
tp, bbr, tp->t_inpcb);
}
#endif
bbr->r_ctl.rc_flight_at_input = ctf_flight_size(tp,
(bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes));
bbr->rtt_valid = 0;
@ -11580,13 +11571,6 @@ bbr_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
retval = (*bbr->r_substate) (m, th, so,
tp, &to, drop_hdrlen,
tlen, tiwin, thflags, nxt_pkt, iptos);
#ifdef BBR_INVARIANTS
if ((retval == 0) &&
(tp->t_inpcb == NULL)) {
panic("retval:%d tp:%p t_inpcb:NULL state:%d",
retval, tp, prev_state);
}
#endif
if (nxt_pkt == 0)
BBR_STAT_INC(bbr_rlock_left_ret0);
else
@ -11596,7 +11580,7 @@ bbr_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
* If retval is 1 the tcb is unlocked and most likely the tp
* is gone.
*/
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(inp);
tcp_bbr_xmit_timer_commit(bbr, tp, cts);
if (bbr->rc_is_pkt_epoch_now)
bbr_set_pktepoch(bbr, cts, __LINE__);
@ -11664,13 +11648,6 @@ bbr_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
bbr_log_doseg_done(bbr, cts, nxt_pkt, did_out);
if (did_out)
bbr->r_wanted_output = 0;
#ifdef BBR_INVARIANTS
if (tp->t_inpcb == NULL) {
panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d",
did_out,
retval, tp, prev_state);
}
#endif
}
return (retval);
}
@ -11698,7 +11675,7 @@ bbr_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
retval = bbr_do_segment_nounlock(m, th, so, tp,
drop_hdrlen, tlen, iptos, 0, &tv);
if (retval == 0) {
INP_WUNLOCK(tp->t_inpcb);
INP_WUNLOCK(tptoinpcb(tp));
}
}
@ -12791,7 +12768,7 @@ bbr_output_wtime(struct tcpcb *tp, const struct timeval *tv)
if ((bbr->rc_in_persist == 0) &&
TCPS_HAVEESTABLISHED(tp->t_state) &&
(tp->snd_max == tp->snd_una) &&
sbavail(&tp->t_inpcb->inp_socket->so_snd)) {
sbavail(&so->so_snd)) {
/* No send window.. we must enter persist */
bbr_enter_persist(tp, bbr, bbr->r_ctl.rc_rcvtime, __LINE__);
}
@ -13007,11 +12984,11 @@ bbr_output_wtime(struct tcpcb *tp, const struct timeval *tv)
}
#ifdef INET6
if (isipv6)
ipoptlen = ip6_optlen(tp->t_inpcb);
ipoptlen = ip6_optlen(inp);
else
#endif
if (tp->t_inpcb->inp_options)
ipoptlen = tp->t_inpcb->inp_options->m_len -
if (inp->inp_options)
ipoptlen = inp->inp_options->m_len -
offsetof(struct ipoption, ipopt_list);
else
ipoptlen = 0;
@ -14134,7 +14111,7 @@ bbr_output(struct tcpcb *tp)
NET_EPOCH_ASSERT();
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
(void)tcp_get_usecs(&tv);
ret = bbr_output_wtime(tp, &tv);
return (ret);

View file

@ -2549,11 +2549,9 @@ rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg
log.u_bbr.applimited = rack->r_ctl.rc_sacked;
log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
log.u_bbr.pacing_gain = rack->r_must_retran;
TCP_LOG_EVENTP(tp, NULL,
&tp->t_inpcb->inp_socket->so_rcv,
&tp->t_inpcb->inp_socket->so_snd,
TCP_HDWR_PACE_SIZE, 0,
0, &log, false, &tv);
TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv,
&tptosocket(tp)->so_snd,
TCP_HDWR_PACE_SIZE, 0, 0, &log, false, &tv);
}
}
@ -4517,7 +4515,7 @@ rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
* this means we need to have the data available
* before we start a measurement.
*/
if (sbavail(&tp->t_inpcb->inp_socket->so_snd) < (tp->gput_ack - tp->gput_seq)) {
if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) {
/* Nope not enough data. */
return;
}
@ -4578,7 +4576,7 @@ rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint
struct tcp_log_buffer *lgb = NULL;
uint8_t labc_to_use, quality;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
tp->ccv->nsegs = nsegs;
acked = tp->ccv->bytes_this_ack = (th_ack - tp->snd_una);
if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) {
@ -4705,7 +4703,7 @@ tcp_rack_partialack(struct tcpcb *tp)
struct tcp_rack *rack;
rack = (struct tcp_rack *)tp->t_fb_ptr;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
/*
* If we are doing PRR and have enough
* room to send <or> we are pacing and prr
@ -4725,7 +4723,7 @@ rack_post_recovery(struct tcpcb *tp, uint32_t th_ack)
uint32_t orig_cwnd;
orig_cwnd = tp->snd_cwnd;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
rack = (struct tcp_rack *)tp->t_fb_ptr;
/* only alert CC if we alerted when we entered */
if (CC_ALGO(tp)->post_recovery != NULL) {
@ -4765,7 +4763,7 @@ rack_post_recovery(struct tcpcb *tp, uint32_t th_ack)
* Suck the next prr cnt back into cwnd, but
* only do that if we are not application limited.
*/
if (ctf_outstanding(tp) <= sbavail(&(tp->t_inpcb->inp_socket->so_snd))) {
if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) {
/*
* We are allowed to add back to the cwnd the amount we did
* not get out if:
@ -4799,7 +4797,7 @@ rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line)
struct tcp_rack *rack;
uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
#ifdef STATS
stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type);
#endif
@ -4891,7 +4889,7 @@ rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp)
{
uint32_t i_cwnd;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
#ifdef NETFLIX_STATS
KMOD_TCPSTAT_INC(tcps_idle_restarts);
@ -5270,7 +5268,8 @@ rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_
if (TSTMP_GT(cts, tstmp_touse))
time_since_sent = cts - tstmp_touse;
}
if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) {
if (SEQ_LT(tp->snd_una, tp->snd_max) ||
sbavail(&tptosocket(tp)->so_snd)) {
rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT;
to = tp->t_rxtcur;
if (to > time_since_sent)
@ -5608,7 +5607,7 @@ rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
int32_t slot, uint32_t tot_len_this_send, int sup_rack)
{
struct hpts_diag diag;
struct inpcb *inp;
struct inpcb *inp = tptoinpcb(tp);
struct timeval tv;
uint32_t delayed_ack = 0;
uint32_t hpts_timeout;
@ -5617,7 +5616,6 @@ rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
uint32_t left = 0;
uint32_t us_cts;
inp = tp->t_inpcb;
if ((tp->t_state == TCPS_CLOSED) ||
(tp->t_state == TCPS_LISTEN)) {
return;
@ -5856,12 +5854,12 @@ rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
* Arrange for the hpts to kick back in after the
* t-o if the t-o does not cause a send.
*/
(void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout),
(void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(hpts_timeout),
__LINE__, &diag);
rack_log_hpts_diag(rack, us_cts, &diag, &tv);
rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
} else {
(void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(slot),
(void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(slot),
__LINE__, &diag);
rack_log_hpts_diag(rack, us_cts, &diag, &tv);
rack_log_to_start(rack, cts, hpts_timeout, slot, 1);
@ -5875,7 +5873,7 @@ rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
* but it may change the prr stats so letting it in (the set defaults
* at the start of this block) are good enough.
*/
(void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout),
(void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(hpts_timeout),
__LINE__, &diag);
rack_log_hpts_diag(rack, us_cts, &diag, &tv);
rack_log_to_start(rack, cts, hpts_timeout, slot, 0);
@ -6120,7 +6118,7 @@ rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t
#ifdef INVARIANTS
struct rack_sendmap *insret;
#endif
struct socket *so;
struct socket *so = tptosocket(tp);
uint32_t amm;
uint32_t out, avail;
int collapsed_win = 0;
@ -6146,7 +6144,6 @@ rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t
counter_u64_add(rack_tlp_tot, 1);
if (rack->r_state && (rack->r_state != tp->t_state))
rack_set_state(tp, rack);
so = tp->t_inpcb->inp_socket;
avail = sbavail(&so->so_snd);
out = tp->snd_max - tp->snd_una;
if ((out > tp->snd_wnd) || rack->rc_has_collapsed) {
@ -6338,9 +6335,6 @@ static int
rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
{
struct tcptemp *t_template;
#ifdef INVARIANTS
struct inpcb *inp = tp->t_inpcb;
#endif
int32_t retval = 1;
if (tp->t_timers->tt_flags & TT_STOPPED) {
@ -6354,7 +6348,6 @@ rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends);
return (-ETIMEDOUT); /* tcp_drop() */
}
KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
/*
* Persistence timer into zero window. Force a byte to be output, if
* possible.
@ -6430,13 +6423,12 @@ static int
rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
{
struct tcptemp *t_template;
struct inpcb *inp;
struct inpcb *inp = tptoinpcb(tp);
if (tp->t_timers->tt_flags & TT_STOPPED) {
return (1);
}
rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP;
inp = tp->t_inpcb;
rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL);
/*
* Keep-alive timer went off; send something or drop connection if
@ -6657,6 +6649,7 @@ rack_cc_conn_init(struct tcpcb *tp)
static int
rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
{
struct inpcb *inp = tptoinpcb(tp);
int32_t rexmt;
int32_t retval = 0;
bool isipv6;
@ -6796,7 +6789,7 @@ rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
* catch ESTABLISHED state.
*/
#ifdef INET6
isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? true : false;
isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false;
#else
isipv6 = false;
#endif
@ -6904,11 +6897,11 @@ rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts)
*/
if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
#ifdef INET6
if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
in6_losing(tp->t_inpcb);
if ((inp->inp_vflag & INP_IPV6) != 0)
in6_losing(inp);
else
#endif
in_losing(tp->t_inpcb);
in_losing(inp);
tp->t_rttvar += tp->t_srtt;
tp->t_srtt = 0;
}
@ -6940,7 +6933,7 @@ rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8
bytes = tp->gput_ack - tp->gput_seq;
if (SEQ_GT(tp->gput_seq, tp->snd_una))
bytes += tp->gput_seq - tp->snd_una;
if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) {
if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
/*
* There are not enough bytes in the socket
* buffer that have been sent to cover this
@ -7001,7 +6994,7 @@ rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8
*/
ret = -3;
left = rack->r_ctl.rc_timer_exp - cts;
tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left));
tcp_hpts_insert(tptoinpcb(tp), HPTS_MS_TO_SLOTS(left));
rack_log_to_processing(rack, cts, ret, left);
return (1);
}
@ -7282,7 +7275,7 @@ rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
* -- i.e. return if err != 0 or should we pretend we sent it? --
* i.e. proceed with add ** do this for now.
*/
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
if (err)
/*
* We don't log errors -- we could but snd_max does not
@ -8198,7 +8191,7 @@ rack_need_set_test(struct tcpcb *tp,
uint32_t ideal_amount;
ideal_amount = rack_get_measure_window(tp, rack);
if (ideal_amount > sbavail(&tp->t_inpcb->inp_socket->so_snd)) {
if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) {
/*
* There is no sense of continuing this measurement
* because its too small to gain us anything we
@ -9488,7 +9481,7 @@ rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered
uint32_t tsused;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
if (tcp_get_flags(th) & TH_RST) {
/* We don't log resets */
return;
@ -10145,6 +10138,8 @@ rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
int32_t under_pacing = 0;
int32_t recovery = 0;
INP_WLOCK_ASSERT(tptoinpcb(tp));
rack = (struct tcp_rack *)tp->t_fb_ptr;
if (SEQ_GT(th->th_ack, tp->snd_max)) {
__ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val,
@ -10206,7 +10201,6 @@ rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
}
}
nsegs = max(1, m->m_pkthdr.lro_nsegs);
INP_WLOCK_ASSERT(tp->t_inpcb);
acked = BYTES_THIS_ACK(tp, th);
if (acked) {
@ -10333,7 +10327,7 @@ rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (rack->r_ctl.rc_went_idle_time == 0)
rack->r_ctl.rc_went_idle_time = 1;
rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
if (sbavail(&tptosocket(tp)->so_snd) == 0)
tp->t_acktime = 0;
rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
/* Set need output so persist might get set */
@ -10557,8 +10551,9 @@ rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
int32_t tfo_syn;
struct tcp_rack *rack;
INP_WLOCK_ASSERT(tptoinpcb(tp));
rack = (struct tcp_rack *)tp->t_fb_ptr;
INP_WLOCK_ASSERT(tp->t_inpcb);
nsegs = max(1, m->m_pkthdr.lro_nsegs);
if ((thflags & TH_ACK) &&
(SEQ_LT(tp->snd_wl1, th->th_seq) ||
@ -10605,8 +10600,8 @@ rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
(tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
TCPS_HAVEESTABLISHED(tp->t_state) &&
((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) &&
sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
(sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
sbavail(&tptosocket(tp)->so_snd) &&
(sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) {
/*
* Here the rwnd is less than
* the pacing size, we are established,
@ -10624,7 +10619,6 @@ rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
* along the up.
*/
tp->rcv_up = tp->rcv_nxt;
INP_WLOCK_ASSERT(tp->t_inpcb);
/*
* Process the segment text, merging it into the TCP sequencing
@ -10830,7 +10824,6 @@ rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so,
(sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) {
rack->r_wanted_output = 1;
}
INP_WLOCK_ASSERT(tp->t_inpcb);
return (0);
}
@ -11061,8 +11054,8 @@ rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
(tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
TCPS_HAVEESTABLISHED(tp->t_state) &&
((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) &&
sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
(sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
sbavail(&tptosocket(tp)->so_snd) &&
(sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) {
/*
* Here the rwnd is less than
* the pacing size, we are established,
@ -11189,7 +11182,7 @@ rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (rack->r_ctl.rc_went_idle_time == 0)
rack->r_ctl.rc_went_idle_time = 1;
rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
if (sbavail(&tptosocket(tp)->so_snd) == 0)
tp->t_acktime = 0;
rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
}
@ -11216,6 +11209,8 @@ rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
int32_t ourfinisacked = 0;
struct tcp_rack *rack;
INP_WLOCK_ASSERT(tptoinpcb(tp));
ctf_calc_rwin(so, tp);
/*
* If the state is SYN_SENT: if seg contains an ACK, but not for our
@ -11335,7 +11330,6 @@ rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN);
tcp_state_change(tp, TCPS_SYN_RECEIVED);
}
INP_WLOCK_ASSERT(tp->t_inpcb);
/*
* Advance th->th_seq to correspond to first data byte. If data,
* trim to stay within window, dropping FIN if necessary.
@ -12508,6 +12502,7 @@ rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack)
static int
rack_init(struct tcpcb *tp)
{
struct inpcb *inp = tptoinpcb(tp);
struct tcp_rack *rack = NULL;
#ifdef INVARIANTS
struct rack_sendmap *insret;
@ -12532,9 +12527,9 @@ rack_init(struct tcpcb *tp)
TAILQ_INIT(&rack->r_ctl.rc_free);
TAILQ_INIT(&rack->r_ctl.rc_tmap);
rack->rc_tp = tp;
rack->rc_inp = tp->t_inpcb;
rack->rc_inp = inp;
/* Set the flag */
rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0;
/* Probably not needed but lets be sure */
rack_clear_rate_sample(rack);
/*
@ -12599,9 +12594,9 @@ rack_init(struct tcpcb *tp)
else
rack->r_mbuf_queue = 0;
if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
else
tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
rack_set_pace_segments(tp, rack, __LINE__, NULL);
if (rack_limits_scwnd)
rack->r_limit_scw = 1;
@ -12825,6 +12820,8 @@ rack_handoff_ok(struct tcpcb *tp)
static void
rack_fini(struct tcpcb *tp, int32_t tcb_is_purged)
{
struct inpcb *inp = tptoinpcb(tp);
if (tp->t_fb_ptr) {
struct tcp_rack *rack;
struct rack_sendmap *rsm, *nrsm;
@ -12963,15 +12960,13 @@ rack_fini(struct tcpcb *tp, int32_t tcb_is_purged)
uma_zfree(rack_pcb_zone, tp->t_fb_ptr);
tp->t_fb_ptr = NULL;
}
if (tp->t_inpcb) {
tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY;
tp->t_inpcb->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
tp->t_inpcb->inp_flags2 &= ~INP_MBUF_ACKCMP;
/* Cancel the GP measurement in progress */
tp->t_flags &= ~TF_GPUTINPROG;
tp->t_inpcb->inp_flags2 &= ~INP_MBUF_L_ACKS;
}
inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY;
inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
inp->inp_flags2 &= ~INP_MBUF_ACKCMP;
/* Cancel the GP measurement in progress */
tp->t_flags &= ~TF_GPUTINPROG;
inp->inp_flags2 &= ~INP_MBUF_L_ACKS;
/* Make sure snd_nxt is correctly set */
tp->snd_nxt = tp->snd_max;
}
@ -12980,7 +12975,7 @@ static void
rack_set_state(struct tcpcb *tp, struct tcp_rack *rack)
{
if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) {
rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0;
}
switch (tp->t_state) {
case TCPS_SYN_SENT:
@ -13059,7 +13054,7 @@ rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb)
if (tmr_up == PACE_TMR_DELACK)
/* We are supposed to have delayed ack up and we do */
return;
} else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) {
} else if (sbavail(&tptosocket(tp)->so_snd) && (tmr_up == PACE_TMR_RXT)) {
/*
* if we hit enobufs then we would expect the possibility
* of nothing outstanding and the RXT up (and the hptsi timer).
@ -13109,7 +13104,7 @@ rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb)
}
rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
}
tcp_hpts_remove(tp->t_inpcb);
tcp_hpts_remove(rack->rc_inp);
}
rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
@ -13152,8 +13147,8 @@ rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uin
(tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
TCPS_HAVEESTABLISHED(tp->t_state) &&
((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) &&
sbavail(&tp->t_inpcb->inp_socket->so_snd) &&
(sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) {
sbavail(&tptosocket(tp)->so_snd) &&
(sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) {
/*
* Here the rwnd is less than
* the pacing size, we are established,
@ -13169,6 +13164,7 @@ rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent
{
if (tp->t_logstate != TCP_LOG_STATE_OFF) {
struct inpcb *inp = tptoinpcb(tp);
union tcp_log_stackspecific log;
struct timeval ltv;
char tcp_hdr_buf[60];
@ -13256,8 +13252,8 @@ rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent
th->th_ack = ae->ack;
th->th_win = ae->win;
/* Now fill in the ports */
th->th_sport = tp->t_inpcb->inp_fport;
th->th_dport = tp->t_inpcb->inp_lport;
th->th_sport = inp->inp_fport;
th->th_dport = inp->inp_lport;
tcp_set_flags(th, ae->flags);
/* Now do we have a timestamp option? */
if (ae->flags & HAS_TSTMP) {
@ -13300,8 +13296,8 @@ rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent
} else
xx = 0;
TCP_LOG_EVENTP(tp, th,
&tp->t_inpcb->inp_socket->so_rcv,
&tp->t_inpcb->inp_socket->so_snd, TCP_LOG_IN, 0,
&tptosocket(tp)->so_rcv,
&tptosocket(tp)->so_snd, TCP_LOG_IN, 0,
0, &log, true, &ltv);
if (xx) {
tp->snd_una = orig_snd_una;
@ -13423,7 +13419,7 @@ rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mb
bytes = tp->gput_ack - tp->gput_seq;
if (SEQ_GT(tp->gput_seq, tp->snd_una))
bytes += tp->gput_seq - tp->snd_una;
if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) {
if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
/*
* There are not enough bytes in the socket
* buffer that have been sent to cover this
@ -13838,7 +13834,7 @@ rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mb
if (rack->r_ctl.rc_went_idle_time == 0)
rack->r_ctl.rc_went_idle_time = 1;
rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
if (sbavail(&tptosocket(tp)->so_snd) == 0)
tp->t_acktime = 0;
/* Set so we might enter persists... */
rack->r_wanted_output = 1;
@ -14061,6 +14057,7 @@ rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos,
int32_t nxt_pkt, struct timeval *tv)
{
struct inpcb *inp = tptoinpcb(tp);
#ifdef TCP_ACCOUNTING
uint64_t ts_val;
#endif
@ -14082,6 +14079,10 @@ rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
int ack_val_set = 0xf;
#endif
int nsegs;
NET_EPOCH_ASSERT();
INP_WLOCK_ASSERT(inp);
/*
* tv passed from common code is from either M_TSTMP_LRO or
* tcp_get_usecs() if no LRO m_pkthdr timestamp is present.
@ -14152,8 +14153,6 @@ rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
tcp_dooptions(&to, (u_char *)(th + 1),
(th->th_off << 2) - sizeof(struct tcphdr),
(thflags & TH_SYN) ? TO_SYN : 0);
NET_EPOCH_ASSERT();
INP_WLOCK_ASSERT(tp->t_inpcb);
KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
__func__));
KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
@ -14172,7 +14171,7 @@ rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
bytes = tp->gput_ack - tp->gput_seq;
if (SEQ_GT(tp->gput_seq, tp->snd_una))
bytes += tp->gput_seq - tp->snd_una;
if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) {
if (bytes > sbavail(&tptosocket(tp)->so_snd)) {
/*
* There are not enough bytes in the socket
* buffer that have been sent to cover this
@ -14347,7 +14346,7 @@ rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
KASSERT(rack->rc_inp != NULL,
("%s: rack->rc_inp unexpectedly NULL", __func__));
if (rack->rc_inp == NULL) {
rack->rc_inp = tp->t_inpcb;
rack->rc_inp = inp;
}
/*
@ -14392,7 +14391,7 @@ rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (to.to_flags & TOF_MSS)
mss = to.to_mss;
else
if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
if ((inp->inp_vflag & INP_IPV6) != 0)
mss = TCP6_MSS;
else
mss = TCP_MSS;
@ -14421,7 +14420,7 @@ rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
#endif
return (1);
}
tcp_set_hpts(tp->t_inpcb);
tcp_set_hpts(inp);
sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack);
}
if (thflags & TH_FIN)
@ -14453,19 +14452,12 @@ rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
retval = (*rack->r_substate) (m, th, so,
tp, &to, drop_hdrlen,
tlen, tiwin, thflags, nxt_pkt, iptos);
#ifdef INVARIANTS
if ((retval == 0) &&
(tp->t_inpcb == NULL)) {
panic("retval:%d tp:%p t_inpcb:NULL state:%d",
retval, tp, prev_state);
}
#endif
if (retval == 0) {
/*
* If retval is 1 the tcb is unlocked and most likely the tp
* is gone.
*/
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(inp);
if ((rack->rc_gp_dyn_mul) &&
(rack->rc_always_pace) &&
(rack->use_fixed_rate == 0) &&
@ -14560,7 +14552,7 @@ rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
;
} else {
int late = 0;
if (tcp_in_hpts(rack->rc_inp)) {
if (tcp_in_hpts(inp)) {
if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
us_cts = tcp_get_usecs(NULL);
if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) {
@ -14570,7 +14562,7 @@ rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
late = 1;
rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
}
tcp_hpts_remove(tp->t_inpcb);
tcp_hpts_remove(inp);
}
if (late && (did_out == 0)) {
/*
@ -14592,13 +14584,6 @@ rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so,
rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs));
if (did_out)
rack->r_wanted_output = 0;
#ifdef INVARIANTS
if (tp->t_inpcb == NULL) {
panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d",
did_out,
retval, tp, prev_state);
}
#endif
#ifdef TCP_ACCOUNTING
} else {
/*
@ -14643,7 +14628,7 @@ rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
}
if (rack_do_segment_nounlock(m, th, so, tp,
drop_hdrlen, tlen, iptos, 0, &tv) == 0) {
INP_WUNLOCK(tp->t_inpcb);
INP_WUNLOCK(tptoinpcb(tp));
}
}
@ -15253,7 +15238,7 @@ rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack,
* before we start a measurement.
*/
if (sbavail(&tp->t_inpcb->inp_socket->so_snd) <
if (sbavail(&tptosocket(tp)->so_snd) <
max(rc_init_window(rack),
(MIN_GP_WIN * ctf_fixed_maxseg(tp)))) {
/* Nope not enough data */
@ -16734,7 +16719,7 @@ rack_output(struct tcpcb *tp)
struct timeval tv;
int32_t prefetch_so_done = 0;
struct tcp_log_buffer *lgb;
struct inpcb *inp;
struct inpcb *inp = tptoinpcb(tp);
struct sockbuf *sb;
uint64_t ts_val = 0;
#ifdef TCP_ACCOUNTING
@ -16746,15 +16731,16 @@ rack_output(struct tcpcb *tp)
#endif
bool hw_tls = false;
NET_EPOCH_ASSERT();
INP_WLOCK_ASSERT(inp);
/* setup and take the cache hits here */
rack = (struct tcp_rack *)tp->t_fb_ptr;
#ifdef TCP_ACCOUNTING
sched_pin();
ts_val = get_cyclecount();
#endif
hpts_calling = rack->rc_inp->inp_hpts_calls;
NET_EPOCH_ASSERT();
INP_WLOCK_ASSERT(rack->rc_inp);
hpts_calling = inp->inp_hpts_calls;
#ifdef TCP_OFFLOAD
if (tp->t_flags & TF_TOE) {
#ifdef TCP_ACCOUNTING
@ -17584,14 +17570,14 @@ rack_output(struct tcpcb *tp)
*/
#ifdef INET6
if (isipv6 && IPSEC_ENABLED(ipv6))
ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb);
ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp);
#ifdef INET
else
#endif
#endif /* INET6 */
#ifdef INET
if (IPSEC_ENABLED(ipv4))
ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb);
ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp);
#endif /* INET */
#endif
@ -18189,11 +18175,11 @@ rack_output(struct tcpcb *tp)
}
#ifdef INET6
if (isipv6)
ipoptlen = ip6_optlen(tp->t_inpcb);
ipoptlen = ip6_optlen(inp);
else
#endif
if (tp->t_inpcb->inp_options)
ipoptlen = tp->t_inpcb->inp_options->m_len -
if (inp->inp_options)
ipoptlen = inp->inp_options->m_len -
offsetof(struct ipoption, ipopt_list);
else
ipoptlen = 0;
@ -19655,6 +19641,7 @@ rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name,
struct epoch_tracker et;
struct sockopt sopt;
struct cc_newreno_opts opt;
struct inpcb *inp = tptoinpcb(tp);
uint64_t val;
int error = 0;
uint16_t ca, ss;
@ -19803,10 +19790,10 @@ rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name,
} else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) {
rack->r_use_cmp_ack = 1;
rack->r_mbuf_queue = 1;
tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
}
if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP;
inp->inp_flags2 |= INP_MBUF_ACKCMP;
break;
case TCP_SHARED_CWND_TIME_LIMIT:
RACK_OPTS_INC(tcp_lscwnd);
@ -19858,9 +19845,9 @@ rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name,
else
rack->r_mbuf_queue = 0;
if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
else
tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
break;
case TCP_RACK_NONRXT_CFG_RATE:
RACK_OPTS_INC(tcp_rack_cfg_rate);
@ -19943,9 +19930,9 @@ rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name,
}
}
if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ;
inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
else
tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ;
/* A rate may be set irate or other, if so set seg size */
rack_update_seg(rack);
break;
@ -20519,7 +20506,7 @@ static void
rack_fill_info(struct tcpcb *tp, struct tcp_info *ti)
{
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
bzero(ti, sizeof(*ti));
ti->tcpi_state = tp->t_state;

View file

@ -403,7 +403,10 @@ ctf_process_inbound_raw(struct tcpcb *tp, struct socket *so, struct mbuf *m, int
uint16_t drop_hdrlen;
uint8_t iptos, no_vn=0;
inp = tptoinpcb(tp);
INP_WLOCK_ASSERT(inp);
NET_EPOCH_ASSERT();
if (m)
ifp = m_rcvif(m);
else
@ -480,8 +483,8 @@ ctf_process_inbound_raw(struct tcpcb *tp, struct socket *so, struct mbuf *m, int
* been compressed. We assert the inp has
* the flag set to enable this!
*/
KASSERT((tp->t_inpcb->inp_flags2 & INP_MBUF_ACKCMP),
("tp:%p inp:%p no INP_MBUF_ACKCMP flags?", tp, tp->t_inpcb));
KASSERT((inp->inp_flags2 & INP_MBUF_ACKCMP),
("tp:%p inp:%p no INP_MBUF_ACKCMP flags?", tp, inp));
tlen = 0;
drop_hdrlen = 0;
th = NULL;
@ -496,8 +499,6 @@ ctf_process_inbound_raw(struct tcpcb *tp, struct socket *so, struct mbuf *m, int
KMOD_TCPSTAT_INC(tcps_rcvtotal);
else
KMOD_TCPSTAT_ADD(tcps_rcvtotal, (m->m_len / sizeof(struct tcp_ackent)));
inp = tp->t_inpcb;
INP_WLOCK_ASSERT(inp);
retval = (*tp->t_fb->tfb_do_segment_nounlock)(m, th, so, tp, drop_hdrlen, tlen,
iptos, nxt_pkt, &tv);
if (retval) {
@ -571,7 +572,7 @@ ctf_do_dropwithreset(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th,
{
if (tp != NULL) {
tcp_dropwithreset(m, th, tp, tlen, rstreason);
INP_WUNLOCK(tp->t_inpcb);
INP_WUNLOCK(tptoinpcb(tp));
} else
tcp_dropwithreset(m, th, NULL, tlen, rstreason);
}
@ -759,7 +760,7 @@ ctf_do_drop(struct mbuf *m, struct tcpcb *tp)
* Drop space held by incoming segment and return.
*/
if (tp != NULL)
INP_WUNLOCK(tp->t_inpcb);
INP_WUNLOCK(tptoinpcb(tp));
if (m)
m_freem(m);
}
@ -974,7 +975,7 @@ ctf_do_dropwithreset_conn(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th,
tcp_dropwithreset(m, th, tp, tlen, rstreason);
tp = tcp_drop(tp, ETIMEDOUT);
if (tp)
INP_WUNLOCK(tp->t_inpcb);
INP_WUNLOCK(tptoinpcb(tp));
}
uint32_t
@ -1010,8 +1011,8 @@ ctf_log_sack_filter(struct tcpcb *tp, int num_sack_blks, struct sackblk *sack_bl
log.u_bbr.pkts_out = sack_blocks[3].end;
}
TCP_LOG_EVENTP(tp, NULL,
&tp->t_inpcb->inp_socket->so_rcv,
&tp->t_inpcb->inp_socket->so_snd,
&tptosocket(tp)->so_rcv,
&tptosocket(tp)->so_snd,
TCP_SACK_FILTER_RES, 0,
0, &log, false, &tv);
}

View file

@ -199,7 +199,7 @@ tcp_stats_sample_rollthedice(struct tcpcb *tp, void *seed_bytes,
rm_runlock(&tcp_stats_tpl_sampling_lock, &tracker);
if (tpl >= 0) {
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
if (tp->t_stats != NULL)
stats_blob_destroy(tp->t_stats);
tp->t_stats = stats_blob_alloc(tpl, 0);

View file

@ -1044,10 +1044,9 @@ tcp_default_handoff_ok(struct tcpcb *tp)
static int
tcp_default_fb_init(struct tcpcb *tp)
{
struct socket *so = tptosocket(tp);
struct socket *so;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
KASSERT(tp->t_state >= 0 && tp->t_state < TCPS_TIME_WAIT,
("%s: connection %p in unexpected state %d", __func__, tp,
@ -1064,7 +1063,6 @@ tcp_default_fb_init(struct tcpcb *tp)
* Make sure some kind of transmission timer is set if there is
* outstanding data.
*/
so = tp->t_inpcb->inp_socket;
if ((!TCPS_HAVEESTABLISHED(tp->t_state) || sbavail(&so->so_snd) ||
tp->snd_una != tp->snd_max) && !(tcp_timer_active(tp, TT_REXMT) ||
tcp_timer_active(tp, TT_PERSIST))) {
@ -1110,8 +1108,7 @@ static void
tcp_default_fb_fini(struct tcpcb *tp, int tcb_is_purged)
{
INP_WLOCK_ASSERT(tp->t_inpcb);
return;
INP_WLOCK_ASSERT(tptoinpcb(tp));
}
/*
@ -1812,8 +1809,7 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
ip = ipgen;
if (tp != NULL) {
inp = tp->t_inpcb;
KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
inp = tptoinpcb(tp);
INP_LOCK_ASSERT(inp);
} else
inp = NULL;
@ -2102,8 +2098,7 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
nth->th_sum = in6_cksum_pseudo(ip6,
tlen - sizeof(struct ip6_hdr), IPPROTO_TCP, 0);
}
ip6->ip6_hlim = in6_selecthlim(tp != NULL ? tp->t_inpcb :
NULL, NULL);
ip6->ip6_hlim = in6_selecthlim(inp, NULL);
}
#endif /* INET6 */
#if defined(INET6) && defined(INET)
@ -2139,7 +2134,7 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
struct timeval tv;
memset(&log.u_bbr, 0, sizeof(log.u_bbr));
log.u_bbr.inhpts = tp->t_inpcb->inp_in_hpts;
log.u_bbr.inhpts = inp->inp_in_hpts;
log.u_bbr.flex8 = 4;
log.u_bbr.pkts_out = tp->t_maxseg;
log.u_bbr.timeStamp = tcp_get_usecs(&tv);
@ -2356,10 +2351,10 @@ tcp_newtcpcb(struct inpcb *inp)
struct tcpcb *
tcp_drop(struct tcpcb *tp, int errno)
{
struct socket *so = tp->t_inpcb->inp_socket;
struct socket *so = tptosocket(tp);
NET_EPOCH_ASSERT();
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
if (TCPS_HAVERCVDSYN(tp->t_state)) {
tcp_state_change(tp, TCPS_CLOSED);
@ -2377,7 +2372,7 @@ tcp_drop(struct tcpcb *tp, int errno)
void
tcp_discardcb(struct tcpcb *tp)
{
struct inpcb *inp = tp->t_inpcb;
struct inpcb *inp = tptoinpcb(tp);
INP_WLOCK_ASSERT(inp);
@ -2452,8 +2447,8 @@ tcp_discardcb(struct tcpcb *tp)
bool
tcp_freecb(struct tcpcb *tp)
{
struct inpcb *inp = tp->t_inpcb;
struct socket *so = inp->inp_socket;
struct inpcb *inp = tptoinpcb(tp);
struct socket *so = tptosocket(tp);
#ifdef INET6
bool isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif
@ -2544,8 +2539,8 @@ tcp_freecb(struct tcpcb *tp)
struct tcpcb *
tcp_close(struct tcpcb *tp)
{
struct inpcb *inp = tp->t_inpcb;
struct socket *so;
struct inpcb *inp = tptoinpcb(tp);
struct socket *so = tptosocket(tp);
INP_WLOCK_ASSERT(inp);
@ -2570,7 +2565,6 @@ tcp_close(struct tcpcb *tp)
if (tp->t_state != TCPS_CLOSED)
tcp_state_change(tp, TCPS_CLOSED);
KASSERT(inp->inp_socket != NULL, ("tcp_close: inp_socket NULL"));
so = inp->inp_socket;
soisdisconnected(so);
if (inp->inp_flags & INP_SOCKREF) {
inp->inp_flags &= ~INP_SOCKREF;
@ -3463,7 +3457,7 @@ tcp_maxmtu6(struct in_conninfo *inc, struct tcp_ifcap *cap)
void
tcp6_use_min_mtu(struct tcpcb *tp)
{
struct inpcb *inp = tp->t_inpcb;
struct inpcb *inp = tptoinpcb(tp);
INP_WLOCK_ASSERT(inp);
/*

View file

@ -250,11 +250,12 @@ tcp_timer_delack(void *xtp)
{
struct epoch_tracker et;
struct tcpcb *tp = xtp;
struct inpcb *inp;
#if defined(INVARIANTS) || defined(VIMAGE)
struct inpcb *inp = tptoinpcb(tp);
#endif
CURVNET_SET(tp->t_vnet);
inp = tp->t_inpcb;
KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
INP_WLOCK(inp);
if (callout_pending(&tp->t_timers->tt_delack) ||
!callout_active(&tp->t_timers->tt_delack)) {
@ -283,7 +284,7 @@ static void
tcp_timer_close(struct tcpcb *tp)
{
struct epoch_tracker et;
struct inpcb *inp = tp->t_inpcb;
struct inpcb *inp = tptoinpcb(tp);
INP_WLOCK_ASSERT(inp);
@ -301,7 +302,7 @@ static void
tcp_timer_drop(struct tcpcb *tp)
{
struct epoch_tracker et;
struct inpcb *inp = tp->t_inpcb;
struct inpcb *inp = tptoinpcb(tp);
INP_WLOCK_ASSERT(inp);
@ -316,21 +317,20 @@ void
tcp_timer_2msl(void *xtp)
{
struct tcpcb *tp = xtp;
struct inpcb *inp;
struct inpcb *inp = tptoinpcb(tp);
CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
int ostate;
ostate = tp->t_state;
#endif
inp = tp->t_inpcb;
KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
INP_WLOCK(inp);
tcp_log_end_status(tp, TCP_EI_STATUS_2MSL);
tcp_free_sackholes(tp);
if (callout_pending(&tp->t_timers->tt_2msl) ||
!callout_active(&tp->t_timers->tt_2msl)) {
INP_WUNLOCK(tp->t_inpcb);
INP_WUNLOCK(inp);
CURVNET_RESTORE();
return;
}
@ -359,8 +359,8 @@ tcp_timer_2msl(void *xtp)
CURVNET_RESTORE();
return;
} else if (tp->t_state == TCPS_FIN_WAIT_2 &&
tcp_fast_finwait2_recycle && tp->t_inpcb->inp_socket &&
(tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) {
tcp_fast_finwait2_recycle && inp->inp_socket &&
(inp->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) {
TCPSTAT_INC(tcps_finwait2_drops);
tcp_timer_close(tp);
CURVNET_RESTORE();
@ -377,7 +377,7 @@ tcp_timer_2msl(void *xtp)
}
#ifdef TCPDEBUG
if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
if (tptosocket(tp)->so_options & SO_DEBUG)
tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
PRU_SLOWTIMO);
#endif
@ -390,18 +390,17 @@ tcp_timer_2msl(void *xtp)
void
tcp_timer_keep(void *xtp)
{
struct tcpcb *tp = xtp;
struct tcptemp *t_template;
struct inpcb *inp;
struct epoch_tracker et;
struct tcpcb *tp = xtp;
struct inpcb *inp = tptoinpcb(tp);
struct tcptemp *t_template;
CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
int ostate;
ostate = tp->t_state;
#endif
inp = tp->t_inpcb;
KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
INP_WLOCK(inp);
if (callout_pending(&tp->t_timers->tt_keep) ||
!callout_active(&tp->t_timers->tt_keep)) {
@ -494,7 +493,7 @@ tcp_timer_keep(void *xtp)
tp = tcp_drop(tp, ETIMEDOUT);
#ifdef TCPDEBUG
if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
if (tp != NULL && (tptosocket(tp)->so_options & SO_DEBUG))
tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
PRU_SLOWTIMO);
#endif
@ -533,9 +532,11 @@ tcp_maxunacktime_check(struct tcpcb *tp)
void
tcp_timer_persist(void *xtp)
{
struct tcpcb *tp = xtp;
struct inpcb *inp;
struct epoch_tracker et;
struct tcpcb *tp = xtp;
#if defined(INVARIANTS) || defined(VIMAGE)
struct inpcb *inp = tptoinpcb(tp);
#endif
bool progdrop;
int outrv;
CURVNET_SET(tp->t_vnet);
@ -544,8 +545,7 @@ tcp_timer_persist(void *xtp)
ostate = tp->t_state;
#endif
inp = tp->t_inpcb;
KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
INP_WLOCK(inp);
if (callout_pending(&tp->t_timers->tt_persist) ||
!callout_active(&tp->t_timers->tt_persist)) {
@ -605,7 +605,7 @@ tcp_timer_persist(void *xtp)
tp->t_flags &= ~TF_FORCEDATA;
#ifdef TCPDEBUG
if (tp != NULL && tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
if (tp != NULL && tptosocket(tp)->so_options & SO_DEBUG)
tcp_trace(TA_USER, ostate, tp, NULL, NULL, PRU_SLOWTIMO);
#endif
TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
@ -617,19 +617,18 @@ tcp_timer_persist(void *xtp)
void
tcp_timer_rexmt(void * xtp)
{
struct epoch_tracker et;
struct tcpcb *tp = xtp;
CURVNET_SET(tp->t_vnet);
struct inpcb *inp = tptoinpcb(tp);
int rexmt, outrv;
struct inpcb *inp;
struct epoch_tracker et;
bool isipv6;
#ifdef TCPDEBUG
int ostate;
ostate = tp->t_state;
#endif
inp = tp->t_inpcb;
KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
INP_WLOCK(inp);
if (callout_pending(&tp->t_timers->tt_rexmt) ||
!callout_active(&tp->t_timers->tt_rexmt)) {
@ -722,7 +721,7 @@ tcp_timer_rexmt(void * xtp)
* ESTABLISHED state.
*/
#ifdef INET6
isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? true : false;
isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false;
#else
isipv6 = false;
#endif
@ -865,11 +864,11 @@ tcp_timer_rexmt(void * xtp)
*/
if (tp->t_rxtshift > TCP_RTT_INVALIDATE) {
#ifdef INET6
if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
in6_losing(tp->t_inpcb);
if ((inp->inp_vflag & INP_IPV6) != 0)
in6_losing(inp);
else
#endif
in_losing(tp->t_inpcb);
in_losing(inp);
}
tp->snd_nxt = tp->snd_una;
tp->snd_recover = tp->snd_max;
@ -886,7 +885,7 @@ tcp_timer_rexmt(void * xtp)
NET_EPOCH_ENTER(et);
outrv = tcp_output_nodrop(tp);
#ifdef TCPDEBUG
if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
if (tp != NULL && (tptosocket(tp)->so_options & SO_DEBUG))
tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
PRU_SLOWTIMO);
#endif
@ -901,7 +900,7 @@ tcp_timer_activate(struct tcpcb *tp, uint32_t timer_type, u_int delta)
{
struct callout *t_callout;
callout_func_t *f_callout;
struct inpcb *inp = tp->t_inpcb;
struct inpcb *inp = tptoinpcb(tp);
int cpu = inp_to_cpuid(inp);
#ifdef TCP_OFFLOAD
@ -1066,10 +1065,12 @@ tcp_timers_unsuspend(struct tcpcb *tp, uint32_t timer_type)
break;
case TT_2MSL:
if (tp->t_timers->tt_flags &= TT_2MSL_SUS) {
struct socket *so = tptosocket(tp);
tp->t_timers->tt_flags &= ~TT_2MSL_SUS;
if ((tp->t_state == TCPS_FIN_WAIT_2) &&
((tp->t_inpcb->inp_socket == NULL) ||
(tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE))) {
(so == NULL || /* XXXGL: needed? */
(so->so_rcv.sb_state & SBS_CANTRCVMORE))) {
/* Start the 2MSL timer */
tcp_timer_activate(tp, TT_2MSL,
(tcp_fast_finwait2_recycle) ?
@ -1085,17 +1086,14 @@ tcp_timers_unsuspend(struct tcpcb *tp, uint32_t timer_type)
static void
tcp_timer_discard(void *ptp)
{
struct inpcb *inp;
struct tcpcb *tp;
struct epoch_tracker et;
struct tcpcb *tp = (struct tcpcb *)ptp;
struct inpcb *inp = tptoinpcb(tp);
tp = (struct tcpcb *)ptp;
CURVNET_SET(tp->t_vnet);
NET_EPOCH_ENTER(et);
inp = tp->t_inpcb;
KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL",
__func__, tp));
INP_WLOCK(inp);
NET_EPOCH_ENTER(et);
KASSERT((tp->t_timers->tt_flags & TT_STOPPED) != 0,
("%s: tcpcb has to be stopped here", __func__));
if (--tp->t_timers->tt_draincnt > 0 ||

View file

@ -115,7 +115,7 @@ SYSCTL_BOOL(_net_inet_tcp, OID_AUTO, nolocaltimewait,
void
tcp_twstart(struct tcpcb *tp)
{
struct inpcb *inp = tp->t_inpcb;
struct inpcb *inp = tptoinpcb(tp);
#ifdef INET6
bool isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
#endif

View file

@ -1476,8 +1476,8 @@ struct protosw tcp6_protosw = {
static int
tcp_connect(struct tcpcb *tp, struct sockaddr *nam, struct thread *td)
{
struct inpcb *inp = tp->t_inpcb, *oinp;
struct socket *so = inp->inp_socket;
struct inpcb *inp = tptoinpcb(tp), *oinp;
struct socket *so = tptosocket(tp);
struct in_addr laddr;
u_short lport;
int error;
@ -1549,7 +1549,7 @@ tcp_connect(struct tcpcb *tp, struct sockaddr *nam, struct thread *td)
static int
tcp6_connect(struct tcpcb *tp, struct sockaddr *nam, struct thread *td)
{
struct inpcb *inp = tp->t_inpcb;
struct inpcb *inp = tptoinpcb(tp);
int error;
INP_WLOCK_ASSERT(inp);
@ -1597,7 +1597,7 @@ static void
tcp_fill_info(struct tcpcb *tp, struct tcp_info *ti)
{
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
bzero(ti, sizeof(*ti));
ti->tcpi_state = tp->t_state;
@ -1815,7 +1815,7 @@ tcp_ctloutput_set(struct inpcb *inp, struct sockopt *sopt)
}
#ifdef TCPHPTS
/* Assure that we are not on any hpts */
tcp_hpts_remove(tp->t_inpcb);
tcp_hpts_remove(tptoinpcb(tp));
#endif
if (blk->tfb_tcp_fb_init) {
error = (*blk->tfb_tcp_fb_init)(tp);
@ -2726,8 +2726,8 @@ tcp_default_ctloutput(struct inpcb *inp, struct sockopt *sopt)
static void
tcp_disconnect(struct tcpcb *tp)
{
struct inpcb *inp = tp->t_inpcb;
struct socket *so = inp->inp_socket;
struct inpcb *inp = tptoinpcb(tp);
struct socket *so = tptosocket(tp);
NET_EPOCH_ASSERT();
INP_WLOCK_ASSERT(inp);
@ -2770,7 +2770,7 @@ tcp_usrclosed(struct tcpcb *tp)
{
NET_EPOCH_ASSERT();
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
switch (tp->t_state) {
case TCPS_LISTEN:
@ -2805,7 +2805,7 @@ tcp_usrclosed(struct tcpcb *tp)
if (tp->t_acktime == 0)
tp->t_acktime = ticks;
if (tp->t_state >= TCPS_FIN_WAIT_2) {
soisdisconnected(tp->t_inpcb->inp_socket);
soisdisconnected(tptosocket(tp));
/* Prevent the connection hanging in FIN_WAIT_2 forever. */
if (tp->t_state == TCPS_FIN_WAIT_2) {
int timeout;

View file

@ -392,6 +392,11 @@ TAILQ_HEAD(tcp_funchead, tcp_function);
struct tcpcb * tcp_drop(struct tcpcb *, int);
#ifdef _NETINET_IN_PCB_H_
#define intotcpcb(inp) ((struct tcpcb *)(inp)->inp_ppcb)
#define sototcpcb(so) intotcpcb(sotoinpcb(so))
#define tptoinpcb(tp) tp->t_inpcb
#define tptosocket(tp) tp->t_inpcb->inp_socket
/*
* tcp_output()
* Handles tcp_drop request from advanced stacks and reports that inpcb is
@ -401,9 +406,10 @@ struct tcpcb * tcp_drop(struct tcpcb *, int);
static inline int
tcp_output(struct tcpcb *tp)
{
struct inpcb *inp = tptoinpcb(tp);
int rv;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(inp);
rv = tp->t_fb->tfb_tcp_output(tp);
if (rv < 0) {
@ -412,7 +418,7 @@ tcp_output(struct tcpcb *tp)
tp->t_fb->tfb_tcp_block_name, tp));
tp = tcp_drop(tp, -rv);
if (tp)
INP_WUNLOCK(tp->t_inpcb);
INP_WUNLOCK(inp);
}
return (rv);
@ -426,9 +432,10 @@ tcp_output(struct tcpcb *tp)
static inline int
tcp_output_unlock(struct tcpcb *tp)
{
struct inpcb *inp = tptoinpcb(tp);
int rv;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(inp);
rv = tp->t_fb->tfb_tcp_output(tp);
if (rv < 0) {
@ -438,9 +445,9 @@ tcp_output_unlock(struct tcpcb *tp)
rv = -rv;
tp = tcp_drop(tp, rv);
if (tp)
INP_WUNLOCK(tp->t_inpcb);
INP_WUNLOCK(inp);
} else
INP_WUNLOCK(tp->t_inpcb);
INP_WUNLOCK(inp);
return (rv);
}
@ -460,7 +467,7 @@ tcp_output_nodrop(struct tcpcb *tp)
{
int rv;
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(tptoinpcb(tp));
rv = tp->t_fb->tfb_tcp_output(tp);
KASSERT(rv >= 0 || tp->t_fb->tfb_flags & TCP_FUNC_OUTPUT_CANDROP,
@ -477,15 +484,16 @@ tcp_output_nodrop(struct tcpcb *tp)
static inline int
tcp_unlock_or_drop(struct tcpcb *tp, int tcp_output_retval)
{
struct inpcb *inp = tptoinpcb(tp);
INP_WLOCK_ASSERT(tp->t_inpcb);
INP_WLOCK_ASSERT(inp);
if (tcp_output_retval < 0) {
tcp_output_retval = -tcp_output_retval;
if (tcp_drop(tp, tcp_output_retval) != NULL)
INP_WUNLOCK(tp->t_inpcb);
INP_WUNLOCK(inp);
} else
INP_WUNLOCK(tp->t_inpcb);
INP_WUNLOCK(inp);
return (tcp_output_retval);
}
@ -640,9 +648,6 @@ struct tcp_ifcap {
struct in_conninfo;
#endif /* _NETINET_IN_PCB_H_ */
#define intotcpcb(ip) ((struct tcpcb *)(ip)->inp_ppcb)
#define sototcpcb(so) (intotcpcb(sotoinpcb(so)))
/*
* The smoothed round-trip time and estimated variance
* are stored as fixed point numbers scaled by the values below.

View file

@ -239,7 +239,7 @@ toe_listen_start(struct inpcb *inp, void *arg)
static void
toe_listen_start_event(void *arg __unused, struct tcpcb *tp)
{
struct inpcb *inp = tp->t_inpcb;
struct inpcb *inp = tptoinpcb(tp);
INP_WLOCK_ASSERT(inp);
KASSERT(tp->t_state == TCPS_LISTEN,
@ -253,7 +253,7 @@ toe_listen_stop_event(void *arg __unused, struct tcpcb *tp)
{
struct toedev *tod;
#ifdef INVARIANTS
struct inpcb *inp = tp->t_inpcb;
struct inpcb *inp = tptoinpcb(tp);
#endif
INP_WLOCK_ASSERT(inp);