Merge branch 'sk-sk_forward_alloc-fixes'
Kuniyuki Iwashima says:

====================
sk->sk_forward_alloc fixes.

The first patch fixes a negative sk_forward_alloc by adding
sk_rmem_schedule() before skb_set_owner_r(), and the second patch
removes an unnecessary WARN_ON_ONCE().

v2: https://lore.kernel.org/netdev/20230209013329.87879-1-kuniyu@amazon.com/
v1: https://lore.kernel.org/netdev/20230207183718.54520-1-kuniyu@amazon.com/
====================

Link: https://lore.kernel.org/r/20230210002202.81442-1-kuniyu@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 20ab843242
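Editorial context, not part of the series: the reason the ordering matters is that skb_set_owner_r() charges skb->truesize against the socket's receive quota unconditionally, so calling it on a freshly cloned skb without first scheduling that memory is what lets sk->sk_forward_alloc go negative. Roughly (paraphrased from include/net/sock.h around this kernel version; details may differ slightly):

/* Paraphrased for illustration only -- not part of this diff. */
static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);	/* sk_forward_alloc -= skb->truesize */
}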
include/net/sock.h
@@ -2434,6 +2434,19 @@ static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
 	return false;
 }
 
+static inline struct sk_buff *skb_clone_and_charge_r(struct sk_buff *skb, struct sock *sk)
+{
+	skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
+	if (skb) {
+		if (sk_rmem_schedule(sk, skb, skb->truesize)) {
+			skb_set_owner_r(skb, sk);
+			return skb;
+		}
+		__kfree_skb(skb);
+	}
+	return NULL;
+}
+
 static inline void skb_prepare_for_gro(struct sk_buff *skb)
 {
 	if (skb->destructor != sock_wfree) {
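The call-site changes below all take the same shape; as a hypothetical caller sketch (illustration only, not taken from the diff), a single NULL check now covers both failure modes:

	opt_skb = skb_clone_and_charge_r(skb, sk);
	if (!opt_skb) {
		/* Either skb_clone() failed or the receive quota could not be
		 * scheduled; continue without saving the IPv6 pktoptions. */
	}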
net/caif/caif_socket.c
@@ -1015,6 +1015,7 @@ static void caif_sock_destructor(struct sock *sk)
 		return;
 	}
 	sk_stream_kill_queues(&cf_sk->sk);
+	WARN_ON_ONCE(sk->sk_forward_alloc);
 	caif_free_client(&cf_sk->layer);
 }
 
net/core/stream.c
@@ -209,7 +209,6 @@ void sk_stream_kill_queues(struct sock *sk)
 	sk_mem_reclaim_final(sk);
 
 	WARN_ON_ONCE(sk->sk_wmem_queued);
-	WARN_ON_ONCE(sk->sk_forward_alloc);
 
 	/* It is _impossible_ for the backlog to contain anything
 	 * when we get here. All user references to this socket
net/dccp/ipv6.c
@@ -551,11 +551,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);
 	/* Clone pktoptions received with SYN, if we own the req */
 	if (*own_req && ireq->pktopts) {
-		newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
+		newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk);
 		consume_skb(ireq->pktopts);
 		ireq->pktopts = NULL;
-		if (newnp->pktoptions)
-			skb_set_owner_r(newnp->pktoptions, newsk);
 	}
 
 	return newsk;
@@ -615,7 +613,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	   --ANK (980728)
 	 */
 	if (np->rxopt.all)
-		opt_skb = skb_clone(skb, GFP_ATOMIC);
+		opt_skb = skb_clone_and_charge_r(skb, sk);
 
 	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
 		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
@@ -679,7 +677,6 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
 		if (ipv6_opt_accepted(sk, opt_skb,
 				      &DCCP_SKB_CB(opt_skb)->header.h6)) {
-			skb_set_owner_r(opt_skb, sk);
 			memmove(IP6CB(opt_skb),
 				&DCCP_SKB_CB(opt_skb)->header.h6,
 				sizeof(struct inet6_skb_parm));
net/ipv6/tcp_ipv6.c
@@ -1388,14 +1388,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 
 		/* Clone pktoptions received with SYN, if we own the req */
 		if (ireq->pktopts) {
-			newnp->pktoptions = skb_clone(ireq->pktopts,
-						      sk_gfp_mask(sk, GFP_ATOMIC));
+			newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk);
 			consume_skb(ireq->pktopts);
 			ireq->pktopts = NULL;
-			if (newnp->pktoptions) {
+			if (newnp->pktoptions)
 				tcp_v6_restore_cb(newnp->pktoptions);
-				skb_set_owner_r(newnp->pktoptions, newsk);
-			}
 		}
 	} else {
 		if (!req_unhash && found_dup_sk) {
@@ -1467,7 +1464,7 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	   --ANK (980728)
 	 */
 	if (np->rxopt.all)
-		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
+		opt_skb = skb_clone_and_charge_r(skb, sk);
 
 	reason = SKB_DROP_REASON_NOT_SPECIFIED;
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
@@ -1553,7 +1550,6 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 		if (np->repflow)
 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
 		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
-			skb_set_owner_r(opt_skb, sk);
 			tcp_v6_restore_cb(opt_skb);
 			opt_skb = xchg(&np->pktoptions, opt_skb);
 		} else {