tgupdate: merge t/upstream-net base into t/upstream-net
matttbe committed Feb 20, 2023
2 parents 67a9b5d + 87de69d commit 1231bdd
Showing 3 changed files with 86 additions and 28 deletions.
27 changes: 10 additions & 17 deletions net/mptcp/protocol.c
@@ -2357,6 +2357,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
/* otherwise tcp will dispose of the ssk and subflow ctx */
if (ssk->sk_state == TCP_LISTEN) {
tcp_set_state(ssk, TCP_CLOSE);
mptcp_subflow_queue_clean(sk, ssk);
inet_csk_listen_stop(ssk);
mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
}
@@ -2399,10 +2400,9 @@ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
return 0;
}

static void __mptcp_close_subflow(struct sock *sk)
static void __mptcp_close_subflow(struct mptcp_sock *msk)
{
struct mptcp_subflow_context *subflow, *tmp;
struct mptcp_sock *msk = mptcp_sk(sk);

might_sleep();

@@ -2416,15 +2416,7 @@ static void __mptcp_close_subflow(struct sock *sk)
if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
continue;

mptcp_close_ssk(sk, ssk, subflow);
}

/* if the MPC subflow has been closed before the msk is accepted,
* msk will never be accept-ed, close it now
*/
if (!msk->first && msk->in_accept_queue) {
sock_set_flag(sk, SOCK_DEAD);
inet_sk_state_store(sk, TCP_CLOSE);
mptcp_close_ssk((struct sock *)msk, ssk, subflow);
}
}

@@ -2633,9 +2625,6 @@ static void mptcp_worker(struct work_struct *work)
__mptcp_check_send_data_fin(sk);
mptcp_check_data_fin(sk);

if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
__mptcp_close_subflow(sk);

/* There is no point in keeping around an orphaned sk timedout or
* closed, but we need the msk around to reply to incoming DATA_FIN,
* even if it is orphaned and in FIN_WAIT2 state
@@ -2651,6 +2640,9 @@
}
}

if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
__mptcp_close_subflow(msk);

if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
__mptcp_retrans(sk);

@@ -3085,7 +3077,6 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
msk->local_key = subflow_req->local_key;
msk->token = subflow_req->token;
msk->subflow = NULL;
msk->in_accept_queue = 1;
WRITE_ONCE(msk->fully_established, false);
if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
WRITE_ONCE(msk->csum_enabled, true);
@@ -3105,7 +3096,8 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
security_inet_csk_clone(nsk, req);
bh_unlock_sock(nsk);

/* note: the newly allocated socket refcount is 2 now */
/* keep a single reference */
__sock_put(nsk);
return nsk;
}

@@ -3161,6 +3153,8 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
goto out;
}

/* acquire the 2nd reference for the owning socket */
sock_hold(new_mptcp_sock);
newsk = new_mptcp_sock;
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
} else {
@@ -3708,7 +3702,6 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
struct sock *newsk = newsock->sk;

set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags);
msk->in_accept_queue = 0;

lock_sock(newsk);

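
Alongside the listener cleanup, the protocol.c hunks above rework the msk reference counting: mptcp_sk_clone() now drops the clone back to a single reference via __sock_put(), and the second reference for the owning socket is taken only in mptcp_accept(), via sock_hold(). The sketch below is a userspace toy model of that scheme (hypothetical names, plain C11 atomics, not kernel code), just to make the ownership explicit:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* toy stand-in for the msk's sk_refcnt handling */
struct toy_msk {
	atomic_int refcnt;
};

static struct toy_msk *toy_msk_clone(void)
{
	struct toy_msk *msk = malloc(sizeof(*msk));

	if (!msk)
		abort();

	/* mptcp_sk_clone(): "keep a single reference" */
	atomic_init(&msk->refcnt, 1);
	return msk;
}

static void toy_msk_put(struct toy_msk *msk)
{
	/* free on the last put, like sock_put() */
	if (atomic_fetch_sub(&msk->refcnt, 1) == 1) {
		printf("msk freed\n");
		free(msk);
	}
}

static struct toy_msk *toy_msk_accept(struct toy_msk *msk)
{
	/* mptcp_accept(): "acquire the 2nd reference for the owning socket" */
	atomic_fetch_add(&msk->refcnt, 1);
	return msk;
}

int main(void)
{
	/* accepted msk: freed only after both references are dropped */
	struct toy_msk *accepted = toy_msk_accept(toy_msk_clone());

	toy_msk_put(accepted);		/* accept-queue side */
	toy_msk_put(accepted);		/* owning socket side, frees it */

	/* never-accepted msk: dropping the single reference is enough */
	toy_msk_put(toy_msk_clone());
	return 0;
}

The point of the single-reference state is that a never-accepted msk can be disposed of by dropping just the queue-side reference, which is what the listener-shutdown path in subflow.c below relies on.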
4 changes: 2 additions & 2 deletions net/mptcp/protocol.h
@@ -295,8 +295,7 @@ struct mptcp_sock {
u8 recvmsg_inq:1,
cork:1,
nodelay:1,
fastopening:1,
in_accept_queue:1;
fastopening:1;
int connect_flags;
struct work_struct work;
struct sk_buff *ooo_last_skb;
@@ -629,6 +628,7 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
struct mptcp_subflow_context *subflow);
void __mptcp_subflow_send_ack(struct sock *ssk);
void mptcp_subflow_reset(struct sock *ssk);
void mptcp_subflow_queue_clean(struct sock *sk, struct sock *ssk);
void mptcp_sock_graft(struct sock *sk, struct socket *parent);
struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
bool __mptcp_close(struct sock *sk, long timeout);
83 changes: 74 additions & 9 deletions net/mptcp/subflow.c
@@ -692,10 +692,9 @@ static bool subflow_hmac_valid(const struct request_sock *req,

static void mptcp_force_close(struct sock *sk)
{
/* the msk is not yet exposed to user-space, and refcount is 2 */
/* the msk is not yet exposed to user-space */
inet_sk_state_store(sk, TCP_CLOSE);
sk_common_release(sk);
sock_put(sk);
}

static void subflow_ulp_fallback(struct sock *sk,
@@ -1801,6 +1800,79 @@ static void subflow_state_change(struct sock *sk)
}
}

void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
{
struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
struct mptcp_sock *msk, *next, *head = NULL;
struct request_sock *req;

/* build a list of all unaccepted mptcp sockets */
spin_lock_bh(&queue->rskq_lock);
for (req = queue->rskq_accept_head; req; req = req->dl_next) {
struct mptcp_subflow_context *subflow;
struct sock *ssk = req->sk;
struct mptcp_sock *msk;

if (!sk_is_mptcp(ssk))
continue;

subflow = mptcp_subflow_ctx(ssk);
if (!subflow || !subflow->conn)
continue;

/* skip if already in list */
msk = mptcp_sk(subflow->conn);
if (msk->dl_next || msk == head)
continue;

msk->dl_next = head;
head = msk;
}
spin_unlock_bh(&queue->rskq_lock);
if (!head)
return;

/* can't acquire the msk socket lock under the subflow one,
* or will cause ABBA deadlock
*/
release_sock(listener_ssk);

for (msk = head; msk; msk = next) {
struct sock *sk = (struct sock *)msk;
bool do_cancel_work;

sock_hold(sk);
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
next = msk->dl_next;
msk->first = NULL;
msk->dl_next = NULL;

do_cancel_work = __mptcp_close(sk, 0);
release_sock(sk);
if (do_cancel_work) {
/* lockdep will report a false positive ABBA deadlock
* between cancel_work_sync and the listener socket.
* The involved locks belong to different sockets WRT
* the existing AB chain.
* Using a per socket key is problematic as key
* deregistration requires process context and must be
* performed at socket disposal time, in atomic
* context.
* Just tell lockdep to consider the listener socket
* released here.
*/
mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
mptcp_cancel_work(sk);
mutex_acquire(&listener_sk->sk_lock.dep_map,
SINGLE_DEPTH_NESTING, 0, _RET_IP_);
}
sock_put(sk);
}

/* we are still under the listener msk socket lock */
lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
}

static int subflow_ulp_init(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1857,13 +1929,6 @@ static void subflow_ulp_release(struct sock *ssk)
* when the subflow is still unaccepted
*/
release = ctx->disposable || list_empty(&ctx->node);

/* inet_child_forget() does not call sk_state_change(),
* explicitly trigger the socket close machinery
*/
if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW,
&mptcp_sk(sk)->flags))
mptcp_schedule_work(sk);
sock_put(sk);
}

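
The new mptcp_subflow_queue_clean() above handles msk sockets that were created by incoming connections but never returned by accept() before the listener went away. For reference, that situation is easy to produce from user space; the following is a minimal sketch of the triggering scenario only (assumptions: a Linux kernel with CONFIG_MPTCP, loopback port 12345 free; this program is not part of the commit):

#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262	/* provided by <netinet/in.h> on newer systems */
#endif

int main(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(12345),
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	int listener, client;

	listener = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
	if (listener < 0 ||
	    bind(listener, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(listener, 8) < 0) {
		perror("listener");
		return 1;
	}

	/* the connect() completes in-kernel: a new msk now sits in the
	 * listener's accept queue without ever being accept()ed
	 */
	client = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
	if (client < 0 ||
	    connect(client, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("client");
		return 1;
	}

	/* close the listener without ever calling accept() */
	close(listener);
	close(client);
	return 0;
}

Closing the listener while the connection is still queued is exactly the path that reaches __mptcp_close_ssk() with a TCP_LISTEN ssk, where the new mptcp_subflow_queue_clean() call walks the accept queue and closes the orphaned msk sockets.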
