From 968be23ceaca1f402dfad0a30a8da4649ee32940 Mon Sep 17 00:00:00 2001
From: Magnus Karlsson <magnus.karlsson@intel.com>
Date: Wed, 2 Sep 2020 11:06:09 +0200
Subject: [PATCH] xsk: Fix possible segfault at xskmap entry insertion

Fix a possible segfault when an entry is inserted into the xskmap. This
can happen if the socket is in a state where the umem has been set up
and the Rx ring created, but the socket has yet to be bound to a
device. In this case the pool has not yet been created and we cannot
dereference it to check for the existence of the fill ring. Fix this by
removing the xsk_is_setup_for_bpf_map function altogether. Once upon a
time, it was used to make sure that the Rx and fill rings were set up
before the driver could call xsk_rcv, since there were no tests for the
existence of these rings in the data path. But these days, we have a
state variable that we test instead. When it is XSK_BOUND, everything
has been set up correctly and the socket has been bound, so there is no
reason to keep the xsk_is_setup_for_bpf_map function.
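
For reference, the data path check this relies on looks roughly like
the sketch below. It follows the shape of the existing xsk_is_bound()
and xsk_rcv() helpers in net/xdp/xsk.c, but the exact bodies here are
illustrative only and not part of this patch:

	/* Sketch: the data path only proceeds once bind() has completed,
	 * so the rings and the pool are guaranteed to exist by then.
	 */
	static bool xsk_is_bound(struct xdp_sock *xs)
	{
		if (READ_ONCE(xs->state) == XSK_BOUND) {
			/* Matches the smp_wmb() in the bind() path. */
			smp_rmb();
			return true;
		}
		return false;
	}

	static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
	{
		if (!xsk_is_bound(xs))
			return -EINVAL;
		/* ... rings and pool are known to exist here ... */
		return 0;
	}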

Fixes: 7361f9c3d719 ("xsk: Move fill and completion rings to buffer pool")
Reported-by: syzbot+febe51d44243fbc564ee@syzkaller.appspotmail.com
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/1599037569-26690-1-git-send-email-magnus.karlsson@intel.com
---
 net/xdp/xsk.c    | 6 ------
 net/xdp/xsk.h    | 1 -
 net/xdp/xskmap.c | 5 -----
 3 files changed, 12 deletions(-)

diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 5eb6662f562af..07c32276c527a 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -33,12 +33,6 @@
 
 static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
 
-bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
-{
-	return READ_ONCE(xs->rx) &&  READ_ONCE(xs->umem) &&
-		(xs->pool->fq || READ_ONCE(xs->fq_tmp));
-}
-
 void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
 {
 	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
diff --git a/net/xdp/xsk.h b/net/xdp/xsk.h
index da1f73e43924a..b9e896cee5bbb 100644
--- a/net/xdp/xsk.h
+++ b/net/xdp/xsk.h
@@ -39,7 +39,6 @@ static inline struct xdp_sock *xdp_sk(struct sock *sk)
 	return (struct xdp_sock *)sk;
 }
 
-bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
 void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
 			     struct xdp_sock **map_entry);
 int xsk_map_inc(struct xsk_map *map);
diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c
index 2a4fd66771557..0c5df593bc567 100644
--- a/net/xdp/xskmap.c
+++ b/net/xdp/xskmap.c
@@ -185,11 +185,6 @@ static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
 
 	xs = (struct xdp_sock *)sock->sk;
 
-	if (!xsk_is_setup_for_bpf_map(xs)) {
-		sockfd_put(sock);
-		return -EOPNOTSUPP;
-	}
-
 	map_entry = &m->xsk_map[i];
 	node = xsk_map_node_alloc(m, map_entry);
 	if (IS_ERR(node)) {
-- 
GitLab