Daniel Borkmann says:

====================
pull-request: bpf 2022-04-27

We've added 5 non-merge commits during the last 20 day(s) which contain
a total of 6 files changed, 34 insertions(+), 12 deletions(-).

The main changes are:

1) Fix xsk sockets when rx and tx are separately bound to the same umem; also
   fix xsk copy mode combined with busy poll, from Maciej Fijalkowski.

2) Fix BPF tunnel/collect_md helpers with bpf_xmit lwt hook usage which triggered
   a crash due to invalid metadata_dst access, from Eyal Birger.

3) Fix release of page pool in XDP live packet mode, from Toke Høiland-Jørgensen.

4) Fix potential NULL pointer dereference in kretprobes, from Adam Zabrocki.

   (Masami & Steven preferred this small fix to be routed via bpf tree given it's
    follow-up fix to Masami's rethook work that went via bpf earlier, too.)

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  xsk: Fix possible crash when multiple sockets are created
  kprobes: Fix KRETPROBES when CONFIG_KRETPROBE_ON_RETHOOK is set
  bpf, lwt: Fix crash when using bpf_skb_set_tunnel_key() from bpf_xmit lwt hook
  bpf: Fix release of page_pool in BPF_PROG_RUN in test runner
  xsk: Fix l2fwd for copy mode + busy poll combo
====================

Link: https://lore.kernel.org/r/20220427212748.9576-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
kuba-moo committed Apr 27, 2022 (2 parents: 7b5148b + ba3beec; commit: 347cb5d)
Showing 6 changed files with 34 additions and 12 deletions.
1 change: 1 addition & 0 deletions include/net/xsk_buff_pool.h

@@ -97,6 +97,7 @@ int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
 		  u16 queue_id, u16 flags);
 int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
 			 struct net_device *dev, u16 queue_id);
+int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
 void xp_destroy(struct xsk_buff_pool *pool);
 void xp_get_pool(struct xsk_buff_pool *pool);
 bool xp_put_pool(struct xsk_buff_pool *pool);
2 changes: 1 addition & 1 deletion kernel/kprobes.c

@@ -2126,7 +2126,7 @@ static void kretprobe_rethook_handler(struct rethook_node *rh, void *data,
 	struct kprobe_ctlblk *kcb;

 	/* The data must NOT be null. This means rethook data structure is broken. */
-	if (WARN_ON_ONCE(!data))
+	if (WARN_ON_ONCE(!data) || !rp->handler)
 		return;

 	__this_cpu_write(current_kprobe, &rp->kp);
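For context on the new guard: it is legal to register a kretprobe with only an entry handler, leaving .handler NULL, and on CONFIG_KRETPROBE_ON_RETHOOK kernels the handler above then ran with rp->handler == NULL. A minimal sketch of a module that exercises that case; the probed symbol and all names here are illustrative assumptions, not part of the commit:

/* Hedged sketch: kretprobe with an entry handler only; .handler stays NULL. */
#include <linux/module.h>
#include <linux/kprobes.h>

static int my_entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("entered probed function\n");
	return 0;	/* 0 arms the return hook for this instance */
}

static struct kretprobe my_kretprobe = {
	.kp.symbol_name	= "kernel_clone",	/* assumption: any probe target */
	.entry_handler	= my_entry_handler,
	/* .handler deliberately left NULL: the case the new check guards */
	.maxactive	= 20,
};

static int __init rp_init(void)
{
	return register_kretprobe(&my_kretprobe);
}

static void __exit rp_exit(void)
{
	unregister_kretprobe(&my_kretprobe);
}

module_init(rp_init);
module_exit(rp_exit);
MODULE_LICENSE("GPL");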
5 changes: 3 additions & 2 deletions net/bpf/test_run.c

@@ -108,6 +108,7 @@ struct xdp_test_data {
 	struct page_pool *pp;
 	struct xdp_frame **frames;
 	struct sk_buff **skbs;
+	struct xdp_mem_info mem;
 	u32 batch_size;
 	u32 frame_cnt;
 };
@@ -147,7 +148,6 @@ static void xdp_test_run_init_page(struct page *page, void *arg)

 static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
 {
-	struct xdp_mem_info mem = {};
 	struct page_pool *pp;
 	int err = -ENOMEM;
 	struct page_pool_params pp_params = {
@@ -174,7 +174,7 @@ static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
 	}

 	/* will copy 'mem.id' into pp->xdp_mem_id */
-	err = xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_POOL, pp);
+	err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
 	if (err)
 		goto err_mmodel;

@@ -202,6 +202,7 @@ static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)

 static void xdp_test_run_teardown(struct xdp_test_data *xdp)
 {
+	xdp_unreg_mem_model(&xdp->mem);
 	page_pool_destroy(xdp->pp);
 	kfree(xdp->frames);
 	kfree(xdp->skbs);
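The reference that previously went undropped is only taken on the XDP live-frames path: registering the page_pool as an XDP memory model pins it, so without the xdp_unreg_mem_model() call in teardown, page_pool_destroy() could never actually release the pool. A hedged user-space sketch of driving that path through libbpf; prog_fd, the packet buffer, and all values are assumptions for illustration:

/* Hedged sketch: repeatedly run an XDP program on real page_pool-backed
 * frames, the mode whose teardown the fix above completes.
 */
#include <bpf/bpf.h>
#include <linux/bpf.h>

int run_live_frames(int prog_fd, void *pkt, __u32 pkt_len)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = pkt,
		.data_size_in = pkt_len,
		.repeat = 1000,
		/* live frames: test_run builds a page_pool and registers
		 * it as an XDP memory model for each invocation
		 */
		.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
	);

	return bpf_prog_test_run_opts(prog_fd, &opts);
}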
7 changes: 3 additions & 4 deletions net/core/lwt_bpf.c

@@ -159,10 +159,8 @@ static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 	return dst->lwtstate->orig_output(net, sk, skb);
 }

-static int xmit_check_hhlen(struct sk_buff *skb)
+static int xmit_check_hhlen(struct sk_buff *skb, int hh_len)
 {
-	int hh_len = skb_dst(skb)->dev->hard_header_len;
-
 	if (skb_headroom(skb) < hh_len) {
 		int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

@@ -274,6 +272,7 @@ static int bpf_xmit(struct sk_buff *skb)

 	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
 	if (bpf->xmit.prog) {
+		int hh_len = dst->dev->hard_header_len;
 		__be16 proto = skb->protocol;
 		int ret;

@@ -291,7 +290,7 @@ static int bpf_xmit(struct sk_buff *skb)
 		/* If the header was expanded, headroom might be too
 		 * small for L2 header to come, expand as needed.
 		 */
-		ret = xmit_check_hhlen(skb);
+		ret = xmit_check_hhlen(skb, hh_len);
 		if (unlikely(ret))
 			return ret;

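Reading hh_len before the program runs matters because the program may swap the skb's dst for a device-less metadata_dst, after which skb_dst(skb)->dev is invalid. A hedged sketch of a BPF program at the lwt xmit hook doing exactly that; the address and tunnel id are illustrative:

/* Hedged sketch: lwt xmit program that installs tunnel metadata. After
 * bpf_skb_set_tunnel_key() the skb's dst is a metadata_dst without a
 * device, so hard_header_len must be read before the program runs.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("lwt_xmit")
int tunnel_xmit(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {};

	key.remote_ipv4 = 0xac100164;	/* 172.16.1.100, illustrative */
	key.tunnel_id = 2;
	key.tunnel_ttl = 64;

	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_ZERO_CSUM_TX))
		return BPF_DROP;

	return BPF_OK;
}

char _license[] SEC("license") = "GPL";

Such a program would typically be attached with iproute2, e.g. ip route add 10.0.0.0/24 encap bpf xmit obj tunnel_xmit.o section lwt_xmit dev vxlan1 (device and object names assumed).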
15 changes: 14 additions & 1 deletion net/xdp/xsk.c

@@ -639,7 +639,7 @@ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 	if (sk_can_busy_loop(sk))
 		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

-	if (xsk_no_wakeup(sk))
+	if (xs->zc && xsk_no_wakeup(sk))
 		return 0;

 	pool = xs->pool;
@@ -967,6 +967,19 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)

 		xp_get_pool(umem_xs->pool);
 		xs->pool = umem_xs->pool;
+
+		/* If underlying shared umem was created without Tx
+		 * ring, allocate Tx descs array that Tx batching API
+		 * utilizes
+		 */
+		if (xs->tx && !xs->pool->tx_descs) {
+			err = xp_alloc_tx_descs(xs->pool, xs);
+			if (err) {
+				xp_put_pool(xs->pool);
+				sockfd_put(sock);
+				goto out_unlock;
+			}
+		}
 	}

 	xdp_get_umem(umem_xs->umem);
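The __xsk_sendmsg() change is about copy mode: there the TX work happens inside sendmsg itself, so skipping the wakeup whenever busy poll was enabled left copy-mode sockets (as in the l2fwd sample) silently stalled. A hedged sketch of the busy-poll setup under which this surfaced; values are illustrative:

/* Hedged sketch: preferred busy polling on an AF_XDP socket. In copy
 * mode, sendto()/sendmsg() must still drive TX even when busy poll is
 * active, which is what the xs->zc check above restores.
 */
#include <sys/socket.h>
#include <errno.h>

#ifndef SO_PREFER_BUSY_POLL
#define SO_PREFER_BUSY_POLL 69		/* assumption: Linux uapi value */
#endif
#ifndef SO_BUSY_POLL_BUDGET
#define SO_BUSY_POLL_BUDGET 70		/* assumption: Linux uapi value */
#endif

static int enable_busy_poll(int xsk_fd)
{
	int opt = 1;

	if (setsockopt(xsk_fd, SOL_SOCKET, SO_PREFER_BUSY_POLL, &opt, sizeof(opt)))
		return -errno;

	opt = 20;	/* busy-poll for up to 20 us */
	if (setsockopt(xsk_fd, SOL_SOCKET, SO_BUSY_POLL, &opt, sizeof(opt)))
		return -errno;

	opt = 64;	/* packets processed per busy-poll cycle */
	if (setsockopt(xsk_fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET, &opt, sizeof(opt)))
		return -errno;

	return 0;
}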
16 changes: 12 additions & 4 deletions net/xdp/xsk_buff_pool.c

@@ -42,6 +42,16 @@ void xp_destroy(struct xsk_buff_pool *pool)
 	kvfree(pool);
 }

+int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs)
+{
+	pool->tx_descs = kvcalloc(xs->tx->nentries, sizeof(*pool->tx_descs),
+				  GFP_KERNEL);
+	if (!pool->tx_descs)
+		return -ENOMEM;
+
+	return 0;
+}
+
 struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
 						struct xdp_umem *umem)
 {
@@ -59,11 +69,9 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
 	if (!pool->heads)
 		goto out;

-	if (xs->tx) {
-		pool->tx_descs = kcalloc(xs->tx->nentries, sizeof(*pool->tx_descs), GFP_KERNEL);
-		if (!pool->tx_descs)
+	if (xs->tx)
+		if (xp_alloc_tx_descs(pool, xs))
 			goto out;
-	}

 	pool->chunk_mask = ~((u64)umem->chunk_size - 1);
 	pool->addrs_cnt = umem->size;
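Taken together, the crash needed a buffer pool created by a socket with no TX ring that is later shared by a TX socket. A hedged user-space sketch of that sequence; ring setup is abbreviated, mmap of the rings and all error handling are elided, and every name and value is illustrative:

/* Hedged sketch: RX-only socket creates the umem/pool, TX-only socket
 * shares it. Before the fix, the shared pool had no tx_descs array and
 * the TX batching path crashed; the xp_alloc_tx_descs() call added to
 * xsk_bind() now covers this case.
 */
#include <sys/socket.h>
#include <linux/if_xdp.h>
#include <stdint.h>

static void rx_then_shared_tx(int ifindex, void *umem_area, __u64 size)
{
	int rx_fd = socket(AF_XDP, SOCK_RAW, 0);
	int tx_fd = socket(AF_XDP, SOCK_RAW, 0);
	struct xdp_umem_reg ureg = {
		.addr = (__u64)(uintptr_t)umem_area,
		.len = size,
		.chunk_size = 2048,
	};
	struct sockaddr_xdp sxdp = {};
	int ring = 2048;

	/* Socket A: umem + fill/completion + RX ring only, so the buffer
	 * pool is created without a tx_descs array.
	 */
	setsockopt(rx_fd, SOL_XDP, XDP_UMEM_REG, &ureg, sizeof(ureg));
	setsockopt(rx_fd, SOL_XDP, XDP_UMEM_FILL_RING, &ring, sizeof(ring));
	setsockopt(rx_fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ring, sizeof(ring));
	setsockopt(rx_fd, SOL_XDP, XDP_RX_RING, &ring, sizeof(ring));
	sxdp.sxdp_family = AF_XDP;
	sxdp.sxdp_ifindex = ifindex;
	sxdp.sxdp_queue_id = 0;
	bind(rx_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));

	/* Socket B: TX ring only, sharing A's umem and buffer pool. */
	setsockopt(tx_fd, SOL_XDP, XDP_TX_RING, &ring, sizeof(ring));
	sxdp.sxdp_flags = XDP_SHARED_UMEM;
	sxdp.sxdp_shared_umem_fd = rx_fd;
	bind(tx_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
}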
