devmap.c

    /* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
     *
     * This program is free software; you can redistribute it and/or
     * modify it under the terms of version 2 of the GNU General Public
     * License as published by the Free Software Foundation.
     *
     * This program is distributed in the hope that it will be useful, but
     * WITHOUT ANY WARRANTY; without even the implied warranty of
     * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
     * General Public License for more details.
     */
    
    /* The devmap's primary use is as a backend map for the XDP BPF helper
     * bpf_redirect_map(). Because XDP is mostly concerned with performance,
     * some effort went into ensuring the redirect-map datapath takes no
     * locks. This is a quick note on the details.
     *
     * There are three possible paths into the devmap control plane: bpf
     * syscalls, bpf programs, and driver-side xmit/flush operations. A bpf
     * syscall will invoke an update, delete, or lookup operation. To ensure
     * updates and deletes appear atomic from the datapath side, xchg() is
     * used to modify the netdev_map array. Then, because the datapath does a
     * (read-only) lookup into the netdev_map array from an RCU critical
     * section, call_rcu() is used to defer freeing the old data structures
     * until an RCU grace period has elapsed. This ensures the datapath always
     * sees a valid copy. However, the datapath also does a "flush" operation
     * that pushes any pending packets in the driver outside the RCU critical
     * section. Each bpf_dtab_netdev tracks these pending operations using an
     * atomic per-cpu bitmap, and the bpf_dtab_netdev object will not be
     * destroyed until all bits are cleared, indicating that the outstanding
     * flush operations have completed.
     *
     * BPF syscalls may race with BPF program calls on any of the update,
     * delete or lookup operations. As noted above, the xchg() operation keeps
     * the netdev_map consistent in this case as well. From the devmap's point
     * of view, BPF programs calling into these operations behave just like
     * additional user space threads making system calls.
     */
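    
    /* As an illustration only (not part of this file), an XDP program of this
     * era would typically drive a devmap as sketched below. The map name,
     * section names and the bpf_map_def convention follow the samples/bpf
     * style of the time and are assumptions, not code from this file.
     */
    #if 0
    struct bpf_map_def SEC("maps") tx_port = {
    	.type		= BPF_MAP_TYPE_DEVMAP,
    	.key_size	= sizeof(u32),	/* slot index */
    	.value_size	= sizeof(u32),	/* ifindex of the target device */
    	.max_entries	= 64,
    };
    
    SEC("xdp_redirect")
    int xdp_redirect_prog(struct xdp_md *ctx)
    {
    	/* Redirect the frame to whatever device sits in slot 0; the
    	 * flags argument must be zero in this version of the helper.
    	 */
    	return bpf_redirect_map(&tx_port, 0, 0);
    }
    #endif
    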
    #include <linux/bpf.h>
    #include <linux/jhash.h>
    #include <linux/filter.h>
    #include <linux/rculist_nulls.h>
    #include "percpu_freelist.h"
    #include "bpf_lru_list.h"
    #include "map_in_map.h"
    
    struct bpf_dtab_netdev {
    	struct net_device *dev;
    	int key;
    	struct rcu_head rcu;
    	struct bpf_dtab *dtab;
    };
    
    struct bpf_dtab {
    	struct bpf_map map;
    	struct bpf_dtab_netdev **netdev_map;
    };
    
    static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
    {
    	struct bpf_dtab *dtab;
    	u64 cost;
    	int err;
    
    	/* check sanity of attributes */
    	if (attr->max_entries == 0 || attr->key_size != 4 ||
    	    attr->value_size != 4 || attr->map_flags)
    		return ERR_PTR(-EINVAL);
    
    	/* if value_size is bigger, user space won't be able to
    	 * access the elements.
    	 */
    	if (attr->value_size > KMALLOC_MAX_SIZE)
    		return ERR_PTR(-E2BIG);
    
    	dtab = kzalloc(sizeof(*dtab), GFP_USER);
    	if (!dtab)
    		return ERR_PTR(-ENOMEM);
    
    	/* mandatory map attributes */
    	dtab->map.map_type = attr->map_type;
    	dtab->map.key_size = attr->key_size;
    	dtab->map.value_size = attr->value_size;
    	dtab->map.max_entries = attr->max_entries;
    	dtab->map.map_flags = attr->map_flags;
    
    	err = -ENOMEM;
    
    	/* make sure page count doesn't overflow */
    	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
    	if (cost >= U32_MAX - PAGE_SIZE)
    		goto free_dtab;
    
    	dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
    
    	/* if map size is larger than memlock limit, reject it early */
    	err = bpf_map_precharge_memlock(dtab->map.pages);
    	if (err)
    		goto free_dtab;
    
    	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
    					      sizeof(struct bpf_dtab_netdev *));
    	if (!dtab->netdev_map)
    		goto free_dtab;
    
    	return &dtab->map;
    
    free_dtab:
    	kfree(dtab);
    	return ERR_PTR(err);
    }
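    
    /* For illustration only: from user space, the checks above mean a devmap
     * is created with 4-byte keys, 4-byte values and no flags. A minimal
     * sketch, assuming a raw bpf() syscall wrapper (not defined here):
     */
    #if 0
    union bpf_attr attr = {
    	.map_type	= BPF_MAP_TYPE_DEVMAP,
    	.key_size	= 4,	/* u32 slot index */
    	.value_size	= 4,	/* u32 ifindex */
    	.max_entries	= 64,
    	.map_flags	= 0,	/* any flag is rejected with -EINVAL */
    };
    int map_fd = bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
    #endif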
    
    static void dev_map_free(struct bpf_map *map)
    {
    	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
    	int i;
    
    	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
    	 * so the programs (there can be more than one using this map) have
    	 * been disconnected from events. Wait for outstanding critical
    	 * sections in these programs to complete. The rcu critical section
    	 * only guarantees no further reads against netdev_map. It does
    	 * __not__ ensure pending flush operations (if any) are complete.
    	 */
    	synchronize_rcu();
    
    	for (i = 0; i < dtab->map.max_entries; i++) {
    		struct bpf_dtab_netdev *dev;
    
    		dev = dtab->netdev_map[i];
    		if (!dev)
    			continue;
    
    		dev_put(dev->dev);
    		kfree(dev);
    	}
    
    	/* At this point the bpf program is detached and all pending
    	 * operations _must_ be complete.
    	 */
    	bpf_map_area_free(dtab->netdev_map);
    	kfree(dtab);
    }
    
    static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
    {
    	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
    	u32 index = key ? *(u32 *)key : U32_MAX;
    	u32 *next = (u32 *)next_key;
    
    	if (index >= dtab->map.max_entries) {
    		*next = 0;
    		return 0;
    	}
    
    	if (index == dtab->map.max_entries - 1)
    		return -ENOENT;
    
    	*next = index + 1;
    	return 0;
    }
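    
    /* For illustration only: these semantics give user space a simple
     * iteration loop. Start from an out-of-range key to restart at index 0,
     * then walk until -ENOENT. Sketch, assuming libbpf's
     * bpf_map_get_next_key() wrapper:
     */
    #if 0
    u32 key = U32_MAX, next;
    
    while (bpf_map_get_next_key(map_fd, &key, &next) == 0) {
    	/* next runs 0, 1, ..., max_entries - 1 */
    	key = next;
    }
    #endif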
    
    struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
    {
    	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
    	struct bpf_dtab_netdev *dev;
    
    	if (key >= map->max_entries)
    		return NULL;
    
    	dev = READ_ONCE(dtab->netdev_map[key]);
    	return dev ? dev->dev : NULL;
    }
    
    /* rcu_read_lock (from both syscall and BPF contexts) ensures that if a
     * delete and/or update happens in parallel here, a dev_put() won't
     * happen until after the ifindex has been read.
     */
    static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
    {
    	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
    	struct bpf_dtab_netdev *dev;
    	u32 i = *(u32 *)key;
    
    	if (i >= map->max_entries)
    		return NULL;
    
    	dev = READ_ONCE(dtab->netdev_map[i]);
    	return dev ? &dev->dev->ifindex : NULL;
    }
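    
    /* For illustration only: a lookup from user space therefore returns the
     * ifindex rather than a kernel pointer. Sketch, assuming libbpf's
     * bpf_map_lookup_elem() wrapper:
     */
    #if 0
    u32 key = 0, ifindex;
    
    if (bpf_map_lookup_elem(map_fd, &key, &ifindex) == 0)
    	printf("slot 0 -> ifindex %u\n", ifindex);
    #endif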
    
    static void __dev_map_entry_free(struct rcu_head *rcu)
    {
    	struct bpf_dtab_netdev *old_dev;
    
    	old_dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
    	dev_put(old_dev->dev);
    	kfree(old_dev);
    }
    
    static int dev_map_delete_elem(struct bpf_map *map, void *key)
    {
    	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
    	struct bpf_dtab_netdev *old_dev;
    	int k = *(u32 *)key;
    
    	if (k >= map->max_entries)
    		return -EINVAL;
    
    	/* Use call_rcu() here to ensure any rcu critical sections have
    	 * completed before the entry is freed, but this does not guarantee
    	 * a flush has happened yet, because the driver-side
    	 * rcu_read_lock/unlock only protects the running XDP program.
    	 * However, for pending flush operations the dev and ctx are stored
    	 * in another per-cpu map. Additionally, the driver tear-down
    	 * ensures all soft irqs are complete before the net device is
    	 * removed once its reference count drops to zero.
    	 */
    	old_dev = xchg(&dtab->netdev_map[k], NULL);
    	if (old_dev)
    		call_rcu(&old_dev->rcu, __dev_map_entry_free);
    	return 0;
    }
    
    static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
    				u64 map_flags)
    {
    	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
    	struct net *net = current->nsproxy->net_ns;
    	struct bpf_dtab_netdev *dev, *old_dev;
    	u32 i = *(u32 *)key;
    	u32 ifindex = *(u32 *)value;
    
    	if (unlikely(map_flags > BPF_EXIST))
    		return -EINVAL;
    
    	if (unlikely(i >= dtab->map.max_entries))
    		return -E2BIG;
    
    	if (unlikely(map_flags == BPF_NOEXIST))
    		return -EEXIST;
    
    	if (!ifindex) {
    		dev = NULL;
    	} else {
    		dev = kmalloc(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN);
    		if (!dev)
    			return -ENOMEM;
    
    		dev->dev = dev_get_by_index(net, ifindex);
    		if (!dev->dev) {
    			kfree(dev);
    			return -EINVAL;
    		}
    
    		dev->key = i;
    		dev->dtab = dtab;
    	}
    
    	/* Use call_rcu() here to ensure any rcu critical sections have
    	 * completed; remember that the driver-side flush operation will
    	 * happen before the net device is removed.
    	 */
    	old_dev = xchg(&dtab->netdev_map[i], dev);
    	if (old_dev)
    		call_rcu(&old_dev->rcu, __dev_map_entry_free);
    
    	return 0;
    }
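    
    /* For illustration only: populating a slot from user space stores an
     * ifindex; writing 0, or deleting the key, clears the slot through the
     * xchg()/call_rcu() path above. Sketch, assuming libbpf wrappers and a
     * hypothetical device name:
     */
    #if 0
    u32 key = 0;
    u32 ifindex = if_nametoindex("eth1");	/* hypothetical device */
    
    bpf_map_update_elem(map_fd, &key, &ifindex, 0);	/* install */
    bpf_map_delete_elem(map_fd, &key);		/* remove again */
    #endif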
    
    const struct bpf_map_ops dev_map_ops = {
    	.map_alloc = dev_map_alloc,
    	.map_free = dev_map_free,
    	.map_get_next_key = dev_map_get_next_key,
    	.map_lookup_elem = dev_map_lookup_elem,
    	.map_update_elem = dev_map_update_elem,
    	.map_delete_elem = dev_map_delete_elem,
    };