Commit 4e481908 authored by wenxu's avatar wenxu Committed by David S. Miller
Browse files

flow_offload: move tc indirect block to flow offload



move tc indirect block to flow_offload and rename
it to flow indirect block. The nf_tables code can then
use the indr block architecture.
Signed-off-by: default avatarwenxu <wenxu@ucloud.cn>
Acked-by: default avatarJakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent e4da9102
...@@ -781,9 +781,9 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv, ...@@ -781,9 +781,9 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
{ {
int err; int err;
err = __tc_indr_block_cb_register(netdev, rpriv, err = __flow_indr_block_cb_register(netdev, rpriv,
mlx5e_rep_indr_setup_tc_cb, mlx5e_rep_indr_setup_tc_cb,
rpriv); rpriv);
if (err) { if (err) {
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
...@@ -796,8 +796,8 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv, ...@@ -796,8 +796,8 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv, static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
struct net_device *netdev) struct net_device *netdev)
{ {
__tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb, __flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
rpriv); rpriv);
} }
static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb, static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
......
...@@ -1649,16 +1649,17 @@ int nfp_flower_reg_indir_block_handler(struct nfp_app *app, ...@@ -1649,16 +1649,17 @@ int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
return NOTIFY_OK; return NOTIFY_OK;
if (event == NETDEV_REGISTER) { if (event == NETDEV_REGISTER) {
err = __tc_indr_block_cb_register(netdev, app, err = __flow_indr_block_cb_register(netdev, app,
nfp_flower_indr_setup_tc_cb, nfp_flower_indr_setup_tc_cb,
app); app);
if (err) if (err)
nfp_flower_cmsg_warn(app, nfp_flower_cmsg_warn(app,
"Indirect block reg failed - %s\n", "Indirect block reg failed - %s\n",
netdev->name); netdev->name);
} else if (event == NETDEV_UNREGISTER) { } else if (event == NETDEV_UNREGISTER) {
__tc_indr_block_cb_unregister(netdev, __flow_indr_block_cb_unregister(netdev,
nfp_flower_indr_setup_tc_cb, app); nfp_flower_indr_setup_tc_cb,
app);
} }
return NOTIFY_OK; return NOTIFY_OK;
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/list.h> #include <linux/list.h>
#include <net/flow_dissector.h> #include <net/flow_dissector.h>
#include <linux/rhashtable.h>
struct flow_match { struct flow_match {
struct flow_dissector *dissector; struct flow_dissector *dissector;
...@@ -370,4 +371,32 @@ static inline void flow_block_init(struct flow_block *flow_block) ...@@ -370,4 +371,32 @@ static inline void flow_block_init(struct flow_block *flow_block)
INIT_LIST_HEAD(&flow_block->cb_list); INIT_LIST_HEAD(&flow_block->cb_list);
} }
/* Driver-supplied callback invoked for each indirect block offload request
 * (same signature as a TC setup callback: type is e.g. TC_SETUP_BLOCK).
 */
typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
enum tc_setup_type type, void *type_data);
/* Subsystem hook used to (re)play a block BIND/UNBIND towards one driver
 * callback when registrations and block setup happen out of order.
 */
typedef void flow_indr_block_ing_cmd_t(struct net_device *dev,
flow_indr_block_bind_cb_t *cb,
void *cb_priv,
enum flow_block_command command);
/* Register @cb for indirect block events on @dev; caller holds rtnl.
 * (cb, cb_ident) must be unique per device. Returns 0 or -errno.
 */
int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
flow_indr_block_bind_cb_t *cb,
void *cb_ident);
/* Remove a registration made with __flow_indr_block_cb_register();
 * caller holds rtnl.
 */
void __flow_indr_block_cb_unregister(struct net_device *dev,
flow_indr_block_bind_cb_t *cb,
void *cb_ident);
/* rtnl-taking variants of the two functions above. */
int flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
flow_indr_block_bind_cb_t *cb, void *cb_ident);
void flow_indr_block_cb_unregister(struct net_device *dev,
flow_indr_block_bind_cb_t *cb,
void *cb_ident);
/* Dispatch @bo to every callback registered on @dev and record @cb as the
 * replay hook while the block stays bound.
 */
void flow_indr_block_call(struct net_device *dev,
flow_indr_block_ing_cmd_t *cb,
struct flow_block_offload *bo,
enum flow_block_command command);
#endif /* _NET_FLOW_OFFLOAD_H */ #endif /* _NET_FLOW_OFFLOAD_H */
...@@ -70,15 +70,6 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block) ...@@ -70,15 +70,6 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
return block->q; return block->q;
} }
/* Legacy TC indirect-block registration API (superseded by the
 * flow_indr_block_* API in <net/flow_offload.h>).
 */
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
tc_indr_block_bind_cb_t *cb, void *cb_ident);
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
tc_indr_block_bind_cb_t *cb, void *cb_ident);
void __tc_indr_block_cb_unregister(struct net_device *dev,
tc_indr_block_bind_cb_t *cb, void *cb_ident);
void tc_indr_block_cb_unregister(struct net_device *dev,
tc_indr_block_bind_cb_t *cb, void *cb_ident);
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode); struct tcf_result *res, bool compat_mode);
...@@ -137,32 +128,6 @@ void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb, ...@@ -137,32 +128,6 @@ void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
{ {
} }
/* Stub: no-op success when TC classification offload is compiled out. */
static inline
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
return 0;
}
/* Stub: no-op success when TC classification offload is compiled out. */
static inline
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
return 0;
}
/* Stub: nothing to unregister in the compiled-out configuration. */
static inline
void __tc_indr_block_cb_unregister(struct net_device *dev,
tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}
/* Stub: nothing to unregister in the compiled-out configuration. */
static inline
void tc_indr_block_cb_unregister(struct net_device *dev,
tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}
static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode) struct tcf_result *res, bool compat_mode)
{ {
......
...@@ -23,9 +23,6 @@ struct tcf_walker; ...@@ -23,9 +23,6 @@ struct tcf_walker;
struct module; struct module;
struct bpf_flow_keys; struct bpf_flow_keys;
/* Per-driver callback type for TC indirect block setup requests. */
typedef int tc_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
enum tc_setup_type type, void *type_data);
struct qdisc_rate_table { struct qdisc_rate_table {
struct tc_ratespec rate; struct tc_ratespec rate;
u32 data[256]; u32 data[256];
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <net/flow_offload.h> #include <net/flow_offload.h>
#include <linux/rtnetlink.h>
struct flow_rule *flow_rule_alloc(unsigned int num_actions) struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{ {
...@@ -280,3 +281,217 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f, ...@@ -280,3 +281,217 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
} }
} }
EXPORT_SYMBOL(flow_block_cb_setup_simple); EXPORT_SYMBOL(flow_block_cb_setup_simple);
/* Hashtable mapping a net_device pointer to its flow_indr_block_dev entry. */
static struct rhashtable indr_setup_block_ht;
/* One driver registration: (cb, cb_ident) keyed callback plus its private
 * data, linked into the owning device's cb_list.
 */
struct flow_indr_block_cb {
struct list_head list;
void *cb_priv;
flow_indr_block_bind_cb_t *cb;
void *cb_ident;
};
/* Per-device state: refcounted hashtable node holding all registered
 * callbacks and the subsystem's replay hook for late registrations.
 */
struct flow_indr_block_dev {
struct rhash_head ht_node;
struct net_device *dev;
unsigned int refcnt;
flow_indr_block_ing_cmd_t *block_ing_cmd_cb;
struct list_head cb_list;
};
/* Key is the net_device pointer value itself (not the pointed-to struct). */
static const struct rhashtable_params flow_indr_setup_block_ht_params = {
.key_offset = offsetof(struct flow_indr_block_dev, dev),
.head_offset = offsetof(struct flow_indr_block_dev, ht_node),
.key_len = sizeof(struct net_device *),
};
/* Return the indirect-block entry for @dev, or NULL if none is hashed. */
static struct flow_indr_block_dev *
flow_indr_block_dev_lookup(struct net_device *dev)
{
	struct flow_indr_block_dev *entry;

	entry = rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
				       flow_indr_setup_block_ht_params);
	return entry;
}
/* Look up the per-device entry for @dev, creating and hashing it on first
 * use.  Each successful call takes one reference (drop with
 * flow_indr_block_dev_put()).  Returns NULL on allocation/insertion failure.
 */
static struct flow_indr_block_dev *
flow_indr_block_dev_get(struct net_device *dev)
{
	struct flow_indr_block_dev *indr_dev = flow_indr_block_dev_lookup(dev);

	if (!indr_dev) {
		indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
		if (!indr_dev)
			return NULL;

		INIT_LIST_HEAD(&indr_dev->cb_list);
		indr_dev->dev = dev;
		if (rhashtable_insert_fast(&indr_setup_block_ht,
					   &indr_dev->ht_node,
					   flow_indr_setup_block_ht_params)) {
			kfree(indr_dev);
			return NULL;
		}
	}

	indr_dev->refcnt++;
	return indr_dev;
}
/* Drop one reference on @indr_dev; unhash and free it on the last put. */
static void flow_indr_block_dev_put(struct flow_indr_block_dev *indr_dev)
{
	if (--indr_dev->refcnt == 0) {
		rhashtable_remove_fast(&indr_setup_block_ht,
				       &indr_dev->ht_node,
				       flow_indr_setup_block_ht_params);
		kfree(indr_dev);
	}
}
/* Find the registration matching (@cb, @cb_ident) on @indr_dev, or NULL. */
static struct flow_indr_block_cb *
flow_indr_block_cb_lookup(struct flow_indr_block_dev *indr_dev,
			  flow_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct flow_indr_block_cb *entry;

	list_for_each_entry(entry, &indr_dev->cb_list, list) {
		if (entry->cb == cb && entry->cb_ident == cb_ident)
			return entry;
	}

	return NULL;
}
/* Allocate a new callback registration on @indr_dev.
 * Returns the new entry, ERR_PTR(-EEXIST) if (cb, cb_ident) is already
 * registered, or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct flow_indr_block_cb *
flow_indr_block_cb_add(struct flow_indr_block_dev *indr_dev, void *cb_priv,
		       flow_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct flow_indr_block_cb *entry;

	/* Refuse duplicate registrations of the same identity. */
	if (flow_indr_block_cb_lookup(indr_dev, cb, cb_ident))
		return ERR_PTR(-EEXIST);

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	entry->cb_priv = cb_priv;
	entry->cb = cb;
	entry->cb_ident = cb_ident;
	list_add(&entry->list, &indr_dev->cb_list);

	return entry;
}
/* Unlink @indr_block_cb from its device's list and release its memory. */
static void flow_indr_block_cb_del(struct flow_indr_block_cb *indr_block_cb)
{
	list_del(&indr_block_cb->list);
	kfree(indr_block_cb);
}
/* Register an indirect block callback on @dev.  Caller must hold rtnl
 * (see flow_indr_block_cb_register() for the locked wrapper).  If a block
 * is already bound on the device, the BIND is replayed to the new callback
 * so it sees existing state.  Returns 0 or a negative errno.
 */
int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				  flow_indr_block_bind_cb_t *cb,
				  void *cb_ident)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_get(dev);
	if (!indr_dev)
		return -ENOMEM;

	indr_block_cb = flow_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
	if (IS_ERR(indr_block_cb)) {
		/* Undo the device reference taken above. */
		flow_indr_block_dev_put(indr_dev);
		return PTR_ERR(indr_block_cb);
	}

	if (indr_dev->block_ing_cmd_cb)
		indr_dev->block_ing_cmd_cb(dev, indr_block_cb->cb,
					   indr_block_cb->cb_priv,
					   FLOW_BLOCK_BIND);

	return 0;
}
EXPORT_SYMBOL_GPL(__flow_indr_block_cb_register);
/* rtnl-taking wrapper around __flow_indr_block_cb_register(). */
int flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				flow_indr_block_bind_cb_t *cb,
				void *cb_ident)
{
	int ret;

	rtnl_lock();
	ret = __flow_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
	rtnl_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(flow_indr_block_cb_register);
/* Remove the (@cb, @cb_ident) registration from @dev.  Caller must hold
 * rtnl.  A bound block gets an UNBIND replayed to the departing callback
 * before it is freed; unknown registrations are silently ignored.
 */
void __flow_indr_block_cb_unregister(struct net_device *dev,
				     flow_indr_block_bind_cb_t *cb,
				     void *cb_ident)
{
	struct flow_indr_block_dev *indr_dev;
	struct flow_indr_block_cb *indr_block_cb;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	indr_block_cb = flow_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (!indr_block_cb)
		return;

	if (indr_dev->block_ing_cmd_cb)
		indr_dev->block_ing_cmd_cb(dev, indr_block_cb->cb,
					   indr_block_cb->cb_priv,
					   FLOW_BLOCK_UNBIND);

	flow_indr_block_cb_del(indr_block_cb);
	flow_indr_block_dev_put(indr_dev);
}
EXPORT_SYMBOL_GPL(__flow_indr_block_cb_unregister);
/* rtnl-taking wrapper around __flow_indr_block_cb_unregister(). */
void flow_indr_block_cb_unregister(struct net_device *dev,
				   flow_indr_block_bind_cb_t *cb,
				   void *cb_ident)
{
	rtnl_lock();
	__flow_indr_block_cb_unregister(dev, cb, cb_ident);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(flow_indr_block_cb_unregister);
/* Dispatch the block offload request @bo (as TC_SETUP_BLOCK) to every
 * callback registered on @dev, and stash @cb as the replay hook while the
 * block stays bound so later registrations can be sent a BIND (cleared
 * again on UNBIND).
 *
 * Fix: take @cb as an explicit function pointer (flow_indr_block_ing_cmd_t *)
 * to match the prototype in <net/flow_offload.h>; the previous function-type
 * parameter only worked via implicit parameter adjustment.
 */
void flow_indr_block_call(struct net_device *dev,
			  flow_indr_block_ing_cmd_t *cb,
			  struct flow_block_offload *bo,
			  enum flow_block_command command)
{
	struct flow_indr_block_cb *indr_block_cb;
	struct flow_indr_block_dev *indr_dev;

	indr_dev = flow_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	/* Remember the binder's replay hook only while the block is bound. */
	indr_dev->block_ing_cmd_cb = command == FLOW_BLOCK_BIND
				     ? cb : NULL;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
				  bo);
}
EXPORT_SYMBOL_GPL(flow_indr_block_call);
/* One-time boot initialization of the device -> indirect-block hashtable. */
static int __init init_flow_indr_rhashtable(void)
{
	int err;

	err = rhashtable_init(&indr_setup_block_ht,
			      &flow_indr_setup_block_ht_params);
	return err;
}
subsys_initcall(init_flow_indr_rhashtable);
...@@ -37,6 +37,7 @@ ...@@ -37,6 +37,7 @@
#include <net/tc_act/tc_skbedit.h> #include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h> #include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h> #include <net/tc_act/tc_mpls.h>
#include <net/flow_offload.h>
extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1]; extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
...@@ -545,139 +546,12 @@ static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held) ...@@ -545,139 +546,12 @@ static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
} }
} }
/* Return the tcf_block attached to @dev's ingress qdisc, or NULL when the
 * device has no ingress queue, no sleeping qdisc, or the qdisc's class ops
 * do not expose a tcf_block hook.
 */
static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
{
	const struct Qdisc_class_ops *cops;
	struct Qdisc *qdisc;

	if (!dev_ingress_queue(dev))
		return NULL;

	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
	if (!qdisc)
		return NULL;

	cops = qdisc->ops->cl_ops;
	if (!cops || !cops->tcf_block)
		return NULL;

	return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
}
/* Hashtable mapping a net_device pointer to its tc_indr_block_dev entry. */
static struct rhashtable indr_setup_block_ht;
/* Per-device state: refcounted hashtable node holding all registered
 * TC indirect-block callbacks for that device.
 */
struct tc_indr_block_dev {
struct rhash_head ht_node;
struct net_device *dev;
unsigned int refcnt;
struct list_head cb_list;
};
/* One driver registration: (cb, cb_ident) keyed callback plus its private
 * data, linked into the owning device's cb_list.
 */
struct tc_indr_block_cb {
struct list_head list;
void *cb_priv;
tc_indr_block_bind_cb_t *cb;
void *cb_ident;
};
/* Key is the net_device pointer value itself (not the pointed-to struct). */
static const struct rhashtable_params tc_indr_setup_block_ht_params = {
.key_offset = offsetof(struct tc_indr_block_dev, dev),
.head_offset = offsetof(struct tc_indr_block_dev, ht_node),
.key_len = sizeof(struct net_device *),
};
/* Return the TC indirect-block entry for @dev, or NULL if none is hashed. */
static struct tc_indr_block_dev *
tc_indr_block_dev_lookup(struct net_device *dev)
{
return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
tc_indr_setup_block_ht_params);
}
/* Look up the per-device entry for @dev, creating and hashing it on first
 * use.  Each successful call takes one reference (drop with
 * tc_indr_block_dev_put()).  Returns NULL on allocation/insertion failure.
 */
static struct tc_indr_block_dev *tc_indr_block_dev_get(struct net_device *dev)
{
struct tc_indr_block_dev *indr_dev;
indr_dev = tc_indr_block_dev_lookup(dev);
if (indr_dev)
goto inc_ref;
indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
if (!indr_dev)
return NULL;
INIT_LIST_HEAD(&indr_dev->cb_list);
indr_dev->dev = dev;
if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
tc_indr_setup_block_ht_params)) {
kfree(indr_dev);
return NULL;
}
inc_ref:
indr_dev->refcnt++;
return indr_dev;
}
/* Drop one reference on @indr_dev; unhash and free it on the last put. */
static void tc_indr_block_dev_put(struct tc_indr_block_dev *indr_dev)
{
if (--indr_dev->refcnt)
return;
rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
tc_indr_setup_block_ht_params);
kfree(indr_dev);
}
/* Find the registration matching (@cb, @cb_ident) on @indr_dev, or NULL. */
static struct tc_indr_block_cb *
tc_indr_block_cb_lookup(struct tc_indr_block_dev *indr_dev,
tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
struct tc_indr_block_cb *indr_block_cb;
list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
if (indr_block_cb->cb == cb &&
indr_block_cb->cb_ident == cb_ident)
return indr_block_cb;
return NULL;
}
/* Allocate a new callback registration on @indr_dev.
 * Returns the new entry, ERR_PTR(-EEXIST) if (cb, cb_ident) is already
 * registered, or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct tc_indr_block_cb *
tc_indr_block_cb_add(struct tc_indr_block_dev *indr_dev, void *cb_priv,
tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
struct tc_indr_block_cb *indr_block_cb;
/* Refuse duplicate registrations of the same identity. */
indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
if (indr_block_cb)
return ERR_PTR(-EEXIST);
indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
if (!indr_block_cb)
return ERR_PTR(-ENOMEM);
indr_block_cb->cb_priv = cb_priv;
indr_block_cb->cb = cb;
indr_block_cb->cb_ident = cb_ident;
list_add(&indr_block_cb->list, &indr_dev->cb_list);
return indr_block_cb;
}
/* Unlink @indr_block_cb from its device's list and release its memory. */
static void tc_indr_block_cb_del(struct tc_indr_block_cb *indr_block_cb)
{
list_del(&indr_block_cb->list);
kfree(indr_block_cb);
}
static int tcf_block_setup(struct tcf_block *block, static int tcf_block_setup(struct tcf_block *block,
struct flow_block_offload *bo); struct flow_block_offload *bo);
static void tc_indr_block_ing_cmd(struct net_device *dev, static void tc_indr_block_ing_cmd(struct net_device *dev,
struct tcf_block *block, struct tcf_block *block,
tc_indr_block_bind_cb_t *cb, flow_indr_block_bind_cb_t *cb,
void *cb_priv, void *cb_priv,
enum flow_block_command command) enum flow_block_command command)
{ {
...@@ -699,97 +573,40 @@ static void tc_indr_block_ing_cmd(struct net_device *dev, ...@@ -699,97 +573,40 @@ static void tc_indr_block_ing_cmd(struct net_device *dev,
tcf_block_setup(block, &bo); tcf_block_setup(block, &bo);
} }
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv, static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
struct tc_indr_block_cb *indr_block_cb;
struct tc_indr_block_dev *indr_dev;
struct tcf_block *block;
int err;
indr_dev = tc_indr_block_dev_get(dev);
if (!indr_dev)
return -ENOMEM;
indr_block_cb = tc_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
err = PTR_ERR_OR_ZERO(indr_block_cb);
if (err)
goto err_dev_put;
block = tc_dev_ingress_block(dev);
tc_indr_block_ing_cmd(dev, block, indr_block_cb->cb,
indr_block_cb->cb_priv, FLOW_BLOCK_BIND);
return 0;
err_dev_put:
tc_indr_block_dev_put(indr_dev);
return err;
}
EXPORT_SYMBOL_GPL(__tc_indr_block_cb_register);
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
tc_indr_block_bind_cb_t *cb, void *cb_ident)
{ {
int err; const struct Qdisc_class_ops *cops;
struct Qdisc *qdisc;
rtnl_lock();
err = __tc_indr_block_cb_register(dev, cb_priv, cb, cb_ident);