Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

net/bridge/Kconfig Normal file

@@ -0,0 +1,62 @@
#
# 802.1d Ethernet Bridging
#
config BRIDGE
tristate "802.1d Ethernet Bridging"
select LLC
select STP
depends on IPV6 || IPV6=n
---help---
If you say Y here, then your Linux box will be able to act as an
Ethernet bridge, which means that the different Ethernet segments it
is connected to will appear as one Ethernet to the participants.
Several such bridges can work together to create even larger
networks of Ethernets using the IEEE 802.1 spanning tree algorithm.
As this is a standard, Linux bridges will cooperate properly with
other third party bridge products.

In order to use the Ethernet bridge, you'll need the bridge
configuration tools; see <file:Documentation/networking/bridge.txt>
for location. Please read the Bridge mini-HOWTO for more
information.

If you enable iptables support along with the bridge support then you
turn your bridge into a bridging IP firewall.
iptables will then see the IP packets being bridged, so you need to
take this into account when setting up your firewall rules.
Enabling arptables support when bridging will let arptables see
bridged ARP traffic in the arptables FORWARD chain.

To compile this code as a module, choose M here: the module
will be called bridge.

If unsure, say N.
config BRIDGE_IGMP_SNOOPING
bool "IGMP/MLD snooping"
depends on BRIDGE
depends on INET
default y
---help---
If you say Y here, then the Ethernet bridge will be able to selectively
forward multicast traffic based on IGMP/MLD traffic received from
each port.

Say N to exclude this support and reduce the binary size.

If unsure, say Y.
config BRIDGE_VLAN_FILTERING
bool "VLAN filtering"
depends on BRIDGE
depends on VLAN_8021Q
default n
---help---
If you say Y here, then the Ethernet bridge will be able to selectively
receive and forward traffic based on the VLAN information in the packet
and the VLAN configuration of the bridge port or bridge device.

Say N to exclude this support and reduce the binary size.

If unsure, say Y.
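
The help text above refers to the userspace bridge configuration tools. As a rough illustration of what such a tool does against the ioctl handlers added later in this commit (br_ioctl.c and br_if.c), here is a minimal, hypothetical userspace sketch that creates a bridge and enslaves a port via SIOCBRADDBR and SIOCBRADDIF; the names br0 and eth0 are placeholders and error handling is reduced to perror().

/* Hypothetical userspace sketch; not part of the kernel sources. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>

int main(void)
{
    char brname[IFNAMSIZ] = "br0";   /* placeholder bridge name */
    struct ifreq ifr;
    int fd = socket(AF_LOCAL, SOCK_STREAM, 0);

    if (fd < 0) {
        perror("socket");
        return 1;
    }

    /* SIOCBRADDBR takes the new bridge name directly
     * (handled by br_ioctl_deviceless_stub() in br_ioctl.c). */
    if (ioctl(fd, SIOCBRADDBR, brname) < 0)
        perror("SIOCBRADDBR");

    /* SIOCBRADDIF is issued against the bridge device and carries the
     * ifindex of the port to enslave (see add_del_if()/br_add_if()). */
    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, brname, IFNAMSIZ - 1);
    ifr.ifr_ifindex = if_nametoindex("eth0");   /* placeholder port */
    if (ifr.ifr_ifindex == 0 || ioctl(fd, SIOCBRADDIF, &ifr) < 0)
        perror("SIOCBRADDIF");

    close(fd);
    return 0;
}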

net/bridge/Makefile Normal file

@@ -0,0 +1,21 @@
#
# Makefile for the IEEE 802.1d ethernet bridging layer.
#
obj-$(CONFIG_BRIDGE) += bridge.o
bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \
br_ioctl.o br_stp.o br_stp_bpdu.o \
br_stp_if.o br_stp_timer.o br_netlink.o
bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o
bridge-$(subst m,y,$(CONFIG_BRIDGE_NETFILTER)) += br_nf_core.o
obj-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o
bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o
bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o
obj-$(CONFIG_NETFILTER) += netfilter/

net/bridge/br.c Normal file

@@ -0,0 +1,222 @@
/*
* Generic parts
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/llc.h>
#include <net/llc.h>
#include <net/stp.h>
#include "br_private.h"
/*
* Handle changes in state of network devices enslaved to a bridge.
*
* Note: don't care about up/down if bridge itself is down, because
* port state is checked when bridge is brought up.
*/
static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net_bridge_port *p;
struct net_bridge *br;
bool changed_addr;
int err;
/* registration of the bridge completed, add sysfs entries */
if ((dev->priv_flags & IFF_EBRIDGE) && event == NETDEV_REGISTER) {
br_sysfs_addbr(dev);
return NOTIFY_DONE;
}
/* not a port of a bridge */
p = br_port_get_rtnl(dev);
if (!p)
return NOTIFY_DONE;
br = p->br;
switch (event) {
case NETDEV_CHANGEMTU:
dev_set_mtu(br->dev, br_min_mtu(br));
break;
case NETDEV_CHANGEADDR:
spin_lock_bh(&br->lock);
br_fdb_changeaddr(p, dev->dev_addr);
changed_addr = br_stp_recalculate_bridge_id(br);
spin_unlock_bh(&br->lock);
if (changed_addr)
call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
break;
case NETDEV_CHANGE:
br_port_carrier_check(p);
break;
case NETDEV_FEAT_CHANGE:
netdev_update_features(br->dev);
break;
case NETDEV_DOWN:
spin_lock_bh(&br->lock);
if (br->dev->flags & IFF_UP)
br_stp_disable_port(p);
spin_unlock_bh(&br->lock);
break;
case NETDEV_UP:
if (netif_running(br->dev) && netif_oper_up(dev)) {
spin_lock_bh(&br->lock);
br_stp_enable_port(p);
spin_unlock_bh(&br->lock);
}
break;
case NETDEV_UNREGISTER:
br_del_if(br, dev);
break;
case NETDEV_CHANGENAME:
err = br_sysfs_renameif(p);
if (err)
return notifier_from_errno(err);
break;
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid the underlying device from changing its type. */
return NOTIFY_BAD;
case NETDEV_RESEND_IGMP:
/* Propagate to master device */
call_netdevice_notifiers(event, br->dev);
break;
}
/* Events that may cause spanning tree to refresh */
if (event == NETDEV_CHANGEADDR || event == NETDEV_UP ||
event == NETDEV_CHANGE || event == NETDEV_DOWN)
br_ifinfo_notify(RTM_NEWLINK, p);
return NOTIFY_DONE;
}
static struct notifier_block br_device_notifier = {
.notifier_call = br_device_event
};
static void __net_exit br_net_exit(struct net *net)
{
struct net_device *dev;
LIST_HEAD(list);
rtnl_lock();
for_each_netdev(net, dev)
if (dev->priv_flags & IFF_EBRIDGE)
br_dev_delete(dev, &list);
unregister_netdevice_many(&list);
rtnl_unlock();
}
static struct pernet_operations br_net_ops = {
.exit = br_net_exit,
};
static const struct stp_proto br_stp_proto = {
.rcv = br_stp_rcv,
};
static int __init br_init(void)
{
int err;
err = stp_proto_register(&br_stp_proto);
if (err < 0) {
pr_err("bridge: can't register sap for STP\n");
return err;
}
err = br_fdb_init();
if (err)
goto err_out;
err = register_pernet_subsys(&br_net_ops);
if (err)
goto err_out1;
err = br_nf_core_init();
if (err)
goto err_out2;
err = register_netdevice_notifier(&br_device_notifier);
if (err)
goto err_out3;
err = br_netlink_init();
if (err)
goto err_out4;
brioctl_set(br_ioctl_deviceless_stub);
#if IS_ENABLED(CONFIG_ATM_LANE)
br_fdb_test_addr_hook = br_fdb_test_addr;
#endif
pr_info("bridge: automatic filtering via arp/ip/ip6tables has been "
"deprecated. Update your scripts to load br_netfilter if you "
"need this.\n");
return 0;
err_out4:
unregister_netdevice_notifier(&br_device_notifier);
err_out3:
br_nf_core_fini();
err_out2:
unregister_pernet_subsys(&br_net_ops);
err_out1:
br_fdb_fini();
err_out:
stp_proto_unregister(&br_stp_proto);
return err;
}
static void __exit br_deinit(void)
{
stp_proto_unregister(&br_stp_proto);
br_netlink_fini();
unregister_netdevice_notifier(&br_device_notifier);
brioctl_set(NULL);
unregister_pernet_subsys(&br_net_ops);
rcu_barrier(); /* Wait for completion of call_rcu()'s */
br_nf_core_fini();
#if IS_ENABLED(CONFIG_ATM_LANE)
br_fdb_test_addr_hook = NULL;
#endif
br_fdb_fini();
}
module_init(br_init)
module_exit(br_deinit)
MODULE_LICENSE("GPL");
MODULE_VERSION(BR_VERSION);
MODULE_ALIAS_RTNL_LINK("bridge");

net/bridge/br_device.c Normal file

@@ -0,0 +1,398 @@
/*
* Device handling code
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/netfilter_bridge.h>
#include <asm/uaccess.h>
#include "br_private.h"
#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)
/* net device transmit always called with BH disabled */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
const unsigned char *dest = skb->data;
struct net_bridge_fdb_entry *dst;
struct net_bridge_mdb_entry *mdst;
struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
u16 vid = 0;
rcu_read_lock();
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
br_nf_pre_routing_finish_bridge_slow(skb);
rcu_read_unlock();
return NETDEV_TX_OK;
}
#endif
BR_INPUT_SKB_CB(skb)->brdev = dev;
skb_reset_mac_header(skb);
skb_pull(skb, ETH_HLEN);
u64_stats_update_begin(&brstats->syncp);
brstats->tx_packets++;
/* Exclude ETH_HLEN from byte stats for consistency with Rx chain */
brstats->tx_bytes += skb->len;
u64_stats_update_end(&brstats->syncp);
if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
goto out;
if (is_broadcast_ether_addr(dest))
br_flood_deliver(br, skb, false);
else if (is_multicast_ether_addr(dest)) {
if (unlikely(netpoll_tx_running(dev))) {
br_flood_deliver(br, skb, false);
goto out;
}
if (br_multicast_rcv(br, NULL, skb, vid)) {
kfree_skb(skb);
goto out;
}
mdst = br_mdb_get(br, skb, vid);
if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
br_multicast_querier_exists(br, eth_hdr(skb)))
br_multicast_deliver(mdst, skb);
else
br_flood_deliver(br, skb, false);
} else if ((dst = __br_fdb_get(br, dest, vid)) != NULL)
br_deliver(dst->dst, skb);
else
br_flood_deliver(br, skb, true);
out:
rcu_read_unlock();
return NETDEV_TX_OK;
}
static int br_dev_init(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
int err;
br->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!br->stats)
return -ENOMEM;
err = br_vlan_init(br);
if (err)
free_percpu(br->stats);
return err;
}
static int br_dev_open(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
netdev_update_features(dev);
netif_start_queue(dev);
br_stp_enable_bridge(br);
br_multicast_open(br);
return 0;
}
static void br_dev_set_multicast_list(struct net_device *dev)
{
}
static void br_dev_change_rx_flags(struct net_device *dev, int change)
{
if (change & IFF_PROMISC)
br_manage_promisc(netdev_priv(dev));
}
static int br_dev_stop(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
br_stp_disable_bridge(br);
br_multicast_stop(br);
netif_stop_queue(dev);
return 0;
}
static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct net_bridge *br = netdev_priv(dev);
struct pcpu_sw_netstats tmp, sum = { 0 };
unsigned int cpu;
for_each_possible_cpu(cpu) {
unsigned int start;
const struct pcpu_sw_netstats *bstats
= per_cpu_ptr(br->stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&bstats->syncp);
memcpy(&tmp, bstats, sizeof(tmp));
} while (u64_stats_fetch_retry_irq(&bstats->syncp, start));
sum.tx_bytes += tmp.tx_bytes;
sum.tx_packets += tmp.tx_packets;
sum.rx_bytes += tmp.rx_bytes;
sum.rx_packets += tmp.rx_packets;
}
stats->tx_bytes = sum.tx_bytes;
stats->tx_packets = sum.tx_packets;
stats->rx_bytes = sum.rx_bytes;
stats->rx_packets = sum.rx_packets;
return stats;
}
static int br_change_mtu(struct net_device *dev, int new_mtu)
{
struct net_bridge *br = netdev_priv(dev);
if (new_mtu < 68 || new_mtu > br_min_mtu(br))
return -EINVAL;
dev->mtu = new_mtu;
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* remember the MTU in the rtable for PMTU */
dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
#endif
return 0;
}
/* Allow setting mac address to any valid ethernet address. */
static int br_set_mac_address(struct net_device *dev, void *p)
{
struct net_bridge *br = netdev_priv(dev);
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
spin_lock_bh(&br->lock);
if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
/* Mac address will be changed in br_stp_change_bridge_id(). */
br_stp_change_bridge_id(br, addr->sa_data);
}
spin_unlock_bh(&br->lock);
return 0;
}
static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strlcpy(info->driver, "bridge", sizeof(info->driver));
strlcpy(info->version, BR_VERSION, sizeof(info->version));
strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
static netdev_features_t br_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct net_bridge *br = netdev_priv(dev);
return br_features_recompute(br, features);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void br_poll_controller(struct net_device *br_dev)
{
}
static void br_netpoll_cleanup(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_port *p;
list_for_each_entry(p, &br->port_list, list)
br_netpoll_disable(p);
}
static int __br_netpoll_enable(struct net_bridge_port *p)
{
struct netpoll *np;
int err;
np = kzalloc(sizeof(*p->np), GFP_KERNEL);
if (!np)
return -ENOMEM;
err = __netpoll_setup(np, p->dev);
if (err) {
kfree(np);
return err;
}
p->np = np;
return err;
}
int br_netpoll_enable(struct net_bridge_port *p)
{
if (!p->br->dev->npinfo)
return 0;
return __br_netpoll_enable(p);
}
static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_port *p;
int err = 0;
list_for_each_entry(p, &br->port_list, list) {
if (!p->dev)
continue;
err = __br_netpoll_enable(p);
if (err)
goto fail;
}
out:
return err;
fail:
br_netpoll_cleanup(dev);
goto out;
}
void br_netpoll_disable(struct net_bridge_port *p)
{
struct netpoll *np = p->np;
if (!np)
return;
p->np = NULL;
__netpoll_free_async(np);
}
#endif
static int br_add_slave(struct net_device *dev, struct net_device *slave_dev)
{
struct net_bridge *br = netdev_priv(dev);
return br_add_if(br, slave_dev);
}
static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
{
struct net_bridge *br = netdev_priv(dev);
return br_del_if(br, slave_dev);
}
static const struct ethtool_ops br_ethtool_ops = {
.get_drvinfo = br_getinfo,
.get_link = ethtool_op_get_link,
};
static const struct net_device_ops br_netdev_ops = {
.ndo_open = br_dev_open,
.ndo_stop = br_dev_stop,
.ndo_init = br_dev_init,
.ndo_start_xmit = br_dev_xmit,
.ndo_get_stats64 = br_get_stats64,
.ndo_set_mac_address = br_set_mac_address,
.ndo_set_rx_mode = br_dev_set_multicast_list,
.ndo_change_rx_flags = br_dev_change_rx_flags,
.ndo_change_mtu = br_change_mtu,
.ndo_do_ioctl = br_dev_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_netpoll_setup = br_netpoll_setup,
.ndo_netpoll_cleanup = br_netpoll_cleanup,
.ndo_poll_controller = br_poll_controller,
#endif
.ndo_add_slave = br_add_slave,
.ndo_del_slave = br_del_slave,
.ndo_fix_features = br_fix_features,
.ndo_fdb_add = br_fdb_add,
.ndo_fdb_del = br_fdb_delete,
.ndo_fdb_dump = br_fdb_dump,
.ndo_bridge_getlink = br_getlink,
.ndo_bridge_setlink = br_setlink,
.ndo_bridge_dellink = br_dellink,
};
static void br_dev_free(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
free_percpu(br->stats);
free_netdev(dev);
}
static struct device_type br_type = {
.name = "bridge",
};
void br_dev_setup(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
eth_hw_addr_random(dev);
ether_setup(dev);
dev->netdev_ops = &br_netdev_ops;
dev->destructor = br_dev_free;
dev->ethtool_ops = &br_ethtool_ops;
SET_NETDEV_DEVTYPE(dev, &br_type);
dev->tx_queue_len = 0;
dev->priv_flags = IFF_EBRIDGE;
dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
dev->vlan_features = COMMON_FEATURES;
br->dev = dev;
spin_lock_init(&br->lock);
INIT_LIST_HEAD(&br->port_list);
spin_lock_init(&br->hash_lock);
br->bridge_id.prio[0] = 0x80;
br->bridge_id.prio[1] = 0x00;
ether_addr_copy(br->group_addr, eth_reserved_addr_base);
br->stp_enabled = BR_NO_STP;
br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
br->designated_root = br->bridge_id;
br->bridge_max_age = br->max_age = 20 * HZ;
br->bridge_hello_time = br->hello_time = 2 * HZ;
br->bridge_forward_delay = br->forward_delay = 15 * HZ;
br->ageing_time = 300 * HZ;
br_netfilter_rtable_init(br);
br_stp_timer_init(br);
br_multicast_init(br);
}

net/bridge/br_fdb.c Normal file

File diff suppressed because it is too large.

net/bridge/br_forward.c Normal file

@@ -0,0 +1,283 @@
/*
* Forwarding decision
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"
static int deliver_clone(const struct net_bridge_port *prev,
struct sk_buff *skb,
void (*__packet_hook)(const struct net_bridge_port *p,
struct sk_buff *skb));
/* Don't forward packets back to the originating port, or out of ports where forwarding is disabled */
static inline int should_deliver(const struct net_bridge_port *p,
const struct sk_buff *skb)
{
return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
br_allowed_egress(p->br, nbp_get_vlan_info(p), skb) &&
p->state == BR_STATE_FORWARDING;
}
int br_dev_queue_push_xmit(struct sk_buff *skb)
{
/* ip_fragment doesn't copy the MAC header */
if (nf_bridge_maybe_copy_header(skb) ||
!is_skb_forwardable(skb->dev, skb)) {
kfree_skb(skb);
} else {
skb_push(skb, ETH_HLEN);
br_drop_fake_rtable(skb);
dev_queue_xmit(skb);
}
return 0;
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
int br_forward_finish(struct sk_buff *skb)
{
return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
br_dev_queue_push_xmit);
}
EXPORT_SYMBOL_GPL(br_forward_finish);
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
if (!skb)
return;
skb->dev = to->dev;
if (unlikely(netpoll_tx_running(to->br->dev))) {
if (!is_skb_forwardable(skb->dev, skb))
kfree_skb(skb);
else {
skb_push(skb, ETH_HLEN);
br_netpoll_send_skb(to, skb);
}
return;
}
NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
br_forward_finish);
}
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
struct net_device *indev;
if (skb_warn_if_lro(skb)) {
kfree_skb(skb);
return;
}
skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
if (!skb)
return;
indev = skb->dev;
skb->dev = to->dev;
skb_forward_csum(skb);
NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
br_forward_finish);
}
/* called with rcu_read_lock */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
if (to && should_deliver(to, skb)) {
__br_deliver(to, skb);
return;
}
kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(br_deliver);
/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
{
if (should_deliver(to, skb)) {
if (skb0)
deliver_clone(to, skb, __br_forward);
else
__br_forward(to, skb);
return;
}
if (!skb0)
kfree_skb(skb);
}
static int deliver_clone(const struct net_bridge_port *prev,
struct sk_buff *skb,
void (*__packet_hook)(const struct net_bridge_port *p,
struct sk_buff *skb))
{
struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
skb = skb_clone(skb, GFP_ATOMIC);
if (!skb) {
dev->stats.tx_dropped++;
return -ENOMEM;
}
__packet_hook(prev, skb);
return 0;
}
static struct net_bridge_port *maybe_deliver(
struct net_bridge_port *prev, struct net_bridge_port *p,
struct sk_buff *skb,
void (*__packet_hook)(const struct net_bridge_port *p,
struct sk_buff *skb))
{
int err;
if (!should_deliver(p, skb))
return prev;
if (!prev)
goto out;
err = deliver_clone(prev, skb, __packet_hook);
if (err)
return ERR_PTR(err);
out:
return p;
}
/* called under bridge lock */
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
struct sk_buff *skb0,
void (*__packet_hook)(const struct net_bridge_port *p,
struct sk_buff *skb),
bool unicast)
{
struct net_bridge_port *p;
struct net_bridge_port *prev;
prev = NULL;
list_for_each_entry_rcu(p, &br->port_list, list) {
/* Do not flood unicast traffic to ports that turn it off */
if (unicast && !(p->flags & BR_FLOOD))
continue;
prev = maybe_deliver(prev, p, skb, __packet_hook);
if (IS_ERR(prev))
goto out;
}
if (!prev)
goto out;
if (skb0)
deliver_clone(prev, skb, __packet_hook);
else
__packet_hook(prev, skb);
return;
out:
if (!skb0)
kfree_skb(skb);
}
/* called with rcu_read_lock */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast)
{
br_flood(br, skb, NULL, __br_deliver, unicast);
}
/* called under bridge lock */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
struct sk_buff *skb2, bool unicast)
{
br_flood(br, skb, skb2, __br_forward, unicast);
}
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* called with rcu_read_lock */
static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
struct sk_buff *skb, struct sk_buff *skb0,
void (*__packet_hook)(
const struct net_bridge_port *p,
struct sk_buff *skb))
{
struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_port *prev = NULL;
struct net_bridge_port_group *p;
struct hlist_node *rp;
rp = rcu_dereference(hlist_first_rcu(&br->router_list));
p = mdst ? rcu_dereference(mdst->ports) : NULL;
while (p || rp) {
struct net_bridge_port *port, *lport, *rport;
lport = p ? p->port : NULL;
rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
NULL;
port = (unsigned long)lport > (unsigned long)rport ?
lport : rport;
prev = maybe_deliver(prev, port, skb, __packet_hook);
if (IS_ERR(prev))
goto out;
if ((unsigned long)lport >= (unsigned long)port)
p = rcu_dereference(p->next);
if ((unsigned long)rport >= (unsigned long)port)
rp = rcu_dereference(hlist_next_rcu(rp));
}
if (!prev)
goto out;
if (skb0)
deliver_clone(prev, skb, __packet_hook);
else
__packet_hook(prev, skb);
return;
out:
if (!skb0)
kfree_skb(skb);
}
/* called with rcu_read_lock */
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
struct sk_buff *skb)
{
br_multicast_flood(mdst, skb, NULL, __br_deliver);
}
/* called with rcu_read_lock */
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
struct sk_buff *skb, struct sk_buff *skb2)
{
br_multicast_flood(mdst, skb, skb2, __br_forward);
}
#endif

net/bridge/br_if.c Normal file

@@ -0,0 +1,577 @@
/*
* Userspace interface
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netpoll.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/if_vlan.h>
#include "br_private.h"
/*
* Determine the initial path cost based on speed,
* using the recommendations from the 802.1D standard.
*
* Since the driver might sleep, we must not be holding any locks here.
*/
static int port_cost(struct net_device *dev)
{
struct ethtool_cmd ecmd;
if (!__ethtool_get_settings(dev, &ecmd)) {
switch (ethtool_cmd_speed(&ecmd)) {
case SPEED_10000:
return 2;
case SPEED_1000:
return 4;
case SPEED_100:
return 19;
case SPEED_10:
return 100;
}
}
/* Old silly heuristics based on name */
if (!strncmp(dev->name, "lec", 3))
return 7;
if (!strncmp(dev->name, "plip", 4))
return 2500;
return 100; /* assume old 10Mbps */
}
/* Check for port carrier transitions. */
void br_port_carrier_check(struct net_bridge_port *p)
{
struct net_device *dev = p->dev;
struct net_bridge *br = p->br;
if (!(p->flags & BR_ADMIN_COST) &&
netif_running(dev) && netif_oper_up(dev))
p->path_cost = port_cost(dev);
if (!netif_running(br->dev))
return;
spin_lock_bh(&br->lock);
if (netif_running(dev) && netif_oper_up(dev)) {
if (p->state == BR_STATE_DISABLED)
br_stp_enable_port(p);
} else {
if (p->state != BR_STATE_DISABLED)
br_stp_disable_port(p);
}
spin_unlock_bh(&br->lock);
}
static void br_port_set_promisc(struct net_bridge_port *p)
{
int err = 0;
if (br_promisc_port(p))
return;
err = dev_set_promiscuity(p->dev, 1);
if (err)
return;
br_fdb_unsync_static(p->br, p);
p->flags |= BR_PROMISC;
}
static void br_port_clear_promisc(struct net_bridge_port *p)
{
int err;
/* Check if the port is already non-promisc or if it doesn't
* support UNICAST filtering. Without unicast filtering support
* we'll end up re-enabling promisc mode anyway, so just check for
* it here.
*/
if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
return;
/* Since we'll be clearing the promisc mode, program the port
* first so that we don't have interruption in traffic.
*/
err = br_fdb_sync_static(p->br, p);
if (err)
return;
dev_set_promiscuity(p->dev, -1);
p->flags &= ~BR_PROMISC;
}
/* When a port is added or removed or when certain port flags
* change, this function is called to automatically manage
* promiscuity setting of all the bridge ports. We are always called
* under RTNL so can skip using rcu primitives.
*/
void br_manage_promisc(struct net_bridge *br)
{
struct net_bridge_port *p;
bool set_all = false;
/* If vlan filtering is disabled or bridge interface is placed
* into promiscuous mode, place all ports in promiscuous mode.
*/
if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br))
set_all = true;
list_for_each_entry(p, &br->port_list, list) {
if (set_all) {
br_port_set_promisc(p);
} else {
/* If the number of auto-ports is <= 1, then all other
* ports will have their output configuration
* statically specified through fdbs. Since ingress
* on the auto-port becomes forwarding/egress to other
* ports and egress configuration is statically known,
* we can say that ingress configuration of the
* auto-port is also statically known.
* This lets us disable promiscuous mode and write
* this config to hw.
*/
if (br->auto_cnt == 0 ||
(br->auto_cnt == 1 && br_auto_port(p)))
br_port_clear_promisc(p);
else
br_port_set_promisc(p);
}
}
}
static void nbp_update_port_count(struct net_bridge *br)
{
struct net_bridge_port *p;
u32 cnt = 0;
list_for_each_entry(p, &br->port_list, list) {
if (br_auto_port(p))
cnt++;
}
if (br->auto_cnt != cnt) {
br->auto_cnt = cnt;
br_manage_promisc(br);
}
}
static void nbp_delete_promisc(struct net_bridge_port *p)
{
/* If port is currently promiscuous, unset promiscuity.
* Otherwise, it is a static port so remove all addresses
* from it.
*/
dev_set_allmulti(p->dev, -1);
if (br_promisc_port(p))
dev_set_promiscuity(p->dev, -1);
else
br_fdb_unsync_static(p->br, p);
}
static void release_nbp(struct kobject *kobj)
{
struct net_bridge_port *p
= container_of(kobj, struct net_bridge_port, kobj);
kfree(p);
}
static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
.sysfs_ops = &brport_sysfs_ops,
#endif
.release = release_nbp,
};
static void destroy_nbp(struct net_bridge_port *p)
{
struct net_device *dev = p->dev;
p->br = NULL;
p->dev = NULL;
dev_put(dev);
kobject_put(&p->kobj);
}
static void destroy_nbp_rcu(struct rcu_head *head)
{
struct net_bridge_port *p =
container_of(head, struct net_bridge_port, rcu);
destroy_nbp(p);
}
/* Deleting a port (interface) from the bridge is done in two steps
* via RCU. The first step marks the device as down; that deletes
* all the timers and stops new packets from flowing through.
*
* Final cleanup doesn't occur until all CPUs have finished
* processing packets.
*
* Protected from concurrent admin operations by the RTNL mutex.
*/
static void del_nbp(struct net_bridge_port *p)
{
struct net_bridge *br = p->br;
struct net_device *dev = p->dev;
sysfs_remove_link(br->ifobj, p->dev->name);
nbp_delete_promisc(p);
spin_lock_bh(&br->lock);
br_stp_disable_port(p);
spin_unlock_bh(&br->lock);
br_ifinfo_notify(RTM_DELLINK, p);
list_del_rcu(&p->list);
nbp_vlan_flush(p);
br_fdb_delete_by_port(br, p, 1);
nbp_update_port_count(br);
netdev_upper_dev_unlink(dev, br->dev);
dev->priv_flags &= ~IFF_BRIDGE_PORT;
netdev_rx_handler_unregister(dev);
br_multicast_del_port(p);
kobject_uevent(&p->kobj, KOBJ_REMOVE);
kobject_del(&p->kobj);
br_netpoll_disable(p);
call_rcu(&p->rcu, destroy_nbp_rcu);
}
/* Delete bridge device */
void br_dev_delete(struct net_device *dev, struct list_head *head)
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_port *p, *n;
list_for_each_entry_safe(p, n, &br->port_list, list) {
del_nbp(p);
}
br_fdb_delete_by_port(br, NULL, 1);
br_vlan_flush(br);
del_timer_sync(&br->gc_timer);
br_sysfs_delbr(br->dev);
unregister_netdevice_queue(br->dev, head);
}
/* find an available port number */
static int find_portno(struct net_bridge *br)
{
int index;
struct net_bridge_port *p;
unsigned long *inuse;
inuse = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
GFP_KERNEL);
if (!inuse)
return -ENOMEM;
set_bit(0, inuse); /* zero is reserved */
list_for_each_entry(p, &br->port_list, list) {
set_bit(p->port_no, inuse);
}
index = find_first_zero_bit(inuse, BR_MAX_PORTS);
kfree(inuse);
return (index >= BR_MAX_PORTS) ? -EXFULL : index;
}
/* called with RTNL but without bridge lock */
static struct net_bridge_port *new_nbp(struct net_bridge *br,
struct net_device *dev)
{
int index;
struct net_bridge_port *p;
index = find_portno(br);
if (index < 0)
return ERR_PTR(index);
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL)
return ERR_PTR(-ENOMEM);
p->br = br;
dev_hold(dev);
p->dev = dev;
p->path_cost = port_cost(dev);
p->priority = 0x8000 >> BR_PORT_BITS;
p->port_no = index;
p->flags = BR_LEARNING | BR_FLOOD;
br_init_port(p);
br_set_state(p, BR_STATE_DISABLED);
br_stp_port_timer_init(p);
br_multicast_add_port(p);
return p;
}
int br_add_bridge(struct net *net, const char *name)
{
struct net_device *dev;
int res;
dev = alloc_netdev(sizeof(struct net_bridge), name, NET_NAME_UNKNOWN,
br_dev_setup);
if (!dev)
return -ENOMEM;
dev_net_set(dev, net);
dev->rtnl_link_ops = &br_link_ops;
res = register_netdev(dev);
if (res)
free_netdev(dev);
return res;
}
int br_del_bridge(struct net *net, const char *name)
{
struct net_device *dev;
int ret = 0;
rtnl_lock();
dev = __dev_get_by_name(net, name);
if (dev == NULL)
ret = -ENXIO; /* Could not find device */
else if (!(dev->priv_flags & IFF_EBRIDGE)) {
/* Attempt to delete non bridge device! */
ret = -EPERM;
}
else if (dev->flags & IFF_UP) {
/* Not shutdown yet. */
ret = -EBUSY;
}
else
br_dev_delete(dev, NULL);
rtnl_unlock();
return ret;
}
/* MTU of the bridge pseudo-device: ETH_DATA_LEN or the minimum of the ports */
int br_min_mtu(const struct net_bridge *br)
{
const struct net_bridge_port *p;
int mtu = 0;
ASSERT_RTNL();
if (list_empty(&br->port_list))
mtu = ETH_DATA_LEN;
else {
list_for_each_entry(p, &br->port_list, list) {
if (!mtu || p->dev->mtu < mtu)
mtu = p->dev->mtu;
}
}
return mtu;
}
/*
* Recomputes features using slave's features
*/
netdev_features_t br_features_recompute(struct net_bridge *br,
netdev_features_t features)
{
struct net_bridge_port *p;
netdev_features_t mask;
if (list_empty(&br->port_list))
return features;
mask = features;
features &= ~NETIF_F_ONE_FOR_ALL;
list_for_each_entry(p, &br->port_list, list) {
features = netdev_increment_features(features,
p->dev->features, mask);
}
return features;
}
/* called with RTNL */
int br_add_if(struct net_bridge *br, struct net_device *dev)
{
struct net_bridge_port *p;
int err = 0;
bool changed_addr;
/* Don't allow bridging non-ethernet like devices */
if ((dev->flags & IFF_LOOPBACK) ||
dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
!is_valid_ether_addr(dev->dev_addr))
return -EINVAL;
/* No bridging of bridges */
if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit)
return -ELOOP;
/* Device is already being bridged */
if (br_port_exists(dev))
return -EBUSY;
/* No bridging devices that dislike that (e.g. wireless) */
if (dev->priv_flags & IFF_DONT_BRIDGE)
return -EOPNOTSUPP;
p = new_nbp(br, dev);
if (IS_ERR(p))
return PTR_ERR(p);
call_netdevice_notifiers(NETDEV_JOIN, dev);
err = dev_set_allmulti(dev, 1);
if (err)
goto put_back;
err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
SYSFS_BRIDGE_PORT_ATTR);
if (err)
goto err1;
err = br_sysfs_addif(p);
if (err)
goto err2;
err = br_netpoll_enable(p);
if (err)
goto err3;
err = netdev_rx_handler_register(dev, br_handle_frame, p);
if (err)
goto err4;
dev->priv_flags |= IFF_BRIDGE_PORT;
err = netdev_master_upper_dev_link(dev, br->dev);
if (err)
goto err5;
dev_disable_lro(dev);
list_add_rcu(&p->list, &br->port_list);
nbp_update_port_count(br);
netdev_update_features(br->dev);
if (br->dev->needed_headroom < dev->needed_headroom)
br->dev->needed_headroom = dev->needed_headroom;
if (br_fdb_insert(br, p, dev->dev_addr, 0))
netdev_err(dev, "failed insert local address bridge forwarding table\n");
if (nbp_vlan_init(p))
netdev_err(dev, "failed to initialize vlan filtering on this port\n");
spin_lock_bh(&br->lock);
changed_addr = br_stp_recalculate_bridge_id(br);
if (netif_running(dev) && netif_oper_up(dev) &&
(br->dev->flags & IFF_UP))
br_stp_enable_port(p);
spin_unlock_bh(&br->lock);
br_ifinfo_notify(RTM_NEWLINK, p);
if (changed_addr)
call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
dev_set_mtu(br->dev, br_min_mtu(br));
kobject_uevent(&p->kobj, KOBJ_ADD);
return 0;
err5:
dev->priv_flags &= ~IFF_BRIDGE_PORT;
netdev_rx_handler_unregister(dev);
err4:
br_netpoll_disable(p);
err3:
sysfs_remove_link(br->ifobj, p->dev->name);
err2:
kobject_put(&p->kobj);
p = NULL; /* kobject_put frees */
err1:
dev_set_allmulti(dev, -1);
put_back:
dev_put(dev);
kfree(p);
return err;
}
/* called with RTNL */
int br_del_if(struct net_bridge *br, struct net_device *dev)
{
struct net_bridge_port *p;
bool changed_addr;
p = br_port_get_rtnl(dev);
if (!p || p->br != br)
return -EINVAL;
/* Since more than one interface can be attached to a bridge,
* there may still be an alternate path for netconsole to use;
* therefore there is no reason for a NETDEV_RELEASE event.
*/
del_nbp(p);
spin_lock_bh(&br->lock);
changed_addr = br_stp_recalculate_bridge_id(br);
spin_unlock_bh(&br->lock);
if (changed_addr)
call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
netdev_update_features(br->dev);
return 0;
}
void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
{
struct net_bridge *br = p->br;
if (mask & BR_AUTO_MASK)
nbp_update_port_count(br);
}

net/bridge/br_input.c Normal file

@@ -0,0 +1,249 @@
/*
* Handle incoming frames
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/export.h>
#include <linux/rculist.h>
#include "br_private.h"
/* Hook for brouter */
br_should_route_hook_t __rcu *br_should_route_hook __read_mostly;
EXPORT_SYMBOL(br_should_route_hook);
static int br_pass_frame_up(struct sk_buff *skb)
{
struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
struct net_bridge *br = netdev_priv(brdev);
struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
struct net_port_vlans *pv;
u64_stats_update_begin(&brstats->syncp);
brstats->rx_packets++;
brstats->rx_bytes += skb->len;
u64_stats_update_end(&brstats->syncp);
/* The bridge is just like any other port. Make sure the
* packet is allowed, except in promiscuous mode when someone
* may be running packet capture.
*/
pv = br_get_vlan_info(br);
if (!(brdev->flags & IFF_PROMISC) &&
!br_allowed_egress(br, pv, skb)) {
kfree_skb(skb);
return NET_RX_DROP;
}
indev = skb->dev;
skb->dev = brdev;
skb = br_handle_vlan(br, pv, skb);
if (!skb)
return NET_RX_DROP;
return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
netif_receive_skb);
}
/* note: already called with rcu_read_lock */
int br_handle_frame_finish(struct sk_buff *skb)
{
const unsigned char *dest = eth_hdr(skb)->h_dest;
struct net_bridge_port *p = br_port_get_rcu(skb->dev);
struct net_bridge *br;
struct net_bridge_fdb_entry *dst;
struct net_bridge_mdb_entry *mdst;
struct sk_buff *skb2;
bool unicast = true;
u16 vid = 0;
if (!p || p->state == BR_STATE_DISABLED)
goto drop;
if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid))
goto out;
/* insert into forwarding database after filtering to avoid spoofing */
br = p->br;
if (p->flags & BR_LEARNING)
br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
br_multicast_rcv(br, p, skb, vid))
goto drop;
if (p->state == BR_STATE_LEARNING)
goto drop;
BR_INPUT_SKB_CB(skb)->brdev = br->dev;
/* The packet skb2 goes to the local host (NULL to skip). */
skb2 = NULL;
if (br->dev->flags & IFF_PROMISC)
skb2 = skb;
dst = NULL;
if (is_broadcast_ether_addr(dest)) {
skb2 = skb;
unicast = false;
} else if (is_multicast_ether_addr(dest)) {
mdst = br_mdb_get(br, skb, vid);
if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
br_multicast_querier_exists(br, eth_hdr(skb))) {
if ((mdst && mdst->mglist) ||
br_multicast_is_router(br))
skb2 = skb;
br_multicast_forward(mdst, skb, skb2);
skb = NULL;
if (!skb2)
goto out;
} else
skb2 = skb;
unicast = false;
br->dev->stats.multicast++;
} else if ((dst = __br_fdb_get(br, dest, vid)) &&
dst->is_local) {
skb2 = skb;
/* Do not forward the packet since it's local. */
skb = NULL;
}
if (skb) {
if (dst) {
dst->used = jiffies;
br_forward(dst->dst, skb, skb2);
} else
br_flood_forward(br, skb, skb2, unicast);
}
if (skb2)
return br_pass_frame_up(skb2);
out:
return 0;
drop:
kfree_skb(skb);
goto out;
}
EXPORT_SYMBOL_GPL(br_handle_frame_finish);
/* note: already called with rcu_read_lock */
static int br_handle_local_finish(struct sk_buff *skb)
{
struct net_bridge_port *p = br_port_get_rcu(skb->dev);
u16 vid = 0;
/* check if vlan is allowed, to avoid spoofing */
if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
return 0; /* process further */
}
/*
* Return NULL if skb is handled
* note: already called with rcu_read_lock
*/
rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
{
struct net_bridge_port *p;
struct sk_buff *skb = *pskb;
const unsigned char *dest = eth_hdr(skb)->h_dest;
br_should_route_hook_t *rhook;
if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
return RX_HANDLER_PASS;
if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
goto drop;
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
return RX_HANDLER_CONSUMED;
p = br_port_get_rcu(skb->dev);
if (unlikely(is_link_local_ether_addr(dest))) {
u16 fwd_mask = p->br->group_fwd_mask_required;
/*
* See IEEE 802.1D Table 7-10 Reserved addresses
*
* Assignment Value
* Bridge Group Address 01-80-C2-00-00-00
* (MAC Control) 802.3 01-80-C2-00-00-01
* (Link Aggregation) 802.3 01-80-C2-00-00-02
* 802.1X PAE address 01-80-C2-00-00-03
*
* 802.1AB LLDP 01-80-C2-00-00-0E
*
* Others reserved for future standardization
*/
switch (dest[5]) {
case 0x00: /* Bridge Group Address */
/* If STP is turned off,
then we must forward to keep loop detection working */
if (p->br->stp_enabled == BR_NO_STP ||
fwd_mask & (1u << dest[5]))
goto forward;
break;
case 0x01: /* IEEE MAC (Pause) */
goto drop;
default:
/* Allow selective forwarding for most other protocols */
fwd_mask |= p->br->group_fwd_mask;
if (fwd_mask & (1u << dest[5]))
goto forward;
}
/* Deliver packet to local host only */
if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
NULL, br_handle_local_finish)) {
return RX_HANDLER_CONSUMED; /* consumed by filter */
} else {
*pskb = skb;
return RX_HANDLER_PASS; /* continue processing */
}
}
forward:
switch (p->state) {
case BR_STATE_FORWARDING:
rhook = rcu_dereference(br_should_route_hook);
if (rhook) {
if ((*rhook)(skb)) {
*pskb = skb;
return RX_HANDLER_PASS;
}
dest = eth_hdr(skb)->h_dest;
}
/* fall through */
case BR_STATE_LEARNING:
if (ether_addr_equal(p->br->dev->dev_addr, dest))
skb->pkt_type = PACKET_HOST;
NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
br_handle_frame_finish);
break;
default:
drop:
kfree_skb(skb);
}
return RX_HANDLER_CONSUMED;
}
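
The reserved-address table and the group_fwd_mask test in br_handle_frame() above both key off the last octet of the 01-80-C2-00-00-XX destination. A tiny, hypothetical standalone sketch (not part of this commit) of the same bit arithmetic, using LLDP (last octet 0x0E) as the example:

#include <stdio.h>

int main(void)
{
    /* 802.1AB LLDP destination address: 01-80-C2-00-00-0E */
    const unsigned char lldp_dest[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e };
    /* Assume the administrator has allowed forwarding of LLDP frames,
     * i.e. bit 0x0E is set in the bridge's group_fwd_mask. */
    unsigned int group_fwd_mask = 1u << 0x0e;

    /* Same test as in br_handle_frame(): fwd_mask & (1u << dest[5]) */
    if (group_fwd_mask & (1u << lldp_dest[5]))
        printf("frames to 01-80-C2-00-00-%02X would be forwarded\n",
               (unsigned)lldp_dest[5]);
    return 0;
}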

net/bridge/br_ioctl.c Normal file

@@ -0,0 +1,396 @@
/*
* Ioctl handler
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/times.h>
#include <net/net_namespace.h>
#include <asm/uaccess.h>
#include "br_private.h"
/* called with RTNL */
static int get_bridge_ifindices(struct net *net, int *indices, int num)
{
struct net_device *dev;
int i = 0;
for_each_netdev(net, dev) {
if (i >= num)
break;
if (dev->priv_flags & IFF_EBRIDGE)
indices[i++] = dev->ifindex;
}
return i;
}
/* called with RTNL */
static void get_port_ifindices(struct net_bridge *br, int *ifindices, int num)
{
struct net_bridge_port *p;
list_for_each_entry(p, &br->port_list, list) {
if (p->port_no < num)
ifindices[p->port_no] = p->dev->ifindex;
}
}
/*
* Format up to a page worth of forwarding table entries
* userbuf -- where to copy result
* maxnum -- maximum number of entries desired
* (limited to a page for sanity)
* offset -- number of records to skip
*/
static int get_fdb_entries(struct net_bridge *br, void __user *userbuf,
unsigned long maxnum, unsigned long offset)
{
int num;
void *buf;
size_t size;
/* Clamp size to PAGE_SIZE, test maxnum to avoid overflow */
if (maxnum > PAGE_SIZE/sizeof(struct __fdb_entry))
maxnum = PAGE_SIZE/sizeof(struct __fdb_entry);
size = maxnum * sizeof(struct __fdb_entry);
buf = kmalloc(size, GFP_USER);
if (!buf)
return -ENOMEM;
num = br_fdb_fillbuf(br, buf, maxnum, offset);
if (num > 0) {
if (copy_to_user(userbuf, buf, num*sizeof(struct __fdb_entry)))
num = -EFAULT;
}
kfree(buf);
return num;
}
/* called with RTNL */
static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
{
struct net *net = dev_net(br->dev);
struct net_device *dev;
int ret;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
dev = __dev_get_by_index(net, ifindex);
if (dev == NULL)
return -EINVAL;
if (isadd)
ret = br_add_if(br, dev);
else
ret = br_del_if(br, dev);
return ret;
}
/*
* Legacy ioctls through SIOCDEVPRIVATE.
* This interface is deprecated because it was too difficult
* to do the translation for 32/64-bit ioctl compatibility.
*/
static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct net_bridge *br = netdev_priv(dev);
unsigned long args[4];
if (copy_from_user(args, rq->ifr_data, sizeof(args)))
return -EFAULT;
switch (args[0]) {
case BRCTL_ADD_IF:
case BRCTL_DEL_IF:
return add_del_if(br, args[1], args[0] == BRCTL_ADD_IF);
case BRCTL_GET_BRIDGE_INFO:
{
struct __bridge_info b;
memset(&b, 0, sizeof(struct __bridge_info));
rcu_read_lock();
memcpy(&b.designated_root, &br->designated_root, 8);
memcpy(&b.bridge_id, &br->bridge_id, 8);
b.root_path_cost = br->root_path_cost;
b.max_age = jiffies_to_clock_t(br->max_age);
b.hello_time = jiffies_to_clock_t(br->hello_time);
b.forward_delay = br->forward_delay;
b.bridge_max_age = br->bridge_max_age;
b.bridge_hello_time = br->bridge_hello_time;
b.bridge_forward_delay = jiffies_to_clock_t(br->bridge_forward_delay);
b.topology_change = br->topology_change;
b.topology_change_detected = br->topology_change_detected;
b.root_port = br->root_port;
b.stp_enabled = (br->stp_enabled != BR_NO_STP);
b.ageing_time = jiffies_to_clock_t(br->ageing_time);
b.hello_timer_value = br_timer_value(&br->hello_timer);
b.tcn_timer_value = br_timer_value(&br->tcn_timer);
b.topology_change_timer_value = br_timer_value(&br->topology_change_timer);
b.gc_timer_value = br_timer_value(&br->gc_timer);
rcu_read_unlock();
if (copy_to_user((void __user *)args[1], &b, sizeof(b)))
return -EFAULT;
return 0;
}
case BRCTL_GET_PORT_LIST:
{
int num, *indices;
num = args[2];
if (num < 0)
return -EINVAL;
if (num == 0)
num = 256;
if (num > BR_MAX_PORTS)
num = BR_MAX_PORTS;
indices = kcalloc(num, sizeof(int), GFP_KERNEL);
if (indices == NULL)
return -ENOMEM;
get_port_ifindices(br, indices, num);
if (copy_to_user((void __user *)args[1], indices, num*sizeof(int)))
num = -EFAULT;
kfree(indices);
return num;
}
case BRCTL_SET_BRIDGE_FORWARD_DELAY:
if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
return br_set_forward_delay(br, args[1]);
case BRCTL_SET_BRIDGE_HELLO_TIME:
if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
return br_set_hello_time(br, args[1]);
case BRCTL_SET_BRIDGE_MAX_AGE:
if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
return br_set_max_age(br, args[1]);
case BRCTL_SET_AGEING_TIME:
if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
br->ageing_time = clock_t_to_jiffies(args[1]);
return 0;
case BRCTL_GET_PORT_INFO:
{
struct __port_info p;
struct net_bridge_port *pt;
rcu_read_lock();
if ((pt = br_get_port(br, args[2])) == NULL) {
rcu_read_unlock();
return -EINVAL;
}
memset(&p, 0, sizeof(struct __port_info));
memcpy(&p.designated_root, &pt->designated_root, 8);
memcpy(&p.designated_bridge, &pt->designated_bridge, 8);
p.port_id = pt->port_id;
p.designated_port = pt->designated_port;
p.path_cost = pt->path_cost;
p.designated_cost = pt->designated_cost;
p.state = pt->state;
p.top_change_ack = pt->topology_change_ack;
p.config_pending = pt->config_pending;
p.message_age_timer_value = br_timer_value(&pt->message_age_timer);
p.forward_delay_timer_value = br_timer_value(&pt->forward_delay_timer);
p.hold_timer_value = br_timer_value(&pt->hold_timer);
rcu_read_unlock();
if (copy_to_user((void __user *)args[1], &p, sizeof(p)))
return -EFAULT;
return 0;
}
case BRCTL_SET_BRIDGE_STP_STATE:
if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
br_stp_set_enabled(br, args[1]);
return 0;
case BRCTL_SET_BRIDGE_PRIORITY:
if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
spin_lock_bh(&br->lock);
br_stp_set_bridge_priority(br, args[1]);
spin_unlock_bh(&br->lock);
return 0;
case BRCTL_SET_PORT_PRIORITY:
{
struct net_bridge_port *p;
int ret;
if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
spin_lock_bh(&br->lock);
if ((p = br_get_port(br, args[1])) == NULL)
ret = -EINVAL;
else
ret = br_stp_set_port_priority(p, args[2]);
spin_unlock_bh(&br->lock);
return ret;
}
case BRCTL_SET_PATH_COST:
{
struct net_bridge_port *p;
int ret;
if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
spin_lock_bh(&br->lock);
if ((p = br_get_port(br, args[1])) == NULL)
ret = -EINVAL;
else
ret = br_stp_set_path_cost(p, args[2]);
spin_unlock_bh(&br->lock);
return ret;
}
case BRCTL_GET_FDB_ENTRIES:
return get_fdb_entries(br, (void __user *)args[1],
args[2], args[3]);
}
return -EOPNOTSUPP;
}
static int old_deviceless(struct net *net, void __user *uarg)
{
unsigned long args[3];
if (copy_from_user(args, uarg, sizeof(args)))
return -EFAULT;
switch (args[0]) {
case BRCTL_GET_VERSION:
return BRCTL_VERSION;
case BRCTL_GET_BRIDGES:
{
int *indices;
int ret = 0;
if (args[2] >= 2048)
return -ENOMEM;
indices = kcalloc(args[2], sizeof(int), GFP_KERNEL);
if (indices == NULL)
return -ENOMEM;
args[2] = get_bridge_ifindices(net, indices, args[2]);
ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int))
? -EFAULT : args[2];
kfree(indices);
return ret;
}
case BRCTL_ADD_BRIDGE:
case BRCTL_DEL_BRIDGE:
{
char buf[IFNAMSIZ];
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(buf, (void __user *)args[1], IFNAMSIZ))
return -EFAULT;
buf[IFNAMSIZ-1] = 0;
if (args[0] == BRCTL_ADD_BRIDGE)
return br_add_bridge(net, buf);
return br_del_bridge(net, buf);
}
}
return -EOPNOTSUPP;
}
int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *uarg)
{
switch (cmd) {
case SIOCGIFBR:
case SIOCSIFBR:
return old_deviceless(net, uarg);
case SIOCBRADDBR:
case SIOCBRDELBR:
{
char buf[IFNAMSIZ];
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(buf, uarg, IFNAMSIZ))
return -EFAULT;
buf[IFNAMSIZ-1] = 0;
if (cmd == SIOCBRADDBR)
return br_add_bridge(net, buf);
return br_del_bridge(net, buf);
}
}
return -EOPNOTSUPP;
}
int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct net_bridge *br = netdev_priv(dev);
switch (cmd) {
case SIOCDEVPRIVATE:
return old_dev_ioctl(dev, rq, cmd);
case SIOCBRADDIF:
case SIOCBRDELIF:
return add_del_if(br, rq->ifr_ifindex, cmd == SIOCBRADDIF);
}
br_debug(br, "Bridge does not support ioctl 0x%x\n", cmd);
return -EOPNOTSUPP;
}
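
For reference, the deprecated SIOCDEVPRIVATE path decoded by old_dev_ioctl() above expects a four-element unsigned long array behind ifr_data, with args[0] selecting the BRCTL_* operation and args[1..3] carrying its arguments. A minimal, hypothetical userspace sketch (not part of this commit) querying BRCTL_GET_BRIDGE_INFO could look like the following; br0 is a placeholder name for a bridge that is assumed to already exist.

/* Hypothetical userspace sketch; not part of the kernel sources. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/if_bridge.h>
#include <linux/sockios.h>

int main(void)
{
    struct __bridge_info info;
    /* args[0] = BRCTL_* op, args[1] = user buffer, as decoded by
     * old_dev_ioctl() via copy_from_user(args, rq->ifr_data, ...). */
    unsigned long args[4] = { BRCTL_GET_BRIDGE_INFO,
                              (unsigned long)&info, 0, 0 };
    struct ifreq ifr;
    int fd = socket(AF_LOCAL, SOCK_STREAM, 0);

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "br0", IFNAMSIZ - 1);  /* placeholder bridge */
    ifr.ifr_data = (char *)args;

    if (ioctl(fd, SIOCDEVPRIVATE, &ifr) < 0)
        perror("SIOCDEVPRIVATE");
    else
        printf("stp enabled: %u, ageing time: %lu\n",
               (unsigned)info.stp_enabled,
               (unsigned long)info.ageing_time);

    close(fd);
    return 0;
}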

net/bridge/br_mdb.c Normal file

@@ -0,0 +1,498 @@
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif
#include "br_private.h"
static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_port *p;
struct nlattr *nest;
if (!br->multicast_router || hlist_empty(&br->router_list))
return 0;
nest = nla_nest_start(skb, MDBA_ROUTER);
if (nest == NULL)
return -EMSGSIZE;
hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
if (p && nla_put_u32(skb, MDBA_ROUTER_PORT, p->dev->ifindex))
goto fail;
}
nla_nest_end(skb, nest);
return 0;
fail:
nla_nest_cancel(skb, nest);
return -EMSGSIZE;
}
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_mdb_htable *mdb;
struct nlattr *nest, *nest2;
int i, err = 0;
int idx = 0, s_idx = cb->args[1];
if (br->multicast_disabled)
return 0;
mdb = rcu_dereference(br->mdb);
if (!mdb)
return 0;
nest = nla_nest_start(skb, MDBA_MDB);
if (nest == NULL)
return -EMSGSIZE;
for (i = 0; i < mdb->max; i++) {
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
struct net_bridge_port *port;
hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
if (idx < s_idx)
goto skip;
nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
if (nest2 == NULL) {
err = -EMSGSIZE;
goto out;
}
for (pp = &mp->ports;
(p = rcu_dereference(*pp)) != NULL;
pp = &p->next) {
port = p->port;
if (port) {
struct br_mdb_entry e;
memset(&e, 0, sizeof(e));
e.ifindex = port->dev->ifindex;
e.state = p->state;
if (p->addr.proto == htons(ETH_P_IP))
e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
if (p->addr.proto == htons(ETH_P_IPV6))
e.addr.u.ip6 = p->addr.u.ip6;
#endif
e.addr.proto = p->addr.proto;
if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(e), &e)) {
nla_nest_cancel(skb, nest2);
err = -EMSGSIZE;
goto out;
}
}
}
nla_nest_end(skb, nest2);
skip:
idx++;
}
}
out:
cb->args[1] = idx;
nla_nest_end(skb, nest);
return err;
}
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net_device *dev;
struct net *net = sock_net(skb->sk);
struct nlmsghdr *nlh = NULL;
int idx = 0, s_idx;
s_idx = cb->args[0];
rcu_read_lock();
/* In theory this could be wrapped to 0... */
cb->seq = net->dev_base_seq + br_mdb_rehash_seq;
for_each_netdev_rcu(net, dev) {
if (dev->priv_flags & IFF_EBRIDGE) {
struct br_port_msg *bpm;
if (idx < s_idx)
goto skip;
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, RTM_GETMDB,
sizeof(*bpm), NLM_F_MULTI);
if (nlh == NULL)
break;
bpm = nlmsg_data(nlh);
memset(bpm, 0, sizeof(*bpm));
bpm->ifindex = dev->ifindex;
if (br_mdb_fill_info(skb, cb, dev) < 0)
goto out;
if (br_rports_fill_info(skb, cb, dev) < 0)
goto out;
cb->args[1] = 0;
nlmsg_end(skb, nlh);
skip:
idx++;
}
}
out:
if (nlh)
nlmsg_end(skb, nlh);
rcu_read_unlock();
cb->args[0] = idx;
return skb->len;
}
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
struct net_device *dev,
struct br_mdb_entry *entry, u32 pid,
u32 seq, int type, unsigned int flags)
{
struct nlmsghdr *nlh;
struct br_port_msg *bpm;
struct nlattr *nest, *nest2;
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
if (!nlh)
return -EMSGSIZE;
bpm = nlmsg_data(nlh);
memset(bpm, 0, sizeof(*bpm));
bpm->family = AF_BRIDGE;
bpm->ifindex = dev->ifindex;
nest = nla_nest_start(skb, MDBA_MDB);
if (nest == NULL)
goto cancel;
nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
if (nest2 == NULL)
goto end;
if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
goto end;
nla_nest_end(skb, nest2);
nla_nest_end(skb, nest);
return nlmsg_end(skb, nlh);
end:
nla_nest_end(skb, nest);
cancel:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static inline size_t rtnl_mdb_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct br_port_msg))
+ nla_total_size(sizeof(struct br_mdb_entry));
}
static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
int type)
{
struct net *net = dev_net(dev);
struct sk_buff *skb;
int err = -ENOBUFS;
skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
if (!skb)
goto errout;
err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
if (err < 0) {
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
return;
errout:
rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
struct br_ip *group, int type)
{
struct br_mdb_entry entry;
memset(&entry, 0, sizeof(entry));
entry.ifindex = port->dev->ifindex;
entry.addr.proto = group->proto;
entry.addr.u.ip4 = group->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
entry.addr.u.ip6 = group->u.ip6;
#endif
__br_mdb_notify(dev, &entry, type);
}
static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
if (entry->ifindex == 0)
return false;
if (entry->addr.proto == htons(ETH_P_IP)) {
if (!ipv4_is_multicast(entry->addr.u.ip4))
return false;
if (ipv4_is_local_multicast(entry->addr.u.ip4))
return false;
#if IS_ENABLED(CONFIG_IPV6)
} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
return false;
#endif
} else
return false;
if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
return false;
return true;
}
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net_device **pdev, struct br_mdb_entry **pentry)
{
struct net *net = sock_net(skb->sk);
struct br_mdb_entry *entry;
struct br_port_msg *bpm;
struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
struct net_device *dev;
int err;
err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY, NULL);
if (err < 0)
return err;
bpm = nlmsg_data(nlh);
if (bpm->ifindex == 0) {
pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
return -EINVAL;
}
dev = __dev_get_by_index(net, bpm->ifindex);
if (dev == NULL) {
pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
return -ENODEV;
}
if (!(dev->priv_flags & IFF_EBRIDGE)) {
pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
return -EOPNOTSUPP;
}
*pdev = dev;
if (!tb[MDBA_SET_ENTRY] ||
nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
return -EINVAL;
}
entry = nla_data(tb[MDBA_SET_ENTRY]);
if (!is_valid_mdb_entry(entry)) {
pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
return -EINVAL;
}
*pentry = entry;
return 0;
}
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
struct br_ip *group, unsigned char state)
{
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
struct net_bridge_mdb_htable *mdb;
int err;
mdb = mlock_dereference(br->mdb, br);
mp = br_mdb_ip_get(mdb, group);
if (!mp) {
mp = br_multicast_new_group(br, port, group);
err = PTR_ERR(mp);
if (IS_ERR(mp))
return err;
}
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
if (p->port == port)
return -EEXIST;
if ((unsigned long)p->port < (unsigned long)port)
break;
}
p = br_multicast_new_port_group(port, group, *pp, state);
if (unlikely(!p))
return -ENOMEM;
rcu_assign_pointer(*pp, p);
br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
return 0;
}
static int __br_mdb_add(struct net *net, struct net_bridge *br,
struct br_mdb_entry *entry)
{
struct br_ip ip;
struct net_device *dev;
struct net_bridge_port *p;
int ret;
if (!netif_running(br->dev) || br->multicast_disabled)
return -EINVAL;
dev = __dev_get_by_index(net, entry->ifindex);
if (!dev)
return -ENODEV;
p = br_port_get_rtnl(dev);
if (!p || p->br != br || p->state == BR_STATE_DISABLED)
return -EINVAL;
ip.proto = entry->addr.proto;
if (ip.proto == htons(ETH_P_IP))
ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
else
ip.u.ip6 = entry->addr.u.ip6;
#endif
spin_lock_bh(&br->multicast_lock);
ret = br_mdb_add_group(br, p, &ip, entry->state);
spin_unlock_bh(&br->multicast_lock);
return ret;
}
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct br_mdb_entry *entry;
struct net_device *dev;
struct net_bridge *br;
int err;
err = br_mdb_parse(skb, nlh, &dev, &entry);
if (err < 0)
return err;
br = netdev_priv(dev);
err = __br_mdb_add(net, br, entry);
if (!err)
__br_mdb_notify(dev, entry, RTM_NEWMDB);
return err;
}
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
struct br_ip ip;
int err = -EINVAL;
if (!netif_running(br->dev) || br->multicast_disabled)
return -EINVAL;
ip.proto = entry->addr.proto;
if (ip.proto == htons(ETH_P_IP)) {
if (timer_pending(&br->ip4_other_query.timer))
return -EBUSY;
ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
} else {
if (timer_pending(&br->ip6_other_query.timer))
return -EBUSY;
ip.u.ip6 = entry->addr.u.ip6;
#endif
}
spin_lock_bh(&br->multicast_lock);
mdb = mlock_dereference(br->mdb, br);
mp = br_mdb_ip_get(mdb, &ip);
if (!mp)
goto unlock;
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
if (!p->port || p->port->dev->ifindex != entry->ifindex)
continue;
if (p->port->state == BR_STATE_DISABLED)
goto unlock;
rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist);
del_timer(&p->timer);
call_rcu_bh(&p->rcu, br_multicast_free_pg);
err = 0;
if (!mp->ports && !mp->mglist &&
netif_running(br->dev))
mod_timer(&mp->timer, jiffies);
break;
}
unlock:
spin_unlock_bh(&br->multicast_lock);
return err;
}
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net_device *dev;
struct br_mdb_entry *entry;
struct net_bridge *br;
int err;
err = br_mdb_parse(skb, nlh, &dev, &entry);
if (err < 0)
return err;
br = netdev_priv(dev);
err = __br_mdb_del(br, entry);
if (!err)
__br_mdb_notify(dev, entry, RTM_DELMDB);
return err;
}
void br_mdb_init(void)
{
rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, NULL);
rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, NULL);
rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, NULL);
}
void br_mdb_uninit(void)
{
rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}

2301
net/bridge/br_multicast.c Normal file

File diff suppressed because it is too large

1027
net/bridge/br_netfilter.c Normal file

File diff suppressed because it is too large

611
net/bridge/br_netlink.c Normal file
View file

@ -0,0 +1,611 @@
/*
* Bridge netlink control interface
*
* Authors:
* Stephen Hemminger <shemminger@osdl.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/if_bridge.h>
#include "br_private.h"
#include "br_private_stp.h"
static inline size_t br_port_info_size(void)
{
return nla_total_size(1) /* IFLA_BRPORT_STATE */
+ nla_total_size(2) /* IFLA_BRPORT_PRIORITY */
+ nla_total_size(4) /* IFLA_BRPORT_COST */
+ nla_total_size(1) /* IFLA_BRPORT_MODE */
+ nla_total_size(1) /* IFLA_BRPORT_GUARD */
+ nla_total_size(1) /* IFLA_BRPORT_PROTECT */
+ nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */
+ nla_total_size(1) /* IFLA_BRPORT_LEARNING */
+ nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */
+ 0;
}
static inline size_t br_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ifinfomsg))
+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
+ nla_total_size(4) /* IFLA_MASTER */
+ nla_total_size(4) /* IFLA_MTU */
+ nla_total_size(4) /* IFLA_LINK */
+ nla_total_size(1) /* IFLA_OPERSTATE */
+ nla_total_size(br_port_info_size()); /* IFLA_PROTINFO */
}
static int br_port_fill_attrs(struct sk_buff *skb,
const struct net_bridge_port *p)
{
u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
nla_put_u8(skb, IFLA_BRPORT_PROTECT, !!(p->flags & BR_ROOT_BLOCK)) ||
nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)))
return -EMSGSIZE;
return 0;
}
/*
* Create one netlink message for one interface
* Contains port and master info as well as carrier and bridge state.
*/
static int br_fill_ifinfo(struct sk_buff *skb,
const struct net_bridge_port *port,
u32 pid, u32 seq, int event, unsigned int flags,
u32 filter_mask, const struct net_device *dev)
{
const struct net_bridge *br;
struct ifinfomsg *hdr;
struct nlmsghdr *nlh;
u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
if (port)
br = port->br;
else
br = netdev_priv(dev);
br_debug(br, "br_fill_info event %d port %s master %s\n",
event, dev->name, br->dev->name);
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
if (nlh == NULL)
return -EMSGSIZE;
hdr = nlmsg_data(nlh);
hdr->ifi_family = AF_BRIDGE;
hdr->__ifi_pad = 0;
hdr->ifi_type = dev->type;
hdr->ifi_index = dev->ifindex;
hdr->ifi_flags = dev_get_flags(dev);
hdr->ifi_change = 0;
if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
(dev->addr_len &&
nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
(dev->ifindex != dev->iflink &&
nla_put_u32(skb, IFLA_LINK, dev->iflink)))
goto nla_put_failure;
if (event == RTM_NEWLINK && port) {
struct nlattr *nest
= nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest);
}
/* Check if the VID information is requested */
if (filter_mask & RTEXT_FILTER_BRVLAN) {
struct nlattr *af;
const struct net_port_vlans *pv;
struct bridge_vlan_info vinfo;
u16 vid;
u16 pvid;
if (port)
pv = nbp_get_vlan_info(port);
else
pv = br_get_vlan_info(br);
if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID))
goto done;
af = nla_nest_start(skb, IFLA_AF_SPEC);
if (!af)
goto nla_put_failure;
pvid = br_get_pvid(pv);
for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
vinfo.vid = vid;
vinfo.flags = 0;
if (vid == pvid)
vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
if (test_bit(vid, pv->untagged_bitmap))
vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
sizeof(vinfo), &vinfo))
goto nla_put_failure;
}
nla_nest_end(skb, af);
}
done:
return nlmsg_end(skb, nlh);
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
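/* Illustrative note: for a bridge port and event RTM_NEWLINK, the message
 * built above carries an ifinfomsg header, the IFLA_IFNAME / IFLA_MASTER /
 * IFLA_MTU / IFLA_OPERSTATE (and, when present, IFLA_ADDRESS / IFLA_LINK)
 * attributes, a nested IFLA_PROTINFO with the IFLA_BRPORT_* attributes from
 * br_port_fill_attrs(), and, only when RTEXT_FILTER_BRVLAN is requested, a
 * nested IFLA_AF_SPEC holding one IFLA_BRIDGE_VLAN_INFO per configured VLAN.
 */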
/*
* Notify listeners of a change in port information
*/
void br_ifinfo_notify(int event, struct net_bridge_port *port)
{
struct net *net;
struct sk_buff *skb;
int err = -ENOBUFS;
if (!port)
return;
net = dev_net(port->dev);
br_debug(port->br, "port %u(%s) event %d\n",
(unsigned int)port->port_no, port->dev->name, event);
skb = nlmsg_new(br_nlmsg_size(), GFP_ATOMIC);
if (skb == NULL)
goto errout;
err = br_fill_ifinfo(skb, port, 0, 0, event, 0, 0, port->dev);
if (err < 0) {
/* -EMSGSIZE implies BUG in br_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
return;
errout:
rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}
/*
* Dump information about all ports, in response to GETLINK
*/
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u32 filter_mask)
{
int err = 0;
struct net_bridge_port *port = br_port_get_rtnl(dev);
if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN))
goto out;
err = br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI,
filter_mask, dev);
out:
return err;
}
static const struct nla_policy ifla_br_policy[IFLA_MAX+1] = {
[IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
[IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
[IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
.len = sizeof(struct bridge_vlan_info), },
};
static int br_afspec(struct net_bridge *br,
struct net_bridge_port *p,
struct nlattr *af_spec,
int cmd)
{
struct nlattr *tb[IFLA_BRIDGE_MAX+1];
int err = 0;
err = nla_parse_nested(tb, IFLA_BRIDGE_MAX, af_spec, ifla_br_policy);
if (err)
return err;
if (tb[IFLA_BRIDGE_VLAN_INFO]) {
struct bridge_vlan_info *vinfo;
vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
return -EINVAL;
switch (cmd) {
case RTM_SETLINK:
if (p) {
err = nbp_vlan_add(p, vinfo->vid, vinfo->flags);
if (err)
break;
if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
err = br_vlan_add(p->br, vinfo->vid,
vinfo->flags);
} else
err = br_vlan_add(br, vinfo->vid, vinfo->flags);
break;
case RTM_DELLINK:
if (p) {
nbp_vlan_delete(p, vinfo->vid);
if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
br_vlan_delete(p->br, vinfo->vid);
} else
br_vlan_delete(br, vinfo->vid);
break;
}
}
return err;
}
static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
[IFLA_BRPORT_STATE] = { .type = NLA_U8 },
[IFLA_BRPORT_COST] = { .type = NLA_U32 },
[IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
[IFLA_BRPORT_MODE] = { .type = NLA_U8 },
[IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
[IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
[IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
[IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
};
/* Change the state of the port and notify spanning tree */
static int br_set_port_state(struct net_bridge_port *p, u8 state)
{
if (state > BR_STATE_BLOCKING)
return -EINVAL;
/* if kernel STP is running, don't allow changes */
if (p->br->stp_enabled == BR_KERNEL_STP)
return -EBUSY;
/* if device is not up, change is not allowed
* if link is not present, only allowable state is disabled
*/
if (!netif_running(p->dev) ||
(!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
return -ENETDOWN;
br_set_state(p, state);
br_log_state(p);
br_port_state_selection(p->br);
return 0;
}
/* Set or clear port flags based on attribute */
static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
int attrtype, unsigned long mask)
{
if (tb[attrtype]) {
u8 flag = nla_get_u8(tb[attrtype]);
if (flag)
p->flags |= mask;
else
p->flags &= ~mask;
}
}
/* Process bridge protocol info on port */
static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
{
int err;
unsigned long old_flags = p->flags;
br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
if (tb[IFLA_BRPORT_COST]) {
err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
if (err)
return err;
}
if (tb[IFLA_BRPORT_PRIORITY]) {
err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
if (err)
return err;
}
if (tb[IFLA_BRPORT_STATE]) {
err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
if (err)
return err;
}
br_port_flags_change(p, old_flags ^ p->flags);
return 0;
}
/* Change state and parameters on port. */
int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
{
struct nlattr *protinfo;
struct nlattr *afspec;
struct net_bridge_port *p;
struct nlattr *tb[IFLA_BRPORT_MAX + 1];
int err = 0;
protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (!protinfo && !afspec)
return 0;
p = br_port_get_rtnl(dev);
/* We want to accept dev as bridge itself if the AF_SPEC
* is set to see if someone is setting vlan info on the bridge
*/
if (!p && !afspec)
return -EINVAL;
if (p && protinfo) {
if (protinfo->nla_type & NLA_F_NESTED) {
err = nla_parse_nested(tb, IFLA_BRPORT_MAX,
protinfo, br_port_policy);
if (err)
return err;
spin_lock_bh(&p->br->lock);
err = br_setport(p, tb);
spin_unlock_bh(&p->br->lock);
} else {
/* Binary compatibility with old RSTP */
if (nla_len(protinfo) < sizeof(u8))
return -EINVAL;
spin_lock_bh(&p->br->lock);
err = br_set_port_state(p, nla_get_u8(protinfo));
spin_unlock_bh(&p->br->lock);
}
if (err)
goto out;
}
if (afspec) {
err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
afspec, RTM_SETLINK);
}
if (err == 0)
br_ifinfo_notify(RTM_NEWLINK, p);
out:
return err;
}
/* Delete port information */
int br_dellink(struct net_device *dev, struct nlmsghdr *nlh)
{
struct nlattr *afspec;
struct net_bridge_port *p;
int err;
afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (!afspec)
return 0;
p = br_port_get_rtnl(dev);
/* We want to accept dev as bridge itself as well */
if (!p && !(dev->priv_flags & IFF_EBRIDGE))
return -EINVAL;
err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
afspec, RTM_DELLINK);
return err;
}
static int br_validate(struct nlattr *tb[], struct nlattr *data[])
{
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
return -EINVAL;
if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
return -EADDRNOTAVAIL;
}
return 0;
}
static int br_dev_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct net_bridge *br = netdev_priv(dev);
if (tb[IFLA_ADDRESS]) {
spin_lock_bh(&br->lock);
br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
spin_unlock_bh(&br->lock);
}
return register_netdevice(dev);
}
static int br_port_slave_changelink(struct net_device *brdev,
struct net_device *dev,
struct nlattr *tb[],
struct nlattr *data[])
{
if (!data)
return 0;
return br_setport(br_port_get_rtnl(dev), data);
}
static int br_port_fill_slave_info(struct sk_buff *skb,
const struct net_device *brdev,
const struct net_device *dev)
{
return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
}
static size_t br_port_get_slave_size(const struct net_device *brdev,
const struct net_device *dev)
{
return br_port_info_size();
}
static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
[IFLA_BR_FORWARD_DELAY] = { .type = NLA_U32 },
[IFLA_BR_HELLO_TIME] = { .type = NLA_U32 },
[IFLA_BR_MAX_AGE] = { .type = NLA_U32 },
};
static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
struct nlattr *data[])
{
struct net_bridge *br = netdev_priv(brdev);
int err;
if (!data)
return 0;
if (data[IFLA_BR_FORWARD_DELAY]) {
err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
if (err)
return err;
}
if (data[IFLA_BR_HELLO_TIME]) {
err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
if (err)
return err;
}
if (data[IFLA_BR_MAX_AGE]) {
err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
if (err)
return err;
}
return 0;
}
static size_t br_get_size(const struct net_device *brdev)
{
return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */
nla_total_size(sizeof(u32)) + /* IFLA_BR_HELLO_TIME */
nla_total_size(sizeof(u32)) + /* IFLA_BR_MAX_AGE */
0;
}
static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
{
struct net_bridge *br = netdev_priv(brdev);
u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
u32 hello_time = jiffies_to_clock_t(br->hello_time);
u32 age_time = jiffies_to_clock_t(br->max_age);
if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time))
return -EMSGSIZE;
return 0;
}
static size_t br_get_link_af_size(const struct net_device *dev)
{
struct net_port_vlans *pv;
if (br_port_exists(dev))
pv = nbp_get_vlan_info(br_port_get_rtnl(dev));
else if (dev->priv_flags & IFF_EBRIDGE)
pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev));
else
return 0;
if (!pv)
return 0;
/* Each VLAN is returned in bridge_vlan_info along with flags */
return pv->num_vlans * nla_total_size(sizeof(struct bridge_vlan_info));
}
static struct rtnl_af_ops br_af_ops = {
.family = AF_BRIDGE,
.get_link_af_size = br_get_link_af_size,
};
struct rtnl_link_ops br_link_ops __read_mostly = {
.kind = "bridge",
.priv_size = sizeof(struct net_bridge),
.setup = br_dev_setup,
.maxtype = IFLA_BRPORT_MAX,
.policy = br_policy,
.validate = br_validate,
.newlink = br_dev_newlink,
.changelink = br_changelink,
.dellink = br_dev_delete,
.get_size = br_get_size,
.fill_info = br_fill_info,
.slave_maxtype = IFLA_BRPORT_MAX,
.slave_policy = br_port_policy,
.slave_changelink = br_port_slave_changelink,
.get_slave_size = br_port_get_slave_size,
.fill_slave_info = br_port_fill_slave_info,
};
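/* Illustrative note: br_link_ops registers the "bridge" rtnl link kind (see
 * br_netlink_init() below), so bridges can be created and configured from
 * userspace over rtnetlink; e.g. "ip link add name br0 type bridge" ends up
 * in br_dev_newlink() above, and later parameter changes in br_changelink().
 */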
int __init br_netlink_init(void)
{
int err;
br_mdb_init();
rtnl_af_register(&br_af_ops);
err = rtnl_link_register(&br_link_ops);
if (err)
goto out_af;
return 0;
out_af:
rtnl_af_unregister(&br_af_ops);
br_mdb_uninit();
return err;
}
void br_netlink_fini(void)
{
br_mdb_uninit();
rtnl_af_unregister(&br_af_ops);
rtnl_link_unregister(&br_link_ops);
}

96
net/bridge/br_nf_core.c Normal file
View file

@ -0,0 +1,96 @@
/*
* Handle firewalling core
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
* Bart De Schuymer <bdschuym@pandora.be>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Lennert dedicates this file to Kerstin Wurdinger.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <net/route.h>
#include "br_private.h"
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu)
{
}
static void fake_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb)
{
}
static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old)
{
return NULL;
}
static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst,
struct sk_buff *skb,
const void *daddr)
{
return NULL;
}
static unsigned int fake_mtu(const struct dst_entry *dst)
{
return dst->dev->mtu;
}
static struct dst_ops fake_dst_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
.update_pmtu = fake_update_pmtu,
.redirect = fake_redirect,
.cow_metrics = fake_cow_metrics,
.neigh_lookup = fake_neigh_lookup,
.mtu = fake_mtu,
};
/*
* Initialize bogus route table used to keep netfilter happy.
* Currently, we fill in the PMTU entry because netfilter
* refragmentation needs it, and the rt_flags entry because
* ipt_REJECT needs it. Future netfilter modules might
* require us to fill additional fields.
*/
static const u32 br_dst_default_metrics[RTAX_MAX] = {
[RTAX_MTU - 1] = 1500,
};
void br_netfilter_rtable_init(struct net_bridge *br)
{
struct rtable *rt = &br->fake_rtable;
atomic_set(&rt->dst.__refcnt, 1);
rt->dst.dev = br->dev;
rt->dst.path = &rt->dst;
dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
rt->dst.flags = DST_NOXFRM | DST_FAKE_RTABLE;
rt->dst.ops = &fake_dst_ops;
}
int __init br_nf_core_init(void)
{
return dst_entries_init(&fake_dst_ops);
}
void br_nf_core_fini(void)
{
dst_entries_destroy(&fake_dst_ops);
}

849
net/bridge/br_private.h Normal file
View file

@ -0,0 +1,849 @@
/*
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _BR_PRIVATE_H
#define _BR_PRIVATE_H
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/netpoll.h>
#include <linux/u64_stats_sync.h>
#include <net/route.h>
#include <linux/if_vlan.h>
#define BR_HASH_BITS 8
#define BR_HASH_SIZE (1 << BR_HASH_BITS)
#define BR_HOLD_TIME (1*HZ)
#define BR_PORT_BITS 10
#define BR_MAX_PORTS (1<<BR_PORT_BITS)
#define BR_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID)
#define BR_VERSION "2.3"
/* Control of forwarding link local multicast */
#define BR_GROUPFWD_DEFAULT 0
/* Don't allow forwarding control protocols like STP and LLDP */
#define BR_GROUPFWD_RESTRICTED 0x4007u
/* The Nearest Customer Bridge Group Address, 01-80-C2-00-00-[00,0B,0C,0D,0F] */
#define BR_GROUPFWD_8021AD 0xB801u
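/* Worked example of the masks above: bit N covers the link-local group
 * address 01-80-C2-00-00-0N.  BR_GROUPFWD_RESTRICTED = 0x4007u sets bits
 * 0, 1, 2 and 14 (STP at ...-00, ...-01, ...-02 and LLDP at ...-0E), and
 * BR_GROUPFWD_8021AD = 0xB801u sets bits 0, 11, 12, 13 and 15, matching the
 * 01-80-C2-00-00-[00,0B,0C,0D,0F] list in the comment above.
 */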
/* Path to usermode spanning tree program */
#define BR_STP_PROG "/sbin/bridge-stp"
typedef struct bridge_id bridge_id;
typedef struct mac_addr mac_addr;
typedef __u16 port_id;
struct bridge_id
{
unsigned char prio[2];
unsigned char addr[ETH_ALEN];
};
struct mac_addr
{
unsigned char addr[ETH_ALEN];
};
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* our own querier */
struct bridge_mcast_own_query {
struct timer_list timer;
u32 startup_sent;
};
/* other querier */
struct bridge_mcast_other_query {
struct timer_list timer;
unsigned long delay_time;
};
/* selected querier */
struct bridge_mcast_querier {
struct br_ip addr;
struct net_bridge_port __rcu *port;
};
#endif
struct net_port_vlans {
u16 port_idx;
u16 pvid;
union {
struct net_bridge_port *port;
struct net_bridge *br;
} parent;
struct rcu_head rcu;
unsigned long vlan_bitmap[BR_VLAN_BITMAP_LEN];
unsigned long untagged_bitmap[BR_VLAN_BITMAP_LEN];
u16 num_vlans;
};
struct net_bridge_fdb_entry
{
struct hlist_node hlist;
struct net_bridge_port *dst;
struct rcu_head rcu;
unsigned long updated;
unsigned long used;
mac_addr addr;
unsigned char is_local;
unsigned char is_static;
unsigned char added_by_user;
__u16 vlan_id;
};
struct net_bridge_port_group {
struct net_bridge_port *port;
struct net_bridge_port_group __rcu *next;
struct hlist_node mglist;
struct rcu_head rcu;
struct timer_list timer;
struct br_ip addr;
unsigned char state;
};
struct net_bridge_mdb_entry
{
struct hlist_node hlist[2];
struct net_bridge *br;
struct net_bridge_port_group __rcu *ports;
struct rcu_head rcu;
struct timer_list timer;
struct br_ip addr;
bool mglist;
};
struct net_bridge_mdb_htable
{
struct hlist_head *mhash;
struct rcu_head rcu;
struct net_bridge_mdb_htable *old;
u32 size;
u32 max;
u32 secret;
u32 ver;
};
struct net_bridge_port
{
struct net_bridge *br;
struct net_device *dev;
struct list_head list;
/* STP */
u8 priority;
u8 state;
u16 port_no;
unsigned char topology_change_ack;
unsigned char config_pending;
port_id port_id;
port_id designated_port;
bridge_id designated_root;
bridge_id designated_bridge;
u32 path_cost;
u32 designated_cost;
unsigned long designated_age;
struct timer_list forward_delay_timer;
struct timer_list hold_timer;
struct timer_list message_age_timer;
struct kobject kobj;
struct rcu_head rcu;
unsigned long flags;
#define BR_HAIRPIN_MODE 0x00000001
#define BR_BPDU_GUARD 0x00000002
#define BR_ROOT_BLOCK 0x00000004
#define BR_MULTICAST_FAST_LEAVE 0x00000008
#define BR_ADMIN_COST 0x00000010
#define BR_LEARNING 0x00000020
#define BR_FLOOD 0x00000040
#define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING)
#define BR_PROMISC 0x00000080
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
struct bridge_mcast_own_query ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
struct bridge_mcast_own_query ip6_own_query;
#endif /* IS_ENABLED(CONFIG_IPV6) */
unsigned char multicast_router;
struct timer_list multicast_router_timer;
struct hlist_head mglist;
struct hlist_node rlist;
#endif
#ifdef CONFIG_SYSFS
char sysfs_name[IFNAMSIZ];
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *np;
#endif
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
struct net_port_vlans __rcu *vlan_info;
#endif
};
#define br_auto_port(p) ((p)->flags & BR_AUTO_MASK)
#define br_promisc_port(p) ((p)->flags & BR_PROMISC)
#define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev)
{
return rcu_dereference(dev->rx_handler_data);
}
static inline struct net_bridge_port *br_port_get_rtnl(const struct net_device *dev)
{
return br_port_exists(dev) ?
rtnl_dereference(dev->rx_handler_data) : NULL;
}
struct net_bridge
{
spinlock_t lock;
struct list_head port_list;
struct net_device *dev;
struct pcpu_sw_netstats __percpu *stats;
spinlock_t hash_lock;
struct hlist_head hash[BR_HASH_SIZE];
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct rtable fake_rtable;
bool nf_call_iptables;
bool nf_call_ip6tables;
bool nf_call_arptables;
#endif
u16 group_fwd_mask;
u16 group_fwd_mask_required;
/* STP */
bridge_id designated_root;
bridge_id bridge_id;
u32 root_path_cost;
unsigned long max_age;
unsigned long hello_time;
unsigned long forward_delay;
unsigned long bridge_max_age;
unsigned long ageing_time;
unsigned long bridge_hello_time;
unsigned long bridge_forward_delay;
u8 group_addr[ETH_ALEN];
bool group_addr_set;
u16 root_port;
enum {
BR_NO_STP, /* no spanning tree */
BR_KERNEL_STP, /* old STP in kernel */
BR_USER_STP, /* new RSTP in userspace */
} stp_enabled;
unsigned char topology_change;
unsigned char topology_change_detected;
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
unsigned char multicast_router;
u8 multicast_disabled:1;
u8 multicast_querier:1;
u8 multicast_query_use_ifaddr:1;
u32 hash_elasticity;
u32 hash_max;
u32 multicast_last_member_count;
u32 multicast_startup_query_count;
unsigned long multicast_last_member_interval;
unsigned long multicast_membership_interval;
unsigned long multicast_querier_interval;
unsigned long multicast_query_interval;
unsigned long multicast_query_response_interval;
unsigned long multicast_startup_query_interval;
spinlock_t multicast_lock;
struct net_bridge_mdb_htable __rcu *mdb;
struct hlist_head router_list;
struct timer_list multicast_router_timer;
struct bridge_mcast_other_query ip4_other_query;
struct bridge_mcast_own_query ip4_own_query;
struct bridge_mcast_querier ip4_querier;
#if IS_ENABLED(CONFIG_IPV6)
struct bridge_mcast_other_query ip6_other_query;
struct bridge_mcast_own_query ip6_own_query;
struct bridge_mcast_querier ip6_querier;
#endif /* IS_ENABLED(CONFIG_IPV6) */
#endif
struct timer_list hello_timer;
struct timer_list tcn_timer;
struct timer_list topology_change_timer;
struct timer_list gc_timer;
struct kobject *ifobj;
u32 auto_cnt;
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
u8 vlan_enabled;
__be16 vlan_proto;
u16 default_pvid;
struct net_port_vlans __rcu *vlan_info;
#endif
};
struct br_input_skb_cb {
struct net_device *brdev;
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
int igmp;
int mrouters_only;
#endif
u16 frag_max_size;
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
bool vlan_filtered;
#endif
};
#define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb)
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
# define BR_INPUT_SKB_CB_MROUTERS_ONLY(__skb) (BR_INPUT_SKB_CB(__skb)->mrouters_only)
#else
# define BR_INPUT_SKB_CB_MROUTERS_ONLY(__skb) (0)
#endif
#define br_printk(level, br, format, args...) \
printk(level "%s: " format, (br)->dev->name, ##args)
#define br_err(__br, format, args...) \
br_printk(KERN_ERR, __br, format, ##args)
#define br_warn(__br, format, args...) \
br_printk(KERN_WARNING, __br, format, ##args)
#define br_notice(__br, format, args...) \
br_printk(KERN_NOTICE, __br, format, ##args)
#define br_info(__br, format, args...) \
br_printk(KERN_INFO, __br, format, ##args)
#define br_debug(br, format, args...) \
pr_debug("%s: " format, (br)->dev->name, ##args)
/* called under bridge lock */
static inline int br_is_root_bridge(const struct net_bridge *br)
{
return !memcmp(&br->bridge_id, &br->designated_root, 8);
}
/* br_device.c */
void br_dev_setup(struct net_device *dev);
void br_dev_delete(struct net_device *dev, struct list_head *list);
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
struct sk_buff *skb)
{
struct netpoll *np = p->np;
if (np)
netpoll_send_skb(np, skb);
}
int br_netpoll_enable(struct net_bridge_port *p);
void br_netpoll_disable(struct net_bridge_port *p);
#else
static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
struct sk_buff *skb)
{
}
static inline int br_netpoll_enable(struct net_bridge_port *p)
{
return 0;
}
static inline void br_netpoll_disable(struct net_bridge_port *p)
{
}
#endif
/* br_fdb.c */
int br_fdb_init(void);
void br_fdb_fini(void);
void br_fdb_flush(struct net_bridge *br);
void br_fdb_find_delete_local(struct net_bridge *br,
const struct net_bridge_port *p,
const unsigned char *addr, u16 vid);
void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr);
void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
void br_fdb_cleanup(unsigned long arg);
void br_fdb_delete_by_port(struct net_bridge *br,
const struct net_bridge_port *p, int do_all);
struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
const unsigned char *addr, __u16 vid);
int br_fdb_test_addr(struct net_device *dev, unsigned char *addr);
int br_fdb_fillbuf(struct net_bridge *br, void *buf, unsigned long count,
unsigned long off);
int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid);
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid, bool added_by_user);
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr);
int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev,
const unsigned char *addr, u16 nlh_flags);
int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev, struct net_device *fdev, int idx);
int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p);
void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p);
/* br_forward.c */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb);
int br_dev_queue_push_xmit(struct sk_buff *skb);
void br_forward(const struct net_bridge_port *to,
struct sk_buff *skb, struct sk_buff *skb0);
int br_forward_finish(struct sk_buff *skb);
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast);
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
struct sk_buff *skb2, bool unicast);
/* br_if.c */
void br_port_carrier_check(struct net_bridge_port *p);
int br_add_bridge(struct net *net, const char *name);
int br_del_bridge(struct net *net, const char *name);
int br_add_if(struct net_bridge *br, struct net_device *dev);
int br_del_if(struct net_bridge *br, struct net_device *dev);
int br_min_mtu(const struct net_bridge *br);
netdev_features_t br_features_recompute(struct net_bridge *br,
netdev_features_t features);
void br_port_flags_change(struct net_bridge_port *port, unsigned long mask);
void br_manage_promisc(struct net_bridge *br);
/* br_input.c */
int br_handle_frame_finish(struct sk_buff *skb);
rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
static inline bool br_rx_handler_check_rcu(const struct net_device *dev)
{
return rcu_dereference(dev->rx_handler) == br_handle_frame;
}
static inline struct net_bridge_port *br_port_get_check_rcu(const struct net_device *dev)
{
return br_rx_handler_check_rcu(dev) ? br_port_get_rcu(dev) : NULL;
}
/* br_ioctl.c */
int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd,
void __user *arg);
/* br_multicast.c */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
extern unsigned int br_mdb_rehash_seq;
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
struct sk_buff *skb, u16 vid);
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
struct sk_buff *skb, u16 vid);
void br_multicast_add_port(struct net_bridge_port *port);
void br_multicast_del_port(struct net_bridge_port *port);
void br_multicast_enable_port(struct net_bridge_port *port);
void br_multicast_disable_port(struct net_bridge_port *port);
void br_multicast_init(struct net_bridge *br);
void br_multicast_open(struct net_bridge *br);
void br_multicast_stop(struct net_bridge *br);
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
struct sk_buff *skb);
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
struct sk_buff *skb, struct sk_buff *skb2);
int br_multicast_set_router(struct net_bridge *br, unsigned long val);
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val);
int br_multicast_toggle(struct net_bridge *br, unsigned long val);
int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
struct net_bridge_mdb_entry *
br_mdb_ip_get(struct net_bridge_mdb_htable *mdb, struct br_ip *dst);
struct net_bridge_mdb_entry *
br_multicast_new_group(struct net_bridge *br, struct net_bridge_port *port,
struct br_ip *group);
void br_multicast_free_pg(struct rcu_head *head);
struct net_bridge_port_group *
br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
struct net_bridge_port_group __rcu *next,
unsigned char state);
void br_mdb_init(void);
void br_mdb_uninit(void);
void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
struct br_ip *group, int type);
#define mlock_dereference(X, br) \
rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
static inline bool br_multicast_is_router(struct net_bridge *br)
{
return br->multicast_router == 2 ||
(br->multicast_router == 1 &&
timer_pending(&br->multicast_router_timer));
}
static inline bool
__br_multicast_querier_exists(struct net_bridge *br,
struct bridge_mcast_other_query *querier)
{
return time_is_before_jiffies(querier->delay_time) &&
(br->multicast_querier || timer_pending(&querier->timer));
}
static inline bool br_multicast_querier_exists(struct net_bridge *br,
struct ethhdr *eth)
{
switch (eth->h_proto) {
case (htons(ETH_P_IP)):
return __br_multicast_querier_exists(br, &br->ip4_other_query);
#if IS_ENABLED(CONFIG_IPV6)
case (htons(ETH_P_IPV6)):
return __br_multicast_querier_exists(br, &br->ip6_other_query);
#endif
default:
return false;
}
}
#else
static inline int br_multicast_rcv(struct net_bridge *br,
struct net_bridge_port *port,
struct sk_buff *skb,
u16 vid)
{
return 0;
}
static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
struct sk_buff *skb, u16 vid)
{
return NULL;
}
static inline void br_multicast_add_port(struct net_bridge_port *port)
{
}
static inline void br_multicast_del_port(struct net_bridge_port *port)
{
}
static inline void br_multicast_enable_port(struct net_bridge_port *port)
{
}
static inline void br_multicast_disable_port(struct net_bridge_port *port)
{
}
static inline void br_multicast_init(struct net_bridge *br)
{
}
static inline void br_multicast_open(struct net_bridge *br)
{
}
static inline void br_multicast_stop(struct net_bridge *br)
{
}
static inline void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
struct sk_buff *skb)
{
}
static inline void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
struct sk_buff *skb,
struct sk_buff *skb2)
{
}
static inline bool br_multicast_is_router(struct net_bridge *br)
{
return 0;
}
static inline bool br_multicast_querier_exists(struct net_bridge *br,
struct ethhdr *eth)
{
return false;
}
static inline void br_mdb_init(void)
{
}
static inline void br_mdb_uninit(void)
{
}
#endif
/* br_vlan.c */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
struct sk_buff *skb, u16 *vid);
bool br_allowed_egress(struct net_bridge *br, const struct net_port_vlans *v,
const struct sk_buff *skb);
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid);
struct sk_buff *br_handle_vlan(struct net_bridge *br,
const struct net_port_vlans *v,
struct sk_buff *skb);
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
int br_vlan_delete(struct net_bridge *br, u16 vid);
void br_vlan_flush(struct net_bridge *br);
bool br_vlan_find(struct net_bridge *br, u16 vid);
void br_recalculate_fwd_mask(struct net_bridge *br);
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
int br_vlan_set_proto(struct net_bridge *br, unsigned long val);
int br_vlan_init(struct net_bridge *br);
int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val);
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
void nbp_vlan_flush(struct net_bridge_port *port);
bool nbp_vlan_find(struct net_bridge_port *port, u16 vid);
int nbp_vlan_init(struct net_bridge_port *port);
static inline struct net_port_vlans *br_get_vlan_info(
const struct net_bridge *br)
{
return rcu_dereference_rtnl(br->vlan_info);
}
static inline struct net_port_vlans *nbp_get_vlan_info(
const struct net_bridge_port *p)
{
return rcu_dereference_rtnl(p->vlan_info);
}
/* Since the bridge now depends on the 8021Q module, by the time the bridge
 * sees the skb the vlan tag will always be present if the frame was tagged.
*/
static inline int br_vlan_get_tag(const struct sk_buff *skb, u16 *vid)
{
int err = 0;
if (vlan_tx_tag_present(skb))
*vid = vlan_tx_tag_get(skb) & VLAN_VID_MASK;
else {
*vid = 0;
err = -EINVAL;
}
return err;
}
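/* Illustrative usage (hypothetical caller, assuming a VLAN-tagged skb):
 *
 *	u16 vid;
 *
 *	if (!br_vlan_get_tag(skb, &vid))
 *		use_vid(vid);		- vid holds the 12-bit VLAN ID
 *	else
 *		handle_untagged();	- vid was set to 0, -EINVAL returned
 *
 * use_vid() and handle_untagged() are placeholders, not real helpers.
 */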
static inline u16 br_get_pvid(const struct net_port_vlans *v)
{
if (!v)
return 0;
smp_rmb();
return v->pvid;
}
static inline int br_vlan_enabled(struct net_bridge *br)
{
return br->vlan_enabled;
}
#else
static inline bool br_allowed_ingress(struct net_bridge *br,
struct net_port_vlans *v,
struct sk_buff *skb,
u16 *vid)
{
return true;
}
static inline bool br_allowed_egress(struct net_bridge *br,
const struct net_port_vlans *v,
const struct sk_buff *skb)
{
return true;
}
static inline bool br_should_learn(struct net_bridge_port *p,
struct sk_buff *skb, u16 *vid)
{
return true;
}
static inline struct sk_buff *br_handle_vlan(struct net_bridge *br,
const struct net_port_vlans *v,
struct sk_buff *skb)
{
return skb;
}
static inline int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
{
return -EOPNOTSUPP;
}
static inline int br_vlan_delete(struct net_bridge *br, u16 vid)
{
return -EOPNOTSUPP;
}
static inline void br_vlan_flush(struct net_bridge *br)
{
}
static inline bool br_vlan_find(struct net_bridge *br, u16 vid)
{
return false;
}
static inline void br_recalculate_fwd_mask(struct net_bridge *br)
{
}
static inline int br_vlan_init(struct net_bridge *br)
{
return 0;
}
static inline int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
{
return -EOPNOTSUPP;
}
static inline int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
return -EOPNOTSUPP;
}
static inline void nbp_vlan_flush(struct net_bridge_port *port)
{
}
static inline struct net_port_vlans *br_get_vlan_info(
const struct net_bridge *br)
{
return NULL;
}
static inline struct net_port_vlans *nbp_get_vlan_info(
const struct net_bridge_port *p)
{
return NULL;
}
static inline bool nbp_vlan_find(struct net_bridge_port *port, u16 vid)
{
return false;
}
static inline int nbp_vlan_init(struct net_bridge_port *port)
{
return 0;
}
static inline u16 br_vlan_get_tag(const struct sk_buff *skb, u16 *tag)
{
return 0;
}
static inline u16 br_get_pvid(const struct net_port_vlans *v)
{
return 0;
}
static inline int br_vlan_enabled(struct net_bridge *br)
{
return 0;
}
#endif
/* br_netfilter.c */
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
int br_nf_core_init(void);
void br_nf_core_fini(void);
void br_netfilter_rtable_init(struct net_bridge *);
#else
static inline int br_nf_core_init(void) { return 0; }
static inline void br_nf_core_fini(void) {}
#define br_netfilter_rtable_init(x)
#endif
/* br_stp.c */
void br_log_state(const struct net_bridge_port *p);
void br_set_state(struct net_bridge_port *p, unsigned int state);
struct net_bridge_port *br_get_port(struct net_bridge *br, u16 port_no);
void br_init_port(struct net_bridge_port *p);
void br_become_designated_port(struct net_bridge_port *p);
void __br_set_forward_delay(struct net_bridge *br, unsigned long t);
int br_set_forward_delay(struct net_bridge *br, unsigned long x);
int br_set_hello_time(struct net_bridge *br, unsigned long x);
int br_set_max_age(struct net_bridge *br, unsigned long x);
/* br_stp_if.c */
void br_stp_enable_bridge(struct net_bridge *br);
void br_stp_disable_bridge(struct net_bridge *br);
void br_stp_set_enabled(struct net_bridge *br, unsigned long val);
void br_stp_enable_port(struct net_bridge_port *p);
void br_stp_disable_port(struct net_bridge_port *p);
bool br_stp_recalculate_bridge_id(struct net_bridge *br);
void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a);
void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio);
int br_stp_set_port_priority(struct net_bridge_port *p, unsigned long newprio);
int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost);
ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id);
/* br_stp_bpdu.c */
struct stp_proto;
void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
struct net_device *dev);
/* br_stp_timer.c */
void br_stp_timer_init(struct net_bridge *br);
void br_stp_port_timer_init(struct net_bridge_port *p);
unsigned long br_timer_value(const struct timer_list *timer);
/* br.c */
#if IS_ENABLED(CONFIG_ATM_LANE)
extern int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr);
#endif
/* br_netlink.c */
extern struct rtnl_link_ops br_link_ops;
int br_netlink_init(void);
void br_netlink_fini(void);
void br_ifinfo_notify(int event, struct net_bridge_port *port);
int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg);
int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg);
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev,
u32 filter_mask);
#ifdef CONFIG_SYSFS
/* br_sysfs_if.c */
extern const struct sysfs_ops brport_sysfs_ops;
int br_sysfs_addif(struct net_bridge_port *p);
int br_sysfs_renameif(struct net_bridge_port *p);
/* br_sysfs_br.c */
int br_sysfs_addbr(struct net_device *dev);
void br_sysfs_delbr(struct net_device *dev);
#else
static inline int br_sysfs_addif(struct net_bridge_port *p) { return 0; }
static inline int br_sysfs_renameif(struct net_bridge_port *p) { return 0; }
static inline int br_sysfs_addbr(struct net_device *dev) { return 0; }
static inline void br_sysfs_delbr(struct net_device *dev) { return; }
#endif /* CONFIG_SYSFS */
#endif

69
net/bridge/br_private_stp.h Normal file
View file

@ -0,0 +1,69 @@
/*
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _BR_PRIVATE_STP_H
#define _BR_PRIVATE_STP_H
#define BPDU_TYPE_CONFIG 0
#define BPDU_TYPE_TCN 0x80
/* IEEE 802.1D-1998 timer values */
#define BR_MIN_HELLO_TIME (1*HZ)
#define BR_MAX_HELLO_TIME (10*HZ)
#define BR_MIN_FORWARD_DELAY (2*HZ)
#define BR_MAX_FORWARD_DELAY (30*HZ)
#define BR_MIN_MAX_AGE (6*HZ)
#define BR_MAX_MAX_AGE (40*HZ)
#define BR_MIN_PATH_COST 1
#define BR_MAX_PATH_COST 65535
struct br_config_bpdu {
unsigned int topology_change:1;
unsigned int topology_change_ack:1;
bridge_id root;
int root_path_cost;
bridge_id bridge_id;
port_id port_id;
int message_age;
int max_age;
int hello_time;
int forward_delay;
};
/* called under bridge lock */
static inline int br_is_designated_port(const struct net_bridge_port *p)
{
return !memcmp(&p->designated_bridge, &p->br->bridge_id, 8) &&
(p->designated_port == p->port_id);
}
/* br_stp.c */
void br_become_root_bridge(struct net_bridge *br);
void br_config_bpdu_generation(struct net_bridge *);
void br_configuration_update(struct net_bridge *);
void br_port_state_selection(struct net_bridge *);
void br_received_config_bpdu(struct net_bridge_port *p,
const struct br_config_bpdu *bpdu);
void br_received_tcn_bpdu(struct net_bridge_port *p);
void br_transmit_config(struct net_bridge_port *p);
void br_transmit_tcn(struct net_bridge *br);
void br_topology_change_detection(struct net_bridge *br);
/* br_stp_bpdu.c */
void br_send_config_bpdu(struct net_bridge_port *, struct br_config_bpdu *);
void br_send_tcn_bpdu(struct net_bridge_port *);
#endif

575
net/bridge/br_stp.c Normal file
View file

@ -0,0 +1,575 @@
/*
* Spanning tree protocol; generic parts
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/rculist.h>
#include "br_private.h"
#include "br_private_stp.h"
/* since time values in the BPDU are in jiffies and then scaled (1/256 s)
 * before sending, make sure the increment is at least one STP tick.
*/
#define MESSAGE_AGE_INCR ((HZ / 256) + 1)
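/* Worked example: with HZ = 100 (an assumed configuration), HZ / 256 is 0
 * in integer arithmetic, so MESSAGE_AGE_INCR is 1 jiffy; br_set_ticks() in
 * br_stp_bpdu.c then scales that to at least one 1/256 s tick on the wire.
 * With HZ = 1000 the increment is 3 + 1 = 4 jiffies.
 */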
static const char *const br_port_state_names[] = {
[BR_STATE_DISABLED] = "disabled",
[BR_STATE_LISTENING] = "listening",
[BR_STATE_LEARNING] = "learning",
[BR_STATE_FORWARDING] = "forwarding",
[BR_STATE_BLOCKING] = "blocking",
};
void br_log_state(const struct net_bridge_port *p)
{
br_info(p->br, "port %u(%s) entered %s state\n",
(unsigned int) p->port_no, p->dev->name,
br_port_state_names[p->state]);
}
void br_set_state(struct net_bridge_port *p, unsigned int state)
{
p->state = state;
}
/* called under bridge lock */
struct net_bridge_port *br_get_port(struct net_bridge *br, u16 port_no)
{
struct net_bridge_port *p;
list_for_each_entry_rcu(p, &br->port_list, list) {
if (p->port_no == port_no)
return p;
}
return NULL;
}
/* called under bridge lock */
static int br_should_become_root_port(const struct net_bridge_port *p,
u16 root_port)
{
struct net_bridge *br;
struct net_bridge_port *rp;
int t;
br = p->br;
if (p->state == BR_STATE_DISABLED ||
br_is_designated_port(p))
return 0;
if (memcmp(&br->bridge_id, &p->designated_root, 8) <= 0)
return 0;
if (!root_port)
return 1;
rp = br_get_port(br, root_port);
t = memcmp(&p->designated_root, &rp->designated_root, 8);
if (t < 0)
return 1;
else if (t > 0)
return 0;
if (p->designated_cost + p->path_cost <
rp->designated_cost + rp->path_cost)
return 1;
else if (p->designated_cost + p->path_cost >
rp->designated_cost + rp->path_cost)
return 0;
t = memcmp(&p->designated_bridge, &rp->designated_bridge, 8);
if (t < 0)
return 1;
else if (t > 0)
return 0;
if (p->designated_port < rp->designated_port)
return 1;
else if (p->designated_port > rp->designated_port)
return 0;
if (p->port_id < rp->port_id)
return 1;
return 0;
}
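/* Illustrative summary of the tie-break order used above: a port wins root
 * port selection on (1) lower designated root, then (2) lower designated
 * cost + path cost, then (3) lower designated bridge, then (4) lower
 * designated port, and finally (5) lower port_id.
 */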
static void br_root_port_block(const struct net_bridge *br,
struct net_bridge_port *p)
{
br_notice(br, "port %u(%s) tried to become root port (blocked)",
(unsigned int) p->port_no, p->dev->name);
br_set_state(p, BR_STATE_LISTENING);
br_log_state(p);
br_ifinfo_notify(RTM_NEWLINK, p);
if (br->forward_delay > 0)
mod_timer(&p->forward_delay_timer, jiffies + br->forward_delay);
}
/* called under bridge lock */
static void br_root_selection(struct net_bridge *br)
{
struct net_bridge_port *p;
u16 root_port = 0;
list_for_each_entry(p, &br->port_list, list) {
if (!br_should_become_root_port(p, root_port))
continue;
if (p->flags & BR_ROOT_BLOCK)
br_root_port_block(br, p);
else
root_port = p->port_no;
}
br->root_port = root_port;
if (!root_port) {
br->designated_root = br->bridge_id;
br->root_path_cost = 0;
} else {
p = br_get_port(br, root_port);
br->designated_root = p->designated_root;
br->root_path_cost = p->designated_cost + p->path_cost;
}
}
/* called under bridge lock */
void br_become_root_bridge(struct net_bridge *br)
{
br->max_age = br->bridge_max_age;
br->hello_time = br->bridge_hello_time;
br->forward_delay = br->bridge_forward_delay;
br_topology_change_detection(br);
del_timer(&br->tcn_timer);
if (br->dev->flags & IFF_UP) {
br_config_bpdu_generation(br);
mod_timer(&br->hello_timer, jiffies + br->hello_time);
}
}
/* called under bridge lock */
void br_transmit_config(struct net_bridge_port *p)
{
struct br_config_bpdu bpdu;
struct net_bridge *br;
if (timer_pending(&p->hold_timer)) {
p->config_pending = 1;
return;
}
br = p->br;
bpdu.topology_change = br->topology_change;
bpdu.topology_change_ack = p->topology_change_ack;
bpdu.root = br->designated_root;
bpdu.root_path_cost = br->root_path_cost;
bpdu.bridge_id = br->bridge_id;
bpdu.port_id = p->port_id;
if (br_is_root_bridge(br))
bpdu.message_age = 0;
else {
struct net_bridge_port *root
= br_get_port(br, br->root_port);
bpdu.message_age = (jiffies - root->designated_age)
+ MESSAGE_AGE_INCR;
}
bpdu.max_age = br->max_age;
bpdu.hello_time = br->hello_time;
bpdu.forward_delay = br->forward_delay;
if (bpdu.message_age < br->max_age) {
br_send_config_bpdu(p, &bpdu);
p->topology_change_ack = 0;
p->config_pending = 0;
mod_timer(&p->hold_timer,
round_jiffies(jiffies + BR_HOLD_TIME));
}
}
/* called under bridge lock */
static void br_record_config_information(struct net_bridge_port *p,
const struct br_config_bpdu *bpdu)
{
p->designated_root = bpdu->root;
p->designated_cost = bpdu->root_path_cost;
p->designated_bridge = bpdu->bridge_id;
p->designated_port = bpdu->port_id;
p->designated_age = jiffies - bpdu->message_age;
mod_timer(&p->message_age_timer, jiffies
+ (bpdu->max_age - bpdu->message_age));
}
/* called under bridge lock */
static void br_record_config_timeout_values(struct net_bridge *br,
const struct br_config_bpdu *bpdu)
{
br->max_age = bpdu->max_age;
br->hello_time = bpdu->hello_time;
br->forward_delay = bpdu->forward_delay;
br->topology_change = bpdu->topology_change;
}
/* called under bridge lock */
void br_transmit_tcn(struct net_bridge *br)
{
struct net_bridge_port *p;
p = br_get_port(br, br->root_port);
if (p)
br_send_tcn_bpdu(p);
else
br_notice(br, "root port %u not found for topology notice\n",
br->root_port);
}
/* called under bridge lock */
static int br_should_become_designated_port(const struct net_bridge_port *p)
{
struct net_bridge *br;
int t;
br = p->br;
if (br_is_designated_port(p))
return 1;
if (memcmp(&p->designated_root, &br->designated_root, 8))
return 1;
if (br->root_path_cost < p->designated_cost)
return 1;
else if (br->root_path_cost > p->designated_cost)
return 0;
t = memcmp(&br->bridge_id, &p->designated_bridge, 8);
if (t < 0)
return 1;
else if (t > 0)
return 0;
if (p->port_id < p->designated_port)
return 1;
return 0;
}
/* called under bridge lock */
static void br_designated_port_selection(struct net_bridge *br)
{
struct net_bridge_port *p;
list_for_each_entry(p, &br->port_list, list) {
if (p->state != BR_STATE_DISABLED &&
br_should_become_designated_port(p))
br_become_designated_port(p);
}
}
/* called under bridge lock */
static int br_supersedes_port_info(const struct net_bridge_port *p,
const struct br_config_bpdu *bpdu)
{
int t;
t = memcmp(&bpdu->root, &p->designated_root, 8);
if (t < 0)
return 1;
else if (t > 0)
return 0;
if (bpdu->root_path_cost < p->designated_cost)
return 1;
else if (bpdu->root_path_cost > p->designated_cost)
return 0;
t = memcmp(&bpdu->bridge_id, &p->designated_bridge, 8);
if (t < 0)
return 1;
else if (t > 0)
return 0;
if (memcmp(&bpdu->bridge_id, &p->br->bridge_id, 8))
return 1;
if (bpdu->port_id <= p->designated_port)
return 1;
return 0;
}
/* called under bridge lock */
static void br_topology_change_acknowledged(struct net_bridge *br)
{
br->topology_change_detected = 0;
del_timer(&br->tcn_timer);
}
/* called under bridge lock */
void br_topology_change_detection(struct net_bridge *br)
{
int isroot = br_is_root_bridge(br);
if (br->stp_enabled != BR_KERNEL_STP)
return;
br_info(br, "topology change detected, %s\n",
isroot ? "propagating" : "sending tcn bpdu");
if (isroot) {
br->topology_change = 1;
mod_timer(&br->topology_change_timer, jiffies
+ br->bridge_forward_delay + br->bridge_max_age);
} else if (!br->topology_change_detected) {
br_transmit_tcn(br);
mod_timer(&br->tcn_timer, jiffies + br->bridge_hello_time);
}
br->topology_change_detected = 1;
}
/* called under bridge lock */
void br_config_bpdu_generation(struct net_bridge *br)
{
struct net_bridge_port *p;
list_for_each_entry(p, &br->port_list, list) {
if (p->state != BR_STATE_DISABLED &&
br_is_designated_port(p))
br_transmit_config(p);
}
}
/* called under bridge lock */
static void br_reply(struct net_bridge_port *p)
{
br_transmit_config(p);
}
/* called under bridge lock */
void br_configuration_update(struct net_bridge *br)
{
br_root_selection(br);
br_designated_port_selection(br);
}
/* called under bridge lock */
void br_become_designated_port(struct net_bridge_port *p)
{
struct net_bridge *br;
br = p->br;
p->designated_root = br->designated_root;
p->designated_cost = br->root_path_cost;
p->designated_bridge = br->bridge_id;
p->designated_port = p->port_id;
}
/* called under bridge lock */
static void br_make_blocking(struct net_bridge_port *p)
{
if (p->state != BR_STATE_DISABLED &&
p->state != BR_STATE_BLOCKING) {
if (p->state == BR_STATE_FORWARDING ||
p->state == BR_STATE_LEARNING)
br_topology_change_detection(p->br);
br_set_state(p, BR_STATE_BLOCKING);
br_log_state(p);
br_ifinfo_notify(RTM_NEWLINK, p);
del_timer(&p->forward_delay_timer);
}
}
/* called under bridge lock */
static void br_make_forwarding(struct net_bridge_port *p)
{
struct net_bridge *br = p->br;
if (p->state != BR_STATE_BLOCKING)
return;
if (br->stp_enabled == BR_NO_STP || br->forward_delay == 0) {
br_set_state(p, BR_STATE_FORWARDING);
br_topology_change_detection(br);
del_timer(&p->forward_delay_timer);
} else if (br->stp_enabled == BR_KERNEL_STP)
br_set_state(p, BR_STATE_LISTENING);
else
br_set_state(p, BR_STATE_LEARNING);
br_multicast_enable_port(p);
br_log_state(p);
br_ifinfo_notify(RTM_NEWLINK, p);
if (br->forward_delay != 0)
mod_timer(&p->forward_delay_timer, jiffies + br->forward_delay);
}
/* called under bridge lock */
void br_port_state_selection(struct net_bridge *br)
{
struct net_bridge_port *p;
unsigned int liveports = 0;
list_for_each_entry(p, &br->port_list, list) {
if (p->state == BR_STATE_DISABLED)
continue;
/* Don't change port states if userspace is handling STP */
if (br->stp_enabled != BR_USER_STP) {
if (p->port_no == br->root_port) {
p->config_pending = 0;
p->topology_change_ack = 0;
br_make_forwarding(p);
} else if (br_is_designated_port(p)) {
del_timer(&p->message_age_timer);
br_make_forwarding(p);
} else {
p->config_pending = 0;
p->topology_change_ack = 0;
br_make_blocking(p);
}
}
if (p->state == BR_STATE_FORWARDING)
++liveports;
}
if (liveports == 0)
netif_carrier_off(br->dev);
else
netif_carrier_on(br->dev);
}
/* called under bridge lock */
static void br_topology_change_acknowledge(struct net_bridge_port *p)
{
p->topology_change_ack = 1;
br_transmit_config(p);
}
/* called under bridge lock */
void br_received_config_bpdu(struct net_bridge_port *p,
const struct br_config_bpdu *bpdu)
{
struct net_bridge *br;
int was_root;
br = p->br;
was_root = br_is_root_bridge(br);
if (br_supersedes_port_info(p, bpdu)) {
br_record_config_information(p, bpdu);
br_configuration_update(br);
br_port_state_selection(br);
if (!br_is_root_bridge(br) && was_root) {
del_timer(&br->hello_timer);
if (br->topology_change_detected) {
del_timer(&br->topology_change_timer);
br_transmit_tcn(br);
mod_timer(&br->tcn_timer,
jiffies + br->bridge_hello_time);
}
}
if (p->port_no == br->root_port) {
br_record_config_timeout_values(br, bpdu);
br_config_bpdu_generation(br);
if (bpdu->topology_change_ack)
br_topology_change_acknowledged(br);
}
} else if (br_is_designated_port(p)) {
br_reply(p);
}
}
/* called under bridge lock */
void br_received_tcn_bpdu(struct net_bridge_port *p)
{
if (br_is_designated_port(p)) {
br_info(p->br, "port %u(%s) received tcn bpdu\n",
(unsigned int) p->port_no, p->dev->name);
br_topology_change_detection(p->br);
br_topology_change_acknowledge(p);
}
}
/* Change bridge STP parameter */
int br_set_hello_time(struct net_bridge *br, unsigned long val)
{
unsigned long t = clock_t_to_jiffies(val);
if (t < BR_MIN_HELLO_TIME || t > BR_MAX_HELLO_TIME)
return -ERANGE;
spin_lock_bh(&br->lock);
br->bridge_hello_time = t;
if (br_is_root_bridge(br))
br->hello_time = br->bridge_hello_time;
spin_unlock_bh(&br->lock);
return 0;
}
int br_set_max_age(struct net_bridge *br, unsigned long val)
{
unsigned long t = clock_t_to_jiffies(val);
if (t < BR_MIN_MAX_AGE || t > BR_MAX_MAX_AGE)
return -ERANGE;
spin_lock_bh(&br->lock);
br->bridge_max_age = t;
if (br_is_root_bridge(br))
br->max_age = br->bridge_max_age;
spin_unlock_bh(&br->lock);
return 0;
}
void __br_set_forward_delay(struct net_bridge *br, unsigned long t)
{
br->bridge_forward_delay = t;
if (br_is_root_bridge(br))
br->forward_delay = br->bridge_forward_delay;
}
int br_set_forward_delay(struct net_bridge *br, unsigned long val)
{
unsigned long t = clock_t_to_jiffies(val);
int err = -ERANGE;
spin_lock_bh(&br->lock);
if (br->stp_enabled != BR_NO_STP &&
(t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY))
goto unlock;
__br_set_forward_delay(br, t);
err = 0;
unlock:
spin_unlock_bh(&br->lock);
return err;
}

241
net/bridge/br_stp_bpdu.c Normal file

@ -0,0 +1,241 @@
/*
* Spanning tree protocol; BPDU handling
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/netfilter_bridge.h>
#include <linux/etherdevice.h>
#include <linux/llc.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#include <net/net_namespace.h>
#include <net/llc.h>
#include <net/llc_pdu.h>
#include <net/stp.h>
#include <asm/unaligned.h>
#include "br_private.h"
#include "br_private_stp.h"
#define STP_HZ 256
#define LLC_RESERVE sizeof(struct llc_pdu_un)
static void br_send_bpdu(struct net_bridge_port *p,
const unsigned char *data, int length)
{
struct sk_buff *skb;
skb = dev_alloc_skb(length+LLC_RESERVE);
if (!skb)
return;
skb->dev = p->dev;
skb->protocol = htons(ETH_P_802_2);
skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, LLC_RESERVE);
memcpy(__skb_put(skb, length), data, length);
llc_pdu_header_init(skb, LLC_PDU_TYPE_U, LLC_SAP_BSPAN,
LLC_SAP_BSPAN, LLC_PDU_CMD);
llc_pdu_init_as_ui_cmd(skb);
llc_mac_hdr_init(skb, p->dev->dev_addr, p->br->group_addr);
skb_reset_mac_header(skb);
NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
dev_queue_xmit);
}
static inline void br_set_ticks(unsigned char *dest, int j)
{
unsigned long ticks = (STP_HZ * j) / HZ;
put_unaligned_be16(ticks, dest);
}
static inline int br_get_ticks(const unsigned char *src)
{
unsigned long ticks = get_unaligned_be16(src);
return DIV_ROUND_UP(ticks * HZ, STP_HZ);
}
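/*
* Unit note for the helpers above: BPDU timer fields travel on the wire in
* 1/256 s units (STP_HZ) while the kernel keeps times in jiffies. Worked
* example (illustrative, assuming HZ == 100): a 2 s hello time is 200
* jiffies; br_set_ticks() stores (256 * 200) / 100 = 512 as a big-endian
* 16-bit value, and br_get_ticks() recovers DIV_ROUND_UP(512 * 100, 256)
* = 200 jiffies.
*/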
/* called under bridge lock */
void br_send_config_bpdu(struct net_bridge_port *p, struct br_config_bpdu *bpdu)
{
unsigned char buf[35];
if (p->br->stp_enabled != BR_KERNEL_STP)
return;
buf[0] = 0;
buf[1] = 0;
buf[2] = 0;
buf[3] = BPDU_TYPE_CONFIG;
buf[4] = (bpdu->topology_change ? 0x01 : 0) |
(bpdu->topology_change_ack ? 0x80 : 0);
buf[5] = bpdu->root.prio[0];
buf[6] = bpdu->root.prio[1];
buf[7] = bpdu->root.addr[0];
buf[8] = bpdu->root.addr[1];
buf[9] = bpdu->root.addr[2];
buf[10] = bpdu->root.addr[3];
buf[11] = bpdu->root.addr[4];
buf[12] = bpdu->root.addr[5];
buf[13] = (bpdu->root_path_cost >> 24) & 0xFF;
buf[14] = (bpdu->root_path_cost >> 16) & 0xFF;
buf[15] = (bpdu->root_path_cost >> 8) & 0xFF;
buf[16] = bpdu->root_path_cost & 0xFF;
buf[17] = bpdu->bridge_id.prio[0];
buf[18] = bpdu->bridge_id.prio[1];
buf[19] = bpdu->bridge_id.addr[0];
buf[20] = bpdu->bridge_id.addr[1];
buf[21] = bpdu->bridge_id.addr[2];
buf[22] = bpdu->bridge_id.addr[3];
buf[23] = bpdu->bridge_id.addr[4];
buf[24] = bpdu->bridge_id.addr[5];
buf[25] = (bpdu->port_id >> 8) & 0xFF;
buf[26] = bpdu->port_id & 0xFF;
br_set_ticks(buf+27, bpdu->message_age);
br_set_ticks(buf+29, bpdu->max_age);
br_set_ticks(buf+31, bpdu->hello_time);
br_set_ticks(buf+33, bpdu->forward_delay);
br_send_bpdu(p, buf, 35);
}
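/*
* Layout recap for the 35-byte buffer built above (per 802.1D): protocol id
* (2 bytes, zero), version (1, zero), BPDU type (1), flags (1), root bridge
* id (8), root path cost (4), sender bridge id (8), port id (2), then message
* age, max age, hello time and forward delay (2 bytes each, in 1/256 s).
*/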
/* called under bridge lock */
void br_send_tcn_bpdu(struct net_bridge_port *p)
{
unsigned char buf[4];
if (p->br->stp_enabled != BR_KERNEL_STP)
return;
buf[0] = 0;
buf[1] = 0;
buf[2] = 0;
buf[3] = BPDU_TYPE_TCN;
br_send_bpdu(p, buf, 4);
}
/*
* Called from llc.
*
* NO locks, but rcu_read_lock
*/
void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
struct net_device *dev)
{
const unsigned char *dest = eth_hdr(skb)->h_dest;
struct net_bridge_port *p;
struct net_bridge *br;
const unsigned char *buf;
if (!pskb_may_pull(skb, 4))
goto err;
/* check protocol id and version */
buf = skb->data;
if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0)
goto err;
p = br_port_get_check_rcu(dev);
if (!p)
goto err;
br = p->br;
spin_lock(&br->lock);
if (br->stp_enabled != BR_KERNEL_STP)
goto out;
if (!(br->dev->flags & IFF_UP))
goto out;
if (p->state == BR_STATE_DISABLED)
goto out;
if (!ether_addr_equal(dest, br->group_addr))
goto out;
if (p->flags & BR_BPDU_GUARD) {
br_notice(br, "BPDU received on blocked port %u(%s)\n",
(unsigned int) p->port_no, p->dev->name);
br_stp_disable_port(p);
goto out;
}
buf = skb_pull(skb, 3);
if (buf[0] == BPDU_TYPE_CONFIG) {
struct br_config_bpdu bpdu;
if (!pskb_may_pull(skb, 32))
goto out;
buf = skb->data;
bpdu.topology_change = (buf[1] & 0x01) ? 1 : 0;
bpdu.topology_change_ack = (buf[1] & 0x80) ? 1 : 0;
bpdu.root.prio[0] = buf[2];
bpdu.root.prio[1] = buf[3];
bpdu.root.addr[0] = buf[4];
bpdu.root.addr[1] = buf[5];
bpdu.root.addr[2] = buf[6];
bpdu.root.addr[3] = buf[7];
bpdu.root.addr[4] = buf[8];
bpdu.root.addr[5] = buf[9];
bpdu.root_path_cost =
(buf[10] << 24) |
(buf[11] << 16) |
(buf[12] << 8) |
buf[13];
bpdu.bridge_id.prio[0] = buf[14];
bpdu.bridge_id.prio[1] = buf[15];
bpdu.bridge_id.addr[0] = buf[16];
bpdu.bridge_id.addr[1] = buf[17];
bpdu.bridge_id.addr[2] = buf[18];
bpdu.bridge_id.addr[3] = buf[19];
bpdu.bridge_id.addr[4] = buf[20];
bpdu.bridge_id.addr[5] = buf[21];
bpdu.port_id = (buf[22] << 8) | buf[23];
bpdu.message_age = br_get_ticks(buf+24);
bpdu.max_age = br_get_ticks(buf+26);
bpdu.hello_time = br_get_ticks(buf+28);
bpdu.forward_delay = br_get_ticks(buf+30);
if (bpdu.message_age > bpdu.max_age) {
if (net_ratelimit())
br_notice(p->br,
"port %u config from %pM"
" (message_age %ul > max_age %ul)\n",
p->port_no,
eth_hdr(skb)->h_source,
bpdu.message_age, bpdu.max_age);
goto out;
}
br_received_config_bpdu(p, &bpdu);
} else if (buf[0] == BPDU_TYPE_TCN) {
br_received_tcn_bpdu(p);
}
out:
spin_unlock(&br->lock);
err:
kfree_skb(skb);
}

314
net/bridge/br_stp_if.c Normal file

@ -0,0 +1,314 @@
/*
* Spanning tree protocol; interface code
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include "br_private.h"
#include "br_private_stp.h"
/* Port id is composed of priority and port number.
* NB: some bits of priority are dropped to
* make room for more ports.
*/
static inline port_id br_make_port_id(__u8 priority, __u16 port_no)
{
return ((u16)priority << BR_PORT_BITS)
| (port_no & ((1<<BR_PORT_BITS)-1));
}
#define BR_MAX_PORT_PRIORITY ((u16)~0 >> BR_PORT_BITS)
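/*
* Illustrative example (values assumed, not taken from this file): with
* BR_PORT_BITS == 10 the priority occupies the top 6 bits of the 16-bit port
* id and the port number the low 10 bits, so BR_MAX_PORT_PRIORITY is 63 and
* br_make_port_id(32, 5) yields (32 << 10) | 5 == 0x8005.
*/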
/* called under bridge lock */
void br_init_port(struct net_bridge_port *p)
{
p->port_id = br_make_port_id(p->priority, p->port_no);
br_become_designated_port(p);
br_set_state(p, BR_STATE_BLOCKING);
p->topology_change_ack = 0;
p->config_pending = 0;
}
/* called under bridge lock */
void br_stp_enable_bridge(struct net_bridge *br)
{
struct net_bridge_port *p;
spin_lock_bh(&br->lock);
mod_timer(&br->hello_timer, jiffies + br->hello_time);
mod_timer(&br->gc_timer, jiffies + HZ/10);
br_config_bpdu_generation(br);
list_for_each_entry(p, &br->port_list, list) {
if (netif_running(p->dev) && netif_oper_up(p->dev))
br_stp_enable_port(p);
}
spin_unlock_bh(&br->lock);
}
/* NO locks held */
void br_stp_disable_bridge(struct net_bridge *br)
{
struct net_bridge_port *p;
spin_lock_bh(&br->lock);
list_for_each_entry(p, &br->port_list, list) {
if (p->state != BR_STATE_DISABLED)
br_stp_disable_port(p);
}
br->topology_change = 0;
br->topology_change_detected = 0;
spin_unlock_bh(&br->lock);
del_timer_sync(&br->hello_timer);
del_timer_sync(&br->topology_change_timer);
del_timer_sync(&br->tcn_timer);
del_timer_sync(&br->gc_timer);
}
/* called under bridge lock */
void br_stp_enable_port(struct net_bridge_port *p)
{
br_init_port(p);
br_port_state_selection(p->br);
br_log_state(p);
br_ifinfo_notify(RTM_NEWLINK, p);
}
/* called under bridge lock */
void br_stp_disable_port(struct net_bridge_port *p)
{
struct net_bridge *br = p->br;
int wasroot;
wasroot = br_is_root_bridge(br);
br_become_designated_port(p);
br_set_state(p, BR_STATE_DISABLED);
p->topology_change_ack = 0;
p->config_pending = 0;
br_log_state(p);
br_ifinfo_notify(RTM_NEWLINK, p);
del_timer(&p->message_age_timer);
del_timer(&p->forward_delay_timer);
del_timer(&p->hold_timer);
br_fdb_delete_by_port(br, p, 0);
br_multicast_disable_port(p);
br_configuration_update(br);
br_port_state_selection(br);
if (br_is_root_bridge(br) && !wasroot)
br_become_root_bridge(br);
}
static void br_stp_start(struct net_bridge *br)
{
int r;
char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL };
char *envp[] = { NULL };
r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
spin_lock_bh(&br->lock);
if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY)
__br_set_forward_delay(br, BR_MIN_FORWARD_DELAY);
else if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY)
__br_set_forward_delay(br, BR_MAX_FORWARD_DELAY);
if (r == 0) {
br->stp_enabled = BR_USER_STP;
br_debug(br, "userspace STP started\n");
} else {
br->stp_enabled = BR_KERNEL_STP;
br_debug(br, "using kernel STP\n");
/* To start timers on any ports left in blocking */
br_port_state_selection(br);
}
spin_unlock_bh(&br->lock);
}
static void br_stp_stop(struct net_bridge *br)
{
int r;
char *argv[] = { BR_STP_PROG, br->dev->name, "stop", NULL };
char *envp[] = { NULL };
if (br->stp_enabled == BR_USER_STP) {
r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
br_info(br, "userspace STP stopped, return code %d\n", r);
/* To start timers on any ports left in blocking */
spin_lock_bh(&br->lock);
br_port_state_selection(br);
spin_unlock_bh(&br->lock);
}
br->stp_enabled = BR_NO_STP;
}
void br_stp_set_enabled(struct net_bridge *br, unsigned long val)
{
ASSERT_RTNL();
if (val) {
if (br->stp_enabled == BR_NO_STP)
br_stp_start(br);
} else {
if (br->stp_enabled != BR_NO_STP)
br_stp_stop(br);
}
}
/* called under bridge lock */
void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
{
/* should be aligned on 2 bytes for ether_addr_equal() */
unsigned short oldaddr_aligned[ETH_ALEN >> 1];
unsigned char *oldaddr = (unsigned char *)oldaddr_aligned;
struct net_bridge_port *p;
int wasroot;
wasroot = br_is_root_bridge(br);
br_fdb_change_mac_address(br, addr);
memcpy(oldaddr, br->bridge_id.addr, ETH_ALEN);
memcpy(br->bridge_id.addr, addr, ETH_ALEN);
memcpy(br->dev->dev_addr, addr, ETH_ALEN);
list_for_each_entry(p, &br->port_list, list) {
if (ether_addr_equal(p->designated_bridge.addr, oldaddr))
memcpy(p->designated_bridge.addr, addr, ETH_ALEN);
if (ether_addr_equal(p->designated_root.addr, oldaddr))
memcpy(p->designated_root.addr, addr, ETH_ALEN);
}
br_configuration_update(br);
br_port_state_selection(br);
if (br_is_root_bridge(br) && !wasroot)
br_become_root_bridge(br);
}
/* should be aligned on 2 bytes for ether_addr_equal() */
static const unsigned short br_mac_zero_aligned[ETH_ALEN >> 1];
/* called under bridge lock */
bool br_stp_recalculate_bridge_id(struct net_bridge *br)
{
const unsigned char *br_mac_zero =
(const unsigned char *)br_mac_zero_aligned;
const unsigned char *addr = br_mac_zero;
struct net_bridge_port *p;
/* user has chosen a value so keep it */
if (br->dev->addr_assign_type == NET_ADDR_SET)
return false;
list_for_each_entry(p, &br->port_list, list) {
if (addr == br_mac_zero ||
memcmp(p->dev->dev_addr, addr, ETH_ALEN) < 0)
addr = p->dev->dev_addr;
}
if (ether_addr_equal(br->bridge_id.addr, addr))
return false; /* no change */
br_stp_change_bridge_id(br, addr);
return true;
}
/* called under bridge lock */
void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
{
struct net_bridge_port *p;
int wasroot;
wasroot = br_is_root_bridge(br);
list_for_each_entry(p, &br->port_list, list) {
if (p->state != BR_STATE_DISABLED &&
br_is_designated_port(p)) {
p->designated_bridge.prio[0] = (newprio >> 8) & 0xFF;
p->designated_bridge.prio[1] = newprio & 0xFF;
}
}
br->bridge_id.prio[0] = (newprio >> 8) & 0xFF;
br->bridge_id.prio[1] = newprio & 0xFF;
br_configuration_update(br);
br_port_state_selection(br);
if (br_is_root_bridge(br) && !wasroot)
br_become_root_bridge(br);
}
/* called under bridge lock */
int br_stp_set_port_priority(struct net_bridge_port *p, unsigned long newprio)
{
port_id new_port_id;
if (newprio > BR_MAX_PORT_PRIORITY)
return -ERANGE;
new_port_id = br_make_port_id(newprio, p->port_no);
if (br_is_designated_port(p))
p->designated_port = new_port_id;
p->port_id = new_port_id;
p->priority = newprio;
if (!memcmp(&p->br->bridge_id, &p->designated_bridge, 8) &&
p->port_id < p->designated_port) {
br_become_designated_port(p);
br_port_state_selection(p->br);
}
return 0;
}
/* called under bridge lock */
int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost)
{
if (path_cost < BR_MIN_PATH_COST ||
path_cost > BR_MAX_PATH_COST)
return -ERANGE;
p->flags |= BR_ADMIN_COST;
p->path_cost = path_cost;
br_configuration_update(p->br);
br_port_state_selection(p->br);
return 0;
}
ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id)
{
return sprintf(buf, "%.2x%.2x.%.2x%.2x%.2x%.2x%.2x%.2x\n",
id->prio[0], id->prio[1],
id->addr[0], id->addr[1], id->addr[2],
id->addr[3], id->addr[4], id->addr[5]);
}
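/*
* Example output (hypothetical values): a bridge with priority 0x8000 and MAC
* 00:11:22:33:44:55 is rendered as "8000.001122334455\n", the priority.MAC
* notation used by brctl and the sysfs bridge attributes.
*/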

174
net/bridge/br_stp_timer.c Normal file

@ -0,0 +1,174 @@
/*
* Spanning tree protocol; timer-related code
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/times.h>
#include "br_private.h"
#include "br_private_stp.h"
/* called under bridge lock */
static int br_is_designated_for_some_port(const struct net_bridge *br)
{
struct net_bridge_port *p;
list_for_each_entry(p, &br->port_list, list) {
if (p->state != BR_STATE_DISABLED &&
!memcmp(&p->designated_bridge, &br->bridge_id, 8))
return 1;
}
return 0;
}
static void br_hello_timer_expired(unsigned long arg)
{
struct net_bridge *br = (struct net_bridge *)arg;
br_debug(br, "hello timer expired\n");
spin_lock(&br->lock);
if (br->dev->flags & IFF_UP) {
br_config_bpdu_generation(br);
mod_timer(&br->hello_timer, round_jiffies(jiffies + br->hello_time));
}
spin_unlock(&br->lock);
}
static void br_message_age_timer_expired(unsigned long arg)
{
struct net_bridge_port *p = (struct net_bridge_port *) arg;
struct net_bridge *br = p->br;
const bridge_id *id = &p->designated_bridge;
int was_root;
if (p->state == BR_STATE_DISABLED)
return;
br_info(br, "port %u(%s) neighbor %.2x%.2x.%pM lost\n",
(unsigned int) p->port_no, p->dev->name,
id->prio[0], id->prio[1], &id->addr);
/*
* According to the spec, the message age timer cannot be
* running when we are the root bridge. So.. this was_root
* check is redundant. I'm leaving it in for now, though.
*/
spin_lock(&br->lock);
if (p->state == BR_STATE_DISABLED)
goto unlock;
was_root = br_is_root_bridge(br);
br_become_designated_port(p);
br_configuration_update(br);
br_port_state_selection(br);
if (br_is_root_bridge(br) && !was_root)
br_become_root_bridge(br);
unlock:
spin_unlock(&br->lock);
}
static void br_forward_delay_timer_expired(unsigned long arg)
{
struct net_bridge_port *p = (struct net_bridge_port *) arg;
struct net_bridge *br = p->br;
br_debug(br, "port %u(%s) forward delay timer\n",
(unsigned int) p->port_no, p->dev->name);
spin_lock(&br->lock);
if (p->state == BR_STATE_LISTENING) {
br_set_state(p, BR_STATE_LEARNING);
mod_timer(&p->forward_delay_timer,
jiffies + br->forward_delay);
} else if (p->state == BR_STATE_LEARNING) {
br_set_state(p, BR_STATE_FORWARDING);
if (br_is_designated_for_some_port(br))
br_topology_change_detection(br);
netif_carrier_on(br->dev);
}
br_log_state(p);
br_ifinfo_notify(RTM_NEWLINK, p);
spin_unlock(&br->lock);
}
static void br_tcn_timer_expired(unsigned long arg)
{
struct net_bridge *br = (struct net_bridge *) arg;
br_debug(br, "tcn timer expired\n");
spin_lock(&br->lock);
if (!br_is_root_bridge(br) && (br->dev->flags & IFF_UP)) {
br_transmit_tcn(br);
mod_timer(&br->tcn_timer, jiffies + br->bridge_hello_time);
}
spin_unlock(&br->lock);
}
static void br_topology_change_timer_expired(unsigned long arg)
{
struct net_bridge *br = (struct net_bridge *) arg;
br_debug(br, "topo change timer expired\n");
spin_lock(&br->lock);
br->topology_change_detected = 0;
br->topology_change = 0;
spin_unlock(&br->lock);
}
static void br_hold_timer_expired(unsigned long arg)
{
struct net_bridge_port *p = (struct net_bridge_port *) arg;
br_debug(p->br, "port %u(%s) hold timer expired\n",
(unsigned int) p->port_no, p->dev->name);
spin_lock(&p->br->lock);
if (p->config_pending)
br_transmit_config(p);
spin_unlock(&p->br->lock);
}
void br_stp_timer_init(struct net_bridge *br)
{
setup_timer(&br->hello_timer, br_hello_timer_expired,
(unsigned long) br);
setup_timer(&br->tcn_timer, br_tcn_timer_expired,
(unsigned long) br);
setup_timer(&br->topology_change_timer,
br_topology_change_timer_expired,
(unsigned long) br);
setup_timer(&br->gc_timer, br_fdb_cleanup, (unsigned long) br);
}
void br_stp_port_timer_init(struct net_bridge_port *p)
{
setup_timer(&p->message_age_timer, br_message_age_timer_expired,
(unsigned long) p);
setup_timer(&p->forward_delay_timer, br_forward_delay_timer_expired,
(unsigned long) p);
setup_timer(&p->hold_timer, br_hold_timer_expired,
(unsigned long) p);
}
/* Report ticks left (in USER_HZ) used for API */
unsigned long br_timer_value(const struct timer_list *timer)
{
return timer_pending(timer)
? jiffies_delta_to_clock_t(timer->expires - jiffies) : 0;
}

889
net/bridge/br_sysfs_br.c Normal file

@ -0,0 +1,889 @@
/*
* Sysfs attributes of bridge
* Linux ethernet bridge
*
* Authors:
* Stephen Hemminger <shemminger@osdl.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/rtnetlink.h>
#include <linux/spinlock.h>
#include <linux/times.h>
#include "br_private.h"
#define to_dev(obj) container_of(obj, struct device, kobj)
#define to_bridge(cd) ((struct net_bridge *)netdev_priv(to_net_dev(cd)))
/*
* Common code for storing bridge parameters.
*/
static ssize_t store_bridge_parm(struct device *d,
const char *buf, size_t len,
int (*set)(struct net_bridge *, unsigned long))
{
struct net_bridge *br = to_bridge(d);
char *endp;
unsigned long val;
int err;
if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf)
return -EINVAL;
err = (*set)(br, val);
return err ? err : len;
}
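/*
* Every writable attribute below follows this pattern: its *_store() handler
* hands a setter such as br_set_forward_delay() to store_bridge_parm(), which
* checks CAP_NET_ADMIN, parses the value and calls the setter. For example,
* writing "200" to forward_delay ends up in br_set_forward_delay(br, 200),
* which converts the USER_HZ value to jiffies.
*/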
static ssize_t forward_delay_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay));
}
static ssize_t forward_delay_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_set_forward_delay);
}
static DEVICE_ATTR_RW(forward_delay);
static ssize_t hello_time_show(struct device *d, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%lu\n",
jiffies_to_clock_t(to_bridge(d)->hello_time));
}
static ssize_t hello_time_store(struct device *d,
struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, br_set_hello_time);
}
static DEVICE_ATTR_RW(hello_time);
static ssize_t max_age_show(struct device *d, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%lu\n",
jiffies_to_clock_t(to_bridge(d)->max_age));
}
static ssize_t max_age_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_set_max_age);
}
static DEVICE_ATTR_RW(max_age);
static ssize_t ageing_time_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->ageing_time));
}
static int set_ageing_time(struct net_bridge *br, unsigned long val)
{
br->ageing_time = clock_t_to_jiffies(val);
return 0;
}
static ssize_t ageing_time_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_ageing_time);
}
static DEVICE_ATTR_RW(ageing_time);
static ssize_t stp_state_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n", br->stp_enabled);
}
static ssize_t stp_state_store(struct device *d,
struct device_attribute *attr, const char *buf,
size_t len)
{
struct net_bridge *br = to_bridge(d);
char *endp;
unsigned long val;
if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf)
return -EINVAL;
if (!rtnl_trylock())
return restart_syscall();
br_stp_set_enabled(br, val);
rtnl_unlock();
return len;
}
static DEVICE_ATTR_RW(stp_state);
static ssize_t group_fwd_mask_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%#x\n", br->group_fwd_mask);
}
static ssize_t group_fwd_mask_store(struct device *d,
struct device_attribute *attr,
const char *buf,
size_t len)
{
struct net_bridge *br = to_bridge(d);
char *endp;
unsigned long val;
if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
val = simple_strtoul(buf, &endp, 0);
if (endp == buf)
return -EINVAL;
if (val & BR_GROUPFWD_RESTRICTED)
return -EINVAL;
br->group_fwd_mask = val;
return len;
}
static DEVICE_ATTR_RW(group_fwd_mask);
static ssize_t priority_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n",
(br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1]);
}
static int set_priority(struct net_bridge *br, unsigned long val)
{
br_stp_set_bridge_priority(br, (u16) val);
return 0;
}
static ssize_t priority_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_priority);
}
static DEVICE_ATTR_RW(priority);
static ssize_t root_id_show(struct device *d, struct device_attribute *attr,
char *buf)
{
return br_show_bridge_id(buf, &to_bridge(d)->designated_root);
}
static DEVICE_ATTR_RO(root_id);
static ssize_t bridge_id_show(struct device *d, struct device_attribute *attr,
char *buf)
{
return br_show_bridge_id(buf, &to_bridge(d)->bridge_id);
}
static DEVICE_ATTR_RO(bridge_id);
static ssize_t root_port_show(struct device *d, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", to_bridge(d)->root_port);
}
static DEVICE_ATTR_RO(root_port);
static ssize_t root_path_cost_show(struct device *d,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", to_bridge(d)->root_path_cost);
}
static DEVICE_ATTR_RO(root_path_cost);
static ssize_t topology_change_show(struct device *d,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", to_bridge(d)->topology_change);
}
static DEVICE_ATTR_RO(topology_change);
static ssize_t topology_change_detected_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n", br->topology_change_detected);
}
static DEVICE_ATTR_RO(topology_change_detected);
static ssize_t hello_timer_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%ld\n", br_timer_value(&br->hello_timer));
}
static DEVICE_ATTR_RO(hello_timer);
static ssize_t tcn_timer_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%ld\n", br_timer_value(&br->tcn_timer));
}
static DEVICE_ATTR_RO(tcn_timer);
static ssize_t topology_change_timer_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%ld\n", br_timer_value(&br->topology_change_timer));
}
static DEVICE_ATTR_RO(topology_change_timer);
static ssize_t gc_timer_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%ld\n", br_timer_value(&br->gc_timer));
}
static DEVICE_ATTR_RO(gc_timer);
static ssize_t group_addr_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%x:%x:%x:%x:%x:%x\n",
br->group_addr[0], br->group_addr[1],
br->group_addr[2], br->group_addr[3],
br->group_addr[4], br->group_addr[5]);
}
static ssize_t group_addr_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct net_bridge *br = to_bridge(d);
u8 new_addr[6];
int i;
if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
&new_addr[0], &new_addr[1], &new_addr[2],
&new_addr[3], &new_addr[4], &new_addr[5]) != 6)
return -EINVAL;
if (!is_link_local_ether_addr(new_addr))
return -EINVAL;
if (new_addr[5] == 1 || /* 802.3x Pause address */
new_addr[5] == 2 || /* 802.3ad Slow protocols */
new_addr[5] == 3) /* 802.1X PAE address */
return -EINVAL;
if (!rtnl_trylock())
return restart_syscall();
spin_lock_bh(&br->lock);
for (i = 0; i < 6; i++)
br->group_addr[i] = new_addr[i];
spin_unlock_bh(&br->lock);
br->group_addr_set = true;
br_recalculate_fwd_mask(br);
rtnl_unlock();
return len;
}
static DEVICE_ATTR_RW(group_addr);
static ssize_t flush_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct net_bridge *br = to_bridge(d);
if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
br_fdb_flush(br);
return len;
}
static DEVICE_ATTR_WO(flush);
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
static ssize_t multicast_router_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n", br->multicast_router);
}
static ssize_t multicast_router_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_multicast_set_router);
}
static DEVICE_ATTR_RW(multicast_router);
static ssize_t multicast_snooping_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n", !br->multicast_disabled);
}
static ssize_t multicast_snooping_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_multicast_toggle);
}
static DEVICE_ATTR_RW(multicast_snooping);
static ssize_t multicast_query_use_ifaddr_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n", br->multicast_query_use_ifaddr);
}
static int set_query_use_ifaddr(struct net_bridge *br, unsigned long val)
{
br->multicast_query_use_ifaddr = !!val;
return 0;
}
static ssize_t
multicast_query_use_ifaddr_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_query_use_ifaddr);
}
static DEVICE_ATTR_RW(multicast_query_use_ifaddr);
static ssize_t multicast_querier_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n", br->multicast_querier);
}
static ssize_t multicast_querier_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_multicast_set_querier);
}
static DEVICE_ATTR_RW(multicast_querier);
static ssize_t hash_elasticity_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%u\n", br->hash_elasticity);
}
static int set_elasticity(struct net_bridge *br, unsigned long val)
{
br->hash_elasticity = val;
return 0;
}
static ssize_t hash_elasticity_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_elasticity);
}
static DEVICE_ATTR_RW(hash_elasticity);
static ssize_t hash_max_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%u\n", br->hash_max);
}
static ssize_t hash_max_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_multicast_set_hash_max);
}
static DEVICE_ATTR_RW(hash_max);
static ssize_t multicast_last_member_count_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%u\n", br->multicast_last_member_count);
}
static int set_last_member_count(struct net_bridge *br, unsigned long val)
{
br->multicast_last_member_count = val;
return 0;
}
static ssize_t multicast_last_member_count_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_last_member_count);
}
static DEVICE_ATTR_RW(multicast_last_member_count);
static ssize_t multicast_startup_query_count_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%u\n", br->multicast_startup_query_count);
}
static int set_startup_query_count(struct net_bridge *br, unsigned long val)
{
br->multicast_startup_query_count = val;
return 0;
}
static ssize_t multicast_startup_query_count_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_startup_query_count);
}
static DEVICE_ATTR_RW(multicast_startup_query_count);
static ssize_t multicast_last_member_interval_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%lu\n",
jiffies_to_clock_t(br->multicast_last_member_interval));
}
static int set_last_member_interval(struct net_bridge *br, unsigned long val)
{
br->multicast_last_member_interval = clock_t_to_jiffies(val);
return 0;
}
static ssize_t multicast_last_member_interval_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_last_member_interval);
}
static DEVICE_ATTR_RW(multicast_last_member_interval);
static ssize_t multicast_membership_interval_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%lu\n",
jiffies_to_clock_t(br->multicast_membership_interval));
}
static int set_membership_interval(struct net_bridge *br, unsigned long val)
{
br->multicast_membership_interval = clock_t_to_jiffies(val);
return 0;
}
static ssize_t multicast_membership_interval_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_membership_interval);
}
static DEVICE_ATTR_RW(multicast_membership_interval);
static ssize_t multicast_querier_interval_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%lu\n",
jiffies_to_clock_t(br->multicast_querier_interval));
}
static int set_querier_interval(struct net_bridge *br, unsigned long val)
{
br->multicast_querier_interval = clock_t_to_jiffies(val);
return 0;
}
static ssize_t multicast_querier_interval_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_querier_interval);
}
static DEVICE_ATTR_RW(multicast_querier_interval);
static ssize_t multicast_query_interval_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%lu\n",
jiffies_to_clock_t(br->multicast_query_interval));
}
static int set_query_interval(struct net_bridge *br, unsigned long val)
{
br->multicast_query_interval = clock_t_to_jiffies(val);
return 0;
}
static ssize_t multicast_query_interval_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_query_interval);
}
static DEVICE_ATTR_RW(multicast_query_interval);
static ssize_t multicast_query_response_interval_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(
buf, "%lu\n",
jiffies_to_clock_t(br->multicast_query_response_interval));
}
static int set_query_response_interval(struct net_bridge *br, unsigned long val)
{
br->multicast_query_response_interval = clock_t_to_jiffies(val);
return 0;
}
static ssize_t multicast_query_response_interval_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_query_response_interval);
}
static DEVICE_ATTR_RW(multicast_query_response_interval);
static ssize_t multicast_startup_query_interval_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(
buf, "%lu\n",
jiffies_to_clock_t(br->multicast_startup_query_interval));
}
static int set_startup_query_interval(struct net_bridge *br, unsigned long val)
{
br->multicast_startup_query_interval = clock_t_to_jiffies(val);
return 0;
}
static ssize_t multicast_startup_query_interval_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_startup_query_interval);
}
static DEVICE_ATTR_RW(multicast_startup_query_interval);
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static ssize_t nf_call_iptables_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%u\n", br->nf_call_iptables);
}
static int set_nf_call_iptables(struct net_bridge *br, unsigned long val)
{
br->nf_call_iptables = val ? true : false;
return 0;
}
static ssize_t nf_call_iptables_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_nf_call_iptables);
}
static DEVICE_ATTR_RW(nf_call_iptables);
static ssize_t nf_call_ip6tables_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%u\n", br->nf_call_ip6tables);
}
static int set_nf_call_ip6tables(struct net_bridge *br, unsigned long val)
{
br->nf_call_ip6tables = val ? true : false;
return 0;
}
static ssize_t nf_call_ip6tables_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_nf_call_ip6tables);
}
static DEVICE_ATTR_RW(nf_call_ip6tables);
static ssize_t nf_call_arptables_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%u\n", br->nf_call_arptables);
}
static int set_nf_call_arptables(struct net_bridge *br, unsigned long val)
{
br->nf_call_arptables = val ? true : false;
return 0;
}
static ssize_t nf_call_arptables_store(
struct device *d, struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, set_nf_call_arptables);
}
static DEVICE_ATTR_RW(nf_call_arptables);
#endif
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
static ssize_t vlan_filtering_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n", br->vlan_enabled);
}
static ssize_t vlan_filtering_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_vlan_filter_toggle);
}
static DEVICE_ATTR_RW(vlan_filtering);
static ssize_t vlan_protocol_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%#06x\n", ntohs(br->vlan_proto));
}
static ssize_t vlan_protocol_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_vlan_set_proto);
}
static DEVICE_ATTR_RW(vlan_protocol);
static ssize_t default_pvid_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%d\n", br->default_pvid);
}
static ssize_t default_pvid_store(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_vlan_set_default_pvid);
}
static DEVICE_ATTR_RW(default_pvid);
#endif
static struct attribute *bridge_attrs[] = {
&dev_attr_forward_delay.attr,
&dev_attr_hello_time.attr,
&dev_attr_max_age.attr,
&dev_attr_ageing_time.attr,
&dev_attr_stp_state.attr,
&dev_attr_group_fwd_mask.attr,
&dev_attr_priority.attr,
&dev_attr_bridge_id.attr,
&dev_attr_root_id.attr,
&dev_attr_root_path_cost.attr,
&dev_attr_root_port.attr,
&dev_attr_topology_change.attr,
&dev_attr_topology_change_detected.attr,
&dev_attr_hello_timer.attr,
&dev_attr_tcn_timer.attr,
&dev_attr_topology_change_timer.attr,
&dev_attr_gc_timer.attr,
&dev_attr_group_addr.attr,
&dev_attr_flush.attr,
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
&dev_attr_multicast_router.attr,
&dev_attr_multicast_snooping.attr,
&dev_attr_multicast_querier.attr,
&dev_attr_multicast_query_use_ifaddr.attr,
&dev_attr_hash_elasticity.attr,
&dev_attr_hash_max.attr,
&dev_attr_multicast_last_member_count.attr,
&dev_attr_multicast_startup_query_count.attr,
&dev_attr_multicast_last_member_interval.attr,
&dev_attr_multicast_membership_interval.attr,
&dev_attr_multicast_querier_interval.attr,
&dev_attr_multicast_query_interval.attr,
&dev_attr_multicast_query_response_interval.attr,
&dev_attr_multicast_startup_query_interval.attr,
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
&dev_attr_nf_call_iptables.attr,
&dev_attr_nf_call_ip6tables.attr,
&dev_attr_nf_call_arptables.attr,
#endif
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
&dev_attr_vlan_filtering.attr,
&dev_attr_vlan_protocol.attr,
&dev_attr_default_pvid.attr,
#endif
NULL
};
static struct attribute_group bridge_group = {
.name = SYSFS_BRIDGE_ATTR,
.attrs = bridge_attrs,
};
/*
* Export the forwarding information table as a binary file
* The records are struct __fdb_entry.
*
* Returns the number of bytes read.
*/
static ssize_t brforward_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = to_dev(kobj);
struct net_bridge *br = to_bridge(dev);
int n;
/* must read whole records */
if (off % sizeof(struct __fdb_entry) != 0)
return -EINVAL;
n = br_fdb_fillbuf(br, buf,
count / sizeof(struct __fdb_entry),
off / sizeof(struct __fdb_entry));
if (n > 0)
n *= sizeof(struct __fdb_entry);
return n;
}
static struct bin_attribute bridge_forward = {
.attr = { .name = SYSFS_BRIDGE_FDB,
.mode = S_IRUGO, },
.read = brforward_read,
};
/*
* Add entries in sysfs onto the existing network class device
* for the bridge.
* Adds an attribute group "bridge" containing tuning parameters.
* Binary attribute containing the forward table
* Sub directory to hold links to interfaces.
*
* Note: the ifobj exists only to be a subdirectory
* to hold links. The ifobj exists in the same data structure
* as its parent, the bridge, so reference counting works.
*/
int br_sysfs_addbr(struct net_device *dev)
{
struct kobject *brobj = &dev->dev.kobj;
struct net_bridge *br = netdev_priv(dev);
int err;
err = sysfs_create_group(brobj, &bridge_group);
if (err) {
pr_info("%s: can't create group %s/%s\n",
__func__, dev->name, bridge_group.name);
goto out1;
}
err = sysfs_create_bin_file(brobj, &bridge_forward);
if (err) {
pr_info("%s: can't create attribute file %s/%s\n",
__func__, dev->name, bridge_forward.attr.name);
goto out2;
}
br->ifobj = kobject_create_and_add(SYSFS_BRIDGE_PORT_SUBDIR, brobj);
if (!br->ifobj) {
pr_info("%s: can't add kobject (directory) %s/%s\n",
__func__, dev->name, SYSFS_BRIDGE_PORT_SUBDIR);
goto out3;
}
return 0;
out3:
sysfs_remove_bin_file(&dev->dev.kobj, &bridge_forward);
out2:
sysfs_remove_group(&dev->dev.kobj, &bridge_group);
out1:
return err;
}
void br_sysfs_delbr(struct net_device *dev)
{
struct kobject *kobj = &dev->dev.kobj;
struct net_bridge *br = netdev_priv(dev);
kobject_put(br->ifobj);
sysfs_remove_bin_file(kobj, &bridge_forward);
sysfs_remove_group(kobj, &bridge_group);
}

312
net/bridge/br_sysfs_if.c Normal file

@ -0,0 +1,312 @@
/*
* Sysfs attributes of bridge ports
* Linux ethernet bridge
*
* Authors:
* Stephen Hemminger <shemminger@osdl.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/rtnetlink.h>
#include <linux/spinlock.h>
#include "br_private.h"
struct brport_attribute {
struct attribute attr;
ssize_t (*show)(struct net_bridge_port *, char *);
int (*store)(struct net_bridge_port *, unsigned long);
};
#define BRPORT_ATTR(_name, _mode, _show, _store) \
const struct brport_attribute brport_attr_##_name = { \
.attr = {.name = __stringify(_name), \
.mode = _mode }, \
.show = _show, \
.store = _store, \
};
#define BRPORT_ATTR_FLAG(_name, _mask) \
static ssize_t show_##_name(struct net_bridge_port *p, char *buf) \
{ \
return sprintf(buf, "%d\n", !!(p->flags & _mask)); \
} \
static int store_##_name(struct net_bridge_port *p, unsigned long v) \
{ \
return store_flag(p, v, _mask); \
} \
static BRPORT_ATTR(_name, S_IRUGO | S_IWUSR, \
show_##_name, store_##_name)
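/*
* For reference, BRPORT_ATTR_FLAG(learning, BR_LEARNING) below expands to a
* show_learning()/store_learning() pair that reports and toggles BR_LEARNING
* in p->flags through store_flag(), plus a read-write brport_attr_learning
* attribute (mode S_IRUGO | S_IWUSR).
*/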
static int store_flag(struct net_bridge_port *p, unsigned long v,
unsigned long mask)
{
unsigned long flags;
flags = p->flags;
if (v)
flags |= mask;
else
flags &= ~mask;
if (flags != p->flags) {
p->flags = flags;
br_port_flags_change(p, mask);
br_ifinfo_notify(RTM_NEWLINK, p);
}
return 0;
}
static ssize_t show_path_cost(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%d\n", p->path_cost);
}
static BRPORT_ATTR(path_cost, S_IRUGO | S_IWUSR,
show_path_cost, br_stp_set_path_cost);
static ssize_t show_priority(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%d\n", p->priority);
}
static BRPORT_ATTR(priority, S_IRUGO | S_IWUSR,
show_priority, br_stp_set_port_priority);
static ssize_t show_designated_root(struct net_bridge_port *p, char *buf)
{
return br_show_bridge_id(buf, &p->designated_root);
}
static BRPORT_ATTR(designated_root, S_IRUGO, show_designated_root, NULL);
static ssize_t show_designated_bridge(struct net_bridge_port *p, char *buf)
{
return br_show_bridge_id(buf, &p->designated_bridge);
}
static BRPORT_ATTR(designated_bridge, S_IRUGO, show_designated_bridge, NULL);
static ssize_t show_designated_port(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%d\n", p->designated_port);
}
static BRPORT_ATTR(designated_port, S_IRUGO, show_designated_port, NULL);
static ssize_t show_designated_cost(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%d\n", p->designated_cost);
}
static BRPORT_ATTR(designated_cost, S_IRUGO, show_designated_cost, NULL);
static ssize_t show_port_id(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "0x%x\n", p->port_id);
}
static BRPORT_ATTR(port_id, S_IRUGO, show_port_id, NULL);
static ssize_t show_port_no(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "0x%x\n", p->port_no);
}
static BRPORT_ATTR(port_no, S_IRUGO, show_port_no, NULL);
static ssize_t show_change_ack(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%d\n", p->topology_change_ack);
}
static BRPORT_ATTR(change_ack, S_IRUGO, show_change_ack, NULL);
static ssize_t show_config_pending(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%d\n", p->config_pending);
}
static BRPORT_ATTR(config_pending, S_IRUGO, show_config_pending, NULL);
static ssize_t show_port_state(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%d\n", p->state);
}
static BRPORT_ATTR(state, S_IRUGO, show_port_state, NULL);
static ssize_t show_message_age_timer(struct net_bridge_port *p,
char *buf)
{
return sprintf(buf, "%ld\n", br_timer_value(&p->message_age_timer));
}
static BRPORT_ATTR(message_age_timer, S_IRUGO, show_message_age_timer, NULL);
static ssize_t show_forward_delay_timer(struct net_bridge_port *p,
char *buf)
{
return sprintf(buf, "%ld\n", br_timer_value(&p->forward_delay_timer));
}
static BRPORT_ATTR(forward_delay_timer, S_IRUGO, show_forward_delay_timer, NULL);
static ssize_t show_hold_timer(struct net_bridge_port *p,
char *buf)
{
return sprintf(buf, "%ld\n", br_timer_value(&p->hold_timer));
}
static BRPORT_ATTR(hold_timer, S_IRUGO, show_hold_timer, NULL);
static int store_flush(struct net_bridge_port *p, unsigned long v)
{
br_fdb_delete_by_port(p->br, p, 0); /* Don't delete local entry */
return 0;
}
static BRPORT_ATTR(flush, S_IWUSR, NULL, store_flush);
BRPORT_ATTR_FLAG(hairpin_mode, BR_HAIRPIN_MODE);
BRPORT_ATTR_FLAG(bpdu_guard, BR_BPDU_GUARD);
BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK);
BRPORT_ATTR_FLAG(learning, BR_LEARNING);
BRPORT_ATTR_FLAG(unicast_flood, BR_FLOOD);
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%d\n", p->multicast_router);
}
static int store_multicast_router(struct net_bridge_port *p,
unsigned long v)
{
return br_multicast_set_port_router(p, v);
}
static BRPORT_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router,
store_multicast_router);
BRPORT_ATTR_FLAG(multicast_fast_leave, BR_MULTICAST_FAST_LEAVE);
#endif
static const struct brport_attribute *brport_attrs[] = {
&brport_attr_path_cost,
&brport_attr_priority,
&brport_attr_port_id,
&brport_attr_port_no,
&brport_attr_designated_root,
&brport_attr_designated_bridge,
&brport_attr_designated_port,
&brport_attr_designated_cost,
&brport_attr_state,
&brport_attr_change_ack,
&brport_attr_config_pending,
&brport_attr_message_age_timer,
&brport_attr_forward_delay_timer,
&brport_attr_hold_timer,
&brport_attr_flush,
&brport_attr_hairpin_mode,
&brport_attr_bpdu_guard,
&brport_attr_root_block,
&brport_attr_learning,
&brport_attr_unicast_flood,
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
&brport_attr_multicast_router,
&brport_attr_multicast_fast_leave,
#endif
NULL
};
#define to_brport_attr(_at) container_of(_at, struct brport_attribute, attr)
#define to_brport(obj) container_of(obj, struct net_bridge_port, kobj)
static ssize_t brport_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct brport_attribute *brport_attr = to_brport_attr(attr);
struct net_bridge_port *p = to_brport(kobj);
return brport_attr->show(p, buf);
}
static ssize_t brport_store(struct kobject *kobj,
struct attribute *attr,
const char *buf, size_t count)
{
struct brport_attribute *brport_attr = to_brport_attr(attr);
struct net_bridge_port *p = to_brport(kobj);
ssize_t ret = -EINVAL;
char *endp;
unsigned long val;
if (!ns_capable(dev_net(p->dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
val = simple_strtoul(buf, &endp, 0);
if (endp != buf) {
if (!rtnl_trylock())
return restart_syscall();
if (p->dev && p->br && brport_attr->store) {
spin_lock_bh(&p->br->lock);
ret = brport_attr->store(p, val);
spin_unlock_bh(&p->br->lock);
if (ret == 0)
ret = count;
}
rtnl_unlock();
}
return ret;
}
const struct sysfs_ops brport_sysfs_ops = {
.show = brport_show,
.store = brport_store,
};
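/*
* Flow note: reads and writes on /sys/class/net/<port>/brport/<attribute>
* arrive at brport_show()/brport_store() above, which resolve the matching
* brport_attribute and invoke its show()/store() hook; stores additionally
* take the rtnl lock and the bridge spinlock around the setter.
*/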
/*
* Add sysfs entries to ethernet device added to a bridge.
* Creates a brport subdirectory with bridge attributes.
* Puts symlink in bridge's brif subdirectory
*/
int br_sysfs_addif(struct net_bridge_port *p)
{
struct net_bridge *br = p->br;
const struct brport_attribute **a;
int err;
err = sysfs_create_link(&p->kobj, &br->dev->dev.kobj,
SYSFS_BRIDGE_PORT_LINK);
if (err)
return err;
for (a = brport_attrs; *a; ++a) {
err = sysfs_create_file(&p->kobj, &((*a)->attr));
if (err)
return err;
}
strlcpy(p->sysfs_name, p->dev->name, IFNAMSIZ);
return sysfs_create_link(br->ifobj, &p->kobj, p->sysfs_name);
}
/* Rename bridge's brif symlink */
int br_sysfs_renameif(struct net_bridge_port *p)
{
struct net_bridge *br = p->br;
int err;
/* If a rename fails, the rollback will cause another
* rename call with the existing name.
*/
if (!strncmp(p->sysfs_name, p->dev->name, IFNAMSIZ))
return 0;
err = sysfs_rename_link(br->ifobj, &p->kobj,
p->sysfs_name, p->dev->name);
if (err)
netdev_notice(br->dev, "unable to rename link %s to %s",
p->sysfs_name, p->dev->name);
else
strlcpy(p->sysfs_name, p->dev->name, IFNAMSIZ);
return err;
}

736
net/bridge/br_vlan.c Normal file

@ -0,0 +1,736 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include "br_private.h"
static void __vlan_add_pvid(struct net_port_vlans *v, u16 vid)
{
if (v->pvid == vid)
return;
smp_wmb();
v->pvid = vid;
}
static void __vlan_delete_pvid(struct net_port_vlans *v, u16 vid)
{
if (v->pvid != vid)
return;
smp_wmb();
v->pvid = 0;
}
static void __vlan_add_flags(struct net_port_vlans *v, u16 vid, u16 flags)
{
if (flags & BRIDGE_VLAN_INFO_PVID)
__vlan_add_pvid(v, vid);
else
__vlan_delete_pvid(v, vid);
if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
set_bit(vid, v->untagged_bitmap);
else
clear_bit(vid, v->untagged_bitmap);
}
static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
{
struct net_bridge_port *p = NULL;
struct net_bridge *br;
struct net_device *dev;
int err;
if (test_bit(vid, v->vlan_bitmap)) {
__vlan_add_flags(v, vid, flags);
return 0;
}
if (v->port_idx) {
p = v->parent.port;
br = p->br;
dev = p->dev;
} else {
br = v->parent.br;
dev = br->dev;
}
if (p) {
/* Add VLAN to the device filter if it is supported.
* This ensures tagged traffic enters the bridge when
* promiscuous mode is disabled by br_manage_promisc().
*/
err = vlan_vid_add(dev, br->vlan_proto, vid);
if (err)
return err;
}
err = br_fdb_insert(br, p, dev->dev_addr, vid);
if (err) {
br_err(br, "failed insert local address into bridge "
"forwarding table\n");
goto out_filt;
}
set_bit(vid, v->vlan_bitmap);
v->num_vlans++;
__vlan_add_flags(v, vid, flags);
return 0;
out_filt:
if (p)
vlan_vid_del(dev, br->vlan_proto, vid);
return err;
}
static int __vlan_del(struct net_port_vlans *v, u16 vid)
{
if (!test_bit(vid, v->vlan_bitmap))
return -EINVAL;
__vlan_delete_pvid(v, vid);
clear_bit(vid, v->untagged_bitmap);
if (v->port_idx) {
struct net_bridge_port *p = v->parent.port;
vlan_vid_del(p->dev, p->br->vlan_proto, vid);
}
clear_bit(vid, v->vlan_bitmap);
v->num_vlans--;
if (bitmap_empty(v->vlan_bitmap, VLAN_N_VID)) {
if (v->port_idx)
RCU_INIT_POINTER(v->parent.port->vlan_info, NULL);
else
RCU_INIT_POINTER(v->parent.br->vlan_info, NULL);
kfree_rcu(v, rcu);
}
return 0;
}
static void __vlan_flush(struct net_port_vlans *v)
{
smp_wmb();
v->pvid = 0;
bitmap_zero(v->vlan_bitmap, VLAN_N_VID);
if (v->port_idx)
RCU_INIT_POINTER(v->parent.port->vlan_info, NULL);
else
RCU_INIT_POINTER(v->parent.br->vlan_info, NULL);
kfree_rcu(v, rcu);
}
struct sk_buff *br_handle_vlan(struct net_bridge *br,
const struct net_port_vlans *pv,
struct sk_buff *skb)
{
u16 vid;
/* If this packet was not filtered at input, let it pass */
if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
goto out;
/* Vlan filter table must be configured at this point. The
* only exception is when the bridge is set in promisc mode and the
* packet is destined for the bridge device. In this case
* pass the packet as is.
*/
if (!pv) {
if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
goto out;
} else {
kfree_skb(skb);
return NULL;
}
}
/* At this point, we know that the frame was filtered and contains
* a valid vlan id. If the vlan id is set in the untagged bitmap,
* send untagged; otherwise, send tagged.
*/
br_vlan_get_tag(skb, &vid);
if (test_bit(vid, pv->untagged_bitmap))
skb->vlan_tci = 0;
out:
return skb;
}
/* Called under RCU */
bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
struct sk_buff *skb, u16 *vid)
{
bool tagged;
__be16 proto;
/* If VLAN filtering is disabled on the bridge, all packets are
* permitted.
*/
if (!br->vlan_enabled) {
BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
return true;
}
/* If there are no vlans in the permitted list, all packets are
* rejected.
*/
if (!v)
goto drop;
BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
proto = br->vlan_proto;
/* If vlan tx offload is disabled on the bridge device and the frame was
* sent from a vlan device on top of the bridge device, it does not have
* a HW accelerated vlan tag.
*/
if (unlikely(!vlan_tx_tag_present(skb) &&
skb->protocol == proto)) {
skb = skb_vlan_untag(skb);
if (unlikely(!skb))
return false;
}
if (!br_vlan_get_tag(skb, vid)) {
/* Tagged frame */
if (skb->vlan_proto != proto) {
/* Protocol-mismatch, empty out vlan_tci for new tag */
skb_push(skb, ETH_HLEN);
skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
vlan_tx_tag_get(skb));
if (unlikely(!skb))
return false;
skb_pull(skb, ETH_HLEN);
skb_reset_mac_len(skb);
*vid = 0;
tagged = false;
} else {
tagged = true;
}
} else {
/* Untagged frame */
tagged = false;
}
if (!*vid) {
u16 pvid = br_get_pvid(v);
/* Frame had a tag with VID 0 or did not have a tag.
* See if pvid is set on this port. That tells us which
* vlan untagged or priority-tagged traffic belongs to.
*/
if (!pvid)
goto drop;
/* PVID is set on this port. Any untagged or priority-tagged
* ingress frame is considered to belong to this vlan.
*/
*vid = pvid;
if (likely(!tagged))
/* Untagged Frame. */
__vlan_hwaccel_put_tag(skb, proto, pvid);
else
/* Priority-tagged Frame.
* At this point, We know that skb->vlan_tci had
* VLAN_TAG_PRESENT bit and its VID field was 0x000.
* We update only VID field and preserve PCP field.
*/
skb->vlan_tci |= pvid;
return true;
}
/* Frame had a valid vlan tag. See if vlan is allowed */
if (test_bit(*vid, v->vlan_bitmap))
return true;
drop:
kfree_skb(skb);
return false;
}
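/*
* Ingress examples (hypothetical configuration): on a port whose PVID is 10,
* an untagged frame gets *vid = 10 and a VLAN 10 tag inserted via
* __vlan_hwaccel_put_tag(); a frame tagged with VID 20 is accepted only if
* bit 20 is set in the port's vlan_bitmap and is dropped otherwise.
*/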
/* Called under RCU. */
bool br_allowed_egress(struct net_bridge *br,
const struct net_port_vlans *v,
const struct sk_buff *skb)
{
u16 vid;
/* If this packet was not filtered at input, let it pass */
if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
return true;
if (!v)
return false;
br_vlan_get_tag(skb, &vid);
if (test_bit(vid, v->vlan_bitmap))
return true;
return false;
}
/* Called under RCU */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
struct net_bridge *br = p->br;
struct net_port_vlans *v;
/* If filtering was disabled at input, let it pass. */
if (!br->vlan_enabled)
return true;
v = rcu_dereference(p->vlan_info);
if (!v)
return false;
if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
*vid = 0;
if (!*vid) {
*vid = br_get_pvid(v);
if (!*vid)
return false;
return true;
}
if (test_bit(*vid, v->vlan_bitmap))
return true;
return false;
}
/* Must be protected by RTNL.
* Must be called with vid in range from 1 to 4094 inclusive.
*/
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
{
struct net_port_vlans *pv = NULL;
int err;
ASSERT_RTNL();
pv = rtnl_dereference(br->vlan_info);
if (pv)
return __vlan_add(pv, vid, flags);
/* Create port vlan information */
pv = kzalloc(sizeof(*pv), GFP_KERNEL);
if (!pv)
return -ENOMEM;
pv->parent.br = br;
err = __vlan_add(pv, vid, flags);
if (err)
goto out;
rcu_assign_pointer(br->vlan_info, pv);
return 0;
out:
kfree(pv);
return err;
}
/* Must be protected by RTNL.
* Must be called with vid in range from 1 to 4094 inclusive.
*/
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
struct net_port_vlans *pv;
ASSERT_RTNL();
pv = rtnl_dereference(br->vlan_info);
if (!pv)
return -EINVAL;
br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
__vlan_del(pv, vid);
return 0;
}
void br_vlan_flush(struct net_bridge *br)
{
struct net_port_vlans *pv;
ASSERT_RTNL();
pv = rtnl_dereference(br->vlan_info);
if (!pv)
return;
__vlan_flush(pv);
}
bool br_vlan_find(struct net_bridge *br, u16 vid)
{
struct net_port_vlans *pv;
bool found = false;
rcu_read_lock();
pv = rcu_dereference(br->vlan_info);
if (!pv)
goto out;
if (test_bit(vid, pv->vlan_bitmap))
found = true;
out:
rcu_read_unlock();
return found;
}
/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
if (br->group_addr_set)
return;
spin_lock_bh(&br->lock);
if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
/* Bridge Group Address */
br->group_addr[5] = 0x00;
} else { /* vlan_enabled && ETH_P_8021AD */
/* Provider Bridge Group Address */
br->group_addr[5] = 0x08;
}
spin_unlock_bh(&br->lock);
}
/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
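/* With 802.1ad enabled the bridge is expected to forward the Nearest
 * Customer Bridge group addresses, except the one it uses as its own
 * group address, which is still trapped locally.
 */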
if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
else /* vlan_enabled && ETH_P_8021AD */
br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
~(1u << br->group_addr[5]);
}
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
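/* If RTNL cannot be taken right away, bounce back to user space and
 * restart the syscall rather than block here (this is reached from a
 * sysfs store).
 */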
if (!rtnl_trylock())
return restart_syscall();
if (br->vlan_enabled == val)
goto unlock;
br->vlan_enabled = val;
br_manage_promisc(br);
recalculate_group_addr(br);
br_recalculate_fwd_mask(br);
unlock:
rtnl_unlock();
return 0;
}
int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
{
int err = 0;
struct net_bridge_port *p;
struct net_port_vlans *pv;
__be16 proto, oldproto;
u16 vid, errvid;
if (val != ETH_P_8021Q && val != ETH_P_8021AD)
return -EPROTONOSUPPORT;
if (!rtnl_trylock())
return restart_syscall();
proto = htons(val);
if (br->vlan_proto == proto)
goto unlock;
/* Add VLANs for the new proto to the device filter. */
list_for_each_entry(p, &br->port_list, list) {
pv = rtnl_dereference(p->vlan_info);
if (!pv)
continue;
for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
err = vlan_vid_add(p->dev, proto, vid);
if (err)
goto err_filt;
}
}
oldproto = br->vlan_proto;
br->vlan_proto = proto;
recalculate_group_addr(br);
br_recalculate_fwd_mask(br);
/* Delete VLANs for the old proto from the device filter. */
list_for_each_entry(p, &br->port_list, list) {
pv = rtnl_dereference(p->vlan_info);
if (!pv)
continue;
for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
vlan_vid_del(p->dev, oldproto, vid);
}
unlock:
rtnl_unlock();
return err;
err_filt:
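/* Roll back the vids already added for the new proto: those below
 * errvid on the port that failed, and all of them on the ports that
 * were processed before it.
 */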
errvid = vid;
for_each_set_bit(vid, pv->vlan_bitmap, errvid)
vlan_vid_del(p->dev, proto, vid);
list_for_each_entry_continue_reverse(p, &br->port_list, list) {
pv = rtnl_dereference(p->vlan_info);
if (!pv)
continue;
for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
vlan_vid_del(p->dev, proto, vid);
}
goto unlock;
}
static bool vlan_default_pvid(struct net_port_vlans *pv, u16 vid)
{
return pv && vid == pv->pvid && test_bit(vid, pv->untagged_bitmap);
}
static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
struct net_bridge_port *p;
u16 pvid = br->default_pvid;
/* Disable default_pvid on all ports where it is still
* configured.
*/
if (vlan_default_pvid(br_get_vlan_info(br), pvid))
br_vlan_delete(br, pvid);
list_for_each_entry(p, &br->port_list, list) {
if (vlan_default_pvid(nbp_get_vlan_info(p), pvid))
nbp_vlan_delete(p, pvid);
}
br->default_pvid = 0;
}
static int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
struct net_bridge_port *p;
u16 old_pvid;
int err = 0;
unsigned long *changed;
changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
GFP_KERNEL);
if (!changed)
return -ENOMEM;
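/* Bit 0 of 'changed' stands for the bridge device itself and bit N for
 * port N, so a partially applied update can be rolled back on error.
 */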
old_pvid = br->default_pvid;
/* Update default_pvid config only if we do not conflict with
* user configuration.
*/
if ((!old_pvid || vlan_default_pvid(br_get_vlan_info(br), old_pvid)) &&
!br_vlan_find(br, pvid)) {
err = br_vlan_add(br, pvid,
BRIDGE_VLAN_INFO_PVID |
BRIDGE_VLAN_INFO_UNTAGGED);
if (err)
goto out;
br_vlan_delete(br, old_pvid);
set_bit(0, changed);
}
list_for_each_entry(p, &br->port_list, list) {
/* Update default_pvid config only if we do not conflict with
* user configuration.
*/
if ((old_pvid &&
!vlan_default_pvid(nbp_get_vlan_info(p), old_pvid)) ||
nbp_vlan_find(p, pvid))
continue;
err = nbp_vlan_add(p, pvid,
BRIDGE_VLAN_INFO_PVID |
BRIDGE_VLAN_INFO_UNTAGGED);
if (err)
goto err_port;
nbp_vlan_delete(p, old_pvid);
set_bit(p->port_no, changed);
}
br->default_pvid = pvid;
out:
kfree(changed);
return err;
err_port:
list_for_each_entry_continue_reverse(p, &br->port_list, list) {
if (!test_bit(p->port_no, changed))
continue;
if (old_pvid)
nbp_vlan_add(p, old_pvid,
BRIDGE_VLAN_INFO_PVID |
BRIDGE_VLAN_INFO_UNTAGGED);
nbp_vlan_delete(p, pvid);
}
if (test_bit(0, changed)) {
if (old_pvid)
br_vlan_add(br, old_pvid,
BRIDGE_VLAN_INFO_PVID |
BRIDGE_VLAN_INFO_UNTAGGED);
br_vlan_delete(br, pvid);
}
goto out;
}
int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
{
u16 pvid = val;
int err = 0;
if (val >= VLAN_VID_MASK)
return -EINVAL;
if (!rtnl_trylock())
return restart_syscall();
if (pvid == br->default_pvid)
goto unlock;
/* Only allow default pvid change when filtering is disabled */
if (br->vlan_enabled) {
pr_info_once("Please disable vlan filtering to change default_pvid\n");
err = -EPERM;
goto unlock;
}
if (!pvid)
br_vlan_disable_default_pvid(br);
else
err = __br_vlan_set_default_pvid(br, pvid);
unlock:
rtnl_unlock();
return err;
}
int br_vlan_init(struct net_bridge *br)
{
br->vlan_proto = htons(ETH_P_8021Q);
br->default_pvid = 1;
return br_vlan_add(br, 1,
BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED);
}
/* Must be protected by RTNL.
* Must be called with vid in range from 1 to 4094 inclusive.
*/
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
{
struct net_port_vlans *pv = NULL;
int err;
ASSERT_RTNL();
pv = rtnl_dereference(port->vlan_info);
if (pv)
return __vlan_add(pv, vid, flags);
/* Create port vlan information */
pv = kzalloc(sizeof(*pv), GFP_KERNEL);
if (!pv) {
err = -ENOMEM;
goto clean_up;
}
pv->port_idx = port->port_no;
pv->parent.port = port;
err = __vlan_add(pv, vid, flags);
if (err)
goto clean_up;
rcu_assign_pointer(port->vlan_info, pv);
return 0;
clean_up:
kfree(pv);
return err;
}
/* Must be protected by RTNL.
* Must be called with vid in range from 1 to 4094 inclusive.
*/
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
struct net_port_vlans *pv;
ASSERT_RTNL();
pv = rtnl_dereference(port->vlan_info);
if (!pv)
return -EINVAL;
br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
return __vlan_del(pv, vid);
}
void nbp_vlan_flush(struct net_bridge_port *port)
{
struct net_port_vlans *pv;
u16 vid;
ASSERT_RTNL();
pv = rtnl_dereference(port->vlan_info);
if (!pv)
return;
for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
vlan_vid_del(port->dev, port->br->vlan_proto, vid);
__vlan_flush(pv);
}
bool nbp_vlan_find(struct net_bridge_port *port, u16 vid)
{
struct net_port_vlans *pv;
bool found = false;
rcu_read_lock();
pv = rcu_dereference(port->vlan_info);
if (!pv)
goto out;
if (test_bit(vid, pv->vlan_bitmap))
found = true;
out:
rcu_read_unlock();
return found;
}
int nbp_vlan_init(struct net_bridge_port *p)
{
return p->br->default_pvid ?
nbp_vlan_add(p, p->br->default_pvid,
BRIDGE_VLAN_INFO_PVID |
BRIDGE_VLAN_INFO_UNTAGGED) :
0;
}

View file

@ -0,0 +1,227 @@
#
# Bridge netfilter configuration
#
#
menuconfig NF_TABLES_BRIDGE
depends on BRIDGE && NETFILTER && NF_TABLES
tristate "Ethernet Bridge nf_tables support"
if NF_TABLES_BRIDGE
config NFT_BRIDGE_META
tristate "Netfilter nf_table bridge meta support"
depends on NFT_META
help
Add support for bridge dedicated meta key.
config NFT_BRIDGE_REJECT
tristate "Netfilter nf_tables bridge reject support"
depends on NFT_REJECT && NFT_REJECT_IPV4 && NFT_REJECT_IPV6
help
Add support to reject packets.
config NF_LOG_BRIDGE
tristate "Bridge packet logging"
endif # NF_TABLES_BRIDGE
menuconfig BRIDGE_NF_EBTABLES
tristate "Ethernet Bridge tables (ebtables) support"
depends on BRIDGE && NETFILTER && NETFILTER_XTABLES
help
ebtables is a general, extensible frame/packet identification
framework. Say 'Y' or 'M' here if you want to do Ethernet
filtering/NAT/brouting on the Ethernet bridge.
if BRIDGE_NF_EBTABLES
#
# tables
#
config BRIDGE_EBT_BROUTE
tristate "ebt: broute table support"
help
The ebtables broute table is used to define rules that decide between
bridging and routing frames, giving Linux the functionality of a
brouter. See the man page for ebtables(8) and examples on the ebtables
website.
To compile it as a module, choose M here. If unsure, say N.
config BRIDGE_EBT_T_FILTER
tristate "ebt: filter table support"
help
The ebtables filter table is used to define frame filtering rules at
local input, forwarding and local output. See the man page for
ebtables(8).
To compile it as a module, choose M here. If unsure, say N.
config BRIDGE_EBT_T_NAT
tristate "ebt: nat table support"
help
The ebtables nat table is used to define rules that alter the MAC
source address (MAC SNAT) or the MAC destination address (MAC DNAT).
See the man page for ebtables(8).
To compile it as a module, choose M here. If unsure, say N.
#
# matches
#
config BRIDGE_EBT_802_3
tristate "ebt: 802.3 filter support"
help
This option adds matching support for 802.3 Ethernet frames.
To compile it as a module, choose M here. If unsure, say N.
config BRIDGE_EBT_AMONG
tristate "ebt: among filter support"
help
This option adds the among match, which allows matching the MAC source
and/or destination address on a list of addresses. Optionally,
MAC/IP address pairs can be matched, e.g. for anti-spoofing rules.
To compile it as a module, choose M here. If unsure, say N.
config BRIDGE_EBT_ARP
tristate "ebt: ARP filter support"
help
This option adds the ARP match, which allows ARP and RARP header field
filtering.
To compile it as a module, choose M here. If unsure, say N.
config BRIDGE_EBT_IP
tristate "ebt: IP filter support"
help
This option adds the IP match, which allows basic IP header field
filtering.
To compile it as a module, choose M here. If unsure, say N.
config BRIDGE_EBT_IP6
tristate "ebt: IP6 filter support"
depends on BRIDGE_NF_EBTABLES && IPV6
help
This option adds the IP6 match, which allows basic IPV6 header field
filtering.
To compile it as a module, choose M here. If unsure, say N.
config BRIDGE_EBT_LIMIT
tristate "ebt: limit match support"
help
This option adds the limit match, which allows you to control
the rate at which a rule can be matched. This match is the
equivalent of the iptables limit match.
If you want to compile it as a module, say M here and read
<file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
config BRIDGE_EBT_MARK
tristate "ebt: mark filter support"
help
This option adds the mark match, which allows matching frames based on
the 'nfmark' value in the frame. This can be set by the mark target.
This value is the same as the one used in the iptables mark match and
target.
To compile it as a module, choose M here. If unsure, say N.
config BRIDGE_EBT_PKTTYPE
tristate "ebt: packet type filter support"
help
This option adds the packet type match, which allows matching on the
type of packet based on its Ethernet "class" (as determined by
the generic networking code): broadcast, multicast,
for this host alone or for another host.
To compile it as a module, choose M here. If unsure, say N.
config BRIDGE_EBT_STP
tristate "ebt: STP filter support"
help
This option adds the Spanning Tree Protocol match, which
allows STP header field filtering.
To compile it as a module, choose M here. If unsure, say N.
config BRIDGE_EBT_VLAN
tristate "ebt: 802.1Q VLAN filter support"
help
This option adds the 802.1Q vlan match, which allows the filtering of
802.1Q vlan fields.
To compile it as a module, choose M here. If unsure, say N.
#
# targets
#
config BRIDGE_EBT_ARPREPLY
tristate "ebt: arp reply target support"
depends on BRIDGE_NF_EBTABLES && INET
help
This option adds the arp reply target, which allows
automatically sending arp replies to arp requests.
To compile it as a module, choose M here. If unsure, say N.
config BRIDGE_EBT_DNAT
tristate "ebt: dnat target support"
help
This option adds the MAC DNAT target, which allows altering the MAC
destination address of frames.
To compile it as a module, choose M here. If unsure, say N.
config BRIDGE_EBT_MARK_T
tristate "ebt: mark target support"
help
This option adds the mark target, which allows marking frames by
setting the 'nfmark' value in the frame.
This value is the same as the one used in the iptables mark match and
target.
To compile it as a module, choose M here. If unsure, say N.
config BRIDGE_EBT_REDIRECT
tristate "ebt: redirect target support"
help
This option adds the MAC redirect target, which allows altering the MAC
destination address of a frame to that of the device it arrived on.
To compile it as a module, choose M here. If unsure, say N.
config BRIDGE_EBT_SNAT
tristate "ebt: snat target support"
help
This option adds the MAC SNAT target, which allows altering the MAC
source address of frames.
To compile it as a module, choose M here. If unsure, say N.
#
# watchers
#
config BRIDGE_EBT_LOG
tristate "ebt: log support"
help
This option adds the log watcher, which you can use in any rule
in any ebtables table. It records info about the frame header
to the syslog.
To compile it as a module, choose M here. If unsure, say N.
config BRIDGE_EBT_NFLOG
tristate "ebt: nflog support"
help
This option enables the nflog watcher, which allows logging
messages through the netfilter logging API, which can use
either the old LOG target, the old ULOG target or nfnetlink_log
as backend.
This option adds the nflog watcher, which you can use in any rule
in any ebtables table.
To compile it as a module, choose M here. If unsure, say N.
endif # BRIDGE_NF_EBTABLES

View file

@ -0,0 +1,40 @@
#
# Makefile for the netfilter modules for Link Layer filtering on a bridge.
#
obj-$(CONFIG_NF_TABLES_BRIDGE) += nf_tables_bridge.o
obj-$(CONFIG_NFT_BRIDGE_META) += nft_meta_bridge.o
obj-$(CONFIG_NFT_BRIDGE_REJECT) += nft_reject_bridge.o
# packet logging
obj-$(CONFIG_NF_LOG_BRIDGE) += nf_log_bridge.o
obj-$(CONFIG_BRIDGE_NF_EBTABLES) += ebtables.o
# tables
obj-$(CONFIG_BRIDGE_EBT_BROUTE) += ebtable_broute.o
obj-$(CONFIG_BRIDGE_EBT_T_FILTER) += ebtable_filter.o
obj-$(CONFIG_BRIDGE_EBT_T_NAT) += ebtable_nat.o
#matches
obj-$(CONFIG_BRIDGE_EBT_802_3) += ebt_802_3.o
obj-$(CONFIG_BRIDGE_EBT_AMONG) += ebt_among.o
obj-$(CONFIG_BRIDGE_EBT_ARP) += ebt_arp.o
obj-$(CONFIG_BRIDGE_EBT_IP) += ebt_ip.o
obj-$(CONFIG_BRIDGE_EBT_IP6) += ebt_ip6.o
obj-$(CONFIG_BRIDGE_EBT_LIMIT) += ebt_limit.o
obj-$(CONFIG_BRIDGE_EBT_MARK) += ebt_mark_m.o
obj-$(CONFIG_BRIDGE_EBT_PKTTYPE) += ebt_pkttype.o
obj-$(CONFIG_BRIDGE_EBT_STP) += ebt_stp.o
obj-$(CONFIG_BRIDGE_EBT_VLAN) += ebt_vlan.o
# targets
obj-$(CONFIG_BRIDGE_EBT_ARPREPLY) += ebt_arpreply.o
obj-$(CONFIG_BRIDGE_EBT_MARK_T) += ebt_mark.o
obj-$(CONFIG_BRIDGE_EBT_DNAT) += ebt_dnat.o
obj-$(CONFIG_BRIDGE_EBT_REDIRECT) += ebt_redirect.o
obj-$(CONFIG_BRIDGE_EBT_SNAT) += ebt_snat.o
# watchers
obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o
obj-$(CONFIG_BRIDGE_EBT_NFLOG) += ebt_nflog.o

View file

@ -0,0 +1,72 @@
/*
* 802_3
*
* Author:
* Chris Vitale csv@bluetail.com
*
* May 2003
*
*/
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_802_3.h>
static bool
ebt_802_3_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_802_3_info *info = par->matchinfo;
const struct ebt_802_3_hdr *hdr = ebt_802_3_hdr(skb);
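/* The LLC control field is one byte for UI frames and two bytes
 * otherwise, so the type field sits at a different offset.
 */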
__be16 type = hdr->llc.ui.ctrl & IS_UI ? hdr->llc.ui.type : hdr->llc.ni.type;
if (info->bitmask & EBT_802_3_SAP) {
if (FWINV(info->sap != hdr->llc.ui.ssap, EBT_802_3_SAP))
return false;
if (FWINV(info->sap != hdr->llc.ui.dsap, EBT_802_3_SAP))
return false;
}
if (info->bitmask & EBT_802_3_TYPE) {
if (!(hdr->llc.ui.dsap == CHECK_TYPE && hdr->llc.ui.ssap == CHECK_TYPE))
return false;
if (FWINV(info->type != type, EBT_802_3_TYPE))
return false;
}
return true;
}
static int ebt_802_3_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_802_3_info *info = par->matchinfo;
if (info->bitmask & ~EBT_802_3_MASK || info->invflags & ~EBT_802_3_MASK)
return -EINVAL;
return 0;
}
static struct xt_match ebt_802_3_mt_reg __read_mostly = {
.name = "802_3",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_802_3_mt,
.checkentry = ebt_802_3_mt_check,
.matchsize = sizeof(struct ebt_802_3_info),
.me = THIS_MODULE,
};
static int __init ebt_802_3_init(void)
{
return xt_register_match(&ebt_802_3_mt_reg);
}
static void __exit ebt_802_3_fini(void)
{
xt_unregister_match(&ebt_802_3_mt_reg);
}
module_init(ebt_802_3_init);
module_exit(ebt_802_3_fini);
MODULE_DESCRIPTION("Ebtables: DSAP/SSAP field and SNAP type matching");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,229 @@
/*
* ebt_among
*
* Authors:
* Grzegorz Borowiak <grzes@gnu.univ.gda.pl>
*
* August, 2003
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_among.h>
static bool ebt_mac_wormhash_contains(const struct ebt_mac_wormhash *wh,
const char *mac, __be32 ip)
{
/* You may be puzzled as to how this code works.
* Some tricks were used, refer to
* include/linux/netfilter_bridge/ebt_among.h
* as there you can find a solution of this mystery.
*/
const struct ebt_mac_wormhash_tuple *p;
int start, limit, i;
uint32_t cmp[2] = { 0, 0 };
int key = ((const unsigned char *)mac)[5];
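/* The 6-byte MAC is copied into bytes 2..7 of the 8-byte cmp buffer
 * (bytes 0..1 stay zero); pool entries store addresses the same way,
 * so two 32-bit compares cover the whole address.
 */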
ether_addr_copy(((char *) cmp) + 2, mac);
start = wh->table[key];
limit = wh->table[key + 1];
if (ip) {
for (i = start; i < limit; i++) {
p = &wh->pool[i];
if (cmp[1] == p->cmp[1] && cmp[0] == p->cmp[0])
if (p->ip == 0 || p->ip == ip)
return true;
}
} else {
for (i = start; i < limit; i++) {
p = &wh->pool[i];
if (cmp[1] == p->cmp[1] && cmp[0] == p->cmp[0])
if (p->ip == 0)
return true;
}
}
return false;
}
static int ebt_mac_wormhash_check_integrity(const struct ebt_mac_wormhash
*wh)
{
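/* The negative return value encodes which consistency check failed
 * and at which table index.
 */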
int i;
for (i = 0; i < 256; i++) {
if (wh->table[i] > wh->table[i + 1])
return -0x100 - i;
if (wh->table[i] < 0)
return -0x200 - i;
if (wh->table[i] > wh->poolsize)
return -0x300 - i;
}
if (wh->table[256] > wh->poolsize)
return -0xc00;
return 0;
}
static int get_ip_dst(const struct sk_buff *skb, __be32 *addr)
{
if (eth_hdr(skb)->h_proto == htons(ETH_P_IP)) {
const struct iphdr *ih;
struct iphdr _iph;
ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
if (ih == NULL)
return -1;
*addr = ih->daddr;
} else if (eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) {
const struct arphdr *ah;
struct arphdr _arph;
const __be32 *bp;
__be32 buf;
ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
if (ah == NULL ||
ah->ar_pln != sizeof(__be32) ||
ah->ar_hln != ETH_ALEN)
return -1;
bp = skb_header_pointer(skb, sizeof(struct arphdr) +
2 * ETH_ALEN + sizeof(__be32),
sizeof(__be32), &buf);
if (bp == NULL)
return -1;
*addr = *bp;
}
return 0;
}
static int get_ip_src(const struct sk_buff *skb, __be32 *addr)
{
if (eth_hdr(skb)->h_proto == htons(ETH_P_IP)) {
const struct iphdr *ih;
struct iphdr _iph;
ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
if (ih == NULL)
return -1;
*addr = ih->saddr;
} else if (eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) {
const struct arphdr *ah;
struct arphdr _arph;
const __be32 *bp;
__be32 buf;
ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
if (ah == NULL ||
ah->ar_pln != sizeof(__be32) ||
ah->ar_hln != ETH_ALEN)
return -1;
bp = skb_header_pointer(skb, sizeof(struct arphdr) +
ETH_ALEN, sizeof(__be32), &buf);
if (bp == NULL)
return -1;
*addr = *bp;
}
return 0;
}
static bool
ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_among_info *info = par->matchinfo;
const char *dmac, *smac;
const struct ebt_mac_wormhash *wh_dst, *wh_src;
__be32 dip = 0, sip = 0;
wh_dst = ebt_among_wh_dst(info);
wh_src = ebt_among_wh_src(info);
if (wh_src) {
smac = eth_hdr(skb)->h_source;
if (get_ip_src(skb, &sip))
return false;
if (!(info->bitmask & EBT_AMONG_SRC_NEG)) {
/* we match only if it contains */
if (!ebt_mac_wormhash_contains(wh_src, smac, sip))
return false;
} else {
/* we match only if it DOES NOT contain */
if (ebt_mac_wormhash_contains(wh_src, smac, sip))
return false;
}
}
if (wh_dst) {
dmac = eth_hdr(skb)->h_dest;
if (get_ip_dst(skb, &dip))
return false;
if (!(info->bitmask & EBT_AMONG_DST_NEG)) {
/* we match only if it contains */
if (!ebt_mac_wormhash_contains(wh_dst, dmac, dip))
return false;
} else {
/* we match only if it DOES NOT contain */
if (ebt_mac_wormhash_contains(wh_dst, dmac, dip))
return false;
}
}
return true;
}
static int ebt_among_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_among_info *info = par->matchinfo;
const struct ebt_entry_match *em =
container_of(par->matchinfo, const struct ebt_entry_match, data);
int expected_length = sizeof(struct ebt_among_info);
const struct ebt_mac_wormhash *wh_dst, *wh_src;
int err;
wh_dst = ebt_among_wh_dst(info);
wh_src = ebt_among_wh_src(info);
expected_length += ebt_mac_wormhash_size(wh_dst);
expected_length += ebt_mac_wormhash_size(wh_src);
if (em->match_size != EBT_ALIGN(expected_length)) {
pr_info("wrong size: %d against expected %d, rounded to %Zd\n",
em->match_size, expected_length,
EBT_ALIGN(expected_length));
return -EINVAL;
}
if (wh_dst && (err = ebt_mac_wormhash_check_integrity(wh_dst))) {
pr_info("dst integrity fail: %x\n", -err);
return -EINVAL;
}
if (wh_src && (err = ebt_mac_wormhash_check_integrity(wh_src))) {
pr_info("src integrity fail: %x\n", -err);
return -EINVAL;
}
return 0;
}
static struct xt_match ebt_among_mt_reg __read_mostly = {
.name = "among",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_among_mt,
.checkentry = ebt_among_mt_check,
.matchsize = -1, /* special case */
.me = THIS_MODULE,
};
static int __init ebt_among_init(void)
{
return xt_register_match(&ebt_among_mt_reg);
}
static void __exit ebt_among_fini(void)
{
xt_unregister_match(&ebt_among_mt_reg);
}
module_init(ebt_among_init);
module_exit(ebt_among_fini);
MODULE_DESCRIPTION("Ebtables: Combined MAC/IP address list matching");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,140 @@
/*
* ebt_arp
*
* Authors:
* Bart De Schuymer <bdschuym@pandora.be>
* Tim Gardner <timg@tpi.com>
*
* April, 2002
*
*/
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_arp.h>
static bool
ebt_arp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_arp_info *info = par->matchinfo;
const struct arphdr *ah;
struct arphdr _arph;
ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
if (ah == NULL)
return false;
if (info->bitmask & EBT_ARP_OPCODE && FWINV(info->opcode !=
ah->ar_op, EBT_ARP_OPCODE))
return false;
if (info->bitmask & EBT_ARP_HTYPE && FWINV(info->htype !=
ah->ar_hrd, EBT_ARP_HTYPE))
return false;
if (info->bitmask & EBT_ARP_PTYPE && FWINV(info->ptype !=
ah->ar_pro, EBT_ARP_PTYPE))
return false;
if (info->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP | EBT_ARP_GRAT)) {
const __be32 *sap, *dap;
__be32 saddr, daddr;
if (ah->ar_pln != sizeof(__be32) || ah->ar_pro != htons(ETH_P_IP))
return false;
sap = skb_header_pointer(skb, sizeof(struct arphdr) +
ah->ar_hln, sizeof(saddr),
&saddr);
if (sap == NULL)
return false;
dap = skb_header_pointer(skb, sizeof(struct arphdr) +
2*ah->ar_hln+sizeof(saddr),
sizeof(daddr), &daddr);
if (dap == NULL)
return false;
if (info->bitmask & EBT_ARP_SRC_IP &&
FWINV(info->saddr != (*sap & info->smsk), EBT_ARP_SRC_IP))
return false;
if (info->bitmask & EBT_ARP_DST_IP &&
FWINV(info->daddr != (*dap & info->dmsk), EBT_ARP_DST_IP))
return false;
if (info->bitmask & EBT_ARP_GRAT &&
FWINV(*dap != *sap, EBT_ARP_GRAT))
return false;
}
if (info->bitmask & (EBT_ARP_SRC_MAC | EBT_ARP_DST_MAC)) {
const unsigned char *mp;
unsigned char _mac[ETH_ALEN];
uint8_t verdict, i;
if (ah->ar_hln != ETH_ALEN || ah->ar_hrd != htons(ARPHRD_ETHER))
return false;
if (info->bitmask & EBT_ARP_SRC_MAC) {
mp = skb_header_pointer(skb, sizeof(struct arphdr),
sizeof(_mac), &_mac);
if (mp == NULL)
return false;
verdict = 0;
for (i = 0; i < 6; i++)
verdict |= (mp[i] ^ info->smaddr[i]) &
info->smmsk[i];
if (FWINV(verdict != 0, EBT_ARP_SRC_MAC))
return false;
}
if (info->bitmask & EBT_ARP_DST_MAC) {
mp = skb_header_pointer(skb, sizeof(struct arphdr) +
ah->ar_hln + ah->ar_pln,
sizeof(_mac), &_mac);
if (mp == NULL)
return false;
verdict = 0;
for (i = 0; i < 6; i++)
verdict |= (mp[i] ^ info->dmaddr[i]) &
info->dmmsk[i];
if (FWINV(verdict != 0, EBT_ARP_DST_MAC))
return false;
}
}
return true;
}
static int ebt_arp_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_arp_info *info = par->matchinfo;
const struct ebt_entry *e = par->entryinfo;
if ((e->ethproto != htons(ETH_P_ARP) &&
e->ethproto != htons(ETH_P_RARP)) ||
e->invflags & EBT_IPROTO)
return -EINVAL;
if (info->bitmask & ~EBT_ARP_MASK || info->invflags & ~EBT_ARP_MASK)
return -EINVAL;
return 0;
}
static struct xt_match ebt_arp_mt_reg __read_mostly = {
.name = "arp",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_arp_mt,
.checkentry = ebt_arp_mt_check,
.matchsize = sizeof(struct ebt_arp_info),
.me = THIS_MODULE,
};
static int __init ebt_arp_init(void)
{
return xt_register_match(&ebt_arp_mt_reg);
}
static void __exit ebt_arp_fini(void)
{
xt_unregister_match(&ebt_arp_mt_reg);
}
module_init(ebt_arp_init);
module_exit(ebt_arp_fini);
MODULE_DESCRIPTION("Ebtables: ARP protocol packet match");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,98 @@
/*
* ebt_arpreply
*
* Authors:
* Grzegorz Borowiak <grzes@gnu.univ.gda.pl>
* Bart De Schuymer <bdschuym@pandora.be>
*
* August, 2003
*
*/
#include <linux/if_arp.h>
#include <net/arp.h>
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_arpreply.h>
static unsigned int
ebt_arpreply_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_arpreply_info *info = par->targinfo;
const __be32 *siptr, *diptr;
__be32 _sip, _dip;
const struct arphdr *ap;
struct arphdr _ah;
const unsigned char *shp;
unsigned char _sha[ETH_ALEN];
ap = skb_header_pointer(skb, 0, sizeof(_ah), &_ah);
if (ap == NULL)
return EBT_DROP;
if (ap->ar_op != htons(ARPOP_REQUEST) ||
ap->ar_hln != ETH_ALEN ||
ap->ar_pro != htons(ETH_P_IP) ||
ap->ar_pln != 4)
return EBT_CONTINUE;
shp = skb_header_pointer(skb, sizeof(_ah), ETH_ALEN, &_sha);
if (shp == NULL)
return EBT_DROP;
siptr = skb_header_pointer(skb, sizeof(_ah) + ETH_ALEN,
sizeof(_sip), &_sip);
if (siptr == NULL)
return EBT_DROP;
diptr = skb_header_pointer(skb,
sizeof(_ah) + 2 * ETH_ALEN + sizeof(_sip),
sizeof(_dip), &_dip);
if (diptr == NULL)
return EBT_DROP;
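/* Reply on the interface the request arrived on: tell the requester
 * (*siptr / shp) that the queried address *diptr is reachable at the
 * configured MAC.
 */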
arp_send(ARPOP_REPLY, ETH_P_ARP, *siptr, (struct net_device *)par->in,
*diptr, shp, info->mac, shp);
return info->target;
}
static int ebt_arpreply_tg_check(const struct xt_tgchk_param *par)
{
const struct ebt_arpreply_info *info = par->targinfo;
const struct ebt_entry *e = par->entryinfo;
if (BASE_CHAIN && info->target == EBT_RETURN)
return -EINVAL;
if (e->ethproto != htons(ETH_P_ARP) ||
e->invflags & EBT_IPROTO)
return -EINVAL;
return 0;
}
static struct xt_target ebt_arpreply_tg_reg __read_mostly = {
.name = "arpreply",
.revision = 0,
.family = NFPROTO_BRIDGE,
.table = "nat",
.hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_PRE_ROUTING),
.target = ebt_arpreply_tg,
.checkentry = ebt_arpreply_tg_check,
.targetsize = sizeof(struct ebt_arpreply_info),
.me = THIS_MODULE,
};
static int __init ebt_arpreply_init(void)
{
return xt_register_target(&ebt_arpreply_tg_reg);
}
static void __exit ebt_arpreply_fini(void)
{
xt_unregister_target(&ebt_arpreply_tg_reg);
}
module_init(ebt_arpreply_init);
module_exit(ebt_arpreply_fini);
MODULE_DESCRIPTION("Ebtables: ARP reply target");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,74 @@
/*
* ebt_dnat
*
* Authors:
* Bart De Schuymer <bdschuym@pandora.be>
*
* June, 2002
*
*/
#include <linux/module.h>
#include <net/sock.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_nat.h>
static unsigned int
ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_nat_info *info = par->targinfo;
if (!skb_make_writable(skb, 0))
return EBT_DROP;
ether_addr_copy(eth_hdr(skb)->h_dest, info->mac);
return info->target;
}
static int ebt_dnat_tg_check(const struct xt_tgchk_param *par)
{
const struct ebt_nat_info *info = par->targinfo;
unsigned int hook_mask;
if (BASE_CHAIN && info->target == EBT_RETURN)
return -EINVAL;
hook_mask = par->hook_mask & ~(1 << NF_BR_NUMHOOKS);
if ((strcmp(par->table, "nat") != 0 ||
(hook_mask & ~((1 << NF_BR_PRE_ROUTING) |
(1 << NF_BR_LOCAL_OUT)))) &&
(strcmp(par->table, "broute") != 0 ||
hook_mask & ~(1 << NF_BR_BROUTING)))
return -EINVAL;
if (INVALID_TARGET)
return -EINVAL;
return 0;
}
static struct xt_target ebt_dnat_tg_reg __read_mostly = {
.name = "dnat",
.revision = 0,
.family = NFPROTO_BRIDGE,
.hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_PRE_ROUTING) |
(1 << NF_BR_LOCAL_OUT) | (1 << NF_BR_BROUTING),
.target = ebt_dnat_tg,
.checkentry = ebt_dnat_tg_check,
.targetsize = sizeof(struct ebt_nat_info),
.me = THIS_MODULE,
};
static int __init ebt_dnat_init(void)
{
return xt_register_target(&ebt_dnat_tg_reg);
}
static void __exit ebt_dnat_fini(void)
{
xt_unregister_target(&ebt_dnat_tg_reg);
}
module_init(ebt_dnat_init);
module_exit(ebt_dnat_fini);
MODULE_DESCRIPTION("Ebtables: Destination MAC address translation");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,130 @@
/*
* ebt_ip
*
* Authors:
* Bart De Schuymer <bdschuym@pandora.be>
*
* April, 2002
*
* Changes:
* added ip-sport and ip-dport
* Innominate Security Technologies AG <mhopf@innominate.com>
* September, 2002
*/
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/in.h>
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_ip.h>
struct tcpudphdr {
__be16 src;
__be16 dst;
};
static bool
ebt_ip_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_ip_info *info = par->matchinfo;
const struct iphdr *ih;
struct iphdr _iph;
const struct tcpudphdr *pptr;
struct tcpudphdr _ports;
ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
if (ih == NULL)
return false;
if (info->bitmask & EBT_IP_TOS &&
FWINV(info->tos != ih->tos, EBT_IP_TOS))
return false;
if (info->bitmask & EBT_IP_SOURCE &&
FWINV((ih->saddr & info->smsk) !=
info->saddr, EBT_IP_SOURCE))
return false;
if ((info->bitmask & EBT_IP_DEST) &&
FWINV((ih->daddr & info->dmsk) !=
info->daddr, EBT_IP_DEST))
return false;
if (info->bitmask & EBT_IP_PROTO) {
if (FWINV(info->protocol != ih->protocol, EBT_IP_PROTO))
return false;
if (!(info->bitmask & EBT_IP_DPORT) &&
!(info->bitmask & EBT_IP_SPORT))
return true;
if (ntohs(ih->frag_off) & IP_OFFSET)
return false;
pptr = skb_header_pointer(skb, ih->ihl*4,
sizeof(_ports), &_ports);
if (pptr == NULL)
return false;
if (info->bitmask & EBT_IP_DPORT) {
u32 dst = ntohs(pptr->dst);
if (FWINV(dst < info->dport[0] ||
dst > info->dport[1],
EBT_IP_DPORT))
return false;
}
if (info->bitmask & EBT_IP_SPORT) {
u32 src = ntohs(pptr->src);
if (FWINV(src < info->sport[0] ||
src > info->sport[1],
EBT_IP_SPORT))
return false;
}
}
return true;
}
static int ebt_ip_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_ip_info *info = par->matchinfo;
const struct ebt_entry *e = par->entryinfo;
if (e->ethproto != htons(ETH_P_IP) ||
e->invflags & EBT_IPROTO)
return -EINVAL;
if (info->bitmask & ~EBT_IP_MASK || info->invflags & ~EBT_IP_MASK)
return -EINVAL;
if (info->bitmask & (EBT_IP_DPORT | EBT_IP_SPORT)) {
if (info->invflags & EBT_IP_PROTO)
return -EINVAL;
if (info->protocol != IPPROTO_TCP &&
info->protocol != IPPROTO_UDP &&
info->protocol != IPPROTO_UDPLITE &&
info->protocol != IPPROTO_SCTP &&
info->protocol != IPPROTO_DCCP)
return -EINVAL;
}
if (info->bitmask & EBT_IP_DPORT && info->dport[0] > info->dport[1])
return -EINVAL;
if (info->bitmask & EBT_IP_SPORT && info->sport[0] > info->sport[1])
return -EINVAL;
return 0;
}
static struct xt_match ebt_ip_mt_reg __read_mostly = {
.name = "ip",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_ip_mt,
.checkentry = ebt_ip_mt_check,
.matchsize = sizeof(struct ebt_ip_info),
.me = THIS_MODULE,
};
static int __init ebt_ip_init(void)
{
return xt_register_match(&ebt_ip_mt_reg);
}
static void __exit ebt_ip_fini(void)
{
xt_unregister_match(&ebt_ip_mt_reg);
}
module_init(ebt_ip_init);
module_exit(ebt_ip_fini);
MODULE_DESCRIPTION("Ebtables: IPv4 protocol packet match");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,158 @@
/*
* ebt_ip6
*
* Authors:
* Manohar Castelino <manohar.r.castelino@intel.com>
* Kuo-Lang Tseng <kuo-lang.tseng@intel.com>
* Jan Engelhardt <jengelh@medozas.de>
*
* Summary:
* This is just a modification of the IPv4 code written by
* Bart De Schuymer <bdschuym@pandora.be>
* with the changes required to support IPv6
*
* Jan, 2008
*/
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/dsfield.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_ip6.h>
union pkthdr {
struct {
__be16 src;
__be16 dst;
} tcpudphdr;
struct {
u8 type;
u8 code;
} icmphdr;
};
static bool
ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_ip6_info *info = par->matchinfo;
const struct ipv6hdr *ih6;
struct ipv6hdr _ip6h;
const union pkthdr *pptr;
union pkthdr _pkthdr;
ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
if (ih6 == NULL)
return false;
if (info->bitmask & EBT_IP6_TCLASS &&
FWINV(info->tclass != ipv6_get_dsfield(ih6), EBT_IP6_TCLASS))
return false;
if ((info->bitmask & EBT_IP6_SOURCE &&
FWINV(ipv6_masked_addr_cmp(&ih6->saddr, &info->smsk,
&info->saddr), EBT_IP6_SOURCE)) ||
(info->bitmask & EBT_IP6_DEST &&
FWINV(ipv6_masked_addr_cmp(&ih6->daddr, &info->dmsk,
&info->daddr), EBT_IP6_DEST)))
return false;
if (info->bitmask & EBT_IP6_PROTO) {
uint8_t nexthdr = ih6->nexthdr;
__be16 frag_off;
int offset_ph;
offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr, &frag_off);
if (offset_ph == -1)
return false;
if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO))
return false;
if (!(info->bitmask & ( EBT_IP6_DPORT |
EBT_IP6_SPORT | EBT_IP6_ICMP6)))
return true;
/* min icmpv6 headersize is 4, so sizeof(_pkthdr) is ok. */
pptr = skb_header_pointer(skb, offset_ph, sizeof(_pkthdr),
&_pkthdr);
if (pptr == NULL)
return false;
if (info->bitmask & EBT_IP6_DPORT) {
u16 dst = ntohs(pptr->tcpudphdr.dst);
if (FWINV(dst < info->dport[0] ||
dst > info->dport[1], EBT_IP6_DPORT))
return false;
}
if (info->bitmask & EBT_IP6_SPORT) {
u16 src = ntohs(pptr->tcpudphdr.src);
if (FWINV(src < info->sport[0] ||
src > info->sport[1], EBT_IP6_SPORT))
return false;
}
if ((info->bitmask & EBT_IP6_ICMP6) &&
FWINV(pptr->icmphdr.type < info->icmpv6_type[0] ||
pptr->icmphdr.type > info->icmpv6_type[1] ||
pptr->icmphdr.code < info->icmpv6_code[0] ||
pptr->icmphdr.code > info->icmpv6_code[1],
EBT_IP6_ICMP6))
return false;
}
return true;
}
static int ebt_ip6_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_entry *e = par->entryinfo;
struct ebt_ip6_info *info = par->matchinfo;
if (e->ethproto != htons(ETH_P_IPV6) || e->invflags & EBT_IPROTO)
return -EINVAL;
if (info->bitmask & ~EBT_IP6_MASK || info->invflags & ~EBT_IP6_MASK)
return -EINVAL;
if (info->bitmask & (EBT_IP6_DPORT | EBT_IP6_SPORT)) {
if (info->invflags & EBT_IP6_PROTO)
return -EINVAL;
if (info->protocol != IPPROTO_TCP &&
info->protocol != IPPROTO_UDP &&
info->protocol != IPPROTO_UDPLITE &&
info->protocol != IPPROTO_SCTP &&
info->protocol != IPPROTO_DCCP)
return -EINVAL;
}
if (info->bitmask & EBT_IP6_DPORT && info->dport[0] > info->dport[1])
return -EINVAL;
if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1])
return -EINVAL;
if (info->bitmask & EBT_IP6_ICMP6) {
if ((info->invflags & EBT_IP6_PROTO) ||
info->protocol != IPPROTO_ICMPV6)
return -EINVAL;
if (info->icmpv6_type[0] > info->icmpv6_type[1] ||
info->icmpv6_code[0] > info->icmpv6_code[1])
return -EINVAL;
}
return 0;
}
static struct xt_match ebt_ip6_mt_reg __read_mostly = {
.name = "ip6",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_ip6_mt,
.checkentry = ebt_ip6_mt_check,
.matchsize = sizeof(struct ebt_ip6_info),
.me = THIS_MODULE,
};
static int __init ebt_ip6_init(void)
{
return xt_register_match(&ebt_ip6_mt_reg);
}
static void __exit ebt_ip6_fini(void)
{
xt_unregister_match(&ebt_ip6_mt_reg);
}
module_init(ebt_ip6_init);
module_exit(ebt_ip6_fini);
MODULE_DESCRIPTION("Ebtables: IPv6 protocol packet match");
MODULE_AUTHOR("Kuo-Lang Tseng <kuo-lang.tseng@intel.com>");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,127 @@
/*
* ebt_limit
*
* Authors:
* Tom Marshall <tommy@home.tig-grr.com>
*
* Mostly copied from netfilter's ipt_limit.c, see that file for
* more explanation
*
* September, 2003
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_limit.h>
static DEFINE_SPINLOCK(limit_lock);
#define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))
#define _POW2_BELOW2(x) ((x)|((x)>>1))
#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)
#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
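/* POW2_BELOW32(x) rounds x down to a power of two, so CREDITS_PER_JIFFY
 * is the largest power of two that does not exceed MAX_CPJ.
 */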
static bool
ebt_limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
struct ebt_limit_info *info = (void *)par->matchinfo;
unsigned long now = jiffies;
spin_lock_bh(&limit_lock);
info->credit += (now - xchg(&info->prev, now)) * CREDITS_PER_JIFFY;
if (info->credit > info->credit_cap)
info->credit = info->credit_cap;
if (info->credit >= info->cost) {
/* We're not limited. */
info->credit -= info->cost;
spin_unlock_bh(&limit_lock);
return true;
}
spin_unlock_bh(&limit_lock);
return false;
}
/* Precision saver. */
static u_int32_t
user2credits(u_int32_t user)
{
/* If multiplying would overflow... */
if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
/* Divide first. */
return (user / EBT_LIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
return (user * HZ * CREDITS_PER_JIFFY) / EBT_LIMIT_SCALE;
}
static int ebt_limit_mt_check(const struct xt_mtchk_param *par)
{
struct ebt_limit_info *info = par->matchinfo;
/* Check for overflow. */
if (info->burst == 0 ||
user2credits(info->avg * info->burst) < user2credits(info->avg)) {
pr_info("overflow, try lower: %u/%u\n",
info->avg, info->burst);
return -EINVAL;
}
/* User avg in seconds * EBT_LIMIT_SCALE: convert to jiffies * 128. */
info->prev = jiffies;
info->credit = user2credits(info->avg * info->burst);
info->credit_cap = user2credits(info->avg * info->burst);
info->cost = user2credits(info->avg);
return 0;
}
#ifdef CONFIG_COMPAT
/*
* no conversion function needed --
* only avg/burst have meaningful values in userspace.
*/
struct ebt_compat_limit_info {
compat_uint_t avg, burst;
compat_ulong_t prev;
compat_uint_t credit, credit_cap, cost;
};
#endif
static struct xt_match ebt_limit_mt_reg __read_mostly = {
.name = "limit",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_limit_mt,
.checkentry = ebt_limit_mt_check,
.matchsize = sizeof(struct ebt_limit_info),
#ifdef CONFIG_COMPAT
.compatsize = sizeof(struct ebt_compat_limit_info),
#endif
.me = THIS_MODULE,
};
static int __init ebt_limit_init(void)
{
return xt_register_match(&ebt_limit_mt_reg);
}
static void __exit ebt_limit_fini(void)
{
xt_unregister_match(&ebt_limit_mt_reg);
}
module_init(ebt_limit_init);
module_exit(ebt_limit_fini);
MODULE_DESCRIPTION("Ebtables: Rate-limit match");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,225 @@
/*
* ebt_log
*
* Authors:
* Bart De Schuymer <bdschuym@pandora.be>
* Harald Welte <laforge@netfilter.org>
*
* April, 2002
*
*/
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/spinlock.h>
#include <net/netfilter/nf_log.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/in6.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_log.h>
#include <linux/netfilter.h>
static DEFINE_SPINLOCK(ebt_log_lock);
static int ebt_log_tg_check(const struct xt_tgchk_param *par)
{
struct ebt_log_info *info = par->targinfo;
if (info->bitmask & ~EBT_LOG_MASK)
return -EINVAL;
if (info->loglevel >= 8)
return -EINVAL;
info->prefix[EBT_LOG_PREFIX_SIZE - 1] = '\0';
return 0;
}
struct tcpudphdr
{
__be16 src;
__be16 dst;
};
struct arppayload
{
unsigned char mac_src[ETH_ALEN];
unsigned char ip_src[4];
unsigned char mac_dst[ETH_ALEN];
unsigned char ip_dst[4];
};
static void
print_ports(const struct sk_buff *skb, uint8_t protocol, int offset)
{
if (protocol == IPPROTO_TCP ||
protocol == IPPROTO_UDP ||
protocol == IPPROTO_UDPLITE ||
protocol == IPPROTO_SCTP ||
protocol == IPPROTO_DCCP) {
const struct tcpudphdr *pptr;
struct tcpudphdr _ports;
pptr = skb_header_pointer(skb, offset,
sizeof(_ports), &_ports);
if (pptr == NULL) {
printk(" INCOMPLETE TCP/UDP header");
return;
}
printk(" SPT=%u DPT=%u", ntohs(pptr->src), ntohs(pptr->dst));
}
}
static void
ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
const struct sk_buff *skb, const struct net_device *in,
const struct net_device *out, const struct nf_loginfo *loginfo,
const char *prefix)
{
unsigned int bitmask;
/* FIXME: Disabled from containers until syslog ns is supported */
if (!net_eq(net, &init_net))
return;
spin_lock_bh(&ebt_log_lock);
printk(KERN_SOH "%c%s IN=%s OUT=%s MAC source = %pM MAC dest = %pM proto = 0x%04x",
'0' + loginfo->u.log.level, prefix,
in ? in->name : "", out ? out->name : "",
eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
ntohs(eth_hdr(skb)->h_proto));
if (loginfo->type == NF_LOG_TYPE_LOG)
bitmask = loginfo->u.log.logflags;
else
bitmask = NF_LOG_MASK;
if ((bitmask & EBT_LOG_IP) && eth_hdr(skb)->h_proto ==
htons(ETH_P_IP)) {
const struct iphdr *ih;
struct iphdr _iph;
ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
if (ih == NULL) {
printk(" INCOMPLETE IP header");
goto out;
}
printk(" IP SRC=%pI4 IP DST=%pI4, IP tos=0x%02X, IP proto=%d",
&ih->saddr, &ih->daddr, ih->tos, ih->protocol);
print_ports(skb, ih->protocol, ih->ihl*4);
goto out;
}
#if IS_ENABLED(CONFIG_BRIDGE_EBT_IP6)
if ((bitmask & EBT_LOG_IP6) && eth_hdr(skb)->h_proto ==
htons(ETH_P_IPV6)) {
const struct ipv6hdr *ih;
struct ipv6hdr _iph;
uint8_t nexthdr;
__be16 frag_off;
int offset_ph;
ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
if (ih == NULL) {
printk(" INCOMPLETE IPv6 header");
goto out;
}
printk(" IPv6 SRC=%pI6 IPv6 DST=%pI6, IPv6 priority=0x%01X, Next Header=%d",
&ih->saddr, &ih->daddr, ih->priority, ih->nexthdr);
nexthdr = ih->nexthdr;
offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr, &frag_off);
if (offset_ph == -1)
goto out;
print_ports(skb, nexthdr, offset_ph);
goto out;
}
#endif
if ((bitmask & EBT_LOG_ARP) &&
((eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) ||
(eth_hdr(skb)->h_proto == htons(ETH_P_RARP)))) {
const struct arphdr *ah;
struct arphdr _arph;
ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
if (ah == NULL) {
printk(" INCOMPLETE ARP header");
goto out;
}
printk(" ARP HTYPE=%d, PTYPE=0x%04x, OPCODE=%d",
ntohs(ah->ar_hrd), ntohs(ah->ar_pro),
ntohs(ah->ar_op));
/* If it's for Ethernet and the lengths are OK,
* then log the ARP payload */
if (ah->ar_hrd == htons(1) &&
ah->ar_hln == ETH_ALEN &&
ah->ar_pln == sizeof(__be32)) {
const struct arppayload *ap;
struct arppayload _arpp;
ap = skb_header_pointer(skb, sizeof(_arph),
sizeof(_arpp), &_arpp);
if (ap == NULL) {
printk(" INCOMPLETE ARP payload");
goto out;
}
printk(" ARP MAC SRC=%pM ARP IP SRC=%pI4 ARP MAC DST=%pM ARP IP DST=%pI4",
ap->mac_src, ap->ip_src, ap->mac_dst, ap->ip_dst);
}
}
out:
printk("\n");
spin_unlock_bh(&ebt_log_lock);
}
static unsigned int
ebt_log_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_log_info *info = par->targinfo;
struct nf_loginfo li;
struct net *net = dev_net(par->in ? par->in : par->out);
li.type = NF_LOG_TYPE_LOG;
li.u.log.level = info->loglevel;
li.u.log.logflags = info->bitmask;
/* Remember that we have to use ebt_log_packet() not to break backward
* compatibility. We cannot use the default bridge packet logger via
* nf_log_packet() with NFT_LOG_TYPE_LOG here. --Pablo
*/
if (info->bitmask & EBT_LOG_NFLOG)
nf_log_packet(net, NFPROTO_BRIDGE, par->hooknum, skb,
par->in, par->out, &li, "%s", info->prefix);
else
ebt_log_packet(net, NFPROTO_BRIDGE, par->hooknum, skb, par->in,
par->out, &li, info->prefix);
return EBT_CONTINUE;
}
static struct xt_target ebt_log_tg_reg __read_mostly = {
.name = "log",
.revision = 0,
.family = NFPROTO_BRIDGE,
.target = ebt_log_tg,
.checkentry = ebt_log_tg_check,
.targetsize = sizeof(struct ebt_log_info),
.me = THIS_MODULE,
};
static int __init ebt_log_init(void)
{
return xt_register_target(&ebt_log_tg_reg);
}
static void __exit ebt_log_fini(void)
{
xt_unregister_target(&ebt_log_tg_reg);
}
module_init(ebt_log_init);
module_exit(ebt_log_fini);
MODULE_DESCRIPTION("Ebtables: Packet logging to syslog");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,110 @@
/*
* ebt_mark
*
* Authors:
* Bart De Schuymer <bdschuym@pandora.be>
*
* July, 2002
*
*/
/* The mark target can be used in any chain,
* I believe adding a mangle table just for marking is total overkill.
* Marking a frame doesn't really change anything in the frame anyway.
*/
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_mark_t.h>
static unsigned int
ebt_mark_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_mark_t_info *info = par->targinfo;
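/* The low bits of 'target' carry the ebtables verdict; the remaining
 * bits select the mark operation (set/or/and/xor).
 */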
int action = info->target & -16;
if (action == MARK_SET_VALUE)
skb->mark = info->mark;
else if (action == MARK_OR_VALUE)
skb->mark |= info->mark;
else if (action == MARK_AND_VALUE)
skb->mark &= info->mark;
else
skb->mark ^= info->mark;
return info->target | ~EBT_VERDICT_BITS;
}
static int ebt_mark_tg_check(const struct xt_tgchk_param *par)
{
const struct ebt_mark_t_info *info = par->targinfo;
int tmp;
tmp = info->target | ~EBT_VERDICT_BITS;
if (BASE_CHAIN && tmp == EBT_RETURN)
return -EINVAL;
if (tmp < -NUM_STANDARD_TARGETS || tmp >= 0)
return -EINVAL;
tmp = info->target & ~EBT_VERDICT_BITS;
if (tmp != MARK_SET_VALUE && tmp != MARK_OR_VALUE &&
tmp != MARK_AND_VALUE && tmp != MARK_XOR_VALUE)
return -EINVAL;
return 0;
}
#ifdef CONFIG_COMPAT
struct compat_ebt_mark_t_info {
compat_ulong_t mark;
compat_uint_t target;
};
static void mark_tg_compat_from_user(void *dst, const void *src)
{
const struct compat_ebt_mark_t_info *user = src;
struct ebt_mark_t_info *kern = dst;
kern->mark = user->mark;
kern->target = user->target;
}
static int mark_tg_compat_to_user(void __user *dst, const void *src)
{
struct compat_ebt_mark_t_info __user *user = dst;
const struct ebt_mark_t_info *kern = src;
if (put_user(kern->mark, &user->mark) ||
put_user(kern->target, &user->target))
return -EFAULT;
return 0;
}
#endif
static struct xt_target ebt_mark_tg_reg __read_mostly = {
.name = "mark",
.revision = 0,
.family = NFPROTO_BRIDGE,
.target = ebt_mark_tg,
.checkentry = ebt_mark_tg_check,
.targetsize = sizeof(struct ebt_mark_t_info),
#ifdef CONFIG_COMPAT
.compatsize = sizeof(struct compat_ebt_mark_t_info),
.compat_from_user = mark_tg_compat_from_user,
.compat_to_user = mark_tg_compat_to_user,
#endif
.me = THIS_MODULE,
};
static int __init ebt_mark_init(void)
{
return xt_register_target(&ebt_mark_tg_reg);
}
static void __exit ebt_mark_fini(void)
{
xt_unregister_target(&ebt_mark_tg_reg);
}
module_init(ebt_mark_init);
module_exit(ebt_mark_fini);
MODULE_DESCRIPTION("Ebtables: Packet mark modification");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,98 @@
/*
* ebt_mark_m
*
* Authors:
* Bart De Schuymer <bdschuym@pandora.be>
*
* July, 2002
*
*/
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_mark_m.h>
static bool
ebt_mark_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_mark_m_info *info = par->matchinfo;
if (info->bitmask & EBT_MARK_OR)
return !!(skb->mark & info->mask) ^ info->invert;
return ((skb->mark & info->mask) == info->mark) ^ info->invert;
}
static int ebt_mark_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_mark_m_info *info = par->matchinfo;
if (info->bitmask & ~EBT_MARK_MASK)
return -EINVAL;
if ((info->bitmask & EBT_MARK_OR) && (info->bitmask & EBT_MARK_AND))
return -EINVAL;
if (!info->bitmask)
return -EINVAL;
return 0;
}
#ifdef CONFIG_COMPAT
struct compat_ebt_mark_m_info {
compat_ulong_t mark, mask;
uint8_t invert, bitmask;
};
static void mark_mt_compat_from_user(void *dst, const void *src)
{
const struct compat_ebt_mark_m_info *user = src;
struct ebt_mark_m_info *kern = dst;
kern->mark = user->mark;
kern->mask = user->mask;
kern->invert = user->invert;
kern->bitmask = user->bitmask;
}
static int mark_mt_compat_to_user(void __user *dst, const void *src)
{
struct compat_ebt_mark_m_info __user *user = dst;
const struct ebt_mark_m_info *kern = src;
if (put_user(kern->mark, &user->mark) ||
put_user(kern->mask, &user->mask) ||
put_user(kern->invert, &user->invert) ||
put_user(kern->bitmask, &user->bitmask))
return -EFAULT;
return 0;
}
#endif
static struct xt_match ebt_mark_mt_reg __read_mostly = {
.name = "mark_m",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_mark_mt,
.checkentry = ebt_mark_mt_check,
.matchsize = sizeof(struct ebt_mark_m_info),
#ifdef CONFIG_COMPAT
.compatsize = sizeof(struct compat_ebt_mark_m_info),
.compat_from_user = mark_mt_compat_from_user,
.compat_to_user = mark_mt_compat_to_user,
#endif
.me = THIS_MODULE,
};
static int __init ebt_mark_m_init(void)
{
return xt_register_match(&ebt_mark_mt_reg);
}
static void __exit ebt_mark_m_fini(void)
{
xt_unregister_match(&ebt_mark_mt_reg);
}
module_init(ebt_mark_m_init);
module_exit(ebt_mark_m_fini);
MODULE_DESCRIPTION("Ebtables: Packet mark match");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,73 @@
/*
* ebt_nflog
*
* Author:
* Peter Warasin <peter@endian.com>
*
* February, 2008
*
* Based on:
* xt_NFLOG.c, (C) 2006 by Patrick McHardy <kaber@trash.net>
* ebt_ulog.c, (C) 2004 by Bart De Schuymer <bdschuym@pandora.be>
*
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_nflog.h>
#include <net/netfilter/nf_log.h>
static unsigned int
ebt_nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_nflog_info *info = par->targinfo;
struct nf_loginfo li;
struct net *net = dev_net(par->in ? par->in : par->out);
li.type = NF_LOG_TYPE_ULOG;
li.u.ulog.copy_len = info->len;
li.u.ulog.group = info->group;
li.u.ulog.qthreshold = info->threshold;
nf_log_packet(net, PF_BRIDGE, par->hooknum, skb, par->in,
par->out, &li, "%s", info->prefix);
return EBT_CONTINUE;
}
static int ebt_nflog_tg_check(const struct xt_tgchk_param *par)
{
struct ebt_nflog_info *info = par->targinfo;
if (info->flags & ~EBT_NFLOG_MASK)
return -EINVAL;
info->prefix[EBT_NFLOG_PREFIX_SIZE - 1] = '\0';
return 0;
}
static struct xt_target ebt_nflog_tg_reg __read_mostly = {
.name = "nflog",
.revision = 0,
.family = NFPROTO_BRIDGE,
.target = ebt_nflog_tg,
.checkentry = ebt_nflog_tg_check,
.targetsize = sizeof(struct ebt_nflog_info),
.me = THIS_MODULE,
};
static int __init ebt_nflog_init(void)
{
return xt_register_target(&ebt_nflog_tg_reg);
}
static void __exit ebt_nflog_fini(void)
{
xt_unregister_target(&ebt_nflog_tg_reg);
}
module_init(ebt_nflog_init);
module_exit(ebt_nflog_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Peter Warasin <peter@endian.com>");
MODULE_DESCRIPTION("ebtables NFLOG netfilter logging module");

View file

@ -0,0 +1,56 @@
/*
* ebt_pkttype
*
* Authors:
* Bart De Schuymer <bdschuym@pandora.be>
*
* April, 2003
*
*/
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_pkttype.h>
static bool
ebt_pkttype_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_pkttype_info *info = par->matchinfo;
return (skb->pkt_type == info->pkt_type) ^ info->invert;
}
static int ebt_pkttype_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_pkttype_info *info = par->matchinfo;
if (info->invert != 0 && info->invert != 1)
return -EINVAL;
/* Allow any pkt_type value */
return 0;
}
static struct xt_match ebt_pkttype_mt_reg __read_mostly = {
.name = "pkttype",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_pkttype_mt,
.checkentry = ebt_pkttype_mt_check,
.matchsize = sizeof(struct ebt_pkttype_info),
.me = THIS_MODULE,
};
static int __init ebt_pkttype_init(void)
{
return xt_register_match(&ebt_pkttype_mt_reg);
}
static void __exit ebt_pkttype_fini(void)
{
xt_unregister_match(&ebt_pkttype_mt_reg);
}
module_init(ebt_pkttype_init);
module_exit(ebt_pkttype_fini);
MODULE_DESCRIPTION("Ebtables: Link layer packet type match");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,80 @@
/*
* ebt_redirect
*
* Authors:
* Bart De Schuymer <bdschuym@pandora.be>
*
* April, 2002
*
*/
#include <linux/module.h>
#include <net/sock.h>
#include "../br_private.h"
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_redirect.h>
static unsigned int
ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_redirect_info *info = par->targinfo;
if (!skb_make_writable(skb, 0))
return EBT_DROP;
if (par->hooknum != NF_BR_BROUTING)
/* rcu_read_lock()ed by nf_hook_slow */
ether_addr_copy(eth_hdr(skb)->h_dest,
br_port_get_rcu(par->in)->br->dev->dev_addr);
else
ether_addr_copy(eth_hdr(skb)->h_dest, par->in->dev_addr);
skb->pkt_type = PACKET_HOST;
return info->target;
}
static int ebt_redirect_tg_check(const struct xt_tgchk_param *par)
{
const struct ebt_redirect_info *info = par->targinfo;
unsigned int hook_mask;
if (BASE_CHAIN && info->target == EBT_RETURN)
return -EINVAL;
hook_mask = par->hook_mask & ~(1 << NF_BR_NUMHOOKS);
if ((strcmp(par->table, "nat") != 0 ||
hook_mask & ~(1 << NF_BR_PRE_ROUTING)) &&
(strcmp(par->table, "broute") != 0 ||
hook_mask & ~(1 << NF_BR_BROUTING)))
return -EINVAL;
if (INVALID_TARGET)
return -EINVAL;
return 0;
}
static struct xt_target ebt_redirect_tg_reg __read_mostly = {
.name = "redirect",
.revision = 0,
.family = NFPROTO_BRIDGE,
.hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_PRE_ROUTING) |
(1 << NF_BR_BROUTING),
.target = ebt_redirect_tg,
.checkentry = ebt_redirect_tg_check,
.targetsize = sizeof(struct ebt_redirect_info),
.me = THIS_MODULE,
};
static int __init ebt_redirect_init(void)
{
return xt_register_target(&ebt_redirect_tg_reg);
}
static void __exit ebt_redirect_fini(void)
{
xt_unregister_target(&ebt_redirect_tg_reg);
}
module_init(ebt_redirect_init);
module_exit(ebt_redirect_fini);
MODULE_DESCRIPTION("Ebtables: Packet redirection to localhost");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,87 @@
/*
* ebt_snat
*
* Authors:
* Bart De Schuymer <bdschuym@pandora.be>
*
* June, 2002
*
*/
#include <linux/module.h>
#include <net/sock.h>
#include <linux/if_arp.h>
#include <net/arp.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_nat.h>
static unsigned int
ebt_snat_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_nat_info *info = par->targinfo;
if (!skb_make_writable(skb, 0))
return EBT_DROP;
ether_addr_copy(eth_hdr(skb)->h_source, info->mac);
if (!(info->target & NAT_ARP_BIT) &&
eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) {
const struct arphdr *ap;
struct arphdr _ah;
ap = skb_header_pointer(skb, 0, sizeof(_ah), &_ah);
if (ap == NULL)
return EBT_DROP;
if (ap->ar_hln != ETH_ALEN)
goto out;
if (skb_store_bits(skb, sizeof(_ah), info->mac, ETH_ALEN))
return EBT_DROP;
}
out:
return info->target | ~EBT_VERDICT_BITS;
}
static int ebt_snat_tg_check(const struct xt_tgchk_param *par)
{
const struct ebt_nat_info *info = par->targinfo;
int tmp;
tmp = info->target | ~EBT_VERDICT_BITS;
if (BASE_CHAIN && tmp == EBT_RETURN)
return -EINVAL;
if (tmp < -NUM_STANDARD_TARGETS || tmp >= 0)
return -EINVAL;
tmp = info->target | EBT_VERDICT_BITS;
if ((tmp & ~NAT_ARP_BIT) != ~NAT_ARP_BIT)
return -EINVAL;
return 0;
}
static struct xt_target ebt_snat_tg_reg __read_mostly = {
.name = "snat",
.revision = 0,
.family = NFPROTO_BRIDGE,
.table = "nat",
.hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_POST_ROUTING),
.target = ebt_snat_tg,
.checkentry = ebt_snat_tg_check,
.targetsize = sizeof(struct ebt_nat_info),
.me = THIS_MODULE,
};
static int __init ebt_snat_init(void)
{
return xt_register_target(&ebt_snat_tg_reg);
}
static void __exit ebt_snat_fini(void)
{
xt_unregister_target(&ebt_snat_tg_reg);
}
module_init(ebt_snat_init);
module_exit(ebt_snat_fini);
MODULE_DESCRIPTION("Ebtables: Source MAC address translation");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,197 @@
/*
* ebt_stp
*
* Authors:
* Bart De Schuymer <bdschuym@pandora.be>
* Stephen Hemminger <shemminger@osdl.org>
*
* July, 2003
*/
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_stp.h>
#define BPDU_TYPE_CONFIG 0
#define BPDU_TYPE_TCN 0x80
struct stp_header {
uint8_t dsap;
uint8_t ssap;
uint8_t ctrl;
uint8_t pid;
uint8_t vers;
uint8_t type;
};
struct stp_config_pdu {
uint8_t flags;
uint8_t root[8];
uint8_t root_cost[4];
uint8_t sender[8];
uint8_t port[2];
uint8_t msg_age[2];
uint8_t max_age[2];
uint8_t hello_time[2];
uint8_t forward_delay[2];
};
#define NR16(p) (p[0] << 8 | p[1])
#define NR32(p) ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3])
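/* NR16()/NR32() read big-endian 16/32-bit values out of the (unaligned)
 * byte arrays of the BPDU. ebt_filter_config() checks every field enabled
 * in the rule's bitmask against the configured value or [lower, upper]
 * range; FWINV() applies the per-field inversion flag.
 */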
static bool ebt_filter_config(const struct ebt_stp_info *info,
const struct stp_config_pdu *stpc)
{
const struct ebt_stp_config_info *c;
uint16_t v16;
uint32_t v32;
int verdict, i;
c = &info->config;
if ((info->bitmask & EBT_STP_FLAGS) &&
FWINV(c->flags != stpc->flags, EBT_STP_FLAGS))
return false;
if (info->bitmask & EBT_STP_ROOTPRIO) {
v16 = NR16(stpc->root);
if (FWINV(v16 < c->root_priol ||
v16 > c->root_priou, EBT_STP_ROOTPRIO))
return false;
}
if (info->bitmask & EBT_STP_ROOTADDR) {
verdict = 0;
for (i = 0; i < 6; i++)
verdict |= (stpc->root[2+i] ^ c->root_addr[i]) &
c->root_addrmsk[i];
if (FWINV(verdict != 0, EBT_STP_ROOTADDR))
return false;
}
if (info->bitmask & EBT_STP_ROOTCOST) {
v32 = NR32(stpc->root_cost);
if (FWINV(v32 < c->root_costl ||
v32 > c->root_costu, EBT_STP_ROOTCOST))
return false;
}
if (info->bitmask & EBT_STP_SENDERPRIO) {
v16 = NR16(stpc->sender);
if (FWINV(v16 < c->sender_priol ||
v16 > c->sender_priou, EBT_STP_SENDERPRIO))
return false;
}
if (info->bitmask & EBT_STP_SENDERADDR) {
verdict = 0;
for (i = 0; i < 6; i++)
verdict |= (stpc->sender[2+i] ^ c->sender_addr[i]) &
c->sender_addrmsk[i];
if (FWINV(verdict != 0, EBT_STP_SENDERADDR))
return false;
}
if (info->bitmask & EBT_STP_PORT) {
v16 = NR16(stpc->port);
if (FWINV(v16 < c->portl ||
v16 > c->portu, EBT_STP_PORT))
return false;
}
if (info->bitmask & EBT_STP_MSGAGE) {
v16 = NR16(stpc->msg_age);
if (FWINV(v16 < c->msg_agel ||
v16 > c->msg_ageu, EBT_STP_MSGAGE))
return false;
}
if (info->bitmask & EBT_STP_MAXAGE) {
v16 = NR16(stpc->max_age);
if (FWINV(v16 < c->max_agel ||
v16 > c->max_ageu, EBT_STP_MAXAGE))
return false;
}
if (info->bitmask & EBT_STP_HELLOTIME) {
v16 = NR16(stpc->hello_time);
if (FWINV(v16 < c->hello_timel ||
v16 > c->hello_timeu, EBT_STP_HELLOTIME))
return false;
}
if (info->bitmask & EBT_STP_FWDD) {
v16 = NR16(stpc->forward_delay);
if (FWINV(v16 < c->forward_delayl ||
v16 > c->forward_delayu, EBT_STP_FWDD))
return false;
}
return true;
}
static bool
ebt_stp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_stp_info *info = par->matchinfo;
const struct stp_header *sp;
struct stp_header _stph;
const uint8_t header[6] = {0x42, 0x42, 0x03, 0x00, 0x00, 0x00};
sp = skb_header_pointer(skb, 0, sizeof(_stph), &_stph);
if (sp == NULL)
return false;
/* The stp code only considers these */
if (memcmp(sp, header, sizeof(header)))
return false;
if (info->bitmask & EBT_STP_TYPE &&
FWINV(info->type != sp->type, EBT_STP_TYPE))
return false;
if (sp->type == BPDU_TYPE_CONFIG &&
info->bitmask & EBT_STP_CONFIG_MASK) {
const struct stp_config_pdu *st;
struct stp_config_pdu _stpc;
st = skb_header_pointer(skb, sizeof(_stph),
sizeof(_stpc), &_stpc);
if (st == NULL)
return false;
return ebt_filter_config(info, st);
}
return true;
}
static int ebt_stp_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_stp_info *info = par->matchinfo;
const uint8_t bridge_ula[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
const uint8_t msk[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
const struct ebt_entry *e = par->entryinfo;
if (info->bitmask & ~EBT_STP_MASK || info->invflags & ~EBT_STP_MASK ||
!(info->bitmask & EBT_STP_MASK))
return -EINVAL;
/* Make sure the match only receives stp frames */
if (!ether_addr_equal(e->destmac, bridge_ula) ||
!ether_addr_equal(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC))
return -EINVAL;
return 0;
}
static struct xt_match ebt_stp_mt_reg __read_mostly = {
.name = "stp",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_stp_mt,
.checkentry = ebt_stp_mt_check,
.matchsize = sizeof(struct ebt_stp_info),
.me = THIS_MODULE,
};
static int __init ebt_stp_init(void)
{
return xt_register_match(&ebt_stp_mt_reg);
}
static void __exit ebt_stp_fini(void)
{
xt_unregister_match(&ebt_stp_mt_reg);
}
module_init(ebt_stp_init);
module_exit(ebt_stp_fini);
MODULE_DESCRIPTION("Ebtables: Spanning Tree Protocol packet match");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,180 @@
/*
* Description: EBTables 802.1Q match extension kernelspace module.
* Authors: Nick Fedchik <nick@fedchik.org.ua>
* Bart De Schuymer <bdschuym@pandora.be>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_vlan.h>
#define MODULE_VERS "0.6"
MODULE_AUTHOR("Nick Fedchik <nick@fedchik.org.ua>");
MODULE_DESCRIPTION("Ebtables: 802.1Q VLAN tag match");
MODULE_LICENSE("GPL");
#define GET_BITMASK(_BIT_MASK_) info->bitmask & _BIT_MASK_
#define EXIT_ON_MISMATCH(_MATCH_,_MASK_) {if (!((info->_MATCH_ == _MATCH_)^!!(info->invflags & _MASK_))) return false; }
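/* GET_BITMASK() tests whether a particular sub-match was requested by the
 * rule; EXIT_ON_MISMATCH() bails out of the match unless the parsed field
 * equals the configured value, XOR-ed with the field's inversion flag.
 */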
static bool
ebt_vlan_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_vlan_info *info = par->matchinfo;
unsigned short TCI; /* Whole TCI, given from parsed frame */
unsigned short id; /* VLAN ID, given from frame TCI */
unsigned char prio; /* user_priority, given from frame TCI */
/* VLAN encapsulated Type/Length field, given from orig frame */
__be16 encap;
if (vlan_tx_tag_present(skb)) {
TCI = vlan_tx_tag_get(skb);
encap = skb->protocol;
} else {
const struct vlan_hdr *fp;
struct vlan_hdr _frame;
fp = skb_header_pointer(skb, 0, sizeof(_frame), &_frame);
if (fp == NULL)
return false;
TCI = ntohs(fp->h_vlan_TCI);
encap = fp->h_vlan_encapsulated_proto;
}
/* Tag Control Information (TCI) consists of the following elements:
* - User_priority. The user_priority field is three bits in length,
* interpreted as a binary number.
* - Canonical Format Indicator (CFI). The Canonical Format Indicator
* (CFI) is a single bit flag value. Currently ignored.
* - VLAN Identifier (VID). The VID is encoded as
* an unsigned binary number. */
id = TCI & VLAN_VID_MASK;
prio = (TCI >> 13) & 0x7;
/* Checking VLAN Identifier (VID) */
if (GET_BITMASK(EBT_VLAN_ID))
EXIT_ON_MISMATCH(id, EBT_VLAN_ID);
/* Checking user_priority */
if (GET_BITMASK(EBT_VLAN_PRIO))
EXIT_ON_MISMATCH(prio, EBT_VLAN_PRIO);
/* Checking Encapsulated Proto (Length/Type) field */
if (GET_BITMASK(EBT_VLAN_ENCAP))
EXIT_ON_MISMATCH(encap, EBT_VLAN_ENCAP);
return true;
}
static int ebt_vlan_mt_check(const struct xt_mtchk_param *par)
{
struct ebt_vlan_info *info = par->matchinfo;
const struct ebt_entry *e = par->entryinfo;
	/* Is the frame being checked an 802.1Q frame at all? */
if (e->ethproto != htons(ETH_P_8021Q)) {
pr_debug("passed entry proto %2.4X is not 802.1Q (8100)\n",
ntohs(e->ethproto));
return -EINVAL;
}
/* Check for bitmask range
* True if even one bit is out of mask */
if (info->bitmask & ~EBT_VLAN_MASK) {
pr_debug("bitmask %2X is out of mask (%2X)\n",
info->bitmask, EBT_VLAN_MASK);
return -EINVAL;
}
/* Check for inversion flags range */
if (info->invflags & ~EBT_VLAN_MASK) {
pr_debug("inversion flags %2X is out of mask (%2X)\n",
info->invflags, EBT_VLAN_MASK);
return -EINVAL;
}
/* Reserved VLAN ID (VID) values
* -----------------------------
* 0 - The null VLAN ID.
* 1 - The default Port VID (PVID)
* 0x0FFF - Reserved for implementation use.
* if_vlan.h: VLAN_N_VID 4096. */
if (GET_BITMASK(EBT_VLAN_ID)) {
if (!!info->id) { /* if id!=0 => check vid range */
if (info->id > VLAN_N_VID) {
pr_debug("id %d is out of range (1-4096)\n",
info->id);
return -EINVAL;
}
			/* Note: at this point this is a valid VLAN-tagged frame.
			 * Any user_priority value is acceptable, but it should be
			 * ignored according to the 802.1Q standard, so we simply
			 * drop the prio flag. */
info->bitmask &= ~EBT_VLAN_PRIO;
}
/* Else, id=0 (null VLAN ID) => user_priority range (any?) */
}
if (GET_BITMASK(EBT_VLAN_PRIO)) {
if ((unsigned char) info->prio > 7) {
pr_debug("prio %d is out of range (0-7)\n",
info->prio);
return -EINVAL;
}
}
	/* Check the encapsulated proto (Length/Type) value - any value in the
	 * u_short range is possible, but it must not be smaller than ETH_ZLEN.
	 * if_ether.h: ETH_ZLEN 60  -  Min. octets in frame sans FCS */
if (GET_BITMASK(EBT_VLAN_ENCAP)) {
if ((unsigned short) ntohs(info->encap) < ETH_ZLEN) {
pr_debug("encap frame length %d is less than "
"minimal\n", ntohs(info->encap));
return -EINVAL;
}
}
return 0;
}
static struct xt_match ebt_vlan_mt_reg __read_mostly = {
.name = "vlan",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_vlan_mt,
.checkentry = ebt_vlan_mt_check,
.matchsize = sizeof(struct ebt_vlan_info),
.me = THIS_MODULE,
};
static int __init ebt_vlan_init(void)
{
pr_debug("ebtables 802.1Q extension module v" MODULE_VERS "\n");
return xt_register_match(&ebt_vlan_mt_reg);
}
static void __exit ebt_vlan_fini(void)
{
xt_unregister_match(&ebt_vlan_mt_reg);
}
module_init(ebt_vlan_init);
module_exit(ebt_vlan_fini);

View file

@ -0,0 +1,100 @@
/*
* ebtable_broute
*
* Authors:
* Bart De Schuymer <bdschuym@pandora.be>
*
* April, 2002
*
* This table lets you choose between routing and bridging for frames
* entering on a bridge enslaved nic. This table is traversed before any
* other ebtables table. See net/bridge/br_input.c.
*/
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/module.h>
#include <linux/if_bridge.h>
/* EBT_ACCEPT means the frame will be bridged
* EBT_DROP means the frame will be routed
*/
static struct ebt_entries initial_chain = {
.name = "BROUTING",
.policy = EBT_ACCEPT,
};
static struct ebt_replace_kernel initial_table = {
.name = "broute",
.valid_hooks = 1 << NF_BR_BROUTING,
.entries_size = sizeof(struct ebt_entries),
.hook_entry = {
[NF_BR_BROUTING] = &initial_chain,
},
.entries = (char *)&initial_chain,
};
static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
{
if (valid_hooks & ~(1 << NF_BR_BROUTING))
return -EINVAL;
return 0;
}
static const struct ebt_table broute_table = {
.name = "broute",
.table = &initial_table,
.valid_hooks = 1 << NF_BR_BROUTING,
.check = check,
.me = THIS_MODULE,
};
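/* ebt_broute() is installed as br_should_route_hook and is consulted from
 * br_handle_frame() for every frame entering a bridge port: a BROUTING
 * verdict of NF_DROP means "route it" (hand the frame to the normal IP
 * stack), anything else means "bridge it".
 */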
static int ebt_broute(struct sk_buff *skb)
{
int ret;
ret = ebt_do_table(NF_BR_BROUTING, skb, skb->dev, NULL,
dev_net(skb->dev)->xt.broute_table);
if (ret == NF_DROP)
return 1; /* route it */
return 0; /* bridge it */
}
static int __net_init broute_net_init(struct net *net)
{
net->xt.broute_table = ebt_register_table(net, &broute_table);
return PTR_ERR_OR_ZERO(net->xt.broute_table);
}
static void __net_exit broute_net_exit(struct net *net)
{
ebt_unregister_table(net, net->xt.broute_table);
}
static struct pernet_operations broute_net_ops = {
.init = broute_net_init,
.exit = broute_net_exit,
};
static int __init ebtable_broute_init(void)
{
int ret;
ret = register_pernet_subsys(&broute_net_ops);
if (ret < 0)
return ret;
/* see br_input.c */
RCU_INIT_POINTER(br_should_route_hook,
(br_should_route_hook_t *)ebt_broute);
return 0;
}
static void __exit ebtable_broute_fini(void)
{
RCU_INIT_POINTER(br_should_route_hook, NULL);
synchronize_net();
unregister_pernet_subsys(&broute_net_ops);
}
module_init(ebtable_broute_init);
module_exit(ebtable_broute_fini);
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,138 @@
/*
* ebtable_filter
*
* Authors:
* Bart De Schuymer <bdschuym@pandora.be>
*
* April, 2002
*
*/
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/module.h>
#define FILTER_VALID_HOOKS ((1 << NF_BR_LOCAL_IN) | (1 << NF_BR_FORWARD) | \
(1 << NF_BR_LOCAL_OUT))
static struct ebt_entries initial_chains[] = {
{
.name = "INPUT",
.policy = EBT_ACCEPT,
},
{
.name = "FORWARD",
.policy = EBT_ACCEPT,
},
{
.name = "OUTPUT",
.policy = EBT_ACCEPT,
},
};
static struct ebt_replace_kernel initial_table = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.entries_size = 3 * sizeof(struct ebt_entries),
.hook_entry = {
[NF_BR_LOCAL_IN] = &initial_chains[0],
[NF_BR_FORWARD] = &initial_chains[1],
[NF_BR_LOCAL_OUT] = &initial_chains[2],
},
.entries = (char *)initial_chains,
};
static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
{
if (valid_hooks & ~FILTER_VALID_HOOKS)
return -EINVAL;
return 0;
}
static const struct ebt_table frame_filter = {
.name = "filter",
.table = &initial_table,
.valid_hooks = FILTER_VALID_HOOKS,
.check = check,
.me = THIS_MODULE,
};
static unsigned int
ebt_in_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ebt_do_table(ops->hooknum, skb, in, out,
dev_net(in)->xt.frame_filter);
}
static unsigned int
ebt_out_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ebt_do_table(ops->hooknum, skb, in, out,
dev_net(out)->xt.frame_filter);
}
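/* The filter table hooks into LOCAL_IN, FORWARD and LOCAL_OUT on the
 * bridge: traffic coming in over a bridge port is filtered at the
 * NF_BR_PRI_FILTER_BRIDGED priority, locally generated traffic at
 * NF_BR_PRI_FILTER_OTHER.
 */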
static struct nf_hook_ops ebt_ops_filter[] __read_mostly = {
{
.hook = ebt_in_hook,
.owner = THIS_MODULE,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_LOCAL_IN,
.priority = NF_BR_PRI_FILTER_BRIDGED,
},
{
.hook = ebt_in_hook,
.owner = THIS_MODULE,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_FORWARD,
.priority = NF_BR_PRI_FILTER_BRIDGED,
},
{
.hook = ebt_out_hook,
.owner = THIS_MODULE,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_LOCAL_OUT,
.priority = NF_BR_PRI_FILTER_OTHER,
},
};
static int __net_init frame_filter_net_init(struct net *net)
{
net->xt.frame_filter = ebt_register_table(net, &frame_filter);
return PTR_ERR_OR_ZERO(net->xt.frame_filter);
}
static void __net_exit frame_filter_net_exit(struct net *net)
{
ebt_unregister_table(net, net->xt.frame_filter);
}
static struct pernet_operations frame_filter_net_ops = {
.init = frame_filter_net_init,
.exit = frame_filter_net_exit,
};
static int __init ebtable_filter_init(void)
{
int ret;
ret = register_pernet_subsys(&frame_filter_net_ops);
if (ret < 0)
return ret;
ret = nf_register_hooks(ebt_ops_filter, ARRAY_SIZE(ebt_ops_filter));
if (ret < 0)
unregister_pernet_subsys(&frame_filter_net_ops);
return ret;
}
static void __exit ebtable_filter_fini(void)
{
nf_unregister_hooks(ebt_ops_filter, ARRAY_SIZE(ebt_ops_filter));
unregister_pernet_subsys(&frame_filter_net_ops);
}
module_init(ebtable_filter_init);
module_exit(ebtable_filter_fini);
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,138 @@
/*
* ebtable_nat
*
* Authors:
* Bart De Schuymer <bdschuym@pandora.be>
*
* April, 2002
*
*/
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/module.h>
#define NAT_VALID_HOOKS ((1 << NF_BR_PRE_ROUTING) | (1 << NF_BR_LOCAL_OUT) | \
(1 << NF_BR_POST_ROUTING))
static struct ebt_entries initial_chains[] = {
{
.name = "PREROUTING",
.policy = EBT_ACCEPT,
},
{
.name = "OUTPUT",
.policy = EBT_ACCEPT,
},
{
.name = "POSTROUTING",
.policy = EBT_ACCEPT,
}
};
static struct ebt_replace_kernel initial_table = {
.name = "nat",
.valid_hooks = NAT_VALID_HOOKS,
.entries_size = 3 * sizeof(struct ebt_entries),
.hook_entry = {
[NF_BR_PRE_ROUTING] = &initial_chains[0],
[NF_BR_LOCAL_OUT] = &initial_chains[1],
[NF_BR_POST_ROUTING] = &initial_chains[2],
},
.entries = (char *)initial_chains,
};
static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
{
if (valid_hooks & ~NAT_VALID_HOOKS)
return -EINVAL;
return 0;
}
static struct ebt_table frame_nat = {
.name = "nat",
.table = &initial_table,
.valid_hooks = NAT_VALID_HOOKS,
.check = check,
.me = THIS_MODULE,
};
static unsigned int
ebt_nat_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ebt_do_table(ops->hooknum, skb, in, out,
dev_net(in)->xt.frame_nat);
}
static unsigned int
ebt_nat_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return ebt_do_table(ops->hooknum, skb, in, out,
dev_net(out)->xt.frame_nat);
}
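/* Destination MAC rewriting (dnat) runs early - in PREROUTING for bridged
 * frames and in OUTPUT for locally generated ones - while source MAC
 * rewriting (snat) runs last, in POSTROUTING.
 */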
static struct nf_hook_ops ebt_ops_nat[] __read_mostly = {
{
.hook = ebt_nat_out,
.owner = THIS_MODULE,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_LOCAL_OUT,
.priority = NF_BR_PRI_NAT_DST_OTHER,
},
{
.hook = ebt_nat_out,
.owner = THIS_MODULE,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_POST_ROUTING,
.priority = NF_BR_PRI_NAT_SRC,
},
{
.hook = ebt_nat_in,
.owner = THIS_MODULE,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_PRE_ROUTING,
.priority = NF_BR_PRI_NAT_DST_BRIDGED,
},
};
static int __net_init frame_nat_net_init(struct net *net)
{
net->xt.frame_nat = ebt_register_table(net, &frame_nat);
return PTR_ERR_OR_ZERO(net->xt.frame_nat);
}
static void __net_exit frame_nat_net_exit(struct net *net)
{
ebt_unregister_table(net, net->xt.frame_nat);
}
static struct pernet_operations frame_nat_net_ops = {
.init = frame_nat_net_init,
.exit = frame_nat_net_exit,
};
static int __init ebtable_nat_init(void)
{
int ret;
ret = register_pernet_subsys(&frame_nat_net_ops);
if (ret < 0)
return ret;
ret = nf_register_hooks(ebt_ops_nat, ARRAY_SIZE(ebt_ops_nat));
if (ret < 0)
unregister_pernet_subsys(&frame_nat_net_ops);
return ret;
}
static void __exit ebtable_nat_fini(void)
{
nf_unregister_hooks(ebt_ops_nat, ARRAY_SIZE(ebt_ops_nat));
unregister_pernet_subsys(&frame_nat_net_ops);
}
module_init(ebtable_nat_init);
module_exit(ebtable_nat_fini);
MODULE_LICENSE("GPL");

File diff suppressed because it is too large

View file

@ -0,0 +1,96 @@
/*
* (C) 2014 by Pablo Neira Ayuso <pablo@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/if_bridge.h>
#include <linux/ip.h>
#include <net/route.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_log.h>
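/* Bridge-level logging has no format of its own: the frame is handed to
 * the real IPv4, IPv6 or ARP packet logger according to the Ethernet
 * protocol field; other protocols are silently ignored.
 */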
static void nf_log_bridge_packet(struct net *net, u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo,
const char *prefix)
{
switch (eth_hdr(skb)->h_proto) {
case htons(ETH_P_IP):
nf_log_packet(net, NFPROTO_IPV4, hooknum, skb, in, out,
loginfo, "%s", prefix);
break;
case htons(ETH_P_IPV6):
nf_log_packet(net, NFPROTO_IPV6, hooknum, skb, in, out,
loginfo, "%s", prefix);
break;
case htons(ETH_P_ARP):
case htons(ETH_P_RARP):
nf_log_packet(net, NFPROTO_ARP, hooknum, skb, in, out,
loginfo, "%s", prefix);
break;
}
}
static struct nf_logger nf_bridge_logger __read_mostly = {
.name = "nf_log_bridge",
.type = NF_LOG_TYPE_LOG,
.logfn = nf_log_bridge_packet,
.me = THIS_MODULE,
};
static int __net_init nf_log_bridge_net_init(struct net *net)
{
nf_log_set(net, NFPROTO_BRIDGE, &nf_bridge_logger);
return 0;
}
static void __net_exit nf_log_bridge_net_exit(struct net *net)
{
nf_log_unset(net, &nf_bridge_logger);
}
static struct pernet_operations nf_log_bridge_net_ops = {
.init = nf_log_bridge_net_init,
.exit = nf_log_bridge_net_exit,
};
static int __init nf_log_bridge_init(void)
{
int ret;
/* Request to load the real packet loggers. */
nf_logger_request_module(NFPROTO_IPV4, NF_LOG_TYPE_LOG);
nf_logger_request_module(NFPROTO_IPV6, NF_LOG_TYPE_LOG);
nf_logger_request_module(NFPROTO_ARP, NF_LOG_TYPE_LOG);
ret = register_pernet_subsys(&nf_log_bridge_net_ops);
if (ret < 0)
return ret;
nf_log_register(NFPROTO_BRIDGE, &nf_bridge_logger);
return 0;
}
static void __exit nf_log_bridge_exit(void)
{
unregister_pernet_subsys(&nf_log_bridge_net_ops);
nf_log_unregister(&nf_bridge_logger);
}
module_init(nf_log_bridge_init);
module_exit(nf_log_bridge_exit);
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("Netfilter bridge packet logging");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NF_LOGGER(AF_BRIDGE, 0);

View file

@ -0,0 +1,108 @@
/*
* Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
* Copyright (c) 2013 Pablo Neira Ayuso <pablo@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter_bridge.h>
#include <net/netfilter/nf_tables.h>
static unsigned int
nft_do_chain_bridge(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct nft_pktinfo pkt;
nft_set_pktinfo(&pkt, ops, skb, in, out);
return nft_do_chain(&pkt, ops);
}
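/* One nf_tables address family covering all five bridge netfilter hooks;
 * every hook simply builds the packet info and runs the chains registered
 * on it.
 */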
static struct nft_af_info nft_af_bridge __read_mostly = {
.family = NFPROTO_BRIDGE,
.nhooks = NF_BR_NUMHOOKS,
.owner = THIS_MODULE,
.nops = 1,
.hooks = {
[NF_BR_PRE_ROUTING] = nft_do_chain_bridge,
[NF_BR_LOCAL_IN] = nft_do_chain_bridge,
[NF_BR_FORWARD] = nft_do_chain_bridge,
[NF_BR_LOCAL_OUT] = nft_do_chain_bridge,
[NF_BR_POST_ROUTING] = nft_do_chain_bridge,
},
};
static int nf_tables_bridge_init_net(struct net *net)
{
net->nft.bridge = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
if (net->nft.bridge == NULL)
return -ENOMEM;
memcpy(net->nft.bridge, &nft_af_bridge, sizeof(nft_af_bridge));
if (nft_register_afinfo(net, net->nft.bridge) < 0)
goto err;
return 0;
err:
kfree(net->nft.bridge);
return -ENOMEM;
}
static void nf_tables_bridge_exit_net(struct net *net)
{
nft_unregister_afinfo(net->nft.bridge);
kfree(net->nft.bridge);
}
static struct pernet_operations nf_tables_bridge_net_ops = {
.init = nf_tables_bridge_init_net,
.exit = nf_tables_bridge_exit_net,
};
static const struct nf_chain_type filter_bridge = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
.family = NFPROTO_BRIDGE,
.owner = THIS_MODULE,
.hook_mask = (1 << NF_BR_PRE_ROUTING) |
(1 << NF_BR_LOCAL_IN) |
(1 << NF_BR_FORWARD) |
(1 << NF_BR_LOCAL_OUT) |
(1 << NF_BR_POST_ROUTING),
};
static int __init nf_tables_bridge_init(void)
{
int ret;
nft_register_chain_type(&filter_bridge);
ret = register_pernet_subsys(&nf_tables_bridge_net_ops);
if (ret < 0)
nft_unregister_chain_type(&filter_bridge);
return ret;
}
static void __exit nf_tables_bridge_exit(void)
{
unregister_pernet_subsys(&nf_tables_bridge_net_ops);
nft_unregister_chain_type(&filter_bridge);
}
module_init(nf_tables_bridge_init);
module_exit(nf_tables_bridge_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS_NFT_FAMILY(AF_BRIDGE);

View file

@ -0,0 +1,139 @@
/*
* Copyright (c) 2014 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_meta.h>
#include "../br_private.h"
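/* This module extends the generic "meta" expression with the bridge
 * specific keys NFT_META_BRI_IIFNAME/NFT_META_BRI_OIFNAME, which resolve
 * to the name of the bridge device that the input/output port belongs to;
 * all other keys fall through to the generic nft_meta handlers.
 */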
static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
struct nft_data data[NFT_REG_MAX + 1],
const struct nft_pktinfo *pkt)
{
const struct nft_meta *priv = nft_expr_priv(expr);
const struct net_device *in = pkt->in, *out = pkt->out;
struct nft_data *dest = &data[priv->dreg];
const struct net_bridge_port *p;
switch (priv->key) {
case NFT_META_BRI_IIFNAME:
if (in == NULL || (p = br_port_get_rcu(in)) == NULL)
goto err;
break;
case NFT_META_BRI_OIFNAME:
if (out == NULL || (p = br_port_get_rcu(out)) == NULL)
goto err;
break;
default:
goto out;
}
strncpy((char *)dest->data, p->br->dev->name, sizeof(dest->data));
return;
out:
return nft_meta_get_eval(expr, data, pkt);
err:
data[NFT_REG_VERDICT].verdict = NFT_BREAK;
}
static int nft_meta_bridge_get_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_meta *priv = nft_expr_priv(expr);
int err;
priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
switch (priv->key) {
case NFT_META_BRI_IIFNAME:
case NFT_META_BRI_OIFNAME:
break;
default:
return nft_meta_get_init(ctx, expr, tb);
}
priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG]));
err = nft_validate_output_register(priv->dreg);
if (err < 0)
return err;
err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
if (err < 0)
return err;
return 0;
}
static struct nft_expr_type nft_meta_bridge_type;
static const struct nft_expr_ops nft_meta_bridge_get_ops = {
.type = &nft_meta_bridge_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
.eval = nft_meta_bridge_get_eval,
.init = nft_meta_bridge_get_init,
.dump = nft_meta_get_dump,
};
static const struct nft_expr_ops nft_meta_bridge_set_ops = {
.type = &nft_meta_bridge_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
.eval = nft_meta_set_eval,
.init = nft_meta_set_init,
.dump = nft_meta_set_dump,
};
static const struct nft_expr_ops *
nft_meta_bridge_select_ops(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
if (tb[NFTA_META_KEY] == NULL)
return ERR_PTR(-EINVAL);
if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG])
return ERR_PTR(-EINVAL);
if (tb[NFTA_META_DREG])
return &nft_meta_bridge_get_ops;
if (tb[NFTA_META_SREG])
return &nft_meta_bridge_set_ops;
return ERR_PTR(-EINVAL);
}
static struct nft_expr_type nft_meta_bridge_type __read_mostly = {
.family = NFPROTO_BRIDGE,
.name = "meta",
.select_ops = &nft_meta_bridge_select_ops,
.policy = nft_meta_policy,
.maxattr = NFTA_META_MAX,
.owner = THIS_MODULE,
};
static int __init nft_meta_bridge_module_init(void)
{
return nft_register_expr(&nft_meta_bridge_type);
}
static void __exit nft_meta_bridge_module_exit(void)
{
nft_unregister_expr(&nft_meta_bridge_type);
}
module_init(nft_meta_bridge_module_init);
module_exit(nft_meta_bridge_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>");
MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "meta");

View file

@ -0,0 +1,427 @@
/*
* Copyright (c) 2014 Pablo Neira Ayuso <pablo@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_reject.h>
#include <net/netfilter/ipv4/nf_reject.h>
#include <net/netfilter/ipv6/nf_reject.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/ip6_checksum.h>
#include <linux/netfilter_bridge.h>
#include "../br_private.h"
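/* Reject replies (TCP resets and ICMP/ICMPv6 unreachables) are built from
 * scratch and sent straight out of the bridge port the offending frame
 * arrived on: the Ethernet source/destination addresses of the original
 * frame are swapped and the reply is handed to br_deliver().
 */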
static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
struct sk_buff *nskb)
{
struct ethhdr *eth;
eth = (struct ethhdr *)skb_push(nskb, ETH_HLEN);
skb_reset_mac_header(nskb);
ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
eth->h_proto = eth_hdr(oldskb)->h_proto;
skb_pull(nskb, ETH_HLEN);
}
static int nft_reject_iphdr_validate(struct sk_buff *oldskb)
{
struct iphdr *iph;
u32 len;
if (!pskb_may_pull(oldskb, sizeof(struct iphdr)))
return 0;
iph = ip_hdr(oldskb);
if (iph->ihl < 5 || iph->version != 4)
return 0;
len = ntohs(iph->tot_len);
if (oldskb->len < len)
return 0;
else if (len < (iph->ihl*4))
return 0;
if (!pskb_may_pull(oldskb, iph->ihl*4))
return 0;
return 1;
}
static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
{
struct sk_buff *nskb;
struct iphdr *niph;
const struct tcphdr *oth;
struct tcphdr _oth;
if (!nft_reject_iphdr_validate(oldskb))
return;
oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
if (!oth)
return;
nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
LL_MAX_HEADER, GFP_ATOMIC);
if (!nskb)
return;
skb_reserve(nskb, LL_MAX_HEADER);
niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
sysctl_ip_default_ttl);
nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
niph->ttl = sysctl_ip_default_ttl;
niph->tot_len = htons(nskb->len);
ip_send_check(niph);
nft_reject_br_push_etherhdr(oldskb, nskb);
br_deliver(br_port_get_rcu(oldskb->dev), nskb);
}
static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
u8 code)
{
struct sk_buff *nskb;
struct iphdr *niph;
struct icmphdr *icmph;
unsigned int len;
void *payload;
__wsum csum;
if (!nft_reject_iphdr_validate(oldskb))
return;
/* IP header checks: fragment. */
if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
return;
/* RFC says return as much as we can without exceeding 576 bytes. */
len = min_t(unsigned int, 536, oldskb->len);
if (!pskb_may_pull(oldskb, len))
return;
if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), 0))
return;
nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
LL_MAX_HEADER + len, GFP_ATOMIC);
if (!nskb)
return;
skb_reserve(nskb, LL_MAX_HEADER);
niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
sysctl_ip_default_ttl);
skb_reset_transport_header(nskb);
icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
memset(icmph, 0, sizeof(*icmph));
icmph->type = ICMP_DEST_UNREACH;
icmph->code = code;
payload = skb_put(nskb, len);
memcpy(payload, skb_network_header(oldskb), len);
csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
icmph->checksum = csum_fold(csum);
niph->tot_len = htons(nskb->len);
ip_send_check(niph);
nft_reject_br_push_etherhdr(oldskb, nskb);
br_deliver(br_port_get_rcu(oldskb->dev), nskb);
}
static int nft_reject_ip6hdr_validate(struct sk_buff *oldskb)
{
struct ipv6hdr *hdr;
u32 pkt_len;
if (!pskb_may_pull(oldskb, sizeof(struct ipv6hdr)))
return 0;
hdr = ipv6_hdr(oldskb);
if (hdr->version != 6)
return 0;
pkt_len = ntohs(hdr->payload_len);
if (pkt_len + sizeof(struct ipv6hdr) > oldskb->len)
return 0;
return 1;
}
static void nft_reject_br_send_v6_tcp_reset(struct net *net,
struct sk_buff *oldskb, int hook)
{
struct sk_buff *nskb;
const struct tcphdr *oth;
struct tcphdr _oth;
unsigned int otcplen;
struct ipv6hdr *nip6h;
if (!nft_reject_ip6hdr_validate(oldskb))
return;
oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
if (!oth)
return;
nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
LL_MAX_HEADER, GFP_ATOMIC);
if (!nskb)
return;
skb_reserve(nskb, LL_MAX_HEADER);
nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
net->ipv6.devconf_all->hop_limit);
nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
nft_reject_br_push_etherhdr(oldskb, nskb);
br_deliver(br_port_get_rcu(oldskb->dev), nskb);
}
static void nft_reject_br_send_v6_unreach(struct net *net,
struct sk_buff *oldskb, int hook,
u8 code)
{
struct sk_buff *nskb;
struct ipv6hdr *nip6h;
struct icmp6hdr *icmp6h;
unsigned int len;
void *payload;
if (!nft_reject_ip6hdr_validate(oldskb))
return;
/* Include "As much of invoking packet as possible without the ICMPv6
* packet exceeding the minimum IPv6 MTU" in the ICMP payload.
*/
len = min_t(unsigned int, 1220, oldskb->len);
if (!pskb_may_pull(oldskb, len))
return;
nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) +
LL_MAX_HEADER + len, GFP_ATOMIC);
if (!nskb)
return;
skb_reserve(nskb, LL_MAX_HEADER);
nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
net->ipv6.devconf_all->hop_limit);
skb_reset_transport_header(nskb);
icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
memset(icmp6h, 0, sizeof(*icmp6h));
icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
icmp6h->icmp6_code = code;
payload = skb_put(nskb, len);
memcpy(payload, skb_network_header(oldskb), len);
nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
icmp6h->icmp6_cksum =
csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
nskb->len - sizeof(struct ipv6hdr),
IPPROTO_ICMPV6,
csum_partial(icmp6h,
nskb->len - sizeof(struct ipv6hdr),
0));
nft_reject_br_push_etherhdr(oldskb, nskb);
br_deliver(br_port_get_rcu(oldskb->dev), nskb);
}
static void nft_reject_bridge_eval(const struct nft_expr *expr,
struct nft_data data[NFT_REG_MAX + 1],
const struct nft_pktinfo *pkt)
{
struct nft_reject *priv = nft_expr_priv(expr);
struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;
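	/* Never send a reject reply for frames addressed to a link-layer
	 * broadcast or multicast address; such frames are simply dropped.
	 */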
if (is_broadcast_ether_addr(dest) ||
is_multicast_ether_addr(dest))
goto out;
switch (eth_hdr(pkt->skb)->h_proto) {
case htons(ETH_P_IP):
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
nft_reject_br_send_v4_unreach(pkt->skb,
pkt->ops->hooknum,
priv->icmp_code);
break;
case NFT_REJECT_TCP_RST:
nft_reject_br_send_v4_tcp_reset(pkt->skb,
pkt->ops->hooknum);
break;
case NFT_REJECT_ICMPX_UNREACH:
nft_reject_br_send_v4_unreach(pkt->skb,
pkt->ops->hooknum,
nft_reject_icmp_code(priv->icmp_code));
break;
}
break;
case htons(ETH_P_IPV6):
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
nft_reject_br_send_v6_unreach(net, pkt->skb,
pkt->ops->hooknum,
priv->icmp_code);
break;
case NFT_REJECT_TCP_RST:
nft_reject_br_send_v6_tcp_reset(net, pkt->skb,
pkt->ops->hooknum);
break;
case NFT_REJECT_ICMPX_UNREACH:
nft_reject_br_send_v6_unreach(net, pkt->skb,
pkt->ops->hooknum,
nft_reject_icmpv6_code(priv->icmp_code));
break;
}
break;
default:
/* No explicit way to reject this protocol, drop it. */
break;
}
out:
data[NFT_REG_VERDICT].verdict = NF_DROP;
}
static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain)
{
struct nft_base_chain *basechain;
if (chain->flags & NFT_BASE_CHAIN) {
basechain = nft_base_chain(chain);
switch (basechain->ops[0].hooknum) {
case NF_BR_PRE_ROUTING:
case NF_BR_LOCAL_IN:
break;
default:
return -EOPNOTSUPP;
}
}
return 0;
}
static int nft_reject_bridge_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_reject *priv = nft_expr_priv(expr);
int icmp_code, err;
err = nft_reject_bridge_validate_hooks(ctx->chain);
if (err < 0)
return err;
if (tb[NFTA_REJECT_TYPE] == NULL)
return -EINVAL;
priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
case NFT_REJECT_ICMPX_UNREACH:
if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
return -EINVAL;
icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
icmp_code > NFT_REJECT_ICMPX_MAX)
return -EINVAL;
priv->icmp_code = icmp_code;
break;
case NFT_REJECT_TCP_RST:
break;
default:
return -EINVAL;
}
return 0;
}
static int nft_reject_bridge_dump(struct sk_buff *skb,
const struct nft_expr *expr)
{
const struct nft_reject *priv = nft_expr_priv(expr);
if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
goto nla_put_failure;
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
case NFT_REJECT_ICMPX_UNREACH:
if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
goto nla_put_failure;
break;
}
return 0;
nla_put_failure:
return -1;
}
static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
return nft_reject_bridge_validate_hooks(ctx->chain);
}
static struct nft_expr_type nft_reject_bridge_type;
static const struct nft_expr_ops nft_reject_bridge_ops = {
.type = &nft_reject_bridge_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
.eval = nft_reject_bridge_eval,
.init = nft_reject_bridge_init,
.dump = nft_reject_bridge_dump,
.validate = nft_reject_bridge_validate,
};
static struct nft_expr_type nft_reject_bridge_type __read_mostly = {
.family = NFPROTO_BRIDGE,
.name = "reject",
.ops = &nft_reject_bridge_ops,
.policy = nft_reject_policy,
.maxattr = NFTA_REJECT_MAX,
.owner = THIS_MODULE,
};
static int __init nft_reject_bridge_module_init(void)
{
return nft_register_expr(&nft_reject_bridge_type);
}
static void __exit nft_reject_bridge_module_exit(void)
{
nft_unregister_expr(&nft_reject_bridge_type);
}
module_init(nft_reject_bridge_module_init);
module_exit(nft_reject_bridge_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "reject");