mirror of
https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-09-07 16:58:04 -04:00
Fixed MTP to work with TWRP
This commit is contained in:
commit
f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
67
net/openvswitch/Kconfig
Normal file
67
net/openvswitch/Kconfig
Normal file
|
@ -0,0 +1,67 @@
|
|||
#
# Open vSwitch
#

config OPENVSWITCH
	tristate "Open vSwitch"
	select LIBCRC32C
	---help---
	  Open vSwitch is a multilayer Ethernet switch targeted at virtualized
	  environments.  In addition to supporting a variety of features
	  expected in a traditional hardware switch, it enables fine-grained
	  programmatic extension and flow-based control of the network. This
	  control is useful in a wide variety of applications but is
	  particularly important in multi-server virtualization deployments,
	  which are often characterized by highly dynamic endpoints and the
	  need to maintain logical abstractions for multiple tenants.

	  The Open vSwitch datapath provides an in-kernel fast path for packet
	  forwarding.  It is complemented by a userspace daemon, ovs-vswitchd,
	  which is able to accept configuration from a variety of sources and
	  translate it into packet processing rules.

	  See http://openvswitch.org for more information and userspace
	  utilities.

	  To compile this code as a module, choose M here: the module will be
	  called openvswitch.

	  If unsure, say N.

config OPENVSWITCH_GRE
	bool "Open vSwitch GRE tunneling support"
	depends on INET
	depends on OPENVSWITCH
	depends on NET_IPGRE_DEMUX && !(OPENVSWITCH=y && NET_IPGRE_DEMUX=m)
	default y
	---help---
	  If you say Y here, then the Open vSwitch will be able to create
	  GRE vports.

	  Say N to exclude this support and reduce the binary size.

	  If unsure, say Y.

config OPENVSWITCH_VXLAN
	bool "Open vSwitch VXLAN tunneling support"
	depends on INET
	depends on OPENVSWITCH
	depends on VXLAN && !(OPENVSWITCH=y && VXLAN=m)
	default y
	---help---
	  If you say Y here, then the Open vSwitch will be able to create
	  VXLAN vports.

	  Say N to exclude this support and reduce the binary size.

	  If unsure, say Y.

config OPENVSWITCH_GENEVE
	bool "Open vSwitch Geneve tunneling support"
	depends on INET
	depends on OPENVSWITCH
	depends on GENEVE && !(OPENVSWITCH=y && GENEVE=m)
	default y
	---help---
	  If you say Y here, then the Open vSwitch will be able to create
	  Geneve vports.

	  Say N to exclude this support and reduce the binary size.
28
net/openvswitch/Makefile
Normal file
28
net/openvswitch/Makefile
Normal file
|
@ -0,0 +1,28 @@
|
|||
#
# Makefile for Open vSwitch.
#

obj-$(CONFIG_OPENVSWITCH) += openvswitch.o

# Core objects always built into the openvswitch module.
openvswitch-y := \
	actions.o \
	datapath.o \
	dp_notify.o \
	flow.o \
	flow_netlink.o \
	flow_table.o \
	vport.o \
	vport-internal_dev.o \
	vport-netdev.o

# Tunnel vports are compiled in only when the corresponding bool option is
# enabled; "ifneq (...,)" matches any non-empty value of the option.
ifneq ($(CONFIG_OPENVSWITCH_GENEVE),)
openvswitch-y += vport-geneve.o
endif

ifneq ($(CONFIG_OPENVSWITCH_VXLAN),)
openvswitch-y += vport-vxlan.o
endif

ifneq ($(CONFIG_OPENVSWITCH_GRE),)
openvswitch-y += vport-gre.o
endif
|
808
net/openvswitch/actions.c
Normal file
808
net/openvswitch/actions.c
Normal file
|
@ -0,0 +1,808 @@
|
|||
/*
|
||||
* Copyright (c) 2007-2014 Nicira, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of version 2 of the GNU General Public
|
||||
* License as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
* 02110-1301, USA
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/ip.h>
|
||||
#include <linux/openvswitch.h>
|
||||
#include <linux/sctp.h>
|
||||
#include <linux/tcp.h>
|
||||
#include <linux/udp.h>
|
||||
#include <linux/in6.h>
|
||||
#include <linux/if_arp.h>
|
||||
#include <linux/if_vlan.h>
|
||||
#include <net/ip.h>
|
||||
#include <net/ipv6.h>
|
||||
#include <net/checksum.h>
|
||||
#include <net/dsfield.h>
|
||||
#include <net/sctp/checksum.h>
|
||||
|
||||
#include "datapath.h"
|
||||
#include "flow.h"
|
||||
#include "vport.h"
|
||||
|
||||
/* Forward declaration: actions can nest (sample, recirculation). */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

/* One postponed action execution (sample actions or recirculation),
 * queued so nested action lists run after the current list finishes.
 */
struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;	/* NULL is used to mark recirculation. */

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

/* Per-CPU queue of deferred actions. */
static struct action_fifo __percpu *action_fifos;
/* Per-CPU nesting depth of ovs_execute_actions(). */
static DEFINE_PER_CPU(int, exec_actions_level);
|
||||
|
||||
static void action_fifo_init(struct action_fifo *fifo)
|
||||
{
|
||||
fifo->head = 0;
|
||||
fifo->tail = 0;
|
||||
}
|
||||
|
||||
static bool action_fifo_is_empty(struct action_fifo *fifo)
|
||||
{
|
||||
return (fifo->head == fifo->tail);
|
||||
}
|
||||
|
||||
/* Dequeue the next deferred action, or return NULL if the queue is empty. */
static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}
|
||||
|
||||
/* Reserve the next free slot, or return NULL when the queue is full.
 * NOTE(review): the ">= SIZE - 1" bound leaves the last array slot unused,
 * so at most DEFERRED_ACTION_FIFO_SIZE - 1 entries are queued — confirm
 * this is intentional.
 */
static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}
|
||||
|
||||
/* Queue (skb, key, actions) for deferred execution on this CPU.  Returns
 * the queued entry, or NULL if the fifo is full (the caller must then
 * drop the skb).
 */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    struct sw_flow_key *key,
						    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;	/* clone the key; caller's may change */
	}

	return da;
}
|
||||
|
||||
/* Ensure the first write_len bytes of skb are linear and safe to modify.
 * Returns 0 on success or a negative errno.
 */
static int make_writable(struct sk_buff *skb, int write_len)
{
	if (!pskb_may_pull(skb, write_len))
		return -ENOMEM;

	/* An unshared (or clone-writable) head needs no copy. */
	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
|
||||
|
||||
/* Remove the VLAN header from the packet and update the csum accordingly.
 * The removed TCI (network byte order) is returned through *current_tci.
 */
static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
	struct vlan_hdr *vhdr;
	int err;

	err = make_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		return err;

	/* CHECKSUM_COMPLETE covers the whole frame: subtract the four VLAN
	 * bytes that are about to be removed.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*current_tci = vhdr->h_vlan_TCI;

	/* Slide the two MAC addresses up over the VLAN tag. */
	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;
	if (skb_network_offset(skb) < ETH_HLEN)
		skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_len(skb);

	return 0;
}
|
||||
|
||||
/* Strip the outermost VLAN tag.  A hardware-accelerated tag is simply
 * cleared; otherwise the in-packet 802.1Q tag is removed.  If another
 * 802.1Q tag follows, it is promoted into the hw-accel slot.
 */
static int pop_vlan(struct sk_buff *skb)
{
	__be16 tci;
	int err;

	if (likely(vlan_tx_tag_present(skb))) {
		skb->vlan_tci = 0;
	} else {
		/* Not an 802.1Q frame, or truncated: nothing to pop. */
		if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __pop_vlan_tci(skb, &tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely(skb->protocol != htons(ETH_P_8021Q) ||
		   skb->len < VLAN_ETH_HLEN))
		return 0;

	err = __pop_vlan_tci(skb, &tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci));
	return 0;
}
|
||||
|
||||
/* Push a new outer VLAN tag.  A pre-existing hw-accel tag is first written
 * back into the packet so the new tag can take the accel slot.
 * On -ENOMEM the skb has already been freed by vlan_insert_tag_set_proto().
 */
static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
{
	if (unlikely(vlan_tx_tag_present(skb))) {
		u16 current_tag;

		/* push down current VLAN tag */
		current_tag = vlan_tx_tag_get(skb);

		skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
						current_tag);
		if (!skb)
			return -ENOMEM;

		/* CHECKSUM_COMPLETE must grow by the inserted VLAN bytes. */
		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));

	}
	/* VLAN_TAG_PRESENT is an in-kernel flag bit, not part of the TCI. */
	__vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
	return 0;
}
|
||||
|
||||
/* Overwrite the Ethernet source and destination addresses, keeping any
 * CHECKSUM_COMPLETE value consistent.  Returns 0 or a negative errno.
 */
static int set_eth_addr(struct sk_buff *skb,
			const struct ovs_key_ethernet *eth_key)
{
	int err;
	err = make_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	/* Remove the old MAC bytes from the csum before rewriting... */
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
	ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);

	/* ...and fold the new ones back in. */
	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	return 0;
}
|
||||
|
||||
/* Rewrite one IPv4 address (*addr -> new_addr), incrementally fixing the
 * L4 pseudo-header checksum (TCP/UDP) and the IP header checksum.
 */
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 *addr, new_addr, 1);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			/* A zero UDP checksum means "none": only update a
			 * real checksum (or a CHECKSUM_PARTIAL skb).
			 */
			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 *addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}

	csum_replace4(&nh->check, *addr, new_addr);
	/* Any cached flow hash was computed over the old address. */
	skb_clear_hash(skb);
	*addr = new_addr;
}
|
||||
|
||||
/* Fix the L4 checksum (TCP, UDP or ICMPv6) after an IPv6 address rewrite.
 * IPv6 itself carries no header checksum, so only L4 needs patching.
 */
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, 1);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			/* A zero UDP checksum means "none": leave it. */
			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, 1);
	}
}
|
||||
|
||||
/* Install a new IPv6 address.  The L4 checksum is updated first, while the
 * old address is still in place (the incremental fix needs both values).
 */
static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}
|
||||
|
||||
/* Write the 8-bit IPv6 traffic class, which straddles the priority field
 * (high nibble) and the top nibble of flow_lbl[0] (low nibble).
 */
static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc)
{
	u8 tc_low = tc & 0x0F;

	nh->priority = tc >> 4;
	nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | (tc_low << 4);
}
|
||||
|
||||
/* Store the 20-bit IPv6 flow label across the three flow_lbl bytes,
 * preserving the traffic-class nibble in the top half of flow_lbl[0].
 */
static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl)
{
	nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | ((fl >> 16) & 0x0F);
	nh->flow_lbl[1] = (fl >> 8) & 0xFF;
	nh->flow_lbl[2] = fl & 0xFF;
}
|
||||
|
||||
/* Update the IPv4 TTL, patching the header checksum incrementally.  The
 * TTL occupies the high byte of its 16-bit header word, hence the << 8.
 */
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
{
	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}
|
||||
|
||||
/* Apply an OVS_KEY_ATTR_IPV4 set action: rewrite addresses, TOS and TTL to
 * the values in ipv4_key.  Returns 0 or a negative errno.
 */
static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
{
	struct iphdr *nh;
	int err;

	err = make_writable(skb, skb_network_offset(skb) +
				 sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Each helper below also fixes up the affected checksums. */
	if (ipv4_key->ipv4_src != nh->saddr)
		set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);

	if (ipv4_key->ipv4_dst != nh->daddr)
		set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);

	if (ipv4_key->ipv4_tos != nh->tos)
		ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);

	if (ipv4_key->ipv4_ttl != nh->ttl)
		set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);

	return 0;
}
|
||||
|
||||
/* Apply an OVS_KEY_ATTR_IPV6 set action: rewrite addresses, traffic class,
 * flow label and hop limit.  Returns 0 or a negative errno.
 */
static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key)
{
	struct ipv6hdr *nh;
	int err;
	__be32 *saddr;
	__be32 *daddr;

	err = make_writable(skb, skb_network_offset(skb) +
			    sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	saddr = (__be32 *)&nh->saddr;
	daddr = (__be32 *)&nh->daddr;

	if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src)))
		set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
			      ipv6_key->ipv6_src, true);

	if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;

		/* If a routing header is present, the header's destination
		 * is not the final one, so the L4 pseudo-header checksum
		 * does not cover it and must not be recalculated.
		 */
		if (ipv6_ext_hdr(nh->nexthdr))
			recalc_csum = ipv6_find_hdr(skb, &offset,
						    NEXTHDR_ROUTING, NULL,
						    &flags) != NEXTHDR_ROUTING;

		set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
			      ipv6_key->ipv6_dst, recalc_csum);
	}

	set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
	set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
	nh->hop_limit = ipv6_key->ipv6_hlimit;

	return 0;
}
|
||||
|
||||
/* Must follow make_writable() since that can move the skb data.
 * Rewrite one 16-bit transport port, incrementally fixing *check.
 */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
	*port = new_port;
	skb_clear_hash(skb);
}
|
||||
|
||||
/* Rewrite a UDP port.  A zero UDP checksum means "no checksum", in which
 * case the port is updated without touching the checksum field.
 */
static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
{
	struct udphdr *uh = udp_hdr(skb);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		set_tp_port(skb, port, new_port, &uh->check);

		/* 0 would mean "no checksum": use the mangled form instead. */
		if (!uh->check)
			uh->check = CSUM_MANGLED_0;
	} else {
		*port = new_port;
		skb_clear_hash(skb);
	}
}
|
||||
|
||||
/* Apply an OVS_KEY_ATTR_UDP set action (source/destination ports).
 * Returns 0 or a negative errno.
 */
static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
{
	struct udphdr *uh;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
				 sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	if (udp_port_key->udp_src != uh->source)
		set_udp_port(skb, &uh->source, udp_port_key->udp_src);

	if (udp_port_key->udp_dst != uh->dest)
		set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);

	return 0;
}
|
||||
|
||||
/* Apply an OVS_KEY_ATTR_TCP set action (source/destination ports).
 * Returns 0 or a negative errno.
 */
static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
{
	struct tcphdr *th;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
				 sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	if (tcp_port_key->tcp_src != th->source)
		set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);

	if (tcp_port_key->tcp_dst != th->dest)
		set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);

	return 0;
}
|
||||
|
||||
/* Apply an OVS_KEY_ATTR_SCTP set action.  The SCTP checksum covers the
 * whole packet, so it is recomputed rather than incrementally patched.
 */
static int set_sctp(struct sk_buff *skb,
		    const struct ovs_key_sctp *sctp_port_key)
{
	struct sctphdr *sh;
	int err;
	unsigned int sctphoff = skb_transport_offset(skb);

	err = make_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	if (sctp_port_key->sctp_src != sh->source ||
	    sctp_port_key->sctp_dst != sh->dest) {
		__le32 old_correct_csum, new_csum, old_csum;

		old_csum = sh->checksum;
		old_correct_csum = sctp_compute_cksum(skb, sctphoff);

		sh->source = sctp_port_key->sctp_src;
		sh->dest = sctp_port_key->sctp_dst;

		new_csum = sctp_compute_cksum(skb, sctphoff);

		/* Carry any checksum errors through. */
		sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

		skb_clear_hash(skb);
	}

	return 0;
}
|
||||
|
||||
/* Transmit skb on datapath port out_port.  Consumes the skb in all cases;
 * tolerates a NULL skb so callers may pass a failed skb_clone() result.
 */
static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
	struct vport *vport;

	if (unlikely(!skb))
		return -ENOMEM;

	vport = ovs_vport_rcu(dp, out_port);
	if (unlikely(!vport)) {
		kfree_skb(skb);
		return -ENODEV;
	}

	ovs_vport_send(vport, skb);
	return 0;
}
|
||||
|
||||
/* Send the packet to userspace as an OVS_PACKET_CMD_ACTION upcall, using
 * the optional USERDATA/PID sub-attributes of the userspace action.
 */
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.key = key;
	upcall.userdata = NULL;
	upcall.portid = 0;	/* portid 0: packet is accounted as lost */

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;
		}
	}

	return ovs_dp_upcall(dp, skb, &upcall);
}
|
||||
|
||||
static bool last_action(const struct nlattr *a, int rem)
|
||||
{
|
||||
return a->nla_len == rem;
|
||||
}
|
||||
|
||||
static int sample(struct datapath *dp, struct sk_buff *skb,
|
||||
struct sw_flow_key *key, const struct nlattr *attr)
|
||||
{
|
||||
const struct nlattr *acts_list = NULL;
|
||||
const struct nlattr *a;
|
||||
int rem;
|
||||
|
||||
for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
|
||||
a = nla_next(a, &rem)) {
|
||||
switch (nla_type(a)) {
|
||||
case OVS_SAMPLE_ATTR_PROBABILITY:
|
||||
if (prandom_u32() >= nla_get_u32(a))
|
||||
return 0;
|
||||
break;
|
||||
|
||||
case OVS_SAMPLE_ATTR_ACTIONS:
|
||||
acts_list = a;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
rem = nla_len(acts_list);
|
||||
a = nla_data(acts_list);
|
||||
|
||||
/* Actions list is empty, do nothing */
|
||||
if (unlikely(!rem))
|
||||
return 0;
|
||||
|
||||
/* The only known usage of sample action is having a single user-space
|
||||
* action. Treat this usage as a special case.
|
||||
* The output_userspace() should clone the skb to be sent to the
|
||||
* user space. This skb will be consumed by its caller.
|
||||
*/
|
||||
if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
|
||||
last_action(a, rem)))
|
||||
return output_userspace(dp, skb, key, a);
|
||||
|
||||
skb = skb_clone(skb, GFP_ATOMIC);
|
||||
if (!skb)
|
||||
/* Skip the sample action when out of memory. */
|
||||
return 0;
|
||||
|
||||
if (!add_deferred_actions(skb, key, a)) {
|
||||
if (net_ratelimit())
|
||||
pr_warn("%s: deferred actions limit reached, dropping sample action\n",
|
||||
ovs_dp_name(dp));
|
||||
|
||||
kfree_skb(skb);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
|
||||
const struct nlattr *attr)
|
||||
{
|
||||
struct ovs_action_hash *hash_act = nla_data(attr);
|
||||
u32 hash = 0;
|
||||
|
||||
/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
|
||||
hash = skb_get_hash(skb);
|
||||
hash = jhash_1word(hash, hash_act->hash_basis);
|
||||
if (!hash)
|
||||
hash = 0x1;
|
||||
|
||||
key->ovs_flow_hash = hash;
|
||||
}
|
||||
|
||||
/* Dispatch an OVS_ACTION_ATTR_SET action to the per-field helper.
 * Returns 0 or the helper's negative errno.
 */
static int execute_set_action(struct sk_buff *skb,
			      const struct nlattr *nested_attr)
{
	int err = 0;

	switch (nla_type(nested_attr)) {
	case OVS_KEY_ATTR_PRIORITY:
		skb->priority = nla_get_u32(nested_attr);
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		skb->mark = nla_get_u32(nested_attr);
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* NOTE(review): stores a pointer into the attribute data;
		 * presumably valid for the life of the flow's actions —
		 * confirm against datapath.c.
		 */
		OVS_CB(skb)->egress_tun_info = nla_data(nested_attr);
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, nla_data(nested_attr));
		break;
	}

	return err;
}
|
||||
|
||||
/* Queue the packet for recirculation (another pass through the flow table
 * with key->recirc_id set).  If this is not the last action, the skb is
 * cloned so the remaining actions keep operating on the original.
 */
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;
	int err;

	/* Earlier actions may have rewritten headers; refresh the key. */
	err = ovs_flow_key_update(skb, key);
	if (err)
		return err;

	if (!last_action(a, rem)) {
		/* Recirc action is the not the last action
		 * of the action list, need to clone the skb.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	/* actions == NULL marks the deferred entry as a recirculation. */
	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}
|
||||
|
||||
/* Execute a list of actions against 'skb'.  Consumes the skb: it is
 * forwarded by the final output action, or freed/consumed otherwise.
 */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so that doing a clone and
	 * then freeing the original skbuff is wasteful.  So the following code
	 * is slightly obscure just to avoid that. */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		/* Flush a pending output with a clone, keeping the original
		 * skb for the actions that follow.
		 */
		if (prev_port != -1) {
			do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a);
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, nla_data(a));
			if (unlikely(err)) /* skb already freed. */
				return err;
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (last_action(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a);
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	/* A final output takes ownership of skb; otherwise we are done with it. */
	if (prev_port != -1)
		do_output(dp, skb, prev_port);
	else
		consume_skb(skb);

	return 0;
}
|
||||
|
||||
/* Drain this CPU's deferred-action queue (sample/recirc entries queued
 * while the top-level action list ran).
 */
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO in case there is no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finishing executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			/* NULL actions means "recirculate". */
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}
|
||||
|
||||
/* Execute a list of actions against 'skb'.  Public entry point; consumes
 * the skb.  Tracks per-CPU nesting so only the outermost invocation
 * drains the deferred-action queue.
 */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			struct sw_flow_key *key)
{
	int level = this_cpu_read(exec_actions_level);
	struct sw_flow_actions *acts;
	int err;

	acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);

	this_cpu_inc(exec_actions_level);
	OVS_CB(skb)->egress_tun_info = NULL;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	/* Only level 0 (the outermost call on this CPU) drains the fifo. */
	if (!level)
		process_deferred_actions(dp);

	this_cpu_dec(exec_actions_level);
	return err;
}
|
||||
|
||||
/* Allocate the per-CPU deferred-action queues.  Returns 0 or -ENOMEM. */
int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	return 0;
}
|
||||
|
||||
/* Free the per-CPU deferred-action queues. */
void action_fifos_exit(void)
{
	free_percpu(action_fifos);
}
|
2193
net/openvswitch/datapath.c
Normal file
2193
net/openvswitch/datapath.c
Normal file
File diff suppressed because it is too large
Load diff
209
net/openvswitch/datapath.h
Normal file
209
net/openvswitch/datapath.h
Normal file
|
@ -0,0 +1,209 @@
|
|||
/*
|
||||
* Copyright (c) 2007-2014 Nicira, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of version 2 of the GNU General Public
|
||||
* License as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
* 02110-1301, USA
|
||||
*/
|
||||
|
||||
#ifndef DATAPATH_H
|
||||
#define DATAPATH_H 1
|
||||
|
||||
#include <asm/page.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/u64_stats_sync.h>
|
||||
|
||||
#include "flow.h"
|
||||
#include "flow_table.h"
|
||||
#include "vport.h"
|
||||
|
||||
/* Port numbers are u16, so this is the hard upper bound on ports. */
#define DP_MAX_PORTS USHRT_MAX
/* Number of buckets in each datapath's vport hash table. */
#define DP_VPORT_HASH_BUCKETS 1024

/* Maximum sample-action nesting depth; presumably enforced during flow
 * validation — confirm against flow_netlink.c.
 */
#define SAMPLE_ACTION_DEPTH 3
|
||||
|
||||
/**
 * struct dp_stats_percpu - per-cpu packet processing statistics for a given
 * datapath.
 * @n_hit: Number of received packets for which a matching flow was found in
 * the flow table.
 * @n_missed: Number of received packets that had no matching flow in the flow
 * table.  The sum of @n_hit and @n_missed is the number of packets that have
 * been received by the datapath.
 * @n_lost: Number of received packets that had no matching flow in the flow
 * table that could not be sent to userspace (normally due to an overflow in
 * one of the datapath's queues).
 * @n_mask_hit: Number of masks looked up for flow match.
 * @n_mask_hit / (@n_hit + @n_missed) will be the average masks looked
 * up per packet.
 * @syncp: Synchronisation point for the 64-bit counters.
 */
struct dp_stats_percpu {
	u64 n_hit;
	u64 n_missed;
	u64 n_lost;
	u64 n_mask_hit;
	struct u64_stats_sync syncp;
};
|
||||
|
||||
/**
 * struct datapath - datapath for flow-based packet switching
 * @rcu: RCU callback head for deferred destruction.
 * @list_node: Element in global 'dps' list.
 * @table: flow table.
 * @ports: Hash table for ports.  %OVSP_LOCAL port always exists.  Protected by
 * ovs_mutex and RCU.
 * @stats_percpu: Per-CPU datapath statistics.
 * @net: Reference to net namespace.
 * @user_features: Feature flags requested by userspace (presumably the
 * OVS_DP_F_* bits — confirm against datapath.c).
 *
 * Context: See the comment on locking at the top of datapath.c for additional
 * locking information.
 */
struct datapath {
	struct rcu_head rcu;
	struct list_head list_node;

	/* Flow table. */
	struct flow_table table;

	/* Switch ports. */
	struct hlist_head *ports;

	/* Stats. */
	struct dp_stats_percpu __percpu *stats_percpu;

#ifdef CONFIG_NET_NS
	/* Network namespace ref. */
	struct net *net;
#endif

	u32 user_features;
};
|
||||
|
||||
/**
 * struct ovs_skb_cb - OVS data in skb CB
 * @flow: The flow associated with this packet.  May be %NULL if no flow.
 * @egress_tun_info: Tunnel information about this packet on egress path.
 * NULL if the packet is not being tunneled.
 * @input_vport: The original vport packet came in on.  This value is cached
 * when a packet is received by OVS.
 */
struct ovs_skb_cb {
	struct sw_flow *flow;
	struct ovs_tunnel_info *egress_tun_info;
	struct vport *input_vport;
};
/* Accessor for the OVS control block stored in skb->cb. */
#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
|
||||
|
||||
/**
 * struct dp_upcall_info - metadata to include with a packet sent to userspace
 * @cmd: One of %OVS_PACKET_CMD_*.
 * @key: Becomes %OVS_PACKET_ATTR_KEY.  Must be nonnull.
 * @userdata: If nonnull, its variable-length value is passed to userspace as
 * %OVS_PACKET_ATTR_USERDATA.
 * @portid: Netlink PID to which packet should be sent.  If @portid is 0 then
 * no packet is sent and the packet is accounted in the datapath's @n_lost
 * counter.
 */
struct dp_upcall_info {
	u8 cmd;
	const struct sw_flow_key *key;
	const struct nlattr *userdata;
	u32 portid;
};
|
||||
|
||||
/**
 * struct ovs_net - Per net-namespace data for ovs.
 * @dps: List of datapaths to enable dumping them all out.
 * Protected by genl_mutex.
 * @dp_notify_work: Deferred work used to detach vports whose underlying
 * net_device is being unregistered (see dp_notify.c).
 * @vport_net: Per-namespace vport state.
 */
struct ovs_net {
	struct list_head dps;
	struct work_struct dp_notify_work;
	struct vport_net vport_net;
};
|
||||
|
||||
/* Key for net_generic() lookup of struct ovs_net. */
extern int ovs_net_id;
/* Acquire/release the global ovs_mutex protecting datapath configuration. */
void ovs_lock(void);
void ovs_unlock(void);

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void);
#else
/* Without lockdep we cannot check, so optimistically report "held". */
#define lockdep_ovsl_is_held()	1
#endif

/* Warn if the caller does not hold ovs_mutex (when lockdep is enabled). */
#define ASSERT_OVSL()		WARN_ON(!lockdep_ovsl_is_held())
/* RCU dereference valid only while holding ovs_mutex. */
#define ovsl_dereference(p)					\
	rcu_dereference_protected(p, lockdep_ovsl_is_held())
/* RCU dereference valid under either rcu_read_lock or ovs_mutex. */
#define rcu_dereference_ovsl(p)					\
	rcu_dereference_check(p, lockdep_ovsl_is_held())
|
||||
|
||||
/* Return the network namespace this datapath belongs to. */
static inline struct net *ovs_dp_get_net(struct datapath *dp)
{
	return read_pnet(&dp->net);
}
|
||||
|
||||
/* Record the network namespace this datapath belongs to. */
static inline void ovs_dp_set_net(struct datapath *dp, struct net *net)
{
	write_pnet(&dp->net, net);
}
|
||||
|
||||
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no);

/* Look up a vport by port number; caller must be in an RCU read-side
 * critical section.
 */
static inline struct vport *ovs_vport_rcu(const struct datapath *dp, int port_no)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return ovs_lookup_vport(dp, port_no);
}
|
||||
|
||||
/* Look up a vport by port number; caller must hold either rcu_read_lock
 * or ovs_mutex.
 */
static inline struct vport *ovs_vport_ovsl_rcu(const struct datapath *dp, int port_no)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
	return ovs_lookup_vport(dp, port_no);
}
|
||||
|
||||
/* Look up a vport by port number; caller must hold ovs_mutex. */
static inline struct vport *ovs_vport_ovsl(const struct datapath *dp, int port_no)
{
	ASSERT_OVSL();
	return ovs_lookup_vport(dp, port_no);
}
|
||||
|
||||
/* Netdevice event notifier (see dp_notify.c). */
extern struct notifier_block ovs_dp_device_notifier;
/* Generic netlink family for vport commands. */
extern struct genl_family dp_vport_genl_family;

void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key);
void ovs_dp_detach_port(struct vport *);
int ovs_dp_upcall(struct datapath *, struct sk_buff *,
		  const struct dp_upcall_info *);

const char *ovs_dp_name(const struct datapath *dp);
struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
					 u8 cmd);

int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			struct sw_flow_key *);

void ovs_dp_notify_wq(struct work_struct *work);

int action_fifos_init(void);
void action_fifos_exit(void);

/* Rate-limited pr_info for errors in netlink input from userspace. */
#define OVS_NLERR(fmt, ...)					\
do {								\
	if (net_ratelimit())					\
		pr_info("netlink: " fmt, ##__VA_ARGS__);	\
} while (0)
|
||||
#endif /* datapath.h */
|
102
net/openvswitch/dp_notify.c
Normal file
102
net/openvswitch/dp_notify.c
Normal file
|
@ -0,0 +1,102 @@
|
|||
/*
|
||||
* Copyright (c) 2007-2012 Nicira, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of version 2 of the GNU General Public
|
||||
* License as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
* 02110-1301, USA
|
||||
*/
|
||||
|
||||
#include <linux/netdevice.h>
|
||||
#include <net/genetlink.h>
|
||||
#include <net/netns/generic.h>
|
||||
|
||||
#include "datapath.h"
|
||||
#include "vport-internal_dev.h"
|
||||
#include "vport-netdev.h"
|
||||
|
||||
/* Detach @vport from its datapath and multicast an OVS_VPORT_CMD_DEL
 * notification to userspace.  The notification skb is built *before*
 * the detach because building it needs the vport's state.
 */
static void dp_detach_port_notify(struct vport *vport)
{
	struct sk_buff *notify;
	struct datapath *dp;

	dp = vport->dp;
	notify = ovs_vport_cmd_build_info(vport, 0, 0,
					  OVS_VPORT_CMD_DEL);
	ovs_dp_detach_port(vport);
	if (IS_ERR(notify)) {
		/* Could not build the message: tell listeners they missed
		 * an event instead of silently dropping it.
		 */
		genl_set_err(&dp_vport_genl_family, ovs_dp_get_net(dp), 0,
			     0, PTR_ERR(notify));
		return;
	}

	genlmsg_multicast_netns(&dp_vport_genl_family,
				ovs_dp_get_net(dp), notify, 0,
				0, GFP_KERNEL);
}
|
||||
|
||||
/* Workqueue handler scheduled from dp_device_event(): walks every
 * datapath in the namespace and detaches (with notification) any
 * netdev vport whose underlying device has already left the datapath
 * (IFF_OVS_DATAPATH cleared by ovs_netdev_detach_dev()).
 */
void ovs_dp_notify_wq(struct work_struct *work)
{
	struct ovs_net *ovs_net = container_of(work, struct ovs_net, dp_notify_work);
	struct datapath *dp;

	ovs_lock();
	list_for_each_entry(dp, &ovs_net->dps, list_node) {
		int i;

		for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
			struct vport *vport;
			struct hlist_node *n;

			/* _safe variant: dp_detach_port_notify() removes
			 * the vport from this hash chain.
			 */
			hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) {
				struct netdev_vport *netdev_vport;

				if (vport->ops->type != OVS_VPORT_TYPE_NETDEV)
					continue;

				netdev_vport = netdev_vport_priv(vport);
				if (!(netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH))
					dp_detach_port_notify(vport);
			}
		}
	}
	ovs_unlock();
}
|
||||
|
||||
/* Netdevice notifier callback.  On NETDEV_UNREGISTER of a device that
 * backs a (non-internal) OVS vport, immediately unlink it from the
 * datapath and defer the heavier teardown/notification to a workqueue,
 * since this runs in notifier context.
 */
static int dp_device_event(struct notifier_block *unused, unsigned long event,
			   void *ptr)
{
	struct ovs_net *ovs_net;
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vport *vport = NULL;

	/* Internal devices are torn down through their own path. */
	if (!ovs_is_internal_dev(dev))
		vport = ovs_netdev_get_vport(dev);

	if (!vport)
		return NOTIFY_DONE;

	if (event == NETDEV_UNREGISTER) {
		/* upper_dev_unlink and decrement promisc immediately */
		ovs_netdev_detach_dev(vport);

		/* schedule vport destroy, dev_put and genl notification */
		ovs_net = net_generic(dev_net(dev), ovs_net_id);
		queue_work(system_wq, &ovs_net->dp_notify_work);
	}

	return NOTIFY_DONE;
}
|
||||
|
||||
/* Registered with the netdevice notifier chain by the module init code. */
struct notifier_block ovs_dp_device_notifier = {
	.notifier_call = dp_device_event
};
|
695
net/openvswitch/flow.c
Normal file
695
net/openvswitch/flow.c
Normal file
|
@ -0,0 +1,695 @@
|
|||
/*
|
||||
* Copyright (c) 2007-2014 Nicira, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of version 2 of the GNU General Public
|
||||
* License as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
* 02110-1301, USA
|
||||
*/
|
||||
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/if_ether.h>
|
||||
#include <linux/if_vlan.h>
|
||||
#include <net/llc_pdu.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/jhash.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/llc.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/if_arp.h>
|
||||
#include <linux/ip.h>
|
||||
#include <linux/ipv6.h>
|
||||
#include <linux/sctp.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/tcp.h>
|
||||
#include <linux/udp.h>
|
||||
#include <linux/icmp.h>
|
||||
#include <linux/icmpv6.h>
|
||||
#include <linux/rculist.h>
|
||||
#include <net/ip.h>
|
||||
#include <net/ip_tunnels.h>
|
||||
#include <net/ipv6.h>
|
||||
#include <net/ndisc.h>
|
||||
|
||||
#include "datapath.h"
|
||||
#include "flow.h"
|
||||
#include "flow_netlink.h"
|
||||
|
||||
u64 ovs_flow_used_time(unsigned long flow_jiffies)
|
||||
{
|
||||
struct timespec cur_ts;
|
||||
u64 cur_ms, idle_ms;
|
||||
|
||||
ktime_get_ts(&cur_ts);
|
||||
idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
|
||||
cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
|
||||
cur_ts.tv_nsec / NSEC_PER_MSEC;
|
||||
|
||||
return cur_ms - idle_ms;
|
||||
}
|
||||
|
||||
/* Extract the 12 TCP flag bits (network byte order) from a TCP header. */
#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))

/* Account one packet hit on @flow.  Stats are kept per NUMA node: node 0's
 * instance is pre-allocated with the flow; other nodes' instances are
 * allocated lazily on first write from that node.  Caller holds
 * rcu_read_lock (datapath receive path).
 */
void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
			   struct sk_buff *skb)
{
	struct flow_stats *stats;
	int node = numa_node_id();

	stats = rcu_dereference(flow->stats[node]);

	/* Check if already have node-specific stats. */
	if (likely(stats)) {
		spin_lock(&stats->lock);
		/* Mark if we write on the pre-allocated stats. */
		if (node == 0 && unlikely(flow->stats_last_writer != node))
			flow->stats_last_writer = node;
	} else {
		stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
		spin_lock(&stats->lock);

		/* If the current NUMA-node is the only writer on the
		 * pre-allocated stats keep using them.
		 */
		if (unlikely(flow->stats_last_writer != node)) {
			/* A previous locker may have already allocated the
			 * stats, so we need to check again.  If node-specific
			 * stats were already allocated, we update the pre-
			 * allocated stats as we have already locked them.
			 */
			if (likely(flow->stats_last_writer != NUMA_NO_NODE)
			    && likely(!rcu_access_pointer(flow->stats[node]))) {
				/* Try to allocate node-specific stats. */
				struct flow_stats *new_stats;

				new_stats =
					kmem_cache_alloc_node(flow_stats_cache,
							      GFP_THISNODE |
							      __GFP_NOMEMALLOC,
							      node);
				if (likely(new_stats)) {
					new_stats->used = jiffies;
					new_stats->packet_count = 1;
					new_stats->byte_count = skb->len;
					new_stats->tcp_flags = tcp_flags;
					spin_lock_init(&new_stats->lock);

					rcu_assign_pointer(flow->stats[node],
							   new_stats);
					goto unlock;
				}
			}
			flow->stats_last_writer = node;
		}
	}

	stats->used = jiffies;
	stats->packet_count++;
	stats->byte_count += skb->len;
	stats->tcp_flags |= tcp_flags;
unlock:
	spin_unlock(&stats->lock);
}
|
||||
|
||||
/* Must be called with rcu_read_lock or ovs_mutex.
 *
 * Sums the per-NUMA-node stats of @flow into @ovs_stats, and reports the
 * most recent @used timestamp and the union of observed @tcp_flags.
 */
void ovs_flow_stats_get(const struct sw_flow *flow,
			struct ovs_flow_stats *ovs_stats,
			unsigned long *used, __be16 *tcp_flags)
{
	int node;

	*used = 0;
	*tcp_flags = 0;
	memset(ovs_stats, 0, sizeof(*ovs_stats));

	for_each_node(node) {
		struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[node]);

		if (stats) {
			/* Local CPU may write on non-local stats, so we must
			 * block bottom-halves here.
			 */
			spin_lock_bh(&stats->lock);
			if (!*used || time_after(stats->used, *used))
				*used = stats->used;
			*tcp_flags |= stats->tcp_flags;
			ovs_stats->n_packets += stats->packet_count;
			ovs_stats->n_bytes += stats->byte_count;
			spin_unlock_bh(&stats->lock);
		}
	}
}
|
||||
|
||||
/* Called with ovs_mutex. */
|
||||
void ovs_flow_stats_clear(struct sw_flow *flow)
|
||||
{
|
||||
int node;
|
||||
|
||||
for_each_node(node) {
|
||||
struct flow_stats *stats = ovsl_dereference(flow->stats[node]);
|
||||
|
||||
if (stats) {
|
||||
spin_lock_bh(&stats->lock);
|
||||
stats->used = 0;
|
||||
stats->packet_count = 0;
|
||||
stats->byte_count = 0;
|
||||
stats->tcp_flags = 0;
|
||||
spin_unlock_bh(&stats->lock);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Ensure the first @len bytes of the packet exist and are in the linear
 * data area.  Returns -EINVAL if the packet is too short, -ENOMEM if it
 * cannot be linearized, 0 on success.
 */
static int check_header(struct sk_buff *skb, int len)
{
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;
	return 0;
}
|
||||
|
||||
/* True if a complete Ethernet ARP header is available in the linear area. */
static bool arphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_network_offset(skb) +
				  sizeof(struct arp_eth_header));
}
|
||||
|
||||
/* Validate the IPv4 header (including options, per its IHL field) and set
 * the transport header to just past it.  Returns 0 or a negative errno.
 */
static int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	ip_len = ip_hdrlen(skb);
	/* IHL must describe at least the fixed header and fit in the packet. */
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}
|
||||
|
||||
/* True if a complete TCP header (including options, per its data offset)
 * is available in the linear area.
 */
static bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	int tcp_len;

	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
		return false;

	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
		     skb->len < th_ofs + tcp_len))
		return false;

	return true;
}
|
||||
|
||||
/* True if a complete UDP header is available in the linear area. */
static bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
}
|
||||
|
||||
/* True if a complete SCTP common header is available in the linear area. */
static bool sctphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct sctphdr));
}
|
||||
|
||||
/* True if a complete ICMPv4 header is available in the linear area. */
static bool icmphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmphdr));
}
|
||||
|
||||
/* Parse the IPv6 header plus extension headers, filling key->ip and
 * key->ipv6 and setting the transport header past the extension chain.
 * Returns the total network-header length (base + extensions), or a
 * negative errno on malformed input.
 */
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	int payload_ofs;
	struct ipv6hdr *nh;
	uint8_t nexthdr;
	__be16 frag_off;
	int err;

	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	nexthdr = nh->nexthdr;
	payload_ofs = (u8 *)(nh + 1) - skb->data;

	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	key->ipv6.addr.src = nh->saddr;
	key->ipv6.addr.dst = nh->daddr;

	/* Walk the extension-header chain to find the upper-layer protocol
	 * and any fragment header.
	 */
	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
	if (unlikely(payload_ofs < 0))
		return -EINVAL;

	if (frag_off) {
		/* Nonzero fragment offset (high 13 bits) => later fragment. */
		if (frag_off & htons(~0x7))
			key->ip.frag = OVS_FRAG_TYPE_LATER;
		else
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
	} else {
		key->ip.frag = OVS_FRAG_TYPE_NONE;
	}

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}
|
||||
|
||||
/* True if a complete ICMPv6 header is available in the linear area. */
static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmp6hdr));
}
|
||||
|
||||
/* Parse an in-band 802.1Q tag that follows the Ethernet addresses (only
 * called when the tag was not already stripped into skb->vlan_tci).
 * Records the TCI in the key and pulls the tag so that the ethertype
 * parser sees the inner type.  A too-short frame is not an error: the
 * key is simply left without VLAN info.
 */
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct qtag_prefix {
		__be16 eth_type; /* ETH_P_8021Q */
		__be16 tci;
	};
	struct qtag_prefix *qp;

	if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
		return 0;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
				    sizeof(__be16))))
		return -ENOMEM;

	qp = (struct qtag_prefix *) skb->data;
	/* VLAN_TAG_PRESENT marks "tag was present" even for VID 0. */
	key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
	__skb_pull(skb, sizeof(struct qtag_prefix));

	return 0;
}
|
||||
|
||||
/* Determine the frame's ethertype, handling 802.2/802.3 encapsulation:
 * Ethernet II frames return their type field directly; LLC/SNAP frames
 * return the SNAP-encapsulated type; anything else is reported as
 * ETH_P_802_2.  Returns htons(0) only on pull (memory) failure.
 * Pulls the consumed type/LLC bytes from the skb.
 */
static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;  /* Always 0xAA */
		u8  ssap;  /* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	/* Values >= ETH_P_802_3_MIN are true ethertypes (Ethernet II). */
	if (ntohs(proto) >= ETH_P_802_3_MIN)
		return proto;

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));

	if (ntohs(llc->ethertype) >= ETH_P_802_3_MIN)
		return llc->ethertype;

	return htons(ETH_P_802_2);
}
|
||||
|
||||
/* Parse an ICMPv6 header into the flow key.  For neighbour discovery
 * (NS/NA with code 0) also extracts the target address and any source/
 * target link-layer address options.  Malformed ND content zeroes the ND
 * fields rather than failing the whole extraction; only a linearize
 * failure returns an error.
 */
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->tp.src = htons(icmp->icmp6_type);
	key->tp.dst = htons(icmp->icmp6_code);
	memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset;

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			return 0;

		if (unlikely(skb_linearize(skb)))
			return -ENOMEM;

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;

		icmp_len -= sizeof(*nd);
		offset = 0;
		/* ND options are a TLV list in units of 8 bytes. */
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				 (struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))
				return 0;

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
			    && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.sll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				   && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.tll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			}

			icmp_len -= opt_len;
			offset += opt_len;
		}
	}

	return 0;

invalid:
	/* Duplicate LL-address option: discard all ND info from the key. */
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

	return 0;
}
|
||||
|
||||
/**
 * key_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @key: output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 *
 * Note: a truncated/invalid L3 header is NOT an error; the corresponding
 * key fields are zeroed and 0 is returned, so the flow still matches on
 * what could be parsed.
 */
static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
{
	int error;
	struct ethhdr *eth;

	/* Flags are always used as part of stats */
	key->tp.flags = 0;

	skb_reset_mac_header(skb);

	/* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
	 * header in the linear data area.
	 */
	eth = eth_hdr(skb);
	ether_addr_copy(key->eth.src, eth->h_source);
	ether_addr_copy(key->eth.dst, eth->h_dest);

	__skb_pull(skb, 2 * ETH_ALEN);
	/* We are going to push all headers that we pull, so no need to
	 * update skb->csum here.
	 */

	key->eth.tci = 0;
	if (vlan_tx_tag_present(skb))
		key->eth.tci = htons(skb->vlan_tci);
	else if (eth->h_proto == htons(ETH_P_8021Q))
		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;

	key->eth.type = parse_ethertype(skb);
	if (unlikely(key->eth.type == htons(0)))
		return -ENOMEM;

	skb_reset_network_header(skb);
	/* Push back everything pulled above so skb->data is restored to
	 * the MAC header.
	 */
	__skb_push(skb, skb->data - skb_mac_header(skb));

	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		error = check_iphdr(skb);
		if (unlikely(error)) {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
			if (error == -EINVAL) {
				/* Truncated header: not fatal, key has no L3. */
				skb->transport_header = skb->network_header;
				error = 0;
			}
			return error;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			/* Later fragments carry no L4 header to parse. */
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			return 0;
		}
		if (nh->frag_off & htons(IP_MF) ||
			skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
		else
			key->ip.frag = OVS_FRAG_TYPE_NONE;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}

		} else if (key->ip.proto == IPPROTO_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order. */
				key->tp.src = htons(icmp->type);
				key->tp.dst = htons(icmp->code);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}

	} else if (key->eth.type == htons(ETH_P_ARP) ||
		   key->eth.type == htons(ETH_P_RARP)) {
		struct arp_eth_header *arp;
		bool arp_available = arphdr_ok(skb);

		arp = (struct arp_eth_header *)skb_network_header(skb);

		/* Only Ethernet/IPv4 ARP is modelled in the flow key. */
		if (arp_available &&
		    arp->ar_hrd == htons(ARPHRD_ETHER) &&
		    arp->ar_pro == htons(ETH_P_IP) &&
		    arp->ar_hln == ETH_ALEN &&
		    arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);
			else
				key->ip.proto = 0;

			memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
			memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
			ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
			ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
		} else {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
		}
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;             /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key);
		if (unlikely(nh_len < 0)) {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
			if (nh_len == -EINVAL) {
				/* Truncated header: not fatal, key has no L3. */
				skb->transport_header = skb->network_header;
				error = 0;
			} else {
				error = nh_len;
			}
			return error;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
			return 0;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, nh_len);
				if (error)
					return error;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}
	}
	return 0;
}
|
||||
|
||||
/* Re-extract the packet-derived parts of @key after an action modified
 * the packet; metadata fields (tunnel, in_port, etc.) are left alone.
 */
int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
{
	return key_extract(skb, key);
}
|
||||
|
||||
/* Build a complete flow key for a received packet: first the metadata
 * (tunnel key/options, priority, input port, skb mark), then the
 * packet-derived fields via key_extract().  @tun_info may be NULL for
 * non-tunneled packets.
 */
int ovs_flow_key_extract(struct ovs_tunnel_info *tun_info,
			 struct sk_buff *skb, struct sw_flow_key *key)
{
	/* Extract metadata from packet. */
	if (tun_info) {
		memcpy(&key->tun_key, &tun_info->tunnel, sizeof(key->tun_key));

		if (tun_info->options) {
			/* Compile-time check that any options_len value fits
			 * in the key's tun_opts buffer.
			 */
			BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
						   8)) - 1
					> sizeof(key->tun_opts));
			memcpy(GENEVE_OPTS(key, tun_info->options_len),
			       tun_info->options, tun_info->options_len);
			key->tun_opts_len = tun_info->options_len;
		} else {
			key->tun_opts_len = 0;
		}
	} else  {
		key->tun_opts_len = 0;
		memset(&key->tun_key, 0, sizeof(key->tun_key));
	}

	key->phy.priority = skb->priority;
	key->phy.in_port = OVS_CB(skb)->input_vport->port_no;
	key->phy.skb_mark = skb->mark;
	key->ovs_flow_hash = 0;
	key->recirc_id = 0;

	return key_extract(skb, key);
}
|
||||
|
||||
/* Build a flow key for a packet injected by userspace (OVS_PACKET_CMD_EXECUTE):
 * metadata comes from the supplied netlink attributes, packet fields from
 * the skb itself.
 */
int ovs_flow_key_extract_userspace(const struct nlattr *attr,
				   struct sk_buff *skb,
				   struct sw_flow_key *key)
{
	int err;

	/* Extract metadata from netlink attributes. */
	err = ovs_nla_get_flow_metadata(attr, key);
	if (err)
		return err;

	return key_extract(skb, key);
}
|
222
net/openvswitch/flow.h
Normal file
222
net/openvswitch/flow.h
Normal file
|
@ -0,0 +1,222 @@
|
|||
/*
|
||||
* Copyright (c) 2007-2014 Nicira, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of version 2 of the GNU General Public
|
||||
* License as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
* 02110-1301, USA
|
||||
*/
|
||||
|
||||
#ifndef FLOW_H
|
||||
#define FLOW_H 1
|
||||
|
||||
#include <linux/cache.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/netlink.h>
|
||||
#include <linux/openvswitch.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/if_ether.h>
|
||||
#include <linux/in6.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/flex_array.h>
|
||||
#include <net/inet_ecn.h>
|
||||
|
||||
struct sk_buff;
|
||||
|
||||
/* Used to memset ovs_key_ipv4_tunnel padding: size of the struct up to and
 * including its last declared member, so only trailing padding is cleared.
 */
#define OVS_TUNNEL_KEY_SIZE			\
	(offsetof(struct ovs_key_ipv4_tunnel, ipv4_ttl) +	\
	 FIELD_SIZEOF(struct ovs_key_ipv4_tunnel, ipv4_ttl))

/* IPv4 tunnel key matched/set by flows.  Packed and 4-byte aligned so it
 * can be compared/hashed as raw bytes.
 */
struct ovs_key_ipv4_tunnel {
	__be64 tun_id;
	__be32 ipv4_src;
	__be32 ipv4_dst;
	__be16 tun_flags;
	u8   ipv4_tos;
	u8   ipv4_ttl;
} __packed __aligned(4); /* Minimize padding. */

/* Tunnel key plus optional variable-length (Geneve) option data. */
struct ovs_tunnel_info {
	struct ovs_key_ipv4_tunnel tunnel;
	struct geneve_opt *options;
	u8 options_len;
};
|
||||
|
||||
/* Store options at the end of the array if they are less than the
 * maximum size. This allows us to get the benefits of variable length
 * matching for small options.
 */
#define GENEVE_OPTS(flow_key, opt_len)	\
	((struct geneve_opt *)((flow_key)->tun_opts + \
			       FIELD_SIZEOF(struct sw_flow_key, tun_opts) - \
			       opt_len))
|
||||
|
||||
static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
|
||||
const struct iphdr *iph,
|
||||
__be64 tun_id, __be16 tun_flags,
|
||||
struct geneve_opt *opts,
|
||||
u8 opts_len)
|
||||
{
|
||||
tun_info->tunnel.tun_id = tun_id;
|
||||
tun_info->tunnel.ipv4_src = iph->saddr;
|
||||
tun_info->tunnel.ipv4_dst = iph->daddr;
|
||||
tun_info->tunnel.ipv4_tos = iph->tos;
|
||||
tun_info->tunnel.ipv4_ttl = iph->ttl;
|
||||
tun_info->tunnel.tun_flags = tun_flags;
|
||||
|
||||
/* clear struct padding. */
|
||||
memset((unsigned char *)&tun_info->tunnel + OVS_TUNNEL_KEY_SIZE, 0,
|
||||
sizeof(tun_info->tunnel) - OVS_TUNNEL_KEY_SIZE);
|
||||
|
||||
tun_info->options = opts;
|
||||
tun_info->options_len = opts_len;
|
||||
}
|
||||
|
||||
/* The flow key: every field a flow can match on.  Layout matters: masked
 * matching compares byte ranges, so the key must be comparable as an
 * array of longs (see the trailing __aligned), and tun_opts are stored
 * at the END of tun_opts[] (see GENEVE_OPTS) so short options still get
 * contiguous variable-length matching.
 */
struct sw_flow_key {
	u8 tun_opts[255];		/* Geneve option data, right-aligned. */
	u8 tun_opts_len;		/* Number of valid bytes in tun_opts. */
	struct ovs_key_ipv4_tunnel tun_key;  /* Encapsulating tunnel key. */
	struct {
		u32	priority;	/* Packet QoS priority. */
		u32	skb_mark;	/* SKB mark. */
		u16	in_port;	/* Input switch port (or DP_MAX_PORTS). */
	} __packed phy; /* Safe when right after 'tun_key'. */
	u32 ovs_flow_hash;		/* Datapath computed hash value.  */
	u32 recirc_id;			/* Recirculation ID.  */
	struct {
		u8     src[ETH_ALEN];	/* Ethernet source address. */
		u8     dst[ETH_ALEN];	/* Ethernet destination address. */
		__be16 tci;		/* 0 if no VLAN, VLAN_TAG_PRESENT set otherwise. */
		__be16 type;		/* Ethernet frame type. */
	} eth;
	struct {
		u8     proto;		/* IP protocol or lower 8 bits of ARP opcode. */
		u8     tos;		/* IP ToS. */
		u8     ttl;		/* IP TTL/hop limit. */
		u8     frag;		/* One of OVS_FRAG_TYPE_*. */
	} ip;
	struct {
		__be16 src;		/* TCP/UDP/SCTP source port. */
		__be16 dst;		/* TCP/UDP/SCTP destination port. */
		__be16 flags;		/* TCP flags. */
	} tp;
	union {
		struct {
			struct {
				__be32 src;	/* IP source address. */
				__be32 dst;	/* IP destination address. */
			} addr;
			struct {
				u8 sha[ETH_ALEN];	/* ARP source hardware address. */
				u8 tha[ETH_ALEN];	/* ARP target hardware address. */
			} arp;
		} ipv4;
		struct {
			struct {
				struct in6_addr src;	/* IPv6 source address. */
				struct in6_addr dst;	/* IPv6 destination address. */
			} addr;
			__be32 label;			/* IPv6 flow label. */
			struct {
				struct in6_addr target;	/* ND target address. */
				u8 sll[ETH_ALEN];	/* ND source link layer address. */
				u8 tll[ETH_ALEN];	/* ND target link layer address. */
			} nd;
		} ipv6;
	};
} __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */
|
||||
|
||||
/* Byte range [start, end) within a struct sw_flow_key that is actually
 * populated/matched.  Offsets are long-aligned by construction.
 */
struct sw_flow_key_range {
	unsigned short int start;
	unsigned short int end;
};
|
||||
|
||||
/* A wildcard mask shared (via 'ref_count') by all flows that use it.
 * Protected by ovs_mutex for updates; freed via RCU.
 */
struct sw_flow_mask {
	int ref_count;			/* Number of flows using this mask (ovs_mutex). */
	struct rcu_head rcu;
	struct list_head list;		/* Entry in flow_table->mask_list. */
	struct sw_flow_key_range range;	/* Portion of 'key' that is meaningful. */
	struct sw_flow_key key;		/* Mask bits; 1 = exact-match. */
};
|
||||
|
||||
/* Transient helper tying together a key, the valid range within it, and
 * an optional mask while parsing/looking up a flow.
 */
struct sw_flow_match {
	struct sw_flow_key *key;
	struct sw_flow_key_range range;
	struct sw_flow_mask *mask;	/* May be NULL for exact matches. */
};
|
||||
|
||||
/* RCU-managed, variably sized list of netlink-encoded actions attached
 * to a flow ('actions' is a flexible array member of 'actions_len' bytes).
 */
struct sw_flow_actions {
	struct rcu_head rcu;
	u32 actions_len;
	struct nlattr actions[];
};
|
||||
|
||||
/* Per-NUMA-node statistics for one flow; updated under 'lock'. */
struct flow_stats {
	u64 packet_count;		/* Number of packets matched. */
	u64 byte_count;			/* Number of bytes matched. */
	unsigned long used;		/* Last used time (in jiffies). */
	spinlock_t lock;		/* Lock for atomic stats update. */
	__be16 tcp_flags;		/* Union of seen TCP flags. */
};
|
||||
|
||||
/* One installed flow: masked key, the original unmasked key (for exact
 * revalidation), actions, and lazily allocated per-node stats.  Two
 * hash_node slots allow the flow to live in the old and new table
 * instance simultaneously during a rehash.
 */
struct sw_flow {
	struct rcu_head rcu;
	struct hlist_node hash_node[2];
	u32 hash;
	int stats_last_writer;		/* NUMA-node id of the last writer on
					 * 'stats[0]'.
					 */
	struct sw_flow_key key;
	struct sw_flow_key unmasked_key;
	struct sw_flow_mask *mask;
	struct sw_flow_actions __rcu *sf_acts;
	struct flow_stats __rcu *stats[]; /* One for each NUMA node.  First one
					   * is allocated at flow creation time,
					   * the rest are allocated on demand
					   * while holding the 'stats[0].lock'.
					   */
};
|
||||
|
||||
/* Wire format of an Ethernet+IPv4 ARP packet (RFC 826); __packed because
 * it is overlaid directly on packet data.
 */
struct arp_eth_header {
	__be16      ar_hrd;	/* format of hardware address   */
	__be16      ar_pro;	/* format of protocol address   */
	unsigned char   ar_hln;	/* length of hardware address   */
	unsigned char   ar_pln;	/* length of protocol address   */
	__be16      ar_op;	/* ARP opcode (command)     */

	/* Ethernet+IPv4 specific members. */
	unsigned char       ar_sha[ETH_ALEN];	/* sender hardware address  */
	unsigned char       ar_sip[4];		/* sender IP address        */
	unsigned char       ar_tha[ETH_ALEN];	/* target hardware address  */
	unsigned char       ar_tip[4];		/* target IP address        */
} __packed;
|
||||
|
||||
void ovs_flow_stats_update(struct sw_flow *, __be16 tcp_flags,
|
||||
struct sk_buff *);
|
||||
void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
|
||||
unsigned long *used, __be16 *tcp_flags);
|
||||
void ovs_flow_stats_clear(struct sw_flow *);
|
||||
u64 ovs_flow_used_time(unsigned long flow_jiffies);
|
||||
|
||||
int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key);
|
||||
int ovs_flow_key_extract(struct ovs_tunnel_info *tun_info, struct sk_buff *skb,
|
||||
struct sw_flow_key *key);
|
||||
/* Extract key from packet coming from userspace. */
|
||||
int ovs_flow_key_extract_userspace(const struct nlattr *attr,
|
||||
struct sk_buff *skb,
|
||||
struct sw_flow_key *key);
|
||||
|
||||
#endif /* flow.h */
|
1820
net/openvswitch/flow_netlink.c
Normal file
1820
net/openvswitch/flow_netlink.c
Normal file
File diff suppressed because it is too large
Load diff
60
net/openvswitch/flow_netlink.h
Normal file
60
net/openvswitch/flow_netlink.h
Normal file
|
@ -0,0 +1,60 @@
|
|||
/*
|
||||
* Copyright (c) 2007-2013 Nicira, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of version 2 of the GNU General Public
|
||||
* License as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
* 02110-1301, USA
|
||||
*/
|
||||
|
||||
|
||||
#ifndef FLOW_NETLINK_H
|
||||
#define FLOW_NETLINK_H 1
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/netlink.h>
|
||||
#include <linux/openvswitch.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/if_ether.h>
|
||||
#include <linux/in6.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/flex_array.h>
|
||||
|
||||
#include <net/inet_ecn.h>
|
||||
#include <net/ip_tunnels.h>
|
||||
|
||||
#include "flow.h"
|
||||
|
||||
void ovs_match_init(struct sw_flow_match *match,
|
||||
struct sw_flow_key *key, struct sw_flow_mask *mask);
|
||||
|
||||
int ovs_nla_put_flow(const struct sw_flow_key *,
|
||||
const struct sw_flow_key *, struct sk_buff *);
|
||||
int ovs_nla_get_flow_metadata(const struct nlattr *, struct sw_flow_key *);
|
||||
|
||||
int ovs_nla_get_match(struct sw_flow_match *match,
|
||||
const struct nlattr *,
|
||||
const struct nlattr *);
|
||||
|
||||
int ovs_nla_copy_actions(const struct nlattr *attr,
|
||||
const struct sw_flow_key *key, int depth,
|
||||
struct sw_flow_actions **sfa);
|
||||
int ovs_nla_put_actions(const struct nlattr *attr,
|
||||
int len, struct sk_buff *skb);
|
||||
|
||||
struct sw_flow_actions *ovs_nla_alloc_flow_actions(int actions_len);
|
||||
void ovs_nla_free_flow_actions(struct sw_flow_actions *);
|
||||
|
||||
#endif /* flow_netlink.h */
|
647
net/openvswitch/flow_table.c
Normal file
647
net/openvswitch/flow_table.c
Normal file
|
@ -0,0 +1,647 @@
|
|||
/*
|
||||
* Copyright (c) 2007-2013 Nicira, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of version 2 of the GNU General Public
|
||||
* License as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
* 02110-1301, USA
|
||||
*/
|
||||
|
||||
#include "flow.h"
|
||||
#include "datapath.h"
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/if_ether.h>
|
||||
#include <linux/if_vlan.h>
|
||||
#include <net/llc_pdu.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/hash.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/llc.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/if_arp.h>
|
||||
#include <linux/ip.h>
|
||||
#include <linux/ipv6.h>
|
||||
#include <linux/sctp.h>
|
||||
#include <linux/tcp.h>
|
||||
#include <linux/udp.h>
|
||||
#include <linux/icmp.h>
|
||||
#include <linux/icmpv6.h>
|
||||
#include <linux/rculist.h>
|
||||
#include <net/ip.h>
|
||||
#include <net/ipv6.h>
|
||||
#include <net/ndisc.h>
|
||||
|
||||
#define TBL_MIN_BUCKETS 1024
|
||||
#define REHASH_INTERVAL (10 * 60 * HZ)
|
||||
|
||||
static struct kmem_cache *flow_cache;
|
||||
struct kmem_cache *flow_stats_cache __read_mostly;
|
||||
|
||||
/* Number of bytes covered by 'range' (end offset minus start offset). */
static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}
|
||||
|
||||
/* dst = src & mask->key, computed one long at a time over mask->range.
 * Range offsets are long-aligned by construction, so whole-word ANDs are
 * safe and fast.
 */
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       const struct sw_flow_mask *mask)
{
	const long *m = (const long *)((const u8 *)&mask->key +
				mask->range.start);
	const long *s = (const long *)((const u8 *)src +
				mask->range.start);
	long *d = (long *)((u8 *)dst + mask->range.start);
	int i;

	/* The memory outside of the 'mask->range' are not set since
	 * further operations on 'dst' only uses contents within
	 * 'mask->range'.
	 */
	for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
		*d++ = *s++ & *m++;
}
|
||||
|
||||
/* Allocate a flow plus its node-0 stats block.  Stats pointers for the
 * other NUMA nodes start out NULL and are allocated lazily by the stats
 * update path.  Returns ERR_PTR(-ENOMEM) on failure.
 */
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct flow_stats *stats;
	int node;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->sf_acts = NULL;
	flow->mask = NULL;
	flow->stats_last_writer = NUMA_NO_NODE;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO, 0);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	for_each_node(node)
		if (node != 0)
			RCU_INIT_POINTER(flow->stats[node], NULL);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}
|
||||
|
||||
/* Number of flows currently installed in 'table'. */
int ovs_flow_tbl_count(struct flow_table *table)
{
	return table->count;
}
|
||||
|
||||
/* Allocate and pre-fault a flex_array of 'n_buckets' hlist heads, each
 * initialised empty.  Returns NULL if any allocation fails.
 */
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	/* Pre-allocate every element so later flex_array_get() cannot fail. */
	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}
|
||||
|
||||
/* Immediately release a flow's actions, per-node stats and the flow
 * itself.  Must only run once no RCU readers can still see the flow
 * (called directly under ovs_mutex or from an RCU callback).
 */
static void flow_free(struct sw_flow *flow)
{
	int node;

	kfree((struct sw_flow_actions __force *)flow->sf_acts);
	for_each_node(node)
		if (flow->stats[node])
			kmem_cache_free(flow_stats_cache,
					(struct flow_stats __force *)flow->stats[node]);
	kmem_cache_free(flow_cache, flow);
}
|
||||
|
||||
static void rcu_free_flow_callback(struct rcu_head *rcu)
|
||||
{
|
||||
struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
|
||||
|
||||
flow_free(flow);
|
||||
}
|
||||
|
||||
/* Free 'flow', either immediately or after an RCU grace period when
 * 'deferred' is set.  NULL is tolerated and ignored.
 */
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (flow == NULL)
		return;

	if (!deferred) {
		flow_free(flow);
		return;
	}

	call_rcu(&flow->rcu, rcu_free_flow_callback);
}
|
||||
|
||||
/* Counterpart of alloc_buckets(). */
static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}
|
||||
|
||||
|
||||
/* Free a table instance's memory only; any flows still linked into the
 * buckets must have been dealt with by the caller.
 */
static void __table_instance_destroy(struct table_instance *ti)
{
	free_buckets(ti->buckets);
	kfree(ti);
}
|
||||
|
||||
/* Allocate a table instance with 'new_size' hash buckets and a fresh
 * random hash seed.  Returns NULL if either allocation fails.
 */
static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return NULL;

	ti->buckets = alloc_buckets(new_size);

	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}
	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	/* Per-instance seed makes bucket placement unpredictable. */
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}
|
||||
|
||||
/* Initialise an empty flow table with the minimum bucket count.
 * Returns 0 on success or -ENOMEM.
 */
int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);

	if (!ti)
		return -ENOMEM;

	rcu_assign_pointer(table->ti, ti);
	INIT_LIST_HEAD(&table->mask_list);
	table->last_rehash = jiffies;
	table->count = 0;
	return 0;
}
|
||||
|
||||
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
|
||||
{
|
||||
struct table_instance *ti = container_of(rcu, struct table_instance, rcu);
|
||||
|
||||
__table_instance_destroy(ti);
|
||||
}
|
||||
|
||||
/* Tear down a table instance.  Unless 'keep_flows' is set (the instance
 * was replaced by a rehash and its flows live on in the new instance),
 * every flow is unlinked and freed; 'deferred' selects RCU-deferred vs
 * immediate destruction of both the flows and the instance itself.
 */
static void table_instance_destroy(struct table_instance *ti, bool deferred)
{
	int i;

	if (!ti)
		return;

	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
			hlist_del_rcu(&flow->hash_node[ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred)
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
	else
		__table_instance_destroy(ti);
}
|
||||
|
||||
/* Destroy 'table''s current instance and all flows in it. */
void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
{
	table_instance_destroy(ovsl_dereference(table->ti), deferred);
}
|
||||
|
||||
/* Iterator for netlink dumps.  (*bucket, *last) form a resumable cursor:
 * bucket index plus 1-based position within that bucket's chain.  Returns
 * the next flow and advances the cursor, or NULL when the table is
 * exhausted.  Safe under RCU; positions may shift if the table mutates
 * between calls (inherent to hlist cursors).
 */
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
			/* Skip entries already returned by a previous call. */
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}
|
||||
|
||||
/* Map a flow hash to its bucket, mixing in the per-instance seed.
 * n_buckets is a power of two, so masking selects the bucket index.
 */
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	u32 index = jhash_1word(hash, ti->hash_seed) & (ti->n_buckets - 1);

	return flex_array_get(ti->buckets, index);
}
|
||||
|
||||
static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow)
|
||||
{
|
||||
struct hlist_head *head;
|
||||
|
||||
head = find_bucket(ti, flow->hash);
|
||||
hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head);
|
||||
}
|
||||
|
||||
/* Relink every flow from 'old' into 'new' using the opposite hash_node
 * slot, so each flow is reachable from both instances during the RCU
 * transition.  Marks 'old' keep_flows so its destruction does not free
 * the flows it still references.
 */
static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		hlist_for_each_entry(flow, head, hash_node[old_ver])
			table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}
|
||||
|
||||
/* Build a new instance with 'n_buckets' buckets containing all of ti's
 * flows.  Returns the new instance, or NULL on allocation failure (in
 * which case 'ti' is untouched).
 */
static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets)
{
	struct table_instance *new_ti = table_instance_alloc(n_buckets);

	if (new_ti)
		flow_table_copy_flows(ti, new_ti);

	return new_ti;
}
|
||||
|
||||
/* Remove all flows by swapping in a fresh minimum-size instance and
 * RCU-destroying the old one (flows included).  Returns 0 or -ENOMEM;
 * on failure the table is left unchanged.  Caller holds ovs_mutex.
 */
int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti;
	struct table_instance *new_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;

	rcu_assign_pointer(flow_table->ti, new_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;

	table_instance_destroy(old_ti, true);
	return 0;
}
|
||||
|
||||
/* Hash the bytes of 'key' in [key_start, key_end) as u32 words.  Both
 * offsets are long-aligned, hence multiples of sizeof(u32).
 */
static u32 flow_hash(const struct sw_flow_key *key, int key_start,
		     int key_end)
{
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure number of hash bytes are multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return arch_fast_hash2(hash_key, hash_u32s, 0);
}
|
||||
|
||||
static int flow_key_start(const struct sw_flow_key *key)
|
||||
{
|
||||
if (key->tun_key.ipv4_dst)
|
||||
return 0;
|
||||
else
|
||||
return rounddown(offsetof(struct sw_flow_key, phy),
|
||||
sizeof(long));
|
||||
}
|
||||
|
||||
/* Compare two keys over [key_start, key_end) a long at a time, ORing the
 * XOR of each word into 'diffs' so there is a single branch at the end
 * rather than one per word.  Returns true when the ranges are identical.
 */
static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end;  i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}
|
||||
|
||||
/* True when 'flow''s (already masked) key equals 'key' over the range;
 * 'key' must already have the same mask applied.
 */
static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				int key_start, int key_end)
{
	return cmp_key(&flow->key, key, key_start, key_end);
}
|
||||
|
||||
bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
|
||||
struct sw_flow_match *match)
|
||||
{
|
||||
struct sw_flow_key *key = match->key;
|
||||
int key_start = flow_key_start(key);
|
||||
int key_end = match->range.end;
|
||||
|
||||
return cmp_key(&flow->unmasked_key, key, key_start, key_end);
|
||||
}
|
||||
|
||||
/* Look up 'unmasked' under one specific mask: apply the mask, hash the
 * masked range, then scan the bucket for a flow with the same mask, hash
 * and masked key.  RCU-safe.  Returns NULL when no flow matches.
 */
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int key_start = mask->range.start;
	int key_end = mask->range.end;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, mask);
	hash = flow_hash(&masked_key, key_start, key_end);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
		if (flow->mask == mask && flow->hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key,
					  key_start, key_end))
			return flow;
	}
	return NULL;
}
|
||||
|
||||
/* Packet-path lookup: try each installed mask in turn until one yields a
 * match.  *n_mask_hit counts masks tried (a cache-efficiency metric
 * reported to userspace).  RCU-safe; returns NULL if nothing matches.
 */
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
				    const struct sw_flow_key *key,
				    u32 *n_mask_hit)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(ti, key, mask);
		if (flow)  /* Found */
			return flow;
	}
	return NULL;
}
|
||||
|
||||
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
|
||||
const struct sw_flow_key *key)
|
||||
{
|
||||
u32 __always_unused n_mask_hit;
|
||||
|
||||
return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
|
||||
}
|
||||
|
||||
/* Control-path lookup requiring the UNMASKED keys to match exactly, so a
 * flow set up with a different wildcard pattern is not mistaken for this
 * one.  Returns NULL when no exact flow exists.
 */
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  struct sw_flow_match *match)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	/* Always called under ovs-mutex. */
	list_for_each_entry(mask, &tbl->mask_list, list) {
		flow = masked_flow_lookup(ti, match->key, mask);
		if (flow && ovs_flow_cmp_unmasked_key(flow, match))  /* Found */
			return flow;
	}
	return NULL;
}
|
||||
|
||||
int ovs_flow_tbl_num_masks(const struct flow_table *table)
|
||||
{
|
||||
struct sw_flow_mask *mask;
|
||||
int num = 0;
|
||||
|
||||
list_for_each_entry(mask, &table->mask_list, list)
|
||||
num++;
|
||||
|
||||
return num;
|
||||
}
|
||||
|
||||
/* Rehash into an instance with twice as many buckets. */
static struct table_instance *table_instance_expand(struct table_instance *ti)
{
	return table_instance_rehash(ti, ti->n_buckets * 2);
}
|
||||
|
||||
/* Drop one reference on 'mask'; when the last user goes away, unlink it
 * from the mask list and free it after an RCU grace period.  NULL is a
 * no-op.  Requires ovs_mutex.
 */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			list_del_rcu(&mask->list);
			kfree_rcu(mask, rcu);
		}
	}
}
|
||||
|
||||
/* Unlink 'flow' from the table and drop its mask reference.  The flow
 * itself is NOT freed here; the caller is responsible for ovs_flow_free()
 * once RCU readers are done.  Must be called with OVS mutex held.
 */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->hash_node[ti->node_ver]);
	table->count--;

	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
	 * accessible as long as the RCU read lock is held.
	 */
	flow_mask_remove(table, flow->mask);
}
|
||||
|
||||
static struct sw_flow_mask *mask_alloc(void)
|
||||
{
|
||||
struct sw_flow_mask *mask;
|
||||
|
||||
mask = kmalloc(sizeof(*mask), GFP_KERNEL);
|
||||
if (mask)
|
||||
mask->ref_count = 1;
|
||||
|
||||
return mask;
|
||||
}
|
||||
|
||||
static bool mask_equal(const struct sw_flow_mask *a,
|
||||
const struct sw_flow_mask *b)
|
||||
{
|
||||
const u8 *a_ = (const u8 *)&a->key + a->range.start;
|
||||
const u8 *b_ = (const u8 *)&b->key + b->range.start;
|
||||
|
||||
return (a->range.end == b->range.end)
|
||||
&& (a->range.start == b->range.start)
|
||||
&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
|
||||
}
|
||||
|
||||
static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
|
||||
const struct sw_flow_mask *mask)
|
||||
{
|
||||
struct list_head *ml;
|
||||
|
||||
list_for_each(ml, &tbl->mask_list) {
|
||||
struct sw_flow_mask *m;
|
||||
m = container_of(ml, struct sw_flow_mask, list);
|
||||
if (mask_equal(mask, m))
|
||||
return m;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Point 'flow' at a shared mask equal to 'new': reuse an existing entry
 * (bumping its refcount) or allocate and insert a copy.  Returns 0 or
 * -ENOMEM.  Requires ovs_mutex.
 */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;
	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;
		list_add_rcu(&mask->list, &tbl->mask_list);
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}
|
||||
|
||||
/* Install 'flow' with wildcard 'mask': attach a shared mask, hash the
 * masked key range, link the flow in, then grow the table when count
 * exceeds the bucket count or periodically rehash to refresh the hash
 * seed.  Returns 0 or -ENOMEM (rehash failure is silently skipped and
 * retried later).  Must be called with OVS mutex held.
 */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			struct sw_flow_mask *mask)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;

	flow->hash = flow_hash(&flow->key, flow->mask->range.start,
			flow->mask->range.end);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		table_instance_destroy(ti, true);
		table->last_rehash = jiffies;
	}
	return 0;
}
|
||||
|
||||
/* Initializes the flow module: creates the sw_flow slab (sized to hold
 * one stats pointer per possible NUMA node) and the flow-stats slab.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
	/* Key layout invariants relied on by the long-word mask/compare/hash
	 * helpers above.
	 */
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (num_possible_nodes()
					  * sizeof(struct flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}
|
||||
|
||||
/* Uninitializes the flow module.  All flows must already have been freed
 * so both caches are empty.
 */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}
|
86
net/openvswitch/flow_table.h
Normal file
86
net/openvswitch/flow_table.h
Normal file
|
@ -0,0 +1,86 @@
|
|||
/*
|
||||
* Copyright (c) 2007-2013 Nicira, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of version 2 of the GNU General Public
|
||||
* License as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
* 02110-1301, USA
|
||||
*/
|
||||
|
||||
#ifndef FLOW_TABLE_H
|
||||
#define FLOW_TABLE_H 1
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/netlink.h>
|
||||
#include <linux/openvswitch.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/if_ether.h>
|
||||
#include <linux/in6.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/flex_array.h>
|
||||
|
||||
#include <net/inet_ecn.h>
|
||||
#include <net/ip_tunnels.h>
|
||||
|
||||
#include "flow.h"
|
||||
|
||||
/* One generation of the hash table.  Replaced wholesale on resize or
 * periodic rehash; the retiring instance is freed via RCU.
 */
struct table_instance {
	struct flex_array *buckets;	/* Array of hlist_head buckets. */
	unsigned int n_buckets;		/* Power of two. */
	struct rcu_head rcu;
	int node_ver;			/* Which sw_flow.hash_node[] slot this instance uses. */
	u32 hash_seed;			/* Per-instance jhash seed. */
	bool keep_flows;		/* Set on rehash: flows now belong to the successor. */
};
|
||||
|
||||
/* Top-level flow table: current instance plus the list of wildcard masks
 * tried during lookup.  Writers hold ovs_mutex; readers use RCU.
 */
struct flow_table {
	struct table_instance __rcu *ti;
	struct list_head mask_list;	/* List of struct sw_flow_mask. */
	unsigned long last_rehash;	/* jiffies of last seed-refresh rehash. */
	unsigned int count;		/* Number of installed flows. */
};
|
||||
|
||||
extern struct kmem_cache *flow_stats_cache;
|
||||
|
||||
int ovs_flow_init(void);
|
||||
void ovs_flow_exit(void);
|
||||
|
||||
struct sw_flow *ovs_flow_alloc(void);
|
||||
void ovs_flow_free(struct sw_flow *, bool deferred);
|
||||
|
||||
int ovs_flow_tbl_init(struct flow_table *);
|
||||
int ovs_flow_tbl_count(struct flow_table *table);
|
||||
void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred);
|
||||
int ovs_flow_tbl_flush(struct flow_table *flow_table);
|
||||
|
||||
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
|
||||
struct sw_flow_mask *mask);
|
||||
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
|
||||
int ovs_flow_tbl_num_masks(const struct flow_table *table);
|
||||
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
|
||||
u32 *bucket, u32 *idx);
|
||||
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
|
||||
const struct sw_flow_key *,
|
||||
u32 *n_mask_hit);
|
||||
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
|
||||
const struct sw_flow_key *);
|
||||
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
|
||||
struct sw_flow_match *match);
|
||||
bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
|
||||
struct sw_flow_match *match);
|
||||
|
||||
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
|
||||
const struct sw_flow_mask *mask);
|
||||
#endif /* flow_table.h */
|
235
net/openvswitch/vport-geneve.c
Normal file
235
net/openvswitch/vport-geneve.c
Normal file
|
@ -0,0 +1,235 @@
|
|||
/*
|
||||
* Copyright (c) 2014 Nicira, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/version.h>
|
||||
|
||||
#include <linux/in.h>
|
||||
#include <linux/ip.h>
|
||||
#include <linux/net.h>
|
||||
#include <linux/rculist.h>
|
||||
#include <linux/udp.h>
|
||||
#include <linux/if_vlan.h>
|
||||
|
||||
#include <net/geneve.h>
|
||||
#include <net/icmp.h>
|
||||
#include <net/ip.h>
|
||||
#include <net/route.h>
|
||||
#include <net/udp.h>
|
||||
#include <net/xfrm.h>
|
||||
|
||||
#include "datapath.h"
|
||||
#include "vport.h"
|
||||
|
||||
/**
 * struct geneve_port - Keeps track of open UDP ports
 * @gs: The socket created for this port number.
 * @name: vport name.
 *
 * Lives in the private area of its struct vport (see geneve_vport()).
 */
struct geneve_port {
	struct geneve_sock *gs;
	char name[IFNAMSIZ];
};
|
||||
|
||||
static LIST_HEAD(geneve_ports);
|
||||
|
||||
/* Return the geneve_port stored in 'vport''s private data. */
static inline struct geneve_port *geneve_vport(const struct vport *vport)
{
	return vport_priv(vport);
}
|
||||
|
||||
/* The Geneve header starts immediately after the UDP header. */
static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
{
	return (struct genevehdr *)(udp_hdr(skb) + 1);
}
|
||||
|
||||
/* Convert 64 bit tunnel ID to 24 bit VNI.  tun_id is network byte order
 * (__be64), so the shift counts differ by host endianness; either way the
 * low 24 bits of the logical ID land in vni[0..2], most significant first.
 */
static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
{
#ifdef __BIG_ENDIAN
	vni[0] = (__force __u8)(tun_id >> 16);
	vni[1] = (__force __u8)(tun_id >> 8);
	vni[2] = (__force __u8)tun_id;
#else
	vni[0] = (__force __u8)((__force u64)tun_id >> 40);
	vni[1] = (__force __u8)((__force u64)tun_id >> 48);
	vni[2] = (__force __u8)((__force u64)tun_id >> 56);
#endif
}
|
||||
|
||||
/* Convert 24 bit VNI to 64 bit tunnel ID.  Exact inverse of
 * tunnel_id_to_vni(): rebuilds the __be64 from three wire-order bytes.
 */
static __be64 vni_to_tunnel_id(__u8 *vni)
{
#ifdef __BIG_ENDIAN
	return (vni[0] << 16) | (vni[1] << 8) | vni[2];
#else
	return (__force __be64)(((__force u64)vni[0] << 40) |
				((__force u64)vni[1] << 48) |
				((__force u64)vni[2] << 56));
#endif
}
|
||||
|
||||
/* Receive callback from the Geneve socket layer.  Builds tunnel metadata
 * from the outer headers (VNI as tunnel key, flags from the UDP checksum
 * and Geneve OAM/critical bits, options pointing into the skb) and hands
 * the packet to the datapath via the vport stashed in gs->rcv_data.
 */
static void geneve_rcv(struct geneve_sock *gs, struct sk_buff *skb)
{
	struct vport *vport = gs->rcv_data;
	struct genevehdr *geneveh = geneve_hdr(skb);
	int opts_len;
	struct ovs_tunnel_info tun_info;
	__be64 key;
	__be16 flags;

	opts_len = geneveh->opt_len * 4;	/* opt_len counts 4-byte words */

	flags = TUNNEL_KEY | TUNNEL_OPTIONS_PRESENT |
		(udp_hdr(skb)->check != 0 ? TUNNEL_CSUM : 0) |
		(geneveh->oam ? TUNNEL_OAM : 0) |
		(geneveh->critical ? TUNNEL_CRIT_OPT : 0);

	key = vni_to_tunnel_id(geneveh->vni);

	ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), key, flags,
			       geneveh->options, opts_len);

	ovs_vport_receive(vport, skb, &tun_info);
}
|
||||
|
||||
static int geneve_get_options(const struct vport *vport,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct geneve_port *geneve_port = geneve_vport(vport);
|
||||
struct inet_sock *sk = inet_sk(geneve_port->gs->sock->sk);
|
||||
|
||||
if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(sk->inet_sport)))
|
||||
return -EMSGSIZE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void geneve_tnl_destroy(struct vport *vport)
|
||||
{
|
||||
struct geneve_port *geneve_port = geneve_vport(vport);
|
||||
|
||||
geneve_sock_release(geneve_port->gs);
|
||||
|
||||
ovs_vport_deferred_free(vport);
|
||||
}
|
||||
|
||||
/* Create a geneve vport: parse the mandatory OVS_TUNNEL_ATTR_DST_PORT
 * option, allocate the vport and join (or create) the shared geneve
 * socket listening on that UDP port.
 * Returns the new vport or an ERR_PTR on failure.
 */
static struct vport *geneve_tnl_create(const struct vport_parms *parms)
{
	struct net *net = ovs_dp_get_net(parms->dp);
	struct nlattr *options = parms->options;
	struct geneve_port *geneve_port;
	struct geneve_sock *gs;
	struct vport *vport;
	struct nlattr *a;
	int err;
	u16 dst_port;

	if (!options) {
		err = -EINVAL;
		goto error;
	}

	a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
	if (a && nla_len(a) == sizeof(u16)) {
		dst_port = nla_get_u16(a);
	} else {
		/* Require destination port from userspace. */
		err = -EINVAL;
		goto error;
	}

	vport = ovs_vport_alloc(sizeof(struct geneve_port),
				&ovs_geneve_vport_ops, parms);
	if (IS_ERR(vport))
		return vport;

	geneve_port = geneve_vport(vport);
	/* NOTE(review): strncpy does not NUL-terminate when parms->name
	 * is exactly IFNAMSIZ bytes -- presumably userspace names are
	 * already bounded; confirm against the netlink policy.
	 */
	strncpy(geneve_port->name, parms->name, IFNAMSIZ);

	gs = geneve_sock_add(net, htons(dst_port), geneve_rcv, vport, true, 0);
	if (IS_ERR(gs)) {
		ovs_vport_free(vport);
		return (void *)gs;	/* propagate the ERR_PTR */
	}
	geneve_port->gs = gs;

	return vport;
error:
	return ERR_PTR(err);
}
|
||||
|
||||
/* Encapsulate and transmit one skb over the geneve tunnel described by
 * the flow's egress tunnel metadata.  Returns geneve_xmit_skb()'s
 * result, or -EINVAL when no egress tunnel info is attached, or a
 * routing error.
 */
static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct ovs_key_ipv4_tunnel *tun_key;
	struct ovs_tunnel_info *tun_info;
	struct net *net = ovs_dp_get_net(vport->dp);
	struct geneve_port *geneve_port = geneve_vport(vport);
	__be16 dport = inet_sk(geneve_port->gs->sock->sk)->inet_sport;
	__be16 sport;
	struct rtable *rt;
	struct flowi4 fl;
	u8 vni[3];
	__be16 df;
	int err;

	tun_info = OVS_CB(skb)->egress_tun_info;
	if (unlikely(!tun_info)) {
		err = -EINVAL;
		goto error;
	}

	tun_key = &tun_info->tunnel;

	/* Route lookup */
	memset(&fl, 0, sizeof(fl));
	fl.daddr = tun_key->ipv4_dst;
	fl.saddr = tun_key->ipv4_src;
	fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
	fl.flowi4_mark = skb->mark;
	fl.flowi4_proto = IPPROTO_UDP;

	rt = ip_route_output_key(net, &fl);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		goto error;
	}

	/* Honour the flow's don't-fragment request on the outer header. */
	df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
	/* Derive the UDP source port from the inner flow for ECMP spread. */
	sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);
	tunnel_id_to_vni(tun_key->tun_id, vni);
	skb->ignore_df = 1;

	err = geneve_xmit_skb(geneve_port->gs, rt, skb, fl.saddr,
			      tun_key->ipv4_dst, tun_key->ipv4_tos,
			      tun_key->ipv4_ttl, df, sport, dport,
			      tun_key->tun_flags, vni,
			      tun_info->options_len, (u8 *)tun_info->options,
			      false);
	/* NOTE(review): the route is presumably consumed by
	 * geneve_xmit_skb() on success and the skb freed by it (or the
	 * caller) on error -- confirm against geneve_xmit_skb().
	 */
	if (err < 0)
		ip_rt_put(rt);
error:
	return err;
}
|
||||
|
||||
static const char *geneve_get_name(const struct vport *vport)
|
||||
{
|
||||
struct geneve_port *geneve_port = geneve_vport(vport);
|
||||
|
||||
return geneve_port->name;
|
||||
}
|
||||
|
||||
/* Vport operations for OVS_VPORT_TYPE_GENEVE tunnels. */
const struct vport_ops ovs_geneve_vport_ops = {
	.type		= OVS_VPORT_TYPE_GENEVE,
	.create		= geneve_tnl_create,
	.destroy	= geneve_tnl_destroy,
	.get_name	= geneve_get_name,
	.get_options	= geneve_get_options,
	.send		= geneve_tnl_send,
};
|
286
net/openvswitch/vport-gre.c
Normal file
286
net/openvswitch/vport-gre.c
Normal file
|
@ -0,0 +1,286 @@
|
|||
/*
|
||||
* Copyright (c) 2007-2014 Nicira, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of version 2 of the GNU General Public
|
||||
* License as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
* 02110-1301, USA
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/if.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/ip.h>
|
||||
#include <linux/if_tunnel.h>
|
||||
#include <linux/if_vlan.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/in_route.h>
|
||||
#include <linux/inetdevice.h>
|
||||
#include <linux/jhash.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/rculist.h>
|
||||
#include <net/route.h>
|
||||
#include <net/xfrm.h>
|
||||
|
||||
#include <net/icmp.h>
|
||||
#include <net/ip.h>
|
||||
#include <net/ip_tunnels.h>
|
||||
#include <net/gre.h>
|
||||
#include <net/net_namespace.h>
|
||||
#include <net/netns/generic.h>
|
||||
#include <net/protocol.h>
|
||||
|
||||
#include "datapath.h"
|
||||
#include "vport.h"
|
||||
|
||||
/* Returns the least-significant 32 bits of a __be64. */
|
||||
static __be32 be64_get_low32(__be64 x)
|
||||
{
|
||||
#ifdef __BIG_ENDIAN
|
||||
return (__force __be32)x;
|
||||
#else
|
||||
return (__force __be32)((__force u64)x >> 32);
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Keep only the tunnel flags GRE can represent on the wire. */
static __be16 filter_tnl_flags(__be16 flags)
{
	const __be16 supported = TUNNEL_CSUM | TUNNEL_KEY;

	return flags & supported;
}
|
||||
|
||||
/* Prepend a GRE header (after checksum-offload fixups) to an skb that
 * is about to be tunnelled.  Returns the (possibly reallocated) skb, or
 * NULL when gre_handle_offloads() failed -- the skb is presumably freed
 * by the offload helper in that case; TODO confirm.
 */
static struct sk_buff *__build_header(struct sk_buff *skb,
				      int tunnel_hlen)
{
	struct tnl_ptk_info tpi;
	const struct ovs_key_ipv4_tunnel *tun_key;

	tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;

	skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
	if (IS_ERR(skb))
		return NULL;

	/* Only CSUM/KEY go onto the wire; GRE carries the low 32 bits of
	 * the 64 bit tunnel ID as its key (see be64_get_low32).
	 */
	tpi.flags = filter_tnl_flags(tun_key->tun_flags);
	tpi.proto = htons(ETH_P_TEB);
	tpi.key = be64_get_low32(tun_key->tun_id);
	tpi.seq = 0;
	gre_build_header(skb, &tpi, tunnel_hlen);

	return skb;
}
|
||||
|
||||
/* Combine the 32-bit GRE key and sequence number into the 64-bit
 * tunnel ID used by the flow key.
 */
static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
{
	u64 hi, lo;

#ifdef __BIG_ENDIAN
	hi = (__force u64)seq;
	lo = (__force u32)key;
#else
	hi = (__force u64)key;
	lo = (__force u32)seq;
#endif
	return (__force __be64)(hi << 32 | lo);
}
|
||||
|
||||
/* Called with rcu_read_lock and BH disabled. */
static int gre_rcv(struct sk_buff *skb,
		   const struct tnl_ptk_info *tpi)
{
	struct ovs_tunnel_info tun_info;
	struct ovs_net *ovs_net;
	struct vport *vport;
	__be64 key;

	/* A single GRE vport per namespace receives all GRE traffic. */
	ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
	vport = rcu_dereference(ovs_net->vport_net.gre_vport);
	if (unlikely(!vport))
		return PACKET_REJECT;

	/* Rebuild the 64-bit tunnel ID from the GRE key and sequence. */
	key = key_to_tunnel_id(tpi->key, tpi->seq);
	ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), key,
			       filter_tnl_flags(tpi->flags), NULL, 0);

	ovs_vport_receive(vport, skb, &tun_info);
	return PACKET_RCVD;
}
|
||||
|
||||
/* Called with rcu_read_lock and BH disabled. */
|
||||
static int gre_err(struct sk_buff *skb, u32 info,
|
||||
const struct tnl_ptk_info *tpi)
|
||||
{
|
||||
struct ovs_net *ovs_net;
|
||||
struct vport *vport;
|
||||
|
||||
ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
|
||||
vport = rcu_dereference(ovs_net->vport_net.gre_vport);
|
||||
|
||||
if (unlikely(!vport))
|
||||
return PACKET_REJECT;
|
||||
else
|
||||
return PACKET_RCVD;
|
||||
}
|
||||
|
||||
/* Encapsulate and transmit one skb via GRE using the flow's egress
 * tunnel metadata.  Returns iptunnel_xmit()'s result on success or a
 * negative errno (0 when the offload path consumed the skb).
 */
static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct net *net = ovs_dp_get_net(vport->dp);
	struct ovs_key_ipv4_tunnel *tun_key;
	struct flowi4 fl;
	struct rtable *rt;
	int min_headroom;
	int tunnel_hlen;
	__be16 df;
	int err;

	if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
		err = -EINVAL;
		goto error;
	}

	tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
	/* Route lookup */
	memset(&fl, 0, sizeof(fl));
	fl.daddr = tun_key->ipv4_dst;
	fl.saddr = tun_key->ipv4_src;
	fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
	fl.flowi4_mark = skb->mark;
	fl.flowi4_proto = IPPROTO_GRE;

	rt = ip_route_output_key(net, &fl);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	tunnel_hlen = ip_gre_calc_hlen(tun_key->tun_flags);

	/* Ensure headroom for link-layer + outer IP + GRE headers, plus
	 * a VLAN tag if one is pending in hw-accel state.
	 */
	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}

	/* Materialize any hw-accel VLAN tag before encapsulating. */
	skb = vlan_hwaccel_push_inside(skb);
	if (unlikely(!skb)) {
		err = -ENOMEM;
		goto err_free_rt;
	}

	/* Push Tunnel header. */
	skb = __build_header(skb, tunnel_hlen);
	if (unlikely(!skb)) {
		/* skb presumably freed inside the offload path (see
		 * __build_header); report "0 bytes sent", not an error.
		 */
		err = 0;
		goto err_free_rt;
	}

	df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
		htons(IP_DF) : 0;

	skb->ignore_df = 1;

	return iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
			     tun_key->ipv4_dst, IPPROTO_GRE,
			     tun_key->ipv4_tos, tun_key->ipv4_ttl, df, false);
err_free_rt:
	ip_rt_put(rt);
error:
	return err;
}
|
||||
|
||||
/* Callbacks registered with the shared GRE demultiplexer. */
static struct gre_cisco_protocol gre_protocol = {
	.handler        = gre_rcv,
	.err_handler    = gre_err,
	.priority       = 1,
};
|
||||
|
||||
static int gre_ports;
|
||||
static int gre_init(void)
|
||||
{
|
||||
int err;
|
||||
|
||||
gre_ports++;
|
||||
if (gre_ports > 1)
|
||||
return 0;
|
||||
|
||||
err = gre_cisco_register(&gre_protocol);
|
||||
if (err)
|
||||
pr_warn("cannot register gre protocol handler\n");
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void gre_exit(void)
|
||||
{
|
||||
gre_ports--;
|
||||
if (gre_ports > 0)
|
||||
return;
|
||||
|
||||
gre_cisco_unregister(&gre_protocol);
|
||||
}
|
||||
|
||||
/* The GRE vport's private area holds nothing but its name. */
static const char *gre_get_name(const struct vport *vport)
{
	const char *name = vport_priv(vport);

	return name;
}
|
||||
|
||||
/* Create the single-per-netns GRE vport, registering the protocol
 * handler on first use.  Returns the vport or an ERR_PTR.
 */
static struct vport *gre_create(const struct vport_parms *parms)
{
	struct net *net = ovs_dp_get_net(parms->dp);
	struct ovs_net *ovs_net;
	struct vport *vport;
	int err;

	err = gre_init();
	if (err)
		return ERR_PTR(err);

	/* Only one GRE vport may exist per namespace. */
	ovs_net = net_generic(net, ovs_net_id);
	if (ovsl_dereference(ovs_net->vport_net.gre_vport)) {
		vport = ERR_PTR(-EEXIST);
		goto error;
	}

	/* Private area stores just the vport name. */
	vport = ovs_vport_alloc(IFNAMSIZ, &ovs_gre_vport_ops, parms);
	if (IS_ERR(vport))
		goto error;

	strncpy(vport_priv(vport), parms->name, IFNAMSIZ);
	rcu_assign_pointer(ovs_net->vport_net.gre_vport, vport);
	return vport;

error:
	/* Drop the gre_init() reference; vport is an ERR_PTR here and is
	 * returned as-is.
	 */
	gre_exit();
	return vport;
}
|
||||
|
||||
/* Destroy the GRE vport: clear the per-netns pointer so no new packets
 * are delivered, defer the free past an RCU grace period, and drop the
 * protocol-handler reference.
 */
static void gre_tnl_destroy(struct vport *vport)
{
	struct net *net = ovs_dp_get_net(vport->dp);
	struct ovs_net *ovs_net;

	ovs_net = net_generic(net, ovs_net_id);

	RCU_INIT_POINTER(ovs_net->vport_net.gre_vport, NULL);
	ovs_vport_deferred_free(vport);
	gre_exit();
}
|
||||
|
||||
/* Vport operations for OVS_VPORT_TYPE_GRE tunnels. */
const struct vport_ops ovs_gre_vport_ops = {
	.type		= OVS_VPORT_TYPE_GRE,
	.create		= gre_create,
	.destroy	= gre_tnl_destroy,
	.get_name	= gre_get_name,
	.send		= gre_tnl_send,
};
|
270
net/openvswitch/vport-internal_dev.c
Normal file
270
net/openvswitch/vport-internal_dev.c
Normal file
|
@ -0,0 +1,270 @@
|
|||
/*
|
||||
* Copyright (c) 2007-2012 Nicira, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of version 2 of the GNU General Public
|
||||
* License as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
* 02110-1301, USA
|
||||
*/
|
||||
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/if_vlan.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/ethtool.h>
|
||||
#include <linux/skbuff.h>
|
||||
|
||||
#include <net/dst.h>
|
||||
#include <net/xfrm.h>
|
||||
#include <net/rtnetlink.h>
|
||||
|
||||
#include "datapath.h"
|
||||
#include "vport-internal_dev.h"
|
||||
#include "vport-netdev.h"
|
||||
|
||||
/* Private area of an internal net_device: back-pointer to the owning
 * vport, set at creation time and read on every transmit.
 */
struct internal_dev {
	struct vport *vport;
};
|
||||
|
||||
/* Map a net_device to its internal_dev private area. */
static struct internal_dev *internal_dev_priv(struct net_device *netdev)
{
	struct internal_dev *priv = netdev_priv(netdev);

	return priv;
}
|
||||
|
||||
/* This function is only called by the kernel network layer. */
static struct rtnl_link_stats64 *internal_dev_get_stats(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct vport *vport = ovs_internal_dev_get_vport(netdev);
	struct ovs_vport_stats vport_stats;

	ovs_vport_get_stats(vport, &vport_stats);

	/* The tx and rx stats need to be swapped because the
	 * switch and host OS have opposite perspectives. */
	stats->rx_packets = vport_stats.tx_packets;
	stats->tx_packets = vport_stats.rx_packets;
	stats->rx_bytes = vport_stats.tx_bytes;
	stats->tx_bytes = vport_stats.rx_bytes;
	stats->rx_errors = vport_stats.tx_errors;
	stats->tx_errors = vport_stats.rx_errors;
	stats->rx_dropped = vport_stats.tx_dropped;
	stats->tx_dropped = vport_stats.rx_dropped;

	return stats;
}
|
||||
|
||||
/* Called with rcu_read_lock_bh. */
static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	/* Frames the host transmits on an internal device enter the
	 * datapath as received traffic.
	 */
	rcu_read_lock();
	ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
	rcu_read_unlock();
	return 0;
}
|
||||
|
||||
/* ndo_open: enable the transmit queue. */
static int internal_dev_open(struct net_device *netdev)
{
	netif_start_queue(netdev);
	return 0;
}
|
||||
|
||||
/* ndo_stop: disable the transmit queue. */
static int internal_dev_stop(struct net_device *netdev)
{
	netif_stop_queue(netdev);
	return 0;
}
|
||||
|
||||
/* ethtool ->get_drvinfo: report only the driver name. */
static void internal_dev_getinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "openvswitch", sizeof(info->driver));
}
|
||||
|
||||
/* Minimal ethtool support: driver name and link state. */
static const struct ethtool_ops internal_dev_ethtool_ops = {
	.get_drvinfo	= internal_dev_getinfo,
	.get_link	= ethtool_op_get_link,
};
|
||||
|
||||
/* ndo_change_mtu: accept any MTU of at least 68 bytes (the minimum
 * IPv4 MTU per RFC 791); no upper bound is enforced here.
 */
static int internal_dev_change_mtu(struct net_device *netdev, int new_mtu)
{
	if (new_mtu < 68)
		return -EINVAL;

	netdev->mtu = new_mtu;
	return 0;
}
|
||||
|
||||
/* net_device ->destructor: invoked by the core once the device's last
 * reference is dropped; frees both the owning vport and the device.
 */
static void internal_dev_destructor(struct net_device *dev)
{
	struct vport *vport = ovs_internal_dev_get_vport(dev);

	ovs_vport_free(vport);
	free_netdev(dev);
}
|
||||
|
||||
/* net_device operations for OVS internal devices; also used by
 * ovs_is_internal_dev() to identify them.
 */
static const struct net_device_ops internal_dev_netdev_ops = {
	.ndo_open = internal_dev_open,
	.ndo_stop = internal_dev_stop,
	.ndo_start_xmit = internal_dev_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_change_mtu = internal_dev_change_mtu,
	.ndo_get_stats64 = internal_dev_get_stats,
};
|
||||
|
||||
/* Exposes internal devices as link kind "openvswitch" over rtnetlink. */
static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
	.kind = "openvswitch",
};
|
||||
|
||||
/* alloc_netdev() setup callback: Ethernet defaults plus OVS-specific
 * ops, feature flags and a random MAC address.
 */
static void do_setup(struct net_device *netdev)
{
	ether_setup(netdev);

	netdev->netdev_ops = &internal_dev_netdev_ops;

	netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netdev->destructor = internal_dev_destructor;
	netdev->ethtool_ops = &internal_dev_ethtool_ops;
	netdev->rtnl_link_ops = &internal_dev_link_ops;
	/* Zero-length transmit queue. */
	netdev->tx_queue_len = 0;

	netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
			   NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
			   NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL;

	/* Same feature set for VLAN and tunnel-encapsulated traffic. */
	netdev->vlan_features = netdev->features;
	netdev->hw_enc_features = netdev->features;
	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features = netdev->features & ~NETIF_F_LLTX;

	eth_hw_addr_random(netdev);
}
|
||||
|
||||
/* Create an internal vport backed by a new net_device visible to the
 * host stack.  Returns the vport or an ERR_PTR.
 */
static struct vport *internal_dev_create(const struct vport_parms *parms)
{
	struct vport *vport;
	struct netdev_vport *netdev_vport;
	struct internal_dev *internal_dev;
	int err;

	vport = ovs_vport_alloc(sizeof(struct netdev_vport),
				&ovs_internal_vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	netdev_vport = netdev_vport_priv(vport);

	netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev),
					 parms->name, NET_NAME_UNKNOWN,
					 do_setup);
	if (!netdev_vport->dev) {
		err = -ENOMEM;
		goto error_free_vport;
	}

	dev_net_set(netdev_vport->dev, ovs_dp_get_net(vport->dp));
	internal_dev = internal_dev_priv(netdev_vport->dev);
	internal_dev->vport = vport;	/* back-pointer used on transmit */

	/* Restrict bridge port to current netns. */
	if (vport->port_no == OVSP_LOCAL)
		netdev_vport->dev->features |= NETIF_F_NETNS_LOCAL;

	rtnl_lock();
	err = register_netdevice(netdev_vport->dev);
	if (err)
		goto error_free_netdev;

	dev_set_promiscuity(netdev_vport->dev, 1);
	rtnl_unlock();
	netif_start_queue(netdev_vport->dev);

	return vport;

error_free_netdev:
	rtnl_unlock();
	free_netdev(netdev_vport->dev);
error_free_vport:
	ovs_vport_free(vport);
error:
	return ERR_PTR(err);
}
|
||||
|
||||
/* Destroy an internal vport.  Only the net_device is unregistered here;
 * the vport itself is freed later by internal_dev_destructor() when the
 * device is released.
 */
static void internal_dev_destroy(struct vport *vport)
{
	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);

	netif_stop_queue(netdev_vport->dev);
	rtnl_lock();
	dev_set_promiscuity(netdev_vport->dev, -1);

	/* unregister_netdevice() waits for an RCU grace period. */
	unregister_netdevice(netdev_vport->dev);

	rtnl_unlock();
}
|
||||
|
||||
/* Deliver a datapath packet to the host stack through the internal
 * device.  Returns the length handed to netif_rx().
 */
static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
{
	struct net_device *netdev = netdev_vport_priv(vport)->dev;
	int len;

	len = skb->len;

	/* Scrub state belonging to the datapath side of the skb. */
	skb_dst_drop(skb);
	nf_reset(skb);
	secpath_reset(skb);

	skb->dev = netdev;
	skb->pkt_type = PACKET_HOST;
	/* eth_type_trans() pulls the Ethernet header; fix the checksum. */
	skb->protocol = eth_type_trans(skb, netdev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	netif_rx(skb);

	return len;
}
|
||||
|
||||
/* Vport operations for OVS_VPORT_TYPE_INTERNAL devices.  Note that
 * ->send delivers to the host stack (it is "receive" from the host's
 * point of view).
 */
const struct vport_ops ovs_internal_vport_ops = {
	.type = OVS_VPORT_TYPE_INTERNAL,
	.create = internal_dev_create,
	.destroy = internal_dev_destroy,
	.get_name = ovs_netdev_get_name,
	.send = internal_dev_recv,
};
|
||||
|
||||
int ovs_is_internal_dev(const struct net_device *netdev)
|
||||
{
|
||||
return netdev->netdev_ops == &internal_dev_netdev_ops;
|
||||
}
|
||||
|
||||
struct vport *ovs_internal_dev_get_vport(struct net_device *netdev)
|
||||
{
|
||||
if (!ovs_is_internal_dev(netdev))
|
||||
return NULL;
|
||||
|
||||
return internal_dev_priv(netdev)->vport;
|
||||
}
|
||||
|
||||
/* Register the "openvswitch" rtnl link kind. */
int ovs_internal_dev_rtnl_link_register(void)
{
	return rtnl_link_register(&internal_dev_link_ops);
}
|
||||
|
||||
/* Unregister the "openvswitch" rtnl link kind. */
void ovs_internal_dev_rtnl_link_unregister(void)
{
	rtnl_link_unregister(&internal_dev_link_ops);
}
|
30
net/openvswitch/vport-internal_dev.h
Normal file
30
net/openvswitch/vport-internal_dev.h
Normal file
|
@ -0,0 +1,30 @@
|
|||
/*
|
||||
* Copyright (c) 2007-2011 Nicira, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of version 2 of the GNU General Public
|
||||
* License as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
* 02110-1301, USA
|
||||
*/
|
||||
|
||||
#ifndef VPORT_INTERNAL_DEV_H
|
||||
#define VPORT_INTERNAL_DEV_H 1
|
||||
|
||||
#include "datapath.h"
|
||||
#include "vport.h"
|
||||
|
||||
int ovs_is_internal_dev(const struct net_device *);
|
||||
struct vport *ovs_internal_dev_get_vport(struct net_device *);
|
||||
int ovs_internal_dev_rtnl_link_register(void);
|
||||
void ovs_internal_dev_rtnl_link_unregister(void);
|
||||
|
||||
#endif /* vport-internal_dev.h */
|
233
net/openvswitch/vport-netdev.c
Normal file
233
net/openvswitch/vport-netdev.c
Normal file
|
@ -0,0 +1,233 @@
|
|||
/*
|
||||
* Copyright (c) 2007-2012 Nicira, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of version 2 of the GNU General Public
|
||||
* License as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
* 02110-1301, USA
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/if_arp.h>
|
||||
#include <linux/if_bridge.h>
|
||||
#include <linux/if_vlan.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/llc.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/openvswitch.h>
|
||||
|
||||
#include <net/llc.h>
|
||||
|
||||
#include "datapath.h"
|
||||
#include "vport-internal_dev.h"
|
||||
#include "vport-netdev.h"
|
||||
|
||||
/* Must be called with rcu_read_lock. */
static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
{
	if (unlikely(!vport))
		goto error;

	/* LRO-merged skbs cannot be forwarded safely. */
	if (unlikely(skb_warn_if_lro(skb)))
		goto error;

	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return;

	/* Re-expose the Ethernet header the rx path already pulled. */
	skb_push(skb, ETH_HLEN);
	ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);

	ovs_vport_receive(vport, skb, NULL);
	return;

error:
	kfree_skb(skb);
}
|
||||
|
||||
/* Called with rcu_read_lock and bottom-halves disabled. */
static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct vport *vport;

	/* Let loopback traffic pass through untouched. */
	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	vport = ovs_netdev_get_vport(skb->dev);

	/* Consumes (or frees) the skb in all cases. */
	netdev_port_receive(vport, skb);

	return RX_HANDLER_CONSUMED;
}
|
||||
|
||||
static struct net_device *get_dpdev(struct datapath *dp)
|
||||
{
|
||||
struct vport *local;
|
||||
|
||||
local = ovs_vport_ovsl(dp, OVSP_LOCAL);
|
||||
BUG_ON(!local);
|
||||
return netdev_vport_priv(local)->dev;
|
||||
}
|
||||
|
||||
/* Create a vport attached to an existing kernel net_device named
 * parms->name: link it under the datapath's local device, install our
 * rx handler and enable promiscuous mode.  Returns the vport or an
 * ERR_PTR.
 */
static struct vport *netdev_create(const struct vport_parms *parms)
{
	struct vport *vport;
	struct netdev_vport *netdev_vport;
	int err;

	vport = ovs_vport_alloc(sizeof(struct netdev_vport),
				&ovs_netdev_vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	netdev_vport = netdev_vport_priv(vport);

	netdev_vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), parms->name);
	if (!netdev_vport->dev) {
		err = -ENODEV;
		goto error_free_vport;
	}

	/* Refuse loopback, non-Ethernet and OVS internal devices. */
	if (netdev_vport->dev->flags & IFF_LOOPBACK ||
	    netdev_vport->dev->type != ARPHRD_ETHER ||
	    ovs_is_internal_dev(netdev_vport->dev)) {
		err = -EINVAL;
		goto error_put;
	}

	rtnl_lock();
	err = netdev_master_upper_dev_link(netdev_vport->dev,
					   get_dpdev(vport->dp));
	if (err)
		goto error_unlock;

	err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
					 vport);
	if (err)
		goto error_master_upper_dev_unlink;

	dev_set_promiscuity(netdev_vport->dev, 1);
	/* Flag checked by ovs_netdev_get_vport() on the hot rx path. */
	netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
	rtnl_unlock();

	return vport;

error_master_upper_dev_unlink:
	netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp));
error_unlock:
	rtnl_unlock();
error_put:
	dev_put(netdev_vport->dev);
error_free_vport:
	ovs_vport_free(vport);
error:
	return ERR_PTR(err);
}
|
||||
|
||||
/* RCU callback: release the device reference and free the vport once
 * no RCU readers can still see it.
 */
static void free_port_rcu(struct rcu_head *rcu)
{
	struct netdev_vport *netdev_vport = container_of(rcu,
					struct netdev_vport, rcu);

	dev_put(netdev_vport->dev);
	ovs_vport_free(vport_from_priv(netdev_vport));
}
|
||||
|
||||
/* Detach a net_device from its datapath: undo everything
 * netdev_create() set up on the device.  Caller must hold RTNL.
 */
void ovs_netdev_detach_dev(struct vport *vport)
{
	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);

	ASSERT_RTNL();
	netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
	netdev_rx_handler_unregister(netdev_vport->dev);
	netdev_upper_dev_unlink(netdev_vport->dev,
				netdev_master_upper_dev_get(netdev_vport->dev));
	dev_set_promiscuity(netdev_vport->dev, -1);
}
|
||||
|
||||
/* Destroy a netdev vport: detach from the device if still attached,
 * then free the vport after an RCU grace period via free_port_rcu().
 */
static void netdev_destroy(struct vport *vport)
{
	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);

	rtnl_lock();
	/* The flag may already be clear if the device was detached
	 * earlier (e.g. on device unregistration).
	 */
	if (netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH)
		ovs_netdev_detach_dev(vport);
	rtnl_unlock();

	call_rcu(&netdev_vport->rcu, free_port_rcu);
}
|
||||
|
||||
const char *ovs_netdev_get_name(const struct vport *vport)
|
||||
{
|
||||
const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
|
||||
return netdev_vport->dev->name;
|
||||
}
|
||||
|
||||
static unsigned int packet_length(const struct sk_buff *skb)
|
||||
{
|
||||
unsigned int length = skb->len - ETH_HLEN;
|
||||
|
||||
if (skb->protocol == htons(ETH_P_8021Q))
|
||||
length -= VLAN_HLEN;
|
||||
|
||||
return length;
|
||||
}
|
||||
|
||||
/* Transmit an skb out the attached net_device.  Over-MTU packets that
 * are not GSO are dropped with a rate-limited warning.  Returns the
 * number of bytes queued, or 0 on drop.
 */
static int netdev_send(struct vport *vport, struct sk_buff *skb)
{
	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
	int mtu = netdev_vport->dev->mtu;
	int len;

	if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
		net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
				     netdev_vport->dev->name,
				     packet_length(skb), mtu);
		goto drop;
	}

	skb->dev = netdev_vport->dev;
	/* Record the length before dev_queue_xmit() consumes the skb. */
	len = skb->len;
	dev_queue_xmit(skb);

	return len;

drop:
	kfree_skb(skb);
	return 0;
}
|
||||
|
||||
/* Returns null if this device is not attached to a datapath. */
|
||||
struct vport *ovs_netdev_get_vport(struct net_device *dev)
|
||||
{
|
||||
if (likely(dev->priv_flags & IFF_OVS_DATAPATH))
|
||||
return (struct vport *)
|
||||
rcu_dereference_rtnl(dev->rx_handler_data);
|
||||
else
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Vport operations for OVS_VPORT_TYPE_NETDEV ports. */
const struct vport_ops ovs_netdev_vport_ops = {
	.type = OVS_VPORT_TYPE_NETDEV,
	.create = netdev_create,
	.destroy = netdev_destroy,
	.get_name = ovs_netdev_get_name,
	.send = netdev_send,
};
|
44
net/openvswitch/vport-netdev.h
Normal file
44
net/openvswitch/vport-netdev.h
Normal file
|
@ -0,0 +1,44 @@
|
|||
/*
|
||||
* Copyright (c) 2007-2011 Nicira, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of version 2 of the GNU General Public
|
||||
* License as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
* 02110-1301, USA
|
||||
*/
|
||||
|
||||
#ifndef VPORT_NETDEV_H
|
||||
#define VPORT_NETDEV_H 1
|
||||
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/rcupdate.h>
|
||||
|
||||
#include "vport.h"
|
||||
|
||||
struct vport *ovs_netdev_get_vport(struct net_device *dev);
|
||||
|
||||
/* Private area of a netdev-backed vport: the attached device plus an
 * RCU head used to defer freeing until readers are done.
 */
struct netdev_vport {
	struct rcu_head rcu;

	struct net_device *dev;
};
|
||||
|
||||
/* Map a generic vport to its netdev-specific private area. */
static inline struct netdev_vport *
netdev_vport_priv(const struct vport *vport)
{
	return vport_priv(vport);
}
|
||||
|
||||
const char *ovs_netdev_get_name(const struct vport *);
|
||||
void ovs_netdev_detach_dev(struct vport *);
|
||||
|
||||
#endif /* vport_netdev.h */
|
202
net/openvswitch/vport-vxlan.c
Normal file
202
net/openvswitch/vport-vxlan.c
Normal file
|
@ -0,0 +1,202 @@
|
|||
/*
|
||||
* Copyright (c) 2014 Nicira, Inc.
|
||||
* Copyright (c) 2013 Cisco Systems, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of version 2 of the GNU General Public
|
||||
* License as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
* 02110-1301, USA
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/in.h>
|
||||
#include <linux/ip.h>
|
||||
#include <linux/net.h>
|
||||
#include <linux/rculist.h>
|
||||
#include <linux/udp.h>
|
||||
|
||||
#include <net/icmp.h>
|
||||
#include <net/ip.h>
|
||||
#include <net/udp.h>
|
||||
#include <net/ip_tunnels.h>
|
||||
#include <net/rtnetlink.h>
|
||||
#include <net/route.h>
|
||||
#include <net/dsfield.h>
|
||||
#include <net/inet_ecn.h>
|
||||
#include <net/net_namespace.h>
|
||||
#include <net/netns/generic.h>
|
||||
#include <net/vxlan.h>
|
||||
|
||||
#include "datapath.h"
|
||||
#include "vport.h"
|
||||
|
||||
/**
|
||||
* struct vxlan_port - Keeps track of open UDP ports
|
||||
* @vs: vxlan_sock created for the port.
|
||||
* @name: vport name.
|
||||
*/
|
||||
struct vxlan_port {
|
||||
struct vxlan_sock *vs;
|
||||
char name[IFNAMSIZ];
|
||||
};
|
||||
|
||||
static inline struct vxlan_port *vxlan_vport(const struct vport *vport)
|
||||
{
|
||||
return vport_priv(vport);
|
||||
}
|
||||
|
||||
/* Called with rcu_read_lock and BH disabled. */
|
||||
static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni)
|
||||
{
|
||||
struct ovs_tunnel_info tun_info;
|
||||
struct vport *vport = vs->data;
|
||||
struct iphdr *iph;
|
||||
__be64 key;
|
||||
|
||||
/* Save outer tunnel values */
|
||||
iph = ip_hdr(skb);
|
||||
key = cpu_to_be64(ntohl(vx_vni) >> 8);
|
||||
ovs_flow_tun_info_init(&tun_info, iph, key, TUNNEL_KEY, NULL, 0);
|
||||
|
||||
ovs_vport_receive(vport, skb, &tun_info);
|
||||
}
|
||||
|
||||
static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
|
||||
{
|
||||
struct vxlan_port *vxlan_port = vxlan_vport(vport);
|
||||
__be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
|
||||
|
||||
if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
|
||||
return -EMSGSIZE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vxlan_tnl_destroy(struct vport *vport)
|
||||
{
|
||||
struct vxlan_port *vxlan_port = vxlan_vport(vport);
|
||||
|
||||
vxlan_sock_release(vxlan_port->vs);
|
||||
|
||||
ovs_vport_deferred_free(vport);
|
||||
}
|
||||
|
||||
static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
|
||||
{
|
||||
struct net *net = ovs_dp_get_net(parms->dp);
|
||||
struct nlattr *options = parms->options;
|
||||
struct vxlan_port *vxlan_port;
|
||||
struct vxlan_sock *vs;
|
||||
struct vport *vport;
|
||||
struct nlattr *a;
|
||||
u16 dst_port;
|
||||
int err;
|
||||
|
||||
if (!options) {
|
||||
err = -EINVAL;
|
||||
goto error;
|
||||
}
|
||||
a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
|
||||
if (a && nla_len(a) == sizeof(u16)) {
|
||||
dst_port = nla_get_u16(a);
|
||||
} else {
|
||||
/* Require destination port from userspace. */
|
||||
err = -EINVAL;
|
||||
goto error;
|
||||
}
|
||||
|
||||
vport = ovs_vport_alloc(sizeof(struct vxlan_port),
|
||||
&ovs_vxlan_vport_ops, parms);
|
||||
if (IS_ERR(vport))
|
||||
return vport;
|
||||
|
||||
vxlan_port = vxlan_vport(vport);
|
||||
strncpy(vxlan_port->name, parms->name, IFNAMSIZ);
|
||||
|
||||
vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true, 0);
|
||||
if (IS_ERR(vs)) {
|
||||
ovs_vport_free(vport);
|
||||
return (void *)vs;
|
||||
}
|
||||
vxlan_port->vs = vs;
|
||||
|
||||
return vport;
|
||||
|
||||
error:
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
|
||||
{
|
||||
struct net *net = ovs_dp_get_net(vport->dp);
|
||||
struct vxlan_port *vxlan_port = vxlan_vport(vport);
|
||||
__be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
|
||||
struct ovs_key_ipv4_tunnel *tun_key;
|
||||
struct rtable *rt;
|
||||
struct flowi4 fl;
|
||||
__be16 src_port;
|
||||
__be16 df;
|
||||
int err;
|
||||
|
||||
if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
|
||||
err = -EINVAL;
|
||||
goto error;
|
||||
}
|
||||
|
||||
tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
|
||||
/* Route lookup */
|
||||
memset(&fl, 0, sizeof(fl));
|
||||
fl.daddr = tun_key->ipv4_dst;
|
||||
fl.saddr = tun_key->ipv4_src;
|
||||
fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
|
||||
fl.flowi4_mark = skb->mark;
|
||||
fl.flowi4_proto = IPPROTO_UDP;
|
||||
|
||||
rt = ip_route_output_key(net, &fl);
|
||||
if (IS_ERR(rt)) {
|
||||
err = PTR_ERR(rt);
|
||||
goto error;
|
||||
}
|
||||
|
||||
df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
|
||||
htons(IP_DF) : 0;
|
||||
|
||||
skb->ignore_df = 1;
|
||||
|
||||
src_port = udp_flow_src_port(net, skb, 0, 0, true);
|
||||
|
||||
err = vxlan_xmit_skb(vxlan_port->vs, rt, skb,
|
||||
fl.saddr, tun_key->ipv4_dst,
|
||||
tun_key->ipv4_tos, tun_key->ipv4_ttl, df,
|
||||
src_port, dst_port,
|
||||
htonl(be64_to_cpu(tun_key->tun_id) << 8),
|
||||
false);
|
||||
if (err < 0)
|
||||
ip_rt_put(rt);
|
||||
error:
|
||||
return err;
|
||||
}
|
||||
|
||||
static const char *vxlan_get_name(const struct vport *vport)
|
||||
{
|
||||
struct vxlan_port *vxlan_port = vxlan_vport(vport);
|
||||
return vxlan_port->name;
|
||||
}
|
||||
|
||||
const struct vport_ops ovs_vxlan_vport_ops = {
|
||||
.type = OVS_VPORT_TYPE_VXLAN,
|
||||
.create = vxlan_tnl_create,
|
||||
.destroy = vxlan_tnl_destroy,
|
||||
.get_name = vxlan_get_name,
|
||||
.get_options = vxlan_get_options,
|
||||
.send = vxlan_tnl_send,
|
||||
};
|
537
net/openvswitch/vport.c
Normal file
537
net/openvswitch/vport.c
Normal file
|
@ -0,0 +1,537 @@
|
|||
/*
|
||||
* Copyright (c) 2007-2014 Nicira, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of version 2 of the GNU General Public
|
||||
* License as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
* 02110-1301, USA
|
||||
*/
|
||||
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/if.h>
|
||||
#include <linux/if_vlan.h>
|
||||
#include <linux/jhash.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
#include <linux/compat.h>
|
||||
#include <net/net_namespace.h>
|
||||
|
||||
#include "datapath.h"
|
||||
#include "vport.h"
|
||||
#include "vport-internal_dev.h"
|
||||
|
||||
static void ovs_vport_record_error(struct vport *,
|
||||
enum vport_err_type err_type);
|
||||
|
||||
/* List of statically compiled vport implementations. Don't forget to also
|
||||
* add yours to the list at the bottom of vport.h. */
|
||||
static const struct vport_ops *vport_ops_list[] = {
|
||||
&ovs_netdev_vport_ops,
|
||||
&ovs_internal_vport_ops,
|
||||
|
||||
#ifdef CONFIG_OPENVSWITCH_GRE
|
||||
&ovs_gre_vport_ops,
|
||||
#endif
|
||||
#ifdef CONFIG_OPENVSWITCH_VXLAN
|
||||
&ovs_vxlan_vport_ops,
|
||||
#endif
|
||||
#ifdef CONFIG_OPENVSWITCH_GENEVE
|
||||
&ovs_geneve_vport_ops,
|
||||
#endif
|
||||
};
|
||||
|
||||
/* Protected by RCU read lock for reading, ovs_mutex for writing. */
|
||||
static struct hlist_head *dev_table;
|
||||
#define VPORT_HASH_BUCKETS 1024
|
||||
|
||||
/**
|
||||
* ovs_vport_init - initialize vport subsystem
|
||||
*
|
||||
* Called at module load time to initialize the vport subsystem.
|
||||
*/
|
||||
int ovs_vport_init(void)
|
||||
{
|
||||
dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
|
||||
GFP_KERNEL);
|
||||
if (!dev_table)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ovs_vport_exit - shutdown vport subsystem
|
||||
*
|
||||
* Called at module exit time to shutdown the vport subsystem.
|
||||
*/
|
||||
void ovs_vport_exit(void)
|
||||
{
|
||||
kfree(dev_table);
|
||||
}
|
||||
|
||||
static struct hlist_head *hash_bucket(struct net *net, const char *name)
|
||||
{
|
||||
unsigned int hash = jhash(name, strlen(name), (unsigned long) net);
|
||||
return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
|
||||
}
|
||||
|
||||
/**
|
||||
* ovs_vport_locate - find a port that has already been created
|
||||
*
|
||||
* @name: name of port to find
|
||||
*
|
||||
* Must be called with ovs or RCU read lock.
|
||||
*/
|
||||
struct vport *ovs_vport_locate(struct net *net, const char *name)
|
||||
{
|
||||
struct hlist_head *bucket = hash_bucket(net, name);
|
||||
struct vport *vport;
|
||||
|
||||
hlist_for_each_entry_rcu(vport, bucket, hash_node)
|
||||
if (!strcmp(name, vport->ops->get_name(vport)) &&
|
||||
net_eq(ovs_dp_get_net(vport->dp), net))
|
||||
return vport;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* ovs_vport_alloc - allocate and initialize new vport
|
||||
*
|
||||
* @priv_size: Size of private data area to allocate.
|
||||
* @ops: vport device ops
|
||||
*
|
||||
* Allocate and initialize a new vport defined by @ops. The vport will contain
|
||||
* a private data area of size @priv_size that can be accessed using
|
||||
* vport_priv(). vports that are no longer needed should be released with
|
||||
* vport_free().
|
||||
*/
|
||||
struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
|
||||
const struct vport_parms *parms)
|
||||
{
|
||||
struct vport *vport;
|
||||
size_t alloc_size;
|
||||
|
||||
alloc_size = sizeof(struct vport);
|
||||
if (priv_size) {
|
||||
alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
|
||||
alloc_size += priv_size;
|
||||
}
|
||||
|
||||
vport = kzalloc(alloc_size, GFP_KERNEL);
|
||||
if (!vport)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
vport->dp = parms->dp;
|
||||
vport->port_no = parms->port_no;
|
||||
vport->ops = ops;
|
||||
INIT_HLIST_NODE(&vport->dp_hash_node);
|
||||
|
||||
if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) {
|
||||
kfree(vport);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
vport->percpu_stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
|
||||
if (!vport->percpu_stats) {
|
||||
kfree(vport);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
return vport;
|
||||
}
|
||||
|
||||
/**
|
||||
* ovs_vport_free - uninitialize and free vport
|
||||
*
|
||||
* @vport: vport to free
|
||||
*
|
||||
* Frees a vport allocated with vport_alloc() when it is no longer needed.
|
||||
*
|
||||
* The caller must ensure that an RCU grace period has passed since the last
|
||||
* time @vport was in a datapath.
|
||||
*/
|
||||
void ovs_vport_free(struct vport *vport)
|
||||
{
|
||||
/* vport is freed from RCU callback or error path, Therefore
|
||||
* it is safe to use raw dereference.
|
||||
*/
|
||||
kfree(rcu_dereference_raw(vport->upcall_portids));
|
||||
free_percpu(vport->percpu_stats);
|
||||
kfree(vport);
|
||||
}
|
||||
|
||||
/**
|
||||
* ovs_vport_add - add vport device (for kernel callers)
|
||||
*
|
||||
* @parms: Information about new vport.
|
||||
*
|
||||
* Creates a new vport with the specified configuration (which is dependent on
|
||||
* device type). ovs_mutex must be held.
|
||||
*/
|
||||
struct vport *ovs_vport_add(const struct vport_parms *parms)
|
||||
{
|
||||
struct vport *vport;
|
||||
int err = 0;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
|
||||
if (vport_ops_list[i]->type == parms->type) {
|
||||
struct hlist_head *bucket;
|
||||
|
||||
vport = vport_ops_list[i]->create(parms);
|
||||
if (IS_ERR(vport)) {
|
||||
err = PTR_ERR(vport);
|
||||
goto out;
|
||||
}
|
||||
|
||||
bucket = hash_bucket(ovs_dp_get_net(vport->dp),
|
||||
vport->ops->get_name(vport));
|
||||
hlist_add_head_rcu(&vport->hash_node, bucket);
|
||||
return vport;
|
||||
}
|
||||
}
|
||||
|
||||
err = -EAFNOSUPPORT;
|
||||
|
||||
out:
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
/**
|
||||
* ovs_vport_set_options - modify existing vport device (for kernel callers)
|
||||
*
|
||||
* @vport: vport to modify.
|
||||
* @options: New configuration.
|
||||
*
|
||||
* Modifies an existing device with the specified configuration (which is
|
||||
* dependent on device type). ovs_mutex must be held.
|
||||
*/
|
||||
int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
|
||||
{
|
||||
if (!vport->ops->set_options)
|
||||
return -EOPNOTSUPP;
|
||||
return vport->ops->set_options(vport, options);
|
||||
}
|
||||
|
||||
/**
|
||||
* ovs_vport_del - delete existing vport device
|
||||
*
|
||||
* @vport: vport to delete.
|
||||
*
|
||||
* Detaches @vport from its datapath and destroys it. It is possible to fail
|
||||
* for reasons such as lack of memory. ovs_mutex must be held.
|
||||
*/
|
||||
void ovs_vport_del(struct vport *vport)
|
||||
{
|
||||
ASSERT_OVSL();
|
||||
|
||||
hlist_del_rcu(&vport->hash_node);
|
||||
|
||||
vport->ops->destroy(vport);
|
||||
}
|
||||
|
||||
/**
|
||||
* ovs_vport_get_stats - retrieve device stats
|
||||
*
|
||||
* @vport: vport from which to retrieve the stats
|
||||
* @stats: location to store stats
|
||||
*
|
||||
* Retrieves transmit, receive, and error stats for the given device.
|
||||
*
|
||||
* Must be called with ovs_mutex or rcu_read_lock.
|
||||
*/
|
||||
void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
|
||||
{
|
||||
int i;
|
||||
|
||||
memset(stats, 0, sizeof(*stats));
|
||||
|
||||
/* We potentially have 2 sources of stats that need to be combined:
|
||||
* those we have collected (split into err_stats and percpu_stats) from
|
||||
* set_stats() and device error stats from netdev->get_stats() (for
|
||||
* errors that happen downstream and therefore aren't reported through
|
||||
* our vport_record_error() function).
|
||||
* Stats from first source are reported by ovs (OVS_VPORT_ATTR_STATS).
|
||||
* netdev-stats can be directly read over netlink-ioctl.
|
||||
*/
|
||||
|
||||
stats->rx_errors = atomic_long_read(&vport->err_stats.rx_errors);
|
||||
stats->tx_errors = atomic_long_read(&vport->err_stats.tx_errors);
|
||||
stats->tx_dropped = atomic_long_read(&vport->err_stats.tx_dropped);
|
||||
stats->rx_dropped = atomic_long_read(&vport->err_stats.rx_dropped);
|
||||
|
||||
for_each_possible_cpu(i) {
|
||||
const struct pcpu_sw_netstats *percpu_stats;
|
||||
struct pcpu_sw_netstats local_stats;
|
||||
unsigned int start;
|
||||
|
||||
percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
|
||||
|
||||
do {
|
||||
start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
|
||||
local_stats = *percpu_stats;
|
||||
} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
|
||||
|
||||
stats->rx_bytes += local_stats.rx_bytes;
|
||||
stats->rx_packets += local_stats.rx_packets;
|
||||
stats->tx_bytes += local_stats.tx_bytes;
|
||||
stats->tx_packets += local_stats.tx_packets;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ovs_vport_get_options - retrieve device options
|
||||
*
|
||||
* @vport: vport from which to retrieve the options.
|
||||
* @skb: sk_buff where options should be appended.
|
||||
*
|
||||
* Retrieves the configuration of the given device, appending an
|
||||
* %OVS_VPORT_ATTR_OPTIONS attribute that in turn contains nested
|
||||
* vport-specific attributes to @skb.
|
||||
*
|
||||
* Returns 0 if successful, -EMSGSIZE if @skb has insufficient room, or another
|
||||
* negative error code if a real error occurred. If an error occurs, @skb is
|
||||
* left unmodified.
|
||||
*
|
||||
* Must be called with ovs_mutex or rcu_read_lock.
|
||||
*/
|
||||
int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
|
||||
{
|
||||
struct nlattr *nla;
|
||||
int err;
|
||||
|
||||
if (!vport->ops->get_options)
|
||||
return 0;
|
||||
|
||||
nla = nla_nest_start(skb, OVS_VPORT_ATTR_OPTIONS);
|
||||
if (!nla)
|
||||
return -EMSGSIZE;
|
||||
|
||||
err = vport->ops->get_options(vport, skb);
|
||||
if (err) {
|
||||
nla_nest_cancel(skb, nla);
|
||||
return err;
|
||||
}
|
||||
|
||||
nla_nest_end(skb, nla);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ovs_vport_set_upcall_portids - set upcall portids of @vport.
|
||||
*
|
||||
* @vport: vport to modify.
|
||||
* @ids: new configuration, an array of port ids.
|
||||
*
|
||||
* Sets the vport's upcall_portids to @ids.
|
||||
*
|
||||
* Returns 0 if successful, -EINVAL if @ids is zero length or cannot be parsed
|
||||
* as an array of U32.
|
||||
*
|
||||
* Must be called with ovs_mutex.
|
||||
*/
|
||||
int ovs_vport_set_upcall_portids(struct vport *vport, struct nlattr *ids)
|
||||
{
|
||||
struct vport_portids *old, *vport_portids;
|
||||
|
||||
if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
|
||||
return -EINVAL;
|
||||
|
||||
old = ovsl_dereference(vport->upcall_portids);
|
||||
|
||||
vport_portids = kmalloc(sizeof(*vport_portids) + nla_len(ids),
|
||||
GFP_KERNEL);
|
||||
if (!vport_portids)
|
||||
return -ENOMEM;
|
||||
|
||||
vport_portids->n_ids = nla_len(ids) / sizeof(u32);
|
||||
vport_portids->rn_ids = reciprocal_value(vport_portids->n_ids);
|
||||
nla_memcpy(vport_portids->ids, ids, nla_len(ids));
|
||||
|
||||
rcu_assign_pointer(vport->upcall_portids, vport_portids);
|
||||
|
||||
if (old)
|
||||
kfree_rcu(old, rcu);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ovs_vport_get_upcall_portids - get the upcall_portids of @vport.
|
||||
*
|
||||
* @vport: vport from which to retrieve the portids.
|
||||
* @skb: sk_buff where portids should be appended.
|
||||
*
|
||||
* Retrieves the configuration of the given vport, appending the
|
||||
* %OVS_VPORT_ATTR_UPCALL_PID attribute which is the array of upcall
|
||||
* portids to @skb.
|
||||
*
|
||||
* Returns 0 if successful, -EMSGSIZE if @skb has insufficient room.
|
||||
* If an error occurs, @skb is left unmodified. Must be called with
|
||||
* ovs_mutex or rcu_read_lock.
|
||||
*/
|
||||
int ovs_vport_get_upcall_portids(const struct vport *vport,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct vport_portids *ids;
|
||||
|
||||
ids = rcu_dereference_ovsl(vport->upcall_portids);
|
||||
|
||||
if (vport->dp->user_features & OVS_DP_F_VPORT_PIDS)
|
||||
return nla_put(skb, OVS_VPORT_ATTR_UPCALL_PID,
|
||||
ids->n_ids * sizeof(u32), (void *)ids->ids);
|
||||
else
|
||||
return nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, ids->ids[0]);
|
||||
}
|
||||
|
||||
/**
|
||||
* ovs_vport_find_upcall_portid - find the upcall portid to send upcall.
|
||||
*
|
||||
* @vport: vport from which the missed packet is received.
|
||||
* @skb: skb that the missed packet was received.
|
||||
*
|
||||
* Uses the skb_get_hash() to select the upcall portid to send the
|
||||
* upcall.
|
||||
*
|
||||
* Returns the portid of the target socket. Must be called with rcu_read_lock.
|
||||
*/
|
||||
u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
|
||||
{
|
||||
struct vport_portids *ids;
|
||||
u32 ids_index;
|
||||
u32 hash;
|
||||
|
||||
ids = rcu_dereference(vport->upcall_portids);
|
||||
|
||||
if (ids->n_ids == 1 && ids->ids[0] == 0)
|
||||
return 0;
|
||||
|
||||
hash = skb_get_hash(skb);
|
||||
ids_index = hash - ids->n_ids * reciprocal_divide(hash, ids->rn_ids);
|
||||
return ids->ids[ids_index];
|
||||
}
|
||||
|
||||
/**
|
||||
* ovs_vport_receive - pass up received packet to the datapath for processing
|
||||
*
|
||||
* @vport: vport that received the packet
|
||||
* @skb: skb that was received
|
||||
* @tun_key: tunnel (if any) that carried packet
|
||||
*
|
||||
* Must be called with rcu_read_lock. The packet cannot be shared and
|
||||
* skb->data should point to the Ethernet header.
|
||||
*/
|
||||
void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
|
||||
struct ovs_tunnel_info *tun_info)
|
||||
{
|
||||
struct pcpu_sw_netstats *stats;
|
||||
struct sw_flow_key key;
|
||||
int error;
|
||||
|
||||
stats = this_cpu_ptr(vport->percpu_stats);
|
||||
u64_stats_update_begin(&stats->syncp);
|
||||
stats->rx_packets++;
|
||||
stats->rx_bytes += skb->len;
|
||||
u64_stats_update_end(&stats->syncp);
|
||||
|
||||
OVS_CB(skb)->input_vport = vport;
|
||||
OVS_CB(skb)->egress_tun_info = NULL;
|
||||
/* Extract flow from 'skb' into 'key'. */
|
||||
error = ovs_flow_key_extract(tun_info, skb, &key);
|
||||
if (unlikely(error)) {
|
||||
kfree_skb(skb);
|
||||
return;
|
||||
}
|
||||
ovs_dp_process_packet(skb, &key);
|
||||
}
|
||||
|
||||
/**
|
||||
* ovs_vport_send - send a packet on a device
|
||||
*
|
||||
* @vport: vport on which to send the packet
|
||||
* @skb: skb to send
|
||||
*
|
||||
* Sends the given packet and returns the length of data sent. Either ovs
|
||||
* lock or rcu_read_lock must be held.
|
||||
*/
|
||||
int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
|
||||
{
|
||||
int sent = vport->ops->send(vport, skb);
|
||||
|
||||
if (likely(sent > 0)) {
|
||||
struct pcpu_sw_netstats *stats;
|
||||
|
||||
stats = this_cpu_ptr(vport->percpu_stats);
|
||||
|
||||
u64_stats_update_begin(&stats->syncp);
|
||||
stats->tx_packets++;
|
||||
stats->tx_bytes += sent;
|
||||
u64_stats_update_end(&stats->syncp);
|
||||
} else if (sent < 0) {
|
||||
ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
|
||||
kfree_skb(skb);
|
||||
} else
|
||||
ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
|
||||
|
||||
return sent;
|
||||
}
|
||||
|
||||
/**
|
||||
* ovs_vport_record_error - indicate device error to generic stats layer
|
||||
*
|
||||
* @vport: vport that encountered the error
|
||||
* @err_type: one of enum vport_err_type types to indicate the error type
|
||||
*
|
||||
* If using the vport generic stats layer indicate that an error of the given
|
||||
* type has occurred.
|
||||
*/
|
||||
static void ovs_vport_record_error(struct vport *vport,
|
||||
enum vport_err_type err_type)
|
||||
{
|
||||
switch (err_type) {
|
||||
case VPORT_E_RX_DROPPED:
|
||||
atomic_long_inc(&vport->err_stats.rx_dropped);
|
||||
break;
|
||||
|
||||
case VPORT_E_RX_ERROR:
|
||||
atomic_long_inc(&vport->err_stats.rx_errors);
|
||||
break;
|
||||
|
||||
case VPORT_E_TX_DROPPED:
|
||||
atomic_long_inc(&vport->err_stats.tx_dropped);
|
||||
break;
|
||||
|
||||
case VPORT_E_TX_ERROR:
|
||||
atomic_long_inc(&vport->err_stats.tx_errors);
|
||||
break;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static void free_vport_rcu(struct rcu_head *rcu)
|
||||
{
|
||||
struct vport *vport = container_of(rcu, struct vport, rcu);
|
||||
|
||||
ovs_vport_free(vport);
|
||||
}
|
||||
|
||||
void ovs_vport_deferred_free(struct vport *vport)
|
||||
{
|
||||
if (!vport)
|
||||
return;
|
||||
|
||||
call_rcu(&vport->rcu, free_vport_rcu);
|
||||
}
|
229
net/openvswitch/vport.h
Normal file
229
net/openvswitch/vport.h
Normal file
|
@ -0,0 +1,229 @@
|
|||
/*
|
||||
* Copyright (c) 2007-2012 Nicira, Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of version 2 of the GNU General Public
|
||||
* License as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
* 02110-1301, USA
|
||||
*/
|
||||
|
||||
#ifndef VPORT_H
|
||||
#define VPORT_H 1
|
||||
|
||||
#include <linux/if_tunnel.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/netlink.h>
|
||||
#include <linux/openvswitch.h>
|
||||
#include <linux/reciprocal_div.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/u64_stats_sync.h>
|
||||
|
||||
#include "datapath.h"
|
||||
|
||||
struct vport;
|
||||
struct vport_parms;
|
||||
|
||||
/* The following definitions are for users of the vport subsytem: */
|
||||
|
||||
struct vport_net {
|
||||
struct vport __rcu *gre_vport;
|
||||
};
|
||||
|
||||
int ovs_vport_init(void);
|
||||
void ovs_vport_exit(void);
|
||||
|
||||
struct vport *ovs_vport_add(const struct vport_parms *);
|
||||
void ovs_vport_del(struct vport *);
|
||||
|
||||
struct vport *ovs_vport_locate(struct net *net, const char *name);
|
||||
|
||||
void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
|
||||
|
||||
int ovs_vport_set_options(struct vport *, struct nlattr *options);
|
||||
int ovs_vport_get_options(const struct vport *, struct sk_buff *);
|
||||
|
||||
int ovs_vport_set_upcall_portids(struct vport *, struct nlattr *pids);
|
||||
int ovs_vport_get_upcall_portids(const struct vport *, struct sk_buff *);
|
||||
u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *);
|
||||
|
||||
int ovs_vport_send(struct vport *, struct sk_buff *);
|
||||
|
||||
/* The following definitions are for implementers of vport devices: */
|
||||
|
||||
struct vport_err_stats {
|
||||
atomic_long_t rx_dropped;
|
||||
atomic_long_t rx_errors;
|
||||
atomic_long_t tx_dropped;
|
||||
atomic_long_t tx_errors;
|
||||
};
|
||||
/**
|
||||
* struct vport_portids - array of netlink portids of a vport.
|
||||
* must be protected by rcu.
|
||||
* @rn_ids: The reciprocal value of @n_ids.
|
||||
* @rcu: RCU callback head for deferred destruction.
|
||||
* @n_ids: Size of @ids array.
|
||||
* @ids: Array storing the Netlink socket pids to be used for packets received
|
||||
* on this port that miss the flow table.
|
||||
*/
|
||||
struct vport_portids {
|
||||
struct reciprocal_value rn_ids;
|
||||
struct rcu_head rcu;
|
||||
u32 n_ids;
|
||||
u32 ids[];
|
||||
};
|
||||
|
||||
/**
|
||||
* struct vport - one port within a datapath
|
||||
* @rcu: RCU callback head for deferred destruction.
|
||||
* @dp: Datapath to which this port belongs.
|
||||
* @upcall_portids: RCU protected 'struct vport_portids'.
|
||||
* @port_no: Index into @dp's @ports array.
|
||||
* @hash_node: Element in @dev_table hash table in vport.c.
|
||||
* @dp_hash_node: Element in @datapath->ports hash table in datapath.c.
|
||||
* @ops: Class structure.
|
||||
* @percpu_stats: Points to per-CPU statistics used and maintained by vport
|
||||
* @err_stats: Points to error statistics used and maintained by vport
|
||||
* @detach_list: list used for detaching vport in net-exit call.
|
||||
*/
|
||||
struct vport {
|
||||
struct rcu_head rcu;
|
||||
struct datapath *dp;
|
||||
struct vport_portids __rcu *upcall_portids;
|
||||
u16 port_no;
|
||||
|
||||
struct hlist_node hash_node;
|
||||
struct hlist_node dp_hash_node;
|
||||
const struct vport_ops *ops;
|
||||
|
||||
struct pcpu_sw_netstats __percpu *percpu_stats;
|
||||
|
||||
struct vport_err_stats err_stats;
|
||||
struct list_head detach_list;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct vport_parms - parameters for creating a new vport
|
||||
*
|
||||
* @name: New vport's name.
|
||||
* @type: New vport's type.
|
||||
* @options: %OVS_VPORT_ATTR_OPTIONS attribute from Netlink message, %NULL if
|
||||
* none was supplied.
|
||||
* @dp: New vport's datapath.
|
||||
* @port_no: New vport's port number.
|
||||
*/
|
||||
struct vport_parms {
|
||||
const char *name;
|
||||
enum ovs_vport_type type;
|
||||
struct nlattr *options;
|
||||
|
||||
/* For ovs_vport_alloc(). */
|
||||
struct datapath *dp;
|
||||
u16 port_no;
|
||||
struct nlattr *upcall_portids;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct vport_ops - definition of a type of virtual port
|
||||
*
|
||||
* @type: %OVS_VPORT_TYPE_* value for this type of virtual port.
|
||||
* @create: Create a new vport configured as specified. On success returns
|
||||
* a new vport allocated with ovs_vport_alloc(), otherwise an ERR_PTR() value.
|
||||
* @destroy: Destroys a vport. Must call vport_free() on the vport but not
|
||||
* before an RCU grace period has elapsed.
|
||||
* @set_options: Modify the configuration of an existing vport. May be %NULL
|
||||
* if modification is not supported.
|
||||
* @get_options: Appends vport-specific attributes for the configuration of an
|
||||
* existing vport to a &struct sk_buff. May be %NULL for a vport that does not
|
||||
* have any configuration.
|
||||
* @get_name: Get the device's name.
|
||||
* @send: Send a packet on the device. Returns the length of the packet sent,
|
||||
* zero for dropped packets or negative for error.
|
||||
*/
|
||||
struct vport_ops {
|
||||
enum ovs_vport_type type;
|
||||
|
||||
/* Called with ovs_mutex. */
|
||||
struct vport *(*create)(const struct vport_parms *);
|
||||
void (*destroy)(struct vport *);
|
||||
|
||||
int (*set_options)(struct vport *, struct nlattr *);
|
||||
int (*get_options)(const struct vport *, struct sk_buff *);
|
||||
|
||||
/* Called with rcu_read_lock or ovs_mutex. */
|
||||
const char *(*get_name)(const struct vport *);
|
||||
|
||||
int (*send)(struct vport *, struct sk_buff *);
|
||||
};
|
||||
|
||||
enum vport_err_type {
|
||||
VPORT_E_RX_DROPPED,
|
||||
VPORT_E_RX_ERROR,
|
||||
VPORT_E_TX_DROPPED,
|
||||
VPORT_E_TX_ERROR,
|
||||
};
|
||||
|
||||
struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *,
|
||||
const struct vport_parms *);
|
||||
void ovs_vport_free(struct vport *);
|
||||
void ovs_vport_deferred_free(struct vport *vport);
|
||||
|
||||
#define VPORT_ALIGN 8
|
||||
|
||||
/**
|
||||
* vport_priv - access private data area of vport
|
||||
*
|
||||
* @vport: vport to access
|
||||
*
|
||||
* If a nonzero size was passed in priv_size of vport_alloc() a private data
|
||||
* area was allocated on creation. This allows that area to be accessed and
|
||||
* used for any purpose needed by the vport implementer.
|
||||
*/
|
||||
static inline void *vport_priv(const struct vport *vport)
|
||||
{
|
||||
return (u8 *)(uintptr_t)vport + ALIGN(sizeof(struct vport), VPORT_ALIGN);
|
||||
}
|
||||
|
||||
/**
|
||||
* vport_from_priv - lookup vport from private data pointer
|
||||
*
|
||||
* @priv: Start of private data area.
|
||||
*
|
||||
* It is sometimes useful to translate from a pointer to the private data
|
||||
* area to the vport, such as in the case where the private data pointer is
|
||||
* the result of a hash table lookup. @priv must point to the start of the
|
||||
* private data area.
|
||||
*/
|
||||
static inline struct vport *vport_from_priv(void *priv)
|
||||
{
|
||||
return (struct vport *)((u8 *)priv - ALIGN(sizeof(struct vport), VPORT_ALIGN));
|
||||
}
|
||||
|
||||
void ovs_vport_receive(struct vport *, struct sk_buff *,
|
||||
struct ovs_tunnel_info *);
|
||||
|
||||
/* List of statically compiled vport implementations. Don't forget to also
|
||||
* add yours to the list at the top of vport.c. */
|
||||
extern const struct vport_ops ovs_netdev_vport_ops;
|
||||
extern const struct vport_ops ovs_internal_vport_ops;
|
||||
extern const struct vport_ops ovs_gre_vport_ops;
|
||||
extern const struct vport_ops ovs_vxlan_vport_ops;
|
||||
extern const struct vport_ops ovs_geneve_vport_ops;
|
||||
|
||||
static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
|
||||
const void *start, unsigned int len)
|
||||
{
|
||||
if (skb->ip_summed == CHECKSUM_COMPLETE)
|
||||
skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
|
||||
}
|
||||
|
||||
#endif /* vport.h */
|
Loading…
Add table
Add a link
Reference in a new issue