Fixed MTP to work with TWRP

awab228 committed f6dfaef42e on 2018-06-19 23:16:04 +02:00
50820 changed files with 20846062 additions and 0 deletions

net/6lowpan/Kconfig (new file)
@@ -0,0 +1,6 @@
config 6LOWPAN
tristate "6LoWPAN Support"
depends on IPV6
---help---
This enables IPv6 over Low power Wireless Personal Area Network -
"6LoWPAN" which is supported by IEEE 802.15.4 or Bluetooth stacks.

net/6lowpan/Makefile (new file)
@@ -0,0 +1,3 @@
obj-$(CONFIG_6LOWPAN) := 6lowpan.o
6lowpan-y := iphc.o
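For orientation: in kbuild, obj-$(CONFIG_6LOWPAN) := 6lowpan.o names the module object, and 6lowpan-y lists the objects linked into it. A hypothetical sketch of extending the module with a second source file (foo.c is invented for illustration):

	# Hypothetical sketch only: adding a second source file to the module.
	obj-$(CONFIG_6LOWPAN) := 6lowpan.o
	6lowpan-y := iphc.o foo.o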

net/6lowpan/iphc.c (new file)
@@ -0,0 +1,789 @@
/*
* Copyright 2011, Siemens AG
* written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
*/
/* Based on patches from Jon Smirl <jonsmirl@gmail.com>
* Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* Jon's code is based on 6lowpan implementation for Contiki which is:
* Copyright (c) 2008, Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the Institute nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <linux/bitops.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/6lowpan.h>
#include <net/ipv6.h>
#include <net/af_ieee802154.h>
/* Uncompress address function for source and
* destination address (non-multicast).
*
* address_mode is the SAM or DAM value.
*/
static int uncompress_addr(struct sk_buff *skb,
struct in6_addr *ipaddr, const u8 address_mode,
const u8 *lladdr, const u8 addr_type,
const u8 addr_len)
{
bool fail;
switch (address_mode) {
case LOWPAN_IPHC_ADDR_00:
/* for global link addresses */
fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
break;
case LOWPAN_IPHC_ADDR_01:
/* fe80::XXXX:XXXX:XXXX:XXXX */
ipaddr->s6_addr[0] = 0xFE;
ipaddr->s6_addr[1] = 0x80;
fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[8], 8);
break;
case LOWPAN_IPHC_ADDR_02:
/* fe80::ff:fe00:XXXX */
ipaddr->s6_addr[0] = 0xFE;
ipaddr->s6_addr[1] = 0x80;
ipaddr->s6_addr[11] = 0xFF;
ipaddr->s6_addr[12] = 0xFE;
fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[14], 2);
break;
case LOWPAN_IPHC_ADDR_03:
fail = false;
switch (addr_type) {
case IEEE802154_ADDR_LONG:
/* fe80::XXXX:XXXX:XXXX:XXXX
* \_________________/
* hwaddr
*/
ipaddr->s6_addr[0] = 0xFE;
ipaddr->s6_addr[1] = 0x80;
memcpy(&ipaddr->s6_addr[8], lladdr, addr_len);
/* the second bit-flip (Universal/Local)
* is done according to RFC 2464
*/
ipaddr->s6_addr[8] ^= 0x02;
break;
case IEEE802154_ADDR_SHORT:
/* fe80::ff:fe00:XXXX
* \__/
* short_addr
*
* Universal/Local bit is zero.
*/
ipaddr->s6_addr[0] = 0xFE;
ipaddr->s6_addr[1] = 0x80;
ipaddr->s6_addr[11] = 0xFF;
ipaddr->s6_addr[12] = 0xFE;
ipaddr->s6_addr16[7] = htons(*((u16 *)lladdr));
break;
default:
pr_debug("Invalid addr_type set\n");
return -EINVAL;
}
break;
default:
pr_debug("Invalid address mode value: 0x%x\n", address_mode);
return -EINVAL;
}
if (fail) {
pr_debug("Failed to fetch skb data\n");
return -EIO;
}
raw_dump_inline(NULL, "Reconstructed ipv6 addr is",
ipaddr->s6_addr, 16);
return 0;
}
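The IEEE802154_ADDR_LONG branch above is exactly the RFC 2464 EUI-64-to-interface-ID mapping. A minimal standalone sketch of the same transform (userspace C with a hypothetical helper name, not part of this driver):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Sketch: derive a link-local IPv6 address from an 8-byte IEEE
	 * 802.15.4 long (EUI-64) address, mirroring the
	 * IEEE802154_ADDR_LONG case above. */
	static void eui64_to_link_local(const uint8_t eui[8], uint8_t ip[16])
	{
		memset(ip, 0, 16);
		ip[0] = 0xFE;            /* fe80::/64 link-local prefix */
		ip[1] = 0x80;
		memcpy(&ip[8], eui, 8);  /* the EUI-64 becomes the interface ID */
		ip[8] ^= 0x02;           /* flip the Universal/Local bit (RFC 2464) */
	}

	int main(void)
	{
		const uint8_t eui[8] = { 0x00, 0x12, 0x4B, 0x00, 0x01, 0x02, 0x03, 0x04 };
		uint8_t ip[16];
		int i;

		eui64_to_link_local(eui, ip);
		for (i = 0; i < 16; i += 2)
			printf("%02x%02x%c", ip[i], ip[i + 1], i < 14 ? ':' : '\n');
		return 0;
	}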
/* Uncompress address function for source context
* based address (non-multicast).
*/
static int uncompress_context_based_src_addr(struct sk_buff *skb,
struct in6_addr *ipaddr,
const u8 sam)
{
switch (sam) {
case LOWPAN_IPHC_ADDR_00:
/* unspec address ::
* Do nothing, address is already ::
*/
break;
case LOWPAN_IPHC_ADDR_01:
/* TODO */
case LOWPAN_IPHC_ADDR_02:
/* TODO */
case LOWPAN_IPHC_ADDR_03:
/* TODO */
netdev_warn(skb->dev, "SAM value 0x%x not supported\n", sam);
return -EINVAL;
default:
pr_debug("Invalid sam value: 0x%x\n", sam);
return -EINVAL;
}
raw_dump_inline(NULL,
"Reconstructed context based ipv6 src addr is",
ipaddr->s6_addr, 16);
return 0;
}
static int skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr,
struct net_device *dev, skb_delivery_cb deliver_skb)
{
struct sk_buff *new;
int stat;
new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb),
GFP_ATOMIC);
kfree_skb(skb);
if (!new)
return -ENOMEM;
skb_push(new, sizeof(struct ipv6hdr));
skb_reset_network_header(new);
skb_copy_to_linear_data(new, hdr, sizeof(struct ipv6hdr));
new->protocol = htons(ETH_P_IPV6);
new->pkt_type = PACKET_HOST;
new->dev = dev;
raw_dump_table(__func__, "raw skb data dump before receiving",
new->data, new->len);
stat = deliver_skb(new, dev);
kfree_skb(new);
return stat;
}
/* Uncompress function for multicast destination address,
* when M bit is set.
*/
static int lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
struct in6_addr *ipaddr,
const u8 dam)
{
bool fail;
switch (dam) {
case LOWPAN_IPHC_DAM_00:
/* 00: 128 bits. The full address
* is carried in-line.
*/
fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
break;
case LOWPAN_IPHC_DAM_01:
/* 01: 48 bits. The address takes
* the form ffXX::00XX:XXXX:XXXX.
*/
ipaddr->s6_addr[0] = 0xFF;
fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[11], 5);
break;
case LOWPAN_IPHC_DAM_10:
/* 10: 32 bits. The address takes
* the form ffXX::00XX:XXXX.
*/
ipaddr->s6_addr[0] = 0xFF;
fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[13], 3);
break;
case LOWPAN_IPHC_DAM_11:
/* 11: 8 bits. The address takes
* the form ff02::00XX.
*/
ipaddr->s6_addr[0] = 0xFF;
ipaddr->s6_addr[1] = 0x02;
fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[15], 1);
break;
default:
pr_debug("DAM value has a wrong value: 0x%x\n", dam);
return -EINVAL;
}
if (fail) {
pr_debug("Failed to fetch skb data\n");
return -EIO;
}
raw_dump_inline(NULL, "Reconstructed ipv6 multicast addr is",
ipaddr->s6_addr, 16);
return 0;
}
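As a worked example of the LOWPAN_IPHC_DAM_11 case: the well-known all-nodes group ff02::1 compresses to the single inline octet 0x01. A standalone sketch of the expansion (userspace C, illustrative only):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Sketch: expand the 8-bit multicast form ff02::00XX from its
	 * single inline octet, as in the LOWPAN_IPHC_DAM_11 branch above. */
	static void dam11_expand(uint8_t inline_byte, uint8_t ip[16])
	{
		memset(ip, 0, 16);
		ip[0] = 0xFF;
		ip[1] = 0x02;
		ip[15] = inline_byte;
	}

	int main(void)
	{
		uint8_t ip[16];

		dam11_expand(0x01, ip); /* yields ff02::1, the all-nodes group */
		printf("ff%02x::%x\n", ip[1], ip[15]);
		return 0;
	}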
static int uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh)
{
bool fail;
u8 tmp = 0, val = 0;
fail = lowpan_fetch_skb(skb, &tmp, sizeof(tmp));
if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
pr_debug("UDP header uncompression\n");
switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
case LOWPAN_NHC_UDP_CS_P_00:
fail |= lowpan_fetch_skb(skb, &uh->source,
sizeof(uh->source));
fail |= lowpan_fetch_skb(skb, &uh->dest,
sizeof(uh->dest));
break;
case LOWPAN_NHC_UDP_CS_P_01:
fail |= lowpan_fetch_skb(skb, &uh->source,
sizeof(uh->source));
fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
uh->dest = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
break;
case LOWPAN_NHC_UDP_CS_P_10:
fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
uh->source = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
fail |= lowpan_fetch_skb(skb, &uh->dest,
sizeof(uh->dest));
break;
case LOWPAN_NHC_UDP_CS_P_11:
fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
uh->source = htons(LOWPAN_NHC_UDP_4BIT_PORT +
(val >> 4));
uh->dest = htons(LOWPAN_NHC_UDP_4BIT_PORT +
(val & 0x0f));
break;
default:
pr_debug("ERROR: unknown UDP format\n");
goto err;
}
pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
ntohs(uh->source), ntohs(uh->dest));
/* checksum */
if (tmp & LOWPAN_NHC_UDP_CS_C) {
pr_debug_ratelimited("checksum elided currently not supported\n");
goto err;
} else {
fail |= lowpan_fetch_skb(skb, &uh->check,
sizeof(uh->check));
}
/* UDP length needs to be inferred from the lower layers;
* here, we obtain the hint from the remaining size of the
* frame
*/
uh->len = htons(skb->len + sizeof(struct udphdr));
pr_debug("uncompressed UDP length: src = %d", ntohs(uh->len));
} else {
pr_debug("ERROR: unsupported NH format\n");
goto err;
}
if (fail)
goto err;
return 0;
err:
return -EINVAL;
}
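For orientation, the RFC 6282 port ranges behind these constants: the 4-bit form covers 0xF0B0-0xF0BF (both ports packed into one byte), and the 8-bit form covers 0xF000-0xF0FF. A standalone sketch of the CS_P_11 decode above (the base value is taken from RFC 6282 and assumed to match LOWPAN_NHC_UDP_4BIT_PORT):

	#include <stdint.h>
	#include <stdio.h>

	/* RFC 6282 NHC-UDP 4-bit port base; assumed from the RFC,
	 * matching what LOWPAN_NHC_UDP_4BIT_PORT denotes above. */
	#define UDP_4BIT_PORT 0xF0B0

	/* Decode the LOWPAN_NHC_UDP_CS_P_11 case: both ports packed into
	 * one byte, source in the high nibble, destination in the low. */
	static void decode_ports_11(uint8_t val, uint16_t *src, uint16_t *dst)
	{
		*src = UDP_4BIT_PORT + (val >> 4);
		*dst = UDP_4BIT_PORT + (val & 0x0f);
	}

	int main(void)
	{
		uint16_t src, dst;

		decode_ports_11(0x12, &src, &dst);
		printf("src=%u dst=%u\n", src, dst); /* 61617 and 61618 */
		return 0;
	}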
/* TTL uncompression values */
static const u8 lowpan_ttl_values[] = { 0, 1, 64, 255 };
int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
const u8 *saddr, const u8 saddr_type, const u8 saddr_len,
const u8 *daddr, const u8 daddr_type, const u8 daddr_len,
u8 iphc0, u8 iphc1, skb_delivery_cb deliver_skb)
{
struct ipv6hdr hdr = {};
u8 tmp, num_context = 0;
int err;
raw_dump_table(__func__, "raw skb data dump uncompressed",
skb->data, skb->len);
/* an additional context byte follows if the CID flag is set */
if (iphc1 & LOWPAN_IPHC_CID) {
pr_debug("CID flag is set, increase header with one\n");
if (lowpan_fetch_skb(skb, &num_context, sizeof(num_context)))
goto drop;
}
hdr.version = 6;
/* Traffic Class and Flow Label */
switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
/* Traffic Class and Flow Label carried in-line
* ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
*/
case 0: /* 00b */
if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
goto drop;
memcpy(&hdr.flow_lbl, &skb->data[0], 3);
skb_pull(skb, 3);
hdr.priority = ((tmp >> 2) & 0x0f);
hdr.flow_lbl[0] = ((tmp >> 2) & 0x30) | (tmp << 6) |
(hdr.flow_lbl[0] & 0x0f);
break;
/* Traffic class carried in-line
* ECN + DSCP (1 byte), Flow Label is elided
*/
case 2: /* 10b */
if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
goto drop;
hdr.priority = ((tmp >> 2) & 0x0f);
hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
break;
/* Flow Label carried in-line
* ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
*/
case 1: /* 01b */
if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
goto drop;
hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
skb_pull(skb, 2);
break;
/* Traffic Class and Flow Label are elided */
case 3: /* 11b */
break;
default:
break;
}
/* Next Header */
if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
/* Next header is carried inline */
if (lowpan_fetch_skb(skb, &hdr.nexthdr, sizeof(hdr.nexthdr)))
goto drop;
pr_debug("NH flag is set, next header carried inline: %02x\n",
hdr.nexthdr);
}
/* Hop Limit */
if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I) {
hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
} else {
if (lowpan_fetch_skb(skb, &hdr.hop_limit,
sizeof(hdr.hop_limit)))
goto drop;
}
/* Extract SAM to the tmp variable */
tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;
if (iphc1 & LOWPAN_IPHC_SAC) {
/* Source address context based uncompression */
pr_debug("SAC bit is set. Handle context based source address.\n");
err = uncompress_context_based_src_addr(skb, &hdr.saddr, tmp);
} else {
/* Source address uncompression */
pr_debug("source address stateless compression\n");
err = uncompress_addr(skb, &hdr.saddr, tmp, saddr,
saddr_type, saddr_len);
}
/* Check on error of previous branch */
if (err)
goto drop;
/* Extract DAM to the tmp variable */
tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03;
/* check for Multicast Compression */
if (iphc1 & LOWPAN_IPHC_M) {
if (iphc1 & LOWPAN_IPHC_DAC) {
pr_debug("dest: context-based mcast compression\n");
/* TODO: implement this */
} else {
err = lowpan_uncompress_multicast_daddr(skb, &hdr.daddr,
tmp);
if (err)
goto drop;
}
} else {
err = uncompress_addr(skb, &hdr.daddr, tmp, daddr,
daddr_type, daddr_len);
pr_debug("dest: stateless compression mode %d dest %pI6c\n",
tmp, &hdr.daddr);
if (err)
goto drop;
}
/* UDP data uncompression */
if (iphc0 & LOWPAN_IPHC_NH_C) {
struct udphdr uh;
struct sk_buff *new;
if (uncompress_udp_header(skb, &uh))
goto drop;
/* replace the compressed UDP head by the uncompressed UDP
* header
*/
new = skb_copy_expand(skb, sizeof(struct udphdr),
skb_tailroom(skb), GFP_ATOMIC);
kfree_skb(skb);
if (!new)
return -ENOMEM;
skb = new;
skb_push(skb, sizeof(struct udphdr));
skb_reset_transport_header(skb);
skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr));
raw_dump_table(__func__, "raw UDP header dump",
(u8 *)&uh, sizeof(uh));
hdr.nexthdr = UIP_PROTO_UDP;
}
hdr.payload_len = htons(skb->len);
pr_debug("skb headroom size = %d, data length = %d\n",
skb_headroom(skb), skb->len);
pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t"
"nexthdr = 0x%02x\n\thop_lim = %d\n\tdest = %pI6c\n",
hdr.version, ntohs(hdr.payload_len), hdr.nexthdr,
hdr.hop_limit, &hdr.daddr);
raw_dump_table(__func__, "raw header dump", (u8 *)&hdr, sizeof(hdr));
return skb_deliver(skb, &hdr, dev, deliver_skb);
drop:
kfree_skb(skb);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(lowpan_process_data);
static u8 lowpan_compress_addr_64(u8 **hc_ptr, u8 shift,
const struct in6_addr *ipaddr,
const unsigned char *lladdr)
{
u8 val = 0;
if (is_addr_mac_addr_based(ipaddr, lladdr)) {
val = 3; /* 0-bits */
pr_debug("address compression 0 bits\n");
} else if (lowpan_is_iid_16_bit_compressable(ipaddr)) {
/* compress IID to 16 bits xxxx::XXXX */
lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr16[7], 2);
val = 2; /* 16-bits */
raw_dump_inline(NULL, "Compressed ipv6 addr is (16 bits)",
*hc_ptr - 2, 2);
} else {
/* do not compress IID => xxxx::IID */
lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr16[4], 8);
val = 1; /* 64-bits */
raw_dump_inline(NULL, "Compressed ipv6 addr is (64 bits)",
*hc_ptr - 8, 8);
}
return rol8(val, shift);
}
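rol8() simply rotates the 2-bit address-mode code into the SAM or DAM position of the iphc1 byte. A standalone sketch of that placement (the 4-bit SAM shift here is an assumption for illustration):

	#include <stdint.h>
	#include <stdio.h>

	/* Standalone model of the kernel's rol8(): rotate an 8-bit value
	 * left by 'shift' bits, used above to place the 2-bit mode code. */
	static uint8_t rol8(uint8_t word, unsigned int shift)
	{
		return (uint8_t)((word << (shift & 7)) | (word >> ((-shift) & 7)));
	}

	int main(void)
	{
		/* Assuming a SAM shift of 4 bits: mode 2 ("16-bit IID
		 * inline") lands in bits 5:4 of iphc1. */
		printf("0x%02x\n", rol8(2, 4)); /* prints 0x20 */
		return 0;
	}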
static void compress_udp_header(u8 **hc_ptr, struct sk_buff *skb)
{
struct udphdr *uh = udp_hdr(skb);
u8 tmp;
if (((ntohs(uh->source) & LOWPAN_NHC_UDP_4BIT_MASK) ==
LOWPAN_NHC_UDP_4BIT_PORT) &&
((ntohs(uh->dest) & LOWPAN_NHC_UDP_4BIT_MASK) ==
LOWPAN_NHC_UDP_4BIT_PORT)) {
pr_debug("UDP header: both ports compression to 4 bits\n");
/* compression value */
tmp = LOWPAN_NHC_UDP_CS_P_11;
lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
/* source and destination port */
tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_4BIT_PORT +
((ntohs(uh->source) - LOWPAN_NHC_UDP_4BIT_PORT) << 4);
lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
} else if ((ntohs(uh->dest) & LOWPAN_NHC_UDP_8BIT_MASK) ==
LOWPAN_NHC_UDP_8BIT_PORT) {
pr_debug("UDP header: remove 8 bits of dest\n");
/* compression value */
tmp = LOWPAN_NHC_UDP_CS_P_01;
lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
/* source port */
lowpan_push_hc_data(hc_ptr, &uh->source, sizeof(uh->source));
/* destination port */
tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_8BIT_PORT;
lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
} else if ((ntohs(uh->source) & LOWPAN_NHC_UDP_8BIT_MASK) ==
LOWPAN_NHC_UDP_8BIT_PORT) {
pr_debug("UDP header: remove 8 bits of source\n");
/* compression value */
tmp = LOWPAN_NHC_UDP_CS_P_10;
lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
/* source port */
tmp = ntohs(uh->source) - LOWPAN_NHC_UDP_8BIT_PORT;
lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
/* destination port */
lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest));
} else {
pr_debug("UDP header: can't compress\n");
/* compression value */
tmp = LOWPAN_NHC_UDP_CS_P_00;
lowpan_push_hc_data(hc_ptr, &tmp, sizeof(tmp));
/* source port */
lowpan_push_hc_data(hc_ptr, &uh->source, sizeof(uh->source));
/* destination port */
lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest));
}
/* checksum is always inline */
lowpan_push_hc_data(hc_ptr, &uh->check, sizeof(uh->check));
/* skip the UDP header */
skb_pull(skb, sizeof(struct udphdr));
}
int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *_daddr,
const void *_saddr, unsigned int len)
{
u8 tmp, iphc0, iphc1, *hc_ptr;
struct ipv6hdr *hdr;
u8 head[100] = {};
int addr_type;
if (type != ETH_P_IPV6)
return -EINVAL;
hdr = ipv6_hdr(skb);
hc_ptr = head + 2;
pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n"
"\tnexthdr = 0x%02x\n\thop_lim = %d\n\tdest = %pI6c\n",
hdr->version, ntohs(hdr->payload_len), hdr->nexthdr,
hdr->hop_limit, &hdr->daddr);
raw_dump_table(__func__, "raw skb network header dump",
skb_network_header(skb), sizeof(struct ipv6hdr));
/* As we copy bit-length fields into the IPHC encoding bytes,
* we sometimes use |=.
* If a field is 0 while the current bit value in memory is 1,
* that would give a wrong result, so we reset the IPHC encoding here.
*/
iphc0 = LOWPAN_DISPATCH_IPHC;
iphc1 = 0;
/* TODO: context lookup */
raw_dump_inline(__func__, "saddr",
(unsigned char *)_saddr, IEEE802154_ADDR_LEN);
raw_dump_inline(__func__, "daddr",
(unsigned char *)_daddr, IEEE802154_ADDR_LEN);
raw_dump_table(__func__, "sending raw skb network uncompressed packet",
skb->data, skb->len);
/* Traffic class, flow label
* If the flow label is 0, compress it. If the traffic class is 0,
* compress it. We have to process both at the same time, as the offset
* of the traffic class depends on the presence of version and flow label.
*/
/* hc format of TC is ECN | DSCP, the original one is DSCP | ECN */
tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4);
tmp = ((tmp & 0x03) << 6) | (tmp >> 2);
if (((hdr->flow_lbl[0] & 0x0F) == 0) &&
(hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) {
/* flow label can be compressed */
iphc0 |= LOWPAN_IPHC_FL_C;
if ((hdr->priority == 0) &&
((hdr->flow_lbl[0] & 0xF0) == 0)) {
/* compress (elide) all */
iphc0 |= LOWPAN_IPHC_TC_C;
} else {
/* compress only the flow label */
*hc_ptr = tmp;
hc_ptr += 1;
}
} else {
/* Flow label cannot be compressed */
if ((hdr->priority == 0) &&
((hdr->flow_lbl[0] & 0xF0) == 0)) {
/* compress only traffic class */
iphc0 |= LOWPAN_IPHC_TC_C;
*hc_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F);
memcpy(hc_ptr + 1, &hdr->flow_lbl[1], 2);
hc_ptr += 3;
} else {
/* compress nothing */
memcpy(hc_ptr, hdr, 4);
/* replace the top byte with new ECN | DSCP format */
*hc_ptr = tmp;
hc_ptr += 4;
}
}
/* NOTE: payload length is always compressed */
/* Next Header is compressed if it is UDP */
if (hdr->nexthdr == UIP_PROTO_UDP)
iphc0 |= LOWPAN_IPHC_NH_C;
if ((iphc0 & LOWPAN_IPHC_NH_C) == 0)
lowpan_push_hc_data(&hc_ptr, &hdr->nexthdr,
sizeof(hdr->nexthdr));
/* Hop limit
* if 1: compress, encoding is 01
* if 64: compress, encoding is 10
* if 255: compress, encoding is 11
* else do not compress
*/
switch (hdr->hop_limit) {
case 1:
iphc0 |= LOWPAN_IPHC_TTL_1;
break;
case 64:
iphc0 |= LOWPAN_IPHC_TTL_64;
break;
case 255:
iphc0 |= LOWPAN_IPHC_TTL_255;
break;
default:
lowpan_push_hc_data(&hc_ptr, &hdr->hop_limit,
sizeof(hdr->hop_limit));
}
addr_type = ipv6_addr_type(&hdr->saddr);
/* source address compression */
if (addr_type == IPV6_ADDR_ANY) {
pr_debug("source address is unspecified, setting SAC\n");
iphc1 |= LOWPAN_IPHC_SAC;
} else {
if (addr_type & IPV6_ADDR_LINKLOCAL) {
iphc1 |= lowpan_compress_addr_64(&hc_ptr,
LOWPAN_IPHC_SAM_BIT,
&hdr->saddr, _saddr);
pr_debug("source address unicast link-local %pI6c iphc1 0x%02x\n",
&hdr->saddr, iphc1);
} else {
pr_debug("send the full source address\n");
lowpan_push_hc_data(&hc_ptr, hdr->saddr.s6_addr, 16);
}
}
addr_type = ipv6_addr_type(&hdr->daddr);
/* destination address compression */
if (addr_type & IPV6_ADDR_MULTICAST) {
pr_debug("destination address is multicast: ");
iphc1 |= LOWPAN_IPHC_M;
if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) {
pr_debug("compressed to 1 octet\n");
iphc1 |= LOWPAN_IPHC_DAM_11;
/* use last byte */
lowpan_push_hc_data(&hc_ptr,
&hdr->daddr.s6_addr[15], 1);
} else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) {
pr_debug("compressed to 4 octets\n");
iphc1 |= LOWPAN_IPHC_DAM_10;
/* second byte + the last three */
lowpan_push_hc_data(&hc_ptr,
&hdr->daddr.s6_addr[1], 1);
lowpan_push_hc_data(&hc_ptr,
&hdr->daddr.s6_addr[13], 3);
} else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) {
pr_debug("compressed to 6 octets\n");
iphc1 |= LOWPAN_IPHC_DAM_01;
/* second byte + the last five */
lowpan_push_hc_data(&hc_ptr,
&hdr->daddr.s6_addr[1], 1);
lowpan_push_hc_data(&hc_ptr,
&hdr->daddr.s6_addr[11], 5);
} else {
pr_debug("using full address\n");
iphc1 |= LOWPAN_IPHC_DAM_00;
lowpan_push_hc_data(&hc_ptr, hdr->daddr.s6_addr, 16);
}
} else {
if (addr_type & IPV6_ADDR_LINKLOCAL) {
/* TODO: context lookup */
iphc1 |= lowpan_compress_addr_64(&hc_ptr,
LOWPAN_IPHC_DAM_BIT, &hdr->daddr, _daddr);
pr_debug("dest address unicast link-local %pI6c "
"iphc1 0x%02x\n", &hdr->daddr, iphc1);
} else {
pr_debug("dest address unicast %pI6c\n", &hdr->daddr);
lowpan_push_hc_data(&hc_ptr, hdr->daddr.s6_addr, 16);
}
}
/* UDP header compression */
if (hdr->nexthdr == UIP_PROTO_UDP)
compress_udp_header(&hc_ptr, skb);
head[0] = iphc0;
head[1] = iphc1;
skb_pull(skb, sizeof(struct ipv6hdr));
skb_reset_transport_header(skb);
memcpy(skb_push(skb, hc_ptr - head), head, hc_ptr - head);
skb_reset_network_header(skb);
pr_debug("header len %d skb %u\n", (int)(hc_ptr - head), skb->len);
raw_dump_table(__func__, "raw skb data dump compressed",
skb->data, skb->len);
return 0;
}
EXPORT_SYMBOL_GPL(lowpan_header_compress);
MODULE_LICENSE("GPL");

net/802/Kconfig (new file)
@@ -0,0 +1,10 @@
config STP
tristate
select LLC
config GARP
tristate
select STP
config MRP
tristate

net/802/Makefile (new file)
@@ -0,0 +1,14 @@
#
# Makefile for the Linux 802.x protocol layers.
#
# Check the p8022 selections against net/core/Makefile.
obj-$(CONFIG_LLC) += p8022.o psnap.o
obj-$(CONFIG_NET_FC) += fc.o
obj-$(CONFIG_FDDI) += fddi.o
obj-$(CONFIG_HIPPI) += hippi.o
obj-$(CONFIG_IPX) += p8022.o psnap.o p8023.o
obj-$(CONFIG_ATALK) += p8022.o psnap.o
obj-$(CONFIG_STP) += stp.o
obj-$(CONFIG_GARP) += garp.o
obj-$(CONFIG_MRP) += mrp.o

net/802/fc.c (new file)
@@ -0,0 +1,131 @@
/*
* NET3: Fibre Channel device handling subroutines
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Vineet Abraham <vma@iol.unh.edu>
* v 1.0 03/22/99
*/
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/fcdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/export.h>
#include <net/arp.h>
/*
* Put the headers on a Fibre Channel packet.
*/
static int fc_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr, unsigned int len)
{
struct fch_hdr *fch;
int hdr_len;
/*
* Add the 802.2 SNAP header if the payload is IP or ARP, as the
* IPv4 code calls dev->hard_header directly.
*/
if (type == ETH_P_IP || type == ETH_P_ARP)
{
struct fcllc *fcllc;
hdr_len = sizeof(struct fch_hdr) + sizeof(struct fcllc);
fch = (struct fch_hdr *)skb_push(skb, hdr_len);
fcllc = (struct fcllc *)(fch+1);
fcllc->dsap = fcllc->ssap = EXTENDED_SAP;
fcllc->llc = UI_CMD;
fcllc->protid[0] = fcllc->protid[1] = fcllc->protid[2] = 0x00;
fcllc->ethertype = htons(type);
}
else
{
hdr_len = sizeof(struct fch_hdr);
fch = (struct fch_hdr *)skb_push(skb, hdr_len);
}
if (saddr)
memcpy(fch->saddr, saddr, dev->addr_len);
else
memcpy(fch->saddr, dev->dev_addr, dev->addr_len);
if (daddr) {
memcpy(fch->daddr, daddr, dev->addr_len);
return hdr_len;
}
return -hdr_len;
}
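The positive/negative return above follows the usual header_ops.create contract: a positive value means the header is complete, while -hdr_len means hdr_len bytes were pushed but the destination is still unresolved and left for ARP. A standalone model of a caller honoring that contract (illustrative only, not a real kernel call site):

	#include <stdio.h>

	/* Standalone model of the header-create return convention used by
	 * fc_header(): positive = complete, negative = pending resolution. */
	static int build_header(int have_daddr, int hdr_len)
	{
		return have_daddr ? hdr_len : -hdr_len;
	}

	int main(void)
	{
		int rc = build_header(0, 16);

		if (rc < 0)
			printf("pushed %d bytes, destination pending\n", -rc);
		else
			printf("header complete, %d bytes\n", rc);
		return 0;
	}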
/*
* A neighbour discovery of some species (eg arp) has completed. We
* can now send the packet.
*/
static int fc_rebuild_header(struct sk_buff *skb)
{
#ifdef CONFIG_INET
struct fch_hdr *fch = (struct fch_hdr *)skb->data;
struct fcllc *fcllc = (struct fcllc *)(skb->data + sizeof(struct fch_hdr));
if (fcllc->ethertype != htons(ETH_P_IP)) {
printk("fc_rebuild_header: Don't know how to resolve type %04X addresses?\n",
ntohs(fcllc->ethertype));
return 0;
}
return arp_find(fch->daddr, skb);
#else
return 0;
#endif
}
static const struct header_ops fc_header_ops = {
.create = fc_header,
.rebuild = fc_rebuild_header,
};
static void fc_setup(struct net_device *dev)
{
dev->header_ops = &fc_header_ops;
dev->type = ARPHRD_IEEE802;
dev->hard_header_len = FC_HLEN;
dev->mtu = 2024;
dev->addr_len = FC_ALEN;
dev->tx_queue_len = 100; /* Long queues on fc */
dev->flags = IFF_BROADCAST;
memset(dev->broadcast, 0xFF, FC_ALEN);
}
/**
* alloc_fcdev - Register fibre channel device
* @sizeof_priv: Size of additional driver-private structure to be allocated
* for this fibre channel device
*
* Fill in the fields of the device structure with fibre channel-generic values.
*
* Constructs a new net device, complete with a private data area of
* size @sizeof_priv. A 32-byte (not bit) alignment is enforced for
* this private data area.
*/
struct net_device *alloc_fcdev(int sizeof_priv)
{
return alloc_netdev(sizeof_priv, "fc%d", NET_NAME_UNKNOWN, fc_setup);
}
EXPORT_SYMBOL(alloc_fcdev);

net/802/fddi.c (new file)
@@ -0,0 +1,215 @@
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* FDDI-type device handling.
*
* Version: @(#)fddi.c 1.0.0 08/12/96
*
* Authors: Lawrence V. Stefani, <stefani@lkg.dec.com>
*
* fddi.c is based on previous eth.c and tr.c work by
* Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Mark Evans, <evansmp@uhura.aston.ac.uk>
* Florian La Roche, <rzsfl@rz.uni-sb.de>
* Alan Cox, <gw4pts@gw4pts.ampr.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Changes
* Alan Cox : New arp/rebuild header
* Maciej W. Rozycki : IPv6 support
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/fddidevice.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <net/arp.h>
#include <net/sock.h>
/*
* Create the FDDI MAC header for an arbitrary protocol layer
*
* saddr=NULL means use device source address
* daddr=NULL means leave destination address (eg unresolved arp)
*/
static int fddi_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr, unsigned int len)
{
int hl = FDDI_K_SNAP_HLEN;
struct fddihdr *fddi;
if (type != ETH_P_IP && type != ETH_P_IPV6 && type != ETH_P_ARP)
hl = FDDI_K_8022_HLEN - 3;
fddi = (struct fddihdr *)skb_push(skb, hl);
fddi->fc = FDDI_FC_K_ASYNC_LLC_DEF;
if (type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP)
{
fddi->hdr.llc_snap.dsap = FDDI_EXTENDED_SAP;
fddi->hdr.llc_snap.ssap = FDDI_EXTENDED_SAP;
fddi->hdr.llc_snap.ctrl = FDDI_UI_CMD;
fddi->hdr.llc_snap.oui[0] = 0x00;
fddi->hdr.llc_snap.oui[1] = 0x00;
fddi->hdr.llc_snap.oui[2] = 0x00;
fddi->hdr.llc_snap.ethertype = htons(type);
}
/* Set the source and destination hardware addresses */
if (saddr != NULL)
memcpy(fddi->saddr, saddr, dev->addr_len);
else
memcpy(fddi->saddr, dev->dev_addr, dev->addr_len);
if (daddr != NULL)
{
memcpy(fddi->daddr, daddr, dev->addr_len);
return hl;
}
return -hl;
}
/*
* Rebuild the FDDI MAC header. This is called after an ARP
* (or in future other address resolution) has completed on
* this sk_buff. We now let ARP fill in the other fields.
*/
static int fddi_rebuild_header(struct sk_buff *skb)
{
struct fddihdr *fddi = (struct fddihdr *)skb->data;
#ifdef CONFIG_INET
if (fddi->hdr.llc_snap.ethertype == htons(ETH_P_IP))
/* Try to get ARP to resolve the header and fill destination address */
return arp_find(fddi->daddr, skb);
else
#endif
{
printk("%s: Don't know how to resolve type %04X addresses.\n",
skb->dev->name, ntohs(fddi->hdr.llc_snap.ethertype));
return 0;
}
}
/*
* Determine the packet's protocol ID and fill in skb fields.
* This routine is called before an incoming packet is passed
* up. It's used to fill in specific skb fields and to set
* the proper pointer to the start of packet data (skb->data).
*/
__be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev)
{
struct fddihdr *fddi = (struct fddihdr *)skb->data;
__be16 type;
/*
* Set mac.raw field to point to FC byte, set data field to point
* to start of packet data. Assume 802.2 SNAP frames for now.
*/
skb->dev = dev;
skb_reset_mac_header(skb); /* point to frame control (FC) */
if (fddi->hdr.llc_8022_1.dsap == 0xe0)
{
skb_pull(skb, FDDI_K_8022_HLEN-3);
type = htons(ETH_P_802_2);
}
else
{
skb_pull(skb, FDDI_K_SNAP_HLEN); /* adjust for 21 byte header */
type = fddi->hdr.llc_snap.ethertype;
}
/* Set packet type based on destination address and flag settings */
if (*fddi->daddr & 0x01)
{
if (memcmp(fddi->daddr, dev->broadcast, FDDI_K_ALEN) == 0)
skb->pkt_type = PACKET_BROADCAST;
else
skb->pkt_type = PACKET_MULTICAST;
}
else if (dev->flags & IFF_PROMISC)
{
if (memcmp(fddi->daddr, dev->dev_addr, FDDI_K_ALEN))
skb->pkt_type = PACKET_OTHERHOST;
}
/* Assume 802.2 SNAP frames, for now */
return type;
}
EXPORT_SYMBOL(fddi_type_trans);
int fddi_change_mtu(struct net_device *dev, int new_mtu)
{
if ((new_mtu < FDDI_K_SNAP_HLEN) || (new_mtu > FDDI_K_SNAP_DLEN))
return -EINVAL;
dev->mtu = new_mtu;
return 0;
}
EXPORT_SYMBOL(fddi_change_mtu);
static const struct header_ops fddi_header_ops = {
.create = fddi_header,
.rebuild = fddi_rebuild_header,
};
static void fddi_setup(struct net_device *dev)
{
dev->header_ops = &fddi_header_ops;
dev->type = ARPHRD_FDDI;
dev->hard_header_len = FDDI_K_SNAP_HLEN+3; /* Assume 802.2 SNAP hdr len + 3 pad bytes */
dev->mtu = FDDI_K_SNAP_DLEN; /* Assume max payload of 802.2 SNAP frame */
dev->addr_len = FDDI_K_ALEN;
dev->tx_queue_len = 100; /* Long queues on FDDI */
dev->flags = IFF_BROADCAST | IFF_MULTICAST;
memset(dev->broadcast, 0xFF, FDDI_K_ALEN);
}
/**
* alloc_fddidev - Register FDDI device
* @sizeof_priv: Size of additional driver-private structure to be allocated
* for this FDDI device
*
* Fill in the fields of the device structure with FDDI-generic values.
*
* Constructs a new net device, complete with a private data area of
* size @sizeof_priv. A 32-byte (not bit) alignment is enforced for
* this private data area.
*/
struct net_device *alloc_fddidev(int sizeof_priv)
{
return alloc_netdev(sizeof_priv, "fddi%d", NET_NAME_UNKNOWN,
fddi_setup);
}
EXPORT_SYMBOL(alloc_fddidev);
MODULE_LICENSE("GPL");

net/802/garp.c (new file)
@@ -0,0 +1,638 @@
/*
* IEEE 802.1D Generic Attribute Registration Protocol (GARP)
*
* Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/llc.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/llc.h>
#include <net/llc_pdu.h>
#include <net/garp.h>
#include <asm/unaligned.h>
static unsigned int garp_join_time __read_mostly = 200;
module_param(garp_join_time, uint, 0644);
MODULE_PARM_DESC(garp_join_time, "Join time in ms (default 200ms)");
MODULE_LICENSE("GPL");
static const struct garp_state_trans {
u8 state;
u8 action;
} garp_applicant_state_table[GARP_APPLICANT_MAX + 1][GARP_EVENT_MAX + 1] = {
[GARP_APPLICANT_VA] = {
[GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_AA,
.action = GARP_ACTION_S_JOIN_IN },
[GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_AA },
[GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VA },
[GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VA },
[GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VA },
[GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_LA },
},
[GARP_APPLICANT_AA] = {
[GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_QA,
.action = GARP_ACTION_S_JOIN_IN },
[GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QA },
[GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VA },
[GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VA },
[GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VA },
[GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_LA },
},
[GARP_APPLICANT_QA] = {
[GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QA },
[GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VA },
[GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VA },
[GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_LA },
},
[GARP_APPLICANT_LA] = {
[GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_VO,
.action = GARP_ACTION_S_LEAVE_EMPTY },
[GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_LA },
[GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_LA },
[GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_LA },
[GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_VA },
[GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID },
},
[GARP_APPLICANT_VP] = {
[GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_AA,
.action = GARP_ACTION_S_JOIN_IN },
[GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_AP },
[GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_VO },
},
[GARP_APPLICANT_AP] = {
[GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_QA,
.action = GARP_ACTION_S_JOIN_IN },
[GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QP },
[GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_AO },
},
[GARP_APPLICANT_QP] = {
[GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QP },
[GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_QO },
},
[GARP_APPLICANT_VO] = {
[GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_AO },
[GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_VP },
[GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID },
},
[GARP_APPLICANT_AO] = {
[GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QO },
[GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_AP },
[GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID },
},
[GARP_APPLICANT_QO] = {
[GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID },
[GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QO },
[GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO },
[GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_QP },
[GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID },
},
};
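The table above is a dense encoding of the GARP applicant state machine: each entry yields the next state plus an optional transmit action, indexed by [current state][event]. A reduced standalone model of the same dispatch pattern (states, events, and actions invented for illustration, not the real GARP set):

	#include <stdio.h>

	/* Reduced model of the garp_applicant_state_table dispatch. */
	enum state { ST_IDLE, ST_ACTIVE, ST_INVALID, ST_MAX = ST_INVALID };
	enum event { EV_JOIN, EV_LEAVE, EV_MAX = EV_LEAVE };
	enum action { ACT_NONE, ACT_SEND };

	static const struct { unsigned char state, action; }
	trans[ST_MAX + 1][EV_MAX + 1] = {
		[ST_IDLE]   = { [EV_JOIN]  = { ST_ACTIVE, ACT_SEND },
				[EV_LEAVE] = { ST_INVALID, ACT_NONE } },
		[ST_ACTIVE] = { [EV_JOIN]  = { ST_ACTIVE, ACT_NONE },
				[EV_LEAVE] = { ST_IDLE,   ACT_SEND } },
	};

	int main(void)
	{
		enum state s = ST_IDLE;

		/* Same lookup shape as garp_attr_event(): read the entry,
		 * bail out on INVALID, perform the action, commit the state. */
		if (trans[s][EV_JOIN].state != ST_INVALID) {
			if (trans[s][EV_JOIN].action == ACT_SEND)
				printf("send join PDU\n");
			s = trans[s][EV_JOIN].state;
		}
		printf("state=%d\n", s);
		return 0;
	}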
static int garp_attr_cmp(const struct garp_attr *attr,
const void *data, u8 len, u8 type)
{
if (attr->type != type)
return attr->type - type;
if (attr->dlen != len)
return attr->dlen - len;
return memcmp(attr->data, data, len);
}
static struct garp_attr *garp_attr_lookup(const struct garp_applicant *app,
const void *data, u8 len, u8 type)
{
struct rb_node *parent = app->gid.rb_node;
struct garp_attr *attr;
int d;
while (parent) {
attr = rb_entry(parent, struct garp_attr, node);
d = garp_attr_cmp(attr, data, len, type);
if (d > 0)
parent = parent->rb_left;
else if (d < 0)
parent = parent->rb_right;
else
return attr;
}
return NULL;
}
static struct garp_attr *garp_attr_create(struct garp_applicant *app,
const void *data, u8 len, u8 type)
{
struct rb_node *parent = NULL, **p = &app->gid.rb_node;
struct garp_attr *attr;
int d;
while (*p) {
parent = *p;
attr = rb_entry(parent, struct garp_attr, node);
d = garp_attr_cmp(attr, data, len, type);
if (d > 0)
p = &parent->rb_left;
else if (d < 0)
p = &parent->rb_right;
else {
/* The attribute already exists; re-use it. */
return attr;
}
}
attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
if (!attr)
return attr;
attr->state = GARP_APPLICANT_VO;
attr->type = type;
attr->dlen = len;
memcpy(attr->data, data, len);
rb_link_node(&attr->node, parent, p);
rb_insert_color(&attr->node, &app->gid);
return attr;
}
static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr)
{
rb_erase(&attr->node, &app->gid);
kfree(attr);
}
static int garp_pdu_init(struct garp_applicant *app)
{
struct sk_buff *skb;
struct garp_pdu_hdr *gp;
#define LLC_RESERVE sizeof(struct llc_pdu_un)
skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
GFP_ATOMIC);
if (!skb)
return -ENOMEM;
skb->dev = app->dev;
skb->protocol = htons(ETH_P_802_2);
skb_reserve(skb, LL_RESERVED_SPACE(app->dev) + LLC_RESERVE);
gp = (struct garp_pdu_hdr *)__skb_put(skb, sizeof(*gp));
put_unaligned(htons(GARP_PROTOCOL_ID), &gp->protocol);
app->pdu = skb;
return 0;
}
static int garp_pdu_append_end_mark(struct garp_applicant *app)
{
if (skb_tailroom(app->pdu) < sizeof(u8))
return -1;
*(u8 *)__skb_put(app->pdu, sizeof(u8)) = GARP_END_MARK;
return 0;
}
static void garp_pdu_queue(struct garp_applicant *app)
{
if (!app->pdu)
return;
if (garp_cb(app->pdu)->cur_type)
garp_pdu_append_end_mark(app);
garp_pdu_append_end_mark(app);
llc_pdu_header_init(app->pdu, LLC_PDU_TYPE_U, LLC_SAP_BSPAN,
LLC_SAP_BSPAN, LLC_PDU_CMD);
llc_pdu_init_as_ui_cmd(app->pdu);
llc_mac_hdr_init(app->pdu, app->dev->dev_addr,
app->app->proto.group_address);
skb_queue_tail(&app->queue, app->pdu);
app->pdu = NULL;
}
static void garp_queue_xmit(struct garp_applicant *app)
{
struct sk_buff *skb;
while ((skb = skb_dequeue(&app->queue)))
dev_queue_xmit(skb);
}
static int garp_pdu_append_msg(struct garp_applicant *app, u8 attrtype)
{
struct garp_msg_hdr *gm;
if (skb_tailroom(app->pdu) < sizeof(*gm))
return -1;
gm = (struct garp_msg_hdr *)__skb_put(app->pdu, sizeof(*gm));
gm->attrtype = attrtype;
garp_cb(app->pdu)->cur_type = attrtype;
return 0;
}
static int garp_pdu_append_attr(struct garp_applicant *app,
const struct garp_attr *attr,
enum garp_attr_event event)
{
struct garp_attr_hdr *ga;
unsigned int len;
int err;
again:
if (!app->pdu) {
err = garp_pdu_init(app);
if (err < 0)
return err;
}
if (garp_cb(app->pdu)->cur_type != attr->type) {
if (garp_cb(app->pdu)->cur_type &&
garp_pdu_append_end_mark(app) < 0)
goto queue;
if (garp_pdu_append_msg(app, attr->type) < 0)
goto queue;
}
len = sizeof(*ga) + attr->dlen;
if (skb_tailroom(app->pdu) < len)
goto queue;
ga = (struct garp_attr_hdr *)__skb_put(app->pdu, len);
ga->len = len;
ga->event = event;
memcpy(ga->data, attr->data, attr->dlen);
return 0;
queue:
garp_pdu_queue(app);
goto again;
}
static void garp_attr_event(struct garp_applicant *app,
struct garp_attr *attr, enum garp_event event)
{
enum garp_applicant_state state;
state = garp_applicant_state_table[attr->state][event].state;
if (state == GARP_APPLICANT_INVALID)
return;
switch (garp_applicant_state_table[attr->state][event].action) {
case GARP_ACTION_NONE:
break;
case GARP_ACTION_S_JOIN_IN:
/* When appending the attribute fails, don't update state in
* order to retry on next TRANSMIT_PDU event. */
if (garp_pdu_append_attr(app, attr, GARP_JOIN_IN) < 0)
return;
break;
case GARP_ACTION_S_LEAVE_EMPTY:
garp_pdu_append_attr(app, attr, GARP_LEAVE_EMPTY);
/* As a pure applicant, sending a leave message implies that
* the attribute was unregistered and can be destroyed. */
garp_attr_destroy(app, attr);
return;
default:
WARN_ON(1);
}
attr->state = state;
}
int garp_request_join(const struct net_device *dev,
const struct garp_application *appl,
const void *data, u8 len, u8 type)
{
struct garp_port *port = rtnl_dereference(dev->garp_port);
struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
struct garp_attr *attr;
spin_lock_bh(&app->lock);
attr = garp_attr_create(app, data, len, type);
if (!attr) {
spin_unlock_bh(&app->lock);
return -ENOMEM;
}
garp_attr_event(app, attr, GARP_EVENT_REQ_JOIN);
spin_unlock_bh(&app->lock);
return 0;
}
EXPORT_SYMBOL_GPL(garp_request_join);
void garp_request_leave(const struct net_device *dev,
const struct garp_application *appl,
const void *data, u8 len, u8 type)
{
struct garp_port *port = rtnl_dereference(dev->garp_port);
struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
struct garp_attr *attr;
spin_lock_bh(&app->lock);
attr = garp_attr_lookup(app, data, len, type);
if (!attr) {
spin_unlock_bh(&app->lock);
return;
}
garp_attr_event(app, attr, GARP_EVENT_REQ_LEAVE);
spin_unlock_bh(&app->lock);
}
EXPORT_SYMBOL_GPL(garp_request_leave);
static void garp_gid_event(struct garp_applicant *app, enum garp_event event)
{
struct rb_node *node, *next;
struct garp_attr *attr;
for (node = rb_first(&app->gid);
next = node ? rb_next(node) : NULL, node != NULL;
node = next) {
attr = rb_entry(node, struct garp_attr, node);
garp_attr_event(app, attr, event);
}
}
static void garp_join_timer_arm(struct garp_applicant *app)
{
unsigned long delay;
delay = (u64)msecs_to_jiffies(garp_join_time) * prandom_u32() >> 32;
mod_timer(&app->join_timer, jiffies + delay);
}
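The delay expression is the multiply-shift idiom for scaling a 32-bit random value into [0, range) without a modulo: (u64)range * rand32 >> 32. A quick standalone check of the arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	/* Standalone check of the delay math in garp_join_timer_arm():
	 * (u64)range * rand32 >> 32 maps a uniform 32-bit random value
	 * into [0, range). */
	static uint32_t scale(uint32_t range, uint32_t rand32)
	{
		return (uint32_t)(((uint64_t)range * rand32) >> 32);
	}

	int main(void)
	{
		uint32_t range = 200; /* e.g. a 200-jiffy join time */

		printf("%u %u %u\n",
		       scale(range, 0),            /* 0   */
		       scale(range, 0x80000000u),  /* 100 */
		       scale(range, 0xFFFFFFFFu)); /* 199 */
		return 0;
	}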
static void garp_join_timer(unsigned long data)
{
struct garp_applicant *app = (struct garp_applicant *)data;
spin_lock(&app->lock);
garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
garp_pdu_queue(app);
spin_unlock(&app->lock);
garp_queue_xmit(app);
garp_join_timer_arm(app);
}
static int garp_pdu_parse_end_mark(struct sk_buff *skb)
{
if (!pskb_may_pull(skb, sizeof(u8)))
return -1;
if (*skb->data == GARP_END_MARK) {
skb_pull(skb, sizeof(u8));
return -1;
}
return 0;
}
static int garp_pdu_parse_attr(struct garp_applicant *app, struct sk_buff *skb,
u8 attrtype)
{
const struct garp_attr_hdr *ga;
struct garp_attr *attr;
enum garp_event event;
unsigned int dlen;
if (!pskb_may_pull(skb, sizeof(*ga)))
return -1;
ga = (struct garp_attr_hdr *)skb->data;
if (ga->len < sizeof(*ga))
return -1;
if (!pskb_may_pull(skb, ga->len))
return -1;
skb_pull(skb, ga->len);
dlen = ga->len - sizeof(*ga);
if (attrtype > app->app->maxattr)
return 0;
switch (ga->event) {
case GARP_LEAVE_ALL:
if (dlen != 0)
return -1;
garp_gid_event(app, GARP_EVENT_R_LEAVE_EMPTY);
return 0;
case GARP_JOIN_EMPTY:
event = GARP_EVENT_R_JOIN_EMPTY;
break;
case GARP_JOIN_IN:
event = GARP_EVENT_R_JOIN_IN;
break;
case GARP_LEAVE_EMPTY:
event = GARP_EVENT_R_LEAVE_EMPTY;
break;
case GARP_EMPTY:
event = GARP_EVENT_R_EMPTY;
break;
default:
return 0;
}
if (dlen == 0)
return -1;
attr = garp_attr_lookup(app, ga->data, dlen, attrtype);
if (attr == NULL)
return 0;
garp_attr_event(app, attr, event);
return 0;
}
static int garp_pdu_parse_msg(struct garp_applicant *app, struct sk_buff *skb)
{
const struct garp_msg_hdr *gm;
if (!pskb_may_pull(skb, sizeof(*gm)))
return -1;
gm = (struct garp_msg_hdr *)skb->data;
if (gm->attrtype == 0)
return -1;
skb_pull(skb, sizeof(*gm));
while (skb->len > 0) {
if (garp_pdu_parse_attr(app, skb, gm->attrtype) < 0)
return -1;
if (garp_pdu_parse_end_mark(skb) < 0)
break;
}
return 0;
}
static void garp_pdu_rcv(const struct stp_proto *proto, struct sk_buff *skb,
struct net_device *dev)
{
struct garp_application *appl = proto->data;
struct garp_port *port;
struct garp_applicant *app;
const struct garp_pdu_hdr *gp;
port = rcu_dereference(dev->garp_port);
if (!port)
goto err;
app = rcu_dereference(port->applicants[appl->type]);
if (!app)
goto err;
if (!pskb_may_pull(skb, sizeof(*gp)))
goto err;
gp = (struct garp_pdu_hdr *)skb->data;
if (get_unaligned(&gp->protocol) != htons(GARP_PROTOCOL_ID))
goto err;
skb_pull(skb, sizeof(*gp));
spin_lock(&app->lock);
while (skb->len > 0) {
if (garp_pdu_parse_msg(app, skb) < 0)
break;
if (garp_pdu_parse_end_mark(skb) < 0)
break;
}
spin_unlock(&app->lock);
err:
kfree_skb(skb);
}
static int garp_init_port(struct net_device *dev)
{
struct garp_port *port;
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
rcu_assign_pointer(dev->garp_port, port);
return 0;
}
static void garp_release_port(struct net_device *dev)
{
struct garp_port *port = rtnl_dereference(dev->garp_port);
unsigned int i;
for (i = 0; i <= GARP_APPLICATION_MAX; i++) {
if (rtnl_dereference(port->applicants[i]))
return;
}
RCU_INIT_POINTER(dev->garp_port, NULL);
kfree_rcu(port, rcu);
}
int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
{
struct garp_applicant *app;
int err;
ASSERT_RTNL();
if (!rtnl_dereference(dev->garp_port)) {
err = garp_init_port(dev);
if (err < 0)
goto err1;
}
err = -ENOMEM;
app = kzalloc(sizeof(*app), GFP_KERNEL);
if (!app)
goto err2;
err = dev_mc_add(dev, appl->proto.group_address);
if (err < 0)
goto err3;
app->dev = dev;
app->app = appl;
app->gid = RB_ROOT;
spin_lock_init(&app->lock);
skb_queue_head_init(&app->queue);
rcu_assign_pointer(dev->garp_port->applicants[appl->type], app);
setup_timer(&app->join_timer, garp_join_timer, (unsigned long)app);
garp_join_timer_arm(app);
return 0;
err3:
kfree(app);
err2:
garp_release_port(dev);
err1:
return err;
}
EXPORT_SYMBOL_GPL(garp_init_applicant);
void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl)
{
struct garp_port *port = rtnl_dereference(dev->garp_port);
struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
ASSERT_RTNL();
RCU_INIT_POINTER(port->applicants[appl->type], NULL);
/* Delete timer and generate a final TRANSMIT_PDU event to flush out
* all pending messages before the applicant is gone. */
del_timer_sync(&app->join_timer);
spin_lock_bh(&app->lock);
garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
garp_pdu_queue(app);
spin_unlock_bh(&app->lock);
garp_queue_xmit(app);
dev_mc_del(dev, appl->proto.group_address);
kfree_rcu(app, rcu);
garp_release_port(dev);
}
EXPORT_SYMBOL_GPL(garp_uninit_applicant);
int garp_register_application(struct garp_application *appl)
{
appl->proto.rcv = garp_pdu_rcv;
appl->proto.data = appl;
return stp_proto_register(&appl->proto);
}
EXPORT_SYMBOL_GPL(garp_register_application);
void garp_unregister_application(struct garp_application *appl)
{
stp_proto_unregister(&appl->proto);
}
EXPORT_SYMBOL_GPL(garp_unregister_application);

net/802/hippi.c (new file)
@@ -0,0 +1,235 @@
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* HIPPI-type device handling.
*
* Version: @(#)hippi.c 1.0.0 05/29/97
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Mark Evans, <evansmp@uhura.aston.ac.uk>
* Florian La Roche, <rzsfl@rz.uni-sb.de>
* Alan Cox, <gw4pts@gw4pts.ampr.org>
* Jes Sorensen, <Jes.Sorensen@cern.ch>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/hippidevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <net/arp.h>
#include <net/sock.h>
#include <asm/uaccess.h>
/*
* Create the HIPPI MAC header for an arbitrary protocol layer
*
* saddr=NULL means use device source address
* daddr=NULL means leave destination address (eg unresolved arp)
*/
static int hippi_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr, unsigned int len)
{
struct hippi_hdr *hip = (struct hippi_hdr *)skb_push(skb, HIPPI_HLEN);
struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
if (!len) {
len = skb->len - HIPPI_HLEN;
printk("hippi_header(): length not supplied\n");
}
/*
* Due to the stupidity of the little endian byte-order we
* have to set the fp field this way.
*/
hip->fp.fixed = htonl(0x04800018);
hip->fp.d2_size = htonl(len + 8);
hip->le.fc = 0;
hip->le.double_wide = 0; /* only HIPPI 800 for the time being */
hip->le.message_type = 0; /* Data PDU */
hip->le.dest_addr_type = 2; /* 12 bit SC address */
hip->le.src_addr_type = 2; /* 12 bit SC address */
memcpy(hip->le.src_switch_addr, dev->dev_addr + 3, 3);
memset(&hip->le.reserved, 0, 16);
hip->snap.dsap = HIPPI_EXTENDED_SAP;
hip->snap.ssap = HIPPI_EXTENDED_SAP;
hip->snap.ctrl = HIPPI_UI_CMD;
hip->snap.oui[0] = 0x00;
hip->snap.oui[1] = 0x00;
hip->snap.oui[2] = 0x00;
hip->snap.ethertype = htons(type);
if (daddr)
{
memcpy(hip->le.dest_switch_addr, daddr + 3, 3);
memcpy(&hcb->ifield, daddr + 2, 4);
return HIPPI_HLEN;
}
hcb->ifield = 0;
return -((int)HIPPI_HLEN);
}
/*
* Rebuild the HIPPI MAC header. This is called after an ARP has
* completed on this sk_buff. We now let ARP fill in the other fields.
*/
static int hippi_rebuild_header(struct sk_buff *skb)
{
struct hippi_hdr *hip = (struct hippi_hdr *)skb->data;
/*
* Only IP is currently supported
*/
if (hip->snap.ethertype != htons(ETH_P_IP)) {
printk(KERN_DEBUG "%s: unable to resolve type %X addresses.\n",
skb->dev->name, ntohs(hip->snap.ethertype));
return 0;
}
/*
* We don't support dynamic ARP on HIPPI, but we use static
* ARP tables to hold the I-FIELDs.
*/
return arp_find(hip->le.daddr, skb);
}
/*
* Determine the packet's protocol ID.
*/
__be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev)
{
struct hippi_hdr *hip;
/*
* This is actually wrong ... question is if we really should
* set the raw address here.
*/
skb->dev = dev;
skb_reset_mac_header(skb);
hip = (struct hippi_hdr *)skb_mac_header(skb);
skb_pull(skb, HIPPI_HLEN);
/*
* No fancy promisc stuff here now.
*/
return hip->snap.ethertype;
}
EXPORT_SYMBOL(hippi_type_trans);
int hippi_change_mtu(struct net_device *dev, int new_mtu)
{
/*
* HIPPI's got these nice large MTUs.
*/
if ((new_mtu < 68) || (new_mtu > 65280))
return -EINVAL;
dev->mtu = new_mtu;
return 0;
}
EXPORT_SYMBOL(hippi_change_mtu);
/*
* For HIPPI we will actually use the lower 4 bytes of the hardware
* address as the I-FIELD rather than the actual hardware address.
*/
int hippi_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
if (netif_running(dev))
return -EBUSY;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
return 0;
}
EXPORT_SYMBOL(hippi_mac_addr);
int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
{
/* Never send broadcast/multicast ARP messages */
NEIGH_VAR_INIT(p, MCAST_PROBES, 0);
/* In IPv6 unicast probes are valid even on NBMA,
* because they are encapsulated in normal IPv6 protocol.
* Should be a generic flag.
*/
if (p->tbl->family != AF_INET6)
NEIGH_VAR_INIT(p, UCAST_PROBES, 0);
return 0;
}
EXPORT_SYMBOL(hippi_neigh_setup_dev);
static const struct header_ops hippi_header_ops = {
.create = hippi_header,
.rebuild = hippi_rebuild_header,
};
static void hippi_setup(struct net_device *dev)
{
dev->header_ops = &hippi_header_ops;
/*
* We don't support HIPPI `ARP' for the time being, and probably
* never will unless someone else implements it. However we
* still need a fake ARPHRD to make ifconfig and friends play ball.
*/
dev->type = ARPHRD_HIPPI;
dev->hard_header_len = HIPPI_HLEN;
dev->mtu = 65280;
dev->addr_len = HIPPI_ALEN;
dev->tx_queue_len = 25 /* 5 */;
memset(dev->broadcast, 0xFF, HIPPI_ALEN);
/*
* HIPPI doesn't support broadcast+multicast and we only use
* static ARP tables. ARP is disabled by hippi_neigh_setup_dev.
*/
dev->flags = 0;
}
/**
* alloc_hippi_dev - Register HIPPI device
* @sizeof_priv: Size of additional driver-private structure to be allocated
* for this HIPPI device
*
* Fill in the fields of the device structure with HIPPI-generic values.
*
* Constructs a new net device, complete with a private data area of
* size @sizeof_priv. A 32-byte (not bit) alignment is enforced for
* this private data area.
*/
struct net_device *alloc_hippi_dev(int sizeof_priv)
{
return alloc_netdev(sizeof_priv, "hip%d", NET_NAME_UNKNOWN,
hippi_setup);
}
EXPORT_SYMBOL(alloc_hippi_dev);

net/802/mrp.c (new file)
@@ -0,0 +1,926 @@
/*
* IEEE 802.1Q Multiple Registration Protocol (MRP)
*
* Copyright (c) 2012 Massachusetts Institute of Technology
*
* Adapted from code in net/802/garp.c
* Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/mrp.h>
#include <asm/unaligned.h>
static unsigned int mrp_join_time __read_mostly = 200;
module_param(mrp_join_time, uint, 0644);
MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");
static unsigned int mrp_periodic_time __read_mostly = 1000;
module_param(mrp_periodic_time, uint, 0644);
MODULE_PARM_DESC(mrp_periodic_time, "Periodic time in ms (default 1s)");
MODULE_LICENSE("GPL");
static const u8
mrp_applicant_state_table[MRP_APPLICANT_MAX + 1][MRP_EVENT_MAX + 1] = {
[MRP_APPLICANT_VO] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_VP,
[MRP_EVENT_LV] = MRP_APPLICANT_VO,
[MRP_EVENT_TX] = MRP_APPLICANT_VO,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_VO,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AO,
[MRP_EVENT_R_IN] = MRP_APPLICANT_VO,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VO,
[MRP_EVENT_R_MT] = MRP_APPLICANT_VO,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_VO,
},
[MRP_APPLICANT_VP] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_VP,
[MRP_EVENT_LV] = MRP_APPLICANT_VO,
[MRP_EVENT_TX] = MRP_APPLICANT_AA,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_VP,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AP,
[MRP_EVENT_R_IN] = MRP_APPLICANT_VP,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VP,
[MRP_EVENT_R_MT] = MRP_APPLICANT_VP,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_VP,
},
[MRP_APPLICANT_VN] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_VN,
[MRP_EVENT_LV] = MRP_APPLICANT_LA,
[MRP_EVENT_TX] = MRP_APPLICANT_AN,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_VN,
[MRP_EVENT_R_IN] = MRP_APPLICANT_VN,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VN,
[MRP_EVENT_R_MT] = MRP_APPLICANT_VN,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VN,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VN,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VN,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_VN,
},
[MRP_APPLICANT_AN] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_AN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_AN,
[MRP_EVENT_LV] = MRP_APPLICANT_LA,
[MRP_EVENT_TX] = MRP_APPLICANT_QA,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_AN,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AN,
[MRP_EVENT_R_IN] = MRP_APPLICANT_AN,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AN,
[MRP_EVENT_R_MT] = MRP_APPLICANT_AN,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VN,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VN,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VN,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AN,
},
[MRP_APPLICANT_AA] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_AA,
[MRP_EVENT_LV] = MRP_APPLICANT_LA,
[MRP_EVENT_TX] = MRP_APPLICANT_QA,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_AA,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QA,
[MRP_EVENT_R_IN] = MRP_APPLICANT_AA,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AA,
[MRP_EVENT_R_MT] = MRP_APPLICANT_AA,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AA,
},
[MRP_APPLICANT_QA] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_QA,
[MRP_EVENT_LV] = MRP_APPLICANT_LA,
[MRP_EVENT_TX] = MRP_APPLICANT_QA,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_QA,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QA,
[MRP_EVENT_R_IN] = MRP_APPLICANT_QA,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AA,
[MRP_EVENT_R_MT] = MRP_APPLICANT_AA,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AA,
},
[MRP_APPLICANT_LA] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_AA,
[MRP_EVENT_LV] = MRP_APPLICANT_LA,
[MRP_EVENT_TX] = MRP_APPLICANT_VO,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_LA,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_LA,
[MRP_EVENT_R_IN] = MRP_APPLICANT_LA,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_LA,
[MRP_EVENT_R_MT] = MRP_APPLICANT_LA,
[MRP_EVENT_R_LV] = MRP_APPLICANT_LA,
[MRP_EVENT_R_LA] = MRP_APPLICANT_LA,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_LA,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_LA,
},
[MRP_APPLICANT_AO] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_AP,
[MRP_EVENT_LV] = MRP_APPLICANT_AO,
[MRP_EVENT_TX] = MRP_APPLICANT_AO,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_AO,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QO,
[MRP_EVENT_R_IN] = MRP_APPLICANT_AO,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AO,
[MRP_EVENT_R_MT] = MRP_APPLICANT_AO,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AO,
},
[MRP_APPLICANT_QO] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_QP,
[MRP_EVENT_LV] = MRP_APPLICANT_QO,
[MRP_EVENT_TX] = MRP_APPLICANT_QO,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_QO,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QO,
[MRP_EVENT_R_IN] = MRP_APPLICANT_QO,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AO,
[MRP_EVENT_R_MT] = MRP_APPLICANT_AO,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_QO,
},
[MRP_APPLICANT_AP] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_AP,
[MRP_EVENT_LV] = MRP_APPLICANT_AO,
[MRP_EVENT_TX] = MRP_APPLICANT_QA,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_AP,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QP,
[MRP_EVENT_R_IN] = MRP_APPLICANT_AP,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AP,
[MRP_EVENT_R_MT] = MRP_APPLICANT_AP,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AP,
},
[MRP_APPLICANT_QP] = {
[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
[MRP_EVENT_JOIN] = MRP_APPLICANT_QP,
[MRP_EVENT_LV] = MRP_APPLICANT_QO,
[MRP_EVENT_TX] = MRP_APPLICANT_QP,
[MRP_EVENT_R_NEW] = MRP_APPLICANT_QP,
[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QP,
[MRP_EVENT_R_IN] = MRP_APPLICANT_QP,
[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AP,
[MRP_EVENT_R_MT] = MRP_APPLICANT_AP,
[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AP,
},
};
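/* The table above encodes the MRP Applicant state machine: an
* attribute's current state selects the row, the incoming event the
* column, and the entry is the next state. For example, an idle
* observer (VO) asked to JOIN moves to VP and will declare the
* attribute at its next transmit opportunity.
*/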
static const u8
mrp_tx_action_table[MRP_APPLICANT_MAX + 1] = {
[MRP_APPLICANT_VO] = MRP_TX_ACTION_S_IN_OPTIONAL,
[MRP_APPLICANT_VP] = MRP_TX_ACTION_S_JOIN_IN,
[MRP_APPLICANT_VN] = MRP_TX_ACTION_S_NEW,
[MRP_APPLICANT_AN] = MRP_TX_ACTION_S_NEW,
[MRP_APPLICANT_AA] = MRP_TX_ACTION_S_JOIN_IN,
[MRP_APPLICANT_QA] = MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
[MRP_APPLICANT_LA] = MRP_TX_ACTION_S_LV,
[MRP_APPLICANT_AO] = MRP_TX_ACTION_S_IN_OPTIONAL,
[MRP_APPLICANT_QO] = MRP_TX_ACTION_S_IN_OPTIONAL,
[MRP_APPLICANT_AP] = MRP_TX_ACTION_S_JOIN_IN,
[MRP_APPLICANT_QP] = MRP_TX_ACTION_S_IN_OPTIONAL,
};
static void mrp_attrvalue_inc(void *value, u8 len)
{
u8 *v = (u8 *)value;
/* Add 1 to the last byte. If it becomes zero,
* go to the previous byte and repeat.
*/
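/* Example: for len == 2, the value {0x00, 0xff} increments to
* {0x01, 0x00}, i.e. a big-endian multi-byte counter.
*/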
while (len > 0 && !++v[--len])
;
}
static int mrp_attr_cmp(const struct mrp_attr *attr,
const void *value, u8 len, u8 type)
{
if (attr->type != type)
return attr->type - type;
if (attr->len != len)
return attr->len - len;
return memcmp(attr->value, value, len);
}
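/* Attributes sort by type, then length, then value. This total order
* lets the single rbtree at app->mad hold every attribute type an
* application declares.
*/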
static struct mrp_attr *mrp_attr_lookup(const struct mrp_applicant *app,
const void *value, u8 len, u8 type)
{
struct rb_node *parent = app->mad.rb_node;
struct mrp_attr *attr;
int d;
while (parent) {
attr = rb_entry(parent, struct mrp_attr, node);
d = mrp_attr_cmp(attr, value, len, type);
if (d > 0)
parent = parent->rb_left;
else if (d < 0)
parent = parent->rb_right;
else
return attr;
}
return NULL;
}
static struct mrp_attr *mrp_attr_create(struct mrp_applicant *app,
const void *value, u8 len, u8 type)
{
struct rb_node *parent = NULL, **p = &app->mad.rb_node;
struct mrp_attr *attr;
int d;
while (*p) {
parent = *p;
attr = rb_entry(parent, struct mrp_attr, node);
d = mrp_attr_cmp(attr, value, len, type);
if (d > 0)
p = &parent->rb_left;
else if (d < 0)
p = &parent->rb_right;
else {
/* The attribute already exists; re-use it. */
return attr;
}
}
attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
if (!attr)
return attr;
attr->state = MRP_APPLICANT_VO;
attr->type = type;
attr->len = len;
memcpy(attr->value, value, len);
rb_link_node(&attr->node, parent, p);
rb_insert_color(&attr->node, &app->mad);
return attr;
}
static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
{
rb_erase(&attr->node, &app->mad);
kfree(attr);
}
static int mrp_pdu_init(struct mrp_applicant *app)
{
struct sk_buff *skb;
struct mrp_pdu_hdr *ph;
skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
GFP_ATOMIC);
if (!skb)
return -ENOMEM;
skb->dev = app->dev;
skb->protocol = app->app->pkttype.type;
skb_reserve(skb, LL_RESERVED_SPACE(app->dev));
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
ph = (struct mrp_pdu_hdr *)__skb_put(skb, sizeof(*ph));
ph->version = app->app->version;
app->pdu = skb;
return 0;
}
static int mrp_pdu_append_end_mark(struct mrp_applicant *app)
{
__be16 *endmark;
if (skb_tailroom(app->pdu) < sizeof(*endmark))
return -1;
endmark = (__be16 *)__skb_put(app->pdu, sizeof(*endmark));
put_unaligned(MRP_END_MARK, endmark);
return 0;
}
static void mrp_pdu_queue(struct mrp_applicant *app)
{
if (!app->pdu)
return;
if (mrp_cb(app->pdu)->mh)
mrp_pdu_append_end_mark(app);
mrp_pdu_append_end_mark(app);
dev_hard_header(app->pdu, app->dev, ntohs(app->app->pkttype.type),
app->app->group_address, app->dev->dev_addr,
app->pdu->len);
skb_queue_tail(&app->queue, app->pdu);
app->pdu = NULL;
}
static void mrp_queue_xmit(struct mrp_applicant *app)
{
struct sk_buff *skb;
while ((skb = skb_dequeue(&app->queue)))
dev_queue_xmit(skb);
}
static int mrp_pdu_append_msg_hdr(struct mrp_applicant *app,
u8 attrtype, u8 attrlen)
{
struct mrp_msg_hdr *mh;
if (mrp_cb(app->pdu)->mh) {
if (mrp_pdu_append_end_mark(app) < 0)
return -1;
mrp_cb(app->pdu)->mh = NULL;
mrp_cb(app->pdu)->vah = NULL;
}
if (skb_tailroom(app->pdu) < sizeof(*mh))
return -1;
mh = (struct mrp_msg_hdr *)__skb_put(app->pdu, sizeof(*mh));
mh->attrtype = attrtype;
mh->attrlen = attrlen;
mrp_cb(app->pdu)->mh = mh;
return 0;
}
static int mrp_pdu_append_vecattr_hdr(struct mrp_applicant *app,
const void *firstattrvalue, u8 attrlen)
{
struct mrp_vecattr_hdr *vah;
if (skb_tailroom(app->pdu) < sizeof(*vah) + attrlen)
return -1;
vah = (struct mrp_vecattr_hdr *)__skb_put(app->pdu,
sizeof(*vah) + attrlen);
put_unaligned(0, &vah->lenflags);
memcpy(vah->firstattrvalue, firstattrvalue, attrlen);
mrp_cb(app->pdu)->vah = vah;
memcpy(mrp_cb(app->pdu)->attrvalue, firstattrvalue, attrlen);
return 0;
}
static int mrp_pdu_append_vecattr_event(struct mrp_applicant *app,
const struct mrp_attr *attr,
enum mrp_vecattr_event vaevent)
{
u16 len, pos;
u8 *vaevents;
int err;
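/* Appending proceeds in up to three steps (message header, vector
* attribute header, event). If any step runs out of tailroom, the PDU
* built so far is queued and the whole attempt restarts on a fresh one.
*/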
again:
if (!app->pdu) {
err = mrp_pdu_init(app);
if (err < 0)
return err;
}
/* If there is no Message header in the PDU, or the Message header is
* for a different attribute type, add an EndMark (if necessary) and a
* new Message header to the PDU.
*/
if (!mrp_cb(app->pdu)->mh ||
mrp_cb(app->pdu)->mh->attrtype != attr->type ||
mrp_cb(app->pdu)->mh->attrlen != attr->len) {
if (mrp_pdu_append_msg_hdr(app, attr->type, attr->len) < 0)
goto queue;
}
/* If there is no VectorAttribute header for this Message in the PDU,
* or this attribute's value does not sequentially follow the previous
* attribute's value, add a new VectorAttribute header to the PDU.
*/
if (!mrp_cb(app->pdu)->vah ||
memcmp(mrp_cb(app->pdu)->attrvalue, attr->value, attr->len)) {
if (mrp_pdu_append_vecattr_hdr(app, attr->value, attr->len) < 0)
goto queue;
}
len = be16_to_cpu(get_unaligned(&mrp_cb(app->pdu)->vah->lenflags));
pos = len % 3;
/* Events are packed into Vectors in the PDU, three to a byte. Add a
* byte to the end of the Vector if necessary.
*/
if (!pos) {
if (skb_tailroom(app->pdu) < sizeof(u8))
goto queue;
vaevents = (u8 *)__skb_put(app->pdu, sizeof(u8));
} else {
vaevents = (u8 *)(skb_tail_pointer(app->pdu) - sizeof(u8));
}
switch (pos) {
case 0:
*vaevents = vaevent * (__MRP_VECATTR_EVENT_MAX *
__MRP_VECATTR_EVENT_MAX);
break;
case 1:
*vaevents += vaevent * __MRP_VECATTR_EVENT_MAX;
break;
case 2:
*vaevents += vaevent;
break;
default:
WARN_ON(1);
}
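/* Illustrative packing, assuming __MRP_VECATTR_EVENT_MAX is 6 (the six
* event codes handled in mrp_pdu_parse_vecattr_event()): three events
* a, b, c share one byte as a * 36 + b * 6 + c, which the receive path
* in mrp_pdu_parse_vecattr() unpacks with division and modulo.
*/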
/* Increment the length of the VectorAttribute in the PDU, as well as
* the value of the next attribute that would continue its Vector.
*/
put_unaligned(cpu_to_be16(++len), &mrp_cb(app->pdu)->vah->lenflags);
mrp_attrvalue_inc(mrp_cb(app->pdu)->attrvalue, attr->len);
return 0;
queue:
mrp_pdu_queue(app);
goto again;
}
static void mrp_attr_event(struct mrp_applicant *app,
struct mrp_attr *attr, enum mrp_event event)
{
enum mrp_applicant_state state;
state = mrp_applicant_state_table[attr->state][event];
if (state == MRP_APPLICANT_INVALID) {
WARN_ON(1);
return;
}
if (event == MRP_EVENT_TX) {
/* When appending the attribute fails, don't update its state
* in order to retry at the next TX event.
*/
switch (mrp_tx_action_table[attr->state]) {
case MRP_TX_ACTION_NONE:
case MRP_TX_ACTION_S_JOIN_IN_OPTIONAL:
case MRP_TX_ACTION_S_IN_OPTIONAL:
break;
case MRP_TX_ACTION_S_NEW:
if (mrp_pdu_append_vecattr_event(
app, attr, MRP_VECATTR_EVENT_NEW) < 0)
return;
break;
case MRP_TX_ACTION_S_JOIN_IN:
if (mrp_pdu_append_vecattr_event(
app, attr, MRP_VECATTR_EVENT_JOIN_IN) < 0)
return;
break;
case MRP_TX_ACTION_S_LV:
if (mrp_pdu_append_vecattr_event(
app, attr, MRP_VECATTR_EVENT_LV) < 0)
return;
/* As a pure applicant, sending a leave message
* implies that the attribute was unregistered and
* can be destroyed.
*/
mrp_attr_destroy(app, attr);
return;
default:
WARN_ON(1);
}
}
attr->state = state;
}
int mrp_request_join(const struct net_device *dev,
const struct mrp_application *appl,
const void *value, u8 len, u8 type)
{
struct mrp_port *port = rtnl_dereference(dev->mrp_port);
struct mrp_applicant *app = rtnl_dereference(
port->applicants[appl->type]);
struct mrp_attr *attr;
if (sizeof(struct mrp_skb_cb) + len >
FIELD_SIZEOF(struct sk_buff, cb))
return -ENOMEM;
spin_lock_bh(&app->lock);
attr = mrp_attr_create(app, value, len, type);
if (!attr) {
spin_unlock_bh(&app->lock);
return -ENOMEM;
}
mrp_attr_event(app, attr, MRP_EVENT_JOIN);
spin_unlock_bh(&app->lock);
return 0;
}
EXPORT_SYMBOL_GPL(mrp_request_join);
void mrp_request_leave(const struct net_device *dev,
const struct mrp_application *appl,
const void *value, u8 len, u8 type)
{
struct mrp_port *port = rtnl_dereference(dev->mrp_port);
struct mrp_applicant *app = rtnl_dereference(
port->applicants[appl->type]);
struct mrp_attr *attr;
if (sizeof(struct mrp_skb_cb) + len >
FIELD_SIZEOF(struct sk_buff, cb))
return;
spin_lock_bh(&app->lock);
attr = mrp_attr_lookup(app, value, len, type);
if (!attr) {
spin_unlock_bh(&app->lock);
return;
}
mrp_attr_event(app, attr, MRP_EVENT_LV);
spin_unlock_bh(&app->lock);
}
EXPORT_SYMBOL_GPL(mrp_request_leave);
static void mrp_mad_event(struct mrp_applicant *app, enum mrp_event event)
{
struct rb_node *node, *next;
struct mrp_attr *attr;
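/* The successor is fetched before the event is delivered because
* mrp_attr_event() may free the current attribute (a TX event in
* state LA sends a leave and destroys it).
*/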
for (node = rb_first(&app->mad);
next = node ? rb_next(node) : NULL, node != NULL;
node = next) {
attr = rb_entry(node, struct mrp_attr, node);
mrp_attr_event(app, attr, event);
}
}
static void mrp_join_timer_arm(struct mrp_applicant *app)
{
unsigned long delay;
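/* Draw a uniform delay in [0, mrp_join_time) ms: multiplying the
* jiffies value by a 32-bit random number and shifting right by 32
* scales it by rnd / 2^32 without a division.
*/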
delay = (u64)msecs_to_jiffies(mrp_join_time) * prandom_u32() >> 32;
mod_timer(&app->join_timer, jiffies + delay);
}
static void mrp_join_timer(unsigned long data)
{
struct mrp_applicant *app = (struct mrp_applicant *)data;
spin_lock(&app->lock);
mrp_mad_event(app, MRP_EVENT_TX);
mrp_pdu_queue(app);
spin_unlock(&app->lock);
mrp_queue_xmit(app);
mrp_join_timer_arm(app);
}
static void mrp_periodic_timer_arm(struct mrp_applicant *app)
{
mod_timer(&app->periodic_timer,
jiffies + msecs_to_jiffies(mrp_periodic_time));
}
static void mrp_periodic_timer(unsigned long data)
{
struct mrp_applicant *app = (struct mrp_applicant *)data;
spin_lock(&app->lock);
mrp_mad_event(app, MRP_EVENT_PERIODIC);
mrp_pdu_queue(app);
spin_unlock(&app->lock);
mrp_periodic_timer_arm(app);
}
static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
{
__be16 endmark;
if (skb_copy_bits(skb, *offset, &endmark, sizeof(endmark)) < 0)
return -1;
if (endmark == MRP_END_MARK) {
*offset += sizeof(endmark);
return -1;
}
return 0;
}
static void mrp_pdu_parse_vecattr_event(struct mrp_applicant *app,
struct sk_buff *skb,
enum mrp_vecattr_event vaevent)
{
struct mrp_attr *attr;
enum mrp_event event;
attr = mrp_attr_lookup(app, mrp_cb(skb)->attrvalue,
mrp_cb(skb)->mh->attrlen,
mrp_cb(skb)->mh->attrtype);
if (attr == NULL)
return;
switch (vaevent) {
case MRP_VECATTR_EVENT_NEW:
event = MRP_EVENT_R_NEW;
break;
case MRP_VECATTR_EVENT_JOIN_IN:
event = MRP_EVENT_R_JOIN_IN;
break;
case MRP_VECATTR_EVENT_IN:
event = MRP_EVENT_R_IN;
break;
case MRP_VECATTR_EVENT_JOIN_MT:
event = MRP_EVENT_R_JOIN_MT;
break;
case MRP_VECATTR_EVENT_MT:
event = MRP_EVENT_R_MT;
break;
case MRP_VECATTR_EVENT_LV:
event = MRP_EVENT_R_LV;
break;
default:
return;
}
mrp_attr_event(app, attr, event);
}
static int mrp_pdu_parse_vecattr(struct mrp_applicant *app,
struct sk_buff *skb, int *offset)
{
struct mrp_vecattr_hdr _vah;
u16 valen;
u8 vaevents, vaevent;
mrp_cb(skb)->vah = skb_header_pointer(skb, *offset, sizeof(_vah),
&_vah);
if (!mrp_cb(skb)->vah)
return -1;
*offset += sizeof(_vah);
if (get_unaligned(&mrp_cb(skb)->vah->lenflags) &
MRP_VECATTR_HDR_FLAG_LA)
mrp_mad_event(app, MRP_EVENT_R_LA);
valen = be16_to_cpu(get_unaligned(&mrp_cb(skb)->vah->lenflags) &
MRP_VECATTR_HDR_LEN_MASK);
/* The VectorAttribute structure in a PDU carries event information
* about one or more attributes having consecutive values. Only the
* value for the first attribute is contained in the structure. So
* we make a copy of that value, and then increment it each time we
* advance to the next event in its Vector.
*/
if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen >
FIELD_SIZEOF(struct sk_buff, cb))
return -1;
if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,
mrp_cb(skb)->mh->attrlen) < 0)
return -1;
*offset += mrp_cb(skb)->mh->attrlen;
/* In a VectorAttribute, the Vector contains events which are packed
* three to a byte. We process one byte of the Vector at a time.
*/
while (valen > 0) {
if (skb_copy_bits(skb, *offset, &vaevents,
sizeof(vaevents)) < 0)
return -1;
*offset += sizeof(vaevents);
/* Extract and process the first event. */
vaevent = vaevents / (__MRP_VECATTR_EVENT_MAX *
__MRP_VECATTR_EVENT_MAX);
if (vaevent >= __MRP_VECATTR_EVENT_MAX) {
/* The byte is malformed; stop processing. */
return -1;
}
mrp_pdu_parse_vecattr_event(app, skb, vaevent);
/* If present, extract and process the second event. */
if (!--valen)
break;
mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
mrp_cb(skb)->mh->attrlen);
vaevents %= (__MRP_VECATTR_EVENT_MAX *
__MRP_VECATTR_EVENT_MAX);
vaevent = vaevents / __MRP_VECATTR_EVENT_MAX;
mrp_pdu_parse_vecattr_event(app, skb, vaevent);
/* If present, extract and process the third event. */
if (!--valen)
break;
mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
mrp_cb(skb)->mh->attrlen);
vaevents %= __MRP_VECATTR_EVENT_MAX;
vaevent = vaevents;
mrp_pdu_parse_vecattr_event(app, skb, vaevent);
}
return 0;
}
static int mrp_pdu_parse_msg(struct mrp_applicant *app, struct sk_buff *skb,
int *offset)
{
struct mrp_msg_hdr _mh;
mrp_cb(skb)->mh = skb_header_pointer(skb, *offset, sizeof(_mh), &_mh);
if (!mrp_cb(skb)->mh)
return -1;
*offset += sizeof(_mh);
if (mrp_cb(skb)->mh->attrtype == 0 ||
mrp_cb(skb)->mh->attrtype > app->app->maxattr ||
mrp_cb(skb)->mh->attrlen == 0)
return -1;
while (skb->len > *offset) {
if (mrp_pdu_parse_end_mark(skb, offset) < 0)
break;
if (mrp_pdu_parse_vecattr(app, skb, offset) < 0)
return -1;
}
return 0;
}
static int mrp_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct mrp_application *appl = container_of(pt, struct mrp_application,
pkttype);
struct mrp_port *port;
struct mrp_applicant *app;
struct mrp_pdu_hdr _ph;
const struct mrp_pdu_hdr *ph;
int offset = skb_network_offset(skb);
/* If the interface is in promiscuous mode, drop the packet if
* it was unicast to another host.
*/
if (unlikely(skb->pkt_type == PACKET_OTHERHOST))
goto out;
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
goto out;
port = rcu_dereference(dev->mrp_port);
if (unlikely(!port))
goto out;
app = rcu_dereference(port->applicants[appl->type]);
if (unlikely(!app))
goto out;
ph = skb_header_pointer(skb, offset, sizeof(_ph), &_ph);
if (!ph)
goto out;
offset += sizeof(_ph);
if (ph->version != app->app->version)
goto out;
spin_lock(&app->lock);
while (skb->len > offset) {
if (mrp_pdu_parse_end_mark(skb, &offset) < 0)
break;
if (mrp_pdu_parse_msg(app, skb, &offset) < 0)
break;
}
spin_unlock(&app->lock);
out:
kfree_skb(skb);
return 0;
}
static int mrp_init_port(struct net_device *dev)
{
struct mrp_port *port;
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
rcu_assign_pointer(dev->mrp_port, port);
return 0;
}
static void mrp_release_port(struct net_device *dev)
{
struct mrp_port *port = rtnl_dereference(dev->mrp_port);
unsigned int i;
for (i = 0; i <= MRP_APPLICATION_MAX; i++) {
if (rtnl_dereference(port->applicants[i]))
return;
}
RCU_INIT_POINTER(dev->mrp_port, NULL);
kfree_rcu(port, rcu);
}
int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
{
struct mrp_applicant *app;
int err;
ASSERT_RTNL();
if (!rtnl_dereference(dev->mrp_port)) {
err = mrp_init_port(dev);
if (err < 0)
goto err1;
}
err = -ENOMEM;
app = kzalloc(sizeof(*app), GFP_KERNEL);
if (!app)
goto err2;
err = dev_mc_add(dev, appl->group_address);
if (err < 0)
goto err3;
app->dev = dev;
app->app = appl;
app->mad = RB_ROOT;
spin_lock_init(&app->lock);
skb_queue_head_init(&app->queue);
rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
setup_timer(&app->join_timer, mrp_join_timer, (unsigned long)app);
mrp_join_timer_arm(app);
setup_timer(&app->periodic_timer, mrp_periodic_timer,
(unsigned long)app);
mrp_periodic_timer_arm(app);
return 0;
err3:
kfree(app);
err2:
mrp_release_port(dev);
err1:
return err;
}
EXPORT_SYMBOL_GPL(mrp_init_applicant);
void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
{
struct mrp_port *port = rtnl_dereference(dev->mrp_port);
struct mrp_applicant *app = rtnl_dereference(
port->applicants[appl->type]);
ASSERT_RTNL();
RCU_INIT_POINTER(port->applicants[appl->type], NULL);
/* Delete timer and generate a final TX event to flush out
* all pending messages before the applicant is gone.
*/
del_timer_sync(&app->join_timer);
del_timer_sync(&app->periodic_timer);
spin_lock_bh(&app->lock);
mrp_mad_event(app, MRP_EVENT_TX);
mrp_pdu_queue(app);
spin_unlock_bh(&app->lock);
mrp_queue_xmit(app);
dev_mc_del(dev, appl->group_address);
kfree_rcu(app, rcu);
mrp_release_port(dev);
}
EXPORT_SYMBOL_GPL(mrp_uninit_applicant);
int mrp_register_application(struct mrp_application *appl)
{
appl->pkttype.func = mrp_rcv;
dev_add_pack(&appl->pkttype);
return 0;
}
EXPORT_SYMBOL_GPL(mrp_register_application);
void mrp_unregister_application(struct mrp_application *appl)
{
dev_remove_pack(&appl->pkttype);
}
EXPORT_SYMBOL_GPL(mrp_unregister_application);
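/* Minimal usage sketch (illustrative only, not part of this file): an
* MRP application registers a struct mrp_application once at module
* init, then binds a per-port applicant under RTNL and joins
* attributes. The names ETH_P_MVRP and MRP_APPLICATION_MVRP follow the
* in-tree MVRP user and the maxattr bound is a placeholder; treat them
* as assumptions rather than definitions from this file.
*
* static struct mrp_application my_app = {
* .type = MRP_APPLICATION_MVRP,
* .maxattr = 1, (placeholder bound)
* .pkttype.type = htons(ETH_P_MVRP),
* .group_address = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 },
* .version = 0,
* };
*
* err = mrp_register_application(&my_app); (once, at module init)
* err = mrp_init_applicant(dev, &my_app); (per port, under RTNL)
* err = mrp_request_join(dev, &my_app, &vid, sizeof(vid), attr_type);
*/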

66
net/802/p8022.c Normal file
View file

@ -0,0 +1,66 @@
/*
* NET3: Support for 802.2 demultiplexing off Ethernet
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Demultiplex 802.2 encoded protocols. We match the entry by the
* SSAP/DSAP pair and then deliver to the registered datalink that
* matches. The control byte is ignored and handling of such items
* is up to the routine passed the frame.
*
* Unlike the 802.3 datalink we have a list of 802.2 entries as
* there are multiple protocols to demux. The list is currently
* short (3 or 4 entries at most). The current demux assumes this.
*/
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/datalink.h>
#include <linux/mm.h>
#include <linux/in.h>
#include <linux/init.h>
#include <net/llc.h>
#include <net/p8022.h>
static int p8022_request(struct datalink_proto *dl, struct sk_buff *skb,
unsigned char *dest)
{
llc_build_and_send_ui_pkt(dl->sap, skb, dest, dl->sap->laddr.lsap);
return 0;
}
struct datalink_proto *register_8022_client(unsigned char type,
int (*func)(struct sk_buff *skb,
struct net_device *dev,
struct packet_type *pt,
struct net_device *orig_dev))
{
struct datalink_proto *proto;
proto = kmalloc(sizeof(*proto), GFP_ATOMIC);
if (proto) {
proto->type[0] = type;
proto->header_length = 3;
proto->request = p8022_request;
proto->sap = llc_sap_open(type, func);
if (!proto->sap) {
kfree(proto);
proto = NULL;
}
}
return proto;
}
void unregister_8022_client(struct datalink_proto *proto)
{
llc_sap_put(proto->sap);
kfree(proto);
}
EXPORT_SYMBOL(register_8022_client);
EXPORT_SYMBOL(unregister_8022_client);
MODULE_LICENSE("GPL");

64
net/802/p8023.c Normal file
View file

@ -0,0 +1,64 @@
/*
* NET3: 802.3 data link hooks used for IPX 802.3
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* 802.3 isn't really a protocol data link layer. Some old IPX stuff
* uses it however. Note that there is only one 802.3 protocol layer
* in the system. We don't currently support different protocols
* running raw 802.3 on different devices. Thankfully nobody else
* has done anything like the old IPX.
*/
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/datalink.h>
#include <net/p8022.h>
/*
* Place an 802.3 header on a packet. The driver will fill in the MAC
* addresses; we just need to give it the buffer length.
*/
static int p8023_request(struct datalink_proto *dl,
struct sk_buff *skb, unsigned char *dest_node)
{
struct net_device *dev = skb->dev;
dev_hard_header(skb, dev, ETH_P_802_3, dest_node, NULL, skb->len);
return dev_queue_xmit(skb);
}
/*
* Create an 802.3 client. Note there can be only one 802.3 client.
*/
struct datalink_proto *make_8023_client(void)
{
struct datalink_proto *proto = kmalloc(sizeof(*proto), GFP_ATOMIC);
if (proto) {
proto->header_length = 0;
proto->request = p8023_request;
}
return proto;
}
/*
* Destroy the 802.3 client.
*/
void destroy_8023_client(struct datalink_proto *dl)
{
kfree(dl);
}
EXPORT_SYMBOL(destroy_8023_client);
EXPORT_SYMBOL(make_8023_client);
MODULE_LICENSE("GPL");

167
net/802/psnap.c Normal file
View file

@ -0,0 +1,167 @@
/*
* SNAP data link layer. Derived from 802.2
*
* Alan Cox <alan@lxorguk.ukuu.org.uk>,
* from the 802.2 layer by Greg Page.
* Merged in additions from Greg Page's psnap.c.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/datalink.h>
#include <net/llc.h>
#include <net/psnap.h>
#include <linux/mm.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/rculist.h>
static LIST_HEAD(snap_list);
static DEFINE_SPINLOCK(snap_lock);
static struct llc_sap *snap_sap;
/*
* Find a snap client by matching the 5 bytes (3-byte OUI plus
* 2-byte protocol ID).
*/
static struct datalink_proto *find_snap_client(const unsigned char *desc)
{
struct datalink_proto *proto = NULL, *p;
list_for_each_entry_rcu(p, &snap_list, node) {
if (!memcmp(p->type, desc, 5)) {
proto = p;
break;
}
}
return proto;
}
/*
* A SNAP packet has arrived
*/
static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
int rc = 1;
struct datalink_proto *proto;
static struct packet_type snap_packet_type = {
.type = cpu_to_be16(ETH_P_SNAP),
};
if (unlikely(!pskb_may_pull(skb, 5)))
goto drop;
rcu_read_lock();
proto = find_snap_client(skb_transport_header(skb));
if (proto) {
/* Pass the frame on. */
skb->transport_header += 5;
skb_pull_rcsum(skb, 5);
rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev);
}
rcu_read_unlock();
if (unlikely(!proto))
goto drop;
out:
return rc;
drop:
kfree_skb(skb);
goto out;
}
/*
* Put a SNAP header on a frame and pass to 802.2
*/
static int snap_request(struct datalink_proto *dl,
struct sk_buff *skb, u8 *dest)
{
memcpy(skb_push(skb, 5), dl->type, 5);
llc_build_and_send_ui_pkt(snap_sap, skb, dest, snap_sap->laddr.lsap);
return 0;
}
/*
* Set up the SNAP layer
*/
EXPORT_SYMBOL(register_snap_client);
EXPORT_SYMBOL(unregister_snap_client);
static const char snap_err_msg[] __initconst =
KERN_CRIT "SNAP - unable to register with 802.2\n";
static int __init snap_init(void)
{
snap_sap = llc_sap_open(0xAA, snap_rcv);
if (!snap_sap) {
printk(snap_err_msg);
return -EBUSY;
}
return 0;
}
module_init(snap_init);
static void __exit snap_exit(void)
{
llc_sap_put(snap_sap);
}
module_exit(snap_exit);
/*
* Register SNAP clients. We don't yet use this for IP.
*/
struct datalink_proto *register_snap_client(const unsigned char *desc,
int (*rcvfunc)(struct sk_buff *,
struct net_device *,
struct packet_type *,
struct net_device *))
{
struct datalink_proto *proto = NULL;
spin_lock_bh(&snap_lock);
if (find_snap_client(desc))
goto out;
proto = kmalloc(sizeof(*proto), GFP_ATOMIC);
if (proto) {
memcpy(proto->type, desc, 5);
proto->rcvfunc = rcvfunc;
proto->header_length = 5 + 3; /* snap + 802.2 */
proto->request = snap_request;
list_add_rcu(&proto->node, &snap_list);
}
out:
spin_unlock_bh(&snap_lock);
return proto;
}
/*
* Unregister SNAP clients. Protocols no longer want to play with us ...
*/
void unregister_snap_client(struct datalink_proto *proto)
{
spin_lock_bh(&snap_lock);
list_del_rcu(&proto->node);
spin_unlock_bh(&snap_lock);
synchronize_net();
kfree(proto);
}
MODULE_LICENSE("GPL");

104
net/802/stp.c Normal file
View file

@ -0,0 +1,104 @@
/*
* STP SAP demux
*
* Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <linux/mutex.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/llc.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/llc.h>
#include <net/llc_pdu.h>
#include <net/stp.h>
/* 01:80:c2:00:00:20 - 01:80:c2:00:00:2F */
#define GARP_ADDR_MIN 0x20
#define GARP_ADDR_MAX 0x2F
#define GARP_ADDR_RANGE (GARP_ADDR_MAX - GARP_ADDR_MIN)
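/* The group address's last byte indexes garp_protos[]: for example,
* 01:80:c2:00:00:21 lands in slot 0x21 - GARP_ADDR_MIN = 1.
*/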
static const struct stp_proto __rcu *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly;
static const struct stp_proto __rcu *stp_proto __read_mostly;
static struct llc_sap *sap __read_mostly;
static unsigned int sap_registered;
static DEFINE_MUTEX(stp_proto_mutex);
/* Called under rcu_read_lock from LLC */
static int stp_pdu_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
const struct ethhdr *eh = eth_hdr(skb);
const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
const struct stp_proto *proto;
if (pdu->ssap != LLC_SAP_BSPAN ||
pdu->dsap != LLC_SAP_BSPAN ||
pdu->ctrl_1 != LLC_PDU_TYPE_U)
goto err;
if (eh->h_dest[5] >= GARP_ADDR_MIN && eh->h_dest[5] <= GARP_ADDR_MAX) {
proto = rcu_dereference(garp_protos[eh->h_dest[5] -
GARP_ADDR_MIN]);
if (proto &&
!ether_addr_equal(eh->h_dest, proto->group_address))
goto err;
} else
proto = rcu_dereference(stp_proto);
if (!proto)
goto err;
proto->rcv(proto, skb, dev);
return 0;
err:
kfree_skb(skb);
return 0;
}
int stp_proto_register(const struct stp_proto *proto)
{
int err = 0;
mutex_lock(&stp_proto_mutex);
if (sap_registered++ == 0) {
sap = llc_sap_open(LLC_SAP_BSPAN, stp_pdu_rcv);
if (!sap) {
err = -ENOMEM;
goto out;
}
}
if (is_zero_ether_addr(proto->group_address))
rcu_assign_pointer(stp_proto, proto);
else
rcu_assign_pointer(garp_protos[proto->group_address[5] -
GARP_ADDR_MIN], proto);
out:
mutex_unlock(&stp_proto_mutex);
return err;
}
EXPORT_SYMBOL_GPL(stp_proto_register);
void stp_proto_unregister(const struct stp_proto *proto)
{
mutex_lock(&stp_proto_mutex);
if (is_zero_ether_addr(proto->group_address))
RCU_INIT_POINTER(stp_proto, NULL);
else
RCU_INIT_POINTER(garp_protos[proto->group_address[5] -
GARP_ADDR_MIN], NULL);
synchronize_rcu();
if (--sap_registered == 0)
llc_sap_put(sap);
mutex_unlock(&stp_proto_mutex);
}
EXPORT_SYMBOL_GPL(stp_proto_unregister);
MODULE_LICENSE("GPL");

40
net/8021q/Kconfig Normal file
View file

@ -0,0 +1,40 @@
#
# Configuration for 802.1Q VLAN support
#
config VLAN_8021Q
tristate "802.1Q/802.1ad VLAN Support"
---help---
Select this and you will be able to create 802.1Q VLAN interfaces
on your Ethernet interfaces. 802.1Q VLAN supports almost
everything a regular Ethernet interface does, including
firewalling, bridging, and of course IP traffic. You will need
the 'ip' utility in order to effectively use VLANs.
See the VLAN web page for more information:
<http://www.candelatech.com/~greear/vlan.html>
To compile this code as a module, choose M here: the module
will be called 8021q.
If unsure, say N.
config VLAN_8021Q_GVRP
bool "GVRP (GARP VLAN Registration Protocol) support"
depends on VLAN_8021Q
select GARP
help
Select this to enable GVRP end-system support. GVRP is used for
automatic propagation of registered VLANs to switches.
If unsure, say N.
config VLAN_8021Q_MVRP
bool "MVRP (Multiple VLAN Registration Protocol) support"
depends on VLAN_8021Q
select MRP
help
Select this to enable MVRP end-system support. MVRP is used for
automatic propagation of registered VLANs to switches; it
supersedes GVRP and is not backwards-compatible.
If unsure, say N.

11
net/8021q/Makefile Normal file
View file

@ -0,0 +1,11 @@
#
# Makefile for the Linux VLAN layer.
#
obj-$(subst m,y,$(CONFIG_VLAN_8021Q)) += vlan_core.o
obj-$(CONFIG_VLAN_8021Q) += 8021q.o
8021q-y := vlan.o vlan_dev.o vlan_netlink.o
8021q-$(CONFIG_VLAN_8021Q_GVRP) += vlan_gvrp.o
8021q-$(CONFIG_VLAN_8021Q_MVRP) += vlan_mvrp.o
8021q-$(CONFIG_PROC_FS) += vlanproc.o

694
net/8021q/vlan.c Normal file
View file

@ -0,0 +1,694 @@
/*
* INET 802.1Q VLAN
* Ethernet-type device handling.
*
* Authors: Ben Greear <greearb@candelatech.com>
* Please send support related email to: netdev@vger.kernel.org
* VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
*
* Fixes:
* Fix for packet capture - Nick Eggleston <nick@dccinc.com>;
* Add HW acceleration hooks - David S. Miller <davem@redhat.com>;
* Correct all the locking - David S. Miller <davem@redhat.com>;
* Use hash table for VLAN groups - David S. Miller <davem@redhat.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <net/p8022.h>
#include <net/arp.h>
#include <linux/rtnetlink.h>
#include <linux/notifier.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <asm/uaccess.h>
#include <linux/if_vlan.h>
#include "vlan.h"
#include "vlanproc.h"
#define DRV_VERSION "1.8"
/* Global VLAN variables */
int vlan_net_id __read_mostly;
const char vlan_fullname[] = "802.1Q VLAN Support";
const char vlan_version[] = DRV_VERSION;
/* End of global variables definitions. */
static int vlan_group_prealloc_vid(struct vlan_group *vg,
__be16 vlan_proto, u16 vlan_id)
{
struct net_device **array;
unsigned int pidx, vidx;
unsigned int size;
ASSERT_RTNL();
pidx = vlan_proto_idx(vlan_proto);
vidx = vlan_id / VLAN_GROUP_ARRAY_PART_LEN;
array = vg->vlan_devices_arrays[pidx][vidx];
if (array != NULL)
return 0;
size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;
array = kzalloc(size, GFP_KERNEL);
if (array == NULL)
return -ENOBUFS;
vg->vlan_devices_arrays[pidx][vidx] = array;
return 0;
}
void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
struct vlan_info *vlan_info;
struct vlan_group *grp;
u16 vlan_id = vlan->vlan_id;
ASSERT_RTNL();
vlan_info = rtnl_dereference(real_dev->vlan_info);
BUG_ON(!vlan_info);
grp = &vlan_info->grp;
grp->nr_vlan_devs--;
if (vlan->flags & VLAN_FLAG_MVRP)
vlan_mvrp_request_leave(dev);
if (vlan->flags & VLAN_FLAG_GVRP)
vlan_gvrp_request_leave(dev);
vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);
netdev_upper_dev_unlink(real_dev, dev);
/* Because unregister_netdevice_queue() makes sure at least one rcu
* grace period is respected before device freeing,
* we don't need to call synchronize_net() here.
*/
unregister_netdevice_queue(dev, head);
if (grp->nr_vlan_devs == 0) {
vlan_mvrp_uninit_applicant(real_dev);
vlan_gvrp_uninit_applicant(real_dev);
}
/* Take it out of our own structures, but be sure to interlock with
* HW accelerating devices or SW vlan input packet processing if
* VLAN is not 0 (leave it there for 802.1p).
*/
if (vlan_id)
vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
/* Get rid of the vlan's reference to real_dev */
dev_put(real_dev);
}
int vlan_check_real_dev(struct net_device *real_dev,
__be16 protocol, u16 vlan_id)
{
const char *name = real_dev->name;
if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
pr_info("VLANs not supported on %s\n", name);
return -EOPNOTSUPP;
}
if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL)
return -EEXIST;
return 0;
}
int register_vlan_dev(struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
u16 vlan_id = vlan->vlan_id;
struct vlan_info *vlan_info;
struct vlan_group *grp;
int err;
err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id);
if (err)
return err;
vlan_info = rtnl_dereference(real_dev->vlan_info);
/* vlan_info should be there now. vlan_vid_add took care of it */
BUG_ON(!vlan_info);
grp = &vlan_info->grp;
if (grp->nr_vlan_devs == 0) {
err = vlan_gvrp_init_applicant(real_dev);
if (err < 0)
goto out_vid_del;
err = vlan_mvrp_init_applicant(real_dev);
if (err < 0)
goto out_uninit_gvrp;
}
err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id);
if (err < 0)
goto out_uninit_mvrp;
vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1;
err = register_netdevice(dev);
if (err < 0)
goto out_uninit_mvrp;
err = netdev_upper_dev_link(real_dev, dev);
if (err)
goto out_unregister_netdev;
/* Account for reference in struct vlan_dev_priv */
dev_hold(real_dev);
netif_stacked_transfer_operstate(real_dev, dev);
linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */
/* So, got the sucker initialized, now let's place
* it into our local structure.
*/
vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev);
grp->nr_vlan_devs++;
return 0;
out_unregister_netdev:
unregister_netdevice(dev);
out_uninit_mvrp:
if (grp->nr_vlan_devs == 0)
vlan_mvrp_uninit_applicant(real_dev);
out_uninit_gvrp:
if (grp->nr_vlan_devs == 0)
vlan_gvrp_uninit_applicant(real_dev);
out_vid_del:
vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
return err;
}
/* Attach a VLAN device to a MAC address (i.e. an Ethernet card).
* Returns 0 if the device was created or a negative error code otherwise.
*/
static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
{
struct net_device *new_dev;
struct vlan_dev_priv *vlan;
struct net *net = dev_net(real_dev);
struct vlan_net *vn = net_generic(net, vlan_net_id);
char name[IFNAMSIZ];
int err;
if (vlan_id >= VLAN_VID_MASK)
return -ERANGE;
err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id);
if (err < 0)
return err;
/* Gotta set up the fields for the device. */
switch (vn->name_type) {
case VLAN_NAME_TYPE_RAW_PLUS_VID:
/* name will look like: eth1.0005 */
snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id);
break;
case VLAN_NAME_TYPE_PLUS_VID_NO_PAD:
/* Put our vlan.VID in the name.
* Name will look like: vlan5
*/
snprintf(name, IFNAMSIZ, "vlan%i", vlan_id);
break;
case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD:
/* Put our vlan.VID in the name.
* Name will look like: eth0.5
*/
snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id);
break;
case VLAN_NAME_TYPE_PLUS_VID:
/* Put our vlan.VID in the name.
* Name will look like: vlan0005
*/
default:
snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
}
new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name,
NET_NAME_UNKNOWN, vlan_setup);
if (new_dev == NULL)
return -ENOBUFS;
dev_net_set(new_dev, net);
/* need 4 bytes for extra VLAN header info,
* hope the underlying device can handle it.
*/
new_dev->mtu = real_dev->mtu;
new_dev->priv_flags |= (real_dev->priv_flags & IFF_UNICAST_FLT);
vlan = vlan_dev_priv(new_dev);
vlan->vlan_proto = htons(ETH_P_8021Q);
vlan->vlan_id = vlan_id;
vlan->real_dev = real_dev;
vlan->dent = NULL;
vlan->flags = VLAN_FLAG_REORDER_HDR;
new_dev->rtnl_link_ops = &vlan_link_ops;
err = register_vlan_dev(new_dev);
if (err < 0)
goto out_free_newdev;
return 0;
out_free_newdev:
free_netdev(new_dev);
return err;
}
static void vlan_sync_address(struct net_device *dev,
struct net_device *vlandev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
/* May be called without an actual change */
if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
return;
/* vlan address was different from the old address and is equal to
* the new address */
if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
dev_uc_del(dev, vlandev->dev_addr);
/* vlan address was equal to the old address and is different from
* the new address */
if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
!ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
dev_uc_add(dev, vlandev->dev_addr);
ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
}
static void vlan_transfer_features(struct net_device *dev,
struct net_device *vlandev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
vlandev->gso_max_size = dev->gso_max_size;
if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
vlandev->hard_header_len = dev->hard_header_len;
else
vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
#if IS_ENABLED(CONFIG_FCOE)
vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
#endif
netdev_update_features(vlandev);
}
static int __vlan_device_event(struct net_device *dev, unsigned long event)
{
int err = 0;
switch (event) {
case NETDEV_CHANGENAME:
vlan_proc_rem_dev(dev);
err = vlan_proc_add_dev(dev);
break;
case NETDEV_REGISTER:
err = vlan_proc_add_dev(dev);
break;
case NETDEV_UNREGISTER:
vlan_proc_rem_dev(dev);
break;
}
return err;
}
static int vlan_device_event(struct notifier_block *unused, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct vlan_group *grp;
struct vlan_info *vlan_info;
int i, flgs;
struct net_device *vlandev;
struct vlan_dev_priv *vlan;
bool last = false;
LIST_HEAD(list);
if (is_vlan_dev(dev)) {
int err = __vlan_device_event(dev, event);
if (err)
return notifier_from_errno(err);
}
if ((event == NETDEV_UP) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
pr_info("adding VLAN 0 to HW filter on device %s\n",
dev->name);
vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
}
vlan_info = rtnl_dereference(dev->vlan_info);
if (!vlan_info)
goto out;
grp = &vlan_info->grp;
/* It is OK that we do not hold the group lock right now,
* as we run under the RTNL lock.
*/
switch (event) {
case NETDEV_CHANGE:
/* Propagate real device state to vlan devices */
vlan_group_for_each_dev(grp, i, vlandev)
netif_stacked_transfer_operstate(dev, vlandev);
break;
case NETDEV_CHANGEADDR:
/* Adjust unicast filters on underlying device */
vlan_group_for_each_dev(grp, i, vlandev) {
flgs = vlandev->flags;
if (!(flgs & IFF_UP))
continue;
vlan_sync_address(dev, vlandev);
}
break;
case NETDEV_CHANGEMTU:
vlan_group_for_each_dev(grp, i, vlandev) {
if (vlandev->mtu <= dev->mtu)
continue;
dev_set_mtu(vlandev, dev->mtu);
}
break;
case NETDEV_FEAT_CHANGE:
/* Propagate device features to underlying device */
vlan_group_for_each_dev(grp, i, vlandev)
vlan_transfer_features(dev, vlandev);
break;
case NETDEV_DOWN:
if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
/* Put all VLANs for this dev in the down state too. */
vlan_group_for_each_dev(grp, i, vlandev) {
flgs = vlandev->flags;
if (!(flgs & IFF_UP))
continue;
vlan = vlan_dev_priv(vlandev);
if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
dev_change_flags(vlandev, flgs & ~IFF_UP);
netif_stacked_transfer_operstate(dev, vlandev);
}
break;
case NETDEV_UP:
/* Put all VLANs for this dev in the up state too. */
vlan_group_for_each_dev(grp, i, vlandev) {
flgs = vlandev->flags;
if (flgs & IFF_UP)
continue;
vlan = vlan_dev_priv(vlandev);
if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
dev_change_flags(vlandev, flgs | IFF_UP);
netif_stacked_transfer_operstate(dev, vlandev);
}
break;
case NETDEV_UNREGISTER:
/* twiddle thumbs on netns device moves */
if (dev->reg_state != NETREG_UNREGISTERING)
break;
vlan_group_for_each_dev(grp, i, vlandev) {
/* removal of last vid destroys vlan_info, abort
* afterwards */
if (vlan_info->nr_vids == 1)
last = true;
unregister_vlan_dev(vlandev, &list);
if (last)
break;
}
unregister_netdevice_many(&list);
break;
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid the underlying device from changing its type. */
if (vlan_uses_dev(dev))
return NOTIFY_BAD;
break;
case NETDEV_NOTIFY_PEERS:
case NETDEV_BONDING_FAILOVER:
case NETDEV_RESEND_IGMP:
/* Propagate to vlan devices */
vlan_group_for_each_dev(grp, i, vlandev)
call_netdevice_notifiers(event, vlandev);
break;
}
out:
return NOTIFY_DONE;
}
static struct notifier_block vlan_notifier_block __read_mostly = {
.notifier_call = vlan_device_event,
};
/*
* VLAN IOCTL handler.
* o execute requested action or pass command to the device driver
* arg is really a struct vlan_ioctl_args __user *.
*/
static int vlan_ioctl_handler(struct net *net, void __user *arg)
{
int err;
struct vlan_ioctl_args args;
struct net_device *dev = NULL;
if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args)))
return -EFAULT;
/* Null terminate this sucker, just in case. */
args.device1[23] = 0;
args.u.device2[23] = 0;
rtnl_lock();
switch (args.cmd) {
case SET_VLAN_INGRESS_PRIORITY_CMD:
case SET_VLAN_EGRESS_PRIORITY_CMD:
case SET_VLAN_FLAG_CMD:
case ADD_VLAN_CMD:
case DEL_VLAN_CMD:
case GET_VLAN_REALDEV_NAME_CMD:
case GET_VLAN_VID_CMD:
err = -ENODEV;
dev = __dev_get_by_name(net, args.device1);
if (!dev)
goto out;
err = -EINVAL;
if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev))
goto out;
}
switch (args.cmd) {
case SET_VLAN_INGRESS_PRIORITY_CMD:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
break;
vlan_dev_set_ingress_priority(dev,
args.u.skb_priority,
args.vlan_qos);
err = 0;
break;
case SET_VLAN_EGRESS_PRIORITY_CMD:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
break;
err = vlan_dev_set_egress_priority(dev,
args.u.skb_priority,
args.vlan_qos);
break;
case SET_VLAN_FLAG_CMD:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
break;
err = vlan_dev_change_flags(dev,
args.vlan_qos ? args.u.flag : 0,
args.u.flag);
break;
case SET_VLAN_NAME_TYPE_CMD:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
break;
if ((args.u.name_type >= 0) &&
(args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
struct vlan_net *vn;
vn = net_generic(net, vlan_net_id);
vn->name_type = args.u.name_type;
err = 0;
} else {
err = -EINVAL;
}
break;
case ADD_VLAN_CMD:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
break;
err = register_vlan_device(dev, args.u.VID);
break;
case DEL_VLAN_CMD:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
break;
unregister_vlan_dev(dev, NULL);
err = 0;
break;
case GET_VLAN_REALDEV_NAME_CMD:
err = 0;
vlan_dev_get_realdev_name(dev, args.u.device2);
if (copy_to_user(arg, &args,
sizeof(struct vlan_ioctl_args)))
err = -EFAULT;
break;
case GET_VLAN_VID_CMD:
err = 0;
args.u.VID = vlan_dev_vlan_id(dev);
if (copy_to_user(arg, &args,
sizeof(struct vlan_ioctl_args)))
err = -EFAULT;
break;
default:
err = -EOPNOTSUPP;
break;
}
out:
rtnl_unlock();
return err;
}
static int __net_init vlan_init_net(struct net *net)
{
struct vlan_net *vn = net_generic(net, vlan_net_id);
int err;
vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;
err = vlan_proc_init(net);
return err;
}
static void __net_exit vlan_exit_net(struct net *net)
{
vlan_proc_cleanup(net);
}
static struct pernet_operations vlan_net_ops = {
.init = vlan_init_net,
.exit = vlan_exit_net,
.id = &vlan_net_id,
.size = sizeof(struct vlan_net),
};
static int __init vlan_proto_init(void)
{
int err;
pr_info("%s v%s\n", vlan_fullname, vlan_version);
err = register_pernet_subsys(&vlan_net_ops);
if (err < 0)
goto err0;
err = register_netdevice_notifier(&vlan_notifier_block);
if (err < 0)
goto err2;
err = vlan_gvrp_init();
if (err < 0)
goto err3;
err = vlan_mvrp_init();
if (err < 0)
goto err4;
err = vlan_netlink_init();
if (err < 0)
goto err5;
vlan_ioctl_set(vlan_ioctl_handler);
return 0;
err5:
vlan_mvrp_uninit();
err4:
vlan_gvrp_uninit();
err3:
unregister_netdevice_notifier(&vlan_notifier_block);
err2:
unregister_pernet_subsys(&vlan_net_ops);
err0:
return err;
}
static void __exit vlan_cleanup_module(void)
{
vlan_ioctl_set(NULL);
vlan_netlink_fini();
unregister_netdevice_notifier(&vlan_notifier_block);
unregister_pernet_subsys(&vlan_net_ops);
rcu_barrier(); /* Wait for completion of call_rcu()'s */
vlan_mvrp_uninit();
vlan_gvrp_uninit();
}
module_init(vlan_proto_init);
module_exit(vlan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

173
net/8021q/vlan.h Normal file
View file

@ -0,0 +1,173 @@
#ifndef __BEN_VLAN_802_1Q_INC__
#define __BEN_VLAN_802_1Q_INC__
#include <linux/if_vlan.h>
#include <linux/u64_stats_sync.h>
#include <linux/list.h>
/* if this changes, algorithm will have to be reworked because this
* depends on completely exhausting the VLAN identifier space. Thus
* it gives constant time look-up, but in many cases it wastes memory.
*/
#define VLAN_GROUP_ARRAY_SPLIT_PARTS 8
#define VLAN_GROUP_ARRAY_PART_LEN (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS)
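/* With VLAN_N_VID == 4096 and eight parts, each part is an array of
* 512 device pointers and vid N lives at [N / 512][N % 512]; parts are
* allocated lazily by vlan_group_prealloc_vid().
*/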
enum vlan_protos {
VLAN_PROTO_8021Q = 0,
VLAN_PROTO_8021AD,
VLAN_PROTO_NUM,
};
struct vlan_group {
unsigned int nr_vlan_devs;
struct hlist_node hlist; /* linked list */
struct net_device **vlan_devices_arrays[VLAN_PROTO_NUM]
[VLAN_GROUP_ARRAY_SPLIT_PARTS];
};
struct vlan_info {
struct net_device *real_dev; /* The ethernet(like) device
* the vlan is attached to.
*/
struct vlan_group grp;
struct list_head vid_list;
unsigned int nr_vids;
struct rcu_head rcu;
};
static inline unsigned int vlan_proto_idx(__be16 proto)
{
switch (proto) {
case htons(ETH_P_8021Q):
return VLAN_PROTO_8021Q;
case htons(ETH_P_8021AD):
return VLAN_PROTO_8021AD;
default:
BUG();
return 0;
}
}
static inline struct net_device *__vlan_group_get_device(struct vlan_group *vg,
unsigned int pidx,
u16 vlan_id)
{
struct net_device **array;
array = vg->vlan_devices_arrays[pidx]
[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
return array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL;
}
static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
__be16 vlan_proto,
u16 vlan_id)
{
return __vlan_group_get_device(vg, vlan_proto_idx(vlan_proto), vlan_id);
}
static inline void vlan_group_set_device(struct vlan_group *vg,
__be16 vlan_proto, u16 vlan_id,
struct net_device *dev)
{
struct net_device **array;
if (!vg)
return;
array = vg->vlan_devices_arrays[vlan_proto_idx(vlan_proto)]
[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
}
/* Must be invoked with rcu_read_lock or with RTNL. */
static inline struct net_device *vlan_find_dev(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id)
{
struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);
if (vlan_info)
return vlan_group_get_device(&vlan_info->grp,
vlan_proto, vlan_id);
return NULL;
}
#define vlan_group_for_each_dev(grp, i, dev) \
for ((i) = 0; i < VLAN_PROTO_NUM * VLAN_N_VID; i++) \
if (((dev) = __vlan_group_get_device((grp), (i) / VLAN_N_VID, \
(i) % VLAN_N_VID)))
/* found in vlan_dev.c */
void vlan_dev_set_ingress_priority(const struct net_device *dev,
u32 skb_prio, u16 vlan_prio);
int vlan_dev_set_egress_priority(const struct net_device *dev,
u32 skb_prio, u16 vlan_prio);
int vlan_dev_change_flags(const struct net_device *dev, u32 flag, u32 mask);
void vlan_dev_get_realdev_name(const struct net_device *dev, char *result);
int vlan_check_real_dev(struct net_device *real_dev,
__be16 protocol, u16 vlan_id);
void vlan_setup(struct net_device *dev);
int register_vlan_dev(struct net_device *dev);
void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
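/* The top three bits of the TCI carry the 802.1p PCP field; shifting
* by VLAN_PRIO_SHIFT (13) and masking with 0x7 below selects the
* per-priority slot of the ingress map.
*/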
static inline u32 vlan_get_ingress_priority(struct net_device *dev,
u16 vlan_tci)
{
struct vlan_dev_priv *vip = vlan_dev_priv(dev);
return vip->ingress_priority_map[(vlan_tci >> VLAN_PRIO_SHIFT) & 0x7];
}
#ifdef CONFIG_VLAN_8021Q_GVRP
int vlan_gvrp_request_join(const struct net_device *dev);
void vlan_gvrp_request_leave(const struct net_device *dev);
int vlan_gvrp_init_applicant(struct net_device *dev);
void vlan_gvrp_uninit_applicant(struct net_device *dev);
int vlan_gvrp_init(void);
void vlan_gvrp_uninit(void);
#else
static inline int vlan_gvrp_request_join(const struct net_device *dev) { return 0; }
static inline void vlan_gvrp_request_leave(const struct net_device *dev) {}
static inline int vlan_gvrp_init_applicant(struct net_device *dev) { return 0; }
static inline void vlan_gvrp_uninit_applicant(struct net_device *dev) {}
static inline int vlan_gvrp_init(void) { return 0; }
static inline void vlan_gvrp_uninit(void) {}
#endif
#ifdef CONFIG_VLAN_8021Q_MVRP
int vlan_mvrp_request_join(const struct net_device *dev);
void vlan_mvrp_request_leave(const struct net_device *dev);
int vlan_mvrp_init_applicant(struct net_device *dev);
void vlan_mvrp_uninit_applicant(struct net_device *dev);
int vlan_mvrp_init(void);
void vlan_mvrp_uninit(void);
#else
static inline int vlan_mvrp_request_join(const struct net_device *dev) { return 0; }
static inline void vlan_mvrp_request_leave(const struct net_device *dev) {}
static inline int vlan_mvrp_init_applicant(struct net_device *dev) { return 0; }
static inline void vlan_mvrp_uninit_applicant(struct net_device *dev) {}
static inline int vlan_mvrp_init(void) { return 0; }
static inline void vlan_mvrp_uninit(void) {}
#endif
extern const char vlan_fullname[];
extern const char vlan_version[];
int vlan_netlink_init(void);
void vlan_netlink_fini(void);
extern struct rtnl_link_ops vlan_link_ops;
extern int vlan_net_id;
struct proc_dir_entry;
struct vlan_net {
/* /proc/net/vlan */
struct proc_dir_entry *proc_vlan_dir;
/* /proc/net/vlan/config */
struct proc_dir_entry *proc_vlan_conf;
/* Determines interface naming scheme. */
unsigned short name_type;
};
#endif /* !(__BEN_VLAN_802_1Q_INC__) */

362
net/8021q/vlan_core.c Normal file
View file

@ -0,0 +1,362 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include "vlan.h"
bool vlan_do_receive(struct sk_buff **skbp)
{
struct sk_buff *skb = *skbp;
__be16 vlan_proto = skb->vlan_proto;
u16 vlan_id = vlan_tx_tag_get_id(skb);
struct net_device *vlan_dev;
struct vlan_pcpu_stats *rx_stats;
vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
if (!vlan_dev)
return false;
skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
return false;
skb->dev = vlan_dev;
if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
/* Our lower layer thinks this is not local, let's make sure.
* This allows the VLAN to have a different MAC than the
* underlying device, and still route correctly. */
if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
skb->pkt_type = PACKET_HOST;
}
if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
unsigned int offset = skb->data - skb_mac_header(skb);
/*
* vlan_insert_tag() expects skb->data to point at the mac header,
* so move skb->data before calling it and restore the original
* position afterwards.
*/
skb_push(skb, offset);
skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto,
skb->vlan_tci);
if (!skb)
return false;
skb_pull(skb, offset + VLAN_HLEN);
skb_reset_mac_len(skb);
}
skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
skb->vlan_tci = 0;
rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->rx_packets++;
rx_stats->rx_bytes += skb->len;
if (skb->pkt_type == PACKET_MULTICAST)
rx_stats->rx_multicast++;
u64_stats_update_end(&rx_stats->syncp);
return true;
}
/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
__be16 vlan_proto, u16 vlan_id)
{
struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);
if (vlan_info) {
return vlan_group_get_device(&vlan_info->grp,
vlan_proto, vlan_id);
} else {
/*
* Lower devices of master uppers (bonding, team) do not have
* grp assigned to themselves. Grp is assigned to upper device
* instead.
*/
struct net_device *upper_dev;
upper_dev = netdev_master_upper_dev_get_rcu(dev);
if (upper_dev)
return __vlan_find_dev_deep_rcu(upper_dev,
vlan_proto, vlan_id);
}
return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
struct net_device *ret = vlan_dev_priv(dev)->real_dev;
while (is_vlan_dev(ret))
ret = vlan_dev_priv(ret)->real_dev;
return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);
u16 vlan_dev_vlan_id(const struct net_device *dev)
{
return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);
__be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
return vlan_dev_priv(dev)->vlan_proto;
}
EXPORT_SYMBOL(vlan_dev_vlan_proto);
/*
* vlan info and vid list
*/
static void vlan_group_free(struct vlan_group *grp)
{
int i, j;
for (i = 0; i < VLAN_PROTO_NUM; i++)
for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
kfree(grp->vlan_devices_arrays[i][j]);
}
static void vlan_info_free(struct vlan_info *vlan_info)
{
vlan_group_free(&vlan_info->grp);
kfree(vlan_info);
}
static void vlan_info_rcu_free(struct rcu_head *rcu)
{
vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}
static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
struct vlan_info *vlan_info;
vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
if (!vlan_info)
return NULL;
vlan_info->real_dev = dev;
INIT_LIST_HEAD(&vlan_info->vid_list);
return vlan_info;
}
struct vlan_vid_info {
struct list_head list;
__be16 proto;
u16 vid;
int refcount;
};
static bool vlan_hw_filter_capable(const struct net_device *dev,
const struct vlan_vid_info *vid_info)
{
if (vid_info->proto == htons(ETH_P_8021Q) &&
dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
return true;
if (vid_info->proto == htons(ETH_P_8021AD) &&
dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
return true;
return false;
}
static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
__be16 proto, u16 vid)
{
struct vlan_vid_info *vid_info;
list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
if (vid_info->proto == proto && vid_info->vid == vid)
return vid_info;
}
return NULL;
}
static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
{
struct vlan_vid_info *vid_info;
vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
if (!vid_info)
return NULL;
vid_info->proto = proto;
vid_info->vid = vid;
return vid_info;
}
static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
struct vlan_vid_info **pvid_info)
{
struct net_device *dev = vlan_info->real_dev;
const struct net_device_ops *ops = dev->netdev_ops;
struct vlan_vid_info *vid_info;
int err;
vid_info = vlan_vid_info_alloc(proto, vid);
if (!vid_info)
return -ENOMEM;
if (vlan_hw_filter_capable(dev, vid_info)) {
err = ops->ndo_vlan_rx_add_vid(dev, proto, vid);
if (err) {
kfree(vid_info);
return err;
}
}
list_add(&vid_info->list, &vlan_info->vid_list);
vlan_info->nr_vids++;
*pvid_info = vid_info;
return 0;
}
int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
struct vlan_info *vlan_info;
struct vlan_vid_info *vid_info;
bool vlan_info_created = false;
int err;
ASSERT_RTNL();
vlan_info = rtnl_dereference(dev->vlan_info);
if (!vlan_info) {
vlan_info = vlan_info_alloc(dev);
if (!vlan_info)
return -ENOMEM;
vlan_info_created = true;
}
vid_info = vlan_vid_info_get(vlan_info, proto, vid);
if (!vid_info) {
err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
if (err)
goto out_free_vlan_info;
}
vid_info->refcount++;
if (vlan_info_created)
rcu_assign_pointer(dev->vlan_info, vlan_info);
return 0;
out_free_vlan_info:
if (vlan_info_created)
kfree(vlan_info);
return err;
}
EXPORT_SYMBOL(vlan_vid_add);
static void __vlan_vid_del(struct vlan_info *vlan_info,
struct vlan_vid_info *vid_info)
{
struct net_device *dev = vlan_info->real_dev;
const struct net_device_ops *ops = dev->netdev_ops;
__be16 proto = vid_info->proto;
u16 vid = vid_info->vid;
int err;
if (vlan_hw_filter_capable(dev, vid_info)) {
err = ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
if (err) {
pr_warn("failed to kill vid %04x/%d for device %s\n",
proto, vid, dev->name);
}
}
list_del(&vid_info->list);
kfree(vid_info);
vlan_info->nr_vids--;
}
void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
struct vlan_info *vlan_info;
struct vlan_vid_info *vid_info;
ASSERT_RTNL();
vlan_info = rtnl_dereference(dev->vlan_info);
if (!vlan_info)
return;
vid_info = vlan_vid_info_get(vlan_info, proto, vid);
if (!vid_info)
return;
vid_info->refcount--;
if (vid_info->refcount == 0) {
__vlan_vid_del(vlan_info, vid_info);
if (vlan_info->nr_vids == 0) {
RCU_INIT_POINTER(dev->vlan_info, NULL);
call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
}
}
}
EXPORT_SYMBOL(vlan_vid_del);
int vlan_vids_add_by_dev(struct net_device *dev,
const struct net_device *by_dev)
{
struct vlan_vid_info *vid_info;
struct vlan_info *vlan_info;
int err;
ASSERT_RTNL();
vlan_info = rtnl_dereference(by_dev->vlan_info);
if (!vlan_info)
return 0;
list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
if (err)
goto unwind;
}
return 0;
unwind:
list_for_each_entry_continue_reverse(vid_info,
&vlan_info->vid_list,
list) {
vlan_vid_del(dev, vid_info->proto, vid_info->vid);
}
return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);
void vlan_vids_del_by_dev(struct net_device *dev,
const struct net_device *by_dev)
{
struct vlan_vid_info *vid_info;
struct vlan_info *vlan_info;
ASSERT_RTNL();
vlan_info = rtnl_dereference(by_dev->vlan_info);
if (!vlan_info)
return;
list_for_each_entry(vid_info, &vlan_info->vid_list, list)
vlan_vid_del(dev, vid_info->proto, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);
bool vlan_uses_dev(const struct net_device *dev)
{
struct vlan_info *vlan_info;
ASSERT_RTNL();
vlan_info = rtnl_dereference(dev->vlan_info);
if (!vlan_info)
return false;
return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);

811
net/8021q/vlan_dev.c Normal file
View file

@ -0,0 +1,811 @@
/* -*- linux-c -*-
* INET 802.1Q VLAN
* Ethernet-type device handling.
*
* Authors: Ben Greear <greearb@candelatech.com>
* Please send support related email to: netdev@vger.kernel.org
* VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
*
* Fixes: Mar 22 2001: Martin Bokaemper <mbokaemper@unispherenetworks.com>
* - reset skb->pkt_type on incoming packets when MAC was changed
* - see that changed MAC is saddr for outgoing packets
* Oct 20, 2001: Ard van Breeman:
* - Fix MC-list, finally.
* - Flush MC-list on VLAN destroy.
*
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include "vlan.h"
#include "vlanproc.h"
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
/*
* Rebuild the Ethernet MAC header. This is called after an ARP
* (or in future other address resolution) has completed on this
* sk_buff. We now let ARP fill in the other fields.
*
* This routine CANNOT use cached dst->neigh!
* Really, it is used only when dst->neigh is wrong.
*
* TODO: This needs a checkup, I'm ignorant here. --BLG
*/
static int vlan_dev_rebuild_header(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
switch (veth->h_vlan_encapsulated_proto) {
#ifdef CONFIG_INET
case htons(ETH_P_IP):
/* TODO: Confirm this will work with VLAN headers... */
return arp_find(veth->h_dest, skb);
#endif
default:
pr_debug("%s: unable to resolve type %X addresses\n",
dev->name, ntohs(veth->h_vlan_encapsulated_proto));
ether_addr_copy(veth->h_source, dev->dev_addr);
break;
}
return 0;
}
/*
* Create the VLAN header for an arbitrary protocol layer
*
* saddr=NULL means use device source address
* daddr=NULL means leave destination address (eg unresolved arp)
*
* This is called when the SKB is moving down the stack towards the
* physical devices.
*/
static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
unsigned int len)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct vlan_hdr *vhdr;
unsigned int vhdrlen = 0;
u16 vlan_tci = 0;
int rc;
if (!(vlan->flags & VLAN_FLAG_REORDER_HDR)) {
vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
vlan_tci = vlan->vlan_id;
vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
vhdr->h_vlan_TCI = htons(vlan_tci);
/*
* Set the protocol type. For a packet of type ETH_P_802_3/2 we
* put the length in here instead.
*/
if (type != ETH_P_802_3 && type != ETH_P_802_2)
vhdr->h_vlan_encapsulated_proto = htons(type);
else
vhdr->h_vlan_encapsulated_proto = htons(len);
skb->protocol = vlan->vlan_proto;
type = ntohs(vlan->vlan_proto);
vhdrlen = VLAN_HLEN;
}
/* Before delegating work to the lower layer, enter our MAC-address */
if (saddr == NULL)
saddr = dev->dev_addr;
/* Now make the underlying real hard header */
dev = vlan->real_dev;
rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);
if (rc > 0)
rc += vhdrlen;
return rc;
}
static inline netdev_tx_t vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
if (vlan->netpoll)
netpoll_send_skb(vlan->netpoll, skb);
#else
BUG();
#endif
return NETDEV_TX_OK;
}
static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
unsigned int len;
int ret;
/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
*
* NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
* OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
*/
if (veth->h_vlan_proto != vlan->vlan_proto ||
vlan->flags & VLAN_FLAG_REORDER_HDR) {
u16 vlan_tci;
vlan_tci = vlan->vlan_id;
vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
skb = __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
}
skb->dev = vlan->real_dev;
len = skb->len;
if (unlikely(netpoll_tx_running(dev)))
return vlan_netpoll_send_skb(vlan, skb);
ret = dev_queue_xmit(skb);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
struct vlan_pcpu_stats *stats;
stats = this_cpu_ptr(vlan->vlan_pcpu_stats);
u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
stats->tx_bytes += len;
u64_stats_update_end(&stats->syncp);
} else {
this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped);
}
return ret;
}
static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
{
/* TODO: gotta make sure the underlying layer can handle it,
* maybe an IFF_VLAN_CAPABLE flag for devices?
*/
if (vlan_dev_priv(dev)->real_dev->mtu < new_mtu)
return -ERANGE;
dev->mtu = new_mtu;
return 0;
}
void vlan_dev_set_ingress_priority(const struct net_device *dev,
u32 skb_prio, u16 vlan_prio)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio)
vlan->nr_ingress_mappings--;
else if (!vlan->ingress_priority_map[vlan_prio & 0x7] && skb_prio)
vlan->nr_ingress_mappings++;
vlan->ingress_priority_map[vlan_prio & 0x7] = skb_prio;
}
int vlan_dev_set_egress_priority(const struct net_device *dev,
u32 skb_prio, u16 vlan_prio)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct vlan_priority_tci_mapping *mp = NULL;
struct vlan_priority_tci_mapping *np;
u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
/* See if a priority mapping exists.. */
mp = vlan->egress_priority_map[skb_prio & 0xF];
while (mp) {
if (mp->priority == skb_prio) {
if (mp->vlan_qos && !vlan_qos)
vlan->nr_egress_mappings--;
else if (!mp->vlan_qos && vlan_qos)
vlan->nr_egress_mappings++;
mp->vlan_qos = vlan_qos;
return 0;
}
mp = mp->next;
}
/* Create a new mapping then. */
mp = vlan->egress_priority_map[skb_prio & 0xF];
np = kmalloc(sizeof(struct vlan_priority_tci_mapping), GFP_KERNEL);
if (!np)
return -ENOBUFS;
np->next = mp;
np->priority = skb_prio;
np->vlan_qos = vlan_qos;
/* Before inserting this element in hash table, make sure all its fields
* are committed to memory.
* coupled with smp_rmb() in vlan_dev_get_egress_qos_mask()
*/
smp_wmb();
vlan->egress_priority_map[skb_prio & 0xF] = np;
if (vlan_qos)
vlan->nr_egress_mappings++;
return 0;
}
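/* Annotation (sketch, not verbatim): the lockless reader referenced above,
 * vlan_dev_get_egress_qos_mask() in vlan.h, walks the same chain; the
 * smp_wmb() guarantees that a reader observing the new head also observes
 * its initialized fields. Roughly:
 *
 * mp = vlan->egress_priority_map[skb_prio & 0xF];
 * while (mp) {
 * if (mp->priority == skb_prio)
 * return mp->vlan_qos;
 * mp = mp->next;
 * }
 * return 0;
 */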
/* Flags are defined in the vlan_flags enum in include/linux/if_vlan.h file. */
int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
u32 old_flags = vlan->flags;
if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP))
return -EINVAL;
vlan->flags = (old_flags & ~mask) | (flags & mask);
if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_GVRP) {
if (vlan->flags & VLAN_FLAG_GVRP)
vlan_gvrp_request_join(dev);
else
vlan_gvrp_request_leave(dev);
}
if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_MVRP) {
if (vlan->flags & VLAN_FLAG_MVRP)
vlan_mvrp_request_join(dev);
else
vlan_mvrp_request_leave(dev);
}
return 0;
}
void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
{
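/* 23 chars + NUL: the reply buffer of the legacy VLAN ioctl
 * (vlan_ioctl_args.u.device2) is 24 bytes.
 */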
strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
}
static int vlan_dev_open(struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
int err;
if (!(real_dev->flags & IFF_UP) &&
!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
return -ENETDOWN;
if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) {
err = dev_uc_add(real_dev, dev->dev_addr);
if (err < 0)
goto out;
}
if (dev->flags & IFF_ALLMULTI) {
err = dev_set_allmulti(real_dev, 1);
if (err < 0)
goto del_unicast;
}
if (dev->flags & IFF_PROMISC) {
err = dev_set_promiscuity(real_dev, 1);
if (err < 0)
goto clear_allmulti;
}
ether_addr_copy(vlan->real_dev_addr, real_dev->dev_addr);
if (vlan->flags & VLAN_FLAG_GVRP)
vlan_gvrp_request_join(dev);
if (vlan->flags & VLAN_FLAG_MVRP)
vlan_mvrp_request_join(dev);
if (netif_carrier_ok(real_dev))
netif_carrier_on(dev);
return 0;
clear_allmulti:
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(real_dev, -1);
del_unicast:
if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
dev_uc_del(real_dev, dev->dev_addr);
out:
netif_carrier_off(dev);
return err;
}
static int vlan_dev_stop(struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
dev_mc_unsync(real_dev, dev);
dev_uc_unsync(real_dev, dev);
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(real_dev, -1);
if (dev->flags & IFF_PROMISC)
dev_set_promiscuity(real_dev, -1);
if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
dev_uc_del(real_dev, dev->dev_addr);
netif_carrier_off(dev);
return 0;
}
static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
struct sockaddr *addr = p;
int err;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
if (!(dev->flags & IFF_UP))
goto out;
if (!ether_addr_equal(addr->sa_data, real_dev->dev_addr)) {
err = dev_uc_add(real_dev, addr->sa_data);
if (err < 0)
return err;
}
if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
dev_uc_del(real_dev, dev->dev_addr);
out:
ether_addr_copy(dev->dev_addr, addr->sa_data);
return 0;
}
static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
struct ifreq ifrr;
int err = -EOPNOTSUPP;
strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
ifrr.ifr_ifru = ifr->ifr_ifru;
switch (cmd) {
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
case SIOCSHWTSTAMP:
case SIOCGHWTSTAMP:
if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
break;
}
if (!err)
ifr->ifr_ifru = ifrr.ifr_ifru;
return err;
}
static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int err = 0;
if (netif_device_present(real_dev) && ops->ndo_neigh_setup)
err = ops->ndo_neigh_setup(real_dev, pa);
return err;
}
#if IS_ENABLED(CONFIG_FCOE)
static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
struct scatterlist *sgl, unsigned int sgc)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int rc = 0;
if (ops->ndo_fcoe_ddp_setup)
rc = ops->ndo_fcoe_ddp_setup(real_dev, xid, sgl, sgc);
return rc;
}
static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int len = 0;
if (ops->ndo_fcoe_ddp_done)
len = ops->ndo_fcoe_ddp_done(real_dev, xid);
return len;
}
static int vlan_dev_fcoe_enable(struct net_device *dev)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int rc = -EINVAL;
if (ops->ndo_fcoe_enable)
rc = ops->ndo_fcoe_enable(real_dev);
return rc;
}
static int vlan_dev_fcoe_disable(struct net_device *dev)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int rc = -EINVAL;
if (ops->ndo_fcoe_disable)
rc = ops->ndo_fcoe_disable(real_dev);
return rc;
}
static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int rc = -EINVAL;
if (ops->ndo_fcoe_get_wwn)
rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
return rc;
}
static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
struct scatterlist *sgl, unsigned int sgc)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
const struct net_device_ops *ops = real_dev->netdev_ops;
int rc = 0;
if (ops->ndo_fcoe_ddp_target)
rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
return rc;
}
#endif
static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
if (dev->flags & IFF_UP) {
if (change & IFF_ALLMULTI)
dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
if (change & IFF_PROMISC)
dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
}
}
static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
{
dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
}
/*
* vlan network devices have devices nesting below it, and are a special
* "super class" of normal network devices; split their locks off into a
* separate class since they always nest.
*/
static struct lock_class_key vlan_netdev_xmit_lock_key;
static struct lock_class_key vlan_netdev_addr_lock_key;
static void vlan_dev_set_lockdep_one(struct net_device *dev,
struct netdev_queue *txq,
void *_subclass)
{
lockdep_set_class_and_subclass(&txq->_xmit_lock,
&vlan_netdev_xmit_lock_key,
*(int *)_subclass);
}
static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
{
lockdep_set_class_and_subclass(&dev->addr_list_lock,
&vlan_netdev_addr_lock_key,
subclass);
netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
}
static int vlan_dev_get_lock_subclass(struct net_device *dev)
{
return vlan_dev_priv(dev)->nest_level;
}
static const struct header_ops vlan_header_ops = {
.create = vlan_dev_hard_header,
.rebuild = vlan_dev_rebuild_header,
.parse = eth_header_parse,
};
static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
unsigned int len)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
if (saddr == NULL)
saddr = dev->dev_addr;
return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
}
static const struct header_ops vlan_passthru_header_ops = {
.create = vlan_passthru_hard_header,
.rebuild = dev_rebuild_header,
.parse = eth_header_parse,
};
static struct device_type vlan_type = {
.name = "vlan",
};
static const struct net_device_ops vlan_netdev_ops;
static int vlan_dev_init(struct net_device *dev)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
netif_carrier_off(dev);
/* IFF_BROADCAST|IFF_MULTICAST; ??? */
dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
IFF_MASTER | IFF_SLAVE);
dev->iflink = real_dev->ifindex;
dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
(1<<__LINK_STATE_DORMANT))) |
(1<<__LINK_STATE_PRESENT);
dev->hw_features = NETIF_F_ALL_CSUM | NETIF_F_SG |
NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |
NETIF_F_HIGHDMA | NETIF_F_SCTP_CSUM |
NETIF_F_ALL_FCOE;
dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
dev->gso_max_size = real_dev->gso_max_size;
if (dev->features & NETIF_F_VLAN_FEATURES)
netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n");
/* ipv6 shared card related stuff */
dev->dev_id = real_dev->dev_id;
if (is_zero_ether_addr(dev->dev_addr))
eth_hw_addr_inherit(dev, real_dev);
if (is_zero_ether_addr(dev->broadcast))
memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
#if IS_ENABLED(CONFIG_FCOE)
dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;
#endif
dev->needed_headroom = real_dev->needed_headroom;
if (vlan_hw_offload_capable(real_dev->features,
vlan_dev_priv(dev)->vlan_proto)) {
dev->header_ops = &vlan_passthru_header_ops;
dev->hard_header_len = real_dev->hard_header_len;
} else {
dev->header_ops = &vlan_header_ops;
dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
}
dev->netdev_ops = &vlan_netdev_ops;
SET_NETDEV_DEVTYPE(dev, &vlan_type);
vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
return -ENOMEM;
return 0;
}
static void vlan_dev_uninit(struct net_device *dev)
{
struct vlan_priority_tci_mapping *pm;
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
int i;
for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
while ((pm = vlan->egress_priority_map[i]) != NULL) {
vlan->egress_priority_map[i] = pm->next;
kfree(pm);
}
}
}
static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
netdev_features_t old_features = features;
features = netdev_intersect_features(features, real_dev->vlan_features);
features |= NETIF_F_RXCSUM;
features = netdev_intersect_features(features, real_dev->features);
features |= old_features & NETIF_F_SOFT_FEATURES;
features |= NETIF_F_LLTX;
return features;
}
static int vlan_ethtool_get_settings(struct net_device *dev,
struct ethtool_cmd *cmd)
{
const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
return __ethtool_get_settings(vlan->real_dev, cmd);
}
static void vlan_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, vlan_fullname, sizeof(info->driver));
strlcpy(info->version, vlan_version, sizeof(info->version));
strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}
static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct vlan_pcpu_stats *p;
u32 rx_errors = 0, tx_dropped = 0;
int i;
for_each_possible_cpu(i) {
u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
unsigned int start;
p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
rxpackets = p->rx_packets;
rxbytes = p->rx_bytes;
rxmulticast = p->rx_multicast;
txpackets = p->tx_packets;
txbytes = p->tx_bytes;
} while (u64_stats_fetch_retry_irq(&p->syncp, start));
stats->rx_packets += rxpackets;
stats->rx_bytes += rxbytes;
stats->multicast += rxmulticast;
stats->tx_packets += txpackets;
stats->tx_bytes += txbytes;
/* rx_errors & tx_dropped are u32 */
rx_errors += p->rx_errors;
tx_dropped += p->tx_dropped;
}
stats->rx_errors = rx_errors;
stats->tx_dropped = tx_dropped;
return stats;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void vlan_dev_poll_controller(struct net_device *dev)
{
return;
}
static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
struct netpoll *netpoll;
int err = 0;
netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
err = -ENOMEM;
if (!netpoll)
goto out;
err = __netpoll_setup(netpoll, real_dev);
if (err) {
kfree(netpoll);
goto out;
}
vlan->netpoll = netpoll;
out:
return err;
}
static void vlan_dev_netpoll_cleanup(struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct netpoll *netpoll = vlan->netpoll;
if (!netpoll)
return;
vlan->netpoll = NULL;
__netpoll_free_async(netpoll);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
static const struct ethtool_ops vlan_ethtool_ops = {
.get_settings = vlan_ethtool_get_settings,
.get_drvinfo = vlan_ethtool_get_drvinfo,
.get_link = ethtool_op_get_link,
};
static const struct net_device_ops vlan_netdev_ops = {
.ndo_change_mtu = vlan_dev_change_mtu,
.ndo_init = vlan_dev_init,
.ndo_uninit = vlan_dev_uninit,
.ndo_open = vlan_dev_open,
.ndo_stop = vlan_dev_stop,
.ndo_start_xmit = vlan_dev_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = vlan_dev_set_mac_address,
.ndo_set_rx_mode = vlan_dev_set_rx_mode,
.ndo_change_rx_flags = vlan_dev_change_rx_flags,
.ndo_do_ioctl = vlan_dev_ioctl,
.ndo_neigh_setup = vlan_dev_neigh_setup,
.ndo_get_stats64 = vlan_dev_get_stats64,
#if IS_ENABLED(CONFIG_FCOE)
.ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
.ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
.ndo_fcoe_enable = vlan_dev_fcoe_enable,
.ndo_fcoe_disable = vlan_dev_fcoe_disable,
.ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
.ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = vlan_dev_poll_controller,
.ndo_netpoll_setup = vlan_dev_netpoll_setup,
.ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup,
#endif
.ndo_fix_features = vlan_dev_fix_features,
.ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
};
static void vlan_dev_free(struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
free_percpu(vlan->vlan_pcpu_stats);
vlan->vlan_pcpu_stats = NULL;
free_netdev(dev);
}
void vlan_setup(struct net_device *dev)
{
ether_setup(dev);
dev->priv_flags |= IFF_802_1Q_VLAN;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
netif_keep_dst(dev);
dev->tx_queue_len = 0;
dev->netdev_ops = &vlan_netdev_ops;
dev->destructor = vlan_dev_free;
dev->ethtool_ops = &vlan_ethtool_ops;
memset(dev->broadcast, 0, ETH_ALEN);
}

70
net/8021q/vlan_gvrp.c Normal file
View file

@ -0,0 +1,70 @@
/*
* IEEE 802.1Q GARP VLAN Registration Protocol (GVRP)
*
* Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <net/garp.h>
#include "vlan.h"
#define GARP_GVRP_ADDRESS { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 }
enum gvrp_attributes {
GVRP_ATTR_INVALID,
GVRP_ATTR_VID,
__GVRP_ATTR_MAX
};
#define GVRP_ATTR_MAX (__GVRP_ATTR_MAX - 1)
static struct garp_application vlan_gvrp_app __read_mostly = {
.proto.group_address = GARP_GVRP_ADDRESS,
.maxattr = GVRP_ATTR_MAX,
.type = GARP_APPLICATION_GVRP,
};
int vlan_gvrp_request_join(const struct net_device *dev)
{
const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
__be16 vlan_id = htons(vlan->vlan_id);
if (vlan->vlan_proto != htons(ETH_P_8021Q))
return 0;
return garp_request_join(vlan->real_dev, &vlan_gvrp_app,
&vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);
}
void vlan_gvrp_request_leave(const struct net_device *dev)
{
const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
__be16 vlan_id = htons(vlan->vlan_id);
if (vlan->vlan_proto != htons(ETH_P_8021Q))
return;
garp_request_leave(vlan->real_dev, &vlan_gvrp_app,
&vlan_id, sizeof(vlan_id), GVRP_ATTR_VID);
}
int vlan_gvrp_init_applicant(struct net_device *dev)
{
return garp_init_applicant(dev, &vlan_gvrp_app);
}
void vlan_gvrp_uninit_applicant(struct net_device *dev)
{
garp_uninit_applicant(dev, &vlan_gvrp_app);
}
int __init vlan_gvrp_init(void)
{
return garp_register_application(&vlan_gvrp_app);
}
void vlan_gvrp_uninit(void)
{
garp_unregister_application(&vlan_gvrp_app);
}

76
net/8021q/vlan_mvrp.c Normal file
View file

@ -0,0 +1,76 @@
/*
* IEEE 802.1Q Multiple VLAN Registration Protocol (MVRP)
*
* Copyright (c) 2012 Massachusetts Institute of Technology
*
* Adapted from code in net/8021q/vlan_gvrp.c
* Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/mrp.h>
#include "vlan.h"
#define MRP_MVRP_ADDRESS { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 }
enum mvrp_attributes {
MVRP_ATTR_INVALID,
MVRP_ATTR_VID,
__MVRP_ATTR_MAX
};
#define MVRP_ATTR_MAX (__MVRP_ATTR_MAX - 1)
static struct mrp_application vlan_mrp_app __read_mostly = {
.type = MRP_APPLICATION_MVRP,
.maxattr = MVRP_ATTR_MAX,
.pkttype.type = htons(ETH_P_MVRP),
.group_address = MRP_MVRP_ADDRESS,
.version = 0,
};
int vlan_mvrp_request_join(const struct net_device *dev)
{
const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
__be16 vlan_id = htons(vlan->vlan_id);
if (vlan->vlan_proto != htons(ETH_P_8021Q))
return 0;
return mrp_request_join(vlan->real_dev, &vlan_mrp_app,
&vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
}
void vlan_mvrp_request_leave(const struct net_device *dev)
{
const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
__be16 vlan_id = htons(vlan->vlan_id);
if (vlan->vlan_proto != htons(ETH_P_8021Q))
return;
mrp_request_leave(vlan->real_dev, &vlan_mrp_app,
&vlan_id, sizeof(vlan_id), MVRP_ATTR_VID);
}
int vlan_mvrp_init_applicant(struct net_device *dev)
{
return mrp_init_applicant(dev, &vlan_mrp_app);
}
void vlan_mvrp_uninit_applicant(struct net_device *dev)
{
mrp_uninit_applicant(dev, &vlan_mrp_app);
}
int __init vlan_mvrp_init(void)
{
return mrp_register_application(&vlan_mrp_app);
}
void vlan_mvrp_uninit(void)
{
mrp_unregister_application(&vlan_mrp_app);
}

265
net/8021q/vlan_netlink.c Normal file
View file

@ -0,0 +1,265 @@
/*
* VLAN netlink control interface
*
* Copyright (c) 2007 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include "vlan.h"
static const struct nla_policy vlan_policy[IFLA_VLAN_MAX + 1] = {
[IFLA_VLAN_ID] = { .type = NLA_U16 },
[IFLA_VLAN_FLAGS] = { .len = sizeof(struct ifla_vlan_flags) },
[IFLA_VLAN_EGRESS_QOS] = { .type = NLA_NESTED },
[IFLA_VLAN_INGRESS_QOS] = { .type = NLA_NESTED },
[IFLA_VLAN_PROTOCOL] = { .type = NLA_U16 },
};
static const struct nla_policy vlan_map_policy[IFLA_VLAN_QOS_MAX + 1] = {
[IFLA_VLAN_QOS_MAPPING] = { .len = sizeof(struct ifla_vlan_qos_mapping) },
};
static inline int vlan_validate_qos_map(struct nlattr *attr)
{
if (!attr)
return 0;
return nla_validate_nested(attr, IFLA_VLAN_QOS_MAX, vlan_map_policy);
}
static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
struct ifla_vlan_flags *flags;
u16 id;
int err;
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
return -EINVAL;
if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
return -EADDRNOTAVAIL;
}
if (!data)
return -EINVAL;
if (data[IFLA_VLAN_PROTOCOL]) {
switch (nla_get_be16(data[IFLA_VLAN_PROTOCOL])) {
case htons(ETH_P_8021Q):
case htons(ETH_P_8021AD):
break;
default:
return -EPROTONOSUPPORT;
}
}
if (data[IFLA_VLAN_ID]) {
id = nla_get_u16(data[IFLA_VLAN_ID]);
if (id >= VLAN_VID_MASK)
return -ERANGE;
}
if (data[IFLA_VLAN_FLAGS]) {
flags = nla_data(data[IFLA_VLAN_FLAGS]);
if ((flags->flags & flags->mask) &
~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP))
return -EINVAL;
}
err = vlan_validate_qos_map(data[IFLA_VLAN_INGRESS_QOS]);
if (err < 0)
return err;
err = vlan_validate_qos_map(data[IFLA_VLAN_EGRESS_QOS]);
if (err < 0)
return err;
return 0;
}
static int vlan_changelink(struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct ifla_vlan_flags *flags;
struct ifla_vlan_qos_mapping *m;
struct nlattr *attr;
int rem;
if (data[IFLA_VLAN_FLAGS]) {
flags = nla_data(data[IFLA_VLAN_FLAGS]);
vlan_dev_change_flags(dev, flags->flags, flags->mask);
}
if (data[IFLA_VLAN_INGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
m = nla_data(attr);
vlan_dev_set_ingress_priority(dev, m->to, m->from);
}
}
if (data[IFLA_VLAN_EGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
m = nla_data(attr);
vlan_dev_set_egress_priority(dev, m->from, m->to);
}
}
return 0;
}
static int vlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev;
__be16 proto;
int err;
if (!data[IFLA_VLAN_ID])
return -EINVAL;
if (!tb[IFLA_LINK])
return -EINVAL;
real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev)
return -ENODEV;
if (data[IFLA_VLAN_PROTOCOL])
proto = nla_get_be16(data[IFLA_VLAN_PROTOCOL]);
else
proto = htons(ETH_P_8021Q);
vlan->vlan_proto = proto;
vlan->vlan_id = nla_get_u16(data[IFLA_VLAN_ID]);
vlan->real_dev = real_dev;
vlan->flags = VLAN_FLAG_REORDER_HDR;
err = vlan_check_real_dev(real_dev, vlan->vlan_proto, vlan->vlan_id);
if (err < 0)
return err;
if (!tb[IFLA_MTU])
dev->mtu = real_dev->mtu;
else if (dev->mtu > real_dev->mtu)
return -EINVAL;
err = vlan_changelink(dev, tb, data);
if (err < 0)
return err;
return register_vlan_dev(dev);
}
static inline size_t vlan_qos_map_size(unsigned int n)
{
if (n == 0)
return 0;
/* IFLA_VLAN_{EGRESS,INGRESS}_QOS + n * IFLA_VLAN_QOS_MAPPING */
return nla_total_size(sizeof(struct nlattr)) +
nla_total_size(sizeof(struct ifla_vlan_qos_mapping)) * n;
}
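/* Annotation: with the usual 4-byte netlink alignment (NLA_HDRLEN == 4),
 * nla_total_size(sizeof(struct nlattr)) is 8 and
 * nla_total_size(sizeof(struct ifla_vlan_qos_mapping)) is 12 (two u32s
 * plus header), so e.g. n == 2 reserves 8 + 2 * 12 = 32 bytes.
 */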
static size_t vlan_get_size(const struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
return nla_total_size(2) + /* IFLA_VLAN_PROTOCOL */
nla_total_size(2) + /* IFLA_VLAN_ID */
nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */
vlan_qos_map_size(vlan->nr_ingress_mappings) +
vlan_qos_map_size(vlan->nr_egress_mappings);
}
static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct vlan_priority_tci_mapping *pm;
struct ifla_vlan_flags f;
struct ifla_vlan_qos_mapping m;
struct nlattr *nest;
unsigned int i;
if (nla_put_be16(skb, IFLA_VLAN_PROTOCOL, vlan->vlan_proto) ||
nla_put_u16(skb, IFLA_VLAN_ID, vlan->vlan_id))
goto nla_put_failure;
if (vlan->flags) {
f.flags = vlan->flags;
f.mask = ~0;
if (nla_put(skb, IFLA_VLAN_FLAGS, sizeof(f), &f))
goto nla_put_failure;
}
if (vlan->nr_ingress_mappings) {
nest = nla_nest_start(skb, IFLA_VLAN_INGRESS_QOS);
if (nest == NULL)
goto nla_put_failure;
for (i = 0; i < ARRAY_SIZE(vlan->ingress_priority_map); i++) {
if (!vlan->ingress_priority_map[i])
continue;
m.from = i;
m.to = vlan->ingress_priority_map[i];
if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
sizeof(m), &m))
goto nla_put_failure;
}
nla_nest_end(skb, nest);
}
if (vlan->nr_egress_mappings) {
nest = nla_nest_start(skb, IFLA_VLAN_EGRESS_QOS);
if (nest == NULL)
goto nla_put_failure;
for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
for (pm = vlan->egress_priority_map[i]; pm;
pm = pm->next) {
if (!pm->vlan_qos)
continue;
m.from = pm->priority;
m.to = (pm->vlan_qos >> 13) & 0x7;
if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
sizeof(m), &m))
goto nla_put_failure;
}
}
nla_nest_end(skb, nest);
}
return 0;
nla_put_failure:
return -EMSGSIZE;
}
struct rtnl_link_ops vlan_link_ops __read_mostly = {
.kind = "vlan",
.maxtype = IFLA_VLAN_MAX,
.policy = vlan_policy,
.priv_size = sizeof(struct vlan_dev_priv),
.setup = vlan_setup,
.validate = vlan_validate,
.newlink = vlan_newlink,
.changelink = vlan_changelink,
.dellink = unregister_vlan_dev,
.get_size = vlan_get_size,
.fill_info = vlan_fill_info,
};
int __init vlan_netlink_init(void)
{
return rtnl_link_register(&vlan_link_ops);
}
void __exit vlan_netlink_fini(void)
{
rtnl_link_unregister(&vlan_link_ops);
}
MODULE_ALIAS_RTNL_LINK("vlan");
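/* Userspace example (device names are illustrative): this rtnl_link_ops
 * registration is what the iproute2 VLAN commands talk to, e.g.:
 *
 * ip link add link eth0 name eth0.100 type vlan id 100
 * ip link set eth0.100 up
 *
 * and, for 802.1ad service tags:
 *
 * ip link add link eth0 name eth0.100 type vlan protocol 802.1ad id 100
 */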

326
net/8021q/vlanproc.c Normal file
View file

@ -0,0 +1,326 @@
/******************************************************************************
* vlanproc.c VLAN Module. /proc filesystem interface.
*
* This module is completely hardware-independent and provides
* access to the router using Linux /proc filesystem.
*
* Author: Ben Greear, <greearb@candelatech.com> copied from wanproc.c
* by: Gene Kozin <genek@compuserve.com>
*
* Copyright: (c) 1998 Ben Greear
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
* ============================================================================
* Jan 20, 1998 Ben Greear Initial Version
*****************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "vlanproc.h"
#include "vlan.h"
/****** Function Prototypes *************************************************/
/* Methods for preparing data for reading proc entries */
static int vlan_seq_show(struct seq_file *seq, void *v);
static void *vlan_seq_start(struct seq_file *seq, loff_t *pos);
static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos);
static void vlan_seq_stop(struct seq_file *seq, void *v);
static int vlandev_seq_show(struct seq_file *seq, void *v);
/*
* Global Data
*/
/*
* Names of the proc directory entries
*/
static const char name_root[] = "vlan";
static const char name_conf[] = "config";
/*
* Structures for interfacing with the /proc filesystem.
* VLAN creates its own directory /proc/net/vlan with the following
* entries:
* config device status/configuration
* <device> entry for each device
*/
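/* Annotation: given the format strings in vlan_seq_show() below, a dump of
 * /proc/net/vlan/config looks roughly like this (device names illustrative):
 *
 * VLAN Dev name | VLAN ID
 * Name-Type: VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD
 * eth0.100       | 100 | eth0
 */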
/*
* Generic /proc/net/vlan/<file> file and inode operations
*/
static const struct seq_operations vlan_seq_ops = {
.start = vlan_seq_start,
.next = vlan_seq_next,
.stop = vlan_seq_stop,
.show = vlan_seq_show,
};
static int vlan_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &vlan_seq_ops,
sizeof(struct seq_net_private));
}
static const struct file_operations vlan_fops = {
.owner = THIS_MODULE,
.open = vlan_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
/*
* /proc/net/vlan/<device> file and inode operations
*/
static int vlandev_seq_open(struct inode *inode, struct file *file)
{
return single_open(file, vlandev_seq_show, PDE_DATA(inode));
}
static const struct file_operations vlandev_fops = {
.owner = THIS_MODULE,
.open = vlandev_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/*
* Proc filesystem directory entries.
*/
/* Strings */
static const char *const vlan_name_type_str[VLAN_NAME_TYPE_HIGHEST] = {
[VLAN_NAME_TYPE_RAW_PLUS_VID] = "VLAN_NAME_TYPE_RAW_PLUS_VID",
[VLAN_NAME_TYPE_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_PLUS_VID_NO_PAD",
[VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD",
[VLAN_NAME_TYPE_PLUS_VID] = "VLAN_NAME_TYPE_PLUS_VID",
};
/*
* Interface functions
*/
/*
* Clean up /proc/net/vlan entries
*/
void vlan_proc_cleanup(struct net *net)
{
struct vlan_net *vn = net_generic(net, vlan_net_id);
if (vn->proc_vlan_conf)
remove_proc_entry(name_conf, vn->proc_vlan_dir);
if (vn->proc_vlan_dir)
remove_proc_entry(name_root, net->proc_net);
/* Dynamically added entries should be cleaned up as their vlan_device
* is removed, so we should not have to take care of it here...
*/
}
/*
* Create /proc/net/vlan entries
*/
int __net_init vlan_proc_init(struct net *net)
{
struct vlan_net *vn = net_generic(net, vlan_net_id);
vn->proc_vlan_dir = proc_net_mkdir(net, name_root, net->proc_net);
if (!vn->proc_vlan_dir)
goto err;
vn->proc_vlan_conf = proc_create(name_conf, S_IFREG|S_IRUSR|S_IWUSR,
vn->proc_vlan_dir, &vlan_fops);
if (!vn->proc_vlan_conf)
goto err;
return 0;
err:
pr_err("can't create entry in proc filesystem!\n");
vlan_proc_cleanup(net);
return -ENOBUFS;
}
/*
* Add directory entry for VLAN device.
*/
int vlan_proc_add_dev(struct net_device *vlandev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id);
if (!strcmp(vlandev->name, name_conf))
return -EINVAL;
vlan->dent =
proc_create_data(vlandev->name, S_IFREG|S_IRUSR|S_IWUSR,
vn->proc_vlan_dir, &vlandev_fops, vlandev);
if (!vlan->dent)
return -ENOBUFS;
return 0;
}
/*
* Delete directory entry for VLAN device.
*/
int vlan_proc_rem_dev(struct net_device *vlandev)
{
/** NOTE: This will consume the memory pointed to by dent, it seems. */
proc_remove(vlan_dev_priv(vlandev)->dent);
vlan_dev_priv(vlandev)->dent = NULL;
return 0;
}
/****** Proc filesystem entry points ****************************************/
/*
* The following few functions build the content of /proc/net/vlan/config
*/
/* start read of /proc/net/vlan/config */
static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(rcu)
{
struct net_device *dev;
struct net *net = seq_file_net(seq);
loff_t i = 1;
rcu_read_lock();
if (*pos == 0)
return SEQ_START_TOKEN;
for_each_netdev_rcu(net, dev) {
if (!is_vlan_dev(dev))
continue;
if (i++ == *pos)
return dev;
}
return NULL;
}
static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct net_device *dev;
struct net *net = seq_file_net(seq);
++*pos;
dev = v;
if (v == SEQ_START_TOKEN)
dev = net_device_entry(&net->dev_base_head);
for_each_netdev_continue_rcu(net, dev) {
if (!is_vlan_dev(dev))
continue;
return dev;
}
return NULL;
}
static void vlan_seq_stop(struct seq_file *seq, void *v)
__releases(rcu)
{
rcu_read_unlock();
}
static int vlan_seq_show(struct seq_file *seq, void *v)
{
struct net *net = seq_file_net(seq);
struct vlan_net *vn = net_generic(net, vlan_net_id);
if (v == SEQ_START_TOKEN) {
const char *nmtype = NULL;
seq_puts(seq, "VLAN Dev name | VLAN ID\n");
if (vn->name_type < ARRAY_SIZE(vlan_name_type_str))
nmtype = vlan_name_type_str[vn->name_type];
seq_printf(seq, "Name-Type: %s\n",
nmtype ? nmtype : "UNKNOWN");
} else {
const struct net_device *vlandev = v;
const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
seq_printf(seq, "%-15s| %d | %s\n", vlandev->name,
vlan->vlan_id, vlan->real_dev->name);
}
return 0;
}
static int vlandev_seq_show(struct seq_file *seq, void *offset)
{
struct net_device *vlandev = (struct net_device *) seq->private;
const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *stats;
static const char fmt64[] = "%30s %12llu\n";
int i;
if (!is_vlan_dev(vlandev))
return 0;
stats = dev_get_stats(vlandev, &temp);
seq_printf(seq,
"%s VID: %d REORDER_HDR: %i dev->priv_flags: %hx\n",
vlandev->name, vlan->vlan_id,
(int)(vlan->flags & 1), vlandev->priv_flags);
seq_printf(seq, fmt64, "total frames received", stats->rx_packets);
seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes);
seq_printf(seq, fmt64, "Broadcast/Multicast Rcvd", stats->multicast);
seq_puts(seq, "\n");
seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets);
seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes);
seq_printf(seq, "Device: %s", vlan->real_dev->name);
/* now show all PRIORITY mappings relating to this VLAN */
seq_printf(seq, "\nINGRESS priority mappings: "
"0:%u 1:%u 2:%u 3:%u 4:%u 5:%u 6:%u 7:%u\n",
vlan->ingress_priority_map[0],
vlan->ingress_priority_map[1],
vlan->ingress_priority_map[2],
vlan->ingress_priority_map[3],
vlan->ingress_priority_map[4],
vlan->ingress_priority_map[5],
vlan->ingress_priority_map[6],
vlan->ingress_priority_map[7]);
seq_printf(seq, " EGRESS priority mappings: ");
for (i = 0; i < 16; i++) {
const struct vlan_priority_tci_mapping *mp
= vlan->egress_priority_map[i];
while (mp) {
seq_printf(seq, "%u:%hu ",
mp->priority, ((mp->vlan_qos >> 13) & 0x7));
mp = mp->next;
}
}
seq_puts(seq, "\n");
return 0;
}

20
net/8021q/vlanproc.h Normal file
View file

@ -0,0 +1,20 @@
#ifndef __BEN_VLAN_PROC_INC__
#define __BEN_VLAN_PROC_INC__
#ifdef CONFIG_PROC_FS
struct net;
int vlan_proc_init(struct net *net);
int vlan_proc_rem_dev(struct net_device *vlandev);
int vlan_proc_add_dev(struct net_device *vlandev);
void vlan_proc_cleanup(struct net *net);
#else /* No CONFIG_PROC_FS */
#define vlan_proc_init(net) (0)
#define vlan_proc_cleanup(net) do {} while (0)
#define vlan_proc_add_dev(dev) ({(void)(dev), 0; })
#define vlan_proc_rem_dev(dev) ({(void)(dev), 0; })
#endif
#endif /* !(__BEN_VLAN_PROC_INC__) */

36
net/9p/Kconfig Normal file
View file

@ -0,0 +1,36 @@
#
# 9P protocol configuration
#
menuconfig NET_9P
depends on NET
tristate "Plan 9 Resource Sharing Support (9P2000)"
help
If you say Y here, you will get experimental support for
Plan 9 resource sharing via the 9P2000 protocol.
See <http://v9fs.sf.net> for more information.
If unsure, say N.
if NET_9P
config NET_9P_VIRTIO
depends on VIRTIO
tristate "9P Virtio Transport"
help
This builds support for a transport between
guest partitions and a host partition.
config NET_9P_RDMA
depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS
tristate "9P RDMA Transport (Experimental)"
help
This builds support for an RDMA transport.
config NET_9P_DEBUG
bool "Debug information"
help
Say Y if you want the 9P subsystem to log debug information.
endif
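# Example usage (share tag and mountpoint are illustrative): with NET_9P and
# NET_9P_VIRTIO enabled, a host-exported share can be mounted in the guest:
#   mount -t 9p -o trans=virtio hostshare /mnt/host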

18
net/9p/Makefile Normal file
View file

@ -0,0 +1,18 @@
obj-$(CONFIG_NET_9P) := 9pnet.o
obj-$(CONFIG_NET_9P_VIRTIO) += 9pnet_virtio.o
obj-$(CONFIG_NET_9P_RDMA) += 9pnet_rdma.o
9pnet-objs := \
mod.o \
client.o \
error.o \
util.o \
protocol.o \
trans_fd.o \
trans_common.o
9pnet_virtio-objs := \
trans_virtio.o
9pnet_rdma-objs := \
trans_rdma.o

2277
net/9p/client.c Normal file

File diff suppressed because it is too large

247
net/9p/error.c Normal file
View file

@ -0,0 +1,247 @@
/*
* linux/fs/9p/error.c
*
* Error string handling
*
* Plan 9 uses error strings, Unix uses error numbers. These functions
* try to help manage that and provide for dynamically adding error
* mappings.
*
* Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to:
* Free Software Foundation
* 51 Franklin Street, Fifth Floor
* Boston, MA 02111-1301 USA
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/errno.h>
#include <net/9p/9p.h>
/**
* struct errormap - map string errors from Plan 9 to Linux numeric ids
* @name: string sent over 9P
* @val: numeric id most closely representing @name
* @namelen: length of string
* @list: hash-table list for string lookup
*/
struct errormap {
char *name;
int val;
int namelen;
struct hlist_node list;
};
#define ERRHASHSZ 32
static struct hlist_head hash_errmap[ERRHASHSZ];
/* FixMe - reduce to a reasonable size */
static struct errormap errmap[] = {
{"Operation not permitted", EPERM},
{"wstat prohibited", EPERM},
{"No such file or directory", ENOENT},
{"directory entry not found", ENOENT},
{"file not found", ENOENT},
{"Interrupted system call", EINTR},
{"Input/output error", EIO},
{"No such device or address", ENXIO},
{"Argument list too long", E2BIG},
{"Bad file descriptor", EBADF},
{"Resource temporarily unavailable", EAGAIN},
{"Cannot allocate memory", ENOMEM},
{"Permission denied", EACCES},
{"Bad address", EFAULT},
{"Block device required", ENOTBLK},
{"Device or resource busy", EBUSY},
{"File exists", EEXIST},
{"Invalid cross-device link", EXDEV},
{"No such device", ENODEV},
{"Not a directory", ENOTDIR},
{"Is a directory", EISDIR},
{"Invalid argument", EINVAL},
{"Too many open files in system", ENFILE},
{"Too many open files", EMFILE},
{"Text file busy", ETXTBSY},
{"File too large", EFBIG},
{"No space left on device", ENOSPC},
{"Illegal seek", ESPIPE},
{"Read-only file system", EROFS},
{"Too many links", EMLINK},
{"Broken pipe", EPIPE},
{"Numerical argument out of domain", EDOM},
{"Numerical result out of range", ERANGE},
{"Resource deadlock avoided", EDEADLK},
{"File name too long", ENAMETOOLONG},
{"No locks available", ENOLCK},
{"Function not implemented", ENOSYS},
{"Directory not empty", ENOTEMPTY},
{"Too many levels of symbolic links", ELOOP},
{"No message of desired type", ENOMSG},
{"Identifier removed", EIDRM},
{"No data available", ENODATA},
{"Machine is not on the network", ENONET},
{"Package not installed", ENOPKG},
{"Object is remote", EREMOTE},
{"Link has been severed", ENOLINK},
{"Communication error on send", ECOMM},
{"Protocol error", EPROTO},
{"Bad message", EBADMSG},
{"File descriptor in bad state", EBADFD},
{"Streams pipe error", ESTRPIPE},
{"Too many users", EUSERS},
{"Socket operation on non-socket", ENOTSOCK},
{"Message too long", EMSGSIZE},
{"Protocol not available", ENOPROTOOPT},
{"Protocol not supported", EPROTONOSUPPORT},
{"Socket type not supported", ESOCKTNOSUPPORT},
{"Operation not supported", EOPNOTSUPP},
{"Protocol family not supported", EPFNOSUPPORT},
{"Network is down", ENETDOWN},
{"Network is unreachable", ENETUNREACH},
{"Network dropped connection on reset", ENETRESET},
{"Software caused connection abort", ECONNABORTED},
{"Connection reset by peer", ECONNRESET},
{"No buffer space available", ENOBUFS},
{"Transport endpoint is already connected", EISCONN},
{"Transport endpoint is not connected", ENOTCONN},
{"Cannot send after transport endpoint shutdown", ESHUTDOWN},
{"Connection timed out", ETIMEDOUT},
{"Connection refused", ECONNREFUSED},
{"Host is down", EHOSTDOWN},
{"No route to host", EHOSTUNREACH},
{"Operation already in progress", EALREADY},
{"Operation now in progress", EINPROGRESS},
{"Is a named type file", EISNAM},
{"Remote I/O error", EREMOTEIO},
{"Disk quota exceeded", EDQUOT},
/* errors from fossil, vacfs, and u9fs */
{"fid unknown or out of range", EBADF},
{"permission denied", EACCES},
{"file does not exist", ENOENT},
{"authentication failed", ECONNREFUSED},
{"bad offset in directory read", ESPIPE},
{"bad use of fid", EBADF},
{"wstat can't convert between files and directories", EPERM},
{"directory is not empty", ENOTEMPTY},
{"file exists", EEXIST},
{"file already exists", EEXIST},
{"file or directory already exists", EEXIST},
{"fid already in use", EBADF},
{"file in use", ETXTBSY},
{"i/o error", EIO},
{"file already open for I/O", ETXTBSY},
{"illegal mode", EINVAL},
{"illegal name", ENAMETOOLONG},
{"not a directory", ENOTDIR},
{"not a member of proposed group", EPERM},
{"not owner", EACCES},
{"only owner can change group in wstat", EACCES},
{"read only file system", EROFS},
{"no access to special file", EPERM},
{"i/o count too large", EIO},
{"unknown group", EINVAL},
{"unknown user", EINVAL},
{"bogus wstat buffer", EPROTO},
{"exclusive use file already open", EAGAIN},
{"corrupted directory entry", EIO},
{"corrupted file entry", EIO},
{"corrupted block label", EIO},
{"corrupted meta data", EIO},
{"illegal offset", EINVAL},
{"illegal path element", ENOENT},
{"root of file system is corrupted", EIO},
{"corrupted super block", EIO},
{"protocol botch", EPROTO},
{"file system is full", ENOSPC},
{"file is in use", EAGAIN},
{"directory entry is not allocated", ENOENT},
{"file is read only", EROFS},
{"file has been removed", EIDRM},
{"only support truncation to zero length", EPERM},
{"cannot remove root", EPERM},
{"file too big", EFBIG},
{"venti i/o error", EIO},
/* these are not errors */
{"u9fs rhostsauth: no authentication required", 0},
{"u9fs authnone: no authentication required", 0},
{NULL, -1}
};
/**
* p9_error_init - preload mappings into hash list
*
*/
int p9_error_init(void)
{
struct errormap *c;
int bucket;
/* initialize hash table */
for (bucket = 0; bucket < ERRHASHSZ; bucket++)
INIT_HLIST_HEAD(&hash_errmap[bucket]);
/* load initial error map into hash table */
for (c = errmap; c->name != NULL; c++) {
c->namelen = strlen(c->name);
bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ;
INIT_HLIST_NODE(&c->list);
hlist_add_head(&c->list, &hash_errmap[bucket]);
}
return 1;
}
EXPORT_SYMBOL(p9_error_init);
/**
* errstr2errno - convert error string to error number
* @errstr: error string
* @len: length of error string
*
*/
int p9_errstr2errno(char *errstr, int len)
{
int errno;
struct errormap *c;
int bucket;
errno = 0;
c = NULL;
bucket = jhash(errstr, len, 0) % ERRHASHSZ;
hlist_for_each_entry(c, &hash_errmap[bucket], list) {
if (c->namelen == len && !memcmp(c->name, errstr, len)) {
errno = c->val;
break;
}
}
if (errno == 0) {
/* TODO: if error isn't found, add it dynamically */
errstr[len] = 0;
pr_err("%s: server reported unknown error %s\n",
__func__, errstr);
errno = ESERVERFAULT;
}
return -errno;
}
EXPORT_SYMBOL(p9_errstr2errno);
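/* Annotation/example: p9_errstr2errno("file not found", 14) matches the
 * errmap entry above and returns -ENOENT; a string with no mapping falls
 * through to -ESERVERFAULT.
 */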

201
net/9p/mod.c Normal file
View file

@ -0,0 +1,201 @@
/*
* net/9p/9p.c
*
* 9P entry point
*
* Copyright (C) 2007 by Latchesar Ionkov <lucho@ionkov.net>
* Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to:
* Free Software Foundation
* 51 Franklin Street, Fifth Floor
* Boston, MA 02111-1301 USA
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <net/9p/9p.h>
#include <linux/fs.h>
#include <linux/parser.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_NET_9P_DEBUG
unsigned int p9_debug_level = 0; /* feature-rific global debug level */
EXPORT_SYMBOL(p9_debug_level);
module_param_named(debug, p9_debug_level, uint, 0);
MODULE_PARM_DESC(debug, "9P debugging level");
void _p9_debug(enum p9_debug_flags level, const char *func,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
if ((p9_debug_level & level) != level)
return;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
if (level == P9_DEBUG_9P)
pr_notice("(%8.8d) %pV", task_pid_nr(current), &vaf);
else
pr_notice("-- %s (%d): %pV", func, task_pid_nr(current), &vaf);
va_end(args);
}
EXPORT_SYMBOL(_p9_debug);
#endif
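/* Annotation: with CONFIG_NET_9P_DEBUG enabled, the mask is given at module
 * load time, e.g. "modprobe 9pnet debug=0xffff" (mask value illustrative).
 */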
/*
* Dynamic Transport Registration Routines
*
*/
static DEFINE_SPINLOCK(v9fs_trans_lock);
static LIST_HEAD(v9fs_trans_list);
/**
* v9fs_register_trans - register a new transport with 9p
* @m: structure describing the transport module and entry points
*
*/
void v9fs_register_trans(struct p9_trans_module *m)
{
spin_lock(&v9fs_trans_lock);
list_add_tail(&m->list, &v9fs_trans_list);
spin_unlock(&v9fs_trans_lock);
}
EXPORT_SYMBOL(v9fs_register_trans);
/**
* v9fs_unregister_trans - unregister a 9p transport
* @m: the transport to remove
*
*/
void v9fs_unregister_trans(struct p9_trans_module *m)
{
spin_lock(&v9fs_trans_lock);
list_del_init(&m->list);
spin_unlock(&v9fs_trans_lock);
}
EXPORT_SYMBOL(v9fs_unregister_trans);
/**
* v9fs_get_trans_by_name - get transport with the matching name
* @s: string identifying transport
*
*/
struct p9_trans_module *v9fs_get_trans_by_name(char *s)
{
struct p9_trans_module *t, *found = NULL;
spin_lock(&v9fs_trans_lock);
list_for_each_entry(t, &v9fs_trans_list, list)
if (strcmp(t->name, s) == 0 &&
try_module_get(t->owner)) {
found = t;
break;
}
spin_unlock(&v9fs_trans_lock);
return found;
}
EXPORT_SYMBOL(v9fs_get_trans_by_name);
/**
* v9fs_get_default_trans - get the default transport
*
*/
struct p9_trans_module *v9fs_get_default_trans(void)
{
struct p9_trans_module *t, *found = NULL;
spin_lock(&v9fs_trans_lock);
list_for_each_entry(t, &v9fs_trans_list, list)
if (t->def && try_module_get(t->owner)) {
found = t;
break;
}
if (!found)
list_for_each_entry(t, &v9fs_trans_list, list)
if (try_module_get(t->owner)) {
found = t;
break;
}
spin_unlock(&v9fs_trans_lock);
return found;
}
EXPORT_SYMBOL(v9fs_get_default_trans);
/**
* v9fs_put_trans - put trans
* @m: transport to put
*
*/
void v9fs_put_trans(struct p9_trans_module *m)
{
if (m)
module_put(m->owner);
}
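/* Illustrative lookup pattern (a sketch, not part of the original file),
 * mirroring how a 9p client would use the helpers above:
 *
 *	struct p9_trans_module *t = v9fs_get_trans_by_name("virtio");
 *	if (!t)
 *		t = v9fs_get_default_trans();
 *	...
 *	v9fs_put_trans(t);
 *
 * The try_module_get() inside each lookup keeps the transport module
 * pinned until the matching v9fs_put_trans().
 */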
/**
* init_p9 - Initialize module
*
*/
static int __init init_p9(void)
{
int ret = 0;
p9_error_init();
pr_info("Installing 9P2000 support\n");
p9_trans_fd_init();
return ret;
}
/**
* exit_p9 - shutdown module
*
*/
static void __exit exit_p9(void)
{
pr_info("Unloading 9P2000 support\n");
p9_trans_fd_exit();
}
module_init(init_p9)
module_exit(exit_p9)
MODULE_AUTHOR("Latchesar Ionkov <lucho@ionkov.net>");
MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
MODULE_AUTHOR("Ron Minnich <rminnich@lanl.gov>");
MODULE_LICENSE("GPL");

636
net/9p/protocol.c Normal file
View file

@ -0,0 +1,636 @@
/*
* net/9p/protocol.c
*
* 9P Protocol Support Code
*
* Copyright (C) 2008 by Eric Van Hensbergen <ericvh@gmail.com>
*
 * Based on code from Anthony Liguori <aliguori@us.ibm.com>
* Copyright (C) 2008 by IBM, Corp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to:
* Free Software Foundation
* 51 Franklin Street, Fifth Floor
* Boston, MA 02111-1301 USA
*
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include "protocol.h"
#include <trace/events/9p.h>
static int
p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
void p9stat_free(struct p9_wstat *stbuf)
{
kfree(stbuf->name);
kfree(stbuf->uid);
kfree(stbuf->gid);
kfree(stbuf->muid);
kfree(stbuf->extension);
}
EXPORT_SYMBOL(p9stat_free);
size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size)
{
size_t len = min(pdu->size - pdu->offset, size);
memcpy(data, &pdu->sdata[pdu->offset], len);
pdu->offset += len;
return size - len;
}
static size_t pdu_write(struct p9_fcall *pdu, const void *data, size_t size)
{
size_t len = min(pdu->capacity - pdu->size, size);
memcpy(&pdu->sdata[pdu->size], data, len);
pdu->size += len;
return size - len;
}
static size_t
pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
{
size_t len = min(pdu->capacity - pdu->size, size);
if (copy_from_user(&pdu->sdata[pdu->size], udata, len))
len = 0;
pdu->size += len;
return size - len;
}
/*
b - int8_t
w - int16_t
d - int32_t
q - int64_t
s - string
u - numeric uid
g - numeric gid
S - stat
Q - qid
D - data blob (int32_t size followed by void *, results are not freed)
T - array of strings (int16_t count, followed by strings)
R - array of qids (int16_t count, followed by qids)
A - stat for 9p2000.L (p9_stat_dotl)
? - if optional = 1, continue parsing
*/
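/* Illustrative example (not part of the original file): with the format
 * characters above, a hypothetical call such as
 *
 *	int32_t count;
 *	char *name;
 *	struct p9_qid qid;
 *	p9pdu_readf(pdu, proto_version, "dsQ", &count, &name, &qid);
 *
 * decodes, in order, a little-endian int32_t, a length-prefixed string
 * (kmalloc'ed and returned through &name) and a 13-byte qid.
 */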
static int
p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
va_list ap)
{
const char *ptr;
int errcode = 0;
for (ptr = fmt; *ptr; ptr++) {
switch (*ptr) {
case 'b':{
int8_t *val = va_arg(ap, int8_t *);
if (pdu_read(pdu, val, sizeof(*val))) {
errcode = -EFAULT;
break;
}
}
break;
case 'w':{
int16_t *val = va_arg(ap, int16_t *);
__le16 le_val;
if (pdu_read(pdu, &le_val, sizeof(le_val))) {
errcode = -EFAULT;
break;
}
*val = le16_to_cpu(le_val);
}
break;
case 'd':{
int32_t *val = va_arg(ap, int32_t *);
__le32 le_val;
if (pdu_read(pdu, &le_val, sizeof(le_val))) {
errcode = -EFAULT;
break;
}
*val = le32_to_cpu(le_val);
}
break;
case 'q':{
int64_t *val = va_arg(ap, int64_t *);
__le64 le_val;
if (pdu_read(pdu, &le_val, sizeof(le_val))) {
errcode = -EFAULT;
break;
}
*val = le64_to_cpu(le_val);
}
break;
case 's':{
char **sptr = va_arg(ap, char **);
uint16_t len;
errcode = p9pdu_readf(pdu, proto_version,
"w", &len);
if (errcode)
break;
*sptr = kmalloc(len + 1, GFP_NOFS);
if (*sptr == NULL) {
errcode = -EFAULT;
break;
}
if (pdu_read(pdu, *sptr, len)) {
errcode = -EFAULT;
kfree(*sptr);
*sptr = NULL;
} else
(*sptr)[len] = 0;
}
break;
case 'u': {
kuid_t *uid = va_arg(ap, kuid_t *);
__le32 le_val;
if (pdu_read(pdu, &le_val, sizeof(le_val))) {
errcode = -EFAULT;
break;
}
*uid = make_kuid(&init_user_ns,
le32_to_cpu(le_val));
} break;
case 'g': {
kgid_t *gid = va_arg(ap, kgid_t *);
__le32 le_val;
if (pdu_read(pdu, &le_val, sizeof(le_val))) {
errcode = -EFAULT;
break;
}
*gid = make_kgid(&init_user_ns,
le32_to_cpu(le_val));
} break;
case 'Q':{
struct p9_qid *qid =
va_arg(ap, struct p9_qid *);
errcode = p9pdu_readf(pdu, proto_version, "bdq",
&qid->type, &qid->version,
&qid->path);
}
break;
case 'S':{
struct p9_wstat *stbuf =
va_arg(ap, struct p9_wstat *);
memset(stbuf, 0, sizeof(struct p9_wstat));
stbuf->n_uid = stbuf->n_muid = INVALID_UID;
stbuf->n_gid = INVALID_GID;
errcode =
p9pdu_readf(pdu, proto_version,
"wwdQdddqssss?sugu",
&stbuf->size, &stbuf->type,
&stbuf->dev, &stbuf->qid,
&stbuf->mode, &stbuf->atime,
&stbuf->mtime, &stbuf->length,
&stbuf->name, &stbuf->uid,
&stbuf->gid, &stbuf->muid,
&stbuf->extension,
&stbuf->n_uid, &stbuf->n_gid,
&stbuf->n_muid);
if (errcode)
p9stat_free(stbuf);
}
break;
case 'D':{
uint32_t *count = va_arg(ap, uint32_t *);
void **data = va_arg(ap, void **);
errcode =
p9pdu_readf(pdu, proto_version, "d", count);
if (!errcode) {
*count =
min_t(uint32_t, *count,
pdu->size - pdu->offset);
*data = &pdu->sdata[pdu->offset];
}
}
break;
case 'T':{
uint16_t *nwname = va_arg(ap, uint16_t *);
char ***wnames = va_arg(ap, char ***);
errcode = p9pdu_readf(pdu, proto_version,
"w", nwname);
if (!errcode) {
*wnames =
kmalloc(sizeof(char *) * *nwname,
GFP_NOFS);
if (!*wnames)
errcode = -ENOMEM;
}
if (!errcode) {
int i;
for (i = 0; i < *nwname; i++) {
errcode =
p9pdu_readf(pdu,
proto_version,
"s",
&(*wnames)[i]);
if (errcode)
break;
}
}
if (errcode) {
if (*wnames) {
int i;
for (i = 0; i < *nwname; i++)
kfree((*wnames)[i]);
}
kfree(*wnames);
*wnames = NULL;
}
}
break;
case 'R':{
int16_t *nwqid = va_arg(ap, int16_t *);
struct p9_qid **wqids =
va_arg(ap, struct p9_qid **);
*wqids = NULL;
errcode =
p9pdu_readf(pdu, proto_version, "w", nwqid);
if (!errcode) {
*wqids =
kmalloc(*nwqid *
sizeof(struct p9_qid),
GFP_NOFS);
if (*wqids == NULL)
errcode = -ENOMEM;
}
if (!errcode) {
int i;
for (i = 0; i < *nwqid; i++) {
errcode =
p9pdu_readf(pdu,
proto_version,
"Q",
&(*wqids)[i]);
if (errcode)
break;
}
}
if (errcode) {
kfree(*wqids);
*wqids = NULL;
}
}
break;
case 'A': {
struct p9_stat_dotl *stbuf =
va_arg(ap, struct p9_stat_dotl *);
memset(stbuf, 0, sizeof(struct p9_stat_dotl));
errcode =
p9pdu_readf(pdu, proto_version,
"qQdugqqqqqqqqqqqqqqq",
&stbuf->st_result_mask,
&stbuf->qid,
&stbuf->st_mode,
&stbuf->st_uid, &stbuf->st_gid,
&stbuf->st_nlink,
&stbuf->st_rdev, &stbuf->st_size,
&stbuf->st_blksize, &stbuf->st_blocks,
&stbuf->st_atime_sec,
&stbuf->st_atime_nsec,
&stbuf->st_mtime_sec,
&stbuf->st_mtime_nsec,
&stbuf->st_ctime_sec,
&stbuf->st_ctime_nsec,
&stbuf->st_btime_sec,
&stbuf->st_btime_nsec,
&stbuf->st_gen,
&stbuf->st_data_version);
}
break;
case '?':
if ((proto_version != p9_proto_2000u) &&
(proto_version != p9_proto_2000L))
return 0;
break;
default:
BUG();
break;
}
if (errcode)
break;
}
return errcode;
}
int
p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
va_list ap)
{
const char *ptr;
int errcode = 0;
for (ptr = fmt; *ptr; ptr++) {
switch (*ptr) {
case 'b':{
int8_t val = va_arg(ap, int);
if (pdu_write(pdu, &val, sizeof(val)))
errcode = -EFAULT;
}
break;
case 'w':{
__le16 val = cpu_to_le16(va_arg(ap, int));
if (pdu_write(pdu, &val, sizeof(val)))
errcode = -EFAULT;
}
break;
case 'd':{
__le32 val = cpu_to_le32(va_arg(ap, int32_t));
if (pdu_write(pdu, &val, sizeof(val)))
errcode = -EFAULT;
}
break;
case 'q':{
__le64 val = cpu_to_le64(va_arg(ap, int64_t));
if (pdu_write(pdu, &val, sizeof(val)))
errcode = -EFAULT;
}
break;
case 's':{
const char *sptr = va_arg(ap, const char *);
uint16_t len = 0;
if (sptr)
len = min_t(size_t, strlen(sptr),
USHRT_MAX);
errcode = p9pdu_writef(pdu, proto_version,
"w", len);
if (!errcode && pdu_write(pdu, sptr, len))
errcode = -EFAULT;
}
break;
case 'u': {
kuid_t uid = va_arg(ap, kuid_t);
__le32 val = cpu_to_le32(
from_kuid(&init_user_ns, uid));
if (pdu_write(pdu, &val, sizeof(val)))
errcode = -EFAULT;
} break;
case 'g': {
kgid_t gid = va_arg(ap, kgid_t);
__le32 val = cpu_to_le32(
from_kgid(&init_user_ns, gid));
if (pdu_write(pdu, &val, sizeof(val)))
errcode = -EFAULT;
} break;
case 'Q':{
const struct p9_qid *qid =
va_arg(ap, const struct p9_qid *);
errcode =
p9pdu_writef(pdu, proto_version, "bdq",
qid->type, qid->version,
qid->path);
} break;
case 'S':{
const struct p9_wstat *stbuf =
va_arg(ap, const struct p9_wstat *);
errcode =
p9pdu_writef(pdu, proto_version,
"wwdQdddqssss?sugu",
stbuf->size, stbuf->type,
stbuf->dev, &stbuf->qid,
stbuf->mode, stbuf->atime,
stbuf->mtime, stbuf->length,
stbuf->name, stbuf->uid,
stbuf->gid, stbuf->muid,
stbuf->extension, stbuf->n_uid,
stbuf->n_gid, stbuf->n_muid);
} break;
case 'D':{
uint32_t count = va_arg(ap, uint32_t);
const void *data = va_arg(ap, const void *);
errcode = p9pdu_writef(pdu, proto_version, "d",
count);
if (!errcode && pdu_write(pdu, data, count))
errcode = -EFAULT;
}
break;
case 'U':{
int32_t count = va_arg(ap, int32_t);
const char __user *udata =
va_arg(ap, const void __user *);
errcode = p9pdu_writef(pdu, proto_version, "d",
count);
if (!errcode && pdu_write_u(pdu, udata, count))
errcode = -EFAULT;
}
break;
case 'T':{
uint16_t nwname = va_arg(ap, int);
const char **wnames = va_arg(ap, const char **);
errcode = p9pdu_writef(pdu, proto_version, "w",
nwname);
if (!errcode) {
int i;
for (i = 0; i < nwname; i++) {
errcode =
p9pdu_writef(pdu,
proto_version,
"s",
wnames[i]);
if (errcode)
break;
}
}
}
break;
case 'R':{
int16_t nwqid = va_arg(ap, int);
struct p9_qid *wqids =
va_arg(ap, struct p9_qid *);
errcode = p9pdu_writef(pdu, proto_version, "w",
nwqid);
if (!errcode) {
int i;
for (i = 0; i < nwqid; i++) {
errcode =
p9pdu_writef(pdu,
proto_version,
"Q",
&wqids[i]);
if (errcode)
break;
}
}
}
break;
case 'I':{
struct p9_iattr_dotl *p9attr = va_arg(ap,
struct p9_iattr_dotl *);
errcode = p9pdu_writef(pdu, proto_version,
"ddugqqqqq",
p9attr->valid,
p9attr->mode,
p9attr->uid,
p9attr->gid,
p9attr->size,
p9attr->atime_sec,
p9attr->atime_nsec,
p9attr->mtime_sec,
p9attr->mtime_nsec);
}
break;
case '?':
if ((proto_version != p9_proto_2000u) &&
(proto_version != p9_proto_2000L))
return 0;
break;
default:
BUG();
break;
}
if (errcode)
break;
}
return errcode;
}
int p9pdu_readf(struct p9_fcall *pdu, int proto_version, const char *fmt, ...)
{
va_list ap;
int ret;
va_start(ap, fmt);
ret = p9pdu_vreadf(pdu, proto_version, fmt, ap);
va_end(ap);
return ret;
}
static int
p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...)
{
va_list ap;
int ret;
va_start(ap, fmt);
ret = p9pdu_vwritef(pdu, proto_version, fmt, ap);
va_end(ap);
return ret;
}
int p9stat_read(struct p9_client *clnt, char *buf, int len, struct p9_wstat *st)
{
struct p9_fcall fake_pdu;
int ret;
fake_pdu.size = len;
fake_pdu.capacity = len;
fake_pdu.sdata = buf;
fake_pdu.offset = 0;
ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "S", st);
if (ret) {
p9_debug(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret);
trace_9p_protocol_dump(clnt, &fake_pdu);
}
return ret;
}
EXPORT_SYMBOL(p9stat_read);
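/* The fake_pdu trick above (also used by p9dirent_read() below) wraps an
 * already-received byte buffer in a struct p9_fcall so the regular
 * p9pdu_readf() machinery can decode it in place, e.g. (illustrative):
 *
 *	struct p9_wstat st;
 *	if (p9stat_read(clnt, buf, buflen, &st) == 0)
 *		... use st, then release it with p9stat_free(&st) ...
 */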
int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type)
{
pdu->id = type;
return p9pdu_writef(pdu, 0, "dbw", 0, type, tag);
}
int p9pdu_finalize(struct p9_client *clnt, struct p9_fcall *pdu)
{
int size = pdu->size;
int err;
pdu->size = 0;
err = p9pdu_writef(pdu, 0, "d", size);
pdu->size = size;
trace_9p_protocol_dump(clnt, pdu);
p9_debug(P9_DEBUG_9P, ">>> size=%d type: %d tag: %d\n",
pdu->size, pdu->id, pdu->tag);
return err;
}
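/* Resulting wire layout (illustrative): every 9P PDU starts with the
 * 7-byte header produced by p9pdu_prepare()/p9pdu_finalize() above:
 *
 *	offset 0: u32 size	(little-endian; patched in by finalize)
 *	offset 4: u8  type
 *	offset 5: u16 tag	(little-endian)
 *
 * followed by the type-specific body encoded via p9pdu_writef().
 */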
void p9pdu_reset(struct p9_fcall *pdu)
{
pdu->offset = 0;
pdu->size = 0;
}
int p9dirent_read(struct p9_client *clnt, char *buf, int len,
struct p9_dirent *dirent)
{
struct p9_fcall fake_pdu;
int ret;
char *nameptr;
fake_pdu.size = len;
fake_pdu.capacity = len;
fake_pdu.sdata = buf;
fake_pdu.offset = 0;
ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "Qqbs", &dirent->qid,
&dirent->d_off, &dirent->d_type, &nameptr);
if (ret) {
p9_debug(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret);
trace_9p_protocol_dump(clnt, &fake_pdu);
goto out;
}
strcpy(dirent->d_name, nameptr);
kfree(nameptr);
out:
return fake_pdu.offset;
}
EXPORT_SYMBOL(p9dirent_read);

34
net/9p/protocol.h Normal file
View file

@ -0,0 +1,34 @@
/*
* net/9p/protocol.h
*
* 9P Protocol Support Code
*
* Copyright (C) 2008 by Eric Van Hensbergen <ericvh@gmail.com>
*
 * Based on code from Anthony Liguori <aliguori@us.ibm.com>
* Copyright (C) 2008 by IBM, Corp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to:
* Free Software Foundation
* 51 Franklin Street, Fifth Floor
* Boston, MA 02111-1301 USA
*
*/
int p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
va_list ap);
int p9pdu_readf(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type);
int p9pdu_finalize(struct p9_client *clnt, struct p9_fcall *pdu);
void p9pdu_reset(struct p9_fcall *pdu);
size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size);

69
net/9p/trans_common.c Normal file
View file

@ -0,0 +1,69 @@
/*
* Copyright IBM Corporation, 2010
* Author Venkateswararao Jujjuri <jvrao@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2.1 of the GNU Lesser General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <linux/scatterlist.h>
#include "trans_common.h"
/**
 * p9_release_pages - release pages after the transaction
 * @pages: array of pinned pages to release
 * @nr_pages: number of entries in @pages
 */
void p9_release_pages(struct page **pages, int nr_pages)
{
int i;
for (i = 0; i < nr_pages; i++)
if (pages[i])
put_page(pages[i]);
}
EXPORT_SYMBOL(p9_release_pages);
/**
 * p9_nr_pages - return the number of pages needed to accommodate the payload
 * @data: start of the payload buffer
 * @len: payload length in bytes
 */
int p9_nr_pages(char *data, int len)
{
unsigned long start_page, end_page;
start_page = (unsigned long)data >> PAGE_SHIFT;
end_page = ((unsigned long)data + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
return end_page - start_page;
}
EXPORT_SYMBOL(p9_nr_pages);
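/* Worked example (illustrative, assuming 4 KiB pages): a 6000-byte
 * payload that starts 100 bytes into page N ends at byte 6099, so
 * start_page = N, end_page = N + 2, and p9_nr_pages() returns 2:
 * the payload touches pages N and N + 1.
 */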
/**
 * p9_payload_gup - translate a user buffer into kernel pages and pin
 * them for read or write through get_user_pages_fast().
 * @data: start of the user buffer
 * @nr_pages: on entry, number of pages requested; on return, the number
 *            actually pinned (gup may return fewer pages than requested)
 * @pages: array that receives the pinned page pointers
 * @write: non-zero if the pages will be written to
 */
int p9_payload_gup(char *data, int *nr_pages, struct page **pages, int write)
{
int nr_mapped_pages;
nr_mapped_pages = get_user_pages_fast((unsigned long)data,
*nr_pages, write, pages);
if (nr_mapped_pages <= 0)
return nr_mapped_pages;
*nr_pages = nr_mapped_pages;
return 0;
}
EXPORT_SYMBOL(p9_payload_gup);

17
net/9p/trans_common.h Normal file
View file

@ -0,0 +1,17 @@
/*
* Copyright IBM Corporation, 2010
* Author Venkateswararao Jujjuri <jvrao@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2.1 of the GNU Lesser General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
*/
void p9_release_pages(struct page **, int);
int p9_payload_gup(char *, int *, struct page **, int);
int p9_nr_pages(char *, int);

1118
net/9p/trans_fd.c Normal file

File diff suppressed because it is too large

765
net/9p/trans_rdma.c Normal file
View file

@ -0,0 +1,765 @@
/*
 * net/9p/trans_rdma.c
*
* RDMA transport layer based on the trans_fd.c implementation.
*
* Copyright (C) 2008 by Tom Tucker <tom@opengridcomputing.com>
* Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
* Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
* Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to:
* Free Software Foundation
* 51 Franklin Street, Fifth Floor
* Boston, MA 02111-1301 USA
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#define P9_PORT 5640
#define P9_RDMA_SQ_DEPTH 32
#define P9_RDMA_RQ_DEPTH 32
#define P9_RDMA_SEND_SGE 4
#define P9_RDMA_RECV_SGE 4
#define P9_RDMA_IRD 0
#define P9_RDMA_ORD 0
#define P9_RDMA_TIMEOUT 30000 /* 30 seconds */
#define P9_RDMA_MAXSIZE (1024*1024) /* 1MB */
/**
* struct p9_trans_rdma - RDMA transport instance
*
* @state: tracks the transport state machine for connection setup and tear down
* @cm_id: The RDMA CM ID
* @pd: Protection Domain pointer
* @qp: Queue Pair pointer
* @cq: Completion Queue pointer
 * @dma_mr: DMA Memory Region pointer
* @lkey: The local access only memory region key
* @timeout: Number of uSecs to wait for connection management events
* @sq_depth: The depth of the Send Queue
* @sq_sem: Semaphore for the SQ
* @rq_depth: The depth of the Receive Queue.
* @rq_sem: Semaphore for the RQ
 * @excess_rc: Number of posted receive contexts without a pending request.
 *             See rdma_request()
* @addr: The remote peer's address
* @req_lock: Protects the active request list
* @cm_done: Completion event for connection management tracking
*/
struct p9_trans_rdma {
enum {
P9_RDMA_INIT,
P9_RDMA_ADDR_RESOLVED,
P9_RDMA_ROUTE_RESOLVED,
P9_RDMA_CONNECTED,
P9_RDMA_FLUSHING,
P9_RDMA_CLOSING,
P9_RDMA_CLOSED,
} state;
struct rdma_cm_id *cm_id;
struct ib_pd *pd;
struct ib_qp *qp;
struct ib_cq *cq;
struct ib_mr *dma_mr;
u32 lkey;
long timeout;
int sq_depth;
struct semaphore sq_sem;
int rq_depth;
struct semaphore rq_sem;
atomic_t excess_rc;
struct sockaddr_in addr;
spinlock_t req_lock;
struct completion cm_done;
};
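/* Happy-path state transitions (as driven by p9_cm_event_handler()
 * below): P9_RDMA_INIT -> ADDR_RESOLVED -> ROUTE_RESOLVED -> CONNECTED,
 * then CLOSING/CLOSED on teardown, or FLUSHING when a receive
 * completion fails.
 */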
/**
* p9_rdma_context - Keeps track of in-process WR
*
* @wc_op: The original WR op for when the CQE completes in error.
* @busa: Bus address to unmap when the WR completes
* @req: Keeps track of requests (send)
 * @rc: Keeps track of replies (receive)
*/
struct p9_rdma_req;
struct p9_rdma_context {
enum ib_wc_opcode wc_op;
dma_addr_t busa;
union {
struct p9_req_t *req;
struct p9_fcall *rc;
};
};
/**
* p9_rdma_opts - Collection of mount options
* @port: port of connection
* @sq_depth: The requested depth of the SQ. This really doesn't need
* to be any deeper than the number of threads used in the client
* @rq_depth: The depth of the RQ. Should be greater than or equal to SQ depth
* @timeout: Time to wait in msecs for CM events
*/
struct p9_rdma_opts {
short port;
int sq_depth;
int rq_depth;
long timeout;
};
/*
* Option Parsing (code inspired by NFS code)
*/
enum {
/* Options that take integer arguments */
Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout, Opt_err,
};
static match_table_t tokens = {
{Opt_port, "port=%u"},
{Opt_sq_depth, "sq=%u"},
{Opt_rq_depth, "rq=%u"},
{Opt_timeout, "timeout=%u"},
{Opt_err, NULL},
};
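/* Illustrative option string (hypothetical values):
 * "port=5640,sq=16,rq=32,timeout=30000" parses to opts->port = 5640,
 * opts->sq_depth = 16, opts->rq_depth = 32, opts->timeout = 30000;
 * parse_opts() below then raises rq_depth to at least sq_depth.
 */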
/**
* parse_opts - parse mount options into rdma options structure
* @params: options string passed from mount
* @opts: rdma transport-specific structure to parse options into
*
* Returns 0 upon success, -ERRNO upon failure
*/
static int parse_opts(char *params, struct p9_rdma_opts *opts)
{
char *p;
substring_t args[MAX_OPT_ARGS];
int option;
char *options, *tmp_options;
opts->port = P9_PORT;
opts->sq_depth = P9_RDMA_SQ_DEPTH;
opts->rq_depth = P9_RDMA_RQ_DEPTH;
opts->timeout = P9_RDMA_TIMEOUT;
if (!params)
return 0;
tmp_options = kstrdup(params, GFP_KERNEL);
if (!tmp_options) {
p9_debug(P9_DEBUG_ERROR,
"failed to allocate copy of option string\n");
return -ENOMEM;
}
options = tmp_options;
while ((p = strsep(&options, ",")) != NULL) {
int token;
int r;
if (!*p)
continue;
token = match_token(p, tokens, args);
if (token == Opt_err)
continue;
r = match_int(&args[0], &option);
if (r < 0) {
p9_debug(P9_DEBUG_ERROR,
"integer field, but no integer?\n");
continue;
}
switch (token) {
case Opt_port:
opts->port = option;
break;
case Opt_sq_depth:
opts->sq_depth = option;
break;
case Opt_rq_depth:
opts->rq_depth = option;
break;
case Opt_timeout:
opts->timeout = option;
break;
default:
continue;
}
}
/* RQ must be at least as large as the SQ */
opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
kfree(tmp_options);
return 0;
}
static int
p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
struct p9_client *c = id->context;
struct p9_trans_rdma *rdma = c->trans;
switch (event->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED:
BUG_ON(rdma->state != P9_RDMA_INIT);
rdma->state = P9_RDMA_ADDR_RESOLVED;
break;
case RDMA_CM_EVENT_ROUTE_RESOLVED:
BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED);
rdma->state = P9_RDMA_ROUTE_RESOLVED;
break;
case RDMA_CM_EVENT_ESTABLISHED:
BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED);
rdma->state = P9_RDMA_CONNECTED;
break;
case RDMA_CM_EVENT_DISCONNECTED:
if (rdma)
rdma->state = P9_RDMA_CLOSED;
if (c)
c->status = Disconnected;
break;
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
break;
case RDMA_CM_EVENT_ADDR_CHANGE:
case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_DEVICE_REMOVAL:
case RDMA_CM_EVENT_MULTICAST_JOIN:
case RDMA_CM_EVENT_MULTICAST_ERROR:
case RDMA_CM_EVENT_REJECTED:
case RDMA_CM_EVENT_CONNECT_REQUEST:
case RDMA_CM_EVENT_CONNECT_RESPONSE:
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_ADDR_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
c->status = Disconnected;
rdma_disconnect(rdma->cm_id);
break;
default:
BUG();
}
complete(&rdma->cm_done);
return 0;
}
static void
handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
{
struct p9_req_t *req;
int err = 0;
int16_t tag;
req = NULL;
ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
DMA_FROM_DEVICE);
if (status != IB_WC_SUCCESS)
goto err_out;
err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
if (err)
goto err_out;
req = p9_tag_lookup(client, tag);
if (!req)
goto err_out;
/* Check that we have not yet received a reply for this request.
*/
if (unlikely(req->rc)) {
pr_err("Duplicate reply for request %d", tag);
goto err_out;
}
req->rc = c->rc;
p9_client_cb(client, req, REQ_STATUS_RCVD);
return;
err_out:
p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n", req, err, status);
rdma->state = P9_RDMA_FLUSHING;
client->status = Disconnected;
}
static void
handle_send(struct p9_client *client, struct p9_trans_rdma *rdma,
struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
{
ib_dma_unmap_single(rdma->cm_id->device,
c->busa, c->req->tc->size,
DMA_TO_DEVICE);
}
static void qp_event_handler(struct ib_event *event, void *context)
{
p9_debug(P9_DEBUG_ERROR, "QP event %d context %p\n",
event->event, context);
}
static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
{
struct p9_client *client = cq_context;
struct p9_trans_rdma *rdma = client->trans;
int ret;
struct ib_wc wc;
ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
struct p9_rdma_context *c = (void *) (unsigned long) wc.wr_id;
switch (c->wc_op) {
case IB_WC_RECV:
handle_recv(client, rdma, c, wc.status, wc.byte_len);
up(&rdma->rq_sem);
break;
case IB_WC_SEND:
handle_send(client, rdma, c, wc.status, wc.byte_len);
up(&rdma->sq_sem);
break;
default:
pr_err("unexpected completion type, c->wc_op=%d, wc.opcode=%d, status=%d\n",
c->wc_op, wc.opcode, wc.status);
break;
}
kfree(c);
}
}
static void cq_event_handler(struct ib_event *e, void *v)
{
p9_debug(P9_DEBUG_ERROR, "CQ event %d context %p\n", e->event, v);
}
static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
{
if (!rdma)
return;
if (rdma->dma_mr && !IS_ERR(rdma->dma_mr))
ib_dereg_mr(rdma->dma_mr);
if (rdma->qp && !IS_ERR(rdma->qp))
ib_destroy_qp(rdma->qp);
if (rdma->pd && !IS_ERR(rdma->pd))
ib_dealloc_pd(rdma->pd);
if (rdma->cq && !IS_ERR(rdma->cq))
ib_destroy_cq(rdma->cq);
if (rdma->cm_id && !IS_ERR(rdma->cm_id))
rdma_destroy_id(rdma->cm_id);
kfree(rdma);
}
static int
post_recv(struct p9_client *client, struct p9_rdma_context *c)
{
struct p9_trans_rdma *rdma = client->trans;
struct ib_recv_wr wr, *bad_wr;
struct ib_sge sge;
c->busa = ib_dma_map_single(rdma->cm_id->device,
c->rc->sdata, client->msize,
DMA_FROM_DEVICE);
if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
goto error;
sge.addr = c->busa;
sge.length = client->msize;
sge.lkey = rdma->lkey;
wr.next = NULL;
c->wc_op = IB_WC_RECV;
wr.wr_id = (unsigned long) c;
wr.sg_list = &sge;
wr.num_sge = 1;
return ib_post_recv(rdma->qp, &wr, &bad_wr);
error:
p9_debug(P9_DEBUG_ERROR, "EIO\n");
return -EIO;
}
static int rdma_request(struct p9_client *client, struct p9_req_t *req)
{
struct p9_trans_rdma *rdma = client->trans;
struct ib_send_wr wr, *bad_wr;
struct ib_sge sge;
int err = 0;
unsigned long flags;
struct p9_rdma_context *c = NULL;
struct p9_rdma_context *rpl_context = NULL;
/* When an error occurs between posting the recv and the send,
* there will be a receive context posted without a pending request.
* Since there is no way to "un-post" it, we remember it and skip
* post_recv() for the next request.
 * So here, see if we are this `next request' and need to absorb an
 * excess rc. If yes, then drop and free our own, and do not post_recv().
 */
if (unlikely(atomic_read(&rdma->excess_rc) > 0)) {
if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) {
/* Got one ! */
kfree(req->rc);
req->rc = NULL;
goto dont_need_post_recv;
} else {
/* We raced and lost. */
atomic_inc(&rdma->excess_rc);
}
}
/* Allocate an fcall for the reply */
rpl_context = kmalloc(sizeof *rpl_context, GFP_NOFS);
if (!rpl_context) {
err = -ENOMEM;
goto recv_error;
}
rpl_context->rc = req->rc;
/*
* Post a receive buffer for this request. We need to ensure
* there is a reply buffer available for every outstanding
* request. A flushed request can result in no reply for an
* outstanding request, so we must keep a count to avoid
* overflowing the RQ.
*/
if (down_interruptible(&rdma->rq_sem)) {
err = -EINTR;
goto recv_error;
}
err = post_recv(client, rpl_context);
if (err) {
p9_debug(P9_DEBUG_FCALL, "POST RECV failed\n");
goto recv_error;
}
/* remove posted receive buffer from request structure */
req->rc = NULL;
dont_need_post_recv:
/* Post the request */
c = kmalloc(sizeof *c, GFP_NOFS);
if (!c) {
err = -ENOMEM;
goto send_error;
}
c->req = req;
c->busa = ib_dma_map_single(rdma->cm_id->device,
c->req->tc->sdata, c->req->tc->size,
DMA_TO_DEVICE);
if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) {
err = -EIO;
goto send_error;
}
sge.addr = c->busa;
sge.length = c->req->tc->size;
sge.lkey = rdma->lkey;
wr.next = NULL;
c->wc_op = IB_WC_SEND;
wr.wr_id = (unsigned long) c;
wr.opcode = IB_WR_SEND;
wr.send_flags = IB_SEND_SIGNALED;
wr.sg_list = &sge;
wr.num_sge = 1;
if (down_interruptible(&rdma->sq_sem)) {
err = -EINTR;
goto send_error;
}
/* Mark request as `sent' *before* we actually send it,
 * because doing it afterwards could erase the REQ_STATUS_RCVD
* status in case of a very fast reply.
*/
req->status = REQ_STATUS_SENT;
err = ib_post_send(rdma->qp, &wr, &bad_wr);
if (err)
goto send_error;
/* Success */
return 0;
/* Handle errors that happened during or while preparing the send: */
send_error:
req->status = REQ_STATUS_ERROR;
kfree(c);
p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err);
/* Ach.
 * We did post_recv(), but not the send. We have one posted receive
 * buffer in excess.
 */
atomic_inc(&rdma->excess_rc);
return err;
/* Handle errors that happened during or while preparing post_recv(): */
recv_error:
kfree(rpl_context);
spin_lock_irqsave(&rdma->req_lock, flags);
if (rdma->state < P9_RDMA_CLOSING) {
rdma->state = P9_RDMA_CLOSING;
spin_unlock_irqrestore(&rdma->req_lock, flags);
rdma_disconnect(rdma->cm_id);
} else
spin_unlock_irqrestore(&rdma->req_lock, flags);
return err;
}
static void rdma_close(struct p9_client *client)
{
struct p9_trans_rdma *rdma;
if (!client)
return;
rdma = client->trans;
if (!rdma)
return;
client->status = Disconnected;
rdma_disconnect(rdma->cm_id);
rdma_destroy_trans(rdma);
}
/**
* alloc_rdma - Allocate and initialize the rdma transport structure
* @opts: Mount options structure
*/
static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
{
struct p9_trans_rdma *rdma;
rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL);
if (!rdma)
return NULL;
rdma->sq_depth = opts->sq_depth;
rdma->rq_depth = opts->rq_depth;
rdma->timeout = opts->timeout;
spin_lock_init(&rdma->req_lock);
init_completion(&rdma->cm_done);
sema_init(&rdma->sq_sem, rdma->sq_depth);
sema_init(&rdma->rq_sem, rdma->rq_depth);
atomic_set(&rdma->excess_rc, 0);
return rdma;
}
static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
{
/* Nothing to do here.
* We will take care of it (if we have to) in rdma_cancelled()
*/
return 1;
}
/* A request has been fully flushed without a reply.
* That means we have posted one buffer in excess.
*/
static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req)
{
struct p9_trans_rdma *rdma = client->trans;
atomic_inc(&rdma->excess_rc);
return 0;
}
/**
 * rdma_create_trans - Transport method for creating a transport instance
* @client: client instance
* @addr: IP address string
* @args: Mount options string
*/
static int
rdma_create_trans(struct p9_client *client, const char *addr, char *args)
{
int err;
struct p9_rdma_opts opts;
struct p9_trans_rdma *rdma;
struct rdma_conn_param conn_param;
struct ib_qp_init_attr qp_attr;
struct ib_device_attr devattr;
/* Parse the transport specific mount options */
err = parse_opts(args, &opts);
if (err < 0)
return err;
/* Create and initialize the RDMA transport structure */
rdma = alloc_rdma(&opts);
if (!rdma)
return -ENOMEM;
/* Create the RDMA CM ID */
rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP,
IB_QPT_RC);
if (IS_ERR(rdma->cm_id))
goto error;
/* Associate the client with the transport */
client->trans = rdma;
/* Resolve the server's address */
rdma->addr.sin_family = AF_INET;
rdma->addr.sin_addr.s_addr = in_aton(addr);
rdma->addr.sin_port = htons(opts.port);
err = rdma_resolve_addr(rdma->cm_id, NULL,
(struct sockaddr *)&rdma->addr,
rdma->timeout);
if (err)
goto error;
err = wait_for_completion_interruptible(&rdma->cm_done);
if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED))
goto error;
/* Resolve the route to the server */
err = rdma_resolve_route(rdma->cm_id, rdma->timeout);
if (err)
goto error;
err = wait_for_completion_interruptible(&rdma->cm_done);
if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
goto error;
/* Query the device attributes */
err = ib_query_device(rdma->cm_id->device, &devattr);
if (err)
goto error;
/* Create the Completion Queue */
rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler,
cq_event_handler, client,
opts.sq_depth + opts.rq_depth + 1, 0);
if (IS_ERR(rdma->cq))
goto error;
ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
/* Create the Protection Domain */
rdma->pd = ib_alloc_pd(rdma->cm_id->device);
if (IS_ERR(rdma->pd))
goto error;
/* Cache the DMA lkey in the transport */
rdma->dma_mr = NULL;
if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
rdma->lkey = rdma->cm_id->device->local_dma_lkey;
else {
rdma->dma_mr = ib_get_dma_mr(rdma->pd, IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(rdma->dma_mr))
goto error;
rdma->lkey = rdma->dma_mr->lkey;
}
/* Create the Queue Pair */
memset(&qp_attr, 0, sizeof qp_attr);
qp_attr.event_handler = qp_event_handler;
qp_attr.qp_context = client;
qp_attr.cap.max_send_wr = opts.sq_depth;
qp_attr.cap.max_recv_wr = opts.rq_depth;
qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE;
qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE;
qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
qp_attr.qp_type = IB_QPT_RC;
qp_attr.send_cq = rdma->cq;
qp_attr.recv_cq = rdma->cq;
err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);
if (err)
goto error;
rdma->qp = rdma->cm_id->qp;
/* Request a connection */
memset(&conn_param, 0, sizeof(conn_param));
conn_param.private_data = NULL;
conn_param.private_data_len = 0;
conn_param.responder_resources = P9_RDMA_IRD;
conn_param.initiator_depth = P9_RDMA_ORD;
err = rdma_connect(rdma->cm_id, &conn_param);
if (err)
goto error;
err = wait_for_completion_interruptible(&rdma->cm_done);
if (err || (rdma->state != P9_RDMA_CONNECTED))
goto error;
client->status = Connected;
return 0;
error:
rdma_destroy_trans(rdma);
return -ENOTCONN;
}
static struct p9_trans_module p9_rdma_trans = {
.name = "rdma",
.maxsize = P9_RDMA_MAXSIZE,
.def = 0,
.owner = THIS_MODULE,
.create = rdma_create_trans,
.close = rdma_close,
.request = rdma_request,
.cancel = rdma_cancel,
.cancelled = rdma_cancelled,
};
/**
* p9_trans_rdma_init - Register the 9P RDMA transport driver
*/
static int __init p9_trans_rdma_init(void)
{
v9fs_register_trans(&p9_rdma_trans);
return 0;
}
static void __exit p9_trans_rdma_exit(void)
{
v9fs_unregister_trans(&p9_rdma_trans);
}
module_init(p9_trans_rdma_init);
module_exit(p9_trans_rdma_exit);
MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("RDMA Transport for 9P");
MODULE_LICENSE("Dual BSD/GPL");

730
net/9p/trans_virtio.c Normal file
View file

@ -0,0 +1,730 @@
/*
* The Virtio 9p transport driver
*
* This is a block based transport driver based on the lguest block driver
* code.
*
* Copyright (C) 2007, 2008 Eric Van Hensbergen, IBM Corporation
*
* Based on virtio console driver
* Copyright (C) 2006, 2007 Rusty Russell, IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to:
* Free Software Foundation
* 51 Franklin Street, Fifth Floor
* Boston, MA 02111-1301 USA
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <linux/parser.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/virtio.h>
#include <linux/virtio_9p.h>
#include "trans_common.h"
#define VIRTQUEUE_NUM 128
/* a single mutex to manage channel initialization and attachment */
static DEFINE_MUTEX(virtio_9p_lock);
static DECLARE_WAIT_QUEUE_HEAD(vp_wq);
static atomic_t vp_pinned = ATOMIC_INIT(0);
/**
* struct virtio_chan - per-instance transport information
* @inuse: whether the channel is in use
* @lock: protects multiple elements within this structure
* @client: client instance
* @vdev: virtio dev associated with this channel
* @vq: virtio queue associated with this channel
* @sg: scatter gather list which is used to pack a request (protected?)
*
* We keep all per-channel information in a structure.
 * This structure is allocated within the device's dev->mem space.
* A pointer to the structure will get put in the transport private.
*
*/
struct virtio_chan {
bool inuse;
spinlock_t lock;
struct p9_client *client;
struct virtio_device *vdev;
struct virtqueue *vq;
int ring_bufs_avail;
wait_queue_head_t *vc_wq;
/* This is a global limit. Since we don't have a global structure,
 * we place it in each channel.
 */
unsigned long p9_max_pages;
/* Scatterlist: can be too big for stack. */
struct scatterlist sg[VIRTQUEUE_NUM];
int tag_len;
/*
 * tag name used to identify a mount; not null-terminated
 */
char *tag;
struct list_head chan_list;
};
static struct list_head virtio_chan_list;
/* How many bytes left in this page. */
static unsigned int rest_of_page(void *data)
{
return PAGE_SIZE - ((unsigned long)data % PAGE_SIZE);
}
/**
* p9_virtio_close - reclaim resources of a channel
* @client: client instance
*
* This reclaims a channel by freeing its resources and
 * resetting its inuse flag.
*
*/
static void p9_virtio_close(struct p9_client *client)
{
struct virtio_chan *chan = client->trans;
mutex_lock(&virtio_9p_lock);
if (chan)
chan->inuse = false;
mutex_unlock(&virtio_9p_lock);
}
/**
* req_done - callback which signals activity from the server
* @vq: virtio queue activity was received on
*
* This notifies us that the server has triggered some activity
 * on the virtio channel - most likely a response to a request we
* sent. Figure out which requests now have responses and wake up
* those threads.
*
* Bugs: could do with some additional sanity checking, but appears to work.
*
*/
static void req_done(struct virtqueue *vq)
{
struct virtio_chan *chan = vq->vdev->priv;
struct p9_fcall *rc;
unsigned int len;
struct p9_req_t *req;
unsigned long flags;
p9_debug(P9_DEBUG_TRANS, ": request done\n");
while (1) {
spin_lock_irqsave(&chan->lock, flags);
rc = virtqueue_get_buf(chan->vq, &len);
if (rc == NULL) {
spin_unlock_irqrestore(&chan->lock, flags);
break;
}
chan->ring_bufs_avail = 1;
spin_unlock_irqrestore(&chan->lock, flags);
/* Wakeup if anyone waiting for VirtIO ring space. */
wake_up(chan->vc_wq);
p9_debug(P9_DEBUG_TRANS, ": rc %p\n", rc);
p9_debug(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
req = p9_tag_lookup(chan->client, rc->tag);
p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
}
}
/**
* pack_sg_list - pack a scatter gather list from a linear buffer
* @sg: scatter/gather list to pack into
* @start: which segment of the sg_list to start at
* @limit: maximum segment to pack data to
* @data: data to pack into scatter/gather list
* @count: amount of data to pack into the scatter/gather list
*
* sg_lists have multiple segments of various sizes. This will pack
* arbitrary data into an existing scatter gather list, segmenting the
* data as necessary within constraints.
*
*/
static int pack_sg_list(struct scatterlist *sg, int start,
int limit, char *data, int count)
{
int s;
int index = start;
while (count) {
s = rest_of_page(data);
if (s > count)
s = count;
BUG_ON(index > limit);
/* Make sure we don't terminate early. */
sg_unmark_end(&sg[index]);
sg_set_buf(&sg[index++], data, s);
count -= s;
data += s;
}
if (index-start)
sg_mark_end(&sg[index - 1]);
return index-start;
}
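/* Illustrative: with 4 KiB pages, packing a page-aligned 10000-byte
 * buffer yields three sg entries of 4096, 4096 and 1808 bytes and
 * returns 3; a buffer starting mid-page simply gets a shorter first
 * entry, per rest_of_page() above.
 */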
/* We don't currently allow canceling of virtio requests */
static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
{
return 1;
}
/**
 * pack_sg_list_p - Just like pack_sg_list(), but takes a list of pages
 * instead of a linear buffer.
 * @sg: scatter/gather list to pack into
 * @start: which segment of the sg_list to start at
 * @limit: maximum segment to pack data to
 * @pdata: a list of pages to add into sg.
 * @nr_pages: number of pages to pack into the scatter/gather list
 * @data: data to pack into scatter/gather list
 * @count: amount of data to pack into the scatter/gather list
 */
static int
pack_sg_list_p(struct scatterlist *sg, int start, int limit,
struct page **pdata, int nr_pages, char *data, int count)
{
int i = 0, s;
int data_off;
int index = start;
BUG_ON(nr_pages > (limit - start));
/*
 * if the first page doesn't start at a
 * page boundary, find the offset
 */
data_off = offset_in_page(data);
while (nr_pages) {
s = rest_of_page(data);
if (s > count)
s = count;
/* Make sure we don't terminate early. */
sg_unmark_end(&sg[index]);
sg_set_page(&sg[index++], pdata[i++], s, data_off);
data_off = 0;
data += s;
count -= s;
nr_pages--;
}
if (index-start)
sg_mark_end(&sg[index - 1]);
return index - start;
}
/**
* p9_virtio_request - issue a request
* @client: client instance issuing the request
* @req: request to be issued
*
*/
static int
p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
{
int err;
int in, out, out_sgs, in_sgs;
unsigned long flags;
struct virtio_chan *chan = client->trans;
struct scatterlist *sgs[2];
p9_debug(P9_DEBUG_TRANS, "9p debug: virtio request\n");
req->status = REQ_STATUS_SENT;
req_retry:
spin_lock_irqsave(&chan->lock, flags);
out_sgs = in_sgs = 0;
/* Handle out VirtIO ring buffers */
out = pack_sg_list(chan->sg, 0,
VIRTQUEUE_NUM, req->tc->sdata, req->tc->size);
if (out)
sgs[out_sgs++] = chan->sg;
in = pack_sg_list(chan->sg, out,
VIRTQUEUE_NUM, req->rc->sdata, req->rc->capacity);
if (in)
sgs[out_sgs + in_sgs++] = chan->sg + out;
err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req->tc,
GFP_ATOMIC);
if (err < 0) {
if (err == -ENOSPC) {
chan->ring_bufs_avail = 0;
spin_unlock_irqrestore(&chan->lock, flags);
err = wait_event_interruptible(*chan->vc_wq,
chan->ring_bufs_avail);
if (err == -ERESTARTSYS)
return err;
p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n");
goto req_retry;
} else {
spin_unlock_irqrestore(&chan->lock, flags);
p9_debug(P9_DEBUG_TRANS,
"virtio rpc add_sgs returned failure\n");
return -EIO;
}
}
virtqueue_kick(chan->vq);
spin_unlock_irqrestore(&chan->lock, flags);
p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
return 0;
}
static int p9_get_mapped_pages(struct virtio_chan *chan,
struct page **pages, char *data,
int nr_pages, int write, int kern_buf)
{
int err;
if (!kern_buf) {
/*
 * We allow only p9_max_pages pinned pages. Wait here for
 * other zc requests to finish.
 */
if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
err = wait_event_interruptible(vp_wq,
(atomic_read(&vp_pinned) < chan->p9_max_pages));
if (err == -ERESTARTSYS)
return err;
}
err = p9_payload_gup(data, &nr_pages, pages, write);
if (err < 0)
return err;
atomic_add(nr_pages, &vp_pinned);
} else {
/* kernel buffer, no need to pin pages */
int s, index = 0;
int count = nr_pages;
while (nr_pages) {
s = rest_of_page(data);
if (is_vmalloc_addr(data))
pages[index++] = vmalloc_to_page(data);
else
pages[index++] = kmap_to_page(data);
data += s;
nr_pages--;
}
nr_pages = count;
}
return nr_pages;
}
/**
 * p9_virtio_zc_request - issue a zero copy request
 * @client: client instance issuing the request
 * @req: request to be issued
 * @uidata: user buffer that should be used for zero copy read
 * @uodata: user buffer that should be used for zero copy write
 * @inlen: read buffer size
 * @outlen: write buffer size
 * @in_hdr_len: reply header size; the size of the response protocol data
 * @kern_buf: non-zero if the buffers are kernel (not user) memory
 *
 */
static int
p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
char *uidata, char *uodata, int inlen,
int outlen, int in_hdr_len, int kern_buf)
{
int in, out, err, out_sgs, in_sgs;
unsigned long flags;
int in_nr_pages = 0, out_nr_pages = 0;
struct page **in_pages = NULL, **out_pages = NULL;
struct virtio_chan *chan = client->trans;
struct scatterlist *sgs[4];
p9_debug(P9_DEBUG_TRANS, "virtio request\n");
if (uodata) {
out_nr_pages = p9_nr_pages(uodata, outlen);
out_pages = kmalloc(sizeof(struct page *) * out_nr_pages,
GFP_NOFS);
if (!out_pages) {
err = -ENOMEM;
goto err_out;
}
out_nr_pages = p9_get_mapped_pages(chan, out_pages, uodata,
out_nr_pages, 0, kern_buf);
if (out_nr_pages < 0) {
err = out_nr_pages;
kfree(out_pages);
out_pages = NULL;
goto err_out;
}
}
if (uidata) {
in_nr_pages = p9_nr_pages(uidata, inlen);
in_pages = kmalloc(sizeof(struct page *) * in_nr_pages,
GFP_NOFS);
if (!in_pages) {
err = -ENOMEM;
goto err_out;
}
in_nr_pages = p9_get_mapped_pages(chan, in_pages, uidata,
in_nr_pages, 1, kern_buf);
if (in_nr_pages < 0) {
err = in_nr_pages;
kfree(in_pages);
in_pages = NULL;
goto err_out;
}
}
req->status = REQ_STATUS_SENT;
req_retry_pinned:
spin_lock_irqsave(&chan->lock, flags);
out_sgs = in_sgs = 0;
/* out data */
out = pack_sg_list(chan->sg, 0,
VIRTQUEUE_NUM, req->tc->sdata, req->tc->size);
if (out)
sgs[out_sgs++] = chan->sg;
if (out_pages) {
sgs[out_sgs++] = chan->sg + out;
out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
out_pages, out_nr_pages, uodata, outlen);
}
/*
 * Take care of in data.
 * For example, TREAD has an in header of 11 bytes:
 * 11 = read/write header = PDU header (7) + IO size (4).
 * Arrange it so that the server places the header in the
 * alloced memory and the payload onto the user buffer.
 */
in = pack_sg_list(chan->sg, out,
VIRTQUEUE_NUM, req->rc->sdata, in_hdr_len);
if (in)
sgs[out_sgs + in_sgs++] = chan->sg + out;
if (in_pages) {
sgs[out_sgs + in_sgs++] = chan->sg + out + in;
in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM,
in_pages, in_nr_pages, uidata, inlen);
}
BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs));
err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req->tc,
GFP_ATOMIC);
if (err < 0) {
if (err == -ENOSPC) {
chan->ring_bufs_avail = 0;
spin_unlock_irqrestore(&chan->lock, flags);
err = wait_event_interruptible(*chan->vc_wq,
chan->ring_bufs_avail);
if (err == -ERESTARTSYS)
goto err_out;
p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n");
goto req_retry_pinned;
} else {
spin_unlock_irqrestore(&chan->lock, flags);
p9_debug(P9_DEBUG_TRANS,
"virtio rpc add_sgs returned failure\n");
err = -EIO;
goto err_out;
}
}
virtqueue_kick(chan->vq);
spin_unlock_irqrestore(&chan->lock, flags);
p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
err = wait_event_interruptible(*req->wq,
req->status >= REQ_STATUS_RCVD);
/*
 * Non-kernel buffers are pinned; unpin them.
 */
err_out:
if (!kern_buf) {
if (in_pages) {
p9_release_pages(in_pages, in_nr_pages);
atomic_sub(in_nr_pages, &vp_pinned);
}
if (out_pages) {
p9_release_pages(out_pages, out_nr_pages);
atomic_sub(out_nr_pages, &vp_pinned);
}
/* wakeup anybody waiting for slots to pin pages */
wake_up(&vp_wq);
}
kfree(in_pages);
kfree(out_pages);
return err;
}
static ssize_t p9_mount_tag_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct virtio_chan *chan;
struct virtio_device *vdev;
vdev = dev_to_virtio(dev);
chan = vdev->priv;
return snprintf(buf, chan->tag_len + 1, "%s", chan->tag);
}
static DEVICE_ATTR(mount_tag, 0444, p9_mount_tag_show, NULL);
/**
* p9_virtio_probe - probe for existence of 9P virtio channels
* @vdev: virtio device to probe
*
* This probes for existing virtio channels.
*
*/
static int p9_virtio_probe(struct virtio_device *vdev)
{
__u16 tag_len;
char *tag;
int err;
struct virtio_chan *chan;
chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL);
if (!chan) {
pr_err("Failed to allocate virtio 9P channel\n");
err = -ENOMEM;
goto fail;
}
chan->vdev = vdev;
/* We expect one virtqueue, for requests. */
chan->vq = virtio_find_single_vq(vdev, req_done, "requests");
if (IS_ERR(chan->vq)) {
err = PTR_ERR(chan->vq);
goto out_free_vq;
}
chan->vq->vdev->priv = chan;
spin_lock_init(&chan->lock);
sg_init_table(chan->sg, VIRTQUEUE_NUM);
chan->inuse = false;
if (virtio_has_feature(vdev, VIRTIO_9P_MOUNT_TAG)) {
virtio_cread(vdev, struct virtio_9p_config, tag_len, &tag_len);
} else {
err = -EINVAL;
goto out_free_vq;
}
tag = kmalloc(tag_len, GFP_KERNEL);
if (!tag) {
err = -ENOMEM;
goto out_free_vq;
}
virtio_cread_bytes(vdev, offsetof(struct virtio_9p_config, tag),
tag, tag_len);
chan->tag = tag;
chan->tag_len = tag_len;
err = sysfs_create_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
if (err) {
goto out_free_tag;
}
chan->vc_wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
if (!chan->vc_wq) {
err = -ENOMEM;
goto out_free_tag;
}
init_waitqueue_head(chan->vc_wq);
chan->ring_bufs_avail = 1;
/* Ceiling limit to avoid denial of service attacks */
chan->p9_max_pages = nr_free_buffer_pages()/4;
virtio_device_ready(vdev);
mutex_lock(&virtio_9p_lock);
list_add_tail(&chan->chan_list, &virtio_chan_list);
mutex_unlock(&virtio_9p_lock);
/* Let udev rules use the new mount_tag attribute. */
kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
return 0;
out_free_tag:
kfree(tag);
out_free_vq:
vdev->config->del_vqs(vdev);
kfree(chan);
fail:
return err;
}
/**
* p9_virtio_create - allocate a new virtio channel
* @client: client instance invoking this transport
* @devname: string identifying the channel to connect to (unused)
* @args: args passed from sys_mount() for per-transport options (unused)
*
* This sets up a transport channel for 9p communication. Right now
 * we only match the first available channel, but eventually we could look up
* alternate channels by matching devname versus a virtio_config entry.
* We use a simple reference count mechanism to ensure that only a single
* mount has a channel open at a time.
*
*/
static int
p9_virtio_create(struct p9_client *client, const char *devname, char *args)
{
struct virtio_chan *chan;
int ret = -ENOENT;
int found = 0;
mutex_lock(&virtio_9p_lock);
list_for_each_entry(chan, &virtio_chan_list, chan_list) {
if (!strncmp(devname, chan->tag, chan->tag_len) &&
strlen(devname) == chan->tag_len) {
if (!chan->inuse) {
chan->inuse = true;
found = 1;
break;
}
ret = -EBUSY;
}
}
mutex_unlock(&virtio_9p_lock);
if (!found) {
pr_err("no channels available\n");
return ret;
}
client->trans = (void *)chan;
client->status = Connected;
chan->client = client;
return 0;
}
/**
* p9_virtio_remove - clean up resources associated with a virtio device
* @vdev: virtio device to remove
*
*/
static void p9_virtio_remove(struct virtio_device *vdev)
{
struct virtio_chan *chan = vdev->priv;
if (chan->inuse)
p9_virtio_close(chan->client);
vdev->config->del_vqs(vdev);
mutex_lock(&virtio_9p_lock);
list_del(&chan->chan_list);
mutex_unlock(&virtio_9p_lock);
sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
kfree(chan->tag);
kfree(chan->vc_wq);
kfree(chan);
}
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_9P, VIRTIO_DEV_ANY_ID },
{ 0 },
};
static unsigned int features[] = {
VIRTIO_9P_MOUNT_TAG,
};
/* The standard "struct lguest_driver": */
static struct virtio_driver p9_virtio_drv = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = p9_virtio_probe,
.remove = p9_virtio_remove,
};
static struct p9_trans_module p9_virtio_trans = {
.name = "virtio",
.create = p9_virtio_create,
.close = p9_virtio_close,
.request = p9_virtio_request,
.zc_request = p9_virtio_zc_request,
.cancel = p9_virtio_cancel,
/*
 * We leave one entry for input and one entry for the response
 * header. We also skip one more entry to accommodate addresses
 * that are not at a page boundary, which can result in an extra
 * page in zero copy.
 */
.maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
.def = 1,
.owner = THIS_MODULE,
};
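/* Illustrative arithmetic: with 4 KiB pages and VIRTQUEUE_NUM = 128,
 * .maxsize above is 4096 * (128 - 3) = 512000 bytes per 9P message
 * (PAGE_SIZE is architecture-dependent, so the actual limit varies).
 */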
/* The standard init function */
static int __init p9_virtio_init(void)
{
INIT_LIST_HEAD(&virtio_chan_list);
v9fs_register_trans(&p9_virtio_trans);
return register_virtio_driver(&p9_virtio_drv);
}
static void __exit p9_virtio_cleanup(void)
{
unregister_virtio_driver(&p9_virtio_drv);
v9fs_unregister_trans(&p9_virtio_trans);
}
module_init(p9_virtio_init);
module_exit(p9_virtio_cleanup);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
MODULE_DESCRIPTION("Virtio 9p Transport");
MODULE_LICENSE("GPL");

141
net/9p/util.c Normal file
View file

@ -0,0 +1,141 @@
/*
* net/9p/util.c
*
* This file contains some helper functions
*
* Copyright (C) 2007 by Latchesar Ionkov <lucho@ionkov.net>
* Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to:
* Free Software Foundation
* 51 Franklin Street, Fifth Floor
* Boston, MA 02111-1301 USA
*
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/parser.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
/**
* struct p9_idpool - per-connection accounting for tag idpool
* @lock: protects the pool
* @pool: idr to allocate tag id from
*
*/
struct p9_idpool {
spinlock_t lock;
struct idr pool;
};
/**
* p9_idpool_create - create a new per-connection id pool
*
*/
struct p9_idpool *p9_idpool_create(void)
{
struct p9_idpool *p;
p = kmalloc(sizeof(struct p9_idpool), GFP_KERNEL);
if (!p)
return ERR_PTR(-ENOMEM);
spin_lock_init(&p->lock);
idr_init(&p->pool);
return p;
}
EXPORT_SYMBOL(p9_idpool_create);
/**
 * p9_idpool_destroy - destroy a per-connection id pool
* @p: idpool to destroy
*/
void p9_idpool_destroy(struct p9_idpool *p)
{
idr_destroy(&p->pool);
kfree(p);
}
EXPORT_SYMBOL(p9_idpool_destroy);
/**
* p9_idpool_get - allocate numeric id from pool
* @p: pool to allocate from
*
 * Bugs: This seems to be an awfully generic function; should it be in idr.c with
* the lock included in struct idr?
*/
int p9_idpool_get(struct p9_idpool *p)
{
int i;
unsigned long flags;
idr_preload(GFP_NOFS);
spin_lock_irqsave(&p->lock, flags);
/* no need to store exactly p, we just need something non-null */
i = idr_alloc(&p->pool, p, 0, 0, GFP_NOWAIT);
spin_unlock_irqrestore(&p->lock, flags);
idr_preload_end();
if (i < 0)
return -1;
p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", i, p);
return i;
}
EXPORT_SYMBOL(p9_idpool_get);
/**
* p9_idpool_put - release numeric id from pool
* @id: numeric id which is being released
* @p: pool to release id into
*
 * Bugs: This seems to be an awfully generic function; should it be in idr.c with
* the lock included in struct idr?
*/
void p9_idpool_put(int id, struct p9_idpool *p)
{
unsigned long flags;
p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", id, p);
spin_lock_irqsave(&p->lock, flags);
idr_remove(&p->pool, id);
spin_unlock_irqrestore(&p->lock, flags);
}
EXPORT_SYMBOL(p9_idpool_put);
/**
* p9_idpool_check - check if the specified id is available
* @id: id to check
* @p: pool to check
*/
int p9_idpool_check(int id, struct p9_idpool *p)
{
return idr_find(&p->pool, id) != NULL;
}
EXPORT_SYMBOL(p9_idpool_check);
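/* Illustrative lifecycle for the pool helpers above (hypothetical
 * caller; the tag value shown is just an example):
 *
 *	struct p9_idpool *pool = p9_idpool_create();
 *	int tag = p9_idpool_get(pool);	(e.g. 0 on a fresh pool)
 *	... use tag; p9_idpool_check(tag, pool) is now non-zero ...
 *	p9_idpool_put(tag, pool);
 *	p9_idpool_destroy(pool);
 */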

392
net/Kconfig Normal file
View file

@ -0,0 +1,392 @@
#
# Network configuration
#
menuconfig NET
bool "Networking support"
select NLATTR
select GENERIC_NET_UTILS
select BPF
---help---
Unless you really know what you are doing, you should say Y here.
The reason is that some programs need kernel networking support even
when running on a stand-alone machine that isn't connected to any
other computer.
If you are upgrading from an older kernel, you
should consider updating your networking tools too because changes
in the kernel and the tools often go hand in hand. The tools are
contained in the package net-tools, the location and version number
of which are given in <file:Documentation/Changes>.
For a general introduction to Linux networking, it is highly
recommended to read the NET-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
if NET
config WANT_COMPAT_NETLINK_MESSAGES
bool
help
This option can be selected by other options that need compat
netlink messages.
config COMPAT_NETLINK_MESSAGES
def_bool y
depends on COMPAT
depends on WEXT_CORE || WANT_COMPAT_NETLINK_MESSAGES
help
This option makes it possible to send different netlink messages
to tasks depending on whether the task is a compat task or not. To
achieve this, you need to set skb_shinfo(skb)->frag_list to the
compat skb before sending the skb, the netlink code will sort out
which message to actually pass to the task.
Newly written code should NEVER need this option; emit
compat-independent messages instead!
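A sketch of the pattern this help text describes (editorial addition; send_with_compat_copy is a hypothetical helper, not a kernel API, while the frag_list assignment and netlink_broadcast() call are the real mechanism named above):
#include <linux/netlink.h>
#include <linux/skbuff.h>
static int send_with_compat_copy(struct sock *sk, struct sk_buff *skb,
				 struct sk_buff *compat_skb, u32 group)
{
	/* The compat-formatted message rides along as the frag_list of
	 * the native skb; the netlink core then delivers whichever copy
	 * matches the receiving task.
	 */
	skb_shinfo(skb)->frag_list = compat_skb;
	return netlink_broadcast(sk, skb, 0, group, GFP_KERNEL);
}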
menu "Networking options"
source "net/packet/Kconfig"
source "net/unix/Kconfig"
source "net/xfrm/Kconfig"
source "net/iucv/Kconfig"
config INET
bool "TCP/IP networking"
select CRYPTO
select CRYPTO_AES
---help---
These are the protocols used on the Internet and on most local
Ethernets. It is highly recommended to say Y here (this will enlarge
your kernel by about 400 KB), since some programs (e.g. the X window
system) use TCP/IP even if your machine is not connected to any
other computer. You will get the so-called loopback device which
allows you to ping yourself (great fun, that!).
For an excellent introduction to Linux networking, please read the
Linux Networking HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
If you say Y here and also to "/proc file system support" and
"Sysctl support" below, you can change various aspects of the
behavior of the TCP/IP code by writing to the (virtual) files in
/proc/sys/net/ipv4/*; the options are explained in the file
<file:Documentation/networking/ip-sysctl.txt>.
Short answer: say Y.
if INET
source "net/ipv4/Kconfig"
source "net/ipv6/Kconfig"
source "net/netlabel/Kconfig"
endif # if INET
config ANDROID_PARANOID_NETWORK
bool "Only allow certain groups to create sockets"
default y
help
Restricts creation of certain socket families to processes that
belong to designated Android groups (e.g. AID_INET for AF_INET
sockets).
config NET_ACTIVITY_STATS
bool "Network activity statistics tracking"
default y
help
Network activity statistics are useful for tracking wireless
modem activity on 2G, 3G and 4G wireless networks. It counts the
number of transmissions and groups them into specified time buckets.
config NETWORK_SECMARK
bool "Security Marking"
help
This enables security marking of network packets, similar
to nfmark, but designated for security purposes.
If you are unsure how to answer this question, answer N.
config NET_PTP_CLASSIFY
def_bool n
config NETWORK_PHY_TIMESTAMPING
bool "Timestamping in PHY devices"
select NET_PTP_CLASSIFY
help
This allows timestamping of network packets by PHYs with
hardware timestamping capabilities. This option adds some
overhead in the transmit and receive paths.
If you are unsure how to answer this question, answer N.
menuconfig NETFILTER
bool "Network packet filtering framework (Netfilter)"
---help---
Netfilter is a framework for filtering and mangling network packets
that pass through your Linux box.
The most common use of packet filtering is to run your Linux box as
a firewall protecting a local network from the Internet. The type of
firewall provided by this kernel support is called a "packet
filter", which means that it can reject individual network packets
based on type, source, destination etc. The other kind of firewall,
a "proxy-based" one, is more secure but more intrusive and more
bothersome to set up; it inspects the network traffic much more
closely, modifies it and has knowledge about the higher level
protocols, which a packet filter lacks. Moreover, proxy-based
firewalls often require changes to the programs running on the local
clients. Proxy-based firewalls don't need support by the kernel, but
they are often combined with a packet filter, which only works if
you say Y here.
You should also say Y here if you intend to use your Linux box as
the gateway to the Internet for a local network of machines without
globally valid IP addresses. This is called "masquerading": if one
of the computers on your local network wants to send something to
the outside, your box can "masquerade" as that computer, i.e. it
forwards the traffic to the intended outside destination, but
modifies the packets to make it look like they came from the
firewall box itself. It works both ways: if the outside host
replies, the Linux box will silently forward the traffic to the
correct local computer. This way, the computers on your local net
are completely invisible to the outside world, even though they can
reach the outside and can receive replies. It is even possible to
run globally visible servers from within a masqueraded local network
using a mechanism called portforwarding. Masquerading is also often
called NAT (Network Address Translation).
Another use of Netfilter is in transparent proxying: if a machine on
the local network tries to connect to an outside host, your Linux
box can transparently forward the traffic to a local server,
typically a caching proxy server.
Yet another use of Netfilter is building a bridging firewall. Using
a bridge with Network packet filtering enabled makes iptables "see"
the bridged traffic. For filtering on the lower network and Ethernet
protocols over the bridge, use ebtables (under bridge netfilter
configuration).
Various modules exist for netfilter which replace the previous
masquerading (ipmasqadm), packet filtering (ipchains), transparent
proxying, and portforwarding mechanisms. Please see
<file:Documentation/Changes> under "iptables" for the location of
these packages.
if NETFILTER
config NETFILTER_DEBUG
bool "Network packet filtering debugging"
depends on NETFILTER
help
You can say Y here if you want to get additional messages useful in
debugging the netfilter code.
config NETFILTER_ADVANCED
bool "Advanced netfilter configuration"
depends on NETFILTER
default y
help
If you say Y here you can select between all the netfilter modules.
If you say N the more unusual ones will not be shown and the
basic ones needed by most people will default to 'M'.
If unsure, say Y.
config BRIDGE_NETFILTER
tristate "Bridged IP/ARP packets filtering"
depends on BRIDGE
depends on NETFILTER && INET
depends on NETFILTER_ADVANCED
default m
---help---
Enabling this option will let arptables see bridged ARP traffic and
iptables see bridged IP traffic. If you want a bridging firewall, you probably
want this option enabled.
Enabling or disabling this option doesn't enable or disable
ebtables.
If unsure, say N.
source "net/netfilter/Kconfig"
source "net/ipv4/netfilter/Kconfig"
source "net/ipv6/netfilter/Kconfig"
source "net/decnet/netfilter/Kconfig"
source "net/bridge/netfilter/Kconfig"
endif
source "net/dccp/Kconfig"
source "net/sctp/Kconfig"
source "net/rds/Kconfig"
source "net/tipc/Kconfig"
source "net/atm/Kconfig"
source "net/l2tp/Kconfig"
source "net/802/Kconfig"
source "net/bridge/Kconfig"
source "net/dsa/Kconfig"
source "net/8021q/Kconfig"
source "net/decnet/Kconfig"
source "net/llc/Kconfig"
source "net/ipx/Kconfig"
source "drivers/net/appletalk/Kconfig"
source "net/x25/Kconfig"
source "net/lapb/Kconfig"
source "net/phonet/Kconfig"
source "net/6lowpan/Kconfig"
source "net/ieee802154/Kconfig"
source "net/mac802154/Kconfig"
source "net/sched/Kconfig"
source "net/dcb/Kconfig"
source "net/dns_resolver/Kconfig"
source "net/batman-adv/Kconfig"
source "net/openvswitch/Kconfig"
source "net/vmw_vsock/Kconfig"
source "net/netlink/Kconfig"
source "net/mpls/Kconfig"
source "net/hsr/Kconfig"
config RPS
boolean "RPS"
depends on SMP && SYSFS
default y
config RFS_ACCEL
boolean
depends on RPS
select CPU_RMAP
default y
config XPS
boolean
depends on SMP
default y
config CGROUP_NET_PRIO
bool "Network priority cgroup"
depends on CGROUPS
---help---
Cgroup subsystem for use in assigning processes to network priorities on
a per-interface basis.
config CGROUP_NET_CLASSID
boolean "Network classid cgroup"
depends on CGROUPS
---help---
Cgroup subsystem for use as general purpose socket classid marker that is
being used in cls_cgroup and for netfilter matching.
config NET_RX_BUSY_POLL
boolean
default y
config BQL
boolean
depends on SYSFS
select DQL
default y
config BPF_JIT
bool "enable BPF Just In Time compiler"
depends on HAVE_BPF_JIT
depends on MODULES
---help---
Berkeley Packet Filter filtering capabilities are normally handled
by an interpreter. This option allows the kernel to generate native
code when a filter is loaded into memory. This should speed up
packet sniffing (libpcap/tcpdump). Note: an admin must still enable
the feature at runtime by writing to /proc/sys/net/core/bpf_jit_enable,
e.g. echo 1 > /proc/sys/net/core/bpf_jit_enable.
config NET_FLOW_LIMIT
boolean
depends on RPS
default y
---help---
The network stack has to drop packets when a receive processing CPU's
backlog reaches netdev_max_backlog. If a few out of many active flows
generate the vast majority of load, drop their traffic earlier to
maintain capacity for the other flows. This feature provides servers
with many clients some protection against DoS by a single (spoofed)
flow that greatly exceeds average workload.
menu "Network testing"
config NET_PKTGEN
tristate "Packet Generator (USE WITH CAUTION)"
depends on INET && PROC_FS
---help---
This module will inject preconfigured packets, at a configurable
rate, out of a given interface. It is used for network interface
stress testing and performance analysis. If you don't understand
what was just said, you don't need it: say N.
Documentation on how to use the packet generator can be found
at <file:Documentation/networking/pktgen.txt>.
To compile this code as a module, choose M here: the
module will be called pktgen.
config NET_TCPPROBE
tristate "TCP connection probing"
depends on INET && PROC_FS && KPROBES
---help---
This module allows for capturing the changes to TCP connection
state in response to incoming packets. It is used for debugging
TCP congestion avoidance modules. If you don't understand
what was just said, you don't need it: say N.
Documentation on how to use TCP connection probing can be found
at:
http://www.linuxfoundation.org/collaborate/workgroups/networking/tcpprobe
To compile this code as a module, choose M here: the
module will be called tcp_probe.
config NET_DROP_MONITOR
tristate "Network packet drop alerting service"
depends on INET && TRACEPOINTS
---help---
This feature provides an alerting service to userspace in the
event that packets are discarded in the network stack. Alerts
are broadcast via netlink socket to any listening user space
process. If you don't need network drop alerts, or if you are ok
just checking the various proc files and other utilities for
drop statistics, say N here.
endmenu
endmenu
source "net/ax25/Kconfig"
source "net/can/Kconfig"
source "net/irda/Kconfig"
source "net/bluetooth/Kconfig"
source "net/rxrpc/Kconfig"
config FIB_RULES
bool
menuconfig WIRELESS
bool "Wireless"
depends on !S390
default y
if WIRELESS
source "net/wireless/Kconfig"
source "net/mac80211/Kconfig"
endif # WIRELESS
source "net/wimax/Kconfig"
source "net/rfkill/Kconfig"
source "net/9p/Kconfig"
source "net/caif/Kconfig"
source "net/ceph/Kconfig"
source "net/nfc/Kconfig"
endif # if NET
# Used by archs to tell that they support BPF_JIT
config HAVE_BPF_JIT
bool

76
net/Makefile Normal file
View file

@ -0,0 +1,76 @@
#
# Makefile for the linux networking.
#
# 2 Sep 2000, Christoph Hellwig <hch@infradead.org>
# Rewritten to use lists instead of if-statements.
#
obj-y := nonet.o
obj-$(CONFIG_NET) := socket.o core/
tmp-$(CONFIG_COMPAT) := compat.o
obj-$(CONFIG_NET) += $(tmp-y)
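# The tmp-y indirection above makes compat.o conditional on both
# options: CONFIG_COMPAT selects it into tmp-y, and it is only linked
# in when CONFIG_NET=y.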
# LLC has to be linked before the files in net/802/
obj-$(CONFIG_LLC) += llc/
obj-$(CONFIG_NET) += ethernet/ 802/ sched/ netlink/
obj-$(CONFIG_NETFILTER) += netfilter/
obj-$(CONFIG_INET) += ipv4/
obj-$(CONFIG_XFRM) += xfrm/
obj-$(CONFIG_UNIX) += unix/
obj-$(CONFIG_NET) += ipv6/
obj-$(CONFIG_PACKET) += packet/
obj-$(CONFIG_NET_KEY) += key/
obj-$(CONFIG_BRIDGE) += bridge/
obj-$(CONFIG_NET_DSA) += dsa/
obj-$(CONFIG_IPX) += ipx/
obj-$(CONFIG_ATALK) += appletalk/
obj-$(CONFIG_X25) += x25/
obj-$(CONFIG_LAPB) += lapb/
obj-$(CONFIG_NETROM) += netrom/
obj-$(CONFIG_ROSE) += rose/
obj-$(CONFIG_AX25) += ax25/
obj-$(CONFIG_CAN) += can/
obj-$(CONFIG_IRDA) += irda/
obj-$(CONFIG_BT) += bluetooth/
obj-$(CONFIG_SUNRPC) += sunrpc/
obj-$(CONFIG_AF_RXRPC) += rxrpc/
obj-$(CONFIG_ATM) += atm/
obj-$(CONFIG_L2TP) += l2tp/
obj-$(CONFIG_DECNET) += decnet/
obj-$(CONFIG_PHONET) += phonet/
ifneq ($(CONFIG_VLAN_8021Q),)
obj-y += 8021q/
endif
obj-$(CONFIG_IP_DCCP) += dccp/
obj-$(CONFIG_IP_SCTP) += sctp/
obj-$(CONFIG_RDS) += rds/
obj-$(CONFIG_WIRELESS) += wireless/
obj-$(CONFIG_MAC80211) += mac80211/
obj-$(CONFIG_TIPC) += tipc/
obj-$(CONFIG_NETLABEL) += netlabel/
obj-$(CONFIG_IUCV) += iucv/
obj-$(CONFIG_RFKILL) += rfkill/
obj-$(CONFIG_NET_9P) += 9p/
obj-$(CONFIG_CAIF) += caif/
ifneq ($(CONFIG_DCB),)
obj-y += dcb/
endif
obj-$(CONFIG_6LOWPAN) += 6lowpan/
obj-$(CONFIG_IEEE802154) += ieee802154/
obj-$(CONFIG_MAC802154) += mac802154/
ifeq ($(CONFIG_NET),y)
obj-$(CONFIG_SYSCTL) += sysctl_net.o
endif
obj-$(CONFIG_WIMAX) += wimax/
obj-$(CONFIG_DNS_RESOLVER) += dns_resolver/
obj-$(CONFIG_CEPH_LIB) += ceph/
obj-$(CONFIG_BATMAN_ADV) += batman-adv/
obj-$(CONFIG_NFC) += nfc/
obj-$(CONFIG_OPENVSWITCH) += openvswitch/
obj-$(CONFIG_VSOCKETS) += vmw_vsock/
obj-$(CONFIG_NET_MPLS_GSO) += mpls/
obj-$(CONFIG_HSR) += hsr/
obj-$(CONFIG_NET_ACTIVITY_STATS) += activity_stats.o

119
net/activity_stats.c Normal file
View file

@ -0,0 +1,119 @@
/* net/activity_stats.c
*
* Copyright (C) 2010 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Author: Mike Chan (mike@android.com)
*/
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <net/net_namespace.h>
/*
* Track transmission rates in buckets (power of 2).
* 1,2,4,8...512 seconds.
*
* Buckets represent the count of network transmissions at least
* N seconds apart, where N is 1 << bucket index.
*/
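/*
 * Worked example (editorial note): two transmissions 10 seconds apart
 * land in bucket 3, because the update loop below picks the largest i
 * with delta >= (1 << i) seconds, and 8 <= 10 < 16.
 */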
#define BUCKET_MAX 10
/* Track network activity frequency */
static unsigned long activity_stats[BUCKET_MAX];
static ktime_t last_transmit;
static ktime_t suspend_time;
static DEFINE_SPINLOCK(activity_lock);
void activity_stats_update(void)
{
int i;
unsigned long flags;
ktime_t now;
s64 delta;
spin_lock_irqsave(&activity_lock, flags);
now = ktime_get();
delta = ktime_to_ns(ktime_sub(now, last_transmit));
for (i = BUCKET_MAX - 1; i >= 0; i--) {
/*
* Check if the time delta between network activity is within the
* minimum bucket range.
*/
if (delta < (1000000000ULL << i))
continue;
activity_stats[i]++;
last_transmit = now;
break;
}
spin_unlock_irqrestore(&activity_lock, flags);
}
static int activity_stats_show(struct seq_file *m, void *v)
{
int i;
int ret;
seq_printf(m, "Min Bucket(sec) Count\n");
for (i = 0; i < BUCKET_MAX; i++) {
ret = seq_printf(m, "%15d %lu\n", 1 << i, activity_stats[i]);
if (ret)
return ret;
}
return 0;
}
static int activity_stats_notifier(struct notifier_block *nb,
unsigned long event, void *dummy)
{
switch (event) {
case PM_SUSPEND_PREPARE:
suspend_time = ktime_get_real();
break;
case PM_POST_SUSPEND:
suspend_time = ktime_sub(ktime_get_real(), suspend_time);
last_transmit = ktime_sub(last_transmit, suspend_time);
}
return 0;
}
static int activity_stats_open(struct inode *inode, struct file *file)
{
return single_open(file, activity_stats_show, PDE_DATA(inode));
}
static const struct file_operations activity_stats_fops = {
.open = activity_stats_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static struct notifier_block activity_stats_notifier_block = {
.notifier_call = activity_stats_notifier,
};
static int __init activity_stats_init(void)
{
proc_create("activity", S_IRUGO,
init_net.proc_net_stat, &activity_stats_fops);
return register_pm_notifier(&activity_stats_notifier_block);
}
subsys_initcall(activity_stats_init);
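With this built in, the counters can be read back through procfs; the output follows the "%15d %lu" format used in activity_stats_show() above (counts below are illustrative):
cat /proc/net/stat/activity
Min Bucket(sec) Count
              1 42
              2 17
              4 5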

9
net/appletalk/Makefile Normal file
View file

@ -0,0 +1,9 @@
#
# Makefile for the Linux AppleTalk layer.
#
obj-$(CONFIG_ATALK) += appletalk.o
appletalk-y := aarp.o ddp.o dev.o
appletalk-$(CONFIG_PROC_FS) += atalk_proc.o
appletalk-$(CONFIG_SYSCTL) += sysctl_net_atalk.o

1065
net/appletalk/aarp.c Normal file

File diff suppressed because it is too large

303
net/appletalk/atalk_proc.c Normal file
View file

@ -0,0 +1,303 @@
/*
* atalk_proc.c - proc support for Appletalk
*
* Copyright(c) Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation, version 2.
*/
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/atalk.h>
#include <linux/export.h>
static __inline__ struct atalk_iface *atalk_get_interface_idx(loff_t pos)
{
struct atalk_iface *i;
for (i = atalk_interfaces; pos && i; i = i->next)
--pos;
return i;
}
static void *atalk_seq_interface_start(struct seq_file *seq, loff_t *pos)
__acquires(atalk_interfaces_lock)
{
loff_t l = *pos;
read_lock_bh(&atalk_interfaces_lock);
return l ? atalk_get_interface_idx(--l) : SEQ_START_TOKEN;
}
static void *atalk_seq_interface_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct atalk_iface *i;
++*pos;
if (v == SEQ_START_TOKEN) {
i = NULL;
if (atalk_interfaces)
i = atalk_interfaces;
goto out;
}
i = v;
i = i->next;
out:
return i;
}
static void atalk_seq_interface_stop(struct seq_file *seq, void *v)
__releases(atalk_interfaces_lock)
{
read_unlock_bh(&atalk_interfaces_lock);
}
static int atalk_seq_interface_show(struct seq_file *seq, void *v)
{
struct atalk_iface *iface;
if (v == SEQ_START_TOKEN) {
seq_puts(seq, "Interface Address Networks "
"Status\n");
goto out;
}
iface = v;
seq_printf(seq, "%-16s %04X:%02X %04X-%04X %d\n",
iface->dev->name, ntohs(iface->address.s_net),
iface->address.s_node, ntohs(iface->nets.nr_firstnet),
ntohs(iface->nets.nr_lastnet), iface->status);
out:
return 0;
}
static __inline__ struct atalk_route *atalk_get_route_idx(loff_t pos)
{
struct atalk_route *r;
for (r = atalk_routes; pos && r; r = r->next)
--pos;
return r;
}
static void *atalk_seq_route_start(struct seq_file *seq, loff_t *pos)
__acquires(atalk_routes_lock)
{
loff_t l = *pos;
read_lock_bh(&atalk_routes_lock);
return l ? atalk_get_route_idx(--l) : SEQ_START_TOKEN;
}
static void *atalk_seq_route_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct atalk_route *r;
++*pos;
if (v == SEQ_START_TOKEN) {
r = NULL;
if (atalk_routes)
r = atalk_routes;
goto out;
}
r = v;
r = r->next;
out:
return r;
}
static void atalk_seq_route_stop(struct seq_file *seq, void *v)
__releases(atalk_routes_lock)
{
read_unlock_bh(&atalk_routes_lock);
}
static int atalk_seq_route_show(struct seq_file *seq, void *v)
{
struct atalk_route *rt;
if (v == SEQ_START_TOKEN) {
seq_puts(seq, "Target Router Flags Dev\n");
goto out;
}
if (atrtr_default.dev) {
rt = &atrtr_default;
seq_printf(seq, "Default %04X:%02X %-4d %s\n",
ntohs(rt->gateway.s_net), rt->gateway.s_node,
rt->flags, rt->dev->name);
}
rt = v;
seq_printf(seq, "%04X:%02X %04X:%02X %-4d %s\n",
ntohs(rt->target.s_net), rt->target.s_node,
ntohs(rt->gateway.s_net), rt->gateway.s_node,
rt->flags, rt->dev->name);
out:
return 0;
}
static void *atalk_seq_socket_start(struct seq_file *seq, loff_t *pos)
__acquires(atalk_sockets_lock)
{
read_lock_bh(&atalk_sockets_lock);
return seq_hlist_start_head(&atalk_sockets, *pos);
}
static void *atalk_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_hlist_next(v, &atalk_sockets, pos);
}
static void atalk_seq_socket_stop(struct seq_file *seq, void *v)
__releases(atalk_sockets_lock)
{
read_unlock_bh(&atalk_sockets_lock);
}
static int atalk_seq_socket_show(struct seq_file *seq, void *v)
{
struct sock *s;
struct atalk_sock *at;
if (v == SEQ_START_TOKEN) {
seq_printf(seq, "Type Local_addr Remote_addr Tx_queue "
"Rx_queue St UID\n");
goto out;
}
s = sk_entry(v);
at = at_sk(s);
seq_printf(seq, "%02X %04X:%02X:%02X %04X:%02X:%02X %08X:%08X "
"%02X %u\n",
s->sk_type, ntohs(at->src_net), at->src_node, at->src_port,
ntohs(at->dest_net), at->dest_node, at->dest_port,
sk_wmem_alloc_get(s),
sk_rmem_alloc_get(s),
s->sk_state,
from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)));
out:
return 0;
}
static const struct seq_operations atalk_seq_interface_ops = {
.start = atalk_seq_interface_start,
.next = atalk_seq_interface_next,
.stop = atalk_seq_interface_stop,
.show = atalk_seq_interface_show,
};
static const struct seq_operations atalk_seq_route_ops = {
.start = atalk_seq_route_start,
.next = atalk_seq_route_next,
.stop = atalk_seq_route_stop,
.show = atalk_seq_route_show,
};
static const struct seq_operations atalk_seq_socket_ops = {
.start = atalk_seq_socket_start,
.next = atalk_seq_socket_next,
.stop = atalk_seq_socket_stop,
.show = atalk_seq_socket_show,
};
static int atalk_seq_interface_open(struct inode *inode, struct file *file)
{
return seq_open(file, &atalk_seq_interface_ops);
}
static int atalk_seq_route_open(struct inode *inode, struct file *file)
{
return seq_open(file, &atalk_seq_route_ops);
}
static int atalk_seq_socket_open(struct inode *inode, struct file *file)
{
return seq_open(file, &atalk_seq_socket_ops);
}
static const struct file_operations atalk_seq_interface_fops = {
.owner = THIS_MODULE,
.open = atalk_seq_interface_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct file_operations atalk_seq_route_fops = {
.owner = THIS_MODULE,
.open = atalk_seq_route_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct file_operations atalk_seq_socket_fops = {
.owner = THIS_MODULE,
.open = atalk_seq_socket_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static struct proc_dir_entry *atalk_proc_dir;
int __init atalk_proc_init(void)
{
struct proc_dir_entry *p;
int rc = -ENOMEM;
atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
if (!atalk_proc_dir)
goto out;
p = proc_create("interface", S_IRUGO, atalk_proc_dir,
&atalk_seq_interface_fops);
if (!p)
goto out_interface;
p = proc_create("route", S_IRUGO, atalk_proc_dir,
&atalk_seq_route_fops);
if (!p)
goto out_route;
p = proc_create("socket", S_IRUGO, atalk_proc_dir,
&atalk_seq_socket_fops);
if (!p)
goto out_socket;
p = proc_create("arp", S_IRUGO, atalk_proc_dir, &atalk_seq_arp_fops);
if (!p)
goto out_arp;
rc = 0;
out:
return rc;
out_arp:
remove_proc_entry("socket", atalk_proc_dir);
out_socket:
remove_proc_entry("route", atalk_proc_dir);
out_route:
remove_proc_entry("interface", atalk_proc_dir);
out_interface:
remove_proc_entry("atalk", init_net.proc_net);
goto out;
}
void __exit atalk_proc_exit(void)
{
remove_proc_entry("interface", atalk_proc_dir);
remove_proc_entry("route", atalk_proc_dir);
remove_proc_entry("socket", atalk_proc_dir);
remove_proc_entry("arp", atalk_proc_dir);
remove_proc_entry("atalk", init_net.proc_net);
}
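For reference, a successful atalk_proc_init() leaves four read-only files under /proc/net/atalk/, removed again by atalk_proc_exit():
/proc/net/atalk/interface
/proc/net/atalk/route
/proc/net/atalk/socket
/proc/net/atalk/arp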

1966
net/appletalk/ddp.c Normal file

File diff suppressed because it is too large

45
net/appletalk/dev.c Normal file
View file

@ -0,0 +1,45 @@
/*
* Moved here from drivers/net/net_init.c, which is:
* Written 1993,1994,1995 by Donald Becker.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ltalk.h>
static void ltalk_setup(struct net_device *dev)
{
/* Fill in the fields of the device structure with localtalk-generic values. */
dev->type = ARPHRD_LOCALTLK;
dev->hard_header_len = LTALK_HLEN;
dev->mtu = LTALK_MTU;
dev->addr_len = LTALK_ALEN;
dev->tx_queue_len = 10;
dev->broadcast[0] = 0xFF;
dev->flags = IFF_BROADCAST|IFF_MULTICAST|IFF_NOARP;
}
/**
* alloc_ltalkdev - Allocates and sets up a localtalk device
* @sizeof_priv: Size of additional driver-private structure to be allocated
* for this localtalk device
*
* Fill in the fields of the device structure with localtalk-generic
* values. Basically does everything except registering the device.
*
* Constructs a new net device, complete with a private data area of
* size @sizeof_priv. A 32-byte (not bit) alignment is enforced for
* this private data area.
*/
struct net_device *alloc_ltalkdev(int sizeof_priv)
{
return alloc_netdev(sizeof_priv, "lt%d", NET_NAME_UNKNOWN,
ltalk_setup);
}
EXPORT_SYMBOL(alloc_ltalkdev);
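A minimal sketch of how a LocalTalk driver would consume this helper (editorial addition; example_priv and example_probe are hypothetical):
#include <linux/netdevice.h>
struct example_priv {
	int irq;		/* hypothetical driver state */
};
static int example_probe(void)
{
	struct net_device *dev;
	int err;
	dev = alloc_ltalkdev(sizeof(struct example_priv));
	if (!dev)
		return -ENOMEM;
	/* ... initialise netdev_priv(dev) before going live ... */
	err = register_netdev(dev);
	if (err)
		free_netdev(dev);
	return err;
}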

55
net/appletalk/sysctl_net_atalk.c Normal file
View file

@ -0,0 +1,55 @@
/*
* sysctl_net_atalk.c: sysctl interface to net AppleTalk subsystem.
*
* Begun April 1, 1996, Mike Shaver.
* Added /proc/sys/net/atalk directory entry (empty =) ). [MS]
* Dynamic registration, added aarp entries. (5/30/97 Chris Horn)
*/
#include <linux/sysctl.h>
#include <net/sock.h>
#include <linux/atalk.h>
static struct ctl_table atalk_table[] = {
{
.procname = "aarp-expiry-time",
.data = &sysctl_aarp_expiry_time,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "aarp-tick-time",
.data = &sysctl_aarp_tick_time,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "aarp-retransmit-limit",
.data = &sysctl_aarp_retransmit_limit,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "aarp-resolve-time",
.data = &sysctl_aarp_resolve_time,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{ },
};
static struct ctl_table_header *atalk_table_header;
void atalk_register_sysctl(void)
{
atalk_table_header = register_net_sysctl(&init_net, "net/appletalk", atalk_table);
}
void atalk_unregister_sysctl(void)
{
unregister_net_sysctl_table(atalk_table_header);
}
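Once registered, the four knobs appear under /proc/sys/net/appletalk/. The entries handled by proc_dointvec_jiffies are exposed in seconds and converted to jiffies internally, so, for example, echo 300 > /proc/sys/net/appletalk/aarp-expiry-time sets the AARP entry expiry to five minutes.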

73
net/atm/Kconfig Normal file
View file

@ -0,0 +1,73 @@
#
# Asynchronous Transfer Mode (ATM)
#
config ATM
tristate "Asynchronous Transfer Mode (ATM)"
---help---
ATM is a high-speed networking technology for Local Area Networks
and Wide Area Networks. It uses a fixed packet size and is
connection oriented, allowing for the negotiation of minimum
bandwidth requirements.
In order to participate in an ATM network, your Linux box needs an
ATM networking card. If you have that, say Y here and to the driver
of your ATM card below.
Note that you need a set of user-space programs to actually make use
of ATM. See the file <file:Documentation/networking/atm.txt> for
further details.
config ATM_CLIP
tristate "Classical IP over ATM"
depends on ATM && INET
help
Classical IP over ATM for PVCs and SVCs, supporting InARP and
ATMARP. If you want to communicate with other IP hosts on your ATM
network, you will typically either say Y here or to "LAN Emulation
(LANE)" below.
config ATM_CLIP_NO_ICMP
bool "Do NOT send ICMP if no neighbour"
depends on ATM_CLIP
help
Normally, an "ICMP host unreachable" message is sent if a neighbour
cannot be reached because there is no VC to it in the kernel's
ATMARP table. This may cause problems when ATMARP table entries are
briefly removed during revalidation. If you say Y here, packets to
such neighbours are silently discarded instead.
config ATM_LANE
tristate "LAN Emulation (LANE) support"
depends on ATM
help
LAN Emulation emulates services of existing LANs across an ATM
network. Besides operating as a normal ATM end station client, the
Linux LANE client can also act as a proxy client, bridging packets
between ELAN and Ethernet segments. You need LANE if you want to try
MPOA.
config ATM_MPOA
tristate "Multi-Protocol Over ATM (MPOA) support"
depends on ATM && INET && ATM_LANE!=n
help
Multi-Protocol Over ATM allows ATM edge devices such as routers,
bridges and ATM-attached hosts to establish direct ATM VCs across
subnetwork boundaries. These shortcut connections bypass routers,
enhancing overall network performance.
config ATM_BR2684
tristate "RFC1483/2684 Bridged protocols"
depends on ATM && INET
help
ATM PVCs can carry Ethernet PDUs according to RFC2684 (formerly 1483).
This device will act like an Ethernet from the kernel's point of view,
with the traffic being carried by ATM PVCs (currently 1 PVC/device).
This is sometimes used over DSL lines. If in doubt, say N.
config ATM_BR2684_IPFILTER
bool "Per-VC IP filter kludge"
depends on ATM_BR2684
help
This is an experimental mechanism for users who need to terminate a
large number of IP-only VCCs. Do not enable this unless you are sure
you know what you are doing.

15
net/atm/Makefile Normal file
View file

@ -0,0 +1,15 @@
#
# Makefile for the ATM Protocol Families.
#
atm-y := addr.o pvc.o signaling.o svc.o ioctl.o common.o atm_misc.o raw.o resources.o atm_sysfs.o
mpoa-objs := mpc.o mpoa_caches.o mpoa_proc.o
obj-$(CONFIG_ATM) += atm.o
obj-$(CONFIG_ATM_CLIP) += clip.o
obj-$(CONFIG_ATM_BR2684) += br2684.o
atm-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_ATM_LANE) += lec.o
obj-$(CONFIG_ATM_MPOA) += mpoa.o
obj-$(CONFIG_PPPOATM) += pppoatm.o

161
net/atm/addr.c Normal file
View file

@ -0,0 +1,161 @@
/* net/atm/addr.c - Local ATM address registry */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "signaling.h"
#include "addr.h"
static int check_addr(const struct sockaddr_atmsvc *addr)
{
int i;
if (addr->sas_family != AF_ATMSVC)
return -EAFNOSUPPORT;
if (!*addr->sas_addr.pub)
return *addr->sas_addr.prv ? 0 : -EINVAL;
for (i = 1; i < ATM_E164_LEN + 1; i++) /* make sure it's \0-terminated */
if (!addr->sas_addr.pub[i])
return 0;
return -EINVAL;
}
static int identical(const struct sockaddr_atmsvc *a, const struct sockaddr_atmsvc *b)
{
if (*a->sas_addr.prv)
if (memcmp(a->sas_addr.prv, b->sas_addr.prv, ATM_ESA_LEN))
return 0;
if (!*a->sas_addr.pub)
return !*b->sas_addr.pub;
if (!*b->sas_addr.pub)
return 0;
return !strcmp(a->sas_addr.pub, b->sas_addr.pub);
}
static void notify_sigd(const struct atm_dev *dev)
{
struct sockaddr_atmpvc pvc;
pvc.sap_addr.itf = dev->number;
sigd_enq(NULL, as_itf_notify, NULL, &pvc, NULL);
}
void atm_reset_addr(struct atm_dev *dev, enum atm_addr_type_t atype)
{
unsigned long flags;
struct atm_dev_addr *this, *p;
struct list_head *head;
spin_lock_irqsave(&dev->lock, flags);
if (atype == ATM_ADDR_LECS)
head = &dev->lecs;
else
head = &dev->local;
list_for_each_entry_safe(this, p, head, entry) {
list_del(&this->entry);
kfree(this);
}
spin_unlock_irqrestore(&dev->lock, flags);
if (head == &dev->local)
notify_sigd(dev);
}
int atm_add_addr(struct atm_dev *dev, const struct sockaddr_atmsvc *addr,
enum atm_addr_type_t atype)
{
unsigned long flags;
struct atm_dev_addr *this;
struct list_head *head;
int error;
error = check_addr(addr);
if (error)
return error;
spin_lock_irqsave(&dev->lock, flags);
if (atype == ATM_ADDR_LECS)
head = &dev->lecs;
else
head = &dev->local;
list_for_each_entry(this, head, entry) {
if (identical(&this->addr, addr)) {
spin_unlock_irqrestore(&dev->lock, flags);
return -EEXIST;
}
}
this = kmalloc(sizeof(struct atm_dev_addr), GFP_ATOMIC);
if (!this) {
spin_unlock_irqrestore(&dev->lock, flags);
return -ENOMEM;
}
this->addr = *addr;
list_add(&this->entry, head);
spin_unlock_irqrestore(&dev->lock, flags);
if (head == &dev->local)
notify_sigd(dev);
return 0;
}
int atm_del_addr(struct atm_dev *dev, const struct sockaddr_atmsvc *addr,
enum atm_addr_type_t atype)
{
unsigned long flags;
struct atm_dev_addr *this;
struct list_head *head;
int error;
error = check_addr(addr);
if (error)
return error;
spin_lock_irqsave(&dev->lock, flags);
if (atype == ATM_ADDR_LECS)
head = &dev->lecs;
else
head = &dev->local;
list_for_each_entry(this, head, entry) {
if (identical(&this->addr, addr)) {
list_del(&this->entry);
spin_unlock_irqrestore(&dev->lock, flags);
kfree(this);
if (head == &dev->local)
notify_sigd(dev);
return 0;
}
}
spin_unlock_irqrestore(&dev->lock, flags);
return -ENOENT;
}
int atm_get_addr(struct atm_dev *dev, struct sockaddr_atmsvc __user * buf,
size_t size, enum atm_addr_type_t atype)
{
unsigned long flags;
struct atm_dev_addr *this;
struct list_head *head;
int total = 0, error;
struct sockaddr_atmsvc *tmp_buf, *tmp_bufp;
spin_lock_irqsave(&dev->lock, flags);
if (atype == ATM_ADDR_LECS)
head = &dev->lecs;
else
head = &dev->local;
list_for_each_entry(this, head, entry)
total += sizeof(struct sockaddr_atmsvc);
tmp_buf = tmp_bufp = kmalloc(total, GFP_ATOMIC);
if (!tmp_buf) {
spin_unlock_irqrestore(&dev->lock, flags);
return -ENOMEM;
}
list_for_each_entry(this, head, entry)
memcpy(tmp_bufp++, &this->addr, sizeof(struct sockaddr_atmsvc));
spin_unlock_irqrestore(&dev->lock, flags);
error = total > size ? -E2BIG : total;
if (copy_to_user(buf, tmp_buf, total < size ? total : size))
error = -EFAULT;
kfree(tmp_buf);
return error;
}

20
net/atm/addr.h Normal file
View file

@ -0,0 +1,20 @@
/* net/atm/addr.h - Local ATM address registry */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#ifndef NET_ATM_ADDR_H
#define NET_ATM_ADDR_H
#include <linux/atm.h>
#include <linux/atmdev.h>
void atm_reset_addr(struct atm_dev *dev, enum atm_addr_type_t type);
int atm_add_addr(struct atm_dev *dev, const struct sockaddr_atmsvc *addr,
enum atm_addr_type_t type);
int atm_del_addr(struct atm_dev *dev, const struct sockaddr_atmsvc *addr,
enum atm_addr_type_t type);
int atm_get_addr(struct atm_dev *dev, struct sockaddr_atmsvc __user *buf,
size_t size, enum atm_addr_type_t type);
#endif

101
net/atm/atm_misc.c Normal file
View file

@ -0,0 +1,101 @@
/* net/atm/atm_misc.c - Various functions for use by ATM drivers */
/* Written 1995-2000 by Werner Almesberger, EPFL ICA */
#include <linux/module.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/skbuff.h>
#include <linux/sonet.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/atomic.h>
int atm_charge(struct atm_vcc *vcc, int truesize)
{
atm_force_charge(vcc, truesize);
if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
return 1;
atm_return(vcc, truesize);
atomic_inc(&vcc->stats->rx_drop);
return 0;
}
EXPORT_SYMBOL(atm_charge);
struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
gfp_t gfp_flags)
{
struct sock *sk = sk_atm(vcc);
int guess = SKB_TRUESIZE(pdu_size);
atm_force_charge(vcc, guess);
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
struct sk_buff *skb = alloc_skb(pdu_size, gfp_flags);
if (skb) {
atomic_add(skb->truesize-guess,
&sk->sk_rmem_alloc);
return skb;
}
}
atm_return(vcc, guess);
atomic_inc(&vcc->stats->rx_drop);
return NULL;
}
EXPORT_SYMBOL(atm_alloc_charge);
/*
* atm_pcr_goal returns the positive PCR if it should be rounded up, the
* negative PCR if it should be rounded down, and zero if the maximum available
* bandwidth should be used.
*
* The rules are as follows (* = maximum, - = absent (0), x = value "x",
* x+ = x or next value above x, x- = x or next value below):
*
* min  max  pcr  result          min  max  pcr  result
*  -    -    -   * (UBR only)     x    -    -    x+
*  -    -    *   *                x    -    *    *
*  -    -    z   z-               x    -    z    z-
*  -    *    -   *                x    *    -    x+
*  -    *    *   *                x    *    *    *
*  -    *    z   z-               x    *    z    z-
*  -    y    -   y-               x    y    -    x+
*  -    y    *   y-               x    y    *    y-
*  -    y    z   z-               x    y    z    z-
*
* All non-error cases can be converted with the following simple set of rules:
*
* if pcr == z then z-
* else if min == x && pcr == - then x+
* else if max == y then y-
* else *
*/
int atm_pcr_goal(const struct atm_trafprm *tp)
{
if (tp->pcr && tp->pcr != ATM_MAX_PCR)
return -tp->pcr;
if (tp->min_pcr && !tp->pcr)
return tp->min_pcr;
if (tp->max_pcr != ATM_MAX_PCR)
return -tp->max_pcr;
return 0;
}
EXPORT_SYMBOL(atm_pcr_goal);
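/*
 * Worked examples (editorial note; values illustrative):
 *   min_pcr = 1000, pcr = 0, max_pcr = ATM_MAX_PCR  ->   1000  (round up)
 *   min_pcr = 0,    pcr = 0, max_pcr = 20000        -> -20000  (round down)
 *   min_pcr = 0,    pcr = 0, max_pcr = ATM_MAX_PCR  ->      0  (use maximum)
 */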
void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
{
#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
__SONET_ITEMS
#undef __HANDLE_ITEM
}
EXPORT_SYMBOL(sonet_copy_stats);
void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
{
#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
__SONET_ITEMS
#undef __HANDLE_ITEM
}
EXPORT_SYMBOL(sonet_subtract_stats);

190
net/atm/atm_sysfs.c Normal file
View file

@ -0,0 +1,190 @@
/* ATM driver model support. */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/atmdev.h>
#include "common.h"
#include "resources.h"
#define to_atm_dev(cldev) container_of(cldev, struct atm_dev, class_dev)
static ssize_t show_type(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct atm_dev *adev = to_atm_dev(cdev);
return scnprintf(buf, PAGE_SIZE, "%s\n", adev->type);
}
static ssize_t show_address(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct atm_dev *adev = to_atm_dev(cdev);
return scnprintf(buf, PAGE_SIZE, "%pM\n", adev->esi);
}
static ssize_t show_atmaddress(struct device *cdev,
struct device_attribute *attr, char *buf)
{
unsigned long flags;
struct atm_dev *adev = to_atm_dev(cdev);
struct atm_dev_addr *aaddr;
int bin[] = { 1, 2, 10, 6, 1 }, *fmt = bin;
int i, j, count = 0;
spin_lock_irqsave(&adev->lock, flags);
list_for_each_entry(aaddr, &adev->local, entry) {
for (i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) {
if (j == *fmt) {
count += scnprintf(buf + count,
PAGE_SIZE - count, ".");
++fmt;
j = 0;
}
count += scnprintf(buf + count,
PAGE_SIZE - count, "%02x",
aaddr->addr.sas_addr.prv[i]);
}
count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
}
spin_unlock_irqrestore(&adev->lock, flags);
return count;
}
static ssize_t show_atmindex(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct atm_dev *adev = to_atm_dev(cdev);
return scnprintf(buf, PAGE_SIZE, "%d\n", adev->number);
}
static ssize_t show_carrier(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct atm_dev *adev = to_atm_dev(cdev);
return scnprintf(buf, PAGE_SIZE, "%d\n",
adev->signal == ATM_PHY_SIG_LOST ? 0 : 1);
}
static ssize_t show_link_rate(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct atm_dev *adev = to_atm_dev(cdev);
int link_rate;
/* show the link rate, not the data rate */
switch (adev->link_rate) {
case ATM_OC3_PCR:
link_rate = 155520000;
break;
case ATM_OC12_PCR:
link_rate = 622080000;
break;
case ATM_25_PCR:
link_rate = 25600000;
break;
default:
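/* adev->link_rate is in cells per second; an ATM cell is 53 bytes,
 * hence the conversion below to bits per second. */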
link_rate = adev->link_rate * 8 * 53;
}
return scnprintf(buf, PAGE_SIZE, "%d\n", link_rate);
}
static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
static DEVICE_ATTR(atmaddress, S_IRUGO, show_atmaddress, NULL);
static DEVICE_ATTR(atmindex, S_IRUGO, show_atmindex, NULL);
static DEVICE_ATTR(carrier, S_IRUGO, show_carrier, NULL);
static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL);
static struct device_attribute *atm_attrs[] = {
&dev_attr_atmaddress,
&dev_attr_address,
&dev_attr_atmindex,
&dev_attr_carrier,
&dev_attr_type,
&dev_attr_link_rate,
NULL
};
static int atm_uevent(struct device *cdev, struct kobj_uevent_env *env)
{
struct atm_dev *adev;
if (!cdev)
return -ENODEV;
adev = to_atm_dev(cdev);
if (!adev)
return -ENODEV;
if (add_uevent_var(env, "NAME=%s%d", adev->type, adev->number))
return -ENOMEM;
return 0;
}
static void atm_release(struct device *cdev)
{
struct atm_dev *adev = to_atm_dev(cdev);
kfree(adev);
}
static struct class atm_class = {
.name = "atm",
.dev_release = atm_release,
.dev_uevent = atm_uevent,
};
int atm_register_sysfs(struct atm_dev *adev, struct device *parent)
{
struct device *cdev = &adev->class_dev;
int i, j, err;
cdev->class = &atm_class;
cdev->parent = parent;
dev_set_drvdata(cdev, adev);
dev_set_name(cdev, "%s%d", adev->type, adev->number);
err = device_register(cdev);
if (err < 0)
return err;
for (i = 0; atm_attrs[i]; i++) {
err = device_create_file(cdev, atm_attrs[i]);
if (err)
goto err_out;
}
return 0;
err_out:
for (j = 0; j < i; j++)
device_remove_file(cdev, atm_attrs[j]);
device_del(cdev);
return err;
}
void atm_unregister_sysfs(struct atm_dev *adev)
{
struct device *cdev = &adev->class_dev;
device_del(cdev);
}
int __init atm_sysfs_init(void)
{
return class_register(&atm_class);
}
void __exit atm_sysfs_exit(void)
{
class_unregister(&atm_class);
}
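After atm_register_sysfs() succeeds, each adapter appears as /sys/class/atm/<type><N>/ carrying the six read-only attributes defined above: address, atmaddress, atmindex, carrier, type and link_rate.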

886
net/atm/br2684.c Normal file
View file

@ -0,0 +1,886 @@
/*
* Ethernet netdevice using ATM AAL5 as underlying carrier
* (RFC1483 obsoleted by RFC2684) for Linux
*
* Authors: Marcell GAL, 2000, XDSL Ltd, Hungary
* Eric Kinzie, 2006-2007, US Naval Research Laboratory
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/ip.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/atmbr2684.h>
#include "common.h"
static void skb_debug(const struct sk_buff *skb)
{
#ifdef SKB_DEBUG
#define NUM2PRINT 50
print_hex_dump(KERN_DEBUG, "br2684: skb: ", DUMP_PREFIX_OFFSET,
16, 1, skb->data, min(NUM2PRINT, skb->len), true);
#endif
}
#define BR2684_ETHERTYPE_LEN 2
#define BR2684_PAD_LEN 2
#define LLC 0xaa, 0xaa, 0x03
#define SNAP_BRIDGED 0x00, 0x80, 0xc2
#define SNAP_ROUTED 0x00, 0x00, 0x00
#define PID_ETHERNET 0x00, 0x07
#define ETHERTYPE_IPV4 0x08, 0x00
#define ETHERTYPE_IPV6 0x86, 0xdd
#define PAD_BRIDGED 0x00, 0x00
static const unsigned char ethertype_ipv4[] = { ETHERTYPE_IPV4 };
static const unsigned char ethertype_ipv6[] = { ETHERTYPE_IPV6 };
static const unsigned char llc_oui_pid_pad[] =
{ LLC, SNAP_BRIDGED, PID_ETHERNET, PAD_BRIDGED };
static const unsigned char pad[] = { PAD_BRIDGED };
static const unsigned char llc_oui_ipv4[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV4 };
static const unsigned char llc_oui_ipv6[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV6 };
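/* Editorial note -- the resulting on-the-wire byte sequences are:
 *   llc_oui_pid_pad: aa aa 03 00 80 c2 00 07 00 00  (bridged Ethernet:
 *                    LLC/SNAP + PID + 2-byte PAD)
 *   llc_oui_ipv4:    aa aa 03 00 00 00 08 00        (routed IPv4)
 *   llc_oui_ipv6:    aa aa 03 00 00 00 86 dd        (routed IPv6)
 */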
enum br2684_encaps {
e_vc = BR2684_ENCAPS_VC,
e_llc = BR2684_ENCAPS_LLC,
};
struct br2684_vcc {
struct atm_vcc *atmvcc;
struct net_device *device;
/* keep old push, pop functions for chaining */
void (*old_push)(struct atm_vcc *vcc, struct sk_buff *skb);
void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb);
void (*old_release_cb)(struct atm_vcc *vcc);
struct module *old_owner;
enum br2684_encaps encaps;
struct list_head brvccs;
#ifdef CONFIG_ATM_BR2684_IPFILTER
struct br2684_filter filter;
#endif /* CONFIG_ATM_BR2684_IPFILTER */
unsigned int copies_needed, copies_failed;
atomic_t qspace;
};
struct br2684_dev {
struct net_device *net_dev;
struct list_head br2684_devs;
int number;
struct list_head brvccs; /* one device <=> one vcc (before xmas) */
int mac_was_set;
enum br2684_payload payload;
};
/*
* This lock should be held for writing any time the list of devices or
* their attached vcc's could be altered. It should be held for reading
* any time these are being queried. Note that we sometimes need to
* do read-locking under interrupt context, so write locking must block
* the current CPU's interrupts
*/
static DEFINE_RWLOCK(devs_lock);
static LIST_HEAD(br2684_devs);
static inline struct br2684_dev *BRPRIV(const struct net_device *net_dev)
{
return netdev_priv(net_dev);
}
static inline struct net_device *list_entry_brdev(const struct list_head *le)
{
return list_entry(le, struct br2684_dev, br2684_devs)->net_dev;
}
static inline struct br2684_vcc *BR2684_VCC(const struct atm_vcc *atmvcc)
{
return (struct br2684_vcc *)(atmvcc->user_back);
}
static inline struct br2684_vcc *list_entry_brvcc(const struct list_head *le)
{
return list_entry(le, struct br2684_vcc, brvccs);
}
/* Caller should hold read_lock(&devs_lock) */
static struct net_device *br2684_find_dev(const struct br2684_if_spec *s)
{
struct list_head *lh;
struct net_device *net_dev;
switch (s->method) {
case BR2684_FIND_BYNUM:
list_for_each(lh, &br2684_devs) {
net_dev = list_entry_brdev(lh);
if (BRPRIV(net_dev)->number == s->spec.devnum)
return net_dev;
}
break;
case BR2684_FIND_BYIFNAME:
list_for_each(lh, &br2684_devs) {
net_dev = list_entry_brdev(lh);
if (!strncmp(net_dev->name, s->spec.ifname, IFNAMSIZ))
return net_dev;
}
break;
}
return NULL;
}
static int atm_dev_event(struct notifier_block *this, unsigned long event,
void *arg)
{
struct atm_dev *atm_dev = arg;
struct list_head *lh;
struct net_device *net_dev;
struct br2684_vcc *brvcc;
struct atm_vcc *atm_vcc;
unsigned long flags;
pr_debug("event=%ld dev=%p\n", event, atm_dev);
read_lock_irqsave(&devs_lock, flags);
list_for_each(lh, &br2684_devs) {
net_dev = list_entry_brdev(lh);
list_for_each_entry(brvcc, &BRPRIV(net_dev)->brvccs, brvccs) {
atm_vcc = brvcc->atmvcc;
if (atm_vcc && brvcc->atmvcc->dev == atm_dev) {
if (atm_vcc->dev->signal == ATM_PHY_SIG_LOST)
netif_carrier_off(net_dev);
else
netif_carrier_on(net_dev);
}
}
}
read_unlock_irqrestore(&devs_lock, flags);
return NOTIFY_DONE;
}
static struct notifier_block atm_dev_notifier = {
.notifier_call = atm_dev_event,
};
/* chained vcc->pop function. Check if we should wake the netif_queue */
static void br2684_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct br2684_vcc *brvcc = BR2684_VCC(vcc);
pr_debug("(vcc %p ; net_dev %p )\n", vcc, brvcc->device);
brvcc->old_pop(vcc, skb);
/* If the queue space just went up from zero, wake */
if (atomic_inc_return(&brvcc->qspace) == 1)
netif_wake_queue(brvcc->device);
}
/*
* Send a packet out a particular vcc. Not too useful right now, but paves
* the way for multiple vcc's per itf. Returns true if we can send,
* otherwise false
*/
static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
struct br2684_vcc *brvcc)
{
struct br2684_dev *brdev = BRPRIV(dev);
struct atm_vcc *atmvcc;
int minheadroom = (brvcc->encaps == e_llc) ?
((brdev->payload == p_bridged) ?
sizeof(llc_oui_pid_pad) : sizeof(llc_oui_ipv4)) :
((brdev->payload == p_bridged) ? BR2684_PAD_LEN : 0);
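/* Editorial note -- required headroom per configuration:
 *   LLC + bridged: 10 bytes (LLC/SNAP + PID + 2-byte PAD)
 *   LLC + routed:   8 bytes (LLC/SNAP + EtherType)
 *   VC  + bridged:  2 bytes (PAD only)
 *   VC  + routed:   0 bytes
 */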
if (skb_headroom(skb) < minheadroom) {
struct sk_buff *skb2 = skb_realloc_headroom(skb, minheadroom);
brvcc->copies_needed++;
dev_kfree_skb(skb);
if (skb2 == NULL) {
brvcc->copies_failed++;
return 0;
}
skb = skb2;
}
if (brvcc->encaps == e_llc) {
if (brdev->payload == p_bridged) {
skb_push(skb, sizeof(llc_oui_pid_pad));
skb_copy_to_linear_data(skb, llc_oui_pid_pad,
sizeof(llc_oui_pid_pad));
} else if (brdev->payload == p_routed) {
unsigned short prot = ntohs(skb->protocol);
skb_push(skb, sizeof(llc_oui_ipv4));
switch (prot) {
case ETH_P_IP:
skb_copy_to_linear_data(skb, llc_oui_ipv4,
sizeof(llc_oui_ipv4));
break;
case ETH_P_IPV6:
skb_copy_to_linear_data(skb, llc_oui_ipv6,
sizeof(llc_oui_ipv6));
break;
default:
dev_kfree_skb(skb);
return 0;
}
}
} else { /* e_vc */
if (brdev->payload == p_bridged) {
skb_push(skb, 2);
memset(skb->data, 0, 2);
}
}
skb_debug(skb);
ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
atomic_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
ATM_SKB(skb)->atm_options = atmvcc->atm_options;
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
if (atomic_dec_return(&brvcc->qspace) < 1) {
/* No more please! */
netif_stop_queue(brvcc->device);
/* We might have raced with br2684_pop() */
if (unlikely(atomic_read(&brvcc->qspace) > 0))
netif_wake_queue(brvcc->device);
}
/* If this fails immediately, the skb will be freed and br2684_pop()
will wake the queue if appropriate. Just return an error so that
the stats are updated correctly */
return !atmvcc->send(atmvcc, skb);
}
static void br2684_release_cb(struct atm_vcc *atmvcc)
{
struct br2684_vcc *brvcc = BR2684_VCC(atmvcc);
if (atomic_read(&brvcc->qspace) > 0)
netif_wake_queue(brvcc->device);
if (brvcc->old_release_cb)
brvcc->old_release_cb(atmvcc);
}
static inline struct br2684_vcc *pick_outgoing_vcc(const struct sk_buff *skb,
const struct br2684_dev *brdev)
{
return list_empty(&brdev->brvccs) ? NULL : list_entry_brvcc(brdev->brvccs.next); /* 1 vcc/dev right now */
}
static netdev_tx_t br2684_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct br2684_dev *brdev = BRPRIV(dev);
struct br2684_vcc *brvcc;
struct atm_vcc *atmvcc;
netdev_tx_t ret = NETDEV_TX_OK;
pr_debug("skb_dst(skb)=%p\n", skb_dst(skb));
read_lock(&devs_lock);
brvcc = pick_outgoing_vcc(skb, brdev);
if (brvcc == NULL) {
pr_debug("no vcc attached to dev %s\n", dev->name);
dev->stats.tx_errors++;
dev->stats.tx_carrier_errors++;
/* netif_stop_queue(dev); */
dev_kfree_skb(skb);
goto out_devs;
}
atmvcc = brvcc->atmvcc;
bh_lock_sock(sk_atm(atmvcc));
if (test_bit(ATM_VF_RELEASED, &atmvcc->flags) ||
test_bit(ATM_VF_CLOSE, &atmvcc->flags) ||
!test_bit(ATM_VF_READY, &atmvcc->flags)) {
dev->stats.tx_dropped++;
dev_kfree_skb(skb);
goto out;
}
if (sock_owned_by_user(sk_atm(atmvcc))) {
netif_stop_queue(brvcc->device);
ret = NETDEV_TX_BUSY;
goto out;
}
if (!br2684_xmit_vcc(skb, dev, brvcc)) {
/*
* We should probably use netif_*_queue() here, but that
* involves added complication. We need to walk before
* we can run.
*
* Don't free here! this pointer might be no longer valid!
*/
dev->stats.tx_errors++;
dev->stats.tx_fifo_errors++;
}
out:
bh_unlock_sock(sk_atm(atmvcc));
out_devs:
read_unlock(&devs_lock);
return ret;
}
/*
* We remember when the MAC gets set, so we don't override it later with
* the ESI of the ATM card of the first VC
*/
static int br2684_mac_addr(struct net_device *dev, void *p)
{
int err = eth_mac_addr(dev, p);
if (!err)
BRPRIV(dev)->mac_was_set = 1;
return err;
}
#ifdef CONFIG_ATM_BR2684_IPFILTER
/* this IOCTL is experimental. */
static int br2684_setfilt(struct atm_vcc *atmvcc, void __user * arg)
{
struct br2684_vcc *brvcc;
struct br2684_filter_set fs;
if (copy_from_user(&fs, arg, sizeof fs))
return -EFAULT;
if (fs.ifspec.method != BR2684_FIND_BYNOTHING) {
/*
* This is really a per-vcc thing, but we can also search
* by device.
*/
struct br2684_dev *brdev;
read_lock(&devs_lock);
brdev = BRPRIV(br2684_find_dev(&fs.ifspec));
if (brdev == NULL || list_empty(&brdev->brvccs) ||
brdev->brvccs.next != brdev->brvccs.prev) /* >1 VCC */
brvcc = NULL;
else
brvcc = list_entry_brvcc(brdev->brvccs.next);
read_unlock(&devs_lock);
if (brvcc == NULL)
return -ESRCH;
} else
brvcc = BR2684_VCC(atmvcc);
memcpy(&brvcc->filter, &fs.filter, sizeof(brvcc->filter));
return 0;
}
/* Returns 1 if packet should be dropped */
static inline int
packet_fails_filter(__be16 type, struct br2684_vcc *brvcc, struct sk_buff *skb)
{
if (brvcc->filter.netmask == 0)
return 0; /* no filter in place */
if (type == htons(ETH_P_IP) &&
(((struct iphdr *)(skb->data))->daddr & brvcc->filter.
netmask) == brvcc->filter.prefix)
return 0;
if (type == htons(ETH_P_ARP))
return 0;
/*
* TODO: we should probably filter ARPs too.. don't want to have
* them returning values that don't make sense, or is that ok?
*/
return 1; /* drop */
}
#endif /* CONFIG_ATM_BR2684_IPFILTER */
static void br2684_close_vcc(struct br2684_vcc *brvcc)
{
pr_debug("removing VCC %p from dev %p\n", brvcc, brvcc->device);
write_lock_irq(&devs_lock);
list_del(&brvcc->brvccs);
write_unlock_irq(&devs_lock);
brvcc->atmvcc->user_back = NULL; /* what about vcc->recvq ??? */
brvcc->atmvcc->release_cb = brvcc->old_release_cb;
brvcc->old_push(brvcc->atmvcc, NULL); /* pass on the bad news */
module_put(brvcc->old_owner);
kfree(brvcc);
}
/* when AAL5 PDU comes in: */
static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
{
struct br2684_vcc *brvcc = BR2684_VCC(atmvcc);
struct net_device *net_dev = brvcc->device;
struct br2684_dev *brdev = BRPRIV(net_dev);
pr_debug("\n");
if (unlikely(skb == NULL)) {
/* skb==NULL means VCC is being destroyed */
br2684_close_vcc(brvcc);
if (list_empty(&brdev->brvccs)) {
write_lock_irq(&devs_lock);
list_del(&brdev->br2684_devs);
write_unlock_irq(&devs_lock);
unregister_netdev(net_dev);
free_netdev(net_dev);
}
return;
}
skb_debug(skb);
atm_return(atmvcc, skb->truesize);
pr_debug("skb from brdev %p\n", brdev);
if (brvcc->encaps == e_llc) {
if (skb->len > 7 && skb->data[7] == 0x01)
__skb_trim(skb, skb->len - 4);
/* accept packets that have "ipv[46]" in the snap header */
if ((skb->len >= (sizeof(llc_oui_ipv4))) &&
(memcmp(skb->data, llc_oui_ipv4,
sizeof(llc_oui_ipv4) - BR2684_ETHERTYPE_LEN) == 0)) {
if (memcmp(skb->data + 6, ethertype_ipv6,
sizeof(ethertype_ipv6)) == 0)
skb->protocol = htons(ETH_P_IPV6);
else if (memcmp(skb->data + 6, ethertype_ipv4,
sizeof(ethertype_ipv4)) == 0)
skb->protocol = htons(ETH_P_IP);
else
goto error;
skb_pull(skb, sizeof(llc_oui_ipv4));
skb_reset_network_header(skb);
skb->pkt_type = PACKET_HOST;
/*
* Let us waste some time checking the encapsulation. Note that
* only 7 chars are checked, so frames with a valid FCS are also
* accepted (but the FCS itself is not checked, of course).
*/
} else if ((skb->len >= sizeof(llc_oui_pid_pad)) &&
(memcmp(skb->data, llc_oui_pid_pad, 7) == 0)) {
skb_pull(skb, sizeof(llc_oui_pid_pad));
skb->protocol = eth_type_trans(skb, net_dev);
} else
goto error;
} else { /* e_vc */
if (brdev->payload == p_routed) {
struct iphdr *iph;
skb_reset_network_header(skb);
iph = ip_hdr(skb);
if (iph->version == 4)
skb->protocol = htons(ETH_P_IP);
else if (iph->version == 6)
skb->protocol = htons(ETH_P_IPV6);
else
goto error;
skb->pkt_type = PACKET_HOST;
} else { /* p_bridged */
/* first 2 chars should be 0 */
if (memcmp(skb->data, pad, BR2684_PAD_LEN) != 0)
goto error;
skb_pull(skb, BR2684_PAD_LEN);
skb->protocol = eth_type_trans(skb, net_dev);
}
}
#ifdef CONFIG_ATM_BR2684_IPFILTER
if (unlikely(packet_fails_filter(skb->protocol, brvcc, skb)))
goto dropped;
#endif /* CONFIG_ATM_BR2684_IPFILTER */
skb->dev = net_dev;
ATM_SKB(skb)->vcc = atmvcc; /* needed ? */
pr_debug("received packet's protocol: %x\n", ntohs(skb->protocol));
skb_debug(skb);
/* sigh, interface is down? */
if (unlikely(!(net_dev->flags & IFF_UP)))
goto dropped;
net_dev->stats.rx_packets++;
net_dev->stats.rx_bytes += skb->len;
memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
netif_rx(skb);
return;
dropped:
net_dev->stats.rx_dropped++;
goto free_skb;
error:
net_dev->stats.rx_errors++;
free_skb:
dev_kfree_skb(skb);
}
/*
* Assign a vcc to a dev
* Note: we do not have explicit unassign, but look at _push()
*/
static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
{
struct br2684_vcc *brvcc;
struct br2684_dev *brdev;
struct net_device *net_dev;
struct atm_backend_br2684 be;
int err;
if (copy_from_user(&be, arg, sizeof be))
return -EFAULT;
brvcc = kzalloc(sizeof(struct br2684_vcc), GFP_KERNEL);
if (!brvcc)
return -ENOMEM;
/*
* Allow two packets in the ATM queue. One actually being sent, and one
* for the ATM 'TX done' handler to send. It shouldn't take long to get
* the next one from the netdev queue, when we need it. More than that
* would be bufferbloat.
*/
atomic_set(&brvcc->qspace, 2);
write_lock_irq(&devs_lock);
net_dev = br2684_find_dev(&be.ifspec);
if (net_dev == NULL) {
pr_err("tried to attach to non-existent device\n");
err = -ENXIO;
goto error;
}
brdev = BRPRIV(net_dev);
if (atmvcc->push == NULL) {
err = -EBADFD;
goto error;
}
if (!list_empty(&brdev->brvccs)) {
/* Only 1 VCC/dev right now */
err = -EEXIST;
goto error;
}
if (be.fcs_in != BR2684_FCSIN_NO ||
be.fcs_out != BR2684_FCSOUT_NO ||
be.fcs_auto || be.has_vpiid || be.send_padding ||
(be.encaps != BR2684_ENCAPS_VC &&
be.encaps != BR2684_ENCAPS_LLC) ||
be.min_size != 0) {
err = -EINVAL;
goto error;
}
pr_debug("vcc=%p, encaps=%d, brvcc=%p\n", atmvcc, be.encaps, brvcc);
if (list_empty(&brdev->brvccs) && !brdev->mac_was_set) {
unsigned char *esi = atmvcc->dev->esi;
if (esi[0] | esi[1] | esi[2] | esi[3] | esi[4] | esi[5])
memcpy(net_dev->dev_addr, esi, net_dev->addr_len);
else
net_dev->dev_addr[2] = 1;
}
list_add(&brvcc->brvccs, &brdev->brvccs);
write_unlock_irq(&devs_lock);
brvcc->device = net_dev;
brvcc->atmvcc = atmvcc;
atmvcc->user_back = brvcc;
brvcc->encaps = (enum br2684_encaps)be.encaps;
brvcc->old_push = atmvcc->push;
brvcc->old_pop = atmvcc->pop;
brvcc->old_release_cb = atmvcc->release_cb;
brvcc->old_owner = atmvcc->owner;
barrier();
atmvcc->push = br2684_push;
atmvcc->pop = br2684_pop;
atmvcc->release_cb = br2684_release_cb;
atmvcc->owner = THIS_MODULE;
/* initialize netdev carrier state */
if (atmvcc->dev->signal == ATM_PHY_SIG_LOST)
netif_carrier_off(net_dev);
else
netif_carrier_on(net_dev);
__module_get(THIS_MODULE);
/* re-process everything received between connection setup and
backend setup */
vcc_process_recv_queue(atmvcc);
return 0;
error:
write_unlock_irq(&devs_lock);
kfree(brvcc);
return err;
}
static const struct net_device_ops br2684_netdev_ops = {
.ndo_start_xmit = br2684_start_xmit,
.ndo_set_mac_address = br2684_mac_addr,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
static const struct net_device_ops br2684_netdev_ops_routed = {
.ndo_start_xmit = br2684_start_xmit,
.ndo_set_mac_address = br2684_mac_addr,
.ndo_change_mtu = eth_change_mtu
};
static void br2684_setup(struct net_device *netdev)
{
struct br2684_dev *brdev = BRPRIV(netdev);
ether_setup(netdev);
netdev->hard_header_len += sizeof(llc_oui_pid_pad); /* worst case */
brdev->net_dev = netdev;
netdev->netdev_ops = &br2684_netdev_ops;
INIT_LIST_HEAD(&brdev->brvccs);
}
static void br2684_setup_routed(struct net_device *netdev)
{
struct br2684_dev *brdev = BRPRIV(netdev);
brdev->net_dev = netdev;
netdev->hard_header_len = sizeof(llc_oui_ipv4); /* worst case */
netdev->netdev_ops = &br2684_netdev_ops_routed;
netdev->addr_len = 0;
netdev->mtu = 1500;
netdev->type = ARPHRD_PPP;
netdev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
netdev->tx_queue_len = 100;
INIT_LIST_HEAD(&brdev->brvccs);
}
static int br2684_create(void __user *arg)
{
int err;
struct net_device *netdev;
struct br2684_dev *brdev;
struct atm_newif_br2684 ni;
enum br2684_payload payload;
pr_debug("\n");
if (copy_from_user(&ni, arg, sizeof ni))
return -EFAULT;
if (ni.media & BR2684_FLAG_ROUTED)
payload = p_routed;
else
payload = p_bridged;
ni.media &= 0xffff; /* strip flags */
if (ni.media != BR2684_MEDIA_ETHERNET || ni.mtu != 1500)
return -EINVAL;
netdev = alloc_netdev(sizeof(struct br2684_dev),
ni.ifname[0] ? ni.ifname : "nas%d",
NET_NAME_UNKNOWN,
(payload == p_routed) ? br2684_setup_routed : br2684_setup);
if (!netdev)
return -ENOMEM;
brdev = BRPRIV(netdev);
pr_debug("registered netdev %s\n", netdev->name);
/* open, stop, do_ioctl ? */
err = register_netdev(netdev);
if (err < 0) {
pr_err("register_netdev failed\n");
free_netdev(netdev);
return err;
}
write_lock_irq(&devs_lock);
brdev->payload = payload;
if (list_empty(&br2684_devs)) {
/* 1st br2684 device */
brdev->number = 1;
} else
brdev->number = BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1;
list_add_tail(&brdev->br2684_devs, &br2684_devs);
write_unlock_irq(&devs_lock);
return 0;
}
/*
* This handles ioctls actually performed on our vcc - we must return
* -ENOIOCTLCMD for any unrecognized ioctl
*/
static int br2684_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
struct atm_vcc *atmvcc = ATM_SD(sock);
void __user *argp = (void __user *)arg;
atm_backend_t b;
int err;
switch (cmd) {
case ATM_SETBACKEND:
case ATM_NEWBACKENDIF:
err = get_user(b, (atm_backend_t __user *) argp);
if (err)
return -EFAULT;
if (b != ATM_BACKEND_BR2684)
return -ENOIOCTLCMD;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (cmd == ATM_SETBACKEND) {
if (sock->state != SS_CONNECTED)
return -EINVAL;
return br2684_regvcc(atmvcc, argp);
} else {
return br2684_create(argp);
}
#ifdef CONFIG_ATM_BR2684_IPFILTER
case BR2684_SETFILT:
if (atmvcc->push != br2684_push)
return -ENOIOCTLCMD;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
err = br2684_setfilt(atmvcc, argp);
return err;
#endif /* CONFIG_ATM_BR2684_IPFILTER */
}
return -ENOIOCTLCMD;
}
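/* For orientation, a user-space sketch (in the spirit of br2684ctl) of the
 * ATM_NEWBACKENDIF case handled above. Field names follow linux/atmbr2684.h
 * as best remembered and should be treated as assumptions. Note that
 * br2684_create() only accepts BR2684_MEDIA_ETHERNET with an MTU of 1500.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/atmdev.h>	/* ATM_NEWBACKENDIF, ATM_BACKEND_BR2684 */
#include <linux/atmbr2684.h>

static int create_nas0_sketch(int atm_fd)
{
	struct atm_newif_br2684 ni;

	memset(&ni, 0, sizeof(ni));
	ni.backend_num = ATM_BACKEND_BR2684;	/* checked in br2684_ioctl() */
	ni.media = BR2684_MEDIA_ETHERNET;	/* bridged payload */
	ni.mtu = 1500;				/* the only MTU accepted */
	strcpy(ni.ifname, "nas0");
	return ioctl(atm_fd, ATM_NEWBACKENDIF, &ni);
}
/* Attaching a connected PVC then goes through ATM_SETBACKEND with a
 * struct atm_backend_br2684 whose FCS and padding options must all be
 * off, as br2684_regvcc() enforces above.
 */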
static struct atm_ioctl br2684_ioctl_ops = {
.owner = THIS_MODULE,
.ioctl = br2684_ioctl,
};
#ifdef CONFIG_PROC_FS
static void *br2684_seq_start(struct seq_file *seq, loff_t * pos)
__acquires(devs_lock)
{
read_lock(&devs_lock);
return seq_list_start(&br2684_devs, *pos);
}
static void *br2684_seq_next(struct seq_file *seq, void *v, loff_t * pos)
{
return seq_list_next(v, &br2684_devs, pos);
}
static void br2684_seq_stop(struct seq_file *seq, void *v)
__releases(devs_lock)
{
read_unlock(&devs_lock);
}
static int br2684_seq_show(struct seq_file *seq, void *v)
{
const struct br2684_dev *brdev = list_entry(v, struct br2684_dev,
br2684_devs);
const struct net_device *net_dev = brdev->net_dev;
const struct br2684_vcc *brvcc;
seq_printf(seq, "dev %.16s: num=%d, mac=%pM (%s)\n",
net_dev->name,
brdev->number,
net_dev->dev_addr,
brdev->mac_was_set ? "set" : "auto");
list_for_each_entry(brvcc, &brdev->brvccs, brvccs) {
seq_printf(seq, " vcc %d.%d.%d: encaps=%s payload=%s"
", failed copies %u/%u"
"\n", brvcc->atmvcc->dev->number,
brvcc->atmvcc->vpi, brvcc->atmvcc->vci,
(brvcc->encaps == e_llc) ? "LLC" : "VC",
(brdev->payload == p_bridged) ? "bridged" : "routed",
brvcc->copies_failed, brvcc->copies_needed);
#ifdef CONFIG_ATM_BR2684_IPFILTER
#define b1(var, byte) ((u8 *) &brvcc->filter.var)[byte]
#define bs(var) b1(var, 0), b1(var, 1), b1(var, 2), b1(var, 3)
if (brvcc->filter.netmask != 0)
seq_printf(seq, " filter=%d.%d.%d.%d/"
"%d.%d.%d.%d\n", bs(prefix), bs(netmask));
#undef bs
#undef b1
#endif /* CONFIG_ATM_BR2684_IPFILTER */
}
return 0;
}
static const struct seq_operations br2684_seq_ops = {
.start = br2684_seq_start,
.next = br2684_seq_next,
.stop = br2684_seq_stop,
.show = br2684_seq_show,
};
static int br2684_proc_open(struct inode *inode, struct file *file)
{
return seq_open(file, &br2684_seq_ops);
}
static const struct file_operations br2684_proc_ops = {
.owner = THIS_MODULE,
.open = br2684_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
extern struct proc_dir_entry *atm_proc_root; /* from proc.c */
#endif /* CONFIG_PROC_FS */
static int __init br2684_init(void)
{
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *p;
p = proc_create("br2684", 0, atm_proc_root, &br2684_proc_ops);
if (p == NULL)
return -ENOMEM;
#endif
register_atm_ioctl(&br2684_ioctl_ops);
register_atmdevice_notifier(&atm_dev_notifier);
return 0;
}
static void __exit br2684_exit(void)
{
struct net_device *net_dev;
struct br2684_dev *brdev;
struct br2684_vcc *brvcc;
deregister_atm_ioctl(&br2684_ioctl_ops);
#ifdef CONFIG_PROC_FS
remove_proc_entry("br2684", atm_proc_root);
#endif
unregister_atmdevice_notifier(&atm_dev_notifier);
while (!list_empty(&br2684_devs)) {
net_dev = list_entry_brdev(br2684_devs.next);
brdev = BRPRIV(net_dev);
while (!list_empty(&brdev->brvccs)) {
brvcc = list_entry_brvcc(brdev->brvccs.next);
br2684_close_vcc(brvcc);
}
list_del(&brdev->br2684_devs);
unregister_netdev(net_dev);
free_netdev(net_dev);
}
}
module_init(br2684_init);
module_exit(br2684_exit);
MODULE_AUTHOR("Marcell GAL");
MODULE_DESCRIPTION("RFC2684 bridged protocols over ATM/AAL5");
MODULE_LICENSE("GPL");

939
net/atm/clip.c Normal file
View file

@@ -0,0 +1,939 @@
/* net/atm/clip.c - RFC1577 Classical IP over ATM */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h> /* for UINT_MAX */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/if_arp.h> /* for some manifest constants */
#include <linux/notifier.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/atmclip.h>
#include <linux/atmarp.h>
#include <linux/capability.h>
#include <linux/ip.h> /* for net/route.h */
#include <linux/in.h> /* for struct sockaddr_in */
#include <linux/if.h> /* for IFF_UP */
#include <linux/inetdevice.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <net/route.h> /* for struct rtable and routing */
#include <net/icmp.h> /* icmp_send */
#include <net/arp.h>
#include <linux/param.h> /* for HZ */
#include <linux/uaccess.h>
#include <asm/byteorder.h> /* for htons etc. */
#include <linux/atomic.h>
#include "common.h"
#include "resources.h"
#include <net/atmclip.h>
static struct net_device *clip_devs;
static struct atm_vcc *atmarpd;
static struct timer_list idle_timer;
static const struct neigh_ops clip_neigh_ops;
static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
{
struct sock *sk;
struct atmarp_ctrl *ctrl;
struct sk_buff *skb;
pr_debug("(%d)\n", type);
if (!atmarpd)
return -EUNATCH;
skb = alloc_skb(sizeof(struct atmarp_ctrl), GFP_ATOMIC);
if (!skb)
return -ENOMEM;
ctrl = (struct atmarp_ctrl *)skb_put(skb, sizeof(struct atmarp_ctrl));
ctrl->type = type;
ctrl->itf_num = itf;
ctrl->ip = ip;
atm_force_charge(atmarpd, skb->truesize);
sk = sk_atm(atmarpd);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk);
return 0;
}
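/* Everything the kernel cannot resolve on its own is shipped to the
 * atmarpd daemon as one fixed-size struct atmarp_ctrl (linux/atmarp.h)
 * queued on the control VCC. A hedged sketch of the daemon-side loop,
 * assuming ctrl_fd is the socket registered via ATMARPD_CTRL:
 */
#include <unistd.h>
#include <linux/atmarp.h>	/* struct atmarp_ctrl, act_* */

static void atmarpd_loop_sketch(int ctrl_fd)
{
	struct atmarp_ctrl ctrl;

	while (read(ctrl_fd, &ctrl, sizeof(ctrl)) == sizeof(ctrl)) {
		switch (ctrl.type) {
		case act_need:	/* resolve ctrl.ip on itf ctrl.itf_num */
		case act_up:
		case act_down:
		case act_change:
			break;	/* daemon policy goes here */
		}
	}
}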
static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry)
{
pr_debug("%p to entry %p (neigh %p)\n", clip_vcc, entry, entry->neigh);
clip_vcc->entry = entry;
clip_vcc->xoff = 0; /* @@@ may overrun buffer by one packet */
clip_vcc->next = entry->vccs;
entry->vccs = clip_vcc;
entry->neigh->used = jiffies;
}
static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
{
struct atmarp_entry *entry = clip_vcc->entry;
struct clip_vcc **walk;
if (!entry) {
pr_crit("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
return;
}
netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */
entry->neigh->used = jiffies;
for (walk = &entry->vccs; *walk; walk = &(*walk)->next)
if (*walk == clip_vcc) {
int error;
*walk = clip_vcc->next; /* atomic */
clip_vcc->entry = NULL;
if (clip_vcc->xoff)
netif_wake_queue(entry->neigh->dev);
if (entry->vccs)
goto out;
entry->expires = jiffies - 1;
/* force resolution or expiration */
error = neigh_update(entry->neigh, NULL, NUD_NONE,
NEIGH_UPDATE_F_ADMIN);
if (error)
pr_crit("neigh_update failed with %d\n", error);
goto out;
}
pr_crit("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc);
out:
netif_tx_unlock_bh(entry->neigh->dev);
}
/* The neighbour entry n->lock is held. */
static int neigh_check_cb(struct neighbour *n)
{
struct atmarp_entry *entry = neighbour_priv(n);
struct clip_vcc *cv;
if (n->ops != &clip_neigh_ops)
return 0;
for (cv = entry->vccs; cv; cv = cv->next) {
unsigned long exp = cv->last_use + cv->idle_timeout;
if (cv->idle_timeout && time_after(jiffies, exp)) {
pr_debug("releasing vcc %p->%p of entry %p\n",
cv, cv->vcc, entry);
vcc_release_async(cv->vcc, -ETIMEDOUT);
}
}
if (entry->vccs || time_before(jiffies, entry->expires))
return 0;
if (atomic_read(&n->refcnt) > 1) {
struct sk_buff *skb;
pr_debug("destruction postponed with ref %d\n",
atomic_read(&n->refcnt));
while ((skb = skb_dequeue(&n->arp_queue)) != NULL)
dev_kfree_skb(skb);
return 0;
}
pr_debug("expired neigh %p\n", n);
return 1;
}
static void idle_timer_check(unsigned long dummy)
{
write_lock(&arp_tbl.lock);
__neigh_for_each_release(&arp_tbl, neigh_check_cb);
mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ);
write_unlock(&arp_tbl.lock);
}
static int clip_arp_rcv(struct sk_buff *skb)
{
struct atm_vcc *vcc;
pr_debug("\n");
vcc = ATM_SKB(skb)->vcc;
if (!vcc || !atm_charge(vcc, skb->truesize)) {
dev_kfree_skb_any(skb);
return 0;
}
pr_debug("pushing to %p\n", vcc);
pr_debug("using %p\n", CLIP_VCC(vcc)->old_push);
CLIP_VCC(vcc)->old_push(vcc, skb);
return 0;
}
static const unsigned char llc_oui[] = {
0xaa, /* DSAP: non-ISO */
0xaa, /* SSAP: non-ISO */
0x03, /* Ctrl: Unnumbered Information Command PDU */
0x00, /* OUI: EtherType */
0x00,
0x00
};
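/* clip_push() below reads the EtherType as halfword 3 of the frame, i.e.
 * the two bytes right after this 6-byte prefix. Laid out as a struct
 * (illustrative only -- the driver never declares one, and the name is
 * ours), the full RFC1483LLC_LEN == 8 byte header is:
 */
struct rfc1483_llc_snap_sketch {
	u8	dsap;		/* 0xaa: non-ISO */
	u8	ssap;		/* 0xaa: non-ISO */
	u8	ctrl;		/* 0x03: UI frame */
	u8	oui[3];		/* 00-00-00: EtherType follows */
	__be16	ethertype;	/* e.g. htons(ETH_P_IP) */
} __packed;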
static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
pr_debug("\n");
if (!skb) {
pr_debug("removing VCC %p\n", clip_vcc);
if (clip_vcc->entry)
unlink_clip_vcc(clip_vcc);
clip_vcc->old_push(vcc, NULL); /* pass on the bad news */
kfree(clip_vcc);
return;
}
/* Check for the close notification (NULL skb) first: the
* skb->truesize below would otherwise dereference a NULL pointer
* when a VCC is torn down while no CLIP device exists.
*/
if (!clip_devs) {
atm_return(vcc, skb->truesize);
kfree_skb(skb);
return;
}
atm_return(vcc, skb->truesize);
skb->dev = clip_vcc->entry ? clip_vcc->entry->neigh->dev : clip_devs;
/* clip_vcc->entry == NULL if we don't have an IP address yet */
if (!skb->dev) {
dev_kfree_skb_any(skb);
return;
}
ATM_SKB(skb)->vcc = vcc;
skb_reset_mac_header(skb);
if (!clip_vcc->encap ||
skb->len < RFC1483LLC_LEN ||
memcmp(skb->data, llc_oui, sizeof(llc_oui)))
skb->protocol = htons(ETH_P_IP);
else {
skb->protocol = ((__be16 *)skb->data)[3];
skb_pull(skb, RFC1483LLC_LEN);
if (skb->protocol == htons(ETH_P_ARP)) {
skb->dev->stats.rx_packets++;
skb->dev->stats.rx_bytes += skb->len;
clip_arp_rcv(skb);
return;
}
}
clip_vcc->last_use = jiffies;
skb->dev->stats.rx_packets++;
skb->dev->stats.rx_bytes += skb->len;
memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
netif_rx(skb);
}
/*
* Note: these spinlocks _must_not_ block on non-SMP. The only goal is that
* clip_pop is atomic with respect to the critical section in clip_start_xmit.
*/
static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
struct net_device *dev = skb->dev;
int old;
unsigned long flags;
pr_debug("(vcc %p)\n", vcc);
clip_vcc->old_pop(vcc, skb);
/* skb->dev == NULL in outbound ARP packets */
if (!dev)
return;
spin_lock_irqsave(&PRIV(dev)->xoff_lock, flags);
if (atm_may_send(vcc, 0)) {
old = xchg(&clip_vcc->xoff, 0);
if (old)
netif_wake_queue(dev);
}
spin_unlock_irqrestore(&PRIV(dev)->xoff_lock, flags);
}
static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb)
{
__be32 *ip = (__be32 *) neigh->primary_key;
pr_debug("(neigh %p, skb %p)\n", neigh, skb);
to_atmarpd(act_need, PRIV(neigh->dev)->number, *ip);
}
static void clip_neigh_error(struct neighbour *neigh, struct sk_buff *skb)
{
#ifndef CONFIG_ATM_CLIP_NO_ICMP
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
#endif
kfree_skb(skb);
}
static const struct neigh_ops clip_neigh_ops = {
.family = AF_INET,
.solicit = clip_neigh_solicit,
.error_report = clip_neigh_error,
.output = neigh_direct_output,
.connected_output = neigh_direct_output,
};
static int clip_constructor(struct neighbour *neigh)
{
struct atmarp_entry *entry = neighbour_priv(neigh);
if (neigh->tbl->family != AF_INET)
return -EINVAL;
if (neigh->type != RTN_UNICAST)
return -EINVAL;
neigh->nud_state = NUD_NONE;
neigh->ops = &clip_neigh_ops;
neigh->output = neigh->ops->output;
entry->neigh = neigh;
entry->vccs = NULL;
entry->expires = jiffies - 1;
return 0;
}
/* @@@ copy bh locking from arp.c -- need to bh-enable atm code before */
/*
* We play with the resolve flag: 0 and 1 have the usual meaning, but -1 means
* to allocate the neighbour entry but not to ask atmarpd for resolution. Also,
* don't increment the usage count. This is used to create entries in
* clip_setentry.
*/
static int clip_encap(struct atm_vcc *vcc, int mode)
{
CLIP_VCC(vcc)->encap = mode;
return 0;
}
static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct clip_priv *clip_priv = PRIV(dev);
struct dst_entry *dst = skb_dst(skb);
struct atmarp_entry *entry;
struct neighbour *n;
struct atm_vcc *vcc;
struct rtable *rt;
__be32 *daddr;
int old;
unsigned long flags;
pr_debug("(skb %p)\n", skb);
if (!dst) {
pr_err("skb_dst(skb) == NULL\n");
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
rt = (struct rtable *) dst;
if (rt->rt_gateway)
daddr = &rt->rt_gateway;
else
daddr = &ip_hdr(skb)->daddr;
n = dst_neigh_lookup(dst, daddr);
if (!n) {
pr_err("NO NEIGHBOUR !\n");
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
entry = neighbour_priv(n);
if (!entry->vccs) {
if (time_after(jiffies, entry->expires)) {
/* should be resolved */
entry->expires = jiffies + ATMARP_RETRY_DELAY * HZ;
to_atmarpd(act_need, PRIV(dev)->number, *((__be32 *)n->primary_key));
}
if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS)
skb_queue_tail(&entry->neigh->arp_queue, skb);
else {
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
}
goto out_release_neigh;
}
pr_debug("neigh %p, vccs %p\n", entry, entry->vccs);
ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
pr_debug("using neighbour %p, vcc %p\n", n, vcc);
if (entry->vccs->encap) {
void *here;
here = skb_push(skb, RFC1483LLC_LEN);
memcpy(here, llc_oui, sizeof(llc_oui));
((__be16 *) here)[3] = skb->protocol;
}
atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
ATM_SKB(skb)->atm_options = vcc->atm_options;
entry->vccs->last_use = jiffies;
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
if (old) {
pr_warn("XOFF->XOFF transition\n");
goto out_release_neigh;
}
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
vcc->send(vcc, skb);
if (atm_may_send(vcc, 0)) {
entry->vccs->xoff = 0;
goto out_release_neigh;
}
spin_lock_irqsave(&clip_priv->xoff_lock, flags);
netif_stop_queue(dev); /* XOFF -> throttle immediately */
barrier();
if (!entry->vccs->xoff)
netif_start_queue(dev);
/* Oh, we just raced with clip_pop. netif_start_queue should be
good enough, because nothing should really be asleep because
of the brief netif_stop_queue. If this isn't true or if it
changes, use netif_wake_queue instead. */
spin_unlock_irqrestore(&clip_priv->xoff_lock, flags);
out_release_neigh:
neigh_release(n);
return NETDEV_TX_OK;
}
static int clip_mkip(struct atm_vcc *vcc, int timeout)
{
struct clip_vcc *clip_vcc;
if (!vcc->push)
return -EBADFD;
clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL);
if (!clip_vcc)
return -ENOMEM;
pr_debug("%p vcc %p\n", clip_vcc, vcc);
clip_vcc->vcc = vcc;
vcc->user_back = clip_vcc;
set_bit(ATM_VF_IS_CLIP, &vcc->flags);
clip_vcc->entry = NULL;
clip_vcc->xoff = 0;
clip_vcc->encap = 1;
clip_vcc->last_use = jiffies;
clip_vcc->idle_timeout = timeout * HZ;
clip_vcc->old_push = vcc->push;
clip_vcc->old_pop = vcc->pop;
vcc->push = clip_push;
vcc->pop = clip_pop;
/* re-process everything received between connection setup and MKIP */
vcc_process_recv_queue(vcc);
return 0;
}
static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
{
struct neighbour *neigh;
struct atmarp_entry *entry;
int error;
struct clip_vcc *clip_vcc;
struct rtable *rt;
if (vcc->push != clip_push) {
pr_warn("non-CLIP VCC\n");
return -EBADF;
}
clip_vcc = CLIP_VCC(vcc);
if (!ip) {
if (!clip_vcc->entry) {
pr_err("hiding hidden ATMARP entry\n");
return 0;
}
pr_debug("remove\n");
unlink_clip_vcc(clip_vcc);
return 0;
}
rt = ip_route_output(&init_net, ip, 0, 1, 0);
if (IS_ERR(rt))
return PTR_ERR(rt);
neigh = __neigh_lookup(&arp_tbl, &ip, rt->dst.dev, 1);
ip_rt_put(rt);
if (!neigh)
return -ENOMEM;
entry = neighbour_priv(neigh);
if (entry != clip_vcc->entry) {
if (!clip_vcc->entry)
pr_debug("add\n");
else {
pr_debug("update\n");
unlink_clip_vcc(clip_vcc);
}
link_vcc(clip_vcc, entry);
}
error = neigh_update(neigh, llc_oui, NUD_PERMANENT,
NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
neigh_release(neigh);
return error;
}
static const struct net_device_ops clip_netdev_ops = {
.ndo_start_xmit = clip_start_xmit,
.ndo_neigh_construct = clip_constructor,
};
static void clip_setup(struct net_device *dev)
{
dev->netdev_ops = &clip_netdev_ops;
dev->type = ARPHRD_ATM;
dev->neigh_priv_len = sizeof(struct atmarp_entry);
dev->hard_header_len = RFC1483LLC_LEN;
dev->mtu = RFC1626_MTU;
dev->tx_queue_len = 100; /* "normal" queue (packets) */
/* When using a "real" qdisc, the qdisc determines the queue */
/* length. tx_queue_len is only used for the default case, */
/* without any more elaborate queuing. 100 is a reasonable */
/* compromise between decent burst-tolerance and protection */
/* against memory hogs. */
netif_keep_dst(dev);
}
static int clip_create(int number)
{
struct net_device *dev;
struct clip_priv *clip_priv;
int error;
if (number != -1) {
for (dev = clip_devs; dev; dev = PRIV(dev)->next)
if (PRIV(dev)->number == number)
return -EEXIST;
} else {
number = 0;
for (dev = clip_devs; dev; dev = PRIV(dev)->next)
if (PRIV(dev)->number >= number)
number = PRIV(dev)->number + 1;
}
dev = alloc_netdev(sizeof(struct clip_priv), "", NET_NAME_UNKNOWN,
clip_setup);
if (!dev)
return -ENOMEM;
clip_priv = PRIV(dev);
sprintf(dev->name, "atm%d", number);
spin_lock_init(&clip_priv->xoff_lock);
clip_priv->number = number;
error = register_netdev(dev);
if (error) {
free_netdev(dev);
return error;
}
clip_priv->next = clip_devs;
clip_devs = dev;
pr_debug("registered (net:%s)\n", dev->name);
return number;
}
static int clip_device_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
if (event == NETDEV_UNREGISTER)
return NOTIFY_DONE;
/* ignore non-CLIP devices */
if (dev->type != ARPHRD_ATM || dev->netdev_ops != &clip_netdev_ops)
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
pr_debug("NETDEV_UP\n");
to_atmarpd(act_up, PRIV(dev)->number, 0);
break;
case NETDEV_GOING_DOWN:
pr_debug("NETDEV_DOWN\n");
to_atmarpd(act_down, PRIV(dev)->number, 0);
break;
case NETDEV_CHANGE:
case NETDEV_CHANGEMTU:
pr_debug("NETDEV_CHANGE*\n");
to_atmarpd(act_change, PRIV(dev)->number, 0);
break;
}
return NOTIFY_DONE;
}
static int clip_inet_event(struct notifier_block *this, unsigned long event,
void *ifa)
{
struct in_device *in_dev;
struct netdev_notifier_info info;
in_dev = ((struct in_ifaddr *)ifa)->ifa_dev;
/*
* Transitions are of the down-change-up type, so it's sufficient to
* handle the change on up.
*/
if (event != NETDEV_UP)
return NOTIFY_DONE;
netdev_notifier_info_init(&info, in_dev->dev);
return clip_device_event(this, NETDEV_CHANGE, &info);
}
static struct notifier_block clip_dev_notifier = {
.notifier_call = clip_device_event,
};
static struct notifier_block clip_inet_notifier = {
.notifier_call = clip_inet_event,
};
static void atmarpd_close(struct atm_vcc *vcc)
{
pr_debug("\n");
rtnl_lock();
atmarpd = NULL;
skb_queue_purge(&sk_atm(vcc)->sk_receive_queue);
rtnl_unlock();
pr_debug("(done)\n");
module_put(THIS_MODULE);
}
static struct atmdev_ops atmarpd_dev_ops = {
.close = atmarpd_close
};
static struct atm_dev atmarpd_dev = {
.ops = &atmarpd_dev_ops,
.type = "arpd",
.number = 999,
.lock = __SPIN_LOCK_UNLOCKED(atmarpd_dev.lock)
};
static int atm_init_atmarp(struct atm_vcc *vcc)
{
rtnl_lock();
if (atmarpd) {
rtnl_unlock();
return -EADDRINUSE;
}
mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ);
atmarpd = vcc;
set_bit(ATM_VF_META, &vcc->flags);
set_bit(ATM_VF_READY, &vcc->flags);
/* allow replies and avoid getting closed if signaling dies */
vcc->dev = &atmarpd_dev;
vcc_insert_socket(sk_atm(vcc));
vcc->push = NULL;
vcc->pop = NULL; /* crash */
vcc->push_oam = NULL; /* crash */
rtnl_unlock();
return 0;
}
static int clip_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct atm_vcc *vcc = ATM_SD(sock);
int err = 0;
switch (cmd) {
case SIOCMKCLIP:
case ATMARPD_CTRL:
case ATMARP_MKIP:
case ATMARP_SETENTRY:
case ATMARP_ENCAP:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
break;
default:
return -ENOIOCTLCMD;
}
switch (cmd) {
case SIOCMKCLIP:
err = clip_create(arg);
break;
case ATMARPD_CTRL:
err = atm_init_atmarp(vcc);
if (!err) {
sock->state = SS_CONNECTED;
__module_get(THIS_MODULE);
}
break;
case ATMARP_MKIP:
err = clip_mkip(vcc, arg);
break;
case ATMARP_SETENTRY:
err = clip_setentry(vcc, (__force __be32)arg);
break;
case ATMARP_ENCAP:
err = clip_encap(vcc, arg);
break;
}
return err;
}
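/* The cases above are the entire user-space contract for CLIP. A hedged
 * sketch of how atmarpd-style code would drive them (error handling
 * trimmed; the ip argument is expected in network byte order):
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/atmclip.h>	/* SIOCMKCLIP */
#include <linux/atmarp.h>	/* ATMARP_MKIP, ATMARP_SETENTRY */

static int clip_attach_sketch(int pvc_fd, uint32_t ip, int idle_secs)
{
	int itf = ioctl(pvc_fd, SIOCMKCLIP, -1);	/* -1: next free atmN */

	if (itf < 0)
		return itf;
	if (ioctl(pvc_fd, ATMARP_MKIP, idle_secs) < 0)	/* -> clip_mkip() */
		return -1;
	return ioctl(pvc_fd, ATMARP_SETENTRY, ip);	/* -> clip_setentry() */
}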
static struct atm_ioctl clip_ioctl_ops = {
.owner = THIS_MODULE,
.ioctl = clip_ioctl,
};
#ifdef CONFIG_PROC_FS
static void svc_addr(struct seq_file *seq, struct sockaddr_atmsvc *addr)
{
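/* Byte-group widths for printing the 20-octet private (NSAP) part:
 * DCC-style by default, E.164-prefixed when the AFI says so; the
 * trailing 0 terminates the list.
 */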
static int code[] = { 1, 2, 10, 6, 1, 0 };
static int e164[] = { 1, 8, 4, 6, 1, 0 };
if (*addr->sas_addr.pub) {
seq_printf(seq, "%s", addr->sas_addr.pub);
if (*addr->sas_addr.prv)
seq_putc(seq, '+');
} else if (!*addr->sas_addr.prv) {
seq_printf(seq, "%s", "(none)");
return;
}
if (*addr->sas_addr.prv) {
unsigned char *prv = addr->sas_addr.prv;
int *fields;
int i, j;
fields = *prv == ATM_AFI_E164 ? e164 : code;
for (i = 0; fields[i]; i++) {
for (j = fields[i]; j; j--)
seq_printf(seq, "%02X", *prv++);
if (fields[i + 1])
seq_putc(seq, '.');
}
}
}
/* This means the neighbour entry has no attached VCC objects. */
#define SEQ_NO_VCC_TOKEN ((void *) 2)
static void atmarp_info(struct seq_file *seq, struct neighbour *n,
struct atmarp_entry *entry, struct clip_vcc *clip_vcc)
{
struct net_device *dev = n->dev;
unsigned long exp;
char buf[17];
int svc, llc, off;
svc = ((clip_vcc == SEQ_NO_VCC_TOKEN) ||
(sk_atm(clip_vcc->vcc)->sk_family == AF_ATMSVC));
llc = ((clip_vcc == SEQ_NO_VCC_TOKEN) || clip_vcc->encap);
if (clip_vcc == SEQ_NO_VCC_TOKEN)
exp = entry->neigh->used;
else
exp = clip_vcc->last_use;
exp = (jiffies - exp) / HZ;
seq_printf(seq, "%-6s%-4s%-4s%5ld ",
dev->name, svc ? "SVC" : "PVC", llc ? "LLC" : "NULL", exp);
off = scnprintf(buf, sizeof(buf) - 1, "%pI4", n->primary_key);
while (off < 16)
buf[off++] = ' ';
buf[off] = '\0';
seq_printf(seq, "%s", buf);
if (clip_vcc == SEQ_NO_VCC_TOKEN) {
if (time_before(jiffies, entry->expires))
seq_printf(seq, "(resolving)\n");
else
seq_printf(seq, "(expired, ref %d)\n",
atomic_read(&entry->neigh->refcnt));
} else if (!svc) {
seq_printf(seq, "%d.%d.%d\n",
clip_vcc->vcc->dev->number,
clip_vcc->vcc->vpi, clip_vcc->vcc->vci);
} else {
svc_addr(seq, &clip_vcc->vcc->remote);
seq_putc(seq, '\n');
}
}
struct clip_seq_state {
/* This member must be first. */
struct neigh_seq_state ns;
/* Local to clip specific iteration. */
struct clip_vcc *vcc;
};
static struct clip_vcc *clip_seq_next_vcc(struct atmarp_entry *e,
struct clip_vcc *curr)
{
if (!curr) {
curr = e->vccs;
if (!curr)
return SEQ_NO_VCC_TOKEN;
return curr;
}
if (curr == SEQ_NO_VCC_TOKEN)
return NULL;
curr = curr->next;
return curr;
}
static void *clip_seq_vcc_walk(struct clip_seq_state *state,
struct atmarp_entry *e, loff_t * pos)
{
struct clip_vcc *vcc = state->vcc;
vcc = clip_seq_next_vcc(e, vcc);
if (vcc && pos != NULL) {
while (*pos) {
vcc = clip_seq_next_vcc(e, vcc);
if (!vcc)
break;
--(*pos);
}
}
state->vcc = vcc;
return vcc;
}
static void *clip_seq_sub_iter(struct neigh_seq_state *_state,
struct neighbour *n, loff_t * pos)
{
struct clip_seq_state *state = (struct clip_seq_state *)_state;
if (n->dev->type != ARPHRD_ATM)
return NULL;
return clip_seq_vcc_walk(state, neighbour_priv(n), pos);
}
static void *clip_seq_start(struct seq_file *seq, loff_t * pos)
{
struct clip_seq_state *state = seq->private;
state->ns.neigh_sub_iter = clip_seq_sub_iter;
return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_NEIGH_ONLY);
}
static int clip_seq_show(struct seq_file *seq, void *v)
{
static char atm_arp_banner[] =
"IPitf TypeEncp Idle IP address      ATM address\n";
if (v == SEQ_START_TOKEN) {
seq_puts(seq, atm_arp_banner);
} else {
struct clip_seq_state *state = seq->private;
struct clip_vcc *vcc = state->vcc;
struct neighbour *n = v;
atmarp_info(seq, n, neighbour_priv(n), vcc);
}
return 0;
}
static const struct seq_operations arp_seq_ops = {
.start = clip_seq_start,
.next = neigh_seq_next,
.stop = neigh_seq_stop,
.show = clip_seq_show,
};
static int arp_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &arp_seq_ops,
sizeof(struct clip_seq_state));
}
static const struct file_operations arp_seq_fops = {
.open = arp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
.owner = THIS_MODULE
};
#endif
static void atm_clip_exit_noproc(void);
static int __init atm_clip_init(void)
{
register_atm_ioctl(&clip_ioctl_ops);
register_netdevice_notifier(&clip_dev_notifier);
register_inetaddr_notifier(&clip_inet_notifier);
setup_timer(&idle_timer, idle_timer_check, 0);
#ifdef CONFIG_PROC_FS
{
struct proc_dir_entry *p;
p = proc_create("arp", S_IRUGO, atm_proc_root, &arp_seq_fops);
if (!p) {
pr_err("Unable to initialize /proc/net/atm/arp\n");
atm_clip_exit_noproc();
return -ENOMEM;
}
}
#endif
return 0;
}
static void atm_clip_exit_noproc(void)
{
struct net_device *dev, *next;
unregister_inetaddr_notifier(&clip_inet_notifier);
unregister_netdevice_notifier(&clip_dev_notifier);
deregister_atm_ioctl(&clip_ioctl_ops);
/* First, stop the idle timer, so it stops banging
* on the table.
*/
del_timer_sync(&idle_timer);
dev = clip_devs;
while (dev) {
next = PRIV(dev)->next;
unregister_netdev(dev);
free_netdev(dev);
dev = next;
}
}
static void __exit atm_clip_exit(void)
{
remove_proc_entry("arp", atm_proc_root);
atm_clip_exit_noproc();
}
module_init(atm_clip_init);
module_exit(atm_clip_exit);
MODULE_AUTHOR("Werner Almesberger");
MODULE_DESCRIPTION("Classical/IP over ATM interface");
MODULE_LICENSE("GPL");

910
net/atm/common.c Normal file
View file

@@ -0,0 +1,910 @@
/* net/atm/common.c - ATM sockets (common part for PVC and SVC) */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/net.h> /* struct socket, struct proto_ops */
#include <linux/atm.h> /* ATM stuff */
#include <linux/atmdev.h>
#include <linux/socket.h> /* SOL_SOCKET */
#include <linux/errno.h> /* error codes */
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/time.h> /* struct timeval */
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/sock.h> /* struct sock */
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/atomic.h>
#include "resources.h" /* atm_find_dev */
#include "common.h" /* prototypes */
#include "protocols.h" /* atm_init_<transport> */
#include "addr.h" /* address registry */
#include "signaling.h" /* for WAITING and sigd_attach */
struct hlist_head vcc_hash[VCC_HTABLE_SIZE];
EXPORT_SYMBOL(vcc_hash);
DEFINE_RWLOCK(vcc_sklist_lock);
EXPORT_SYMBOL(vcc_sklist_lock);
static ATOMIC_NOTIFIER_HEAD(atm_dev_notify_chain);
static void __vcc_insert_socket(struct sock *sk)
{
struct atm_vcc *vcc = atm_sk(sk);
struct hlist_head *head = &vcc_hash[vcc->vci & (VCC_HTABLE_SIZE - 1)];
sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1);
sk_add_node(sk, head);
}
void vcc_insert_socket(struct sock *sk)
{
write_lock_irq(&vcc_sklist_lock);
__vcc_insert_socket(sk);
write_unlock_irq(&vcc_sklist_lock);
}
EXPORT_SYMBOL(vcc_insert_socket);
static void vcc_remove_socket(struct sock *sk)
{
write_lock_irq(&vcc_sklist_lock);
sk_del_node_init(sk);
write_unlock_irq(&vcc_sklist_lock);
}
static struct sk_buff *alloc_tx(struct atm_vcc *vcc, unsigned int size)
{
struct sk_buff *skb;
struct sock *sk = sk_atm(vcc);
if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) {
pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n",
sk_wmem_alloc_get(sk), size, sk->sk_sndbuf);
return NULL;
}
while (!(skb = alloc_skb(size, GFP_KERNEL)))
schedule();
pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
atomic_add(skb->truesize, &sk->sk_wmem_alloc);
return skb;
}
static void vcc_sock_destruct(struct sock *sk)
{
if (atomic_read(&sk->sk_rmem_alloc))
printk(KERN_DEBUG "%s: rmem leakage (%d bytes) detected.\n",
__func__, atomic_read(&sk->sk_rmem_alloc));
if (atomic_read(&sk->sk_wmem_alloc))
printk(KERN_DEBUG "%s: wmem leakage (%d bytes) detected.\n",
__func__, atomic_read(&sk->sk_wmem_alloc));
}
static void vcc_def_wakeup(struct sock *sk)
{
struct socket_wq *wq;
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (wq_has_sleeper(wq))
wake_up(&wq->wait);
rcu_read_unlock();
}
static inline int vcc_writable(struct sock *sk)
{
struct atm_vcc *vcc = atm_sk(sk);
return (vcc->qos.txtp.max_sdu +
atomic_read(&sk->sk_wmem_alloc)) <= sk->sk_sndbuf;
}
static void vcc_write_space(struct sock *sk)
{
struct socket_wq *wq;
rcu_read_lock();
if (vcc_writable(sk)) {
wq = rcu_dereference(sk->sk_wq);
if (wq_has_sleeper(wq))
wake_up_interruptible(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
rcu_read_unlock();
}
static void vcc_release_cb(struct sock *sk)
{
struct atm_vcc *vcc = atm_sk(sk);
if (vcc->release_cb)
vcc->release_cb(vcc);
}
static struct proto vcc_proto = {
.name = "VCC",
.owner = THIS_MODULE,
.obj_size = sizeof(struct atm_vcc),
.release_cb = vcc_release_cb,
};
int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
{
struct sock *sk;
struct atm_vcc *vcc;
sock->sk = NULL;
if (sock->type == SOCK_STREAM)
return -EINVAL;
sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
sk->sk_state_change = vcc_def_wakeup;
sk->sk_write_space = vcc_write_space;
vcc = atm_sk(sk);
vcc->dev = NULL;
memset(&vcc->local, 0, sizeof(struct sockaddr_atmsvc));
memset(&vcc->remote, 0, sizeof(struct sockaddr_atmsvc));
vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */
atomic_set(&sk->sk_wmem_alloc, 1);
atomic_set(&sk->sk_rmem_alloc, 0);
vcc->push = NULL;
vcc->pop = NULL;
vcc->owner = NULL;
vcc->push_oam = NULL;
vcc->release_cb = NULL;
vcc->vpi = vcc->vci = 0; /* no VCI/VPI yet */
vcc->atm_options = vcc->aal_options = 0;
sk->sk_destruct = vcc_sock_destruct;
return 0;
}
static void vcc_destroy_socket(struct sock *sk)
{
struct atm_vcc *vcc = atm_sk(sk);
struct sk_buff *skb;
set_bit(ATM_VF_CLOSE, &vcc->flags);
clear_bit(ATM_VF_READY, &vcc->flags);
if (vcc->dev) {
if (vcc->dev->ops->close)
vcc->dev->ops->close(vcc);
if (vcc->push)
vcc->push(vcc, NULL); /* atmarpd has no push */
module_put(vcc->owner);
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
atm_return(vcc, skb->truesize);
kfree_skb(skb);
}
module_put(vcc->dev->ops->owner);
atm_dev_put(vcc->dev);
}
vcc_remove_socket(sk);
}
int vcc_release(struct socket *sock)
{
struct sock *sk = sock->sk;
if (sk) {
lock_sock(sk);
vcc_destroy_socket(sock->sk);
release_sock(sk);
sock_put(sk);
}
return 0;
}
void vcc_release_async(struct atm_vcc *vcc, int reply)
{
struct sock *sk = sk_atm(vcc);
set_bit(ATM_VF_CLOSE, &vcc->flags);
sk->sk_shutdown |= RCV_SHUTDOWN;
sk->sk_err = -reply;
clear_bit(ATM_VF_WAITING, &vcc->flags);
sk->sk_state_change(sk);
}
EXPORT_SYMBOL(vcc_release_async);
void vcc_process_recv_queue(struct atm_vcc *vcc)
{
struct sk_buff_head queue, *rq;
struct sk_buff *skb, *tmp;
unsigned long flags;
__skb_queue_head_init(&queue);
rq = &sk_atm(vcc)->sk_receive_queue;
spin_lock_irqsave(&rq->lock, flags);
skb_queue_splice_init(rq, &queue);
spin_unlock_irqrestore(&rq->lock, flags);
skb_queue_walk_safe(&queue, skb, tmp) {
__skb_unlink(skb, &queue);
vcc->push(vcc, skb);
}
}
EXPORT_SYMBOL(vcc_process_recv_queue);
void atm_dev_signal_change(struct atm_dev *dev, char signal)
{
pr_debug("%s signal=%d dev=%p number=%d dev->signal=%d\n",
__func__, signal, dev, dev->number, dev->signal);
/* atm driver sending invalid signal */
WARN_ON(signal < ATM_PHY_SIG_LOST || signal > ATM_PHY_SIG_FOUND);
if (dev->signal == signal)
return; /* no change */
dev->signal = signal;
atomic_notifier_call_chain(&atm_dev_notify_chain, signal, dev);
}
EXPORT_SYMBOL(atm_dev_signal_change);
void atm_dev_release_vccs(struct atm_dev *dev)
{
int i;
write_lock_irq(&vcc_sklist_lock);
for (i = 0; i < VCC_HTABLE_SIZE; i++) {
struct hlist_head *head = &vcc_hash[i];
struct hlist_node *tmp;
struct sock *s;
struct atm_vcc *vcc;
sk_for_each_safe(s, tmp, head) {
vcc = atm_sk(s);
if (vcc->dev == dev) {
vcc_release_async(vcc, -EPIPE);
sk_del_node_init(s);
}
}
}
write_unlock_irq(&vcc_sklist_lock);
}
EXPORT_SYMBOL(atm_dev_release_vccs);
static int adjust_tp(struct atm_trafprm *tp, unsigned char aal)
{
int max_sdu;
if (!tp->traffic_class)
return 0;
switch (aal) {
case ATM_AAL0:
max_sdu = ATM_CELL_SIZE-1;
break;
case ATM_AAL34:
max_sdu = ATM_MAX_AAL34_PDU;
break;
default:
pr_warn("AAL problems ... (%d)\n", aal);
/* fall through */
case ATM_AAL5:
max_sdu = ATM_MAX_AAL5_PDU;
}
if (!tp->max_sdu)
tp->max_sdu = max_sdu;
else if (tp->max_sdu > max_sdu)
return -EINVAL;
if (!tp->max_cdv)
tp->max_cdv = ATM_MAX_CDV;
return 0;
}
static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
{
struct hlist_head *head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];
struct sock *s;
struct atm_vcc *walk;
sk_for_each(s, head) {
walk = atm_sk(s);
if (walk->dev != vcc->dev)
continue;
if (test_bit(ATM_VF_ADDR, &walk->flags) && walk->vpi == vpi &&
walk->vci == vci && ((walk->qos.txtp.traffic_class !=
ATM_NONE && vcc->qos.txtp.traffic_class != ATM_NONE) ||
(walk->qos.rxtp.traffic_class != ATM_NONE &&
vcc->qos.rxtp.traffic_class != ATM_NONE)))
return -EADDRINUSE;
}
/* allow VCCs with same VPI/VCI iff they don't collide on
TX/RX (but we may refuse such sharing for other reasons,
e.g. if protocol requires to have both channels) */
return 0;
}
static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci)
{
static short p; /* poor man's per-device cache */
static int c;
short old_p;
int old_c;
int err;
if (*vpi != ATM_VPI_ANY && *vci != ATM_VCI_ANY) {
err = check_ci(vcc, *vpi, *vci);
return err;
}
/* last scan may have left values out of bounds for current device */
if (*vpi != ATM_VPI_ANY)
p = *vpi;
else if (p >= 1 << vcc->dev->ci_range.vpi_bits)
p = 0;
if (*vci != ATM_VCI_ANY)
c = *vci;
else if (c < ATM_NOT_RSV_VCI || c >= 1 << vcc->dev->ci_range.vci_bits)
c = ATM_NOT_RSV_VCI;
old_p = p;
old_c = c;
do {
if (!check_ci(vcc, p, c)) {
*vpi = p;
*vci = c;
return 0;
}
if (*vci == ATM_VCI_ANY) {
c++;
if (c >= 1 << vcc->dev->ci_range.vci_bits)
c = ATM_NOT_RSV_VCI;
}
if ((c == ATM_NOT_RSV_VCI || *vci != ATM_VCI_ANY) &&
*vpi == ATM_VPI_ANY) {
p++;
if (p >= 1 << vcc->dev->ci_range.vpi_bits)
p = 0;
}
} while (old_p != p || old_c != c);
return -EADDRINUSE;
}
static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi,
int vci)
{
struct sock *sk = sk_atm(vcc);
int error;
if ((vpi != ATM_VPI_UNSPEC && vpi != ATM_VPI_ANY &&
vpi >> dev->ci_range.vpi_bits) || (vci != ATM_VCI_UNSPEC &&
vci != ATM_VCI_ANY && vci >> dev->ci_range.vci_bits))
return -EINVAL;
if (vci > 0 && vci < ATM_NOT_RSV_VCI && !capable(CAP_NET_BIND_SERVICE))
return -EPERM;
error = -ENODEV;
if (!try_module_get(dev->ops->owner))
return error;
vcc->dev = dev;
write_lock_irq(&vcc_sklist_lock);
if (test_bit(ATM_DF_REMOVED, &dev->flags) ||
(error = find_ci(vcc, &vpi, &vci))) {
write_unlock_irq(&vcc_sklist_lock);
goto fail_module_put;
}
vcc->vpi = vpi;
vcc->vci = vci;
__vcc_insert_socket(sk);
write_unlock_irq(&vcc_sklist_lock);
switch (vcc->qos.aal) {
case ATM_AAL0:
error = atm_init_aal0(vcc);
vcc->stats = &dev->stats.aal0;
break;
case ATM_AAL34:
error = atm_init_aal34(vcc);
vcc->stats = &dev->stats.aal34;
break;
case ATM_NO_AAL:
/* ATM_AAL5 is also used in the "0 for default" case */
vcc->qos.aal = ATM_AAL5;
/* fall through */
case ATM_AAL5:
error = atm_init_aal5(vcc);
vcc->stats = &dev->stats.aal5;
break;
default:
error = -EPROTOTYPE;
}
if (!error)
error = adjust_tp(&vcc->qos.txtp, vcc->qos.aal);
if (!error)
error = adjust_tp(&vcc->qos.rxtp, vcc->qos.aal);
if (error)
goto fail;
pr_debug("VCC %d.%d, AAL %d\n", vpi, vci, vcc->qos.aal);
pr_debug(" TX: %d, PCR %d..%d, SDU %d\n",
vcc->qos.txtp.traffic_class,
vcc->qos.txtp.min_pcr,
vcc->qos.txtp.max_pcr,
vcc->qos.txtp.max_sdu);
pr_debug(" RX: %d, PCR %d..%d, SDU %d\n",
vcc->qos.rxtp.traffic_class,
vcc->qos.rxtp.min_pcr,
vcc->qos.rxtp.max_pcr,
vcc->qos.rxtp.max_sdu);
if (dev->ops->open) {
error = dev->ops->open(vcc);
if (error)
goto fail;
}
return 0;
fail:
vcc_remove_socket(sk);
fail_module_put:
module_put(dev->ops->owner);
/* ensure we get dev module ref count correct */
vcc->dev = NULL;
return error;
}
int vcc_connect(struct socket *sock, int itf, short vpi, int vci)
{
struct atm_dev *dev;
struct atm_vcc *vcc = ATM_SD(sock);
int error;
pr_debug("(vpi %d, vci %d)\n", vpi, vci);
if (sock->state == SS_CONNECTED)
return -EISCONN;
if (sock->state != SS_UNCONNECTED)
return -EINVAL;
if (!(vpi || vci))
return -EINVAL;
if (vpi != ATM_VPI_UNSPEC && vci != ATM_VCI_UNSPEC)
clear_bit(ATM_VF_PARTIAL, &vcc->flags);
else
if (test_bit(ATM_VF_PARTIAL, &vcc->flags))
return -EINVAL;
pr_debug("(TX: cl %d,bw %d-%d,sdu %d; "
"RX: cl %d,bw %d-%d,sdu %d,AAL %s%d)\n",
vcc->qos.txtp.traffic_class, vcc->qos.txtp.min_pcr,
vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_sdu,
vcc->qos.rxtp.traffic_class, vcc->qos.rxtp.min_pcr,
vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_sdu,
vcc->qos.aal == ATM_AAL5 ? "" :
vcc->qos.aal == ATM_AAL0 ? "" : " ??? code ",
vcc->qos.aal == ATM_AAL0 ? 0 : vcc->qos.aal);
if (!test_bit(ATM_VF_HASQOS, &vcc->flags))
return -EBADFD;
if (vcc->qos.txtp.traffic_class == ATM_ANYCLASS ||
vcc->qos.rxtp.traffic_class == ATM_ANYCLASS)
return -EINVAL;
if (likely(itf != ATM_ITF_ANY)) {
dev = try_then_request_module(atm_dev_lookup(itf),
"atm-device-%d", itf);
} else {
dev = NULL;
mutex_lock(&atm_dev_mutex);
if (!list_empty(&atm_devs)) {
dev = list_entry(atm_devs.next,
struct atm_dev, dev_list);
atm_dev_hold(dev);
}
mutex_unlock(&atm_dev_mutex);
}
if (!dev)
return -ENODEV;
error = __vcc_connect(vcc, dev, vpi, vci);
if (error) {
atm_dev_put(dev);
return error;
}
if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC)
set_bit(ATM_VF_PARTIAL, &vcc->flags);
if (test_bit(ATM_VF_READY, &ATM_SD(sock)->flags))
sock->state = SS_CONNECTED;
return 0;
}
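/* From user space, all of the above is reached through a PVC socket: the
 * QoS must be set first (or the ATM_VF_HASQOS test above fails with
 * -EBADFD), then connect() supplies itf/vpi/vci. A minimal sketch
 * (SOL_ATM comes from linux/socket.h via the includes below):
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/atm.h>

static int open_pvc_sketch(int itf, short vpi, int vci)
{
	struct sockaddr_atmpvc addr;
	struct atm_qos qos;
	int fd = socket(PF_ATMPVC, SOCK_DGRAM, 0);

	if (fd < 0)
		return fd;
	memset(&qos, 0, sizeof(qos));
	qos.aal = ATM_AAL5;
	qos.txtp.traffic_class = ATM_UBR;	/* UBR needs no PCR (check_tp) */
	qos.rxtp.traffic_class = ATM_UBR;
	if (setsockopt(fd, SOL_ATM, SO_ATMQOS, &qos, sizeof(qos)) < 0)
		return -1;
	memset(&addr, 0, sizeof(addr));
	addr.sap_family = AF_ATMPVC;
	addr.sap_addr.itf = itf;
	addr.sap_addr.vpi = vpi;	/* ATM_VPI_ANY lets find_ci() choose */
	addr.sap_addr.vci = vci;	/* likewise ATM_VCI_ANY */
	return connect(fd, (struct sockaddr *)&addr, sizeof(addr)) ? -1 : fd;
}
/* SO_ATMPVC (vcc_getsockopt() below) then reports the channel actually
 * allocated when the ANY values were used.
 */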
int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
struct sk_buff *skb;
int copied, error = -EINVAL;
if (sock->state != SS_CONNECTED)
return -ENOTCONN;
/* only handle MSG_DONTWAIT and MSG_PEEK */
if (flags & ~(MSG_DONTWAIT | MSG_PEEK))
return -EOPNOTSUPP;
vcc = ATM_SD(sock);
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags))
return 0;
skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &error);
if (!skb)
return error;
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (error)
return error;
sock_recv_ts_and_drops(msg, sk, skb);
if (!(flags & MSG_PEEK)) {
pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc),
skb->truesize);
atm_return(vcc, skb->truesize);
}
skb_free_datagram(sk, skb);
return copied;
}
int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
size_t total_len)
{
struct sock *sk = sock->sk;
DEFINE_WAIT(wait);
struct atm_vcc *vcc;
struct sk_buff *skb;
int eff, error;
const void __user *buff;
int size;
lock_sock(sk);
if (sock->state != SS_CONNECTED) {
error = -ENOTCONN;
goto out;
}
if (m->msg_name) {
error = -EISCONN;
goto out;
}
if (m->msg_iovlen != 1) {
error = -ENOSYS; /* fix this later @@@ */
goto out;
}
buff = m->msg_iov->iov_base;
size = m->msg_iov->iov_len;
vcc = ATM_SD(sock);
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags)) {
error = -EPIPE;
send_sig(SIGPIPE, current, 0);
goto out;
}
if (!size) {
error = 0;
goto out;
}
if (size < 0 || size > vcc->qos.txtp.max_sdu) {
error = -EMSGSIZE;
goto out;
}
eff = (size+3) & ~3; /* align to word boundary */
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
error = 0;
while (!(skb = alloc_tx(vcc, eff))) {
if (m->msg_flags & MSG_DONTWAIT) {
error = -EAGAIN;
break;
}
schedule();
if (signal_pending(current)) {
error = -ERESTARTSYS;
break;
}
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags)) {
error = -EPIPE;
send_sig(SIGPIPE, current, 0);
break;
}
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
finish_wait(sk_sleep(sk), &wait);
if (error)
goto out;
skb->dev = NULL; /* for paths shared with net_device interfaces */
ATM_SKB(skb)->atm_options = vcc->atm_options;
if (copy_from_user(skb_put(skb, size), buff, size)) {
kfree_skb(skb);
error = -EFAULT;
goto out;
}
if (eff != size)
memset(skb->data + size, 0, eff-size);
error = vcc->dev->ops->send(vcc, skb);
error = error ? error : size;
out:
release_sock(sk);
return error;
}
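/* Worth noting for user space: every send() on a connected PVC becomes
 * exactly one AAL5 PDU, padded up to a word boundary before it reaches
 * the driver, and a write larger than txtp.max_sdu fails with EMSGSIZE
 * rather than being split. A sketch, assuming pvc_fd is a connected PVC:
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static void send_one_pdu_sketch(int pvc_fd)
{
	char pdu[1500];

	memset(pdu, 0, sizeof(pdu));
	if (send(pvc_fd, pdu, sizeof(pdu), 0) < 0)
		perror("send");
}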
unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
unsigned int mask;
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
vcc = ATM_SD(sock);
/* exceptional events */
if (sk->sk_err)
mask = POLLERR;
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags))
mask |= POLLHUP;
/* readable? */
if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
/* writable? */
if (sock->state == SS_CONNECTING &&
test_bit(ATM_VF_WAITING, &vcc->flags))
return mask;
if (vcc->qos.txtp.traffic_class != ATM_NONE &&
vcc_writable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
return mask;
}
static int atm_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
{
int error;
/*
* Don't let the QoS change the already connected AAL type nor the
* traffic class.
*/
if (qos->aal != vcc->qos.aal ||
qos->rxtp.traffic_class != vcc->qos.rxtp.traffic_class ||
qos->txtp.traffic_class != vcc->qos.txtp.traffic_class)
return -EINVAL;
error = adjust_tp(&qos->txtp, qos->aal);
if (!error)
error = adjust_tp(&qos->rxtp, qos->aal);
if (error)
return error;
if (!vcc->dev->ops->change_qos)
return -EOPNOTSUPP;
if (sk_atm(vcc)->sk_family == AF_ATMPVC)
return vcc->dev->ops->change_qos(vcc, qos, ATM_MF_SET);
return svc_change_qos(vcc, qos);
}
static int check_tp(const struct atm_trafprm *tp)
{
/* @@@ Should be merged with adjust_tp */
if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS)
return 0;
if (tp->traffic_class != ATM_UBR && !tp->min_pcr && !tp->pcr &&
!tp->max_pcr)
return -EINVAL;
if (tp->min_pcr == ATM_MAX_PCR)
return -EINVAL;
if (tp->min_pcr && tp->max_pcr && tp->max_pcr != ATM_MAX_PCR &&
tp->min_pcr > tp->max_pcr)
return -EINVAL;
/*
* We allow pcr to be outside [min_pcr,max_pcr], because later
* adjustment may still push it in the valid range.
*/
return 0;
}
static int check_qos(const struct atm_qos *qos)
{
int error;
if (!qos->txtp.traffic_class && !qos->rxtp.traffic_class)
return -EINVAL;
if (qos->txtp.traffic_class != qos->rxtp.traffic_class &&
qos->txtp.traffic_class && qos->rxtp.traffic_class &&
qos->txtp.traffic_class != ATM_ANYCLASS &&
qos->rxtp.traffic_class != ATM_ANYCLASS)
return -EINVAL;
error = check_tp(&qos->txtp);
if (error)
return error;
return check_tp(&qos->rxtp);
}
int vcc_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct atm_vcc *vcc;
unsigned long value;
int error;
if (__SO_LEVEL_MATCH(optname, level) && optlen != __SO_SIZE(optname))
return -EINVAL;
vcc = ATM_SD(sock);
switch (optname) {
case SO_ATMQOS:
{
struct atm_qos qos;
if (copy_from_user(&qos, optval, sizeof(qos)))
return -EFAULT;
error = check_qos(&qos);
if (error)
return error;
if (sock->state == SS_CONNECTED)
return atm_change_qos(vcc, &qos);
if (sock->state != SS_UNCONNECTED)
return -EBADFD;
vcc->qos = qos;
set_bit(ATM_VF_HASQOS, &vcc->flags);
return 0;
}
case SO_SETCLP:
if (get_user(value, (unsigned long __user *)optval))
return -EFAULT;
if (value)
vcc->atm_options |= ATM_ATMOPT_CLP;
else
vcc->atm_options &= ~ATM_ATMOPT_CLP;
return 0;
default:
if (level == SOL_SOCKET)
return -EINVAL;
break;
}
if (!vcc->dev || !vcc->dev->ops->setsockopt)
return -EINVAL;
return vcc->dev->ops->setsockopt(vcc, level, optname, optval, optlen);
}
int vcc_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct atm_vcc *vcc;
int len;
if (get_user(len, optlen))
return -EFAULT;
if (__SO_LEVEL_MATCH(optname, level) && len != __SO_SIZE(optname))
return -EINVAL;
vcc = ATM_SD(sock);
switch (optname) {
case SO_ATMQOS:
if (!test_bit(ATM_VF_HASQOS, &vcc->flags))
return -EINVAL;
return copy_to_user(optval, &vcc->qos, sizeof(vcc->qos))
? -EFAULT : 0;
case SO_SETCLP:
return put_user(vcc->atm_options & ATM_ATMOPT_CLP ? 1 : 0,
(unsigned long __user *)optval) ? -EFAULT : 0;
case SO_ATMPVC:
{
struct sockaddr_atmpvc pvc;
if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
return -ENOTCONN;
memset(&pvc, 0, sizeof(pvc));
pvc.sap_family = AF_ATMPVC;
pvc.sap_addr.itf = vcc->dev->number;
pvc.sap_addr.vpi = vcc->vpi;
pvc.sap_addr.vci = vcc->vci;
return copy_to_user(optval, &pvc, sizeof(pvc)) ? -EFAULT : 0;
}
default:
if (level == SOL_SOCKET)
return -EINVAL;
break;
}
if (!vcc->dev || !vcc->dev->ops->getsockopt)
return -EINVAL;
return vcc->dev->ops->getsockopt(vcc, level, optname, optval, len);
}
int register_atmdevice_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&atm_dev_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(register_atmdevice_notifier);
void unregister_atmdevice_notifier(struct notifier_block *nb)
{
atomic_notifier_chain_unregister(&atm_dev_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_atmdevice_notifier);
static int __init atm_init(void)
{
int error;
error = proto_register(&vcc_proto, 0);
if (error < 0)
goto out;
error = atmpvc_init();
if (error < 0) {
pr_err("atmpvc_init() failed with %d\n", error);
goto out_unregister_vcc_proto;
}
error = atmsvc_init();
if (error < 0) {
pr_err("atmsvc_init() failed with %d\n", error);
goto out_atmpvc_exit;
}
error = atm_proc_init();
if (error < 0) {
pr_err("atm_proc_init() failed with %d\n", error);
goto out_atmsvc_exit;
}
error = atm_sysfs_init();
if (error < 0) {
pr_err("atm_sysfs_init() failed with %d\n", error);
goto out_atmproc_exit;
}
out:
return error;
out_atmproc_exit:
atm_proc_exit();
out_atmsvc_exit:
atmsvc_exit();
out_atmpvc_exit:
atmpvc_exit();
out_unregister_vcc_proto:
proto_unregister(&vcc_proto);
goto out;
}
static void __exit atm_exit(void)
{
atm_proc_exit();
atm_sysfs_exit();
atmsvc_exit();
atmpvc_exit();
proto_unregister(&vcc_proto);
}
subsys_initcall(atm_init);
module_exit(atm_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_ATMPVC);
MODULE_ALIAS_NETPROTO(PF_ATMSVC);

56
net/atm/common.h Normal file
View file

@@ -0,0 +1,56 @@
/* net/atm/common.h - ATM sockets (common part for PVC and SVC) */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#ifndef NET_ATM_COMMON_H
#define NET_ATM_COMMON_H
#include <linux/net.h>
#include <linux/poll.h> /* for poll_table */
int vcc_create(struct net *net, struct socket *sock, int protocol, int family);
int vcc_release(struct socket *sock);
int vcc_connect(struct socket *sock, int itf, short vpi, int vci);
int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int flags);
int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
size_t total_len);
unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait);
int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int vcc_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen);
int vcc_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen);
void vcc_process_recv_queue(struct atm_vcc *vcc);
int atmpvc_init(void);
void atmpvc_exit(void);
int atmsvc_init(void);
void atmsvc_exit(void);
int atm_sysfs_init(void);
void atm_sysfs_exit(void);
#ifdef CONFIG_PROC_FS
int atm_proc_init(void);
void atm_proc_exit(void);
#else
static inline int atm_proc_init(void)
{
return 0;
}
static inline void atm_proc_exit(void)
{
/* nothing */
}
#endif /* CONFIG_PROC_FS */
/* SVC */
int svc_change_qos(struct atm_vcc *vcc,struct atm_qos *qos);
void atm_dev_release_vccs(struct atm_dev *dev);
#endif

369
net/atm/ioctl.c Normal file
View file

@@ -0,0 +1,369 @@
/* ATM ioctl handling */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
/* 2003 John Levon <levon@movementarian.org> */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/net.h> /* struct socket, struct proto_ops */
#include <linux/atm.h> /* ATM stuff */
#include <linux/atmdev.h>
#include <linux/atmclip.h> /* CLIP_*ENCAP */
#include <linux/atmarp.h> /* manifest constants */
#include <linux/capability.h>
#include <linux/sonet.h> /* for ioctls */
#include <linux/atmsvc.h>
#include <linux/atmmpc.h>
#include <net/atmclip.h>
#include <linux/atmlec.h>
#include <linux/mutex.h>
#include <asm/ioctls.h>
#include <net/compat.h>
#include "resources.h"
#include "signaling.h" /* for WAITING and sigd_attach */
#include "common.h"
static DEFINE_MUTEX(ioctl_mutex);
static LIST_HEAD(ioctl_list);
void register_atm_ioctl(struct atm_ioctl *ioctl)
{
mutex_lock(&ioctl_mutex);
list_add_tail(&ioctl->list, &ioctl_list);
mutex_unlock(&ioctl_mutex);
}
EXPORT_SYMBOL(register_atm_ioctl);
void deregister_atm_ioctl(struct atm_ioctl *ioctl)
{
mutex_lock(&ioctl_mutex);
list_del(&ioctl->list);
mutex_unlock(&ioctl_mutex);
}
EXPORT_SYMBOL(deregister_atm_ioctl);
static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg, int compat)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
int error;
struct list_head *pos;
void __user *argp = (void __user *)arg;
vcc = ATM_SD(sock);
switch (cmd) {
case SIOCOUTQ:
if (sock->state != SS_CONNECTED ||
!test_bit(ATM_VF_READY, &vcc->flags)) {
error = -EINVAL;
goto done;
}
error = put_user(sk->sk_sndbuf - sk_wmem_alloc_get(sk),
(int __user *)argp) ? -EFAULT : 0;
goto done;
case SIOCINQ:
{
struct sk_buff *skb;
if (sock->state != SS_CONNECTED) {
error = -EINVAL;
goto done;
}
skb = skb_peek(&sk->sk_receive_queue);
error = put_user(skb ? skb->len : 0,
(int __user *)argp) ? -EFAULT : 0;
goto done;
}
case SIOCGSTAMP: /* borrowed from IP */
#ifdef CONFIG_COMPAT
if (compat)
error = compat_sock_get_timestamp(sk, argp);
else
#endif
error = sock_get_timestamp(sk, argp);
goto done;
case SIOCGSTAMPNS: /* borrowed from IP */
#ifdef CONFIG_COMPAT
if (compat)
error = compat_sock_get_timestampns(sk, argp);
else
#endif
error = sock_get_timestampns(sk, argp);
goto done;
case ATM_SETSC:
net_warn_ratelimited("ATM_SETSC is obsolete; used by %s:%d\n",
current->comm, task_pid_nr(current));
error = 0;
goto done;
case ATMSIGD_CTRL:
if (!capable(CAP_NET_ADMIN)) {
error = -EPERM;
goto done;
}
/*
* The user/kernel protocol for exchanging signalling
* info uses kernel pointers as opaque references,
* so the holder of the file descriptor can scribble
* on the kernel... so we should make sure that we
* have the same privileges that /proc/kcore needs
*/
if (!capable(CAP_SYS_RAWIO)) {
error = -EPERM;
goto done;
}
#ifdef CONFIG_COMPAT
/* WTF? I don't even want to _think_ about making this
work for 32-bit userspace. TBH I don't really want
to think about it at all. dwmw2. */
if (compat) {
net_warn_ratelimited("32-bit task cannot be atmsigd\n");
error = -EINVAL;
goto done;
}
#endif
error = sigd_attach(vcc);
if (!error)
sock->state = SS_CONNECTED;
goto done;
case ATM_SETBACKEND:
case ATM_NEWBACKENDIF:
{
atm_backend_t backend;
error = get_user(backend, (atm_backend_t __user *)argp);
if (error)
goto done;
switch (backend) {
case ATM_BACKEND_PPP:
request_module("pppoatm");
break;
case ATM_BACKEND_BR2684:
request_module("br2684");
break;
}
break;
}
case ATMMPC_CTRL:
case ATMMPC_DATA:
request_module("mpoa");
break;
case ATMARPD_CTRL:
request_module("clip");
break;
case ATMLEC_CTRL:
request_module("lec");
break;
}
error = -ENOIOCTLCMD;
mutex_lock(&ioctl_mutex);
list_for_each(pos, &ioctl_list) {
struct atm_ioctl *ic = list_entry(pos, struct atm_ioctl, list);
if (try_module_get(ic->owner)) {
error = ic->ioctl(sock, cmd, arg);
module_put(ic->owner);
if (error != -ENOIOCTLCMD)
break;
}
}
mutex_unlock(&ioctl_mutex);
if (error != -ENOIOCTLCMD)
goto done;
error = atm_dev_ioctl(cmd, argp, compat);
done:
return error;
}
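/* Note the SIOCOUTQ semantics above: unlike TCP, it reports the remaining
 * send-buffer space, not the number of bytes queued. User-space sketch:
 */
#include <sys/ioctl.h>
#include <linux/sockios.h>	/* SIOCINQ, SIOCOUTQ */

static void queue_state_sketch(int atm_fd)
{
	int next_pdu_len, sndbuf_free;

	ioctl(atm_fd, SIOCINQ, &next_pdu_len);	/* 0 if queue is empty */
	ioctl(atm_fd, SIOCOUTQ, &sndbuf_free);	/* sk_sndbuf - wmem_alloc */
}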
int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
return do_vcc_ioctl(sock, cmd, arg, 0);
}
#ifdef CONFIG_COMPAT
/*
* FIXME:
* The compat_ioctl handling is duplicated, using both these conversion
* routines and the compat argument to the actual handlers. Both
* versions are somewhat incomplete and should be merged, e.g. by
* moving the ioctl number translation into the actual handlers and
* killing the conversion code.
*
* -arnd, November 2009
*/
#define ATM_GETLINKRATE32 _IOW('a', ATMIOC_ITF+1, struct compat_atmif_sioc)
#define ATM_GETNAMES32 _IOW('a', ATMIOC_ITF+3, struct compat_atm_iobuf)
#define ATM_GETTYPE32 _IOW('a', ATMIOC_ITF+4, struct compat_atmif_sioc)
#define ATM_GETESI32 _IOW('a', ATMIOC_ITF+5, struct compat_atmif_sioc)
#define ATM_GETADDR32 _IOW('a', ATMIOC_ITF+6, struct compat_atmif_sioc)
#define ATM_RSTADDR32 _IOW('a', ATMIOC_ITF+7, struct compat_atmif_sioc)
#define ATM_ADDADDR32 _IOW('a', ATMIOC_ITF+8, struct compat_atmif_sioc)
#define ATM_DELADDR32 _IOW('a', ATMIOC_ITF+9, struct compat_atmif_sioc)
#define ATM_GETCIRANGE32 _IOW('a', ATMIOC_ITF+10, struct compat_atmif_sioc)
#define ATM_SETCIRANGE32 _IOW('a', ATMIOC_ITF+11, struct compat_atmif_sioc)
#define ATM_SETESI32 _IOW('a', ATMIOC_ITF+12, struct compat_atmif_sioc)
#define ATM_SETESIF32 _IOW('a', ATMIOC_ITF+13, struct compat_atmif_sioc)
#define ATM_GETSTAT32 _IOW('a', ATMIOC_SARCOM+0, struct compat_atmif_sioc)
#define ATM_GETSTATZ32 _IOW('a', ATMIOC_SARCOM+1, struct compat_atmif_sioc)
#define ATM_GETLOOP32 _IOW('a', ATMIOC_SARCOM+2, struct compat_atmif_sioc)
#define ATM_SETLOOP32 _IOW('a', ATMIOC_SARCOM+3, struct compat_atmif_sioc)
#define ATM_QUERYLOOP32 _IOW('a', ATMIOC_SARCOM+4, struct compat_atmif_sioc)
static struct {
unsigned int cmd32;
unsigned int cmd;
} atm_ioctl_map[] = {
{ ATM_GETLINKRATE32, ATM_GETLINKRATE },
{ ATM_GETNAMES32, ATM_GETNAMES },
{ ATM_GETTYPE32, ATM_GETTYPE },
{ ATM_GETESI32, ATM_GETESI },
{ ATM_GETADDR32, ATM_GETADDR },
{ ATM_RSTADDR32, ATM_RSTADDR },
{ ATM_ADDADDR32, ATM_ADDADDR },
{ ATM_DELADDR32, ATM_DELADDR },
{ ATM_GETCIRANGE32, ATM_GETCIRANGE },
{ ATM_SETCIRANGE32, ATM_SETCIRANGE },
{ ATM_SETESI32, ATM_SETESI },
{ ATM_SETESIF32, ATM_SETESIF },
{ ATM_GETSTAT32, ATM_GETSTAT },
{ ATM_GETSTATZ32, ATM_GETSTATZ },
{ ATM_GETLOOP32, ATM_GETLOOP },
{ ATM_SETLOOP32, ATM_SETLOOP },
{ ATM_QUERYLOOP32, ATM_QUERYLOOP },
};
#define NR_ATM_IOCTL ARRAY_SIZE(atm_ioctl_map)
static int do_atm_iobuf(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
struct atm_iobuf __user *iobuf;
struct compat_atm_iobuf __user *iobuf32;
u32 data;
void __user *datap;
int len, err;
iobuf = compat_alloc_user_space(sizeof(*iobuf));
iobuf32 = compat_ptr(arg);
if (get_user(len, &iobuf32->length) ||
get_user(data, &iobuf32->buffer))
return -EFAULT;
datap = compat_ptr(data);
if (put_user(len, &iobuf->length) ||
put_user(datap, &iobuf->buffer))
return -EFAULT;
err = do_vcc_ioctl(sock, cmd, (unsigned long) iobuf, 0);
if (!err) {
if (copy_in_user(&iobuf32->length, &iobuf->length,
sizeof(int)))
err = -EFAULT;
}
return err;
}
static int do_atmif_sioc(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
struct atmif_sioc __user *sioc;
struct compat_atmif_sioc __user *sioc32;
u32 data;
void __user *datap;
int err;
sioc = compat_alloc_user_space(sizeof(*sioc));
sioc32 = compat_ptr(arg);
if (copy_in_user(&sioc->number, &sioc32->number, 2 * sizeof(int)) ||
get_user(data, &sioc32->arg))
return -EFAULT;
datap = compat_ptr(data);
if (put_user(datap, &sioc->arg))
return -EFAULT;
err = do_vcc_ioctl(sock, cmd, (unsigned long) sioc, 0);
if (!err) {
if (copy_in_user(&sioc32->length, &sioc->length,
sizeof(int)))
err = -EFAULT;
}
return err;
}
static int do_atm_ioctl(struct socket *sock, unsigned int cmd32,
unsigned long arg)
{
int i;
unsigned int cmd = 0;
switch (cmd32) {
case SONET_GETSTAT:
case SONET_GETSTATZ:
case SONET_GETDIAG:
case SONET_SETDIAG:
case SONET_CLRDIAG:
case SONET_SETFRAMING:
case SONET_GETFRAMING:
case SONET_GETFRSENSE:
return do_atmif_sioc(sock, cmd32, arg);
}
for (i = 0; i < NR_ATM_IOCTL; i++) {
if (cmd32 == atm_ioctl_map[i].cmd32) {
cmd = atm_ioctl_map[i].cmd;
break;
}
}
if (i == NR_ATM_IOCTL)
return -EINVAL;
switch (cmd) {
case ATM_GETNAMES:
return do_atm_iobuf(sock, cmd, arg);
case ATM_GETLINKRATE:
case ATM_GETTYPE:
case ATM_GETESI:
case ATM_GETADDR:
case ATM_RSTADDR:
case ATM_ADDADDR:
case ATM_DELADDR:
case ATM_GETCIRANGE:
case ATM_SETCIRANGE:
case ATM_SETESI:
case ATM_SETESIF:
case ATM_GETSTAT:
case ATM_GETSTATZ:
case ATM_GETLOOP:
case ATM_SETLOOP:
case ATM_QUERYLOOP:
return do_atmif_sioc(sock, cmd, arg);
}
return -EINVAL;
}
int vcc_compat_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
int ret;
ret = do_vcc_ioctl(sock, cmd, arg, 1);
if (ret != -ENOIOCTLCMD)
return ret;
return do_atm_ioctl(sock, cmd, arg);
}
#endif

2285
net/atm/lec.c Normal file

File diff suppressed because it is too large Load diff

154
net/atm/lec.h Normal file
View file

@ -0,0 +1,154 @@
/*
* Lan Emulation client header file
*
* Marko Kiiskila <mkiiskila@yahoo.com>
*/
#ifndef _LEC_H_
#define _LEC_H_
#include <linux/atmdev.h>
#include <linux/netdevice.h>
#include <linux/atmlec.h>
#define LEC_HEADER_LEN 16
struct lecdatahdr_8023 {
__be16 le_header;
unsigned char h_dest[ETH_ALEN];
unsigned char h_source[ETH_ALEN];
__be16 h_type;
};
struct lecdatahdr_8025 {
__be16 le_header;
unsigned char ac_pad;
unsigned char fc;
unsigned char h_dest[ETH_ALEN];
unsigned char h_source[ETH_ALEN];
};
#define LEC_MINIMUM_8023_SIZE 62
#define LEC_MINIMUM_8025_SIZE 16
/*
* Operations that a LANE2-capable device can do. The first two
* functions are used to make the device do things. See spec 3.1.3 and 3.1.4.
*
* The third function is intended for the MPOA component sitting on
* top of the LANE device. The MPOA component assigns its own function
* to (*associate_indicator)() and the LANE device will use that
* function to tell it about TLVs it sees floating through.
*
*/
struct lane2_ops {
int (*resolve) (struct net_device *dev, const u8 *dst_mac, int force,
u8 **tlvs, u32 *sizeoftlvs);
int (*associate_req) (struct net_device *dev, const u8 *lan_dst,
const u8 *tlvs, u32 sizeoftlvs);
void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
const u8 *tlvs, u32 sizeoftlvs);
};
/*
* ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
* frames.
*
* 1. Dix Ethernet EtherType frames encoded by placing EtherType
* field in h_type field. Data follows immediately after header.
* 2. LLC Data frames whose total length, including LLC field and data,
* but not padding required to meet the minimum data frame length,
* is less than ETH_P_802_3_MIN MUST be encoded by placing that length
* in the h_type field. The LLC field follows the header immediately.
* 3. LLC data frames longer than this maximum MUST be encoded by placing
* the value 0 in the h_type field.
*
*/
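/*
 * Worked examples of the three encodings above (illustrative only;
 * ETH_P_802_3_MIN is 0x0600):
 *
 *   1. An IPv4 Dix frame carries h_type = 0x0800 (the EtherType).
 *   2. A short LLC frame with 46 bytes of LLC field + data carries
 *      h_type = 46 (0x002E), which is < ETH_P_802_3_MIN.
 *   3. An LLC frame of 1500 bytes carries h_type = 0.
 */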
/* Hash table size */
#define LEC_ARP_TABLE_SIZE 16
struct lec_priv {
unsigned short lecid; /* Lecid of this client */
struct hlist_head lec_arp_empty_ones;
/* Used for storing VCC's that don't have a MAC address attached yet */
struct hlist_head lec_arp_tables[LEC_ARP_TABLE_SIZE];
/* Actual LE ARP table */
struct hlist_head lec_no_forward;
/*
* Used for storing VCCs (and forwarding packets from them) which are
* to age out because we do not use them to forward packets.
* This is because some LE clients will have 2 VCCs, and only
* one of them gets used.
*/
struct hlist_head mcast_fwds;
/*
* With LANEv2 it is possible that BUS (or a special multicast server)
* establishes multiple Multicast Forward VCCs to us. This list
* collects all those VCCs. LANEv1 client has only one item in this
* list. These entries are not aged out.
*/
spinlock_t lec_arp_lock;
struct atm_vcc *mcast_vcc; /* Default Multicast Send VCC */
struct atm_vcc *lecd;
struct delayed_work lec_arp_work; /* C10 */
unsigned int maximum_unknown_frame_count;
/*
* Within the period of time defined by this variable, the client will send
* no more than C10 frames to BUS for a given unicast destination. (C11)
*/
unsigned long max_unknown_frame_time;
/*
* If no traffic has been sent on this VCC for this period of time,
* the VCC will be torn down (C12)
*/
unsigned long vcc_timeout_period;
/*
* An LE Client MUST not retry an LE_ARP_REQUEST for a
* given frame's LAN Destination more than maximum retry count times,
* after the first LE_ARP_REQUEST (C13)
*/
unsigned short max_retry_count;
/*
* Max time the client will maintain an entry in its arp cache in
* the absence of a verification of that relationship (C17)
*/
unsigned long aging_time;
/*
* Max time the client will maintain an entry in cache when
* topology change flag is true (C18)
*/
unsigned long forward_delay_time; /* Topology change flag (C19) */
int topology_change;
/*
* Max time the client expects an LE_ARP_REQUEST/LE_ARP_RESPONSE
* cycle to take (C20)
*/
unsigned long arp_response_time;
/*
* Time limit to wait to receive an LE_FLUSH_RESPONSE after the
* LE_FLUSH_REQUEST has been sent before taking recover action. (C21)
*/
unsigned long flush_timeout;
/* The time since sending a frame to the bus after which the
* LE Client may assume that the frame has been either discarded or
* delivered to the recipient (C22)
*/
unsigned long path_switching_delay;
u8 *tlvs; /* LANE2: TLVs are new */
u32 sizeoftlvs; /* The size of the tlv array in bytes */
int lane_version; /* LANE2 */
int itfnum; /* e.g. 2 for lec2, 5 for lec5 */
struct lane2_ops *lane2_ops; /* can be NULL for LANE v1 */
int is_proxy; /* bridge between ATM and Ethernet */
};
struct lec_vcc_priv {
void (*old_pop) (struct atm_vcc *vcc, struct sk_buff *skb);
int xoff;
};
#define LEC_VCC_PRIV(vcc) ((struct lec_vcc_priv *)((vcc)->user_back))
#endif /* _LEC_H_ */

96
net/atm/lec_arpc.h Normal file
View file

@ -0,0 +1,96 @@
/*
* Lec arp cache
*
* Marko Kiiskila <mkiiskila@yahoo.com>
*/
#ifndef _LEC_ARP_H_
#define _LEC_ARP_H_
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/if_ether.h>
#include <linux/atmlec.h>
struct lec_arp_table {
struct hlist_node next; /* Linked entry list */
unsigned char atm_addr[ATM_ESA_LEN]; /* Atm address */
unsigned char mac_addr[ETH_ALEN]; /* Mac address */
int is_rdesc; /* Mac address is a route descriptor */
struct atm_vcc *vcc; /* Vcc this entry is attached */
struct atm_vcc *recv_vcc; /* Vcc we receive data from */
void (*old_push) (struct atm_vcc *vcc, struct sk_buff *skb);
/* Push that leads to daemon */
void (*old_recv_push) (struct atm_vcc *vcc, struct sk_buff *skb);
/* Push that leads to daemon */
unsigned long last_used; /* For expiry */
unsigned long timestamp; /* Used for various timestamping things:
* 1. FLUSH started
* (status=ESI_FLUSH_PENDING)
* 2. Counting to
* max_unknown_frame_time
* (status=ESI_ARP_PENDING||
* status=ESI_VC_PENDING)
*/
unsigned char no_tries; /* No of times arp retry has been tried */
unsigned char status; /* Status of this entry */
unsigned short flags; /* Flags for this entry */
unsigned short packets_flooded; /* Data packets flooded */
unsigned long flush_tran_id; /* Transaction id in flush protocol */
struct timer_list timer; /* Arping timer */
struct lec_priv *priv; /* Pointer back */
u8 *tlvs;
u32 sizeoftlvs; /*
* LANE2: Each MAC address can have TLVs
* associated with it. sizeoftlvs tells
* the length of the tlvs array
*/
struct sk_buff_head tx_wait; /* wait queue for outgoing packets */
atomic_t usage; /* usage count */
};
/*
* LANE2: Template tlv struct for accessing
* the tlvs in the lec_arp_table->tlvs array
*/
struct tlv {
u32 type;
u8 length;
u8 value[255];
};
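/*
 * Illustrative sketch (not part of the original header): stepping to the
 * next TLV in a lec_arp_table->tlvs array, assuming the on-wire layout
 * matches the template above (4-byte type, 1-byte length, then 'length'
 * bytes of value). lec_tlv_next() is a hypothetical helper name.
 */
static inline u8 *lec_tlv_next(u8 *p)
{
	struct tlv *t = (struct tlv *)p;

	/* skip the type and length fields, then the value bytes */
	return p + sizeof(t->type) + sizeof(t->length) + t->length;
}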
/* Status fields */
#define ESI_UNKNOWN 0 /*
* Next packet sent to this mac address
* causes ARP-request to be sent
*/
#define ESI_ARP_PENDING 1 /*
* There is no ATM address associated with this
* 48-bit address. The LE-ARP protocol is in
* progress.
*/
#define ESI_VC_PENDING 2 /*
* There is a valid ATM address associated with
* this 48-bit address but there is no VC set
* up to that ATM address. The signaling
* protocol is in process.
*/
#define ESI_FLUSH_PENDING 4 /*
* The LEC has been notified of the FLUSH_START
* status and it is assumed that the flush
* protocol is in process.
*/
#define ESI_FORWARD_DIRECT 5 /*
* Either the Path Switching Delay (C22) has
* elapsed or the LEC has notified the Mapping
* that the flush protocol has completed. In
* either case, it is safe to forward packets
* to this address via the data direct VC.
*/
/* Flag values */
#define LEC_REMOTE_FLAG 0x0001
#define LEC_PERMANENT_FLAG 0x0002
#endif /* _LEC_ARP_H_ */

1534
net/atm/mpc.c Normal file

File diff suppressed because it is too large Load diff

64
net/atm/mpc.h Normal file
View file

@ -0,0 +1,64 @@
#ifndef _MPC_H_
#define _MPC_H_
#include <linux/types.h>
#include <linux/atm.h>
#include <linux/atmmpc.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include "mpoa_caches.h"
/* kernel -> mpc-daemon */
int msg_to_mpoad(struct k_message *msg, struct mpoa_client *mpc);
struct mpoa_client {
struct mpoa_client *next;
struct net_device *dev; /* lec in question */
int dev_num; /* e.g. 2 for lec2 */
struct atm_vcc *mpoad_vcc; /* control channel to mpoad */
uint8_t mps_ctrl_addr[ATM_ESA_LEN]; /* MPS control ATM address */
uint8_t our_ctrl_addr[ATM_ESA_LEN]; /* MPC's control ATM address */
rwlock_t ingress_lock;
struct in_cache_ops *in_ops; /* ingress cache operations */
in_cache_entry *in_cache; /* the ingress cache of this MPC */
rwlock_t egress_lock;
struct eg_cache_ops *eg_ops; /* egress cache operations */
eg_cache_entry *eg_cache; /* the egress cache of this MPC */
uint8_t *mps_macs; /* array of MPS MAC addresses, >=1 */
int number_of_mps_macs; /* number of the above MAC addresses */
struct mpc_parameters parameters; /* parameters for this client */
const struct net_device_ops *old_ops;
struct net_device_ops new_ops;
};
struct atm_mpoa_qos {
struct atm_mpoa_qos *next;
__be32 ipaddr;
struct atm_qos qos;
};
/* MPOA QoS operations */
struct atm_mpoa_qos *atm_mpoa_add_qos(__be32 dst_ip, struct atm_qos *qos);
struct atm_mpoa_qos *atm_mpoa_search_qos(__be32 dst_ip);
int atm_mpoa_delete_qos(struct atm_mpoa_qos *qos);
/* Display QoS entries. This is for the procfs */
struct seq_file;
void atm_mpoa_disp_qos(struct seq_file *m);
#ifdef CONFIG_PROC_FS
int mpc_proc_init(void);
void mpc_proc_clean(void);
#else
#define mpc_proc_init() (0)
#define mpc_proc_clean() do { } while(0)
#endif
#endif /* _MPC_H_ */

569
net/atm/mpoa_caches.c Normal file
View file

@ -0,0 +1,569 @@
#include <linux/types.h>
#include <linux/atmmpc.h>
#include <linux/slab.h>
#include <linux/time.h>
#include "mpoa_caches.h"
#include "mpc.h"
/*
* mpoa_caches.c: Implementation of ingress and egress cache
* handling functions
*/
#if 0
#define dprintk(format, args...) \
printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
#else
#define dprintk(format, args...) \
do { if (0) \
printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
} while (0)
#endif
#if 0
#define ddprintk(format, args...) \
printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
#else
#define ddprintk(format, args...) \
do { if (0) \
printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
} while (0)
#endif
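/*
 * Note on the idiom above: even when debugging is compiled out, the
 * arguments still pass through an 'if (0) printk(...)', so the compiler
 * type-checks the format string, and the do { } while (0) wrapper keeps
 * the macros safe inside un-braced if/else bodies.
 */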
static in_cache_entry *in_cache_get(__be32 dst_ip,
struct mpoa_client *client)
{
in_cache_entry *entry;
read_lock_bh(&client->ingress_lock);
entry = client->in_cache;
while (entry != NULL) {
if (entry->ctrl_info.in_dst_ip == dst_ip) {
atomic_inc(&entry->use);
read_unlock_bh(&client->ingress_lock);
return entry;
}
entry = entry->next;
}
read_unlock_bh(&client->ingress_lock);
return NULL;
}
static in_cache_entry *in_cache_get_with_mask(__be32 dst_ip,
struct mpoa_client *client,
__be32 mask)
{
in_cache_entry *entry;
read_lock_bh(&client->ingress_lock);
entry = client->in_cache;
while (entry != NULL) {
if ((entry->ctrl_info.in_dst_ip & mask) == (dst_ip & mask)) {
atomic_inc(&entry->use);
read_unlock_bh(&client->ingress_lock);
return entry;
}
entry = entry->next;
}
read_unlock_bh(&client->ingress_lock);
return NULL;
}
static in_cache_entry *in_cache_get_by_vcc(struct atm_vcc *vcc,
struct mpoa_client *client)
{
in_cache_entry *entry;
read_lock_bh(&client->ingress_lock);
entry = client->in_cache;
while (entry != NULL) {
if (entry->shortcut == vcc) {
atomic_inc(&entry->use);
read_unlock_bh(&client->ingress_lock);
return entry;
}
entry = entry->next;
}
read_unlock_bh(&client->ingress_lock);
return NULL;
}
static in_cache_entry *in_cache_add_entry(__be32 dst_ip,
struct mpoa_client *client)
{
in_cache_entry *entry = kzalloc(sizeof(in_cache_entry), GFP_KERNEL);
if (entry == NULL) {
pr_info("mpoa: mpoa_caches.c: new_in_cache_entry: out of memory\n");
return NULL;
}
dprintk("adding an ingress entry, ip = %pI4\n", &dst_ip);
atomic_set(&entry->use, 1);
dprintk("new_in_cache_entry: about to lock\n");
write_lock_bh(&client->ingress_lock);
entry->next = client->in_cache;
entry->prev = NULL;
if (client->in_cache != NULL)
client->in_cache->prev = entry;
client->in_cache = entry;
memcpy(entry->MPS_ctrl_ATM_addr, client->mps_ctrl_addr, ATM_ESA_LEN);
entry->ctrl_info.in_dst_ip = dst_ip;
do_gettimeofday(&(entry->tv));
entry->retry_time = client->parameters.mpc_p4;
entry->count = 1;
entry->entry_state = INGRESS_INVALID;
entry->ctrl_info.holding_time = HOLDING_TIME_DEFAULT;
atomic_inc(&entry->use);
write_unlock_bh(&client->ingress_lock);
dprintk("new_in_cache_entry: unlocked\n");
return entry;
}
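/*
 * Note: a new entry leaves in_cache_add_entry() with two references:
 * one held by the cache list (dropped by in_cache_remove_entry() via
 * ->put) and one for the caller (dropped when the caller calls ->put).
 */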
static int cache_hit(in_cache_entry *entry, struct mpoa_client *mpc)
{
struct atm_mpoa_qos *qos;
struct k_message msg;
entry->count++;
if (entry->entry_state == INGRESS_RESOLVED && entry->shortcut != NULL)
return OPEN;
if (entry->entry_state == INGRESS_REFRESHING) {
if (entry->count > mpc->parameters.mpc_p1) {
msg.type = SND_MPOA_RES_RQST;
msg.content.in_info = entry->ctrl_info;
memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN);
qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip);
if (qos != NULL)
msg.qos = qos->qos;
msg_to_mpoad(&msg, mpc);
do_gettimeofday(&(entry->reply_wait));
entry->entry_state = INGRESS_RESOLVING;
}
if (entry->shortcut != NULL)
return OPEN;
return CLOSED;
}
if (entry->entry_state == INGRESS_RESOLVING && entry->shortcut != NULL)
return OPEN;
if (entry->count > mpc->parameters.mpc_p1 &&
entry->entry_state == INGRESS_INVALID) {
dprintk("(%s) threshold exceeded for ip %pI4, sending MPOA res req\n",
mpc->dev->name, &entry->ctrl_info.in_dst_ip);
entry->entry_state = INGRESS_RESOLVING;
msg.type = SND_MPOA_RES_RQST;
memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN);
msg.content.in_info = entry->ctrl_info;
qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip);
if (qos != NULL)
msg.qos = qos->qos;
msg_to_mpoad(&msg, mpc);
do_gettimeofday(&(entry->reply_wait));
}
return CLOSED;
}
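/*
 * Sketch of the ingress state transitions driven by cache_hit() and the
 * timer helpers below (the RESOLVING -> RESOLVED step happens in mpc.c
 * when the resolution reply arrives):
 *
 *   INGRESS_INVALID    --count > mpc_p1-->  INGRESS_RESOLVING
 *   INGRESS_RESOLVING  --reply (mpc.c)-->   INGRESS_RESOLVED
 *   INGRESS_RESOLVED   --refresh_time-->    INGRESS_REFRESHING
 *   INGRESS_REFRESHING --count > mpc_p1-->  INGRESS_RESOLVING
 */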
static void in_cache_put(in_cache_entry *entry)
{
if (atomic_dec_and_test(&entry->use)) {
memset(entry, 0, sizeof(in_cache_entry));
kfree(entry);
}
}
/*
* This should be called with the write lock held
*/
static void in_cache_remove_entry(in_cache_entry *entry,
struct mpoa_client *client)
{
struct atm_vcc *vcc;
struct k_message msg;
vcc = entry->shortcut;
dprintk("removing an ingress entry, ip = %pI4\n",
&entry->ctrl_info.in_dst_ip);
if (entry->prev != NULL)
entry->prev->next = entry->next;
else
client->in_cache = entry->next;
if (entry->next != NULL)
entry->next->prev = entry->prev;
client->in_ops->put(entry);
if (client->in_cache == NULL && client->eg_cache == NULL) {
msg.type = STOP_KEEP_ALIVE_SM;
msg_to_mpoad(&msg, client);
}
/* Check if the egress side still uses this VCC */
if (vcc != NULL) {
eg_cache_entry *eg_entry = client->eg_ops->get_by_vcc(vcc,
client);
if (eg_entry != NULL) {
client->eg_ops->put(eg_entry);
return;
}
vcc_release_async(vcc, -EPIPE);
}
}
/* Call this every MPC-p2 seconds. Not an exactly correct solution,
but an easy one... */
static void clear_count_and_expired(struct mpoa_client *client)
{
in_cache_entry *entry, *next_entry;
struct timeval now;
do_gettimeofday(&now);
write_lock_bh(&client->ingress_lock);
entry = client->in_cache;
while (entry != NULL) {
entry->count = 0;
next_entry = entry->next;
if ((now.tv_sec - entry->tv.tv_sec)
> entry->ctrl_info.holding_time) {
dprintk("holding time expired, ip = %pI4\n",
&entry->ctrl_info.in_dst_ip);
client->in_ops->remove_entry(entry, client);
}
entry = next_entry;
}
write_unlock_bh(&client->ingress_lock);
}
/* Call this every MPC-p4 seconds. */
static void check_resolving_entries(struct mpoa_client *client)
{
struct atm_mpoa_qos *qos;
in_cache_entry *entry;
struct timeval now;
struct k_message msg;
do_gettimeofday(&now);
read_lock_bh(&client->ingress_lock);
entry = client->in_cache;
while (entry != NULL) {
if (entry->entry_state == INGRESS_RESOLVING) {
if ((now.tv_sec - entry->hold_down.tv_sec) <
client->parameters.mpc_p6) {
entry = entry->next; /* Entry in hold down */
continue;
}
if ((now.tv_sec - entry->reply_wait.tv_sec) >
entry->retry_time) {
entry->retry_time = MPC_C1 * (entry->retry_time);
/*
* Retry time maximum exceeded,
* put entry in hold down.
*/
if (entry->retry_time > client->parameters.mpc_p5) {
do_gettimeofday(&(entry->hold_down));
entry->retry_time = client->parameters.mpc_p4;
entry = entry->next;
continue;
}
/* Ask daemon to send a resolution request. */
memset(&(entry->hold_down), 0, sizeof(struct timeval));
msg.type = SND_MPOA_RES_RTRY;
memcpy(msg.MPS_ctrl, client->mps_ctrl_addr, ATM_ESA_LEN);
msg.content.in_info = entry->ctrl_info;
qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip);
if (qos != NULL)
msg.qos = qos->qos;
msg_to_mpoad(&msg, client);
do_gettimeofday(&(entry->reply_wait));
}
}
entry = entry->next;
}
read_unlock_bh(&client->ingress_lock);
}
/* Call this every MPC-p5 seconds. */
static void refresh_entries(struct mpoa_client *client)
{
struct timeval now;
struct in_cache_entry *entry = client->in_cache;
ddprintk("refresh_entries\n");
do_gettimeofday(&now);
read_lock_bh(&client->ingress_lock);
while (entry != NULL) {
if (entry->entry_state == INGRESS_RESOLVED) {
if (!(entry->refresh_time))
entry->refresh_time = (2 * (entry->ctrl_info.holding_time))/3;
if ((now.tv_sec - entry->reply_wait.tv_sec) >
entry->refresh_time) {
dprintk("refreshing an entry.\n");
entry->entry_state = INGRESS_REFRESHING;
}
}
entry = entry->next;
}
read_unlock_bh(&client->ingress_lock);
}
static void in_destroy_cache(struct mpoa_client *mpc)
{
write_lock_irq(&mpc->ingress_lock);
while (mpc->in_cache != NULL)
mpc->in_ops->remove_entry(mpc->in_cache, mpc);
write_unlock_irq(&mpc->ingress_lock);
}
static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id,
struct mpoa_client *mpc)
{
eg_cache_entry *entry;
read_lock_irq(&mpc->egress_lock);
entry = mpc->eg_cache;
while (entry != NULL) {
if (entry->ctrl_info.cache_id == cache_id) {
atomic_inc(&entry->use);
read_unlock_irq(&mpc->egress_lock);
return entry;
}
entry = entry->next;
}
read_unlock_irq(&mpc->egress_lock);
return NULL;
}
/* This can be called from any context since it saves CPU flags */
static eg_cache_entry *eg_cache_get_by_tag(__be32 tag, struct mpoa_client *mpc)
{
unsigned long flags;
eg_cache_entry *entry;
read_lock_irqsave(&mpc->egress_lock, flags);
entry = mpc->eg_cache;
while (entry != NULL) {
if (entry->ctrl_info.tag == tag) {
atomic_inc(&entry->use);
read_unlock_irqrestore(&mpc->egress_lock, flags);
return entry;
}
entry = entry->next;
}
read_unlock_irqrestore(&mpc->egress_lock, flags);
return NULL;
}
/* This can be called from any context since it saves CPU flags */
static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc,
struct mpoa_client *mpc)
{
unsigned long flags;
eg_cache_entry *entry;
read_lock_irqsave(&mpc->egress_lock, flags);
entry = mpc->eg_cache;
while (entry != NULL) {
if (entry->shortcut == vcc) {
atomic_inc(&entry->use);
read_unlock_irqrestore(&mpc->egress_lock, flags);
return entry;
}
entry = entry->next;
}
read_unlock_irqrestore(&mpc->egress_lock, flags);
return NULL;
}
static eg_cache_entry *eg_cache_get_by_src_ip(__be32 ipaddr,
struct mpoa_client *mpc)
{
eg_cache_entry *entry;
read_lock_irq(&mpc->egress_lock);
entry = mpc->eg_cache;
while (entry != NULL) {
if (entry->latest_ip_addr == ipaddr) {
atomic_inc(&entry->use);
read_unlock_irq(&mpc->egress_lock);
return entry;
}
entry = entry->next;
}
read_unlock_irq(&mpc->egress_lock);
return NULL;
}
static void eg_cache_put(eg_cache_entry *entry)
{
if (atomic_dec_and_test(&entry->use)) {
memset(entry, 0, sizeof(eg_cache_entry));
kfree(entry);
}
}
/*
* This should be called with the write lock held
*/
static void eg_cache_remove_entry(eg_cache_entry *entry,
struct mpoa_client *client)
{
struct atm_vcc *vcc;
struct k_message msg;
vcc = entry->shortcut;
dprintk("removing an egress entry.\n");
if (entry->prev != NULL)
entry->prev->next = entry->next;
else
client->eg_cache = entry->next;
if (entry->next != NULL)
entry->next->prev = entry->prev;
client->eg_ops->put(entry);
if (client->in_cache == NULL && client->eg_cache == NULL) {
msg.type = STOP_KEEP_ALIVE_SM;
msg_to_mpoad(&msg, client);
}
/* Check if the ingress side still uses this VCC */
if (vcc != NULL) {
in_cache_entry *in_entry = client->in_ops->get_by_vcc(vcc, client);
if (in_entry != NULL) {
client->in_ops->put(in_entry);
return;
}
vcc_release_async(vcc, -EPIPE);
}
}
static eg_cache_entry *eg_cache_add_entry(struct k_message *msg,
struct mpoa_client *client)
{
eg_cache_entry *entry = kzalloc(sizeof(eg_cache_entry), GFP_KERNEL);
if (entry == NULL) {
pr_info("out of memory\n");
return NULL;
}
dprintk("adding an egress entry, ip = %pI4, this should be our IP\n",
&msg->content.eg_info.eg_dst_ip);
atomic_set(&entry->use, 1);
dprintk("new_eg_cache_entry: about to lock\n");
write_lock_irq(&client->egress_lock);
entry->next = client->eg_cache;
entry->prev = NULL;
if (client->eg_cache != NULL)
client->eg_cache->prev = entry;
client->eg_cache = entry;
memcpy(entry->MPS_ctrl_ATM_addr, client->mps_ctrl_addr, ATM_ESA_LEN);
entry->ctrl_info = msg->content.eg_info;
do_gettimeofday(&(entry->tv));
entry->entry_state = EGRESS_RESOLVED;
dprintk("new_eg_cache_entry cache_id %u\n",
ntohl(entry->ctrl_info.cache_id));
dprintk("mps_ip = %pI4\n", &entry->ctrl_info.mps_ip);
atomic_inc(&entry->use);
write_unlock_irq(&client->egress_lock);
dprintk("new_eg_cache_entry: unlocked\n");
return entry;
}
static void update_eg_cache_entry(eg_cache_entry *entry, uint16_t holding_time)
{
do_gettimeofday(&(entry->tv));
entry->entry_state = EGRESS_RESOLVED;
entry->ctrl_info.holding_time = holding_time;
}
static void clear_expired(struct mpoa_client *client)
{
eg_cache_entry *entry, *next_entry;
struct timeval now;
struct k_message msg;
do_gettimeofday(&now);
write_lock_irq(&client->egress_lock);
entry = client->eg_cache;
while (entry != NULL) {
next_entry = entry->next;
if ((now.tv_sec - entry->tv.tv_sec)
> entry->ctrl_info.holding_time) {
msg.type = SND_EGRESS_PURGE;
msg.content.eg_info = entry->ctrl_info;
dprintk("egress_cache: holding time expired, cache_id = %u.\n",
ntohl(entry->ctrl_info.cache_id));
msg_to_mpoad(&msg, client);
client->eg_ops->remove_entry(entry, client);
}
entry = next_entry;
}
write_unlock_irq(&client->egress_lock);
}
static void eg_destroy_cache(struct mpoa_client *mpc)
{
write_lock_irq(&mpc->egress_lock);
while (mpc->eg_cache != NULL)
mpc->eg_ops->remove_entry(mpc->eg_cache, mpc);
write_unlock_irq(&mpc->egress_lock);
}
static struct in_cache_ops ingress_ops = {
in_cache_add_entry, /* add_entry */
in_cache_get, /* get */
in_cache_get_with_mask, /* get_with_mask */
in_cache_get_by_vcc, /* get_by_vcc */
in_cache_put, /* put */
in_cache_remove_entry, /* remove_entry */
cache_hit, /* cache_hit */
clear_count_and_expired, /* clear_count */
check_resolving_entries, /* check_resolving */
refresh_entries, /* refresh */
in_destroy_cache /* destroy_cache */
};
static struct eg_cache_ops egress_ops = {
eg_cache_add_entry, /* add_entry */
eg_cache_get_by_cache_id, /* get_by_cache_id */
eg_cache_get_by_tag, /* get_by_tag */
eg_cache_get_by_vcc, /* get_by_vcc */
eg_cache_get_by_src_ip, /* get_by_src_ip */
eg_cache_put, /* put */
eg_cache_remove_entry, /* remove_entry */
update_eg_cache_entry, /* update */
clear_expired, /* clear_expired */
eg_destroy_cache /* destroy_cache */
};
void atm_mpoa_init_cache(struct mpoa_client *mpc)
{
mpc->in_ops = &ingress_ops;
mpc->eg_ops = &egress_ops;
}

96
net/atm/mpoa_caches.h Normal file
View file

@ -0,0 +1,96 @@
#ifndef MPOA_CACHES_H
#define MPOA_CACHES_H
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/atmmpc.h>
struct mpoa_client;
void atm_mpoa_init_cache(struct mpoa_client *mpc);
typedef struct in_cache_entry {
struct in_cache_entry *next;
struct in_cache_entry *prev;
struct timeval tv;
struct timeval reply_wait;
struct timeval hold_down;
uint32_t packets_fwded;
uint16_t entry_state;
uint32_t retry_time;
uint32_t refresh_time;
uint32_t count;
struct atm_vcc *shortcut;
uint8_t MPS_ctrl_ATM_addr[ATM_ESA_LEN];
struct in_ctrl_info ctrl_info;
atomic_t use;
} in_cache_entry;
struct in_cache_ops{
in_cache_entry *(*add_entry)(__be32 dst_ip,
struct mpoa_client *client);
in_cache_entry *(*get)(__be32 dst_ip, struct mpoa_client *client);
in_cache_entry *(*get_with_mask)(__be32 dst_ip,
struct mpoa_client *client,
__be32 mask);
in_cache_entry *(*get_by_vcc)(struct atm_vcc *vcc,
struct mpoa_client *client);
void (*put)(in_cache_entry *entry);
void (*remove_entry)(in_cache_entry *delEntry,
struct mpoa_client *client );
int (*cache_hit)(in_cache_entry *entry,
struct mpoa_client *client);
void (*clear_count)(struct mpoa_client *client);
void (*check_resolving)(struct mpoa_client *client);
void (*refresh)(struct mpoa_client *client);
void (*destroy_cache)(struct mpoa_client *mpc);
};
typedef struct eg_cache_entry{
struct eg_cache_entry *next;
struct eg_cache_entry *prev;
struct timeval tv;
uint8_t MPS_ctrl_ATM_addr[ATM_ESA_LEN];
struct atm_vcc *shortcut;
uint32_t packets_rcvd;
uint16_t entry_state;
__be32 latest_ip_addr; /* The src IP address of the last packet */
struct eg_ctrl_info ctrl_info;
atomic_t use;
} eg_cache_entry;
struct eg_cache_ops{
eg_cache_entry *(*add_entry)(struct k_message *msg, struct mpoa_client *client);
eg_cache_entry *(*get_by_cache_id)(__be32 cache_id, struct mpoa_client *client);
eg_cache_entry *(*get_by_tag)(__be32 cache_id, struct mpoa_client *client);
eg_cache_entry *(*get_by_vcc)(struct atm_vcc *vcc, struct mpoa_client *client);
eg_cache_entry *(*get_by_src_ip)(__be32 ipaddr, struct mpoa_client *client);
void (*put)(eg_cache_entry *entry);
void (*remove_entry)(eg_cache_entry *entry, struct mpoa_client *client);
void (*update)(eg_cache_entry *entry, uint16_t holding_time);
void (*clear_expired)(struct mpoa_client *client);
void (*destroy_cache)(struct mpoa_client *mpc);
};
/* Ingress cache entry states */
#define INGRESS_REFRESHING 3
#define INGRESS_RESOLVED 2
#define INGRESS_RESOLVING 1
#define INGRESS_INVALID 0
/* VCC states */
#define OPEN 1
#define CLOSED 0
/* Egress cache entry states */
#define EGRESS_RESOLVED 2
#define EGRESS_PURGE 1
#define EGRESS_INVALID 0
#endif

312
net/atm/mpoa_proc.c Normal file
View file

@ -0,0 +1,312 @@
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#ifdef CONFIG_PROC_FS
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/atmmpc.h>
#include <linux/atm.h>
#include <linux/gfp.h>
#include "mpc.h"
#include "mpoa_caches.h"
/*
* mpoa_proc.c: Implementation of the MPOA client's proc
* file system statistics
*/
#if 1
#define dprintk(format, args...) \
printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
#else
#define dprintk(format, args...) \
do { if (0) \
printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
} while (0)
#endif
#if 0
#define ddprintk(format, args...) \
printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
#else
#define ddprintk(format, args...) \
do { if (0) \
printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
} while (0)
#endif
#define STAT_FILE_NAME "mpc" /* Our statistic file's name */
extern struct mpoa_client *mpcs;
extern struct proc_dir_entry *atm_proc_root; /* from proc.c. */
static int proc_mpc_open(struct inode *inode, struct file *file);
static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
size_t nbytes, loff_t *ppos);
static int parse_qos(const char *buff);
/*
* Define allowed FILE OPERATIONS
*/
static const struct file_operations mpc_file_operations = {
.owner = THIS_MODULE,
.open = proc_mpc_open,
.read = seq_read,
.llseek = seq_lseek,
.write = proc_mpc_write,
.release = seq_release,
};
/*
* Returns the state of an ingress cache entry as a string
*/
static const char *ingress_state_string(int state)
{
switch (state) {
case INGRESS_RESOLVING:
return "resolving ";
case INGRESS_RESOLVED:
return "resolved ";
case INGRESS_INVALID:
return "invalid ";
case INGRESS_REFRESHING:
return "refreshing ";
}
return "";
}
/*
* Returns the state of an egress cache entry as a string
*/
static const char *egress_state_string(int state)
{
switch (state) {
case EGRESS_RESOLVED:
return "resolved ";
case EGRESS_PURGE:
return "purge ";
case EGRESS_INVALID:
return "invalid ";
}
return "";
}
/*
* FIXME: mpcs (and per-mpc lists) have no locking whatsoever.
*/
static void *mpc_start(struct seq_file *m, loff_t *pos)
{
loff_t l = *pos;
struct mpoa_client *mpc;
if (!l--)
return SEQ_START_TOKEN;
for (mpc = mpcs; mpc; mpc = mpc->next)
if (!l--)
return mpc;
return NULL;
}
static void *mpc_next(struct seq_file *m, void *v, loff_t *pos)
{
struct mpoa_client *p = v;
(*pos)++;
return v == SEQ_START_TOKEN ? mpcs : p->next;
}
static void mpc_stop(struct seq_file *m, void *v)
{
}
/*
* READING function - called when the /proc/atm/mpoa file is read from.
*/
static int mpc_show(struct seq_file *m, void *v)
{
struct mpoa_client *mpc = v;
int i;
in_cache_entry *in_entry;
eg_cache_entry *eg_entry;
struct timeval now;
unsigned char ip_string[16];
if (v == SEQ_START_TOKEN) {
atm_mpoa_disp_qos(m);
return 0;
}
seq_printf(m, "\nInterface %d:\n\n", mpc->dev_num);
seq_printf(m, "Ingress Entries:\nIP address State Holding time Packets fwded VPI VCI\n");
do_gettimeofday(&now);
for (in_entry = mpc->in_cache; in_entry; in_entry = in_entry->next) {
sprintf(ip_string, "%pI4", &in_entry->ctrl_info.in_dst_ip);
seq_printf(m, "%-16s%s%-14lu%-12u",
ip_string,
ingress_state_string(in_entry->entry_state),
in_entry->ctrl_info.holding_time -
(now.tv_sec-in_entry->tv.tv_sec),
in_entry->packets_fwded);
if (in_entry->shortcut)
seq_printf(m, " %-3d %-3d",
in_entry->shortcut->vpi,
in_entry->shortcut->vci);
seq_printf(m, "\n");
}
seq_printf(m, "\n");
seq_printf(m, "Egress Entries:\nIngress MPC ATM addr\nCache-id State Holding time Packets recvd Latest IP addr VPI VCI\n");
for (eg_entry = mpc->eg_cache; eg_entry; eg_entry = eg_entry->next) {
unsigned char *p = eg_entry->ctrl_info.in_MPC_data_ATM_addr;
for (i = 0; i < ATM_ESA_LEN; i++)
seq_printf(m, "%02x", p[i]);
seq_printf(m, "\n%-16lu%s%-14lu%-15u",
(unsigned long)ntohl(eg_entry->ctrl_info.cache_id),
egress_state_string(eg_entry->entry_state),
(eg_entry->ctrl_info.holding_time -
(now.tv_sec-eg_entry->tv.tv_sec)),
eg_entry->packets_rcvd);
/* latest IP address */
sprintf(ip_string, "%pI4", &eg_entry->latest_ip_addr);
seq_printf(m, "%-16s", ip_string);
if (eg_entry->shortcut)
seq_printf(m, " %-3d %-3d",
eg_entry->shortcut->vpi,
eg_entry->shortcut->vci);
seq_printf(m, "\n");
}
seq_printf(m, "\n");
return 0;
}
static const struct seq_operations mpc_op = {
.start = mpc_start,
.next = mpc_next,
.stop = mpc_stop,
.show = mpc_show
};
static int proc_mpc_open(struct inode *inode, struct file *file)
{
return seq_open(file, &mpc_op);
}
static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
size_t nbytes, loff_t *ppos)
{
char *page, *p;
unsigned int len;
if (nbytes == 0)
return 0;
if (nbytes >= PAGE_SIZE)
nbytes = PAGE_SIZE-1;
page = (char *)__get_free_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
for (p = page, len = 0; len < nbytes; p++, len++) {
if (get_user(*p, buff++)) {
free_page((unsigned long)page);
return -EFAULT;
}
if (*p == '\0' || *p == '\n')
break;
}
*p = '\0';
if (!parse_qos(page))
printk("mpoa: proc_mpc_write: could not parse '%s'\n", page);
free_page((unsigned long)page);
return len;
}
static int parse_qos(const char *buff)
{
/* possible lines look like this
* add 130.230.54.142 tx=max_pcr,max_sdu rx=max_pcr,max_sdu
*/
unsigned char ip[4];
int tx_pcr, tx_sdu, rx_pcr, rx_sdu;
__be32 ipaddr;
struct atm_qos qos;
memset(&qos, 0, sizeof(struct atm_qos));
if (sscanf(buff, "del %hhu.%hhu.%hhu.%hhu",
ip, ip+1, ip+2, ip+3) == 4) {
ipaddr = *(__be32 *)ip;
return atm_mpoa_delete_qos(atm_mpoa_search_qos(ipaddr));
}
if (sscanf(buff, "add %hhu.%hhu.%hhu.%hhu tx=%d,%d rx=tx",
ip, ip+1, ip+2, ip+3, &tx_pcr, &tx_sdu) == 6) {
rx_pcr = tx_pcr;
rx_sdu = tx_sdu;
} else if (sscanf(buff, "add %hhu.%hhu.%hhu.%hhu tx=%d,%d rx=%d,%d",
ip, ip+1, ip+2, ip+3, &tx_pcr, &tx_sdu, &rx_pcr, &rx_sdu) != 8)
return 0;
ipaddr = *(__be32 *)ip;
qos.txtp.traffic_class = ATM_CBR;
qos.txtp.max_pcr = tx_pcr;
qos.txtp.max_sdu = tx_sdu;
qos.rxtp.traffic_class = ATM_CBR;
qos.rxtp.max_pcr = rx_pcr;
qos.rxtp.max_sdu = rx_sdu;
qos.aal = ATM_AAL5;
dprintk("parse_qos(): setting qos paramameters to tx=%d,%d rx=%d,%d\n",
qos.txtp.max_pcr, qos.txtp.max_sdu,
qos.rxtp.max_pcr, qos.rxtp.max_sdu);
atm_mpoa_add_qos(ipaddr, &qos);
return 1;
}
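/*
 * Minimal userspace sketch (illustrative, not part of the original file):
 * feeding parse_qos() a line through the proc file created in
 * mpc_proc_init() below (/proc/atm/mpc, see STAT_FILE_NAME). The "rx=tx"
 * form mirrors the tx parameters onto the rx direction, as handled above.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char line[] = "add 130.230.54.142 tx=80000,1500 rx=tx\n";
 *		int fd = open("/proc/atm/mpc", O_WRONLY);
 *
 *		if (fd < 0 || write(fd, line, sizeof(line) - 1) < 0)
 *			perror("mpc qos");
 *		if (fd >= 0)
 *			close(fd);
 *		return 0;
 *	}
 */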
/*
* INITIALIZATION function - called when module is initialized/loaded.
*/
int mpc_proc_init(void)
{
struct proc_dir_entry *p;
p = proc_create(STAT_FILE_NAME, 0, atm_proc_root, &mpc_file_operations);
if (!p) {
pr_err("Unable to initialize /proc/atm/%s\n", STAT_FILE_NAME);
return -ENOMEM;
}
return 0;
}
/*
* DELETING function - called when module is removed.
*/
void mpc_proc_clean(void)
{
remove_proc_entry(STAT_FILE_NAME, atm_proc_root);
}
#endif /* CONFIG_PROC_FS */

500
net/atm/pppoatm.c Normal file
View file

@ -0,0 +1,500 @@
/* net/atm/pppoatm.c - RFC2364 PPP over ATM/AAL5 */
/* Copyright 1999-2000 by Mitchell Blank Jr */
/* Based on clip.c; 1995-1999 by Werner Almesberger, EPFL LRC/ICA */
/* And on ppp_async.c; Copyright 1999 Paul Mackerras */
/* And help from Jens Axboe */
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* This driver provides the encapsulation and framing for sending
* and receiving PPP frames in ATM AAL5 PDUs.
*/
/*
* One shortcoming of this driver is that it does not comply with
* section 8 of RFC2364 - we are supposed to detect a change
* in encapsulation and immediately abort the connection (in order
* to avoid a black-hole being created if our peer loses state
* and changes encapsulation unilaterally). However, since the
* ppp_generic layer actually does the decapsulation, we need
* a way of notifying it when we _think_ there might be a problem.
* There are two cases:
* 1. LLC-encapsulation was missing when it was enabled. In
* this case, we should tell the upper layer "tear down
* this session if this skb looks ok to you"
* 2. LLC-encapsulation was present when it was disabled. Then
* we need to tell the upper layer "this packet may be
* ok, but if it's in error tear down the session"
* These hooks are not yet available in ppp_generic
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/capability.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/atmppp.h>
#include "common.h"
enum pppoatm_encaps {
e_autodetect = PPPOATM_ENCAPS_AUTODETECT,
e_vc = PPPOATM_ENCAPS_VC,
e_llc = PPPOATM_ENCAPS_LLC,
};
struct pppoatm_vcc {
struct atm_vcc *atmvcc; /* VCC descriptor */
void (*old_push)(struct atm_vcc *, struct sk_buff *);
void (*old_pop)(struct atm_vcc *, struct sk_buff *);
void (*old_release_cb)(struct atm_vcc *);
struct module *old_owner;
/* keep old push/pop for detaching */
enum pppoatm_encaps encaps;
atomic_t inflight;
unsigned long blocked;
int flags; /* SC_COMP_PROT - compress protocol */
struct ppp_channel chan; /* interface to generic ppp layer */
struct tasklet_struct wakeup_tasklet;
};
/*
* We want to allow two packets in the queue. The one that's currently in
* flight, and *one* queued up ready for the ATM device to send immediately
* from its TX done IRQ. We want to be able to use atomic_inc_not_zero(), so
* inflight == -2 represents an empty queue, -1 one packet, and zero means
* there are two packets in the queue.
*/
#define NONE_INFLIGHT -2
#define BLOCKED 0
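/*
 * Walk-through of the counting scheme above (illustrative):
 *
 *	atomic_set(&inflight, NONE_INFLIGHT);	// -2: queue empty
 *	atomic_inc_not_zero(&inflight);		// -2 -> -1: first packet queued
 *	atomic_inc_not_zero(&inflight);		// -1 ->  0: second packet queued
 *	atomic_inc_not_zero(&inflight);		//  0: fails, queue is full
 *	atomic_dec(&inflight);			// TX done: 0 -> -1, room again
 */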
/*
* Header used for LLC Encapsulated PPP (4 bytes) followed by the LCP protocol
* ID (0xC021) used in autodetection
*/
static const unsigned char pppllc[6] = { 0xFE, 0xFE, 0x03, 0xCF, 0xC0, 0x21 };
#define LLC_LEN (4)
static inline struct pppoatm_vcc *atmvcc_to_pvcc(const struct atm_vcc *atmvcc)
{
return (struct pppoatm_vcc *) (atmvcc->user_back);
}
static inline struct pppoatm_vcc *chan_to_pvcc(const struct ppp_channel *chan)
{
return (struct pppoatm_vcc *) (chan->private);
}
/*
* We can't do this directly from our _pop handler, since the ppp code
* doesn't want to be called in interrupt context, so we do it from
* a tasklet
*/
static void pppoatm_wakeup_sender(unsigned long arg)
{
ppp_output_wakeup((struct ppp_channel *) arg);
}
static void pppoatm_release_cb(struct atm_vcc *atmvcc)
{
struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
/*
* As in pppoatm_pop(), it's safe to clear the BLOCKED bit here because
* the wakeup *can't* race with pppoatm_send(). They both hold the PPP
* channel's ->downl lock. And the potential race with *setting* it,
* which leads to the double-check dance in pppoatm_may_send(), doesn't
* exist here. In the sock_owned_by_user() case in pppoatm_send(), we
* set the BLOCKED bit while the socket is still locked. We know that
* ->release_cb() can't be called until that's done.
*/
if (test_and_clear_bit(BLOCKED, &pvcc->blocked))
tasklet_schedule(&pvcc->wakeup_tasklet);
if (pvcc->old_release_cb)
pvcc->old_release_cb(atmvcc);
}
/*
* This gets called every time the ATM card has finished sending our
* skb. The ->old_pop will take care of normal ATM flow control,
* but we also need to wake up the device if we blocked it
*/
static void pppoatm_pop(struct atm_vcc *atmvcc, struct sk_buff *skb)
{
struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
pvcc->old_pop(atmvcc, skb);
atomic_dec(&pvcc->inflight);
/*
* We always used to run the wakeup tasklet unconditionally here, for
* fear of race conditions where we clear the BLOCKED flag just as we
* refuse another packet in pppoatm_send(). This was quite inefficient.
*
* In fact it's OK. The PPP core will only ever call pppoatm_send()
* while holding the channel->downl lock. And ppp_output_wakeup() as
* called by the tasklet will *also* grab that lock. So even if another
* CPU is in pppoatm_send() right now, the tasklet isn't going to race
* with it. The wakeup *will* happen after the other CPU is safely out
* of pppoatm_send() again.
*
* So if the CPU in pppoatm_send() has already set the BLOCKED bit and
* is about to return, that's fine. We trigger a wakeup which will
* happen later. And if the CPU in pppoatm_send() *hasn't* set the
* BLOCKED bit yet, that's fine too because of the double check in
* pppoatm_may_send() which is commented there.
*/
if (test_and_clear_bit(BLOCKED, &pvcc->blocked))
tasklet_schedule(&pvcc->wakeup_tasklet);
}
/*
* Unbind from PPP - currently we only do this when closing the socket,
* but we could put this into an ioctl if need be
*/
static void pppoatm_unassign_vcc(struct atm_vcc *atmvcc)
{
struct pppoatm_vcc *pvcc;
pvcc = atmvcc_to_pvcc(atmvcc);
atmvcc->push = pvcc->old_push;
atmvcc->pop = pvcc->old_pop;
atmvcc->release_cb = pvcc->old_release_cb;
tasklet_kill(&pvcc->wakeup_tasklet);
ppp_unregister_channel(&pvcc->chan);
atmvcc->user_back = NULL;
kfree(pvcc);
}
/* Called when an AAL5 PDU comes in */
static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
{
struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
pr_debug("\n");
if (skb == NULL) { /* VCC was closed */
struct module *module;
pr_debug("removing ATMPPP VCC %p\n", pvcc);
module = pvcc->old_owner;
pppoatm_unassign_vcc(atmvcc);
atmvcc->push(atmvcc, NULL); /* Pass along bad news */
module_put(module);
return;
}
atm_return(atmvcc, skb->truesize);
switch (pvcc->encaps) {
case e_llc:
if (skb->len < LLC_LEN ||
memcmp(skb->data, pppllc, LLC_LEN))
goto error;
skb_pull(skb, LLC_LEN);
break;
case e_autodetect:
if (pvcc->chan.ppp == NULL) { /* Not bound yet! */
kfree_skb(skb);
return;
}
if (skb->len >= sizeof(pppllc) &&
!memcmp(skb->data, pppllc, sizeof(pppllc))) {
pvcc->encaps = e_llc;
skb_pull(skb, LLC_LEN);
break;
}
if (skb->len >= (sizeof(pppllc) - LLC_LEN) &&
!memcmp(skb->data, &pppllc[LLC_LEN],
sizeof(pppllc) - LLC_LEN)) {
pvcc->encaps = e_vc;
pvcc->chan.mtu += LLC_LEN;
break;
}
pr_debug("Couldn't autodetect yet (skb: %02X %02X %02X %02X %02X %02X)\n",
skb->data[0], skb->data[1], skb->data[2],
skb->data[3], skb->data[4], skb->data[5]);
goto error;
case e_vc:
break;
}
ppp_input(&pvcc->chan, skb);
return;
error:
kfree_skb(skb);
ppp_input_error(&pvcc->chan, 0);
}
static int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
{
/*
* It's not clear that we need to bother with using atm_may_send()
* to check we don't exceed sk->sk_sndbuf. If userspace sets a
* value of sk_sndbuf which is lower than the MTU, we're going to
* block for ever. But the code always did that before we introduced
* the packet count limit, so...
*/
if (atm_may_send(pvcc->atmvcc, size) &&
atomic_inc_not_zero_hint(&pvcc->inflight, NONE_INFLIGHT))
return 1;
/*
* We use test_and_set_bit() rather than set_bit() here because
* we need to ensure there's a memory barrier after it. The bit
* *must* be set before we do the atomic_inc() on pvcc->inflight.
* There's no smp_mb__after_set_bit(), so it's this or abuse
* smp_mb__after_atomic().
*/
test_and_set_bit(BLOCKED, &pvcc->blocked);
/*
* We may have raced with pppoatm_pop(). If it ran for the
* last packet in the queue, *just* before we set the BLOCKED
* bit, then it might never run again and the channel could
* remain permanently blocked. Cope with that race by checking
* *again*. If it did run in that window, we'll have space on
* the queue now and can return success. It's harmless to leave
* the BLOCKED flag set, since it's only used as a trigger to
* run the wakeup tasklet. Another wakeup will never hurt.
* If pppoatm_pop() is running but hasn't got as far as making
* space on the queue yet, then it hasn't checked the BLOCKED
* flag yet either, so we're safe in that case too. It'll issue
* an "immediate" wakeup... where "immediate" actually involves
* taking the PPP channel's ->downl lock, which is held by the
* code path that calls pppoatm_send(), and is thus going to
* wait for us to finish.
*/
if (atm_may_send(pvcc->atmvcc, size) &&
atomic_inc_not_zero(&pvcc->inflight))
return 1;
return 0;
}
/*
* Called by the ppp_generic.c to send a packet - returns true if packet
* was accepted. If we return false, then it's our job to call
* ppp_output_wakeup(chan) when we're feeling more up to it.
* Note that in the ENOMEM case (as opposed to the !atm_may_send case)
* we should really drop the packet, but the generic layer doesn't
* support this yet. We just return 'DROP_PACKET' which we actually define
* as success, just to be clear what we're really doing.
*/
#define DROP_PACKET 1
static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
{
struct pppoatm_vcc *pvcc = chan_to_pvcc(chan);
struct atm_vcc *vcc;
int ret;
ATM_SKB(skb)->vcc = pvcc->atmvcc;
pr_debug("(skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc);
if (skb->data[0] == '\0' && (pvcc->flags & SC_COMP_PROT))
(void) skb_pull(skb, 1);
vcc = ATM_SKB(skb)->vcc;
bh_lock_sock(sk_atm(vcc));
if (sock_owned_by_user(sk_atm(vcc))) {
/*
* Needs to happen (and be flushed, hence test_and_) before we unlock
* the socket. It needs to be seen by the time our ->release_cb gets
* called.
*/
test_and_set_bit(BLOCKED, &pvcc->blocked);
goto nospace;
}
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags)) {
bh_unlock_sock(sk_atm(vcc));
kfree_skb(skb);
return DROP_PACKET;
}
switch (pvcc->encaps) { /* LLC encapsulation needed */
case e_llc:
if (skb_headroom(skb) < LLC_LEN) {
struct sk_buff *n;
n = skb_realloc_headroom(skb, LLC_LEN);
if (n != NULL &&
!pppoatm_may_send(pvcc, n->truesize)) {
kfree_skb(n);
goto nospace;
}
consume_skb(skb);
skb = n;
if (skb == NULL) {
bh_unlock_sock(sk_atm(vcc));
return DROP_PACKET;
}
} else if (!pppoatm_may_send(pvcc, skb->truesize))
goto nospace;
memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN);
break;
case e_vc:
if (!pppoatm_may_send(pvcc, skb->truesize))
goto nospace;
break;
case e_autodetect:
bh_unlock_sock(sk_atm(vcc));
pr_debug("Trying to send without setting encaps!\n");
kfree_skb(skb);
return 1;
}
atomic_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
ret = ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
? DROP_PACKET : 1;
bh_unlock_sock(sk_atm(vcc));
return ret;
nospace:
bh_unlock_sock(sk_atm(vcc));
/*
* We don't have space to send this SKB now, but we might have
* already applied SC_COMP_PROT compression, so may need to undo
*/
if ((pvcc->flags & SC_COMP_PROT) && skb_headroom(skb) > 0 &&
skb->data[-1] == '\0')
(void) skb_push(skb, 1);
return 0;
}
/* This handles ioctls sent to the /dev/ppp interface */
static int pppoatm_devppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
unsigned long arg)
{
switch (cmd) {
case PPPIOCGFLAGS:
return put_user(chan_to_pvcc(chan)->flags, (int __user *) arg)
? -EFAULT : 0;
case PPPIOCSFLAGS:
return get_user(chan_to_pvcc(chan)->flags, (int __user *) arg)
? -EFAULT : 0;
}
return -ENOTTY;
}
static const struct ppp_channel_ops pppoatm_ops = {
.start_xmit = pppoatm_send,
.ioctl = pppoatm_devppp_ioctl,
};
static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
{
struct atm_backend_ppp be;
struct pppoatm_vcc *pvcc;
int err;
/*
* Each PPPoATM instance has its own tasklet - this is just a
* prototypical one used to initialize them
*/
static const DECLARE_TASKLET(tasklet_proto, pppoatm_wakeup_sender, 0);
if (copy_from_user(&be, arg, sizeof be))
return -EFAULT;
if (be.encaps != PPPOATM_ENCAPS_AUTODETECT &&
be.encaps != PPPOATM_ENCAPS_VC && be.encaps != PPPOATM_ENCAPS_LLC)
return -EINVAL;
pvcc = kzalloc(sizeof(*pvcc), GFP_KERNEL);
if (pvcc == NULL)
return -ENOMEM;
pvcc->atmvcc = atmvcc;
/* Maximum is zero, so that we can use atomic_inc_not_zero() */
atomic_set(&pvcc->inflight, NONE_INFLIGHT);
pvcc->old_push = atmvcc->push;
pvcc->old_pop = atmvcc->pop;
pvcc->old_owner = atmvcc->owner;
pvcc->old_release_cb = atmvcc->release_cb;
pvcc->encaps = (enum pppoatm_encaps) be.encaps;
pvcc->chan.private = pvcc;
pvcc->chan.ops = &pppoatm_ops;
pvcc->chan.mtu = atmvcc->qos.txtp.max_sdu - PPP_HDRLEN -
(be.encaps == e_vc ? 0 : LLC_LEN);
pvcc->wakeup_tasklet = tasklet_proto;
pvcc->wakeup_tasklet.data = (unsigned long) &pvcc->chan;
err = ppp_register_channel(&pvcc->chan);
if (err != 0) {
kfree(pvcc);
return err;
}
atmvcc->user_back = pvcc;
atmvcc->push = pppoatm_push;
atmvcc->pop = pppoatm_pop;
atmvcc->release_cb = pppoatm_release_cb;
__module_get(THIS_MODULE);
atmvcc->owner = THIS_MODULE;
/* re-process everything received between connection setup and
backend setup */
vcc_process_recv_queue(atmvcc);
return 0;
}
/*
* This handles ioctls actually performed on our vcc - we must return
* -ENOIOCTLCMD for any unrecognized ioctl
*/
static int pppoatm_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
struct atm_vcc *atmvcc = ATM_SD(sock);
void __user *argp = (void __user *)arg;
if (cmd != ATM_SETBACKEND && atmvcc->push != pppoatm_push)
return -ENOIOCTLCMD;
switch (cmd) {
case ATM_SETBACKEND: {
atm_backend_t b;
if (get_user(b, (atm_backend_t __user *) argp))
return -EFAULT;
if (b != ATM_BACKEND_PPP)
return -ENOIOCTLCMD;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (sock->state != SS_CONNECTED)
return -EINVAL;
return pppoatm_assign_vcc(atmvcc, argp);
}
case PPPIOCGCHAN:
return put_user(ppp_channel_index(&atmvcc_to_pvcc(atmvcc)->
chan), (int __user *) argp) ? -EFAULT : 0;
case PPPIOCGUNIT:
return put_user(ppp_unit_number(&atmvcc_to_pvcc(atmvcc)->
chan), (int __user *) argp) ? -EFAULT : 0;
}
return -ENOIOCTLCMD;
}
static struct atm_ioctl pppoatm_ioctl_ops = {
.owner = THIS_MODULE,
.ioctl = pppoatm_ioctl,
};
static int __init pppoatm_init(void)
{
register_atm_ioctl(&pppoatm_ioctl_ops);
return 0;
}
static void __exit pppoatm_exit(void)
{
deregister_atm_ioctl(&pppoatm_ioctl_ops);
}
module_init(pppoatm_init);
module_exit(pppoatm_exit);
MODULE_AUTHOR("Mitchell Blank Jr <mitch@sfgoth.com>");
MODULE_DESCRIPTION("RFC2364 PPP over ATM/AAL5");
MODULE_LICENSE("GPL");

497
net/atm/proc.c Normal file
View file

@ -0,0 +1,497 @@
/* net/atm/proc.c - ATM /proc interface
*
* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA
*
* seq_file api usage by romieu@fr.zoreil.com
*
* Evaluating the efficiency of the whole thing is left as an exercise to
* the reader.
*/
#include <linux/module.h> /* for EXPORT_SYMBOL */
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/netdevice.h>
#include <linux/atmclip.h>
#include <linux/init.h> /* for __init */
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/atmclip.h>
#include <linux/uaccess.h>
#include <linux/param.h> /* for HZ */
#include <linux/atomic.h>
#include "resources.h"
#include "common.h" /* atm_proc_init prototype */
#include "signaling.h" /* to get sigd - ugly too */
static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
size_t count, loff_t *pos);
static const struct file_operations proc_atm_dev_ops = {
.owner = THIS_MODULE,
.read = proc_dev_atm_read,
.llseek = noop_llseek,
};
static void add_stats(struct seq_file *seq, const char *aal,
const struct k_atm_aal_stats *stats)
{
seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
atomic_read(&stats->tx), atomic_read(&stats->tx_err),
atomic_read(&stats->rx), atomic_read(&stats->rx_err),
atomic_read(&stats->rx_drop));
}
static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
{
int i;
seq_printf(seq, "%3d %-8s", dev->number, dev->type);
for (i = 0; i < ESI_LEN; i++)
seq_printf(seq, "%02x", dev->esi[i]);
seq_puts(seq, " ");
add_stats(seq, "0", &dev->stats.aal0);
seq_puts(seq, " ");
add_stats(seq, "5", &dev->stats.aal5);
seq_printf(seq, "\t[%d]", atomic_read(&dev->refcnt));
seq_putc(seq, '\n');
}
struct vcc_state {
int bucket;
struct sock *sk;
int family;
};
static inline int compare_family(struct sock *sk, int family)
{
return !family || (sk->sk_family == family);
}
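/*
 * Walk the VCC hash table, skipping sockets that don't match 'family'
 * (0 matches any family). 'l' is the number of matching sockets still to
 * advance past; on success *sock points at the target and the function
 * returns true. When the table is exhausted, *sock is reset to
 * SEQ_START_TOKEN.
 */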
static int __vcc_walk(struct sock **sock, int family, int *bucket, loff_t l)
{
struct sock *sk = *sock;
if (sk == SEQ_START_TOKEN) {
for (*bucket = 0; *bucket < VCC_HTABLE_SIZE; ++*bucket) {
struct hlist_head *head = &vcc_hash[*bucket];
sk = hlist_empty(head) ? NULL : __sk_head(head);
if (sk)
break;
}
l--;
}
try_again:
for (; sk; sk = sk_next(sk)) {
l -= compare_family(sk, family);
if (l < 0)
goto out;
}
if (!sk && ++*bucket < VCC_HTABLE_SIZE) {
sk = sk_head(&vcc_hash[*bucket]);
goto try_again;
}
sk = SEQ_START_TOKEN;
out:
*sock = sk;
return (l < 0);
}
static inline void *vcc_walk(struct vcc_state *state, loff_t l)
{
return __vcc_walk(&state->sk, state->family, &state->bucket, l) ?
state : NULL;
}
static int __vcc_seq_open(struct inode *inode, struct file *file,
int family, const struct seq_operations *ops)
{
struct vcc_state *state;
state = __seq_open_private(file, ops, sizeof(*state));
if (state == NULL)
return -ENOMEM;
state->family = family;
return 0;
}
static void *vcc_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(vcc_sklist_lock)
{
struct vcc_state *state = seq->private;
loff_t left = *pos;
read_lock(&vcc_sklist_lock);
state->sk = SEQ_START_TOKEN;
return left ? vcc_walk(state, left) : SEQ_START_TOKEN;
}
static void vcc_seq_stop(struct seq_file *seq, void *v)
__releases(vcc_sklist_lock)
{
read_unlock(&vcc_sklist_lock);
}
static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct vcc_state *state = seq->private;
v = vcc_walk(state, 1);
*pos += !!PTR_ERR(v);
return v;
}
static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc)
{
static const char *const class_name[] = {
"off", "UBR", "CBR", "VBR", "ABR"};
static const char *const aal_name[] = {
"---", "1", "2", "3/4", /* 0- 3 */
"???", "5", "???", "???", /* 4- 7 */
"???", "???", "???", "???", /* 8-11 */
"???", "0", "???", "???"}; /* 12-15 */
seq_printf(seq, "%3d %3d %5d %-3s %7d %-5s %7d %-6s",
vcc->dev->number, vcc->vpi, vcc->vci,
vcc->qos.aal >= ARRAY_SIZE(aal_name) ? "err" :
aal_name[vcc->qos.aal], vcc->qos.rxtp.min_pcr,
class_name[vcc->qos.rxtp.traffic_class],
vcc->qos.txtp.min_pcr,
class_name[vcc->qos.txtp.traffic_class]);
if (test_bit(ATM_VF_IS_CLIP, &vcc->flags)) {
struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
struct net_device *dev;
dev = clip_vcc->entry ? clip_vcc->entry->neigh->dev : NULL;
seq_printf(seq, "CLIP, Itf:%s, Encap:",
dev ? dev->name : "none?");
seq_printf(seq, "%s", clip_vcc->encap ? "LLC/SNAP" : "None");
}
seq_putc(seq, '\n');
}
static const char *vcc_state(struct atm_vcc *vcc)
{
static const char *const map[] = { ATM_VS2TXT_MAP };
return map[ATM_VF2VS(vcc->flags)];
}
static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
{
struct sock *sk = sk_atm(vcc);
seq_printf(seq, "%pK ", vcc);
if (!vcc->dev)
seq_printf(seq, "Unassigned ");
else
seq_printf(seq, "%3d %3d %5d ", vcc->dev->number, vcc->vpi,
vcc->vci);
switch (sk->sk_family) {
case AF_ATMPVC:
seq_printf(seq, "PVC");
break;
case AF_ATMSVC:
seq_printf(seq, "SVC");
break;
default:
seq_printf(seq, "%3d", sk->sk_family);
}
seq_printf(seq, " %04lx %5d %7d/%7d %7d/%7d [%d]\n",
vcc->flags, sk->sk_err,
sk_wmem_alloc_get(sk), sk->sk_sndbuf,
sk_rmem_alloc_get(sk), sk->sk_rcvbuf,
atomic_read(&sk->sk_refcnt));
}
static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
{
if (!vcc->dev)
seq_printf(seq, sizeof(void *) == 4 ?
"N/A@%pK%10s" : "N/A@%pK%2s", vcc, "");
else
seq_printf(seq, "%3d %3d %5d ",
vcc->dev->number, vcc->vpi, vcc->vci);
seq_printf(seq, "%-10s ", vcc_state(vcc));
seq_printf(seq, "%s%s", vcc->remote.sas_addr.pub,
*vcc->remote.sas_addr.pub && *vcc->remote.sas_addr.prv ? "+" : "");
if (*vcc->remote.sas_addr.prv) {
int i;
for (i = 0; i < ATM_ESA_LEN; i++)
seq_printf(seq, "%02x", vcc->remote.sas_addr.prv[i]);
}
seq_putc(seq, '\n');
}
static int atm_dev_seq_show(struct seq_file *seq, void *v)
{
static char atm_dev_banner[] =
"Itf Type ESI/\"MAC\"addr "
"AAL(TX,err,RX,err,drop) ... [refcnt]\n";
if (v == &atm_devs)
seq_puts(seq, atm_dev_banner);
else {
struct atm_dev *dev = list_entry(v, struct atm_dev, dev_list);
atm_dev_info(seq, dev);
}
return 0;
}
static const struct seq_operations atm_dev_seq_ops = {
.start = atm_dev_seq_start,
.next = atm_dev_seq_next,
.stop = atm_dev_seq_stop,
.show = atm_dev_seq_show,
};
static int atm_dev_seq_open(struct inode *inode, struct file *file)
{
return seq_open(file, &atm_dev_seq_ops);
}
static const struct file_operations devices_seq_fops = {
.open = atm_dev_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static int pvc_seq_show(struct seq_file *seq, void *v)
{
static char atm_pvc_banner[] =
"Itf VPI VCI AAL RX(PCR,Class) TX(PCR,Class)\n";
if (v == SEQ_START_TOKEN)
seq_puts(seq, atm_pvc_banner);
else {
struct vcc_state *state = seq->private;
struct atm_vcc *vcc = atm_sk(state->sk);
pvc_info(seq, vcc);
}
return 0;
}
static const struct seq_operations pvc_seq_ops = {
.start = vcc_seq_start,
.next = vcc_seq_next,
.stop = vcc_seq_stop,
.show = pvc_seq_show,
};
static int pvc_seq_open(struct inode *inode, struct file *file)
{
return __vcc_seq_open(inode, file, PF_ATMPVC, &pvc_seq_ops);
}
static const struct file_operations pvc_seq_fops = {
.open = pvc_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
};
static int vcc_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_printf(seq, sizeof(void *) == 4 ? "%-8s%s" : "%-16s%s",
"Address ", "Itf VPI VCI Fam Flags Reply "
"Send buffer Recv buffer [refcnt]\n");
} else {
struct vcc_state *state = seq->private;
struct atm_vcc *vcc = atm_sk(state->sk);
vcc_info(seq, vcc);
}
return 0;
}
static const struct seq_operations vcc_seq_ops = {
.start = vcc_seq_start,
.next = vcc_seq_next,
.stop = vcc_seq_stop,
.show = vcc_seq_show,
};
static int vcc_seq_open(struct inode *inode, struct file *file)
{
return __vcc_seq_open(inode, file, 0, &vcc_seq_ops);
}
static const struct file_operations vcc_seq_fops = {
.open = vcc_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
};
static int svc_seq_show(struct seq_file *seq, void *v)
{
static const char atm_svc_banner[] =
"Itf VPI VCI State Remote\n";
if (v == SEQ_START_TOKEN)
seq_puts(seq, atm_svc_banner);
else {
struct vcc_state *state = seq->private;
struct atm_vcc *vcc = atm_sk(state->sk);
svc_info(seq, vcc);
}
return 0;
}
static const struct seq_operations svc_seq_ops = {
.start = vcc_seq_start,
.next = vcc_seq_next,
.stop = vcc_seq_stop,
.show = svc_seq_show,
};
static int svc_seq_open(struct inode *inode, struct file *file)
{
return __vcc_seq_open(inode, file, PF_ATMSVC, &svc_seq_ops);
}
static const struct file_operations svc_seq_fops = {
.open = svc_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
};
static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
struct atm_dev *dev;
unsigned long page;
int length;
if (count == 0)
return 0;
page = get_zeroed_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
dev = PDE_DATA(file_inode(file));
if (!dev->ops->proc_read)
length = -EINVAL;
else {
length = dev->ops->proc_read(dev, pos, (char *)page);
if (length > count)
length = -EINVAL;
}
if (length >= 0) {
if (copy_to_user(buf, (char *)page, length))
length = -EFAULT;
(*pos)++;
}
free_page(page);
return length;
}
struct proc_dir_entry *atm_proc_root;
EXPORT_SYMBOL(atm_proc_root);
int atm_proc_dev_register(struct atm_dev *dev)
{
int error;
/* No proc info */
if (!dev->ops->proc_read)
return 0;
error = -ENOMEM;
dev->proc_name = kasprintf(GFP_KERNEL, "%s:%d", dev->type, dev->number);
if (!dev->proc_name)
goto err_out;
dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root,
&proc_atm_dev_ops, dev);
if (!dev->proc_entry)
goto err_free_name;
return 0;
err_free_name:
kfree(dev->proc_name);
err_out:
return error;
}
void atm_proc_dev_deregister(struct atm_dev *dev)
{
if (!dev->ops->proc_read)
return;
remove_proc_entry(dev->proc_name, atm_proc_root);
kfree(dev->proc_name);
}
static struct atm_proc_entry {
char *name;
const struct file_operations *proc_fops;
struct proc_dir_entry *dirent;
} atm_proc_ents[] = {
{ .name = "devices", .proc_fops = &devices_seq_fops },
{ .name = "pvc", .proc_fops = &pvc_seq_fops },
{ .name = "svc", .proc_fops = &svc_seq_fops },
{ .name = "vc", .proc_fops = &vcc_seq_fops },
{ .name = NULL, .proc_fops = NULL }
};
static void atm_proc_dirs_remove(void)
{
static struct atm_proc_entry *e;
for (e = atm_proc_ents; e->name; e++) {
if (e->dirent)
remove_proc_entry(e->name, atm_proc_root);
}
remove_proc_entry("atm", init_net.proc_net);
}
int __init atm_proc_init(void)
{
static struct atm_proc_entry *e;
int ret;
atm_proc_root = proc_net_mkdir(&init_net, "atm", init_net.proc_net);
if (!atm_proc_root)
goto err_out;
for (e = atm_proc_ents; e->name; e++) {
struct proc_dir_entry *dirent;
dirent = proc_create(e->name, S_IRUGO,
atm_proc_root, e->proc_fops);
if (!dirent)
goto err_out_remove;
e->dirent = dirent;
}
ret = 0;
out:
return ret;
err_out_remove:
atm_proc_dirs_remove();
err_out:
ret = -ENOMEM;
goto out;
}
void atm_proc_exit(void)
{
atm_proc_dirs_remove();
}

13
net/atm/protocols.h Normal file

@ -0,0 +1,13 @@
/* net/atm/protocols.h - ATM protocol handler entry points */
/* Written 1995-1997 by Werner Almesberger, EPFL LRC */
#ifndef NET_ATM_PROTOCOLS_H
#define NET_ATM_PROTOCOLS_H
int atm_init_aal0(struct atm_vcc *vcc); /* "raw" AAL0 */
int atm_init_aal34(struct atm_vcc *vcc);/* "raw" AAL3/4 transport */
int atm_init_aal5(struct atm_vcc *vcc); /* "raw" AAL5 transport */
#endif

162
net/atm/pvc.c Normal file

@ -0,0 +1,162 @@
/* net/atm/pvc.c - ATM PVC sockets */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#include <linux/net.h> /* struct socket, struct proto_ops */
#include <linux/atm.h> /* ATM stuff */
#include <linux/atmdev.h> /* ATM devices */
#include <linux/errno.h> /* error codes */
#include <linux/kernel.h> /* printk */
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <net/sock.h> /* for sock_no_* */
#include "resources.h" /* devs and vccs */
#include "common.h" /* common for PVCs and SVCs */
static int pvc_shutdown(struct socket *sock, int how)
{
return 0;
}
static int pvc_bind(struct socket *sock, struct sockaddr *sockaddr,
int sockaddr_len)
{
struct sock *sk = sock->sk;
struct sockaddr_atmpvc *addr;
struct atm_vcc *vcc;
int error;
if (sockaddr_len != sizeof(struct sockaddr_atmpvc))
return -EINVAL;
addr = (struct sockaddr_atmpvc *)sockaddr;
if (addr->sap_family != AF_ATMPVC)
return -EAFNOSUPPORT;
lock_sock(sk);
vcc = ATM_SD(sock);
if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) {
error = -EBADFD;
goto out;
}
if (test_bit(ATM_VF_PARTIAL, &vcc->flags)) {
if (vcc->vpi != ATM_VPI_UNSPEC)
addr->sap_addr.vpi = vcc->vpi;
if (vcc->vci != ATM_VCI_UNSPEC)
addr->sap_addr.vci = vcc->vci;
}
error = vcc_connect(sock, addr->sap_addr.itf, addr->sap_addr.vpi,
addr->sap_addr.vci);
out:
release_sock(sk);
return error;
}
static int pvc_connect(struct socket *sock, struct sockaddr *sockaddr,
int sockaddr_len, int flags)
{
return pvc_bind(sock, sockaddr, sockaddr_len);
}
static int pvc_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int error;
lock_sock(sk);
error = vcc_setsockopt(sock, level, optname, optval, optlen);
release_sock(sk);
return error;
}
static int pvc_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
int error;
lock_sock(sk);
error = vcc_getsockopt(sock, level, optname, optval, optlen);
release_sock(sk);
return error;
}
static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr,
int *sockaddr_len, int peer)
{
struct sockaddr_atmpvc *addr;
struct atm_vcc *vcc = ATM_SD(sock);
if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
return -ENOTCONN;
*sockaddr_len = sizeof(struct sockaddr_atmpvc);
addr = (struct sockaddr_atmpvc *)sockaddr;
memset(addr, 0, sizeof(*addr));
addr->sap_family = AF_ATMPVC;
addr->sap_addr.itf = vcc->dev->number;
addr->sap_addr.vpi = vcc->vpi;
addr->sap_addr.vci = vcc->vci;
return 0;
}
static const struct proto_ops pvc_proto_ops = {
.family = PF_ATMPVC,
.owner = THIS_MODULE,
.release = vcc_release,
.bind = pvc_bind,
.connect = pvc_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = pvc_getname,
.poll = vcc_poll,
.ioctl = vcc_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = vcc_compat_ioctl,
#endif
.listen = sock_no_listen,
.shutdown = pvc_shutdown,
.setsockopt = pvc_setsockopt,
.getsockopt = pvc_getsockopt,
.sendmsg = vcc_sendmsg,
.recvmsg = vcc_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
static int pvc_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
if (net != &init_net)
return -EAFNOSUPPORT;
sock->ops = &pvc_proto_ops;
return vcc_create(net, sock, protocol, PF_ATMPVC);
}
static const struct net_proto_family pvc_family_ops = {
.family = PF_ATMPVC,
.create = pvc_create,
.owner = THIS_MODULE,
};
/*
* Initialize the ATM PVC protocol family
*/
int __init atmpvc_init(void)
{
return sock_register(&pvc_family_ops);
}
void atmpvc_exit(void)
{
sock_unregister(PF_ATMPVC);
}

85
net/atm/raw.c Normal file

@ -0,0 +1,85 @@
/* net/atm/raw.c - Raw AAL0 and AAL5 transports */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
#include <linux/atmdev.h>
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "common.h"
#include "protocols.h"
/*
* SKB == NULL indicates that the link is being closed
*/
static void atm_push_raw(struct atm_vcc *vcc, struct sk_buff *skb)
{
if (skb) {
struct sock *sk = sk_atm(vcc);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk);
}
}
static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct sock *sk = sk_atm(vcc);
pr_debug("(%d) %d -= %d\n",
vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
dev_kfree_skb_any(skb);
sk->sk_write_space(sk);
}
static int atm_send_aal0(struct atm_vcc *vcc, struct sk_buff *skb)
{
/*
* Note that if vpi/vci are _ANY or _UNSPEC the below will
* still work
*/
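/* the first 32 bits of an AAL0 frame are the raw ATM cell header;
* only the VPI/VCI fields are compared against the VCC's own values */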
if (!capable(CAP_NET_ADMIN) &&
(((u32 *)skb->data)[0] & (ATM_HDR_VPI_MASK | ATM_HDR_VCI_MASK)) !=
((vcc->vpi << ATM_HDR_VPI_SHIFT) |
(vcc->vci << ATM_HDR_VCI_SHIFT))) {
kfree_skb(skb);
return -EADDRNOTAVAIL;
}
return vcc->dev->ops->send(vcc, skb);
}
int atm_init_aal0(struct atm_vcc *vcc)
{
vcc->push = atm_push_raw;
vcc->pop = atm_pop_raw;
vcc->push_oam = NULL;
vcc->send = atm_send_aal0;
return 0;
}
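/* for AAL3/4 and AAL5 the adapter performs (re)assembly itself, so the
* raw push/pop handlers are reused and send goes straight to the driver */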
int atm_init_aal34(struct atm_vcc *vcc)
{
vcc->push = atm_push_raw;
vcc->pop = atm_pop_raw;
vcc->push_oam = NULL;
vcc->send = vcc->dev->ops->send;
return 0;
}
int atm_init_aal5(struct atm_vcc *vcc)
{
vcc->push = atm_push_raw;
vcc->pop = atm_pop_raw;
vcc->push_oam = NULL;
vcc->send = vcc->dev->ops->send;
return 0;
}
EXPORT_SYMBOL(atm_init_aal5);

463
net/atm/resources.c Normal file

@ -0,0 +1,463 @@
/* net/atm/resources.c - Statically allocated resources */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
/* Fixes
* Arnaldo Carvalho de Melo <acme@conectiva.com.br>
* 2002/01 - don't free the whole struct sock on sk->destruct time,
* use the default destruct function initialized by sock_init_data */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/atmdev.h>
#include <linux/sonet.h>
#include <linux/kernel.h> /* for barrier */
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <net/sock.h> /* for struct sock */
#include "common.h"
#include "resources.h"
#include "addr.h"
LIST_HEAD(atm_devs);
DEFINE_MUTEX(atm_dev_mutex);
static struct atm_dev *__alloc_atm_dev(const char *type)
{
struct atm_dev *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
dev->type = type;
dev->signal = ATM_PHY_SIG_UNKNOWN;
dev->link_rate = ATM_OC3_PCR;
spin_lock_init(&dev->lock);
INIT_LIST_HEAD(&dev->local);
INIT_LIST_HEAD(&dev->lecs);
return dev;
}
static struct atm_dev *__atm_dev_lookup(int number)
{
struct atm_dev *dev;
struct list_head *p;
list_for_each(p, &atm_devs) {
dev = list_entry(p, struct atm_dev, dev_list);
if (dev->number == number) {
atm_dev_hold(dev);
return dev;
}
}
return NULL;
}
struct atm_dev *atm_dev_lookup(int number)
{
struct atm_dev *dev;
mutex_lock(&atm_dev_mutex);
dev = __atm_dev_lookup(number);
mutex_unlock(&atm_dev_mutex);
return dev;
}
EXPORT_SYMBOL(atm_dev_lookup);
struct atm_dev *atm_dev_register(const char *type, struct device *parent,
const struct atmdev_ops *ops, int number,
unsigned long *flags)
{
struct atm_dev *dev, *inuse;
dev = __alloc_atm_dev(type);
if (!dev) {
pr_err("no space for dev %s\n", type);
return NULL;
}
mutex_lock(&atm_dev_mutex);
if (number != -1) {
inuse = __atm_dev_lookup(number);
if (inuse) {
atm_dev_put(inuse);
mutex_unlock(&atm_dev_mutex);
kfree(dev);
return NULL;
}
dev->number = number;
} else {
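/* no device number was requested: scan upward for the first free one */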
dev->number = 0;
while ((inuse = __atm_dev_lookup(dev->number))) {
atm_dev_put(inuse);
dev->number++;
}
}
dev->ops = ops;
if (flags)
dev->flags = *flags;
else
memset(&dev->flags, 0, sizeof(dev->flags));
memset(&dev->stats, 0, sizeof(dev->stats));
atomic_set(&dev->refcnt, 1);
if (atm_proc_dev_register(dev) < 0) {
pr_err("atm_proc_dev_register failed for dev %s\n", type);
goto out_fail;
}
if (atm_register_sysfs(dev, parent) < 0) {
pr_err("atm_register_sysfs failed for dev %s\n", type);
atm_proc_dev_deregister(dev);
goto out_fail;
}
list_add_tail(&dev->dev_list, &atm_devs);
out:
mutex_unlock(&atm_dev_mutex);
return dev;
out_fail:
kfree(dev);
dev = NULL;
goto out;
}
EXPORT_SYMBOL(atm_dev_register);
void atm_dev_deregister(struct atm_dev *dev)
{
BUG_ON(test_bit(ATM_DF_REMOVED, &dev->flags));
set_bit(ATM_DF_REMOVED, &dev->flags);
/*
* If we remove the current device from the atm_devs list, a new device
* with the same number can appear, so we must deregister its proc
* entry, release all of its VCCs asynchronously and remove them from
* the vccs list as well.
*/
mutex_lock(&atm_dev_mutex);
list_del(&dev->dev_list);
mutex_unlock(&atm_dev_mutex);
atm_dev_release_vccs(dev);
atm_unregister_sysfs(dev);
atm_proc_dev_deregister(dev);
atm_dev_put(dev);
}
EXPORT_SYMBOL(atm_dev_deregister);
static void copy_aal_stats(struct k_atm_aal_stats *from,
struct atm_aal_stats *to)
{
#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
__AAL_STAT_ITEMS
#undef __HANDLE_ITEM
}
static void subtract_aal_stats(struct k_atm_aal_stats *from,
struct atm_aal_stats *to)
{
#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
__AAL_STAT_ITEMS
#undef __HANDLE_ITEM
}
static int fetch_stats(struct atm_dev *dev, struct atm_dev_stats __user *arg,
int zero)
{
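/* with "zero" set (ATM_GETSTATZ) the snapshot just copied out is
* subtracted back, so the counters are reset relative to exactly what
* the caller saw, without losing concurrent updates */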
struct atm_dev_stats tmp;
int error = 0;
copy_aal_stats(&dev->stats.aal0, &tmp.aal0);
copy_aal_stats(&dev->stats.aal34, &tmp.aal34);
copy_aal_stats(&dev->stats.aal5, &tmp.aal5);
if (arg)
error = copy_to_user(arg, &tmp, sizeof(tmp));
if (zero && !error) {
subtract_aal_stats(&dev->stats.aal0, &tmp.aal0);
subtract_aal_stats(&dev->stats.aal34, &tmp.aal34);
subtract_aal_stats(&dev->stats.aal5, &tmp.aal5);
}
return error ? -EFAULT : 0;
}
int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
{
void __user *buf;
int error, len, number, size = 0;
struct atm_dev *dev;
struct list_head *p;
int *tmp_buf, *tmp_p;
int __user *sioc_len;
int __user *iobuf_len;
#ifndef CONFIG_COMPAT
compat = 0; /* Just so the compiler _knows_ */
#endif
switch (cmd) {
case ATM_GETNAMES:
if (compat) {
#ifdef CONFIG_COMPAT
struct compat_atm_iobuf __user *ciobuf = arg;
compat_uptr_t cbuf;
iobuf_len = &ciobuf->length;
if (get_user(cbuf, &ciobuf->buffer))
return -EFAULT;
buf = compat_ptr(cbuf);
#endif
} else {
struct atm_iobuf __user *iobuf = arg;
iobuf_len = &iobuf->length;
if (get_user(buf, &iobuf->buffer))
return -EFAULT;
}
if (get_user(len, iobuf_len))
return -EFAULT;
mutex_lock(&atm_dev_mutex);
list_for_each(p, &atm_devs)
size += sizeof(int);
if (size > len) {
mutex_unlock(&atm_dev_mutex);
return -E2BIG;
}
tmp_buf = kmalloc(size, GFP_ATOMIC);
if (!tmp_buf) {
mutex_unlock(&atm_dev_mutex);
return -ENOMEM;
}
tmp_p = tmp_buf;
list_for_each(p, &atm_devs) {
dev = list_entry(p, struct atm_dev, dev_list);
*tmp_p++ = dev->number;
}
mutex_unlock(&atm_dev_mutex);
error = ((copy_to_user(buf, tmp_buf, size)) ||
put_user(size, iobuf_len))
? -EFAULT : 0;
kfree(tmp_buf);
return error;
default:
break;
}
if (compat) {
#ifdef CONFIG_COMPAT
struct compat_atmif_sioc __user *csioc = arg;
compat_uptr_t carg;
sioc_len = &csioc->length;
if (get_user(carg, &csioc->arg))
return -EFAULT;
buf = compat_ptr(carg);
if (get_user(len, &csioc->length))
return -EFAULT;
if (get_user(number, &csioc->number))
return -EFAULT;
#endif
} else {
struct atmif_sioc __user *sioc = arg;
sioc_len = &sioc->length;
if (get_user(buf, &sioc->arg))
return -EFAULT;
if (get_user(len, &sioc->length))
return -EFAULT;
if (get_user(number, &sioc->number))
return -EFAULT;
}
dev = try_then_request_module(atm_dev_lookup(number), "atm-device-%d",
number);
if (!dev)
return -ENODEV;
switch (cmd) {
case ATM_GETTYPE:
size = strlen(dev->type) + 1;
if (copy_to_user(buf, dev->type, size)) {
error = -EFAULT;
goto done;
}
break;
case ATM_GETESI:
size = ESI_LEN;
if (copy_to_user(buf, dev->esi, size)) {
error = -EFAULT;
goto done;
}
break;
case ATM_SETESI:
{
int i;
for (i = 0; i < ESI_LEN; i++)
if (dev->esi[i]) {
error = -EEXIST;
goto done;
}
}
/* fall through */
case ATM_SETESIF:
{
unsigned char esi[ESI_LEN];
if (!capable(CAP_NET_ADMIN)) {
error = -EPERM;
goto done;
}
if (copy_from_user(esi, buf, ESI_LEN)) {
error = -EFAULT;
goto done;
}
memcpy(dev->esi, esi, ESI_LEN);
error = ESI_LEN;
goto done;
}
case ATM_GETSTATZ:
if (!capable(CAP_NET_ADMIN)) {
error = -EPERM;
goto done;
}
/* fall through */
case ATM_GETSTAT:
size = sizeof(struct atm_dev_stats);
error = fetch_stats(dev, buf, cmd == ATM_GETSTATZ);
if (error)
goto done;
break;
case ATM_GETCIRANGE:
size = sizeof(struct atm_cirange);
if (copy_to_user(buf, &dev->ci_range, size)) {
error = -EFAULT;
goto done;
}
break;
case ATM_GETLINKRATE:
size = sizeof(int);
if (copy_to_user(buf, &dev->link_rate, size)) {
error = -EFAULT;
goto done;
}
break;
case ATM_RSTADDR:
if (!capable(CAP_NET_ADMIN)) {
error = -EPERM;
goto done;
}
atm_reset_addr(dev, ATM_ADDR_LOCAL);
break;
case ATM_ADDADDR:
case ATM_DELADDR:
case ATM_ADDLECSADDR:
case ATM_DELLECSADDR:
{
struct sockaddr_atmsvc addr;
if (!capable(CAP_NET_ADMIN)) {
error = -EPERM;
goto done;
}
if (copy_from_user(&addr, buf, sizeof(addr))) {
error = -EFAULT;
goto done;
}
if (cmd == ATM_ADDADDR || cmd == ATM_ADDLECSADDR)
error = atm_add_addr(dev, &addr,
(cmd == ATM_ADDADDR ?
ATM_ADDR_LOCAL : ATM_ADDR_LECS));
else
error = atm_del_addr(dev, &addr,
(cmd == ATM_DELADDR ?
ATM_ADDR_LOCAL : ATM_ADDR_LECS));
goto done;
}
case ATM_GETADDR:
case ATM_GETLECSADDR:
error = atm_get_addr(dev, buf, len,
(cmd == ATM_GETADDR ?
ATM_ADDR_LOCAL : ATM_ADDR_LECS));
if (error < 0)
goto done;
size = error;
/* may return 0, but later on size == 0 means "don't
write the length" */
error = put_user(size, sioc_len) ? -EFAULT : 0;
goto done;
case ATM_SETLOOP:
if (__ATM_LM_XTRMT((int) (unsigned long) buf) &&
__ATM_LM_XTLOC((int) (unsigned long) buf) >
__ATM_LM_XTRMT((int) (unsigned long) buf)) {
error = -EINVAL;
goto done;
}
/* fall through */
case ATM_SETCIRANGE:
case SONET_GETSTATZ:
case SONET_SETDIAG:
case SONET_CLRDIAG:
case SONET_SETFRAMING:
if (!capable(CAP_NET_ADMIN)) {
error = -EPERM;
goto done;
}
/* fall through */
default:
if (compat) {
#ifdef CONFIG_COMPAT
if (!dev->ops->compat_ioctl) {
error = -EINVAL;
goto done;
}
size = dev->ops->compat_ioctl(dev, cmd, buf);
#endif
} else {
if (!dev->ops->ioctl) {
error = -EINVAL;
goto done;
}
size = dev->ops->ioctl(dev, cmd, buf);
}
if (size < 0) {
error = (size == -ENOIOCTLCMD ? -ENOTTY : size);
goto done;
}
}
if (size)
error = put_user(size, sioc_len) ? -EFAULT : 0;
else
error = 0;
done:
atm_dev_put(dev);
return error;
}
void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos)
{
mutex_lock(&atm_dev_mutex);
return seq_list_start_head(&atm_devs, *pos);
}
void atm_dev_seq_stop(struct seq_file *seq, void *v)
{
mutex_unlock(&atm_dev_mutex);
}
void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_list_next(v, &atm_devs, pos);
}

47
net/atm/resources.h Normal file

@ -0,0 +1,47 @@
/* net/atm/resources.h - ATM-related resources */
/* Written 1995-1998 by Werner Almesberger, EPFL LRC/ICA */
#ifndef NET_ATM_RESOURCES_H
#define NET_ATM_RESOURCES_H
#include <linux/atmdev.h>
#include <linux/mutex.h>
extern struct list_head atm_devs;
extern struct mutex atm_dev_mutex;
int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat);
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos);
void atm_dev_seq_stop(struct seq_file *seq, void *v);
void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
int atm_proc_dev_register(struct atm_dev *dev);
void atm_proc_dev_deregister(struct atm_dev *dev);
#else
static inline int atm_proc_dev_register(struct atm_dev *dev)
{
return 0;
}
static inline void atm_proc_dev_deregister(struct atm_dev *dev)
{
/* nothing */
}
#endif /* CONFIG_PROC_FS */
int atm_register_sysfs(struct atm_dev *adev, struct device *parent);
void atm_unregister_sysfs(struct atm_dev *adev);
#endif

268
net/atm/signaling.c Normal file

@ -0,0 +1,268 @@
/* net/atm/signaling.c - ATM signaling */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/errno.h> /* error codes */
#include <linux/kernel.h> /* printk */
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/sched.h> /* jiffies and HZ */
#include <linux/atm.h> /* ATM stuff */
#include <linux/atmsap.h>
#include <linux/atmsvc.h>
#include <linux/atmdev.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "resources.h"
#include "signaling.h"
#undef WAIT_FOR_DEMON /* #define this if system calls on SVC sockets
should block until the demon runs.
Danger: may cause nasty hangs if the demon
crashes. */
struct atm_vcc *sigd = NULL;
#ifdef WAIT_FOR_DEMON
static DECLARE_WAIT_QUEUE_HEAD(sigd_sleep);
#endif
static void sigd_put_skb(struct sk_buff *skb)
{
#ifdef WAIT_FOR_DEMON
DECLARE_WAITQUEUE(wait, current);
add_wait_queue(&sigd_sleep, &wait);
while (!sigd) {
set_current_state(TASK_UNINTERRUPTIBLE);
pr_debug("atmsvc: waiting for signaling daemon...\n");
schedule();
}
current->state = TASK_RUNNING;
remove_wait_queue(&sigd_sleep, &wait);
#else
if (!sigd) {
pr_debug("atmsvc: no signaling daemon\n");
kfree_skb(skb);
return;
}
#endif
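/* charge the skb to sigd's receive buffer and hand the message over */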
atm_force_charge(sigd, skb->truesize);
skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb);
sk_atm(sigd)->sk_data_ready(sk_atm(sigd));
}
static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg)
{
struct sk_buff *skb;
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags))
return;
msg->type = as_error;
if (!vcc->dev->ops->change_qos)
msg->reply = -EOPNOTSUPP;
else {
/* should lock VCC */
msg->reply = vcc->dev->ops->change_qos(vcc, &msg->qos,
msg->reply);
if (!msg->reply)
msg->type = as_okay;
}
/*
* Should probably just turn around the old skb. But then the buffer
* space accounting would need to follow the change too. Maybe later.
*/
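/* the reply must not be dropped: busy-wait for an skb rather than fail */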
while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
schedule();
*(struct atmsvc_msg *)skb_put(skb, sizeof(struct atmsvc_msg)) = *msg;
sigd_put_skb(skb);
}
static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct atmsvc_msg *msg;
struct atm_vcc *session_vcc;
struct sock *sk;
msg = (struct atmsvc_msg *) skb->data;
atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
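/* the daemon echoes the kernel's vcc pointer back as an opaque cookie */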
vcc = *(struct atm_vcc **) &msg->vcc;
pr_debug("%d (0x%lx)\n", (int)msg->type, (unsigned long)vcc);
sk = sk_atm(vcc);
switch (msg->type) {
case as_okay:
sk->sk_err = -msg->reply;
clear_bit(ATM_VF_WAITING, &vcc->flags);
if (!*vcc->local.sas_addr.prv && !*vcc->local.sas_addr.pub) {
vcc->local.sas_family = AF_ATMSVC;
memcpy(vcc->local.sas_addr.prv,
msg->local.sas_addr.prv, ATM_ESA_LEN);
memcpy(vcc->local.sas_addr.pub,
msg->local.sas_addr.pub, ATM_E164_LEN + 1);
}
session_vcc = vcc->session ? vcc->session : vcc;
if (session_vcc->vpi || session_vcc->vci)
break;
session_vcc->itf = msg->pvc.sap_addr.itf;
session_vcc->vpi = msg->pvc.sap_addr.vpi;
session_vcc->vci = msg->pvc.sap_addr.vci;
if (session_vcc->vpi || session_vcc->vci)
session_vcc->qos = msg->qos;
break;
case as_error:
clear_bit(ATM_VF_REGIS, &vcc->flags);
clear_bit(ATM_VF_READY, &vcc->flags);
sk->sk_err = -msg->reply;
clear_bit(ATM_VF_WAITING, &vcc->flags);
break;
case as_indicate:
vcc = *(struct atm_vcc **)&msg->listen_vcc;
sk = sk_atm(vcc);
pr_debug("as_indicate!!!\n");
lock_sock(sk);
if (sk_acceptq_is_full(sk)) {
sigd_enq(NULL, as_reject, vcc, NULL, NULL);
dev_kfree_skb(skb);
goto as_indicate_complete;
}
sk->sk_ack_backlog++;
skb_queue_tail(&sk->sk_receive_queue, skb);
pr_debug("waking sk_sleep(sk) 0x%p\n", sk_sleep(sk));
sk->sk_state_change(sk);
as_indicate_complete:
release_sock(sk);
return 0;
case as_close:
set_bit(ATM_VF_RELEASED, &vcc->flags);
vcc_release_async(vcc, msg->reply);
goto out;
case as_modify:
modify_qos(vcc, msg);
break;
case as_addparty:
case as_dropparty:
sk->sk_err_soft = msg->reply;
/* < 0 failure, otherwise ep_ref */
clear_bit(ATM_VF_WAITING, &vcc->flags);
break;
default:
pr_alert("bad message type %d\n", (int)msg->type);
return -EINVAL;
}
sk->sk_state_change(sk);
out:
dev_kfree_skb(skb);
return 0;
}
void sigd_enq2(struct atm_vcc *vcc, enum atmsvc_msg_type type,
struct atm_vcc *listen_vcc, const struct sockaddr_atmpvc *pvc,
const struct sockaddr_atmsvc *svc, const struct atm_qos *qos,
int reply)
{
struct sk_buff *skb;
struct atmsvc_msg *msg;
static unsigned int session = 0;
pr_debug("%d (0x%p)\n", (int)type, vcc);
while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
schedule();
msg = (struct atmsvc_msg *)skb_put(skb, sizeof(struct atmsvc_msg));
memset(msg, 0, sizeof(*msg));
msg->type = type;
*(struct atm_vcc **) &msg->vcc = vcc;
*(struct atm_vcc **) &msg->listen_vcc = listen_vcc;
msg->reply = reply;
if (qos)
msg->qos = *qos;
if (vcc)
msg->sap = vcc->sap;
if (svc)
msg->svc = *svc;
if (vcc)
msg->local = vcc->local;
if (pvc)
msg->pvc = *pvc;
if (vcc) {
if (type == as_connect && test_bit(ATM_VF_SESSION, &vcc->flags))
msg->session = ++session;
/* every new pmp connect gets the next session number */
}
sigd_put_skb(skb);
if (vcc)
set_bit(ATM_VF_REGIS, &vcc->flags);
}
void sigd_enq(struct atm_vcc *vcc, enum atmsvc_msg_type type,
struct atm_vcc *listen_vcc, const struct sockaddr_atmpvc *pvc,
const struct sockaddr_atmsvc *svc)
{
sigd_enq2(vcc, type, listen_vcc, pvc, svc, vcc ? &vcc->qos : NULL, 0);
/* other ISP applications may use "reply" */
}
static void purge_vcc(struct atm_vcc *vcc)
{
if (sk_atm(vcc)->sk_family == PF_ATMSVC &&
!test_bit(ATM_VF_META, &vcc->flags)) {
set_bit(ATM_VF_RELEASED, &vcc->flags);
clear_bit(ATM_VF_REGIS, &vcc->flags);
vcc_release_async(vcc, -EUNATCH);
}
}
static void sigd_close(struct atm_vcc *vcc)
{
struct sock *s;
int i;
pr_debug("\n");
sigd = NULL;
if (skb_peek(&sk_atm(vcc)->sk_receive_queue))
pr_err("closing with requests pending\n");
skb_queue_purge(&sk_atm(vcc)->sk_receive_queue);
read_lock(&vcc_sklist_lock);
for (i = 0; i < VCC_HTABLE_SIZE; ++i) {
struct hlist_head *head = &vcc_hash[i];
sk_for_each(s, head) {
vcc = atm_sk(s);
purge_vcc(vcc);
}
}
read_unlock(&vcc_sklist_lock);
}
static struct atmdev_ops sigd_dev_ops = {
.close = sigd_close,
.send = sigd_send
};
static struct atm_dev sigd_dev = {
.ops = &sigd_dev_ops,
.type = "sig",
.number = 999,
.lock = __SPIN_LOCK_UNLOCKED(sigd_dev.lock)
};
int sigd_attach(struct atm_vcc *vcc)
{
if (sigd)
return -EADDRINUSE;
pr_debug("\n");
sigd = vcc;
vcc->dev = &sigd_dev;
vcc_insert_socket(sk_atm(vcc));
set_bit(ATM_VF_META, &vcc->flags);
set_bit(ATM_VF_READY, &vcc->flags);
#ifdef WAIT_FOR_DEMON
wake_up(&sigd_sleep);
#endif
return 0;
}

30
net/atm/signaling.h Normal file

@ -0,0 +1,30 @@
/* net/atm/signaling.h - ATM signaling */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#ifndef NET_ATM_SIGNALING_H
#define NET_ATM_SIGNALING_H
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/atmsvc.h>
extern struct atm_vcc *sigd; /* needed in svc_release */
/*
* sigd_enq is a wrapper for sigd_enq2, covering the more common cases, and
* avoiding huge lists of null values.
*/
void sigd_enq2(struct atm_vcc *vcc, enum atmsvc_msg_type type,
struct atm_vcc *listen_vcc, const struct sockaddr_atmpvc *pvc,
const struct sockaddr_atmsvc *svc, const struct atm_qos *qos, int reply);
void sigd_enq(struct atm_vcc *vcc, enum atmsvc_msg_type type,
struct atm_vcc *listen_vcc, const struct sockaddr_atmpvc *pvc,
const struct sockaddr_atmsvc *svc);
int sigd_attach(struct atm_vcc *vcc);
#endif

690
net/atm/svc.c Normal file

@ -0,0 +1,690 @@
/* net/atm/svc.c - ATM SVC sockets */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/string.h>
#include <linux/net.h> /* struct socket, struct proto_ops */
#include <linux/errno.h> /* error codes */
#include <linux/kernel.h> /* printk */
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/sched.h> /* jiffies and HZ */
#include <linux/fcntl.h> /* O_NONBLOCK */
#include <linux/init.h>
#include <linux/atm.h> /* ATM stuff */
#include <linux/atmsap.h>
#include <linux/atmsvc.h>
#include <linux/atmdev.h>
#include <linux/bitops.h>
#include <net/sock.h> /* for sock_no_* */
#include <linux/uaccess.h>
#include <linux/export.h>
#include "resources.h"
#include "common.h" /* common for PVCs and SVCs */
#include "signaling.h"
#include "addr.h"
static int svc_create(struct net *net, struct socket *sock, int protocol,
int kern);
/*
* Note: since all this is still nicely synchronized with the signaling demon,
* there's no need to protect sleep loops with clis. If signaling is
* moved into the kernel, that would change.
*/
static int svc_shutdown(struct socket *sock, int how)
{
return 0;
}
static void svc_disconnect(struct atm_vcc *vcc)
{
DEFINE_WAIT(wait);
struct sk_buff *skb;
struct sock *sk = sk_atm(vcc);
pr_debug("%p\n", vcc);
if (test_bit(ATM_VF_REGIS, &vcc->flags)) {
sigd_enq(vcc, as_close, NULL, NULL, NULL);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
if (test_bit(ATM_VF_RELEASED, &vcc->flags) || !sigd)
break;
schedule();
}
finish_wait(sk_sleep(sk), &wait);
}
/* beware - socket is still in use by atmsigd until the last
as_indicate has been answered */
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
atm_return(vcc, skb->truesize);
pr_debug("LISTEN REL\n");
sigd_enq2(NULL, as_reject, vcc, NULL, NULL, &vcc->qos, 0);
dev_kfree_skb(skb);
}
clear_bit(ATM_VF_REGIS, &vcc->flags);
/* ... may retry later */
}
static int svc_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
if (sk) {
vcc = ATM_SD(sock);
pr_debug("%p\n", vcc);
clear_bit(ATM_VF_READY, &vcc->flags);
/*
* VCC pointer is used as a reference,
* so we must not free it (thereby subjecting it to re-use)
* before all pending connections are closed
*/
svc_disconnect(vcc);
vcc_release(sock);
}
return 0;
}
static int svc_bind(struct socket *sock, struct sockaddr *sockaddr,
int sockaddr_len)
{
DEFINE_WAIT(wait);
struct sock *sk = sock->sk;
struct sockaddr_atmsvc *addr;
struct atm_vcc *vcc;
int error;
if (sockaddr_len != sizeof(struct sockaddr_atmsvc))
return -EINVAL;
lock_sock(sk);
if (sock->state == SS_CONNECTED) {
error = -EISCONN;
goto out;
}
if (sock->state != SS_UNCONNECTED) {
error = -EINVAL;
goto out;
}
vcc = ATM_SD(sock);
addr = (struct sockaddr_atmsvc *) sockaddr;
if (addr->sas_family != AF_ATMSVC) {
error = -EAFNOSUPPORT;
goto out;
}
clear_bit(ATM_VF_BOUND, &vcc->flags);
/* failing rebind will kill old binding */
/* @@@ check memory (de)allocation on rebind */
if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) {
error = -EBADFD;
goto out;
}
vcc->local = *addr;
set_bit(ATM_VF_WAITING, &vcc->flags);
sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
if (!test_bit(ATM_VF_WAITING, &vcc->flags) || !sigd)
break;
schedule();
}
finish_wait(sk_sleep(sk), &wait);
clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */
if (!sigd) {
error = -EUNATCH;
goto out;
}
if (!sk->sk_err)
set_bit(ATM_VF_BOUND, &vcc->flags);
error = -sk->sk_err;
out:
release_sock(sk);
return error;
}
static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
int sockaddr_len, int flags)
{
DEFINE_WAIT(wait);
struct sock *sk = sock->sk;
struct sockaddr_atmsvc *addr;
struct atm_vcc *vcc = ATM_SD(sock);
int error;
pr_debug("%p\n", vcc);
lock_sock(sk);
if (sockaddr_len != sizeof(struct sockaddr_atmsvc)) {
error = -EINVAL;
goto out;
}
switch (sock->state) {
default:
error = -EINVAL;
goto out;
case SS_CONNECTED:
error = -EISCONN;
goto out;
case SS_CONNECTING:
if (test_bit(ATM_VF_WAITING, &vcc->flags)) {
error = -EALREADY;
goto out;
}
sock->state = SS_UNCONNECTED;
if (sk->sk_err) {
error = -sk->sk_err;
goto out;
}
break;
case SS_UNCONNECTED:
addr = (struct sockaddr_atmsvc *) sockaddr;
if (addr->sas_family != AF_ATMSVC) {
error = -EAFNOSUPPORT;
goto out;
}
if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) {
error = -EBADFD;
goto out;
}
if (vcc->qos.txtp.traffic_class == ATM_ANYCLASS ||
vcc->qos.rxtp.traffic_class == ATM_ANYCLASS) {
error = -EINVAL;
goto out;
}
if (!vcc->qos.txtp.traffic_class &&
!vcc->qos.rxtp.traffic_class) {
error = -EINVAL;
goto out;
}
vcc->remote = *addr;
set_bit(ATM_VF_WAITING, &vcc->flags);
sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote);
if (flags & O_NONBLOCK) {
sock->state = SS_CONNECTING;
error = -EINPROGRESS;
goto out;
}
error = 0;
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
schedule();
if (!signal_pending(current)) {
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
continue;
}
pr_debug("*ABORT*\n");
/*
* This is tricky:
* Kernel ---close--> Demon
* Kernel <--close--- Demon
* or
* Kernel ---close--> Demon
* Kernel <--error--- Demon
* or
* Kernel ---close--> Demon
* Kernel <--okay---- Demon
* Kernel <--close--- Demon
*/
sigd_enq(vcc, as_close, NULL, NULL, NULL);
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
schedule();
}
if (!sk->sk_err)
while (!test_bit(ATM_VF_RELEASED, &vcc->flags) &&
sigd) {
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
schedule();
}
clear_bit(ATM_VF_REGIS, &vcc->flags);
clear_bit(ATM_VF_RELEASED, &vcc->flags);
clear_bit(ATM_VF_CLOSE, &vcc->flags);
/* we're gone now but may connect later */
error = -EINTR;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (error)
goto out;
if (!sigd) {
error = -EUNATCH;
goto out;
}
if (sk->sk_err) {
error = -sk->sk_err;
goto out;
}
}
vcc->qos.txtp.max_pcr = SELECT_TOP_PCR(vcc->qos.txtp);
vcc->qos.txtp.pcr = 0;
vcc->qos.txtp.min_pcr = 0;
error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci);
if (!error)
sock->state = SS_CONNECTED;
else
svc_disconnect(vcc);
out:
release_sock(sk);
return error;
}
static int svc_listen(struct socket *sock, int backlog)
{
DEFINE_WAIT(wait);
struct sock *sk = sock->sk;
struct atm_vcc *vcc = ATM_SD(sock);
int error;
pr_debug("%p\n", vcc);
lock_sock(sk);
/* let server handle listen on unbound sockets */
if (test_bit(ATM_VF_SESSION, &vcc->flags)) {
error = -EINVAL;
goto out;
}
if (test_bit(ATM_VF_LISTEN, &vcc->flags)) {
error = -EADDRINUSE;
goto out;
}
set_bit(ATM_VF_WAITING, &vcc->flags);
sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
if (!test_bit(ATM_VF_WAITING, &vcc->flags) || !sigd)
break;
schedule();
}
finish_wait(sk_sleep(sk), &wait);
if (!sigd) {
error = -EUNATCH;
goto out;
}
set_bit(ATM_VF_LISTEN, &vcc->flags);
vcc_insert_socket(sk);
sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
error = -sk->sk_err;
out:
release_sock(sk);
return error;
}
static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
struct atmsvc_msg *msg;
struct atm_vcc *old_vcc = ATM_SD(sock);
struct atm_vcc *new_vcc;
int error;
lock_sock(sk);
error = svc_create(sock_net(sk), newsock, 0, 0);
if (error)
goto out;
new_vcc = ATM_SD(newsock);
pr_debug("%p -> %p\n", old_vcc, new_vcc);
while (1) {
DEFINE_WAIT(wait);
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
while (!(skb = skb_dequeue(&sk->sk_receive_queue)) &&
sigd) {
if (test_bit(ATM_VF_RELEASED, &old_vcc->flags))
break;
if (test_bit(ATM_VF_CLOSE, &old_vcc->flags)) {
error = -sk->sk_err;
break;
}
if (flags & O_NONBLOCK) {
error = -EAGAIN;
break;
}
release_sock(sk);
schedule();
lock_sock(sk);
if (signal_pending(current)) {
error = -ERESTARTSYS;
break;
}
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
}
finish_wait(sk_sleep(sk), &wait);
if (error)
goto out;
if (!skb) {
error = -EUNATCH;
goto out;
}
msg = (struct atmsvc_msg *)skb->data;
new_vcc->qos = msg->qos;
set_bit(ATM_VF_HASQOS, &new_vcc->flags);
new_vcc->remote = msg->svc;
new_vcc->local = msg->local;
new_vcc->sap = msg->sap;
error = vcc_connect(newsock, msg->pvc.sap_addr.itf,
msg->pvc.sap_addr.vpi,
msg->pvc.sap_addr.vci);
dev_kfree_skb(skb);
sk->sk_ack_backlog--;
if (error) {
sigd_enq2(NULL, as_reject, old_vcc, NULL, NULL,
&old_vcc->qos, error);
error = error == -EAGAIN ? -EBUSY : error;
goto out;
}
/* wait should be short, so we ignore the non-blocking flag */
set_bit(ATM_VF_WAITING, &new_vcc->flags);
sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL);
for (;;) {
prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait,
TASK_UNINTERRUPTIBLE);
if (!test_bit(ATM_VF_WAITING, &new_vcc->flags) || !sigd)
break;
release_sock(sk);
schedule();
lock_sock(sk);
}
finish_wait(sk_sleep(sk_atm(new_vcc)), &wait);
if (!sigd) {
error = -EUNATCH;
goto out;
}
if (!sk_atm(new_vcc)->sk_err)
break;
if (sk_atm(new_vcc)->sk_err != ERESTARTSYS) {
error = -sk_atm(new_vcc)->sk_err;
goto out;
}
}
newsock->state = SS_CONNECTED;
out:
release_sock(sk);
return error;
}
static int svc_getname(struct socket *sock, struct sockaddr *sockaddr,
int *sockaddr_len, int peer)
{
struct sockaddr_atmsvc *addr;
*sockaddr_len = sizeof(struct sockaddr_atmsvc);
addr = (struct sockaddr_atmsvc *) sockaddr;
memcpy(addr, peer ? &ATM_SD(sock)->remote : &ATM_SD(sock)->local,
sizeof(struct sockaddr_atmsvc));
return 0;
}
int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
{
struct sock *sk = sk_atm(vcc);
DEFINE_WAIT(wait);
set_bit(ATM_VF_WAITING, &vcc->flags);
sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
if (!test_bit(ATM_VF_WAITING, &vcc->flags) ||
test_bit(ATM_VF_RELEASED, &vcc->flags) || !sigd) {
break;
}
schedule();
}
finish_wait(sk_sleep(sk), &wait);
if (!sigd)
return -EUNATCH;
return -sk->sk_err;
}
static int svc_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc = ATM_SD(sock);
int value, error = 0;
lock_sock(sk);
switch (optname) {
case SO_ATMSAP:
if (level != SOL_ATM || optlen != sizeof(struct atm_sap)) {
error = -EINVAL;
goto out;
}
if (copy_from_user(&vcc->sap, optval, optlen)) {
error = -EFAULT;
goto out;
}
set_bit(ATM_VF_HASSAP, &vcc->flags);
break;
case SO_MULTIPOINT:
if (level != SOL_ATM || optlen != sizeof(int)) {
error = -EINVAL;
goto out;
}
if (get_user(value, (int __user *)optval)) {
error = -EFAULT;
goto out;
}
if (value == 1)
set_bit(ATM_VF_SESSION, &vcc->flags);
else if (value == 0)
clear_bit(ATM_VF_SESSION, &vcc->flags);
else
error = -EINVAL;
break;
default:
error = vcc_setsockopt(sock, level, optname, optval, optlen);
}
out:
release_sock(sk);
return error;
}
static int svc_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
int error = 0, len;
lock_sock(sk);
if (!__SO_LEVEL_MATCH(optname, level) || optname != SO_ATMSAP) {
error = vcc_getsockopt(sock, level, optname, optval, optlen);
goto out;
}
if (get_user(len, optlen)) {
error = -EFAULT;
goto out;
}
if (len != sizeof(struct atm_sap)) {
error = -EINVAL;
goto out;
}
if (copy_to_user(optval, &ATM_SD(sock)->sap, sizeof(struct atm_sap))) {
error = -EFAULT;
goto out;
}
out:
release_sock(sk);
return error;
}
static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
int sockaddr_len, int flags)
{
DEFINE_WAIT(wait);
struct sock *sk = sock->sk;
struct atm_vcc *vcc = ATM_SD(sock);
int error;
lock_sock(sk);
set_bit(ATM_VF_WAITING, &vcc->flags);
sigd_enq(vcc, as_addparty, NULL, NULL,
(struct sockaddr_atmsvc *) sockaddr);
if (flags & O_NONBLOCK) {
error = -EINPROGRESS;
goto out;
}
pr_debug("added wait queue\n");
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (!test_bit(ATM_VF_WAITING, &vcc->flags) || !sigd)
break;
schedule();
}
finish_wait(sk_sleep(sk), &wait);
error = xchg(&sk->sk_err_soft, 0);
out:
release_sock(sk);
return error;
}
static int svc_dropparty(struct socket *sock, int ep_ref)
{
DEFINE_WAIT(wait);
struct sock *sk = sock->sk;
struct atm_vcc *vcc = ATM_SD(sock);
int error;
lock_sock(sk);
set_bit(ATM_VF_WAITING, &vcc->flags);
sigd_enq2(vcc, as_dropparty, NULL, NULL, NULL, NULL, ep_ref);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (!test_bit(ATM_VF_WAITING, &vcc->flags) || !sigd)
break;
schedule();
}
finish_wait(sk_sleep(sk), &wait);
if (!sigd) {
error = -EUNATCH;
goto out;
}
error = xchg(&sk->sk_err_soft, 0);
out:
release_sock(sk);
return error;
}
static int svc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
int error, ep_ref;
struct sockaddr_atmsvc sa;
struct atm_vcc *vcc = ATM_SD(sock);
switch (cmd) {
case ATM_ADDPARTY:
if (!test_bit(ATM_VF_SESSION, &vcc->flags))
return -EINVAL;
if (copy_from_user(&sa, (void __user *) arg, sizeof(sa)))
return -EFAULT;
error = svc_addparty(sock, (struct sockaddr *)&sa, sizeof(sa),
0);
break;
case ATM_DROPPARTY:
if (!test_bit(ATM_VF_SESSION, &vcc->flags))
return -EINVAL;
if (copy_from_user(&ep_ref, (void __user *) arg, sizeof(int)))
return -EFAULT;
error = svc_dropparty(sock, ep_ref);
break;
default:
error = vcc_ioctl(sock, cmd, arg);
}
return error;
}
#ifdef CONFIG_COMPAT
static int svc_compat_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
/* The definition of ATM_ADDPARTY uses the size of struct atm_iobuf.
But actually it takes a struct sockaddr_atmsvc, which doesn't need
compat handling. So all we have to do is fix up cmd... */
if (cmd == COMPAT_ATM_ADDPARTY)
cmd = ATM_ADDPARTY;
if (cmd == ATM_ADDPARTY || cmd == ATM_DROPPARTY)
return svc_ioctl(sock, cmd, arg);
else
return vcc_compat_ioctl(sock, cmd, arg);
}
#endif /* CONFIG_COMPAT */
static const struct proto_ops svc_proto_ops = {
.family = PF_ATMSVC,
.owner = THIS_MODULE,
.release = svc_release,
.bind = svc_bind,
.connect = svc_connect,
.socketpair = sock_no_socketpair,
.accept = svc_accept,
.getname = svc_getname,
.poll = vcc_poll,
.ioctl = svc_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = svc_compat_ioctl,
#endif
.listen = svc_listen,
.shutdown = svc_shutdown,
.setsockopt = svc_setsockopt,
.getsockopt = svc_getsockopt,
.sendmsg = vcc_sendmsg,
.recvmsg = vcc_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
static int svc_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
int error;
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
sock->ops = &svc_proto_ops;
error = vcc_create(net, sock, protocol, AF_ATMSVC);
if (error)
return error;
ATM_SD(sock)->local.sas_family = AF_ATMSVC;
ATM_SD(sock)->remote.sas_family = AF_ATMSVC;
return 0;
}
static const struct net_proto_family svc_family_ops = {
.family = PF_ATMSVC,
.create = svc_create,
.owner = THIS_MODULE,
};
/*
* Initialize the ATM SVC protocol family
*/
int __init atmsvc_init(void)
{
return sock_register(&svc_family_ops);
}
void atmsvc_exit(void)
{
sock_unregister(PF_ATMSVC);
}
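/*
* Note: unlike the PVC path, every blocking operation above round-trips
* through the signaling daemon via sigd_enq()/sigd_enq2(), so bind(),
* connect(), listen() and accept() on an AF_ATMSVC socket all sleep
* until the daemon answers or detaches (in which case they fail with
* -EUNATCH).
*/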

121
net/ax25/Kconfig Normal file

@ -0,0 +1,121 @@
#
# Amateur Radio protocols and AX.25 device configuration
#
menuconfig HAMRADIO
depends on NET && !S390
bool "Amateur Radio support"
help
If you want to connect your Linux box to an amateur radio, answer Y
here. You want to read <http://www.tapr.org/>
and more specifically about AX.25 on Linux
<http://www.linux-ax25.org/>.
Note that the answer to this question won't directly affect the
kernel: saying N will just cause the configurator to skip all
the questions about amateur radio.
comment "Packet Radio protocols"
depends on HAMRADIO
config AX25
tristate "Amateur Radio AX.25 Level 2 protocol"
depends on HAMRADIO
help
This is the protocol used for computer communication over amateur
radio. It is either used by itself for point-to-point links, or to
carry other protocols such as tcp/ip. To use it, you need a device
that connects your Linux box to your amateur radio. You can either
use a low speed TNC (a Terminal Node Controller acts as a kind of
modem connecting your computer's serial port to your radio's
microphone input and speaker output) supporting the KISS protocol or
one of the various SCC cards that are supported by the generic Z8530
or the DMA SCC driver. Other options are the Baycom modem serial
and parallel port hacks and the sound card modem (supported by their
own drivers). If you say Y here, you also have to say Y to one of
those drivers.
Information about where to get supporting software for Linux amateur
radio as well as information about how to configure an AX.25 port is
contained in the AX25-HOWTO, available from
<http://www.tldp.org/docs.html#howto>. You might also want to
check out the file <file:Documentation/networking/ax25.txt> in the
kernel source. More information about digital amateur radio in
general is on the WWW at
<http://www.tapr.org/>.
To compile this driver as a module, choose M here: the
module will be called ax25.
config AX25_DAMA_SLAVE
bool "AX.25 DAMA Slave support"
default y
depends on AX25
help
DAMA is a mechanism to prevent collisions when doing AX.25
networking. A DAMA server (called "master") accepts incoming traffic
from clients (called "slaves") and redistributes it to other slaves.
If you say Y here, your Linux box will act as a DAMA slave; this is
transparent in that you don't have to do any special DAMA
configuration. Linux cannot yet act as a DAMA server. This option
only compiles DAMA slave support into the kernel. It still needs to
be enabled at runtime. For more about DAMA see
<http://www.linux-ax25.org>. If unsure, say Y.
# placeholder until implemented
config AX25_DAMA_MASTER
bool "AX.25 DAMA Master support"
depends on AX25_DAMA_SLAVE && BROKEN
help
DAMA is a mechanism to prevent collisions when doing AX.25
networking. A DAMA server (called "master") accepts incoming traffic
from clients (called "slaves") and redistributes it to other slaves.
If you say Y here, your Linux box will act as a DAMA master; this is
transparent in that you don't have to do any special DAMA
configuration. Note that DAMA master support is not yet functional
(hence the dependency on BROKEN). This option
only compiles DAMA master support into the kernel. It still needs to
be explicitly enabled, so if unsure, say Y.
config NETROM
tristate "Amateur Radio NET/ROM protocol"
depends on AX25
help
NET/ROM is a network layer protocol on top of AX.25 useful for
routing.
A comprehensive listing of all the software for Linux amateur radio
users as well as information about how to configure an AX.25 port is
contained in the Linux Ham Wiki, available from
<http://www.linux-ax25.org>. You also might want to check out the
file <file:Documentation/networking/ax25.txt>. More information about
digital amateur radio in general is on the WWW at
<http://www.tapr.org/>.
To compile this driver as a module, choose M here: the
module will be called netrom.
config ROSE
tristate "Amateur Radio X.25 PLP (Rose)"
depends on AX25
help
The Packet Layer Protocol (PLP) is a way to route packets over X.25
connections in general and amateur radio AX.25 connections in
particular, essentially an alternative to NET/ROM.
A comprehensive listing of all the software for Linux amateur radio
users as well as information about how to configure an AX.25 port is
contained in the Linux Ham Wiki, available from
<http://www.linux-ax25.org>. You also might want to check out the
file <file:Documentation/networking/ax25.txt>. More information about
digital amateur radio in general is on the WWW at
<http://www.tapr.org/>.
To compile this driver as a module, choose M here: the
module will be called rose.
menu "AX.25 network device drivers"
depends on HAMRADIO && AX25
source "drivers/net/hamradio/Kconfig"
endmenu

11
net/ax25/Makefile Normal file

@ -0,0 +1,11 @@
#
# Makefile for the Linux AX.25 layer.
#
obj-$(CONFIG_AX25) += ax25.o
ax25-y := ax25_addr.o ax25_dev.o ax25_iface.o ax25_in.o ax25_ip.o ax25_out.o \
ax25_route.o ax25_std_in.o ax25_std_subr.o ax25_std_timer.o \
ax25_subr.o ax25_timer.o ax25_uid.o af_ax25.o
ax25-$(CONFIG_AX25_DAMA_SLAVE) += ax25_ds_in.o ax25_ds_subr.o ax25_ds_timer.o
ax25-$(CONFIG_SYSCTL) += sysctl_net_ax25.o

20
net/ax25/TODO Normal file

@ -0,0 +1,20 @@
Do the ax25_list_lock, ax25_dev_lock, linkfail_lock, ax25_frag_lock and
listen_lock really have to be bh-safe?
Do the netrom and rose locks have to be bh-safe?
A device might be deleted after lookup in the SIOCADDRT ioctl but before it
is actually used.
Routes to a device being taken down might be deleted by ax25_rt_device_down
but added by somebody else before the device has been deleted fully.
The ax25_rt_find_route synopsis is perverse, but I somehow had to deal with
the race caused by the static variable in its previous implementation.
Implement proper socket locking in netrom and rose.
Check socket locking when ax25_rcv is sending to raw sockets. In particular
ax25_send_to_raw() seems fishy. Heck - ax25_rcv is fishy.
Handle XID and TEST frames properly.

2026
net/ax25/af_ax25.c Normal file

File diff suppressed because it is too large

307
net/ax25/ax25_addr.c Normal file

@ -0,0 +1,307 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
/*
* The default broadcast address of an interface is QST-0; the default address
* is LINUX-1. The null address is defined as a callsign of all spaces with
* an SSID of zero.
*/
const ax25_address ax25_bcast =
{{'Q' << 1, 'S' << 1, 'T' << 1, ' ' << 1, ' ' << 1, ' ' << 1, 0 << 1}};
const ax25_address ax25_defaddr =
{{'L' << 1, 'I' << 1, 'N' << 1, 'U' << 1, 'X' << 1, ' ' << 1, 1 << 1}};
const ax25_address null_ax25_address =
{{' ' << 1, ' ' << 1, ' ' << 1, ' ' << 1, ' ' << 1, ' ' << 1, 0 << 1}};
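/*
* Worked example of the shifted-ASCII encoding used above: 'Q' (0x51)
* is stored as 0xA2 and ' ' (0x20) as 0x40, while the SSID occupies
* bits 1-4 of the final byte, so "QST-0" becomes
* A2 A6 A8 40 40 40 00 before ax25_addr_build() ORs in the C/H/E
* control bits.
*/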
EXPORT_SYMBOL_GPL(ax25_bcast);
EXPORT_SYMBOL_GPL(ax25_defaddr);
EXPORT_SYMBOL(null_ax25_address);
/*
* ax25 -> ascii conversion
*/
char *ax2asc(char *buf, const ax25_address *a)
{
char c, *s;
int n;
for (n = 0, s = buf; n < 6; n++) {
c = (a->ax25_call[n] >> 1) & 0x7F;
if (c != ' ') *s++ = c;
}
*s++ = '-';
if ((n = ((a->ax25_call[6] >> 1) & 0x0F)) > 9) {
*s++ = '1';
n -= 10;
}
*s++ = n + '0';
*s++ = '\0';
if (*buf == '\0' || *buf == '-')
return "*";
return buf;
}
EXPORT_SYMBOL(ax2asc);
/*
* ascii -> ax25 conversion
*/
void asc2ax(ax25_address *addr, const char *callsign)
{
const char *s;
int n;
for (s = callsign, n = 0; n < 6; n++) {
if (*s != '\0' && *s != '-')
addr->ax25_call[n] = *s++;
else
addr->ax25_call[n] = ' ';
addr->ax25_call[n] <<= 1;
addr->ax25_call[n] &= 0xFE;
}
if (*s++ == '\0') {
addr->ax25_call[6] = 0x00;
return;
}
addr->ax25_call[6] = *s++ - '0';
if (*s != '\0') {
addr->ax25_call[6] *= 10;
addr->ax25_call[6] += *s++ - '0';
}
addr->ax25_call[6] <<= 1;
addr->ax25_call[6] &= 0x1E;
}
EXPORT_SYMBOL(asc2ax);
/*
* Compare two ax.25 addresses
*/
int ax25cmp(const ax25_address *a, const ax25_address *b)
{
int ct = 0;
while (ct < 6) {
if ((a->ax25_call[ct] & 0xFE) != (b->ax25_call[ct] & 0xFE)) /* Clean off repeater bits */
return 1;
ct++;
}
if ((a->ax25_call[ct] & 0x1E) == (b->ax25_call[ct] & 0x1E)) /* SSID without control bit */
return 0;
return 2; /* Partial match */
}
EXPORT_SYMBOL(ax25cmp);
/*
* Compare two AX.25 digipeater paths.
*/
int ax25digicmp(const ax25_digi *digi1, const ax25_digi *digi2)
{
int i;
if (digi1->ndigi != digi2->ndigi)
return 1;
if (digi1->lastrepeat != digi2->lastrepeat)
return 1;
for (i = 0; i < digi1->ndigi; i++)
if (ax25cmp(&digi1->calls[i], &digi2->calls[i]) != 0)
return 1;
return 0;
}
/*
* Given a raw AX.25 header, pull out the to and from addresses, the
* digipeater list, the command/response flag and the DAMA flag, and
* return a pointer to the start of the data.
*/
const unsigned char *ax25_addr_parse(const unsigned char *buf, int len,
ax25_address *src, ax25_address *dest, ax25_digi *digi, int *flags,
int *dama)
{
int d = 0;
if (len < 14) return NULL;
if (flags != NULL) {
*flags = 0;
if (buf[6] & AX25_CBIT)
*flags = AX25_COMMAND;
if (buf[13] & AX25_CBIT)
*flags = AX25_RESPONSE;
}
if (dama != NULL)
*dama = ~buf[13] & AX25_DAMA_FLAG;
/* Copy to, from */
if (dest != NULL)
memcpy(dest, buf + 0, AX25_ADDR_LEN);
if (src != NULL)
memcpy(src, buf + 7, AX25_ADDR_LEN);
buf += 2 * AX25_ADDR_LEN;
len -= 2 * AX25_ADDR_LEN;
digi->lastrepeat = -1;
digi->ndigi = 0;
while (!(buf[-1] & AX25_EBIT)) {
if (d >= AX25_MAX_DIGIS)
return NULL;
if (len < AX25_ADDR_LEN)
return NULL;
memcpy(&digi->calls[d], buf, AX25_ADDR_LEN);
digi->ndigi = d + 1;
if (buf[6] & AX25_HBIT) {
digi->repeated[d] = 1;
digi->lastrepeat = d;
} else {
digi->repeated[d] = 0;
}
buf += AX25_ADDR_LEN;
len -= AX25_ADDR_LEN;
d++;
}
return buf;
}
/*
* Assemble an AX.25 header from the bits
*/
int ax25_addr_build(unsigned char *buf, const ax25_address *src,
const ax25_address *dest, const ax25_digi *d, int flag, int modulus)
{
int len = 0;
int ct = 0;
memcpy(buf, dest, AX25_ADDR_LEN);
buf[6] &= ~(AX25_EBIT | AX25_CBIT);
buf[6] |= AX25_SSSID_SPARE;
if (flag == AX25_COMMAND) buf[6] |= AX25_CBIT;
buf += AX25_ADDR_LEN;
len += AX25_ADDR_LEN;
memcpy(buf, src, AX25_ADDR_LEN);
buf[6] &= ~(AX25_EBIT | AX25_CBIT);
buf[6] &= ~AX25_SSSID_SPARE;
if (modulus == AX25_MODULUS)
buf[6] |= AX25_SSSID_SPARE;
else
buf[6] |= AX25_ESSID_SPARE;
if (flag == AX25_RESPONSE) buf[6] |= AX25_CBIT;
/*
* Fast path the normal digiless path
*/
if (d == NULL || d->ndigi == 0) {
buf[6] |= AX25_EBIT;
return 2 * AX25_ADDR_LEN;
}
buf += AX25_ADDR_LEN;
len += AX25_ADDR_LEN;
while (ct < d->ndigi) {
memcpy(buf, &d->calls[ct], AX25_ADDR_LEN);
if (d->repeated[ct])
buf[6] |= AX25_HBIT;
else
buf[6] &= ~AX25_HBIT;
buf[6] &= ~AX25_EBIT;
buf[6] |= AX25_SSSID_SPARE;
buf += AX25_ADDR_LEN;
len += AX25_ADDR_LEN;
ct++;
}
buf[-1] |= AX25_EBIT;
return len;
}
int ax25_addr_size(const ax25_digi *dp)
{
if (dp == NULL)
return 2 * AX25_ADDR_LEN;
return AX25_ADDR_LEN * (2 + dp->ndigi);
}
/*
* Reverse a digipeater list. The same struct may not be passed as both
* parameters.
*/
void ax25_digi_invert(const ax25_digi *in, ax25_digi *out)
{
int ct;
out->ndigi = in->ndigi;
out->lastrepeat = in->ndigi - in->lastrepeat - 2;
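/* e.g. ndigi == 3 with lastrepeat == 2 (every hop already used) inverts
* to lastrepeat == -1: no hop on the reversed path has been used yet */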
/* Invert the digipeaters */
for (ct = 0; ct < in->ndigi; ct++) {
out->calls[ct] = in->calls[in->ndigi - ct - 1];
if (ct <= out->lastrepeat) {
out->calls[ct].ax25_call[6] |= AX25_HBIT;
out->repeated[ct] = 1;
} else {
out->calls[ct].ax25_call[6] &= ~AX25_HBIT;
out->repeated[ct] = 0;
}
}
}

198
net/ax25/ax25_dev.c Normal file

@ -0,0 +1,198 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/spinlock.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
ax25_dev *ax25_dev_list;
DEFINE_SPINLOCK(ax25_dev_lock);
ax25_dev *ax25_addr_ax25dev(ax25_address *addr)
{
ax25_dev *ax25_dev, *res = NULL;
spin_lock_bh(&ax25_dev_lock);
for (ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
if (ax25cmp(addr, (ax25_address *)ax25_dev->dev->dev_addr) == 0) {
res = ax25_dev;
}
spin_unlock_bh(&ax25_dev_lock);
return res;
}
/*
* This is called when an interface is brought up. These are
* reasonable defaults.
*/
void ax25_dev_device_up(struct net_device *dev)
{
ax25_dev *ax25_dev;
if ((ax25_dev = kzalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) {
printk(KERN_ERR "AX.25: ax25_dev_device_up - out of memory\n");
return;
}
dev->ax25_ptr = ax25_dev;
ax25_dev->dev = dev;
dev_hold(dev);
ax25_dev->forward = NULL;
ax25_dev->values[AX25_VALUES_IPDEFMODE] = AX25_DEF_IPDEFMODE;
ax25_dev->values[AX25_VALUES_AXDEFMODE] = AX25_DEF_AXDEFMODE;
ax25_dev->values[AX25_VALUES_BACKOFF] = AX25_DEF_BACKOFF;
ax25_dev->values[AX25_VALUES_CONMODE] = AX25_DEF_CONMODE;
ax25_dev->values[AX25_VALUES_WINDOW] = AX25_DEF_WINDOW;
ax25_dev->values[AX25_VALUES_EWINDOW] = AX25_DEF_EWINDOW;
ax25_dev->values[AX25_VALUES_T1] = AX25_DEF_T1;
ax25_dev->values[AX25_VALUES_T2] = AX25_DEF_T2;
ax25_dev->values[AX25_VALUES_T3] = AX25_DEF_T3;
ax25_dev->values[AX25_VALUES_IDLE] = AX25_DEF_IDLE;
ax25_dev->values[AX25_VALUES_N2] = AX25_DEF_N2;
ax25_dev->values[AX25_VALUES_PACLEN] = AX25_DEF_PACLEN;
ax25_dev->values[AX25_VALUES_PROTOCOL] = AX25_DEF_PROTOCOL;
ax25_dev->values[AX25_VALUES_DS_TIMEOUT]= AX25_DEF_DS_TIMEOUT;
#if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER)
ax25_ds_setup_timer(ax25_dev);
#endif
spin_lock_bh(&ax25_dev_lock);
ax25_dev->next = ax25_dev_list;
ax25_dev_list = ax25_dev;
spin_unlock_bh(&ax25_dev_lock);
ax25_register_dev_sysctl(ax25_dev);
}
void ax25_dev_device_down(struct net_device *dev)
{
ax25_dev *s, *ax25_dev;
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
return;
ax25_unregister_dev_sysctl(ax25_dev);
spin_lock_bh(&ax25_dev_lock);
dev->ax25_ptr = NULL;	/* unpublish the device now; the early returns
			 * below would otherwise leave a stale pointer
			 * to the freed ax25_dev behind
			 */
#ifdef CONFIG_AX25_DAMA_SLAVE
ax25_ds_del_timer(ax25_dev);
#endif
/*
* Remove any packet forwarding that points to this device.
*/
for (s = ax25_dev_list; s != NULL; s = s->next)
if (s->forward == dev)
s->forward = NULL;
if ((s = ax25_dev_list) == ax25_dev) {
ax25_dev_list = s->next;
spin_unlock_bh(&ax25_dev_lock);
dev_put(dev);
kfree(ax25_dev);
return;
}
while (s != NULL && s->next != NULL) {
if (s->next == ax25_dev) {
s->next = ax25_dev->next;
spin_unlock_bh(&ax25_dev_lock);
dev_put(dev);
kfree(ax25_dev);
return;
}
s = s->next;
}
spin_unlock_bh(&ax25_dev_lock);
}
int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd)
{
ax25_dev *ax25_dev, *fwd_dev;
if ((ax25_dev = ax25_addr_ax25dev(&fwd->port_from)) == NULL)
return -EINVAL;
switch (cmd) {
case SIOCAX25ADDFWD:
if ((fwd_dev = ax25_addr_ax25dev(&fwd->port_to)) == NULL)
return -EINVAL;
if (ax25_dev->forward != NULL)
return -EINVAL;
ax25_dev->forward = fwd_dev->dev;
break;
case SIOCAX25DELFWD:
if (ax25_dev->forward == NULL)
return -EINVAL;
ax25_dev->forward = NULL;
break;
default:
return -EINVAL;
}
return 0;
}
struct net_device *ax25_fwd_dev(struct net_device *dev)
{
ax25_dev *ax25_dev;
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
return dev;
if (ax25_dev->forward == NULL)
return dev;
return ax25_dev->forward;
}
/*
* Free all memory associated with device structures.
*/
void __exit ax25_dev_free(void)
{
ax25_dev *s, *ax25_dev;
spin_lock_bh(&ax25_dev_lock);
ax25_dev = ax25_dev_list;
while (ax25_dev != NULL) {
s = ax25_dev;
dev_put(ax25_dev->dev);
ax25_dev = ax25_dev->next;
kfree(s);
}
ax25_dev_list = NULL;
spin_unlock_bh(&ax25_dev_lock);
}

302
net/ax25/ax25_ds_in.c Normal file

@ -0,0 +1,302 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
/*
* State machine for state 1, Awaiting Connection State.
* The handling of the timer(s) is in file ax25_ds_timer.c.
* Handling of state 0 and connection release is in ax25.c.
*/
static int ax25_ds_state1_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type)
{
switch (frametype) {
case AX25_SABM:
ax25->modulus = AX25_MODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
break;
case AX25_SABME:
ax25->modulus = AX25_EMODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_EWINDOW];
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
break;
case AX25_DISC:
ax25_send_control(ax25, AX25_DM, pf, AX25_RESPONSE);
break;
case AX25_UA:
ax25_calculate_rtt(ax25);
ax25_stop_t1timer(ax25);
ax25_start_t3timer(ax25);
ax25_start_idletimer(ax25);
ax25->vs = 0;
ax25->va = 0;
ax25->vr = 0;
ax25->state = AX25_STATE_3;
ax25->n2count = 0;
if (ax25->sk != NULL) {
bh_lock_sock(ax25->sk);
ax25->sk->sk_state = TCP_ESTABLISHED;
/*
* For WAIT_SABM connections we will produce an accept-ready
* socket here.
*/
if (!sock_flag(ax25->sk, SOCK_DEAD))
ax25->sk->sk_state_change(ax25->sk);
bh_unlock_sock(ax25->sk);
}
ax25_dama_on(ax25);
/* According to DK4EG's spec we are required to
* send an RR RESPONSE FINAL with N(R) = 0.
*/
ax25_std_enquiry_response(ax25);
break;
case AX25_DM:
if (pf)
ax25_disconnect(ax25, ECONNREFUSED);
break;
default:
if (pf)
ax25_send_control(ax25, AX25_SABM, AX25_POLLON, AX25_COMMAND);
break;
}
return 0;
}
/*
* State machine for state 2, Awaiting Release State.
* The handling of the timer(s) is in file ax25_ds_timer.c
* Handling of state 0 and connection release is in ax25.c.
*/
static int ax25_ds_state2_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type)
{
switch (frametype) {
case AX25_SABM:
case AX25_SABME:
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
ax25_dama_off(ax25);
break;
case AX25_DISC:
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
ax25_dama_off(ax25);
ax25_disconnect(ax25, 0);
break;
case AX25_DM:
case AX25_UA:
if (pf) {
ax25_dama_off(ax25);
ax25_disconnect(ax25, 0);
}
break;
case AX25_I:
case AX25_REJ:
case AX25_RNR:
case AX25_RR:
if (pf) {
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
ax25_dama_off(ax25);
}
break;
default:
break;
}
return 0;
}
/*
* State machine for state 3, Connected State.
* The handling of the timer(s) is in file ax25_timer.c
* Handling of state 0 and connection release is in ax25.c.
*/
static int ax25_ds_state3_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type)
{
int queued = 0;
switch (frametype) {
case AX25_SABM:
case AX25_SABME:
if (frametype == AX25_SABM) {
ax25->modulus = AX25_MODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
} else {
ax25->modulus = AX25_EMODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_EWINDOW];
}
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
ax25_stop_t1timer(ax25);
ax25_start_t3timer(ax25);
ax25_start_idletimer(ax25);
ax25->condition = 0x00;
ax25->vs = 0;
ax25->va = 0;
ax25->vr = 0;
ax25_requeue_frames(ax25);
ax25_dama_on(ax25);
break;
case AX25_DISC:
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
ax25_dama_off(ax25);
ax25_disconnect(ax25, 0);
break;
case AX25_DM:
ax25_dama_off(ax25);
ax25_disconnect(ax25, ECONNRESET);
break;
case AX25_RR:
case AX25_RNR:
if (frametype == AX25_RR)
ax25->condition &= ~AX25_COND_PEER_RX_BUSY;
else
ax25->condition |= AX25_COND_PEER_RX_BUSY;
if (ax25_validate_nr(ax25, nr)) {
if (ax25_check_iframes_acked(ax25, nr))
ax25->n2count=0;
if (type == AX25_COMMAND && pf)
ax25_ds_enquiry_response(ax25);
} else {
ax25_ds_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
}
break;
case AX25_REJ:
ax25->condition &= ~AX25_COND_PEER_RX_BUSY;
if (ax25_validate_nr(ax25, nr)) {
if (ax25->va != nr)
ax25->n2count=0;
ax25_frames_acked(ax25, nr);
ax25_calculate_rtt(ax25);
ax25_stop_t1timer(ax25);
ax25_start_t3timer(ax25);
ax25_requeue_frames(ax25);
if (type == AX25_COMMAND && pf)
ax25_ds_enquiry_response(ax25);
} else {
ax25_ds_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
}
break;
case AX25_I:
if (!ax25_validate_nr(ax25, nr)) {
ax25_ds_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
break;
}
if (ax25->condition & AX25_COND_PEER_RX_BUSY) {
ax25_frames_acked(ax25, nr);
ax25->n2count = 0;
} else {
if (ax25_check_iframes_acked(ax25, nr))
ax25->n2count = 0;
}
if (ax25->condition & AX25_COND_OWN_RX_BUSY) {
if (pf) ax25_ds_enquiry_response(ax25);
break;
}
if (ns == ax25->vr) {
ax25->vr = (ax25->vr + 1) % ax25->modulus;
queued = ax25_rx_iframe(ax25, skb);
if (ax25->condition & AX25_COND_OWN_RX_BUSY)
ax25->vr = ns; /* ax25->vr - 1 */
ax25->condition &= ~AX25_COND_REJECT;
if (pf) {
ax25_ds_enquiry_response(ax25);
} else {
if (!(ax25->condition & AX25_COND_ACK_PENDING)) {
ax25->condition |= AX25_COND_ACK_PENDING;
ax25_start_t2timer(ax25);
}
}
} else {
if (ax25->condition & AX25_COND_REJECT) {
if (pf) ax25_ds_enquiry_response(ax25);
} else {
ax25->condition |= AX25_COND_REJECT;
ax25_ds_enquiry_response(ax25);
ax25->condition &= ~AX25_COND_ACK_PENDING;
}
}
break;
case AX25_FRMR:
case AX25_ILLEGAL:
ax25_ds_establish_data_link(ax25);
ax25->state = AX25_STATE_1;
break;
default:
break;
}
return queued;
}
/*
* Higher level upcall for a LAPB frame
*/
int ax25_ds_frame_in(ax25_cb *ax25, struct sk_buff *skb, int type)
{
int queued = 0, frametype, ns, nr, pf;
frametype = ax25_decode(ax25, skb, &ns, &nr, &pf);
switch (ax25->state) {
case AX25_STATE_1:
queued = ax25_ds_state1_machine(ax25, skb, frametype, pf, type);
break;
case AX25_STATE_2:
queued = ax25_ds_state2_machine(ax25, skb, frametype, pf, type);
break;
case AX25_STATE_3:
queued = ax25_ds_state3_machine(ax25, skb, frametype, ns, nr, pf, type);
break;
}
return queued;
}

208
net/ax25/ax25_ds_subr.c Normal file

@ -0,0 +1,208 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/gfp.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
void ax25_ds_nr_error_recovery(ax25_cb *ax25)
{
ax25_ds_establish_data_link(ax25);
}
/*
* dl1bke 960114: transmit I frames on DAMA poll
*/
void ax25_ds_enquiry_response(ax25_cb *ax25)
{
ax25_cb *ax25o;
/* Please note that neither DK4EG's nor DG2FEF's
* DAMA spec mentions the following behaviour as seen
* with TheFirmware:
*
* DB0ACH->DL1BKE <RR C P R0> [DAMA]
* DL1BKE->DB0ACH <I NR=0 NS=0>
* DL1BKE-7->DB0PRA-6 DB0ACH <I C S3 R5>
* DL1BKE->DB0ACH <RR R F R0>
*
* The Flexnet DAMA Master implementation apparently
* insists on the "proper" AX.25 behaviour:
*
* DB0ACH->DL1BKE <RR C P R0> [DAMA]
* DL1BKE->DB0ACH <RR R F R0>
* DL1BKE->DB0ACH <I NR=0 NS=0>
* DL1BKE-7->DB0PRA-6 DB0ACH <I C S3 R5>
*
* Flexnet refuses to send us *any* I frame if we send
* a REJ while AX25_COND_REJECT is set. That is superfluous in
* this mode anyway (an RR or RNR invokes the retransmission).
* Is this a Flexnet bug?
*/
ax25_std_enquiry_response(ax25);
if (!(ax25->condition & AX25_COND_PEER_RX_BUSY)) {
ax25_requeue_frames(ax25);
ax25_kick(ax25);
}
if (ax25->state == AX25_STATE_1 || ax25->state == AX25_STATE_2 || skb_peek(&ax25->ack_queue) != NULL)
ax25_ds_t1_timeout(ax25);
else
ax25->n2count = 0;
ax25_start_t3timer(ax25);
ax25_ds_set_timer(ax25->ax25_dev);
spin_lock(&ax25_list_lock);
ax25_for_each(ax25o, &ax25_list) {
if (ax25o == ax25)
continue;
if (ax25o->ax25_dev != ax25->ax25_dev)
continue;
if (ax25o->state == AX25_STATE_1 || ax25o->state == AX25_STATE_2) {
ax25_ds_t1_timeout(ax25o);
continue;
}
if (!(ax25o->condition & AX25_COND_PEER_RX_BUSY) && ax25o->state == AX25_STATE_3) {
ax25_requeue_frames(ax25o);
ax25_kick(ax25o);
}
if (ax25o->state == AX25_STATE_1 || ax25o->state == AX25_STATE_2 || skb_peek(&ax25o->ack_queue) != NULL)
ax25_ds_t1_timeout(ax25o);
/* do not start T3 for listening sockets (tnx DD8NE) */
if (ax25o->state != AX25_STATE_0)
ax25_start_t3timer(ax25o);
}
spin_unlock(&ax25_list_lock);
}
void ax25_ds_establish_data_link(ax25_cb *ax25)
{
ax25->condition &= AX25_COND_DAMA_MODE;
ax25->n2count = 0;
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
ax25_stop_t2timer(ax25);
ax25_start_t3timer(ax25);
}
/*
* :::FIXME:::
* This is a kludge. Not all drivers recognize KISS commands.
* We need a driver-level request to switch duplex mode that does
* either SCC reconfiguration, PI config or KISS as required.
* Currently this request isn't reliable.
*/
static void ax25_kiss_cmd(ax25_dev *ax25_dev, unsigned char cmd, unsigned char param)
{
struct sk_buff *skb;
unsigned char *p;
if (ax25_dev->dev == NULL)
return;
if ((skb = alloc_skb(2, GFP_ATOMIC)) == NULL)
return;
skb_reset_network_header(skb);
p = skb_put(skb, 2);
*p++ = cmd;
*p++ = param;
skb->protocol = ax25_type_trans(skb, ax25_dev->dev);
dev_queue_xmit(skb);
}
/*
* A nasty problem arises if we count the number of DAMA connections
* wrong, especially when connections on the device already existed
* and our network node (or the sysop) decides to turn on DAMA Master
* mode. We thus flag the 'real' slave connections (those with
* AX25_COND_DAMA_MODE set) and check on every disconnect whether
* slave connections still exist.
*/
static int ax25_check_dama_slave(ax25_dev *ax25_dev)
{
ax25_cb *ax25;
int res = 0;
spin_lock(&ax25_list_lock);
ax25_for_each(ax25, &ax25_list)
if (ax25->ax25_dev == ax25_dev && (ax25->condition & AX25_COND_DAMA_MODE) && ax25->state > AX25_STATE_1) {
res = 1;
break;
}
spin_unlock(&ax25_list_lock);
return res;
}
static void ax25_dev_dama_on(ax25_dev *ax25_dev)
{
if (ax25_dev == NULL)
return;
if (ax25_dev->dama.slave == 0)
ax25_kiss_cmd(ax25_dev, 5, 1);
ax25_dev->dama.slave = 1;
ax25_ds_set_timer(ax25_dev);
}
void ax25_dev_dama_off(ax25_dev *ax25_dev)
{
if (ax25_dev == NULL)
return;
if (ax25_dev->dama.slave && !ax25_check_dama_slave(ax25_dev)) {
ax25_kiss_cmd(ax25_dev, 5, 0);
ax25_dev->dama.slave = 0;
ax25_ds_del_timer(ax25_dev);
}
}
void ax25_dama_on(ax25_cb *ax25)
{
ax25_dev_dama_on(ax25->ax25_dev);
ax25->condition |= AX25_COND_DAMA_MODE;
}
void ax25_dama_off(ax25_cb *ax25)
{
ax25->condition &= ~AX25_COND_DAMA_MODE;
ax25_dev_dama_off(ax25->ax25_dev);
}

236
net/ax25/ax25_ds_timer.c Normal file

@ -0,0 +1,236 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/tcp_states.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
static void ax25_ds_timeout(unsigned long);
/*
* Add DAMA slave timeout timer to timer list.
* Unlike the connection-based timers, the timeout function gets
* triggered every second. Please note that NET_AX25_DAMA_SLAVE_TIMEOUT
* (aka /proc/sys/net/ax25/{dev}/dama_slave_timeout) is still in
* 1/10th of a second.
*/
void ax25_ds_setup_timer(ax25_dev *ax25_dev)
{
setup_timer(&ax25_dev->dama.slave_timer, ax25_ds_timeout,
(unsigned long)ax25_dev);
}
void ax25_ds_del_timer(ax25_dev *ax25_dev)
{
if (ax25_dev)
del_timer(&ax25_dev->dama.slave_timer);
}
void ax25_ds_set_timer(ax25_dev *ax25_dev)
{
if (ax25_dev == NULL) /* paranoia */
return;
ax25_dev->dama.slave_timeout =
msecs_to_jiffies(ax25_dev->values[AX25_VALUES_DS_TIMEOUT]) / 10;
mod_timer(&ax25_dev->dama.slave_timer, jiffies + HZ);
}
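/*
 * The slave timer is self-rearming: ax25_ds_timeout() below runs once
 * per second (jiffies + HZ) and treats dama.slave_timeout as the
 * number of remaining fires before all DAMA slave connections on the
 * device are dropped.
 */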
/*
* DAMA Slave Timeout
* Silently discard all (slave) connections in case our master forgot us...
*/
static void ax25_ds_timeout(unsigned long arg)
{
ax25_dev *ax25_dev = (struct ax25_dev *) arg;
ax25_cb *ax25;
if (ax25_dev == NULL || !ax25_dev->dama.slave)
return; /* Yikes! */
if (!ax25_dev->dama.slave_timeout || --ax25_dev->dama.slave_timeout) {
ax25_ds_set_timer(ax25_dev);
return;
}
spin_lock(&ax25_list_lock);
ax25_for_each(ax25, &ax25_list) {
if (ax25->ax25_dev != ax25_dev || !(ax25->condition & AX25_COND_DAMA_MODE))
continue;
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
ax25_disconnect(ax25, ETIMEDOUT);
}
spin_unlock(&ax25_list_lock);
ax25_dev_dama_off(ax25_dev);
}
void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
{
struct sock *sk=ax25->sk;
if (sk)
bh_lock_sock(sk);
switch (ax25->state) {
case AX25_STATE_0:
/* Magic here: if we listen() and a new link dies before it
is accepted(), it isn't 'dead' and so doesn't get removed. */
if (!sk || sock_flag(sk, SOCK_DESTROY) ||
(sk->sk_state == TCP_LISTEN &&
sock_flag(sk, SOCK_DEAD))) {
if (sk) {
sock_hold(sk);
ax25_destroy_socket(ax25);
bh_unlock_sock(sk);
sock_put(sk);
} else
ax25_destroy_socket(ax25);
return;
}
break;
case AX25_STATE_3:
/*
* Check the state of the receive buffer.
*/
if (sk != NULL) {
if (atomic_read(&sk->sk_rmem_alloc) <
(sk->sk_rcvbuf >> 1) &&
(ax25->condition & AX25_COND_OWN_RX_BUSY)) {
ax25->condition &= ~AX25_COND_OWN_RX_BUSY;
ax25->condition &= ~AX25_COND_ACK_PENDING;
break;
}
}
break;
}
if (sk)
bh_unlock_sock(sk);
ax25_start_heartbeat(ax25);
}
/* dl1bke 960114: T3 works much like the IDLE timeout, but
* gets reloaded with every frame for this
* connection.
*/
void ax25_ds_t3timer_expiry(ax25_cb *ax25)
{
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
ax25_dama_off(ax25);
ax25_disconnect(ax25, ETIMEDOUT);
}
/* dl1bke 960228: close the connection when IDLE expires.
* Unlike T3, this timer gets reloaded only on
* I frames.
*/
void ax25_ds_idletimer_expiry(ax25_cb *ax25)
{
ax25_clear_queues(ax25);
ax25->n2count = 0;
ax25->state = AX25_STATE_2;
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
ax25_stop_t3timer(ax25);
if (ax25->sk != NULL) {
bh_lock_sock(ax25->sk);
ax25->sk->sk_state = TCP_CLOSE;
ax25->sk->sk_err = 0;
ax25->sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(ax25->sk, SOCK_DEAD)) {
ax25->sk->sk_state_change(ax25->sk);
sock_set_flag(ax25->sk, SOCK_DEAD);
}
bh_unlock_sock(ax25->sk);
}
}
/* dl1bke 960114: The DAMA protocol requires us to send data and
* SABM/DISC within the poll of any connected channel. Remember
* that we are not allowed to send anything unless we
* get polled by the Master.
*
* Thus we'll have to do parts of our T1 handling in
* ax25_enquiry_response().
*/
void ax25_ds_t1_timeout(ax25_cb *ax25)
{
switch (ax25->state) {
case AX25_STATE_1:
if (ax25->n2count == ax25->n2) {
if (ax25->modulus == AX25_MODULUS) {
ax25_disconnect(ax25, ETIMEDOUT);
return;
} else {
ax25->modulus = AX25_MODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
ax25->n2count = 0;
ax25_send_control(ax25, AX25_SABM, AX25_POLLOFF, AX25_COMMAND);
}
} else {
ax25->n2count++;
if (ax25->modulus == AX25_MODULUS)
ax25_send_control(ax25, AX25_SABM, AX25_POLLOFF, AX25_COMMAND);
else
ax25_send_control(ax25, AX25_SABME, AX25_POLLOFF, AX25_COMMAND);
}
break;
case AX25_STATE_2:
if (ax25->n2count == ax25->n2) {
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
ax25_disconnect(ax25, ETIMEDOUT);
return;
} else {
ax25->n2count++;
}
break;
case AX25_STATE_3:
if (ax25->n2count == ax25->n2) {
ax25_send_control(ax25, AX25_DM, AX25_POLLON, AX25_RESPONSE);
ax25_disconnect(ax25, ETIMEDOUT);
return;
} else {
ax25->n2count++;
}
break;
}
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
}

217
net/ax25/ax25_iface.c Normal file

@ -0,0 +1,217 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
static struct ax25_protocol *protocol_list;
static DEFINE_RWLOCK(protocol_list_lock);
static HLIST_HEAD(ax25_linkfail_list);
static DEFINE_SPINLOCK(linkfail_lock);
static struct listen_struct {
struct listen_struct *next;
ax25_address callsign;
struct net_device *dev;
} *listen_list = NULL;
static DEFINE_SPINLOCK(listen_lock);
/*
* Do not register the internal protocols AX25_P_TEXT, AX25_P_SEGMENT,
* AX25_P_IP or AX25_P_ARP ...
*/
void ax25_register_pid(struct ax25_protocol *ap)
{
write_lock_bh(&protocol_list_lock);
ap->next = protocol_list;
protocol_list = ap;
write_unlock_bh(&protocol_list_lock);
}
EXPORT_SYMBOL_GPL(ax25_register_pid);
void ax25_protocol_release(unsigned int pid)
{
struct ax25_protocol *protocol;
write_lock_bh(&protocol_list_lock);
protocol = protocol_list;
if (protocol == NULL)
goto out;
if (protocol->pid == pid) {
protocol_list = protocol->next;
goto out;
}
while (protocol != NULL && protocol->next != NULL) {
if (protocol->next->pid == pid) {
protocol->next = protocol->next->next;
goto out;
}
protocol = protocol->next;
}
out:
write_unlock_bh(&protocol_list_lock);
}
EXPORT_SYMBOL(ax25_protocol_release);
void ax25_linkfail_register(struct ax25_linkfail *lf)
{
spin_lock_bh(&linkfail_lock);
hlist_add_head(&lf->lf_node, &ax25_linkfail_list);
spin_unlock_bh(&linkfail_lock);
}
EXPORT_SYMBOL(ax25_linkfail_register);
void ax25_linkfail_release(struct ax25_linkfail *lf)
{
spin_lock_bh(&linkfail_lock);
hlist_del_init(&lf->lf_node);
spin_unlock_bh(&linkfail_lock);
}
EXPORT_SYMBOL(ax25_linkfail_release);
int ax25_listen_register(ax25_address *callsign, struct net_device *dev)
{
struct listen_struct *listen;
if (ax25_listen_mine(callsign, dev))
return 0;
if ((listen = kmalloc(sizeof(*listen), GFP_ATOMIC)) == NULL)
return -ENOMEM;
listen->callsign = *callsign;
listen->dev = dev;
spin_lock_bh(&listen_lock);
listen->next = listen_list;
listen_list = listen;
spin_unlock_bh(&listen_lock);
return 0;
}
EXPORT_SYMBOL(ax25_listen_register);
void ax25_listen_release(ax25_address *callsign, struct net_device *dev)
{
struct listen_struct *s, *listen;
spin_lock_bh(&listen_lock);
listen = listen_list;
if (listen == NULL) {
spin_unlock_bh(&listen_lock);
return;
}
if (ax25cmp(&listen->callsign, callsign) == 0 && listen->dev == dev) {
listen_list = listen->next;
spin_unlock_bh(&listen_lock);
kfree(listen);
return;
}
while (listen != NULL && listen->next != NULL) {
if (ax25cmp(&listen->next->callsign, callsign) == 0 && listen->next->dev == dev) {
s = listen->next;
listen->next = listen->next->next;
spin_unlock_bh(&listen_lock);
kfree(s);
return;
}
listen = listen->next;
}
spin_unlock_bh(&listen_lock);
}
EXPORT_SYMBOL(ax25_listen_release);
int (*ax25_protocol_function(unsigned int pid))(struct sk_buff *, ax25_cb *)
{
int (*res)(struct sk_buff *, ax25_cb *) = NULL;
struct ax25_protocol *protocol;
read_lock(&protocol_list_lock);
for (protocol = protocol_list; protocol != NULL; protocol = protocol->next)
if (protocol->pid == pid) {
res = protocol->func;
break;
}
read_unlock(&protocol_list_lock);
return res;
}
int ax25_listen_mine(ax25_address *callsign, struct net_device *dev)
{
struct listen_struct *listen;
spin_lock_bh(&listen_lock);
for (listen = listen_list; listen != NULL; listen = listen->next)
if (ax25cmp(&listen->callsign, callsign) == 0 &&
(listen->dev == dev || listen->dev == NULL)) {
spin_unlock_bh(&listen_lock);
return 1;
}
spin_unlock_bh(&listen_lock);
return 0;
}
void ax25_link_failed(ax25_cb *ax25, int reason)
{
struct ax25_linkfail *lf;
spin_lock_bh(&linkfail_lock);
hlist_for_each_entry(lf, &ax25_linkfail_list, lf_node)
lf->func(ax25, reason);
spin_unlock_bh(&linkfail_lock);
}
int ax25_protocol_is_registered(unsigned int pid)
{
struct ax25_protocol *protocol;
int res = 0;
read_lock_bh(&protocol_list_lock);
for (protocol = protocol_list; protocol != NULL; protocol = protocol->next)
if (protocol->pid == pid) {
res = 1;
break;
}
read_unlock_bh(&protocol_list_lock);
return res;
}

455
net/ax25/ax25_in.c Normal file

@ -0,0 +1,455 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
* Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
/*
* Given a fragment, queue it on the fragment queue and, once the final
* fragment arrives, pass the reassembled frame back to ax25_rx_iframe.
*/
static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb)
{
struct sk_buff *skbn, *skbo;
if (ax25->fragno != 0) {
if (!(*skb->data & AX25_SEG_FIRST)) {
if ((ax25->fragno - 1) == (*skb->data & AX25_SEG_REM)) {
/* Enqueue fragment */
ax25->fragno = *skb->data & AX25_SEG_REM;
skb_pull(skb, 1); /* skip fragno */
ax25->fraglen += skb->len;
skb_queue_tail(&ax25->frag_queue, skb);
/* Last fragment received ? */
if (ax25->fragno == 0) {
skbn = alloc_skb(AX25_MAX_HEADER_LEN +
ax25->fraglen,
GFP_ATOMIC);
if (!skbn) {
skb_queue_purge(&ax25->frag_queue);
return 1;
}
skb_reserve(skbn, AX25_MAX_HEADER_LEN);
skbn->dev = ax25->ax25_dev->dev;
skb_reset_network_header(skbn);
skb_reset_transport_header(skbn);
/* Copy data from the fragments */
while ((skbo = skb_dequeue(&ax25->frag_queue)) != NULL) {
skb_copy_from_linear_data(skbo,
skb_put(skbn, skbo->len),
skbo->len);
kfree_skb(skbo);
}
ax25->fraglen = 0;
if (ax25_rx_iframe(ax25, skbn) == 0)
kfree_skb(skbn);
}
return 1;
}
}
} else {
/* First fragment received */
if (*skb->data & AX25_SEG_FIRST) {
skb_queue_purge(&ax25->frag_queue);
ax25->fragno = *skb->data & AX25_SEG_REM;
skb_pull(skb, 1); /* skip fragno */
ax25->fraglen = skb->len;
skb_queue_tail(&ax25->frag_queue, skb);
return 1;
}
}
return 0;
}
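/*
 * Example: for a frame split into three fragments the segment octet
 * carries AX25_SEG_FIRST (0x80) plus a "fragments remaining" count in
 * AX25_SEG_REM (0x7f), so the receiver sees 0x82, then 0x01, then
 * 0x00; reassembly above completes when the counter reaches zero.
 */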
/*
* This is where all valid I frames are sent to, to be dispatched to
* whichever protocol requires them.
*/
int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
{
int (*func)(struct sk_buff *, ax25_cb *);
unsigned char pid;
int queued = 0;
if (skb == NULL) return 0;
ax25_start_idletimer(ax25);
pid = *skb->data;
if (pid == AX25_P_IP) {
/* working around a TCP bug to keep additional listeners
* happy. TCP re-uses the buffer and destroys the original
* content.
*/
struct sk_buff *skbn = skb_copy(skb, GFP_ATOMIC);
if (skbn != NULL) {
kfree_skb(skb);
skb = skbn;
}
skb_pull(skb, 1); /* Remove PID */
skb->mac_header = skb->network_header;
skb_reset_network_header(skb);
skb->dev = ax25->ax25_dev->dev;
skb->pkt_type = PACKET_HOST;
skb->protocol = htons(ETH_P_IP);
netif_rx(skb);
return 1;
}
if (pid == AX25_P_SEGMENT) {
skb_pull(skb, 1); /* Remove PID */
return ax25_rx_fragment(ax25, skb);
}
if ((func = ax25_protocol_function(pid)) != NULL) {
skb_pull(skb, 1); /* Remove PID */
return (*func)(skb, ax25);
}
if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) {
if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) ||
ax25->pidincl) {
if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
queued = 1;
else
ax25->condition |= AX25_COND_OWN_RX_BUSY;
}
}
return queued;
}
/*
* Higher level upcall for a LAPB frame
*/
static int ax25_process_rx_frame(ax25_cb *ax25, struct sk_buff *skb, int type, int dama)
{
int queued = 0;
if (ax25->state == AX25_STATE_0)
return 0;
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
queued = ax25_std_frame_in(ax25, skb, type);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
if (dama || ax25->ax25_dev->dama.slave)
queued = ax25_ds_frame_in(ax25, skb, type);
else
queued = ax25_std_frame_in(ax25, skb, type);
break;
#endif
}
return queued;
}
static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
ax25_address *dev_addr, struct packet_type *ptype)
{
ax25_address src, dest, *next_digi = NULL;
int type = 0, mine = 0, dama;
struct sock *make, *sk;
ax25_digi dp, reverse_dp;
ax25_cb *ax25;
ax25_dev *ax25_dev;
/*
* Process the AX.25/LAPB frame.
*/
skb_reset_transport_header(skb);
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
goto free;
/*
* Parse the address header.
*/
if (ax25_addr_parse(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL)
goto free;
/*
* Ours perhaps?
*/
if (dp.lastrepeat + 1 < dp.ndigi) /* Not yet digipeated completely */
next_digi = &dp.calls[dp.lastrepeat + 1];
/*
* Pull off the AX.25 headers, leaving the CTRL/PID bytes.
*/
skb_pull(skb, ax25_addr_size(&dp));
/* For our port addresses ? */
if (ax25cmp(&dest, dev_addr) == 0 && dp.lastrepeat + 1 == dp.ndigi)
mine = 1;
/* Also match on any registered callsign from L3/4 */
if (!mine && ax25_listen_mine(&dest, dev) && dp.lastrepeat + 1 == dp.ndigi)
mine = 1;
/* UI frame - bypass LAPB processing */
if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) {
skb_set_transport_header(skb, 2); /* skip control and pid */
ax25_send_to_raw(&dest, skb, skb->data[1]);
if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0)
goto free;
/* Now we are pointing at the pid byte */
switch (skb->data[1]) {
case AX25_P_IP:
skb_pull(skb,2); /* drop PID/CTRL */
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
skb->dev = dev;
skb->pkt_type = PACKET_HOST;
skb->protocol = htons(ETH_P_IP);
netif_rx(skb);
break;
case AX25_P_ARP:
skb_pull(skb,2);
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
skb->dev = dev;
skb->pkt_type = PACKET_HOST;
skb->protocol = htons(ETH_P_ARP);
netif_rx(skb);
break;
case AX25_P_TEXT:
/* Now find a suitable dgram socket */
sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);
if (sk != NULL) {
bh_lock_sock(sk);
if (atomic_read(&sk->sk_rmem_alloc) >=
sk->sk_rcvbuf) {
kfree_skb(skb);
} else {
/*
* Remove the control and PID.
*/
skb_pull(skb, 2);
if (sock_queue_rcv_skb(sk, skb) != 0)
kfree_skb(skb);
}
bh_unlock_sock(sk);
sock_put(sk);
} else {
kfree_skb(skb);
}
break;
default:
kfree_skb(skb); /* Will scan SOCK_AX25 RAW sockets */
break;
}
return 0;
}
/*
* Is connected mode supported on this device?
* If not, should we DM the incoming frame (except DMs) or
* silently ignore them? For now we stay quiet.
*/
if (ax25_dev->values[AX25_VALUES_CONMODE] == 0)
goto free;
/* LAPB */
/* AX.25 state 1-4 */
ax25_digi_invert(&dp, &reverse_dp);
if ((ax25 = ax25_find_cb(&dest, &src, &reverse_dp, dev)) != NULL) {
/*
* Process the frame. If it is queued up internally it
* returns one otherwise we free it immediately. This
* routine itself wakes the user context layers so we do
* no further work
*/
if (ax25_process_rx_frame(ax25, skb, type, dama) == 0)
kfree_skb(skb);
ax25_cb_put(ax25);
return 0;
}
/* AX.25 state 0 (disconnected) */
/* a) received not a SABM(E) */
if ((*skb->data & ~AX25_PF) != AX25_SABM &&
(*skb->data & ~AX25_PF) != AX25_SABME) {
/*
* Never reply to a DM. Also ignore any connects for
* addresses that are not our interfaces and not a socket.
*/
if ((*skb->data & ~AX25_PF) != AX25_DM && mine)
ax25_return_dm(dev, &src, &dest, &dp);
goto free;
}
/* b) received SABM(E) */
if (dp.lastrepeat + 1 == dp.ndigi)
sk = ax25_find_listener(&dest, 0, dev, SOCK_SEQPACKET);
else
sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET);
if (sk != NULL) {
bh_lock_sock(sk);
if (sk_acceptq_is_full(sk) ||
(make = ax25_make_new(sk, ax25_dev)) == NULL) {
if (mine)
ax25_return_dm(dev, &src, &dest, &dp);
kfree_skb(skb);
bh_unlock_sock(sk);
sock_put(sk);
return 0;
}
ax25 = ax25_sk(make);
skb_set_owner_r(skb, make);
skb_queue_head(&sk->sk_receive_queue, skb);
make->sk_state = TCP_ESTABLISHED;
sk->sk_ack_backlog++;
bh_unlock_sock(sk);
} else {
if (!mine)
goto free;
if ((ax25 = ax25_create_cb()) == NULL) {
ax25_return_dm(dev, &src, &dest, &dp);
goto free;
}
ax25_fillin_cb(ax25, ax25_dev);
}
ax25->source_addr = dest;
ax25->dest_addr = src;
/*
* Sort out any digipeated paths.
*/
if (dp.ndigi && !ax25->digipeat &&
(ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
kfree_skb(skb);
ax25_destroy_socket(ax25);
if (sk)
sock_put(sk);
return 0;
}
if (dp.ndigi == 0) {
kfree(ax25->digipeat);
ax25->digipeat = NULL;
} else {
/* Reverse the source SABM's path */
memcpy(ax25->digipeat, &reverse_dp, sizeof(ax25_digi));
}
if ((*skb->data & ~AX25_PF) == AX25_SABME) {
ax25->modulus = AX25_EMODULUS;
ax25->window = ax25_dev->values[AX25_VALUES_EWINDOW];
} else {
ax25->modulus = AX25_MODULUS;
ax25->window = ax25_dev->values[AX25_VALUES_WINDOW];
}
ax25_send_control(ax25, AX25_UA, AX25_POLLON, AX25_RESPONSE);
#ifdef CONFIG_AX25_DAMA_SLAVE
if (dama && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
ax25_dama_on(ax25);
#endif
ax25->state = AX25_STATE_3;
ax25_cb_add(ax25);
ax25_start_heartbeat(ax25);
ax25_start_t3timer(ax25);
ax25_start_idletimer(ax25);
if (sk) {
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk);
sock_put(sk);
} else {
free:
kfree_skb(skb);
}
return 0;
}
/*
* Receive an AX.25 frame via a SLIP interface.
*/
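/*
 * The KISS type octet checked below encodes the TNC port in its high
 * nibble and the command in its low nibble; only command 0 (data
 * frames) reaches the AX.25 stack, everything else is a TNC control
 * command and is dropped.
 */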
int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype, struct net_device *orig_dev)
{
skb_orphan(skb);
if (!net_eq(dev_net(dev), &init_net)) {
kfree_skb(skb);
return 0;
}
if ((*skb->data & 0x0F) != 0) {
kfree_skb(skb); /* Not a KISS data frame */
return 0;
}
skb_pull(skb, AX25_KISS_HEADER_LEN); /* Remove the KISS byte */
return ax25_rcv(skb, dev, (ax25_address *)dev->dev_addr, ptype);
}

242
net/ax25/ax25_ip.c Normal file

@ -0,0 +1,242 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/netfilter.h>
#include <linux/sysctl.h>
#include <net/ip.h>
#include <net/arp.h>
/*
* IP over AX.25 encapsulation.
*/
/*
* Shove an AX.25 UI header on an IP packet and handle ARP
*/
#ifdef CONFIG_INET
int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
const void *saddr, unsigned int len)
{
unsigned char *buff;
/* they sometimes come back to us... */
if (type == ETH_P_AX25)
return 0;
/* header is an AX.25 UI frame from us to them */
buff = skb_push(skb, AX25_HEADER_LEN);
*buff++ = 0x00; /* KISS DATA */
if (daddr != NULL)
memcpy(buff, daddr, dev->addr_len); /* Address specified */
buff[6] &= ~AX25_CBIT;
buff[6] &= ~AX25_EBIT;
buff[6] |= AX25_SSSID_SPARE;
buff += AX25_ADDR_LEN;
if (saddr != NULL)
memcpy(buff, saddr, dev->addr_len);
else
memcpy(buff, dev->dev_addr, dev->addr_len);
buff[6] &= ~AX25_CBIT;
buff[6] |= AX25_EBIT;
buff[6] |= AX25_SSSID_SPARE;
buff += AX25_ADDR_LEN;
*buff++ = AX25_UI; /* UI */
/* Append a suitable AX.25 PID */
switch (type) {
case ETH_P_IP:
*buff++ = AX25_P_IP;
break;
case ETH_P_ARP:
*buff++ = AX25_P_ARP;
break;
default:
printk(KERN_ERR "AX.25: ax25_hard_header - wrong protocol type 0x%2.2x\n", type);
*buff++ = 0;
break;
}
if (daddr != NULL)
return AX25_HEADER_LEN;
return -AX25_HEADER_LEN; /* Unfinished header */
}
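/*
 * The finished header is AX25_HEADER_LEN (17) bytes: one KISS data
 * octet, a 7 byte destination, a 7 byte source, the AX25_UI control
 * octet and a PID. A negative return flags the header as unfinished
 * so the caller fills in the destination via ARP first.
 */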
int ax25_rebuild_header(struct sk_buff *skb)
{
struct sk_buff *ourskb;
unsigned char *bp = skb->data;
ax25_route *route;
struct net_device *dev = NULL;
ax25_address *src, *dst;
ax25_digi *digipeat = NULL;
ax25_dev *ax25_dev;
ax25_cb *ax25;
char ip_mode = ' ';
dst = (ax25_address *)(bp + 1);
src = (ax25_address *)(bp + 8);
if (arp_find(bp + 1, skb))
return 1;
route = ax25_get_route(dst, NULL);
if (route) {
digipeat = route->digipeat;
dev = route->dev;
ip_mode = route->ip_mode;
}
if (dev == NULL)
dev = skb->dev;
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) {
goto put;
}
if (bp[16] == AX25_P_IP) {
if (ip_mode == 'V' || (ip_mode == ' ' && ax25_dev->values[AX25_VALUES_IPDEFMODE])) {
/*
* We copy the buffer and release the original thereby
* keeping it straight
*
* Note: we report 1 back so the caller will
* not feed the frame direct to the physical device
* We don't want that to happen. (It won't be upset
* as we have pulled the frame from the queue by
* freeing it).
*
* NB: TCP modifies buffers that are still
* on a device queue, thus we use skb_copy()
* instead of using skb_clone() unless this
* gets fixed.
*/
ax25_address src_c;
ax25_address dst_c;
if ((ourskb = skb_copy(skb, GFP_ATOMIC)) == NULL) {
kfree_skb(skb);
goto put;
}
if (skb->sk != NULL)
skb_set_owner_w(ourskb, skb->sk);
kfree_skb(skb);
/* dl9sau: bugfix
* After kfree_skb(), dst and src, which were pointers into bp
* (part of skb->data), are no longer valid. We rely on the
* copies dst_c and src_c remaining valid across the
* skb_pull(ourskb, ..) below.
*/
bp = ourskb->data;
dst_c = *(ax25_address *)(bp + 1);
src_c = *(ax25_address *)(bp + 8);
skb_pull(ourskb, AX25_HEADER_LEN - 1); /* Keep PID */
skb_reset_network_header(ourskb);
ax25=ax25_send_frame(
ourskb,
ax25_dev->values[AX25_VALUES_PACLEN],
&src_c,
&dst_c, digipeat, dev);
if (ax25) {
ax25_cb_put(ax25);
}
goto put;
}
}
bp[7] &= ~AX25_CBIT;
bp[7] &= ~AX25_EBIT;
bp[7] |= AX25_SSSID_SPARE;
bp[14] &= ~AX25_CBIT;
bp[14] |= AX25_EBIT;
bp[14] |= AX25_SSSID_SPARE;
skb_pull(skb, AX25_KISS_HEADER_LEN);
if (digipeat != NULL) {
if ((ourskb = ax25_rt_build_path(skb, src, dst, route->digipeat)) == NULL) {
kfree_skb(skb);
goto put;
}
skb = ourskb;
}
ax25_queue_xmit(skb, dev);
put:
if (route)
ax25_put_route(route);
return 1;
}
#else /* INET */
int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
const void *saddr, unsigned int len)
{
return -AX25_HEADER_LEN;
}
int ax25_rebuild_header(struct sk_buff *skb)
{
return 1;
}
#endif
const struct header_ops ax25_header_ops = {
.create = ax25_hard_header,
.rebuild = ax25_rebuild_header,
};
EXPORT_SYMBOL(ax25_hard_header);
EXPORT_SYMBOL(ax25_rebuild_header);
EXPORT_SYMBOL(ax25_header_ops);

398
net/ax25/ax25_out.c Normal file

@ -0,0 +1,398 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
static DEFINE_SPINLOCK(ax25_frag_lock);
ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev)
{
ax25_dev *ax25_dev;
ax25_cb *ax25;
/*
* Take the default packet length for the device if zero is
* specified.
*/
if (paclen == 0) {
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
return NULL;
paclen = ax25_dev->values[AX25_VALUES_PACLEN];
}
/*
* Look for an existing connection.
*/
if ((ax25 = ax25_find_cb(src, dest, digi, dev)) != NULL) {
ax25_output(ax25, paclen, skb);
return ax25; /* It already existed */
}
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
return NULL;
if ((ax25 = ax25_create_cb()) == NULL)
return NULL;
ax25_fillin_cb(ax25, ax25_dev);
ax25->source_addr = *src;
ax25->dest_addr = *dest;
if (digi != NULL) {
ax25->digipeat = kmemdup(digi, sizeof(*digi), GFP_ATOMIC);
if (ax25->digipeat == NULL) {
ax25_cb_put(ax25);
return NULL;
}
}
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_std_establish_data_link(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
if (ax25_dev->dama.slave)
ax25_ds_establish_data_link(ax25);
else
ax25_std_establish_data_link(ax25);
break;
#endif
}
/*
* There is one ref for the state machine; a caller needs
* one more to put it back, just like with the existing one.
*/
ax25_cb_hold(ax25);
ax25_cb_add(ax25);
ax25->state = AX25_STATE_1;
ax25_start_heartbeat(ax25);
ax25_output(ax25, paclen, skb);
return ax25; /* We had to create it */
}
EXPORT_SYMBOL(ax25_send_frame);
/*
* All outgoing AX.25 I frames pass via this routine. Therefore this is
* where the fragmentation of frames takes place: anything longer than
* paclen is split, using KA9Q-style segmentation unless the frame
* carries the AX25_P_TEXT PID.
*/
void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
{
struct sk_buff *skbn;
unsigned char *p;
int frontlen, len, fragno, ka9qfrag, first = 1;
if (paclen < 16) {
WARN_ON_ONCE(1);
kfree_skb(skb);
return;
}
if ((skb->len - 1) > paclen) {
if (*skb->data == AX25_P_TEXT) {
skb_pull(skb, 1); /* skip PID */
ka9qfrag = 0;
} else {
paclen -= 2; /* Allow for fragment control info */
ka9qfrag = 1;
}
fragno = skb->len / paclen;
if (skb->len % paclen == 0) fragno--;
frontlen = skb_headroom(skb); /* Address space + CTRL */
while (skb->len > 0) {
spin_lock_bh(&ax25_frag_lock);
if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
spin_unlock_bh(&ax25_frag_lock);
printk(KERN_CRIT "AX.25: ax25_output - out of memory\n");
return;
}
if (skb->sk != NULL)
skb_set_owner_w(skbn, skb->sk);
spin_unlock_bh(&ax25_frag_lock);
len = (paclen > skb->len) ? skb->len : paclen;
if (ka9qfrag == 1) {
skb_reserve(skbn, frontlen + 2);
skb_set_network_header(skbn,
skb_network_offset(skb));
skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
p = skb_push(skbn, 2);
*p++ = AX25_P_SEGMENT;
*p = fragno--;
if (first) {
*p |= AX25_SEG_FIRST;
first = 0;
}
} else {
skb_reserve(skbn, frontlen + 1);
skb_set_network_header(skbn,
skb_network_offset(skb));
skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
p = skb_push(skbn, 1);
*p = AX25_P_TEXT;
}
skb_pull(skb, len);
skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
}
kfree_skb(skb);
} else {
skb_queue_tail(&ax25->write_queue, skb); /* Throw it on the queue */
}
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_kick(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
/*
* A DAMA slave is _required_ to work as normal AX.25L2V2
* if no DAMA master is available.
*/
case AX25_PROTO_DAMA_SLAVE:
if (!ax25->ax25_dev->dama.slave) ax25_kick(ax25);
break;
#endif
}
}
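/*
 * Worked example (hypothetical sizes): skb->len = 600 with
 * paclen = 256 and a non-TEXT PID reserves 2 bytes per fragment for
 * the segment header, so paclen drops to 254, fragno = 600 / 254 = 2,
 * and three fragments carrying 254, 254 and 92 bytes are queued with
 * segment octets 0x82, 0x01 and 0x00.
 */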
/*
* This procedure is passed a buffer descriptor for an iframe. It builds
* the rest of the control part of the frame and then writes it out.
*/
static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit)
{
unsigned char *frame;
if (skb == NULL)
return;
skb_reset_network_header(skb);
if (ax25->modulus == AX25_MODULUS) {
frame = skb_push(skb, 1);
*frame = AX25_I;
*frame |= (poll_bit) ? AX25_PF : 0;
*frame |= (ax25->vr << 5);
*frame |= (ax25->vs << 1);
} else {
frame = skb_push(skb, 2);
frame[0] = AX25_I;
frame[0] |= (ax25->vs << 1);
frame[1] = (poll_bit) ? AX25_EPF : 0;
frame[1] |= (ax25->vr << 1);
}
ax25_start_idletimer(ax25);
ax25_transmit_buffer(ax25, skb, AX25_COMMAND);
}
void ax25_kick(ax25_cb *ax25)
{
struct sk_buff *skb, *skbn;
int last = 1;
unsigned short start, end, next;
if (ax25->state != AX25_STATE_3 && ax25->state != AX25_STATE_4)
return;
if (ax25->condition & AX25_COND_PEER_RX_BUSY)
return;
if (skb_peek(&ax25->write_queue) == NULL)
return;
start = (skb_peek(&ax25->ack_queue) == NULL) ? ax25->va : ax25->vs;
end = (ax25->va + ax25->window) % ax25->modulus;
if (start == end)
return;
/*
* Transmit data until either we're out of data to send or
* the window is full. Send a poll on the final I frame if
* the window is filled.
*/
/*
* Dequeue the frame and copy it.
* Check for race with ax25_clear_queues().
*/
skb = skb_dequeue(&ax25->write_queue);
if (!skb)
return;
ax25->vs = start;
do {
if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
skb_queue_head(&ax25->write_queue, skb);
break;
}
if (skb->sk != NULL)
skb_set_owner_w(skbn, skb->sk);
next = (ax25->vs + 1) % ax25->modulus;
last = (next == end);
/*
* Transmit the frame copy.
* bke 960114: do not set the Poll bit on the last frame
* in DAMA mode.
*/
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_send_iframe(ax25, skbn, (last) ? AX25_POLLON : AX25_POLLOFF);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
ax25_send_iframe(ax25, skbn, AX25_POLLOFF);
break;
#endif
}
ax25->vs = next;
/*
* Requeue the original data frame.
*/
skb_queue_tail(&ax25->ack_queue, skb);
} while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL);
ax25->condition &= ~AX25_COND_ACK_PENDING;
if (!ax25_t1timer_running(ax25)) {
ax25_stop_t3timer(ax25);
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
}
}
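/*
 * Window example (hypothetical): with va = vs = 4, window = 4 and
 * modulus 8, the gate above is start = 4, end = (4 + 4) % 8 = 0, so
 * up to four I frames (V(S) = 4..7) are cloned and sent, and the
 * poll bit goes out on the frame whose successor equals end, i.e.
 * V(S) = 7.
 */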
void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
{
struct sk_buff *skbn;
unsigned char *ptr;
int headroom;
if (ax25->ax25_dev == NULL) {
ax25_disconnect(ax25, ENETUNREACH);
return;
}
headroom = ax25_addr_size(ax25->digipeat);
if (skb_headroom(skb) < headroom) {
if ((skbn = skb_realloc_headroom(skb, headroom)) == NULL) {
printk(KERN_CRIT "AX.25: ax25_transmit_buffer - out of memory\n");
kfree_skb(skb);
return;
}
if (skb->sk != NULL)
skb_set_owner_w(skbn, skb->sk);
consume_skb(skb);
skb = skbn;
}
ptr = skb_push(skb, headroom);
ax25_addr_build(ptr, &ax25->source_addr, &ax25->dest_addr, ax25->digipeat, type, ax25->modulus);
ax25_queue_xmit(skb, ax25->ax25_dev->dev);
}
/*
* A small shim to dev_queue_xmit to add the KISS control byte, and do
* any packet forwarding in operation.
*/
void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned char *ptr;
skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev));
ptr = skb_push(skb, 1);
*ptr = 0x00; /* KISS */
dev_queue_xmit(skb);
}
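/*
 * Returns 1 when N(R) acknowledged something new: a full ack
 * (vs == nr) yields an RTT sample and falls back to T3, while a
 * partial ack merely restarts T1 for the frames still outstanding.
 */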
int ax25_check_iframes_acked(ax25_cb *ax25, unsigned short nr)
{
if (ax25->vs == nr) {
ax25_frames_acked(ax25, nr);
ax25_calculate_rtt(ax25);
ax25_stop_t1timer(ax25);
ax25_start_t3timer(ax25);
return 1;
} else {
if (ax25->va != nr) {
ax25_frames_acked(ax25, nr);
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
return 1;
}
}
return 0;
}

505
net/ax25/ax25_route.c Normal file

@ -0,0 +1,505 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Steven Whitehouse GW7RRM (stevew@acm.org)
* Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
* Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de)
* Copyright (C) Frederic Rible F1OAT (frible@teaser.fr)
*/
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/timer.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/export.h>
static ax25_route *ax25_route_list;
static DEFINE_RWLOCK(ax25_route_lock);
void ax25_rt_device_down(struct net_device *dev)
{
ax25_route *s, *t, *ax25_rt;
write_lock_bh(&ax25_route_lock);
ax25_rt = ax25_route_list;
while (ax25_rt != NULL) {
s = ax25_rt;
ax25_rt = ax25_rt->next;
if (s->dev == dev) {
if (ax25_route_list == s) {
ax25_route_list = s->next;
kfree(s->digipeat);
kfree(s);
} else {
for (t = ax25_route_list; t != NULL; t = t->next) {
if (t->next == s) {
t->next = s->next;
kfree(s->digipeat);
kfree(s);
break;
}
}
}
}
}
write_unlock_bh(&ax25_route_lock);
}
static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
{
ax25_route *ax25_rt;
ax25_dev *ax25_dev;
int i;
if ((ax25_dev = ax25_addr_ax25dev(&route->port_addr)) == NULL)
return -EINVAL;
if (route->digi_count > AX25_MAX_DIGIS)
return -EINVAL;
write_lock_bh(&ax25_route_lock);
ax25_rt = ax25_route_list;
while (ax25_rt != NULL) {
if (ax25cmp(&ax25_rt->callsign, &route->dest_addr) == 0 &&
ax25_rt->dev == ax25_dev->dev) {
kfree(ax25_rt->digipeat);
ax25_rt->digipeat = NULL;
if (route->digi_count != 0) {
if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
write_unlock_bh(&ax25_route_lock);
return -ENOMEM;
}
ax25_rt->digipeat->lastrepeat = -1;
ax25_rt->digipeat->ndigi = route->digi_count;
for (i = 0; i < route->digi_count; i++) {
ax25_rt->digipeat->repeated[i] = 0;
ax25_rt->digipeat->calls[i] = route->digi_addr[i];
}
}
write_unlock_bh(&ax25_route_lock);
return 0;
}
ax25_rt = ax25_rt->next;
}
if ((ax25_rt = kmalloc(sizeof(ax25_route), GFP_ATOMIC)) == NULL) {
write_unlock_bh(&ax25_route_lock);
return -ENOMEM;
}
atomic_set(&ax25_rt->refcount, 1);
ax25_rt->callsign = route->dest_addr;
ax25_rt->dev = ax25_dev->dev;
ax25_rt->digipeat = NULL;
ax25_rt->ip_mode = ' ';
if (route->digi_count != 0) {
if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
write_unlock_bh(&ax25_route_lock);
kfree(ax25_rt);
return -ENOMEM;
}
ax25_rt->digipeat->lastrepeat = -1;
ax25_rt->digipeat->ndigi = route->digi_count;
for (i = 0; i < route->digi_count; i++) {
ax25_rt->digipeat->repeated[i] = 0;
ax25_rt->digipeat->calls[i] = route->digi_addr[i];
}
}
ax25_rt->next = ax25_route_list;
ax25_route_list = ax25_rt;
write_unlock_bh(&ax25_route_lock);
return 0;
}
void __ax25_put_route(ax25_route *ax25_rt)
{
kfree(ax25_rt->digipeat);
kfree(ax25_rt);
}
static int ax25_rt_del(struct ax25_routes_struct *route)
{
ax25_route *s, *t, *ax25_rt;
ax25_dev *ax25_dev;
if ((ax25_dev = ax25_addr_ax25dev(&route->port_addr)) == NULL)
return -EINVAL;
write_lock_bh(&ax25_route_lock);
ax25_rt = ax25_route_list;
while (ax25_rt != NULL) {
s = ax25_rt;
ax25_rt = ax25_rt->next;
if (s->dev == ax25_dev->dev &&
ax25cmp(&route->dest_addr, &s->callsign) == 0) {
if (ax25_route_list == s) {
ax25_route_list = s->next;
ax25_put_route(s);
} else {
for (t = ax25_route_list; t != NULL; t = t->next) {
if (t->next == s) {
t->next = s->next;
ax25_put_route(s);
break;
}
}
}
}
}
write_unlock_bh(&ax25_route_lock);
return 0;
}
static int ax25_rt_opt(struct ax25_route_opt_struct *rt_option)
{
ax25_route *ax25_rt;
ax25_dev *ax25_dev;
int err = 0;
if ((ax25_dev = ax25_addr_ax25dev(&rt_option->port_addr)) == NULL)
return -EINVAL;
write_lock_bh(&ax25_route_lock);
ax25_rt = ax25_route_list;
while (ax25_rt != NULL) {
if (ax25_rt->dev == ax25_dev->dev &&
ax25cmp(&rt_option->dest_addr, &ax25_rt->callsign) == 0) {
switch (rt_option->cmd) {
case AX25_SET_RT_IPMODE:
switch (rt_option->arg) {
case ' ':
case 'D':
case 'V':
ax25_rt->ip_mode = rt_option->arg;
break;
default:
err = -EINVAL;
goto out;
}
break;
default:
err = -EINVAL;
goto out;
}
}
ax25_rt = ax25_rt->next;
}
out:
write_unlock_bh(&ax25_route_lock);
return err;
}
int ax25_rt_ioctl(unsigned int cmd, void __user *arg)
{
struct ax25_route_opt_struct rt_option;
struct ax25_routes_struct route;
switch (cmd) {
case SIOCADDRT:
if (copy_from_user(&route, arg, sizeof(route)))
return -EFAULT;
return ax25_rt_add(&route);
case SIOCDELRT:
if (copy_from_user(&route, arg, sizeof(route)))
return -EFAULT;
return ax25_rt_del(&route);
case SIOCAX25OPTRT:
if (copy_from_user(&rt_option, arg, sizeof(rt_option)))
return -EFAULT;
return ax25_rt_opt(&rt_option);
default:
return -EINVAL;
}
}
#ifdef CONFIG_PROC_FS
static void *ax25_rt_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(ax25_route_lock)
{
struct ax25_route *ax25_rt;
int i = 1;
read_lock(&ax25_route_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
for (ax25_rt = ax25_route_list; ax25_rt != NULL; ax25_rt = ax25_rt->next) {
if (i == *pos)
return ax25_rt;
++i;
}
return NULL;
}
static void *ax25_rt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == SEQ_START_TOKEN) ? ax25_route_list :
((struct ax25_route *) v)->next;
}
static void ax25_rt_seq_stop(struct seq_file *seq, void *v)
__releases(ax25_route_lock)
{
read_unlock(&ax25_route_lock);
}
static int ax25_rt_seq_show(struct seq_file *seq, void *v)
{
char buf[11];
if (v == SEQ_START_TOKEN)
seq_puts(seq, "callsign dev mode digipeaters\n");
else {
struct ax25_route *ax25_rt = v;
const char *callsign;
int i;
if (ax25cmp(&ax25_rt->callsign, &null_ax25_address) == 0)
callsign = "default";
else
callsign = ax2asc(buf, &ax25_rt->callsign);
seq_printf(seq, "%-9s %-4s",
callsign,
ax25_rt->dev ? ax25_rt->dev->name : "???");
switch (ax25_rt->ip_mode) {
case 'V':
seq_puts(seq, " vc");
break;
case 'D':
seq_puts(seq, " dg");
break;
default:
seq_puts(seq, " *");
break;
}
if (ax25_rt->digipeat != NULL)
for (i = 0; i < ax25_rt->digipeat->ndigi; i++)
seq_printf(seq, " %s",
ax2asc(buf, &ax25_rt->digipeat->calls[i]));
seq_puts(seq, "\n");
}
return 0;
}
static const struct seq_operations ax25_rt_seqops = {
.start = ax25_rt_seq_start,
.next = ax25_rt_seq_next,
.stop = ax25_rt_seq_stop,
.show = ax25_rt_seq_show,
};
static int ax25_rt_info_open(struct inode *inode, struct file *file)
{
return seq_open(file, &ax25_rt_seqops);
}
const struct file_operations ax25_route_fops = {
.owner = THIS_MODULE,
.open = ax25_rt_info_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif
/*
* Find AX.25 route
*
* Only routes with a reference count of zero can be destroyed.
*/
ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
{
ax25_route *ax25_spe_rt = NULL;
ax25_route *ax25_def_rt = NULL;
ax25_route *ax25_rt;
read_lock(&ax25_route_lock);
/*
* Bind to the physical interface we heard them on, or the default
* route if none is found.
*/
for (ax25_rt = ax25_route_list; ax25_rt != NULL; ax25_rt = ax25_rt->next) {
if (dev == NULL) {
if (ax25cmp(&ax25_rt->callsign, addr) == 0 && ax25_rt->dev != NULL)
ax25_spe_rt = ax25_rt;
if (ax25cmp(&ax25_rt->callsign, &null_ax25_address) == 0 && ax25_rt->dev != NULL)
ax25_def_rt = ax25_rt;
} else {
if (ax25cmp(&ax25_rt->callsign, addr) == 0 && ax25_rt->dev == dev)
ax25_spe_rt = ax25_rt;
if (ax25cmp(&ax25_rt->callsign, &null_ax25_address) == 0 && ax25_rt->dev == dev)
ax25_def_rt = ax25_rt;
}
}
ax25_rt = ax25_def_rt;
if (ax25_spe_rt != NULL)
ax25_rt = ax25_spe_rt;
if (ax25_rt != NULL)
ax25_hold_route(ax25_rt);
read_unlock(&ax25_route_lock);
return ax25_rt;
}
/*
* Adjust the path: if you use a default route and connect to a target
* that itself appears on the digipeater path, without having set a
* specific route beforehand, the path must be truncated at the target.
*/
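/*
* Example: with a default route via DIGI1,DIGI2,DIGI3, connecting to
* DIGI2 truncates the stored path to just DIGI1 (ndigi becomes 1).
*/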
static inline void ax25_adjust_path(ax25_address *addr, ax25_digi *digipeat)
{
int k;
for (k = 0; k < digipeat->ndigi; k++) {
if (ax25cmp(addr, &digipeat->calls[k]) == 0)
break;
}
digipeat->ndigi = k;
}
/*
* Find which interface to use.
*/
int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
{
ax25_uid_assoc *user;
ax25_route *ax25_rt;
int err = 0;
if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL)
return -EHOSTUNREACH;
if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) {
err = -EHOSTUNREACH;
goto put;
}
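/*
* Pick the source callsign: prefer the caller's UID/callsign mapping;
* otherwise fall back to the device's own address, which requires
* CAP_NET_BIND_SERVICE when the UID policy is restrictive.
*/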
user = ax25_findbyuid(current_euid());
if (user) {
ax25->source_addr = user->call;
ax25_uid_put(user);
} else {
if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
err = -EPERM;
goto put;
}
ax25->source_addr = *(ax25_address *)ax25->ax25_dev->dev->dev_addr;
}
if (ax25_rt->digipeat != NULL) {
ax25->digipeat = kmemdup(ax25_rt->digipeat, sizeof(ax25_digi),
GFP_ATOMIC);
if (ax25->digipeat == NULL) {
err = -ENOMEM;
goto put;
}
ax25_adjust_path(addr, ax25->digipeat);
}
if (ax25->sk != NULL) {
bh_lock_sock(ax25->sk);
sock_reset_flag(ax25->sk, SOCK_ZAPPED);
bh_unlock_sock(ax25->sk);
}
put:
ax25_put_route(ax25_rt);
return err;
}
struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src,
ax25_address *dest, ax25_digi *digi)
{
struct sk_buff *skbn;
unsigned char *bp;
int len;
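/*
* Make room in front of the frame for the digipeater addresses,
* reallocating the skb head if the present headroom is too small.
*/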
len = digi->ndigi * AX25_ADDR_LEN;
if (skb_headroom(skb) < len) {
if ((skbn = skb_realloc_headroom(skb, len)) == NULL) {
printk(KERN_CRIT "AX.25: ax25_rt_build_path - out of memory\n");
return NULL;
}
if (skb->sk != NULL)
skb_set_owner_w(skbn, skb->sk);
consume_skb(skb);
skb = skbn;
}
bp = skb_push(skb, len);
ax25_addr_build(bp, src, dest, digi, AX25_COMMAND, AX25_MODULUS);
return skb;
}
/*
* Free all memory associated with routing structures.
*/
void __exit ax25_rt_free(void)
{
ax25_route *s, *ax25_rt = ax25_route_list;
write_lock_bh(&ax25_route_lock);
while (ax25_rt != NULL) {
s = ax25_rt;
ax25_rt = ax25_rt->next;
kfree(s->digipeat);
kfree(s);
}
write_unlock_bh(&ax25_route_lock);
}

446
net/ax25/ax25_std_in.c Normal file
View file

@ -0,0 +1,446 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
* Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de)
*
* Most of this code is based on the SDL diagrams published in the 7th ARRL
* Computer Networking Conference papers. The diagrams have mistakes in them,
* but are mostly correct. Before you modify the code, please read the SDL
* diagrams, as the code is not obvious and probably very easy to break.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
/*
* State machine for state 1, Awaiting Connection State.
* The handling of the timer(s) is in file ax25_std_timer.c.
* Handling of state 0 and connection release is in ax25.c.
*/
static int ax25_std_state1_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type)
{
switch (frametype) {
case AX25_SABM:
ax25->modulus = AX25_MODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
break;
case AX25_SABME:
ax25->modulus = AX25_EMODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_EWINDOW];
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
break;
case AX25_DISC:
ax25_send_control(ax25, AX25_DM, pf, AX25_RESPONSE);
break;
case AX25_UA:
if (pf) {
ax25_calculate_rtt(ax25);
ax25_stop_t1timer(ax25);
ax25_start_t3timer(ax25);
ax25_start_idletimer(ax25);
ax25->vs = 0;
ax25->va = 0;
ax25->vr = 0;
ax25->state = AX25_STATE_3;
ax25->n2count = 0;
if (ax25->sk != NULL) {
bh_lock_sock(ax25->sk);
ax25->sk->sk_state = TCP_ESTABLISHED;
/* For WAIT_SABM connections we will produce an accept ready socket here */
if (!sock_flag(ax25->sk, SOCK_DEAD))
ax25->sk->sk_state_change(ax25->sk);
bh_unlock_sock(ax25->sk);
}
}
break;
case AX25_DM:
if (pf) {
if (ax25->modulus == AX25_MODULUS) {
ax25_disconnect(ax25, ECONNREFUSED);
} else {
ax25->modulus = AX25_MODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
}
}
break;
default:
break;
}
return 0;
}
/*
* State machine for state 2, Awaiting Release State.
* The handling of the timer(s) is in file ax25_std_timer.c
* Handling of state 0 and connection release is in ax25.c.
*/
static int ax25_std_state2_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type)
{
switch (frametype) {
case AX25_SABM:
case AX25_SABME:
ax25_send_control(ax25, AX25_DM, pf, AX25_RESPONSE);
break;
case AX25_DISC:
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
ax25_disconnect(ax25, 0);
break;
case AX25_DM:
case AX25_UA:
if (pf)
ax25_disconnect(ax25, 0);
break;
case AX25_I:
case AX25_REJ:
case AX25_RNR:
case AX25_RR:
if (pf)
ax25_send_control(ax25, AX25_DM, AX25_POLLON, AX25_RESPONSE);
break;
default:
break;
}
return 0;
}
/*
* State machine for state 3, Connected State.
* The handling of the timer(s) is in file ax25_std_timer.c
* Handling of state 0 and connection release is in ax25.c.
*/
static int ax25_std_state3_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type)
{
int queued = 0;
switch (frametype) {
case AX25_SABM:
case AX25_SABME:
if (frametype == AX25_SABM) {
ax25->modulus = AX25_MODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
} else {
ax25->modulus = AX25_EMODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_EWINDOW];
}
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
ax25_stop_t1timer(ax25);
ax25_stop_t2timer(ax25);
ax25_start_t3timer(ax25);
ax25_start_idletimer(ax25);
ax25->condition = 0x00;
ax25->vs = 0;
ax25->va = 0;
ax25->vr = 0;
ax25_requeue_frames(ax25);
break;
case AX25_DISC:
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
ax25_disconnect(ax25, 0);
break;
case AX25_DM:
ax25_disconnect(ax25, ECONNRESET);
break;
case AX25_RR:
case AX25_RNR:
if (frametype == AX25_RR)
ax25->condition &= ~AX25_COND_PEER_RX_BUSY;
else
ax25->condition |= AX25_COND_PEER_RX_BUSY;
if (type == AX25_COMMAND && pf)
ax25_std_enquiry_response(ax25);
if (ax25_validate_nr(ax25, nr)) {
ax25_check_iframes_acked(ax25, nr);
} else {
ax25_std_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
}
break;
case AX25_REJ:
ax25->condition &= ~AX25_COND_PEER_RX_BUSY;
if (type == AX25_COMMAND && pf)
ax25_std_enquiry_response(ax25);
if (ax25_validate_nr(ax25, nr)) {
ax25_frames_acked(ax25, nr);
ax25_calculate_rtt(ax25);
ax25_stop_t1timer(ax25);
ax25_start_t3timer(ax25);
ax25_requeue_frames(ax25);
} else {
ax25_std_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
}
break;
case AX25_I:
if (!ax25_validate_nr(ax25, nr)) {
ax25_std_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
break;
}
if (ax25->condition & AX25_COND_PEER_RX_BUSY) {
ax25_frames_acked(ax25, nr);
} else {
ax25_check_iframes_acked(ax25, nr);
}
if (ax25->condition & AX25_COND_OWN_RX_BUSY) {
if (pf)
ax25_std_enquiry_response(ax25);
break;
}
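/*
* In-sequence I frame (N(S) == V(R)): deliver it and advance V(R);
* anything out of sequence triggers a single REJ until resynchronised.
*/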
if (ns == ax25->vr) {
ax25->vr = (ax25->vr + 1) % ax25->modulus;
queued = ax25_rx_iframe(ax25, skb);
if (ax25->condition & AX25_COND_OWN_RX_BUSY)
ax25->vr = ns; /* i.e. the old V(R): step back, frame not taken */
ax25->condition &= ~AX25_COND_REJECT;
if (pf) {
ax25_std_enquiry_response(ax25);
} else {
if (!(ax25->condition & AX25_COND_ACK_PENDING)) {
ax25->condition |= AX25_COND_ACK_PENDING;
ax25_start_t2timer(ax25);
}
}
} else {
if (ax25->condition & AX25_COND_REJECT) {
if (pf)
ax25_std_enquiry_response(ax25);
} else {
ax25->condition |= AX25_COND_REJECT;
ax25_send_control(ax25, AX25_REJ, pf, AX25_RESPONSE);
ax25->condition &= ~AX25_COND_ACK_PENDING;
}
}
break;
case AX25_FRMR:
case AX25_ILLEGAL:
ax25_std_establish_data_link(ax25);
ax25->state = AX25_STATE_1;
break;
default:
break;
}
return queued;
}
/*
* State machine for state 4, Timer Recovery State.
* The handling of the timer(s) is in file ax25_std_timer.c
* Handling of state 0 and connection release is in ax25.c.
*/
static int ax25_std_state4_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type)
{
int queued = 0;
switch (frametype) {
case AX25_SABM:
case AX25_SABME:
if (frametype == AX25_SABM) {
ax25->modulus = AX25_MODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
} else {
ax25->modulus = AX25_EMODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_EWINDOW];
}
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
ax25_stop_t1timer(ax25);
ax25_stop_t2timer(ax25);
ax25_start_t3timer(ax25);
ax25_start_idletimer(ax25);
ax25->condition = 0x00;
ax25->vs = 0;
ax25->va = 0;
ax25->vr = 0;
ax25->state = AX25_STATE_3;
ax25->n2count = 0;
ax25_requeue_frames(ax25);
break;
case AX25_DISC:
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
ax25_disconnect(ax25, 0);
break;
case AX25_DM:
ax25_disconnect(ax25, ECONNRESET);
break;
case AX25_RR:
case AX25_RNR:
if (frametype == AX25_RR)
ax25->condition &= ~AX25_COND_PEER_RX_BUSY;
else
ax25->condition |= AX25_COND_PEER_RX_BUSY;
if (type == AX25_RESPONSE && pf) {
ax25_stop_t1timer(ax25);
ax25->n2count = 0;
if (ax25_validate_nr(ax25, nr)) {
ax25_frames_acked(ax25, nr);
if (ax25->vs == ax25->va) {
ax25_start_t3timer(ax25);
ax25->state = AX25_STATE_3;
} else {
ax25_requeue_frames(ax25);
}
} else {
ax25_std_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
}
break;
}
if (type == AX25_COMMAND && pf)
ax25_std_enquiry_response(ax25);
if (ax25_validate_nr(ax25, nr)) {
ax25_frames_acked(ax25, nr);
} else {
ax25_std_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
}
break;
case AX25_REJ:
ax25->condition &= ~AX25_COND_PEER_RX_BUSY;
if (pf && type == AX25_RESPONSE) {
ax25_stop_t1timer(ax25);
ax25->n2count = 0;
if (ax25_validate_nr(ax25, nr)) {
ax25_frames_acked(ax25, nr);
if (ax25->vs == ax25->va) {
ax25_start_t3timer(ax25);
ax25->state = AX25_STATE_3;
} else {
ax25_requeue_frames(ax25);
}
} else {
ax25_std_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
}
break;
}
if (type == AX25_COMMAND && pf)
ax25_std_enquiry_response(ax25);
if (ax25_validate_nr(ax25, nr)) {
ax25_frames_acked(ax25, nr);
ax25_requeue_frames(ax25);
} else {
ax25_std_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
}
break;
case AX25_I:
if (!ax25_validate_nr(ax25, nr)) {
ax25_std_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
break;
}
ax25_frames_acked(ax25, nr);
if (ax25->condition & AX25_COND_OWN_RX_BUSY) {
if (pf)
ax25_std_enquiry_response(ax25);
break;
}
if (ns == ax25->vr) {
ax25->vr = (ax25->vr + 1) % ax25->modulus;
queued = ax25_rx_iframe(ax25, skb);
if (ax25->condition & AX25_COND_OWN_RX_BUSY)
ax25->vr = ns; /* i.e. the old V(R): step back, frame not taken */
ax25->condition &= ~AX25_COND_REJECT;
if (pf) {
ax25_std_enquiry_response(ax25);
} else {
if (!(ax25->condition & AX25_COND_ACK_PENDING)) {
ax25->condition |= AX25_COND_ACK_PENDING;
ax25_start_t2timer(ax25);
}
}
} else {
if (ax25->condition & AX25_COND_REJECT) {
if (pf)
ax25_std_enquiry_response(ax25);
} else {
ax25->condition |= AX25_COND_REJECT;
ax25_send_control(ax25, AX25_REJ, pf, AX25_RESPONSE);
ax25->condition &= ~AX25_COND_ACK_PENDING;
}
}
break;
case AX25_FRMR:
case AX25_ILLEGAL:
ax25_std_establish_data_link(ax25);
ax25->state = AX25_STATE_1;
break;
default:
break;
}
return queued;
}
/*
* Higher level upcall for a LAPB frame
*/
int ax25_std_frame_in(ax25_cb *ax25, struct sk_buff *skb, int type)
{
int queued = 0, frametype, ns, nr, pf;
frametype = ax25_decode(ax25, skb, &ns, &nr, &pf);
switch (ax25->state) {
case AX25_STATE_1:
queued = ax25_std_state1_machine(ax25, skb, frametype, pf, type);
break;
case AX25_STATE_2:
queued = ax25_std_state2_machine(ax25, skb, frametype, pf, type);
break;
case AX25_STATE_3:
queued = ax25_std_state3_machine(ax25, skb, frametype, ns, nr, pf, type);
break;
case AX25_STATE_4:
queued = ax25_std_state4_machine(ax25, skb, frametype, ns, nr, pf, type);
break;
}
ax25_kick(ax25);
return queued;
}

86
net/ax25/ax25_std_subr.c Normal file
View file

@ -0,0 +1,86 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
/*
* The following routines are taken from page 170 of the 7th ARRL Computer
* Networking Conference paper, as is the whole state machine.
*/
void ax25_std_nr_error_recovery(ax25_cb *ax25)
{
ax25_std_establish_data_link(ax25);
}
void ax25_std_establish_data_link(ax25_cb *ax25)
{
ax25->condition = 0x00;
ax25->n2count = 0;
if (ax25->modulus == AX25_MODULUS)
ax25_send_control(ax25, AX25_SABM, AX25_POLLON, AX25_COMMAND);
else
ax25_send_control(ax25, AX25_SABME, AX25_POLLON, AX25_COMMAND);
ax25_calculate_t1(ax25);
ax25_stop_idletimer(ax25);
ax25_stop_t3timer(ax25);
ax25_stop_t2timer(ax25);
ax25_start_t1timer(ax25);
}
void ax25_std_transmit_enquiry(ax25_cb *ax25)
{
if (ax25->condition & AX25_COND_OWN_RX_BUSY)
ax25_send_control(ax25, AX25_RNR, AX25_POLLON, AX25_COMMAND);
else
ax25_send_control(ax25, AX25_RR, AX25_POLLON, AX25_COMMAND);
ax25->condition &= ~AX25_COND_ACK_PENDING;
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
}
void ax25_std_enquiry_response(ax25_cb *ax25)
{
if (ax25->condition & AX25_COND_OWN_RX_BUSY)
ax25_send_control(ax25, AX25_RNR, AX25_POLLON, AX25_RESPONSE);
else
ax25_send_control(ax25, AX25_RR, AX25_POLLON, AX25_RESPONSE);
ax25->condition &= ~AX25_COND_ACK_PENDING;
}
void ax25_std_timeout_response(ax25_cb *ax25)
{
if (ax25->condition & AX25_COND_OWN_RX_BUSY)
ax25_send_control(ax25, AX25_RNR, AX25_POLLOFF, AX25_RESPONSE);
else
ax25_send_control(ax25, AX25_RR, AX25_POLLOFF, AX25_RESPONSE);
ax25->condition &= ~AX25_COND_ACK_PENDING;
}

175
net/ax25/ax25_std_timer.c Normal file
View file

@ -0,0 +1,175 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
* Copyright (C) Frederic Rible F1OAT (frible@teaser.fr)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
void ax25_std_heartbeat_expiry(ax25_cb *ax25)
{
struct sock *sk = ax25->sk;
if (sk)
bh_lock_sock(sk);
switch (ax25->state) {
case AX25_STATE_0:
/* Magic here: If we listen() and a new link dies before it
* is accepted() it isn't 'dead' so doesn't get removed.
*/
if (!sk || sock_flag(sk, SOCK_DESTROY) ||
(sk->sk_state == TCP_LISTEN &&
sock_flag(sk, SOCK_DEAD))) {
if (sk) {
sock_hold(sk);
ax25_destroy_socket(ax25);
bh_unlock_sock(sk);
sock_put(sk);
} else
ax25_destroy_socket(ax25);
return;
}
break;
case AX25_STATE_3:
case AX25_STATE_4:
/*
* Check the state of the receive buffer: once it has drained below
* half of its limit, clear our own busy condition and tell the peer
* we are ready again with an RR response.
*/
if (sk != NULL) {
if (atomic_read(&sk->sk_rmem_alloc) <
(sk->sk_rcvbuf >> 1) &&
(ax25->condition & AX25_COND_OWN_RX_BUSY)) {
ax25->condition &= ~AX25_COND_OWN_RX_BUSY;
ax25->condition &= ~AX25_COND_ACK_PENDING;
ax25_send_control(ax25, AX25_RR, AX25_POLLOFF, AX25_RESPONSE);
break;
}
}
}
if (sk)
bh_unlock_sock(sk);
ax25_start_heartbeat(ax25);
}
void ax25_std_t2timer_expiry(ax25_cb *ax25)
{
if (ax25->condition & AX25_COND_ACK_PENDING) {
ax25->condition &= ~AX25_COND_ACK_PENDING;
ax25_std_timeout_response(ax25);
}
}
void ax25_std_t3timer_expiry(ax25_cb *ax25)
{
ax25->n2count = 0;
ax25_std_transmit_enquiry(ax25);
ax25->state = AX25_STATE_4;
}
void ax25_std_idletimer_expiry(ax25_cb *ax25)
{
ax25_clear_queues(ax25);
ax25->n2count = 0;
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
ax25->state = AX25_STATE_2;
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
ax25_stop_t2timer(ax25);
ax25_stop_t3timer(ax25);
if (ax25->sk != NULL) {
bh_lock_sock(ax25->sk);
ax25->sk->sk_state = TCP_CLOSE;
ax25->sk->sk_err = 0;
ax25->sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(ax25->sk, SOCK_DEAD)) {
ax25->sk->sk_state_change(ax25->sk);
sock_set_flag(ax25->sk, SOCK_DEAD);
}
bh_unlock_sock(ax25->sk);
}
}
void ax25_std_t1timer_expiry(ax25_cb *ax25)
{
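/*
* T1 (the acknowledgement timer) has expired: retransmit the pending
* SABM(E), DISC or enquiry, giving up after n2 attempts. Note that in
* state 1 an unanswered SABME falls back to a mod-8 SABM first.
*/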
switch (ax25->state) {
case AX25_STATE_1:
if (ax25->n2count == ax25->n2) {
if (ax25->modulus == AX25_MODULUS) {
ax25_disconnect(ax25, ETIMEDOUT);
return;
} else {
ax25->modulus = AX25_MODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
ax25->n2count = 0;
ax25_send_control(ax25, AX25_SABM, AX25_POLLON, AX25_COMMAND);
}
} else {
ax25->n2count++;
if (ax25->modulus == AX25_MODULUS)
ax25_send_control(ax25, AX25_SABM, AX25_POLLON, AX25_COMMAND);
else
ax25_send_control(ax25, AX25_SABME, AX25_POLLON, AX25_COMMAND);
}
break;
case AX25_STATE_2:
if (ax25->n2count == ax25->n2) {
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
ax25_disconnect(ax25, ETIMEDOUT);
return;
} else {
ax25->n2count++;
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
}
break;
case AX25_STATE_3:
ax25->n2count = 1;
ax25_std_transmit_enquiry(ax25);
ax25->state = AX25_STATE_4;
break;
case AX25_STATE_4:
if (ax25->n2count == ax25->n2) {
ax25_send_control(ax25, AX25_DM, AX25_POLLON, AX25_RESPONSE);
ax25_disconnect(ax25, ETIMEDOUT);
return;
} else {
ax25->n2count++;
ax25_std_transmit_enquiry(ax25);
}
break;
}
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
}

289
net/ax25/ax25_subr.c Normal file
View file

@ -0,0 +1,289 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
* Copyright (C) Frederic Rible F1OAT (frible@teaser.fr)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
/*
* This routine purges all the queues of frames.
*/
void ax25_clear_queues(ax25_cb *ax25)
{
skb_queue_purge(&ax25->write_queue);
skb_queue_purge(&ax25->ack_queue);
skb_queue_purge(&ax25->reseq_queue);
skb_queue_purge(&ax25->frag_queue);
}
/*
* This routine purges the input queue of those frames that have been
* acknowledged. This replaces the boxes labelled "V(a) <- N(r)" on the
* SDL diagram.
*/
void ax25_frames_acked(ax25_cb *ax25, unsigned short nr)
{
struct sk_buff *skb;
/*
* Remove all the ack-ed frames from the ack queue.
*/
if (ax25->va != nr) {
while (skb_peek(&ax25->ack_queue) != NULL && ax25->va != nr) {
skb = skb_dequeue(&ax25->ack_queue);
kfree_skb(skb);
ax25->va = (ax25->va + 1) % ax25->modulus;
}
}
}
void ax25_requeue_frames(ax25_cb *ax25)
{
struct sk_buff *skb;
/*
* Requeue all the un-ack-ed frames on the output queue to be picked
* up by ax25_kick called from the timer. This arrangement handles the
* possibility of an empty output queue.
*/
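/*
* Dequeueing from the tail of the ack queue and requeueing at the
* head of the write queue preserves the original frame order.
*/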
while ((skb = skb_dequeue_tail(&ax25->ack_queue)) != NULL)
skb_queue_head(&ax25->write_queue, skb);
}
/*
* Validate that the value of nr lies between va and vs (inclusive).
* Returns 1 if it does, 0 otherwise.
*/
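/*
* Example: with modulus 8, va = 6 and vs = 1, the valid values of nr
* are 6, 7, 0 and 1.
*/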
int ax25_validate_nr(ax25_cb *ax25, unsigned short nr)
{
unsigned short vc = ax25->va;
while (vc != ax25->vs) {
if (nr == vc)
return 1;
vc = (vc + 1) % ax25->modulus;
}
if (nr == ax25->vs)
return 1;
return 0;
}
/*
* This routine is the centralised routine for parsing the control
* information for the different frame formats.
*/
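/*
* Mod-8 control field layout, as the shifts below assume:
*   I frame: N(R) in bits 7-5, P/F in bit 4, N(S) in bits 3-1, bit 0 = 0
*   S frame: N(R) in bits 7-5, P/F in bit 4, bits 1-0 = 01
*   U frame: P/F in bit 4, bits 1-0 = 11
* In mod-128 operation I and S frames use a second control octet that
* carries N(R) and the P/F bit.
*/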
int ax25_decode(ax25_cb *ax25, struct sk_buff *skb, int *ns, int *nr, int *pf)
{
unsigned char *frame;
int frametype = AX25_ILLEGAL;
frame = skb->data;
*ns = *nr = *pf = 0;
if (ax25->modulus == AX25_MODULUS) {
if ((frame[0] & AX25_S) == 0) {
frametype = AX25_I; /* I frame - carries NR/NS/PF */
*ns = (frame[0] >> 1) & 0x07;
*nr = (frame[0] >> 5) & 0x07;
*pf = frame[0] & AX25_PF;
} else if ((frame[0] & AX25_U) == 1) { /* S frame - take out PF/NR */
frametype = frame[0] & 0x0F;
*nr = (frame[0] >> 5) & 0x07;
*pf = frame[0] & AX25_PF;
} else if ((frame[0] & AX25_U) == 3) { /* U frame - take out PF */
frametype = frame[0] & ~AX25_PF;
*pf = frame[0] & AX25_PF;
}
skb_pull(skb, 1);
} else {
if ((frame[0] & AX25_S) == 0) {
frametype = AX25_I; /* I frame - carries NR/NS/PF */
*ns = (frame[0] >> 1) & 0x7F;
*nr = (frame[1] >> 1) & 0x7F;
*pf = frame[1] & AX25_EPF;
skb_pull(skb, 2);
} else if ((frame[0] & AX25_U) == 1) { /* S frame - take out PF/NR */
frametype = frame[0] & 0x0F;
*nr = (frame[1] >> 1) & 0x7F;
*pf = frame[1] & AX25_EPF;
skb_pull(skb, 2);
} else if ((frame[0] & AX25_U) == 3) { /* U frame - take out PF */
frametype = frame[0] & ~AX25_PF;
*pf = frame[0] & AX25_PF;
skb_pull(skb, 1);
}
}
return frametype;
}
/*
* This routine is called when the HDLC layer internally generates a
* command or response for the remote machine ( eg. RR, UA etc. ).
* Only supervisory or unnumbered frames are processed.
*/
void ax25_send_control(ax25_cb *ax25, int frametype, int poll_bit, int type)
{
struct sk_buff *skb;
unsigned char *dptr;
if ((skb = alloc_skb(ax25->ax25_dev->dev->hard_header_len + 2, GFP_ATOMIC)) == NULL)
return;
skb_reserve(skb, ax25->ax25_dev->dev->hard_header_len);
skb_reset_network_header(skb);
/* Assume a response - address structure for DTE */
if (ax25->modulus == AX25_MODULUS) {
dptr = skb_put(skb, 1);
*dptr = frametype;
*dptr |= (poll_bit) ? AX25_PF : 0;
if ((frametype & AX25_U) == AX25_S) /* S frames carry NR */
*dptr |= (ax25->vr << 5);
} else {
if ((frametype & AX25_U) == AX25_U) {
dptr = skb_put(skb, 1);
*dptr = frametype;
*dptr |= (poll_bit) ? AX25_PF : 0;
} else {
dptr = skb_put(skb, 2);
dptr[0] = frametype;
dptr[1] = (ax25->vr << 1);
dptr[1] |= (poll_bit) ? AX25_EPF : 0;
}
}
ax25_transmit_buffer(ax25, skb, type);
}
/*
* Send a 'DM' to an unknown connection attempt, or an invalid caller.
*
* Note: src here is the sender, thus it's the target of the DM
*/
void ax25_return_dm(struct net_device *dev, ax25_address *src, ax25_address *dest, ax25_digi *digi)
{
struct sk_buff *skb;
char *dptr;
ax25_digi retdigi;
if (dev == NULL)
return;
if ((skb = alloc_skb(dev->hard_header_len + 1, GFP_ATOMIC)) == NULL)
return; /* Next SABM will get DM'd */
skb_reserve(skb, dev->hard_header_len);
skb_reset_network_header(skb);
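/* Reverse the digipeater path so the DM retraces the route the
* offending frame arrived on.
*/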
ax25_digi_invert(digi, &retdigi);
dptr = skb_put(skb, 1);
*dptr = AX25_DM | AX25_PF;
/*
* Do the address ourselves
*/
dptr = skb_push(skb, ax25_addr_size(digi));
dptr += ax25_addr_build(dptr, dest, src, &retdigi, AX25_RESPONSE, AX25_MODULUS);
ax25_queue_xmit(skb, dev);
}
/*
* Exponential backoff for AX.25
*/
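/*
* t1 is derived from the smoothed round trip time:
*   backoff 0:               t1 = 2 * rtt
*   backoff 1 (linear):      t1 = (2 + 2 * n2count) * rtt
*   backoff 2 (exponential): t1 = min(2^(n2count + 1), 8) * rtt
*/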
void ax25_calculate_t1(ax25_cb *ax25)
{
int n, t = 2;
switch (ax25->backoff) {
case 0:
break;
case 1:
t += 2 * ax25->n2count;
break;
case 2:
for (n = 0; n < ax25->n2count; n++)
t *= 2;
if (t > 8)
t = 8;
break;
}
ax25->t1 = t * ax25->rtt;
}
/*
* Calculate the Round Trip Time
*/
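/*
* Exponentially weighted moving average with a gain of 1/10: the new
* sample is the time T1 actually ran (t1 minus what is left on the
* timer), and it is only taken on a clean first transmission.
*/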
void ax25_calculate_rtt(ax25_cb *ax25)
{
if (ax25->backoff == 0)
return;
if (ax25_t1timer_running(ax25) && ax25->n2count == 0)
ax25->rtt = (9 * ax25->rtt + ax25->t1 - ax25_display_timer(&ax25->t1timer)) / 10;
if (ax25->rtt < AX25_T1CLAMPLO)
ax25->rtt = AX25_T1CLAMPLO;
if (ax25->rtt > AX25_T1CLAMPHI)
ax25->rtt = AX25_T1CLAMPHI;
}
void ax25_disconnect(ax25_cb *ax25, int reason)
{
ax25_clear_queues(ax25);
ax25_stop_t1timer(ax25);
ax25_stop_t2timer(ax25);
ax25_stop_t3timer(ax25);
ax25_stop_idletimer(ax25);
ax25->state = AX25_STATE_0;
ax25_link_failed(ax25, reason);
if (ax25->sk != NULL) {
local_bh_disable();
bh_lock_sock(ax25->sk);
ax25->sk->sk_state = TCP_CLOSE;
ax25->sk->sk_err = reason;
ax25->sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(ax25->sk, SOCK_DEAD)) {
ax25->sk->sk_state_change(ax25->sk);
sock_set_flag(ax25->sk, SOCK_DEAD);
}
bh_unlock_sock(ax25->sk);
local_bh_enable();
}
}

226
net/ax25/ax25_timer.c Normal file
View file

@ -0,0 +1,226 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
* Copyright (C) Darryl Miles G7LED (dlm@g7led.demon.co.uk)
* Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
* Copyright (C) Frederic Rible F1OAT (frible@teaser.fr)
* Copyright (C) 2002 Ralf Baechle DO1GRB (ralf@gnu.org)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
static void ax25_heartbeat_expiry(unsigned long);
static void ax25_t1timer_expiry(unsigned long);
static void ax25_t2timer_expiry(unsigned long);
static void ax25_t3timer_expiry(unsigned long);
static void ax25_idletimer_expiry(unsigned long);
void ax25_setup_timers(ax25_cb *ax25)
{
setup_timer(&ax25->timer, ax25_heartbeat_expiry, (unsigned long)ax25);
setup_timer(&ax25->t1timer, ax25_t1timer_expiry, (unsigned long)ax25);
setup_timer(&ax25->t2timer, ax25_t2timer_expiry, (unsigned long)ax25);
setup_timer(&ax25->t3timer, ax25_t3timer_expiry, (unsigned long)ax25);
setup_timer(&ax25->idletimer, ax25_idletimer_expiry,
(unsigned long)ax25);
}
void ax25_start_heartbeat(ax25_cb *ax25)
{
mod_timer(&ax25->timer, jiffies + 5 * HZ);
}
void ax25_start_t1timer(ax25_cb *ax25)
{
mod_timer(&ax25->t1timer, jiffies + ax25->t1);
}
void ax25_start_t2timer(ax25_cb *ax25)
{
mod_timer(&ax25->t2timer, jiffies + ax25->t2);
}
void ax25_start_t3timer(ax25_cb *ax25)
{
if (ax25->t3 > 0)
mod_timer(&ax25->t3timer, jiffies + ax25->t3);
else
del_timer(&ax25->t3timer);
}
void ax25_start_idletimer(ax25_cb *ax25)
{
if (ax25->idle > 0)
mod_timer(&ax25->idletimer, jiffies + ax25->idle);
else
del_timer(&ax25->idletimer);
}
void ax25_stop_heartbeat(ax25_cb *ax25)
{
del_timer(&ax25->timer);
}
void ax25_stop_t1timer(ax25_cb *ax25)
{
del_timer(&ax25->t1timer);
}
void ax25_stop_t2timer(ax25_cb *ax25)
{
del_timer(&ax25->t2timer);
}
void ax25_stop_t3timer(ax25_cb *ax25)
{
del_timer(&ax25->t3timer);
}
void ax25_stop_idletimer(ax25_cb *ax25)
{
del_timer(&ax25->idletimer);
}
int ax25_t1timer_running(ax25_cb *ax25)
{
return timer_pending(&ax25->t1timer);
}
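/* Time (in jiffies) left before a timer fires, or 0 if it is stopped. */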
unsigned long ax25_display_timer(struct timer_list *timer)
{
if (!timer_pending(timer))
return 0;
return timer->expires - jiffies;
}
EXPORT_SYMBOL(ax25_display_timer);
static void ax25_heartbeat_expiry(unsigned long param)
{
int proto = AX25_PROTO_STD_SIMPLEX;
ax25_cb *ax25 = (ax25_cb *)param;
if (ax25->ax25_dev)
proto = ax25->ax25_dev->values[AX25_VALUES_PROTOCOL];
switch (proto) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_std_heartbeat_expiry(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
if (ax25->ax25_dev->dama.slave)
ax25_ds_heartbeat_expiry(ax25);
else
ax25_std_heartbeat_expiry(ax25);
break;
#endif
}
}
static void ax25_t1timer_expiry(unsigned long param)
{
ax25_cb *ax25 = (ax25_cb *)param;
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_std_t1timer_expiry(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
if (!ax25->ax25_dev->dama.slave)
ax25_std_t1timer_expiry(ax25);
break;
#endif
}
}
static void ax25_t2timer_expiry(unsigned long param)
{
ax25_cb *ax25 = (ax25_cb *)param;
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_std_t2timer_expiry(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
if (!ax25->ax25_dev->dama.slave)
ax25_std_t2timer_expiry(ax25);
break;
#endif
}
}
static void ax25_t3timer_expiry(unsigned long param)
{
ax25_cb *ax25 = (ax25_cb *)param;
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_std_t3timer_expiry(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
if (ax25->ax25_dev->dama.slave)
ax25_ds_t3timer_expiry(ax25);
else
ax25_std_t3timer_expiry(ax25);
break;
#endif
}
}
static void ax25_idletimer_expiry(unsigned long param)
{
ax25_cb *ax25 = (ax25_cb *)param;
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_std_idletimer_expiry(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
if (ax25->ax25_dev->dama.slave)
ax25_ds_idletimer_expiry(ax25);
else
ax25_std_idletimer_expiry(ax25);
break;
#endif
}
}

222
net/ax25/ax25_uid.c Normal file
View file

@ -0,0 +1,222 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/netfilter.h>
#include <linux/sysctl.h>
#include <linux/export.h>
#include <net/ip.h>
#include <net/arp.h>
/*
* Callsign/UID mapper. This is in kernel space for security on multi-amateur machines.
*/
static HLIST_HEAD(ax25_uid_list);
static DEFINE_RWLOCK(ax25_uid_lock);
int ax25_uid_policy;
EXPORT_SYMBOL(ax25_uid_policy);
ax25_uid_assoc *ax25_findbyuid(kuid_t uid)
{
ax25_uid_assoc *ax25_uid, *res = NULL;
read_lock(&ax25_uid_lock);
ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
if (uid_eq(ax25_uid->uid, uid)) {
ax25_uid_hold(ax25_uid);
res = ax25_uid;
break;
}
}
read_unlock(&ax25_uid_lock);
return res;
}
EXPORT_SYMBOL(ax25_findbyuid);
int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
{
ax25_uid_assoc *ax25_uid;
ax25_uid_assoc *user;
unsigned long res;
switch (cmd) {
case SIOCAX25GETUID:
res = -ENOENT;
read_lock(&ax25_uid_lock);
ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) {
res = from_kuid_munged(current_user_ns(), ax25_uid->uid);
break;
}
}
read_unlock(&ax25_uid_lock);
return res;
case SIOCAX25ADDUID:
{
kuid_t sax25_kuid;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
sax25_kuid = make_kuid(current_user_ns(), sax->sax25_uid);
if (!uid_valid(sax25_kuid))
return -EINVAL;
user = ax25_findbyuid(sax25_kuid);
if (user) {
ax25_uid_put(user);
return -EEXIST;
}
if (sax->sax25_uid == 0)
return -EINVAL;
if ((ax25_uid = kmalloc(sizeof(*ax25_uid), GFP_KERNEL)) == NULL)
return -ENOMEM;
atomic_set(&ax25_uid->refcount, 1);
ax25_uid->uid = sax25_kuid;
ax25_uid->call = sax->sax25_call;
write_lock(&ax25_uid_lock);
hlist_add_head(&ax25_uid->uid_node, &ax25_uid_list);
write_unlock(&ax25_uid_lock);
return 0;
}
case SIOCAX25DELUID:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
ax25_uid = NULL;
write_lock(&ax25_uid_lock);
ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0)
break;
}
if (ax25_uid == NULL) {
write_unlock(&ax25_uid_lock);
return -ENOENT;
}
hlist_del_init(&ax25_uid->uid_node);
ax25_uid_put(ax25_uid);
write_unlock(&ax25_uid_lock);
return 0;
default:
return -EINVAL;
}
return -EINVAL; /* NOTREACHED */
}
#ifdef CONFIG_PROC_FS
static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(ax25_uid_lock)
{
read_lock(&ax25_uid_lock);
return seq_hlist_start_head(&ax25_uid_list, *pos);
}
static void *ax25_uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_hlist_next(v, &ax25_uid_list, pos);
}
static void ax25_uid_seq_stop(struct seq_file *seq, void *v)
__releases(ax25_uid_lock)
{
read_unlock(&ax25_uid_lock);
}
static int ax25_uid_seq_show(struct seq_file *seq, void *v)
{
char buf[11];
if (v == SEQ_START_TOKEN)
seq_printf(seq, "Policy: %d\n", ax25_uid_policy);
else {
struct ax25_uid_assoc *pt;
pt = hlist_entry(v, struct ax25_uid_assoc, uid_node);
seq_printf(seq, "%6d %s\n",
from_kuid_munged(seq_user_ns(seq), pt->uid),
ax2asc(buf, &pt->call));
}
return 0;
}
static const struct seq_operations ax25_uid_seqops = {
.start = ax25_uid_seq_start,
.next = ax25_uid_seq_next,
.stop = ax25_uid_seq_stop,
.show = ax25_uid_seq_show,
};
static int ax25_uid_info_open(struct inode *inode, struct file *file)
{
return seq_open(file, &ax25_uid_seqops);
}
const struct file_operations ax25_uid_fops = {
.owner = THIS_MODULE,
.open = ax25_uid_info_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif
/*
* Free all memory associated with UID/Callsign structures.
*/
void __exit ax25_uid_free(void)
{
ax25_uid_assoc *ax25_uid;
write_lock(&ax25_uid_lock);
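/*
* Restart the scan after every removal; deleting the current node
* invalidates the cursor used by ax25_uid_for_each().
*/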
again:
ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
hlist_del_init(&ax25_uid->uid_node);
ax25_uid_put(ax25_uid);
goto again;
}
write_unlock(&ax25_uid_lock);
}

184
net/ax25/sysctl_net_ax25.c Normal file
View file

@ -0,0 +1,184 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) 1996 Mike Shaver (shaver@zeroknowledge.com)
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/spinlock.h>
#include <net/ax25.h>
static int min_ipdefmode[1], max_ipdefmode[] = {1};
static int min_axdefmode[1], max_axdefmode[] = {1};
static int min_backoff[1], max_backoff[] = {2};
static int min_conmode[1], max_conmode[] = {2};
static int min_window[] = {1}, max_window[] = {7};
static int min_ewindow[] = {1}, max_ewindow[] = {63};
static int min_t1[] = {1}, max_t1[] = {30000};
static int min_t2[] = {1}, max_t2[] = {20000};
static int min_t3[1], max_t3[] = {3600000};
static int min_idle[1], max_idle[] = {65535000};
static int min_n2[] = {1}, max_n2[] = {31};
static int min_paclen[] = {1}, max_paclen[] = {512};
static int min_proto[1], max_proto[] = { AX25_PROTO_MAX };
#ifdef CONFIG_AX25_DAMA_SLAVE
static int min_ds_timeout[1], max_ds_timeout[] = {65535000};
#endif
static const struct ctl_table ax25_param_table[] = {
{
.procname = "ip_default_mode",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_ipdefmode,
.extra2 = &max_ipdefmode
},
{
.procname = "ax25_default_mode",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_axdefmode,
.extra2 = &max_axdefmode
},
{
.procname = "backoff_type",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_backoff,
.extra2 = &max_backoff
},
{
.procname = "connect_mode",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_conmode,
.extra2 = &max_conmode
},
{
.procname = "standard_window_size",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_window,
.extra2 = &max_window
},
{
.procname = "extended_window_size",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_ewindow,
.extra2 = &max_ewindow
},
{
.procname = "t1_timeout",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_t1,
.extra2 = &max_t1
},
{
.procname = "t2_timeout",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_t2,
.extra2 = &max_t2
},
{
.procname = "t3_timeout",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_t3,
.extra2 = &max_t3
},
{
.procname = "idle_timeout",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_idle,
.extra2 = &max_idle
},
{
.procname = "maximum_retry_count",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_n2,
.extra2 = &max_n2
},
{
.procname = "maximum_packet_length",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_paclen,
.extra2 = &max_paclen
},
{
.procname = "protocol",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_proto,
.extra2 = &max_proto
},
#ifdef CONFIG_AX25_DAMA_SLAVE
{
.procname = "dama_slave_timeout",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_ds_timeout,
.extra2 = &max_ds_timeout
},
#endif
{ } /* that's all, folks! */
};
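/*
* Each AX.25 device gets its own writable copy of the template table,
* registered under net/ax25/<devname>/, with every entry pointing at
* that device's values[] slot.
*/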
int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
{
char path[sizeof("net/ax25/") + IFNAMSIZ];
int k;
struct ctl_table *table;
table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
if (!table)
return -ENOMEM;
for (k = 0; k < AX25_MAX_VALUES; k++)
table[k].data = &ax25_dev->values[k];
snprintf(path, sizeof(path), "net/ax25/%s", ax25_dev->dev->name);
ax25_dev->sysheader = register_net_sysctl(&init_net, path, table);
if (!ax25_dev->sysheader) {
kfree(table);
return -ENOMEM;
}
return 0;
}
void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev)
{
struct ctl_table_header *header = ax25_dev->sysheader;
struct ctl_table *table;
if (header) {
ax25_dev->sysheader = NULL;
table = header->ctl_table_arg;
unregister_net_sysctl_table(header);
kfree(table);
}
}

69
net/batman-adv/Kconfig Normal file
View file

@ -0,0 +1,69 @@
#
# B.A.T.M.A.N. meshing protocol
#
config BATMAN_ADV
tristate "B.A.T.M.A.N. Advanced Meshing Protocol"
depends on NET
select CRC16
select LIBCRC32C
default n
help
B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
a routing protocol for multi-hop ad-hoc mesh networks. The
networks may be wired or wireless. See
http://www.open-mesh.org/ for more information and user space
tools.
config BATMAN_ADV_BLA
bool "Bridge Loop Avoidance"
depends on BATMAN_ADV && INET
default y
help
This option enables BLA (Bridge Loop Avoidance), a mechanism
to avoid Ethernet frames looping when mesh nodes are connected
to both the same LAN and the same mesh. If you will never use
more than one mesh node in the same LAN, you can safely remove
this feature and save some space.
config BATMAN_ADV_DAT
bool "Distributed ARP Table"
depends on BATMAN_ADV && INET
default n
help
This option enables DAT (Distributed ARP Table), a DHT based
mechanism that increases ARP reliability on sparse wireless
mesh networks. If you think that your network does not need
this option you can safely remove it and save some space.
config BATMAN_ADV_NC
bool "Network Coding"
depends on BATMAN_ADV
default n
help
This option enables network coding, a mechanism that aims to
increase the overall network throughput by fusing multiple
packets in one transmission.
Note that interfaces controlled by batman-adv must be manually
configured to have promiscuous mode enabled in order to make
network coding work.
If you think that your network does not need this feature you
can safely disable it and save some space.
config BATMAN_ADV_MCAST
bool "Multicast optimisation"
depends on BATMAN_ADV
default n
help
This option enables the multicast optimisation which aims to
reduce the air overhead while improving the reliability of
multicast messages.
config BATMAN_ADV_DEBUG
bool "B.A.T.M.A.N. debugging"
depends on BATMAN_ADV
help
This is an option for use by developers; most people should
say N here. This enables compilation of support for
outputting debugging information to the kernel log. The
output is controlled via the module parameter debug.

39
net/batman-adv/Makefile Normal file
View file

@ -0,0 +1,39 @@
#
# Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
#
# Marek Lindner, Simon Wunderlich
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
batman-adv-y += bat_iv_ogm.o
batman-adv-y += bitarray.o
batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
batman-adv-y += debugfs.o
batman-adv-$(CONFIG_BATMAN_ADV_DAT) += distributed-arp-table.o
batman-adv-y += fragmentation.o
batman-adv-y += gateway_client.o
batman-adv-y += gateway_common.o
batman-adv-y += hard-interface.o
batman-adv-y += hash.o
batman-adv-y += icmp_socket.o
batman-adv-y += main.o
batman-adv-$(CONFIG_BATMAN_ADV_NC) += network-coding.o
batman-adv-y += originator.o
batman-adv-y += routing.o
batman-adv-y += send.o
batman-adv-y += soft-interface.o
batman-adv-y += sysfs.o
batman-adv-y += translation-table.o
batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast.o

23
net/batman-adv/bat_algo.h Normal file
View file

@ -0,0 +1,23 @@
/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _NET_BATMAN_ADV_BAT_ALGO_H_
#define _NET_BATMAN_ADV_BAT_ALGO_H_
int batadv_iv_init(void);
#endif /* _NET_BATMAN_ADV_BAT_ALGO_H_ */

Some files were not shown because too many files have changed in this diff.