android_kernel_samsung_on5x.../drivers/net/wireless/scsc/netif.c

/*****************************************************************************
*
* Copyright (c) 2012 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
****************************************************************************/
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/sch_generic.h>
#include "debug.h"
#include "netif.h"
#include "dev.h"
#include "mgt.h"
#include "scsc_wifi_fcq.h"
#include "ioctl.h"
#include "oxygen_ioctl.h"
#include "mib.h"
#define IP4_OFFSET_TO_TOS_FIELD 1
#define SLSI_TX_WAKELOCK_TIME (100)
/* Net Device callback operations */
static int slsi_net_open(struct net_device *dev)
{
struct netdev_vif *ndev_vif = netdev_priv(dev);
struct slsi_dev *sdev = ndev_vif->sdev;
int err;
SLSI_NET_DBG2(dev, SLSI_NETDEV, "iface_num = %d\n", ndev_vif->ifnum);
if (WARN_ON(ndev_vif->is_available))
return -EINVAL;
if (sdev->mlme_blocked) {
SLSI_NET_DBG2(dev, SLSI_NETDEV, "MLME Blocked. Reject net_open\n");
return -EIO;
}
slsi_wakelock(&sdev->wlan_wl);
/* Check /data/.psm.info to see whether MCD *#232338# requested RF test mode. */
check_mcd_232338_rf_mode();
err = slsi_start(sdev);
if (WARN_ON(err)) {
slsi_wakeunlock(&sdev->wlan_wl);
return err;
}
if (!sdev->netdev_up_count) {
slsi_get_hw_mac_address(sdev, sdev->hw_addr);
SLSI_DBG1(sdev, SLSI_INIT_DEINIT, "Configure MAC address to [%pM]\n", sdev->hw_addr);
/* Assign Addresses */
SLSI_ETHER_COPY(sdev->netdev_addresses[SLSI_NET_INDEX_WLAN], sdev->hw_addr);
SLSI_ETHER_COPY(sdev->netdev_addresses[SLSI_NET_INDEX_P2P], sdev->hw_addr);
sdev->netdev_addresses[SLSI_NET_INDEX_P2P][0] |= 0x02; /* Set the local bit */
SLSI_ETHER_COPY(sdev->netdev_addresses[SLSI_NET_INDEX_P2PX], sdev->hw_addr);
sdev->netdev_addresses[SLSI_NET_INDEX_P2PX][0] |= 0x02; /* Set the local bit */
sdev->netdev_addresses[SLSI_NET_INDEX_P2PX][4] ^= 0x80; /* EXOR 5th byte with 0x80 */
}
SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[ndev_vif->ifnum]);
SLSI_MUTEX_LOCK(ndev_vif->vif_mutex);
ndev_vif->is_available = true;
sdev->netdev_up_count++;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
reinit_completion(&ndev_vif->sig_wait.completion);
#else
INIT_COMPLETION(ndev_vif->sig_wait.completion);
#endif
SLSI_MUTEX_UNLOCK(ndev_vif->vif_mutex);
netif_tx_start_all_queues(dev);
slsi_wakeunlock(&sdev->wlan_wl);
/* The default power mode in host. */
/* PSID 2511 means unifiForceActive and a value of 1 means active. */
if (slsi_is_232338_test_mode_enabled()) {
SLSI_NET_ERR(dev, "*#232338# rf test mode set is enabled.\n");
slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_ROAM_MODE, 0);
slsi_set_mib_roam(sdev, NULL, 2511, 1);
slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_TPC_MAX_POWER_RSSI_THRESHOLD, 0);
}
return 0;
}
static int slsi_net_stop(struct net_device *dev)
{
struct netdev_vif *ndev_vif = netdev_priv(dev);
struct slsi_dev *sdev = ndev_vif->sdev;
SLSI_NET_DBG1(dev, SLSI_NETDEV, "\n");
slsi_wakelock(&sdev->wlan_wl);
netif_tx_stop_all_queues(dev);
if (!ndev_vif->is_available) {
/* May have been taken out by the Chip going down */
SLSI_NET_DBG1(dev, SLSI_NETDEV, "Not available.\n");
slsi_wakeunlock(&sdev->wlan_wl);
return 0;
}
#ifndef SLSI_TEST_DEV
if (!slsi_is_232338_test_mode_enabled() && !sdev->recovery_status) {
SLSI_NET_ERR(dev, "*#232338# retruns to user mode.\n");
slsi_set_mib_roam(sdev, NULL, SLSI_PSID_UNIFI_TPC_MAX_POWER_RSSI_THRESHOLD, -55);
}
#endif
slsi_stop_net_dev(sdev, dev);
#ifndef SLSI_TEST_DEV
memset(dev->dev_addr, 0, ETH_ALEN);
#endif
slsi_wakeunlock(&sdev->wlan_wl);
return 0;
}
/* This is called after the WE handlers */
static int slsi_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
SLSI_NET_DBG1(dev, SLSI_NETDEV, "ioctl cmd:0x%.4x\n", cmd);
if (cmd == SIOCDEVPRIVATE + 2) { /* 0x89f0 + 2 from wpa_supplicant */
int ret;
ret = slsi_ioctl(dev, rq, cmd);
return ret;
} else if (cmd == SIOCDEVPRIVATE + 1) { /* 0x89f0 + 1 from olsrd */
#ifdef CONFIG_SCSC_WLAN_OXYGEN_ENABLE
int ret;
ret = oxygen_ioctl(dev, rq, cmd);
return ret;
#else
return -EOPNOTSUPP;
#endif
}
return -EOPNOTSUPP;
}
static struct net_device_stats *slsi_net_get_stats(struct net_device *dev)
{
struct netdev_vif *ndev_vif = netdev_priv(dev);
SLSI_NET_DBG4(dev, SLSI_NETDEV, "\n");
return &ndev_vif->stats;
}
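/* Map the IP TOS/DSCP byte to an 802.1D user priority (UP 0-7): the top
* three precedence bits of TOS select the UP. For example, TOS 0xB8
* (DSCP EF) gives (0xB8 & 0xE0) >> 5 = 5, i.e. FAPI_PRIORITY_QOS_UP5.
* For IPv6 the top three traffic-class bits are extracted from the low
* nibble of the first header byte.
*/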
static u16 slsi_get_priority_from_tos(u8 *frame, u16 proto)
{
if (WARN_ON(!frame))
return FAPI_PRIORITY_QOS_UP0;
switch (proto) {
case 0x0800: /* IPv4 */
case 0x814C: /* SNMP */
case 0x880C: /* GSMP */
return (u16)(((frame[IP4_OFFSET_TO_TOS_FIELD]) & 0xE0) >> 5);
case 0x8100: /* VLAN */
return (u16)((*frame & 0xE0) >> 5);
case 0x86DD: /* IPv6 */
return (u16)((*frame & 0x0E) >> 1);
default:
return FAPI_PRIORITY_QOS_UP0;
}
}
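/* Downgrade one step along the WMM admission-control ladder:
* VO (UP6/7) -> VI (UP5), VI (UP4/5) -> BE (UP3), BE (UP0/3) -> BK (UP2).
* Returns false once BK is reached, which terminates the downgrade loop
* in slsi_net_downgrade_pri().
*/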
static bool slsi_net_downgrade_ac(struct net_device *dev, struct sk_buff *skb)
{
SLSI_UNUSED_PARAMETER(dev);
switch (skb->priority) {
case 6:
case 7:
skb->priority = FAPI_PRIORITY_QOS_UP5; /* VO -> VI */
return true;
case 4:
case 5:
skb->priority = FAPI_PRIORITY_QOS_UP3; /* VI -> BE */
return true;
case 0:
case 3:
skb->priority = FAPI_PRIORITY_QOS_UP2; /* BE -> BK */
return true;
default:
return false;
}
}
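/* Return a bitmask of the two user priorities that share the same AC as
* the given priority; used to test peer->tspec_established per AC.
*/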
static u8 slsi_net_up_to_ac_mapping(u8 priority)
{
switch (priority) {
case FAPI_PRIORITY_QOS_UP6:
case FAPI_PRIORITY_QOS_UP7:
return BIT(FAPI_PRIORITY_QOS_UP6) | BIT(FAPI_PRIORITY_QOS_UP7);
case FAPI_PRIORITY_QOS_UP4:
case FAPI_PRIORITY_QOS_UP5:
return BIT(FAPI_PRIORITY_QOS_UP4) | BIT(FAPI_PRIORITY_QOS_UP5);
case FAPI_PRIORITY_QOS_UP0:
case FAPI_PRIORITY_QOS_UP3:
return BIT(FAPI_PRIORITY_QOS_UP0) | BIT(FAPI_PRIORITY_QOS_UP3);
default:
return BIT(FAPI_PRIORITY_QOS_UP1) | BIT(FAPI_PRIORITY_QOS_UP2);
}
}
enum slsi_traffic_q slsi_frame_priority_to_ac_queue(u16 priority)
{
switch (priority) {
case FAPI_PRIORITY_QOS_UP0:
case FAPI_PRIORITY_QOS_UP3:
return SLSI_TRAFFIC_Q_BE;
case FAPI_PRIORITY_QOS_UP1:
case FAPI_PRIORITY_QOS_UP2:
return SLSI_TRAFFIC_Q_BK;
case FAPI_PRIORITY_QOS_UP4:
case FAPI_PRIORITY_QOS_UP5:
return SLSI_TRAFFIC_Q_VI;
case FAPI_PRIORITY_QOS_UP6:
case FAPI_PRIORITY_QOS_UP7:
return SLSI_TRAFFIC_Q_VO;
default:
return SLSI_TRAFFIC_Q_BE;
}
}
int slsi_ac_to_tids(enum slsi_traffic_q ac, int *tids)
{
switch (ac) {
case SLSI_TRAFFIC_Q_BE:
tids[0] = FAPI_PRIORITY_QOS_UP0;
tids[1] = FAPI_PRIORITY_QOS_UP3;
break;
case SLSI_TRAFFIC_Q_BK:
tids[0] = FAPI_PRIORITY_QOS_UP1;
tids[1] = FAPI_PRIORITY_QOS_UP2;
break;
case SLSI_TRAFFIC_Q_VI:
tids[0] = FAPI_PRIORITY_QOS_UP4;
tids[1] = FAPI_PRIORITY_QOS_UP5;
break;
case SLSI_TRAFFIC_Q_VO:
tids[0] = FAPI_PRIORITY_QOS_UP6;
tids[1] = FAPI_PRIORITY_QOS_UP7;
break;
default:
return -EINVAL;
}
return 0;
}
static void slsi_net_downgrade_pri(struct net_device *dev, struct slsi_peer *peer,
struct sk_buff *skb)
{
/* If we are a client, downgrade the AC when the ACM bit is set
* and a TSPEC has not been established.
*/
while (unlikely(peer->wmm_acm & BIT(skb->priority)) &&
!(peer->tspec_established & slsi_net_up_to_ac_mapping(skb->priority))) {
SLSI_NET_DBG3(dev, SLSI_NETDEV, "Downgrading from UP:%d\n", skb->priority);
if (!slsi_net_downgrade_ac(dev, skb))
break;
}
SLSI_NET_DBG3(dev, SLSI_NETDEV, "To UP:%d\n", skb->priority);
}
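/* The ndo_select_queue() prototype gained extra arguments (accel_priv,
* then select_queue_fallback_t) in later kernels; the version-gated
* signatures below match the kernels this driver targets.
*/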
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
static u16 slsi_net_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
static u16 slsi_net_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv)
#else
static u16 slsi_net_select_queue(struct net_device *dev, struct sk_buff *skb)
#endif
{
struct netdev_vif *ndev_vif = netdev_priv(dev);
struct slsi_dev *sdev = ndev_vif->sdev;
u16 netif_q = 0;
struct ethhdr *ehdr = (struct ethhdr *)skb->data;
int proto = be16_to_cpu(eth_hdr(skb)->h_proto);
struct slsi_peer *peer;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
(void)accel_priv;
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
(void)fallback;
#endif
SLSI_NET_DBG4(dev, SLSI_NETDEV, "\n");
switch (proto) {
default:
/* SLSI_NETIF_Q_PRIORITY is used only for EAP, ARP and IP frames with DHCP */
break;
case ETH_P_PAE:
case ETH_P_WAI:
SLSI_NET_DBG3(dev, SLSI_TX, "EAP packet. Priority Queue Selected\n");
return SLSI_NETIF_Q_PRIORITY;
case ETH_P_ARP:
SLSI_NET_DBG3(dev, SLSI_TX, "ARP frame. Priority Queue Selected\n");
return SLSI_NETIF_Q_PRIORITY;
case ETH_P_IP:
if (slsi_is_dhcp_packet(skb->data) == SLSI_TX_IS_NOT_DHCP)
break;
SLSI_NET_DBG3(dev, SLSI_TX, "DHCP packet. Priority Queue Selected\n");
return SLSI_NETIF_Q_PRIORITY;
}
if (ndev_vif->vif_type == FAPI_VIFTYPE_AP || ndev_vif->vif_type == FAPI_VIFTYPE_ADHOC) {
/* MULTICAST/BROADCAST Queue is only used for AP */
if (is_multicast_ether_addr(ehdr->h_dest)) {
SLSI_NET_DBG3(dev, SLSI_TX, "Multicast AC queue will be selected\n");
skb->priority = slsi_get_priority_from_tos(skb->data + ETH_HLEN, proto);
return slsi_netif_get_multicast_queue(slsi_frame_priority_to_ac_queue(skb->priority));
}
}
slsi_spinlock_lock(&ndev_vif->peer_lock);
peer = slsi_get_peer_from_mac(sdev, dev, ehdr->h_dest);
if (!peer) {
SLSI_NET_DBG1(dev, SLSI_TX, "Peer NOT found : %pM\n", ehdr->h_dest);
slsi_spinlock_unlock(&ndev_vif->peer_lock);
return SLSI_NETIF_Q_DISCARD;
}
if (peer->qos_enabled) {
if (peer->qos_map_set) { /* 802.11 QoS for interworking */
skb->priority = cfg80211_classify8021d(skb, &peer->qos_map);
} else {
skb->priority = slsi_get_priority_from_tos(skb->data + ETH_HLEN, proto);
}
} else {
skb->priority = FAPI_PRIORITY_QOS_UP0;
}
/* Downgrade the priority if acm bit is set and tspec is not established */
slsi_net_downgrade_pri(dev, peer, skb);
netif_q = slsi_netif_get_peer_queue(peer->queueset, slsi_frame_priority_to_ac_queue(skb->priority));
SLSI_NET_DBG3(dev, SLSI_TX, "%u Queue Selected\n", netif_q);
slsi_spinlock_unlock(&ndev_vif->peer_lock);
return netif_q;
}
#define UNUSED(x) ((void)(x))
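/* Move queued packets between the STA and TDLS netdev queue blocks when a
* TDLS link is set up or torn down. Per-peer queue indices follow
* SLSI_NETIF_Q_PEER_START + (queueset * SLSI_NETIF_Q_PER_PEER) + AC, so
* e.g. queueset 1 with AC index 2 maps to netdev queue
* SLSI_NETIF_Q_PEER_START + SLSI_NETIF_Q_PER_PEER + 2.
*/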
void slsi_tdls_move_packets(struct slsi_dev *sdev, struct net_device *dev,
struct slsi_peer *sta_peer, struct slsi_peer *tdls_peer, bool connection)
{
struct netdev_vif *netdev_vif = netdev_priv(dev);
struct sk_buff *skb = NULL;
struct ethhdr *ehdr;
struct Qdisc *qd;
u32 num_pkts;
u16 staq;
u16 tdlsq;
u16 netq;
u16 i;
u16 j;
/* Get the netdev queue number from queueset */
staq = slsi_netif_get_peer_queue(sta_peer->queueset, 0);
tdlsq = slsi_netif_get_peer_queue(tdls_peer->queueset, 0);
SLSI_NET_DBG1(dev, SLSI_TDLS, "Connection: %d, sta_qset: %d, tdls_qset: %d, sta_netq = %d, tdls_netq = %d\n",
connection, sta_peer->queueset, tdls_peer->queueset, staq, tdlsq);
/* Pause the TDLS queues and STA netdev queues */
slsi_tx_pause_queues(sdev);
slsi_spinlock_lock(&netdev_vif->peer_lock);
/**
* For TDLS connection set PEER valid to true. After this ndo_select_queue() will select TDLSQ instead of STAQ
* For TDLS teardown set PEER valid to false. After this ndo_select_queue() will select STAQ instead of TDLSQ
*/
if (connection)
tdls_peer->valid = true;
else
tdls_peer->valid = false;
/* Move packets from netdev queues */
for (i = 0; i < SLSI_NETIF_Q_PER_PEER; i++) {
SLSI_NET_DBG2(dev, SLSI_TDLS, "NETQ%d: Before: tdlsq_len = %d, staq_len = %d\n",
i, skb_queue_len(&dev->_tx[tdlsq + i].qdisc->q), skb_queue_len(&dev->_tx[staq + i].qdisc->q));
if (connection) {
/* Check if any packet is already available in the TDLS queue (most likely from the last session) */
if (skb_queue_len(&dev->_tx[tdlsq + i].qdisc->q))
SLSI_NET_ERR(dev, "tdls_connection: Packet present in queue %d\n", tdlsq + i);
qd = dev->_tx[staq + i].qdisc;
/* Get the total number of packets in STAQ */
num_pkts = skb_queue_len(&qd->q);
/* Check all the pkts in STAQ and move the TDLS pkts to TDLSQ */
for (j = 0; j < num_pkts; j++) {
qd = dev->_tx[staq + i].qdisc;
/* Dequeue the pkt from STAQ. This logic is similar to the kernel API dequeue_skb() */
skb = qd->gso_skb;
if (skb) {
qd->gso_skb = NULL;
qd->q.qlen--;
} else {
skb = qd->dequeue(qd);
}
if (skb == NULL) {
SLSI_NET_ERR(dev, "tdls_connection: STA NETQ skb is NULL\n");
break;
}
/* Change the queue mapping for the TDLS packets */
netq = skb->queue_mapping;
ehdr = (struct ethhdr *)skb->data;
if (compare_ether_addr(tdls_peer->address, ehdr->h_dest) == 0) {
netq += (tdls_peer->queueset * SLSI_NETIF_Q_PER_PEER);
SLSI_NET_DBG3(dev, SLSI_TDLS, "NETQ%d: Queue mapping changed from %d to %d\n",
i, skb->queue_mapping, netq);
skb_set_queue_mapping(skb, netq);
}
qd = dev->_tx[netq].qdisc;
/* If the netdev queue is already full then enqueue() will drop the skb */
qd->enqueue(skb, qd);
}
} else {
num_pkts = skb_queue_len(&dev->_tx[tdlsq + i].qdisc->q);
/* Move the packets from TDLS to STA queue */
for (j = 0; j < num_pkts; j++) {
/* Dequeue the pkt from TDLS_Q. This logic is similar to the kernel API dequeue_skb() */
qd = dev->_tx[tdlsq + i].qdisc;
skb = qd->gso_skb;
if (skb) {
qd->gso_skb = NULL;
qd->q.qlen--;
} else {
skb = qd->dequeue(qd);
}
if (skb == NULL) {
SLSI_NET_ERR(dev, "tdls_teardown: TDLS NETQ skb is NULL\n");
break;
}
/* Update the queue mapping */
skb_set_queue_mapping(skb, staq + i);
/* Enqueue the packet in STA queue */
qd = dev->_tx[staq + i].qdisc;
/* If the netdev queue is already full then enqueue() will drop the skb */
qd->enqueue(skb, qd);
}
}
SLSI_NET_DBG2(dev, SLSI_TDLS, "NETQ%d: After : tdlsq_len = %d, staq_len = %d\n",
i, skb_queue_len(&dev->_tx[tdlsq + i].qdisc->q), skb_queue_len(&dev->_tx[staq + i].qdisc->q));
}
slsi_spinlock_unlock(&netdev_vif->peer_lock);
/* Teardown - after teardown there should not be any packet in TDLS queues */
if (!connection)
for (i = 0; i < SLSI_NETIF_Q_PER_PEER; i++) {
if (skb_queue_len(&dev->_tx[tdlsq + i].qdisc->q))
SLSI_NET_ERR(dev, "tdls_teardown: Packet present in NET queue %d\n", tdlsq + i);
}
/* Resume the STA and TDLS netdev queues */
slsi_tx_unpause_queues(sdev);
}
/**
* This is the main TX entry point for the driver.
*
* Ownership of the skb is transferred to another function ONLY IF such
* function was able to deal with that skb and ended with a SUCCESS ret code.
* Owner HAS the RESPONSIBILITY to handle the life cycle of the skb.
*
* In the context of this function:
* - ownership is passed DOWN to the LOWER layers HIP-functions when skbs were
* SUCCESSFULLY transmitted, and there they will be FREED. As a consequence
* kernel netstack will receive back NETDEV_TX_OK too.
* - ownership is KEPT HERE by this function when lower layers fails somehow
* to deal with the transmission of the skb. In this case the skb WOULD HAVE
* NOT BEEN FREED by lower layers that instead returns a proper ERRCODE.
* - intermediate lower layer functions (NOT directly involved in failure or
* success) will relay any retcode up to this layer for evaluation.
*
* WHAT HAPPENS THEN, is ERRCODE-dependent, and at the moment:
* - ENOSPC: something related to queueing happened...this should be
* retried....NETDEV_TX_BUSY is returned to NetStack ...packet will be
* requeued by the Kernel NetStack itself, using the proper queue.
* As a consequence SKB is NOT FREED HERE !.
* - ANY OTHER ERR: all other errors are considered at the moment NOT
* recoverable and SO skbs are dropped (FREED) HERE...Kernel will receive
* the proper ERRCODE and stops dealing with the packet considering it
* consumed by lower layer. (same behavior as NETDEV_TX_OK)
*
* BIG NOTE:
* As detailed in Documentation/networking/drivers.txt the above behavior
* of returning NETDEV_TX_BUSY to trigger requeueing by the Kernel is
* discouraged and should be used ONLY in case of a real HARD error(?);
* the advised solution is to actively STOP the queues before finishing
* the available space and WAKING them up again when more free buffers
* would have arrived.
*/
static netdev_tx_t slsi_net_hw_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netdev_vif *ndev_vif = netdev_priv(dev);
struct slsi_dev *sdev = ndev_vif->sdev;
int r = NETDEV_TX_OK;
struct sk_buff *original_skb = NULL;
#ifdef CONFIG_SCSC_WLAN_DEBUG
int known_users = 0;
#endif
/* Keep the packet length. The packet length will be used to increment
* stats for the netdev if the packet was successfully transmitted.
* The ownership of the SKB is passed to lower layers, so we must not
* reference the SKB after this point.
*/
unsigned int packet_len = skb->len;
slsi_wakelock(&sdev->wlan_wl);
slsi_wakelock_timeout(&sdev->wlan_wl_to, SLSI_TX_WAKELOCK_TIME);
/* Check for misaligned (oddly aligned) data.
* The f/w requires 16-bit alignment.
* This is a corner case - for example, the kernel can generate BPDUs
* that are oddly aligned. Therefore it is acceptable to copy these
* frames to a 16-bit alignment.
*/
if ((uintptr_t)skb->data & 0x1) {
struct sk_buff *skb2 = NULL;
/* Received a socket buffer aligned on an odd address.
* Re-align by asking for headroom.
*/
skb2 = skb_copy_expand(skb, SLSI_NETIF_SKB_HEADROOM, skb_tailroom(skb), GFP_ATOMIC);
if (skb2 && (!(((uintptr_t)skb2->data) & 0x1))) {
/* We should account for this duplication */
original_skb = skb;
skb = skb2;
SLSI_NET_DBG3(dev, SLSI_TX, "Oddly aligned skb realigned\n");
} else {
/* Drop the packet if we can't re-align. */
SLSI_NET_ERR(dev, "Oddly aligned skb failed realignment, dropping\n");
if (skb2) {
SLSI_NET_DBG3(dev, SLSI_TX, "skb_copy_expand didn't align for us\n");
slsi_kfree_skb(skb2);
} else {
SLSI_NET_DBG3(dev, SLSI_TX, "skb_copy_expand failed when trying to align\n");
}
r = -EFAULT;
goto evaluate;
}
}
slsi_dbg_track_skb(skb, GFP_ATOMIC);
/* Be defensive about the mac_header - some kernels have a bug where a
* frame can be delivered to the driver with mac_header initialised
* to ~0U and this causes a crash when the pointer is dereferenced to
* access part of the Ethernet header.
*/
if (!skb_mac_header_was_set(skb))
skb_reset_mac_header(skb);
SLSI_NET_DBG3(dev, SLSI_TX, "Proto 0x%.4X\n", be16_to_cpu(eth_hdr(skb)->h_proto));
if (!ndev_vif->is_available) {
SLSI_NET_DBG1(dev, SLSI_TX, "Not Available\n");
r = -EFAULT;
goto evaluate;
}
if (skb->queue_mapping == SLSI_NETIF_Q_DISCARD) {
SLSI_NET_DBG1(dev, SLSI_TX, "Discard Queue :: Packet Dropped\n");
r = -EIO;
goto evaluate;
}
#ifdef CONFIG_SCSC_WLAN_DEBUG
known_users = atomic_read(&skb->users);
#endif
/* SKB is owned by slsi_tx_data() ONLY IF ret value is success (0) */
r = slsi_tx_data(sdev, dev, skb);
evaluate:
if (r == 0) {
/**
* A copy has been passed down and successfully transmitted
* and freed....here we free the original coming from the
* upper network layers....if a copy was passed down.
*/
if (original_skb)
slsi_kfree_skb(original_skb);
/* skb freed by lower layers on success...enjoy */
dev->trans_start = jiffies;
ndev_vif->stats.tx_packets++;
ndev_vif->stats.tx_bytes += packet_len;
r = NETDEV_TX_OK;
} else {
/**
* Failed to send:
* - if QueueFull/OutOfMBulk (-ENOSPC returned) the skb was
* NOT discarded by lower layers and NETDEV_TX_BUSY should
* be returned to upper layers: this will cause the skb
* (THAT MUST NOT HAVE BEEN FREED BY LOWER LAYERS !)
* to be requeued ...
* NOTE THAT it's the original skb that will be retried
* by upper netstack.
* THIS CONDITION SHOULD NOT BE REACHED...NEVER...see in
* the following.
*
* - with any other -ERR instead return the error: this
* anyway let the kernel think that the SKB has
* been consumed, and we drop the frame and free it.
*
* - a WARN_ON() takes care to ensure the SKB has NOT been
* freed by someone despite this was NOT supposed to happen,
* just before the actual freeing.
*
*/
if (r == -ENOSPC) {
/* SLSI_NET_DBG1(dev, SLSI_TEST, "Packet Requeued...should NOT get here !\n"); */
ndev_vif->stats.tx_fifo_errors++;
/* Free the local copy if any ... */
if (original_skb)
slsi_kfree_skb(skb);
r = NETDEV_TX_BUSY;
} else {
#ifdef CONFIG_SCSC_WLAN_DEBUG
WARN_ON(known_users &&
atomic_read(&skb->users) != known_users);
#endif
if (original_skb)
slsi_kfree_skb(original_skb);
slsi_kfree_skb(skb);
ndev_vif->stats.tx_dropped++;
/* We return the ORIGINAL error 'r', but the Kernel
* treats it as TX complete anyway and assumes the
* SKB has been consumed.
*/
/* SLSI_NET_DBG1(dev, SLSI_TEST, "Packet Dropped\n"); */
}
}
/* SKBs are always considered consumed if the driver
* returns NETDEV_TX_OK.
*/
slsi_wakeunlock(&sdev->wlan_wl);
return r;
}
static netdev_features_t slsi_net_fix_features(struct net_device *dev, netdev_features_t features)
{
SLSI_UNUSED_PARAMETER(dev);
#ifdef CONFIG_SCSC_WLAN_RX_NAPI_GRO
SLSI_NET_DBG1(dev, SLSI_RX, "napi rx gro enabled\n");
features |= NETIF_F_GRO;
#else
SLSI_NET_DBG1(dev, SLSI_RX, "napi rx gro disabled\n");
features &= ~NETIF_F_GRO;
#endif
return features;
}
#ifdef CONFIG_SCSC_WLAN_RX_NAPI
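/* NAPI poll callback: deliver up to 'budget' queued RX packets to the
* stack. Per the NAPI contract, if fewer than 'budget' packets were
* processed the poll is completed and the interrupt is re-enabled.
*/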
int slsi_net_rx_poll(struct napi_struct *napi, int budget)
{
struct netdev_vif *ndev_vif = netdev_priv(napi->dev);
struct sk_buff *skb = slsi_skb_dequeue(&ndev_vif->napi.rx_data);
int npackets = 0;
while (skb) {
npackets++;
slsi_dbg_untrack_skb(skb);
#ifdef CONFIG_SCSC_WLAN_RX_NAPI_GRO
napi_gro_receive(napi, skb);
#else
netif_receive_skb(skb);
#endif
if (npackets == budget)
break;
skb = slsi_skb_dequeue(&ndev_vif->napi.rx_data);
}
if (npackets < budget) {
ndev_vif->napi.interrupt_enabled = true;
napi_complete(napi);
}
return npackets;
}
#endif
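/* Collect the interface's multicast addresses into sta.regd_mc_addr.
* mDNS (and, when IPv6 is enabled, the matching solicited-node address)
* entries are skipped since, per the comments below, they are handled
* separately.
*/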
static void slsi_set_multicast_list(struct net_device *dev)
{
struct netdev_vif *ndev_vif = netdev_priv(dev);
u8 count, i = 0;
u8 mdns_addr[ETH_ALEN] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0xFB };
#ifdef CONFIG_SCSC_WLAN_BLOCK_IPV6
u8 mc_addr_prefix[3] = { 0x01, 0x00, 0x5e };
#else
u8 mdns6_addr[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0xFB };
const u8 solicited_node_addr[ETH_ALEN] = { 0x33, 0x33, 0xff, 0x00, 0x00, 0x01 };
u8 ipv6addr_suffix[3];
#endif
struct netdev_hw_addr *ha;
if (ndev_vif->vif_type != FAPI_VIFTYPE_STATION)
return;
if (!ndev_vif->is_available) {
SLSI_NET_DBG1(dev, SLSI_NETDEV, "Not available\n");
return;
}
count = netdev_mc_count(dev);
if (!count)
goto exit;
#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
slsi_spinlock_lock(&ndev_vif->ipv6addr_lock);
memcpy(ipv6addr_suffix, &ndev_vif->ipv6address.s6_addr[13], 3);
slsi_spinlock_unlock(&ndev_vif->ipv6addr_lock);
#endif
netdev_for_each_mc_addr(ha, dev) {
#ifdef CONFIG_SCSC_WLAN_BLOCK_IPV6
if ((!memcmp(ha->addr, mdns_addr, ETH_ALEN)) || /*mDns is handled separately*/
(memcmp(ha->addr, mc_addr_prefix, 3))) { /*only consider IPv4 multicast addresses*/
#else
if ((!memcmp(ha->addr, mdns_addr, ETH_ALEN)) ||
(!memcmp(ha->addr, mdns6_addr, ETH_ALEN)) || /*mDns is handled separately*/
(!memcmp(ha->addr, solicited_node_addr, 3) &&
!memcmp(&ha->addr[3], ipv6addr_suffix, 3))) { /* local multicast addr handled separately*/
#endif
SLSI_NET_DBG3(dev, SLSI_NETDEV, "Drop mac address = %pM\n", ha->addr);
continue;
}
if (i == SLSI_MC_ADDR_ENTRY_MAX) {
SLSI_NET_WARN(dev, "WARNING :mac list has reached max limit(%d), actual count= %d\n", SLSI_MC_ADDR_ENTRY_MAX, count);
break;
}
SLSI_NET_DBG3(dev, SLSI_NETDEV, "mac address %d = %pM\n", i, ha->addr);
SLSI_ETHER_COPY(ndev_vif->sta.regd_mc_addr[i++], ha->addr);
}
exit:
ndev_vif->sta.regd_mc_addr_count = i;
}
static const struct net_device_ops slsi_netdev_ops = {
.ndo_open = slsi_net_open,
.ndo_stop = slsi_net_stop,
.ndo_start_xmit = slsi_net_hw_xmit,
.ndo_do_ioctl = slsi_net_ioctl,
.ndo_get_stats = slsi_net_get_stats,
.ndo_select_queue = slsi_net_select_queue,
.ndo_fix_features = slsi_net_fix_features,
.ndo_set_rx_mode = slsi_set_multicast_list,
};
static void slsi_if_setup(struct net_device *dev)
{
ether_setup(dev);
dev->netdev_ops = &slsi_netdev_ops;
dev->destructor = free_netdev;
}
static int slsi_netif_add_locked(struct slsi_dev *sdev, const char *name, int ifnum)
{
struct net_device *dev = NULL;
struct netdev_vif *ndev_vif;
struct wireless_dev *wdev;
int alloc_size, txq_count, ret;
int i;
WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev->netdev_add_remove_mutex));
if (WARN_ON(!sdev || ifnum > CONFIG_SCSC_WLAN_MAX_INTERFACES || sdev->netdev[ifnum]))
return -EINVAL;
SLSI_DBG1(sdev, SLSI_NETDEV, "Add:%pM\n", sdev->netdev_addresses[ifnum]);
alloc_size = sizeof(struct netdev_vif);
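/* One TX queue for each of the fixed slots (priority, discard, multicast)
* below SLSI_NETIF_Q_PEER_START, plus a block of SLSI_NETIF_Q_PER_PEER
* queues for each potential peer.
*/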
txq_count = SLSI_NETIF_Q_PEER_START + (SLSI_NETIF_Q_PER_PEER * (SLSI_ADHOC_PEER_CONNECTIONS_MAX));
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 16, 0))
dev = alloc_netdev_mqs(alloc_size, name, NET_NAME_PREDICTABLE, slsi_if_setup, txq_count, 1);
#else
dev = alloc_netdev_mqs(alloc_size, name, slsi_if_setup, txq_count, 1);
#endif
if (dev == NULL) {
SLSI_ERR(sdev, "Failed to allocate private data for netdev\n");
return -ENOMEM;
}
/* Reserve space in skb for later use */
dev->needed_headroom = SLSI_NETIF_SKB_HEADROOM;
dev->needed_tailroom = SLSI_NETIF_SKB_TAILROOM;
ret = dev_alloc_name(dev, dev->name);
if (ret < 0)
goto exit_with_error;
ndev_vif = netdev_priv(dev);
memset(ndev_vif, 0x00, sizeof(*ndev_vif));
SLSI_MUTEX_INIT(ndev_vif->vif_mutex);
SLSI_MUTEX_INIT(ndev_vif->scan_mutex);
skb_queue_head_init(&ndev_vif->ba_complete);
slsi_sig_send_init(&ndev_vif->sig_wait);
ndev_vif->sdev = sdev;
ndev_vif->ifnum = ifnum;
ndev_vif->vif_type = SLSI_VIFTYPE_UNSPECIFIED;
#ifndef CONFIG_SCSC_WLAN_BLOCK_IPV6
slsi_spinlock_create(&ndev_vif->ipv6addr_lock);
#endif
slsi_spinlock_create(&ndev_vif->peer_lock);
atomic_set(&ndev_vif->ba_flush, 0);
/* Reserve memory for the peer database - Not required for p2p0 interface */
if (!SLSI_IS_VIF_INDEX_P2P(ndev_vif)) {
int queueset;
for (queueset = 0; queueset < SLSI_ADHOC_PEER_CONNECTIONS_MAX; queueset++) {
ndev_vif->peer_sta_record[queueset] = kzalloc(sizeof(*ndev_vif->peer_sta_record[queueset]), GFP_KERNEL);
if (ndev_vif->peer_sta_record[queueset] == NULL) {
int j;
SLSI_NET_ERR(dev, "Could not allocate memory for peer entry (queueset:%d)\n", queueset);
/* Free previously allocated peer database memory till current queueset */
for (j = 0; j < queueset; j++) {
kfree(ndev_vif->peer_sta_record[j]);
ndev_vif->peer_sta_record[j] = NULL;
}
ret = -ENOMEM;
goto exit_with_error;
}
}
}
/* The default power mode in host. */
if (slsi_is_232338_test_mode_enabled()) {
SLSI_NET_ERR(dev, "*#232338# rf test mode set is enabled.\n");
ndev_vif->set_power_mode = FAPI_POWERMANAGEMENTMODE_ACTIVE_MODE;
} else {
ndev_vif->set_power_mode = FAPI_POWERMANAGEMENTMODE_POWER_SAVE;
}
INIT_LIST_HEAD(&ndev_vif->sta.network_map);
SLSI_DBG1(sdev, SLSI_NETDEV, "ifnum=%d\n", ndev_vif->ifnum);
/* For HS2 interface */
if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif))
sdev->hs2_state = HS2_NO_VIF;
/* For p2p0 interface */
else if (SLSI_IS_VIF_INDEX_P2P(ndev_vif)) {
ret = slsi_p2p_init(sdev, ndev_vif);
if (ret)
goto exit_with_error;
}
INIT_DELAYED_WORK(&ndev_vif->scan_timeout_work, slsi_scan_ind_timeout_handle);
ret = slsi_skb_work_init(sdev, dev, &ndev_vif->rx_data, "slsi_wlan_rx_data", slsi_rx_netdev_data_work);
if (ret)
goto exit_with_error;
ret = slsi_skb_work_init(sdev, dev, &ndev_vif->rx_mlme, "slsi_wlan_rx_mlme", slsi_rx_netdev_mlme_work);
if (ret) {
slsi_skb_work_deinit(&ndev_vif->rx_data);
goto exit_with_error;
}
wdev = &ndev_vif->wdev;
for (i = 0; i < SLSI_SCAN_MAX; i++)
skb_queue_head_init(&ndev_vif->scan[i].scan_results);
dev->ieee80211_ptr = wdev;
wdev->wiphy = sdev->wiphy;
wdev->netdev = dev;
wdev->iftype = NL80211_IFTYPE_STATION;
SET_NETDEV_DEV(dev, sdev->dev);
/* We are not ready to send data yet. */
netif_carrier_off(dev);
SLSI_ETHER_COPY(dev->dev_addr, sdev->netdev_addresses[ifnum]);
rcu_assign_pointer(sdev->netdev[ifnum], dev);
#ifdef CONFIG_SCSC_WLAN_RX_NAPI
SLSI_NET_DBG1(dev, SLSI_RX, "napi rx enabled\n");
skb_queue_head_init(&ndev_vif->napi.rx_data);
slsi_spinlock_create(&ndev_vif->napi.lock);
ndev_vif->napi.interrupt_enabled = true;
/* TODO_HARDMAC: What weight should we use? 32 is just a guess */
netif_napi_add(dev, &ndev_vif->napi.napi, slsi_net_rx_poll, 32);
napi_enable(&ndev_vif->napi.napi);
#endif
return 0;
exit_with_error:
free_netdev(dev);
return ret;
}
int slsi_netif_add(struct slsi_dev *sdev, const char *name)
{
int index = -EINVAL;
int i;
int err;
SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
for (i = 1; i <= CONFIG_SCSC_WLAN_MAX_INTERFACES; i++)
if (!sdev->netdev[i]) {
index = i;
break;
}
if (index > 0) {
err = slsi_netif_add_locked(sdev, name, index);
if (err != 0)
index = err;
}
SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
return index;
}
static void slsi_netif_remove_locked(struct slsi_dev *sdev, struct net_device *dev);
int slsi_netif_init(struct slsi_dev *sdev)
{
int i;
SLSI_DBG3(sdev, SLSI_NETDEV, "Init\n");
SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
/* Initialize all other netdev interfaces to NULL */
for (i = 1; i <= CONFIG_SCSC_WLAN_MAX_INTERFACES; i++)
RCU_INIT_POINTER(sdev->netdev[i], NULL);
if (slsi_netif_add_locked(sdev, "wlan%d", SLSI_NET_INDEX_WLAN) != 0) {
SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
return -EINVAL;
}
if (slsi_netif_add_locked(sdev, "p2p%d", SLSI_NET_INDEX_P2P) != 0) {
rtnl_lock();
slsi_netif_remove_locked(sdev, sdev->netdev[SLSI_NET_INDEX_WLAN]);
rtnl_unlock();
SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
return -EINVAL;
}
SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
return 0;
}
static int slsi_netif_register_locked(struct slsi_dev *sdev, struct net_device *dev)
{
struct netdev_vif *ndev_vif = netdev_priv(dev);
int err;
WARN_ON(!rtnl_is_locked());
WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev->netdev_add_remove_mutex));
SLSI_NET_DBG1(dev, SLSI_NETDEV, "Register:%pM\n", dev->dev_addr);
if (atomic_read(&ndev_vif->is_registered)) {
SLSI_NET_ERR(dev, "Register:%pM Failed: Already registered\n", dev->dev_addr);
return 0;
}
err = register_netdevice(dev);
if (err)
SLSI_NET_ERR(dev, "Register:%pM Failed\n", dev->dev_addr);
else
atomic_set(&ndev_vif->is_registered, 1);
return err;
}
int slsi_netif_register_rtlnl_locked(struct slsi_dev *sdev, struct net_device *dev)
{
int err;
SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
err = slsi_netif_register_locked(sdev, dev);
SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
return err;
}
int slsi_netif_register(struct slsi_dev *sdev, struct net_device *dev)
{
int err;
rtnl_lock();
SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
err = slsi_netif_register_locked(sdev, dev);
SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
rtnl_unlock();
return err;
}
static void slsi_netif_remove_locked(struct slsi_dev *sdev, struct net_device *dev)
{
int i;
struct netdev_vif *ndev_vif = netdev_priv(dev);
SLSI_NET_DBG1(dev, SLSI_NETDEV, "Unregister:%pM\n", dev->dev_addr);
WARN_ON(!rtnl_is_locked());
WARN_ON(!SLSI_MUTEX_IS_LOCKED(sdev->netdev_add_remove_mutex));
if (atomic_read(&ndev_vif->is_registered)) {
netif_tx_disable(dev);
netif_carrier_off(dev);
slsi_stop_net_dev(sdev, dev);
}
rcu_assign_pointer(sdev->netdev[ndev_vif->ifnum], NULL);
synchronize_rcu();
/* Free memory of the peer database - Not required for p2p0 interface */
if (!SLSI_IS_VIF_INDEX_P2P(ndev_vif)) {
int queueset;
for (queueset = 0; queueset < SLSI_ADHOC_PEER_CONNECTIONS_MAX; queueset++) {
kfree(ndev_vif->peer_sta_record[queueset]);
ndev_vif->peer_sta_record[queueset] = NULL;
}
}
if (SLSI_IS_VIF_INDEX_P2P(ndev_vif)) {
slsi_p2p_deinit(sdev, ndev_vif);
} else if (SLSI_IS_VIF_INDEX_WLAN(ndev_vif)) {
sdev->hs2_state = HS2_NO_VIF;
ndev_vif->vif_type = SLSI_VIFTYPE_UNSPECIFIED;
}
cancel_delayed_work(&ndev_vif->scan_timeout_work);
slsi_skb_work_deinit(&ndev_vif->rx_data);
slsi_skb_work_deinit(&ndev_vif->rx_mlme);
for (i = 0; i < SLSI_SCAN_MAX; i++)
slsi_skb_queue_purge(&ndev_vif->scan[i].scan_results);
slsi_kfree_skb(ndev_vif->sta.mlme_scan_ind_skb);
slsi_roam_channel_cache_prune(dev, 0);
#ifdef CONFIG_SCSC_WLAN_RX_NAPI
slsi_skb_queue_purge(&ndev_vif->napi.rx_data);
#endif
if (atomic_read(&ndev_vif->is_registered)) {
atomic_set(&ndev_vif->is_registered, 0);
unregister_netdevice(dev);
} else {
free_netdev(dev);
}
}
void slsi_netif_remove_rtlnl_locked(struct slsi_dev *sdev, struct net_device *dev)
{
SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
slsi_netif_remove_locked(sdev, dev);
SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
}
void slsi_netif_remove(struct slsi_dev *sdev, struct net_device *dev)
{
rtnl_lock();
SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
slsi_netif_remove_locked(sdev, dev);
SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
rtnl_unlock();
}
void slsi_netif_remove_all(struct slsi_dev *sdev)
{
int i;
SLSI_DBG1(sdev, SLSI_NETDEV, "\n");
rtnl_lock();
SLSI_MUTEX_LOCK(sdev->netdev_add_remove_mutex);
for (i = 1; i <= CONFIG_SCSC_WLAN_MAX_INTERFACES; i++)
if (sdev->netdev[i])
slsi_netif_remove_locked(sdev, sdev->netdev[i]);
SLSI_MUTEX_UNLOCK(sdev->netdev_add_remove_mutex);
rtnl_unlock();
}
void slsi_netif_deinit(struct slsi_dev *sdev)
{
SLSI_DBG1(sdev, SLSI_NETDEV, "\n");
slsi_netif_remove_all(sdev);
}
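/* Sum the qdisc backlog across the per-peer netdev TX queues for the
* given VIF type.
*/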
int slsi_netif_pending_queues(int vif_type, struct net_device *dev)
{
int len = 0, tid = 0, i = 0;
/* Get the network level queue length. Each peer owns a block of
* SLSI_NETIF_Q_PER_PEER queues starting at SLSI_NETIF_Q_PEER_START.
*/
if (vif_type == FAPI_VIFTYPE_STATION)
for (i = SLSI_NETIF_Q_PEER_START; i < (SLSI_NETIF_Q_PEER_START + SLSI_NETIF_Q_PER_PEER); i++)
len += skb_queue_len(&dev->_tx[i].qdisc->q);
else if (vif_type == FAPI_VIFTYPE_AP)
for (i = 0; i < SLSI_AP_PEER_CONNECTIONS_MAX; i++)
for (tid = 0; tid < SLSI_NETIF_Q_PER_PEER; tid++)
len += skb_queue_len(&dev->_tx[SLSI_NETIF_Q_PEER_START + (i * SLSI_NETIF_Q_PER_PEER) + tid].qdisc->q);
else if (vif_type == FAPI_VIFTYPE_ADHOC)
for (i = 0; i < SLSI_ADHOC_PEER_CONNECTIONS_MAX; i++)
for (tid = 0; tid < SLSI_NETIF_Q_PER_PEER; tid++)
len += skb_queue_len(&dev->_tx[SLSI_NETIF_Q_PEER_START + (i * SLSI_NETIF_Q_PER_PEER) + tid].qdisc->q);
return len;
}