Fixed MTP to work with TWRP

commit f6dfaef42e by awab228, 2018-06-19 23:16:04 +02:00
50820 changed files with 20846062 additions and 0 deletions

ath10k/Kconfig (new file):
@@ -0,0 +1,47 @@
config ATH10K
	tristate "Atheros 802.11ac wireless cards support"
	depends on MAC80211 && HAS_DMA
	select ATH_COMMON
	---help---
	  This module adds support for wireless adapters based on the
	  Atheros IEEE 802.11ac family of chipsets.

	  If you choose to build a module, it'll be called ath10k.

config ATH10K_PCI
	tristate "Atheros ath10k PCI support"
	depends on ATH10K && PCI
	---help---
	  This module adds support for the PCIe bus.

config ATH10K_DEBUG
	bool "Atheros ath10k debugging"
	depends on ATH10K
	---help---
	  Enables debug support.

	  If unsure, say Y to make it easier to debug problems.

config ATH10K_DEBUGFS
	bool "Atheros ath10k debugfs support"
	depends on ATH10K && DEBUG_FS
	select RELAY
	---help---
	  Enables debugfs support.

	  If unsure, say Y to make it easier to debug problems.

config ATH10K_TRACING
	bool "Atheros ath10k tracing support"
	depends on ATH10K
	depends on EVENT_TRACING
	---help---
	  Select this to make ath10k use the tracing infrastructure.

config ATH10K_DFS_CERTIFIED
	bool "Atheros DFS support for certified platforms"
	depends on ATH10K && CFG80211_CERTIFICATION_ONUS
	default n
	---help---
	  This option enables DFS support for initiating radiation on
	  ath10k.
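A minimal .config fragment that enables everything above (a sketch; it assumes the prerequisites MAC80211, PCI, DEBUG_FS and CFG80211_CERTIFICATION_ONUS are already satisfied):

CONFIG_ATH10K=m
CONFIG_ATH10K_PCI=m
CONFIG_ATH10K_DEBUG=y
CONFIG_ATH10K_DEBUGFS=y
CONFIG_ATH10K_TRACING=y
CONFIG_ATH10K_DFS_CERTIFIED=y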

ath10k/Makefile (new file):
@@ -0,0 +1,22 @@
obj-$(CONFIG_ATH10K) += ath10k_core.o
ath10k_core-y += mac.o \
debug.o \
core.o \
htc.o \
htt.o \
htt_rx.o \
htt_tx.o \
txrx.o \
wmi.o \
bmi.o
ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
ath10k_pci-y += pci.o \
ce.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)

ath10k/bmi.c (new file):
@@ -0,0 +1,318 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "bmi.h"
#include "hif.h"
#include "debug.h"
#include "htc.h"
void ath10k_bmi_start(struct ath10k *ar)
{
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");
ar->bmi.done_sent = false;
}
int ath10k_bmi_done(struct ath10k *ar)
{
struct bmi_cmd cmd;
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");
if (ar->bmi.done_sent) {
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
return 0;
}
ar->bmi.done_sent = true;
cmd.id = __cpu_to_le32(BMI_DONE);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
if (ret) {
ath10k_warn(ar, "unable to write to the device: %d\n", ret);
return ret;
}
return 0;
}
int ath10k_bmi_get_target_info(struct ath10k *ar,
struct bmi_target_info *target_info)
{
struct bmi_cmd cmd;
union bmi_resp resp;
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
u32 resplen = sizeof(resp.get_target_info);
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");
if (ar->bmi.done_sent) {
ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
return -EBUSY;
}
cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
if (ret) {
ath10k_warn(ar, "unable to get target info from device\n");
return ret;
}
if (resplen < sizeof(resp.get_target_info)) {
ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
resplen);
return -EIO;
}
target_info->version = __le32_to_cpu(resp.get_target_info.version);
target_info->type = __le32_to_cpu(resp.get_target_info.type);
return 0;
}
int ath10k_bmi_read_memory(struct ath10k *ar,
u32 address, void *buffer, u32 length)
{
struct bmi_cmd cmd;
union bmi_resp resp;
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
u32 rxlen;
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
address, length);
if (ar->bmi.done_sent) {
ath10k_warn(ar, "command disallowed\n");
return -EBUSY;
}
while (length) {
rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
cmd.id = __cpu_to_le32(BMI_READ_MEMORY);
cmd.read_mem.addr = __cpu_to_le32(address);
cmd.read_mem.len = __cpu_to_le32(rxlen);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
&resp, &rxlen);
if (ret) {
ath10k_warn(ar, "unable to read from the device (%d)\n",
ret);
return ret;
}
memcpy(buffer, resp.read_mem.payload, rxlen);
address += rxlen;
buffer += rxlen;
length -= rxlen;
}
return 0;
}
int ath10k_bmi_write_memory(struct ath10k *ar,
u32 address, const void *buffer, u32 length)
{
struct bmi_cmd cmd;
u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
u32 txlen;
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
address, length);
if (ar->bmi.done_sent) {
ath10k_warn(ar, "command disallowed\n");
return -EBUSY;
}
while (length) {
txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
/* copy before roundup to avoid reading beyond buffer */
memcpy(cmd.write_mem.payload, buffer, txlen);
txlen = roundup(txlen, 4);
cmd.id = __cpu_to_le32(BMI_WRITE_MEMORY);
cmd.write_mem.addr = __cpu_to_le32(address);
cmd.write_mem.len = __cpu_to_le32(txlen);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
NULL, NULL);
if (ret) {
ath10k_warn(ar, "unable to write to the device (%d)\n",
ret);
return ret;
}
/* fixup roundup() so `length` zeroes out for last chunk */
txlen = min(txlen, length);
address += txlen;
buffer += txlen;
length -= txlen;
}
return 0;
}
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
struct bmi_cmd cmd;
union bmi_resp resp;
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
u32 resplen = sizeof(resp.execute);
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
address, param);
if (ar->bmi.done_sent) {
ath10k_warn(ar, "command disallowed\n");
return -EBUSY;
}
cmd.id = __cpu_to_le32(BMI_EXECUTE);
cmd.execute.addr = __cpu_to_le32(address);
cmd.execute.param = __cpu_to_le32(param);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
if (ret) {
ath10k_warn(ar, "unable to read from the device\n");
return ret;
}
if (resplen < sizeof(resp.execute)) {
ath10k_warn(ar, "invalid execute response length (%d)\n",
resplen);
return -EIO;
}
*result = __le32_to_cpu(resp.execute.result);
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
return 0;
}
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
{
struct bmi_cmd cmd;
u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
u32 txlen;
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
buffer, length);
if (ar->bmi.done_sent) {
ath10k_warn(ar, "command disallowed\n");
return -EBUSY;
}
while (length) {
txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
WARN_ON_ONCE(txlen & 3);
cmd.id = __cpu_to_le32(BMI_LZ_DATA);
cmd.lz_data.len = __cpu_to_le32(txlen);
memcpy(cmd.lz_data.payload, buffer, txlen);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
NULL, NULL);
if (ret) {
ath10k_warn(ar, "unable to write to the device\n");
return ret;
}
buffer += txlen;
length -= txlen;
}
return 0;
}
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
{
struct bmi_cmd cmd;
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
address);
if (ar->bmi.done_sent) {
ath10k_warn(ar, "command disallowed\n");
return -EBUSY;
}
cmd.id = __cpu_to_le32(BMI_LZ_STREAM_START);
cmd.lz_start.addr = __cpu_to_le32(address);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
if (ret) {
ath10k_warn(ar, "unable to Start LZ Stream to the device\n");
return ret;
}
return 0;
}
int ath10k_bmi_fast_download(struct ath10k *ar,
u32 address, const void *buffer, u32 length)
{
u8 trailer[4] = {};
u32 head_len = rounddown(length, 4);
u32 trailer_len = length - head_len;
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI,
"bmi fast download address 0x%x buffer 0x%p length %d\n",
address, buffer, length);
ret = ath10k_bmi_lz_stream_start(ar, address);
if (ret)
return ret;
/* copy the last word into a zero padded buffer */
if (trailer_len > 0)
memcpy(trailer, buffer + head_len, trailer_len);
ret = ath10k_bmi_lz_data(ar, buffer, head_len);
if (ret)
return ret;
if (trailer_len > 0)
ret = ath10k_bmi_lz_data(ar, trailer, 4);
if (ret != 0)
return ret;
/*
* Close compressed stream and open a new (fake) one.
* This serves mainly to flush Target caches.
*/
ret = ath10k_bmi_lz_stream_start(ar, 0x00);
return ret;
}
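The write path above clips each chunk to what fits beside the 12-byte command header (id + addr + len) inside BMI_MAX_DATA_SIZE, pads it to a 4-byte boundary on the wire, then advances by the unpadded amount. A standalone sketch of that arithmetic (plain C, illustrative only; not part of the driver):

#include <stdio.h>

#define BMI_MAX_DATA_SIZE 256
#define BMI_WRITE_HDR_LEN 12	/* sizeof(id) + sizeof(addr) + sizeof(len) */

int main(void)
{
	unsigned int length = 1000;	/* bytes left to write */
	unsigned int txlen, wire;

	while (length) {
		/* min(length, BMI_MAX_DATA_SIZE - hdrlen) */
		txlen = length;
		if (txlen > BMI_MAX_DATA_SIZE - BMI_WRITE_HDR_LEN)
			txlen = BMI_MAX_DATA_SIZE - BMI_WRITE_HDR_LEN;

		/* roundup(txlen, 4): size actually sent on the wire */
		wire = (txlen + 3) & ~3u;

		/* advance by the unpadded amount so length zeroes out */
		printf("wire %u, consumed %u\n", wire, txlen);
		length -= txlen;
	}
	return 0;
}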

ath10k/bmi.h (new file):
@@ -0,0 +1,225 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _BMI_H_
#define _BMI_H_
#include "core.h"
/*
* Bootloader Messaging Interface (BMI)
*
* BMI is a very simple messaging interface used during initialization
* to read memory, write memory, execute code, and to define an
* application entry PC.
*
* It is used to download an application to QCA988x, to provide
* patches to code that is already resident on QCA988x, and generally
* to examine and modify state. The Host has an opportunity to use
* BMI only once during bootup. Once the Host issues a BMI_DONE
* command, this opportunity ends.
*
* The Host writes BMI requests to mailbox0, and reads BMI responses
* from mailbox0. BMI requests all begin with a command
* (see below for specific commands), and are followed by
* command-specific data.
*
* Flow control:
* The Host can only issue a command once the Target gives it a
* "BMI Command Credit", using AR8K Counter #4. As soon as the
* Target has completed a command, it issues another BMI Command
* Credit (so the Host can issue the next command).
*
* BMI handles all required Target-side cache flushing.
*/
/* Maximum data size used for BMI transfers */
#define BMI_MAX_DATA_SIZE 256
/* len = cmd + addr + length */
#define BMI_MAX_CMDBUF_SIZE (BMI_MAX_DATA_SIZE + \
sizeof(u32) + \
sizeof(u32) + \
sizeof(u32))
/* BMI Commands */
enum bmi_cmd_id {
BMI_NO_COMMAND = 0,
BMI_DONE = 1,
BMI_READ_MEMORY = 2,
BMI_WRITE_MEMORY = 3,
BMI_EXECUTE = 4,
BMI_SET_APP_START = 5,
BMI_READ_SOC_REGISTER = 6,
BMI_READ_SOC_WORD = 6,
BMI_WRITE_SOC_REGISTER = 7,
BMI_WRITE_SOC_WORD = 7,
BMI_GET_TARGET_ID = 8,
BMI_GET_TARGET_INFO = 8,
BMI_ROMPATCH_INSTALL = 9,
BMI_ROMPATCH_UNINSTALL = 10,
BMI_ROMPATCH_ACTIVATE = 11,
BMI_ROMPATCH_DEACTIVATE = 12,
BMI_LZ_STREAM_START = 13, /* should be followed by LZ_DATA */
BMI_LZ_DATA = 14,
BMI_NVRAM_PROCESS = 15,
};
#define BMI_NVRAM_SEG_NAME_SZ 16
struct bmi_cmd {
__le32 id; /* enum bmi_cmd_id */
union {
struct {
} done;
struct {
__le32 addr;
__le32 len;
} read_mem;
struct {
__le32 addr;
__le32 len;
u8 payload[0];
} write_mem;
struct {
__le32 addr;
__le32 param;
} execute;
struct {
__le32 addr;
} set_app_start;
struct {
__le32 addr;
} read_soc_reg;
struct {
__le32 addr;
__le32 value;
} write_soc_reg;
struct {
} get_target_info;
struct {
__le32 rom_addr;
__le32 ram_addr; /* or value */
__le32 size;
__le32 activate; /* 0 = install, but don't activate */
} rompatch_install;
struct {
__le32 patch_id;
} rompatch_uninstall;
struct {
__le32 count;
__le32 patch_ids[0]; /* length of @count */
} rompatch_activate;
struct {
__le32 count;
__le32 patch_ids[0]; /* length of @count */
} rompatch_deactivate;
struct {
__le32 addr;
} lz_start;
struct {
__le32 len; /* max BMI_MAX_DATA_SIZE */
u8 payload[0]; /* length of @len */
} lz_data;
struct {
u8 name[BMI_NVRAM_SEG_NAME_SZ];
} nvram_process;
u8 payload[BMI_MAX_CMDBUF_SIZE];
};
} __packed;
union bmi_resp {
struct {
u8 payload[0];
} read_mem;
struct {
__le32 result;
} execute;
struct {
__le32 value;
} read_soc_reg;
struct {
__le32 len;
__le32 version;
__le32 type;
} get_target_info;
struct {
__le32 patch_id;
} rompatch_install;
struct {
__le32 patch_id;
} rompatch_uninstall;
struct {
/* 0 = nothing executed
* otherwise = NVRAM segment return value */
__le32 result;
} nvram_process;
u8 payload[BMI_MAX_CMDBUF_SIZE];
} __packed;
struct bmi_target_info {
u32 version;
u32 type;
};
/* in msec */
#define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ)
#define BMI_CE_NUM_TO_TARG 0
#define BMI_CE_NUM_TO_HOST 1
void ath10k_bmi_start(struct ath10k *ar);
int ath10k_bmi_done(struct ath10k *ar);
int ath10k_bmi_get_target_info(struct ath10k *ar,
struct bmi_target_info *target_info);
int ath10k_bmi_read_memory(struct ath10k *ar, u32 address,
void *buffer, u32 length);
int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
const void *buffer, u32 length);
#define ath10k_bmi_read32(ar, item, val) \
({ \
int ret; \
u32 addr; \
__le32 tmp; \
\
addr = host_interest_item_address(HI_ITEM(item)); \
ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \
if (!ret) \
*val = __le32_to_cpu(tmp); \
ret; \
})
#define ath10k_bmi_write32(ar, item, val) \
({ \
int ret; \
u32 address; \
__le32 v = __cpu_to_le32(val); \
\
address = host_interest_item_address(HI_ITEM(item)); \
ret = ath10k_bmi_write_memory(ar, address, \
(u8 *)&v, sizeof(v)); \
ret; \
})
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result);
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
const void *buffer, u32 length);
#endif /* _BMI_H_ */
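Putting the API together: a hedged sketch of the BMI phase as the header comment above describes it — probe the target, download an image, then send BMI_DONE, after which every helper above returns -EBUSY. The function name is hypothetical and error handling is trimmed; this is not verbatim driver code.

static int example_bmi_boot(struct ath10k *ar, const void *image, u32 len,
			    u32 load_addr)
{
	struct bmi_target_info info;
	int ret;

	ath10k_bmi_start(ar);	/* resets the done_sent bookkeeping */

	/* info.version and info.type identify the target chip */
	ret = ath10k_bmi_get_target_info(ar, &info);
	if (ret)
		return ret;

	/* stream the image; the trailing partial word is zero-padded */
	ret = ath10k_bmi_fast_download(ar, load_addr, image, len);
	if (ret)
		return ret;

	/* ends the BMI phase; no further BMI commands are accepted */
	return ath10k_bmi_done(ar);
}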

ath10k/ce.c: file diff suppressed because it is too large.

ath10k/ce.h (new file):
@@ -0,0 +1,425 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _CE_H_
#define _CE_H_
#include "hif.h"
/* Maximum number of Copy Engines supported */
#define CE_COUNT_MAX 8
#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
#define CE_SEND_FLAG_GATHER 0x00010000
/*
* Copy Engine support: low-level Target-side Copy Engine API.
* This is a hardware access layer used by code that understands
* how to use copy engines.
*/
struct ath10k_ce_pipe;
#define CE_DESC_FLAGS_GATHER (1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
#define CE_DESC_FLAGS_META_DATA_LSB 3
struct ce_desc {
__le32 addr;
__le16 nbytes;
__le16 flags; /* %CE_DESC_FLAGS_ */
};
struct ath10k_ce_ring {
/* Number of entries in this ring; must be power of 2 */
unsigned int nentries;
unsigned int nentries_mask;
/*
* For dest ring, this is the next index to be processed
* by software after it was/is received into.
*
* For src ring, this is the last descriptor that was sent
* and completion processed by software.
*
* Regardless of src or dest ring, this is an invariant
* (modulo ring size):
* write index >= read index >= sw_index
*/
unsigned int sw_index;
/* cached copy */
unsigned int write_index;
/*
* For src ring, this is the next index not yet processed by HW.
* This is a cached copy of the real HW index (read index), used
* for avoiding reading the HW index register more often than
* necessary.
* This extends the invariant:
* write index >= read index >= hw_index >= sw_index
*
* For dest ring, this is currently unused.
*/
/* cached copy */
unsigned int hw_index;
/* Start of DMA-coherent area reserved for descriptors */
/* Host address space */
void *base_addr_owner_space_unaligned;
/* CE address space */
u32 base_addr_ce_space_unaligned;
/*
* Actual start of descriptors.
* Aligned to descriptor-size boundary.
* Points into reserved DMA-coherent area, above.
*/
/* Host address space */
void *base_addr_owner_space;
/* CE address space */
u32 base_addr_ce_space;
/*
* Start of shadow copy of descriptors, within regular memory.
* Aligned to descriptor-size boundary.
*/
void *shadow_base_unaligned;
struct ce_desc *shadow_base;
/* keep last */
void *per_transfer_context[0];
};
struct ath10k_ce_pipe {
struct ath10k *ar;
unsigned int id;
unsigned int attr_flags;
u32 ctrl_addr;
void (*send_cb)(struct ath10k_ce_pipe *);
void (*recv_cb)(struct ath10k_ce_pipe *);
unsigned int src_sz_max;
struct ath10k_ce_ring *src_ring;
struct ath10k_ce_ring *dest_ring;
};
/* Copy Engine settable attributes */
struct ce_attr;
/*==================Send====================*/
/* ath10k_ce_send flags */
#define CE_SEND_FLAG_BYTE_SWAP 1
/*
* Queue a source buffer to be sent to an anonymous destination buffer.
* ce - which copy engine to use
* buffer - address of buffer
* nbytes - number of bytes to send
* transfer_id - arbitrary ID; reflected to destination
* flags - CE_SEND_FLAG_* values
* Returns 0 on success; otherwise an error status.
*
* Note: If no flags are specified, use CE's default data swap mode.
*
* Implementation note: pushes 1 buffer to Source ring
*/
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_send_context,
u32 buffer,
unsigned int nbytes,
/* 14 bits */
unsigned int transfer_id,
unsigned int flags);
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags);
void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe);
int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
/*==================Recv=======================*/
int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED 1
/*
* Supply data for the next completed unprocessed receive descriptor.
* Pops buffer from Dest ring.
*/
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
unsigned int *flagsp);
/*
* Supply data for the next completed unprocessed send descriptor.
* Pops 1 completed send buffer from Source ring.
*/
int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp);
/*==================CE Engine Initialization=======================*/
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
const struct ce_attr *attr,
void (*send_cb)(struct ath10k_ce_pipe *),
void (*recv_cb)(struct ath10k_ce_pipe *));
void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
const struct ce_attr *attr);
void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
/*==================CE Engine Shutdown=======================*/
/*
* Support clean shutdown by allowing the caller to revoke
* receive buffers. Target DMA must be stopped before using
* this API.
*/
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp);
/*
* Support clean shutdown by allowing the caller to cancel
* pending sends. Target DMA must be stopped before using
* this API.
*/
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp);
/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_disable_interrupts(struct ath10k *ar);
void ath10k_ce_enable_interrupts(struct ath10k *ar);
/* ce_attr.flags values */
/* Use NonSnooping PCIe accesses? */
#define CE_ATTR_NO_SNOOP 1
/* Byte swap data words */
#define CE_ATTR_BYTE_SWAP_DATA 2
/* Swizzle descriptors? */
#define CE_ATTR_SWIZZLE_DESCRIPTORS 4
/* no interrupt on copy completion */
#define CE_ATTR_DIS_INTR 8
/* Attributes of an instance of a Copy Engine */
struct ce_attr {
/* CE_ATTR_* values */
unsigned int flags;
/* #entries in source ring - Must be a power of 2 */
unsigned int src_nentries;
/*
* Max source send size for this CE.
* This is also the minimum size of a destination buffer.
*/
unsigned int src_sz_max;
/* #entries in destination ring - Must be a power of 2 */
unsigned int dest_nentries;
};
#define SR_BA_ADDRESS 0x0000
#define SR_SIZE_ADDRESS 0x0004
#define DR_BA_ADDRESS 0x0008
#define DR_SIZE_ADDRESS 0x000c
#define CE_CMD_ADDRESS 0x0018
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB 17
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \
(((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \
CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK)
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB 16
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \
(((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \
CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB)
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \
(((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \
CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK)
#define CE_CTRL1_DMAX_LENGTH_MSB 15
#define CE_CTRL1_DMAX_LENGTH_LSB 0
#define CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff
#define CE_CTRL1_DMAX_LENGTH_GET(x) \
(((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB)
#define CE_CTRL1_DMAX_LENGTH_SET(x) \
(((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK)
#define CE_CTRL1_ADDRESS 0x0010
#define CE_CTRL1_HW_MASK 0x0007ffff
#define CE_CTRL1_SW_MASK 0x0007ffff
#define CE_CTRL1_HW_WRITE_MASK 0x00000000
#define CE_CTRL1_SW_WRITE_MASK 0x0007ffff
#define CE_CTRL1_RSTMASK 0xffffffff
#define CE_CTRL1_RESET 0x00000080
#define CE_CMD_HALT_STATUS_MSB 3
#define CE_CMD_HALT_STATUS_LSB 3
#define CE_CMD_HALT_STATUS_MASK 0x00000008
#define CE_CMD_HALT_STATUS_GET(x) \
(((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB)
#define CE_CMD_HALT_STATUS_SET(x) \
(((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK)
#define CE_CMD_HALT_STATUS_RESET 0
#define CE_CMD_HALT_MSB 0
#define CE_CMD_HALT_MASK 0x00000001
#define HOST_IE_COPY_COMPLETE_MSB 0
#define HOST_IE_COPY_COMPLETE_LSB 0
#define HOST_IE_COPY_COMPLETE_MASK 0x00000001
#define HOST_IE_COPY_COMPLETE_GET(x) \
(((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB)
#define HOST_IE_COPY_COMPLETE_SET(x) \
(((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK)
#define HOST_IE_COPY_COMPLETE_RESET 0
#define HOST_IE_ADDRESS 0x002c
#define HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010
#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008
#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004
#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002
#define HOST_IS_COPY_COMPLETE_MASK 0x00000001
#define HOST_IS_ADDRESS 0x0030
#define MISC_IE_ADDRESS 0x0034
#define MISC_IS_AXI_ERR_MASK 0x00000400
#define MISC_IS_DST_ADDR_ERR_MASK 0x00000200
#define MISC_IS_SRC_LEN_ERR_MASK 0x00000100
#define MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080
#define MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040
#define MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020
#define MISC_IS_ADDRESS 0x0038
#define SR_WR_INDEX_ADDRESS 0x003c
#define DST_WR_INDEX_ADDRESS 0x0040
#define CURRENT_SRRI_ADDRESS 0x0044
#define CURRENT_DRRI_ADDRESS 0x0048
#define SRC_WATERMARK_LOW_MSB 31
#define SRC_WATERMARK_LOW_LSB 16
#define SRC_WATERMARK_LOW_MASK 0xffff0000
#define SRC_WATERMARK_LOW_GET(x) \
(((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB)
#define SRC_WATERMARK_LOW_SET(x) \
(((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK)
#define SRC_WATERMARK_LOW_RESET 0
#define SRC_WATERMARK_HIGH_MSB 15
#define SRC_WATERMARK_HIGH_LSB 0
#define SRC_WATERMARK_HIGH_MASK 0x0000ffff
#define SRC_WATERMARK_HIGH_GET(x) \
(((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB)
#define SRC_WATERMARK_HIGH_SET(x) \
(((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK)
#define SRC_WATERMARK_HIGH_RESET 0
#define SRC_WATERMARK_ADDRESS 0x004c
#define DST_WATERMARK_LOW_LSB 16
#define DST_WATERMARK_LOW_MASK 0xffff0000
#define DST_WATERMARK_LOW_SET(x) \
(((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK)
#define DST_WATERMARK_LOW_RESET 0
#define DST_WATERMARK_HIGH_MSB 15
#define DST_WATERMARK_HIGH_LSB 0
#define DST_WATERMARK_HIGH_MASK 0x0000ffff
#define DST_WATERMARK_HIGH_GET(x) \
(((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB)
#define DST_WATERMARK_HIGH_SET(x) \
(((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK)
#define DST_WATERMARK_HIGH_RESET 0
#define DST_WATERMARK_ADDRESS 0x0050
static inline u32 ath10k_ce_base_address(unsigned int ce_id)
{
return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
}
#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK | \
HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \
HOST_IS_DST_RING_LOW_WATERMARK_MASK | \
HOST_IS_DST_RING_HIGH_WATERMARK_MASK)
#define CE_ERROR_MASK (MISC_IS_AXI_ERR_MASK | \
MISC_IS_DST_ADDR_ERR_MASK | \
MISC_IS_SRC_LEN_ERR_MASK | \
MISC_IS_DST_MAX_LEN_VIO_MASK | \
MISC_IS_DST_RING_OVERFLOW_MASK | \
MISC_IS_SRC_RING_OVERFLOW_MASK)
#define CE_SRC_RING_TO_DESC(baddr, idx) \
(&(((struct ce_desc *)baddr)[idx]))
#define CE_DEST_RING_TO_DESC(baddr, idx) \
(&(((struct ce_desc *)baddr)[idx]))
/* Ring arithmetic (modulus number of entries in ring, which is a power of 2). */
#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
(((int)(toidx)-(int)(fromidx)) & (nentries_mask))
#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
(((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000
#define CE_INTERRUPT_SUMMARY(ar) \
CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \
ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \
CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS))
#endif /* _CE_H_ */
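The ring macros above are easiest to see with numbers. A standalone demo (the two macros are copied verbatim from this header; everything else is illustrative):

#include <stdio.h>

#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
	(((int)(toidx) - (int)(fromidx)) & (nentries_mask))
#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))

int main(void)
{
	unsigned int mask = 8 - 1;	/* ring with 8 entries (power of 2) */
	unsigned int sw_index = 6, write_index = 2;

	/* entries in flight, modulo ring size: (2 - 6) & 7 = 4 */
	printf("used = %d\n", CE_RING_DELTA(mask, sw_index, write_index));

	/* producer advance with wraparound: (2 + 1) & 7 = 3 */
	printf("next = %u\n", CE_RING_IDX_INCR(mask, write_index));
	return 0;
}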

ath10k/core.c: file diff suppressed because it is too large.

ath10k/core.h (new file):
@@ -0,0 +1,580 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _CORE_H_
#define _CORE_H_
#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
#include "htt.h"
#include "htc.h"
#include "hw.h"
#include "targaddrs.h"
#include "wmi.h"
#include "../ath.h"
#include "../regd.h"
#include "../dfs_pattern_detector.h"
#include "spectral.h"
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
#define WO(_f) ((_f##_OFFSET) >> 2)
#define ATH10K_SCAN_ID 0
#define WMI_READY_TIMEOUT (5 * HZ)
#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
#define ATH10K_NUM_CHANS 38
/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
#define ATH10K_MAX_NUM_MGMT_PENDING 128
/* number of failed packets */
#define ATH10K_KICKOUT_THRESHOLD 50
/*
 * Use insanely high numbers to make sure that the firmware's keepalive
 * implementation never kicks in; the same functionality is already
 * handled in hostapd. Unit is seconds.
 */
#define ATH10K_KEEPALIVE_MIN_IDLE 3747
#define ATH10K_KEEPALIVE_MAX_IDLE 3895
#define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900
struct ath10k;
struct ath10k_skb_cb {
dma_addr_t paddr;
u8 vdev_id;
struct {
u8 tid;
bool is_offchan;
struct ath10k_htt_txbuf *txbuf;
u32 txbuf_paddr;
} __packed htt;
struct {
bool dtim_zero;
bool deliver_cab;
} bcn;
} __packed;
static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
{
BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) >
IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
}
static inline u32 host_interest_item_address(u32 item_offset)
{
return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
}
struct ath10k_bmi {
bool done_sent;
};
#define ATH10K_MAX_MEM_REQS 16
struct ath10k_mem_chunk {
void *vaddr;
dma_addr_t paddr;
u32 len;
u32 req_id;
};
struct ath10k_wmi {
enum ath10k_htc_ep_id eid;
struct completion service_ready;
struct completion unified_ready;
wait_queue_head_t tx_credits_wq;
struct wmi_cmd_map *cmd;
struct wmi_vdev_param_map *vdev_param;
struct wmi_pdev_param_map *pdev_param;
u32 num_mem_chunks;
struct ath10k_mem_chunk mem_chunks[ATH10K_MAX_MEM_REQS];
};
struct ath10k_peer_stat {
u8 peer_macaddr[ETH_ALEN];
u32 peer_rssi;
u32 peer_tx_rate;
u32 peer_rx_rate; /* 10x only */
};
struct ath10k_target_stats {
/* PDEV stats */
s32 ch_noise_floor;
u32 tx_frame_count;
u32 rx_frame_count;
u32 rx_clear_count;
u32 cycle_count;
u32 phy_err_count;
u32 chan_tx_power;
u32 ack_rx_bad;
u32 rts_bad;
u32 rts_good;
u32 fcs_bad;
u32 no_beacons;
u32 mib_int_count;
/* PDEV TX stats */
s32 comp_queued;
s32 comp_delivered;
s32 msdu_enqued;
s32 mpdu_enqued;
s32 wmm_drop;
s32 local_enqued;
s32 local_freed;
s32 hw_queued;
s32 hw_reaped;
s32 underrun;
s32 tx_abort;
s32 mpdus_requed;
u32 tx_ko;
u32 data_rc;
u32 self_triggers;
u32 sw_retry_failure;
u32 illgl_rate_phy_err;
u32 pdev_cont_xretry;
u32 pdev_tx_timeout;
u32 pdev_resets;
u32 phy_underrun;
u32 txop_ovf;
/* PDEV RX stats */
s32 mid_ppdu_route_change;
s32 status_rcvd;
s32 r0_frags;
s32 r1_frags;
s32 r2_frags;
s32 r3_frags;
s32 htt_msdus;
s32 htt_mpdus;
s32 loc_msdus;
s32 loc_mpdus;
s32 oversize_amsdu;
s32 phy_errs;
s32 phy_err_drop;
s32 mpdu_errs;
/* VDEV STATS */
/* PEER STATS */
u8 peers;
struct ath10k_peer_stat peer_stat[TARGET_NUM_PEERS];
/* TODO: Beacon filter stats */
};
struct ath10k_dfs_stats {
u32 phy_errors;
u32 pulses_total;
u32 pulses_detected;
u32 pulses_discarded;
u32 radar_detected;
};
#define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */
struct ath10k_peer {
struct list_head list;
int vdev_id;
u8 addr[ETH_ALEN];
DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
};
struct ath10k_sta {
struct ath10k_vif *arvif;
/* the following are protected by ar->data_lock */
u32 changed; /* IEEE80211_RC_* */
u32 bw;
u32 nss;
u32 smps;
struct work_struct update_wk;
};
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
struct ath10k_vif {
struct list_head list;
u32 vdev_id;
enum wmi_vdev_type vdev_type;
enum wmi_vdev_subtype vdev_subtype;
u32 beacon_interval;
u32 dtim_period;
struct sk_buff *beacon;
/* protected by data_lock */
bool beacon_sent;
struct ath10k *ar;
struct ieee80211_vif *vif;
bool is_started;
bool is_up;
bool spectral_enabled;
u32 aid;
u8 bssid[ETH_ALEN];
struct work_struct wep_key_work;
struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
u8 def_wep_key_idx;
u8 def_wep_key_newidx;
u16 tx_seq_no;
union {
struct {
u32 uapsd;
} sta;
struct {
/* 127 stations; wmi limit */
u8 tim_bitmap[16];
u8 tim_len;
u32 ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
bool hidden_ssid;
/* P2P_IE with NoA attribute for P2P_GO case */
u32 noa_len;
u8 *noa_data;
} ap;
} u;
u8 fixed_rate;
u8 fixed_nss;
u8 force_sgi;
bool use_cts_prot;
int num_legacy_stations;
};
struct ath10k_vif_iter {
u32 vdev_id;
struct ath10k_vif *arvif;
};
/* used for crash-dump storage, protected by data_lock */
struct ath10k_fw_crash_data {
bool crashed_since_read;
uuid_le uuid;
struct timespec timestamp;
__le32 registers[REG_DUMP_COUNT_QCA988X];
};
struct ath10k_debug {
struct dentry *debugfs_phy;
struct ath10k_target_stats target_stats;
DECLARE_BITMAP(wmi_service_bitmap, WMI_SERVICE_MAX);
struct completion event_stats_compl;
unsigned long htt_stats_mask;
struct delayed_work htt_stats_dwork;
struct ath10k_dfs_stats dfs_stats;
struct ath_dfs_pool_stats dfs_pool_stats;
u32 fw_dbglog_mask;
u8 htt_max_amsdu;
u8 htt_max_ampdu;
struct ath10k_fw_crash_data *fw_crash_data;
};
enum ath10k_state {
ATH10K_STATE_OFF = 0,
ATH10K_STATE_ON,
/* When doing firmware recovery the device is first powered down.
* mac80211 is supposed to call in to start() hook later on. It is
* however possible that driver unloading and firmware crash overlap.
* mac80211 can wait on conf_mutex in stop() while the device is
* stopped in ath10k_core_restart() work holding conf_mutex. The state
* RESTARTED means that the device is up and mac80211 has started hw
* reconfiguration. Once mac80211 is done with the reconfiguration we
* set the state to STATE_ON in restart_complete(). */
ATH10K_STATE_RESTARTING,
ATH10K_STATE_RESTARTED,
/* The device has crashed while restarting hw. This state is like ON
* but commands are blocked in HTC and -ECOMM response is given. This
* prevents completion timeouts and makes the driver more responsive to
* userspace commands. This also prevents recursive recovery. */
ATH10K_STATE_WEDGED,
/* factory tests */
ATH10K_STATE_UTF,
};
enum ath10k_firmware_mode {
/* the default mode, standard 802.11 functionality */
ATH10K_FIRMWARE_MODE_NORMAL,
/* factory tests etc */
ATH10K_FIRMWARE_MODE_UTF,
};
enum ath10k_fw_features {
/* wmi_mgmt_rx_hdr contains extra RSSI information */
ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
/* firmware from 10X branch */
ATH10K_FW_FEATURE_WMI_10X = 1,
/* firmware supports tx frame management over WMI; otherwise it's HTT */
ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX = 2,
/* Firmware does not support P2P */
ATH10K_FW_FEATURE_NO_P2P = 3,
/* Firmware 10.2 feature bit. The ATH10K_FW_FEATURE_WMI_10X feature bit
* is required to be set as well.
*/
ATH10K_FW_FEATURE_WMI_10_2 = 4,
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
enum ath10k_dev_flags {
/* Indicates that the ath10k device is in the CAC phase of DFS */
ATH10K_CAC_RUNNING,
ATH10K_FLAG_CORE_REGISTERED,
};
enum ath10k_scan_state {
ATH10K_SCAN_IDLE,
ATH10K_SCAN_STARTING,
ATH10K_SCAN_RUNNING,
ATH10K_SCAN_ABORTING,
};
static inline const char *ath10k_scan_state_str(enum ath10k_scan_state state)
{
switch (state) {
case ATH10K_SCAN_IDLE:
return "idle";
case ATH10K_SCAN_STARTING:
return "starting";
case ATH10K_SCAN_RUNNING:
return "running";
case ATH10K_SCAN_ABORTING:
return "aborting";
}
return "unknown";
}
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
struct device *dev;
u8 mac_addr[ETH_ALEN];
u32 chip_id;
u32 target_version;
u8 fw_version_major;
u32 fw_version_minor;
u16 fw_version_release;
u16 fw_version_build;
u32 phy_capability;
u32 hw_min_tx_power;
u32 hw_max_tx_power;
u32 ht_cap_info;
u32 vht_cap_info;
u32 num_rf_chains;
DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
struct targetdef *targetdef;
struct hostdef *hostdef;
bool p2p;
struct {
const struct ath10k_hif_ops *ops;
} hif;
struct completion target_suspend;
struct ath10k_bmi bmi;
struct ath10k_wmi wmi;
struct ath10k_htc htc;
struct ath10k_htt htt;
struct ath10k_hw_params {
u32 id;
const char *name;
u32 patch_load_addr;
struct ath10k_hw_params_fw {
const char *dir;
const char *fw;
const char *otp;
const char *board;
} fw;
} hw_params;
const struct firmware *board;
const void *board_data;
size_t board_len;
const struct firmware *otp;
const void *otp_data;
size_t otp_len;
const struct firmware *firmware;
const void *firmware_data;
size_t firmware_len;
int fw_api;
struct {
struct completion started;
struct completion completed;
struct completion on_channel;
struct delayed_work timeout;
enum ath10k_scan_state state;
bool is_roc;
int vdev_id;
int roc_freq;
} scan;
struct {
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
} mac;
/* should never be NULL; needed for regular htt rx */
struct ieee80211_channel *rx_channel;
/* valid during scan; needed for mgmt rx during scan */
struct ieee80211_channel *scan_channel;
/* current operating channel definition */
struct cfg80211_chan_def chandef;
int free_vdev_map;
bool monitor;
int monitor_vdev_id;
bool monitor_started;
unsigned int filter_flags;
unsigned long dev_flags;
u32 dfs_block_radar_events;
/* protected by conf_mutex */
bool radar_enabled;
int num_started_vdevs;
/* protected by conf_mutex */
u8 supp_tx_chainmask;
u8 supp_rx_chainmask;
u8 cfg_tx_chainmask;
u8 cfg_rx_chainmask;
struct wmi_pdev_set_wmm_params_arg wmm_params;
struct completion install_key_done;
struct completion vdev_setup_done;
struct workqueue_struct *workqueue;
/* prevents concurrent FW reconfiguration */
struct mutex conf_mutex;
/* protects shared structure data */
spinlock_t data_lock;
struct list_head arvifs;
struct list_head peers;
wait_queue_head_t peer_mapping_wq;
/* number of created peers; protected by data_lock */
int num_peers;
struct work_struct offchan_tx_work;
struct sk_buff_head offchan_tx_queue;
struct completion offchan_tx_completed;
struct sk_buff *offchan_tx_skb;
struct work_struct wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue;
enum ath10k_state state;
struct work_struct register_work;
struct work_struct restart_work;
/* cycle count is reported twice for each visited channel during scan.
* access protected by data_lock */
u32 survey_last_rx_clear_count;
u32 survey_last_cycle_count;
struct survey_info survey[ATH10K_NUM_CHANS];
struct dfs_pattern_detector *dfs_detector;
#ifdef CONFIG_ATH10K_DEBUGFS
struct ath10k_debug debug;
#endif
struct {
/* relay(fs) channel for spectral scan */
struct rchan *rfs_chan_spec_scan;
/* spectral_mode and spec_config are protected by conf_mutex */
enum ath10k_spectral_mode mode;
struct ath10k_spec_scan config;
} spectral;
struct {
/* protected by conf_mutex */
const struct firmware *utf;
DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT);
/* protected by data_lock */
bool utf_monitor;
} testmode;
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
const struct ath10k_hif_ops *hif_ops);
void ath10k_core_destroy(struct ath10k *ar);
int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode);
int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
void ath10k_core_stop(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar, u32 chip_id);
void ath10k_core_unregister(struct ath10k *ar);
#endif /* _CORE_H_ */
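The MS()/SM() helpers defined at the top of this header pack and unpack register fields by token-pasting a field name onto its _MASK/_LSB constants. A standalone demo (the macros are copied from above; the field is invented for illustration):

#include <stdio.h>

#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)

/* hypothetical 4-bit field occupying bits 4..7 */
#define EXAMPLE_FIELD_LSB	4
#define EXAMPLE_FIELD_MASK	0x000000f0

int main(void)
{
	unsigned int reg = SM(0xa, EXAMPLE_FIELD);	/* packs to 0x000000a0 */

	printf("packed 0x%08x, unpacked 0x%x\n", reg, MS(reg, EXAMPLE_FIELD));
	return 0;
}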

ath10k/debug.c: file diff suppressed because it is too large.

ath10k/debug.h (new file):
@@ -0,0 +1,145 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _DEBUG_H_
#define _DEBUG_H_
#include <linux/types.h>
#include "trace.h"
enum ath10k_debug_mask {
ATH10K_DBG_PCI = 0x00000001,
ATH10K_DBG_WMI = 0x00000002,
ATH10K_DBG_HTC = 0x00000004,
ATH10K_DBG_HTT = 0x00000008,
ATH10K_DBG_MAC = 0x00000010,
ATH10K_DBG_BOOT = 0x00000020,
ATH10K_DBG_PCI_DUMP = 0x00000040,
ATH10K_DBG_HTT_DUMP = 0x00000080,
ATH10K_DBG_MGMT = 0x00000100,
ATH10K_DBG_DATA = 0x00000200,
ATH10K_DBG_BMI = 0x00000400,
ATH10K_DBG_REGULATORY = 0x00000800,
ATH10K_DBG_TESTMODE = 0x00001000,
ATH10K_DBG_ANY = 0xffffffff,
};
extern unsigned int ath10k_debug_mask;
__printf(2, 3) int ath10k_info(struct ath10k *ar, const char *fmt, ...);
__printf(2, 3) int ath10k_err(struct ath10k *ar, const char *fmt, ...);
__printf(2, 3) int ath10k_warn(struct ath10k *ar, const char *fmt, ...);
void ath10k_print_driver_info(struct ath10k *ar);
#ifdef CONFIG_ATH10K_DEBUGFS
int ath10k_debug_start(struct ath10k *ar);
void ath10k_debug_stop(struct ath10k *ar);
int ath10k_debug_create(struct ath10k *ar);
void ath10k_debug_destroy(struct ath10k *ar);
int ath10k_debug_register(struct ath10k *ar);
void ath10k_debug_unregister(struct ath10k *ar);
void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
size_t map_size);
void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev);
struct ath10k_fw_crash_data *
ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
#define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
#else
static inline int ath10k_debug_start(struct ath10k *ar)
{
return 0;
}
static inline void ath10k_debug_stop(struct ath10k *ar)
{
}
static inline int ath10k_debug_create(struct ath10k *ar)
{
return 0;
}
static inline void ath10k_debug_destroy(struct ath10k *ar)
{
}
static inline int ath10k_debug_register(struct ath10k *ar)
{
return 0;
}
static inline void ath10k_debug_unregister(struct ath10k *ar)
{
}
static inline void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
size_t map_size)
{
}
static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev)
{
}
static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer,
int len)
{
}
static inline struct ath10k_fw_crash_data *
ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
{
return NULL;
}
#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
__printf(3, 4) void ath10k_dbg(struct ath10k *ar,
enum ath10k_debug_mask mask,
const char *fmt, ...);
void ath10k_dbg_dump(struct ath10k *ar,
enum ath10k_debug_mask mask,
const char *msg, const char *prefix,
const void *buf, size_t len);
#else /* CONFIG_ATH10K_DEBUG */
static inline int ath10k_dbg(struct ath10k *ar,
enum ath10k_debug_mask dbg_mask,
const char *fmt, ...)
{
return 0;
}
static inline void ath10k_dbg_dump(struct ath10k *ar,
enum ath10k_debug_mask mask,
const char *msg, const char *prefix,
const void *buf, size_t len)
{
}
#endif /* CONFIG_ATH10K_DEBUG */
#endif /* _DEBUG_H_ */
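ath10k_dbg() only emits a message when its mask intersects the global ath10k_debug_mask. A userspace rendition of that gating pattern (illustrative only; the real implementation lives in debug.c, which is not shown in this view):

#include <stdarg.h>
#include <stdio.h>

/* mirror of the bitmask gate: WMI (0x2) and BMI (0x400) enabled */
static unsigned int debug_mask = 0x00000002 | 0x00000400;

static void dbg(unsigned int mask, const char *fmt, ...)
{
	va_list ap;

	if (!(debug_mask & mask))
		return;			/* message filtered out */

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	dbg(0x00000400, "bmi start\n");	/* printed: BMI bit is set */
	dbg(0x00000010, "mac op\n");	/* suppressed: MAC bit is clear */
	return 0;
}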

ath10k/hif.h (new file):
@@ -0,0 +1,180 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _HIF_H_
#define _HIF_H_
#include <linux/kernel.h>
#include "core.h"
struct ath10k_hif_sg_item {
u16 transfer_id;
void *transfer_context; /* NULL = tx completion callback not called */
void *vaddr; /* for debugging mostly */
u32 paddr;
u16 len;
};
struct ath10k_hif_cb {
int (*tx_completion)(struct ath10k *ar,
struct sk_buff *wbuf,
unsigned transfer_id);
int (*rx_completion)(struct ath10k *ar,
struct sk_buff *wbuf,
u8 pipe_id);
};
struct ath10k_hif_ops {
/* send a scatter-gather list to the target */
int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
struct ath10k_hif_sg_item *items, int n_items);
/*
 * API to handle HIF-specific BMI message exchanges; this API is
 * synchronous and only allowed to be called from a context that
 * can block (sleep).
 */
int (*exchange_bmi_msg)(struct ath10k *ar,
void *request, u32 request_len,
void *response, u32 *response_len);
/* Post BMI phase, after FW is loaded. Starts regular operation */
int (*start)(struct ath10k *ar);
/* Clean up what start() did. This does not revert to BMI phase. If
 * that is desired, call power_down() and then power_up(). */
void (*stop)(struct ath10k *ar);
int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe,
int *ul_is_polled, int *dl_is_polled);
void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe);
/*
* Check if prior sends have completed.
*
* Check whether the pipe in question has any completed
* sends that have not yet been processed.
* This function is only relevant for HIF pipes that are configured
* to be polled rather than interrupt-driven.
*/
void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);
void (*set_callbacks)(struct ath10k *ar,
struct ath10k_hif_cb *callbacks);
u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
/* Power up the device and enter BMI transfer mode for FW download */
int (*power_up)(struct ath10k *ar);
/* Power down the device and free up resources. stop() must be called
* before this if start() was called earlier */
void (*power_down)(struct ath10k *ar);
int (*suspend)(struct ath10k *ar);
int (*resume)(struct ath10k *ar);
};
static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
struct ath10k_hif_sg_item *items,
int n_items)
{
return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items);
}
static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
void *request, u32 request_len,
void *response, u32 *response_len)
{
return ar->hif.ops->exchange_bmi_msg(ar, request, request_len,
response, response_len);
}
static inline int ath10k_hif_start(struct ath10k *ar)
{
return ar->hif.ops->start(ar);
}
static inline void ath10k_hif_stop(struct ath10k *ar)
{
return ar->hif.ops->stop(ar);
}
static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar,
u16 service_id,
u8 *ul_pipe, u8 *dl_pipe,
int *ul_is_polled,
int *dl_is_polled)
{
return ar->hif.ops->map_service_to_pipe(ar, service_id,
ul_pipe, dl_pipe,
ul_is_polled, dl_is_polled);
}
static inline void ath10k_hif_get_default_pipe(struct ath10k *ar,
u8 *ul_pipe, u8 *dl_pipe)
{
ar->hif.ops->get_default_pipe(ar, ul_pipe, dl_pipe);
}
static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
u8 pipe_id, int force)
{
ar->hif.ops->send_complete_check(ar, pipe_id, force);
}
static inline void ath10k_hif_set_callbacks(struct ath10k *ar,
struct ath10k_hif_cb *callbacks)
{
ar->hif.ops->set_callbacks(ar, callbacks);
}
static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
u8 pipe_id)
{
return ar->hif.ops->get_free_queue_number(ar, pipe_id);
}
static inline int ath10k_hif_power_up(struct ath10k *ar)
{
return ar->hif.ops->power_up(ar);
}
static inline void ath10k_hif_power_down(struct ath10k *ar)
{
ar->hif.ops->power_down(ar);
}
static inline int ath10k_hif_suspend(struct ath10k *ar)
{
if (!ar->hif.ops->suspend)
return -EOPNOTSUPP;
return ar->hif.ops->suspend(ar);
}
static inline int ath10k_hif_resume(struct ath10k *ar)
{
if (!ar->hif.ops->resume)
return -EOPNOTSUPP;
return ar->hif.ops->resume(ar);
}
#endif /* _HIF_H_ */
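Each bus backend fills in this vtable once and hands it to ath10k_core_create() (declared in core.h above). A trimmed sketch of the shape such a table takes — every example_* symbol is hypothetical:

/* hypothetical backend; each hook would wrap bus-specific transport code */
static const struct ath10k_hif_ops example_hif_ops = {
	.tx_sg			= example_tx_sg,
	.exchange_bmi_msg	= example_exchange_bmi_msg,
	.start			= example_start,
	.stop			= example_stop,
	.map_service_to_pipe	= example_map_service_to_pipe,
	.get_default_pipe	= example_get_default_pipe,
	.send_complete_check	= example_send_complete_check,
	.set_callbacks		= example_set_callbacks,
	.get_free_queue_number	= example_get_free_queue_number,
	.power_up		= example_power_up,
	.power_down		= example_power_down,
	/* .suspend/.resume may be left NULL; the inline wrappers above
	 * then return -EOPNOTSUPP */
};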

ath10k/htc.c (new file):
@@ -0,0 +1,873 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
#include "hif.h"
#include "debug.h"
/********/
/* Send */
/********/
static inline void ath10k_htc_send_complete_check(struct ath10k_htc_ep *ep,
int force)
{
/*
 * Check whether HIF has any prior sends that have finished but
 * have not yet had their post-processing done.
 */
ath10k_hif_send_complete_check(ep->htc->ar, ep->ul_pipe_id, force);
}
static void ath10k_htc_control_tx_complete(struct ath10k *ar,
struct sk_buff *skb)
{
kfree_skb(skb);
}
static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
{
struct sk_buff *skb;
struct ath10k_skb_cb *skb_cb;
skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
if (!skb)
return NULL;
skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
skb_cb = ATH10K_SKB_CB(skb);
memset(skb_cb, 0, sizeof(*skb_cb));
ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
return skb;
}
static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
struct sk_buff *skb)
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}
static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
struct sk_buff *skb)
{
struct ath10k *ar = ep->htc->ar;
ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
ep->eid, skb);
ath10k_htc_restore_tx_skb(ep->htc, skb);
if (!ep->ep_ops.ep_tx_complete) {
ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
dev_kfree_skb_any(skb);
return;
}
ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
/* assumes tx_lock is held */
static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
{
struct ath10k *ar = ep->htc->ar;
if (!ep->tx_credit_flow_enabled)
return false;
if (ep->tx_credits >= ep->tx_credits_per_max_message)
return false;
ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
ep->eid);
return true;
}
static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
struct sk_buff *skb)
{
struct ath10k_htc_hdr *hdr;
hdr = (struct ath10k_htc_hdr *)skb->data;
hdr->eid = ep->eid;
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
hdr->flags = 0;
spin_lock_bh(&ep->htc->tx_lock);
hdr->seq_no = ep->seq_no++;
if (ath10k_htc_ep_need_credit_update(ep))
hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
spin_unlock_bh(&ep->htc->tx_lock);
}
int ath10k_htc_send(struct ath10k_htc *htc,
enum ath10k_htc_ep_id eid,
struct sk_buff *skb)
{
struct ath10k *ar = htc->ar;
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
struct ath10k_hif_sg_item sg_item;
struct device *dev = htc->ar->dev;
int credits = 0;
int ret;
if (htc->ar->state == ATH10K_STATE_WEDGED)
return -ECOMM;
if (eid >= ATH10K_HTC_EP_COUNT) {
ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
return -ENOENT;
}
skb_push(skb, sizeof(struct ath10k_htc_hdr));
if (ep->tx_credit_flow_enabled) {
credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
spin_lock_bh(&htc->tx_lock);
if (ep->tx_credits < credits) {
spin_unlock_bh(&htc->tx_lock);
ret = -EAGAIN;
goto err_pull;
}
ep->tx_credits -= credits;
ath10k_dbg(ar, ATH10K_DBG_HTC,
"htc ep %d consumed %d credits (total %d)\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
}
ath10k_htc_prepare_tx_skb(ep, skb);
skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
ret = dma_mapping_error(dev, skb_cb->paddr);
if (ret)
goto err_credits;
sg_item.transfer_id = ep->eid;
sg_item.transfer_context = skb;
sg_item.vaddr = skb->data;
sg_item.paddr = skb_cb->paddr;
sg_item.len = skb->len;
ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
if (ret)
goto err_unmap;
return 0;
err_unmap:
dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
if (ep->tx_credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
ath10k_dbg(ar, ATH10K_DBG_HTC,
"htc ep %d reverted %d credits back (total %d)\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
if (ep->ep_ops.ep_tx_credits)
ep->ep_ops.ep_tx_credits(htc->ar);
}
err_pull:
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
return ret;
}
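/*
 * Worked example of the credit accounting above (illustrative numbers):
 * with target_credit_size = 512, a 1300-byte HTC frame costs
 * DIV_ROUND_UP(1300, 512) = 3 credits. Credits are debited under
 * tx_lock before the HIF send and handed back on the err_credits
 * path if the send fails.
 */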
static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
struct sk_buff *skb,
unsigned int eid)
{
struct ath10k_htc *htc = &ar->htc;
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
if (WARN_ON_ONCE(!skb))
return 0;
ath10k_htc_notify_tx_completion(ep, skb);
/* the skb now belongs to the completion handler */
return 0;
}
/***********/
/* Receive */
/***********/
static void
ath10k_htc_process_credit_report(struct ath10k_htc *htc,
const struct ath10k_htc_credit_report *report,
int len,
enum ath10k_htc_ep_id eid)
{
struct ath10k *ar = htc->ar;
struct ath10k_htc_ep *ep;
int i, n_reports;
if (len % sizeof(*report))
ath10k_warn(ar, "Uneven credit report len %d", len);
n_reports = len / sizeof(*report);
spin_lock_bh(&htc->tx_lock);
for (i = 0; i < n_reports; i++, report++) {
if (report->eid >= ATH10K_HTC_EP_COUNT)
break;
ep = &htc->endpoint[report->eid];
ep->tx_credits += report->credits;
ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
report->eid, report->credits, ep->tx_credits);
if (ep->ep_ops.ep_tx_credits) {
spin_unlock_bh(&htc->tx_lock);
ep->ep_ops.ep_tx_credits(htc->ar);
spin_lock_bh(&htc->tx_lock);
}
}
spin_unlock_bh(&htc->tx_lock);
}
static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
u8 *buffer,
int length,
enum ath10k_htc_ep_id src_eid)
{
struct ath10k *ar = htc->ar;
int status = 0;
struct ath10k_htc_record *record;
u8 *orig_buffer;
int orig_length;
size_t len;
orig_buffer = buffer;
orig_length = length;
while (length > 0) {
record = (struct ath10k_htc_record *)buffer;
if (length < sizeof(record->hdr)) {
status = -EINVAL;
break;
}
if (record->hdr.len > length) {
/* no room left in buffer for record */
ath10k_warn(ar, "Invalid record length: %d\n",
record->hdr.len);
status = -EINVAL;
break;
}
switch (record->hdr.id) {
case ATH10K_HTC_RECORD_CREDITS:
len = sizeof(struct ath10k_htc_credit_report);
if (record->hdr.len < len) {
ath10k_warn(ar, "Credit report too long\n");
status = -EINVAL;
break;
}
ath10k_htc_process_credit_report(htc,
record->credit_report,
record->hdr.len,
src_eid);
break;
default:
ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
record->hdr.id, record->hdr.len);
break;
}
if (status)
break;
/* multiple records may be present in a trailer */
buffer += sizeof(record->hdr) + record->hdr.len;
length -= sizeof(record->hdr) + record->hdr.len;
}
if (status)
ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
orig_buffer, orig_length);
return status;
}
static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
struct sk_buff *skb,
u8 pipe_id)
{
int status = 0;
struct ath10k_htc *htc = &ar->htc;
struct ath10k_htc_hdr *hdr;
struct ath10k_htc_ep *ep;
u16 payload_len;
u32 trailer_len = 0;
size_t min_len;
u8 eid;
bool trailer_present;
hdr = (struct ath10k_htc_hdr *)skb->data;
skb_pull(skb, sizeof(*hdr));
eid = hdr->eid;
if (eid >= ATH10K_HTC_EP_COUNT) {
ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
hdr, sizeof(*hdr));
status = -EINVAL;
goto out;
}
ep = &htc->endpoint[eid];
/*
* If this endpoint that received a message from the target has
* a to-target HIF pipe whose send completions are polled rather
* than interrupt-driven, this is a good point to ask HIF to check
* whether it has any completed sends to handle.
*/
if (ep->ul_is_polled)
ath10k_htc_send_complete_check(ep, 1);
payload_len = __le16_to_cpu(hdr->len);
if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
payload_len + sizeof(*hdr));
ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
hdr, sizeof(*hdr));
status = -EINVAL;
goto out;
}
if (skb->len < payload_len) {
ath10k_dbg(ar, ATH10K_DBG_HTC,
"HTC Rx: insufficient length, got %d, expected %d\n",
skb->len, payload_len);
ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
"", hdr, sizeof(*hdr));
status = -EINVAL;
goto out;
}
/* get flags to check for trailer */
trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
if (trailer_present) {
u8 *trailer;
trailer_len = hdr->trailer_len;
min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);
if ((trailer_len < min_len) ||
(trailer_len > payload_len)) {
ath10k_warn(ar, "Invalid trailer length: %d\n",
trailer_len);
status = -EPROTO;
goto out;
}
trailer = (u8 *)hdr;
trailer += sizeof(*hdr);
trailer += payload_len;
trailer -= trailer_len;
status = ath10k_htc_process_trailer(htc, trailer,
trailer_len, hdr->eid);
if (status)
goto out;
skb_trim(skb, skb->len - trailer_len);
}
if (((int)payload_len - (int)trailer_len) <= 0)
/* zero length packet with trailer data, just drop these */
goto out;
if (eid == ATH10K_HTC_EP_0) {
struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
		switch (__le16_to_cpu(msg->hdr.message_id)) {
		case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
			htc->htc_ops.target_send_suspend_complete(ar);
			break;
		default:
			/* handle HTC control message */
			if (completion_done(&htc->ctl_resp)) {
				/*
				 * this is a fatal error, target should not be
				 * sending unsolicited messages on ep 0
				 */
				ath10k_warn(ar, "HTC rx ctrl still processing\n");
				status = -EINVAL;
				complete(&htc->ctl_resp);
				goto out;
			}
			htc->control_resp_len =
				min_t(int, skb->len,
				      ATH10K_HTC_MAX_CTRL_MSG_LEN);
			memcpy(htc->control_resp_buffer, skb->data,
			       htc->control_resp_len);
			complete(&htc->ctl_resp);
			break;
		}
goto out;
}
ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
eid, skb);
ep->ep_ops.ep_rx_complete(ar, skb);
/* skb is now owned by the rx completion handler */
skb = NULL;
out:
kfree_skb(skb);
return status;
}
static void ath10k_htc_control_rx_complete(struct ath10k *ar,
struct sk_buff *skb)
{
/* This is unexpected. FW is not supposed to send regular rx on this
* endpoint. */
ath10k_warn(ar, "unexpected htc rx\n");
kfree_skb(skb);
}
/***************/
/* Init/Deinit */
/***************/
static const char *htc_service_name(enum ath10k_htc_svc_id id)
{
switch (id) {
case ATH10K_HTC_SVC_ID_RESERVED:
return "Reserved";
case ATH10K_HTC_SVC_ID_RSVD_CTRL:
return "Control";
case ATH10K_HTC_SVC_ID_WMI_CONTROL:
return "WMI";
case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
return "DATA BE";
case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
return "DATA BK";
case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
return "DATA VI";
case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
return "DATA VO";
case ATH10K_HTC_SVC_ID_NMI_CONTROL:
return "NMI Control";
case ATH10K_HTC_SVC_ID_NMI_DATA:
return "NMI Data";
case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
return "HTT Data";
case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
return "RAW";
}
return "Unknown";
}
static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
{
struct ath10k_htc_ep *ep;
int i;
for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
ep = &htc->endpoint[i];
ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
ep->max_ep_message_len = 0;
ep->max_tx_queue_depth = 0;
ep->eid = i;
ep->htc = htc;
ep->tx_credit_flow_enabled = true;
}
}
static void ath10k_htc_setup_target_buffer_assignments(struct ath10k_htc *htc)
{
struct ath10k_htc_svc_tx_credits *entry;
entry = &htc->service_tx_alloc[0];
	/*
	 * For PCIe, allocate all credits/HTC buffers to WMI.
	 * No buffers are used/required for data. Data always
	 * remains on the host.
	 */
entry++;
entry->service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
entry->credit_allocation = htc->total_transmit_credits;
}
static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
u16 service_id)
{
u8 allocation = 0;
int i;
for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
if (htc->service_tx_alloc[i].service_id == service_id)
allocation =
htc->service_tx_alloc[i].credit_allocation;
}
return allocation;
}
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
struct ath10k *ar = htc->ar;
int i, status = 0;
struct ath10k_htc_svc_conn_req conn_req;
struct ath10k_htc_svc_conn_resp conn_resp;
struct ath10k_htc_msg *msg;
u16 message_id;
u16 credit_count;
u16 credit_size;
status = wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_WAIT_TIMEOUT_HZ);
if (status == 0) {
/* Workaround: In some cases the PCI HIF doesn't
* receive interrupt for the control response message
* even if the buffer was completed. It is suspected
* iomap writes unmasking PCI CE irqs aren't propagated
* properly in KVM PCI-passthrough sometimes.
*/
ath10k_warn(ar, "failed to receive control response completion, polling..\n");
for (i = 0; i < CE_COUNT; i++)
ath10k_hif_send_complete_check(htc->ar, i, 1);
status = wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_WAIT_TIMEOUT_HZ);
if (status == 0)
status = -ETIMEDOUT;
}
if (status < 0) {
ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
return status;
}
if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
htc->control_resp_len);
return -ECOMM;
}
msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
message_id = __le16_to_cpu(msg->hdr.message_id);
credit_count = __le16_to_cpu(msg->ready.credit_count);
credit_size = __le16_to_cpu(msg->ready.credit_size);
if (message_id != ATH10K_HTC_MSG_READY_ID) {
ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
return -ECOMM;
}
htc->total_transmit_credits = credit_count;
htc->target_credit_size = credit_size;
ath10k_dbg(ar, ATH10K_DBG_HTC,
"Target ready! transmit resources: %d size:%d\n",
htc->total_transmit_credits,
htc->target_credit_size);
if ((htc->total_transmit_credits == 0) ||
(htc->target_credit_size == 0)) {
ath10k_err(ar, "Invalid credit size received\n");
return -ECOMM;
}
ath10k_htc_setup_target_buffer_assignments(htc);
/* setup our pseudo HTC control endpoint connection */
memset(&conn_req, 0, sizeof(conn_req));
memset(&conn_resp, 0, sizeof(conn_resp));
conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;
/* connect fake service */
status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
if (status) {
ath10k_err(ar, "could not connect to htc service (%d)\n",
status);
return status;
}
return 0;
}
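/*
 * Connect a service by sending a CONNECT_SERVICE message on EP0 and
 * waiting for the target's response, which carries the assigned
 * endpoint id and maximum message size. The pseudo control service
 * (RSVD_CTRL) is special-cased and always maps to EP0.
 */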
int ath10k_htc_connect_service(struct ath10k_htc *htc,
struct ath10k_htc_svc_conn_req *conn_req,
struct ath10k_htc_svc_conn_resp *conn_resp)
{
struct ath10k *ar = htc->ar;
struct ath10k_htc_msg *msg;
struct ath10k_htc_conn_svc *req_msg;
struct ath10k_htc_conn_svc_response resp_msg_dummy;
struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
struct ath10k_htc_ep *ep;
struct sk_buff *skb;
unsigned int max_msg_size = 0;
int length, status;
bool disable_credit_flow_ctrl = false;
u16 message_id, service_id, flags = 0;
u8 tx_alloc = 0;
/* special case for HTC pseudo control service */
if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
disable_credit_flow_ctrl = true;
assigned_eid = ATH10K_HTC_EP_0;
max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
goto setup;
}
tx_alloc = ath10k_htc_get_credit_allocation(htc,
conn_req->service_id);
if (!tx_alloc)
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc service %s does not allocate target credits\n",
htc_service_name(conn_req->service_id));
skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
if (!skb) {
ath10k_err(ar, "Failed to allocate HTC packet\n");
return -ENOMEM;
}
length = sizeof(msg->hdr) + sizeof(msg->connect_service);
skb_put(skb, length);
memset(skb->data, 0, length);
msg = (struct ath10k_htc_msg *)skb->data;
msg->hdr.message_id =
__cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);
flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
/* Only enable credit flow control for WMI ctrl service */
if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
disable_credit_flow_ctrl = true;
}
req_msg = &msg->connect_service;
req_msg->flags = __cpu_to_le16(flags);
req_msg->service_id = __cpu_to_le16(conn_req->service_id);
reinit_completion(&htc->ctl_resp);
status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
if (status) {
kfree_skb(skb);
return status;
}
/* wait for response */
status = wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
if (status <= 0) {
if (status == 0)
status = -ETIMEDOUT;
ath10k_err(ar, "Service connect timeout: %d\n", status);
return status;
}
/* we controlled the buffer creation, it's aligned */
msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
resp_msg = &msg->connect_service_response;
message_id = __le16_to_cpu(msg->hdr.message_id);
service_id = __le16_to_cpu(resp_msg->service_id);
if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
(htc->control_resp_len < sizeof(msg->hdr) +
sizeof(msg->connect_service_response))) {
ath10k_err(ar, "Invalid resp message ID 0x%x", message_id);
return -EPROTO;
}
ath10k_dbg(ar, ATH10K_DBG_HTC,
"HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
htc_service_name(service_id),
resp_msg->status, resp_msg->eid);
conn_resp->connect_resp_code = resp_msg->status;
/* check response status */
if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
ath10k_err(ar, "HTC Service %s connect request failed: 0x%x)\n",
htc_service_name(service_id),
resp_msg->status);
return -EPROTO;
}
assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);
setup:
if (assigned_eid >= ATH10K_HTC_EP_COUNT)
return -EPROTO;
if (max_msg_size == 0)
return -EPROTO;
ep = &htc->endpoint[assigned_eid];
ep->eid = assigned_eid;
if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
return -EPROTO;
/* return assigned endpoint to caller */
conn_resp->eid = assigned_eid;
conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);
/* setup the endpoint */
ep->service_id = conn_req->service_id;
ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
ep->tx_credits = tx_alloc;
ep->tx_credit_size = htc->target_credit_size;
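	/* credits needed for a maximum-sized message, rounded up */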
ep->tx_credits_per_max_message = ep->max_ep_message_len /
htc->target_credit_size;
if (ep->max_ep_message_len % htc->target_credit_size)
ep->tx_credits_per_max_message++;
/* copy all the callbacks */
ep->ep_ops = conn_req->ep_ops;
status = ath10k_hif_map_service_to_pipe(htc->ar,
ep->service_id,
&ep->ul_pipe_id,
&ep->dl_pipe_id,
&ep->ul_is_polled,
&ep->dl_is_polled);
if (status)
return status;
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
htc_service_name(ep->service_id), ep->ul_pipe_id,
ep->dl_pipe_id, ep->eid);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc ep %d ul polled %d dl polled %d\n",
ep->eid, ep->ul_is_polled, ep->dl_is_polled);
if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
ep->tx_credit_flow_enabled = false;
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc service '%s' eid %d TX flow control disabled\n",
htc_service_name(ep->service_id), assigned_eid);
}
return status;
}
struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
{
struct sk_buff *skb;
skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
if (!skb)
return NULL;
skb_reserve(skb, sizeof(struct ath10k_htc_hdr));
/* FW/HTC requires 4-byte aligned streams */
if (!IS_ALIGNED((unsigned long)skb->data, 4))
ath10k_warn(ar, "Unaligned HTC tx skb\n");
return skb;
}
int ath10k_htc_start(struct ath10k_htc *htc)
{
struct ath10k *ar = htc->ar;
struct sk_buff *skb;
int status = 0;
struct ath10k_htc_msg *msg;
skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
if (!skb)
return -ENOMEM;
skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
memset(skb->data, 0, skb->len);
msg = (struct ath10k_htc_msg *)skb->data;
msg->hdr.message_id =
__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);
ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
if (status) {
kfree_skb(skb);
return status;
}
return 0;
}
/* registered target arrival callback from the HIF layer */
int ath10k_htc_init(struct ath10k *ar)
{
struct ath10k_hif_cb htc_callbacks;
struct ath10k_htc_ep *ep = NULL;
struct ath10k_htc *htc = &ar->htc;
spin_lock_init(&htc->tx_lock);
ath10k_htc_reset_endpoint_states(htc);
/* setup HIF layer callbacks */
htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
htc->ar = ar;
/* Get HIF default pipe for HTC message exchange */
ep = &htc->endpoint[ATH10K_HTC_EP_0];
ath10k_hif_set_callbacks(ar, &htc_callbacks);
ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);
init_completion(&htc->ctl_resp);
return 0;
}


@@ -0,0 +1,359 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _HTC_H_
#define _HTC_H_
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/skbuff.h>
#include <linux/semaphore.h>
#include <linux/timer.h>
struct ath10k;
/****************/
/* HTC protocol */
/****************/
/*
* HTC - host-target control protocol
*
* tx packets are generally <htc_hdr><payload>
* rx packets are more complex: <htc_hdr><payload><trailer>
*
 * The payload + trailer length is stored in len.
 * To get the payload-only length, subtract trailer_len from len.
*
* Trailer contains (possibly) multiple <htc_record>.
* Each record is a id-len-value.
*
 * HTC header flags, control_byte0 and control_byte1
 * have different meanings depending on whether the
 * frame is tx or rx.
*
* Alignment: htc_hdr, payload and trailer are
* 4-byte aligned.
*/
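/*
 * Illustrative example (not from a capture): an rx frame carrying a
 * 52-byte payload and a single credit report record would look like:
 *
 *   ath10k_htc_hdr:  len = 60 (52 payload + 8 trailer),
 *                    flags |= ATH10K_HTC_FLAG_TRAILER_PRESENT,
 *                    trailer_len = 8
 *   payload:         52 bytes
 *   trailer:         ath10k_ath10k_htc_record_hdr (4 bytes,
 *                    id = ATH10K_HTC_RECORD_CREDITS, len = 4)
 *                    followed by ath10k_htc_credit_report (4 bytes)
 */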
enum ath10k_htc_tx_flags {
ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
ATH10K_HTC_FLAG_SEND_BUNDLE = 0x02
};
enum ath10k_htc_rx_flags {
ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0
};
struct ath10k_htc_hdr {
u8 eid; /* @enum ath10k_htc_ep_id */
u8 flags; /* @enum ath10k_htc_tx_flags, ath10k_htc_rx_flags */
__le16 len;
union {
u8 trailer_len; /* for rx */
u8 control_byte0;
} __packed;
union {
u8 seq_no; /* for tx */
u8 control_byte1;
} __packed;
u8 pad0;
u8 pad1;
} __packed __aligned(4);
enum ath10k_ath10k_htc_msg_id {
ATH10K_HTC_MSG_READY_ID = 1,
ATH10K_HTC_MSG_CONNECT_SERVICE_ID = 2,
ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3,
ATH10K_HTC_MSG_SETUP_COMPLETE_ID = 4,
ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID = 5,
ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE = 6
};
enum ath10k_htc_version {
ATH10K_HTC_VERSION_2P0 = 0x00, /* 2.0 */
ATH10K_HTC_VERSION_2P1 = 0x01, /* 2.1 */
};
enum ath10k_htc_conn_flags {
ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH = 0x0,
ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF = 0x1,
ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2,
ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY = 0x3,
#define ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK 0x3
ATH10K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 1 << 2,
ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 1 << 3
#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_MASK 0xFF00
#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_LSB 8
};
enum ath10k_htc_conn_svc_status {
ATH10K_HTC_CONN_SVC_STATUS_SUCCESS = 0,
ATH10K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1,
ATH10K_HTC_CONN_SVC_STATUS_FAILED = 2,
ATH10K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3,
ATH10K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4
};
struct ath10k_ath10k_htc_msg_hdr {
__le16 message_id; /* @enum htc_message_id */
} __packed;
struct ath10k_htc_unknown {
u8 pad0;
u8 pad1;
} __packed;
struct ath10k_htc_ready {
__le16 credit_count;
__le16 credit_size;
u8 max_endpoints;
u8 pad0;
} __packed;
struct ath10k_htc_ready_extended {
struct ath10k_htc_ready base;
u8 htc_version; /* @enum ath10k_htc_version */
u8 max_msgs_per_htc_bundle;
u8 pad0;
u8 pad1;
} __packed;
struct ath10k_htc_conn_svc {
__le16 service_id;
__le16 flags; /* @enum ath10k_htc_conn_flags */
u8 pad0;
u8 pad1;
} __packed;
struct ath10k_htc_conn_svc_response {
__le16 service_id;
u8 status; /* @enum ath10k_htc_conn_svc_status */
u8 eid;
__le16 max_msg_size;
} __packed;
struct ath10k_htc_setup_complete_extended {
u8 pad0;
u8 pad1;
__le32 flags; /* @enum htc_setup_complete_flags */
u8 max_msgs_per_bundled_recv;
u8 pad2;
u8 pad3;
u8 pad4;
} __packed;
struct ath10k_htc_msg {
struct ath10k_ath10k_htc_msg_hdr hdr;
union {
/* host-to-target */
struct ath10k_htc_conn_svc connect_service;
struct ath10k_htc_ready ready;
struct ath10k_htc_ready_extended ready_ext;
struct ath10k_htc_unknown unknown;
struct ath10k_htc_setup_complete_extended setup_complete_ext;
/* target-to-host */
struct ath10k_htc_conn_svc_response connect_service_response;
};
} __packed __aligned(4);
enum ath10k_ath10k_htc_record_id {
ATH10K_HTC_RECORD_NULL = 0,
ATH10K_HTC_RECORD_CREDITS = 1
};
struct ath10k_ath10k_htc_record_hdr {
u8 id; /* @enum ath10k_ath10k_htc_record_id */
u8 len;
u8 pad0;
u8 pad1;
} __packed;
struct ath10k_htc_credit_report {
u8 eid; /* @enum ath10k_htc_ep_id */
u8 credits;
u8 pad0;
u8 pad1;
} __packed;
struct ath10k_htc_record {
struct ath10k_ath10k_htc_record_hdr hdr;
union {
struct ath10k_htc_credit_report credit_report[0];
		u8 payload[0];
};
} __packed __aligned(4);
/*
* note: the trailer offset is dynamic depending
* on payload length. this is only a struct layout draft
*/
struct ath10k_htc_frame {
struct ath10k_htc_hdr hdr;
union {
struct ath10k_htc_msg msg;
u8 payload[0];
};
struct ath10k_htc_record trailer[0];
} __packed __aligned(4);
/*******************/
/* Host-side stuff */
/*******************/
enum ath10k_htc_svc_gid {
ATH10K_HTC_SVC_GRP_RSVD = 0,
ATH10K_HTC_SVC_GRP_WMI = 1,
ATH10K_HTC_SVC_GRP_NMI = 2,
ATH10K_HTC_SVC_GRP_HTT = 3,
ATH10K_HTC_SVC_GRP_TEST = 254,
ATH10K_HTC_SVC_GRP_LAST = 255,
};
#define SVC(group, idx) \
(int)(((int)(group) << 8) | (int)(idx))
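/* e.g. SVC(ATH10K_HTC_SVC_GRP_WMI, 1) == 0x0101, i.e. WMI DATA BE */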
enum ath10k_htc_svc_id {
/* NOTE: service ID of 0x0000 is reserved and should never be used */
ATH10K_HTC_SVC_ID_RESERVED = 0x0000,
ATH10K_HTC_SVC_ID_UNUSED = ATH10K_HTC_SVC_ID_RESERVED,
ATH10K_HTC_SVC_ID_RSVD_CTRL = SVC(ATH10K_HTC_SVC_GRP_RSVD, 1),
ATH10K_HTC_SVC_ID_WMI_CONTROL = SVC(ATH10K_HTC_SVC_GRP_WMI, 0),
ATH10K_HTC_SVC_ID_WMI_DATA_BE = SVC(ATH10K_HTC_SVC_GRP_WMI, 1),
ATH10K_HTC_SVC_ID_WMI_DATA_BK = SVC(ATH10K_HTC_SVC_GRP_WMI, 2),
ATH10K_HTC_SVC_ID_WMI_DATA_VI = SVC(ATH10K_HTC_SVC_GRP_WMI, 3),
ATH10K_HTC_SVC_ID_WMI_DATA_VO = SVC(ATH10K_HTC_SVC_GRP_WMI, 4),
ATH10K_HTC_SVC_ID_NMI_CONTROL = SVC(ATH10K_HTC_SVC_GRP_NMI, 0),
ATH10K_HTC_SVC_ID_NMI_DATA = SVC(ATH10K_HTC_SVC_GRP_NMI, 1),
ATH10K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 0),
/* raw stream service (i.e. flash, tcmd, calibration apps) */
ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH10K_HTC_SVC_GRP_TEST, 0),
};
#undef SVC
enum ath10k_htc_ep_id {
ATH10K_HTC_EP_UNUSED = -1,
ATH10K_HTC_EP_0 = 0,
ATH10K_HTC_EP_1 = 1,
ATH10K_HTC_EP_2,
ATH10K_HTC_EP_3,
ATH10K_HTC_EP_4,
ATH10K_HTC_EP_5,
ATH10K_HTC_EP_6,
ATH10K_HTC_EP_7,
ATH10K_HTC_EP_8,
ATH10K_HTC_EP_COUNT,
};
struct ath10k_htc_ops {
void (*target_send_suspend_complete)(struct ath10k *ar);
};
struct ath10k_htc_ep_ops {
void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
void (*ep_tx_credits)(struct ath10k *);
};
/* service connection information */
struct ath10k_htc_svc_conn_req {
u16 service_id;
struct ath10k_htc_ep_ops ep_ops;
int max_send_queue_depth;
};
/* service connection response information */
struct ath10k_htc_svc_conn_resp {
u8 buffer_len;
u8 actual_len;
enum ath10k_htc_ep_id eid;
unsigned int max_msg_len;
u8 connect_resp_code;
};
#define ATH10K_NUM_CONTROL_TX_BUFFERS 2
#define ATH10K_HTC_MAX_LEN 4096
#define ATH10K_HTC_MAX_CTRL_MSG_LEN 256
#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1*HZ)
#define ATH10K_HTC_CONTROL_BUFFER_SIZE (ATH10K_HTC_MAX_CTRL_MSG_LEN + \
sizeof(struct ath10k_htc_hdr))
#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1*HZ)
struct ath10k_htc_ep {
struct ath10k_htc *htc;
enum ath10k_htc_ep_id eid;
enum ath10k_htc_svc_id service_id;
struct ath10k_htc_ep_ops ep_ops;
int max_tx_queue_depth;
int max_ep_message_len;
u8 ul_pipe_id;
u8 dl_pipe_id;
int ul_is_polled; /* call HIF to get tx completions */
int dl_is_polled; /* call HIF to fetch rx (not implemented) */
u8 seq_no; /* for debugging */
int tx_credits;
int tx_credit_size;
int tx_credits_per_max_message;
bool tx_credit_flow_enabled;
};
struct ath10k_htc_svc_tx_credits {
u16 service_id;
u8 credit_allocation;
};
struct ath10k_htc {
struct ath10k *ar;
struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT];
/* protects endpoints */
spinlock_t tx_lock;
struct ath10k_htc_ops htc_ops;
u8 control_resp_buffer[ATH10K_HTC_MAX_CTRL_MSG_LEN];
int control_resp_len;
struct completion ctl_resp;
int total_transmit_credits;
struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT];
int target_credit_size;
};
int ath10k_htc_init(struct ath10k *ar);
int ath10k_htc_wait_target(struct ath10k_htc *htc);
int ath10k_htc_start(struct ath10k_htc *htc);
int ath10k_htc_connect_service(struct ath10k_htc *htc,
struct ath10k_htc_svc_conn_req *conn_req,
struct ath10k_htc_svc_conn_resp *conn_resp);
int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
struct sk_buff *packet);
struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size);
#endif


@@ -0,0 +1,115 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/if_ether.h>
#include "htt.h"
#include "core.h"
#include "debug.h"
int ath10k_htt_connect(struct ath10k_htt *htt)
{
struct ath10k_htc_svc_conn_req conn_req;
struct ath10k_htc_svc_conn_resp conn_resp;
int status;
memset(&conn_req, 0, sizeof(conn_req));
memset(&conn_resp, 0, sizeof(conn_resp));
conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath10k_htt_t2h_msg_handler;
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
status = ath10k_htc_connect_service(&htt->ar->htc, &conn_req,
&conn_resp);
if (status)
return status;
htt->eid = conn_resp.eid;
return 0;
}
int ath10k_htt_init(struct ath10k *ar)
{
struct ath10k_htt *htt = &ar->htt;
htt->ar = ar;
htt->max_throughput_mbps = 800;
	/*
	 * Prefetch enough data to satisfy the target
	 * classification engine.
	 * This is for LL chips. HL chips will probably
	 * transfer the whole frame in the tx fragment.
	 */
htt->prefetch_len =
36 + /* 802.11 + qos + ht */
4 + /* 802.1q */
8 + /* llc snap */
2; /* ip4 dscp or ip6 priority */
return 0;
}
#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
static int ath10k_htt_verify_version(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt target version %d.%d\n",
htt->target_version_major, htt->target_version_minor);
if (htt->target_version_major != 2 &&
htt->target_version_major != 3) {
ath10k_err(ar, "unsupported htt major version %d. supported versions are 2 and 3\n",
htt->target_version_major);
return -ENOTSUPP;
}
return 0;
}
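/* Perform the HTT version handshake with the target and, on success,
 * push the rx ring configuration to it. */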
int ath10k_htt_setup(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
int status;
init_completion(&htt->target_version_received);
status = ath10k_htt_h2t_ver_req_msg(htt);
if (status)
return status;
status = wait_for_completion_timeout(&htt->target_version_received,
HTT_TARGET_VERSION_TIMEOUT_HZ);
if (status <= 0) {
ath10k_warn(ar, "htt version request timed out\n");
return -ETIMEDOUT;
}
status = ath10k_htt_verify_version(htt);
if (status)
return status;
return ath10k_htt_send_rx_ring_cfg_ll(htt);
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,605 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
htt->num_pending_tx--;
if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
ieee80211_wake_queues(htt->ar->hw);
}
static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
spin_lock_bh(&htt->tx_lock);
__ath10k_htt_tx_dec_pending(htt);
spin_unlock_bh(&htt->tx_lock);
}
static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
int ret = 0;
spin_lock_bh(&htt->tx_lock);
if (htt->num_pending_tx >= htt->max_num_pending_tx) {
ret = -EBUSY;
goto exit;
}
htt->num_pending_tx++;
if (htt->num_pending_tx == htt->max_num_pending_tx)
ieee80211_stop_queues(htt->ar->hw);
exit:
spin_unlock_bh(&htt->tx_lock);
return ret;
}
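/* Allocate a free msdu id for a host tx descriptor. Must be called with
 * htt->tx_lock held; paired with ath10k_htt_tx_free_msdu_id(). */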
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
int msdu_id;
lockdep_assert_held(&htt->tx_lock);
msdu_id = find_first_zero_bit(htt->used_msdu_ids,
htt->max_num_pending_tx);
if (msdu_id == htt->max_num_pending_tx)
return -ENOBUFS;
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
__set_bit(msdu_id, htt->used_msdu_ids);
return msdu_id;
}
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
struct ath10k *ar = htt->ar;
lockdep_assert_held(&htt->tx_lock);
if (!test_bit(msdu_id, htt->used_msdu_ids))
ath10k_warn(ar, "trying to free unallocated msdu_id %d\n",
msdu_id);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
__clear_bit(msdu_id, htt->used_msdu_ids);
}
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
spin_lock_init(&htt->tx_lock);
init_waitqueue_head(&htt->empty_tx_wq);
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
else
htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
htt->max_num_pending_tx, GFP_KERNEL);
if (!htt->pending_tx)
return -ENOMEM;
htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
BITS_TO_LONGS(htt->max_num_pending_tx),
GFP_KERNEL);
if (!htt->used_msdu_ids) {
kfree(htt->pending_tx);
return -ENOMEM;
}
htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
sizeof(struct ath10k_htt_txbuf), 4, 0);
if (!htt->tx_pool) {
kfree(htt->used_msdu_ids);
kfree(htt->pending_tx);
return -ENOMEM;
}
return 0;
}
static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct htt_tx_done tx_done = {0};
int msdu_id;
spin_lock_bh(&htt->tx_lock);
for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
if (!test_bit(msdu_id, htt->used_msdu_ids))
continue;
ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
msdu_id);
tx_done.discard = 1;
tx_done.msdu_id = msdu_id;
ath10k_txrx_tx_unref(htt, &tx_done);
}
spin_unlock_bh(&htt->tx_lock);
}
void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
ath10k_htt_tx_free_pending(htt);
kfree(htt->pending_tx);
kfree(htt->used_msdu_ids);
dma_pool_destroy(htt->tx_pool);
}
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
dev_kfree_skb_any(skb);
}
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct sk_buff *skb;
struct htt_cmd *cmd;
int len = 0;
int ret;
len += sizeof(cmd->hdr);
len += sizeof(cmd->ver_req);
skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_cmd *)skb->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
return 0;
}
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
struct ath10k *ar = htt->ar;
struct htt_stats_req *req;
struct sk_buff *skb;
struct htt_cmd *cmd;
int len = 0, ret;
len += sizeof(cmd->hdr);
len += sizeof(cmd->stats_req);
skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_cmd *)skb->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;
req = &cmd->stats_req;
memset(req, 0, sizeof(*req));
	/* currently we support only 8-bit masks at most, so there is no need
	 * to worry about endianness */
req->upload_types[0] = mask;
req->reset_types[0] = mask;
req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
ath10k_warn(ar, "failed to send htt type stats request: %d",
ret);
dev_kfree_skb_any(skb);
return ret;
}
return 0;
}
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct sk_buff *skb;
struct htt_cmd *cmd;
struct htt_rx_ring_setup_ring *ring;
const int num_rx_ring = 1;
u16 flags;
u32 fw_idx;
int len;
int ret;
/*
* the HW expects the buffer to be an integral number of 4-byte
* "words"
*/
BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
+ (sizeof(*ring) * num_rx_ring);
skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_cmd *)skb->data;
ring = &cmd->rx_setup.rings[0];
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
cmd->rx_setup.hdr.num_rings = 1;
/* FIXME: do we need all of this? */
flags = 0;
flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
flags |= HTT_RX_RING_FLAGS_PPDU_START;
flags |= HTT_RX_RING_FLAGS_PPDU_END;
flags |= HTT_RX_RING_FLAGS_MPDU_START;
flags |= HTT_RX_RING_FLAGS_MPDU_END;
flags |= HTT_RX_RING_FLAGS_MSDU_START;
flags |= HTT_RX_RING_FLAGS_MSDU_END;
flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
flags |= HTT_RX_RING_FLAGS_CTRL_RX;
flags |= HTT_RX_RING_FLAGS_MGMT_RX;
flags |= HTT_RX_RING_FLAGS_NULL_RX;
flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;
fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
ring->fw_idx_shadow_reg_paddr =
__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
ring->flags = __cpu_to_le16(flags);
ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
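	/* rx descriptor field offsets are expressed in 4-byte words */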
#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
return 0;
}
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
u8 max_subfrms_ampdu,
u8 max_subfrms_amsdu)
{
struct ath10k *ar = htt->ar;
struct htt_aggr_conf *aggr_conf;
struct sk_buff *skb;
struct htt_cmd *cmd;
int len;
int ret;
/* Firmware defaults are: amsdu = 3 and ampdu = 64 */
if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
return -EINVAL;
if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
return -EINVAL;
len = sizeof(cmd->hdr);
len += sizeof(cmd->aggr_conf);
skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_cmd *)skb->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;
aggr_conf = &cmd->aggr_conf;
aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
aggr_conf->max_num_amsdu_subframes,
aggr_conf->max_num_ampdu_subframes);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
return 0;
}
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
struct sk_buff *txdesc = NULL;
struct htt_cmd *cmd;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
u8 vdev_id = skb_cb->vdev_id;
int len = 0;
int msdu_id = -1;
int res;
res = ath10k_htt_tx_inc_pending(htt);
if (res)
goto err;
len += sizeof(cmd->hdr);
len += sizeof(cmd->mgmt_tx);
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt);
if (res < 0) {
spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec;
}
msdu_id = res;
htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock);
txdesc = ath10k_htc_alloc_skb(ar, len);
if (!txdesc) {
res = -ENOMEM;
goto err_free_msdu_id;
}
skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
DMA_TO_DEVICE);
res = dma_mapping_error(dev, skb_cb->paddr);
if (res)
goto err_free_txdesc;
skb_put(txdesc, len);
cmd = (struct htt_cmd *)txdesc->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
memcpy(cmd->mgmt_tx.hdr, msdu->data,
min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
skb_cb->htt.txbuf = NULL;
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
goto err_unmap_msdu;
return 0;
err_unmap_msdu:
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
dev_kfree_skb_any(txdesc);
err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
ath10k_htt_tx_dec_pending(htt);
err:
return res;
}
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct ath10k_hif_sg_item sg_items[2];
struct htt_data_tx_desc_frag *frags;
u8 vdev_id = skb_cb->vdev_id;
u8 tid = skb_cb->htt.tid;
int prefetch_len;
int res;
u8 flags0 = 0;
u16 msdu_id, flags1 = 0;
dma_addr_t paddr;
u32 frags_paddr;
bool use_frags;
res = ath10k_htt_tx_inc_pending(htt);
if (res)
goto err;
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt);
if (res < 0) {
spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec;
}
msdu_id = res;
htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock);
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
	/* Since HTT 3.0 there is no separate mgmt tx command. However in case
	 * of mgmt tx using TX_FRM there is no tx fragment list. Instead of a
	 * fragment list the host driver specifies the frame pointer directly. */
use_frags = htt->target_version_major < 3 ||
!ieee80211_is_mgmt(hdr->frame_control);
skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
&paddr);
if (!skb_cb->htt.txbuf)
goto err_free_msdu_id;
skb_cb->htt.txbuf_paddr = paddr;
skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
DMA_TO_DEVICE);
res = dma_mapping_error(dev, skb_cb->paddr);
if (res)
goto err_free_txbuf;
if (likely(use_frags)) {
frags = skb_cb->htt.txbuf->frags;
frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
frags[0].len = __cpu_to_le32(msdu->len);
frags[1].paddr = 0;
frags[1].len = 0;
flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
frags_paddr = skb_cb->htt.txbuf_paddr;
} else {
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
frags_paddr = skb_cb->paddr;
}
/* Normally all commands go through HTC which manages tx credits for
* each endpoint and notifies when tx is completed.
*
* HTT endpoint is creditless so there's no need to care about HTC
* flags. In that case it is trivial to fill the HTC header here.
*
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why the HTC tx completion handler itself is ignored
	 * by setting transfer_context to NULL for all sg items.
*
* There is simply no point in pushing HTT TX_FRM through HTC tx path
* as it's a waste of resources. By bypassing HTC it is possible to
* avoid extra memory allocations, compress data structures and thus
* improve performance. */
skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
sizeof(skb_cb->htt.txbuf->cmd_hdr) +
sizeof(skb_cb->htt.txbuf->cmd_tx) +
prefetch_len);
skb_cb->htt.txbuf->htc_hdr.flags = 0;
if (!ieee80211_has_protected(hdr->frame_control))
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply reply with a regular tx completion with discard status.
	 */
flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n",
flags0, flags1, msdu->len, msdu_id, frags_paddr,
(u32)skb_cb->paddr, vdev_id, tid);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
msdu->data, msdu->len);
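	/* Two sg items are handed to HIF: item 0 covers the HTC/HTT command
	 * headers in the DMA pool txbuf, item 1 covers the first prefetch_len
	 * bytes of the frame itself. */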
sg_items[0].transfer_id = 0;
sg_items[0].transfer_context = NULL;
sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
sizeof(skb_cb->htt.txbuf->frags);
sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
sizeof(skb_cb->htt.txbuf->cmd_hdr) +
sizeof(skb_cb->htt.txbuf->cmd_tx);
sg_items[1].transfer_id = 0;
sg_items[1].transfer_context = NULL;
sg_items[1].vaddr = msdu->data;
sg_items[1].paddr = skb_cb->paddr;
sg_items[1].len = prefetch_len;
res = ath10k_hif_tx_sg(htt->ar,
htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
sg_items, ARRAY_SIZE(sg_items));
if (res)
goto err_unmap_msdu;
return 0;
err_unmap_msdu:
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txbuf:
dma_pool_free(htt->tx_pool,
skb_cb->htt.txbuf,
skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
ath10k_htt_tx_dec_pending(htt);
err:
return res;
}


@@ -0,0 +1,369 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _HW_H_
#define _HW_H_
#include "targaddrs.h"
/* QCA988X 1.0 definitions (unsupported) */
#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
/* QCA988X 2.0 definitions */
#define QCA988X_HW_2_0_VERSION 0x4100016c
#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
#define QCA988X_HW_2_0_FW_3_FILE "firmware-3.bin"
#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
#define ATH10K_FW_API2_FILE "firmware-2.bin"
#define ATH10K_FW_API3_FILE "firmware-3.bin"
#define ATH10K_FW_UTF_FILE "utf.bin"
/* also includes the null byte */
#define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K"
#define REG_DUMP_COUNT_QCA988X 60
struct ath10k_fw_ie {
__le32 id;
__le32 len;
u8 data[0];
};
enum ath10k_fw_ie_type {
ATH10K_FW_IE_FW_VERSION = 0,
ATH10K_FW_IE_TIMESTAMP = 1,
ATH10K_FW_IE_FEATURES = 2,
ATH10K_FW_IE_FW_IMAGE = 3,
ATH10K_FW_IE_OTP_IMAGE = 4,
};
/* Known peculiarities:
 * - current FW doesn't support raw rx mode (last tested v599)
 * - current FW dumps upon raw tx mode (last tested v599)
 * - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
 * - raw frames have FCS, nwifi frames don't
 * - ethernet frames have the 802.11 header decapped and parts (base hdr,
 *   cipher param, llc/snap) are each aligned to 4-byte boundaries */
enum ath10k_hw_txrx_mode {
ATH10K_HW_TXRX_RAW = 0,
ATH10K_HW_TXRX_NATIVE_WIFI = 1,
ATH10K_HW_TXRX_ETHERNET = 2,
/* Valid for HTT >= 3.0. Used for management frames in TX_FRM. */
ATH10K_HW_TXRX_MGMT = 3,
};
enum ath10k_mcast2ucast_mode {
ATH10K_MCAST2UCAST_DISABLED = 0,
ATH10K_MCAST2UCAST_ENABLED = 1,
};
/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
#define TARGET_NUM_WDS_ENTRIES 32
#define TARGET_DMA_BURST_SIZE 0
#define TARGET_MAC_AGGR_DELIM 0
#define TARGET_AST_SKID_LIMIT 16
#define TARGET_NUM_PEERS 16
#define TARGET_NUM_OFFLOAD_PEERS 0
#define TARGET_NUM_OFFLOAD_REORDER_BUFS 0
#define TARGET_NUM_PEER_KEYS 2
#define TARGET_NUM_TIDS (2 * ((TARGET_NUM_PEERS) + (TARGET_NUM_VDEVS)))
#define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_RX_TIMEOUT_LO_PRI 100
#define TARGET_RX_TIMEOUT_HI_PRI 40
/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and
* avoid a very expensive re-alignment in mac80211. */
#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
#define TARGET_SCAN_MAX_PENDING_REQS 4
#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES 8
#define TARGET_GTK_OFFLOAD_MAX_VDEV 3
#define TARGET_NUM_MCAST_GROUPS 0
#define TARGET_NUM_MCAST_TABLE_ELEMS 0
#define TARGET_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED
#define TARGET_TX_DBG_LOG_SIZE 1024
#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 0
#define TARGET_VOW_CONFIG 0
#define TARGET_NUM_MSDU_DESC (1024 + 400)
#define TARGET_MAX_FRAG_ENTRIES 0
/* Target specific defines for 10.X firmware */
#define TARGET_10X_NUM_VDEVS 16
#define TARGET_10X_NUM_PEER_AST 2
#define TARGET_10X_NUM_WDS_ENTRIES 32
#define TARGET_10X_DMA_BURST_SIZE 0
#define TARGET_10X_MAC_AGGR_DELIM 0
#define TARGET_10X_AST_SKID_LIMIT 16
#define TARGET_10X_NUM_PEERS (128 + (TARGET_10X_NUM_VDEVS))
#define TARGET_10X_NUM_PEERS_MAX 128
#define TARGET_10X_NUM_OFFLOAD_PEERS 0
#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0
#define TARGET_10X_NUM_PEER_KEYS 2
#define TARGET_10X_NUM_TIDS 256
#define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_10X_RX_TIMEOUT_LO_PRI 100
#define TARGET_10X_RX_TIMEOUT_HI_PRI 40
#define TARGET_10X_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
#define TARGET_10X_SCAN_MAX_PENDING_REQS 4
#define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV 2
#define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV 2
#define TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES 8
#define TARGET_10X_GTK_OFFLOAD_MAX_VDEV 3
#define TARGET_10X_NUM_MCAST_GROUPS 0
#define TARGET_10X_NUM_MCAST_TABLE_ELEMS 0
#define TARGET_10X_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED
#define TARGET_10X_TX_DBG_LOG_SIZE 1024
#define TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
#define TARGET_10X_VOW_CONFIG 0
#define TARGET_10X_NUM_MSDU_DESC (1024 + 400)
#define TARGET_10X_MAX_FRAG_ENTRIES 0
/* Number of Copy Engines supported */
#define CE_COUNT 8
/*
* Total number of PCIe MSI interrupts requested for all interrupt sources.
* PCIe standard forces this to be a power of 2.
* Some Host OS's limit MSI requests that can be granted to 8
* so for now we abide by this limit and avoid requesting more
* than that.
*/
#define MSI_NUM_REQUEST_LOG2 3
#define MSI_NUM_REQUEST (1<<MSI_NUM_REQUEST_LOG2)
/*
* Granted MSIs are assigned as follows:
* Firmware uses the first
* Remaining MSIs, if any, are used by Copy Engines
* This mapping is known to both Target firmware and Host software.
* It may be changed as long as Host and Target are kept in sync.
*/
/* MSI for firmware (errors, etc.) */
#define MSI_ASSIGN_FW 0
/* MSIs for Copy Engines */
#define MSI_ASSIGN_CE_INITIAL 1
#define MSI_ASSIGN_CE_MAX 7
/* as of IP3.7.1 */
#define RTC_STATE_V_ON 3
#define RTC_STATE_COLD_RESET_MASK 0x00000400
#define RTC_STATE_V_LSB 0
#define RTC_STATE_V_MASK 0x00000007
#define RTC_STATE_ADDRESS 0x0000
#define PCIE_SOC_WAKE_V_MASK 0x00000001
#define PCIE_SOC_WAKE_ADDRESS 0x0004
#define PCIE_SOC_WAKE_RESET 0x00000000
#define SOC_GLOBAL_RESET_ADDRESS 0x0008
#define RTC_SOC_BASE_ADDRESS 0x00004000
#define RTC_WMAC_BASE_ADDRESS 0x00005000
#define MAC_COEX_BASE_ADDRESS 0x00006000
#define BT_COEX_BASE_ADDRESS 0x00007000
#define SOC_PCIE_BASE_ADDRESS 0x00008000
#define SOC_CORE_BASE_ADDRESS 0x00009000
#define WLAN_UART_BASE_ADDRESS 0x0000c000
#define WLAN_SI_BASE_ADDRESS 0x00010000
#define WLAN_GPIO_BASE_ADDRESS 0x00014000
#define WLAN_ANALOG_INTF_BASE_ADDRESS 0x0001c000
#define WLAN_MAC_BASE_ADDRESS 0x00020000
#define EFUSE_BASE_ADDRESS 0x00030000
#define FPGA_REG_BASE_ADDRESS 0x00039000
#define WLAN_UART2_BASE_ADDRESS 0x00054c00
#define CE_WRAPPER_BASE_ADDRESS 0x00057000
#define CE0_BASE_ADDRESS 0x00057400
#define CE1_BASE_ADDRESS 0x00057800
#define CE2_BASE_ADDRESS 0x00057c00
#define CE3_BASE_ADDRESS 0x00058000
#define CE4_BASE_ADDRESS 0x00058400
#define CE5_BASE_ADDRESS 0x00058800
#define CE6_BASE_ADDRESS 0x00058c00
#define CE7_BASE_ADDRESS 0x00059000
#define DBI_BASE_ADDRESS 0x00060000
#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000
#define PCIE_LOCAL_BASE_ADDRESS 0x00080000
#define SOC_RESET_CONTROL_ADDRESS 0x00000000
#define SOC_RESET_CONTROL_OFFSET 0x00000000
#define SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001
#define SOC_RESET_CONTROL_CE_RST_MASK 0x00040000
#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040
#define SOC_CPU_CLOCK_OFFSET 0x00000020
#define SOC_CPU_CLOCK_STANDARD_LSB 0
#define SOC_CPU_CLOCK_STANDARD_MASK 0x00000003
#define SOC_CLOCK_CONTROL_OFFSET 0x00000028
#define SOC_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001
#define SOC_SYSTEM_SLEEP_OFFSET 0x000000c4
#define SOC_LPO_CAL_OFFSET 0x000000e0
#define SOC_LPO_CAL_ENABLE_LSB 20
#define SOC_LPO_CAL_ENABLE_MASK 0x00100000
#define SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050
#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004
#define SOC_CHIP_ID_ADDRESS 0x000000ec
#define SOC_CHIP_ID_REV_LSB 8
#define SOC_CHIP_ID_REV_MASK 0x00000f00
#define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008
#define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004
#define WLAN_SYSTEM_SLEEP_DISABLE_LSB 0
#define WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001
#define WLAN_GPIO_PIN0_ADDRESS 0x00000028
#define WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800
#define WLAN_GPIO_PIN1_ADDRESS 0x0000002c
#define WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800
#define WLAN_GPIO_PIN10_ADDRESS 0x00000050
#define WLAN_GPIO_PIN11_ADDRESS 0x00000054
#define WLAN_GPIO_PIN12_ADDRESS 0x00000058
#define WLAN_GPIO_PIN13_ADDRESS 0x0000005c
#define CLOCK_GPIO_OFFSET 0xffffffff
#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0
#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0
#define SI_CONFIG_OFFSET 0x00000000
#define SI_CONFIG_BIDIR_OD_DATA_LSB 18
#define SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000
#define SI_CONFIG_I2C_LSB 16
#define SI_CONFIG_I2C_MASK 0x00010000
#define SI_CONFIG_POS_SAMPLE_LSB 7
#define SI_CONFIG_POS_SAMPLE_MASK 0x00000080
#define SI_CONFIG_INACTIVE_DATA_LSB 5
#define SI_CONFIG_INACTIVE_DATA_MASK 0x00000020
#define SI_CONFIG_INACTIVE_CLK_LSB 4
#define SI_CONFIG_INACTIVE_CLK_MASK 0x00000010
#define SI_CONFIG_DIVIDER_LSB 0
#define SI_CONFIG_DIVIDER_MASK 0x0000000f
#define SI_CS_OFFSET 0x00000004
#define SI_CS_DONE_ERR_MASK 0x00000400
#define SI_CS_DONE_INT_MASK 0x00000200
#define SI_CS_START_LSB 8
#define SI_CS_START_MASK 0x00000100
#define SI_CS_RX_CNT_LSB 4
#define SI_CS_RX_CNT_MASK 0x000000f0
#define SI_CS_TX_CNT_LSB 0
#define SI_CS_TX_CNT_MASK 0x0000000f
#define SI_TX_DATA0_OFFSET 0x00000008
#define SI_TX_DATA1_OFFSET 0x0000000c
#define SI_RX_DATA0_OFFSET 0x00000010
#define SI_RX_DATA1_OFFSET 0x00000014
#define CORE_CTRL_CPU_INTR_MASK 0x00002000
#define CORE_CTRL_ADDRESS 0x0000
#define PCIE_INTR_ENABLE_ADDRESS 0x0008
#define PCIE_INTR_CAUSE_ADDRESS 0x000c
#define PCIE_INTR_CLR_ADDRESS 0x0014
#define SCRATCH_3_ADDRESS 0x0030
#define CPU_INTR_ADDRESS 0x0010
/* Firmware indications to the Host via SCRATCH_3 register. */
#define FW_INDICATOR_ADDRESS (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS)
#define FW_IND_EVENT_PENDING 1
#define FW_IND_INITIALIZED 2
/* HOST_REG interrupt from firmware */
#define PCIE_INTR_FIRMWARE_MASK 0x00000400
#define PCIE_INTR_CE_MASK_ALL 0x0007f800
#define DRAM_BASE_ADDRESS 0x00400000
#define MISSING 0
#define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET
#define WLAN_SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET
#define WLAN_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_OFFSET
#define CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_OFFSET
#define CLOCK_CONTROL_SI0_CLK_MASK SOC_CLOCK_CONTROL_SI0_CLK_MASK
#define RESET_CONTROL_MBOX_RST_MASK MISSING
#define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK
#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS
#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS
#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS
#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK
#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK
#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS
#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS
#define LOCAL_SCRATCH_OFFSET 0x18
#define CPU_CLOCK_OFFSET SOC_CPU_CLOCK_OFFSET
#define LPO_CAL_OFFSET SOC_LPO_CAL_OFFSET
#define GPIO_PIN10_OFFSET WLAN_GPIO_PIN10_ADDRESS
#define GPIO_PIN11_OFFSET WLAN_GPIO_PIN11_ADDRESS
#define GPIO_PIN12_OFFSET WLAN_GPIO_PIN12_ADDRESS
#define GPIO_PIN13_OFFSET WLAN_GPIO_PIN13_ADDRESS
#define CPU_CLOCK_STANDARD_LSB SOC_CPU_CLOCK_STANDARD_LSB
#define CPU_CLOCK_STANDARD_MASK SOC_CPU_CLOCK_STANDARD_MASK
#define LPO_CAL_ENABLE_LSB SOC_LPO_CAL_ENABLE_LSB
#define LPO_CAL_ENABLE_MASK SOC_LPO_CAL_ENABLE_MASK
#define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS
#define MBOX_BASE_ADDRESS MISSING
#define INT_STATUS_ENABLE_ERROR_LSB MISSING
#define INT_STATUS_ENABLE_ERROR_MASK MISSING
#define INT_STATUS_ENABLE_CPU_LSB MISSING
#define INT_STATUS_ENABLE_CPU_MASK MISSING
#define INT_STATUS_ENABLE_COUNTER_LSB MISSING
#define INT_STATUS_ENABLE_COUNTER_MASK MISSING
#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING
#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING
#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING
#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING
#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING
#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING
#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING
#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING
#define INT_STATUS_ENABLE_ADDRESS MISSING
#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING
#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING
#define HOST_INT_STATUS_ADDRESS MISSING
#define CPU_INT_STATUS_ADDRESS MISSING
#define ERROR_INT_STATUS_ADDRESS MISSING
#define ERROR_INT_STATUS_WAKEUP_MASK MISSING
#define ERROR_INT_STATUS_WAKEUP_LSB MISSING
#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING
#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING
#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING
#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING
#define COUNT_DEC_ADDRESS MISSING
#define HOST_INT_STATUS_CPU_MASK MISSING
#define HOST_INT_STATUS_CPU_LSB MISSING
#define HOST_INT_STATUS_ERROR_MASK MISSING
#define HOST_INT_STATUS_ERROR_LSB MISSING
#define HOST_INT_STATUS_COUNTER_MASK MISSING
#define HOST_INT_STATUS_COUNTER_LSB MISSING
#define RX_LOOKAHEAD_VALID_ADDRESS MISSING
#define WINDOW_DATA_ADDRESS MISSING
#define WINDOW_READ_ADDR_ADDRESS MISSING
#define WINDOW_WRITE_ADDR_ADDRESS MISSING
#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
#endif /* _HW_H_ */

File diff suppressed because it is too large


@@ -0,0 +1,66 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _MAC_H_
#define _MAC_H_
#include <net/mac80211.h>
#include "core.h"
struct ath10k_generic_iter {
struct ath10k *ar;
int ret;
};
struct ath10k *ath10k_mac_create(size_t priv_size);
void ath10k_mac_destroy(struct ath10k *ar);
int ath10k_mac_register(struct ath10k *ar);
void ath10k_mac_unregister(struct ath10k *ar);
struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
void __ath10k_scan_finish(struct ath10k *ar);
void ath10k_scan_finish(struct ath10k *ar);
void ath10k_scan_timeout_work(struct work_struct *work);
void ath10k_offchan_tx_purge(struct ath10k *ar);
void ath10k_offchan_tx_work(struct work_struct *work);
void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);
void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
void ath10k_halt(struct ath10k *ar);
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
{
return (struct ath10k_vif *)vif->drv_priv;
}
static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif,
struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
if (arvif->tx_seq_no == 0)
arvif->tx_seq_no = 0x1000;
if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
arvif->tx_seq_no += 0x10;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(arvif->tx_seq_no);
}
}
#endif /* _MAC_H_ */

File diff suppressed because it is too large

View file

@@ -0,0 +1,264 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _PCI_H_
#define _PCI_H_
#include <linux/interrupt.h>
#include "hw.h"
#include "ce.h"
/*
* maximum number of bytes that can be handled atomically by DiagRead/DiagWrite
*/
#define DIAG_TRANSFER_LIMIT 2048
struct bmi_xfer {
bool tx_done;
bool rx_done;
bool wait_for_resp;
u32 resp_len;
};
/*
* PCI-specific Target state
*
* NOTE: Structure is shared between Host software and Target firmware!
*
* Much of this may be of interest to the Host so
* HOST_INTEREST->hi_interconnect_state points here
* (and all members are 32-bit quantities in order to
* facilitate Host access). In particular, Host software is
* required to initialize pipe_cfg_addr and svc_to_pipe_map.
*/
struct pcie_state {
/* Pipe configuration Target address */
/* NB: ce_pipe_config[CE_COUNT] */
u32 pipe_cfg_addr;
/* Service to pipe map Target address */
/* NB: service_to_pipe[PIPE_TO_CE_MAP_CN] */
u32 svc_to_pipe_map;
/* number of MSI interrupts requested */
u32 msi_requested;
/* number of MSI interrupts granted */
u32 msi_granted;
/* Message Signalled Interrupt address */
u32 msi_addr;
/* Base data */
u32 msi_data;
/*
* Data for firmware interrupt;
* MSI data for other interrupts are
* in various SoC registers
*/
u32 msi_fw_intr_data;
/* PCIE_PWR_METHOD_* */
u32 power_mgmt_method;
/* PCIE_CONFIG_FLAG_* */
u32 config_flags;
};
/* PCIE_CONFIG_FLAG definitions */
#define PCIE_CONFIG_FLAG_ENABLE_L1 0x0000001
/* Host software's Copy Engine configuration. */
#define CE_ATTR_FLAGS 0
/*
* Configuration information for a Copy Engine pipe.
* Passed from Host to Target during startup (one per CE).
*
* NOTE: Structure is shared between Host software and Target firmware!
*/
struct ce_pipe_config {
__le32 pipenum;
__le32 pipedir;
__le32 nentries;
__le32 nbytes_max;
__le32 flags;
__le32 reserved;
};
/*
* Directions for interconnect pipe configuration.
* These definitions may be used during configuration and are shared
* between Host and Target.
*
* Pipe Directions are relative to the Host, so PIPEDIR_IN means
* "coming IN over air through Target to Host" as with a WiFi Rx operation.
* Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
* as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
* Target since things that are "PIPEDIR_OUT" are coming IN to the Target
* over the interconnect.
*/
#define PIPEDIR_NONE 0
#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */
#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */
#define PIPEDIR_INOUT 3 /* bidirectional */
/* Establish a mapping between a service/direction and a pipe. */
struct service_to_pipe {
__le32 service_id;
__le32 pipedir;
__le32 pipenum;
};
/* Per-pipe state. */
struct ath10k_pci_pipe {
/* Handle of underlying Copy Engine */
struct ath10k_ce_pipe *ce_hdl;
/* Our pipe number; facilitates use of pipe_info ptrs. */
u8 pipe_num;
/* Convenience back pointer to hif_ce_state. */
struct ath10k *hif_ce_state;
size_t buf_sz;
/* protects compl_free and num_send_allowed */
spinlock_t pipe_lock;
struct ath10k_pci *ar_pci;
struct tasklet_struct intr;
};
struct ath10k_pci {
struct pci_dev *pdev;
struct device *dev;
struct ath10k *ar;
void __iomem *mem;
/*
* Number of MSI interrupts granted, 0 --> using legacy PCI line
* interrupts.
*/
int num_msi_intrs;
struct tasklet_struct intr_tq;
struct tasklet_struct msi_fw_err;
struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
struct ath10k_hif_cb msg_callbacks_current;
/* Copy Engine used for Diagnostic Accesses */
struct ath10k_ce_pipe *ce_diag;
/* FIXME: document what this really protects */
spinlock_t ce_lock;
/* Map CE id to ce_state */
struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
struct timer_list rx_post_retry;
};
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
{
return (struct ath10k_pci *)ar->drv_priv;
}
#define ATH10K_PCI_RX_POST_RETRY_MS 50
#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
#define PCIE_WAKE_TIMEOUT 5000 /* 5ms */
#define BAR_NUM 0
#define CDC_WAR_MAGIC_STR 0xceef0000
#define CDC_WAR_DATA_CE 4
/*
* TODO: Should be a function call specific to each Target-type.
* This convoluted macro converts from Target CPU Virtual Address Space to CE
* Address Space. As part of this process, we conservatively fetch the current
* PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
* for this device; but that's not guaranteed.
*/
#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \
(((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS| \
CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \
0x100000 | ((addr) & 0xfffff))
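/* Illustrative equivalent of the macro above (a sketch, not driver code;
 * the helper name is hypothetical): keep the low 11 bits of the current
 * PCIE_BAR as address bits [31:21], force bit 20, and keep the low 20
 * bits of the Target CPU address.
 */
static inline u32 ath10k_pci_targ_cpu_to_ce_addr_sketch(void __iomem *mem,
							u32 addr)
{
	u32 bar = ioread32(mem + (SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS));

	return ((bar & 0x7ff) << 21) | 0x100000 | (addr & 0xfffff);
}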
/* Wait up to this many ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10
/* Target exposes its registers for direct access. However before host can
* access them it needs to make sure the target is awake (ath10k_pci_wake,
* ath10k_pci_wake_wait, ath10k_pci_is_awake). Once target is awake it won't go
* to sleep unless host tells it to (ath10k_pci_sleep).
*
* If the host tries to access target registers without waking the target
* up first, it can scribble over host memory.
*
* If the target is asleep, waking it up may take up to 2ms.
*/
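/* Illustrative access pattern (a sketch; ath10k_pci_wake() and
 * ath10k_pci_sleep() live in pci.c and their exact signatures are
 * assumed here):
 *
 *	ret = ath10k_pci_wake(ar);
 *	if (ret)
 *		return ret;
 *	val = ath10k_pci_read32(ar, offset);
 *	ath10k_pci_sleep(ar);
 */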
static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
u32 value)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
iowrite32(value, ar_pci->mem + offset);
}
static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
return ioread32(ar_pci->mem + offset);
}
static inline u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}
static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}
static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
#endif /* _PCI_H_ */

File diff suppressed because it is too large

View file

@@ -0,0 +1,561 @@
/*
* Copyright (c) 2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/relay.h>
#include "core.h"
#include "debug.h"
static void send_fft_sample(struct ath10k *ar,
const struct fft_sample_tlv *fft_sample_tlv)
{
int length;
if (!ar->spectral.rfs_chan_spec_scan)
return;
length = __be16_to_cpu(fft_sample_tlv->length) +
sizeof(*fft_sample_tlv);
relay_write(ar->spectral.rfs_chan_spec_scan, fft_sample_tlv, length);
}
static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len,
u8 *data)
{
int dc_pos;
u8 max_exp;
dc_pos = bin_len / 2;
/* peak index outside of bins */
if (dc_pos < max_index || -dc_pos >= max_index)
return 0;
for (max_exp = 0; max_exp < 8; max_exp++) {
if (data[dc_pos + max_index] == (max_magnitude >> max_exp))
break;
}
/* max_exp not found */
if (data[dc_pos + max_index] != (max_magnitude >> max_exp))
return 0;
return max_exp;
}
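/* Worked example (illustrative only): if the peak bin stores 0x35 and the
 * reported max_magnitude is 0x1a8, get_max_exp() returns 3, because
 * 0x1a8 >> 3 == 0x35.
 */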
int ath10k_spectral_process_fft(struct ath10k *ar,
struct wmi_single_phyerr_rx_event *event,
struct phyerr_fft_report *fftr,
size_t bin_len, u64 tsf)
{
struct fft_sample_ath10k *fft_sample;
u8 buf[sizeof(*fft_sample) + SPECTRAL_ATH10K_MAX_NUM_BINS];
u16 freq1, freq2, total_gain_db, base_pwr_db, length, peak_mag;
u32 reg0, reg1, nf_list1, nf_list2;
u8 chain_idx, *bins;
int dc_pos;
fft_sample = (struct fft_sample_ath10k *)&buf;
if (bin_len < 64 || bin_len > SPECTRAL_ATH10K_MAX_NUM_BINS)
return -EINVAL;
reg0 = __le32_to_cpu(fftr->reg0);
reg1 = __le32_to_cpu(fftr->reg1);
length = sizeof(*fft_sample) - sizeof(struct fft_sample_tlv) + bin_len;
fft_sample->tlv.type = ATH_FFT_SAMPLE_ATH10K;
fft_sample->tlv.length = __cpu_to_be16(length);
/* TODO: there might be a reason why the hardware reports 20/40/80 MHz,
* but the results/plots suggest that it's actually 22/44/88 MHz.
*/
switch (event->hdr.chan_width_mhz) {
case 20:
fft_sample->chan_width_mhz = 22;
break;
case 40:
fft_sample->chan_width_mhz = 44;
break;
case 80:
/* TODO: As experiments with an analogue sender and various
* configurations (fft-sizes of 64/128/256 and 20/40/80 MHz)
* show, the particular configuration of 80 MHz/64 bins does
* not match the other samples at all. Until the reason
* for that is found, don't report these samples.
*/
if (bin_len == 64)
return -EINVAL;
fft_sample->chan_width_mhz = 88;
break;
default:
fft_sample->chan_width_mhz = event->hdr.chan_width_mhz;
}
fft_sample->relpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB);
fft_sample->avgpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB);
peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
fft_sample->max_magnitude = __cpu_to_be16(peak_mag);
fft_sample->max_index = MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX);
fft_sample->rssi = event->hdr.rssi_combined;
total_gain_db = MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB);
base_pwr_db = MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB);
fft_sample->total_gain_db = __cpu_to_be16(total_gain_db);
fft_sample->base_pwr_db = __cpu_to_be16(base_pwr_db);
freq1 = __le16_to_cpu(event->hdr.freq1);
freq2 = __le16_to_cpu(event->hdr.freq2);
fft_sample->freq1 = __cpu_to_be16(freq1);
fft_sample->freq2 = __cpu_to_be16(freq2);
nf_list1 = __le32_to_cpu(event->hdr.nf_list_1);
nf_list2 = __le32_to_cpu(event->hdr.nf_list_2);
chain_idx = MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX);
switch (chain_idx) {
case 0:
fft_sample->noise = __cpu_to_be16(nf_list1 & 0xffffu);
break;
case 1:
fft_sample->noise = __cpu_to_be16((nf_list1 >> 16) & 0xffffu);
break;
case 2:
fft_sample->noise = __cpu_to_be16(nf_list2 & 0xffffu);
break;
case 3:
fft_sample->noise = __cpu_to_be16((nf_list2 >> 16) & 0xffffu);
break;
}
bins = (u8 *)fftr;
bins += sizeof(*fftr);
fft_sample->tsf = __cpu_to_be64(tsf);
/* max_exp has been directly reported by previous hardware (ath9k),
* maybe it's possible to get it by other means?
*/
fft_sample->max_exp = get_max_exp(fft_sample->max_index, peak_mag,
bin_len, bins);
memcpy(fft_sample->data, bins, bin_len);
/* DC value (value in the middle) is the blind spot of the spectral
* sample and invalid, interpolate it.
*/
dc_pos = bin_len / 2;
fft_sample->data[dc_pos] = (fft_sample->data[dc_pos + 1] +
fft_sample->data[dc_pos - 1]) / 2;
send_fft_sample(ar, &fft_sample->tlv);
return 0;
}
static struct ath10k_vif *ath10k_get_spectral_vdev(struct ath10k *ar)
{
struct ath10k_vif *arvif;
lockdep_assert_held(&ar->conf_mutex);
if (list_empty(&ar->arvifs))
return NULL;
/* if there already is a vif doing spectral, return that. */
list_for_each_entry(arvif, &ar->arvifs, list)
if (arvif->spectral_enabled)
return arvif;
/* otherwise, return the first vif. */
return list_first_entry(&ar->arvifs, typeof(*arvif), list);
}
static int ath10k_spectral_scan_trigger(struct ath10k *ar)
{
struct ath10k_vif *arvif;
int res;
int vdev_id;
lockdep_assert_held(&ar->conf_mutex);
arvif = ath10k_get_spectral_vdev(ar);
if (!arvif)
return -ENODEV;
vdev_id = arvif->vdev_id;
if (ar->spectral.mode == SPECTRAL_DISABLED)
return 0;
res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
WMI_SPECTRAL_ENABLE_CMD_ENABLE);
if (res < 0)
return res;
res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
WMI_SPECTRAL_TRIGGER_CMD_TRIGGER,
WMI_SPECTRAL_ENABLE_CMD_ENABLE);
if (res < 0)
return res;
return 0;
}
static int ath10k_spectral_scan_config(struct ath10k *ar,
enum ath10k_spectral_mode mode)
{
struct wmi_vdev_spectral_conf_arg arg;
struct ath10k_vif *arvif;
int vdev_id, count, res = 0;
lockdep_assert_held(&ar->conf_mutex);
arvif = ath10k_get_spectral_vdev(ar);
if (!arvif)
return -ENODEV;
vdev_id = arvif->vdev_id;
arvif->spectral_enabled = (mode != SPECTRAL_DISABLED);
ar->spectral.mode = mode;
res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
WMI_SPECTRAL_ENABLE_CMD_DISABLE);
if (res < 0) {
ath10k_warn(ar, "failed to enable spectral scan: %d\n", res);
return res;
}
if (mode == SPECTRAL_DISABLED)
return 0;
if (mode == SPECTRAL_BACKGROUND)
count = WMI_SPECTRAL_COUNT_DEFAULT;
else
count = max_t(u8, 1, ar->spectral.config.count);
arg.vdev_id = vdev_id;
arg.scan_count = count;
arg.scan_period = WMI_SPECTRAL_PERIOD_DEFAULT;
arg.scan_priority = WMI_SPECTRAL_PRIORITY_DEFAULT;
arg.scan_fft_size = ar->spectral.config.fft_size;
arg.scan_gc_ena = WMI_SPECTRAL_GC_ENA_DEFAULT;
arg.scan_restart_ena = WMI_SPECTRAL_RESTART_ENA_DEFAULT;
arg.scan_noise_floor_ref = WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
arg.scan_init_delay = WMI_SPECTRAL_INIT_DELAY_DEFAULT;
arg.scan_nb_tone_thr = WMI_SPECTRAL_NB_TONE_THR_DEFAULT;
arg.scan_str_bin_thr = WMI_SPECTRAL_STR_BIN_THR_DEFAULT;
arg.scan_wb_rpt_mode = WMI_SPECTRAL_WB_RPT_MODE_DEFAULT;
arg.scan_rssi_rpt_mode = WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT;
arg.scan_rssi_thr = WMI_SPECTRAL_RSSI_THR_DEFAULT;
arg.scan_pwr_format = WMI_SPECTRAL_PWR_FORMAT_DEFAULT;
arg.scan_rpt_mode = WMI_SPECTRAL_RPT_MODE_DEFAULT;
arg.scan_bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT;
arg.scan_dbm_adj = WMI_SPECTRAL_DBM_ADJ_DEFAULT;
arg.scan_chn_mask = WMI_SPECTRAL_CHN_MASK_DEFAULT;
res = ath10k_wmi_vdev_spectral_conf(ar, &arg);
if (res < 0) {
ath10k_warn(ar, "failed to configure spectral scan: %d\n", res);
return res;
}
return 0;
}
static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
char *mode = "";
unsigned int len;
enum ath10k_spectral_mode spectral_mode;
mutex_lock(&ar->conf_mutex);
spectral_mode = ar->spectral.mode;
mutex_unlock(&ar->conf_mutex);
switch (spectral_mode) {
case SPECTRAL_DISABLED:
mode = "disable";
break;
case SPECTRAL_BACKGROUND:
mode = "background";
break;
case SPECTRAL_MANUAL:
mode = "manual";
break;
}
len = strlen(mode);
return simple_read_from_buffer(user_buf, count, ppos, mode, len);
}
static ssize_t write_file_spec_scan_ctl(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
char buf[32];
ssize_t len;
int res;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
mutex_lock(&ar->conf_mutex);
if (strncmp("trigger", buf, 7) == 0) {
if (ar->spectral.mode == SPECTRAL_MANUAL ||
ar->spectral.mode == SPECTRAL_BACKGROUND) {
/* reset the configuration to adopt possibly changed
* debugfs parameters
*/
res = ath10k_spectral_scan_config(ar,
ar->spectral.mode);
if (res < 0) {
ath10k_warn(ar, "failed to reconfigure spectral scan: %d\n",
res);
}
res = ath10k_spectral_scan_trigger(ar);
if (res < 0) {
ath10k_warn(ar, "failed to trigger spectral scan: %d\n",
res);
}
} else {
res = -EINVAL;
}
} else if (strncmp("background", buf, 9) == 0) {
res = ath10k_spectral_scan_config(ar, SPECTRAL_BACKGROUND);
} else if (strncmp("manual", buf, 6) == 0) {
res = ath10k_spectral_scan_config(ar, SPECTRAL_MANUAL);
} else if (strncmp("disable", buf, 7) == 0) {
res = ath10k_spectral_scan_config(ar, SPECTRAL_DISABLED);
} else {
res = -EINVAL;
}
mutex_unlock(&ar->conf_mutex);
if (res < 0)
return res;
return count;
}
static const struct file_operations fops_spec_scan_ctl = {
.read = read_file_spec_scan_ctl,
.write = write_file_spec_scan_ctl,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t read_file_spectral_count(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
char buf[32];
unsigned int len;
u8 spectral_count;
mutex_lock(&ar->conf_mutex);
spectral_count = ar->spectral.config.count;
mutex_unlock(&ar->conf_mutex);
len = sprintf(buf, "%d\n", spectral_count);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t write_file_spectral_count(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
unsigned long val;
char buf[32];
ssize_t len;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
if (kstrtoul(buf, 0, &val))
return -EINVAL;
if (val > 255)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
ar->spectral.config.count = val;
mutex_unlock(&ar->conf_mutex);
return count;
}
static const struct file_operations fops_spectral_count = {
.read = read_file_spectral_count,
.write = write_file_spectral_count,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t read_file_spectral_bins(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
char buf[32];
unsigned int len, bins, fft_size, bin_scale;
mutex_lock(&ar->conf_mutex);
fft_size = ar->spectral.config.fft_size;
bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT;
bins = 1 << (fft_size - bin_scale);
mutex_unlock(&ar->conf_mutex);
len = sprintf(buf, "%d\n", bins);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t write_file_spectral_bins(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
unsigned long val;
char buf[32];
ssize_t len;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
if (kstrtoul(buf, 0, &val))
return -EINVAL;
if (val < 64 || val > SPECTRAL_ATH10K_MAX_NUM_BINS)
return -EINVAL;
if (!is_power_of_2(val))
return -EINVAL;
mutex_lock(&ar->conf_mutex);
ar->spectral.config.fft_size = ilog2(val);
ar->spectral.config.fft_size += WMI_SPECTRAL_BIN_SCALE_DEFAULT;
mutex_unlock(&ar->conf_mutex);
return count;
}
static const struct file_operations fops_spectral_bins = {
.read = read_file_spectral_bins,
.write = write_file_spectral_bins,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static struct dentry *create_buf_file_handler(const char *filename,
struct dentry *parent,
umode_t mode,
struct rchan_buf *buf,
int *is_global)
{
struct dentry *buf_file;
buf_file = debugfs_create_file(filename, mode, parent, buf,
&relay_file_operations);
*is_global = 1;
return buf_file;
}
static int remove_buf_file_handler(struct dentry *dentry)
{
debugfs_remove(dentry);
return 0;
}
static struct rchan_callbacks rfs_spec_scan_cb = {
.create_buf_file = create_buf_file_handler,
.remove_buf_file = remove_buf_file_handler,
};
int ath10k_spectral_start(struct ath10k *ar)
{
struct ath10k_vif *arvif;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list)
arvif->spectral_enabled = 0;
ar->spectral.mode = SPECTRAL_DISABLED;
ar->spectral.config.count = WMI_SPECTRAL_COUNT_DEFAULT;
ar->spectral.config.fft_size = WMI_SPECTRAL_FFT_SIZE_DEFAULT;
return 0;
}
int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
{
if (!arvif->spectral_enabled)
return 0;
return ath10k_spectral_scan_config(arvif->ar, SPECTRAL_DISABLED);
}
int ath10k_spectral_create(struct ath10k *ar)
{
ar->spectral.rfs_chan_spec_scan = relay_open("spectral_scan",
ar->debug.debugfs_phy,
1024, 256,
&rfs_spec_scan_cb, NULL);
debugfs_create_file("spectral_scan_ctl",
S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar,
&fops_spec_scan_ctl);
debugfs_create_file("spectral_count",
S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar,
&fops_spectral_count);
debugfs_create_file("spectral_bins",
S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar,
&fops_spectral_bins);
return 0;
}
void ath10k_spectral_destroy(struct ath10k *ar)
{
if (ar->spectral.rfs_chan_spec_scan) {
relay_close(ar->spectral.rfs_chan_spec_scan);
ar->spectral.rfs_chan_spec_scan = NULL;
}
}

View file

@@ -0,0 +1,90 @@
/*
* Copyright (c) 2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef SPECTRAL_H
#define SPECTRAL_H
#include "../spectral_common.h"
/**
* struct ath10k_spec_scan - parameters for Atheros spectral scan
*
* @count: number of scan results requested for manual mode
* @fft_size: number of bins to be requested = 2^(fft_size - bin_scale)
*/
struct ath10k_spec_scan {
u8 count;
u8 fft_size;
};
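/* Example: with the default bin scale of 1 (WMI_SPECTRAL_BIN_SCALE_DEFAULT,
 * assumed here), fft_size = 7 requests 2^(7 - 1) = 64 bins, matching the
 * "bins = 1 << (fft_size - bin_scale)" computation in spectral.c.
 */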
/* enum ath10k_spectral_mode:
*
* @SPECTRAL_DISABLED: spectral mode is disabled
* @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
* something else.
* @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
* is performed manually.
*/
enum ath10k_spectral_mode {
SPECTRAL_DISABLED = 0,
SPECTRAL_BACKGROUND,
SPECTRAL_MANUAL,
};
#ifdef CONFIG_ATH10K_DEBUGFS
int ath10k_spectral_process_fft(struct ath10k *ar,
struct wmi_single_phyerr_rx_event *event,
struct phyerr_fft_report *fftr,
size_t bin_len, u64 tsf);
int ath10k_spectral_start(struct ath10k *ar);
int ath10k_spectral_vif_stop(struct ath10k_vif *arvif);
int ath10k_spectral_create(struct ath10k *ar);
void ath10k_spectral_destroy(struct ath10k *ar);
#else
static inline int
ath10k_spectral_process_fft(struct ath10k *ar,
struct wmi_single_phyerr_rx_event *event,
struct phyerr_fft_report *fftr,
size_t bin_len, u64 tsf)
{
return 0;
}
static inline int ath10k_spectral_start(struct ath10k *ar)
{
return 0;
}
static inline int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
{
return 0;
}
static inline int ath10k_spectral_create(struct ath10k *ar)
{
return 0;
}
static inline void ath10k_spectral_destroy(struct ath10k *ar)
{
}
#endif /* CONFIG_ATH10K_DEBUGFS */
#endif /* SPECTRAL_H */

View file

@@ -0,0 +1,448 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __TARGADDRS_H__
#define __TARGADDRS_H__
/*
* xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
* host_interest structure. It must match the address of the _host_interest
* symbol (see linker script).
*
* Host Interest is shared between Host and Target in order to coordinate
* between the two, and is intended to remain constant (with additions only
* at the end) across software releases.
*
* All addresses are available here so that it's possible to
* write a single binary that works with all Target Types.
* May be used in assembler code as well as C.
*/
#define QCA988X_HOST_INTEREST_ADDRESS 0x00400800
#define HOST_INTEREST_MAX_SIZE 0x200
/*
* These are items that the Host may need to access via BMI or via the
* Diagnostic Window. The position of items in this structure must remain
* constant across firmware revisions! Types for each item must be fixed
* size across target and host platforms. More items may be added at the end.
*/
struct host_interest {
/*
* Pointer to application-defined area, if any.
* Set by Target application during startup.
*/
u32 hi_app_host_interest; /* 0x00 */
/* Pointer to register dump area, valid after Target crash. */
u32 hi_failure_state; /* 0x04 */
/* Pointer to debug logging header */
u32 hi_dbglog_hdr; /* 0x08 */
u32 hi_unused0c; /* 0x0c */
/*
* General-purpose flag bits, similar to SOC_OPTION_* flags.
* Can be used by application rather than by OS.
*/
u32 hi_option_flag; /* 0x10 */
/*
* Boolean that determines whether or not to
* display messages on the serial port.
*/
u32 hi_serial_enable; /* 0x14 */
/* Start address of DataSet index, if any */
u32 hi_dset_list_head; /* 0x18 */
/* Override Target application start address */
u32 hi_app_start; /* 0x1c */
/* Clock and voltage tuning */
u32 hi_skip_clock_init; /* 0x20 */
u32 hi_core_clock_setting; /* 0x24 */
u32 hi_cpu_clock_setting; /* 0x28 */
u32 hi_system_sleep_setting; /* 0x2c */
u32 hi_xtal_control_setting; /* 0x30 */
u32 hi_pll_ctrl_setting_24ghz; /* 0x34 */
u32 hi_pll_ctrl_setting_5ghz; /* 0x38 */
u32 hi_ref_voltage_trim_setting; /* 0x3c */
u32 hi_clock_info; /* 0x40 */
/* Host uses BE CPU or not */
u32 hi_be; /* 0x44 */
u32 hi_stack; /* normal stack */ /* 0x48 */
u32 hi_err_stack; /* error stack */ /* 0x4c */
u32 hi_desired_cpu_speed_hz; /* 0x50 */
/* Pointer to Board Data */
u32 hi_board_data; /* 0x54 */
/*
* Indication of Board Data state:
* 0: board data is not yet initialized.
* 1: board data is initialized; unknown size
* >1: number of bytes of initialized board data
*/
u32 hi_board_data_initialized; /* 0x58 */
u32 hi_dset_ram_index_table; /* 0x5c */
u32 hi_desired_baud_rate; /* 0x60 */
u32 hi_dbglog_config; /* 0x64 */
u32 hi_end_ram_reserve_sz; /* 0x68 */
u32 hi_mbox_io_block_sz; /* 0x6c */
u32 hi_num_bpatch_streams; /* 0x70 -- unused */
u32 hi_mbox_isr_yield_limit; /* 0x74 */
u32 hi_refclk_hz; /* 0x78 */
u32 hi_ext_clk_detected; /* 0x7c */
u32 hi_dbg_uart_txpin; /* 0x80 */
u32 hi_dbg_uart_rxpin; /* 0x84 */
u32 hi_hci_uart_baud; /* 0x88 */
u32 hi_hci_uart_pin_assignments; /* 0x8C */
u32 hi_hci_uart_baud_scale_val; /* 0x90 */
u32 hi_hci_uart_baud_step_val; /* 0x94 */
u32 hi_allocram_start; /* 0x98 */
u32 hi_allocram_sz; /* 0x9c */
u32 hi_hci_bridge_flags; /* 0xa0 */
u32 hi_hci_uart_support_pins; /* 0xa4 */
u32 hi_hci_uart_pwr_mgmt_params; /* 0xa8 */
/*
* 0xa8 - [1]: 0 = UART FC active low, 1 = UART FC active high
* [31:16]: wakeup timeout in ms
*/
/* Pointer to extended board Data */
u32 hi_board_ext_data; /* 0xac */
u32 hi_board_ext_data_config; /* 0xb0 */
/*
* Bit [0] : valid
* Bit [31:16]: size
*/
/*
* hi_reset_flag controls behaviour across a target reset, such as
* restoring app_start after a warm reset, or preserving the host
* interest area, ROM data, literals, etc.
*/
u32 hi_reset_flag; /* 0xb4 */
/* indicate hi_reset_flag is valid */
u32 hi_reset_flag_valid; /* 0xb8 */
u32 hi_hci_uart_pwr_mgmt_params_ext; /* 0xbc */
/* 0xbc - [31:0]: idle timeout in ms */
/* ACS flags */
u32 hi_acs_flags; /* 0xc0 */
u32 hi_console_flags; /* 0xc4 */
u32 hi_nvram_state; /* 0xc8 */
u32 hi_option_flag2; /* 0xcc */
/* If non-zero, override values sent to Host in WMI_READY event. */
u32 hi_sw_version_override; /* 0xd0 */
u32 hi_abi_version_override; /* 0xd4 */
/*
* Percentage of high priority RX traffic to total expected RX traffic
* applicable only to ar6004
*/
u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
/* test applications flags */
u32 hi_test_apps_related; /* 0xdc */
/* location of test script */
u32 hi_ota_testscript; /* 0xe0 */
/* location of CAL data */
u32 hi_cal_data; /* 0xe4 */
/* Number of packet log buffers */
u32 hi_pktlog_num_buffers; /* 0xe8 */
/* wow extension configuration */
u32 hi_wow_ext_config; /* 0xec */
u32 hi_pwr_save_flags; /* 0xf0 */
/* Spatial Multiplexing Power Save (SMPS) options */
u32 hi_smps_options; /* 0xf4 */
/* Interconnect-specific state */
u32 hi_interconnect_state; /* 0xf8 */
/* Coex configuration flags */
u32 hi_coex_config; /* 0xfc */
/* Early allocation support */
u32 hi_early_alloc; /* 0x100 */
/* FW swap field */
/*
* Bits of this 32bit word will be used to pass specific swap
* instruction to FW
*/
/*
* Bit 0 -- AP Nart descriptor no swap. When this bit is set
* FW will not swap TX descriptor. Meaning packets are formed
* on the target processor.
*/
/* Bit 1 - unused */
u32 hi_fw_swap; /* 0x104 */
} __packed;
#define HI_ITEM(item) offsetof(struct host_interest, item)
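/* Example (sketch): the Host computes the Target RAM address of a
 * host_interest member by adding its offset to the per-chip base, e.g.
 * for hi_board_data on QCA988X (the diag read helper name is assumed
 * for illustration):
 *
 *	u32 addr = QCA988X_HOST_INTEREST_ADDRESS + HI_ITEM(hi_board_data);
 *	ath10k_hif_diag_read(ar, addr, &board_data_addr, sizeof(u32));
 */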
/* Bits defined in hi_option_flag */
/* Enable timer workaround */
#define HI_OPTION_TIMER_WAR 0x01
/* Limit BMI command credits */
#define HI_OPTION_BMI_CRED_LIMIT 0x02
/* Relay Dot11 hdr to/from host */
#define HI_OPTION_RELAY_DOT11_HDR 0x04
/* MAC addr method 0-locally administered 1-globally unique addrs */
#define HI_OPTION_MAC_ADDR_METHOD 0x08
/* Firmware Bridging */
#define HI_OPTION_FW_BRIDGE 0x10
/* Enable CPU profiling */
#define HI_OPTION_ENABLE_PROFILE 0x20
/* Disable debug logging */
#define HI_OPTION_DISABLE_DBGLOG 0x40
/* Skip Era Tracking */
#define HI_OPTION_SKIP_ERA_TRACKING 0x80
/* Disable PAPRD (debug) */
#define HI_OPTION_PAPRD_DISABLE 0x100
#define HI_OPTION_NUM_DEV_LSB 0x200
#define HI_OPTION_NUM_DEV_MSB 0x800
#define HI_OPTION_DEV_MODE_LSB 0x1000
#define HI_OPTION_DEV_MODE_MSB 0x8000000
/* Disable LowFreq Timer Stabilization */
#define HI_OPTION_NO_LFT_STBL 0x10000000
/* Skip regulatory scan */
#define HI_OPTION_SKIP_REG_SCAN 0x20000000
/*
* Do regulatory scan during init before
* sending WMI ready event to host
*/
#define HI_OPTION_INIT_REG_SCAN 0x40000000
/* REV6: Do not adjust memory map */
#define HI_OPTION_SKIP_MEMMAP 0x80000000
#define HI_OPTION_MAC_ADDR_METHOD_SHIFT 3
/* 2 bits of hi_option_flag are used to represent 4 modes */
#define HI_OPTION_FW_MODE_IBSS 0x0 /* IBSS Mode */
#define HI_OPTION_FW_MODE_BSS_STA 0x1 /* STA Mode */
#define HI_OPTION_FW_MODE_AP 0x2 /* AP Mode */
#define HI_OPTION_FW_MODE_BT30AMP 0x3 /* BT30 AMP Mode */
/* 2 bits of hi_option_flag are used to represent 4 submodes */
#define HI_OPTION_FW_SUBMODE_NONE 0x0 /* Normal mode */
#define HI_OPTION_FW_SUBMODE_P2PDEV 0x1 /* p2p device mode */
#define HI_OPTION_FW_SUBMODE_P2PCLIENT 0x2 /* p2p client mode */
#define HI_OPTION_FW_SUBMODE_P2PGO 0x3 /* p2p go mode */
/* Num dev Mask */
#define HI_OPTION_NUM_DEV_MASK 0x7
#define HI_OPTION_NUM_DEV_SHIFT 0x9
/* firmware bridging */
#define HI_OPTION_FW_BRIDGE_SHIFT 0x04
/*
Fw Mode/SubMode Mask
|-----------------------------------------------------------------------------|
| SUB | SUB | SUB | SUB | | | | |
|MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0]|
| (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) |
|-----------------------------------------------------------------------------|
*/
#define HI_OPTION_FW_MODE_BITS 0x2
#define HI_OPTION_FW_MODE_MASK 0x3
#define HI_OPTION_FW_MODE_SHIFT 0xC
#define HI_OPTION_ALL_FW_MODE_MASK 0xFF
#define HI_OPTION_FW_SUBMODE_BITS 0x2
#define HI_OPTION_FW_SUBMODE_MASK 0x3
#define HI_OPTION_FW_SUBMODE_SHIFT 0x14
#define HI_OPTION_ALL_FW_SUBMODE_MASK 0xFF00
#define HI_OPTION_ALL_FW_SUBMODE_SHIFT 0x8
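/* Illustrative sketch (derived from the masks above, not driver code):
 * packing the mode and submode for device "i" into hi_option_flag:
 *
 *	flag |= (HI_OPTION_FW_MODE_BSS_STA & HI_OPTION_FW_MODE_MASK) <<
 *		(HI_OPTION_FW_MODE_SHIFT + i * HI_OPTION_FW_MODE_BITS);
 *	flag |= (HI_OPTION_FW_SUBMODE_P2PDEV & HI_OPTION_FW_SUBMODE_MASK) <<
 *		(HI_OPTION_FW_SUBMODE_SHIFT + i * HI_OPTION_FW_SUBMODE_BITS);
 */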
/* hi_option_flag2 options */
#define HI_OPTION_OFFLOAD_AMSDU 0x01
#define HI_OPTION_DFS_SUPPORT 0x02 /* Enable DFS support */
#define HI_OPTION_ENABLE_RFKILL 0x04 /* RFKill Enable Feature*/
#define HI_OPTION_RADIO_RETENTION_DISABLE 0x08 /* Disable radio retention */
#define HI_OPTION_EARLY_CFG_DONE 0x10 /* Early configuration is complete */
#define HI_OPTION_RF_KILL_SHIFT 0x2
#define HI_OPTION_RF_KILL_MASK 0x1
/* hi_reset_flag */
/* preserve App Start address */
#define HI_RESET_FLAG_PRESERVE_APP_START 0x01
/* preserve host interest */
#define HI_RESET_FLAG_PRESERVE_HOST_INTEREST 0x02
/* preserve ROM data */
#define HI_RESET_FLAG_PRESERVE_ROMDATA 0x04
#define HI_RESET_FLAG_PRESERVE_NVRAM_STATE 0x08
#define HI_RESET_FLAG_PRESERVE_BOOT_INFO 0x10
#define HI_RESET_FLAG_WARM_RESET 0x20
/* define hi_fw_swap bits */
#define HI_DESC_IN_FW_BIT 0x01
/* indicate the reset flag is valid */
#define HI_RESET_FLAG_IS_VALID 0x12345678
/* ACS is enabled */
#define HI_ACS_FLAGS_ENABLED (1 << 0)
/* Use physical WWAN device */
#define HI_ACS_FLAGS_USE_WWAN (1 << 1)
/* Use test VAP */
#define HI_ACS_FLAGS_TEST_VAP (1 << 2)
/*
* CONSOLE FLAGS
*
* Bit Range Meaning
* --------- --------------------------------
* 2..0 UART ID (0 = Default)
* 3 Baud Select (0 = 9600, 1 = 115200)
* 30..4 Reserved
* 31 Enable Console
*
*/
#define HI_CONSOLE_FLAGS_ENABLE (1 << 31)
#define HI_CONSOLE_FLAGS_UART_MASK (0x7)
#define HI_CONSOLE_FLAGS_UART_SHIFT 0
#define HI_CONSOLE_FLAGS_BAUD_SELECT (1 << 3)
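/* Example (sketch): enable the console on UART 2 at 115200 baud:
 *
 *	hi_console_flags = HI_CONSOLE_FLAGS_ENABLE |
 *			   HI_CONSOLE_FLAGS_BAUD_SELECT |
 *			   ((2 << HI_CONSOLE_FLAGS_UART_SHIFT) &
 *			    HI_CONSOLE_FLAGS_UART_MASK);
 */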
/* SM power save options */
#define HI_SMPS_ALLOW_MASK (0x00000001)
#define HI_SMPS_MODE_MASK (0x00000002)
#define HI_SMPS_MODE_STATIC (0x00000000)
#define HI_SMPS_MODE_DYNAMIC (0x00000002)
#define HI_SMPS_DISABLE_AUTO_MODE (0x00000004)
#define HI_SMPS_DATA_THRESH_MASK (0x000007f8)
#define HI_SMPS_DATA_THRESH_SHIFT (3)
#define HI_SMPS_RSSI_THRESH_MASK (0x0007f800)
#define HI_SMPS_RSSI_THRESH_SHIFT (11)
#define HI_SMPS_LOWPWR_CM_MASK (0x00380000)
#define HI_SMPS_LOWPWR_CM_SHIFT (15)
#define HI_SMPS_HIPWR_CM_MASK (0x03c00000)
#define HI_SMPS_HIPWR_CM_SHIFT (19)
/*
* WOW Extension configuration
*
* Bit Range Meaning
* --------- --------------------------------
* 8..0 Size of each WOW pattern (max 511)
* 15..9 Number of patterns per list (max 127)
* 17..16 Number of lists (max 4)
* 30..18 Reserved
* 31 Enabled
*
* set values (except enable) to zeros for default settings
*/
#define HI_WOW_EXT_ENABLED_MASK (1 << 31)
#define HI_WOW_EXT_NUM_LIST_SHIFT 16
#define HI_WOW_EXT_NUM_LIST_MASK (0x3 << HI_WOW_EXT_NUM_LIST_SHIFT)
#define HI_WOW_EXT_NUM_PATTERNS_SHIFT 9
#define HI_WOW_EXT_NUM_PATTERNS_MASK (0x7F << HI_WOW_EXT_NUM_PATTERNS_SHIFT)
#define HI_WOW_EXT_PATTERN_SIZE_SHIFT 0
#define HI_WOW_EXT_PATTERN_SIZE_MASK (0x1FF << HI_WOW_EXT_PATTERN_SIZE_SHIFT)
#define HI_WOW_EXT_MAKE_CONFIG(num_lists, count, size) \
((((num_lists) << HI_WOW_EXT_NUM_LIST_SHIFT) & \
HI_WOW_EXT_NUM_LIST_MASK) | \
(((count) << HI_WOW_EXT_NUM_PATTERNS_SHIFT) & \
HI_WOW_EXT_NUM_PATTERNS_MASK) | \
(((size) << HI_WOW_EXT_PATTERN_SIZE_SHIFT) & \
HI_WOW_EXT_PATTERN_SIZE_MASK))
#define HI_WOW_EXT_GET_NUM_LISTS(config) \
(((config) & HI_WOW_EXT_NUM_LIST_MASK) >> HI_WOW_EXT_NUM_LIST_SHIFT)
#define HI_WOW_EXT_GET_NUM_PATTERNS(config) \
(((config) & HI_WOW_EXT_NUM_PATTERNS_MASK) >> \
HI_WOW_EXT_NUM_PATTERNS_SHIFT)
#define HI_WOW_EXT_GET_PATTERN_SIZE(config) \
(((config) & HI_WOW_EXT_PATTERN_SIZE_MASK) >> \
HI_WOW_EXT_PATTERN_SIZE_SHIFT)
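/* Example (sketch): one pattern list holding 16 patterns of up to 256
 * bytes each, with the feature enabled:
 *
 *	u32 cfg = HI_WOW_EXT_MAKE_CONFIG(1, 16, 256) | HI_WOW_EXT_ENABLED_MASK;
 *
 * The getters then recover the fields: HI_WOW_EXT_GET_NUM_LISTS(cfg) == 1,
 * HI_WOW_EXT_GET_NUM_PATTERNS(cfg) == 16 and
 * HI_WOW_EXT_GET_PATTERN_SIZE(cfg) == 256.
 */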
/*
* Early allocation configuration
* Support RAM bank configuration before BMI done and this eases the memory
* allocation at very early stage
* Bit Range Meaning
* --------- ----------------------------------
* [0:3] number of bank assigned to be IRAM
* [4:15] reserved
* [16:31] magic number
*
* Note:
* 1. target firmware checks the magic number and, on a match, considers
* bits [0:15] valid and uses them to calculate the end of DRAM. Early
* allocation is then located in that area and may be reclaimed when
* necessary
* 2. if no magic number is found, early allocation happens at the "_end"
* symbol of ROM, which is located before the app-data and might NOT be
* re-claimable. If this is adopted, the link script should keep this in
* mind to avoid data corruption.
*/
#define HI_EARLY_ALLOC_MAGIC 0x6d8a
#define HI_EARLY_ALLOC_MAGIC_MASK 0xffff0000
#define HI_EARLY_ALLOC_MAGIC_SHIFT 16
#define HI_EARLY_ALLOC_IRAM_BANKS_MASK 0x0000000f
#define HI_EARLY_ALLOC_IRAM_BANKS_SHIFT 0
#define HI_EARLY_ALLOC_VALID() \
((((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_MAGIC_MASK) >> \
HI_EARLY_ALLOC_MAGIC_SHIFT) == (HI_EARLY_ALLOC_MAGIC))
#define HI_EARLY_ALLOC_GET_IRAM_BANKS() \
(((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_IRAM_BANKS_MASK) \
>> HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
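/* Example (sketch): firmware treats the field as valid only when the
 * upper 16 bits hold the magic, e.g. marking two RAM banks as IRAM:
 *
 *	hi_early_alloc = (HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) | 2;
 */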
/* power save flag bit definitions */
#define HI_PWR_SAVE_LPL_ENABLED 0x1
/* b1-b3 reserved */
/* b4-b5: dev0 LPL type: 0 - none
 *                       1 - Reduce Pwr Search
 *                       2 - Reduce Pwr Listen
 */
/* b6-b7: dev1 LPL type and so on for max 8 devices */
#define HI_PWR_SAVE_LPL_DEV0_LSB 4
#define HI_PWR_SAVE_LPL_DEV_MASK 0x3
/* power save related utility macros */
#define HI_LPL_ENABLED() \
((HOST_INTEREST->hi_pwr_save_flags & HI_PWR_SAVE_LPL_ENABLED))
#define HI_DEV_LPL_TYPE_GET(_devix) \
(HOST_INTEREST->hi_pwr_save_flags & ((HI_PWR_SAVE_LPL_DEV_MASK) << \
(HI_PWR_SAVE_LPL_DEV0_LSB + (_devix)*2)))
#define HOST_INTEREST_SMPS_IS_ALLOWED() \
((HOST_INTEREST->hi_smps_options & HI_SMPS_ALLOW_MASK))
/* Reserve 1024 bytes for extended board data */
#define QCA988X_BOARD_DATA_SZ 7168
#define QCA988X_BOARD_EXT_DATA_SZ 0
#endif /* __TARGADDRS_H__ */

View file

@@ -0,0 +1,382 @@
/*
* Copyright (c) 2014 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "testmode.h"
#include <net/netlink.h>
#include <linux/firmware.h>
#include "debug.h"
#include "wmi.h"
#include "hif.h"
#include "hw.h"
#include "testmode_i.h"
static const struct nla_policy ath10k_tm_policy[ATH10K_TM_ATTR_MAX + 1] = {
[ATH10K_TM_ATTR_CMD] = { .type = NLA_U32 },
[ATH10K_TM_ATTR_DATA] = { .type = NLA_BINARY,
.len = ATH10K_TM_DATA_MAX_LEN },
[ATH10K_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
[ATH10K_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
[ATH10K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
};
/* Returns true if callee consumes the skb and the skb should be discarded.
* Returns false if skb is not used. Does not sleep.
*/
bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
{
struct sk_buff *nl_skb;
bool consumed;
int ret;
ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
"testmode event wmi cmd_id %d skb %p skb->len %d\n",
cmd_id, skb, skb->len);
ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
spin_lock_bh(&ar->data_lock);
if (!ar->testmode.utf_monitor) {
consumed = false;
goto out;
}
/* Only testmode.c should be handling events from utf firmware,
* otherwise all sorts of problems will arise as mac80211 operations
* are not initialised.
*/
consumed = true;
nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
2 * sizeof(u32) + skb->len,
GFP_ATOMIC);
if (!nl_skb) {
ath10k_warn(ar,
"failed to allocate skb for testmode wmi event\n");
goto out;
}
ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_WMI);
if (ret) {
ath10k_warn(ar,
"failed to to put testmode wmi event cmd attribute: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
}
ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_WMI_CMDID, cmd_id);
if (ret) {
ath10k_warn(ar,
"failed to to put testmode wmi even cmd_id: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
}
ret = nla_put(nl_skb, ATH10K_TM_ATTR_DATA, skb->len, skb->data);
if (ret) {
ath10k_warn(ar,
"failed to copy skb to testmode wmi event: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
}
cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
out:
spin_unlock_bh(&ar->data_lock);
return consumed;
}
static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[])
{
struct sk_buff *skb;
int ret;
ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
"testmode cmd get version_major %d version_minor %d\n",
ATH10K_TESTMODE_VERSION_MAJOR,
ATH10K_TESTMODE_VERSION_MINOR);
skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
nla_total_size(sizeof(u32)));
if (!skb)
return -ENOMEM;
ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MAJOR,
ATH10K_TESTMODE_VERSION_MAJOR);
if (ret) {
kfree_skb(skb);
return ret;
}
ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MINOR,
ATH10K_TESTMODE_VERSION_MINOR);
if (ret) {
kfree_skb(skb);
return ret;
}
return cfg80211_testmode_reply(skb);
}
static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
{
char filename[100];
int ret;
ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf start\n");
mutex_lock(&ar->conf_mutex);
if (ar->state == ATH10K_STATE_UTF) {
ret = -EALREADY;
goto err;
}
/* start utf only when the driver is not in use */
if (ar->state != ATH10K_STATE_OFF) {
ret = -EBUSY;
goto err;
}
if (WARN_ON(ar->testmode.utf != NULL)) {
/* utf image is already downloaded, it shouldn't be */
ret = -EEXIST;
goto err;
}
snprintf(filename, sizeof(filename), "%s/%s",
ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE);
/* load utf firmware image */
ret = request_firmware(&ar->testmode.utf, filename, ar->dev);
if (ret) {
ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
filename, ret);
goto err;
}
spin_lock_bh(&ar->data_lock);
ar->testmode.utf_monitor = true;
spin_unlock_bh(&ar->data_lock);
BUILD_BUG_ON(sizeof(ar->fw_features) !=
sizeof(ar->testmode.orig_fw_features));
memcpy(ar->testmode.orig_fw_features, ar->fw_features,
sizeof(ar->fw_features));
/* utf.bin firmware image does not advertise firmware features. Do
* an ugly hack where we force the firmware features so that wmi.c
* will use the correct WMI interface.
*/
memset(ar->fw_features, 0, sizeof(ar->fw_features));
__set_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features);
ret = ath10k_hif_power_up(ar);
if (ret) {
ath10k_err(ar, "failed to power up hif (testmode): %d\n", ret);
ar->state = ATH10K_STATE_OFF;
goto err_fw_features;
}
ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF);
if (ret) {
ath10k_err(ar, "failed to start core (testmode): %d\n", ret);
ar->state = ATH10K_STATE_OFF;
goto err_power_down;
}
ar->state = ATH10K_STATE_UTF;
ath10k_info(ar, "UTF firmware started\n");
mutex_unlock(&ar->conf_mutex);
return 0;
err_power_down:
ath10k_hif_power_down(ar);
err_fw_features:
/* return the original firmware features */
memcpy(ar->fw_features, ar->testmode.orig_fw_features,
sizeof(ar->fw_features));
release_firmware(ar->testmode.utf);
ar->testmode.utf = NULL;
err:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar)
{
lockdep_assert_held(&ar->conf_mutex);
ath10k_core_stop(ar);
ath10k_hif_power_down(ar);
spin_lock_bh(&ar->data_lock);
ar->testmode.utf_monitor = false;
spin_unlock_bh(&ar->data_lock);
/* return the original firmware features */
memcpy(ar->fw_features, ar->testmode.orig_fw_features,
sizeof(ar->fw_features));
release_firmware(ar->testmode.utf);
ar->testmode.utf = NULL;
ar->state = ATH10K_STATE_OFF;
}
static int ath10k_tm_cmd_utf_stop(struct ath10k *ar, struct nlattr *tb[])
{
int ret;
ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf stop\n");
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_UTF) {
ret = -ENETDOWN;
goto out;
}
__ath10k_tm_cmd_utf_stop(ar);
ret = 0;
ath10k_info(ar, "UTF firmware stopped\n");
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[])
{
struct sk_buff *skb;
int ret, buf_len;
u32 cmd_id;
void *buf;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_UTF) {
ret = -ENETDOWN;
goto out;
}
if (!tb[ATH10K_TM_ATTR_DATA]) {
ret = -EINVAL;
goto out;
}
if (!tb[ATH10K_TM_ATTR_WMI_CMDID]) {
ret = -EINVAL;
goto out;
}
buf = nla_data(tb[ATH10K_TM_ATTR_DATA]);
buf_len = nla_len(tb[ATH10K_TM_ATTR_DATA]);
cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]);
ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
"testmode cmd wmi cmd_id %d buf %p buf_len %d\n",
cmd_id, buf, buf_len);
ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len);
skb = ath10k_wmi_alloc_skb(ar, buf_len);
if (!skb) {
ret = -ENOMEM;
goto out;
}
memcpy(skb->data, buf, buf_len);
ret = ath10k_wmi_cmd_send(ar, skb, cmd_id);
if (ret) {
ath10k_warn(ar, "failed to transmit wmi command (testmode): %d\n",
ret);
goto out;
}
ret = 0;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len)
{
struct ath10k *ar = hw->priv;
struct nlattr *tb[ATH10K_TM_ATTR_MAX + 1];
int ret;
ret = nla_parse(tb, ATH10K_TM_ATTR_MAX, data, len,
ath10k_tm_policy);
if (ret)
return ret;
if (!tb[ATH10K_TM_ATTR_CMD])
return -EINVAL;
switch (nla_get_u32(tb[ATH10K_TM_ATTR_CMD])) {
case ATH10K_TM_CMD_GET_VERSION:
return ath10k_tm_cmd_get_version(ar, tb);
case ATH10K_TM_CMD_UTF_START:
return ath10k_tm_cmd_utf_start(ar, tb);
case ATH10K_TM_CMD_UTF_STOP:
return ath10k_tm_cmd_utf_stop(ar, tb);
case ATH10K_TM_CMD_WMI:
return ath10k_tm_cmd_wmi(ar, tb);
default:
return -EOPNOTSUPP;
}
}
void ath10k_testmode_destroy(struct ath10k *ar)
{
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_UTF) {
/* utf firmware is not running, nothing to do */
goto out;
}
__ath10k_tm_cmd_utf_stop(ar);
out:
mutex_unlock(&ar->conf_mutex);
}

View file

@@ -0,0 +1,46 @@
/*
* Copyright (c) 2014 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
#ifdef CONFIG_NL80211_TESTMODE
void ath10k_testmode_destroy(struct ath10k *ar);
bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb);
int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
#else
static inline void ath10k_testmode_destroy(struct ath10k *ar)
{
}
static inline bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id,
struct sk_buff *skb)
{
return false;
}
static inline int ath10k_tm_cmd(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
void *data, int len)
{
return 0;
}
#endif

View file

@@ -0,0 +1,70 @@
/*
* Copyright (c) 2014 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* "API" level of the ath10k testmode interface. Bump it after every
* incompatible interface change.
*/
#define ATH10K_TESTMODE_VERSION_MAJOR 1
/* Bump this after every _compatible_ interface change, for example
* addition of a new command or an attribute.
*/
#define ATH10K_TESTMODE_VERSION_MINOR 0
#define ATH10K_TM_DATA_MAX_LEN 5000
enum ath10k_tm_attr {
__ATH10K_TM_ATTR_INVALID = 0,
ATH10K_TM_ATTR_CMD = 1,
ATH10K_TM_ATTR_DATA = 2,
ATH10K_TM_ATTR_WMI_CMDID = 3,
ATH10K_TM_ATTR_VERSION_MAJOR = 4,
ATH10K_TM_ATTR_VERSION_MINOR = 5,
/* keep last */
__ATH10K_TM_ATTR_AFTER_LAST,
ATH10K_TM_ATTR_MAX = __ATH10K_TM_ATTR_AFTER_LAST - 1,
};
/* All ath10k testmode interface commands specified in
* ATH10K_TM_ATTR_CMD
*/
enum ath10k_tm_cmd {
/* Returns the supported ath10k testmode interface version in
* ATH10K_TM_ATTR_VERSION_MAJOR and ATH10K_TM_ATTR_VERSION_MINOR.
* Always guaranteed to work. User space uses this to verify it's
* using the correct version of the testmode interface
*/
ATH10K_TM_CMD_GET_VERSION = 0,
/* Boots the UTF firmware, the netdev interface must be down at the
* time.
*/
ATH10K_TM_CMD_UTF_START = 1,
/* Shuts down the UTF firmware and puts the driver back into OFF
* state.
*/
ATH10K_TM_CMD_UTF_STOP = 2,
/* The command used to transmit a WMI command to the firmware and
* the event to receive WMI events from the firmware. Without
* struct wmi_cmd_hdr header, only the WMI payload. Command id is
* provided with ATH10K_TM_ATTR_WMI_CMDID and payload in
* ATH10K_TM_ATTR_DATA.
*/
ATH10K_TM_CMD_WMI = 3,
};
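/* Example (sketch): a user space NL80211_CMD_TESTMODE payload for
 * ATH10K_TM_CMD_WMI carries three attributes:
 *
 *	ATH10K_TM_ATTR_CMD       = ATH10K_TM_CMD_WMI
 *	ATH10K_TM_ATTR_WMI_CMDID = <WMI command id>
 *	ATH10K_TM_ATTR_DATA      = <raw WMI payload, without wmi_cmd_hdr>
 */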

View file

@@ -0,0 +1,20 @@
/*
* Copyright (c) 2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
#define CREATE_TRACE_POINTS
#include "trace.h"

View file

@@ -0,0 +1,266 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#include <linux/tracepoint.h>
#include "core.h"
#define _TRACE_H_
/* create empty functions when tracing is disabled */
#if !defined(CONFIG_ATH10K_TRACING) || defined(__CHECKER__)
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, ...) \
static inline void trace_ ## name(proto) {}
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(...)
#undef DEFINE_EVENT
#define DEFINE_EVENT(evt_class, name, proto, ...) \
static inline void trace_ ## name(proto) {}
#endif /* !CONFIG_ATH10K_TRACING || __CHECKER__ */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ath10k
#define ATH10K_MSG_MAX 200
DECLARE_EVENT_CLASS(ath10k_log_event,
TP_PROTO(struct ath10k *ar, struct va_format *vaf),
TP_ARGS(ar, vaf),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__dynamic_array(char, msg, ATH10K_MSG_MAX)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
ATH10K_MSG_MAX,
vaf->fmt,
*vaf->va) >= ATH10K_MSG_MAX);
),
TP_printk(
"%s %s %s",
__get_str(driver),
__get_str(device),
__get_str(msg)
)
);
DEFINE_EVENT(ath10k_log_event, ath10k_log_err,
TP_PROTO(struct ath10k *ar, struct va_format *vaf),
TP_ARGS(ar, vaf)
);
DEFINE_EVENT(ath10k_log_event, ath10k_log_warn,
TP_PROTO(struct ath10k *ar, struct va_format *vaf),
TP_ARGS(ar, vaf)
);
DEFINE_EVENT(ath10k_log_event, ath10k_log_info,
TP_PROTO(struct ath10k *ar, struct va_format *vaf),
TP_ARGS(ar, vaf)
);
TRACE_EVENT(ath10k_log_dbg,
TP_PROTO(struct ath10k *ar, unsigned int level, struct va_format *vaf),
TP_ARGS(ar, level, vaf),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(unsigned int, level)
__dynamic_array(char, msg, ATH10K_MSG_MAX)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->level = level;
WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
ATH10K_MSG_MAX,
vaf->fmt,
*vaf->va) >= ATH10K_MSG_MAX);
),
TP_printk(
"%s %s %s",
__get_str(driver),
__get_str(device),
__get_str(msg)
)
);
TRACE_EVENT(ath10k_log_dbg_dump,
TP_PROTO(struct ath10k *ar, const char *msg, const char *prefix,
const void *buf, size_t buf_len),
TP_ARGS(ar, msg, prefix, buf, buf_len),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__string(msg, msg)
__string(prefix, prefix)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__assign_str(msg, msg);
__assign_str(prefix, prefix);
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"%s %s %s/%s\n",
__get_str(driver),
__get_str(device),
__get_str(prefix),
__get_str(msg)
)
);
TRACE_EVENT(ath10k_wmi_cmd,
TP_PROTO(struct ath10k *ar, int id, void *buf, size_t buf_len, int ret),
TP_ARGS(ar, id, buf, buf_len, ret),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(unsigned int, id)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
__field(int, ret)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->id = id;
__entry->buf_len = buf_len;
__entry->ret = ret;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"%s %s id %d len %zu ret %d",
__get_str(driver),
__get_str(device),
__entry->id,
__entry->buf_len,
__entry->ret
)
);
TRACE_EVENT(ath10k_wmi_event,
TP_PROTO(struct ath10k *ar, int id, void *buf, size_t buf_len),
TP_ARGS(ar, id, buf, buf_len),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(unsigned int, id)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->id = id;
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"%s %s id %d len %zu",
__get_str(driver),
__get_str(device),
__entry->id,
__entry->buf_len
)
);
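
/*
 * Usage note (illustrative, not part of the original file): once the
 * driver is built with CONFIG_ATH10K_TRACING=y, the WMI command/event
 * payloads recorded above can be captured from userspace with
 * trace-cmd, e.g.:
 *
 *	trace-cmd record -e ath10k
 *	trace-cmd report
 */
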
TRACE_EVENT(ath10k_htt_stats,
TP_PROTO(struct ath10k *ar, void *buf, size_t buf_len),
TP_ARGS(ar, buf, buf_len),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"%s %s len %zu",
__get_str(driver),
__get_str(device),
__entry->buf_len
)
);

TRACE_EVENT(ath10k_wmi_dbglog,
TP_PROTO(struct ath10k *ar, void *buf, size_t buf_len),
TP_ARGS(ar, buf, buf_len),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"%s %s len %zu",
__get_str(driver),
__get_str(device),
__entry->buf_len
)
);

#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ */

/* we don't want to use include/trace/events */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

/* This part must be outside protection */
#include <trace/define_trace.h>
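
A minimal sketch of a call site driving these tracepoints (modelled on the driver's debug.c; the exact upstream body may differ). The empty stubs generated when CONFIG_ATH10K_TRACING is unset let such a caller compile unchanged either way:

	void ath10k_dbg(struct ath10k *ar, enum ath10k_debug_mask mask,
			const char *fmt, ...)
	{
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;

		if (ath10k_debug_mask & mask)
			dev_printk(KERN_DEBUG, ar->dev, "%pV", &vaf);

		/* tracepoint generated by TRACE_EVENT(ath10k_log_dbg) above */
		trace_ath10k_log_dbg(ar, mask, &vaf);

		va_end(args);
	}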

View file

@ -0,0 +1,221 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
#include "txrx.h"
#include "htt.h"
#include "mac.h"
#include "debug.h"
static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
{
if (!ATH10K_SKB_CB(skb)->htt.is_offchan)
return;
/* If the original wait_for_completion() timed out before
* {data,mgmt}_tx_completed() was called then we could complete
* offchan_tx_completed for a different skb. Prevent this by using
* offchan_tx_skb. */
spin_lock_bh(&ar->data_lock);
if (ar->offchan_tx_skb != skb) {
ath10k_warn(ar, "completed old offchannel frame\n");
goto out;
}
complete(&ar->offchan_tx_completed);
ar->offchan_tx_skb = NULL; /* just for sanity */
ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
out:
spin_unlock_bh(&ar->data_lock);
}
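
/*
 * Editorial sketch of the waiting side (assumed to live in the mac.c
 * offchannel worker; details may differ): ath10k_report_offchan_tx()
 * above pairs with code roughly like
 *
 *	spin_lock_bh(&ar->data_lock);
 *	reinit_completion(&ar->offchan_tx_completed);
 *	ar->offchan_tx_skb = skb;
 *	spin_unlock_bh(&ar->data_lock);
 *
 *	if (!wait_for_completion_timeout(&ar->offchan_tx_completed,
 *					 3 * HZ))
 *		ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
 *			    skb);
 */
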
void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done)
{
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
struct ieee80211_tx_info *info;
struct ath10k_skb_cb *skb_cb;
struct sk_buff *msdu;
lockdep_assert_held(&htt->tx_lock);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
if (tx_done->msdu_id >= htt->max_num_pending_tx) {
ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
tx_done->msdu_id);
return;
}
msdu = htt->pending_tx[tx_done->msdu_id];
skb_cb = ATH10K_SKB_CB(msdu);
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
if (skb_cb->htt.txbuf)
dma_pool_free(htt->tx_pool,
skb_cb->htt.txbuf,
skb_cb->htt.txbuf_paddr);
ath10k_report_offchan_tx(htt->ar, msdu);
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
if (tx_done->discard) {
ieee80211_free_txskb(htt->ar->hw, msdu);
goto exit;
}
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
if (tx_done->no_ack)
info->flags &= ~IEEE80211_TX_STAT_ACK;
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */
exit:
htt->pending_tx[tx_done->msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
__ath10k_htt_tx_dec_pending(htt);
if (htt->num_pending_tx == 0)
wake_up(&htt->empty_tx_wq);
}
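
/*
 * Note (editorial): tx_done records are produced while parsing HTT
 * target-to-host completion indications (htt_rx.c in the upstream
 * layout), and those callers already hold htt->tx_lock, which is what
 * the lockdep assertion above relies on.
 */
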
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
const u8 *addr)
{
struct ath10k_peer *peer;
lockdep_assert_held(&ar->data_lock);
list_for_each_entry(peer, &ar->peers, list) {
if (peer->vdev_id != vdev_id)
continue;
if (memcmp(peer->addr, addr, ETH_ALEN))
continue;
return peer;
}
return NULL;
}

struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
{
struct ath10k_peer *peer;
lockdep_assert_held(&ar->data_lock);
list_for_each_entry(peer, &ar->peers, list)
if (test_bit(peer_id, peer->peer_ids))
return peer;
return NULL;
}

static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
const u8 *addr, bool expect_mapped)
{
int ret;
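
	/* The ({ ... }) statement expression below evaluates to its last
	 * statement, so wait_event_timeout() re-checks the peer list under
	 * data_lock on each wakeup until the mapped state matches
	 * expect_mapped (or the 3*HZ timeout expires). */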
ret = wait_event_timeout(ar->peer_mapping_wq, ({
bool mapped;
spin_lock_bh(&ar->data_lock);
mapped = !!ath10k_peer_find(ar, vdev_id, addr);
spin_unlock_bh(&ar->data_lock);
mapped == expect_mapped;
}), 3*HZ);
if (ret <= 0)
return -ETIMEDOUT;
return 0;
}

int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr)
{
return ath10k_wait_for_peer_common(ar, vdev_id, addr, true);
}

int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr)
{
return ath10k_wait_for_peer_common(ar, vdev_id, addr, false);
}
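
/*
 * Illustrative caller (assumed, mirroring peer creation in mac.c): the
 * WMI command is fire-and-forget, so the driver blocks until the
 * firmware's HTT peer map event confirms the peer exists:
 *
 *	ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
 *	if (ret)
 *		return ret;
 *
 *	ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
 *	if (ret)
 *		return ret;
 */
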
void ath10k_peer_map_event(struct ath10k_htt *htt,
struct htt_peer_map_event *ev)
{
struct ath10k *ar = htt->ar;
struct ath10k_peer *peer;
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
if (!peer) {
peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
if (!peer)
goto exit;
peer->vdev_id = ev->vdev_id;
ether_addr_copy(peer->addr, ev->addr);
list_add(&peer->list, &ar->peers);
wake_up(&ar->peer_mapping_wq);
}
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
ev->vdev_id, ev->addr, ev->peer_id);
set_bit(ev->peer_id, peer->peer_ids);
exit:
spin_unlock_bh(&ar->data_lock);
}

void ath10k_peer_unmap_event(struct ath10k_htt *htt,
struct htt_peer_unmap_event *ev)
{
struct ath10k *ar = htt->ar;
struct ath10k_peer *peer;
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, ev->peer_id);
if (!peer) {
ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n",
ev->peer_id);
goto exit;
}
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
peer->vdev_id, peer->addr, ev->peer_id);
clear_bit(ev->peer_id, peer->peer_ids);
if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
list_del(&peer->list);
kfree(peer);
wake_up(&ar->peer_mapping_wq);
}
exit:
spin_unlock_bh(&ar->data_lock);
}

View file

@ -0,0 +1,38 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _TXRX_H_
#define _TXRX_H_

#include "htt.h"

void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
			  const struct htt_tx_done *tx_done);

struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
				     const u8 *addr);
struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id);
int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id,
				 const u8 *addr);
int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id,
				 const u8 *addr);

void ath10k_peer_map_event(struct ath10k_htt *htt,
			   struct htt_peer_map_event *ev);
void ath10k_peer_unmap_event(struct ath10k_htt *htt,
			     struct htt_peer_unmap_event *ev);

#endif /* _TXRX_H_ */

File diff suppressed because it is too large

File diff suppressed because it is too large