Fixed MTP to work with TWRP

commit f6dfaef42e
Author: awab228
Date: 2018-06-19 23:16:04 +02:00
50820 changed files with 20846062 additions and 0 deletions

@@ -0,0 +1,288 @@
#
# Wireless LAN device configuration
#
menuconfig WLAN
bool "Wireless LAN"
depends on !S390
depends on NET
select WIRELESS
default y
---help---
This section contains all the pre 802.11 and 802.11 wireless
device drivers. For a complete list of drivers and documentation
on them refer to the wireless wiki:
http://wireless.kernel.org/en/users/Drivers
if WLAN
config PCMCIA_RAYCS
tristate "Aviator/Raytheon 2.4GHz wireless support"
depends on PCMCIA
select WIRELESS_EXT
select WEXT_SPY
select WEXT_PRIV
---help---
Say Y here if you intend to attach an Aviator/Raytheon PCMCIA
(PC-card) wireless Ethernet networking card to your computer.
Please read the file <file:Documentation/networking/ray_cs.txt> for
details.
To compile this driver as a module, choose M here: the module will be
called ray_cs. If unsure, say N.
config LIBERTAS_THINFIRM
tristate "Marvell 8xxx Libertas WLAN driver support with thin firmware"
depends on MAC80211
select FW_LOADER
---help---
A library for Marvell Libertas 8xxx devices using thinfirm.
config LIBERTAS_THINFIRM_DEBUG
bool "Enable full debugging output in the Libertas thin firmware module."
depends on LIBERTAS_THINFIRM
---help---
Debugging support.
config LIBERTAS_THINFIRM_USB
tristate "Marvell Libertas 8388 USB 802.11b/g cards with thin firmware"
depends on LIBERTAS_THINFIRM && USB
---help---
A driver for Marvell Libertas 8388 USB devices using thinfirm.
config AIRO
tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
depends on CFG80211 && ISA_DMA_API && (PCI || BROKEN)
select WIRELESS_EXT
select CRYPTO
select WEXT_SPY
select WEXT_PRIV
---help---
This is the standard Linux driver to support Cisco/Aironet ISA and
PCI 802.11 wireless cards.
It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X
- with or without encryption) as well as cards from before the Cisco
acquisition (Aironet 4500, Aironet 4800, Aironet 4800B).
This driver supports both the standard Linux Wireless Extensions
and the Cisco proprietary API, so both the Linux Wireless Tools and the
Cisco Linux utilities can be used to configure the card.
The driver can be compiled as a module and will be named "airo".
config ATMEL
tristate "Atmel at76c50x chipset 802.11b support"
depends on CFG80211 && (PCI || PCMCIA)
select WIRELESS_EXT
select WEXT_PRIV
select FW_LOADER
select CRC32
---help---
A driver for 802.11b wireless cards based on the Atmel fast-vnet
chips. This driver supports standard Linux wireless extensions.
Many cards based on this chipset do not have flash memory
and need their firmware loaded at start-up. If yours is
one of these, you will need to provide a firmware image
to be loaded into the card by the driver. The Atmel
firmware package can be downloaded from
<http://www.thekelleys.org.uk/atmel>
config PCI_ATMEL
tristate "Atmel at76c506 PCI cards"
depends on ATMEL && PCI
---help---
Enable support for PCI and mini-PCI cards containing the
Atmel at76c506 chip.
config PCMCIA_ATMEL
tristate "Atmel at76c502/at76c504 PCMCIA cards"
depends on ATMEL && PCMCIA
select WIRELESS_EXT
select FW_LOADER
select CRC32
---help---
Enable support for PCMCIA cards containing the
Atmel at76c502 and at76c504 chips.
config AT76C50X_USB
tristate "Atmel at76c503/at76c505/at76c505a USB cards"
depends on MAC80211 && USB
select FW_LOADER
---help---
Enable support for USB Wireless devices using Atmel at76c503,
at76c505 or at76c505a chips.
config AIRO_CS
tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
depends on CFG80211 && PCMCIA && (BROKEN || !M32R)
select WIRELESS_EXT
select WEXT_SPY
select WEXT_PRIV
select CRYPTO
select CRYPTO_AES
---help---
This is the standard Linux driver to support Cisco/Aironet PCMCIA
802.11 wireless cards. This driver is the same as the Aironet
driver part of the Linux Pcmcia package.
It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X
- with or without encryption) as well as cards from before the Cisco
acquisition (Aironet 4500, Aironet 4800, Aironet 4800B). It also
supports OEM versions of Cisco cards, such as the DELL TrueMobile 4800
and Xircom 802.11b cards.
This driver supports both the standard Linux Wireless Extensions
and the Cisco proprietary API, so both the Linux Wireless Tools and the
Cisco Linux utilities can be used to configure the card.
config PCMCIA_WL3501
tristate "Planet WL3501 PCMCIA cards"
depends on CFG80211 && PCMCIA
select WIRELESS_EXT
select WEXT_SPY
help
A driver for WL3501 PCMCIA 802.11 wireless cards made by Planet.
It has basic support for Linux wireless extensions and initial
micro support for ethtool.
config PRISM54
tristate 'Intersil Prism GT/Duette/Indigo PCI/Cardbus (DEPRECATED)'
depends on PCI
select WIRELESS_EXT
select WEXT_SPY
select WEXT_PRIV
select FW_LOADER
---help---
This enables support for FullMAC PCI/Cardbus prism54 devices. This
driver is now deprecated in favor of the SoftMAC driver, p54pci.
p54pci supports FullMAC PCI/Cardbus devices as well.
For more information refer to the p54 wiki:
http://wireless.kernel.org/en/users/Drivers/p54
Note: You need a motherboard with DMA support to use any of these cards.
When built as a module, the module will be called prism54.
config USB_ZD1201
tristate "USB ZD1201 based Wireless device support"
depends on CFG80211 && USB
select WIRELESS_EXT
select WEXT_PRIV
select FW_LOADER
---help---
Say Y if you want to use wireless LAN adapters based on the ZyDAS
ZD1201 chip.
This driver makes the adapter appear as a normal Ethernet interface,
typically on wlan0.
The zd1201 device requires external firmware to be loaded.
This can be found at http://linux-lc100020.sourceforge.net/
To compile this driver as a module, choose M here: the
module will be called zd1201.
config USB_NET_RNDIS_WLAN
tristate "Wireless RNDIS USB support"
depends on USB
depends on CFG80211
select USB_NET_DRIVERS
select USB_USBNET
select USB_NET_CDCETHER
select USB_NET_RNDIS_HOST
---help---
This is a driver for wireless RNDIS devices.
These are USB based adapters found in devices such as:
Buffalo WLI-U2-KG125S
U.S. Robotics USR5421
Belkin F5D7051
Linksys WUSB54GSv2
Linksys WUSB54GSC
Asus WL169gE
Eminent EM4045
BT Voyager 1055
Linksys WUSB54GSv1
U.S. Robotics USR5420
BUFFALO WLI-USB-G54
All of these devices are based on the Broadcom 4320 chip, which is
the only wireless RNDIS chip known to date.
If you choose to build a module, it'll be called rndis_wlan.
source "drivers/net/wireless/rtl818x/Kconfig"
config ADM8211
tristate "ADMtek ADM8211 support"
depends on MAC80211 && PCI
select CRC32
select EEPROM_93CX6
---help---
This driver is for ADM8211A, ADM8211B, and ADM8211C based cards.
These are PCI/mini-PCI/Cardbus 802.11b chips found in cards such as:
Xterasys Cardbus XN-2411b
Blitz NetWave Point PC
TrendNet 221pc
Belkin F5D6001
SMC 2635W
Linksys WPC11 v1
Fiberline FL-WL-200X
3com Office Connect (3CRSHPW796)
Corega WLPCIB-11
SMC 2602W V2 EU
D-Link DWL-520 Revision C
However, some of these cards have been replaced with other chips
like the RTL8180L (Xterasys Cardbus XN-2411b, Belkin F5D6001) or
the Ralink RT2400 (SMC2635W) without a model number change.
Thanks to Infineon-ADMtek for their support of this driver.
config MAC80211_HWSIM
tristate "Simulated radio testing tool for mac80211"
depends on MAC80211
---help---
This driver is a developer testing tool that can be used to test
IEEE 802.11 networking stack (mac80211) functionality. This is not
needed for normal wireless LAN usage and is only for testing. See
Documentation/networking/mac80211_hwsim for more information on how
to use this tool.
To compile this driver as a module, choose M here: the module will be
called mac80211_hwsim. If unsure, say N.
config MWL8K
tristate "Marvell 88W8xxx PCI/PCIe Wireless support"
depends on MAC80211 && PCI
---help---
This driver supports Marvell TOPDOG 802.11 wireless cards.
To compile this driver as a module, choose M here: the module
will be called mwl8k. If unsure, say N.
source "drivers/net/wireless/ath/Kconfig"
source "drivers/net/wireless/b43/Kconfig"
source "drivers/net/wireless/b43legacy/Kconfig"
source "drivers/net/wireless/brcm80211/Kconfig"
source "drivers/net/wireless/hostap/Kconfig"
source "drivers/net/wireless/ipw2x00/Kconfig"
source "drivers/net/wireless/iwlwifi/Kconfig"
source "drivers/net/wireless/iwlegacy/Kconfig"
source "drivers/net/wireless/libertas/Kconfig"
source "drivers/net/wireless/orinoco/Kconfig"
source "drivers/net/wireless/p54/Kconfig"
source "drivers/net/wireless/rt2x00/Kconfig"
source "drivers/net/wireless/rtlwifi/Kconfig"
source "drivers/net/wireless/ti/Kconfig"
source "drivers/net/wireless/zd1211rw/Kconfig"
source "drivers/net/wireless/mwifiex/Kconfig"
source "drivers/net/wireless/cw1200/Kconfig"
source "drivers/net/wireless/rsi/Kconfig"
source "drivers/net/wireless/scsc/Kconfig"
endif # WLAN

@@ -0,0 +1,64 @@
#
# Makefile for the Linux Wireless network device drivers.
#
obj-$(CONFIG_IPW2100) += ipw2x00/
obj-$(CONFIG_IPW2200) += ipw2x00/
obj-$(CONFIG_HERMES) += orinoco/
obj-$(CONFIG_AIRO) += airo.o
obj-$(CONFIG_AIRO_CS) += airo_cs.o airo.o
obj-$(CONFIG_ATMEL) += atmel.o
obj-$(CONFIG_PCI_ATMEL) += atmel_pci.o
obj-$(CONFIG_PCMCIA_ATMEL) += atmel_cs.o
obj-$(CONFIG_AT76C50X_USB) += at76c50x-usb.o
obj-$(CONFIG_PRISM54) += prism54/
obj-$(CONFIG_HOSTAP) += hostap/
obj-$(CONFIG_B43) += b43/
obj-$(CONFIG_B43LEGACY) += b43legacy/
obj-$(CONFIG_ZD1211RW) += zd1211rw/
obj-$(CONFIG_RTL8180) += rtl818x/
obj-$(CONFIG_RTL8187) += rtl818x/
obj-$(CONFIG_RTLWIFI) += rtlwifi/
# 16-bit wireless PCMCIA client drivers
obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o
obj-$(CONFIG_USB_NET_RNDIS_WLAN) += rndis_wlan.o
obj-$(CONFIG_USB_ZD1201) += zd1201.o
obj-$(CONFIG_LIBERTAS) += libertas/
obj-$(CONFIG_LIBERTAS_THINFIRM) += libertas_tf/
obj-$(CONFIG_ADM8211) += adm8211.o
obj-$(CONFIG_MWL8K) += mwl8k.o
obj-$(CONFIG_IWLWIFI) += iwlwifi/
obj-$(CONFIG_IWLEGACY) += iwlegacy/
obj-$(CONFIG_RT2X00) += rt2x00/
obj-$(CONFIG_P54_COMMON) += p54/
obj-$(CONFIG_ATH_CARDS) += ath/
obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
obj-$(CONFIG_WL_TI) += ti/
obj-$(CONFIG_MWIFIEX) += mwifiex/
obj-$(CONFIG_BRCMFMAC) += brcm80211/
obj-$(CONFIG_BRCMSMAC) += brcm80211/
obj-$(CONFIG_CW1200) += cw1200/
obj-$(CONFIG_RSI_91X) += rsi/
obj-$(CONFIG_SCSC_WLAN) += scsc/

File diff suppressed because it is too large.

@@ -0,0 +1,602 @@
#ifndef ADM8211_H
#define ADM8211_H
/* ADM8211 Registers */
/* CR32 (SIG) signature */
#define ADM8211_SIG1 0x82011317 /* ADM8211A */
#define ADM8211_SIG2 0x82111317 /* ADM8211B/ADM8211C */
#define ADM8211_CSR_READ(r) ioread32(&priv->map->r)
#define ADM8211_CSR_WRITE(r, val) iowrite32((val), &priv->map->r)
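/*
 * Illustrative sketch, not part of the original header: the two accessor
 * macros above implicitly rely on a local variable named 'priv' of type
 * struct adm8211_priv *, whose 'map' member points at the memory-mapped
 * CSR block defined below.  A hypothetical caller that requests a soft
 * reset and flushes the posted write would look like this:
 *
 *	static void example_soft_reset(struct adm8211_priv *priv)
 *	{
 *		ADM8211_CSR_WRITE(PAR, ADM8211_PAR_SWR);
 *		ADM8211_CSR_READ(PAR);
 *	}
 */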
/* CSR (Host Control and Status Registers) */
struct adm8211_csr {
__le32 PAR; /* 0x00 CSR0 */
__le32 FRCTL; /* 0x04 CSR0A */
__le32 TDR; /* 0x08 CSR1 */
__le32 WTDP; /* 0x0C CSR1A */
__le32 RDR; /* 0x10 CSR2 */
__le32 WRDP; /* 0x14 CSR2A */
__le32 RDB; /* 0x18 CSR3 */
__le32 TDBH; /* 0x1C CSR3A */
__le32 TDBD; /* 0x20 CSR4 */
__le32 TDBP; /* 0x24 CSR4A */
__le32 STSR; /* 0x28 CSR5 */
__le32 TDBB; /* 0x2C CSR5A */
__le32 NAR; /* 0x30 CSR6 */
__le32 CSR6A; /* reserved */
__le32 IER; /* 0x38 CSR7 */
__le32 TKIPSCEP; /* 0x3C CSR7A */
__le32 LPC; /* 0x40 CSR8 */
__le32 CSR_TEST1; /* 0x44 CSR8A */
__le32 SPR; /* 0x48 CSR9 */
__le32 CSR_TEST0; /* 0x4C CSR9A */
__le32 WCSR; /* 0x50 CSR10 */
__le32 WPDR; /* 0x54 CSR10A */
__le32 GPTMR; /* 0x58 CSR11 */
__le32 GPIO; /* 0x5C CSR11A */
__le32 BBPCTL; /* 0x60 CSR12 */
__le32 SYNCTL; /* 0x64 CSR12A */
__le32 PLCPHD; /* 0x68 CSR13 */
__le32 MMIWA; /* 0x6C CSR13A */
__le32 MMIRD0; /* 0x70 CSR14 */
__le32 MMIRD1; /* 0x74 CSR14A */
__le32 TXBR; /* 0x78 CSR15 */
__le32 SYNDATA; /* 0x7C CSR15A */
__le32 ALCS; /* 0x80 CSR16 */
__le32 TOFS2; /* 0x84 CSR17 */
__le32 CMDR; /* 0x88 CSR18 */
__le32 PCIC; /* 0x8C CSR19 */
__le32 PMCSR; /* 0x90 CSR20 */
__le32 PAR0; /* 0x94 CSR21 */
__le32 PAR1; /* 0x98 CSR22 */
__le32 MAR0; /* 0x9C CSR23 */
__le32 MAR1; /* 0xA0 CSR24 */
__le32 ATIMDA0; /* 0xA4 CSR25 */
__le32 ABDA1; /* 0xA8 CSR26 */
__le32 BSSID0; /* 0xAC CSR27 */
__le32 TXLMT; /* 0xB0 CSR28 */
__le32 MIBCNT; /* 0xB4 CSR29 */
__le32 BCNT; /* 0xB8 CSR30 */
__le32 TSFTH; /* 0xBC CSR31 */
__le32 TSC; /* 0xC0 CSR32 */
__le32 SYNRF; /* 0xC4 CSR33 */
__le32 BPLI; /* 0xC8 CSR34 */
__le32 CAP0; /* 0xCC CSR35 */
__le32 CAP1; /* 0xD0 CSR36 */
__le32 RMD; /* 0xD4 CSR37 */
__le32 CFPP; /* 0xD8 CSR38 */
__le32 TOFS0; /* 0xDC CSR39 */
__le32 TOFS1; /* 0xE0 CSR40 */
__le32 IFST; /* 0xE4 CSR41 */
__le32 RSPT; /* 0xE8 CSR42 */
__le32 TSFTL; /* 0xEC CSR43 */
__le32 WEPCTL; /* 0xF0 CSR44 */
__le32 WESK; /* 0xF4 CSR45 */
__le32 WEPCNT; /* 0xF8 CSR46 */
__le32 MACTEST; /* 0xFC CSR47 */
__le32 FER; /* 0x100 */
__le32 FEMR; /* 0x104 */
__le32 FPSR; /* 0x108 */
__le32 FFER; /* 0x10C */
} __packed;
/* CSR0 - PAR (PCI Address Register) */
#define ADM8211_PAR_MWIE (1 << 24)
#define ADM8211_PAR_MRLE (1 << 23)
#define ADM8211_PAR_MRME (1 << 21)
#define ADM8211_PAR_RAP ((1 << 18) | (1 << 17))
#define ADM8211_PAR_CAL ((1 << 15) | (1 << 14))
#define ADM8211_PAR_PBL 0x00003f00
#define ADM8211_PAR_BLE (1 << 7)
#define ADM8211_PAR_DSL 0x0000007c
#define ADM8211_PAR_BAR (1 << 1)
#define ADM8211_PAR_SWR (1 << 0)
/* CSR1 - FRCTL (Frame Control Register) */
#define ADM8211_FRCTL_PWRMGT (1 << 31)
#define ADM8211_FRCTL_MAXPSP (1 << 27)
#define ADM8211_FRCTL_DRVPRSP (1 << 26)
#define ADM8211_FRCTL_DRVBCON (1 << 25)
#define ADM8211_FRCTL_AID 0x0000ffff
#define ADM8211_FRCTL_AID_ON 0x0000c000
/* CSR5 - STSR (Status Register) */
#define ADM8211_STSR_PCF (1 << 31)
#define ADM8211_STSR_BCNTC (1 << 30)
#define ADM8211_STSR_GPINT (1 << 29)
#define ADM8211_STSR_LinkOff (1 << 28)
#define ADM8211_STSR_ATIMTC (1 << 27)
#define ADM8211_STSR_TSFTF (1 << 26)
#define ADM8211_STSR_TSCZ (1 << 25)
#define ADM8211_STSR_LinkOn (1 << 24)
#define ADM8211_STSR_SQL (1 << 23)
#define ADM8211_STSR_WEPTD (1 << 22)
#define ADM8211_STSR_ATIME (1 << 21)
#define ADM8211_STSR_TBTT (1 << 20)
#define ADM8211_STSR_NISS (1 << 16)
#define ADM8211_STSR_AISS (1 << 15)
#define ADM8211_STSR_TEIS (1 << 14)
#define ADM8211_STSR_FBE (1 << 13)
#define ADM8211_STSR_REIS (1 << 12)
#define ADM8211_STSR_GPTT (1 << 11)
#define ADM8211_STSR_RPS (1 << 8)
#define ADM8211_STSR_RDU (1 << 7)
#define ADM8211_STSR_RCI (1 << 6)
#define ADM8211_STSR_TUF (1 << 5)
#define ADM8211_STSR_TRT (1 << 4)
#define ADM8211_STSR_TLT (1 << 3)
#define ADM8211_STSR_TDU (1 << 2)
#define ADM8211_STSR_TPS (1 << 1)
#define ADM8211_STSR_TCI (1 << 0)
/* CSR6 - NAR (Network Access Register) */
#define ADM8211_NAR_TXCF (1 << 31)
#define ADM8211_NAR_HF (1 << 30)
#define ADM8211_NAR_UTR (1 << 29)
#define ADM8211_NAR_SQ (1 << 28)
#define ADM8211_NAR_CFP (1 << 27)
#define ADM8211_NAR_SF (1 << 21)
#define ADM8211_NAR_TR ((1 << 15) | (1 << 14))
#define ADM8211_NAR_ST (1 << 13)
#define ADM8211_NAR_OM ((1 << 11) | (1 << 10))
#define ADM8211_NAR_MM (1 << 7)
#define ADM8211_NAR_PR (1 << 6)
#define ADM8211_NAR_EA (1 << 5)
#define ADM8211_NAR_PB (1 << 3)
#define ADM8211_NAR_STPDMA (1 << 2)
#define ADM8211_NAR_SR (1 << 1)
#define ADM8211_NAR_CTX (1 << 0)
#define ADM8211_IDLE() \
do { \
if (priv->nar & (ADM8211_NAR_SR | ADM8211_NAR_ST)) { \
ADM8211_CSR_WRITE(NAR, priv->nar & \
~(ADM8211_NAR_SR | ADM8211_NAR_ST));\
ADM8211_CSR_READ(NAR); \
msleep(20); \
} \
} while (0)
#define ADM8211_IDLE_RX() \
do { \
if (priv->nar & ADM8211_NAR_SR) { \
ADM8211_CSR_WRITE(NAR, priv->nar & ~ADM8211_NAR_SR); \
ADM8211_CSR_READ(NAR); \
mdelay(20); \
} \
} while (0)
#define ADM8211_RESTORE() \
do { \
if (priv->nar & (ADM8211_NAR_SR | ADM8211_NAR_ST)) \
ADM8211_CSR_WRITE(NAR, priv->nar); \
} while (0)
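/*
 * Illustrative usage sketch, not from the original source: the three
 * macros above are meant to bracket a hardware update, i.e. quiesce RX/TX
 * DMA, touch the registers, then restore the saved NAR value.  Assuming
 * the same implicit 'priv' variable and a hypothetical new BSSID word:
 *
 *	ADM8211_IDLE();
 *	ADM8211_CSR_WRITE(BSSID0, new_bssid_lo);
 *	ADM8211_RESTORE();
 */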
/* CSR7 - IER (Interrupt Enable Register) */
#define ADM8211_IER_PCFIE (1 << 31)
#define ADM8211_IER_BCNTCIE (1 << 30)
#define ADM8211_IER_GPIE (1 << 29)
#define ADM8211_IER_LinkOffIE (1 << 28)
#define ADM8211_IER_ATIMTCIE (1 << 27)
#define ADM8211_IER_TSFTFIE (1 << 26)
#define ADM8211_IER_TSCZE (1 << 25)
#define ADM8211_IER_LinkOnIE (1 << 24)
#define ADM8211_IER_SQLIE (1 << 23)
#define ADM8211_IER_WEPIE (1 << 22)
#define ADM8211_IER_ATIMEIE (1 << 21)
#define ADM8211_IER_TBTTIE (1 << 20)
#define ADM8211_IER_NIE (1 << 16)
#define ADM8211_IER_AIE (1 << 15)
#define ADM8211_IER_TEIE (1 << 14)
#define ADM8211_IER_FBEIE (1 << 13)
#define ADM8211_IER_REIE (1 << 12)
#define ADM8211_IER_GPTIE (1 << 11)
#define ADM8211_IER_RSIE (1 << 8)
#define ADM8211_IER_RUIE (1 << 7)
#define ADM8211_IER_RCIE (1 << 6)
#define ADM8211_IER_TUIE (1 << 5)
#define ADM8211_IER_TRTIE (1 << 4)
#define ADM8211_IER_TLTTIE (1 << 3)
#define ADM8211_IER_TDUIE (1 << 2)
#define ADM8211_IER_TPSIE (1 << 1)
#define ADM8211_IER_TCIE (1 << 0)
/* CSR9 - SPR (Serial Port Register) */
#define ADM8211_SPR_SRS (1 << 11)
#define ADM8211_SPR_SDO (1 << 3)
#define ADM8211_SPR_SDI (1 << 2)
#define ADM8211_SPR_SCLK (1 << 1)
#define ADM8211_SPR_SCS (1 << 0)
/* CSR9A - CSR_TEST0 */
#define ADM8211_CSR_TEST0_EPNE (1 << 18)
#define ADM8211_CSR_TEST0_EPSNM (1 << 17)
#define ADM8211_CSR_TEST0_EPTYP (1 << 16)
#define ADM8211_CSR_TEST0_EPRLD (1 << 15)
/* CSR10 - WCSR (Wake-up Control/Status Register) */
#define ADM8211_WCSR_CRCT (1 << 30)
#define ADM8211_WCSR_TSFTWE (1 << 20)
#define ADM8211_WCSR_TIMWE (1 << 19)
#define ADM8211_WCSR_ATIMWE (1 << 18)
#define ADM8211_WCSR_KEYWE (1 << 17)
#define ADM8211_WCSR_MPRE (1 << 9)
#define ADM8211_WCSR_LSOE (1 << 8)
#define ADM8211_WCSR_KEYUP (1 << 6)
#define ADM8211_WCSR_TSFTW (1 << 5)
#define ADM8211_WCSR_TIMW (1 << 4)
#define ADM8211_WCSR_ATIMW (1 << 3)
#define ADM8211_WCSR_MPR (1 << 1)
#define ADM8211_WCSR_LSO (1 << 0)
/* CSR11A - GPIO */
#define ADM8211_CSR_GPIO_EN5 (1 << 17)
#define ADM8211_CSR_GPIO_EN4 (1 << 16)
#define ADM8211_CSR_GPIO_EN3 (1 << 15)
#define ADM8211_CSR_GPIO_EN2 (1 << 14)
#define ADM8211_CSR_GPIO_EN1 (1 << 13)
#define ADM8211_CSR_GPIO_EN0 (1 << 12)
#define ADM8211_CSR_GPIO_O5 (1 << 11)
#define ADM8211_CSR_GPIO_O4 (1 << 10)
#define ADM8211_CSR_GPIO_O3 (1 << 9)
#define ADM8211_CSR_GPIO_O2 (1 << 8)
#define ADM8211_CSR_GPIO_O1 (1 << 7)
#define ADM8211_CSR_GPIO_O0 (1 << 6)
#define ADM8211_CSR_GPIO_IN 0x0000003f
/* CSR12 - BBPCTL (BBP Control port) */
#define ADM8211_BBPCTL_MMISEL (1 << 31)
#define ADM8211_BBPCTL_SPICADD (0x7F << 24)
#define ADM8211_BBPCTL_RF3000 (0x20 << 24)
#define ADM8211_BBPCTL_TXCE (1 << 23)
#define ADM8211_BBPCTL_RXCE (1 << 22)
#define ADM8211_BBPCTL_CCAP (1 << 21)
#define ADM8211_BBPCTL_TYPE 0x001c0000
#define ADM8211_BBPCTL_WR (1 << 17)
#define ADM8211_BBPCTL_RD (1 << 16)
#define ADM8211_BBPCTL_ADDR 0x0000ff00
#define ADM8211_BBPCTL_DATA 0x000000ff
/* CSR12A - SYNCTL (Synthesizer Control port) */
#define ADM8211_SYNCTL_WR (1 << 31)
#define ADM8211_SYNCTL_RD (1 << 30)
#define ADM8211_SYNCTL_CS0 (1 << 29)
#define ADM8211_SYNCTL_CS1 (1 << 28)
#define ADM8211_SYNCTL_CAL (1 << 27)
#define ADM8211_SYNCTL_SELCAL (1 << 26)
#define ADM8211_SYNCTL_RFtype ((1 << 24) | (1 << 23) | (1 << 22))
#define ADM8211_SYNCTL_RFMD (1 << 22)
#define ADM8211_SYNCTL_GENERAL (0x7 << 22)
/* SYNCTL 21:0 Data (Si4126: 18-bit data, 4-bit address) */
/* CSR18 - CMDR (Command Register) */
#define ADM8211_CMDR_PM (1 << 19)
#define ADM8211_CMDR_APM (1 << 18)
#define ADM8211_CMDR_RTE (1 << 4)
#define ADM8211_CMDR_DRT ((1 << 3) | (1 << 2))
#define ADM8211_CMDR_DRT_8DW (0x0 << 2)
#define ADM8211_CMDR_DRT_16DW (0x1 << 2)
#define ADM8211_CMDR_DRT_SF (0x2 << 2)
/* CSR33 - SYNRF (SYNRF direct control) */
#define ADM8211_SYNRF_SELSYN (1 << 31)
#define ADM8211_SYNRF_SELRF (1 << 30)
#define ADM8211_SYNRF_LERF (1 << 29)
#define ADM8211_SYNRF_LEIF (1 << 28)
#define ADM8211_SYNRF_SYNCLK (1 << 27)
#define ADM8211_SYNRF_SYNDATA (1 << 26)
#define ADM8211_SYNRF_PE1 (1 << 25)
#define ADM8211_SYNRF_PE2 (1 << 24)
#define ADM8211_SYNRF_PA_PE (1 << 23)
#define ADM8211_SYNRF_TR_SW (1 << 22)
#define ADM8211_SYNRF_TR_SWN (1 << 21)
#define ADM8211_SYNRF_RADIO (1 << 20)
#define ADM8211_SYNRF_CAL_EN (1 << 19)
#define ADM8211_SYNRF_PHYRST (1 << 18)
#define ADM8211_SYNRF_IF_SELECT_0 (1 << 31)
#define ADM8211_SYNRF_IF_SELECT_1 ((1 << 31) | (1 << 28))
#define ADM8211_SYNRF_WRITE_SYNDATA_0 (1 << 31)
#define ADM8211_SYNRF_WRITE_SYNDATA_1 ((1 << 31) | (1 << 26))
#define ADM8211_SYNRF_WRITE_CLOCK_0 (1 << 31)
#define ADM8211_SYNRF_WRITE_CLOCK_1 ((1 << 31) | (1 << 27))
/* CSR44 - WEPCTL (WEP Control) */
#define ADM8211_WEPCTL_WEPENABLE (1 << 31)
#define ADM8211_WEPCTL_WPAENABLE (1 << 30)
#define ADM8211_WEPCTL_CURRENT_TABLE (1 << 29)
#define ADM8211_WEPCTL_TABLE_WR (1 << 28)
#define ADM8211_WEPCTL_TABLE_RD (1 << 27)
#define ADM8211_WEPCTL_WEPRXBYP (1 << 25)
#define ADM8211_WEPCTL_SEL_WEPTABLE (1 << 23)
#define ADM8211_WEPCTL_ADDR (0x000001ff)
/* CSR45 - WESK (Data Entry for Share/Individual Key) */
#define ADM8211_WESK_DATA (0x0000ffff)
/* FER (Function Event Register) */
#define ADM8211_FER_INTR_EV_ENT (1 << 15)
/* Si4126 RF Synthesizer - Control Registers */
#define SI4126_MAIN_CONF 0
#define SI4126_PHASE_DET_GAIN 1
#define SI4126_POWERDOWN 2
#define SI4126_RF1_N_DIV 3 /* only Si4136 */
#define SI4126_RF2_N_DIV 4
#define SI4126_IF_N_DIV 5
#define SI4126_RF1_R_DIV 6 /* only Si4136 */
#define SI4126_RF2_R_DIV 7
#define SI4126_IF_R_DIV 8
/* Main Configuration */
#define SI4126_MAIN_XINDIV2 (1 << 6)
#define SI4126_MAIN_IFDIV ((1 << 11) | (1 << 10))
/* Powerdown */
#define SI4126_POWERDOWN_PDIB (1 << 1)
#define SI4126_POWERDOWN_PDRB (1 << 0)
/* RF3000 BBP - Control Port Registers */
/* 0x00 - reserved */
#define RF3000_MODEM_CTRL__RX_STATUS 0x01
#define RF3000_CCA_CTRL 0x02
#define RF3000_DIVERSITY__RSSI 0x03
#define RF3000_RX_SIGNAL_FIELD 0x04
#define RF3000_RX_LEN_MSB 0x05
#define RF3000_RX_LEN_LSB 0x06
#define RF3000_RX_SERVICE_FIELD 0x07
#define RF3000_TX_VAR_GAIN__TX_LEN_EXT 0x11
#define RF3000_TX_LEN_MSB 0x12
#define RF3000_TX_LEN_LSB 0x13
#define RF3000_LOW_GAIN_CALIB 0x14
#define RF3000_HIGH_GAIN_CALIB 0x15
/* ADM8211 revisions */
#define ADM8211_REV_AB 0x11
#define ADM8211_REV_AF 0x15
#define ADM8211_REV_BA 0x20
#define ADM8211_REV_CA 0x30
struct adm8211_desc {
__le32 status;
__le32 length;
__le32 buffer1;
__le32 buffer2;
};
#define RDES0_STATUS_OWN (1 << 31)
#define RDES0_STATUS_ES (1 << 30)
#define RDES0_STATUS_SQL (1 << 29)
#define RDES0_STATUS_DE (1 << 28)
#define RDES0_STATUS_FS (1 << 27)
#define RDES0_STATUS_LS (1 << 26)
#define RDES0_STATUS_PCF (1 << 25)
#define RDES0_STATUS_SFDE (1 << 24)
#define RDES0_STATUS_SIGE (1 << 23)
#define RDES0_STATUS_CRC16E (1 << 22)
#define RDES0_STATUS_RXTOE (1 << 21)
#define RDES0_STATUS_CRC32E (1 << 20)
#define RDES0_STATUS_ICVE (1 << 19)
#define RDES0_STATUS_DA1 (1 << 17)
#define RDES0_STATUS_DA0 (1 << 16)
#define RDES0_STATUS_RXDR ((1 << 15) | (1 << 14) | (1 << 13) | (1 << 12))
#define RDES0_STATUS_FL (0x00000fff)
#define RDES1_CONTROL_RER (1 << 25)
#define RDES1_CONTROL_RCH (1 << 24)
#define RDES1_CONTROL_RBS2 (0x00fff000)
#define RDES1_CONTROL_RBS1 (0x00000fff)
#define RDES1_STATUS_RSSI (0x0000007f)
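/*
 * Illustrative sketch, not part of the original header: an RX poll loop
 * typically tests the descriptor OWN bit and then extracts the frame
 * length field, along these lines ('entry' is a hypothetical ring index
 * local to the caller):
 *
 *	u32 status = le32_to_cpu(priv->rx_ring[entry].status);
 *
 *	if (status & RDES0_STATUS_OWN)
 *		return;
 *	unsigned int len = status & RDES0_STATUS_FL;
 */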
#define TDES0_CONTROL_OWN (1 << 31)
#define TDES0_CONTROL_DONE (1 << 30)
#define TDES0_CONTROL_TXDR (0x0ff00000)
#define TDES0_STATUS_OWN (1 << 31)
#define TDES0_STATUS_DONE (1 << 30)
#define TDES0_STATUS_ES (1 << 29)
#define TDES0_STATUS_TLT (1 << 28)
#define TDES0_STATUS_TRT (1 << 27)
#define TDES0_STATUS_TUF (1 << 26)
#define TDES0_STATUS_TRO (1 << 25)
#define TDES0_STATUS_SOFBR (1 << 24)
#define TDES0_STATUS_ACR (0x00000fff)
#define TDES1_CONTROL_IC (1 << 31)
#define TDES1_CONTROL_LS (1 << 30)
#define TDES1_CONTROL_FS (1 << 29)
#define TDES1_CONTROL_TER (1 << 25)
#define TDES1_CONTROL_TCH (1 << 24)
#define TDES1_CONTROL_RBS2 (0x00fff000)
#define TDES1_CONTROL_RBS1 (0x00000fff)
/* SRAM offsets */
#define ADM8211_SRAM(x) (priv->pdev->revision < ADM8211_REV_BA ? \
ADM8211_SRAM_A_ ## x : ADM8211_SRAM_B_ ## x)
#define ADM8211_SRAM_INDIV_KEY 0x0000
#define ADM8211_SRAM_A_SHARE_KEY 0x0160
#define ADM8211_SRAM_B_SHARE_KEY 0x00c0
#define ADM8211_SRAM_A_SSID 0x0180
#define ADM8211_SRAM_B_SSID 0x00d4
#define ADM8211_SRAM_SSID ADM8211_SRAM(SSID)
#define ADM8211_SRAM_A_SUPP_RATE 0x0191
#define ADM8211_SRAM_B_SUPP_RATE 0x00dd
#define ADM8211_SRAM_SUPP_RATE ADM8211_SRAM(SUPP_RATE)
#define ADM8211_SRAM_A_SIZE 0x0200
#define ADM8211_SRAM_B_SIZE 0x01c0
#define ADM8211_SRAM_SIZE ADM8211_SRAM(SIZE)
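/*
 * Illustrative expansion, not in the original header: ADM8211_SRAM()
 * selects the rev-A or rev-B offset at run time via token pasting, so
 *
 *	u32 ssid_offset = ADM8211_SRAM(SSID);
 *
 * expands to
 *
 *	priv->pdev->revision < ADM8211_REV_BA ?
 *		ADM8211_SRAM_A_SSID : ADM8211_SRAM_B_SSID
 */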
struct adm8211_rx_ring_info {
struct sk_buff *skb;
dma_addr_t mapping;
};
struct adm8211_tx_ring_info {
struct sk_buff *skb;
dma_addr_t mapping;
size_t hdrlen;
};
#define PLCP_SIGNAL_1M 0x0a
#define PLCP_SIGNAL_2M 0x14
#define PLCP_SIGNAL_5M5 0x37
#define PLCP_SIGNAL_11M 0x6e
struct adm8211_tx_hdr {
u8 da[6];
u8 signal; /* PLCP signal / TX rate in 100 Kbps */
u8 service;
__le16 frame_body_size;
__le16 frame_control;
__le16 plcp_frag_tail_len;
__le16 plcp_frag_head_len;
__le16 dur_frag_tail;
__le16 dur_frag_head;
u8 addr4[6];
#define ADM8211_TXHDRCTL_SHORT_PREAMBLE (1 << 0)
#define ADM8211_TXHDRCTL_MORE_FRAG (1 << 1)
#define ADM8211_TXHDRCTL_MORE_DATA (1 << 2)
#define ADM8211_TXHDRCTL_FRAG_NO (1 << 3) /* ? */
#define ADM8211_TXHDRCTL_ENABLE_RTS (1 << 4)
#define ADM8211_TXHDRCTL_ENABLE_WEP_ENGINE (1 << 5)
#define ADM8211_TXHDRCTL_ENABLE_EXTEND_HEADER (1 << 15) /* ? */
__le16 header_control;
__le16 frag;
u8 reserved_0;
u8 retry_limit;
u32 wep2key0;
u32 wep2key1;
u32 wep2key2;
u32 wep2key3;
u8 keyid;
u8 entry_control; // huh??
u16 reserved_1;
u32 reserved_2;
} __packed;
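/*
 * Illustrative note, not part of the original header: the 'signal' field
 * above carries the PLCP SIGNAL value, i.e. the TX rate in units of
 * 100 kbit/s (PLCP_SIGNAL_11M == 0x6e == 110 == 11.0 Mbit/s), so a
 * hypothetical 'txhdr' would be filled as:
 *
 *	txhdr->signal = PLCP_SIGNAL_11M;
 */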
#define RX_COPY_BREAK 128
#define RX_PKT_SIZE 2500
struct adm8211_eeprom {
__le16 signature; /* 0x00 */
u8 major_version; /* 0x02 */
u8 minor_version; /* 0x03 */
u8 reserved_1[4]; /* 0x04 */
u8 hwaddr[6]; /* 0x08 */
u8 reserved_2[8]; /* 0x0E */
__le16 cr49; /* 0x16 */
u8 cr03; /* 0x18 */
u8 cr28; /* 0x19 */
u8 cr29; /* 0x1A */
u8 country_code; /* 0x1B */
/* specific bbp types */
#define ADM8211_BBP_RFMD3000 0x00
#define ADM8211_BBP_RFMD3002 0x01
#define ADM8211_BBP_ADM8011 0x04
u8 specific_bbptype; /* 0x1C */
u8 specific_rftype; /* 0x1D */
u8 reserved_3[2]; /* 0x1E */
__le16 device_id; /* 0x20 */
__le16 vendor_id; /* 0x22 */
__le16 subsystem_id; /* 0x24 */
__le16 subsystem_vendor_id; /* 0x26 */
u8 maxlat; /* 0x28 */
u8 mingnt; /* 0x29 */
__le16 cis_pointer_low; /* 0x2A */
__le16 cis_pointer_high; /* 0x2C */
__le16 csr18; /* 0x2E */
u8 reserved_4[16]; /* 0x30 */
u8 d1_pwrdara; /* 0x40 */
u8 d0_pwrdara; /* 0x41 */
u8 d3_pwrdara; /* 0x42 */
u8 d2_pwrdara; /* 0x43 */
u8 antenna_power[14]; /* 0x44 */
__le16 cis_wordcnt; /* 0x52 */
u8 tx_power[14]; /* 0x54 */
u8 lpf_cutoff[14]; /* 0x62 */
u8 lnags_threshold[14]; /* 0x70 */
__le16 checksum; /* 0x7E */
u8 cis_data[0]; /* 0x80, 384 bytes */
} __packed;
struct adm8211_priv {
struct pci_dev *pdev;
spinlock_t lock;
struct adm8211_csr __iomem *map;
struct adm8211_desc *rx_ring;
struct adm8211_desc *tx_ring;
dma_addr_t rx_ring_dma;
dma_addr_t tx_ring_dma;
struct adm8211_rx_ring_info *rx_buffers;
struct adm8211_tx_ring_info *tx_buffers;
unsigned int rx_ring_size, tx_ring_size;
unsigned int cur_tx, dirty_tx, cur_rx;
struct ieee80211_low_level_stats stats;
struct ieee80211_supported_band band;
struct ieee80211_channel channels[14];
int mode;
int channel;
u8 bssid[ETH_ALEN];
u8 soft_rx_crc;
u8 retry_limit;
u8 ant_power;
u8 tx_power;
u8 lpf_cutoff;
u8 lnags_threshold;
struct adm8211_eeprom *eeprom;
size_t eeprom_len;
u32 nar;
#define ADM8211_TYPE_INTERSIL 0x00
#define ADM8211_TYPE_RFMD 0x01
#define ADM8211_TYPE_MARVEL 0x02
#define ADM8211_TYPE_AIROHA 0x03
#define ADM8211_TYPE_ADMTEK 0x05
unsigned int rf_type:3;
unsigned int bbp_type:3;
u8 specific_bbptype;
enum {
ADM8211_RFMD2948 = 0x0,
ADM8211_RFMD2958 = 0x1,
ADM8211_RFMD2958_RF3000_CONTROL_POWER = 0x2,
ADM8211_MAX2820 = 0x8,
ADM8211_AL2210L = 0xC, /* Airoha */
} transceiver_type;
};
struct ieee80211_chan_range {
u8 min;
u8 max;
};
static const struct ieee80211_chan_range cranges[] = {
{1, 11}, /* FCC */
{1, 11}, /* IC */
{1, 13}, /* ETSI */
{10, 11}, /* SPAIN */
{10, 13}, /* FRANCE */
{14, 14}, /* MMK */
{1, 14}, /* MMK2 */
};
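/*
 * Illustrative sketch, not part of the original header: cranges[] is
 * indexed by the regulatory-domain code read from the EEPROM, so a
 * bounds-checked channel test could look like this (function name is
 * hypothetical):
 *
 *	static bool example_adm8211_channel_ok(struct adm8211_priv *priv, u8 chan)
 *	{
 *		u8 cc = priv->eeprom->country_code;
 *
 *		if (cc >= ARRAY_SIZE(cranges))
 *			return false;
 *		return chan >= cranges[cc].min && chan <= cranges[cc].max;
 *	}
 */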
#endif /* ADM8211_H */

drivers/net/wireless/airo.c (new file, 8225 additions): diff suppressed because it is too large.

@@ -0,0 +1,9 @@
#ifndef _AIRO_H_
#define _AIRO_H_
struct net_device *init_airo_card(unsigned short irq, int port, int is_pcmcia,
struct device *dmdev);
int reset_airo_card(struct net_device *dev);
void stop_airo_card(struct net_device *dev, int freeres);
#endif /* _AIRO_H_ */

@@ -0,0 +1,222 @@
/*======================================================================
Aironet driver for 4500 and 4800 series cards
This code is released under both the GPL version 2 and BSD licenses.
Either license may be used. The respective licenses are found at
the end of this file.
This code was developed by Benjamin Reed <breed@users.sourceforge.net>
including portions of which come from the Aironet PC4500
Developer's Reference Manual and used with permission. Copyright
(C) 1999 Benjamin Reed. All Rights Reserved. Permission to use
code in the Developer's manual was granted for this driver by
Aironet.
In addition this module was derived from dummy_cs.
The initial developer of dummy_cs is David A. Hinds
<dahinds@users.sourceforge.net>. Portions created by David A. Hinds
are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
======================================================================*/
#ifdef __IN_PCMCIA_PACKAGE__
#include <pcmcia/k_compat.h>
#endif
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
#include <linux/io.h>
#include "airo.h"
/*====================================================================*/
MODULE_AUTHOR("Benjamin Reed");
MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet "
"cards. This is the module that links the PCMCIA card "
"with the airo module.");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340 PCMCIA cards");
/*====================================================================*/
static int airo_config(struct pcmcia_device *link);
static void airo_release(struct pcmcia_device *link);
static void airo_detach(struct pcmcia_device *p_dev);
struct local_info {
struct net_device *eth_dev;
};
static int airo_probe(struct pcmcia_device *p_dev)
{
struct local_info *local;
dev_dbg(&p_dev->dev, "airo_attach()\n");
/* Allocate space for private device-specific data */
local = kzalloc(sizeof(*local), GFP_KERNEL);
if (!local)
return -ENOMEM;
p_dev->priv = local;
return airo_config(p_dev);
} /* airo_attach */
static void airo_detach(struct pcmcia_device *link)
{
dev_dbg(&link->dev, "airo_detach\n");
airo_release(link);
if (((struct local_info *)link->priv)->eth_dev) {
stop_airo_card(((struct local_info *)link->priv)->eth_dev,
0);
}
((struct local_info *)link->priv)->eth_dev = NULL;
kfree(link->priv);
} /* airo_detach */
static int airo_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
{
if (p_dev->config_index == 0)
return -EINVAL;
return pcmcia_request_io(p_dev);
}
static int airo_config(struct pcmcia_device *link)
{
struct local_info *dev;
int ret;
dev = link->priv;
dev_dbg(&link->dev, "airo_config\n");
link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP |
CONF_AUTO_AUDIO | CONF_AUTO_SET_IO;
ret = pcmcia_loop_config(link, airo_cs_config_check, NULL);
if (ret)
goto failed;
if (!link->irq)
goto failed;
ret = pcmcia_enable_device(link);
if (ret)
goto failed;
((struct local_info *)link->priv)->eth_dev =
init_airo_card(link->irq,
link->resource[0]->start, 1, &link->dev);
if (!((struct local_info *)link->priv)->eth_dev)
goto failed;
return 0;
failed:
airo_release(link);
return -ENODEV;
} /* airo_config */
static void airo_release(struct pcmcia_device *link)
{
dev_dbg(&link->dev, "airo_release\n");
pcmcia_disable_device(link);
}
static int airo_suspend(struct pcmcia_device *link)
{
struct local_info *local = link->priv;
netif_device_detach(local->eth_dev);
return 0;
}
static int airo_resume(struct pcmcia_device *link)
{
struct local_info *local = link->priv;
if (link->open) {
reset_airo_card(local->eth_dev);
netif_device_attach(local->eth_dev);
}
return 0;
}
static const struct pcmcia_device_id airo_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0x015f, 0x000a),
PCMCIA_DEVICE_MANF_CARD(0x015f, 0x0005),
PCMCIA_DEVICE_MANF_CARD(0x015f, 0x0007),
PCMCIA_DEVICE_MANF_CARD(0x0105, 0x0007),
PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, airo_ids);
static struct pcmcia_driver airo_driver = {
.owner = THIS_MODULE,
.name = "airo_cs",
.probe = airo_probe,
.remove = airo_detach,
.id_table = airo_ids,
.suspend = airo_suspend,
.resume = airo_resume,
};
module_pcmcia_driver(airo_driver);
/*
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
In addition:
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/

File diff suppressed because it is too large.

@@ -0,0 +1,466 @@
/*
* Copyright (c) 2002,2003 Oliver Kurth
* (c) 2003,2004 Joerg Albert <joerg.albert@gmx.de>
* (c) 2007 Guido Guenther <agx@sigxcpu.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This driver was based on information from the Sourceforge driver
* released and maintained by Atmel:
*
* http://sourceforge.net/projects/atmelwlandriver/
*
* Although the code was completely re-written,
* it would have been impossible without Atmel's decision to
* release an Open Source driver (unfortunately the firmware was
* kept binary only). Thanks for that decision to Atmel!
*/
#ifndef _AT76_USB_H
#define _AT76_USB_H
/* Board types */
enum board_type {
BOARD_503_ISL3861 = 1,
BOARD_503_ISL3863 = 2,
BOARD_503 = 3,
BOARD_503_ACC = 4,
BOARD_505 = 5,
BOARD_505_2958 = 6,
BOARD_505A = 7,
BOARD_505AMX = 8
};
#define CMD_STATUS_IDLE 0x00
#define CMD_STATUS_COMPLETE 0x01
#define CMD_STATUS_UNKNOWN 0x02
#define CMD_STATUS_INVALID_PARAMETER 0x03
#define CMD_STATUS_FUNCTION_NOT_SUPPORTED 0x04
#define CMD_STATUS_TIME_OUT 0x07
#define CMD_STATUS_IN_PROGRESS 0x08
#define CMD_STATUS_HOST_FAILURE 0xff
#define CMD_STATUS_SCAN_FAILED 0xf0
/* answers to get op mode */
#define OPMODE_NONE 0x00
#define OPMODE_NORMAL_NIC_WITH_FLASH 0x01
#define OPMODE_HW_CONFIG_MODE 0x02
#define OPMODE_DFU_MODE_WITH_FLASH 0x03
#define OPMODE_NORMAL_NIC_WITHOUT_FLASH 0x04
#define CMD_SET_MIB 0x01
#define CMD_GET_MIB 0x02
#define CMD_SCAN 0x03
#define CMD_JOIN 0x04
#define CMD_START_IBSS 0x05
#define CMD_RADIO_ON 0x06
#define CMD_RADIO_OFF 0x07
#define CMD_STARTUP 0x0B
#define MIB_LOCAL 0x01
#define MIB_MAC_ADDR 0x02
#define MIB_MAC 0x03
#define MIB_MAC_MGMT 0x05
#define MIB_MAC_WEP 0x06
#define MIB_PHY 0x07
#define MIB_FW_VERSION 0x08
#define MIB_MDOMAIN 0x09
#define ADHOC_MODE 1
#define INFRASTRUCTURE_MODE 2
/* values for struct mib_local, field preamble_type */
#define PREAMBLE_TYPE_LONG 0
#define PREAMBLE_TYPE_SHORT 1
#define PREAMBLE_TYPE_AUTO 2
/* values for tx_rate */
#define TX_RATE_1MBIT 0
#define TX_RATE_2MBIT 1
#define TX_RATE_5_5MBIT 2
#define TX_RATE_11MBIT 3
#define TX_RATE_AUTO 4
/* power management modes */
#define AT76_PM_OFF 1
#define AT76_PM_ON 2
#define AT76_PM_SMART 3
struct hwcfg_r505 {
u8 cr39_values[14];
u8 reserved1[14];
u8 bb_cr[14];
u8 pidvid[4];
u8 mac_addr[ETH_ALEN];
u8 regulatory_domain;
u8 reserved2[14];
u8 cr15_values[14];
u8 reserved3[3];
} __packed;
struct hwcfg_rfmd {
u8 cr20_values[14];
u8 cr21_values[14];
u8 bb_cr[14];
u8 pidvid[4];
u8 mac_addr[ETH_ALEN];
u8 regulatory_domain;
u8 low_power_values[14];
u8 normal_power_values[14];
u8 reserved1[3];
} __packed;
struct hwcfg_intersil {
u8 mac_addr[ETH_ALEN];
u8 cr31_values[14];
u8 cr58_values[14];
u8 pidvid[4];
u8 regulatory_domain;
u8 reserved[1];
} __packed;
union at76_hwcfg {
struct hwcfg_intersil i;
struct hwcfg_rfmd r3;
struct hwcfg_r505 r5;
};
#define WEP_SMALL_KEY_LEN (40 / 8)
#define WEP_LARGE_KEY_LEN (104 / 8)
#define WEP_KEYS (4)
struct at76_card_config {
u8 exclude_unencrypted;
u8 promiscuous_mode;
u8 short_retry_limit;
u8 encryption_type;
__le16 rts_threshold;
__le16 fragmentation_threshold; /* 256..2346 */
u8 basic_rate_set[4];
u8 auto_rate_fallback; /* 0,1 */
u8 channel;
u8 privacy_invoked;
u8 wep_default_key_id; /* 0..3 */
u8 current_ssid[32];
u8 wep_default_key_value[4][WEP_LARGE_KEY_LEN];
u8 ssid_len;
u8 short_preamble;
__le16 beacon_period;
} __packed;
struct at76_command {
u8 cmd;
u8 reserved;
__le16 size;
u8 data[0];
} __packed;
/* Length of Atmel-specific Rx header before 802.11 frame */
#define AT76_RX_HDRLEN offsetof(struct at76_rx_buffer, packet)
struct at76_rx_buffer {
__le16 wlength;
u8 rx_rate;
u8 newbss;
u8 fragmentation;
u8 rssi;
u8 link_quality;
u8 noise_level;
__le32 rx_time;
u8 packet[IEEE80211_MAX_FRAG_THRESHOLD];
} __packed;
/* Length of Atmel-specific Tx header before 802.11 frame */
#define AT76_TX_HDRLEN offsetof(struct at76_tx_buffer, packet)
struct at76_tx_buffer {
__le16 wlength;
u8 tx_rate;
u8 padding;
u8 reserved[4];
u8 packet[IEEE80211_MAX_FRAG_THRESHOLD];
} __packed;
/* defines for scan_type below */
#define SCAN_TYPE_ACTIVE 0
#define SCAN_TYPE_PASSIVE 1
struct at76_req_scan {
u8 bssid[ETH_ALEN];
u8 essid[32];
u8 scan_type;
u8 channel;
__le16 probe_delay;
__le16 min_channel_time;
__le16 max_channel_time;
u8 essid_size;
u8 international_scan;
} __packed;
struct at76_req_ibss {
u8 bssid[ETH_ALEN];
u8 essid[32];
u8 bss_type;
u8 channel;
u8 essid_size;
u8 reserved[3];
} __packed;
struct at76_req_join {
u8 bssid[ETH_ALEN];
u8 essid[32];
u8 bss_type;
u8 channel;
__le16 timeout;
u8 essid_size;
u8 reserved;
} __packed;
struct mib_local {
u16 reserved0;
u8 beacon_enable;
u8 txautorate_fallback;
u8 reserved1;
u8 ssid_size;
u8 promiscuous_mode;
u16 reserved2;
u8 preamble_type;
u16 reserved3;
} __packed;
struct mib_mac_addr {
u8 mac_addr[ETH_ALEN];
u8 res[2]; /* ??? */
u8 group_addr[4][ETH_ALEN];
u8 group_addr_status[4];
} __packed;
struct mib_mac {
__le32 max_tx_msdu_lifetime;
__le32 max_rx_lifetime;
__le16 frag_threshold;
__le16 rts_threshold;
__le16 cwmin;
__le16 cwmax;
u8 short_retry_time;
u8 long_retry_time;
u8 scan_type; /* active or passive */
u8 scan_channel;
__le16 probe_delay; /* delay before ProbeReq in active scan, RO */
__le16 min_channel_time;
__le16 max_channel_time;
__le16 listen_interval;
u8 desired_ssid[32];
u8 desired_bssid[ETH_ALEN];
u8 desired_bsstype; /* ad-hoc or infrastructure */
u8 reserved2;
} __packed;
struct mib_mac_mgmt {
__le16 beacon_period;
__le16 CFP_max_duration;
__le16 medium_occupancy_limit;
__le16 station_id; /* assoc id */
__le16 ATIM_window;
u8 CFP_mode;
u8 privacy_option_implemented;
u8 DTIM_period;
u8 CFP_period;
u8 current_bssid[ETH_ALEN];
u8 current_essid[32];
u8 current_bss_type;
u8 power_mgmt_mode;
/* rfmd and 505 */
u8 ibss_change;
u8 res;
u8 multi_domain_capability_implemented;
u8 multi_domain_capability_enabled;
u8 country_string[IEEE80211_COUNTRY_STRING_LEN];
u8 reserved[3];
} __packed;
struct mib_mac_wep {
u8 privacy_invoked; /* 0 disable encr., 1 enable encr */
u8 wep_default_key_id;
u8 wep_key_mapping_len;
u8 exclude_unencrypted;
__le32 wep_icv_error_count;
__le32 wep_excluded_count;
u8 wep_default_keyvalue[WEP_KEYS][WEP_LARGE_KEY_LEN];
u8 encryption_level; /* 1 for 40bit, 2 for 104bit encryption */
} __packed;
struct mib_phy {
__le32 ed_threshold;
__le16 slot_time;
__le16 sifs_time;
__le16 preamble_length;
__le16 plcp_header_length;
__le16 mpdu_max_length;
__le16 cca_mode_supported;
u8 operation_rate_set[4];
u8 channel_id;
u8 current_cca_mode;
u8 phy_type;
u8 current_reg_domain;
} __packed;
struct mib_fw_version {
u8 major;
u8 minor;
u8 patch;
u8 build;
} __packed;
struct mib_mdomain {
u8 tx_powerlevel[14];
u8 channel_list[14]; /* 0 for invalid channels */
} __packed;
struct set_mib_buffer {
u8 type;
u8 size;
u8 index;
u8 reserved;
union {
u8 byte;
__le16 word;
u8 addr[ETH_ALEN];
struct mib_mac_wep wep_mib;
} data;
} __packed;
struct at76_fw_header {
__le32 crc; /* CRC32 of the whole image */
__le32 board_type; /* firmware compatibility code */
u8 build; /* firmware build number */
u8 patch; /* firmware patch level */
u8 minor; /* firmware minor version */
u8 major; /* firmware major version */
__le32 str_offset; /* offset of the copyright string */
__le32 int_fw_offset; /* internal firmware image offset */
__le32 int_fw_len; /* internal firmware image length */
__le32 ext_fw_offset; /* external firmware image offset */
__le32 ext_fw_len; /* external firmware image length */
} __packed;
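/*
 * Illustrative sketch, not from the original source: once the firmware
 * file has been obtained (e.g. via request_firmware()), the offsets in
 * struct at76_fw_header locate the two embedded images, roughly as
 * follows (bounds checks against fw->size omitted for brevity):
 *
 *	const struct at76_fw_header *hdr = (const void *)fw->data;
 *	const u8 *intfw = fw->data + le32_to_cpu(hdr->int_fw_offset);
 *	int intfw_size = le32_to_cpu(hdr->int_fw_len);
 */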
/* a description of a regulatory domain and the allowed channels */
struct reg_domain {
u16 code;
char const *name;
u32 channel_map; /* if bit N is set, channel (N+1) is allowed */
};
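/*
 * Illustrative sketch, not part of the original header: per the
 * channel_map comment above, channel N maps to bit (N - 1), so an
 * allowed-channel test is a single bit probe (function name hypothetical):
 *
 *	static bool example_at76_channel_allowed(const struct reg_domain *dom,
 *						 int chan)
 *	{
 *		return chan >= 1 && chan <= 32 &&
 *		       (dom->channel_map & (1U << (chan - 1)));
 *	}
 */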
/* Data for one loaded firmware file */
struct fwentry {
const char *const fwname;
const struct firmware *fw;
int extfw_size;
int intfw_size;
/* pointer to loaded firmware, no need to free */
u8 *extfw; /* external firmware, extfw_size bytes long */
u8 *intfw; /* internal firmware, intfw_size bytes long */
enum board_type board_type; /* board type */
struct mib_fw_version fw_version;
int loaded; /* Loaded and parsed successfully */
};
struct at76_priv {
struct usb_device *udev; /* USB device pointer */
struct sk_buff *rx_skb; /* skbuff for receiving data */
struct sk_buff *tx_skb; /* skbuff for transmitting data */
void *bulk_out_buffer; /* buffer for sending data */
struct urb *tx_urb; /* URB for sending data */
struct urb *rx_urb; /* URB for receiving data */
unsigned int tx_pipe; /* bulk out pipe */
unsigned int rx_pipe; /* bulk in pipe */
struct mutex mtx; /* locks this structure */
/* work queues */
struct work_struct work_set_promisc;
struct work_struct work_submit_rx;
struct work_struct work_join_bssid;
struct delayed_work dwork_hw_scan;
struct tasklet_struct rx_tasklet;
/* the WEP stuff */
int wep_enabled; /* 1 if WEP is enabled */
int wep_key_id; /* key id to be used */
u8 wep_keys[WEP_KEYS][WEP_LARGE_KEY_LEN]; /* WEP keys */
u8 wep_keys_len[WEP_KEYS]; /* length of WEP keys */
int channel;
int iw_mode;
u8 bssid[ETH_ALEN];
u8 essid[IW_ESSID_MAX_SIZE];
int essid_size;
int radio_on;
int promisc;
int preamble_type; /* 0 - long, 1 - short, 2 - auto */
int auth_mode; /* authentication type: 0 open, 1 shared key */
int txrate; /* 0,1,2,3 = 1,2,5.5,11 Mbps, 4 is auto */
int frag_threshold; /* threshold for fragmentation of tx packets */
int rts_threshold; /* threshold for RTS mechanism */
int short_retry_limit;
int scan_min_time; /* scan min channel time */
int scan_max_time; /* scan max channel time */
int scan_mode; /* SCAN_TYPE_ACTIVE, SCAN_TYPE_PASSIVE */
int scan_need_any; /* if set, need to scan for any ESSID */
bool scanning; /* if set, the scan is running */
u16 assoc_id; /* current association ID, if associated */
u8 pm_mode; /* power management mode */
u32 pm_period; /* power management period in microseconds */
struct reg_domain const *domain; /* reg domain description */
/* These fields contain HW config provided by the device (not all of
* these fields are used by all board types) */
u8 mac_addr[ETH_ALEN];
u8 regulatory_domain;
struct at76_card_config card_config;
enum board_type board_type;
struct mib_fw_version fw_version;
unsigned int device_unplugged:1;
unsigned int netdev_registered:1;
struct set_mib_buffer mib_buf; /* global buffer for set_mib calls */
int beacon_period; /* period of mgmt beacons, Kus */
struct ieee80211_hw *hw;
int mac80211_registered;
};
#define AT76_SUPPORTED_FILTERS FIF_PROMISC_IN_BSS
#define SCAN_POLL_INTERVAL (HZ / 4)
#define CMD_COMPLETION_TIMEOUT (5 * HZ)
#define DEF_RTS_THRESHOLD 1536
#define DEF_FRAG_THRESHOLD 1536
#define DEF_SHORT_RETRY_LIMIT 8
#define DEF_CHANNEL 10
#define DEF_SCAN_MIN_TIME 10
#define DEF_SCAN_MAX_TIME 120
/* the max padding size for tx in bytes (see calc_padding) */
#define MAX_PADDING_SIZE 53
#endif /* _AT76_USB_H */

@@ -0,0 +1,62 @@
config ATH_COMMON
tristate
menuconfig ATH_CARDS
tristate "Atheros Wireless Cards"
depends on CFG80211 && (!UML || BROKEN)
---help---
This will enable the support for the Atheros wireless drivers.
The ath5k, ath9k, ath9k_htc and ar9170 drivers share some common code;
this option enables the common ath.ko module which provides the shared helpers.
For more information and documentation on this module you can visit:
http://wireless.kernel.org/en/users/Drivers/ath
For information on all Atheros wireless drivers visit:
http://wireless.kernel.org/en/users/Drivers/Atheros
if ATH_CARDS
config ATH_DEBUG
bool "Atheros wireless debugging"
---help---
Say Y if you want to debug Atheros wireless drivers.
Right now only ath9k makes use of this.
config ATH_TRACEPOINTS
bool "Atheros wireless tracing"
depends on ATH_DEBUG
depends on EVENT_TRACING
---help---
This option enables tracepoints for atheros wireless drivers.
Currently, ath9k makes use of this facility.
config ATH_REG_DYNAMIC_USER_REG_HINTS
bool "Atheros dynamic user regulatory hints"
depends on CFG80211_CERTIFICATION_ONUS
default n
---help---
Say N. This should only be enabled in countries where
this feature is explicitly allowed and only on cards that
specifically have been tested for this.
config ATH_REG_DYNAMIC_USER_CERT_TESTING
bool "Atheros dynamic user regulatory testing"
depends on ATH_REG_DYNAMIC_USER_REG_HINTS && CFG80211_CERTIFICATION_ONUS
default n
---help---
Say N. This should only be enabled on systems
undergoing certification testing.
source "drivers/net/wireless/ath/ath5k/Kconfig"
source "drivers/net/wireless/ath/ath9k/Kconfig"
source "drivers/net/wireless/ath/carl9170/Kconfig"
source "drivers/net/wireless/ath/ath6kl/Kconfig"
source "drivers/net/wireless/ath/ar5523/Kconfig"
source "drivers/net/wireless/ath/wil6210/Kconfig"
source "drivers/net/wireless/ath/ath10k/Kconfig"
source "drivers/net/wireless/ath/wcn36xx/Kconfig"
endif

@@ -0,0 +1,24 @@
obj-$(CONFIG_ATH5K) += ath5k/
obj-$(CONFIG_ATH9K_HW) += ath9k/
obj-$(CONFIG_CARL9170) += carl9170/
obj-$(CONFIG_ATH6KL) += ath6kl/
obj-$(CONFIG_AR5523) += ar5523/
obj-$(CONFIG_WIL6210) += wil6210/
obj-$(CONFIG_ATH10K) += ath10k/
obj-$(CONFIG_WCN36XX) += wcn36xx/
obj-$(CONFIG_ATH_COMMON) += ath.o
ath-objs := main.o \
regd.o \
hw.o \
key.o \
dfs_pattern_detector.o \
dfs_pri_detector.o
ath-$(CONFIG_ATH_DEBUG) += debug.o
ath-$(CONFIG_ATH_TRACEPOINTS) += trace.o
ccflags-y += -D__CHECK_ENDIAN__
CFLAGS_trace.o := -I$(src)

@@ -0,0 +1,8 @@
config AR5523
tristate "Atheros AR5523 wireless driver support"
depends on MAC80211 && USB
select ATH_COMMON
select FW_LOADER
---help---
This module adds support for AR5523-based USB dongles such as the
D-Link DWL-G132, Netgear WPN111 and many more.

@@ -0,0 +1 @@
obj-$(CONFIG_AR5523) := ar5523.o

File diff suppressed because it is too large.

@@ -0,0 +1,152 @@
/*
* Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
* Copyright (c) 2006 Sam Leffler, Errno Consulting
* Copyright (c) 2007 Christoph Hellwig <hch@lst.de>
* Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
* Copyright (c) 2012 Pontus Fuchs <pontus.fuchs@gmail.com>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define AR5523_FLAG_PRE_FIRMWARE (1 << 0)
#define AR5523_FLAG_ABG (1 << 1)
#define AR5523_FIRMWARE_FILE "ar5523.bin"
#define AR5523_CMD_TX_PIPE 0x01
#define AR5523_DATA_TX_PIPE 0x02
#define AR5523_CMD_RX_PIPE 0x81
#define AR5523_DATA_RX_PIPE 0x82
#define ar5523_cmd_tx_pipe(dev) \
usb_sndbulkpipe((dev), AR5523_CMD_TX_PIPE)
#define ar5523_data_tx_pipe(dev) \
usb_sndbulkpipe((dev), AR5523_DATA_TX_PIPE)
#define ar5523_cmd_rx_pipe(dev) \
usb_rcvbulkpipe((dev), AR5523_CMD_RX_PIPE)
#define ar5523_data_rx_pipe(dev) \
usb_rcvbulkpipe((dev), AR5523_DATA_RX_PIPE)
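/*
 * Illustrative usage, not from the original source: the pipe helpers
 * above wrap the fixed endpoint numbers, so a command URB would be set up
 * roughly like this ('example_cmd_cb' is a hypothetical completion
 * handler):
 *
 *	usb_fill_bulk_urb(urb, ar->dev, ar5523_cmd_tx_pipe(ar->dev),
 *			  buf, buflen, example_cmd_cb, ar);
 */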
#define AR5523_DATA_TIMEOUT 10000
#define AR5523_CMD_TIMEOUT 1000
#define AR5523_TX_DATA_COUNT 8
#define AR5523_TX_DATA_RESTART_COUNT 2
#define AR5523_RX_DATA_COUNT 16
#define AR5523_RX_DATA_REFILL_COUNT 8
#define AR5523_CMD_ID 1
#define AR5523_DATA_ID 2
#define AR5523_TX_WD_TIMEOUT (HZ * 2)
#define AR5523_FLUSH_TIMEOUT (HZ * 3)
enum AR5523_flags {
AR5523_HW_UP,
AR5523_USB_DISCONNECTED,
AR5523_CONNECTED
};
struct ar5523_tx_cmd {
struct ar5523 *ar;
struct urb *urb_tx;
void *buf_tx;
void *odata;
int olen;
int flags;
int res;
struct completion done;
};
/* This struct is placed in tx_info->driver_data. It must not be larger
* than IEEE80211_TX_INFO_DRIVER_DATA_SIZE.
*/
struct ar5523_tx_data {
struct list_head list;
struct ar5523 *ar;
struct sk_buff *skb;
struct urb *urb;
};
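/*
 * Illustrative sketch, not in the original header: the size constraint
 * stated in the comment above can be enforced at compile time somewhere
 * in the driver, e.g.:
 *
 *	BUILD_BUG_ON(sizeof(struct ar5523_tx_data) >
 *		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
 */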
struct ar5523_rx_data {
struct list_head list;
struct ar5523 *ar;
struct urb *urb;
struct sk_buff *skb;
};
struct ar5523 {
struct usb_device *dev;
struct ieee80211_hw *hw;
unsigned long flags;
struct mutex mutex;
struct workqueue_struct *wq;
struct ar5523_tx_cmd tx_cmd;
struct delayed_work stat_work;
struct timer_list tx_wd_timer;
struct work_struct tx_wd_work;
struct work_struct tx_work;
struct list_head tx_queue_pending;
struct list_head tx_queue_submitted;
spinlock_t tx_data_list_lock;
wait_queue_head_t tx_flush_waitq;
/* Queued + Submitted TX frames */
atomic_t tx_nr_total;
/* Submitted TX frames */
atomic_t tx_nr_pending;
void *rx_cmd_buf;
struct urb *rx_cmd_urb;
struct ar5523_rx_data rx_data[AR5523_RX_DATA_COUNT];
spinlock_t rx_data_list_lock;
struct list_head rx_data_free;
struct list_head rx_data_used;
atomic_t rx_data_free_cnt;
struct work_struct rx_refill_work;
unsigned int rxbufsz;
u8 serial[16];
struct ieee80211_channel channels[14];
struct ieee80211_rate rates[12];
struct ieee80211_supported_band band;
struct ieee80211_vif *vif;
};
/* flags for sending firmware commands */
#define AR5523_CMD_FLAG_READ (1 << 1)
#define AR5523_CMD_FLAG_MAGIC (1 << 2)
#define ar5523_dbg(ar, format, arg...) \
dev_dbg(&(ar)->dev->dev, format, ## arg)
/* On USB hot-unplug there can be a lot of URBs in flight and they'll all
fail. Instead of dealing with them in every possible place, just suppress
* any messages on USB disconnect.
*/
#define ar5523_err(ar, format, arg...) \
do { \
if (!test_bit(AR5523_USB_DISCONNECTED, &ar->flags)) { \
dev_err(&(ar)->dev->dev, format, ## arg); \
} \
} while (0)
#define ar5523_info(ar, format, arg...) \
dev_info(&(ar)->dev->dev, format, ## arg)

@@ -0,0 +1,431 @@
/*
* Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
* Copyright (c) 2006 Sam Leffler, Errno Consulting
* Copyright (c) 2007 Christoph Hellwig <hch@lst.de>
* Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
* Copyright (c) 2012 Pontus Fuchs <pontus.fuchs@gmail.com>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* all fields are big endian */
struct ar5523_fwblock {
__be32 flags;
#define AR5523_WRITE_BLOCK (1 << 4)
__be32 len;
#define AR5523_MAX_FWBLOCK_SIZE 2048
__be32 total;
__be32 remain;
__be32 rxtotal;
__be32 pad[123];
} __packed;
#define AR5523_MAX_RXCMDSZ 1024
#define AR5523_MAX_TXCMDSZ 1024
struct ar5523_cmd_hdr {
__be32 len;
__be32 code;
/* NB: these are defined for rev 1.5 firmware; rev 1.6 is different */
/* messages from Host -> Target */
#define WDCMSG_HOST_AVAILABLE 0x01
#define WDCMSG_BIND 0x02
#define WDCMSG_TARGET_RESET 0x03
#define WDCMSG_TARGET_GET_CAPABILITY 0x04
#define WDCMSG_TARGET_SET_CONFIG 0x05
#define WDCMSG_TARGET_GET_STATUS 0x06
#define WDCMSG_TARGET_GET_STATS 0x07
#define WDCMSG_TARGET_START 0x08
#define WDCMSG_TARGET_STOP 0x09
#define WDCMSG_TARGET_ENABLE 0x0a
#define WDCMSG_TARGET_DISABLE 0x0b
#define WDCMSG_CREATE_CONNECTION 0x0c
#define WDCMSG_UPDATE_CONNECT_ATTR 0x0d
#define WDCMSG_DELETE_CONNECT 0x0e
#define WDCMSG_SEND 0x0f
#define WDCMSG_FLUSH 0x10
/* messages from Target -> Host */
#define WDCMSG_STATS_UPDATE 0x11
#define WDCMSG_BMISS 0x12
#define WDCMSG_DEVICE_AVAIL 0x13
#define WDCMSG_SEND_COMPLETE 0x14
#define WDCMSG_DATA_AVAIL 0x15
#define WDCMSG_SET_PWR_MODE 0x16
#define WDCMSG_BMISS_ACK 0x17
#define WDCMSG_SET_LED_STEADY 0x18
#define WDCMSG_SET_LED_BLINK 0x19
/* more messages */
#define WDCMSG_SETUP_BEACON_DESC 0x1a
#define WDCMSG_BEACON_INIT 0x1b
#define WDCMSG_RESET_KEY_CACHE 0x1c
#define WDCMSG_RESET_KEY_CACHE_ENTRY 0x1d
#define WDCMSG_SET_KEY_CACHE_ENTRY 0x1e
#define WDCMSG_SET_DECOMP_MASK 0x1f
#define WDCMSG_SET_REGULATORY_DOMAIN 0x20
#define WDCMSG_SET_LED_STATE 0x21
#define WDCMSG_WRITE_ASSOCID 0x22
#define WDCMSG_SET_STA_BEACON_TIMERS 0x23
#define WDCMSG_GET_TSF 0x24
#define WDCMSG_RESET_TSF 0x25
#define WDCMSG_SET_ADHOC_MODE 0x26
#define WDCMSG_SET_BASIC_RATE 0x27
#define WDCMSG_MIB_CONTROL 0x28
#define WDCMSG_GET_CHANNEL_DATA 0x29
#define WDCMSG_GET_CUR_RSSI 0x2a
#define WDCMSG_SET_ANTENNA_SWITCH 0x2b
#define WDCMSG_USE_SHORT_SLOT_TIME 0x2f
#define WDCMSG_SET_POWER_MODE 0x30
#define WDCMSG_SETUP_PSPOLL_DESC 0x31
#define WDCMSG_SET_RX_MULTICAST_FILTER 0x32
#define WDCMSG_RX_FILTER 0x33
#define WDCMSG_PER_CALIBRATION 0x34
#define WDCMSG_RESET 0x35
#define WDCMSG_DISABLE 0x36
#define WDCMSG_PHY_DISABLE 0x37
#define WDCMSG_SET_TX_POWER_LIMIT 0x38
#define WDCMSG_SET_TX_QUEUE_PARAMS 0x39
#define WDCMSG_SETUP_TX_QUEUE 0x3a
#define WDCMSG_RELEASE_TX_QUEUE 0x3b
#define WDCMSG_SET_DEFAULT_KEY 0x43
__u32 priv; /* driver private data,
don't care about endianness */
__be32 magic;
__be32 reserved2[4];
};
struct ar5523_cmd_host_available {
__be32 sw_ver_major;
__be32 sw_ver_minor;
__be32 sw_ver_patch;
__be32 sw_ver_build;
} __packed;
#define ATH_SW_VER_MAJOR 1
#define ATH_SW_VER_MINOR 5
#define ATH_SW_VER_PATCH 0
#define ATH_SW_VER_BUILD 9999
struct ar5523_chunk {
u8 seqnum; /* sequence number for ordering */
u8 flags;
#define UATH_CFLAGS_FINAL 0x01 /* final chunk of a msg */
#define UATH_CFLAGS_RXMSG 0x02 /* chunk contains rx completion */
#define UATH_CFLAGS_DEBUG 0x04 /* for debugging */
__be16 length; /* chunk size in bytes */
/* chunk data follows */
} __packed;
/*
* Message format for a WDCMSG_DATA_AVAIL message from Target to Host.
*/
struct ar5523_rx_desc {
__be32 len; /* msg length including header */
__be32 code; /* WDCMSG_DATA_AVAIL */
__be32 gennum; /* generation number */
__be32 status; /* start of RECEIVE_INFO */
#define UATH_STATUS_OK 0
#define UATH_STATUS_STOP_IN_PROGRESS 1
#define UATH_STATUS_CRC_ERR 2
#define UATH_STATUS_PHY_ERR 3
#define UATH_STATUS_DECRYPT_CRC_ERR 4
#define UATH_STATUS_DECRYPT_MIC_ERR 5
#define UATH_STATUS_DECOMP_ERR 6
#define UATH_STATUS_KEY_ERR 7
#define UATH_STATUS_ERR 8
__be32 tstamp_low; /* low-order 32-bits of rx timestamp */
__be32 tstamp_high; /* high-order 32-bits of rx timestamp */
__be32 framelen; /* frame length */
__be32 rate; /* rx rate code */
__be32 antenna;
__be32 rssi;
__be32 channel;
__be32 phyerror;
__be32 connix; /* key table ix for bss traffic */
__be32 decrypterror;
__be32 keycachemiss;
__be32 pad; /* XXX? */
} __packed;
struct ar5523_tx_desc {
__be32 msglen;
u32 msgid; /* msg id (supplied by host) */
	__be32	type;	/* opcode: WDCMSG_SEND or WDCMSG_FLUSH */
__be32 txqid; /* tx queue id and flags */
#define UATH_TXQID_MASK 0x0f
#define UATH_TXQID_MINRATE 0x10 /* use min tx rate */
#define UATH_TXQID_FF 0x20 /* content is fast frame */
__be32 connid; /* tx connection id */
#define UATH_ID_INVALID 0xffffffff /* for sending prior to connection */
__be32 flags; /* non-zero if response desired */
#define UATH_TX_NOTIFY (1 << 24) /* f/w will send a UATH_NOTIF_TX */
__be32 buflen; /* payload length */
} __packed;
#define AR5523_ID_BSS 2
#define AR5523_ID_BROADCAST 0xffffffff
/* structure for command UATH_CMD_WRITE_MAC */
struct ar5523_write_mac {
__be32 reg;
__be32 len;
u8 data[32];
} __packed;
struct ar5523_cmd_rateset {
__u8 length;
#define AR5523_MAX_NRATES 32
__u8 set[AR5523_MAX_NRATES];
};
struct ar5523_cmd_set_associd {		/* WDCMSG_WRITE_ASSOCID */
__be32 defaultrateix;
__be32 associd;
__be32 timoffset;
__be32 turboprime;
__u8 bssid[6];
} __packed;
/* structure for command WDCMSG_RESET */
struct ar5523_cmd_reset {
__be32 flags; /* channel flags */
#define UATH_CHAN_TURBO 0x0100
#define UATH_CHAN_CCK 0x0200
#define UATH_CHAN_OFDM 0x0400
#define UATH_CHAN_2GHZ 0x1000
#define UATH_CHAN_5GHZ 0x2000
__be32 freq; /* channel frequency */
__be32 maxrdpower;
__be32 cfgctl;
__be32 twiceantennareduction;
__be32 channelchange;
__be32 keeprccontent;
} __packed;
/* structure for command WDCMSG_SET_BASIC_RATE */
struct ar5523_cmd_rates {
__be32 connid;
__be32 keeprccontent;
__be32 size;
struct ar5523_cmd_rateset rateset;
} __packed;
enum {
WLAN_MODE_NONE = 0,
WLAN_MODE_11b,
WLAN_MODE_11a,
WLAN_MODE_11g,
WLAN_MODE_11a_TURBO,
WLAN_MODE_11g_TURBO,
WLAN_MODE_11a_TURBO_PRIME,
WLAN_MODE_11g_TURBO_PRIME,
WLAN_MODE_11a_XR,
WLAN_MODE_11g_XR,
};
struct ar5523_cmd_connection_attr {
__be32 longpreambleonly;
struct ar5523_cmd_rateset rateset;
__be32 wlanmode;
} __packed;
/* structure for command WDCMSG_CREATE_CONNECTION */
struct ar5523_cmd_create_connection {
__be32 connid;
__be32 bssid;
__be32 size;
struct ar5523_cmd_connection_attr connattr;
} __packed;
struct ar5523_cmd_ledsteady { /* WDCMSG_SET_LED_STEADY */
__be32 lednum;
#define UATH_LED_LINK 0
#define UATH_LED_ACTIVITY 1
__be32 ledmode;
#define UATH_LED_OFF 0
#define UATH_LED_ON 1
} __packed;
struct ar5523_cmd_ledblink { /* WDCMSG_SET_LED_BLINK */
__be32 lednum;
__be32 ledmode;
__be32 blinkrate;
__be32 slowmode;
} __packed;
struct ar5523_cmd_ledstate { /* WDCMSG_SET_LED_STATE */
__be32 connected;
} __packed;
struct ar5523_cmd_txq_attr {
__be32 priority;
__be32 aifs;
__be32 logcwmin;
__be32 logcwmax;
__be32 bursttime;
__be32 mode;
__be32 qflags;
} __packed;
struct ar5523_cmd_txq_setup { /* WDCMSG_SETUP_TX_QUEUE */
__be32 qid;
__be32 len;
struct ar5523_cmd_txq_attr attr;
} __packed;
struct ar5523_cmd_rx_filter { /* WDCMSG_RX_FILTER */
__be32 bits;
#define UATH_FILTER_RX_UCAST 0x00000001
#define UATH_FILTER_RX_MCAST 0x00000002
#define UATH_FILTER_RX_BCAST 0x00000004
#define UATH_FILTER_RX_CONTROL 0x00000008
#define UATH_FILTER_RX_BEACON 0x00000010 /* beacon frames */
#define UATH_FILTER_RX_PROM 0x00000020 /* promiscuous mode */
#define UATH_FILTER_RX_PHY_ERR 0x00000040 /* phy errors */
#define UATH_FILTER_RX_PHY_RADAR 0x00000080 /* radar phy errors */
#define UATH_FILTER_RX_XR_POOL 0x00000400 /* XR group polls */
#define UATH_FILTER_RX_PROBE_REQ 0x00000800
__be32 op;
#define UATH_FILTER_OP_INIT 0x0
#define UATH_FILTER_OP_SET 0x1
#define UATH_FILTER_OP_CLEAR 0x2
#define UATH_FILTER_OP_TEMP 0x3
#define UATH_FILTER_OP_RESTORE 0x4
} __packed;
enum {
	CFG_NONE,		/* Sentinel to indicate "no config" */
CFG_REG_DOMAIN, /* Regulatory Domain */
CFG_RATE_CONTROL_ENABLE,
CFG_DEF_XMIT_DATA_RATE, /* NB: if rate control is not enabled */
CFG_HW_TX_RETRIES,
CFG_SW_TX_RETRIES,
CFG_SLOW_CLOCK_ENABLE,
CFG_COMP_PROC,
CFG_USER_RTS_THRESHOLD,
CFG_XR2NORM_RATE_THRESHOLD,
CFG_XRMODE_SWITCH_COUNT,
CFG_PROTECTION_TYPE,
CFG_BURST_SEQ_THRESHOLD,
CFG_ABOLT,
CFG_IQ_LOG_COUNT_MAX,
CFG_MODE_CTS,
CFG_WME_ENABLED,
CFG_GPRS_CBR_PERIOD,
CFG_SERVICE_TYPE,
/* MAC Address to use. Overrides EEPROM */
CFG_MAC_ADDR,
CFG_DEBUG_EAR,
CFG_INIT_REGS,
/* An ID for use in error & debug messages */
CFG_DEBUG_ID,
CFG_COMP_WIN_SZ,
CFG_DIVERSITY_CTL,
CFG_TP_SCALE,
CFG_TPC_HALF_DBM5,
CFG_TPC_HALF_DBM2,
CFG_OVERRD_TX_POWER,
CFG_USE_32KHZ_CLOCK,
CFG_GMODE_PROTECTION,
CFG_GMODE_PROTECT_RATE_INDEX,
CFG_GMODE_NON_ERP_PREAMBLE,
CFG_WDC_TRANSPORT_CHUNK_SIZE,
};
enum {
	/* Sentinel to indicate "no capability" */
CAP_NONE,
CAP_ALL, /* ALL capabilities */
CAP_TARGET_VERSION,
CAP_TARGET_REVISION,
CAP_MAC_VERSION,
CAP_MAC_REVISION,
CAP_PHY_REVISION,
CAP_ANALOG_5GHz_REVISION,
CAP_ANALOG_2GHz_REVISION,
/* Target supports WDC message debug features */
CAP_DEBUG_WDCMSG_SUPPORT,
CAP_REG_DOMAIN,
CAP_COUNTRY_CODE,
CAP_REG_CAP_BITS,
CAP_WIRELESS_MODES,
CAP_CHAN_SPREAD_SUPPORT,
CAP_SLEEP_AFTER_BEACON_BROKEN,
CAP_COMPRESS_SUPPORT,
CAP_BURST_SUPPORT,
CAP_FAST_FRAMES_SUPPORT,
CAP_CHAP_TUNING_SUPPORT,
CAP_TURBOG_SUPPORT,
CAP_TURBO_PRIME_SUPPORT,
CAP_DEVICE_TYPE,
CAP_XR_SUPPORT,
CAP_WME_SUPPORT,
CAP_TOTAL_QUEUES,
CAP_CONNECTION_ID_MAX, /* Should absorb CAP_KEY_CACHE_SIZE */
CAP_LOW_5GHZ_CHAN,
CAP_HIGH_5GHZ_CHAN,
CAP_LOW_2GHZ_CHAN,
CAP_HIGH_2GHZ_CHAN,
CAP_MIC_AES_CCM,
CAP_MIC_CKIP,
CAP_MIC_TKIP,
CAP_MIC_TKIP_WME,
CAP_CIPHER_AES_CCM,
CAP_CIPHER_CKIP,
CAP_CIPHER_TKIP,
CAP_TWICE_ANTENNAGAIN_5G,
CAP_TWICE_ANTENNAGAIN_2G,
};
enum {
	ST_NONE,		/* Sentinel to indicate "no status" */
ST_ALL,
ST_SERVICE_TYPE,
ST_WLAN_MODE,
ST_FREQ,
ST_BAND,
ST_LAST_RSSI,
ST_PS_FRAMES_DROPPED,
ST_CACHED_DEF_ANT,
ST_COUNT_OTHER_RX_ANT,
ST_USE_FAST_DIVERSITY,
ST_MAC_ADDR,
ST_RX_GENERATION_NUM,
ST_TX_QUEUE_DEPTH,
ST_SERIAL_NUMBER,
ST_WDC_TRANSPORT_CHUNK_SIZE,
};
enum {
TARGET_DEVICE_AWAKE,
TARGET_DEVICE_SLEEP,
TARGET_DEVICE_PWRDN,
TARGET_DEVICE_PWRSAVE,
TARGET_DEVICE_SUSPEND,
TARGET_DEVICE_RESUME,
};
/* this is in net/ieee80211.h, but that conflicts with the mac80211 headers */
#define IEEE80211_2ADDR_LEN 16
#define AR5523_MIN_RXBUFSZ \
(((sizeof(__be32) + IEEE80211_2ADDR_LEN + \
sizeof(struct ar5523_rx_desc)) + 3) & ~3)


@ -0,0 +1,313 @@
/*
* Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef ATH_H
#define ATH_H
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/spinlock.h>
#include <net/mac80211.h>
/*
* The key cache is used for h/w cipher state and also for
* tracking station state such as the current tx antenna.
* We also setup a mapping table between key cache slot indices
* and station state to short-circuit node lookups on rx.
* Different parts have different size key caches. We handle
* up to ATH_KEYMAX entries (could dynamically allocate state).
*/
#define ATH_KEYMAX 128 /* max key cache size we handle */
static const u8 ath_bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct ath_ani {
bool caldone;
unsigned int longcal_timer;
unsigned int shortcal_timer;
unsigned int resetcal_timer;
unsigned int checkani_timer;
struct timer_list timer;
};
struct ath_cycle_counters {
u32 cycles;
u32 rx_busy;
u32 rx_frame;
u32 tx_frame;
};
enum ath_device_state {
ATH_HW_UNAVAILABLE,
ATH_HW_INITIALIZED,
};
enum ath_op_flags {
ATH_OP_INVALID,
ATH_OP_BEACONS,
ATH_OP_ANI_RUN,
ATH_OP_PRIM_STA_VIF,
ATH_OP_HW_RESET,
ATH_OP_SCANNING,
ATH_OP_MULTI_CHANNEL,
};
enum ath_bus_type {
ATH_PCI,
ATH_AHB,
ATH_USB,
};
struct reg_dmn_pair_mapping {
u16 reg_domain;
u16 reg_5ghz_ctl;
u16 reg_2ghz_ctl;
};
struct ath_regulatory {
char alpha2[2];
enum nl80211_dfs_regions region;
u16 country_code;
u16 max_power_level;
u16 current_rd;
int16_t power_limit;
struct reg_dmn_pair_mapping *regpair;
};
enum ath_crypt_caps {
ATH_CRYPT_CAP_CIPHER_AESCCM = BIT(0),
ATH_CRYPT_CAP_MIC_COMBINED = BIT(1),
};
struct ath_keyval {
u8 kv_type;
u8 kv_pad;
u16 kv_len;
u8 kv_val[16]; /* TK */
u8 kv_mic[8]; /* Michael MIC key */
u8 kv_txmic[8]; /* Michael MIC TX key (used only if the hardware
* supports both MIC keys in the same key cache entry;
* in that case, kv_mic is the RX key) */
};
enum ath_cipher {
ATH_CIPHER_WEP = 0,
ATH_CIPHER_AES_OCB = 1,
ATH_CIPHER_AES_CCM = 2,
ATH_CIPHER_CKIP = 3,
ATH_CIPHER_TKIP = 4,
ATH_CIPHER_CLR = 5,
ATH_CIPHER_MIC = 127
};
/**
* struct ath_ops - Register read/write operations
*
* @read: Register read
* @multi_read: Multiple register read
* @write: Register write
* @enable_write_buffer: Enable multiple register writes
 * @write_flush: flush buffered register writes and disable buffering
 * @rmw: read-modify-write a register: clear the @clr bits, then set the @set bits
*/
struct ath_ops {
unsigned int (*read)(void *, u32 reg_offset);
void (*multi_read)(void *, u32 *addr, u32 *val, u16 count);
void (*write)(void *, u32 val, u32 reg_offset);
void (*enable_write_buffer)(void *);
void (*write_flush) (void *);
u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
};
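/*
 * Wiring sketch (illustrative, not part of the original file): a minimal
 * ath_ops implementation backed by a fake in-memory register file, just to
 * show the calling convention. The ath_example_* names are assumptions;
 * real bus glue maps these callbacks onto MMIO or USB accesses instead.
 */
struct ath_example_regs {
	u32 regs[1024];			/* fake register file in host memory */
};

static inline unsigned int ath_example_reg_read(void *priv, u32 reg_offset)
{
	struct ath_example_regs *er = priv;

	return er->regs[reg_offset / sizeof(u32)];
}

static inline void ath_example_reg_write(void *priv, u32 val, u32 reg_offset)
{
	struct ath_example_regs *er = priv;

	er->regs[reg_offset / sizeof(u32)] = val;
}

static const struct ath_ops ath_example_reg_ops __maybe_unused = {
	.read	= ath_example_reg_read,
	.write	= ath_example_reg_write,
	/* .multi_read, .rmw and the write-buffer hooks are left out of this sketch */
};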
struct ath_common;
struct ath_bus_ops;
struct ath_common {
void *ah;
void *priv;
struct ieee80211_hw *hw;
int debug_mask;
enum ath_device_state state;
unsigned long op_flags;
struct ath_ani ani;
u16 cachelsz;
u16 curaid;
u8 macaddr[ETH_ALEN];
u8 curbssid[ETH_ALEN];
u8 bssidmask[ETH_ALEN];
u32 rx_bufsize;
u32 keymax;
DECLARE_BITMAP(keymap, ATH_KEYMAX);
DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
DECLARE_BITMAP(ccmp_keymap, ATH_KEYMAX);
enum ath_crypt_caps crypt_caps;
unsigned int clockrate;
spinlock_t cc_lock;
struct ath_cycle_counters cc_ani;
struct ath_cycle_counters cc_survey;
struct ath_regulatory regulatory;
struct ath_regulatory reg_world_copy;
const struct ath_ops *ops;
const struct ath_bus_ops *bus_ops;
bool btcoex_enabled;
bool disable_ani;
bool bt_ant_diversity;
int last_rssi;
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
};
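/*
 * Key-cache bookkeeping sketch (illustrative): reserving a free slot in the
 * keymap bitmap of struct ath_common above. The real allocator also has to
 * handle split TX/RX MIC entries; this only shows the bitmap idea.
 */
static inline int ath_example_reserve_key_slot(struct ath_common *common)
{
	int slot = find_first_zero_bit(common->keymap, ATH_KEYMAX);

	if (slot >= ATH_KEYMAX)
		return -ENOSPC;	/* key cache full */

	set_bit(slot, common->keymap);
	return slot;
}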
struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
u32 len,
gfp_t gfp_mask);
bool ath_is_mybeacon(struct ath_common *common, struct ieee80211_hdr *hdr);
void ath_hw_setbssidmask(struct ath_common *common);
void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key);
int ath_key_config(struct ath_common *common,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key);
bool ath_hw_keyreset(struct ath_common *common, u16 entry);
void ath_hw_cycle_counters_update(struct ath_common *common);
int32_t ath_hw_get_listen_time(struct ath_common *common);
__printf(3, 4)
void ath_printk(const char *level, const struct ath_common *common,
const char *fmt, ...);
#define ath_emerg(common, fmt, ...) \
ath_printk(KERN_EMERG, common, fmt, ##__VA_ARGS__)
#define ath_alert(common, fmt, ...) \
ath_printk(KERN_ALERT, common, fmt, ##__VA_ARGS__)
#define ath_crit(common, fmt, ...) \
ath_printk(KERN_CRIT, common, fmt, ##__VA_ARGS__)
#define ath_err(common, fmt, ...) \
ath_printk(KERN_ERR, common, fmt, ##__VA_ARGS__)
#define ath_warn(common, fmt, ...) \
ath_printk(KERN_WARNING, common, fmt, ##__VA_ARGS__)
#define ath_notice(common, fmt, ...) \
ath_printk(KERN_NOTICE, common, fmt, ##__VA_ARGS__)
#define ath_info(common, fmt, ...) \
ath_printk(KERN_INFO, common, fmt, ##__VA_ARGS__)
/**
* enum ath_debug_level - atheros wireless debug level
*
* @ATH_DBG_RESET: reset processing
* @ATH_DBG_QUEUE: hardware queue management
* @ATH_DBG_EEPROM: eeprom processing
* @ATH_DBG_CALIBRATE: periodic calibration
* @ATH_DBG_INTERRUPT: interrupt processing
* @ATH_DBG_REGULATORY: regulatory processing
 * @ATH_DBG_ANI: adaptive noise immunity processing
* @ATH_DBG_XMIT: basic xmit operation
* @ATH_DBG_BEACON: beacon handling
* @ATH_DBG_CONFIG: configuration of the hardware
* @ATH_DBG_FATAL: fatal errors, this is the default, DBG_DEFAULT
* @ATH_DBG_PS: power save processing
* @ATH_DBG_HWTIMER: hardware timer handling
 * @ATH_DBG_BTCOEX: bluetooth coexistence
* @ATH_DBG_BSTUCK: stuck beacons
* @ATH_DBG_MCI: Message Coexistence Interface, a private protocol
* used exclusively for WLAN-BT coexistence starting from
* AR9462.
 * @ATH_DBG_DFS: radar detection
* @ATH_DBG_WOW: Wake on Wireless
* @ATH_DBG_DYNACK: dynack handling
* @ATH_DBG_ANY: enable all debugging
*
* The debug level is used to control the amount and type of debugging output
* we want to see. Each driver has its own method for enabling debugging and
* modifying debug level states -- but this is typically done through a
* module parameter 'debug' along with a respective 'debug' debugfs file
* entry.
*/
enum ATH_DEBUG {
ATH_DBG_RESET = 0x00000001,
ATH_DBG_QUEUE = 0x00000002,
ATH_DBG_EEPROM = 0x00000004,
ATH_DBG_CALIBRATE = 0x00000008,
ATH_DBG_INTERRUPT = 0x00000010,
ATH_DBG_REGULATORY = 0x00000020,
ATH_DBG_ANI = 0x00000040,
ATH_DBG_XMIT = 0x00000080,
ATH_DBG_BEACON = 0x00000100,
ATH_DBG_CONFIG = 0x00000200,
ATH_DBG_FATAL = 0x00000400,
ATH_DBG_PS = 0x00000800,
ATH_DBG_BTCOEX = 0x00001000,
ATH_DBG_WMI = 0x00002000,
ATH_DBG_BSTUCK = 0x00004000,
ATH_DBG_MCI = 0x00008000,
ATH_DBG_DFS = 0x00010000,
ATH_DBG_WOW = 0x00020000,
ATH_DBG_CHAN_CTX = 0x00040000,
ATH_DBG_DYNACK = 0x00080000,
ATH_DBG_ANY = 0xffffffff
};
#define ATH_DBG_DEFAULT (ATH_DBG_FATAL)
#define ATH_DBG_MAX_LEN 512
#ifdef CONFIG_ATH_DEBUG
#define ath_dbg(common, dbg_mask, fmt, ...) \
do { \
if ((common)->debug_mask & ATH_DBG_##dbg_mask) \
ath_printk(KERN_DEBUG, common, fmt, ##__VA_ARGS__); \
} while (0)
#define ATH_DBG_WARN(foo, arg...) WARN(foo, arg)
#define ATH_DBG_WARN_ON_ONCE(foo) WARN_ON_ONCE(foo)
#else
static inline __attribute__ ((format (printf, 3, 4)))
void _ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
const char *fmt, ...)
{
}
#define ath_dbg(common, dbg_mask, fmt, ...) \
_ath_dbg(common, ATH_DBG_##dbg_mask, fmt, ##__VA_ARGS__)
#define ATH_DBG_WARN(foo, arg...) do {} while (0)
#define ATH_DBG_WARN_ON_ONCE(foo) ({ \
int __ret_warn_once = !!(foo); \
unlikely(__ret_warn_once); \
})
#endif /* CONFIG_ATH_DEBUG */
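/*
 * Usage sketch (illustrative): the logging wrappers above take an
 * ath_common pointer and a printf-style format; ath_dbg() additionally
 * takes the ATH_DBG_* suffix without the prefix. The helper below is
 * hypothetical and only demonstrates the calling convention.
 */
static inline void ath_example_log_reset(struct ath_common *common, int ret)
{
	if (ret)
		ath_err(common, "chip reset failed: %d\n", ret);
	else
		ath_dbg(common, RESET, "chip reset complete\n");
}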
/** Returns string describing opmode, or NULL if unknown mode. */
#ifdef CONFIG_ATH_DEBUG
const char *ath_opmode_to_string(enum nl80211_iftype opmode);
#else
static inline const char *ath_opmode_to_string(enum nl80211_iftype opmode)
{
return "UNKNOWN";
}
#endif
#endif /* ATH_H */


@ -0,0 +1,47 @@
config ATH10K
tristate "Atheros 802.11ac wireless cards support"
depends on MAC80211 && HAS_DMA
select ATH_COMMON
---help---
This module adds support for wireless adapters based on
	  the Atheros IEEE 802.11ac family of chipsets.
If you choose to build a module, it'll be called ath10k.
config ATH10K_PCI
tristate "Atheros ath10k PCI support"
depends on ATH10K && PCI
---help---
	  This module adds support for the PCIe bus.
config ATH10K_DEBUG
bool "Atheros ath10k debugging"
depends on ATH10K
---help---
Enables debug support
If unsure, say Y to make it easier to debug problems.
config ATH10K_DEBUGFS
bool "Atheros ath10k debugfs support"
depends on ATH10K && DEBUG_FS
select RELAY
---help---
	  Enables debugfs support
If unsure, say Y to make it easier to debug problems.
config ATH10K_TRACING
bool "Atheros ath10k tracing support"
depends on ATH10K
depends on EVENT_TRACING
---help---
	  Select this to let ath10k use the tracing infrastructure.
config ATH10K_DFS_CERTIFIED
bool "Atheros DFS support for certified platforms"
depends on ATH10K && CFG80211_CERTIFICATION_ONUS
default n
---help---
This option enables DFS support for initiating radiation on
ath10k.


@ -0,0 +1,22 @@
obj-$(CONFIG_ATH10K) += ath10k_core.o
ath10k_core-y += mac.o \
debug.o \
core.o \
htc.o \
htt.o \
htt_rx.o \
htt_tx.o \
txrx.o \
wmi.o \
bmi.o
ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
ath10k_pci-y += pci.o \
ce.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)


@ -0,0 +1,318 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "bmi.h"
#include "hif.h"
#include "debug.h"
#include "htc.h"
void ath10k_bmi_start(struct ath10k *ar)
{
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");
ar->bmi.done_sent = false;
}
int ath10k_bmi_done(struct ath10k *ar)
{
struct bmi_cmd cmd;
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");
if (ar->bmi.done_sent) {
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
return 0;
}
ar->bmi.done_sent = true;
cmd.id = __cpu_to_le32(BMI_DONE);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
if (ret) {
ath10k_warn(ar, "unable to write to the device: %d\n", ret);
return ret;
}
return 0;
}
int ath10k_bmi_get_target_info(struct ath10k *ar,
struct bmi_target_info *target_info)
{
struct bmi_cmd cmd;
union bmi_resp resp;
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
u32 resplen = sizeof(resp.get_target_info);
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");
if (ar->bmi.done_sent) {
ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
return -EBUSY;
}
cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
if (ret) {
ath10k_warn(ar, "unable to get target info from device\n");
return ret;
}
if (resplen < sizeof(resp.get_target_info)) {
ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
resplen);
return -EIO;
}
target_info->version = __le32_to_cpu(resp.get_target_info.version);
target_info->type = __le32_to_cpu(resp.get_target_info.type);
return 0;
}
int ath10k_bmi_read_memory(struct ath10k *ar,
u32 address, void *buffer, u32 length)
{
struct bmi_cmd cmd;
union bmi_resp resp;
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
u32 rxlen;
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
address, length);
if (ar->bmi.done_sent) {
ath10k_warn(ar, "command disallowed\n");
return -EBUSY;
}
while (length) {
rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
cmd.id = __cpu_to_le32(BMI_READ_MEMORY);
cmd.read_mem.addr = __cpu_to_le32(address);
cmd.read_mem.len = __cpu_to_le32(rxlen);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
&resp, &rxlen);
if (ret) {
ath10k_warn(ar, "unable to read from the device (%d)\n",
ret);
return ret;
}
memcpy(buffer, resp.read_mem.payload, rxlen);
address += rxlen;
buffer += rxlen;
length -= rxlen;
}
return 0;
}
int ath10k_bmi_write_memory(struct ath10k *ar,
u32 address, const void *buffer, u32 length)
{
struct bmi_cmd cmd;
u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
u32 txlen;
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
address, length);
if (ar->bmi.done_sent) {
ath10k_warn(ar, "command disallowed\n");
return -EBUSY;
}
while (length) {
txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
		/* copy before roundup to avoid reading beyond buffer */
memcpy(cmd.write_mem.payload, buffer, txlen);
txlen = roundup(txlen, 4);
cmd.id = __cpu_to_le32(BMI_WRITE_MEMORY);
cmd.write_mem.addr = __cpu_to_le32(address);
cmd.write_mem.len = __cpu_to_le32(txlen);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
NULL, NULL);
if (ret) {
ath10k_warn(ar, "unable to write to the device (%d)\n",
ret);
return ret;
}
/* fixup roundup() so `length` zeroes out for last chunk */
txlen = min(txlen, length);
address += txlen;
buffer += txlen;
length -= txlen;
}
return 0;
}
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
struct bmi_cmd cmd;
union bmi_resp resp;
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
u32 resplen = sizeof(resp.execute);
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
address, param);
if (ar->bmi.done_sent) {
ath10k_warn(ar, "command disallowed\n");
return -EBUSY;
}
cmd.id = __cpu_to_le32(BMI_EXECUTE);
cmd.execute.addr = __cpu_to_le32(address);
cmd.execute.param = __cpu_to_le32(param);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
if (ret) {
ath10k_warn(ar, "unable to read from the device\n");
return ret;
}
if (resplen < sizeof(resp.execute)) {
ath10k_warn(ar, "invalid execute response length (%d)\n",
resplen);
return -EIO;
}
*result = __le32_to_cpu(resp.execute.result);
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
return 0;
}
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
{
struct bmi_cmd cmd;
u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
u32 txlen;
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
buffer, length);
if (ar->bmi.done_sent) {
ath10k_warn(ar, "command disallowed\n");
return -EBUSY;
}
while (length) {
txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
WARN_ON_ONCE(txlen & 3);
cmd.id = __cpu_to_le32(BMI_LZ_DATA);
cmd.lz_data.len = __cpu_to_le32(txlen);
memcpy(cmd.lz_data.payload, buffer, txlen);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
NULL, NULL);
if (ret) {
ath10k_warn(ar, "unable to write to the device\n");
return ret;
}
buffer += txlen;
length -= txlen;
}
return 0;
}
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
{
struct bmi_cmd cmd;
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
address);
if (ar->bmi.done_sent) {
ath10k_warn(ar, "command disallowed\n");
return -EBUSY;
}
cmd.id = __cpu_to_le32(BMI_LZ_STREAM_START);
cmd.lz_start.addr = __cpu_to_le32(address);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
if (ret) {
ath10k_warn(ar, "unable to Start LZ Stream to the device\n");
return ret;
}
return 0;
}
int ath10k_bmi_fast_download(struct ath10k *ar,
u32 address, const void *buffer, u32 length)
{
u8 trailer[4] = {};
u32 head_len = rounddown(length, 4);
u32 trailer_len = length - head_len;
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI,
"bmi fast download address 0x%x buffer 0x%p length %d\n",
address, buffer, length);
ret = ath10k_bmi_lz_stream_start(ar, address);
if (ret)
return ret;
/* copy the last word into a zero padded buffer */
if (trailer_len > 0)
memcpy(trailer, buffer + head_len, trailer_len);
ret = ath10k_bmi_lz_data(ar, buffer, head_len);
if (ret)
return ret;
if (trailer_len > 0)
ret = ath10k_bmi_lz_data(ar, trailer, 4);
if (ret != 0)
return ret;
/*
* Close compressed stream and open a new (fake) one.
* This serves mainly to flush Target caches.
*/
ret = ath10k_bmi_lz_stream_start(ar, 0x00);
return ret;
}


@ -0,0 +1,225 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _BMI_H_
#define _BMI_H_
#include "core.h"
/*
* Bootloader Messaging Interface (BMI)
*
* BMI is a very simple messaging interface used during initialization
* to read memory, write memory, execute code, and to define an
* application entry PC.
*
* It is used to download an application to QCA988x, to provide
* patches to code that is already resident on QCA988x, and generally
* to examine and modify state. The Host has an opportunity to use
* BMI only once during bootup. Once the Host issues a BMI_DONE
* command, this opportunity ends.
*
* The Host writes BMI requests to mailbox0, and reads BMI responses
* from mailbox0. BMI requests all begin with a command
* (see below for specific commands), and are followed by
* command-specific data.
*
* Flow control:
* The Host can only issue a command once the Target gives it a
* "BMI Command Credit", using AR8K Counter #4. As soon as the
* Target has completed a command, it issues another BMI Command
* Credit (so the Host can issue the next command).
*
* BMI handles all required Target-side cache flushing.
*/
/* Maximum data size used for BMI transfers */
#define BMI_MAX_DATA_SIZE 256
/* len = cmd + addr + length */
#define BMI_MAX_CMDBUF_SIZE (BMI_MAX_DATA_SIZE + \
sizeof(u32) + \
sizeof(u32) + \
sizeof(u32))
/* BMI Commands */
enum bmi_cmd_id {
BMI_NO_COMMAND = 0,
BMI_DONE = 1,
BMI_READ_MEMORY = 2,
BMI_WRITE_MEMORY = 3,
BMI_EXECUTE = 4,
BMI_SET_APP_START = 5,
BMI_READ_SOC_REGISTER = 6,
BMI_READ_SOC_WORD = 6,
BMI_WRITE_SOC_REGISTER = 7,
BMI_WRITE_SOC_WORD = 7,
BMI_GET_TARGET_ID = 8,
BMI_GET_TARGET_INFO = 8,
BMI_ROMPATCH_INSTALL = 9,
BMI_ROMPATCH_UNINSTALL = 10,
BMI_ROMPATCH_ACTIVATE = 11,
BMI_ROMPATCH_DEACTIVATE = 12,
BMI_LZ_STREAM_START = 13, /* should be followed by LZ_DATA */
BMI_LZ_DATA = 14,
BMI_NVRAM_PROCESS = 15,
};
#define BMI_NVRAM_SEG_NAME_SZ 16
struct bmi_cmd {
__le32 id; /* enum bmi_cmd_id */
union {
struct {
} done;
struct {
__le32 addr;
__le32 len;
} read_mem;
struct {
__le32 addr;
__le32 len;
u8 payload[0];
} write_mem;
struct {
__le32 addr;
__le32 param;
} execute;
struct {
__le32 addr;
} set_app_start;
struct {
__le32 addr;
} read_soc_reg;
struct {
__le32 addr;
__le32 value;
} write_soc_reg;
struct {
} get_target_info;
struct {
__le32 rom_addr;
__le32 ram_addr; /* or value */
__le32 size;
			__le32 activate; /* 0=install, but don't activate */
} rompatch_install;
struct {
__le32 patch_id;
} rompatch_uninstall;
struct {
__le32 count;
__le32 patch_ids[0]; /* length of @count */
} rompatch_activate;
struct {
__le32 count;
__le32 patch_ids[0]; /* length of @count */
} rompatch_deactivate;
struct {
__le32 addr;
} lz_start;
struct {
__le32 len; /* max BMI_MAX_DATA_SIZE */
u8 payload[0]; /* length of @len */
} lz_data;
struct {
u8 name[BMI_NVRAM_SEG_NAME_SZ];
} nvram_process;
u8 payload[BMI_MAX_CMDBUF_SIZE];
};
} __packed;
union bmi_resp {
struct {
u8 payload[0];
} read_mem;
struct {
__le32 result;
} execute;
struct {
__le32 value;
} read_soc_reg;
struct {
__le32 len;
__le32 version;
__le32 type;
} get_target_info;
struct {
__le32 patch_id;
} rompatch_install;
struct {
__le32 patch_id;
} rompatch_uninstall;
struct {
/* 0 = nothing executed
* otherwise = NVRAM segment return value */
__le32 result;
} nvram_process;
u8 payload[BMI_MAX_CMDBUF_SIZE];
} __packed;
struct bmi_target_info {
u32 version;
u32 type;
};
/* in msec */
#define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ)
#define BMI_CE_NUM_TO_TARG 0
#define BMI_CE_NUM_TO_HOST 1
void ath10k_bmi_start(struct ath10k *ar);
int ath10k_bmi_done(struct ath10k *ar);
int ath10k_bmi_get_target_info(struct ath10k *ar,
struct bmi_target_info *target_info);
int ath10k_bmi_read_memory(struct ath10k *ar, u32 address,
void *buffer, u32 length);
int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
const void *buffer, u32 length);
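/*
 * Boot-flow sketch (illustrative, not part of the original header): a
 * typical host-side BMI session reads the target info, downloads an image
 * and finally issues BMI_DONE, after which BMI may no longer be used.
 * The load address below is a placeholder, not a real target address.
 */
static inline int ath10k_bmi_example_boot(struct ath10k *ar,
					  const void *image, u32 image_len)
{
	struct bmi_target_info info;
	u32 load_addr = 0x1234;	/* hypothetical address */
	int ret;

	ath10k_bmi_start(ar);

	ret = ath10k_bmi_get_target_info(ar, &info);
	if (ret)
		return ret;

	ret = ath10k_bmi_write_memory(ar, load_addr, image, image_len);
	if (ret)
		return ret;

	return ath10k_bmi_done(ar);
}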
#define ath10k_bmi_read32(ar, item, val) \
({ \
int ret; \
u32 addr; \
__le32 tmp; \
\
addr = host_interest_item_address(HI_ITEM(item)); \
ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \
if (!ret) \
*val = __le32_to_cpu(tmp); \
ret; \
})
#define ath10k_bmi_write32(ar, item, val) \
({ \
int ret; \
u32 address; \
__le32 v = __cpu_to_le32(val); \
\
address = host_interest_item_address(HI_ITEM(item)); \
ret = ath10k_bmi_write_memory(ar, address, \
(u8 *)&v, sizeof(v)); \
ret; \
})
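/*
 * Usage sketch (illustrative): read-modify-write of a 32-bit host_interest
 * item via the helpers above. hi_option_flag is assumed to be one of the
 * host_interest fields from targaddrs.h, which is not shown in this excerpt.
 */
static inline int ath10k_bmi_example_set_option(struct ath10k *ar, u32 flag)
{
	u32 val;
	int ret;

	ret = ath10k_bmi_read32(ar, hi_option_flag, &val);
	if (ret)
		return ret;

	return ath10k_bmi_write32(ar, hi_option_flag, val | flag);
}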
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result);
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
const void *buffer, u32 length);
#endif /* _BMI_H_ */

File diff suppressed because it is too large


@ -0,0 +1,425 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _CE_H_
#define _CE_H_
#include "hif.h"
/* Maximum number of Copy Engines supported */
#define CE_COUNT_MAX 8
#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
#define CE_SEND_FLAG_GATHER 0x00010000
/*
* Copy Engine support: low-level Target-side Copy Engine API.
* This is a hardware access layer used by code that understands
* how to use copy engines.
*/
struct ath10k_ce_pipe;
#define CE_DESC_FLAGS_GATHER (1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
#define CE_DESC_FLAGS_META_DATA_LSB 3
struct ce_desc {
__le32 addr;
__le16 nbytes;
__le16 flags; /* %CE_DESC_FLAGS_ */
};
struct ath10k_ce_ring {
/* Number of entries in this ring; must be power of 2 */
unsigned int nentries;
unsigned int nentries_mask;
/*
* For dest ring, this is the next index to be processed
* by software after it was/is received into.
*
* For src ring, this is the last descriptor that was sent
* and completion processed by software.
*
* Regardless of src or dest ring, this is an invariant
* (modulo ring size):
* write index >= read index >= sw_index
*/
unsigned int sw_index;
/* cached copy */
unsigned int write_index;
/*
* For src ring, this is the next index not yet processed by HW.
* This is a cached copy of the real HW index (read index), used
* for avoiding reading the HW index register more often than
* necessary.
* This extends the invariant:
* write index >= read index >= hw_index >= sw_index
*
* For dest ring, this is currently unused.
*/
/* cached copy */
unsigned int hw_index;
/* Start of DMA-coherent area reserved for descriptors */
/* Host address space */
void *base_addr_owner_space_unaligned;
/* CE address space */
u32 base_addr_ce_space_unaligned;
/*
* Actual start of descriptors.
* Aligned to descriptor-size boundary.
* Points into reserved DMA-coherent area, above.
*/
/* Host address space */
void *base_addr_owner_space;
/* CE address space */
u32 base_addr_ce_space;
/*
* Start of shadow copy of descriptors, within regular memory.
* Aligned to descriptor-size boundary.
*/
void *shadow_base_unaligned;
struct ce_desc *shadow_base;
/* keep last */
void *per_transfer_context[0];
};
struct ath10k_ce_pipe {
struct ath10k *ar;
unsigned int id;
unsigned int attr_flags;
u32 ctrl_addr;
void (*send_cb)(struct ath10k_ce_pipe *);
void (*recv_cb)(struct ath10k_ce_pipe *);
unsigned int src_sz_max;
struct ath10k_ce_ring *src_ring;
struct ath10k_ce_ring *dest_ring;
};
/* Copy Engine settable attributes */
struct ce_attr;
/*==================Send====================*/
/* ath10k_ce_send flags */
#define CE_SEND_FLAG_BYTE_SWAP 1
/*
* Queue a source buffer to be sent to an anonymous destination buffer.
* ce - which copy engine to use
* buffer - address of buffer
* nbytes - number of bytes to send
* transfer_id - arbitrary ID; reflected to destination
* flags - CE_SEND_FLAG_* values
* Returns 0 on success; otherwise an error status.
*
* Note: If no flags are specified, use CE's default data swap mode.
*
* Implementation note: pushes 1 buffer to Source ring
*/
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_send_context,
u32 buffer,
unsigned int nbytes,
/* 14 bits */
unsigned int transfer_id,
unsigned int flags);
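/*
 * Send-path sketch (illustrative): queue a single DMA-mapped buffer on a
 * copy engine pipe. The buffer address is assumed to already be a
 * device-visible DMA address obtained elsewhere via the DMA API.
 */
static inline int ath10k_ce_example_send(struct ath10k_ce_pipe *pipe,
					 void *ctx, u32 paddr,
					 unsigned int nbytes)
{
	return ath10k_ce_send(pipe, ctx, paddr, nbytes,
			      0 /* transfer_id */, 0 /* flags */);
}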
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags);
void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe);
int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
/*==================Recv=======================*/
int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED 1
/*
* Supply data for the next completed unprocessed receive descriptor.
* Pops buffer from Dest ring.
*/
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
unsigned int *flagsp);
/*
* Supply data for the next completed unprocessed send descriptor.
* Pops 1 completed send buffer from Source ring.
*/
int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp);
/*==================CE Engine Initialization=======================*/
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
const struct ce_attr *attr,
void (*send_cb)(struct ath10k_ce_pipe *),
void (*recv_cb)(struct ath10k_ce_pipe *));
void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
const struct ce_attr *attr);
void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
/*==================CE Engine Shutdown=======================*/
/*
* Support clean shutdown by allowing the caller to revoke
* receive buffers. Target DMA must be stopped before using
* this API.
*/
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp);
/*
* Support clean shutdown by allowing the caller to cancel
* pending sends. Target DMA must be stopped before using
* this API.
*/
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp);
/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_disable_interrupts(struct ath10k *ar);
void ath10k_ce_enable_interrupts(struct ath10k *ar);
/* ce_attr.flags values */
/* Use NonSnooping PCIe accesses? */
#define CE_ATTR_NO_SNOOP 1
/* Byte swap data words */
#define CE_ATTR_BYTE_SWAP_DATA 2
/* Swizzle descriptors? */
#define CE_ATTR_SWIZZLE_DESCRIPTORS 4
/* no interrupt on copy completion */
#define CE_ATTR_DIS_INTR 8
/* Attributes of an instance of a Copy Engine */
struct ce_attr {
/* CE_ATTR_* values */
unsigned int flags;
/* #entries in source ring - Must be a power of 2 */
unsigned int src_nentries;
/*
* Max source send size for this CE.
* This is also the minimum size of a destination buffer.
*/
unsigned int src_sz_max;
/* #entries in destination ring - Must be a power of 2 */
unsigned int dest_nentries;
};
#define SR_BA_ADDRESS 0x0000
#define SR_SIZE_ADDRESS 0x0004
#define DR_BA_ADDRESS 0x0008
#define DR_SIZE_ADDRESS 0x000c
#define CE_CMD_ADDRESS 0x0018
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB 17
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \
(((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \
CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK)
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB 16
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \
(((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \
CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB)
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \
(((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \
CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK)
#define CE_CTRL1_DMAX_LENGTH_MSB 15
#define CE_CTRL1_DMAX_LENGTH_LSB 0
#define CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff
#define CE_CTRL1_DMAX_LENGTH_GET(x) \
(((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB)
#define CE_CTRL1_DMAX_LENGTH_SET(x) \
(((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK)
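/*
 * Bitfield sketch (illustrative): composing a CE_CTRL1 register value from
 * the SET() helpers above. Actual register programming happens in ce.c,
 * which is not part of this excerpt.
 */
static inline u32 ath10k_ce_example_ctrl1(unsigned int dmax_len)
{
	return CE_CTRL1_DMAX_LENGTH_SET(dmax_len) |
	       CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(0) |
	       CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(0);
}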
#define CE_CTRL1_ADDRESS 0x0010
#define CE_CTRL1_HW_MASK 0x0007ffff
#define CE_CTRL1_SW_MASK 0x0007ffff
#define CE_CTRL1_HW_WRITE_MASK 0x00000000
#define CE_CTRL1_SW_WRITE_MASK 0x0007ffff
#define CE_CTRL1_RSTMASK 0xffffffff
#define CE_CTRL1_RESET 0x00000080
#define CE_CMD_HALT_STATUS_MSB 3
#define CE_CMD_HALT_STATUS_LSB 3
#define CE_CMD_HALT_STATUS_MASK 0x00000008
#define CE_CMD_HALT_STATUS_GET(x) \
(((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB)
#define CE_CMD_HALT_STATUS_SET(x) \
(((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK)
#define CE_CMD_HALT_STATUS_RESET 0
#define CE_CMD_HALT_MSB 0
#define CE_CMD_HALT_MASK 0x00000001
#define HOST_IE_COPY_COMPLETE_MSB 0
#define HOST_IE_COPY_COMPLETE_LSB 0
#define HOST_IE_COPY_COMPLETE_MASK 0x00000001
#define HOST_IE_COPY_COMPLETE_GET(x) \
(((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB)
#define HOST_IE_COPY_COMPLETE_SET(x) \
(((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK)
#define HOST_IE_COPY_COMPLETE_RESET 0
#define HOST_IE_ADDRESS 0x002c
#define HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010
#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008
#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004
#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002
#define HOST_IS_COPY_COMPLETE_MASK 0x00000001
#define HOST_IS_ADDRESS 0x0030
#define MISC_IE_ADDRESS 0x0034
#define MISC_IS_AXI_ERR_MASK 0x00000400
#define MISC_IS_DST_ADDR_ERR_MASK 0x00000200
#define MISC_IS_SRC_LEN_ERR_MASK 0x00000100
#define MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080
#define MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040
#define MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020
#define MISC_IS_ADDRESS 0x0038
#define SR_WR_INDEX_ADDRESS 0x003c
#define DST_WR_INDEX_ADDRESS 0x0040
#define CURRENT_SRRI_ADDRESS 0x0044
#define CURRENT_DRRI_ADDRESS 0x0048
#define SRC_WATERMARK_LOW_MSB 31
#define SRC_WATERMARK_LOW_LSB 16
#define SRC_WATERMARK_LOW_MASK 0xffff0000
#define SRC_WATERMARK_LOW_GET(x) \
(((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB)
#define SRC_WATERMARK_LOW_SET(x) \
(((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK)
#define SRC_WATERMARK_LOW_RESET 0
#define SRC_WATERMARK_HIGH_MSB 15
#define SRC_WATERMARK_HIGH_LSB 0
#define SRC_WATERMARK_HIGH_MASK 0x0000ffff
#define SRC_WATERMARK_HIGH_GET(x) \
(((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB)
#define SRC_WATERMARK_HIGH_SET(x) \
(((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK)
#define SRC_WATERMARK_HIGH_RESET 0
#define SRC_WATERMARK_ADDRESS 0x004c
#define DST_WATERMARK_LOW_LSB 16
#define DST_WATERMARK_LOW_MASK 0xffff0000
#define DST_WATERMARK_LOW_SET(x) \
(((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK)
#define DST_WATERMARK_LOW_RESET 0
#define DST_WATERMARK_HIGH_MSB 15
#define DST_WATERMARK_HIGH_LSB 0
#define DST_WATERMARK_HIGH_MASK 0x0000ffff
#define DST_WATERMARK_HIGH_GET(x) \
(((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB)
#define DST_WATERMARK_HIGH_SET(x) \
(((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK)
#define DST_WATERMARK_HIGH_RESET 0
#define DST_WATERMARK_ADDRESS 0x0050
static inline u32 ath10k_ce_base_address(unsigned int ce_id)
{
return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
}
#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK | \
HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \
HOST_IS_DST_RING_LOW_WATERMARK_MASK | \
HOST_IS_DST_RING_HIGH_WATERMARK_MASK)
#define CE_ERROR_MASK (MISC_IS_AXI_ERR_MASK | \
MISC_IS_DST_ADDR_ERR_MASK | \
MISC_IS_SRC_LEN_ERR_MASK | \
MISC_IS_DST_MAX_LEN_VIO_MASK | \
MISC_IS_DST_RING_OVERFLOW_MASK | \
MISC_IS_SRC_RING_OVERFLOW_MASK)
#define CE_SRC_RING_TO_DESC(baddr, idx) \
(&(((struct ce_desc *)baddr)[idx]))
#define CE_DEST_RING_TO_DESC(baddr, idx) \
(&(((struct ce_desc *)baddr)[idx]))
/* Ring arithmetic (modulus number of entries in ring, which is a power of 2). */
#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
(((int)(toidx)-(int)(fromidx)) & (nentries_mask))
#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
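/*
 * Ring-arithmetic sketch (illustrative): counting used and free slots with
 * the helpers above. Keeping one slot unused to tell "full" from "empty"
 * apart is an assumption of this sketch, mirroring the usual ring convention.
 */
static inline unsigned int ath10k_ce_example_ring_used(struct ath10k_ce_ring *ring)
{
	return CE_RING_DELTA(ring->nentries_mask, ring->sw_index,
			     ring->write_index);
}

static inline unsigned int ath10k_ce_example_ring_free(struct ath10k_ce_ring *ring)
{
	return ring->nentries_mask - ath10k_ce_example_ring_used(ring);
}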
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
(((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000
#define CE_INTERRUPT_SUMMARY(ar) \
CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \
ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \
CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS))
#endif /* _CE_H_ */

File diff suppressed because it is too large


@ -0,0 +1,580 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _CORE_H_
#define _CORE_H_
#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
#include "htt.h"
#include "htc.h"
#include "hw.h"
#include "targaddrs.h"
#include "wmi.h"
#include "../ath.h"
#include "../regd.h"
#include "../dfs_pattern_detector.h"
#include "spectral.h"
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
#define WO(_f) ((_f##_OFFSET) >> 2)
#define ATH10K_SCAN_ID 0
#define WMI_READY_TIMEOUT (5 * HZ)
#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
#define ATH10K_NUM_CHANS 38
/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
#define ATH10K_MAX_NUM_MGMT_PENDING 128
/* number of failed packets */
#define ATH10K_KICKOUT_THRESHOLD 50
/*
* Use insanely high numbers to make sure that the firmware implementation
 * won't start; we already have the same functionality in hostapd. Unit
* is seconds.
*/
#define ATH10K_KEEPALIVE_MIN_IDLE 3747
#define ATH10K_KEEPALIVE_MAX_IDLE 3895
#define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900
struct ath10k;
struct ath10k_skb_cb {
dma_addr_t paddr;
u8 vdev_id;
struct {
u8 tid;
bool is_offchan;
struct ath10k_htt_txbuf *txbuf;
u32 txbuf_paddr;
} __packed htt;
struct {
bool dtim_zero;
bool deliver_cab;
} bcn;
} __packed;
static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
{
BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) >
IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
}
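/*
 * Usage sketch (illustrative): stashing per-packet driver state in the
 * mac80211 control buffer before the frame enters the tx path. The helper
 * name is hypothetical.
 */
static inline void ath10k_example_tag_skb(struct sk_buff *skb, u8 vdev_id)
{
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

	cb->vdev_id = vdev_id;
	cb->htt.tid = 0;
	cb->htt.is_offchan = false;
}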
static inline u32 host_interest_item_address(u32 item_offset)
{
return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
}
struct ath10k_bmi {
bool done_sent;
};
#define ATH10K_MAX_MEM_REQS 16
struct ath10k_mem_chunk {
void *vaddr;
dma_addr_t paddr;
u32 len;
u32 req_id;
};
struct ath10k_wmi {
enum ath10k_htc_ep_id eid;
struct completion service_ready;
struct completion unified_ready;
wait_queue_head_t tx_credits_wq;
struct wmi_cmd_map *cmd;
struct wmi_vdev_param_map *vdev_param;
struct wmi_pdev_param_map *pdev_param;
u32 num_mem_chunks;
struct ath10k_mem_chunk mem_chunks[ATH10K_MAX_MEM_REQS];
};
struct ath10k_peer_stat {
u8 peer_macaddr[ETH_ALEN];
u32 peer_rssi;
u32 peer_tx_rate;
u32 peer_rx_rate; /* 10x only */
};
struct ath10k_target_stats {
/* PDEV stats */
s32 ch_noise_floor;
u32 tx_frame_count;
u32 rx_frame_count;
u32 rx_clear_count;
u32 cycle_count;
u32 phy_err_count;
u32 chan_tx_power;
u32 ack_rx_bad;
u32 rts_bad;
u32 rts_good;
u32 fcs_bad;
u32 no_beacons;
u32 mib_int_count;
/* PDEV TX stats */
s32 comp_queued;
s32 comp_delivered;
s32 msdu_enqued;
s32 mpdu_enqued;
s32 wmm_drop;
s32 local_enqued;
s32 local_freed;
s32 hw_queued;
s32 hw_reaped;
s32 underrun;
s32 tx_abort;
s32 mpdus_requed;
u32 tx_ko;
u32 data_rc;
u32 self_triggers;
u32 sw_retry_failure;
u32 illgl_rate_phy_err;
u32 pdev_cont_xretry;
u32 pdev_tx_timeout;
u32 pdev_resets;
u32 phy_underrun;
u32 txop_ovf;
/* PDEV RX stats */
s32 mid_ppdu_route_change;
s32 status_rcvd;
s32 r0_frags;
s32 r1_frags;
s32 r2_frags;
s32 r3_frags;
s32 htt_msdus;
s32 htt_mpdus;
s32 loc_msdus;
s32 loc_mpdus;
s32 oversize_amsdu;
s32 phy_errs;
s32 phy_err_drop;
s32 mpdu_errs;
/* VDEV STATS */
/* PEER STATS */
u8 peers;
struct ath10k_peer_stat peer_stat[TARGET_NUM_PEERS];
/* TODO: Beacon filter stats */
};
struct ath10k_dfs_stats {
u32 phy_errors;
u32 pulses_total;
u32 pulses_detected;
u32 pulses_discarded;
u32 radar_detected;
};
#define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */
struct ath10k_peer {
struct list_head list;
int vdev_id;
u8 addr[ETH_ALEN];
DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
};
struct ath10k_sta {
struct ath10k_vif *arvif;
/* the following are protected by ar->data_lock */
u32 changed; /* IEEE80211_RC_* */
u32 bw;
u32 nss;
u32 smps;
struct work_struct update_wk;
};
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
struct ath10k_vif {
struct list_head list;
u32 vdev_id;
enum wmi_vdev_type vdev_type;
enum wmi_vdev_subtype vdev_subtype;
u32 beacon_interval;
u32 dtim_period;
struct sk_buff *beacon;
/* protected by data_lock */
bool beacon_sent;
struct ath10k *ar;
struct ieee80211_vif *vif;
bool is_started;
bool is_up;
bool spectral_enabled;
u32 aid;
u8 bssid[ETH_ALEN];
struct work_struct wep_key_work;
struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
u8 def_wep_key_idx;
u8 def_wep_key_newidx;
u16 tx_seq_no;
union {
struct {
u32 uapsd;
} sta;
struct {
/* 127 stations; wmi limit */
u8 tim_bitmap[16];
u8 tim_len;
u32 ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
bool hidden_ssid;
/* P2P_IE with NoA attribute for P2P_GO case */
u32 noa_len;
u8 *noa_data;
} ap;
} u;
u8 fixed_rate;
u8 fixed_nss;
u8 force_sgi;
bool use_cts_prot;
int num_legacy_stations;
};
struct ath10k_vif_iter {
u32 vdev_id;
struct ath10k_vif *arvif;
};
/* used for crash-dump storage, protected by data_lock */
struct ath10k_fw_crash_data {
bool crashed_since_read;
uuid_le uuid;
struct timespec timestamp;
__le32 registers[REG_DUMP_COUNT_QCA988X];
};
struct ath10k_debug {
struct dentry *debugfs_phy;
struct ath10k_target_stats target_stats;
DECLARE_BITMAP(wmi_service_bitmap, WMI_SERVICE_MAX);
struct completion event_stats_compl;
unsigned long htt_stats_mask;
struct delayed_work htt_stats_dwork;
struct ath10k_dfs_stats dfs_stats;
struct ath_dfs_pool_stats dfs_pool_stats;
u32 fw_dbglog_mask;
u8 htt_max_amsdu;
u8 htt_max_ampdu;
struct ath10k_fw_crash_data *fw_crash_data;
};
enum ath10k_state {
ATH10K_STATE_OFF = 0,
ATH10K_STATE_ON,
/* When doing firmware recovery the device is first powered down.
* mac80211 is supposed to call in to start() hook later on. It is
* however possible that driver unloading and firmware crash overlap.
* mac80211 can wait on conf_mutex in stop() while the device is
* stopped in ath10k_core_restart() work holding conf_mutex. The state
* RESTARTED means that the device is up and mac80211 has started hw
* reconfiguration. Once mac80211 is done with the reconfiguration we
* set the state to STATE_ON in restart_complete(). */
ATH10K_STATE_RESTARTING,
ATH10K_STATE_RESTARTED,
/* The device has crashed while restarting hw. This state is like ON
* but commands are blocked in HTC and -ECOMM response is given. This
* prevents completion timeouts and makes the driver more responsive to
 * userspace commands. This also prevents recursive recovery. */
ATH10K_STATE_WEDGED,
/* factory tests */
ATH10K_STATE_UTF,
};
enum ath10k_firmware_mode {
/* the default mode, standard 802.11 functionality */
ATH10K_FIRMWARE_MODE_NORMAL,
/* factory tests etc */
ATH10K_FIRMWARE_MODE_UTF,
};
enum ath10k_fw_features {
/* wmi_mgmt_rx_hdr contains extra RSSI information */
ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
/* firmware from 10X branch */
ATH10K_FW_FEATURE_WMI_10X = 1,
	/* firmware supports management frame tx over WMI, otherwise it's over HTT */
ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX = 2,
/* Firmware does not support P2P */
ATH10K_FW_FEATURE_NO_P2P = 3,
/* Firmware 10.2 feature bit. The ATH10K_FW_FEATURE_WMI_10X feature bit
* is required to be set as well.
*/
ATH10K_FW_FEATURE_WMI_10_2 = 4,
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
enum ath10k_dev_flags {
	/* Indicates that the ath10k device is in the CAC phase of DFS */
ATH10K_CAC_RUNNING,
ATH10K_FLAG_CORE_REGISTERED,
};
enum ath10k_scan_state {
ATH10K_SCAN_IDLE,
ATH10K_SCAN_STARTING,
ATH10K_SCAN_RUNNING,
ATH10K_SCAN_ABORTING,
};
static inline const char *ath10k_scan_state_str(enum ath10k_scan_state state)
{
switch (state) {
case ATH10K_SCAN_IDLE:
return "idle";
case ATH10K_SCAN_STARTING:
return "starting";
case ATH10K_SCAN_RUNNING:
return "running";
case ATH10K_SCAN_ABORTING:
return "aborting";
}
return "unknown";
}
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
struct device *dev;
u8 mac_addr[ETH_ALEN];
u32 chip_id;
u32 target_version;
u8 fw_version_major;
u32 fw_version_minor;
u16 fw_version_release;
u16 fw_version_build;
u32 phy_capability;
u32 hw_min_tx_power;
u32 hw_max_tx_power;
u32 ht_cap_info;
u32 vht_cap_info;
u32 num_rf_chains;
DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
struct targetdef *targetdef;
struct hostdef *hostdef;
bool p2p;
struct {
const struct ath10k_hif_ops *ops;
} hif;
struct completion target_suspend;
struct ath10k_bmi bmi;
struct ath10k_wmi wmi;
struct ath10k_htc htc;
struct ath10k_htt htt;
struct ath10k_hw_params {
u32 id;
const char *name;
u32 patch_load_addr;
struct ath10k_hw_params_fw {
const char *dir;
const char *fw;
const char *otp;
const char *board;
} fw;
} hw_params;
const struct firmware *board;
const void *board_data;
size_t board_len;
const struct firmware *otp;
const void *otp_data;
size_t otp_len;
const struct firmware *firmware;
const void *firmware_data;
size_t firmware_len;
int fw_api;
struct {
struct completion started;
struct completion completed;
struct completion on_channel;
struct delayed_work timeout;
enum ath10k_scan_state state;
bool is_roc;
int vdev_id;
int roc_freq;
} scan;
struct {
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
} mac;
/* should never be NULL; needed for regular htt rx */
struct ieee80211_channel *rx_channel;
/* valid during scan; needed for mgmt rx during scan */
struct ieee80211_channel *scan_channel;
/* current operating channel definition */
struct cfg80211_chan_def chandef;
int free_vdev_map;
bool monitor;
int monitor_vdev_id;
bool monitor_started;
unsigned int filter_flags;
unsigned long dev_flags;
u32 dfs_block_radar_events;
/* protected by conf_mutex */
bool radar_enabled;
int num_started_vdevs;
	/* protected by conf_mutex */
u8 supp_tx_chainmask;
u8 supp_rx_chainmask;
u8 cfg_tx_chainmask;
u8 cfg_rx_chainmask;
struct wmi_pdev_set_wmm_params_arg wmm_params;
struct completion install_key_done;
struct completion vdev_setup_done;
struct workqueue_struct *workqueue;
/* prevents concurrent FW reconfiguration */
struct mutex conf_mutex;
/* protects shared structure data */
spinlock_t data_lock;
struct list_head arvifs;
struct list_head peers;
wait_queue_head_t peer_mapping_wq;
/* number of created peers; protected by data_lock */
int num_peers;
struct work_struct offchan_tx_work;
struct sk_buff_head offchan_tx_queue;
struct completion offchan_tx_completed;
struct sk_buff *offchan_tx_skb;
struct work_struct wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue;
enum ath10k_state state;
struct work_struct register_work;
struct work_struct restart_work;
/* cycle count is reported twice for each visited channel during scan.
* access protected by data_lock */
u32 survey_last_rx_clear_count;
u32 survey_last_cycle_count;
struct survey_info survey[ATH10K_NUM_CHANS];
struct dfs_pattern_detector *dfs_detector;
#ifdef CONFIG_ATH10K_DEBUGFS
struct ath10k_debug debug;
#endif
struct {
/* relay(fs) channel for spectral scan */
struct rchan *rfs_chan_spec_scan;
/* spectral_mode and spec_config are protected by conf_mutex */
enum ath10k_spectral_mode mode;
struct ath10k_spec_scan config;
} spectral;
struct {
/* protected by conf_mutex */
const struct firmware *utf;
DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT);
/* protected by data_lock */
bool utf_monitor;
} testmode;
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
const struct ath10k_hif_ops *hif_ops);
void ath10k_core_destroy(struct ath10k *ar);
int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode);
int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
void ath10k_core_stop(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar, u32 chip_id);
void ath10k_core_unregister(struct ath10k *ar);
#endif /* _CORE_H_ */

File diff suppressed because it is too large


@ -0,0 +1,145 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _DEBUG_H_
#define _DEBUG_H_
#include <linux/types.h>
#include "trace.h"
enum ath10k_debug_mask {
ATH10K_DBG_PCI = 0x00000001,
ATH10K_DBG_WMI = 0x00000002,
ATH10K_DBG_HTC = 0x00000004,
ATH10K_DBG_HTT = 0x00000008,
ATH10K_DBG_MAC = 0x00000010,
ATH10K_DBG_BOOT = 0x00000020,
ATH10K_DBG_PCI_DUMP = 0x00000040,
ATH10K_DBG_HTT_DUMP = 0x00000080,
ATH10K_DBG_MGMT = 0x00000100,
ATH10K_DBG_DATA = 0x00000200,
ATH10K_DBG_BMI = 0x00000400,
ATH10K_DBG_REGULATORY = 0x00000800,
ATH10K_DBG_TESTMODE = 0x00001000,
ATH10K_DBG_ANY = 0xffffffff,
};
extern unsigned int ath10k_debug_mask;
__printf(2, 3) int ath10k_info(struct ath10k *ar, const char *fmt, ...);
__printf(2, 3) int ath10k_err(struct ath10k *ar, const char *fmt, ...);
__printf(2, 3) int ath10k_warn(struct ath10k *ar, const char *fmt, ...);
void ath10k_print_driver_info(struct ath10k *ar);
#ifdef CONFIG_ATH10K_DEBUGFS
int ath10k_debug_start(struct ath10k *ar);
void ath10k_debug_stop(struct ath10k *ar);
int ath10k_debug_create(struct ath10k *ar);
void ath10k_debug_destroy(struct ath10k *ar);
int ath10k_debug_register(struct ath10k *ar);
void ath10k_debug_unregister(struct ath10k *ar);
void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
size_t map_size);
void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev);
struct ath10k_fw_crash_data *
ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
#define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
#else
static inline int ath10k_debug_start(struct ath10k *ar)
{
return 0;
}
static inline void ath10k_debug_stop(struct ath10k *ar)
{
}
static inline int ath10k_debug_create(struct ath10k *ar)
{
return 0;
}
static inline void ath10k_debug_destroy(struct ath10k *ar)
{
}
static inline int ath10k_debug_register(struct ath10k *ar)
{
return 0;
}
static inline void ath10k_debug_unregister(struct ath10k *ar)
{
}
static inline void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
size_t map_size)
{
}
static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev)
{
}
static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer,
int len)
{
}
static inline struct ath10k_fw_crash_data *
ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
{
return NULL;
}
#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
__printf(3, 4) void ath10k_dbg(struct ath10k *ar,
enum ath10k_debug_mask mask,
const char *fmt, ...);
void ath10k_dbg_dump(struct ath10k *ar,
enum ath10k_debug_mask mask,
const char *msg, const char *prefix,
const void *buf, size_t len);
#else /* CONFIG_ATH10K_DEBUG */
static inline int ath10k_dbg(struct ath10k *ar,
enum ath10k_debug_mask dbg_mask,
const char *fmt, ...)
{
return 0;
}
static inline void ath10k_dbg_dump(struct ath10k *ar,
enum ath10k_debug_mask mask,
const char *msg, const char *prefix,
const void *buf, size_t len)
{
}
#endif /* CONFIG_ATH10K_DEBUG */
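/* Editor's sketch (not part of the original header): how the mask-based
 * logging above is typically used. The message is emitted only when the
 * ATH10K_DBG_BOOT bit is set in the global ath10k_debug_mask; the helper
 * name and the chip_id argument are illustrative assumptions.
 */
static inline void ath10k_dbg_example(struct ath10k *ar, u32 chip_id)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip_id 0x%08x\n", chip_id);
}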
#endif /* _DEBUG_H_ */


@@ -0,0 +1,180 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _HIF_H_
#define _HIF_H_
#include <linux/kernel.h>
#include "core.h"
struct ath10k_hif_sg_item {
u16 transfer_id;
void *transfer_context; /* NULL = tx completion callback not called */
void *vaddr; /* for debugging mostly */
u32 paddr;
u16 len;
};
struct ath10k_hif_cb {
int (*tx_completion)(struct ath10k *ar,
struct sk_buff *wbuf,
unsigned transfer_id);
int (*rx_completion)(struct ath10k *ar,
struct sk_buff *wbuf,
u8 pipe_id);
};
struct ath10k_hif_ops {
/* send a scatter-gather list to the target */
int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
struct ath10k_hif_sg_item *items, int n_items);
/*
* API to handle HIF-specific BMI message exchanges, this API is
* synchronous and only allowed to be called from a context that
* can block (sleep)
*/
int (*exchange_bmi_msg)(struct ath10k *ar,
void *request, u32 request_len,
void *response, u32 *response_len);
/* Post BMI phase, after FW is loaded. Starts regular operation */
int (*start)(struct ath10k *ar);
/* Clean up what start() did. This does not revert to BMI phase. If
* that is desired, call power_down() and power_up() */
void (*stop)(struct ath10k *ar);
int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe,
int *ul_is_polled, int *dl_is_polled);
void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe);
/*
* Check if prior sends have completed.
*
* Check whether the pipe in question has any completed
* sends that have not yet been processed.
* This function is only relevant for HIF pipes that are configured
* to be polled rather than interrupt-driven.
*/
void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);
void (*set_callbacks)(struct ath10k *ar,
struct ath10k_hif_cb *callbacks);
u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
/* Power up the device and enter BMI transfer mode for FW download */
int (*power_up)(struct ath10k *ar);
/* Power down the device and free up resources. stop() must be called
* before this if start() was called earlier */
void (*power_down)(struct ath10k *ar);
int (*suspend)(struct ath10k *ar);
int (*resume)(struct ath10k *ar);
};
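/* Editor's sketch (not part of the original header): how a hypothetical bus
 * glue driver could populate ath10k_hif_ops. my_bus_hif_start() and
 * my_bus_hif_tx_sg() are stub assumptions used purely for illustration; a
 * real bus driver must also fill in the remaining callbacks, since the
 * wrappers below dereference them unconditionally (only suspend/resume are
 * checked for NULL).
 */
static inline int my_bus_hif_start(struct ath10k *ar)
{
	/* enable interrupts, post rx buffers, etc. */
	return 0;
}

static inline int my_bus_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				   struct ath10k_hif_sg_item *items,
				   int n_items)
{
	/* queue the scatter-gather items on the given pipe */
	return 0;
}

static const struct ath10k_hif_ops my_bus_hif_ops __maybe_unused = {
	.start = my_bus_hif_start,
	.tx_sg = my_bus_hif_tx_sg,
};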
static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
struct ath10k_hif_sg_item *items,
int n_items)
{
return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items);
}
static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
void *request, u32 request_len,
void *response, u32 *response_len)
{
return ar->hif.ops->exchange_bmi_msg(ar, request, request_len,
response, response_len);
}
static inline int ath10k_hif_start(struct ath10k *ar)
{
return ar->hif.ops->start(ar);
}
static inline void ath10k_hif_stop(struct ath10k *ar)
{
ar->hif.ops->stop(ar);
}
static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar,
u16 service_id,
u8 *ul_pipe, u8 *dl_pipe,
int *ul_is_polled,
int *dl_is_polled)
{
return ar->hif.ops->map_service_to_pipe(ar, service_id,
ul_pipe, dl_pipe,
ul_is_polled, dl_is_polled);
}
static inline void ath10k_hif_get_default_pipe(struct ath10k *ar,
u8 *ul_pipe, u8 *dl_pipe)
{
ar->hif.ops->get_default_pipe(ar, ul_pipe, dl_pipe);
}
static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
u8 pipe_id, int force)
{
ar->hif.ops->send_complete_check(ar, pipe_id, force);
}
static inline void ath10k_hif_set_callbacks(struct ath10k *ar,
struct ath10k_hif_cb *callbacks)
{
ar->hif.ops->set_callbacks(ar, callbacks);
}
static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
u8 pipe_id)
{
return ar->hif.ops->get_free_queue_number(ar, pipe_id);
}
static inline int ath10k_hif_power_up(struct ath10k *ar)
{
return ar->hif.ops->power_up(ar);
}
static inline void ath10k_hif_power_down(struct ath10k *ar)
{
ar->hif.ops->power_down(ar);
}
static inline int ath10k_hif_suspend(struct ath10k *ar)
{
if (!ar->hif.ops->suspend)
return -EOPNOTSUPP;
return ar->hif.ops->suspend(ar);
}
static inline int ath10k_hif_resume(struct ath10k *ar)
{
if (!ar->hif.ops->resume)
return -EOPNOTSUPP;
return ar->hif.ops->resume(ar);
}
#endif /* _HIF_H_ */


@@ -0,0 +1,873 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
#include "hif.h"
#include "debug.h"
/********/
/* Send */
/********/
static inline void ath10k_htc_send_complete_check(struct ath10k_htc_ep *ep,
int force)
{
/*
* Check whether HIF has any prior sends that have finished
* but have not yet had the post-processing done.
*/
ath10k_hif_send_complete_check(ep->htc->ar, ep->ul_pipe_id, force);
}
static void ath10k_htc_control_tx_complete(struct ath10k *ar,
struct sk_buff *skb)
{
kfree_skb(skb);
}
static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
{
struct sk_buff *skb;
struct ath10k_skb_cb *skb_cb;
skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
if (!skb)
return NULL;
skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
skb_cb = ATH10K_SKB_CB(skb);
memset(skb_cb, 0, sizeof(*skb_cb));
ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
return skb;
}
static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
struct sk_buff *skb)
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}
static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
struct sk_buff *skb)
{
struct ath10k *ar = ep->htc->ar;
ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
ep->eid, skb);
ath10k_htc_restore_tx_skb(ep->htc, skb);
if (!ep->ep_ops.ep_tx_complete) {
ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
dev_kfree_skb_any(skb);
return;
}
ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
/* assumes tx_lock is held */
static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
{
struct ath10k *ar = ep->htc->ar;
if (!ep->tx_credit_flow_enabled)
return false;
if (ep->tx_credits >= ep->tx_credits_per_max_message)
return false;
ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
ep->eid);
return true;
}
static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
struct sk_buff *skb)
{
struct ath10k_htc_hdr *hdr;
hdr = (struct ath10k_htc_hdr *)skb->data;
hdr->eid = ep->eid;
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
hdr->flags = 0;
spin_lock_bh(&ep->htc->tx_lock);
hdr->seq_no = ep->seq_no++;
if (ath10k_htc_ep_need_credit_update(ep))
hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
spin_unlock_bh(&ep->htc->tx_lock);
}
int ath10k_htc_send(struct ath10k_htc *htc,
enum ath10k_htc_ep_id eid,
struct sk_buff *skb)
{
struct ath10k *ar = htc->ar;
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
struct ath10k_hif_sg_item sg_item;
struct device *dev = htc->ar->dev;
int credits = 0;
int ret;
if (htc->ar->state == ATH10K_STATE_WEDGED)
return -ECOMM;
if (eid >= ATH10K_HTC_EP_COUNT) {
ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
return -ENOENT;
}
skb_push(skb, sizeof(struct ath10k_htc_hdr));
if (ep->tx_credit_flow_enabled) {
credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
spin_lock_bh(&htc->tx_lock);
if (ep->tx_credits < credits) {
spin_unlock_bh(&htc->tx_lock);
ret = -EAGAIN;
goto err_pull;
}
ep->tx_credits -= credits;
ath10k_dbg(ar, ATH10K_DBG_HTC,
"htc ep %d consumed %d credits (total %d)\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
}
ath10k_htc_prepare_tx_skb(ep, skb);
skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
ret = dma_mapping_error(dev, skb_cb->paddr);
if (ret)
goto err_credits;
sg_item.transfer_id = ep->eid;
sg_item.transfer_context = skb;
sg_item.vaddr = skb->data;
sg_item.paddr = skb_cb->paddr;
sg_item.len = skb->len;
ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
if (ret)
goto err_unmap;
return 0;
err_unmap:
dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
if (ep->tx_credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
ath10k_dbg(ar, ATH10K_DBG_HTC,
"htc ep %d reverted %d credits back (total %d)\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
if (ep->ep_ops.ep_tx_credits)
ep->ep_ops.ep_tx_credits(htc->ar);
}
err_pull:
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
return ret;
}
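/* Editor's note: with tx credit flow enabled an skb consumes
 * DIV_ROUND_UP(skb->len, htc->target_credit_size) credits; e.g. a
 * hypothetical 3000 byte frame with a 1536 byte target credit size would
 * consume two credits, which are refunded via err_credits on failure or
 * replenished later by a credit report from the target.
 */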
static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
struct sk_buff *skb,
unsigned int eid)
{
struct ath10k_htc *htc = &ar->htc;
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
if (WARN_ON_ONCE(!skb))
return 0;
ath10k_htc_notify_tx_completion(ep, skb);
/* the skb now belongs to the completion handler */
return 0;
}
/***********/
/* Receive */
/***********/
static void
ath10k_htc_process_credit_report(struct ath10k_htc *htc,
const struct ath10k_htc_credit_report *report,
int len,
enum ath10k_htc_ep_id eid)
{
struct ath10k *ar = htc->ar;
struct ath10k_htc_ep *ep;
int i, n_reports;
if (len % sizeof(*report))
ath10k_warn(ar, "Uneven credit report len %d", len);
n_reports = len / sizeof(*report);
spin_lock_bh(&htc->tx_lock);
for (i = 0; i < n_reports; i++, report++) {
if (report->eid >= ATH10K_HTC_EP_COUNT)
break;
ep = &htc->endpoint[report->eid];
ep->tx_credits += report->credits;
ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
report->eid, report->credits, ep->tx_credits);
if (ep->ep_ops.ep_tx_credits) {
spin_unlock_bh(&htc->tx_lock);
ep->ep_ops.ep_tx_credits(htc->ar);
spin_lock_bh(&htc->tx_lock);
}
}
spin_unlock_bh(&htc->tx_lock);
}
static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
u8 *buffer,
int length,
enum ath10k_htc_ep_id src_eid)
{
struct ath10k *ar = htc->ar;
int status = 0;
struct ath10k_htc_record *record;
u8 *orig_buffer;
int orig_length;
size_t len;
orig_buffer = buffer;
orig_length = length;
while (length > 0) {
record = (struct ath10k_htc_record *)buffer;
if (length < sizeof(record->hdr)) {
status = -EINVAL;
break;
}
if (record->hdr.len > length) {
/* no room left in buffer for record */
ath10k_warn(ar, "Invalid record length: %d\n",
record->hdr.len);
status = -EINVAL;
break;
}
switch (record->hdr.id) {
case ATH10K_HTC_RECORD_CREDITS:
len = sizeof(struct ath10k_htc_credit_report);
if (record->hdr.len < len) {
ath10k_warn(ar, "Credit report too long\n");
status = -EINVAL;
break;
}
ath10k_htc_process_credit_report(htc,
record->credit_report,
record->hdr.len,
src_eid);
break;
default:
ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
record->hdr.id, record->hdr.len);
break;
}
if (status)
break;
/* multiple records may be present in a trailer */
buffer += sizeof(record->hdr) + record->hdr.len;
length -= sizeof(record->hdr) + record->hdr.len;
}
if (status)
ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
orig_buffer, orig_length);
return status;
}
static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
struct sk_buff *skb,
u8 pipe_id)
{
int status = 0;
struct ath10k_htc *htc = &ar->htc;
struct ath10k_htc_hdr *hdr;
struct ath10k_htc_ep *ep;
u16 payload_len;
u32 trailer_len = 0;
size_t min_len;
u8 eid;
bool trailer_present;
hdr = (struct ath10k_htc_hdr *)skb->data;
skb_pull(skb, sizeof(*hdr));
eid = hdr->eid;
if (eid >= ATH10K_HTC_EP_COUNT) {
ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
hdr, sizeof(*hdr));
status = -EINVAL;
goto out;
}
ep = &htc->endpoint[eid];
/*
* If this endpoint that received a message from the target has
* a to-target HIF pipe whose send completions are polled rather
* than interrupt-driven, this is a good point to ask HIF to check
* whether it has any completed sends to handle.
*/
if (ep->ul_is_polled)
ath10k_htc_send_complete_check(ep, 1);
payload_len = __le16_to_cpu(hdr->len);
if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
payload_len + sizeof(*hdr));
ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
hdr, sizeof(*hdr));
status = -EINVAL;
goto out;
}
if (skb->len < payload_len) {
ath10k_dbg(ar, ATH10K_DBG_HTC,
"HTC Rx: insufficient length, got %d, expected %d\n",
skb->len, payload_len);
ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
"", hdr, sizeof(*hdr));
status = -EINVAL;
goto out;
}
/* get flags to check for trailer */
trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
if (trailer_present) {
u8 *trailer;
trailer_len = hdr->trailer_len;
min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);
if ((trailer_len < min_len) ||
(trailer_len > payload_len)) {
ath10k_warn(ar, "Invalid trailer length: %d\n",
trailer_len);
status = -EPROTO;
goto out;
}
trailer = (u8 *)hdr;
trailer += sizeof(*hdr);
trailer += payload_len;
trailer -= trailer_len;
status = ath10k_htc_process_trailer(htc, trailer,
trailer_len, hdr->eid);
if (status)
goto out;
skb_trim(skb, skb->len - trailer_len);
}
if (((int)payload_len - (int)trailer_len) <= 0)
/* zero length packet with trailer data, just drop these */
goto out;
if (eid == ATH10K_HTC_EP_0) {
struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
switch (__le16_to_cpu(msg->hdr.message_id)) {
default:
/* handle HTC control message */
if (completion_done(&htc->ctl_resp)) {
/*
* this is a fatal error, target should not be
* sending unsolicited messages on the ep 0
*/
ath10k_warn(ar, "HTC rx ctrl still processing\n");
status = -EINVAL;
complete(&htc->ctl_resp);
goto out;
}
htc->control_resp_len =
min_t(int, skb->len,
ATH10K_HTC_MAX_CTRL_MSG_LEN);
memcpy(htc->control_resp_buffer, skb->data,
htc->control_resp_len);
complete(&htc->ctl_resp);
break;
case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
htc->htc_ops.target_send_suspend_complete(ar);
}
goto out;
}
ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
eid, skb);
ep->ep_ops.ep_rx_complete(ar, skb);
/* skb is now owned by the rx completion handler */
skb = NULL;
out:
kfree_skb(skb);
return status;
}
static void ath10k_htc_control_rx_complete(struct ath10k *ar,
struct sk_buff *skb)
{
/* This is unexpected. FW is not supposed to send regular rx on this
* endpoint. */
ath10k_warn(ar, "unexpected htc rx\n");
kfree_skb(skb);
}
/***************/
/* Init/Deinit */
/***************/
static const char *htc_service_name(enum ath10k_htc_svc_id id)
{
switch (id) {
case ATH10K_HTC_SVC_ID_RESERVED:
return "Reserved";
case ATH10K_HTC_SVC_ID_RSVD_CTRL:
return "Control";
case ATH10K_HTC_SVC_ID_WMI_CONTROL:
return "WMI";
case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
return "DATA BE";
case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
return "DATA BK";
case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
return "DATA VI";
case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
return "DATA VO";
case ATH10K_HTC_SVC_ID_NMI_CONTROL:
return "NMI Control";
case ATH10K_HTC_SVC_ID_NMI_DATA:
return "NMI Data";
case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
return "HTT Data";
case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
return "RAW";
}
return "Unknown";
}
static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
{
struct ath10k_htc_ep *ep;
int i;
for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
ep = &htc->endpoint[i];
ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
ep->max_ep_message_len = 0;
ep->max_tx_queue_depth = 0;
ep->eid = i;
ep->htc = htc;
ep->tx_credit_flow_enabled = true;
}
}
static void ath10k_htc_setup_target_buffer_assignments(struct ath10k_htc *htc)
{
struct ath10k_htc_svc_tx_credits *entry;
entry = &htc->service_tx_alloc[0];
/*
* for PCIe allocate all credits/HTC buffers to WMI.
* no buffers are used/required for data. data always
* remains on host.
*/
entry++;
entry->service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
entry->credit_allocation = htc->total_transmit_credits;
}
static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
u16 service_id)
{
u8 allocation = 0;
int i;
for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
if (htc->service_tx_alloc[i].service_id == service_id)
allocation =
htc->service_tx_alloc[i].credit_allocation;
}
return allocation;
}
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
struct ath10k *ar = htc->ar;
int i, status = 0;
struct ath10k_htc_svc_conn_req conn_req;
struct ath10k_htc_svc_conn_resp conn_resp;
struct ath10k_htc_msg *msg;
u16 message_id;
u16 credit_count;
u16 credit_size;
status = wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_WAIT_TIMEOUT_HZ);
if (status == 0) {
/* Workaround: In some cases the PCI HIF doesn't
* receive interrupt for the control response message
* even if the buffer was completed. It is suspected
* iomap writes unmasking PCI CE irqs aren't propagated
* properly in KVM PCI-passthrough sometimes.
*/
ath10k_warn(ar, "failed to receive control response completion, polling..\n");
for (i = 0; i < CE_COUNT; i++)
ath10k_hif_send_complete_check(htc->ar, i, 1);
status = wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_WAIT_TIMEOUT_HZ);
if (status == 0)
status = -ETIMEDOUT;
}
if (status < 0) {
ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
return status;
}
if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
htc->control_resp_len);
return -ECOMM;
}
msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
message_id = __le16_to_cpu(msg->hdr.message_id);
credit_count = __le16_to_cpu(msg->ready.credit_count);
credit_size = __le16_to_cpu(msg->ready.credit_size);
if (message_id != ATH10K_HTC_MSG_READY_ID) {
ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
return -ECOMM;
}
htc->total_transmit_credits = credit_count;
htc->target_credit_size = credit_size;
ath10k_dbg(ar, ATH10K_DBG_HTC,
"Target ready! transmit resources: %d size:%d\n",
htc->total_transmit_credits,
htc->target_credit_size);
if ((htc->total_transmit_credits == 0) ||
(htc->target_credit_size == 0)) {
ath10k_err(ar, "Invalid credit size received\n");
return -ECOMM;
}
ath10k_htc_setup_target_buffer_assignments(htc);
/* setup our pseudo HTC control endpoint connection */
memset(&conn_req, 0, sizeof(conn_req));
memset(&conn_resp, 0, sizeof(conn_resp));
conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;
/* connect fake service */
status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
if (status) {
ath10k_err(ar, "could not connect to htc service (%d)\n",
status);
return status;
}
return 0;
}
int ath10k_htc_connect_service(struct ath10k_htc *htc,
struct ath10k_htc_svc_conn_req *conn_req,
struct ath10k_htc_svc_conn_resp *conn_resp)
{
struct ath10k *ar = htc->ar;
struct ath10k_htc_msg *msg;
struct ath10k_htc_conn_svc *req_msg;
struct ath10k_htc_conn_svc_response resp_msg_dummy;
struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
struct ath10k_htc_ep *ep;
struct sk_buff *skb;
unsigned int max_msg_size = 0;
int length, status;
bool disable_credit_flow_ctrl = false;
u16 message_id, service_id, flags = 0;
u8 tx_alloc = 0;
/* special case for HTC pseudo control service */
if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
disable_credit_flow_ctrl = true;
assigned_eid = ATH10K_HTC_EP_0;
max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
goto setup;
}
tx_alloc = ath10k_htc_get_credit_allocation(htc,
conn_req->service_id);
if (!tx_alloc)
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc service %s does not allocate target credits\n",
htc_service_name(conn_req->service_id));
skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
if (!skb) {
ath10k_err(ar, "Failed to allocate HTC packet\n");
return -ENOMEM;
}
length = sizeof(msg->hdr) + sizeof(msg->connect_service);
skb_put(skb, length);
memset(skb->data, 0, length);
msg = (struct ath10k_htc_msg *)skb->data;
msg->hdr.message_id =
__cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);
flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
/* Only enable credit flow control for WMI ctrl service */
if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
disable_credit_flow_ctrl = true;
}
req_msg = &msg->connect_service;
req_msg->flags = __cpu_to_le16(flags);
req_msg->service_id = __cpu_to_le16(conn_req->service_id);
reinit_completion(&htc->ctl_resp);
status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
if (status) {
kfree_skb(skb);
return status;
}
/* wait for response */
status = wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
if (status <= 0) {
if (status == 0)
status = -ETIMEDOUT;
ath10k_err(ar, "Service connect timeout: %d\n", status);
return status;
}
/* we controlled the buffer creation, it's aligned */
msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
resp_msg = &msg->connect_service_response;
message_id = __le16_to_cpu(msg->hdr.message_id);
service_id = __le16_to_cpu(resp_msg->service_id);
if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
(htc->control_resp_len < sizeof(msg->hdr) +
sizeof(msg->connect_service_response))) {
ath10k_err(ar, "Invalid resp message ID 0x%x", message_id);
return -EPROTO;
}
ath10k_dbg(ar, ATH10K_DBG_HTC,
"HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
htc_service_name(service_id),
resp_msg->status, resp_msg->eid);
conn_resp->connect_resp_code = resp_msg->status;
/* check response status */
if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
ath10k_err(ar, "HTC Service %s connect request failed: 0x%x)\n",
htc_service_name(service_id),
resp_msg->status);
return -EPROTO;
}
assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);
setup:
if (assigned_eid >= ATH10K_HTC_EP_COUNT)
return -EPROTO;
if (max_msg_size == 0)
return -EPROTO;
ep = &htc->endpoint[assigned_eid];
ep->eid = assigned_eid;
if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
return -EPROTO;
/* return assigned endpoint to caller */
conn_resp->eid = assigned_eid;
conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);
/* setup the endpoint */
ep->service_id = conn_req->service_id;
ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
ep->tx_credits = tx_alloc;
ep->tx_credit_size = htc->target_credit_size;
ep->tx_credits_per_max_message = ep->max_ep_message_len /
htc->target_credit_size;
if (ep->max_ep_message_len % htc->target_credit_size)
ep->tx_credits_per_max_message++;
/* copy all the callbacks */
ep->ep_ops = conn_req->ep_ops;
status = ath10k_hif_map_service_to_pipe(htc->ar,
ep->service_id,
&ep->ul_pipe_id,
&ep->dl_pipe_id,
&ep->ul_is_polled,
&ep->dl_is_polled);
if (status)
return status;
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
htc_service_name(ep->service_id), ep->ul_pipe_id,
ep->dl_pipe_id, ep->eid);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc ep %d ul polled %d dl polled %d\n",
ep->eid, ep->ul_is_polled, ep->dl_is_polled);
if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
ep->tx_credit_flow_enabled = false;
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc service '%s' eid %d TX flow control disabled\n",
htc_service_name(ep->service_id), assigned_eid);
}
return status;
}
struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
{
struct sk_buff *skb;
skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
if (!skb)
return NULL;
skb_reserve(skb, sizeof(struct ath10k_htc_hdr));
/* FW/HTC requires 4-byte aligned streams */
if (!IS_ALIGNED((unsigned long)skb->data, 4))
ath10k_warn(ar, "Unaligned HTC tx skb\n");
return skb;
}
int ath10k_htc_start(struct ath10k_htc *htc)
{
struct ath10k *ar = htc->ar;
struct sk_buff *skb;
int status = 0;
struct ath10k_htc_msg *msg;
skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
if (!skb)
return -ENOMEM;
skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
memset(skb->data, 0, skb->len);
msg = (struct ath10k_htc_msg *)skb->data;
msg->hdr.message_id =
__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);
ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
if (status) {
kfree_skb(skb);
return status;
}
return 0;
}
/* registered target arrival callback from the HIF layer */
int ath10k_htc_init(struct ath10k *ar)
{
struct ath10k_hif_cb htc_callbacks;
struct ath10k_htc_ep *ep = NULL;
struct ath10k_htc *htc = &ar->htc;
spin_lock_init(&htc->tx_lock);
ath10k_htc_reset_endpoint_states(htc);
/* setup HIF layer callbacks */
htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
htc->ar = ar;
/* Get HIF default pipe for HTC message exchange */
ep = &htc->endpoint[ATH10K_HTC_EP_0];
ath10k_hif_set_callbacks(ar, &htc_callbacks);
ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);
init_completion(&htc->ctl_resp);
return 0;
}


@@ -0,0 +1,359 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _HTC_H_
#define _HTC_H_
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/skbuff.h>
#include <linux/semaphore.h>
#include <linux/timer.h>
struct ath10k;
/****************/
/* HTC protocol */
/****************/
/*
* HTC - host-target control protocol
*
* tx packets are generally <htc_hdr><payload>
* rx packets are more complex: <htc_hdr><payload><trailer>
*
* The payload + trailer length is stored in len.
* To get the payload-only length one needs to subtract trailer_len from len.
*
* Trailer contains (possibly) multiple <htc_record>.
* Each record is a id-len-value.
*
* HTC header flags, control_byte0, control_byte1
* have different meanings depending on whether the
* packet is tx or rx.
*
* Alignment: htc_hdr, payload and trailer are
* 4-byte aligned.
*/
enum ath10k_htc_tx_flags {
ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
ATH10K_HTC_FLAG_SEND_BUNDLE = 0x02
};
enum ath10k_htc_rx_flags {
ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0
};
struct ath10k_htc_hdr {
u8 eid; /* @enum ath10k_htc_ep_id */
u8 flags; /* @enum ath10k_htc_tx_flags, ath10k_htc_rx_flags */
__le16 len;
union {
u8 trailer_len; /* for rx */
u8 control_byte0;
} __packed;
union {
u8 seq_no; /* for tx */
u8 control_byte1;
} __packed;
u8 pad0;
u8 pad1;
} __packed __aligned(4);
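/* Editor's sketch (not part of the original header), illustrating the length
 * rule described above: hdr->len covers payload plus trailer, so the
 * payload-only length is obtained by subtracting trailer_len whenever the
 * trailer flag is set. The helper name is an assumption for illustration.
 */
static inline u16 ath10k_htc_hdr_payload_len(const struct ath10k_htc_hdr *hdr)
{
	u16 len = __le16_to_cpu(hdr->len);

	if (hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT)
		len -= hdr->trailer_len;

	return len;
}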
enum ath10k_ath10k_htc_msg_id {
ATH10K_HTC_MSG_READY_ID = 1,
ATH10K_HTC_MSG_CONNECT_SERVICE_ID = 2,
ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3,
ATH10K_HTC_MSG_SETUP_COMPLETE_ID = 4,
ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID = 5,
ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE = 6
};
enum ath10k_htc_version {
ATH10K_HTC_VERSION_2P0 = 0x00, /* 2.0 */
ATH10K_HTC_VERSION_2P1 = 0x01, /* 2.1 */
};
enum ath10k_htc_conn_flags {
ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH = 0x0,
ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF = 0x1,
ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2,
ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY = 0x3,
#define ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK 0x3
ATH10K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 1 << 2,
ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 1 << 3
#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_MASK 0xFF00
#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_LSB 8
};
enum ath10k_htc_conn_svc_status {
ATH10K_HTC_CONN_SVC_STATUS_SUCCESS = 0,
ATH10K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1,
ATH10K_HTC_CONN_SVC_STATUS_FAILED = 2,
ATH10K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3,
ATH10K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4
};
struct ath10k_ath10k_htc_msg_hdr {
__le16 message_id; /* @enum htc_message_id */
} __packed;
struct ath10k_htc_unknown {
u8 pad0;
u8 pad1;
} __packed;
struct ath10k_htc_ready {
__le16 credit_count;
__le16 credit_size;
u8 max_endpoints;
u8 pad0;
} __packed;
struct ath10k_htc_ready_extended {
struct ath10k_htc_ready base;
u8 htc_version; /* @enum ath10k_htc_version */
u8 max_msgs_per_htc_bundle;
u8 pad0;
u8 pad1;
} __packed;
struct ath10k_htc_conn_svc {
__le16 service_id;
__le16 flags; /* @enum ath10k_htc_conn_flags */
u8 pad0;
u8 pad1;
} __packed;
struct ath10k_htc_conn_svc_response {
__le16 service_id;
u8 status; /* @enum ath10k_htc_conn_svc_status */
u8 eid;
__le16 max_msg_size;
} __packed;
struct ath10k_htc_setup_complete_extended {
u8 pad0;
u8 pad1;
__le32 flags; /* @enum htc_setup_complete_flags */
u8 max_msgs_per_bundled_recv;
u8 pad2;
u8 pad3;
u8 pad4;
} __packed;
struct ath10k_htc_msg {
struct ath10k_ath10k_htc_msg_hdr hdr;
union {
/* host-to-target */
struct ath10k_htc_conn_svc connect_service;
struct ath10k_htc_ready ready;
struct ath10k_htc_ready_extended ready_ext;
struct ath10k_htc_unknown unknown;
struct ath10k_htc_setup_complete_extended setup_complete_ext;
/* target-to-host */
struct ath10k_htc_conn_svc_response connect_service_response;
};
} __packed __aligned(4);
enum ath10k_ath10k_htc_record_id {
ATH10K_HTC_RECORD_NULL = 0,
ATH10K_HTC_RECORD_CREDITS = 1
};
struct ath10k_ath10k_htc_record_hdr {
u8 id; /* @enum ath10k_ath10k_htc_record_id */
u8 len;
u8 pad0;
u8 pad1;
} __packed;
struct ath10k_htc_credit_report {
u8 eid; /* @enum ath10k_htc_ep_id */
u8 credits;
u8 pad0;
u8 pad1;
} __packed;
struct ath10k_htc_record {
struct ath10k_ath10k_htc_record_hdr hdr;
union {
struct ath10k_htc_credit_report credit_report[0];
u8 payload[0];
};
} __packed __aligned(4);
/*
* note: the trailer offset is dynamic depending
* on payload length. this is only a struct layout draft
*/
struct ath10k_htc_frame {
struct ath10k_htc_hdr hdr;
union {
struct ath10k_htc_msg msg;
u8 payload[0];
};
struct ath10k_htc_record trailer[0];
} __packed __aligned(4);
/*******************/
/* Host-side stuff */
/*******************/
enum ath10k_htc_svc_gid {
ATH10K_HTC_SVC_GRP_RSVD = 0,
ATH10K_HTC_SVC_GRP_WMI = 1,
ATH10K_HTC_SVC_GRP_NMI = 2,
ATH10K_HTC_SVC_GRP_HTT = 3,
ATH10K_HTC_SVC_GRP_TEST = 254,
ATH10K_HTC_SVC_GRP_LAST = 255,
};
#define SVC(group, idx) \
(int)(((int)(group) << 8) | (int)(idx))
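/* Editor's note: for example SVC(ATH10K_HTC_SVC_GRP_WMI, 0) evaluates to
 * (1 << 8) | 0 == 0x0100, the value of ATH10K_HTC_SVC_ID_WMI_CONTROL below.
 */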
enum ath10k_htc_svc_id {
/* NOTE: service ID of 0x0000 is reserved and should never be used */
ATH10K_HTC_SVC_ID_RESERVED = 0x0000,
ATH10K_HTC_SVC_ID_UNUSED = ATH10K_HTC_SVC_ID_RESERVED,
ATH10K_HTC_SVC_ID_RSVD_CTRL = SVC(ATH10K_HTC_SVC_GRP_RSVD, 1),
ATH10K_HTC_SVC_ID_WMI_CONTROL = SVC(ATH10K_HTC_SVC_GRP_WMI, 0),
ATH10K_HTC_SVC_ID_WMI_DATA_BE = SVC(ATH10K_HTC_SVC_GRP_WMI, 1),
ATH10K_HTC_SVC_ID_WMI_DATA_BK = SVC(ATH10K_HTC_SVC_GRP_WMI, 2),
ATH10K_HTC_SVC_ID_WMI_DATA_VI = SVC(ATH10K_HTC_SVC_GRP_WMI, 3),
ATH10K_HTC_SVC_ID_WMI_DATA_VO = SVC(ATH10K_HTC_SVC_GRP_WMI, 4),
ATH10K_HTC_SVC_ID_NMI_CONTROL = SVC(ATH10K_HTC_SVC_GRP_NMI, 0),
ATH10K_HTC_SVC_ID_NMI_DATA = SVC(ATH10K_HTC_SVC_GRP_NMI, 1),
ATH10K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 0),
/* raw stream service (i.e. flash, tcmd, calibration apps) */
ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH10K_HTC_SVC_GRP_TEST, 0),
};
#undef SVC
enum ath10k_htc_ep_id {
ATH10K_HTC_EP_UNUSED = -1,
ATH10K_HTC_EP_0 = 0,
ATH10K_HTC_EP_1 = 1,
ATH10K_HTC_EP_2,
ATH10K_HTC_EP_3,
ATH10K_HTC_EP_4,
ATH10K_HTC_EP_5,
ATH10K_HTC_EP_6,
ATH10K_HTC_EP_7,
ATH10K_HTC_EP_8,
ATH10K_HTC_EP_COUNT,
};
struct ath10k_htc_ops {
void (*target_send_suspend_complete)(struct ath10k *ar);
};
struct ath10k_htc_ep_ops {
void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
void (*ep_tx_credits)(struct ath10k *);
};
/* service connection information */
struct ath10k_htc_svc_conn_req {
u16 service_id;
struct ath10k_htc_ep_ops ep_ops;
int max_send_queue_depth;
};
/* service connection response information */
struct ath10k_htc_svc_conn_resp {
u8 buffer_len;
u8 actual_len;
enum ath10k_htc_ep_id eid;
unsigned int max_msg_len;
u8 connect_resp_code;
};
#define ATH10K_NUM_CONTROL_TX_BUFFERS 2
#define ATH10K_HTC_MAX_LEN 4096
#define ATH10K_HTC_MAX_CTRL_MSG_LEN 256
#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1*HZ)
#define ATH10K_HTC_CONTROL_BUFFER_SIZE (ATH10K_HTC_MAX_CTRL_MSG_LEN + \
sizeof(struct ath10k_htc_hdr))
#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1*HZ)
struct ath10k_htc_ep {
struct ath10k_htc *htc;
enum ath10k_htc_ep_id eid;
enum ath10k_htc_svc_id service_id;
struct ath10k_htc_ep_ops ep_ops;
int max_tx_queue_depth;
int max_ep_message_len;
u8 ul_pipe_id;
u8 dl_pipe_id;
int ul_is_polled; /* call HIF to get tx completions */
int dl_is_polled; /* call HIF to fetch rx (not implemented) */
u8 seq_no; /* for debugging */
int tx_credits;
int tx_credit_size;
int tx_credits_per_max_message;
bool tx_credit_flow_enabled;
};
struct ath10k_htc_svc_tx_credits {
u16 service_id;
u8 credit_allocation;
};
struct ath10k_htc {
struct ath10k *ar;
struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT];
/* protects endpoints */
spinlock_t tx_lock;
struct ath10k_htc_ops htc_ops;
u8 control_resp_buffer[ATH10K_HTC_MAX_CTRL_MSG_LEN];
int control_resp_len;
struct completion ctl_resp;
int total_transmit_credits;
struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT];
int target_credit_size;
};
int ath10k_htc_init(struct ath10k *ar);
int ath10k_htc_wait_target(struct ath10k_htc *htc);
int ath10k_htc_start(struct ath10k_htc *htc);
int ath10k_htc_connect_service(struct ath10k_htc *htc,
struct ath10k_htc_svc_conn_req *conn_req,
struct ath10k_htc_svc_conn_resp *conn_resp);
int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
struct sk_buff *packet);
struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size);
#endif /* _HTC_H_ */


@@ -0,0 +1,115 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/if_ether.h>
#include "htt.h"
#include "core.h"
#include "debug.h"
int ath10k_htt_connect(struct ath10k_htt *htt)
{
struct ath10k_htc_svc_conn_req conn_req;
struct ath10k_htc_svc_conn_resp conn_resp;
int status;
memset(&conn_req, 0, sizeof(conn_req));
memset(&conn_resp, 0, sizeof(conn_resp));
conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath10k_htt_t2h_msg_handler;
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
status = ath10k_htc_connect_service(&htt->ar->htc, &conn_req,
&conn_resp);
if (status)
return status;
htt->eid = conn_resp.eid;
return 0;
}
int ath10k_htt_init(struct ath10k *ar)
{
struct ath10k_htt *htt = &ar->htt;
htt->ar = ar;
htt->max_throughput_mbps = 800;
/*
* Prefetch enough data to satisfy target
* classification engine.
* This is for LL chips. HL chips will probably
* transfer the whole frame in the tx fragment.
*/
htt->prefetch_len =
36 + /* 802.11 + qos + ht */
4 + /* 802.1q */
8 + /* llc snap */
2; /* ip4 dscp or ip6 priority */
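	/* editor's note: 36 + 4 + 8 + 2 = 50 bytes in total */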
return 0;
}
#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
static int ath10k_htt_verify_version(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt target version %d.%d\n",
htt->target_version_major, htt->target_version_minor);
if (htt->target_version_major != 2 &&
htt->target_version_major != 3) {
ath10k_err(ar, "unsupported htt major version %d. supported versions are 2 and 3\n",
htt->target_version_major);
return -ENOTSUPP;
}
return 0;
}
int ath10k_htt_setup(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
int status;
init_completion(&htt->target_version_received);
status = ath10k_htt_h2t_ver_req_msg(htt);
if (status)
return status;
status = wait_for_completion_timeout(&htt->target_version_received,
HTT_TARGET_VERSION_TIMEOUT_HZ);
if (status <= 0) {
ath10k_warn(ar, "htt version request timed out\n");
return -ETIMEDOUT;
}
status = ath10k_htt_verify_version(htt);
if (status)
return status;
return ath10k_htt_send_rx_ring_cfg_ll(htt);
}

(File diff suppressed because it is too large.)

(File diff suppressed because it is too large.)


@@ -0,0 +1,605 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
htt->num_pending_tx--;
if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
ieee80211_wake_queues(htt->ar->hw);
}
static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
spin_lock_bh(&htt->tx_lock);
__ath10k_htt_tx_dec_pending(htt);
spin_unlock_bh(&htt->tx_lock);
}
static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
int ret = 0;
spin_lock_bh(&htt->tx_lock);
if (htt->num_pending_tx >= htt->max_num_pending_tx) {
ret = -EBUSY;
goto exit;
}
htt->num_pending_tx++;
if (htt->num_pending_tx == htt->max_num_pending_tx)
ieee80211_stop_queues(htt->ar->hw);
exit:
spin_unlock_bh(&htt->tx_lock);
return ret;
}
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
int msdu_id;
lockdep_assert_held(&htt->tx_lock);
msdu_id = find_first_zero_bit(htt->used_msdu_ids,
htt->max_num_pending_tx);
if (msdu_id == htt->max_num_pending_tx)
return -ENOBUFS;
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
__set_bit(msdu_id, htt->used_msdu_ids);
return msdu_id;
}
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
struct ath10k *ar = htt->ar;
lockdep_assert_held(&htt->tx_lock);
if (!test_bit(msdu_id, htt->used_msdu_ids))
ath10k_warn(ar, "trying to free unallocated msdu_id %d\n",
msdu_id);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
__clear_bit(msdu_id, htt->used_msdu_ids);
}
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
spin_lock_init(&htt->tx_lock);
init_waitqueue_head(&htt->empty_tx_wq);
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
else
htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
htt->max_num_pending_tx, GFP_KERNEL);
if (!htt->pending_tx)
return -ENOMEM;
htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
BITS_TO_LONGS(htt->max_num_pending_tx),
GFP_KERNEL);
if (!htt->used_msdu_ids) {
kfree(htt->pending_tx);
return -ENOMEM;
}
htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
sizeof(struct ath10k_htt_txbuf), 4, 0);
if (!htt->tx_pool) {
kfree(htt->used_msdu_ids);
kfree(htt->pending_tx);
return -ENOMEM;
}
return 0;
}
static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct htt_tx_done tx_done = {0};
int msdu_id;
spin_lock_bh(&htt->tx_lock);
for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
if (!test_bit(msdu_id, htt->used_msdu_ids))
continue;
ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
msdu_id);
tx_done.discard = 1;
tx_done.msdu_id = msdu_id;
ath10k_txrx_tx_unref(htt, &tx_done);
}
spin_unlock_bh(&htt->tx_lock);
}
void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
ath10k_htt_tx_free_pending(htt);
kfree(htt->pending_tx);
kfree(htt->used_msdu_ids);
dma_pool_destroy(htt->tx_pool);
}
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
dev_kfree_skb_any(skb);
}
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct sk_buff *skb;
struct htt_cmd *cmd;
int len = 0;
int ret;
len += sizeof(cmd->hdr);
len += sizeof(cmd->ver_req);
skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_cmd *)skb->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
return 0;
}
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
struct ath10k *ar = htt->ar;
struct htt_stats_req *req;
struct sk_buff *skb;
struct htt_cmd *cmd;
int len = 0, ret;
len += sizeof(cmd->hdr);
len += sizeof(cmd->stats_req);
skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_cmd *)skb->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;
req = &cmd->stats_req;
memset(req, 0, sizeof(*req));
/* currently we support only max 8 bit masks so no need to worry
* about endian support */
req->upload_types[0] = mask;
req->reset_types[0] = mask;
req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
ath10k_warn(ar, "failed to send htt type stats request: %d",
ret);
dev_kfree_skb_any(skb);
return ret;
}
return 0;
}
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct sk_buff *skb;
struct htt_cmd *cmd;
struct htt_rx_ring_setup_ring *ring;
const int num_rx_ring = 1;
u16 flags;
u32 fw_idx;
int len;
int ret;
/*
* the HW expects the buffer to be an integral number of 4-byte
* "words"
*/
BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
+ (sizeof(*ring) * num_rx_ring);
skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_cmd *)skb->data;
ring = &cmd->rx_setup.rings[0];
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
cmd->rx_setup.hdr.num_rings = 1;
/* FIXME: do we need all of this? */
flags = 0;
flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
flags |= HTT_RX_RING_FLAGS_PPDU_START;
flags |= HTT_RX_RING_FLAGS_PPDU_END;
flags |= HTT_RX_RING_FLAGS_MPDU_START;
flags |= HTT_RX_RING_FLAGS_MPDU_END;
flags |= HTT_RX_RING_FLAGS_MSDU_START;
flags |= HTT_RX_RING_FLAGS_MSDU_END;
flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
flags |= HTT_RX_RING_FLAGS_CTRL_RX;
flags |= HTT_RX_RING_FLAGS_MGMT_RX;
flags |= HTT_RX_RING_FLAGS_NULL_RX;
flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;
fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
ring->fw_idx_shadow_reg_paddr =
__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
ring->flags = __cpu_to_le16(flags);
ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
return 0;
}
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
u8 max_subfrms_ampdu,
u8 max_subfrms_amsdu)
{
struct ath10k *ar = htt->ar;
struct htt_aggr_conf *aggr_conf;
struct sk_buff *skb;
struct htt_cmd *cmd;
int len;
int ret;
/* Firmware defaults are: amsdu = 3 and ampdu = 64 */
if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
return -EINVAL;
if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
return -EINVAL;
len = sizeof(cmd->hdr);
len += sizeof(cmd->aggr_conf);
skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_cmd *)skb->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;
aggr_conf = &cmd->aggr_conf;
aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
aggr_conf->max_num_amsdu_subframes,
aggr_conf->max_num_ampdu_subframes);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
return 0;
}
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
struct sk_buff *txdesc = NULL;
struct htt_cmd *cmd;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
u8 vdev_id = skb_cb->vdev_id;
int len = 0;
int msdu_id = -1;
int res;
res = ath10k_htt_tx_inc_pending(htt);
if (res)
goto err;
len += sizeof(cmd->hdr);
len += sizeof(cmd->mgmt_tx);
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt);
if (res < 0) {
spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec;
}
msdu_id = res;
htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock);
txdesc = ath10k_htc_alloc_skb(ar, len);
if (!txdesc) {
res = -ENOMEM;
goto err_free_msdu_id;
}
skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
DMA_TO_DEVICE);
res = dma_mapping_error(dev, skb_cb->paddr);
if (res)
goto err_free_txdesc;
skb_put(txdesc, len);
cmd = (struct htt_cmd *)txdesc->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
memcpy(cmd->mgmt_tx.hdr, msdu->data,
min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
skb_cb->htt.txbuf = NULL;
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
goto err_unmap_msdu;
return 0;
err_unmap_msdu:
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
dev_kfree_skb_any(txdesc);
err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
ath10k_htt_tx_dec_pending(htt);
err:
return res;
}
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct ath10k_hif_sg_item sg_items[2];
struct htt_data_tx_desc_frag *frags;
u8 vdev_id = skb_cb->vdev_id;
u8 tid = skb_cb->htt.tid;
int prefetch_len;
int res;
u8 flags0 = 0;
u16 msdu_id, flags1 = 0;
dma_addr_t paddr;
u32 frags_paddr;
bool use_frags;
res = ath10k_htt_tx_inc_pending(htt);
if (res)
goto err;
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt);
if (res < 0) {
spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec;
}
msdu_id = res;
htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock);
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
/* Since HTT 3.0 there is no separate mgmt tx command. However, in case
* of mgmt tx using TX_FRM there is no tx fragment list. Instead of a tx
* fragment list the host driver specifies the frame pointer directly. */
use_frags = htt->target_version_major < 3 ||
!ieee80211_is_mgmt(hdr->frame_control);
skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
&paddr);
if (!skb_cb->htt.txbuf)
goto err_free_msdu_id;
skb_cb->htt.txbuf_paddr = paddr;
skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
DMA_TO_DEVICE);
res = dma_mapping_error(dev, skb_cb->paddr);
if (res)
goto err_free_txbuf;
if (likely(use_frags)) {
frags = skb_cb->htt.txbuf->frags;
frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
frags[0].len = __cpu_to_le32(msdu->len);
frags[1].paddr = 0;
frags[1].len = 0;
flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
frags_paddr = skb_cb->htt.txbuf_paddr;
} else {
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
frags_paddr = skb_cb->paddr;
}
/* Normally all commands go through HTC which manages tx credits for
* each endpoint and notifies when tx is completed.
*
* HTT endpoint is creditless so there's no need to care about HTC
* flags. In that case it is trivial to fill the HTC header here.
*
* MSDU transmission is considered completed upon HTT event. This
* implies no relevant resources can be freed until after the event is
* received. That's why HTC tx completion handler itself is ignored by
* setting NULL to transfer_context for all sg items.
*
* There is simply no point in pushing HTT TX_FRM through HTC tx path
* as it's a waste of resources. By bypassing HTC it is possible to
* avoid extra memory allocations, compress data structures and thus
* improve performance. */
skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
sizeof(skb_cb->htt.txbuf->cmd_hdr) +
sizeof(skb_cb->htt.txbuf->cmd_tx) +
prefetch_len);
skb_cb->htt.txbuf->htc_hdr.flags = 0;
if (!ieee80211_has_protected(hdr->frame_control))
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
/* Prevent firmware from sending up tx inspection requests. There's
* nothing ath10k can do with frames requested for inspection so force
* it to simply relay a regular tx completion with discard status.
*/
flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n",
flags0, flags1, msdu->len, msdu_id, frags_paddr,
(u32)skb_cb->paddr, vdev_id, tid);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
msdu->data, msdu->len);
sg_items[0].transfer_id = 0;
sg_items[0].transfer_context = NULL;
sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
sizeof(skb_cb->htt.txbuf->frags);
sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
sizeof(skb_cb->htt.txbuf->cmd_hdr) +
sizeof(skb_cb->htt.txbuf->cmd_tx);
sg_items[1].transfer_id = 0;
sg_items[1].transfer_context = NULL;
sg_items[1].vaddr = msdu->data;
sg_items[1].paddr = skb_cb->paddr;
sg_items[1].len = prefetch_len;
res = ath10k_hif_tx_sg(htt->ar,
htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
sg_items, ARRAY_SIZE(sg_items));
if (res)
goto err_unmap_msdu;
return 0;
err_unmap_msdu:
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txbuf:
dma_pool_free(htt->tx_pool,
skb_cb->htt.txbuf,
skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
ath10k_htt_tx_dec_pending(htt);
err:
return res;
}

View file

@ -0,0 +1,369 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _HW_H_
#define _HW_H_
#include "targaddrs.h"
/* QCA988X 1.0 definitions (unsupported) */
#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
/* QCA988X 2.0 definitions */
#define QCA988X_HW_2_0_VERSION 0x4100016c
#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
#define QCA988X_HW_2_0_FW_3_FILE "firmware-3.bin"
#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
#define ATH10K_FW_API2_FILE "firmware-2.bin"
#define ATH10K_FW_API3_FILE "firmware-3.bin"
#define ATH10K_FW_UTF_FILE "utf.bin"
/* includes also the null byte */
#define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K"
#define REG_DUMP_COUNT_QCA988X 60
struct ath10k_fw_ie {
__le32 id;
__le32 len;
u8 data[0];
};
enum ath10k_fw_ie_type {
ATH10K_FW_IE_FW_VERSION = 0,
ATH10K_FW_IE_TIMESTAMP = 1,
ATH10K_FW_IE_FEATURES = 2,
ATH10K_FW_IE_FW_IMAGE = 3,
ATH10K_FW_IE_OTP_IMAGE = 4,
};
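/* Illustrative sketch, not part of this header: an API2/API3 firmware image
 * (ATH10K_FW_API2_FILE / ATH10K_FW_API3_FILE) is a sequence of ath10k_fw_ie
 * TLVs. The real parsing is done elsewhere in the driver; the helper name and
 * the 4-byte padding between entries are assumptions made for the example.
 */
#if 0
static void example_walk_fw_ies(const u8 *data, size_t len)
{
	while (len >= sizeof(struct ath10k_fw_ie)) {
		const struct ath10k_fw_ie *ie = (const void *)data;
		u32 ie_len = __le32_to_cpu(ie->len);
		u32 padded = ALIGN(ie_len, 4);

		if (sizeof(*ie) + padded > len)
			break;	/* truncated or malformed image */

		switch (__le32_to_cpu(ie->id)) {
		case ATH10K_FW_IE_FW_VERSION:
		case ATH10K_FW_IE_FEATURES:
		case ATH10K_FW_IE_FW_IMAGE:
		case ATH10K_FW_IE_OTP_IMAGE:
		default:
			break;	/* consumers pick out the IEs they need */
		}

		data += sizeof(*ie) + padded;
		len -= sizeof(*ie) + padded;
	}
}
#endif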
/* Known peculiarities:
 * - current FW doesn't support raw rx mode (last tested v599)
 * - current FW dumps upon raw tx mode (last tested v599)
 * - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
 * - raw frames have an FCS, nwifi frames don't
 * - ethernet frames have the 802.11 header decapped and its parts (base hdr,
 *   cipher param, llc/snap) are aligned to 4-byte boundaries each */
enum ath10k_hw_txrx_mode {
ATH10K_HW_TXRX_RAW = 0,
ATH10K_HW_TXRX_NATIVE_WIFI = 1,
ATH10K_HW_TXRX_ETHERNET = 2,
/* Valid for HTT >= 3.0. Used for management frames in TX_FRM. */
ATH10K_HW_TXRX_MGMT = 3,
};
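/* Note: the HTT tx path plugs these values into the tx descriptor's
 * HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE field -- NATIVE_WIFI for ordinary
 * frames and, on HTT >= 3.0, MGMT for management frames.
 */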
enum ath10k_mcast2ucast_mode {
ATH10K_MCAST2UCAST_DISABLED = 0,
ATH10K_MCAST2UCAST_ENABLED = 1,
};
/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
#define TARGET_NUM_WDS_ENTRIES 32
#define TARGET_DMA_BURST_SIZE 0
#define TARGET_MAC_AGGR_DELIM 0
#define TARGET_AST_SKID_LIMIT 16
#define TARGET_NUM_PEERS 16
#define TARGET_NUM_OFFLOAD_PEERS 0
#define TARGET_NUM_OFFLOAD_REORDER_BUFS 0
#define TARGET_NUM_PEER_KEYS 2
#define TARGET_NUM_TIDS (2 * ((TARGET_NUM_PEERS) + (TARGET_NUM_VDEVS)))
#define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_RX_TIMEOUT_LO_PRI 100
#define TARGET_RX_TIMEOUT_HI_PRI 40
/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and
* avoid a very expensive re-alignment in mac80211. */
#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
#define TARGET_SCAN_MAX_PENDING_REQS 4
#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES 8
#define TARGET_GTK_OFFLOAD_MAX_VDEV 3
#define TARGET_NUM_MCAST_GROUPS 0
#define TARGET_NUM_MCAST_TABLE_ELEMS 0
#define TARGET_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED
#define TARGET_TX_DBG_LOG_SIZE 1024
#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 0
#define TARGET_VOW_CONFIG 0
#define TARGET_NUM_MSDU_DESC (1024 + 400)
#define TARGET_MAX_FRAG_ENTRIES 0
/* Target specific defines for 10.X firmware */
#define TARGET_10X_NUM_VDEVS 16
#define TARGET_10X_NUM_PEER_AST 2
#define TARGET_10X_NUM_WDS_ENTRIES 32
#define TARGET_10X_DMA_BURST_SIZE 0
#define TARGET_10X_MAC_AGGR_DELIM 0
#define TARGET_10X_AST_SKID_LIMIT 16
#define TARGET_10X_NUM_PEERS (128 + (TARGET_10X_NUM_VDEVS))
#define TARGET_10X_NUM_PEERS_MAX 128
#define TARGET_10X_NUM_OFFLOAD_PEERS 0
#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0
#define TARGET_10X_NUM_PEER_KEYS 2
#define TARGET_10X_NUM_TIDS 256
#define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_10X_RX_TIMEOUT_LO_PRI 100
#define TARGET_10X_RX_TIMEOUT_HI_PRI 40
#define TARGET_10X_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
#define TARGET_10X_SCAN_MAX_PENDING_REQS 4
#define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV 2
#define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV 2
#define TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES 8
#define TARGET_10X_GTK_OFFLOAD_MAX_VDEV 3
#define TARGET_10X_NUM_MCAST_GROUPS 0
#define TARGET_10X_NUM_MCAST_TABLE_ELEMS 0
#define TARGET_10X_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED
#define TARGET_10X_TX_DBG_LOG_SIZE 1024
#define TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
#define TARGET_10X_VOW_CONFIG 0
#define TARGET_10X_NUM_MSDU_DESC (1024 + 400)
#define TARGET_10X_MAX_FRAG_ENTRIES 0
/* Number of Copy Engines supported */
#define CE_COUNT 8
/*
* Total number of PCIe MSI interrupts requested for all interrupt sources.
* PCIe standard forces this to be a power of 2.
* Some Host OS's limit MSI requests that can be granted to 8
* so for now we abide by this limit and avoid requesting more
* than that.
*/
#define MSI_NUM_REQUEST_LOG2 3
#define MSI_NUM_REQUEST (1<<MSI_NUM_REQUEST_LOG2)
/*
* Granted MSIs are assigned as follows:
 * Firmware uses the first MSI
* Remaining MSIs, if any, are used by Copy Engines
* This mapping is known to both Target firmware and Host software.
* It may be changed as long as Host and Target are kept in sync.
*/
/* MSI for firmware (errors, etc.) */
#define MSI_ASSIGN_FW 0
/* MSIs for Copy Engines */
#define MSI_ASSIGN_CE_INITIAL 1
#define MSI_ASSIGN_CE_MAX 7
/* as of IP3.7.1 */
#define RTC_STATE_V_ON 3
#define RTC_STATE_COLD_RESET_MASK 0x00000400
#define RTC_STATE_V_LSB 0
#define RTC_STATE_V_MASK 0x00000007
#define RTC_STATE_ADDRESS 0x0000
#define PCIE_SOC_WAKE_V_MASK 0x00000001
#define PCIE_SOC_WAKE_ADDRESS 0x0004
#define PCIE_SOC_WAKE_RESET 0x00000000
#define SOC_GLOBAL_RESET_ADDRESS 0x0008
#define RTC_SOC_BASE_ADDRESS 0x00004000
#define RTC_WMAC_BASE_ADDRESS 0x00005000
#define MAC_COEX_BASE_ADDRESS 0x00006000
#define BT_COEX_BASE_ADDRESS 0x00007000
#define SOC_PCIE_BASE_ADDRESS 0x00008000
#define SOC_CORE_BASE_ADDRESS 0x00009000
#define WLAN_UART_BASE_ADDRESS 0x0000c000
#define WLAN_SI_BASE_ADDRESS 0x00010000
#define WLAN_GPIO_BASE_ADDRESS 0x00014000
#define WLAN_ANALOG_INTF_BASE_ADDRESS 0x0001c000
#define WLAN_MAC_BASE_ADDRESS 0x00020000
#define EFUSE_BASE_ADDRESS 0x00030000
#define FPGA_REG_BASE_ADDRESS 0x00039000
#define WLAN_UART2_BASE_ADDRESS 0x00054c00
#define CE_WRAPPER_BASE_ADDRESS 0x00057000
#define CE0_BASE_ADDRESS 0x00057400
#define CE1_BASE_ADDRESS 0x00057800
#define CE2_BASE_ADDRESS 0x00057c00
#define CE3_BASE_ADDRESS 0x00058000
#define CE4_BASE_ADDRESS 0x00058400
#define CE5_BASE_ADDRESS 0x00058800
#define CE6_BASE_ADDRESS 0x00058c00
#define CE7_BASE_ADDRESS 0x00059000
#define DBI_BASE_ADDRESS 0x00060000
#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000
#define PCIE_LOCAL_BASE_ADDRESS 0x00080000
#define SOC_RESET_CONTROL_ADDRESS 0x00000000
#define SOC_RESET_CONTROL_OFFSET 0x00000000
#define SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001
#define SOC_RESET_CONTROL_CE_RST_MASK 0x00040000
#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040
#define SOC_CPU_CLOCK_OFFSET 0x00000020
#define SOC_CPU_CLOCK_STANDARD_LSB 0
#define SOC_CPU_CLOCK_STANDARD_MASK 0x00000003
#define SOC_CLOCK_CONTROL_OFFSET 0x00000028
#define SOC_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001
#define SOC_SYSTEM_SLEEP_OFFSET 0x000000c4
#define SOC_LPO_CAL_OFFSET 0x000000e0
#define SOC_LPO_CAL_ENABLE_LSB 20
#define SOC_LPO_CAL_ENABLE_MASK 0x00100000
#define SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050
#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004
#define SOC_CHIP_ID_ADDRESS 0x000000ec
#define SOC_CHIP_ID_REV_LSB 8
#define SOC_CHIP_ID_REV_MASK 0x00000f00
#define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008
#define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004
#define WLAN_SYSTEM_SLEEP_DISABLE_LSB 0
#define WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001
#define WLAN_GPIO_PIN0_ADDRESS 0x00000028
#define WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800
#define WLAN_GPIO_PIN1_ADDRESS 0x0000002c
#define WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800
#define WLAN_GPIO_PIN10_ADDRESS 0x00000050
#define WLAN_GPIO_PIN11_ADDRESS 0x00000054
#define WLAN_GPIO_PIN12_ADDRESS 0x00000058
#define WLAN_GPIO_PIN13_ADDRESS 0x0000005c
#define CLOCK_GPIO_OFFSET 0xffffffff
#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0
#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0
#define SI_CONFIG_OFFSET 0x00000000
#define SI_CONFIG_BIDIR_OD_DATA_LSB 18
#define SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000
#define SI_CONFIG_I2C_LSB 16
#define SI_CONFIG_I2C_MASK 0x00010000
#define SI_CONFIG_POS_SAMPLE_LSB 7
#define SI_CONFIG_POS_SAMPLE_MASK 0x00000080
#define SI_CONFIG_INACTIVE_DATA_LSB 5
#define SI_CONFIG_INACTIVE_DATA_MASK 0x00000020
#define SI_CONFIG_INACTIVE_CLK_LSB 4
#define SI_CONFIG_INACTIVE_CLK_MASK 0x00000010
#define SI_CONFIG_DIVIDER_LSB 0
#define SI_CONFIG_DIVIDER_MASK 0x0000000f
#define SI_CS_OFFSET 0x00000004
#define SI_CS_DONE_ERR_MASK 0x00000400
#define SI_CS_DONE_INT_MASK 0x00000200
#define SI_CS_START_LSB 8
#define SI_CS_START_MASK 0x00000100
#define SI_CS_RX_CNT_LSB 4
#define SI_CS_RX_CNT_MASK 0x000000f0
#define SI_CS_TX_CNT_LSB 0
#define SI_CS_TX_CNT_MASK 0x0000000f
#define SI_TX_DATA0_OFFSET 0x00000008
#define SI_TX_DATA1_OFFSET 0x0000000c
#define SI_RX_DATA0_OFFSET 0x00000010
#define SI_RX_DATA1_OFFSET 0x00000014
#define CORE_CTRL_CPU_INTR_MASK 0x00002000
#define CORE_CTRL_ADDRESS 0x0000
#define PCIE_INTR_ENABLE_ADDRESS 0x0008
#define PCIE_INTR_CAUSE_ADDRESS 0x000c
#define PCIE_INTR_CLR_ADDRESS 0x0014
#define SCRATCH_3_ADDRESS 0x0030
#define CPU_INTR_ADDRESS 0x0010
/* Firmware indications to the Host via SCRATCH_3 register. */
#define FW_INDICATOR_ADDRESS (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS)
#define FW_IND_EVENT_PENDING 1
#define FW_IND_INITIALIZED 2
/* HOST_REG interrupt from firmware */
#define PCIE_INTR_FIRMWARE_MASK 0x00000400
#define PCIE_INTR_CE_MASK_ALL 0x0007f800
#define DRAM_BASE_ADDRESS 0x00400000
#define MISSING 0
#define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET
#define WLAN_SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET
#define WLAN_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_OFFSET
#define CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_OFFSET
#define CLOCK_CONTROL_SI0_CLK_MASK SOC_CLOCK_CONTROL_SI0_CLK_MASK
#define RESET_CONTROL_MBOX_RST_MASK MISSING
#define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK
#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS
#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS
#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS
#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK
#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK
#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS
#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS
#define LOCAL_SCRATCH_OFFSET 0x18
#define CPU_CLOCK_OFFSET SOC_CPU_CLOCK_OFFSET
#define LPO_CAL_OFFSET SOC_LPO_CAL_OFFSET
#define GPIO_PIN10_OFFSET WLAN_GPIO_PIN10_ADDRESS
#define GPIO_PIN11_OFFSET WLAN_GPIO_PIN11_ADDRESS
#define GPIO_PIN12_OFFSET WLAN_GPIO_PIN12_ADDRESS
#define GPIO_PIN13_OFFSET WLAN_GPIO_PIN13_ADDRESS
#define CPU_CLOCK_STANDARD_LSB SOC_CPU_CLOCK_STANDARD_LSB
#define CPU_CLOCK_STANDARD_MASK SOC_CPU_CLOCK_STANDARD_MASK
#define LPO_CAL_ENABLE_LSB SOC_LPO_CAL_ENABLE_LSB
#define LPO_CAL_ENABLE_MASK SOC_LPO_CAL_ENABLE_MASK
#define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS
#define MBOX_BASE_ADDRESS MISSING
#define INT_STATUS_ENABLE_ERROR_LSB MISSING
#define INT_STATUS_ENABLE_ERROR_MASK MISSING
#define INT_STATUS_ENABLE_CPU_LSB MISSING
#define INT_STATUS_ENABLE_CPU_MASK MISSING
#define INT_STATUS_ENABLE_COUNTER_LSB MISSING
#define INT_STATUS_ENABLE_COUNTER_MASK MISSING
#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING
#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING
#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING
#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING
#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING
#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING
#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING
#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING
#define INT_STATUS_ENABLE_ADDRESS MISSING
#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING
#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING
#define HOST_INT_STATUS_ADDRESS MISSING
#define CPU_INT_STATUS_ADDRESS MISSING
#define ERROR_INT_STATUS_ADDRESS MISSING
#define ERROR_INT_STATUS_WAKEUP_MASK MISSING
#define ERROR_INT_STATUS_WAKEUP_LSB MISSING
#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING
#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING
#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING
#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING
#define COUNT_DEC_ADDRESS MISSING
#define HOST_INT_STATUS_CPU_MASK MISSING
#define HOST_INT_STATUS_CPU_LSB MISSING
#define HOST_INT_STATUS_ERROR_MASK MISSING
#define HOST_INT_STATUS_ERROR_LSB MISSING
#define HOST_INT_STATUS_COUNTER_MASK MISSING
#define HOST_INT_STATUS_COUNTER_LSB MISSING
#define RX_LOOKAHEAD_VALID_ADDRESS MISSING
#define WINDOW_DATA_ADDRESS MISSING
#define WINDOW_READ_ADDR_ADDRESS MISSING
#define WINDOW_WRITE_ADDR_ADDRESS MISSING
#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
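/* Illustrative only: after requesting a wakeup, the PCI layer polls RTC_STATE
 * (relative to PCIE_LOCAL_BASE_ADDRESS) roughly like this; the actual wait
 * loop lives in pci.c:
 *
 *	u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS);
 *	bool awake = (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
 */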
#endif /* _HW_H_ */

File diff suppressed because it is too large

View file

@ -0,0 +1,66 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _MAC_H_
#define _MAC_H_
#include <net/mac80211.h>
#include "core.h"
struct ath10k_generic_iter {
struct ath10k *ar;
int ret;
};
struct ath10k *ath10k_mac_create(size_t priv_size);
void ath10k_mac_destroy(struct ath10k *ar);
int ath10k_mac_register(struct ath10k *ar);
void ath10k_mac_unregister(struct ath10k *ar);
struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
void __ath10k_scan_finish(struct ath10k *ar);
void ath10k_scan_finish(struct ath10k *ar);
void ath10k_scan_timeout_work(struct work_struct *work);
void ath10k_offchan_tx_purge(struct ath10k *ar);
void ath10k_offchan_tx_work(struct work_struct *work);
void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);
void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
void ath10k_halt(struct ath10k *ar);
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
{
return (struct ath10k_vif *)vif->drv_priv;
}
static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif,
struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
if (arvif->tx_seq_no == 0)
arvif->tx_seq_no = 0x1000;
if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
arvif->tx_seq_no += 0x10;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(arvif->tx_seq_no);
}
}
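/* Note on ath10k_tx_h_seq_no(): IEEE80211_SCTL_FRAG masks the four fragment
 * number bits, so the sequence number lives in bits 4..15 of seq_ctrl and
 * adding 0x10 advances it by exactly one. The counter is bumped only on the
 * first fragment so that all fragments of an MSDU share the same sequence
 * number.
 */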
#endif /* _MAC_H_ */

File diff suppressed because it is too large

View file

@ -0,0 +1,264 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _PCI_H_
#define _PCI_H_
#include <linux/interrupt.h>
#include "hw.h"
#include "ce.h"
/*
* maximum number of bytes that can be handled atomically by DiagRead/DiagWrite
*/
#define DIAG_TRANSFER_LIMIT 2048
struct bmi_xfer {
bool tx_done;
bool rx_done;
bool wait_for_resp;
u32 resp_len;
};
/*
* PCI-specific Target state
*
* NOTE: Structure is shared between Host software and Target firmware!
*
* Much of this may be of interest to the Host so
* HOST_INTEREST->hi_interconnect_state points here
* (and all members are 32-bit quantities in order to
* facilitate Host access). In particular, Host software is
* required to initialize pipe_cfg_addr and svc_to_pipe_map.
*/
struct pcie_state {
/* Pipe configuration Target address */
/* NB: ce_pipe_config[CE_COUNT] */
u32 pipe_cfg_addr;
/* Service to pipe map Target address */
/* NB: service_to_pipe[PIPE_TO_CE_MAP_CN] */
u32 svc_to_pipe_map;
/* number of MSI interrupts requested */
u32 msi_requested;
/* number of MSI interrupts granted */
u32 msi_granted;
/* Message Signalled Interrupt address */
u32 msi_addr;
/* Base data */
u32 msi_data;
/*
* Data for firmware interrupt;
* MSI data for other interrupts are
* in various SoC registers
*/
u32 msi_fw_intr_data;
/* PCIE_PWR_METHOD_* */
u32 power_mgmt_method;
/* PCIE_CONFIG_FLAG_* */
u32 config_flags;
};
/* PCIE_CONFIG_FLAG definitions */
#define PCIE_CONFIG_FLAG_ENABLE_L1 0x0000001
/* Host software's Copy Engine configuration. */
#define CE_ATTR_FLAGS 0
/*
* Configuration information for a Copy Engine pipe.
* Passed from Host to Target during startup (one per CE).
*
* NOTE: Structure is shared between Host software and Target firmware!
*/
struct ce_pipe_config {
__le32 pipenum;
__le32 pipedir;
__le32 nentries;
__le32 nbytes_max;
__le32 flags;
__le32 reserved;
};
/*
* Directions for interconnect pipe configuration.
* These definitions may be used during configuration and are shared
* between Host and Target.
*
* Pipe Directions are relative to the Host, so PIPEDIR_IN means
* "coming IN over air through Target to Host" as with a WiFi Rx operation.
* Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
* as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
* Target since things that are "PIPEDIR_OUT" are coming IN to the Target
* over the interconnect.
*/
#define PIPEDIR_NONE 0
#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */
#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */
#define PIPEDIR_INOUT 3 /* bidirectional */
/* Establish a mapping between a service/direction and a pipe. */
struct service_to_pipe {
__le32 service_id;
__le32 pipedir;
__le32 pipenum;
};
/* Per-pipe state. */
struct ath10k_pci_pipe {
/* Handle of underlying Copy Engine */
struct ath10k_ce_pipe *ce_hdl;
/* Our pipe number; facilitates use of pipe_info ptrs. */
u8 pipe_num;
/* Convenience back pointer to hif_ce_state. */
struct ath10k *hif_ce_state;
size_t buf_sz;
/* protects compl_free and num_send_allowed */
spinlock_t pipe_lock;
struct ath10k_pci *ar_pci;
struct tasklet_struct intr;
};
struct ath10k_pci {
struct pci_dev *pdev;
struct device *dev;
struct ath10k *ar;
void __iomem *mem;
/*
* Number of MSI interrupts granted, 0 --> using legacy PCI line
* interrupts.
*/
int num_msi_intrs;
struct tasklet_struct intr_tq;
struct tasklet_struct msi_fw_err;
struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
struct ath10k_hif_cb msg_callbacks_current;
/* Copy Engine used for Diagnostic Accesses */
struct ath10k_ce_pipe *ce_diag;
/* FIXME: document what this really protects */
spinlock_t ce_lock;
/* Map CE id to ce_state */
struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
struct timer_list rx_post_retry;
};
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
{
return (struct ath10k_pci *)ar->drv_priv;
}
#define ATH10K_PCI_RX_POST_RETRY_MS 50
#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
#define PCIE_WAKE_TIMEOUT 5000 /* 5ms */
#define BAR_NUM 0
#define CDC_WAR_MAGIC_STR 0xceef0000
#define CDC_WAR_DATA_CE 4
/*
* TODO: Should be a function call specific to each Target-type.
* This convoluted macro converts from Target CPU Virtual Address Space to CE
* Address Space. As part of this process, we conservatively fetch the current
* PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
* for this device; but that's not guaranteed.
*/
#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \
(((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS| \
CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \
0x100000 | ((addr) & 0xfffff))
/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10
/* Target exposes its registers for direct access. However before host can
* access them it needs to make sure the target is awake (ath10k_pci_wake,
* ath10k_pci_wake_wait, ath10k_pci_is_awake). Once target is awake it won't go
* to sleep unless host tells it to (ath10k_pci_sleep).
*
* If host tries to access target registers without waking it up it can
* scribble over host memory.
*
 * If the target is asleep, waking it up may take up to 2ms.
*/
static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
u32 value)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
iowrite32(value, ar_pci->mem + offset);
}
static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
return ioread32(ar_pci->mem + offset);
}
static inline u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}
static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}
static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
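/* Illustrative only -- the access pattern implied by the comment above:
 * wake the target, do the register I/O, then let it sleep again. The
 * ath10k_pci_wake()/ath10k_pci_sleep() helpers are static to pci.c and
 * their exact signatures are assumed here for the sake of the example.
 */
#if 0
static int example_read_fw_indicator(struct ath10k *ar, u32 *out)
{
	int ret;

	ret = ath10k_pci_wake(ar);	/* may take up to ~2ms if asleep */
	if (ret)
		return ret;

	*out = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);

	ath10k_pci_sleep(ar);		/* allow the target to power down */
	return 0;
}
#endif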
#endif /* _PCI_H_ */

File diff suppressed because it is too large

View file

@ -0,0 +1,561 @@
/*
* Copyright (c) 2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/relay.h>
#include "core.h"
#include "debug.h"
static void send_fft_sample(struct ath10k *ar,
const struct fft_sample_tlv *fft_sample_tlv)
{
int length;
if (!ar->spectral.rfs_chan_spec_scan)
return;
length = __be16_to_cpu(fft_sample_tlv->length) +
sizeof(*fft_sample_tlv);
relay_write(ar->spectral.rfs_chan_spec_scan, fft_sample_tlv, length);
}
static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len,
u8 *data)
{
int dc_pos;
u8 max_exp;
dc_pos = bin_len / 2;
/* peak index outside of bins */
if (dc_pos < max_index || -dc_pos >= max_index)
return 0;
for (max_exp = 0; max_exp < 8; max_exp++) {
if (data[dc_pos + max_index] == (max_magnitude >> max_exp))
break;
}
/* max_exp not found */
if (data[dc_pos + max_index] != (max_magnitude >> max_exp))
return 0;
return max_exp;
}
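/* Worked example for get_max_exp(): if the report claims a peak magnitude
 * of 800 while the 8-bit bin at dc_pos + max_index holds 100, the loop
 * stops at max_exp = 3 since 800 >> 3 == 100, i.e. the bins were scaled
 * down by 2^3; exporting max_exp lets userspace undo that scaling.
 */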
int ath10k_spectral_process_fft(struct ath10k *ar,
struct wmi_single_phyerr_rx_event *event,
struct phyerr_fft_report *fftr,
size_t bin_len, u64 tsf)
{
struct fft_sample_ath10k *fft_sample;
u8 buf[sizeof(*fft_sample) + SPECTRAL_ATH10K_MAX_NUM_BINS];
u16 freq1, freq2, total_gain_db, base_pwr_db, length, peak_mag;
u32 reg0, reg1, nf_list1, nf_list2;
u8 chain_idx, *bins;
int dc_pos;
fft_sample = (struct fft_sample_ath10k *)&buf;
if (bin_len < 64 || bin_len > SPECTRAL_ATH10K_MAX_NUM_BINS)
return -EINVAL;
reg0 = __le32_to_cpu(fftr->reg0);
reg1 = __le32_to_cpu(fftr->reg1);
length = sizeof(*fft_sample) - sizeof(struct fft_sample_tlv) + bin_len;
fft_sample->tlv.type = ATH_FFT_SAMPLE_ATH10K;
fft_sample->tlv.length = __cpu_to_be16(length);
/* TODO: there might be a reason why the hardware reports 20/40/80 MHz,
 * but the results/plots suggest that it's actually 22/44/88 MHz.
*/
switch (event->hdr.chan_width_mhz) {
case 20:
fft_sample->chan_width_mhz = 22;
break;
case 40:
fft_sample->chan_width_mhz = 44;
break;
case 80:
/* TODO: As experiments with an analogue sender and various
 * configurations (fft-sizes of 64/128/256 and 20/40/80 MHz)
 * show, the particular configuration of 80 MHz/64 bins does
 * not match the other samples at all. Until the reason
* for that is found, don't report these samples.
*/
if (bin_len == 64)
return -EINVAL;
fft_sample->chan_width_mhz = 88;
break;
default:
fft_sample->chan_width_mhz = event->hdr.chan_width_mhz;
}
fft_sample->relpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB);
fft_sample->avgpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB);
peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
fft_sample->max_magnitude = __cpu_to_be16(peak_mag);
fft_sample->max_index = MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX);
fft_sample->rssi = event->hdr.rssi_combined;
total_gain_db = MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB);
base_pwr_db = MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB);
fft_sample->total_gain_db = __cpu_to_be16(total_gain_db);
fft_sample->base_pwr_db = __cpu_to_be16(base_pwr_db);
freq1 = __le16_to_cpu(event->hdr.freq1);
freq2 = __le16_to_cpu(event->hdr.freq2);
fft_sample->freq1 = __cpu_to_be16(freq1);
fft_sample->freq2 = __cpu_to_be16(freq2);
nf_list1 = __le32_to_cpu(event->hdr.nf_list_1);
nf_list2 = __le32_to_cpu(event->hdr.nf_list_2);
chain_idx = MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX);
switch (chain_idx) {
case 0:
fft_sample->noise = __cpu_to_be16(nf_list1 & 0xffffu);
break;
case 1:
fft_sample->noise = __cpu_to_be16((nf_list1 >> 16) & 0xffffu);
break;
case 2:
fft_sample->noise = __cpu_to_be16(nf_list2 & 0xffffu);
break;
case 3:
fft_sample->noise = __cpu_to_be16((nf_list2 >> 16) & 0xffffu);
break;
}
bins = (u8 *)fftr;
bins += sizeof(*fftr);
fft_sample->tsf = __cpu_to_be64(tsf);
/* max_exp has been directly reported by previous hardware (ath9k),
 * maybe it's possible to get it by other means?
*/
fft_sample->max_exp = get_max_exp(fft_sample->max_index, peak_mag,
bin_len, bins);
memcpy(fft_sample->data, bins, bin_len);
/* DC value (value in the middle) is the blind spot of the spectral
* sample and invalid, interpolate it.
*/
dc_pos = bin_len / 2;
fft_sample->data[dc_pos] = (fft_sample->data[dc_pos + 1] +
fft_sample->data[dc_pos - 1]) / 2;
send_fft_sample(ar, &fft_sample->tlv);
return 0;
}
static struct ath10k_vif *ath10k_get_spectral_vdev(struct ath10k *ar)
{
struct ath10k_vif *arvif;
lockdep_assert_held(&ar->conf_mutex);
if (list_empty(&ar->arvifs))
return NULL;
/* if there already is a vif doing spectral, return that. */
list_for_each_entry(arvif, &ar->arvifs, list)
if (arvif->spectral_enabled)
return arvif;
/* otherwise, return the first vif. */
return list_first_entry(&ar->arvifs, typeof(*arvif), list);
}
static int ath10k_spectral_scan_trigger(struct ath10k *ar)
{
struct ath10k_vif *arvif;
int res;
int vdev_id;
lockdep_assert_held(&ar->conf_mutex);
arvif = ath10k_get_spectral_vdev(ar);
if (!arvif)
return -ENODEV;
vdev_id = arvif->vdev_id;
if (ar->spectral.mode == SPECTRAL_DISABLED)
return 0;
res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
WMI_SPECTRAL_ENABLE_CMD_ENABLE);
if (res < 0)
return res;
res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
WMI_SPECTRAL_TRIGGER_CMD_TRIGGER,
WMI_SPECTRAL_ENABLE_CMD_ENABLE);
if (res < 0)
return res;
return 0;
}
static int ath10k_spectral_scan_config(struct ath10k *ar,
enum ath10k_spectral_mode mode)
{
struct wmi_vdev_spectral_conf_arg arg;
struct ath10k_vif *arvif;
int vdev_id, count, res = 0;
lockdep_assert_held(&ar->conf_mutex);
arvif = ath10k_get_spectral_vdev(ar);
if (!arvif)
return -ENODEV;
vdev_id = arvif->vdev_id;
arvif->spectral_enabled = (mode != SPECTRAL_DISABLED);
ar->spectral.mode = mode;
res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
WMI_SPECTRAL_ENABLE_CMD_DISABLE);
if (res < 0) {
ath10k_warn(ar, "failed to enable spectral scan: %d\n", res);
return res;
}
if (mode == SPECTRAL_DISABLED)
return 0;
if (mode == SPECTRAL_BACKGROUND)
count = WMI_SPECTRAL_COUNT_DEFAULT;
else
count = max_t(u8, 1, ar->spectral.config.count);
arg.vdev_id = vdev_id;
arg.scan_count = count;
arg.scan_period = WMI_SPECTRAL_PERIOD_DEFAULT;
arg.scan_priority = WMI_SPECTRAL_PRIORITY_DEFAULT;
arg.scan_fft_size = ar->spectral.config.fft_size;
arg.scan_gc_ena = WMI_SPECTRAL_GC_ENA_DEFAULT;
arg.scan_restart_ena = WMI_SPECTRAL_RESTART_ENA_DEFAULT;
arg.scan_noise_floor_ref = WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
arg.scan_init_delay = WMI_SPECTRAL_INIT_DELAY_DEFAULT;
arg.scan_nb_tone_thr = WMI_SPECTRAL_NB_TONE_THR_DEFAULT;
arg.scan_str_bin_thr = WMI_SPECTRAL_STR_BIN_THR_DEFAULT;
arg.scan_wb_rpt_mode = WMI_SPECTRAL_WB_RPT_MODE_DEFAULT;
arg.scan_rssi_rpt_mode = WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT;
arg.scan_rssi_thr = WMI_SPECTRAL_RSSI_THR_DEFAULT;
arg.scan_pwr_format = WMI_SPECTRAL_PWR_FORMAT_DEFAULT;
arg.scan_rpt_mode = WMI_SPECTRAL_RPT_MODE_DEFAULT;
arg.scan_bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT;
arg.scan_dbm_adj = WMI_SPECTRAL_DBM_ADJ_DEFAULT;
arg.scan_chn_mask = WMI_SPECTRAL_CHN_MASK_DEFAULT;
res = ath10k_wmi_vdev_spectral_conf(ar, &arg);
if (res < 0) {
ath10k_warn(ar, "failed to configure spectral scan: %d\n", res);
return res;
}
return 0;
}
static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
char *mode = "";
unsigned int len;
enum ath10k_spectral_mode spectral_mode;
mutex_lock(&ar->conf_mutex);
spectral_mode = ar->spectral.mode;
mutex_unlock(&ar->conf_mutex);
switch (spectral_mode) {
case SPECTRAL_DISABLED:
mode = "disable";
break;
case SPECTRAL_BACKGROUND:
mode = "background";
break;
case SPECTRAL_MANUAL:
mode = "manual";
break;
}
len = strlen(mode);
return simple_read_from_buffer(user_buf, count, ppos, mode, len);
}
static ssize_t write_file_spec_scan_ctl(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
char buf[32];
ssize_t len;
int res;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
mutex_lock(&ar->conf_mutex);
if (strncmp("trigger", buf, 7) == 0) {
if (ar->spectral.mode == SPECTRAL_MANUAL ||
ar->spectral.mode == SPECTRAL_BACKGROUND) {
/* reset the configuration to adopt possibly changed
* debugfs parameters
*/
res = ath10k_spectral_scan_config(ar,
ar->spectral.mode);
if (res < 0) {
ath10k_warn(ar, "failed to reconfigure spectral scan: %d\n",
res);
}
res = ath10k_spectral_scan_trigger(ar);
if (res < 0) {
ath10k_warn(ar, "failed to trigger spectral scan: %d\n",
res);
}
} else {
res = -EINVAL;
}
} else if (strncmp("background", buf, 9) == 0) {
res = ath10k_spectral_scan_config(ar, SPECTRAL_BACKGROUND);
} else if (strncmp("manual", buf, 6) == 0) {
res = ath10k_spectral_scan_config(ar, SPECTRAL_MANUAL);
} else if (strncmp("disable", buf, 7) == 0) {
res = ath10k_spectral_scan_config(ar, SPECTRAL_DISABLED);
} else {
res = -EINVAL;
}
mutex_unlock(&ar->conf_mutex);
if (res < 0)
return res;
return count;
}
static const struct file_operations fops_spec_scan_ctl = {
.read = read_file_spec_scan_ctl,
.write = write_file_spec_scan_ctl,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t read_file_spectral_count(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
char buf[32];
unsigned int len;
u8 spectral_count;
mutex_lock(&ar->conf_mutex);
spectral_count = ar->spectral.config.count;
mutex_unlock(&ar->conf_mutex);
len = sprintf(buf, "%d\n", spectral_count);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t write_file_spectral_count(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
unsigned long val;
char buf[32];
ssize_t len;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
if (kstrtoul(buf, 0, &val))
return -EINVAL;
if (val > 255)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
ar->spectral.config.count = val;
mutex_unlock(&ar->conf_mutex);
return count;
}
static const struct file_operations fops_spectral_count = {
.read = read_file_spectral_count,
.write = write_file_spectral_count,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t read_file_spectral_bins(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
char buf[32];
unsigned int len, bins, fft_size, bin_scale;
mutex_lock(&ar->conf_mutex);
fft_size = ar->spectral.config.fft_size;
bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT;
bins = 1 << (fft_size - bin_scale);
mutex_unlock(&ar->conf_mutex);
len = sprintf(buf, "%d\n", bins);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t write_file_spectral_bins(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
unsigned long val;
char buf[32];
ssize_t len;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
if (kstrtoul(buf, 0, &val))
return -EINVAL;
if (val < 64 || val > SPECTRAL_ATH10K_MAX_NUM_BINS)
return -EINVAL;
if (!is_power_of_2(val))
return -EINVAL;
mutex_lock(&ar->conf_mutex);
ar->spectral.config.fft_size = ilog2(val);
ar->spectral.config.fft_size += WMI_SPECTRAL_BIN_SCALE_DEFAULT;
mutex_unlock(&ar->conf_mutex);
return count;
}
static const struct file_operations fops_spectral_bins = {
.read = read_file_spectral_bins,
.write = write_file_spectral_bins,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
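/* Example of the spectral_bins round trip (illustrative numbers): writing
 * 128 stores fft_size = ilog2(128) + WMI_SPECTRAL_BIN_SCALE_DEFAULT and a
 * subsequent read reports 1 << (fft_size - WMI_SPECTRAL_BIN_SCALE_DEFAULT),
 * i.e. 128 again. Only powers of two between 64 and
 * SPECTRAL_ATH10K_MAX_NUM_BINS are accepted.
 */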
static struct dentry *create_buf_file_handler(const char *filename,
struct dentry *parent,
umode_t mode,
struct rchan_buf *buf,
int *is_global)
{
struct dentry *buf_file;
buf_file = debugfs_create_file(filename, mode, parent, buf,
&relay_file_operations);
*is_global = 1;
return buf_file;
}
static int remove_buf_file_handler(struct dentry *dentry)
{
debugfs_remove(dentry);
return 0;
}
static struct rchan_callbacks rfs_spec_scan_cb = {
.create_buf_file = create_buf_file_handler,
.remove_buf_file = remove_buf_file_handler,
};
int ath10k_spectral_start(struct ath10k *ar)
{
struct ath10k_vif *arvif;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list)
arvif->spectral_enabled = 0;
ar->spectral.mode = SPECTRAL_DISABLED;
ar->spectral.config.count = WMI_SPECTRAL_COUNT_DEFAULT;
ar->spectral.config.fft_size = WMI_SPECTRAL_FFT_SIZE_DEFAULT;
return 0;
}
int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
{
if (!arvif->spectral_enabled)
return 0;
return ath10k_spectral_scan_config(arvif->ar, SPECTRAL_DISABLED);
}
int ath10k_spectral_create(struct ath10k *ar)
{
ar->spectral.rfs_chan_spec_scan = relay_open("spectral_scan",
ar->debug.debugfs_phy,
1024, 256,
&rfs_spec_scan_cb, NULL);
debugfs_create_file("spectral_scan_ctl",
S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar,
&fops_spec_scan_ctl);
debugfs_create_file("spectral_count",
S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar,
&fops_spectral_count);
debugfs_create_file("spectral_bins",
S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar,
&fops_spectral_bins);
return 0;
}
void ath10k_spectral_destroy(struct ath10k *ar)
{
if (ar->spectral.rfs_chan_spec_scan) {
relay_close(ar->spectral.rfs_chan_spec_scan);
ar->spectral.rfs_chan_spec_scan = NULL;
}
}

View file

@ -0,0 +1,90 @@
/*
* Copyright (c) 2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef SPECTRAL_H
#define SPECTRAL_H
#include "../spectral_common.h"
/**
* struct ath10k_spec_scan - parameters for Atheros spectral scan
*
* @count: number of scan results requested for manual mode
* @fft_size: number of bins to be requested = 2^(fft_size - bin_scale)
*/
struct ath10k_spec_scan {
u8 count;
u8 fft_size;
};
/* enum ath10k_spectral_mode:
*
* @SPECTRAL_DISABLED: spectral mode is disabled
* @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
* something else.
* @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
* is performed manually.
*/
enum ath10k_spectral_mode {
SPECTRAL_DISABLED = 0,
SPECTRAL_BACKGROUND,
SPECTRAL_MANUAL,
};
#ifdef CONFIG_ATH10K_DEBUGFS
int ath10k_spectral_process_fft(struct ath10k *ar,
struct wmi_single_phyerr_rx_event *event,
struct phyerr_fft_report *fftr,
size_t bin_len, u64 tsf);
int ath10k_spectral_start(struct ath10k *ar);
int ath10k_spectral_vif_stop(struct ath10k_vif *arvif);
int ath10k_spectral_create(struct ath10k *ar);
void ath10k_spectral_destroy(struct ath10k *ar);
#else
static inline int
ath10k_spectral_process_fft(struct ath10k *ar,
struct wmi_single_phyerr_rx_event *event,
struct phyerr_fft_report *fftr,
size_t bin_len, u64 tsf)
{
return 0;
}
static inline int ath10k_spectral_start(struct ath10k *ar)
{
return 0;
}
static inline int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
{
return 0;
}
static inline int ath10k_spectral_create(struct ath10k *ar)
{
return 0;
}
static inline void ath10k_spectral_destroy(struct ath10k *ar)
{
}
#endif /* CONFIG_ATH10K_DEBUGFS */
#endif /* SPECTRAL_H */

View file

@ -0,0 +1,448 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __TARGADDRS_H__
#define __TARGADDRS_H__
/*
* xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
* host_interest structure. It must match the address of the _host_interest
* symbol (see linker script).
*
* Host Interest is shared between Host and Target in order to coordinate
* between the two, and is intended to remain constant (with additions only
* at the end) across software releases.
*
* All addresses are available here so that it's possible to
* write a single binary that works with all Target Types.
* May be used in assembler code as well as C.
*/
#define QCA988X_HOST_INTEREST_ADDRESS 0x00400800
#define HOST_INTEREST_MAX_SIZE 0x200
/*
* These are items that the Host may need to access via BMI or via the
* Diagnostic Window. The position of items in this structure must remain
* constant across firmware revisions! Types for each item must be fixed
* size across target and host platforms. More items may be added at the end.
*/
struct host_interest {
/*
* Pointer to application-defined area, if any.
* Set by Target application during startup.
*/
u32 hi_app_host_interest; /* 0x00 */
/* Pointer to register dump area, valid after Target crash. */
u32 hi_failure_state; /* 0x04 */
/* Pointer to debug logging header */
u32 hi_dbglog_hdr; /* 0x08 */
u32 hi_unused0c; /* 0x0c */
/*
* General-purpose flag bits, similar to SOC_OPTION_* flags.
* Can be used by application rather than by OS.
*/
u32 hi_option_flag; /* 0x10 */
/*
* Boolean that determines whether or not to
* display messages on the serial port.
*/
u32 hi_serial_enable; /* 0x14 */
/* Start address of DataSet index, if any */
u32 hi_dset_list_head; /* 0x18 */
/* Override Target application start address */
u32 hi_app_start; /* 0x1c */
/* Clock and voltage tuning */
u32 hi_skip_clock_init; /* 0x20 */
u32 hi_core_clock_setting; /* 0x24 */
u32 hi_cpu_clock_setting; /* 0x28 */
u32 hi_system_sleep_setting; /* 0x2c */
u32 hi_xtal_control_setting; /* 0x30 */
u32 hi_pll_ctrl_setting_24ghz; /* 0x34 */
u32 hi_pll_ctrl_setting_5ghz; /* 0x38 */
u32 hi_ref_voltage_trim_setting; /* 0x3c */
u32 hi_clock_info; /* 0x40 */
/* Host uses BE CPU or not */
u32 hi_be; /* 0x44 */
u32 hi_stack; /* normal stack */ /* 0x48 */
u32 hi_err_stack; /* error stack */ /* 0x4c */
u32 hi_desired_cpu_speed_hz; /* 0x50 */
/* Pointer to Board Data */
u32 hi_board_data; /* 0x54 */
/*
* Indication of Board Data state:
* 0: board data is not yet initialized.
* 1: board data is initialized; unknown size
* >1: number of bytes of initialized board data
*/
u32 hi_board_data_initialized; /* 0x58 */
u32 hi_dset_ram_index_table; /* 0x5c */
u32 hi_desired_baud_rate; /* 0x60 */
u32 hi_dbglog_config; /* 0x64 */
u32 hi_end_ram_reserve_sz; /* 0x68 */
u32 hi_mbox_io_block_sz; /* 0x6c */
u32 hi_num_bpatch_streams; /* 0x70 -- unused */
u32 hi_mbox_isr_yield_limit; /* 0x74 */
u32 hi_refclk_hz; /* 0x78 */
u32 hi_ext_clk_detected; /* 0x7c */
u32 hi_dbg_uart_txpin; /* 0x80 */
u32 hi_dbg_uart_rxpin; /* 0x84 */
u32 hi_hci_uart_baud; /* 0x88 */
u32 hi_hci_uart_pin_assignments; /* 0x8C */
u32 hi_hci_uart_baud_scale_val; /* 0x90 */
u32 hi_hci_uart_baud_step_val; /* 0x94 */
u32 hi_allocram_start; /* 0x98 */
u32 hi_allocram_sz; /* 0x9c */
u32 hi_hci_bridge_flags; /* 0xa0 */
u32 hi_hci_uart_support_pins; /* 0xa4 */
u32 hi_hci_uart_pwr_mgmt_params; /* 0xa8 */
/*
* 0xa8 - [1]: 0 = UART FC active low, 1 = UART FC active high
* [31:16]: wakeup timeout in ms
*/
/* Pointer to extended board Data */
u32 hi_board_ext_data; /* 0xac */
u32 hi_board_ext_data_config; /* 0xb0 */
/*
* Bit [0] : valid
 * Bit [31:16]: size
*/
/*
* hi_reset_flag is used to do some stuff when target reset.
* such as restore app_start after warm reset or
* preserve host Interest area, or preserve ROM data, literals etc.
*/
u32 hi_reset_flag; /* 0xb4 */
/* indicate hi_reset_flag is valid */
u32 hi_reset_flag_valid; /* 0xb8 */
u32 hi_hci_uart_pwr_mgmt_params_ext; /* 0xbc */
/* 0xbc - [31:0]: idle timeout in ms */
/* ACS flags */
u32 hi_acs_flags; /* 0xc0 */
u32 hi_console_flags; /* 0xc4 */
u32 hi_nvram_state; /* 0xc8 */
u32 hi_option_flag2; /* 0xcc */
/* If non-zero, override values sent to Host in WMI_READY event. */
u32 hi_sw_version_override; /* 0xd0 */
u32 hi_abi_version_override; /* 0xd4 */
/*
* Percentage of high priority RX traffic to total expected RX traffic
* applicable only to ar6004
*/
u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
/* test applications flags */
u32 hi_test_apps_related; /* 0xdc */
/* location of test script */
u32 hi_ota_testscript; /* 0xe0 */
/* location of CAL data */
u32 hi_cal_data; /* 0xe4 */
/* Number of packet log buffers */
u32 hi_pktlog_num_buffers; /* 0xe8 */
/* wow extension configuration */
u32 hi_wow_ext_config; /* 0xec */
u32 hi_pwr_save_flags; /* 0xf0 */
/* Spatial Multiplexing Power Save (SMPS) options */
u32 hi_smps_options; /* 0xf4 */
/* Interconnect-specific state */
u32 hi_interconnect_state; /* 0xf8 */
/* Coex configuration flags */
u32 hi_coex_config; /* 0xfc */
/* Early allocation support */
u32 hi_early_alloc; /* 0x100 */
/* FW swap field */
/*
* Bits of this 32bit word will be used to pass specific swap
* instruction to FW
*/
/*
* Bit 0 -- AP Nart descriptor no swap. When this bit is set
* FW will not swap TX descriptor. Meaning packets are formed
* on the target processor.
*/
/* Bit 1 - unused */
u32 hi_fw_swap; /* 0x104 */
} __packed;
#define HI_ITEM(item) offsetof(struct host_interest, item)
/* Bits defined in hi_option_flag */
/* Enable timer workaround */
#define HI_OPTION_TIMER_WAR 0x01
/* Limit BMI command credits */
#define HI_OPTION_BMI_CRED_LIMIT 0x02
/* Relay Dot11 hdr to/from host */
#define HI_OPTION_RELAY_DOT11_HDR 0x04
/* MAC addr method 0-locally administered 1-globally unique addrs */
#define HI_OPTION_MAC_ADDR_METHOD 0x08
/* Firmware Bridging */
#define HI_OPTION_FW_BRIDGE 0x10
/* Enable CPU profiling */
#define HI_OPTION_ENABLE_PROFILE 0x20
/* Disable debug logging */
#define HI_OPTION_DISABLE_DBGLOG 0x40
/* Skip Era Tracking */
#define HI_OPTION_SKIP_ERA_TRACKING 0x80
/* Disable PAPRD (debug) */
#define HI_OPTION_PAPRD_DISABLE 0x100
#define HI_OPTION_NUM_DEV_LSB 0x200
#define HI_OPTION_NUM_DEV_MSB 0x800
#define HI_OPTION_DEV_MODE_LSB 0x1000
#define HI_OPTION_DEV_MODE_MSB 0x8000000
/* Disable LowFreq Timer Stabilization */
#define HI_OPTION_NO_LFT_STBL 0x10000000
/* Skip regulatory scan */
#define HI_OPTION_SKIP_REG_SCAN 0x20000000
/*
* Do regulatory scan during init before
* sending WMI ready event to host
*/
#define HI_OPTION_INIT_REG_SCAN 0x40000000
/* REV6: Do not adjust memory map */
#define HI_OPTION_SKIP_MEMMAP 0x80000000
#define HI_OPTION_MAC_ADDR_METHOD_SHIFT 3
/* 2 bits of hi_option_flag are used to represent 3 modes */
#define HI_OPTION_FW_MODE_IBSS 0x0 /* IBSS Mode */
#define HI_OPTION_FW_MODE_BSS_STA 0x1 /* STA Mode */
#define HI_OPTION_FW_MODE_AP 0x2 /* AP Mode */
#define HI_OPTION_FW_MODE_BT30AMP 0x3 /* BT30 AMP Mode */
/* 2 bits of hi_option_flag are used to represent 4 submodes */
#define HI_OPTION_FW_SUBMODE_NONE 0x0 /* Normal mode */
#define HI_OPTION_FW_SUBMODE_P2PDEV 0x1 /* p2p device mode */
#define HI_OPTION_FW_SUBMODE_P2PCLIENT 0x2 /* p2p client mode */
#define HI_OPTION_FW_SUBMODE_P2PGO 0x3 /* p2p go mode */
/* Num dev Mask */
#define HI_OPTION_NUM_DEV_MASK 0x7
#define HI_OPTION_NUM_DEV_SHIFT 0x9
/* firmware bridging */
#define HI_OPTION_FW_BRIDGE_SHIFT 0x04
/*
Fw Mode/SubMode Mask
|-----------------------------------------------------------------------------|
| SUB | SUB | SUB | SUB | | | | |
|MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0]|
| (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) |
|-----------------------------------------------------------------------------|
*/
#define HI_OPTION_FW_MODE_BITS 0x2
#define HI_OPTION_FW_MODE_MASK 0x3
#define HI_OPTION_FW_MODE_SHIFT 0xC
#define HI_OPTION_ALL_FW_MODE_MASK 0xFF
#define HI_OPTION_FW_SUBMODE_BITS 0x2
#define HI_OPTION_FW_SUBMODE_MASK 0x3
#define HI_OPTION_FW_SUBMODE_SHIFT 0x14
#define HI_OPTION_ALL_FW_SUBMODE_MASK 0xFF00
#define HI_OPTION_ALL_FW_SUBMODE_SHIFT 0x8
/* hi_option_flag2 options */
#define HI_OPTION_OFFLOAD_AMSDU 0x01
#define HI_OPTION_DFS_SUPPORT 0x02 /* Enable DFS support */
#define HI_OPTION_ENABLE_RFKILL 0x04 /* RFKill Enable Feature*/
#define HI_OPTION_RADIO_RETENTION_DISABLE 0x08 /* Disable radio retention */
#define HI_OPTION_EARLY_CFG_DONE 0x10 /* Early configuration is complete */
#define HI_OPTION_RF_KILL_SHIFT 0x2
#define HI_OPTION_RF_KILL_MASK 0x1
/* hi_reset_flag */
/* preserve App Start address */
#define HI_RESET_FLAG_PRESERVE_APP_START 0x01
/* preserve host interest */
#define HI_RESET_FLAG_PRESERVE_HOST_INTEREST 0x02
/* preserve ROM data */
#define HI_RESET_FLAG_PRESERVE_ROMDATA 0x04
#define HI_RESET_FLAG_PRESERVE_NVRAM_STATE 0x08
#define HI_RESET_FLAG_PRESERVE_BOOT_INFO 0x10
#define HI_RESET_FLAG_WARM_RESET 0x20
/* define hi_fw_swap bits */
#define HI_DESC_IN_FW_BIT 0x01
/* indicate the reset flag is valid */
#define HI_RESET_FLAG_IS_VALID 0x12345678
/* ACS is enabled */
#define HI_ACS_FLAGS_ENABLED (1 << 0)
/* Use physical WWAN device */
#define HI_ACS_FLAGS_USE_WWAN (1 << 1)
/* Use test VAP */
#define HI_ACS_FLAGS_TEST_VAP (1 << 2)
/*
* CONSOLE FLAGS
*
* Bit Range Meaning
* --------- --------------------------------
* 2..0 UART ID (0 = Default)
* 3 Baud Select (0 = 9600, 1 = 115200)
* 30..4 Reserved
* 31 Enable Console
*
*/
#define HI_CONSOLE_FLAGS_ENABLE (1 << 31)
#define HI_CONSOLE_FLAGS_UART_MASK (0x7)
#define HI_CONSOLE_FLAGS_UART_SHIFT 0
#define HI_CONSOLE_FLAGS_BAUD_SELECT (1 << 3)
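/* Example (illustrative): enabling the console on UART 1 at 115200 baud
 * would amount to
 *	hi_console_flags = HI_CONSOLE_FLAGS_ENABLE |
 *			   HI_CONSOLE_FLAGS_BAUD_SELECT |
 *			   (1 << HI_CONSOLE_FLAGS_UART_SHIFT);
 */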
/* SM power save options */
#define HI_SMPS_ALLOW_MASK (0x00000001)
#define HI_SMPS_MODE_MASK (0x00000002)
#define HI_SMPS_MODE_STATIC (0x00000000)
#define HI_SMPS_MODE_DYNAMIC (0x00000002)
#define HI_SMPS_DISABLE_AUTO_MODE (0x00000004)
#define HI_SMPS_DATA_THRESH_MASK (0x000007f8)
#define HI_SMPS_DATA_THRESH_SHIFT (3)
#define HI_SMPS_RSSI_THRESH_MASK (0x0007f800)
#define HI_SMPS_RSSI_THRESH_SHIFT (11)
#define HI_SMPS_LOWPWR_CM_MASK (0x00380000)
#define HI_SMPS_LOWPWR_CM_SHIFT (15)
#define HI_SMPS_HIPWR_CM_MASK (0x03c00000)
#define HI_SMPS_HIPWR_CM_SHIFT (19)
/*
* WOW Extension configuration
*
* Bit Range Meaning
* --------- --------------------------------
* 8..0 Size of each WOW pattern (max 511)
* 15..9 Number of patterns per list (max 127)
* 17..16 Number of lists (max 4)
* 30..18 Reserved
* 31 Enabled
*
* set values (except enable) to zeros for default settings
*/
#define HI_WOW_EXT_ENABLED_MASK (1 << 31)
#define HI_WOW_EXT_NUM_LIST_SHIFT 16
#define HI_WOW_EXT_NUM_LIST_MASK (0x3 << HI_WOW_EXT_NUM_LIST_SHIFT)
#define HI_WOW_EXT_NUM_PATTERNS_SHIFT 9
#define HI_WOW_EXT_NUM_PATTERNS_MASK (0x7F << HI_WOW_EXT_NUM_PATTERNS_SHIFT)
#define HI_WOW_EXT_PATTERN_SIZE_SHIFT 0
#define HI_WOW_EXT_PATTERN_SIZE_MASK (0x1FF << HI_WOW_EXT_PATTERN_SIZE_SHIFT)
#define HI_WOW_EXT_MAKE_CONFIG(num_lists, count, size) \
((((num_lists) << HI_WOW_EXT_NUM_LIST_SHIFT) & \
HI_WOW_EXT_NUM_LIST_MASK) | \
(((count) << HI_WOW_EXT_NUM_PATTERNS_SHIFT) & \
HI_WOW_EXT_NUM_PATTERNS_MASK) | \
(((size) << HI_WOW_EXT_PATTERN_SIZE_SHIFT) & \
HI_WOW_EXT_PATTERN_SIZE_MASK))
#define HI_WOW_EXT_GET_NUM_LISTS(config) \
(((config) & HI_WOW_EXT_NUM_LIST_MASK) >> HI_WOW_EXT_NUM_LIST_SHIFT)
#define HI_WOW_EXT_GET_NUM_PATTERNS(config) \
(((config) & HI_WOW_EXT_NUM_PATTERNS_MASK) >> \
HI_WOW_EXT_NUM_PATTERNS_SHIFT)
#define HI_WOW_EXT_GET_PATTERN_SIZE(config) \
(((config) & HI_WOW_EXT_PATTERN_SIZE_MASK) >> \
HI_WOW_EXT_PATTERN_SIZE_SHIFT)
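/* Worked example: HI_WOW_EXT_MAKE_CONFIG(2, 16, 256) packs 2 pattern lists,
 * 16 patterns per list and a 256 byte pattern size into one word, and the
 * HI_WOW_EXT_GET_*() macros recover 2, 16 and 256 from it. The enable bit
 * (HI_WOW_EXT_ENABLED_MASK) is ORed in separately.
 */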
/*
* Early allocation configuration
 * Supports RAM bank configuration before BMI is done, which eases memory
 * allocation at a very early stage
* Bit Range Meaning
* --------- ----------------------------------
* [0:3] number of bank assigned to be IRAM
* [4:15] reserved
* [16:31] magic number
*
* Note:
* 1. target firmware would check magic number and if it's a match, firmware
 * would consider bits [0:15] valid and, based on that, calculate
 * the end of DRAM. Early allocation would be located at that area and
 * may be reclaimed when necessary
* 2. if no magic number is found, early allocation would happen at "_end"
* symbol of ROM which is located before the app-data and might NOT be
* re-claimable. If this is adopted, link script should keep this in
* mind to avoid data corruption.
*/
#define HI_EARLY_ALLOC_MAGIC 0x6d8a
#define HI_EARLY_ALLOC_MAGIC_MASK 0xffff0000
#define HI_EARLY_ALLOC_MAGIC_SHIFT 16
#define HI_EARLY_ALLOC_IRAM_BANKS_MASK 0x0000000f
#define HI_EARLY_ALLOC_IRAM_BANKS_SHIFT 0
#define HI_EARLY_ALLOC_VALID() \
((((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_MAGIC_MASK) >> \
HI_EARLY_ALLOC_MAGIC_SHIFT) == (HI_EARLY_ALLOC_MAGIC))
#define HI_EARLY_ALLOC_GET_IRAM_BANKS() \
(((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_IRAM_BANKS_MASK) \
>> HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
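/* Worked example: requesting 2 IRAM banks means
 * hi_early_alloc = (HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) | 2
 * = 0x6d8a0002, for which HI_EARLY_ALLOC_VALID() is true and
 * HI_EARLY_ALLOC_GET_IRAM_BANKS() returns 2.
 */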
/*power save flag bit definitions*/
#define HI_PWR_SAVE_LPL_ENABLED 0x1
/*b1-b3 reserved*/
/*b4-b5 : dev0 LPL type : 0 - none
1- Reduce Pwr Search
2- Reduce Pwr Listen*/
/*b6-b7 : dev1 LPL type and so on for Max 8 devices*/
#define HI_PWR_SAVE_LPL_DEV0_LSB 4
#define HI_PWR_SAVE_LPL_DEV_MASK 0x3
/*power save related utility macros*/
#define HI_LPL_ENABLED() \
((HOST_INTEREST->hi_pwr_save_flags & HI_PWR_SAVE_LPL_ENABLED))
#define HI_DEV_LPL_TYPE_GET(_devix) \
(HOST_INTEREST->hi_pwr_save_flags & ((HI_PWR_SAVE_LPL_DEV_MASK) << \
(HI_PWR_SAVE_LPL_DEV0_LSB + (_devix)*2)))
#define HOST_INTEREST_SMPS_IS_ALLOWED() \
((HOST_INTEREST->hi_smps_options & HI_SMPS_ALLOW_MASK))
/* Reserve 1024 bytes for extended board data */
#define QCA988X_BOARD_DATA_SZ 7168
#define QCA988X_BOARD_EXT_DATA_SZ 0
#endif /* __TARGADDRS_H__ */

View file

@ -0,0 +1,382 @@
/*
* Copyright (c) 2014 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "testmode.h"
#include <net/netlink.h>
#include <linux/firmware.h>
#include "debug.h"
#include "wmi.h"
#include "hif.h"
#include "hw.h"
#include "testmode_i.h"
static const struct nla_policy ath10k_tm_policy[ATH10K_TM_ATTR_MAX + 1] = {
[ATH10K_TM_ATTR_CMD] = { .type = NLA_U32 },
[ATH10K_TM_ATTR_DATA] = { .type = NLA_BINARY,
.len = ATH10K_TM_DATA_MAX_LEN },
[ATH10K_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
[ATH10K_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
[ATH10K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
};
/* Returns true if callee consumes the skb and the skb should be discarded.
* Returns false if skb is not used. Does not sleep.
*/
bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
{
struct sk_buff *nl_skb;
bool consumed;
int ret;
ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
"testmode event wmi cmd_id %d skb %p skb->len %d\n",
cmd_id, skb, skb->len);
ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
spin_lock_bh(&ar->data_lock);
if (!ar->testmode.utf_monitor) {
consumed = false;
goto out;
}
/* Only testmode.c should be handling events from utf firmware,
 * otherwise all sorts of problems will arise as mac80211 operations
* are not initialised.
*/
consumed = true;
nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
2 * sizeof(u32) + skb->len,
GFP_ATOMIC);
if (!nl_skb) {
ath10k_warn(ar,
"failed to allocate skb for testmode wmi event\n");
goto out;
}
ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_WMI);
if (ret) {
ath10k_warn(ar,
"failed to put testmode wmi event cmd attribute: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
}
ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_WMI_CMDID, cmd_id);
if (ret) {
ath10k_warn(ar,
"failed to put testmode wmi event cmd_id: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
}
ret = nla_put(nl_skb, ATH10K_TM_ATTR_DATA, skb->len, skb->data);
if (ret) {
ath10k_warn(ar,
"failed to copy skb to testmode wmi event: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
}
cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
out:
spin_unlock_bh(&ar->data_lock);
return consumed;
}
static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[])
{
struct sk_buff *skb;
int ret;
ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
"testmode cmd get version_major %d version_minor %d\n",
ATH10K_TESTMODE_VERSION_MAJOR,
ATH10K_TESTMODE_VERSION_MINOR);
skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
nla_total_size(sizeof(u32)));
if (!skb)
return -ENOMEM;
ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MAJOR,
ATH10K_TESTMODE_VERSION_MAJOR);
if (ret) {
kfree_skb(skb);
return ret;
}
ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MINOR,
ATH10K_TESTMODE_VERSION_MINOR);
if (ret) {
kfree_skb(skb);
return ret;
}
return cfg80211_testmode_reply(skb);
}
static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
{
char filename[100];
int ret;
ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf start\n");
mutex_lock(&ar->conf_mutex);
if (ar->state == ATH10K_STATE_UTF) {
ret = -EALREADY;
goto err;
}
/* start utf only when the driver is not in use */
if (ar->state != ATH10K_STATE_OFF) {
ret = -EBUSY;
goto err;
}
if (WARN_ON(ar->testmode.utf != NULL)) {
/* utf image is already downloaded, it shouldn't be */
ret = -EEXIST;
goto err;
}
snprintf(filename, sizeof(filename), "%s/%s",
ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE);
/* load utf firmware image */
ret = request_firmware(&ar->testmode.utf, filename, ar->dev);
if (ret) {
ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
filename, ret);
goto err;
}
spin_lock_bh(&ar->data_lock);
ar->testmode.utf_monitor = true;
spin_unlock_bh(&ar->data_lock);
BUILD_BUG_ON(sizeof(ar->fw_features) !=
sizeof(ar->testmode.orig_fw_features));
memcpy(ar->testmode.orig_fw_features, ar->fw_features,
sizeof(ar->fw_features));
/* utf.bin firmware image does not advertise firmware features. Do
* an ugly hack where we force the firmware features so that wmi.c
* will use the correct WMI interface.
*/
memset(ar->fw_features, 0, sizeof(ar->fw_features));
__set_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features);
ret = ath10k_hif_power_up(ar);
if (ret) {
ath10k_err(ar, "failed to power up hif (testmode): %d\n", ret);
ar->state = ATH10K_STATE_OFF;
goto err_fw_features;
}
ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF);
if (ret) {
ath10k_err(ar, "failed to start core (testmode): %d\n", ret);
ar->state = ATH10K_STATE_OFF;
goto err_power_down;
}
ar->state = ATH10K_STATE_UTF;
ath10k_info(ar, "UTF firmware started\n");
mutex_unlock(&ar->conf_mutex);
return 0;
err_power_down:
ath10k_hif_power_down(ar);
err_fw_features:
/* return the original firmware features */
memcpy(ar->fw_features, ar->testmode.orig_fw_features,
sizeof(ar->fw_features));
release_firmware(ar->testmode.utf);
ar->testmode.utf = NULL;
err:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar)
{
lockdep_assert_held(&ar->conf_mutex);
ath10k_core_stop(ar);
ath10k_hif_power_down(ar);
spin_lock_bh(&ar->data_lock);
ar->testmode.utf_monitor = false;
spin_unlock_bh(&ar->data_lock);
/* return the original firmware features */
memcpy(ar->fw_features, ar->testmode.orig_fw_features,
sizeof(ar->fw_features));
release_firmware(ar->testmode.utf);
ar->testmode.utf = NULL;
ar->state = ATH10K_STATE_OFF;
}
static int ath10k_tm_cmd_utf_stop(struct ath10k *ar, struct nlattr *tb[])
{
int ret;
ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf stop\n");
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_UTF) {
ret = -ENETDOWN;
goto out;
}
__ath10k_tm_cmd_utf_stop(ar);
ret = 0;
ath10k_info(ar, "UTF firmware stopped\n");
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[])
{
struct sk_buff *skb;
int ret, buf_len;
u32 cmd_id;
void *buf;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_UTF) {
ret = -ENETDOWN;
goto out;
}
if (!tb[ATH10K_TM_ATTR_DATA]) {
ret = -EINVAL;
goto out;
}
if (!tb[ATH10K_TM_ATTR_WMI_CMDID]) {
ret = -EINVAL;
goto out;
}
buf = nla_data(tb[ATH10K_TM_ATTR_DATA]);
buf_len = nla_len(tb[ATH10K_TM_ATTR_DATA]);
cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]);
ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
"testmode cmd wmi cmd_id %d buf %p buf_len %d\n",
cmd_id, buf, buf_len);
ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len);
skb = ath10k_wmi_alloc_skb(ar, buf_len);
if (!skb) {
ret = -ENOMEM;
goto out;
}
memcpy(skb->data, buf, buf_len);
ret = ath10k_wmi_cmd_send(ar, skb, cmd_id);
if (ret) {
ath10k_warn(ar, "failed to transmit wmi command (testmode): %d\n",
ret);
goto out;
}
ret = 0;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len)
{
struct ath10k *ar = hw->priv;
struct nlattr *tb[ATH10K_TM_ATTR_MAX + 1];
int ret;
ret = nla_parse(tb, ATH10K_TM_ATTR_MAX, data, len,
ath10k_tm_policy);
if (ret)
return ret;
if (!tb[ATH10K_TM_ATTR_CMD])
return -EINVAL;
switch (nla_get_u32(tb[ATH10K_TM_ATTR_CMD])) {
case ATH10K_TM_CMD_GET_VERSION:
return ath10k_tm_cmd_get_version(ar, tb);
case ATH10K_TM_CMD_UTF_START:
return ath10k_tm_cmd_utf_start(ar, tb);
case ATH10K_TM_CMD_UTF_STOP:
return ath10k_tm_cmd_utf_stop(ar, tb);
case ATH10K_TM_CMD_WMI:
return ath10k_tm_cmd_wmi(ar, tb);
default:
return -EOPNOTSUPP;
}
}
void ath10k_testmode_destroy(struct ath10k *ar)
{
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_UTF) {
/* utf firmware is not running, nothing to do */
goto out;
}
__ath10k_tm_cmd_utf_stop(ar);
out:
mutex_unlock(&ar->conf_mutex);
}
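ath10k_tm_event_wmi() above documents a simple contract: it never sleeps and its return value tells the caller whether testmode consumed the event, in which case the skb should just be discarded. A minimal caller sketch under that contract (the surrounding function is hypothetical; only ath10k_tm_event_wmi() comes from this file):
/* Hypothetical WMI RX dispatch sketch honouring the consume contract. */
static void example_wmi_event_rx(struct ath10k *ar, u32 cmd_id,
				 struct sk_buff *skb)
{
	/* testmode gets first pick at the event */
	if (!ath10k_tm_event_wmi(ar, cmd_id, skb)) {
		/* normal (non-UTF) event handling would run here */
	}

	dev_kfree_skb(skb);
}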

View file

@@ -0,0 +1,46 @@
/*
* Copyright (c) 2014 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
#ifdef CONFIG_NL80211_TESTMODE
void ath10k_testmode_destroy(struct ath10k *ar);
bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb);
int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
#else
static inline void ath10k_testmode_destroy(struct ath10k *ar)
{
}
static inline bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id,
struct sk_buff *skb)
{
return false;
}
static inline int ath10k_tm_cmd(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
void *data, int len)
{
return 0;
}
#endif

View file

@@ -0,0 +1,70 @@
/*
* Copyright (c) 2014 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* "API" level of the ath10k testmode interface. Bump it after every
* incompatible interface change.
*/
#define ATH10K_TESTMODE_VERSION_MAJOR 1
/* Bump this after every _compatible_ interface change, for example
* addition of a new command or an attribute.
*/
#define ATH10K_TESTMODE_VERSION_MINOR 0
#define ATH10K_TM_DATA_MAX_LEN 5000
enum ath10k_tm_attr {
__ATH10K_TM_ATTR_INVALID = 0,
ATH10K_TM_ATTR_CMD = 1,
ATH10K_TM_ATTR_DATA = 2,
ATH10K_TM_ATTR_WMI_CMDID = 3,
ATH10K_TM_ATTR_VERSION_MAJOR = 4,
ATH10K_TM_ATTR_VERSION_MINOR = 5,
/* keep last */
__ATH10K_TM_ATTR_AFTER_LAST,
ATH10K_TM_ATTR_MAX = __ATH10K_TM_ATTR_AFTER_LAST - 1,
};
/* All ath10k testmode interface commands specified in
* ATH10K_TM_ATTR_CMD
*/
enum ath10k_tm_cmd {
/* Returns the supported ath10k testmode interface version in
* ATH10K_TM_ATTR_VERSION_MAJOR/_MINOR. Always guaranteed to work. User space
* uses this to verify it's using the correct version of the
* testmode interface
*/
ATH10K_TM_CMD_GET_VERSION = 0,
/* Boots the UTF firmware; the netdev interface must be down at the
* time.
*/
ATH10K_TM_CMD_UTF_START = 1,
/* Shuts down the UTF firmware and puts the driver back into OFF
* state.
*/
ATH10K_TM_CMD_UTF_STOP = 2,
/* The command used to transmit a WMI command to the firmware and
* the event to receive WMI events from the firmware. Without
* struct wmi_cmd_hdr header, only the WMI payload. Command id is
* provided with ATH10K_TM_ATTR_WMI_CMDID and payload in
* ATH10K_TM_ATTR_DATA.
*/
ATH10K_TM_CMD_WMI = 3,
};
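The commands above ride inside nl80211's generic testmode attribute, so a user-space tool nests the ATH10K_TM_* attributes under NL80211_ATTR_TESTDATA. As a rough sketch only (libnl-3 is assumed and is not part of this interface; just the attribute and command numbers come from this header), a GET_VERSION request could be built like this:
/* Hypothetical user-space sketch (libnl-3 assumed). */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

static int ath10k_tm_send_get_version(int ifindex)
{
	struct nl_sock *sk;
	struct nl_msg *msg;
	struct nlattr *tm;
	int family, ret = -1;

	sk = nl_socket_alloc();
	if (!sk)
		return -1;
	if (genl_connect(sk))
		goto out_free_sock;

	family = genl_ctrl_resolve(sk, "nl80211");
	if (family < 0)
		goto out_free_sock;

	msg = nlmsg_alloc();
	if (!msg)
		goto out_free_sock;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_TESTMODE, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);

	/* vendor payload parsed by ath10k_tm_cmd() against ath10k_tm_policy */
	tm = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
	nla_put_u32(msg, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_GET_VERSION);
	nla_nest_end(msg, tm);

	ret = nl_send_auto(sk, msg) < 0 ? -1 : 0;

	nlmsg_free(msg);
out_free_sock:
	nl_socket_free(sk);
	return ret;
}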

View file

@@ -0,0 +1,20 @@
/*
* Copyright (c) 2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
#define CREATE_TRACE_POINTS
#include "trace.h"

View file

@@ -0,0 +1,266 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#include <linux/tracepoint.h>
#include "core.h"
#define _TRACE_H_
/* create empty functions when tracing is disabled */
#if !defined(CONFIG_ATH10K_TRACING)
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, ...) \
static inline void trace_ ## name(proto) {}
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(...)
#undef DEFINE_EVENT
#define DEFINE_EVENT(evt_class, name, proto, ...) \
static inline void trace_ ## name(proto) {}
#endif /* !CONFIG_ATH10K_TRACING || __CHECKER__ */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ath10k
#define ATH10K_MSG_MAX 200
DECLARE_EVENT_CLASS(ath10k_log_event,
TP_PROTO(struct ath10k *ar, struct va_format *vaf),
TP_ARGS(ar, vaf),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__dynamic_array(char, msg, ATH10K_MSG_MAX)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
ATH10K_MSG_MAX,
vaf->fmt,
*vaf->va) >= ATH10K_MSG_MAX);
),
TP_printk(
"%s %s %s",
__get_str(driver),
__get_str(device),
__get_str(msg)
)
);
DEFINE_EVENT(ath10k_log_event, ath10k_log_err,
TP_PROTO(struct ath10k *ar, struct va_format *vaf),
TP_ARGS(ar, vaf)
);
DEFINE_EVENT(ath10k_log_event, ath10k_log_warn,
TP_PROTO(struct ath10k *ar, struct va_format *vaf),
TP_ARGS(ar, vaf)
);
DEFINE_EVENT(ath10k_log_event, ath10k_log_info,
TP_PROTO(struct ath10k *ar, struct va_format *vaf),
TP_ARGS(ar, vaf)
);
TRACE_EVENT(ath10k_log_dbg,
TP_PROTO(struct ath10k *ar, unsigned int level, struct va_format *vaf),
TP_ARGS(ar, level, vaf),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(unsigned int, level)
__dynamic_array(char, msg, ATH10K_MSG_MAX)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->level = level;
WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
ATH10K_MSG_MAX,
vaf->fmt,
*vaf->va) >= ATH10K_MSG_MAX);
),
TP_printk(
"%s %s %s",
__get_str(driver),
__get_str(device),
__get_str(msg)
)
);
TRACE_EVENT(ath10k_log_dbg_dump,
TP_PROTO(struct ath10k *ar, const char *msg, const char *prefix,
const void *buf, size_t buf_len),
TP_ARGS(ar, msg, prefix, buf, buf_len),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__string(msg, msg)
__string(prefix, prefix)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__assign_str(msg, msg);
__assign_str(prefix, prefix);
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"%s %s %s/%s\n",
__get_str(driver),
__get_str(device),
__get_str(prefix),
__get_str(msg)
)
);
TRACE_EVENT(ath10k_wmi_cmd,
TP_PROTO(struct ath10k *ar, int id, void *buf, size_t buf_len, int ret),
TP_ARGS(ar, id, buf, buf_len, ret),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(unsigned int, id)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
__field(int, ret)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->id = id;
__entry->buf_len = buf_len;
__entry->ret = ret;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"%s %s id %d len %zu ret %d",
__get_str(driver),
__get_str(device),
__entry->id,
__entry->buf_len,
__entry->ret
)
);
TRACE_EVENT(ath10k_wmi_event,
TP_PROTO(struct ath10k *ar, int id, void *buf, size_t buf_len),
TP_ARGS(ar, id, buf, buf_len),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(unsigned int, id)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->id = id;
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"%s %s id %d len %zu",
__get_str(driver),
__get_str(device),
__entry->id,
__entry->buf_len
)
);
TRACE_EVENT(ath10k_htt_stats,
TP_PROTO(struct ath10k *ar, void *buf, size_t buf_len),
TP_ARGS(ar, buf, buf_len),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"%s %s len %zu",
__get_str(driver),
__get_str(device),
__entry->buf_len
)
);
TRACE_EVENT(ath10k_wmi_dbglog,
TP_PROTO(struct ath10k *ar, void *buf, size_t buf_len),
TP_ARGS(ar, buf, buf_len),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"%s %s len %zu",
__get_str(driver),
__get_str(device),
__entry->buf_len
)
);
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
/* This part must be outside protection */
#include <trace/define_trace.h>
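Since trace.c defines CREATE_TRACE_POINTS exactly once before including this header, other driver files only include "trace.h" and call the generated trace_*() helpers; when CONFIG_ATH10K_TRACING is disabled the TRACE_EVENT() override above turns them into empty inline functions. A hedged call-site sketch (the surrounding function is hypothetical; only trace_ath10k_wmi_cmd() is generated here):
/* Hypothetical call site for the trace_ath10k_wmi_cmd() tracepoint. */
#include "trace.h"

static int example_wmi_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
{
	int ret = 0;

	/* ... the actual HTC/HIF transmit would happen here ... */

	/* record command id, payload and return value in the trace buffer */
	trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);

	return ret;
}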

View file

@@ -0,0 +1,221 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
#include "txrx.h"
#include "htt.h"
#include "mac.h"
#include "debug.h"
static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
{
if (!ATH10K_SKB_CB(skb)->htt.is_offchan)
return;
/* If the original wait_for_completion() timed out before
* {data,mgmt}_tx_completed() was called then we could complete
* offchan_tx_completed for a different skb. Prevent this by using
* offchan_tx_skb. */
spin_lock_bh(&ar->data_lock);
if (ar->offchan_tx_skb != skb) {
ath10k_warn(ar, "completed old offchannel frame\n");
goto out;
}
complete(&ar->offchan_tx_completed);
ar->offchan_tx_skb = NULL; /* just for sanity */
ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
out:
spin_unlock_bh(&ar->data_lock);
}
void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done)
{
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
struct ieee80211_tx_info *info;
struct ath10k_skb_cb *skb_cb;
struct sk_buff *msdu;
lockdep_assert_held(&htt->tx_lock);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
if (tx_done->msdu_id >= htt->max_num_pending_tx) {
ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
tx_done->msdu_id);
return;
}
msdu = htt->pending_tx[tx_done->msdu_id];
skb_cb = ATH10K_SKB_CB(msdu);
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
if (skb_cb->htt.txbuf)
dma_pool_free(htt->tx_pool,
skb_cb->htt.txbuf,
skb_cb->htt.txbuf_paddr);
ath10k_report_offchan_tx(htt->ar, msdu);
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
if (tx_done->discard) {
ieee80211_free_txskb(htt->ar->hw, msdu);
goto exit;
}
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
if (tx_done->no_ack)
info->flags &= ~IEEE80211_TX_STAT_ACK;
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */
exit:
htt->pending_tx[tx_done->msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
__ath10k_htt_tx_dec_pending(htt);
if (htt->num_pending_tx == 0)
wake_up(&htt->empty_tx_wq);
}
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
const u8 *addr)
{
struct ath10k_peer *peer;
lockdep_assert_held(&ar->data_lock);
list_for_each_entry(peer, &ar->peers, list) {
if (peer->vdev_id != vdev_id)
continue;
if (memcmp(peer->addr, addr, ETH_ALEN))
continue;
return peer;
}
return NULL;
}
struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
{
struct ath10k_peer *peer;
lockdep_assert_held(&ar->data_lock);
list_for_each_entry(peer, &ar->peers, list)
if (test_bit(peer_id, peer->peer_ids))
return peer;
return NULL;
}
static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
const u8 *addr, bool expect_mapped)
{
int ret;
ret = wait_event_timeout(ar->peer_mapping_wq, ({
bool mapped;
spin_lock_bh(&ar->data_lock);
mapped = !!ath10k_peer_find(ar, vdev_id, addr);
spin_unlock_bh(&ar->data_lock);
mapped == expect_mapped;
}), 3*HZ);
if (ret <= 0)
return -ETIMEDOUT;
return 0;
}
int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr)
{
return ath10k_wait_for_peer_common(ar, vdev_id, addr, true);
}
int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr)
{
return ath10k_wait_for_peer_common(ar, vdev_id, addr, false);
}
void ath10k_peer_map_event(struct ath10k_htt *htt,
struct htt_peer_map_event *ev)
{
struct ath10k *ar = htt->ar;
struct ath10k_peer *peer;
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
if (!peer) {
peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
if (!peer)
goto exit;
peer->vdev_id = ev->vdev_id;
ether_addr_copy(peer->addr, ev->addr);
list_add(&peer->list, &ar->peers);
wake_up(&ar->peer_mapping_wq);
}
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
ev->vdev_id, ev->addr, ev->peer_id);
set_bit(ev->peer_id, peer->peer_ids);
exit:
spin_unlock_bh(&ar->data_lock);
}
void ath10k_peer_unmap_event(struct ath10k_htt *htt,
struct htt_peer_unmap_event *ev)
{
struct ath10k *ar = htt->ar;
struct ath10k_peer *peer;
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, ev->peer_id);
if (!peer) {
ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n",
ev->peer_id);
goto exit;
}
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
peer->vdev_id, peer->addr, ev->peer_id);
clear_bit(ev->peer_id, peer->peer_ids);
if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
list_del(&peer->list);
kfree(peer);
wake_up(&ar->peer_mapping_wq);
}
exit:
spin_unlock_bh(&ar->data_lock);
}
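The wait helpers above exist because HTT peer map/unmap indications arrive asynchronously after the corresponding WMI command completes. A typical creation sequence, sketched under the assumption that a WMI peer-create helper exists elsewhere in the driver (it is not part of this file):
/* Illustrative sketch; ath10k_wmi_peer_create() is assumed, the wait helper
 * is the one defined above.
 */
static int example_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
{
	int ret;

	ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
	if (ret)
		return ret;

	/* blocks for up to 3*HZ until the HTT peer map event arrives */
	return ath10k_wait_for_peer_created(ar, vdev_id, addr);
}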

View file

@@ -0,0 +1,38 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _TXRX_H_
#define _TXRX_H_
#include "htt.h"
void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done);
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
const u8 *addr);
struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id);
int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id,
const u8 *addr);
int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id,
const u8 *addr);
void ath10k_peer_map_event(struct ath10k_htt *htt,
struct htt_peer_map_event *ev);
void ath10k_peer_unmap_event(struct ath10k_htt *htt,
struct htt_peer_unmap_event *ev);
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,67 @@
config ATH5K
tristate "Atheros 5xxx wireless cards support"
depends on PCI && MAC80211
select ATH_COMMON
select MAC80211_LEDS
select LEDS_CLASS
select NEW_LEDS
select AVERAGE
select ATH5K_PCI
---help---
This module adds support for wireless adapters based on
Atheros 5xxx chipset.
Currently the following chip versions are supported:
MAC: AR5211 AR5212
PHY: RF5111/2111 RF5112/2112 RF5413/2413
This driver uses the kernel's mac80211 subsystem.
If you choose to build a module, it'll be called ath5k. Say M if
unsure.
config ATH5K_DEBUG
bool "Atheros 5xxx debugging"
depends on ATH5K
---help---
Atheros 5xxx debugging messages.
Say Y here and you will get debug options for ath5k.
To use this, you need to mount debugfs:
mount -t debugfs debug /sys/kernel/debug
You will get access to files under:
/sys/kernel/debug/ath5k/phy0/
To enable debug, pass the debug level to the debug module
parameter. For example:
modprobe ath5k debug=0x00000400
config ATH5K_TRACER
bool "Atheros 5xxx tracer"
depends on ATH5K
depends on EVENT_TRACING
---help---
Say Y here to enable tracepoints for the ath5k driver
using the kernel tracing infrastructure. Select this
option if you are interested in debugging the driver.
If unsure, say N.
config ATH5K_PCI
bool "Atheros 5xxx PCI bus support"
depends on PCI
---help---
This adds support for PCI type chipsets of the 5xxx Atheros
family.
config ATH5K_TEST_CHANNELS
bool "Enables testing channels on ath5k"
depends on ATH5K && CFG80211_CERTIFICATION_ONUS
---help---
This enables non-standard IEEE 802.11 channels on ath5k, which
can be used for research purposes. This option should be disabled
unless doing research.

View file

@@ -0,0 +1,21 @@
ath5k-y += caps.o
ath5k-y += initvals.o
ath5k-y += eeprom.o
ath5k-y += gpio.o
ath5k-y += desc.o
ath5k-y += dma.o
ath5k-y += qcu.o
ath5k-y += pcu.o
ath5k-y += phy.o
ath5k-y += reset.o
ath5k-y += attach.o
ath5k-y += base.o
CFLAGS_base.o += -I$(src)
ath5k-y += led.o
ath5k-y += rfkill.o
ath5k-y += ani.o
ath5k-y += sysfs.o
ath5k-y += mac80211-ops.o
ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o
ath5k-$(CONFIG_ATH5K_PCI) += pci.o
obj-$(CONFIG_ATH5K) += ath5k.o

View file

@@ -0,0 +1,754 @@
/*
* Copyright (C) 2010 Bruno Randolf <br1@einfach.org>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
#include "ani.h"
/**
* DOC: Basic ANI Operation
*
* Adaptive Noise Immunity (ANI) controls five noise immunity parameters
* depending on the amount of interference in the environment, increasing
* or reducing sensitivity as necessary.
*
* The parameters are:
*
* - "noise immunity"
*
* - "spur immunity"
*
* - "firstep level"
*
* - "OFDM weak signal detection"
*
* - "CCK weak signal detection"
*
* Basically we look at the amount of OFDM and CCK timing errors we get and then
* raise or lower immunity accordingly by setting one or more of these
* parameters.
*
* Newer chipsets have PHY error counters in hardware which will generate a MIB
* interrupt when they overflow. Older hardware has to enable PHY error frames
* by setting an RX flag and then count every single PHY error. When a specified
* threshold of errors has been reached we will raise immunity.
* Also we regularly check the amount of errors and lower or raise immunity as
* necessary.
*/
/***********************\
* ANI parameter control *
\***********************/
/**
* ath5k_ani_set_noise_immunity_level() - Set noise immunity level
* @ah: The &struct ath5k_hw
* @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL
*/
void
ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
{
/* TODO:
* ANI documents suggest the following five levels to use, but the HAL
* and ath9k use only the last two levels, making this
* essentially an on/off option. There *may* be a reason for this (???),
* so I stick with the HAL version for now...
*/
#if 0
static const s8 lo[] = { -52, -56, -60, -64, -70 };
static const s8 hi[] = { -18, -18, -16, -14, -12 };
static const s8 sz[] = { -34, -41, -48, -55, -62 };
static const s8 fr[] = { -70, -72, -75, -78, -80 };
#else
static const s8 lo[] = { -64, -70 };
static const s8 hi[] = { -14, -12 };
static const s8 sz[] = { -55, -62 };
static const s8 fr[] = { -78, -80 };
#endif
if (level < 0 || level >= ARRAY_SIZE(sz)) {
ATH5K_ERR(ah, "noise immunity level %d out of range",
level);
return;
}
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
AR5K_PHY_DESIRED_SIZE_TOT, sz[level]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_AGCCOARSE,
AR5K_PHY_AGCCOARSE_LO, lo[level]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_AGCCOARSE,
AR5K_PHY_AGCCOARSE_HI, hi[level]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
AR5K_PHY_SIG_FIRPWR, fr[level]);
ah->ani_state.noise_imm_level = level;
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
/**
* ath5k_ani_set_spur_immunity_level() - Set spur immunity level
* @ah: The &struct ath5k_hw
* @level: level between 0 and @max_spur_level (the maximum level is dependent
* on the chip revision).
*/
void
ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
{
static const int val[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
if (level < 0 || level >= ARRAY_SIZE(val) ||
level > ah->ani_state.max_spur_level) {
ATH5K_ERR(ah, "spur immunity level %d out of range",
level);
return;
}
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_OFDM_SELFCORR,
AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1, val[level]);
ah->ani_state.spur_level = level;
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
/**
* ath5k_ani_set_firstep_level() - Set "firstep" level
* @ah: The &struct ath5k_hw
* @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL
*/
void
ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
{
static const int val[] = { 0, 4, 8 };
if (level < 0 || level >= ARRAY_SIZE(val)) {
ATH5K_ERR(ah, "firstep level %d out of range", level);
return;
}
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
AR5K_PHY_SIG_FIRSTEP, val[level]);
ah->ani_state.firstep_level = level;
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
/**
* ath5k_ani_set_ofdm_weak_signal_detection() - Set OFDM weak signal detection
* @ah: The &struct ath5k_hw
* @on: turn on or off
*/
void
ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
{
static const int m1l[] = { 127, 50 };
static const int m2l[] = { 127, 40 };
static const int m1[] = { 127, 0x4d };
static const int m2[] = { 127, 0x40 };
static const int m2cnt[] = { 31, 16 };
static const int m2lcnt[] = { 63, 48 };
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
AR5K_PHY_WEAK_OFDM_LOW_THR_M1, m1l[on]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
AR5K_PHY_WEAK_OFDM_LOW_THR_M2, m2l[on]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
AR5K_PHY_WEAK_OFDM_HIGH_THR_M1, m1[on]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
AR5K_PHY_WEAK_OFDM_HIGH_THR_M2, m2[on]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_COUNT, m2cnt[on]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT, m2lcnt[on]);
if (on)
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);
else
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);
ah->ani_state.ofdm_weak_sig = on;
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "turned %s",
on ? "on" : "off");
}
/**
* ath5k_ani_set_cck_weak_signal_detection() - Set CCK weak signal detection
* @ah: The &struct ath5k_hw
* @on: turn on or off
*/
void
ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
{
static const int val[] = { 8, 6 };
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_CCK_CROSSCORR,
AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR, val[on]);
ah->ani_state.cck_weak_sig = on;
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "turned %s",
on ? "on" : "off");
}
/***************\
* ANI algorithm *
\***************/
/**
* ath5k_ani_raise_immunity() - Increase noise immunity
* @ah: The &struct ath5k_hw
* @as: The &struct ath5k_ani_state
* @ofdm_trigger: If this is true we are called because of too many OFDM errors,
* in which case the algorithm will tune more parameters.
*
* Try to raise noise immunity (=decrease sensitivity) in several steps
* depending on the average RSSI of the beacons we received.
*/
static void
ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
bool ofdm_trigger)
{
int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "raise immunity (%s)",
ofdm_trigger ? "OFDM" : "CCK");
/* first: raise noise immunity */
if (as->noise_imm_level < ATH5K_ANI_MAX_NOISE_IMM_LVL) {
ath5k_ani_set_noise_immunity_level(ah, as->noise_imm_level + 1);
return;
}
/* only OFDM: raise spur immunity level */
if (ofdm_trigger &&
as->spur_level < ah->ani_state.max_spur_level) {
ath5k_ani_set_spur_immunity_level(ah, as->spur_level + 1);
return;
}
/* AP mode */
if (ah->opmode == NL80211_IFTYPE_AP) {
if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
return;
}
/* STA and IBSS mode */
/* TODO: for IBSS mode it would be better to keep a beacon RSSI average
* per each neighbour node and use the minimum of these, to make sure we
* don't shut out a remote node by raising immunity too high. */
if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"beacon RSSI high");
/* only OFDM: beacon RSSI is high, we can disable OFDM weak
* signal detection */
if (ofdm_trigger && as->ofdm_weak_sig) {
ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
ath5k_ani_set_spur_immunity_level(ah, 0);
return;
}
/* as a last resort or CCK: raise firstep level */
if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL) {
ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
return;
}
} else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
/* beacon RSSI in mid range, we need OFDM weak signal detect,
* but can raise firstep level */
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"beacon RSSI mid");
if (ofdm_trigger && !as->ofdm_weak_sig)
ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
return;
} else if (ah->ah_current_channel->band == IEEE80211_BAND_2GHZ) {
/* beacon RSSI is low. In B/G mode turn off OFDM weak signal
* detect and zero firstep level to maximize CCK sensitivity */
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"beacon RSSI low, 2GHz");
if (ofdm_trigger && as->ofdm_weak_sig)
ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
if (as->firstep_level > 0)
ath5k_ani_set_firstep_level(ah, 0);
return;
}
/* TODO: why not?:
if (as->cck_weak_sig == true) {
ath5k_ani_set_cck_weak_signal_detection(ah, false);
}
*/
}
/**
* ath5k_ani_lower_immunity() - Decrease noise immunity
* @ah: The &struct ath5k_hw
* @as: The &struct ath5k_ani_state
*
* Try to lower noise immunity (=increase sensitivity) in several steps
* depending on the average RSSI of the beacons we received.
*/
static void
ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
{
int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "lower immunity");
if (ah->opmode == NL80211_IFTYPE_AP) {
/* AP mode */
if (as->firstep_level > 0) {
ath5k_ani_set_firstep_level(ah, as->firstep_level - 1);
return;
}
} else {
/* STA and IBSS mode (see TODO above) */
if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
/* beacon signal is high, leave OFDM weak signal
* detection off or it may oscillate
* TODO: who said it's off??? */
} else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
/* beacon RSSI is mid-range: turn on OFDM weak signal
* detection and next, lower firstep level */
if (!as->ofdm_weak_sig) {
ath5k_ani_set_ofdm_weak_signal_detection(ah,
true);
return;
}
if (as->firstep_level > 0) {
ath5k_ani_set_firstep_level(ah,
as->firstep_level - 1);
return;
}
} else {
/* beacon signal is low: only reduce firstep level */
if (as->firstep_level > 0) {
ath5k_ani_set_firstep_level(ah,
as->firstep_level - 1);
return;
}
}
}
/* all modes */
if (as->spur_level > 0) {
ath5k_ani_set_spur_immunity_level(ah, as->spur_level - 1);
return;
}
/* finally, reduce noise immunity */
if (as->noise_imm_level > 0) {
ath5k_ani_set_noise_immunity_level(ah, as->noise_imm_level - 1);
return;
}
}
/**
* ath5k_hw_ani_get_listen_time() - Update counters and return listening time
* @ah: The &struct ath5k_hw
* @as: The &struct ath5k_ani_state
*
* Return an approximation of the time spent "listening" in milliseconds (ms)
* since the last call of this function.
* Save a snapshot of the counter values for debugging/statistics.
*/
static int
ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as)
{
struct ath_common *common = ath5k_hw_common(ah);
int listen;
spin_lock_bh(&common->cc_lock);
ath_hw_cycle_counters_update(common);
memcpy(&as->last_cc, &common->cc_ani, sizeof(as->last_cc));
/* clears common->cc_ani */
listen = ath_hw_get_listen_time(common);
spin_unlock_bh(&common->cc_lock);
return listen;
}
/**
* ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters
* @ah: The &struct ath5k_hw
* @as: The &struct ath5k_ani_state
*
* Clear the PHY error counters as soon as possible, since this might be called
* from a MIB interrupt and we want to make sure we don't get interrupted again.
* Add the count of CCK and OFDM errors to our internal state, so it can be used
* by the algorithm later.
*
* Will be called from interrupt and tasklet context.
* Returns 0 if both counters are zero.
*/
static int
ath5k_ani_save_and_clear_phy_errors(struct ath5k_hw *ah,
struct ath5k_ani_state *as)
{
unsigned int ofdm_err, cck_err;
if (!ah->ah_capabilities.cap_has_phyerr_counters)
return 0;
ofdm_err = ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1);
cck_err = ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2);
/* reset counters first, we might be in a hurry (interrupt) */
ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_OFDM_TRIG_HIGH,
AR5K_PHYERR_CNT1);
ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_CCK_TRIG_HIGH,
AR5K_PHYERR_CNT2);
ofdm_err = ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - ofdm_err);
cck_err = ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - cck_err);
/* sometimes both can be zero, especially when there is a superfluous
* second interrupt. Detect that here and return early. */
if (ofdm_err <= 0 && cck_err <= 0)
return 0;
/* avoid negative values should one of the registers overflow */
if (ofdm_err > 0) {
as->ofdm_errors += ofdm_err;
as->sum_ofdm_errors += ofdm_err;
}
if (cck_err > 0) {
as->cck_errors += cck_err;
as->sum_cck_errors += cck_err;
}
return 1;
}
/**
* ath5k_ani_period_restart() - Restart ANI period
* @as: The &struct ath5k_ani_state
*
* Just reset counters, so they are clear for the next "ani period".
*/
static void
ath5k_ani_period_restart(struct ath5k_ani_state *as)
{
/* keep last values for debugging */
as->last_ofdm_errors = as->ofdm_errors;
as->last_cck_errors = as->cck_errors;
as->last_listen = as->listen_time;
as->ofdm_errors = 0;
as->cck_errors = 0;
as->listen_time = 0;
}
/**
* ath5k_ani_calibration() - The main ANI calibration function
* @ah: The &struct ath5k_hw
*
* We count OFDM and CCK errors relative to the time where we did not send or
* receive ("listen" time) and raise or lower immunity accordingly.
* This is called regularly (every second) from the calibration timer, but also
* when an error threshold has been reached.
*
* In order to synchronize access from different contexts, this should be
* called only indirectly by scheduling the ANI tasklet!
*/
void
ath5k_ani_calibration(struct ath5k_hw *ah)
{
struct ath5k_ani_state *as = &ah->ani_state;
int listen, ofdm_high, ofdm_low, cck_high, cck_low;
/* get listen time since last call and add it to the counter because we
* might not have restarted the "ani period" last time.
* always do this to calculate the busy time also in manual mode */
listen = ath5k_hw_ani_get_listen_time(ah, as);
as->listen_time += listen;
if (as->ani_mode != ATH5K_ANI_MODE_AUTO)
return;
ath5k_ani_save_and_clear_phy_errors(ah, as);
ofdm_high = as->listen_time * ATH5K_ANI_OFDM_TRIG_HIGH / 1000;
cck_high = as->listen_time * ATH5K_ANI_CCK_TRIG_HIGH / 1000;
ofdm_low = as->listen_time * ATH5K_ANI_OFDM_TRIG_LOW / 1000;
cck_low = as->listen_time * ATH5K_ANI_CCK_TRIG_LOW / 1000;
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"listen %d (now %d)", as->listen_time, listen);
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"check high ofdm %d/%d cck %d/%d",
as->ofdm_errors, ofdm_high, as->cck_errors, cck_high);
if (as->ofdm_errors > ofdm_high || as->cck_errors > cck_high) {
/* too many PHY errors - we have to raise immunity */
bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false;
ath5k_ani_raise_immunity(ah, as, ofdm_flag);
ath5k_ani_period_restart(as);
} else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
/* If more than 5 (TODO: why 5?) periods have passed and we got
* relatively little errors we can try to lower immunity */
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"check low ofdm %d/%d cck %d/%d",
as->ofdm_errors, ofdm_low, as->cck_errors, cck_low);
if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low)
ath5k_ani_lower_immunity(ah, as);
ath5k_ani_period_restart(as);
}
}
/*******************\
* Interrupt handler *
\*******************/
/**
* ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters
* @ah: The &struct ath5k_hw
*
* Just read & reset the registers quickly, so they don't generate more
* interrupts, save the counters and schedule the tasklet to decide whether
* to raise immunity or not.
*
* We just need to handle PHY error counters, ath5k_hw_update_mib_counters()
* should take care of all "normal" MIB interrupts.
*/
void
ath5k_ani_mib_intr(struct ath5k_hw *ah)
{
struct ath5k_ani_state *as = &ah->ani_state;
/* nothing to do here if HW does not have PHY error counters - they
* can't be the reason for the MIB interrupt then */
if (!ah->ah_capabilities.cap_has_phyerr_counters)
return;
/* not in use but clear anyways */
ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
if (ah->ani_state.ani_mode != ATH5K_ANI_MODE_AUTO)
return;
/* If one of the errors triggered, we can get a superfluous second
* interrupt, even though we have already reset the register. The
* function detects that so we can return early. */
if (ath5k_ani_save_and_clear_phy_errors(ah, as) == 0)
return;
if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH ||
as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
tasklet_schedule(&ah->ani_tasklet);
}
/**
* ath5k_ani_phy_error_report - Used by older HW to report PHY errors
*
* @ah: The &struct ath5k_hw
* @phyerr: One of enum ath5k_phy_error_code
*
* This is used by hardware without PHY error counters to report PHY errors
* on a frame-by-frame basis, instead of the interrupt.
*/
void
ath5k_ani_phy_error_report(struct ath5k_hw *ah,
enum ath5k_phy_error_code phyerr)
{
struct ath5k_ani_state *as = &ah->ani_state;
if (phyerr == AR5K_RX_PHY_ERROR_OFDM_TIMING) {
as->ofdm_errors++;
if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH)
tasklet_schedule(&ah->ani_tasklet);
} else if (phyerr == AR5K_RX_PHY_ERROR_CCK_TIMING) {
as->cck_errors++;
if (as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
tasklet_schedule(&ah->ani_tasklet);
}
}
/****************\
* Initialization *
\****************/
/**
* ath5k_enable_phy_err_counters() - Enable PHY error counters
* @ah: The &struct ath5k_hw
*
* Enable PHY error counters for OFDM and CCK timing errors.
*/
static void
ath5k_enable_phy_err_counters(struct ath5k_hw *ah)
{
ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_OFDM_TRIG_HIGH,
AR5K_PHYERR_CNT1);
ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_CCK_TRIG_HIGH,
AR5K_PHYERR_CNT2);
ath5k_hw_reg_write(ah, AR5K_PHY_ERR_FIL_OFDM, AR5K_PHYERR_CNT1_MASK);
ath5k_hw_reg_write(ah, AR5K_PHY_ERR_FIL_CCK, AR5K_PHYERR_CNT2_MASK);
/* not in use */
ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
}
/**
* ath5k_disable_phy_err_counters() - Disable PHY error counters
* @ah: The &struct ath5k_hw
*
* Disable PHY error counters for OFDM and CCK timing errors.
*/
static void
ath5k_disable_phy_err_counters(struct ath5k_hw *ah)
{
ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT1);
ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT2);
ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT1_MASK);
ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT2_MASK);
/* not in use */
ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
}
/**
* ath5k_ani_init() - Initialize ANI
* @ah: The &struct ath5k_hw
* @mode: One of enum ath5k_ani_mode
*
* Initialize ANI according to mode.
*/
void
ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
{
/* ANI is only possible on 5212 and newer */
if (ah->ah_version < AR5K_AR5212)
return;
if (mode < ATH5K_ANI_MODE_OFF || mode > ATH5K_ANI_MODE_AUTO) {
ATH5K_ERR(ah, "ANI mode %d out of range", mode);
return;
}
/* clear old state information */
memset(&ah->ani_state, 0, sizeof(ah->ani_state));
/* older hardware has more spur levels than newer */
if (ah->ah_mac_srev < AR5K_SREV_AR2414)
ah->ani_state.max_spur_level = 7;
else
ah->ani_state.max_spur_level = 2;
/* initial values for our ani parameters */
if (mode == ATH5K_ANI_MODE_OFF) {
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "ANI off\n");
} else if (mode == ATH5K_ANI_MODE_MANUAL_LOW) {
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"ANI manual low -> high sensitivity\n");
ath5k_ani_set_noise_immunity_level(ah, 0);
ath5k_ani_set_spur_immunity_level(ah, 0);
ath5k_ani_set_firstep_level(ah, 0);
ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
ath5k_ani_set_cck_weak_signal_detection(ah, true);
} else if (mode == ATH5K_ANI_MODE_MANUAL_HIGH) {
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"ANI manual high -> low sensitivity\n");
ath5k_ani_set_noise_immunity_level(ah,
ATH5K_ANI_MAX_NOISE_IMM_LVL);
ath5k_ani_set_spur_immunity_level(ah,
ah->ani_state.max_spur_level);
ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
ath5k_ani_set_cck_weak_signal_detection(ah, false);
} else if (mode == ATH5K_ANI_MODE_AUTO) {
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "ANI auto\n");
ath5k_ani_set_noise_immunity_level(ah, 0);
ath5k_ani_set_spur_immunity_level(ah, 0);
ath5k_ani_set_firstep_level(ah, 0);
ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
ath5k_ani_set_cck_weak_signal_detection(ah, false);
}
/* newer hardware has PHY error counter registers which we can use to
* get OFDM and CCK error counts. older hardware has to set rxfilter and
* report every single PHY error by calling ath5k_ani_phy_error_report()
*/
if (mode == ATH5K_ANI_MODE_AUTO) {
if (ah->ah_capabilities.cap_has_phyerr_counters)
ath5k_enable_phy_err_counters(ah);
else
ath5k_hw_set_rx_filter(ah, ath5k_hw_get_rx_filter(ah) |
AR5K_RX_FILTER_PHYERR);
} else {
if (ah->ah_capabilities.cap_has_phyerr_counters)
ath5k_disable_phy_err_counters(ah);
else
ath5k_hw_set_rx_filter(ah, ath5k_hw_get_rx_filter(ah) &
~AR5K_RX_FILTER_PHYERR);
}
ah->ani_state.ani_mode = mode;
}
/**************\
* Debug output *
\**************/
#ifdef CONFIG_ATH5K_DEBUG
/**
* ath5k_ani_print_counters() - Print ANI counters
* @ah: The &struct ath5k_hw
*
* Used for debugging ANI
*/
void
ath5k_ani_print_counters(struct ath5k_hw *ah)
{
/* clears too */
pr_notice("ACK fail\t%d\n", ath5k_hw_reg_read(ah, AR5K_ACK_FAIL));
pr_notice("RTS fail\t%d\n", ath5k_hw_reg_read(ah, AR5K_RTS_FAIL));
pr_notice("RTS success\t%d\n", ath5k_hw_reg_read(ah, AR5K_RTS_OK));
pr_notice("FCS error\t%d\n", ath5k_hw_reg_read(ah, AR5K_FCS_FAIL));
/* no clear */
pr_notice("tx\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX));
pr_notice("rx\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX));
pr_notice("busy\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR));
pr_notice("cycles\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE));
pr_notice("AR5K_PHYERR_CNT1\t%d\n",
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1));
pr_notice("AR5K_PHYERR_CNT2\t%d\n",
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2));
pr_notice("AR5K_OFDM_FIL_CNT\t%d\n",
ath5k_hw_reg_read(ah, AR5K_OFDM_FIL_CNT));
pr_notice("AR5K_CCK_FIL_CNT\t%d\n",
ath5k_hw_reg_read(ah, AR5K_CCK_FIL_CNT));
}
#endif
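To make the trigger arithmetic in ath5k_ani_calibration() concrete: the ATH5K_ANI_*_TRIG_* values are scaled by the measured listen time over 1000 ms before being compared against the error counters. A standalone sketch of that scaling (not driver code):
/* listen_time_ms * trigger / 1000, e.g. 400 ms with an OFDM high trigger of
 * 500 gives an effective threshold of 200 errors.
 */
static unsigned int ani_scaled_trigger(unsigned int trig_per_1000ms,
				       unsigned int listen_time_ms)
{
	return listen_time_ms * trig_per_1000ms / 1000;
}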

View file

@@ -0,0 +1,119 @@
/*
* Copyright (C) 2010 Bruno Randolf <br1@einfach.org>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef ANI_H
#define ANI_H
#include "../ath.h"
enum ath5k_phy_error_code;
/* these thresholds are relative to the ATH5K_ANI_LISTEN_PERIOD */
#define ATH5K_ANI_LISTEN_PERIOD 100
#define ATH5K_ANI_OFDM_TRIG_HIGH 500
#define ATH5K_ANI_OFDM_TRIG_LOW 200
#define ATH5K_ANI_CCK_TRIG_HIGH 200
#define ATH5K_ANI_CCK_TRIG_LOW 100
/* average beacon RSSI thresholds */
#define ATH5K_ANI_RSSI_THR_HIGH 40
#define ATH5K_ANI_RSSI_THR_LOW 7
/* maximum available levels */
#define ATH5K_ANI_MAX_FIRSTEP_LVL 2
#define ATH5K_ANI_MAX_NOISE_IMM_LVL 1
/**
* enum ath5k_ani_mode - mode for ANI / noise sensitivity
*
* @ATH5K_ANI_MODE_OFF: Turn ANI off. This can be useful to just stop the ANI
* algorithm after it has been on auto mode.
* @ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
* maximizing sensitivity. ANI will not run.
* @ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
* minimizing sensitivity. ANI will not run.
* @ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
* amount of OFDM and CCK frame errors (default).
*/
enum ath5k_ani_mode {
ATH5K_ANI_MODE_OFF = 0,
ATH5K_ANI_MODE_MANUAL_LOW = 1,
ATH5K_ANI_MODE_MANUAL_HIGH = 2,
ATH5K_ANI_MODE_AUTO = 3
};
/**
* struct ath5k_ani_state - ANI state and associated counters
* @ani_mode: One of enum ath5k_ani_mode
* @noise_imm_level: Noise immunity level
* @spur_level: Spur immunity level
* @firstep_level: FIRstep level
* @ofdm_weak_sig: OFDM weak signal detection state (on/off)
* @cck_weak_sig: CCK weak signal detection state (on/off)
* @max_spur_level: Max spur immunity level (chip specific)
* @listen_time: Listen time
* @ofdm_errors: OFDM timing error count
* @cck_errors: CCK timing error count
* @last_cc: The &struct ath_cycle_counters (for stats)
* @last_listen: Listen time from previous run (for stats)
* @last_ofdm_errors: OFDM timing error count from previous run (for stats)
* @last_cck_errors: CCK timing error count from previous run (for stats)
* @sum_ofdm_errors: Sum of OFDM timing errors (for stats)
* @sum_cck_errors: Sum of all CCK timing errors (for stats)
*/
struct ath5k_ani_state {
enum ath5k_ani_mode ani_mode;
/* state */
int noise_imm_level;
int spur_level;
int firstep_level;
bool ofdm_weak_sig;
bool cck_weak_sig;
int max_spur_level;
/* used by the algorithm */
unsigned int listen_time;
unsigned int ofdm_errors;
unsigned int cck_errors;
/* debug/statistics only: numbers from last ANI calibration */
struct ath_cycle_counters last_cc;
unsigned int last_listen;
unsigned int last_ofdm_errors;
unsigned int last_cck_errors;
unsigned int sum_ofdm_errors;
unsigned int sum_cck_errors;
};
void ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode);
void ath5k_ani_mib_intr(struct ath5k_hw *ah);
void ath5k_ani_calibration(struct ath5k_hw *ah);
void ath5k_ani_phy_error_report(struct ath5k_hw *ah,
enum ath5k_phy_error_code phyerr);
/* for manual control */
void ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level);
void ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level);
void ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level);
void ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on);
void ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on);
void ath5k_ani_print_counters(struct ath5k_hw *ah);
#endif /* ANI_H */
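The manual-control prototypes above map directly onto the enum ath5k_ani_mode semantics. A brief usage sketch, assuming an already initialised struct ath5k_hw:
/* Illustrative only: toggle between automatic ANI and a fixed
 * low-sensitivity (high immunity) setting using the API declared above.
 */
static void example_ani_modes(struct ath5k_hw *ah)
{
	/* let the algorithm adjust immunity from the PHY error counters */
	ath5k_ani_init(ah, ATH5K_ANI_MODE_AUTO);

	/* ...later: pin maximum immunity and stop the algorithm */
	ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_HIGH);
}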

File diff suppressed because it is too large

View file

@@ -0,0 +1,359 @@
/*
* Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/*************************************\
* Attach/Detach Functions and helpers *
\*************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/pci.h>
#include <linux/slab.h>
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
/**
* ath5k_hw_post() - Power On Self Test helper function
* @ah: The &struct ath5k_hw
*/
static int ath5k_hw_post(struct ath5k_hw *ah)
{
static const u32 static_pattern[4] = {
0x55555555, 0xaaaaaaaa,
0x66666666, 0x99999999
};
static const u16 regs[2] = { AR5K_STA_ID0, AR5K_PHY(8) };
int i, c;
u16 cur_reg;
u32 var_pattern;
u32 init_val;
u32 cur_val;
for (c = 0; c < 2; c++) {
cur_reg = regs[c];
/* Save previous value */
init_val = ath5k_hw_reg_read(ah, cur_reg);
for (i = 0; i < 256; i++) {
var_pattern = i << 16 | i;
ath5k_hw_reg_write(ah, var_pattern, cur_reg);
cur_val = ath5k_hw_reg_read(ah, cur_reg);
if (cur_val != var_pattern) {
ATH5K_ERR(ah, "POST Failed !!!\n");
return -EAGAIN;
}
/* Found on ndiswrapper dumps */
var_pattern = 0x0039080f;
ath5k_hw_reg_write(ah, var_pattern, cur_reg);
}
for (i = 0; i < 4; i++) {
var_pattern = static_pattern[i];
ath5k_hw_reg_write(ah, var_pattern, cur_reg);
cur_val = ath5k_hw_reg_read(ah, cur_reg);
if (cur_val != var_pattern) {
ATH5K_ERR(ah, "POST Failed !!!\n");
return -EAGAIN;
}
/* Found on ndiswrapper dumps */
var_pattern = 0x003b080f;
ath5k_hw_reg_write(ah, var_pattern, cur_reg);
}
/* Restore previous value */
ath5k_hw_reg_write(ah, init_val, cur_reg);
}
return 0;
}
/**
* ath5k_hw_init() - Check if hw is supported and init the needed structs
* @ah: The &struct ath5k_hw associated with the device
*
* Check if the device is supported, perform a POST and initialize the needed
* structs. Returns -ENOMEM if we don't have memory for the needed structs,
* -ENODEV if the device is not supported or prints an error msg if something
* else went wrong.
*/
int ath5k_hw_init(struct ath5k_hw *ah)
{
static const u8 zero_mac[ETH_ALEN] = { };
struct ath_common *common = ath5k_hw_common(ah);
struct pci_dev *pdev = ah->pdev;
struct ath5k_eeprom_info *ee;
int ret;
u32 srev;
/*
* HW information
*/
ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
ah->ah_imr = 0;
ah->ah_retry_short = AR5K_INIT_RETRY_SHORT;
ah->ah_retry_long = AR5K_INIT_RETRY_LONG;
ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
ah->ah_noise_floor = -95; /* until first NF calibration is run */
ah->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
ah->ah_current_channel = &ah->channels[0];
/*
* Find the mac version
*/
ath5k_hw_read_srev(ah);
srev = ah->ah_mac_srev;
if (srev < AR5K_SREV_AR5311)
ah->ah_version = AR5K_AR5210;
else if (srev < AR5K_SREV_AR5212)
ah->ah_version = AR5K_AR5211;
else
ah->ah_version = AR5K_AR5212;
/* Get the MAC version */
ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER);
/* Fill the ath5k_hw struct with the needed functions */
ret = ath5k_hw_init_desc_functions(ah);
if (ret)
goto err;
/* Bring device out of sleep and reset its units */
ret = ath5k_hw_nic_wakeup(ah, NULL);
if (ret)
goto err;
/* Get PHY and RADIO revisions */
ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
0xffffffff;
ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
IEEE80211_BAND_5GHZ);
/* Try to identify radio chip based on its srev */
switch (ah->ah_radio_5ghz_revision & 0xf0) {
case AR5K_SREV_RAD_5111:
ah->ah_radio = AR5K_RF5111;
ah->ah_single_chip = false;
ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
IEEE80211_BAND_2GHZ);
break;
case AR5K_SREV_RAD_5112:
case AR5K_SREV_RAD_2112:
ah->ah_radio = AR5K_RF5112;
ah->ah_single_chip = false;
ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
IEEE80211_BAND_2GHZ);
break;
case AR5K_SREV_RAD_2413:
ah->ah_radio = AR5K_RF2413;
ah->ah_single_chip = true;
break;
case AR5K_SREV_RAD_5413:
ah->ah_radio = AR5K_RF5413;
ah->ah_single_chip = true;
break;
case AR5K_SREV_RAD_2316:
ah->ah_radio = AR5K_RF2316;
ah->ah_single_chip = true;
break;
case AR5K_SREV_RAD_2317:
ah->ah_radio = AR5K_RF2317;
ah->ah_single_chip = true;
break;
case AR5K_SREV_RAD_5424:
if (ah->ah_mac_version == AR5K_SREV_AR2425 ||
ah->ah_mac_version == AR5K_SREV_AR2417) {
ah->ah_radio = AR5K_RF2425;
ah->ah_single_chip = true;
} else {
ah->ah_radio = AR5K_RF5413;
ah->ah_single_chip = true;
}
break;
default:
/* Identify radio based on mac/phy srev */
if (ah->ah_version == AR5K_AR5210) {
ah->ah_radio = AR5K_RF5110;
ah->ah_single_chip = false;
} else if (ah->ah_version == AR5K_AR5211) {
ah->ah_radio = AR5K_RF5111;
ah->ah_single_chip = false;
ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
IEEE80211_BAND_2GHZ);
} else if (ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4) ||
ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4) ||
ah->ah_phy_revision == AR5K_SREV_PHY_2425) {
ah->ah_radio = AR5K_RF2425;
ah->ah_single_chip = true;
ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2425;
} else if (srev == AR5K_SREV_AR5213A &&
ah->ah_phy_revision == AR5K_SREV_PHY_5212B) {
ah->ah_radio = AR5K_RF5112;
ah->ah_single_chip = false;
ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5112B;
} else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4) ||
ah->ah_mac_version == (AR5K_SREV_AR2315_R6 >> 4)) {
ah->ah_radio = AR5K_RF2316;
ah->ah_single_chip = true;
ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2316;
} else if (ah->ah_mac_version == (AR5K_SREV_AR5414 >> 4) ||
ah->ah_phy_revision == AR5K_SREV_PHY_5413) {
ah->ah_radio = AR5K_RF5413;
ah->ah_single_chip = true;
ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5413;
} else if (ah->ah_mac_version == (AR5K_SREV_AR2414 >> 4) ||
ah->ah_phy_revision == AR5K_SREV_PHY_2413) {
ah->ah_radio = AR5K_RF2413;
ah->ah_single_chip = true;
ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2413;
} else {
ATH5K_ERR(ah, "Couldn't identify radio revision.\n");
ret = -ENODEV;
goto err;
}
}
/* Return on unsupported chips (unsupported eeprom etc) */
if ((srev >= AR5K_SREV_AR5416) && (srev < AR5K_SREV_AR2425)) {
ATH5K_ERR(ah, "Device not yet supported.\n");
ret = -ENODEV;
goto err;
}
/*
* POST
*/
ret = ath5k_hw_post(ah);
if (ret)
goto err;
/* Enable pci core retry fix on Hainan (5213A) and later chips */
if (srev >= AR5K_SREV_AR5213A)
AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_RETRY_FIX);
/*
* Get card capabilities, calibration values etc
* TODO: EEPROM work
*/
ret = ath5k_eeprom_init(ah);
if (ret) {
ATH5K_ERR(ah, "unable to init EEPROM\n");
goto err;
}
ee = &ah->ah_capabilities.cap_eeprom;
/*
* Write PCI-E power save settings
*/
if ((ah->ah_version == AR5K_AR5212) && pdev && (pci_is_pcie(pdev))) {
ath5k_hw_reg_write(ah, 0x9248fc00, AR5K_PCIE_SERDES);
ath5k_hw_reg_write(ah, 0x24924924, AR5K_PCIE_SERDES);
/* Shut off RX when elecidle is asserted */
ath5k_hw_reg_write(ah, 0x28000039, AR5K_PCIE_SERDES);
ath5k_hw_reg_write(ah, 0x53160824, AR5K_PCIE_SERDES);
/* If serdes programming is enabled, increase PCI-E
* tx power for systems with long trace from host
* to minicard connector. */
if (ee->ee_serdes)
ath5k_hw_reg_write(ah, 0xe5980579, AR5K_PCIE_SERDES);
else
ath5k_hw_reg_write(ah, 0xf6800579, AR5K_PCIE_SERDES);
/* Shut off PLL and CLKREQ active in L1 */
ath5k_hw_reg_write(ah, 0x001defff, AR5K_PCIE_SERDES);
/* Preserve other settings */
ath5k_hw_reg_write(ah, 0x1aaabe40, AR5K_PCIE_SERDES);
ath5k_hw_reg_write(ah, 0xbe105554, AR5K_PCIE_SERDES);
ath5k_hw_reg_write(ah, 0x000e3007, AR5K_PCIE_SERDES);
/* Reset SERDES to load new settings */
ath5k_hw_reg_write(ah, 0x00000000, AR5K_PCIE_SERDES_RESET);
usleep_range(1000, 1500);
}
/* Get misc capabilities */
ret = ath5k_hw_set_capabilities(ah);
if (ret) {
ATH5K_ERR(ah, "unable to get device capabilities\n");
goto err;
}
/* Crypto settings */
common->keymax = (ah->ah_version == AR5K_AR5210 ?
AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211);
if (srev >= AR5K_SREV_AR5212_V4 &&
(ee->ee_version < AR5K_EEPROM_VERSION_5_0 ||
!AR5K_EEPROM_AES_DIS(ee->ee_misc5)))
common->crypt_caps |= ATH_CRYPT_CAP_CIPHER_AESCCM;
if (srev >= AR5K_SREV_AR2414) {
common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
AR5K_REG_ENABLE_BITS(ah, AR5K_MISC_MODE,
AR5K_MISC_MODE_COMBINED_MIC);
}
/* MAC address is cleared until add_interface */
ath5k_hw_set_lladdr(ah, zero_mac);
/* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN);
ath5k_hw_set_bssid(ah);
ath5k_hw_set_opmode(ah, ah->opmode);
ath5k_hw_rfgain_opt_init(ah);
ath5k_hw_init_nfcal_hist(ah);
/* turn on HW LEDs */
ath5k_hw_set_ledstate(ah, AR5K_LED_INIT);
return 0;
err:
return ret;
}
/**
* ath5k_hw_deinit() - Free the &struct ath5k_hw
* @ah: The &struct ath5k_hw
*/
void ath5k_hw_deinit(struct ath5k_hw *ah)
{
__set_bit(ATH_STAT_INVALID, ah->status);
kfree(ah->ah_rf_banks);
ath5k_eeprom_detach(ah);
/* assume interrupts are down */
}

File diff suppressed because it is too large

View file

@ -0,0 +1,121 @@
/*-
* Copyright (c) 2002-2007 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*
*/
/*
* Definitions for the Atheros Wireless LAN controller driver.
*/
#ifndef _DEV_ATH5K_BASE_H
#define _DEV_ATH5K_BASE_H
struct ieee80211_vif;
struct ieee80211_hw;
struct ath5k_hw;
struct ath5k_txq;
struct ieee80211_channel;
struct ath_bus_ops;
struct ieee80211_tx_control;
enum nl80211_iftype;
enum ath5k_srev_type {
AR5K_VERSION_MAC,
AR5K_VERSION_RAD,
};
struct ath5k_srev_name {
const char *sr_name;
enum ath5k_srev_type sr_type;
u_int sr_val;
};
struct ath5k_buf {
struct list_head list;
struct ath5k_desc *desc; /* virtual addr of desc */
dma_addr_t daddr; /* physical addr of desc */
struct sk_buff *skb; /* skbuff for buf */
dma_addr_t skbaddr; /* physical addr of skb data */
struct ieee80211_tx_rate rates[4]; /* rates for the multi-rate retry stages */
};
struct ath5k_vif {
bool assoc; /* are we associated or not */
enum nl80211_iftype opmode;
int bslot;
struct ath5k_buf *bbuf; /* beacon buffer */
};
struct ath5k_vif_iter_data {
const u8 *hw_macaddr;
u8 mask[ETH_ALEN];
u8 active_mac[ETH_ALEN]; /* first active MAC */
bool need_set_hw_addr;
bool found_active;
bool any_assoc;
enum nl80211_iftype opmode;
int n_stas;
};
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif);
bool ath5k_any_vif_assoc(struct ath5k_hw *ah);
int ath5k_start(struct ieee80211_hw *hw);
void ath5k_stop(struct ieee80211_hw *hw);
void ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf);
int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void ath5k_beacon_config(struct ath5k_hw *ah);
void ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable);
void ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
struct ieee80211_vif *vif);
int ath5k_chan_set(struct ath5k_hw *ah, struct cfg80211_chan_def *chandef);
void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath5k_txq *txq, struct ieee80211_tx_control *control);
const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val);
int ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops);
void ath5k_deinit_ah(struct ath5k_hw *ah);
/* Check whether BSSID mask is supported */
#define ath5k_hw_hasbssidmask(_ah) ((_ah)->ah_version == AR5K_AR5212)
/* Check whether virtual EOL is supported */
#define ath5k_hw_hasveol(_ah) ((_ah)->ah_version != AR5K_AR5210)
#endif /* _DEV_ATH5K_BASE_H */

View file

@ -0,0 +1,154 @@
/*
* Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
* Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/**************\
* Capabilities *
\**************/
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
#include "../regd.h"
/*
* Fill the capabilities struct
* TODO: Merge this with EEPROM code when we are done with it
*/
int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
{
struct ath5k_capabilities *caps = &ah->ah_capabilities;
u16 ee_header;
/* Capabilities stored in the EEPROM */
ee_header = caps->cap_eeprom.ee_header;
if (ah->ah_version == AR5K_AR5210) {
/*
* Set radio capabilities
* (The AR5110 only supports the middle 5GHz band)
*/
caps->cap_range.range_5ghz_min = 5120;
caps->cap_range.range_5ghz_max = 5430;
caps->cap_range.range_2ghz_min = 0;
caps->cap_range.range_2ghz_max = 0;
/* Set supported modes */
__set_bit(AR5K_MODE_11A, caps->cap_mode);
} else {
/*
* XXX The transceiver supports frequencies from 4920 to 6100MHz
* XXX and from 2312 to 2732MHz. There are problems with the
* XXX current ieee80211 implementation because the IEEE
* XXX channel mapping does not support negative channel
* XXX numbers (2312MHz is channel -19). Of course, this
* XXX doesn't matter because these channels are out of the
* XXX legal range.
*/
/*
* Set radio capabilities
*/
if (AR5K_EEPROM_HDR_11A(ee_header)) {
if (ath_is_49ghz_allowed(caps->cap_eeprom.ee_regdomain))
caps->cap_range.range_5ghz_min = 4920;
else
caps->cap_range.range_5ghz_min = 5005;
caps->cap_range.range_5ghz_max = 6100;
/* Set supported modes */
__set_bit(AR5K_MODE_11A, caps->cap_mode);
}
/* Enable 802.11b if a 2GHz capable radio (2111/5112) is
* connected */
if (AR5K_EEPROM_HDR_11B(ee_header) ||
(AR5K_EEPROM_HDR_11G(ee_header) &&
ah->ah_version != AR5K_AR5211)) {
/* hw minimum is 2312 MHz (see note above), but we start at 2412 MHz */
caps->cap_range.range_2ghz_min = 2412;
caps->cap_range.range_2ghz_max = 2732;
/* Override 2GHz modes on SoCs that need it
* NOTE: cap_needs_2GHz_ovr gets set from
* ath_ahb_probe */
if (!caps->cap_needs_2GHz_ovr) {
if (AR5K_EEPROM_HDR_11B(ee_header))
__set_bit(AR5K_MODE_11B,
caps->cap_mode);
if (AR5K_EEPROM_HDR_11G(ee_header) &&
ah->ah_version != AR5K_AR5211)
__set_bit(AR5K_MODE_11G,
caps->cap_mode);
}
}
}
if ((ah->ah_radio_5ghz_revision & 0xf0) == AR5K_SREV_RAD_2112)
__clear_bit(AR5K_MODE_11A, caps->cap_mode);
/* Set number of supported TX queues */
if (ah->ah_version == AR5K_AR5210)
caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES_NOQCU;
else
caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
/* Newer hardware has PHY error counters */
if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
caps->cap_has_phyerr_counters = true;
else
caps->cap_has_phyerr_counters = false;
/* MACs since AR5212 have MRR support */
if (ah->ah_version == AR5K_AR5212)
caps->cap_has_mrr_support = true;
else
caps->cap_has_mrr_support = false;
return 0;
}
/*
* TODO: Following functions should be part of a new function
* set_capability
*/
int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid,
u16 assoc_id)
{
if (ah->ah_version == AR5K_AR5210) {
AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
return 0;
}
return -EIO;
}
int ath5k_hw_disable_pspoll(struct ath5k_hw *ah)
{
if (ah->ah_version == AR5K_AR5210) {
AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
return 0;
}
return -EIO;
}

File diff suppressed because it is too large

View file

@ -0,0 +1,165 @@
/*
* Copyright (c) 2007 Bruno Randolf <bruno@thinktube.com>
*
* This file is free software: you may copy, redistribute and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation, either version 2 of the License, or (at your
* option) any later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
* This file incorporates work covered by the following copyright and
* permission notice:
*
* Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
* Copyright (c) 2004-2005 Atheros Communications, Inc.
* Copyright (c) 2006 Devicescape Software, Inc.
* Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
* Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#ifndef _ATH5K_DEBUG_H
#define _ATH5K_DEBUG_H
struct ath5k_hw;
struct sk_buff;
struct ath5k_buf;
struct ath5k_dbg_info {
unsigned int level; /* debug level */
};
/**
* enum ath5k_debug_level - ath5k debug level
*
* @ATH5K_DEBUG_RESET: reset processing
* @ATH5K_DEBUG_INTR: interrupt handling
* @ATH5K_DEBUG_MODE: mode init/setup
* @ATH5K_DEBUG_XMIT: basic xmit operation
* @ATH5K_DEBUG_BEACON: beacon handling
* @ATH5K_DEBUG_CALIBRATE: periodic calibration
* @ATH5K_DEBUG_TXPOWER: transmit power setting
* @ATH5K_DEBUG_LED: led management
* @ATH5K_DEBUG_DUMP_RX: print received skb content
* @ATH5K_DEBUG_DUMP_TX: print transmit skb content
* @ATH5K_DEBUG_DUMPBANDS: dump bands
* @ATH5K_DEBUG_DMA: debug dma start/stop
* @ATH5K_DEBUG_ANI: adaptive noise immunity
* @ATH5K_DEBUG_DESC: descriptor setup
* @ATH5K_DEBUG_ANY: show at any debug level
*
* The debug level is used to control the amount and type of debugging output
* we want to see. The debug level is given in calls to ATH5K_DBG to specify
* where the message should appear, and the user can control which debugging
* messages they want to see, either via the module parameter 'debug' on module
* load, or dynamically via debugfs 'ath5k/phyX/debug'. These levels can be
* combined by bitwise OR (see the usage sketch after the enum below).
*/
enum ath5k_debug_level {
ATH5K_DEBUG_RESET = 0x00000001,
ATH5K_DEBUG_INTR = 0x00000002,
ATH5K_DEBUG_MODE = 0x00000004,
ATH5K_DEBUG_XMIT = 0x00000008,
ATH5K_DEBUG_BEACON = 0x00000010,
ATH5K_DEBUG_CALIBRATE = 0x00000020,
ATH5K_DEBUG_TXPOWER = 0x00000040,
ATH5K_DEBUG_LED = 0x00000080,
ATH5K_DEBUG_DUMP_RX = 0x00000100,
ATH5K_DEBUG_DUMP_TX = 0x00000200,
ATH5K_DEBUG_DUMPBANDS = 0x00000400,
ATH5K_DEBUG_DMA = 0x00000800,
ATH5K_DEBUG_ANI = 0x00002000,
ATH5K_DEBUG_DESC = 0x00004000,
ATH5K_DEBUG_ANY = 0xffffffff
};
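/*
 * Illustrative sketch, not part of the driver: the levels above are plain
 * bitmasks, so they are OR-ed together (e.g. for the 'debug' module
 * parameter) and tested by ATH5K_DBG below. The function and the values
 * chosen here are hypothetical and only show the intended usage.
 */
#if 0
static void ath5k_debug_usage_example(struct ath5k_hw *ah)
{
	/* enable reset and interrupt messages */
	ah->debug.level = ATH5K_DEBUG_RESET | ATH5K_DEBUG_INTR;

	/* rate-limited print, emitted only when the RESET bit is set */
	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "example message, level 0x%x\n",
		  ah->debug.level);
}
#endif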
#ifdef CONFIG_ATH5K_DEBUG
#define ATH5K_DBG(_sc, _m, _fmt, ...) do { \
if (unlikely((_sc)->debug.level & (_m) && net_ratelimit())) \
ATH5K_PRINTK(_sc, KERN_DEBUG, "(%s:%d): " _fmt, \
__func__, __LINE__, ##__VA_ARGS__); \
} while (0)
#define ATH5K_DBG_UNLIMIT(_sc, _m, _fmt, ...) do { \
if (unlikely((_sc)->debug.level & (_m))) \
ATH5K_PRINTK(_sc, KERN_DEBUG, "(%s:%d): " _fmt, \
__func__, __LINE__, ##__VA_ARGS__); \
} while (0)
void
ath5k_debug_init_device(struct ath5k_hw *ah);
void
ath5k_debug_printrxbuffs(struct ath5k_hw *ah);
void
ath5k_debug_dump_bands(struct ath5k_hw *ah);
void
ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf);
#else /* no debugging */
#include <linux/compiler.h>
static inline __printf(3, 4) void
ATH5K_DBG(struct ath5k_hw *ah, unsigned int m, const char *fmt, ...) {}
static inline __printf(3, 4) void
ATH5K_DBG_UNLIMIT(struct ath5k_hw *ah, unsigned int m, const char *fmt, ...)
{}
static inline void
ath5k_debug_init_device(struct ath5k_hw *ah) {}
static inline void
ath5k_debug_printrxbuffs(struct ath5k_hw *ah) {}
static inline void
ath5k_debug_dump_bands(struct ath5k_hw *ah) {}
static inline void
ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf) {}
#endif /* ifdef CONFIG_ATH5K_DEBUG */
#endif /* ifndef _ATH5K_DEBUG_H */

View file

@ -0,0 +1,786 @@
/*
* Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
* Copyright (c) 2007-2008 Pavel Roskin <proski@gnu.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/******************************\
Hardware Descriptor Functions
\******************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
/**
* DOC: Hardware descriptor functions
*
* Here we handle the processing of the low-level hw descriptors
* that hw reads and writes via DMA for each TX and RX attempt (that means
* we can also have descriptors for failed TX/RX tries). We have two kinds of
* descriptors for RX and TX: control descriptors tell the hw how to send or
* receive a packet, where to read/write it from/to etc., and status descriptors
* that contain information about how the packet was sent or received (errors
* included).
*
* Descriptor format is not exactly the same for each MAC chip version so we
* have function pointers on &struct ath5k_hw we initialize at runtime based on
* the chip used.
*/
/************************\
* TX Control descriptors *
\************************/
/**
* ath5k_hw_setup_2word_tx_desc() - Initialize a 2-word tx control descriptor
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @pkt_len: Frame length in bytes
* @hdr_len: Header length in bytes (only used on AR5210)
* @padsize: Any padding we've added to the frame length
* @type: One of enum ath5k_pkt_type
* @tx_power: Tx power in 0.5dB steps
* @tx_rate0: HW idx for transmission rate
* @tx_tries0: Max number of retransmissions
* @key_index: Index on key table to use for encryption
* @antenna_mode: Which antenna to use (0 for auto)
* @flags: One of AR5K_TXDESC_* flags (desc.h)
* @rtscts_rate: HW idx for RTS/CTS transmission rate
* @rtscts_duration: What to put on duration field on the header of RTS/CTS
*
* Internal function to initialize a 2-Word TX control descriptor
* found on AR5210 and AR5211 MAC chips.
*
* Returns 0 on success or -EINVAL on invalid input
*/
static int
ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah,
struct ath5k_desc *desc,
unsigned int pkt_len, unsigned int hdr_len,
int padsize,
enum ath5k_pkt_type type,
unsigned int tx_power,
unsigned int tx_rate0, unsigned int tx_tries0,
unsigned int key_index,
unsigned int antenna_mode,
unsigned int flags,
unsigned int rtscts_rate, unsigned int rtscts_duration)
{
u32 frame_type;
struct ath5k_hw_2w_tx_ctl *tx_ctl;
unsigned int frame_len;
tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
/*
* Validate input
* - Zero retries don't make sense.
* - A zero rate will put the HW into a mode where it continuously sends
* noise on the channel, so it is important to avoid this.
*/
if (unlikely(tx_tries0 == 0)) {
ATH5K_ERR(ah, "zero retries\n");
WARN_ON(1);
return -EINVAL;
}
if (unlikely(tx_rate0 == 0)) {
ATH5K_ERR(ah, "zero rate\n");
WARN_ON(1);
return -EINVAL;
}
/* Clear descriptor */
memset(&desc->ud.ds_tx5210, 0, sizeof(struct ath5k_hw_5210_tx_desc));
/* Setup control descriptor */
/* Verify and set frame length */
/* remove padding we might have added before */
frame_len = pkt_len - padsize + FCS_LEN;
if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
return -EINVAL;
tx_ctl->tx_control_0 = frame_len & AR5K_2W_TX_DESC_CTL0_FRAME_LEN;
/* Verify and set buffer length */
/* NB: beacon's BufLen must be a multiple of 4 bytes */
if (type == AR5K_PKT_TYPE_BEACON)
pkt_len = roundup(pkt_len, 4);
if (pkt_len & ~AR5K_2W_TX_DESC_CTL1_BUF_LEN)
return -EINVAL;
tx_ctl->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN;
/*
* Verify and set header length (only 5210)
*/
if (ah->ah_version == AR5K_AR5210) {
if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210)
return -EINVAL;
tx_ctl->tx_control_0 |=
AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210);
}
/* Differences between 5210 and 5211 */
if (ah->ah_version == AR5K_AR5210) {
switch (type) {
case AR5K_PKT_TYPE_BEACON:
case AR5K_PKT_TYPE_PROBE_RESP:
frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY;
break;
case AR5K_PKT_TYPE_PIFS:
frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS;
break;
default:
frame_type = type;
break;
}
tx_ctl->tx_control_0 |=
AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210) |
AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
} else {
tx_ctl->tx_control_0 |=
AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE) |
AR5K_REG_SM(antenna_mode,
AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT);
tx_ctl->tx_control_1 |=
AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211);
}
#define _TX_FLAGS(_c, _flag) \
if (flags & AR5K_TXDESC_##_flag) { \
tx_ctl->tx_control_##_c |= \
AR5K_2W_TX_DESC_CTL##_c##_##_flag; \
}
#define _TX_FLAGS_5211(_c, _flag) \
if (flags & AR5K_TXDESC_##_flag) { \
tx_ctl->tx_control_##_c |= \
AR5K_2W_TX_DESC_CTL##_c##_##_flag##_5211; \
}
_TX_FLAGS(0, CLRDMASK);
_TX_FLAGS(0, INTREQ);
_TX_FLAGS(0, RTSENA);
if (ah->ah_version == AR5K_AR5211) {
_TX_FLAGS_5211(0, VEOL);
_TX_FLAGS_5211(1, NOACK);
}
#undef _TX_FLAGS
#undef _TX_FLAGS_5211
/*
* WEP crap
*/
if (key_index != AR5K_TXKEYIX_INVALID) {
tx_ctl->tx_control_0 |=
AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
tx_ctl->tx_control_1 |=
AR5K_REG_SM(key_index,
AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX);
}
/*
* RTS/CTS Duration [5210 ?]
*/
if ((ah->ah_version == AR5K_AR5210) &&
(flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)))
tx_ctl->tx_control_1 |= rtscts_duration &
AR5K_2W_TX_DESC_CTL1_RTS_DURATION_5210;
return 0;
}
/**
* ath5k_hw_setup_4word_tx_desc() - Initialize a 4-word tx control descriptor
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @pkt_len: Frame length in bytes
* @hdr_len: Header length in bytes (only used on AR5210)
* @padsize: Any padding we've added to the frame length
* @type: One of enum ath5k_pkt_type
* @tx_power: Tx power in 0.5dB steps
* @tx_rate0: HW idx for transmission rate
* @tx_tries0: Max number of retransmissions
* @key_index: Index on key table to use for encryption
* @antenna_mode: Which antenna to use (0 for auto)
* @flags: One of AR5K_TXDESC_* flags (desc.h)
* @rtscts_rate: HW idx for RTS/CTS transmission rate
* @rtscts_duration: What to put on duration field on the header of RTS/CTS
*
* Internal function to initialize a 4-Word TX control descriptor
* found on AR5212 and later MAC chips.
*
* Returns 0 on success or -EINVAL on invalid input.
* A usage sketch follows the function below.
*/
static int
ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
struct ath5k_desc *desc,
unsigned int pkt_len, unsigned int hdr_len,
int padsize,
enum ath5k_pkt_type type,
unsigned int tx_power,
unsigned int tx_rate0, unsigned int tx_tries0,
unsigned int key_index,
unsigned int antenna_mode,
unsigned int flags,
unsigned int rtscts_rate, unsigned int rtscts_duration)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
unsigned int frame_len;
/*
* Use local variables for these to reduce load/store access on
* uncached memory
*/
u32 txctl0 = 0, txctl1 = 0, txctl2 = 0, txctl3 = 0;
tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
/*
* Validate input
* - Zero retries don't make sense.
* - A zero rate will put the HW into a mode where it continuously sends
* noise on the channel, so it is important to avoid this.
*/
if (unlikely(tx_tries0 == 0)) {
ATH5K_ERR(ah, "zero retries\n");
WARN_ON(1);
return -EINVAL;
}
if (unlikely(tx_rate0 == 0)) {
ATH5K_ERR(ah, "zero rate\n");
WARN_ON(1);
return -EINVAL;
}
tx_power += ah->ah_txpower.txp_offset;
if (tx_power > AR5K_TUNE_MAX_TXPOWER)
tx_power = AR5K_TUNE_MAX_TXPOWER;
/* Clear descriptor status area */
memset(&desc->ud.ds_tx5212.tx_stat, 0,
sizeof(desc->ud.ds_tx5212.tx_stat));
/* Setup control descriptor */
/* Verify and set frame length */
/* remove padding we might have added before */
frame_len = pkt_len - padsize + FCS_LEN;
if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
return -EINVAL;
txctl0 = frame_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN;
/* Verify and set buffer length */
/* NB: beacon's BufLen must be a multiple of 4 bytes */
if (type == AR5K_PKT_TYPE_BEACON)
pkt_len = roundup(pkt_len, 4);
if (pkt_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN)
return -EINVAL;
txctl1 = pkt_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN;
txctl0 |= AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) |
AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT);
txctl1 |= AR5K_REG_SM(type, AR5K_4W_TX_DESC_CTL1_FRAME_TYPE);
txctl2 = AR5K_REG_SM(tx_tries0, AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
txctl3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
#define _TX_FLAGS(_c, _flag) \
if (flags & AR5K_TXDESC_##_flag) { \
txctl##_c |= AR5K_4W_TX_DESC_CTL##_c##_##_flag; \
}
_TX_FLAGS(0, CLRDMASK);
_TX_FLAGS(0, VEOL);
_TX_FLAGS(0, INTREQ);
_TX_FLAGS(0, RTSENA);
_TX_FLAGS(0, CTSENA);
_TX_FLAGS(1, NOACK);
#undef _TX_FLAGS
/*
* WEP crap
*/
if (key_index != AR5K_TXKEYIX_INVALID) {
txctl0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
txctl1 |= AR5K_REG_SM(key_index,
AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX);
}
/*
* RTS/CTS
*/
if (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)) {
if ((flags & AR5K_TXDESC_RTSENA) &&
(flags & AR5K_TXDESC_CTSENA))
return -EINVAL;
txctl2 |= rtscts_duration & AR5K_4W_TX_DESC_CTL2_RTS_DURATION;
txctl3 |= AR5K_REG_SM(rtscts_rate,
AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE);
}
tx_ctl->tx_control_0 = txctl0;
tx_ctl->tx_control_1 = txctl1;
tx_ctl->tx_control_2 = txctl2;
tx_ctl->tx_control_3 = txctl3;
return 0;
}
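/*
 * Illustrative sketch, not part of the driver: callers normally reach this
 * routine through the ah->ah_setup_tx_desc pointer set up in
 * ath5k_hw_init_desc_functions() below. The wrapper and all numeric values
 * in this call are made up for the example; real callers derive them from
 * mac80211 and the current rate table.
 */
#if 0
static int ath5k_tx_desc_usage_example(struct ath5k_hw *ah,
				       struct ath5k_desc *desc)
{
	return ah->ah_setup_tx_desc(ah, desc,
				    1536,		/* pkt_len */
				    24,			/* hdr_len (5210 only) */
				    0,			/* padsize */
				    AR5K_PKT_TYPE_NORMAL,
				    30,			/* tx_power, 0.5dB steps */
				    11,			/* tx_rate0 (hw index) */
				    4,			/* tx_tries0 */
				    AR5K_TXKEYIX_INVALID,
				    0,			/* antenna_mode: auto */
				    AR5K_TXDESC_INTREQ,
				    0, 0);		/* rtscts rate/duration */
}
#endif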
/**
* ath5k_hw_setup_mrr_tx_desc() - Initialize an MRR tx control descriptor
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @tx_rate1: HW idx for rate used on transmission series 1
* @tx_tries1: Max number of retransmissions for transmission series 1
* @tx_rate2: HW idx for rate used on transmission series 2
* @tx_tries2: Max number of retransmissions for transmission series 2
* @tx_rate3: HW idx for rate used on transmission series 3
* @tx_tries3: Max number of retransmissions for transmission series 3
*
* Multi rate retry (MRR) tx control descriptors are available only on AR5212
* MACs, they are part of the normal 4-word tx control descriptor (see above)
* but we handle them through a separate function for better abstraction.
*
* Returns 0 on success or -EINVAL on invalid input
*/
int
ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah,
struct ath5k_desc *desc,
u_int tx_rate1, u_int tx_tries1,
u_int tx_rate2, u_int tx_tries2,
u_int tx_rate3, u_int tx_tries3)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
/* no mrr support for cards older than 5212 */
if (ah->ah_version < AR5K_AR5212)
return 0;
/*
* Rates can be 0 as long as the retry count is 0 too.
* A zero rate and nonzero retry count will put the HW into a mode where
* it continuously sends noise on the channel, so it is important to
* avoid this.
*/
if (unlikely((tx_rate1 == 0 && tx_tries1 != 0) ||
(tx_rate2 == 0 && tx_tries2 != 0) ||
(tx_rate3 == 0 && tx_tries3 != 0))) {
ATH5K_ERR(ah, "zero rate\n");
WARN_ON(1);
return -EINVAL;
}
if (ah->ah_version == AR5K_AR5212) {
tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
#define _XTX_TRIES(_n) \
if (tx_tries##_n) { \
tx_ctl->tx_control_2 |= \
AR5K_REG_SM(tx_tries##_n, \
AR5K_4W_TX_DESC_CTL2_XMIT_TRIES##_n); \
tx_ctl->tx_control_3 |= \
AR5K_REG_SM(tx_rate##_n, \
AR5K_4W_TX_DESC_CTL3_XMIT_RATE##_n); \
}
_XTX_TRIES(1);
_XTX_TRIES(2);
_XTX_TRIES(3);
#undef _XTX_TRIES
return 1;
}
return 0;
}
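/*
 * Illustrative sketch, not part of the driver: the fallback series are
 * programmed after the base descriptor has been set up. The rate indices
 * and retry counts below are made up for the example; a series with both
 * rate and tries set to zero is simply left unused.
 */
#if 0
static void ath5k_mrr_usage_example(struct ath5k_hw *ah,
				    struct ath5k_desc *desc)
{
	ath5k_hw_setup_mrr_tx_desc(ah, desc,
				   11, 2,	/* series 1: hw rate 11, 2 tries */
				   4, 2,	/* series 2: more robust rate */
				   0, 0);	/* series 3: unused */
}
#endif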
/***********************\
* TX Status descriptors *
\***********************/
/**
* ath5k_hw_proc_2word_tx_status() - Process a tx status descriptor on 5210/1
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @ts: The &struct ath5k_tx_status
*/
static int
ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc,
struct ath5k_tx_status *ts)
{
struct ath5k_hw_tx_status *tx_status;
tx_status = &desc->ud.ds_tx5210.tx_stat;
/* No frame has been sent, or there was an error */
if (unlikely((tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE) == 0))
return -EINPROGRESS;
/*
* Get descriptor status
*/
ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0,
AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
ts->ts_final_retry = AR5K_REG_MS(tx_status->tx_status_0,
AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
/*TODO: ts->ts_virtcol + test*/
ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
AR5K_DESC_TX_STATUS1_SEQ_NUM);
ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
ts->ts_antenna = 1;
ts->ts_status = 0;
ts->ts_final_idx = 0;
if (!(tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) {
if (tx_status->tx_status_0 &
AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
ts->ts_status |= AR5K_TXERR_XRETRY;
if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
ts->ts_status |= AR5K_TXERR_FIFO;
if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED)
ts->ts_status |= AR5K_TXERR_FILT;
}
return 0;
}
/**
* ath5k_hw_proc_4word_tx_status() - Process a tx status descriptor on 5212
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @ts: The &struct ath5k_tx_status
*/
static int
ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc,
struct ath5k_tx_status *ts)
{
struct ath5k_hw_tx_status *tx_status;
u32 txstat0, txstat1;
tx_status = &desc->ud.ds_tx5212.tx_stat;
txstat1 = ACCESS_ONCE(tx_status->tx_status_1);
/* No frame has been sent, or there was an error */
if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE)))
return -EINPROGRESS;
txstat0 = ACCESS_ONCE(tx_status->tx_status_0);
/*
* Get descriptor status
*/
ts->ts_tstamp = AR5K_REG_MS(txstat0,
AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
ts->ts_shortretry = AR5K_REG_MS(txstat0,
AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
ts->ts_final_retry = AR5K_REG_MS(txstat0,
AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
ts->ts_seqnum = AR5K_REG_MS(txstat1,
AR5K_DESC_TX_STATUS1_SEQ_NUM);
ts->ts_rssi = AR5K_REG_MS(txstat1,
AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
ts->ts_antenna = (txstat1 &
AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212) ? 2 : 1;
ts->ts_status = 0;
ts->ts_final_idx = AR5K_REG_MS(txstat1,
AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212);
/* TX error */
if (!(txstat0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) {
if (txstat0 & AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
ts->ts_status |= AR5K_TXERR_XRETRY;
if (txstat0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
ts->ts_status |= AR5K_TXERR_FIFO;
if (txstat0 & AR5K_DESC_TX_STATUS0_FILTERED)
ts->ts_status |= AR5K_TXERR_FILT;
}
return 0;
}
/****************\
* RX Descriptors *
\****************/
/**
* ath5k_hw_setup_rx_desc() - Initialize an rx control descriptor
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @size: RX buffer length in bytes
* @flags: One of AR5K_RXDESC_* flags
*/
int
ath5k_hw_setup_rx_desc(struct ath5k_hw *ah,
struct ath5k_desc *desc,
u32 size, unsigned int flags)
{
struct ath5k_hw_rx_ctl *rx_ctl;
rx_ctl = &desc->ud.ds_rx.rx_ctl;
/*
* Clear the descriptor
* If we don't clear the status descriptor,
* while scanning we get too many results,
* most of them virtual; after some seconds
* of scanning the system hangs. M.F.
*/
memset(&desc->ud.ds_rx, 0, sizeof(struct ath5k_hw_all_rx_desc));
if (unlikely(size & ~AR5K_DESC_RX_CTL1_BUF_LEN))
return -EINVAL;
/* Setup descriptor */
rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN;
if (flags & AR5K_RXDESC_INTREQ)
rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ;
return 0;
}
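/*
 * Illustrative sketch, not part of the driver: an RX control descriptor is
 * (re)armed with the DMA-mapped skb length before being handed back to the
 * hardware. The buffer size below is made up for the example; the driver
 * normally uses its common RX buffer size and sets AR5K_RXDESC_INTREQ only
 * when it wants an interrupt for this descriptor.
 */
#if 0
static int ath5k_rx_desc_usage_example(struct ath5k_hw *ah,
				       struct ath5k_desc *desc)
{
	return ath5k_hw_setup_rx_desc(ah, desc, 2400, AR5K_RXDESC_INTREQ);
}
#endif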
/**
* ath5k_hw_proc_5210_rx_status() - Process the rx status descriptor on 5210/1
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @rs: The &struct ath5k_rx_status
*
* Internal function used to process an RX status descriptor
* on AR5210/5211 MAC.
*
* Returns 0 on success or -EINPROGRESS in case we haven't received the whole
* frame yet.
*/
static int
ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc,
struct ath5k_rx_status *rs)
{
struct ath5k_hw_rx_status *rx_status;
rx_status = &desc->ud.ds_rx.rx_stat;
/* No frame received / not ready */
if (unlikely(!(rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_DONE)))
return -EINPROGRESS;
memset(rs, 0, sizeof(struct ath5k_rx_status));
/*
* Frame receive status
*/
rs->rs_datalen = rx_status->rx_status_0 &
AR5K_5210_RX_DESC_STATUS0_DATA_LEN;
rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0,
AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL);
rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE);
rs->rs_more = !!(rx_status->rx_status_0 &
AR5K_5210_RX_DESC_STATUS0_MORE);
/* TODO: this timestamp is 13 bits, but later on we assume 15 bits!
* Also, the HAL code for 5210 says the timestamp is bits [10..22] of the
* TSF and extends the timestamp here to 15 bits.
* We need to check this on 5210...
*/
rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
if (ah->ah_version == AR5K_AR5211)
rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211);
else
rs->rs_antenna = (rx_status->rx_status_0 &
AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5210)
? 2 : 1;
/*
* Key table status
*/
if (rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID)
rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
AR5K_5210_RX_DESC_STATUS1_KEY_INDEX);
else
rs->rs_keyix = AR5K_RXKEYIX_INVALID;
/*
* Receive/descriptor errors
*/
if (!(rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
if (rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_CRC_ERROR)
rs->rs_status |= AR5K_RXERR_CRC;
/* only on 5210 */
if ((ah->ah_version == AR5K_AR5210) &&
(rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN_5210))
rs->rs_status |= AR5K_RXERR_FIFO;
if (rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) {
rs->rs_status |= AR5K_RXERR_PHY;
rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1,
AR5K_5210_RX_DESC_STATUS1_PHY_ERROR);
}
if (rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
rs->rs_status |= AR5K_RXERR_DECRYPT;
}
return 0;
}
/**
* ath5k_hw_proc_5212_rx_status() - Process the rx status descriptor on 5212
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @rs: The &struct ath5k_rx_status
*
* Internal function used to process an RX status descriptor
* on AR5212 and later MAC.
*
* Returns 0 on success or -EINPROGRESS in case we haven't received the whole
* frame yet.
*/
static int
ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc,
struct ath5k_rx_status *rs)
{
struct ath5k_hw_rx_status *rx_status;
u32 rxstat0, rxstat1;
rx_status = &desc->ud.ds_rx.rx_stat;
rxstat1 = ACCESS_ONCE(rx_status->rx_status_1);
/* No frame received / not ready */
if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE)))
return -EINPROGRESS;
memset(rs, 0, sizeof(struct ath5k_rx_status));
rxstat0 = ACCESS_ONCE(rx_status->rx_status_0);
/*
* Frame receive status
*/
rs->rs_datalen = rxstat0 & AR5K_5212_RX_DESC_STATUS0_DATA_LEN;
rs->rs_rssi = AR5K_REG_MS(rxstat0,
AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL);
rs->rs_rate = AR5K_REG_MS(rxstat0,
AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE);
rs->rs_antenna = AR5K_REG_MS(rxstat0,
AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA);
rs->rs_more = !!(rxstat0 & AR5K_5212_RX_DESC_STATUS0_MORE);
rs->rs_tstamp = AR5K_REG_MS(rxstat1,
AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
/*
* Key table status
*/
if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID)
rs->rs_keyix = AR5K_REG_MS(rxstat1,
AR5K_5212_RX_DESC_STATUS1_KEY_INDEX);
else
rs->rs_keyix = AR5K_RXKEYIX_INVALID;
/*
* Receive/descriptor errors
*/
if (!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_CRC_ERROR)
rs->rs_status |= AR5K_RXERR_CRC;
if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) {
rs->rs_status |= AR5K_RXERR_PHY;
rs->rs_phyerr = AR5K_REG_MS(rxstat1,
AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE);
if (!ah->ah_capabilities.cap_has_phyerr_counters)
ath5k_ani_phy_error_report(ah, rs->rs_phyerr);
}
if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
rs->rs_status |= AR5K_RXERR_DECRYPT;
if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_MIC_ERROR)
rs->rs_status |= AR5K_RXERR_MIC;
}
return 0;
}
/********\
* Attach *
\********/
/**
* ath5k_hw_init_desc_functions() - Init function pointers inside ah
* @ah: The &struct ath5k_hw
*
* Maps the internal descriptor functions to the function pointers on ah, for
* use by the layers above. This acts as an abstraction layer so the various
* chips are handled the same way (a dispatch sketch follows the function below).
*/
int
ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
{
if (ah->ah_version == AR5K_AR5212) {
ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
ah->ah_proc_tx_desc = ath5k_hw_proc_4word_tx_status;
ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status;
} else if (ah->ah_version <= AR5K_AR5211) {
ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc;
ah->ah_proc_tx_desc = ath5k_hw_proc_2word_tx_status;
ah->ah_proc_rx_desc = ath5k_hw_proc_5210_rx_status;
} else
return -ENOTSUPP;
return 0;
}
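/*
 * Illustrative sketch, not part of the driver: once the pointers above are
 * set, higher layers never branch on the MAC version again; they simply go
 * through ah->ah_proc_rx_desc / ah->ah_proc_tx_desc and get the handler
 * appropriate for the chip. The wrapper below is hypothetical.
 */
#if 0
static int ath5k_rx_dispatch_example(struct ath5k_hw *ah,
				     struct ath5k_desc *desc,
				     struct ath5k_rx_status *rs)
{
	/* resolves to the 5210/5211 or 5212 status handler picked above */
	return ah->ah_proc_rx_desc(ah, desc, rs);
}
#endif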

View file

@ -0,0 +1,367 @@
/*
* Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/*
* RX/TX descriptor structures
*/
/**
* struct ath5k_hw_rx_ctl - Common hardware RX control descriptor
* @rx_control_0: RX control word 0
* @rx_control_1: RX control word 1
*/
struct ath5k_hw_rx_ctl {
u32 rx_control_0;
u32 rx_control_1;
} __packed __aligned(4);
/* RX control word 1 fields/flags */
#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff /* data buffer length */
#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000 /* RX interrupt request */
/**
* struct ath5k_hw_rx_status - Common hardware RX status descriptor
* @rx_status_0: RX status word 0
* @rx_status_1: RX status word 1
*
* 5210, 5211 and 5212 differ only in the fields and flags defined below
*/
struct ath5k_hw_rx_status {
u32 rx_status_0;
u32 rx_status_1;
} __packed __aligned(4);
/* 5210/5211 */
/* RX status word 0 fields/flags */
#define AR5K_5210_RX_DESC_STATUS0_DATA_LEN 0x00000fff /* RX data length */
#define AR5K_5210_RX_DESC_STATUS0_MORE 0x00001000 /* more desc for this frame */
#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5210 0x00004000 /* [5210] receive on ant 1 */
#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE 0x00078000 /* reception rate */
#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE_S 15
#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x07f80000 /* rssi */
#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 19
#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211 0x38000000 /* [5211] receive antenna */
#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211_S 27
/* RX status word 1 fields/flags */
#define AR5K_5210_RX_DESC_STATUS1_DONE 0x00000001 /* descriptor complete */
#define AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 /* reception success */
#define AR5K_5210_RX_DESC_STATUS1_CRC_ERROR 0x00000004 /* CRC error */
#define AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN_5210 0x00000008 /* [5210] FIFO overrun */
#define AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000010 /* decryption CRC failure */
#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR 0x000000e0 /* PHY error */
#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR_S 5
#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100 /* key index valid */
#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX 0x00007e00 /* decryption key index */
#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_S 9
#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x0fff8000 /* 13 bit of TSF */
#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S 15
#define AR5K_5210_RX_DESC_STATUS1_KEY_CACHE_MISS 0x10000000 /* key cache miss */
/* 5212 */
/* RX status word 0 fields/flags */
#define AR5K_5212_RX_DESC_STATUS0_DATA_LEN 0x00000fff /* RX data length */
#define AR5K_5212_RX_DESC_STATUS0_MORE 0x00001000 /* more desc for this frame */
#define AR5K_5212_RX_DESC_STATUS0_DECOMP_CRC_ERROR 0x00002000 /* decompression CRC error */
#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE 0x000f8000 /* reception rate */
#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE_S 15
#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x0ff00000 /* rssi */
#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 20
#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA 0xf0000000 /* receive antenna */
#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 28
/* RX status word 1 fields/flags */
#define AR5K_5212_RX_DESC_STATUS1_DONE 0x00000001 /* descriptor complete */
#define AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 /* frame reception success */
#define AR5K_5212_RX_DESC_STATUS1_CRC_ERROR 0x00000004 /* CRC error */
#define AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000008 /* decryption CRC failure */
#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR 0x00000010 /* PHY error */
#define AR5K_5212_RX_DESC_STATUS1_MIC_ERROR 0x00000020 /* MIC decrypt error */
#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100 /* key index valid */
#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX 0x0000fe00 /* decryption key index */
#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_S 9
#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x7fff0000 /* first 15bit of the TSF */
#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S 16
#define AR5K_5212_RX_DESC_STATUS1_KEY_CACHE_MISS 0x80000000 /* key cache miss */
#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE 0x0000ff00 /* phy error code overlays key index and valid fields */
#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE_S 8
/**
* enum ath5k_phy_error_code - PHY Error codes
* @AR5K_RX_PHY_ERROR_UNDERRUN: Transmit underrun, [5210] No error
* @AR5K_RX_PHY_ERROR_TIMING: Timing error
* @AR5K_RX_PHY_ERROR_PARITY: Illegal parity
* @AR5K_RX_PHY_ERROR_RATE: Illegal rate
* @AR5K_RX_PHY_ERROR_LENGTH: Illegal length
* @AR5K_RX_PHY_ERROR_RADAR: Radar detect, [5210] 64 QAM rate
* @AR5K_RX_PHY_ERROR_SERVICE: Illegal service
* @AR5K_RX_PHY_ERROR_TOR: Transmit override receive
* @AR5K_RX_PHY_ERROR_OFDM_TIMING: OFDM Timing error [5212+]
* @AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY: OFDM Signal parity error [5212+]
* @AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL: OFDM Illegal rate [5212+]
* @AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL: OFDM Illegal length [5212+]
* @AR5K_RX_PHY_ERROR_OFDM_POWER_DROP: OFDM Power drop [5212+]
* @AR5K_RX_PHY_ERROR_OFDM_SERVICE: OFDM Service (?) [5212+]
* @AR5K_RX_PHY_ERROR_OFDM_RESTART: OFDM Restart (?) [5212+]
* @AR5K_RX_PHY_ERROR_CCK_TIMING: CCK Timing error [5212+]
* @AR5K_RX_PHY_ERROR_CCK_HEADER_CRC: Header CRC error [5212+]
* @AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL: Illegal rate [5212+]
* @AR5K_RX_PHY_ERROR_CCK_SERVICE: CCK Service (?) [5212+]
* @AR5K_RX_PHY_ERROR_CCK_RESTART: CCK Restart (?) [5212+]
*/
enum ath5k_phy_error_code {
AR5K_RX_PHY_ERROR_UNDERRUN = 0,
AR5K_RX_PHY_ERROR_TIMING = 1,
AR5K_RX_PHY_ERROR_PARITY = 2,
AR5K_RX_PHY_ERROR_RATE = 3,
AR5K_RX_PHY_ERROR_LENGTH = 4,
AR5K_RX_PHY_ERROR_RADAR = 5,
AR5K_RX_PHY_ERROR_SERVICE = 6,
AR5K_RX_PHY_ERROR_TOR = 7,
AR5K_RX_PHY_ERROR_OFDM_TIMING = 17,
AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY = 18,
AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL = 19,
AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL = 20,
AR5K_RX_PHY_ERROR_OFDM_POWER_DROP = 21,
AR5K_RX_PHY_ERROR_OFDM_SERVICE = 22,
AR5K_RX_PHY_ERROR_OFDM_RESTART = 23,
AR5K_RX_PHY_ERROR_CCK_TIMING = 25,
AR5K_RX_PHY_ERROR_CCK_HEADER_CRC = 26,
AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL = 27,
AR5K_RX_PHY_ERROR_CCK_SERVICE = 30,
AR5K_RX_PHY_ERROR_CCK_RESTART = 31,
};
/**
* struct ath5k_hw_2w_tx_ctl - 5210/5211 hardware 2-word TX control descriptor
* @tx_control_0: TX control word 0
* @tx_control_1: TX control word 1
*/
struct ath5k_hw_2w_tx_ctl {
u32 tx_control_0;
u32 tx_control_1;
} __packed __aligned(4);
/* TX control word 0 fields/flags */
#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */
#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210 0x0003f000 /* [5210] header length */
#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210_S 12
#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE 0x003c0000 /* tx rate */
#define AR5K_2W_TX_DESC_CTL0_XMIT_RATE_S 18
#define AR5K_2W_TX_DESC_CTL0_RTSENA 0x00400000 /* RTS/CTS enable */
#define AR5K_2W_TX_DESC_CTL0_LONG_PACKET_5210 0x00800000 /* [5210] long packet */
#define AR5K_2W_TX_DESC_CTL0_VEOL_5211 0x00800000 /* [5211] virtual end-of-list */
#define AR5K_2W_TX_DESC_CTL0_CLRDMASK 0x01000000 /* clear destination mask */
#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 0x02000000 /* [5210] antenna selection */
#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211 0x1e000000 /* [5211] antenna selection */
#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT \
(ah->ah_version == AR5K_AR5210 ? \
AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 : \
AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211)
#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_S 25
#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210 0x1c000000 /* [5210] frame type */
#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210_S 26
#define AR5K_2W_TX_DESC_CTL0_INTREQ 0x20000000 /* TX interrupt request */
#define AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000 /* key is valid */
/* TX control word 1 fields/flags */
#define AR5K_2W_TX_DESC_CTL1_BUF_LEN 0x00000fff /* data buffer length */
#define AR5K_2W_TX_DESC_CTL1_MORE 0x00001000 /* more desc for this frame */
#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5210 0x0007e000 /* [5210] key table index */
#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5211 0x000fe000 /* [5211] key table index */
#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX \
(ah->ah_version == AR5K_AR5210 ? \
AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5210 : \
AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_5211)
#define AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX_S 13
#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211 0x00700000 /* [5211] frame type */
#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211_S 20
#define AR5K_2W_TX_DESC_CTL1_NOACK_5211 0x00800000 /* [5211] no ACK */
#define AR5K_2W_TX_DESC_CTL1_RTS_DURATION_5210 0xfff80000 /* [5210] lower 13 bit of duration */
/* Frame types */
#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NORMAL 0
#define AR5K_AR5210_TX_DESC_FRAME_TYPE_ATIM 1
#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PSPOLL 2
#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY 3
#define AR5K_AR5211_TX_DESC_FRAME_TYPE_BEACON 3
#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 4
#define AR5K_AR5211_TX_DESC_FRAME_TYPE_PRESP 4
/**
* struct ath5k_hw_4w_tx_ctl - 5212 hardware 4-word TX control descriptor
* @tx_control_0: TX control word 0
* @tx_control_1: TX control word 1
* @tx_control_2: TX control word 2
* @tx_control_3: TX control word 3
*/
struct ath5k_hw_4w_tx_ctl {
u32 tx_control_0;
u32 tx_control_1;
u32 tx_control_2;
u32 tx_control_3;
} __packed __aligned(4);
/* TX control word 0 fields/flags */
#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */
#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER 0x003f0000 /* transmit power */
#define AR5K_4W_TX_DESC_CTL0_XMIT_POWER_S 16
#define AR5K_4W_TX_DESC_CTL0_RTSENA 0x00400000 /* RTS/CTS enable */
#define AR5K_4W_TX_DESC_CTL0_VEOL 0x00800000 /* virtual end-of-list */
#define AR5K_4W_TX_DESC_CTL0_CLRDMASK 0x01000000 /* clear destination mask */
#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT 0x1e000000 /* TX antenna selection */
#define AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT_S 25
#define AR5K_4W_TX_DESC_CTL0_INTREQ 0x20000000 /* TX interrupt request */
#define AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000 /* destination index valid */
#define AR5K_4W_TX_DESC_CTL0_CTSENA 0x80000000 /* precede frame with CTS */
/* TX control word 1 fields/flags */
#define AR5K_4W_TX_DESC_CTL1_BUF_LEN 0x00000fff /* data buffer length */
#define AR5K_4W_TX_DESC_CTL1_MORE 0x00001000 /* more desc for this frame */
#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX 0x000fe000 /* destination table index */
#define AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX_S 13
#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE 0x00f00000 /* frame type */
#define AR5K_4W_TX_DESC_CTL1_FRAME_TYPE_S 20
#define AR5K_4W_TX_DESC_CTL1_NOACK 0x01000000 /* no ACK */
#define AR5K_4W_TX_DESC_CTL1_COMP_PROC 0x06000000 /* compression processing */
#define AR5K_4W_TX_DESC_CTL1_COMP_PROC_S 25
#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN 0x18000000 /* length of frame IV */
#define AR5K_4W_TX_DESC_CTL1_COMP_IV_LEN_S 27
#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN 0x60000000 /* length of frame ICV */
#define AR5K_4W_TX_DESC_CTL1_COMP_ICV_LEN_S 29
/* TX control word 2 fields/flags */
#define AR5K_4W_TX_DESC_CTL2_RTS_DURATION 0x00007fff /* RTS/CTS duration */
#define AR5K_4W_TX_DESC_CTL2_DURATION_UPD_EN 0x00008000 /* frame duration update */
#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0 0x000f0000 /* series 0 max attempts */
#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0_S 16
#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1 0x00f00000 /* series 1 max attempts */
#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1_S 20
#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2 0x0f000000 /* series 2 max attempts */
#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2_S 24
#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3 0xf0000000 /* series 3 max attempts */
#define AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3_S 28
/* TX control word 3 fields/flags */
#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE0 0x0000001f /* series 0 tx rate */
#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1 0x000003e0 /* series 1 tx rate */
#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE1_S 5
#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2 0x00007c00 /* series 2 tx rate */
#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE2_S 10
#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3 0x000f8000 /* series 3 tx rate */
#define AR5K_4W_TX_DESC_CTL3_XMIT_RATE3_S 15
#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE 0x01f00000 /* RTS or CTS rate */
#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S 20
/**
* struct ath5k_hw_tx_status - Common TX status descriptor
* @tx_status_0: TX status word 0
* @tx_status_1: TX status word 1
*/
struct ath5k_hw_tx_status {
u32 tx_status_0;
u32 tx_status_1;
} __packed __aligned(4);
/* TX status word 0 fields/flags */
#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK 0x00000001 /* TX success */
#define AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES 0x00000002 /* excessive retries */
#define AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN 0x00000004 /* FIFO underrun */
#define AR5K_DESC_TX_STATUS0_FILTERED 0x00000008 /* TX filter indication */
/* According to the HAL sources the spec has the short/long retry counts
* reversed; we keep them reversed, matching the HAL sources, for 5210 and 5211.
* For 5212 these fields are defined as RTS_FAIL_COUNT and DATA_FAIL_COUNT,
* but used respectively as SHORT and LONG retry count in the code later. This
* is consistent with the definitions here... TODO: check */
#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT 0x000000f0 /* short retry count */
#define AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT_S 4
#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT 0x00000f00 /* long retry count */
#define AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT_S 8
#define AR5K_DESC_TX_STATUS0_VIRTCOLL_CT_5211 0x0000f000 /* [5211+] virtual collision count */
#define AR5K_DESC_TX_STATUS0_VIRTCOLL_CT_5212_S 12
#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP 0xffff0000 /* TX timestamp */
#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP_S 16
/* TX status word 1 fields/flags */
#define AR5K_DESC_TX_STATUS1_DONE 0x00000001 /* descriptor complete */
#define AR5K_DESC_TX_STATUS1_SEQ_NUM 0x00001ffe /* TX sequence number */
#define AR5K_DESC_TX_STATUS1_SEQ_NUM_S 1
#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH 0x001fe000 /* signal strength of ACK */
#define AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH_S 13
#define AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212 0x00600000 /* [5212] final TX attempt series ix */
#define AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212_S 21
#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS_5212 0x00800000 /* [5212] compression status */
#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212 0x01000000 /* [5212] transmit antenna */
/**
* struct ath5k_hw_5210_tx_desc - 5210/5211 hardware TX descriptor
* @tx_ctl: The &struct ath5k_hw_2w_tx_ctl
* @tx_stat: The &struct ath5k_hw_tx_status
*/
struct ath5k_hw_5210_tx_desc {
struct ath5k_hw_2w_tx_ctl tx_ctl;
struct ath5k_hw_tx_status tx_stat;
} __packed __aligned(4);
/**
* struct ath5k_hw_5212_tx_desc - 5212 hardware TX descriptor
* @tx_ctl: The &struct ath5k_hw_4w_tx_ctl
* @tx_stat: The &struct ath5k_hw_tx_status
*/
struct ath5k_hw_5212_tx_desc {
struct ath5k_hw_4w_tx_ctl tx_ctl;
struct ath5k_hw_tx_status tx_stat;
} __packed __aligned(4);
/**
* struct ath5k_hw_all_rx_desc - Common hardware RX descriptor
* @rx_ctl: The &struct ath5k_hw_rx_ctl
* @rx_stat: The &struct ath5k_hw_rx_status
*/
struct ath5k_hw_all_rx_desc {
struct ath5k_hw_rx_ctl rx_ctl;
struct ath5k_hw_rx_status rx_stat;
} __packed __aligned(4);
/**
* struct ath5k_desc - Atheros hardware DMA descriptor
* @ds_link: Physical address of the next descriptor
* @ds_data: Physical address of data buffer (skb)
* @ud: Union containing hw_5xxx_tx_desc structs and hw_all_rx_desc
*
* This is read and written to by the hardware
*/
struct ath5k_desc {
u32 ds_link;
u32 ds_data;
union {
struct ath5k_hw_5210_tx_desc ds_tx5210;
struct ath5k_hw_5212_tx_desc ds_tx5212;
struct ath5k_hw_all_rx_desc ds_rx;
} ud;
} __packed __aligned(4);
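/*
* Illustrative sketch only: descriptors are chained through @ds_link, so a
* driver building a circular RX ring points each descriptor at the DMA
* address of the next one ('desc', 'desc_daddr' and 'skb_daddr' are
* hypothetical arrays, 'n' the ring size):
*
*	desc[i].ds_link = desc_daddr[(i + 1) % n];
*	desc[i].ds_data = skb_daddr[i];
*/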
#define AR5K_RXDESC_INTREQ 0x0020
#define AR5K_TXDESC_CLRDMASK 0x0001
#define AR5K_TXDESC_NOACK 0x0002 /*[5211+]*/
#define AR5K_TXDESC_RTSENA 0x0004
#define AR5K_TXDESC_CTSENA 0x0008
#define AR5K_TXDESC_INTREQ 0x0010
#define AR5K_TXDESC_VEOL 0x0020 /*[5211+]*/

View file

@@ -0,0 +1,928 @@
/*
* Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/*************************************\
* DMA and interrupt masking functions *
\*************************************/
/**
* DOC: DMA and interrupt masking functions
*
* Here we set up the descriptor pointers (rxdp/txdp), start/stop the DMA engine
* and handle queue setup for the 5210 chipset (the rest is handled in qcu.c).
* Also we set up the interrupt mask register (IMR) and read the various
* interrupt status registers (ISR).
*/
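/*
* Illustrative call order only (a simplified sketch; real bring-up lives in
* base.c and also handles buffer allocation and locking). 'first_rx_desc_addr'
* is a hypothetical DMA address of the first RX descriptor:
*
*	ath5k_hw_set_rxdp(ah, first_rx_desc_addr);
*	ath5k_hw_start_rx_dma(ah);
*	ath5k_hw_set_imr(ah, ah->ah_imr | AR5K_INT_GLOBAL);
*/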
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
/*********\
* Receive *
\*********/
/**
* ath5k_hw_start_rx_dma() - Start DMA receive
* @ah: The &struct ath5k_hw
*/
void
ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
{
ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
ath5k_hw_reg_read(ah, AR5K_CR);
}
/**
* ath5k_hw_stop_rx_dma() - Stop DMA receive
* @ah: The &struct ath5k_hw
*/
static int
ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
{
unsigned int i;
ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);
/*
* It may take some time to disable the DMA receive unit
*/
for (i = 1000; i > 0 &&
(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
i--)
udelay(100);
if (!i)
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
"failed to stop RX DMA !\n");
return i ? 0 : -EBUSY;
}
/**
* ath5k_hw_get_rxdp() - Get RX Descriptor's address
* @ah: The &struct ath5k_hw
*/
u32
ath5k_hw_get_rxdp(struct ath5k_hw *ah)
{
return ath5k_hw_reg_read(ah, AR5K_RXDP);
}
/**
* ath5k_hw_set_rxdp() - Set RX Descriptor's address
* @ah: The &struct ath5k_hw
* @phys_addr: RX descriptor address
*
* Returns -EIO if rx is active
*/
int
ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
{
if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
"tried to set RXDP while rx was active !\n");
return -EIO;
}
ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
return 0;
}
/**********\
* Transmit *
\**********/
/**
* ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
*
* Start DMA transmit for a specific queue. Since the 5210 doesn't have a
* QCU/DCU, queue parameters are also set up here for the 5210 based on queue
* type (one queue for normal data and one queue for beacons). For queue setup
* on newer chips check out qcu.c. Returns -EINVAL if the queue number is out
* of range or if the queue is already disabled.
*
* NOTE: Must be called after setting up tx control descriptor for that
* queue (see below).
*/
int
ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
u32 tx_queue;
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
/* Return if queue is declared inactive */
if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
return -EINVAL;
if (ah->ah_version == AR5K_AR5210) {
tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
/*
* Set the queue by type on 5210
*/
switch (ah->ah_txq[queue].tqi_type) {
case AR5K_TX_QUEUE_DATA:
tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
break;
case AR5K_TX_QUEUE_BEACON:
tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
AR5K_BSR);
break;
case AR5K_TX_QUEUE_CAB:
tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
AR5K_BCR_BDMAE, AR5K_BSR);
break;
default:
return -EINVAL;
}
/* Start queue */
ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
ath5k_hw_reg_read(ah, AR5K_CR);
} else {
/* Return if queue is disabled */
if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
return -EIO;
/* Start queue */
AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
}
return 0;
}
/**
* ath5k_hw_stop_tx_dma() - Stop DMA transmit on a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
*
* Stop DMA transmit on a specific hw queue and drain queue so we don't
* have any pending frames. Returns -EBUSY if we still have pending frames,
* -EINVAL if queue number is out of range or inactive.
*/
static int
ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
unsigned int i = 40;
u32 tx_queue, pending;
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
/* Return if queue is declared inactive */
if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
return -EINVAL;
if (ah->ah_version == AR5K_AR5210) {
tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
/*
* Set by queue type
*/
switch (ah->ah_txq[queue].tqi_type) {
case AR5K_TX_QUEUE_DATA:
tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
break;
case AR5K_TX_QUEUE_BEACON:
case AR5K_TX_QUEUE_CAB:
/* XXX Fix me... */
tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXD1;
ath5k_hw_reg_write(ah, 0, AR5K_BSR);
break;
default:
return -EINVAL;
}
/* Stop queue */
ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
ath5k_hw_reg_read(ah, AR5K_CR);
} else {
/*
* Enable DCU early termination to quickly
* flush any pending frames from QCU
*/
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_DCU_EARLY);
/*
* Schedule TX disable and wait until queue is empty
*/
AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);
/* Wait for queue to stop */
for (i = 1000; i > 0 &&
(AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue) != 0);
i--)
udelay(100);
if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
"queue %i didn't stop !\n", queue);
/* Check for pending frames */
i = 1000;
do {
pending = ath5k_hw_reg_read(ah,
AR5K_QUEUE_STATUS(queue)) &
AR5K_QCU_STS_FRMPENDCNT;
udelay(100);
} while (--i && pending);
/* For 2413+ order PCU to drop packets using
* QUIET mechanism */
if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) &&
pending) {
/* Set periodicity and duration */
ath5k_hw_reg_write(ah,
AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER)|
AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR),
AR5K_QUIET_CTL2);
/* Enable quiet period for current TSF */
ath5k_hw_reg_write(ah,
AR5K_QUIET_CTL1_QT_EN |
AR5K_REG_SM(ath5k_hw_reg_read(ah,
AR5K_TSF_L32_5211) >> 10,
AR5K_QUIET_CTL1_NEXT_QT_TSF),
AR5K_QUIET_CTL1);
/* Force channel idle high */
AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);
/* Wait a while and disable mechanism */
udelay(400);
AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
AR5K_QUIET_CTL1_QT_EN);
/* Re-check for pending frames */
i = 100;
do {
pending = ath5k_hw_reg_read(ah,
AR5K_QUEUE_STATUS(queue)) &
AR5K_QCU_STS_FRMPENDCNT;
udelay(100);
} while (--i && pending);
AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);
if (pending)
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
"quiet mechanism didn't work q:%i !\n",
queue);
}
/*
* Disable DCU early termination
*/
AR5K_REG_DISABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_DCU_EARLY);
/* Clear register */
ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
if (pending) {
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
"tx dma didn't stop (q:%i, frm:%i) !\n",
queue, pending);
return -EBUSY;
}
}
/* TODO: Check for success on 5210 else return error */
return 0;
}
/**
* ath5k_hw_stop_beacon_queue() - Stop beacon queue
* @ah: The &struct ath5k_hw
* @queue: The queue number
*
* Returns -EIO if queue didn't stop
*/
int
ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
{
int ret;
ret = ath5k_hw_stop_tx_dma(ah, queue);
if (ret) {
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
"beacon queue didn't stop !\n");
return -EIO;
}
return 0;
}
/**
* ath5k_hw_get_txdp() - Get TX Descriptor's address for a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
*
* Get TX descriptor's address for a specific queue. For 5210 we ignore
* the queue number and use tx queue type since we only have 2 queues.
* We use TXDP0 for normal data queue and TXDP1 for beacon queue.
* For newer chips with QCU/DCU we just read the corresponding TXDP register.
*
* XXX: Is TXDP read and clear ?
*/
u32
ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
{
u16 tx_reg;
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
/*
* Get the transmit queue descriptor pointer from the selected queue
*/
/*5210 doesn't have QCU*/
if (ah->ah_version == AR5K_AR5210) {
switch (ah->ah_txq[queue].tqi_type) {
case AR5K_TX_QUEUE_DATA:
tx_reg = AR5K_NOQCU_TXDP0;
break;
case AR5K_TX_QUEUE_BEACON:
case AR5K_TX_QUEUE_CAB:
tx_reg = AR5K_NOQCU_TXDP1;
break;
default:
return 0xffffffff;
}
} else {
tx_reg = AR5K_QUEUE_TXDP(queue);
}
return ath5k_hw_reg_read(ah, tx_reg);
}
/**
* ath5k_hw_set_txdp() - Set TX Descriptor's address for a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
* @phys_addr: The physical address
*
* Set TX descriptor's address for a specific queue. For 5210 we ignore
* the queue number and use the tx queue type since we only have 2 queues;
* as above, we use TXDP0 for the normal data queue and TXDP1 for the beacon queue.
* For newer chips with QCU/DCU we just set the corresponding TXDP register.
* Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still
* active.
*/
int
ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
{
u16 tx_reg;
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
/*
* Set the transmit queue descriptor pointer register by type
* on 5210
*/
if (ah->ah_version == AR5K_AR5210) {
switch (ah->ah_txq[queue].tqi_type) {
case AR5K_TX_QUEUE_DATA:
tx_reg = AR5K_NOQCU_TXDP0;
break;
case AR5K_TX_QUEUE_BEACON:
case AR5K_TX_QUEUE_CAB:
tx_reg = AR5K_NOQCU_TXDP1;
break;
default:
return -EINVAL;
}
} else {
/*
* Set the transmit queue descriptor pointer for
* the selected queue on QCU for 5211+
* (this won't work if the queue is still active)
*/
if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
return -EIO;
tx_reg = AR5K_QUEUE_TXDP(queue);
}
/* Set descriptor pointer */
ath5k_hw_reg_write(ah, phys_addr, tx_reg);
return 0;
}
/**
* ath5k_hw_update_tx_triglevel() - Update tx trigger level
* @ah: The &struct ath5k_hw
* @increase: Flag to force increase of trigger level
*
* This function increases/decreases the tx trigger level for the tx fifo
* buffer (aka FIFO threshold) that is used to indicate when the PCU flushes
* the buffer and transmits its data. Lowering it results in sending small
* frames more quickly but can lead to tx underruns; raising it a lot can
* cause other problems. Right now we start with the lowest possible value
* (64 bytes) and if we get a tx underrun we increase it using the increase
* flag. Returns -EIO if we have reached the maximum/minimum.
*
* XXX: Link this with tx DMA size ?
* XXX2: Use it to save interrupts ?
*/
int
ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
{
u32 trigger_level, imr;
int ret = -EIO;
/*
* Disable interrupts by setting the mask
*/
imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);
trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
AR5K_TXCFG_TXFULL);
if (!increase) {
if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
goto done;
} else
trigger_level +=
((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);
/*
* Update trigger level on success
*/
if (ah->ah_version == AR5K_AR5210)
ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
else
AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
AR5K_TXCFG_TXFULL, trigger_level);
ret = 0;
done:
/*
* Restore interrupt mask
*/
ath5k_hw_set_imr(ah, imr);
return ret;
}
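/*
* Illustrative use only (sketch): on a TX underrun reported by the interrupt
* path the driver can bump the trigger level. 'status' is a hypothetical
* ath5k_int mask as returned by ath5k_hw_get_isr() below:
*
*	if (status & AR5K_INT_TXURN)
*		ath5k_hw_update_tx_triglevel(ah, true);
*/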
/*******************\
* Interrupt masking *
\*******************/
/**
* ath5k_hw_is_intr_pending() - Check if we have pending interrupts
* @ah: The &struct ath5k_hw
*
* Check if we have pending interrupts to process. Returns 1 if we
* have pending interrupts and 0 if we don't.
*/
bool
ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
{
return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
}
/**
* ath5k_hw_get_isr() - Get interrupt status
* @ah: The &struct ath5k_hw
* @interrupt_mask: Driver's interrupt mask used to filter out
* interrupts in sw.
*
* This function is used inside our interrupt handler to determine the reason
* for the interrupt by reading the Primary Interrupt Status Register. Returns an
* abstract interrupt status mask which is mostly ISR with some uncommon bits
* being mapped on some standard non hw-specific positions
* (check out &ath5k_int).
*
* NOTE: We do write-to-clear, so the active PISR/SISR bits at the time this
* function gets called are cleared on return.
*/
int
ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
u32 data = 0;
/*
* Read interrupt status from Primary Interrupt
* Register.
*
* Note: PISR/SISR Not available on 5210
*/
if (ah->ah_version == AR5K_AR5210) {
u32 isr = 0;
isr = ath5k_hw_reg_read(ah, AR5K_ISR);
if (unlikely(isr == AR5K_INT_NOCARD)) {
*interrupt_mask = isr;
return -ENODEV;
}
/*
* Filter out the non-common bits from the interrupt
* status.
*/
*interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;
/* Handle INT_FATAL */
if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
| AR5K_ISR_DPERR)))
*interrupt_mask |= AR5K_INT_FATAL;
/*
* XXX: BMISS interrupts may occur after association.
* I found this on 5210 code but it needs testing. If this is
* true we should disable them before assoc and re-enable them
* after a successful assoc + some jiffies.
interrupt_mask &= ~AR5K_INT_BMISS;
*/
data = isr;
} else {
u32 pisr = 0;
u32 pisr_clear = 0;
u32 sisr0 = 0;
u32 sisr1 = 0;
u32 sisr2 = 0;
u32 sisr3 = 0;
u32 sisr4 = 0;
/* Read PISR and SISRs... */
pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
if (unlikely(pisr == AR5K_INT_NOCARD)) {
*interrupt_mask = pisr;
return -ENODEV;
}
sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);
/*
* PISR holds the logical OR of interrupt bits
* from SISR registers:
*
* TXOK and TXDESC -> Logical OR of TXOK and TXDESC
* per-queue bits on SISR0
*
* TXERR and TXEOL -> Logical OR of TXERR and TXEOL
* per-queue bits on SISR1
*
* TXURN -> Logical OR of TXURN per-queue bits on SISR2
*
* HIUERR -> Logical OR of MCABT, SSERR and DPERR bits on SISR2
*
* BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC
* BCN_TIMEOUT, CAB_TIMEOUT and DTIM
* (and TSFOOR ?) bits on SISR2
*
* QCBRORN and QCBRURN -> Logical OR of QCBRORN and
* QCBRURN per-queue bits on SISR3
* QTRIG -> Logical OR of QTRIG per-queue bits on SISR4
*
* If we clean these bits on PISR we 'll also clear all
* related bits from SISRs, e.g. if we write the TXOK bit on
* PISR we 'll clean all TXOK bits from SISR0 so if a new TXOK
* interrupt got fired for another queue while we were reading
* the interrupt registers and we write back the TXOK bit on
* PISR we 'll lose it. So make sure that we don't write back
* on PISR any bits that come from SISRs. Clearing them from
* SISRs will also clear PISR so no need to worry here.
*/
/* XXX: There seems to be an issue on some cards
* with tx interrupt flags not being updated
* on PISR despite that all Tx interrupt bits
* are cleared on SISRs. Since we handle all
* Tx queues all together it shouldn't be an
* issue if we clear Tx interrupt flags also
* on PISR to avoid that.
*/
pisr_clear = (pisr & ~AR5K_ISR_BITS_FROM_SISRS) |
(pisr & AR5K_INT_TX_ALL);
/*
* Write to clear them...
* Note: This means that each bit we write back
* to the registers will get cleared, leaving the
* rest unaffected. So this won't affect new interrupts
* we didn't catch while reading/processing, we 'll get
* them next time get_isr gets called.
*/
ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);
/* Flush previous write */
ath5k_hw_reg_read(ah, AR5K_PISR);
/*
* Filter out the non-common bits from the interrupt
* status.
*/
*interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;
/* We treat TXOK,TXDESC, TXERR and TXEOL
* the same way (schedule the tx tasklet)
* so we track them all together per queue */
if (pisr & AR5K_ISR_TXOK)
ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
AR5K_SISR0_QCU_TXOK);
if (pisr & AR5K_ISR_TXDESC)
ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
AR5K_SISR0_QCU_TXDESC);
if (pisr & AR5K_ISR_TXERR)
ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
AR5K_SISR1_QCU_TXERR);
if (pisr & AR5K_ISR_TXEOL)
ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
AR5K_SISR1_QCU_TXEOL);
/* Currently this is not very useful since we treat
* all queues the same way if we get a TXURN (update
* tx trigger level), but we might need it later on */
if (pisr & AR5K_ISR_TXURN)
ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2,
AR5K_SISR2_QCU_TXURN);
/* Misc Beacon related interrupts */
/* For AR5211 */
if (pisr & AR5K_ISR_TIM)
*interrupt_mask |= AR5K_INT_TIM;
/* For AR5212+ */
if (pisr & AR5K_ISR_BCNMISC) {
if (sisr2 & AR5K_SISR2_TIM)
*interrupt_mask |= AR5K_INT_TIM;
if (sisr2 & AR5K_SISR2_DTIM)
*interrupt_mask |= AR5K_INT_DTIM;
if (sisr2 & AR5K_SISR2_DTIM_SYNC)
*interrupt_mask |= AR5K_INT_DTIM_SYNC;
if (sisr2 & AR5K_SISR2_BCN_TIMEOUT)
*interrupt_mask |= AR5K_INT_BCN_TIMEOUT;
if (sisr2 & AR5K_SISR2_CAB_TIMEOUT)
*interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
}
/* Below interrupts are unlikely to happen */
/* HIU = Host Interface Unit (PCI etc)
* Can be one of MCABT, SSERR, DPERR from SISR2 */
if (unlikely(pisr & (AR5K_ISR_HIUERR)))
*interrupt_mask |= AR5K_INT_FATAL;
/*Beacon Not Ready*/
if (unlikely(pisr & (AR5K_ISR_BNR)))
*interrupt_mask |= AR5K_INT_BNR;
/* A queue got CBR overrun */
if (unlikely(pisr & (AR5K_ISR_QCBRORN))) {
*interrupt_mask |= AR5K_INT_QCBRORN;
ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3,
AR5K_SISR3_QCBRORN);
}
/* A queue got CBR underrun */
if (unlikely(pisr & (AR5K_ISR_QCBRURN))) {
*interrupt_mask |= AR5K_INT_QCBRURN;
ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3,
AR5K_SISR3_QCBRURN);
}
/* A queue got triggered */
if (unlikely(pisr & (AR5K_ISR_QTRIG))) {
*interrupt_mask |= AR5K_INT_QTRIG;
ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4,
AR5K_SISR4_QTRIG);
}
data = pisr;
}
/*
* In case we didn't handle anything,
* print the register value.
*/
if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
ATH5K_PRINTF("ISR: 0x%08x IMR: 0x%08x\n", data, ah->ah_imr);
return 0;
}
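/*
* Illustrative use only (a stripped-down sketch of what the interrupt handler
* in base.c does with the abstract status mask):
*
*	enum ath5k_int status;
*
*	do {
*		ath5k_hw_get_isr(ah, &status);
*		if (status & AR5K_INT_FATAL)
*			...schedule a hw reset...
*	} while (ath5k_hw_is_intr_pending(ah));
*/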
/**
* ath5k_hw_set_imr() - Set interrupt mask
* @ah: The &struct ath5k_hw
* @new_mask: The new interrupt mask to be set
*
* Set the interrupt mask in hw to save interrupts. We do that by mapping
* ath5k_int bits to hw-specific bits to remove abstraction and writing the
* Interrupt Mask Register.
*/
enum ath5k_int
ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
{
enum ath5k_int old_mask, int_mask;
old_mask = ah->ah_imr;
/*
* Disable card interrupts to prevent any race conditions
* (they will be re-enabled afterwards if AR5K_INT_GLOBAL
* is set again on the new mask).
*/
if (old_mask & AR5K_INT_GLOBAL) {
ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
ath5k_hw_reg_read(ah, AR5K_IER);
}
/*
* Add additional, chipset-dependent interrupt mask flags
* and write them to the IMR (interrupt mask register).
*/
int_mask = new_mask & AR5K_INT_COMMON;
if (ah->ah_version != AR5K_AR5210) {
/* Preserve per queue TXURN interrupt mask */
u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
& AR5K_SIMR2_QCU_TXURN;
/* Fatal interrupt abstraction for 5211+ */
if (new_mask & AR5K_INT_FATAL) {
int_mask |= AR5K_IMR_HIUERR;
simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
| AR5K_SIMR2_DPERR);
}
/* Misc beacon related interrupts */
if (new_mask & AR5K_INT_TIM)
int_mask |= AR5K_IMR_TIM;
if (new_mask & AR5K_INT_TIM)
simr2 |= AR5K_SISR2_TIM;
if (new_mask & AR5K_INT_DTIM)
simr2 |= AR5K_SISR2_DTIM;
if (new_mask & AR5K_INT_DTIM_SYNC)
simr2 |= AR5K_SISR2_DTIM_SYNC;
if (new_mask & AR5K_INT_BCN_TIMEOUT)
simr2 |= AR5K_SISR2_BCN_TIMEOUT;
if (new_mask & AR5K_INT_CAB_TIMEOUT)
simr2 |= AR5K_SISR2_CAB_TIMEOUT;
/*Beacon Not Ready*/
if (new_mask & AR5K_INT_BNR)
int_mask |= AR5K_INT_BNR;
/* Note: Per queue interrupt masks
* are set via ath5k_hw_reset_tx_queue() (qcu.c) */
ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);
} else {
/* Fatal interrupt abstraction for 5210 */
if (new_mask & AR5K_INT_FATAL)
int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
| AR5K_IMR_HIUERR | AR5K_IMR_DPERR);
/* Only common interrupts left for 5210 (no SIMRs) */
ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
}
/* If RXNOFRM interrupt is masked disable it
* by setting AR5K_RXNOFRM to zero */
if (!(new_mask & AR5K_INT_RXNOFRM))
ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM);
/* Store new interrupt mask */
ah->ah_imr = new_mask;
/* ..re-enable interrupts if AR5K_INT_GLOBAL is set */
if (new_mask & AR5K_INT_GLOBAL) {
ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
ath5k_hw_reg_read(ah, AR5K_IER);
}
return old_mask;
}
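/*
* Illustrative use only (sketch): since ath5k_hw_set_imr() returns the old
* mask, interrupts can be masked around a sensitive hw operation and restored
* afterwards, just like ath5k_hw_update_tx_triglevel() does above:
*
*	old_mask = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);
*	...touch hw...
*	ath5k_hw_set_imr(ah, old_mask);
*/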
/********************\
Init/Stop functions
\********************/
/**
* ath5k_hw_dma_init() - Initialize DMA unit
* @ah: The &struct ath5k_hw
*
* Set DMA size and pre-enable interrupts
* (driver handles tx/rx buffer setup and
* dma start/stop)
*
* XXX: Save/restore RXDP/TXDP registers ?
*/
void
ath5k_hw_dma_init(struct ath5k_hw *ah)
{
/*
* Set Rx/Tx DMA Configuration
*
* Set standard DMA size (128). Note that
* a DMA size of 512 causes rx overruns and tx errors
* on pci-e cards (tested on 5424 but since rx overruns
* also occur on 5416/5418 with madwifi we set 128
* for all PCI-E cards to be safe).
*
* XXX: need to check 5210 for this
* TODO: Check out tx trigger level, it's always 64 on dumps but I
* guess we can tweak it and see how it goes ;-)
*/
if (ah->ah_version != AR5K_AR5210) {
AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
AR5K_TXCFG_SDMAMR, AR5K_DMASIZE_128B);
AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
AR5K_RXCFG_SDMAMW, AR5K_DMASIZE_128B);
}
/* Pre-enable interrupts on 5211/5212*/
if (ah->ah_version != AR5K_AR5210)
ath5k_hw_set_imr(ah, ah->ah_imr);
}
/**
* ath5k_hw_dma_stop() - stop DMA unit
* @ah: The &struct ath5k_hw
*
* Stop tx/rx DMA and interrupts. Returns
* -EBUSY if tx or rx dma failed to stop.
*
* XXX: Sometimes DMA unit hangs and we have
* stuck frames on tx queues, only a reset
* can fix that.
*/
int
ath5k_hw_dma_stop(struct ath5k_hw *ah)
{
int i, qmax, err;
err = 0;
/* Disable interrupts */
ath5k_hw_set_imr(ah, 0);
/* Stop rx dma */
err = ath5k_hw_stop_rx_dma(ah);
if (err)
return err;
/* Clear any pending interrupts
* and disable tx dma */
if (ah->ah_version != AR5K_AR5210) {
ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
qmax = AR5K_NUM_TX_QUEUES;
} else {
/* PISR/SISR Not available on 5210 */
ath5k_hw_reg_read(ah, AR5K_ISR);
qmax = AR5K_NUM_TX_QUEUES_NOQCU;
}
for (i = 0; i < qmax; i++) {
err = ath5k_hw_stop_tx_dma(ah, i);
/* -EINVAL -> queue inactive */
if (err && err != -EINVAL)
return err;
}
return 0;
}

File diff suppressed because it is too large

View file

@@ -0,0 +1,495 @@
/*
* Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/*
* Common ar5xxx EEPROM data offsets (set these on AR5K_EEPROM_BASE)
*/
#define AR5K_EEPROM_PCIE_OFFSET 0x02 /* Contains offset to PCI-E infos */
#define AR5K_EEPROM_PCIE_SERDES_SECTION 0x40 /* PCIE_OFFSET points here when
* SERDES infos are present */
#define AR5K_EEPROM_MAGIC 0x003d /* EEPROM Magic number */
#define AR5K_EEPROM_MAGIC_VALUE 0x5aa5 /* Default - found on EEPROM */
#define AR5K_EEPROM_IS_HB63 0x000b /* Talon detect */
#define AR5K_EEPROM_RFKILL 0x0f
#define AR5K_EEPROM_RFKILL_GPIO_SEL 0x0000001c
#define AR5K_EEPROM_RFKILL_GPIO_SEL_S 2
#define AR5K_EEPROM_RFKILL_POLARITY 0x00000002
#define AR5K_EEPROM_RFKILL_POLARITY_S 1
#define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */
/* FLASH(EEPROM) Defines for AR531X chips */
#define AR5K_EEPROM_SIZE_LOWER 0x1b /* size info -- lower */
#define AR5K_EEPROM_SIZE_UPPER 0x1c /* size info -- upper */
#define AR5K_EEPROM_SIZE_UPPER_MASK 0xfff0
#define AR5K_EEPROM_SIZE_UPPER_SHIFT 4
#define AR5K_EEPROM_SIZE_ENDLOC_SHIFT 12
#define AR5K_EEPROM_CHECKSUM 0x00c0 /* EEPROM checksum */
#define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */
#define AR5K_EEPROM_INFO_MAX (0x400 - AR5K_EEPROM_INFO_BASE)
#define AR5K_EEPROM_INFO_CKSUM 0xffff
#define AR5K_EEPROM_INFO(_n) (AR5K_EEPROM_INFO_BASE + (_n))
#define AR5K_EEPROM_VERSION AR5K_EEPROM_INFO(1) /* EEPROM Version */
#define AR5K_EEPROM_VERSION_3_0 0x3000 /* No idea what's going on before this version */
#define AR5K_EEPROM_VERSION_3_1 0x3001 /* ob/db values for 2GHz (ar5211_rfregs) */
#define AR5K_EEPROM_VERSION_3_2 0x3002 /* different frequency representation (eeprom_bin2freq) */
#define AR5K_EEPROM_VERSION_3_3 0x3003 /* offsets changed, has 32 CTLs (see below) and ee_false_detect (eeprom_read_modes) */
#define AR5K_EEPROM_VERSION_3_4 0x3004 /* has ee_i_gain, ee_cck_ofdm_power_delta (eeprom_read_modes) */
#define AR5K_EEPROM_VERSION_4_0 0x4000 /* has ee_misc, ee_cal_pier, ee_turbo_max_power and ee_xr_power (eeprom_init) */
#define AR5K_EEPROM_VERSION_4_1 0x4001 /* has ee_margin_tx_rx (eeprom_init) */
#define AR5K_EEPROM_VERSION_4_2 0x4002 /* has ee_cck_ofdm_gain_delta (eeprom_init) */
#define AR5K_EEPROM_VERSION_4_3 0x4003 /* power calibration changes */
#define AR5K_EEPROM_VERSION_4_4 0x4004
#define AR5K_EEPROM_VERSION_4_5 0x4005
#define AR5K_EEPROM_VERSION_4_6 0x4006 /* has ee_scaled_cck_delta */
#define AR5K_EEPROM_VERSION_4_7 0x3007 /* 4007 ? */
#define AR5K_EEPROM_VERSION_4_9 0x4009 /* EAR futureproofing */
#define AR5K_EEPROM_VERSION_5_0 0x5000 /* Has 2413 PDADC calibration etc */
#define AR5K_EEPROM_VERSION_5_1 0x5001 /* Has capability values */
#define AR5K_EEPROM_VERSION_5_3 0x5003 /* Has spur mitigation tables */
#define AR5K_EEPROM_MODE_11A 0
#define AR5K_EEPROM_MODE_11B 1
#define AR5K_EEPROM_MODE_11G 2
#define AR5K_EEPROM_HDR AR5K_EEPROM_INFO(2) /* Header that contains the device caps */
#define AR5K_EEPROM_HDR_11A(_v) (((_v) >> AR5K_EEPROM_MODE_11A) & 0x1)
#define AR5K_EEPROM_HDR_11B(_v) (((_v) >> AR5K_EEPROM_MODE_11B) & 0x1)
#define AR5K_EEPROM_HDR_11G(_v) (((_v) >> AR5K_EEPROM_MODE_11G) & 0x1)
#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2GHz */
#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v) (((_v) >> 4) & 0x7f) /* Max turbo power for < 2W power consumption */
#define AR5K_EEPROM_HDR_DEVICE(_v) (((_v) >> 11) & 0x7) /* Device type (1 Cardbus, 2 PCI, 3 MiniPCI, 4 AP) */
#define AR5K_EEPROM_HDR_RFKILL(_v) (((_v) >> 14) & 0x1) /* Device has RFKill support */
#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v) (((_v) >> 15) & 0x1) /* Disable turbo for 5GHz */
/* Newer EEPROMs are using a different offset */
#define AR5K_EEPROM_OFF(_v, _v3_0, _v3_3) \
(((_v) >= AR5K_EEPROM_VERSION_3_3) ? _v3_3 : _v3_0)
#define AR5K_EEPROM_ANT_GAIN(_v) AR5K_EEPROM_OFF(_v, 0x00c4, 0x00c3)
#define AR5K_EEPROM_ANT_GAIN_5GHZ(_v) ((s8)(((_v) >> 8) & 0xff))
#define AR5K_EEPROM_ANT_GAIN_2GHZ(_v) ((s8)((_v) & 0xff))
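/*
* Illustrative only: AR5K_EEPROM_OFF() picks the word offset matching the
* EEPROM version at hand, and the two antenna gain values are then unpacked
* from the high/low byte. 'val', 'ee_version', 'gain_5ghz' and 'gain_2ghz'
* are hypothetical locals; the read goes through the AR5K_EEPROM_READ()
* helper defined further down:
*
*	AR5K_EEPROM_READ(AR5K_EEPROM_ANT_GAIN(ee_version), val);
*	gain_5ghz = AR5K_EEPROM_ANT_GAIN_5GHZ(val);
*	gain_2ghz = AR5K_EEPROM_ANT_GAIN_2GHZ(val);
*/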
/* Misc values available since EEPROM 4.0 */
#define AR5K_EEPROM_MISC0 AR5K_EEPROM_INFO(4)
#define AR5K_EEPROM_EARSTART(_v) ((_v) & 0xfff)
#define AR5K_EEPROM_HDR_XR2_DIS(_v) (((_v) >> 12) & 0x1)
#define AR5K_EEPROM_HDR_XR5_DIS(_v) (((_v) >> 13) & 0x1)
#define AR5K_EEPROM_EEMAP(_v) (((_v) >> 14) & 0x3)
#define AR5K_EEPROM_MISC1 AR5K_EEPROM_INFO(5)
#define AR5K_EEPROM_TARGET_PWRSTART(_v) ((_v) & 0xfff)
#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v) (((_v) >> 14) & 0x1) /* has 32KHz crystal for sleep mode */
#define AR5K_EEPROM_HAS32KHZCRYSTAL_OLD(_v) (((_v) >> 15) & 0x1)
#define AR5K_EEPROM_MISC2 AR5K_EEPROM_INFO(6)
#define AR5K_EEPROM_EEP_FILE_VERSION(_v) (((_v) >> 8) & 0xff)
#define AR5K_EEPROM_EAR_FILE_VERSION(_v) ((_v) & 0xff)
#define AR5K_EEPROM_MISC3 AR5K_EEPROM_INFO(7)
#define AR5K_EEPROM_ART_BUILD_NUM(_v) (((_v) >> 10) & 0x3f)
#define AR5K_EEPROM_EAR_FILE_ID(_v) ((_v) & 0xff)
#define AR5K_EEPROM_MISC4 AR5K_EEPROM_INFO(8)
#define AR5K_EEPROM_CAL_DATA_START(_v) (((_v) >> 4) & 0xfff)
#define AR5K_EEPROM_MASK_R0(_v) (((_v) >> 2) & 0x3) /* modes supported by radio 0 (bit 1: G, bit 2: A) */
#define AR5K_EEPROM_MASK_R1(_v) ((_v) & 0x3) /* modes supported by radio 1 (bit 1: G, bit 2: A) */
#define AR5K_EEPROM_MISC5 AR5K_EEPROM_INFO(9)
#define AR5K_EEPROM_COMP_DIS(_v) ((_v) & 0x1) /* disable compression */
#define AR5K_EEPROM_AES_DIS(_v) (((_v) >> 1) & 0x1) /* disable AES */
#define AR5K_EEPROM_FF_DIS(_v) (((_v) >> 2) & 0x1) /* disable fast frames */
#define AR5K_EEPROM_BURST_DIS(_v) (((_v) >> 3) & 0x1) /* disable bursting */
#define AR5K_EEPROM_MAX_QCU(_v) (((_v) >> 4) & 0xf) /* max number of QCUs. defaults to 10 */
#define AR5K_EEPROM_HEAVY_CLIP_EN(_v) (((_v) >> 8) & 0x1) /* enable heavy clipping */
#define AR5K_EEPROM_KEY_CACHE_SIZE(_v) (((_v) >> 12) & 0xf) /* key cache size. defaults to 128 */
#define AR5K_EEPROM_MISC6 AR5K_EEPROM_INFO(10)
#define AR5K_EEPROM_TX_CHAIN_DIS(_v) ((_v) & 0x7) /* MIMO chains disabled for TX bitmask */
#define AR5K_EEPROM_RX_CHAIN_DIS(_v) (((_v) >> 3) & 0x7) /* MIMO chains disabled for RX bitmask */
#define AR5K_EEPROM_FCC_MID_EN(_v) (((_v) >> 6) & 0x1) /* 5.47-5.7GHz supported */
#define AR5K_EEPROM_JAP_U1EVEN_EN(_v) (((_v) >> 7) & 0x1) /* Japan UNII1 band (5.15-5.25GHz) on even channels (5180, 5200, 5220, 5240) supported */
#define AR5K_EEPROM_JAP_U2_EN(_v) (((_v) >> 8) & 0x1) /* Japan UNII2 band (5.25-5.35GHz) supported */
#define AR5K_EEPROM_JAP_MID_EN(_v) (((_v) >> 9) & 0x1) /* Japan band from 5.47-5.7GHz supported */
#define AR5K_EEPROM_JAP_U1ODD_EN(_v) (((_v) >> 10) & 0x1) /* Japan UNII1 band (5.15-5.25GHz) on odd channels (5170, 5190, 5210, 5230) supported */
#define AR5K_EEPROM_JAP_11A_NEW_EN(_v) (((_v) >> 11) & 0x1) /* Japan A mode enabled (using even channels) */
/* calibration settings */
#define AR5K_EEPROM_MODES_11A(_v) AR5K_EEPROM_OFF(_v, 0x00c5, 0x00d4)
#define AR5K_EEPROM_MODES_11B(_v) AR5K_EEPROM_OFF(_v, 0x00d0, 0x00f2)
#define AR5K_EEPROM_MODES_11G(_v) AR5K_EEPROM_OFF(_v, 0x00da, 0x010d)
#define AR5K_EEPROM_CTL(_v) AR5K_EEPROM_OFF(_v, 0x00e4, 0x0128) /* Conformance test limits */
#define AR5K_EEPROM_GROUPS_START(_v) AR5K_EEPROM_OFF(_v, 0x0100, 0x0150) /* Start of Groups */
#define AR5K_EEPROM_GROUP1_OFFSET 0x0
#define AR5K_EEPROM_GROUP2_OFFSET 0x5
#define AR5K_EEPROM_GROUP3_OFFSET 0x37
#define AR5K_EEPROM_GROUP4_OFFSET 0x46
#define AR5K_EEPROM_GROUP5_OFFSET 0x55
#define AR5K_EEPROM_GROUP6_OFFSET 0x65
#define AR5K_EEPROM_GROUP7_OFFSET 0x69
#define AR5K_EEPROM_GROUP8_OFFSET 0x6f
#define AR5K_EEPROM_TARGET_PWR_OFF_11A(_v) AR5K_EEPROM_OFF(_v, AR5K_EEPROM_GROUPS_START(_v) + \
AR5K_EEPROM_GROUP5_OFFSET, 0x0000)
#define AR5K_EEPROM_TARGET_PWR_OFF_11B(_v) AR5K_EEPROM_OFF(_v, AR5K_EEPROM_GROUPS_START(_v) + \
AR5K_EEPROM_GROUP6_OFFSET, 0x0010)
#define AR5K_EEPROM_TARGET_PWR_OFF_11G(_v) AR5K_EEPROM_OFF(_v, AR5K_EEPROM_GROUPS_START(_v) + \
AR5K_EEPROM_GROUP7_OFFSET, 0x0014)
/* [3.1 - 3.3] */
#define AR5K_EEPROM_OBDB0_2GHZ 0x00ec
#define AR5K_EEPROM_OBDB1_2GHZ 0x00ed
#define AR5K_EEPROM_PROTECT 0x003f /* EEPROM protect status */
#define AR5K_EEPROM_PROTECT_RD_0_31 0x0001 /* Read protection bit for offsets 0x0 - 0x1f */
#define AR5K_EEPROM_PROTECT_WR_0_31 0x0002 /* Write protection bit for offsets 0x0 - 0x1f */
#define AR5K_EEPROM_PROTECT_RD_32_63 0x0004 /* 0x20 - 0x3f */
#define AR5K_EEPROM_PROTECT_WR_32_63 0x0008
#define AR5K_EEPROM_PROTECT_RD_64_127 0x0010 /* 0x40 - 0x7f */
#define AR5K_EEPROM_PROTECT_WR_64_127 0x0020
#define AR5K_EEPROM_PROTECT_RD_128_191 0x0040 /* 0x80 - 0xbf (regdom) */
#define AR5K_EEPROM_PROTECT_WR_128_191 0x0080
#define AR5K_EEPROM_PROTECT_RD_192_207 0x0100 /* 0xc0 - 0xcf */
#define AR5K_EEPROM_PROTECT_WR_192_207 0x0200
#define AR5K_EEPROM_PROTECT_RD_208_223 0x0400 /* 0xd0 - 0xdf */
#define AR5K_EEPROM_PROTECT_WR_208_223 0x0800
#define AR5K_EEPROM_PROTECT_RD_224_239 0x1000 /* 0xe0 - 0xef */
#define AR5K_EEPROM_PROTECT_WR_224_239 0x2000
#define AR5K_EEPROM_PROTECT_RD_240_255 0x4000 /* 0xf0 - 0xff */
#define AR5K_EEPROM_PROTECT_WR_240_255 0x8000
/* Some EEPROM defines */
#define AR5K_EEPROM_EEP_SCALE 100
#define AR5K_EEPROM_EEP_DELTA 10
#define AR5K_EEPROM_N_MODES 3
#define AR5K_EEPROM_N_5GHZ_CHAN 10
#define AR5K_EEPROM_N_5GHZ_RATE_CHAN 8
#define AR5K_EEPROM_N_2GHZ_CHAN 3
#define AR5K_EEPROM_N_2GHZ_CHAN_2413 4
#define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4
#define AR5K_EEPROM_MAX_CHAN 10
#define AR5K_EEPROM_N_PWR_POINTS_5111 11
#define AR5K_EEPROM_N_PCDAC 11
#define AR5K_EEPROM_N_PHASE_CAL 5
#define AR5K_EEPROM_N_TEST_FREQ 8
#define AR5K_EEPROM_N_EDGES 8
#define AR5K_EEPROM_N_INTERCEPTS 11
#define AR5K_EEPROM_FREQ_M(_v) AR5K_EEPROM_OFF(_v, 0x7f, 0xff)
#define AR5K_EEPROM_PCDAC_M 0x3f
#define AR5K_EEPROM_PCDAC_START 1
#define AR5K_EEPROM_PCDAC_STOP 63
#define AR5K_EEPROM_PCDAC_STEP 1
#define AR5K_EEPROM_NON_EDGE_M 0x40
#define AR5K_EEPROM_CHANNEL_POWER 8
#define AR5K_EEPROM_N_OBDB 4
#define AR5K_EEPROM_OBDB_DIS 0xffff
#define AR5K_EEPROM_CHANNEL_DIS 0xff
#define AR5K_EEPROM_SCALE_OC_DELTA(_x) (((_x) * 2) / 10)
#define AR5K_EEPROM_N_CTLS(_v) AR5K_EEPROM_OFF(_v, 16, 32)
#define AR5K_EEPROM_MAX_CTLS 32
#define AR5K_EEPROM_N_PD_CURVES 4
#define AR5K_EEPROM_N_XPD0_POINTS 4
#define AR5K_EEPROM_N_XPD3_POINTS 3
#define AR5K_EEPROM_N_PD_GAINS 4
#define AR5K_EEPROM_N_PD_POINTS 5
#define AR5K_EEPROM_N_INTERCEPT_10_2GHZ 35
#define AR5K_EEPROM_N_INTERCEPT_10_5GHZ 55
#define AR5K_EEPROM_POWER_M 0x3f
#define AR5K_EEPROM_POWER_MIN 0
#define AR5K_EEPROM_POWER_MAX 3150
#define AR5K_EEPROM_POWER_STEP 50
#define AR5K_EEPROM_POWER_TABLE_SIZE 64
#define AR5K_EEPROM_N_POWER_LOC_11B 4
#define AR5K_EEPROM_N_POWER_LOC_11G 6
#define AR5K_EEPROM_I_GAIN 10
#define AR5K_EEPROM_CCK_OFDM_DELTA 15
#define AR5K_EEPROM_N_IQ_CAL 2
/* 5GHz/2GHz */
enum ath5k_eeprom_freq_bands {
AR5K_EEPROM_BAND_5GHZ = 0,
AR5K_EEPROM_BAND_2GHZ = 1,
AR5K_EEPROM_N_FREQ_BANDS,
};
/* Spur chans per freq band */
#define AR5K_EEPROM_N_SPUR_CHANS 5
/* fbin value for chan 2464 x2 */
#define AR5K_EEPROM_5413_SPUR_CHAN_1 1640
/* fbin value for chan 2420 x2 */
#define AR5K_EEPROM_5413_SPUR_CHAN_2 1200
#define AR5K_EEPROM_SPUR_CHAN_MASK 0x3FFF
#define AR5K_EEPROM_NO_SPUR 0x8000
#define AR5K_SPUR_CHAN_WIDTH 87
#define AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz 3125
#define AR5K_SPUR_SYMBOL_WIDTH_TURBO_100Hz 6250
#define AR5K_EEPROM_READ(_o, _v) do { \
if (!ath5k_hw_nvram_read(ah, (_o), &(_v))) \
return -EIO; \
} while (0)
#define AR5K_EEPROM_READ_HDR(_o, _v) \
AR5K_EEPROM_READ(_o, ah->ah_capabilities.cap_eeprom._v);
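/*
* Illustrative only: both helpers expect an 'ah' pointer in scope and make
* the calling function return -EIO on a failed NVRAM read. 'val' is a
* hypothetical local u16, 'ee_version' a field of &struct ath5k_eeprom_info:
*
*	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_VERSION, ee_version);
*	AR5K_EEPROM_READ(AR5K_EEPROM_MAGIC, val);
*/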
enum ath5k_ant_table {
AR5K_ANT_CTL = 0, /* Idle switch table settings */
AR5K_ANT_SWTABLE_A = 1, /* Switch table for antenna A */
AR5K_ANT_SWTABLE_B = 2, /* Switch table for antenna B */
AR5K_ANT_MAX,
};
enum ath5k_ctl_mode {
AR5K_CTL_11A = 0,
AR5K_CTL_11B = 1,
AR5K_CTL_11G = 2,
AR5K_CTL_TURBO = 3,
AR5K_CTL_TURBOG = 4,
AR5K_CTL_2GHT20 = 5,
AR5K_CTL_5GHT20 = 6,
AR5K_CTL_2GHT40 = 7,
AR5K_CTL_5GHT40 = 8,
AR5K_CTL_MODE_M = 15,
};
/* Per channel calibration data, used for power table setup */
struct ath5k_chan_pcal_info_rf5111 {
/* Power levels in half dBm units
* for one power curve. */
u8 pwr[AR5K_EEPROM_N_PWR_POINTS_5111];
/* PCDAC table steps
* for the above values */
u8 pcdac[AR5K_EEPROM_N_PWR_POINTS_5111];
/* Starting PCDAC step */
u8 pcdac_min;
/* Final PCDAC step */
u8 pcdac_max;
};
struct ath5k_chan_pcal_info_rf5112 {
/* Power levels in quarter dBm units
* for lower (0) and higher (3)
* level curves in 0.25dB units */
s8 pwr_x0[AR5K_EEPROM_N_XPD0_POINTS];
s8 pwr_x3[AR5K_EEPROM_N_XPD3_POINTS];
/* PCDAC table steps
* for the above values */
u8 pcdac_x0[AR5K_EEPROM_N_XPD0_POINTS];
u8 pcdac_x3[AR5K_EEPROM_N_XPD3_POINTS];
};
struct ath5k_chan_pcal_info_rf2413 {
/* Starting pwr/pddac values */
s8 pwr_i[AR5K_EEPROM_N_PD_GAINS];
u8 pddac_i[AR5K_EEPROM_N_PD_GAINS];
/* (pwr,pddac) points
* power levels in 0.5dB units */
s8 pwr[AR5K_EEPROM_N_PD_GAINS]
[AR5K_EEPROM_N_PD_POINTS];
u8 pddac[AR5K_EEPROM_N_PD_GAINS]
[AR5K_EEPROM_N_PD_POINTS];
};
enum ath5k_powertable_type {
AR5K_PWRTABLE_PWR_TO_PCDAC = 0,
AR5K_PWRTABLE_LINEAR_PCDAC = 1,
AR5K_PWRTABLE_PWR_TO_PDADC = 2,
};
struct ath5k_pdgain_info {
u8 pd_points;
u8 *pd_step;
/* Power values are in
* 0.25dB units */
s16 *pd_pwr;
};
struct ath5k_chan_pcal_info {
/* Frequency */
u16 freq;
/* Tx power boundaries */
s16 max_pwr;
s16 min_pwr;
union {
struct ath5k_chan_pcal_info_rf5111 rf5111_info;
struct ath5k_chan_pcal_info_rf5112 rf5112_info;
struct ath5k_chan_pcal_info_rf2413 rf2413_info;
};
/* Raw values used by phy code
* Curves are stored in order from lower
* gain to higher gain (max txpower -> min txpower) */
struct ath5k_pdgain_info *pd_curves;
};
/* Per rate calibration data for each mode,
* used for rate power table setup.
* Note: Values in 0.5dB units */
struct ath5k_rate_pcal_info {
u16 freq; /* Frequency */
/* Power level for 6-24Mbit/s rates or
* 1Mb rate */
u16 target_power_6to24;
/* Power level for 36Mbit rate or
* 2Mb rate */
u16 target_power_36;
/* Power level for 48Mbit rate or
* 5.5Mbit rate */
u16 target_power_48;
/* Power level for 54Mbit rate or
* 11Mbit rate */
u16 target_power_54;
};
/* Power edges for conformance test limits */
struct ath5k_edge_power {
u16 freq;
u16 edge; /* in half dBm */
bool flag;
};
/**
* struct ath5k_eeprom_info - EEPROM calibration data
*
* @ee_regdomain: ath/regd.c takes care of COUNTRY_ERD and WORLDWIDE_ROAMING
* flags
* @ee_ant_gain: Antenna gain in 0.5dB steps signed [5211 only?]
* @ee_cck_ofdm_gain_delta: difference in gainF to output the same power for
* OFDM and CCK packets
* @ee_cck_ofdm_power_delta: power difference between OFDM (6Mbps) and CCK
* (11Mbps) rate in G mode. 0.1dB steps
* @ee_scaled_cck_delta: for Japan Channel 14: 0.1dB resolution
*
* @ee_i_cal: Initial I coefficient to correct I/Q mismatch in the receive path
* @ee_q_cal: Initial Q coefficient to correct I/Q mismatch in the receive path
* @ee_fixed_bias: use ee_ob and ee_db settings or use automatic control
* @ee_switch_settling: RX/TX Switch settling time
* @ee_atn_tx_rx: Difference in attenuation between TX and RX in 1dB steps
* @ee_ant_control: Antenna Control Settings
* @ee_ob: Bias current for Output stage of PA
* B/G mode: Index [0] is used for AR2112/5112, otherwise [1]
* A mode: [0] 5.15-5.25 [1] 5.25-5.50 [2] 5.50-5.70 [3] 5.70-5.85 GHz
* @ee_db: Bias current for Output stage of PA. see @ee_ob
* @ee_tx_end2xlna_enable: Time difference from when BB finishes sending a frame
* to when the external LNA is activated
* @ee_tx_end2xpa_disable: Time difference from when BB finishes sending a frame
* to when the external PA switch is deactivated
* @ee_tx_frm2xpa_enable: Time difference from when MAC sends frame to when
* external PA switch is activated
* @ee_thr_62: Clear Channel Assessment (CCA) sensitivity
* (IEEE802.11a section 17.3.10.5 )
* @ee_xlna_gain: Total gain of the LNA (information only)
* @ee_xpd: Use external (1) or internal power detector
* @ee_x_gain: Gain for external power detector output (differences in EEMAP
* versions!)
* @ee_i_gain: Initial gain value after reset
* @ee_margin_tx_rx: Margin in dB when final attenuation stage should be used
*
* @ee_false_detect: Backoff in Sensitivity (dB) on channels with spur signals
* @ee_noise_floor_thr: Noise floor threshold in 1dB steps
* @ee_adc_desired_size: Desired amplitude for ADC, used by AGC; in 0.5 dB steps
* @ee_pga_desired_size: Desired output of PGA (for BB gain) in 0.5 dB steps
* @ee_pd_gain_overlap: PD ADC curves need to overlap in 0.5dB steps (ee_map>=2)
*/
struct ath5k_eeprom_info {
/* Header information */
u16 ee_magic;
u16 ee_protect;
u16 ee_regdomain;
u16 ee_version;
u16 ee_header;
u16 ee_ant_gain;
u8 ee_rfkill_pin;
bool ee_rfkill_pol;
bool ee_is_hb63;
bool ee_serdes;
u16 ee_misc0;
u16 ee_misc1;
u16 ee_misc2;
u16 ee_misc3;
u16 ee_misc4;
u16 ee_misc5;
u16 ee_misc6;
u16 ee_cck_ofdm_gain_delta;
u16 ee_cck_ofdm_power_delta;
u16 ee_scaled_cck_delta;
/* RF Calibration settings (reset, rfregs) */
u16 ee_i_cal[AR5K_EEPROM_N_MODES];
u16 ee_q_cal[AR5K_EEPROM_N_MODES];
u16 ee_fixed_bias[AR5K_EEPROM_N_MODES];
u16 ee_turbo_max_power[AR5K_EEPROM_N_MODES];
u16 ee_xr_power[AR5K_EEPROM_N_MODES];
u16 ee_switch_settling[AR5K_EEPROM_N_MODES];
u16 ee_atn_tx_rx[AR5K_EEPROM_N_MODES];
u16 ee_ant_control[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_PCDAC];
u16 ee_ob[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB];
u16 ee_db[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB];
u16 ee_tx_end2xlna_enable[AR5K_EEPROM_N_MODES];
u16 ee_tx_end2xpa_disable[AR5K_EEPROM_N_MODES];
u16 ee_tx_frm2xpa_enable[AR5K_EEPROM_N_MODES];
u16 ee_thr_62[AR5K_EEPROM_N_MODES];
u16 ee_xlna_gain[AR5K_EEPROM_N_MODES];
u16 ee_xpd[AR5K_EEPROM_N_MODES];
u16 ee_x_gain[AR5K_EEPROM_N_MODES];
u16 ee_i_gain[AR5K_EEPROM_N_MODES];
u16 ee_margin_tx_rx[AR5K_EEPROM_N_MODES];
u16 ee_switch_settling_turbo[AR5K_EEPROM_N_MODES];
u16 ee_margin_tx_rx_turbo[AR5K_EEPROM_N_MODES];
u16 ee_atn_tx_rx_turbo[AR5K_EEPROM_N_MODES];
/* Power calibration data */
u16 ee_false_detect[AR5K_EEPROM_N_MODES];
/* Number of pd gain curves per mode */
u8 ee_pd_gains[AR5K_EEPROM_N_MODES];
/* Back mapping pdcurve number -> pdcurve index in pd->pd_curves */
u8 ee_pdc_to_idx[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_PD_GAINS];
u8 ee_n_piers[AR5K_EEPROM_N_MODES];
struct ath5k_chan_pcal_info ee_pwr_cal_a[AR5K_EEPROM_N_5GHZ_CHAN];
struct ath5k_chan_pcal_info ee_pwr_cal_b[AR5K_EEPROM_N_2GHZ_CHAN_MAX];
struct ath5k_chan_pcal_info ee_pwr_cal_g[AR5K_EEPROM_N_2GHZ_CHAN_MAX];
/* Per rate target power levels */
u8 ee_rate_target_pwr_num[AR5K_EEPROM_N_MODES];
struct ath5k_rate_pcal_info ee_rate_tpwr_a[AR5K_EEPROM_N_5GHZ_CHAN];
struct ath5k_rate_pcal_info ee_rate_tpwr_b[AR5K_EEPROM_N_2GHZ_CHAN_MAX];
struct ath5k_rate_pcal_info ee_rate_tpwr_g[AR5K_EEPROM_N_2GHZ_CHAN_MAX];
/* Conformance test limits (Unused) */
u8 ee_ctls;
u8 ee_ctl[AR5K_EEPROM_MAX_CTLS];
struct ath5k_edge_power ee_ctl_pwr[AR5K_EEPROM_N_EDGES * AR5K_EEPROM_MAX_CTLS];
/* Noise Floor Calibration settings */
s16 ee_noise_floor_thr[AR5K_EEPROM_N_MODES];
s8 ee_adc_desired_size[AR5K_EEPROM_N_MODES];
s8 ee_pga_desired_size[AR5K_EEPROM_N_MODES];
s8 ee_adc_desired_size_turbo[AR5K_EEPROM_N_MODES];
s8 ee_pga_desired_size_turbo[AR5K_EEPROM_N_MODES];
s8 ee_pd_gain_overlap;
/* Spur mitigation data (fbin values for spur channels) */
u16 ee_spur_chans[AR5K_EEPROM_N_SPUR_CHANS][AR5K_EEPROM_N_FREQ_BANDS];
/* Antenna raw switch tables */
u32 ee_antenna[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
};

View file

@@ -0,0 +1,213 @@
/*
* Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/****************\
GPIO Functions
\****************/
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
/**
* DOC: GPIO/LED functions
*
* Here we control the 6 bidirectional GPIO pins provided by the hw.
* We can set a GPIO pin to be an input or an output pin via the GPIO control
* register and then read or set its status from the GPIO data input/output
* registers.
*
* We also control the two LED pins provided by the hw, LED_0 is our
* "power" LED and LED_1 is our "network activity" LED but many scenarios
* are available from hw. Vendors might also provide LEDs connected to the
* GPIO pins; we handle them through the LED subsystem in led.c
*/
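/*
* Illustrative use only (sketch): driving a vendor LED wired to a GPIO pin
* with the helpers below. Pin number 3 and active-high polarity are made up
* for the example:
*
*	ath5k_hw_set_gpio_output(ah, 3);
*	ath5k_hw_set_gpio(ah, 3, 1);	(LED on)
*	ath5k_hw_set_gpio(ah, 3, 0);	(LED off)
*/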
/**
* ath5k_hw_set_ledstate() - Set led state
* @ah: The &struct ath5k_hw
* @state: One of AR5K_LED_*
*
* Used to set the LED blinking state. This only
* works for the LEDs connected to the LED_0/LED_1 pins,
* not for GPIO-based LEDs.
*/
void
ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
{
u32 led;
/*5210 has different led mode handling*/
u32 led_5210;
/*Reset led status*/
if (ah->ah_version != AR5K_AR5210)
AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG,
AR5K_PCICFG_LEDMODE | AR5K_PCICFG_LED);
else
AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_LED);
/*
* Some blinking values, define at your wish
*/
switch (state) {
case AR5K_LED_SCAN:
case AR5K_LED_AUTH:
led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_PEND;
led_5210 = AR5K_PCICFG_LED_PEND | AR5K_PCICFG_LED_BCTL;
break;
case AR5K_LED_INIT:
led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_NONE;
led_5210 = AR5K_PCICFG_LED_PEND;
break;
case AR5K_LED_ASSOC:
case AR5K_LED_RUN:
led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_ASSOC;
led_5210 = AR5K_PCICFG_LED_ASSOC;
break;
default:
led = AR5K_PCICFG_LEDMODE_PROM | AR5K_PCICFG_LED_NONE;
led_5210 = AR5K_PCICFG_LED_PEND;
break;
}
/*Write new status to the register*/
if (ah->ah_version != AR5K_AR5210)
AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led);
else
AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210);
}
/**
* ath5k_hw_set_gpio_input() - Set GPIO inputs
* @ah: The &struct ath5k_hw
* @gpio: GPIO pin to set as input
*/
int
ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
{
if (gpio >= AR5K_NUM_GPIO)
return -EINVAL;
ath5k_hw_reg_write(ah,
(ath5k_hw_reg_read(ah, AR5K_GPIOCR) & ~AR5K_GPIOCR_OUT(gpio))
| AR5K_GPIOCR_IN(gpio), AR5K_GPIOCR);
return 0;
}
/**
* ath5k_hw_set_gpio_output() - Set GPIO outputs
* @ah: The &struct ath5k_hw
* @gpio: The GPIO pin to set as output
*/
int
ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
{
if (gpio >= AR5K_NUM_GPIO)
return -EINVAL;
ath5k_hw_reg_write(ah,
(ath5k_hw_reg_read(ah, AR5K_GPIOCR) & ~AR5K_GPIOCR_OUT(gpio))
| AR5K_GPIOCR_OUT(gpio), AR5K_GPIOCR);
return 0;
}
/**
* ath5k_hw_get_gpio() - Get GPIO state
* @ah: The &struct ath5k_hw
* @gpio: The GPIO pin to read
*/
u32
ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
{
if (gpio >= AR5K_NUM_GPIO)
return 0xffffffff;
/* GPIO input magic */
return ((ath5k_hw_reg_read(ah, AR5K_GPIODI) & AR5K_GPIODI_M) >> gpio) &
0x1;
}
/**
* ath5k_hw_set_gpio() - Set GPIO state
* @ah: The &struct ath5k_hw
* @gpio: The GPIO pin to set
* @val: Value to set (boolean)
*/
int
ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
{
u32 data;
if (gpio >= AR5K_NUM_GPIO)
return -EINVAL;
/* GPIO output magic */
data = ath5k_hw_reg_read(ah, AR5K_GPIODO);
data &= ~(1 << gpio);
data |= (val & 1) << gpio;
ath5k_hw_reg_write(ah, data, AR5K_GPIODO);
return 0;
}
/**
* ath5k_hw_set_gpio_intr() - Initialize the GPIO interrupt (RFKill switch)
* @ah: The &struct ath5k_hw
* @gpio: The GPIO pin to use
* @interrupt_level: True to generate interrupt on active pin (high)
*
* This function is used to set up the GPIO interrupt for the hw RFKill switch.
* That switch is connected to a GPIO pin whose number is stored in the EEPROM.
* It can either open or close the circuit to indicate that we should disable
* RF/Wireless to save power (we also get that from EEPROM).
*/
void
ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
u32 interrupt_level)
{
u32 data;
if (gpio >= AR5K_NUM_GPIO)
return;
/*
* Set the GPIO interrupt
*/
data = (ath5k_hw_reg_read(ah, AR5K_GPIOCR) &
~(AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_SELH |
AR5K_GPIOCR_INT_ENA | AR5K_GPIOCR_OUT(gpio))) |
(AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_ENA);
ath5k_hw_reg_write(ah, interrupt_level ? data :
(data | AR5K_GPIOCR_INT_SELH), AR5K_GPIOCR);
ah->ah_imr |= AR5K_IMR_GPIO;
/* Enable GPIO interrupts */
AR5K_REG_ENABLE_BITS(ah, AR5K_PIMR, AR5K_IMR_GPIO);
}

File diff suppressed because it is too large

View file

@@ -0,0 +1,197 @@
/*
* Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
* Copyright (c) 2004-2005 Atheros Communications, Inc.
* Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
* Copyright (c) 2009 Bob Copeland <me@bobcopeland.com>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/pci.h>
#include "ath5k.h"
#define ATH_SDEVICE(subv, subd) \
.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
.subvendor = (subv), .subdevice = (subd)
#define ATH_LED(pin, polarity) .driver_data = (((pin) << 8) | (polarity))
#define ATH_PIN(data) ((data) >> 8)
#define ATH_POLARITY(data) ((data) & 0xff)
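/*
* Illustrative only: ATH_LED(3, 1) packs driver_data as 0x0301, which the
* accessors above unpack again in ath5k_init_leds():
*
*	pin      = ATH_PIN(match->driver_data);       (3)
*	polarity = ATH_POLARITY(match->driver_data);  (1)
*/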
/* Devices we match on for LED config info (typically laptops) */
static const struct pci_device_id ath5k_led_devices[] = {
/* AR5211 */
{ PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5211), ATH_LED(0, 0) },
/* HP Compaq nc6xx, nc4000, nx6000 */
{ ATH_SDEVICE(PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID), ATH_LED(1, 1) },
/* Acer Aspire One A150 (maximlevitsky@gmail.com) */
{ ATH_SDEVICE(PCI_VENDOR_ID_FOXCONN, 0xe008), ATH_LED(3, 0) },
/* Acer Aspire One AO531h AO751h (keng-yu.lin@canonical.com) */
{ ATH_SDEVICE(PCI_VENDOR_ID_FOXCONN, 0xe00d), ATH_LED(3, 0) },
/* Acer Ferrari 5000 (russ.dill@gmail.com) */
{ ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0422), ATH_LED(1, 1) },
/* E-machines E510 (tuliom@gmail.com) */
{ ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0428), ATH_LED(3, 0) },
/* BenQ Joybook R55v (nowymarluk@wp.pl) */
{ ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0100), ATH_LED(1, 0) },
/* Acer Extensa 5620z (nekoreeve@gmail.com) */
{ ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0105), ATH_LED(3, 0) },
/* Fukato Datacask Jupiter 1014a (mrb74@gmx.at) */
{ ATH_SDEVICE(PCI_VENDOR_ID_AZWAVE, 0x1026), ATH_LED(3, 0) },
/* IBM ThinkPad AR5BXB6 (legovini@spiro.fisica.unipd.it) */
{ ATH_SDEVICE(PCI_VENDOR_ID_IBM, 0x058a), ATH_LED(1, 0) },
/* HP Compaq CQ60-206US (ddreggors@jumptv.com) */
{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) },
/* HP Compaq C700 (nitrousnrg@gmail.com) */
{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) },
/* LiteOn AR5BXB63 (magooz@salug.it) */
{ ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) },
/* IBM-specific AR5212 (all others) */
{ PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5212_IBM), ATH_LED(0, 0) },
/* Dell Vostro A860 (shahar@shahar-or.co.il) */
{ ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0112), ATH_LED(3, 0) },
{ }
};
void ath5k_led_enable(struct ath5k_hw *ah)
{
if (test_bit(ATH_STAT_LEDSOFT, ah->status)) {
ath5k_hw_set_gpio_output(ah, ah->led_pin);
ath5k_led_off(ah);
}
}
static void ath5k_led_on(struct ath5k_hw *ah)
{
if (!test_bit(ATH_STAT_LEDSOFT, ah->status))
return;
ath5k_hw_set_gpio(ah, ah->led_pin, ah->led_on);
}
void ath5k_led_off(struct ath5k_hw *ah)
{
if (!test_bit(ATH_STAT_LEDSOFT, ah->status))
return;
ath5k_hw_set_gpio(ah, ah->led_pin, !ah->led_on);
}
static void
ath5k_led_brightness_set(struct led_classdev *led_dev,
enum led_brightness brightness)
{
struct ath5k_led *led = container_of(led_dev, struct ath5k_led,
led_dev);
if (brightness == LED_OFF)
ath5k_led_off(led->ah);
else
ath5k_led_on(led->ah);
}
static int
ath5k_register_led(struct ath5k_hw *ah, struct ath5k_led *led,
const char *name, char *trigger)
{
int err;
led->ah = ah;
strncpy(led->name, name, sizeof(led->name));
led->name[sizeof(led->name)-1] = 0;
led->led_dev.name = led->name;
led->led_dev.default_trigger = trigger;
led->led_dev.brightness_set = ath5k_led_brightness_set;
err = led_classdev_register(ah->dev, &led->led_dev);
if (err) {
ATH5K_WARN(ah, "could not register LED %s\n", name);
led->ah = NULL;
}
return err;
}
static void
ath5k_unregister_led(struct ath5k_led *led)
{
if (!led->ah)
return;
led_classdev_unregister(&led->led_dev);
ath5k_led_off(led->ah);
led->ah = NULL;
}
void ath5k_unregister_leds(struct ath5k_hw *ah)
{
ath5k_unregister_led(&ah->rx_led);
ath5k_unregister_led(&ah->tx_led);
}
int ath5k_init_leds(struct ath5k_hw *ah)
{
int ret = 0;
struct ieee80211_hw *hw = ah->hw;
struct pci_dev *pdev = ah->pdev;
char name[ATH5K_LED_MAX_NAME_LEN + 1];
const struct pci_device_id *match;
if (!ah->pdev)
return 0;
match = pci_match_id(&ath5k_led_devices[0], pdev);
if (match) {
__set_bit(ATH_STAT_LEDSOFT, ah->status);
ah->led_pin = ATH_PIN(match->driver_data);
ah->led_on = ATH_POLARITY(match->driver_data);
}
if (!test_bit(ATH_STAT_LEDSOFT, ah->status))
goto out;
ath5k_led_enable(ah);
snprintf(name, sizeof(name), "ath5k-%s::rx", wiphy_name(hw->wiphy));
ret = ath5k_register_led(ah, &ah->rx_led, name,
ieee80211_get_rx_led_name(hw));
if (ret)
goto out;
snprintf(name, sizeof(name), "ath5k-%s::tx", wiphy_name(hw->wiphy));
ret = ath5k_register_led(ah, &ah->tx_led, name,
ieee80211_get_tx_led_name(hw));
out:
return ret;
}

View file

@@ -0,0 +1,832 @@
/*-
* Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
* Copyright (c) 2004-2005 Atheros Communications, Inc.
* Copyright (c) 2006 Devicescape Software, Inc.
* Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
* Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
* Copyright (c) 2010 Bruno Randolf <br1@einfach.org>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "ath5k.h"
#include "base.h"
#include "reg.h"
/********************\
* Mac80211 functions *
\********************/
static void
ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct ath5k_hw *ah = hw->priv;
u16 qnum = skb_get_queue_mapping(skb);
if (WARN_ON(qnum >= ah->ah_capabilities.cap_queues.q_tx_num)) {
ieee80211_free_txskb(hw, skb);
return;
}
ath5k_tx_queue(hw, skb, &ah->txqs[qnum], control);
}
static int
ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct ath5k_hw *ah = hw->priv;
int ret;
struct ath5k_vif *avf = (void *)vif->drv_priv;
mutex_lock(&ah->lock);
if ((vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC)
&& (ah->num_ap_vifs + ah->num_adhoc_vifs) >= ATH_BCBUF) {
ret = -ELNRNG;
goto end;
}
/* Don't allow other interfaces if one ad-hoc is configured.
* TODO: Fix the problems with ad-hoc and multiple other interfaces.
* We would need to operate the HW in ad-hoc mode to allow TSF updates
* for the IBSS, but this breaks with additional AP or STA interfaces
* at the moment. */
if (ah->num_adhoc_vifs ||
(ah->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
ATH5K_ERR(ah, "Only one single ad-hoc interface is allowed.\n");
ret = -ELNRNG;
goto end;
}
switch (vif->type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
avf->opmode = vif->type;
break;
default:
ret = -EOPNOTSUPP;
goto end;
}
ah->nvifs++;
ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "add interface mode %d\n", avf->opmode);
/* Assign the vap/adhoc to a beacon xmit slot. */
if ((avf->opmode == NL80211_IFTYPE_AP) ||
(avf->opmode == NL80211_IFTYPE_ADHOC) ||
(avf->opmode == NL80211_IFTYPE_MESH_POINT)) {
int slot;
WARN_ON(list_empty(&ah->bcbuf));
avf->bbuf = list_first_entry(&ah->bcbuf, struct ath5k_buf,
list);
list_del(&avf->bbuf->list);
avf->bslot = 0;
for (slot = 0; slot < ATH_BCBUF; slot++) {
if (!ah->bslot[slot]) {
avf->bslot = slot;
break;
}
}
BUG_ON(ah->bslot[avf->bslot] != NULL);
ah->bslot[avf->bslot] = vif;
if (avf->opmode == NL80211_IFTYPE_AP)
ah->num_ap_vifs++;
else if (avf->opmode == NL80211_IFTYPE_ADHOC)
ah->num_adhoc_vifs++;
else if (avf->opmode == NL80211_IFTYPE_MESH_POINT)
ah->num_mesh_vifs++;
}
/* Any MAC address is fine, all others are included through the
* filter.
*/
ath5k_hw_set_lladdr(ah, vif->addr);
ath5k_update_bssid_mask_and_opmode(ah, vif);
ret = 0;
end:
mutex_unlock(&ah->lock);
return ret;
}
static void
ath5k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath5k_hw *ah = hw->priv;
struct ath5k_vif *avf = (void *)vif->drv_priv;
unsigned int i;
mutex_lock(&ah->lock);
ah->nvifs--;
if (avf->bbuf) {
ath5k_txbuf_free_skb(ah, avf->bbuf);
list_add_tail(&avf->bbuf->list, &ah->bcbuf);
for (i = 0; i < ATH_BCBUF; i++) {
if (ah->bslot[i] == vif) {
ah->bslot[i] = NULL;
break;
}
}
avf->bbuf = NULL;
}
if (avf->opmode == NL80211_IFTYPE_AP)
ah->num_ap_vifs--;
else if (avf->opmode == NL80211_IFTYPE_ADHOC)
ah->num_adhoc_vifs--;
else if (avf->opmode == NL80211_IFTYPE_MESH_POINT)
ah->num_mesh_vifs--;
ath5k_update_bssid_mask_and_opmode(ah, NULL);
mutex_unlock(&ah->lock);
}
/*
* TODO: Phy disable/diversity etc
*/
static int
ath5k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath5k_hw *ah = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
int ret = 0;
int i;
mutex_lock(&ah->lock);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
ret = ath5k_chan_set(ah, &conf->chandef);
if (ret < 0)
goto unlock;
}
if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
(ah->ah_txpower.txp_requested != conf->power_level)) {
ah->ah_txpower.txp_requested = conf->power_level;
/* Half dB steps */
ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
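/* Example: the limit above is in half-dB units, so a requested
 * power_level of 17 dBm is programmed as 34. */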
}
if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
ah->ah_retry_long = conf->long_frame_max_tx_count;
ah->ah_retry_short = conf->short_frame_max_tx_count;
for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++)
ath5k_hw_set_tx_retry_limits(ah, i);
}
/* TODO:
* 1) Move this on config_interface and handle each case
* separately eg. when we have only one STA vif, use
* AR5K_ANTMODE_SINGLE_AP
*
* 2) Allow the user to change antenna mode eg. when only
* one antenna is present
*
* 3) Allow the user to set default/tx antenna when possible
*
* 4) Default mode should handle 90% of the cases, together
* with fixed a/b and single AP modes we should be able to
* handle 99%. Sectored modes are extreme cases and i still
* haven't found a usage for them. If we decide to support them,
* then we must allow the user to set how many tx antennas we
* have available
*/
ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
unlock:
mutex_unlock(&ah->lock);
return ret;
}
static void
ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf, u32 changes)
{
struct ath5k_vif *avf = (void *)vif->drv_priv;
struct ath5k_hw *ah = hw->priv;
struct ath_common *common = ath5k_hw_common(ah);
mutex_lock(&ah->lock);
if (changes & BSS_CHANGED_BSSID) {
/* Cache for later use during resets */
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
common->curaid = 0;
ath5k_hw_set_bssid(ah);
mmiowb();
}
if (changes & BSS_CHANGED_BEACON_INT)
ah->bintval = bss_conf->beacon_int;
if (changes & BSS_CHANGED_ERP_SLOT) {
int slot_time;
ah->ah_short_slot = bss_conf->use_short_slot;
slot_time = ath5k_hw_get_default_slottime(ah) +
3 * ah->ah_coverage_class;
ath5k_hw_set_ifs_intervals(ah, slot_time);
}
if (changes & BSS_CHANGED_ASSOC) {
avf->assoc = bss_conf->assoc;
if (bss_conf->assoc)
ah->assoc = bss_conf->assoc;
else
ah->assoc = ath5k_any_vif_assoc(ah);
if (ah->opmode == NL80211_IFTYPE_STATION)
ath5k_set_beacon_filter(hw, ah->assoc);
ath5k_hw_set_ledstate(ah, ah->assoc ?
AR5K_LED_ASSOC : AR5K_LED_INIT);
if (bss_conf->assoc) {
ATH5K_DBG(ah, ATH5K_DEBUG_ANY,
"Bss Info ASSOC %d, bssid: %pM\n",
bss_conf->aid, common->curbssid);
common->curaid = bss_conf->aid;
ath5k_hw_set_bssid(ah);
/* Once ANI is available you would start it here */
}
}
if (changes & BSS_CHANGED_BEACON) {
spin_lock_bh(&ah->block);
ath5k_beacon_update(hw, vif);
spin_unlock_bh(&ah->block);
}
if (changes & BSS_CHANGED_BEACON_ENABLED)
ah->enable_beacon = bss_conf->enable_beacon;
if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED |
BSS_CHANGED_BEACON_INT))
ath5k_beacon_config(ah);
mutex_unlock(&ah->lock);
}
static u64
ath5k_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list)
{
u32 mfilt[2], val;
u8 pos;
struct netdev_hw_addr *ha;
mfilt[0] = 0;
mfilt[1] = 0;
netdev_hw_addr_list_for_each(ha, mc_list) {
/* calculate XOR of eight 6-bit values */
val = get_unaligned_le32(ha->addr + 0);
pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
val = get_unaligned_le32(ha->addr + 3);
pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
pos &= 0x3f;
mfilt[pos / 32] |= (1 << (pos % 32));
/* XXX: we might be able to just do this instead,
* but we're not sure and it needs testing; if we do use this
* we'd need to inform the code below not to reset the mcast filter */
/* ath5k_hw_set_mcast_filterindex(ah,
* ha->addr[5]); */
}
return ((u64)(mfilt[1]) << 32) | mfilt[0];
}
/*
* o always accept unicast, broadcast, and multicast traffic
* o multicast traffic for all BSSIDs will be enabled if mac80211
* says it should be
* o maintain current state of phy ofdm or phy cck error reception.
* If the hardware detects any of these type of errors then
* ath5k_hw_get_rx_filter() will pass to us the respective
* hardware filters to be able to receive these type of frames.
* o probe request frames are accepted only when operating in
* hostap, adhoc, or monitor modes
* o enable promiscuous mode according to the interface state
* o accept beacons:
* - when operating in adhoc mode so the 802.11 layer creates
* node table entries for peers,
* - when operating in station mode for collecting rssi data when
* the station is otherwise quiet, or
* - when scanning
*/
static void
ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
unsigned int *new_flags, u64 multicast)
{
#define SUPPORTED_FIF_FLAGS \
(FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | \
FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \
FIF_BCN_PRBRESP_PROMISC)
struct ath5k_hw *ah = hw->priv;
u32 mfilt[2], rfilt;
struct ath5k_vif_iter_data iter_data; /* to count STA interfaces */
mutex_lock(&ah->lock);
mfilt[0] = multicast;
mfilt[1] = multicast >> 32;
/* Only deal with supported flags */
changed_flags &= SUPPORTED_FIF_FLAGS;
*new_flags &= SUPPORTED_FIF_FLAGS;
/* If HW detects any phy or radar errors, leave those filters on.
* Also, always enable Unicast, Broadcasts and Multicast
* XXX: move unicast, bssid broadcasts and multicast to mac80211 */
rfilt = (ath5k_hw_get_rx_filter(ah) & (AR5K_RX_FILTER_PHYERR)) |
(AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST |
AR5K_RX_FILTER_MCAST);
if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
if (*new_flags & FIF_PROMISC_IN_BSS)
__set_bit(ATH_STAT_PROMISC, ah->status);
else
__clear_bit(ATH_STAT_PROMISC, ah->status);
}
if (test_bit(ATH_STAT_PROMISC, ah->status))
rfilt |= AR5K_RX_FILTER_PROM;
/* Note, AR5K_RX_FILTER_MCAST is already enabled */
if (*new_flags & FIF_ALLMULTI) {
mfilt[0] = ~0;
mfilt[1] = ~0;
}
/* This is the best we can do */
if (*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL))
rfilt |= AR5K_RX_FILTER_PHYERR;
/* FIF_BCN_PRBRESP_PROMISC really means to enable beacons
* and probes for any BSSID */
if ((*new_flags & FIF_BCN_PRBRESP_PROMISC) || (ah->nvifs > 1))
rfilt |= AR5K_RX_FILTER_BEACON;
/* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not
* set we should only pass on control frames for this
* station. This needs testing. I believe right now this
* enables *all* control frames, which is OK, but
* we should see if we can improve on granularity */
if (*new_flags & FIF_CONTROL)
rfilt |= AR5K_RX_FILTER_CONTROL;
/* Additional settings per mode -- this is per ath5k */
/* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */
switch (ah->opmode) {
case NL80211_IFTYPE_MESH_POINT:
rfilt |= AR5K_RX_FILTER_CONTROL |
AR5K_RX_FILTER_BEACON |
AR5K_RX_FILTER_PROBEREQ |
AR5K_RX_FILTER_PROM;
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_ADHOC:
rfilt |= AR5K_RX_FILTER_PROBEREQ |
AR5K_RX_FILTER_BEACON;
break;
case NL80211_IFTYPE_STATION:
if (ah->assoc)
rfilt |= AR5K_RX_FILTER_BEACON;
default:
break;
}
iter_data.hw_macaddr = NULL;
iter_data.n_stas = 0;
iter_data.need_set_hw_addr = false;
ieee80211_iterate_active_interfaces_atomic(
ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
ath5k_vif_iter, &iter_data);
/* Set up RX Filter */
if (iter_data.n_stas > 1) {
/* If you have multiple STA interfaces connected to
* different APs, ARPs are not received (most of the time?)
* Enabling PROMISC appears to fix that problem.
*/
rfilt |= AR5K_RX_FILTER_PROM;
}
/* Set filters */
ath5k_hw_set_rx_filter(ah, rfilt);
/* Set multicast bits */
ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]);
/* Set the cached hw filter flags, this will later actually
* be set in HW */
ah->filter_flags = rfilt;
/* Store current FIF filter flags */
ah->fif_filter_flags = *new_flags;
mutex_unlock(&ah->lock);
}
static int
ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
struct ath5k_hw *ah = hw->priv;
struct ath_common *common = ath5k_hw_common(ah);
int ret = 0;
if (ath5k_modparam_nohwcrypt)
return -EOPNOTSUPP;
if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT)
return -EOPNOTSUPP;
if (vif->type == NL80211_IFTYPE_ADHOC &&
(key->cipher == WLAN_CIPHER_SUITE_TKIP ||
key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
/* don't program group keys when using IBSS_RSN */
return -EOPNOTSUPP;
}
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
case WLAN_CIPHER_SUITE_TKIP:
break;
case WLAN_CIPHER_SUITE_CCMP:
if (common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)
break;
return -EOPNOTSUPP;
default:
WARN_ON(1);
return -EINVAL;
}
mutex_lock(&ah->lock);
switch (cmd) {
case SET_KEY:
ret = ath_key_config(common, vif, sta, key);
if (ret >= 0) {
key->hw_key_idx = ret;
/* push IV and Michael MIC generation to stack */
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
ret = 0;
}
break;
case DISABLE_KEY:
ath_key_delete(common, key);
break;
default:
ret = -EINVAL;
}
mmiowb();
mutex_unlock(&ah->lock);
return ret;
}
static void
ath5k_sw_scan_start(struct ieee80211_hw *hw)
{
struct ath5k_hw *ah = hw->priv;
if (!ah->assoc)
ath5k_hw_set_ledstate(ah, AR5K_LED_SCAN);
}
static void
ath5k_sw_scan_complete(struct ieee80211_hw *hw)
{
struct ath5k_hw *ah = hw->priv;
ath5k_hw_set_ledstate(ah, ah->assoc ?
AR5K_LED_ASSOC : AR5K_LED_INIT);
}
static int
ath5k_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
struct ath5k_hw *ah = hw->priv;
/* Force update */
ath5k_hw_update_mib_counters(ah);
stats->dot11ACKFailureCount = ah->stats.ack_fail;
stats->dot11RTSFailureCount = ah->stats.rts_fail;
stats->dot11RTSSuccessCount = ah->stats.rts_ok;
stats->dot11FCSErrorCount = ah->stats.fcs_error;
return 0;
}
static int
ath5k_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct ath5k_hw *ah = hw->priv;
struct ath5k_txq_info qi;
int ret = 0;
if (queue >= ah->ah_capabilities.cap_queues.q_tx_num)
return 0;
mutex_lock(&ah->lock);
ath5k_hw_get_tx_queueprops(ah, queue, &qi);
qi.tqi_aifs = params->aifs;
qi.tqi_cw_min = params->cw_min;
qi.tqi_cw_max = params->cw_max;
qi.tqi_burst_time = params->txop * 32;
ATH5K_DBG(ah, ATH5K_DEBUG_ANY,
"Configure tx [queue %d], "
"aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
queue, params->aifs, params->cw_min,
params->cw_max, params->txop);
if (ath5k_hw_set_tx_queueprops(ah, queue, &qi)) {
ATH5K_ERR(ah,
"Unable to update hardware queue %u!\n", queue);
ret = -EIO;
} else
ath5k_hw_reset_tx_queue(ah, queue);
mutex_unlock(&ah->lock);
return ret;
}
static u64
ath5k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct ath5k_hw *ah = hw->priv;
return ath5k_hw_get_tsf64(ah);
}
static void
ath5k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u64 tsf)
{
struct ath5k_hw *ah = hw->priv;
ath5k_hw_set_tsf64(ah, tsf);
}
static void
ath5k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct ath5k_hw *ah = hw->priv;
/*
* In IBSS mode we need to update the beacon timers too.
* This will also reset the TSF if we call it with 0.
*/
if (ah->opmode == NL80211_IFTYPE_ADHOC)
ath5k_beacon_update_timers(ah, 0);
else
ath5k_hw_reset_tsf(ah);
}
static int
ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
{
struct ath5k_hw *ah = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
struct ath_common *common = ath5k_hw_common(ah);
struct ath_cycle_counters *cc = &common->cc_survey;
unsigned int div = common->clockrate * 1000;
if (idx != 0)
return -ENOENT;
spin_lock_bh(&common->cc_lock);
ath_hw_cycle_counters_update(common);
if (cc->cycles > 0) {
ah->survey.channel_time += cc->cycles / div;
ah->survey.channel_time_busy += cc->rx_busy / div;
ah->survey.channel_time_rx += cc->rx_frame / div;
ah->survey.channel_time_tx += cc->tx_frame / div;
}
memset(cc, 0, sizeof(*cc));
spin_unlock_bh(&common->cc_lock);
memcpy(survey, &ah->survey, sizeof(*survey));
survey->channel = conf->chandef.chan;
survey->noise = ah->ah_noise_floor;
survey->filled = SURVEY_INFO_NOISE_DBM |
SURVEY_INFO_IN_USE |
SURVEY_INFO_CHANNEL_TIME |
SURVEY_INFO_CHANNEL_TIME_BUSY |
SURVEY_INFO_CHANNEL_TIME_RX |
SURVEY_INFO_CHANNEL_TIME_TX;
return 0;
}
/**
* ath5k_set_coverage_class - Set IEEE 802.11 coverage class
*
* @hw: struct ieee80211_hw pointer
* @coverage_class: IEEE 802.11 coverage class number
*
* Mac80211 callback. Sets slot time, ACK timeout and CTS timeout for given
* coverage class. The values are persistent, they are restored after device
* reset.
*/
static void
ath5k_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
{
struct ath5k_hw *ah = hw->priv;
mutex_lock(&ah->lock);
ath5k_hw_set_coverage_class(ah, coverage_class);
mutex_unlock(&ah->lock);
}
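/*
 * Worked example: a coverage class of 2 adds 3 * 2 = 6 us to the default
 * slot time (see the BSS_CHANGED_ERP_SLOT handling in
 * ath5k_bss_info_changed() above).
 */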
static int
ath5k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
{
struct ath5k_hw *ah = hw->priv;
if (tx_ant == 1 && rx_ant == 1)
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A);
else if (tx_ant == 2 && rx_ant == 2)
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B);
else if ((tx_ant & 3) == 3 && (rx_ant & 3) == 3)
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
else
return -EINVAL;
return 0;
}
static int
ath5k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
struct ath5k_hw *ah = hw->priv;
switch (ah->ah_ant_mode) {
case AR5K_ANTMODE_FIXED_A:
*tx_ant = 1; *rx_ant = 1; break;
case AR5K_ANTMODE_FIXED_B:
*tx_ant = 2; *rx_ant = 2; break;
case AR5K_ANTMODE_DEFAULT:
*tx_ant = 3; *rx_ant = 3; break;
}
return 0;
}
static void ath5k_get_ringparam(struct ieee80211_hw *hw,
u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max)
{
struct ath5k_hw *ah = hw->priv;
*tx = ah->txqs[AR5K_TX_QUEUE_ID_DATA_MIN].txq_max;
*tx_max = ATH5K_TXQ_LEN_MAX;
*rx = *rx_max = ATH_RXBUF;
}
static int ath5k_set_ringparam(struct ieee80211_hw *hw, u32 tx, u32 rx)
{
struct ath5k_hw *ah = hw->priv;
u16 qnum;
/* only support setting tx ring size for now */
if (rx != ATH_RXBUF)
return -EINVAL;
/* restrict tx ring size min/max */
if (!tx || tx > ATH5K_TXQ_LEN_MAX)
return -EINVAL;
for (qnum = 0; qnum < ARRAY_SIZE(ah->txqs); qnum++) {
if (!ah->txqs[qnum].setup)
continue;
if (ah->txqs[qnum].qnum < AR5K_TX_QUEUE_ID_DATA_MIN ||
ah->txqs[qnum].qnum > AR5K_TX_QUEUE_ID_DATA_MAX)
continue;
ah->txqs[qnum].txq_max = tx;
if (ah->txqs[qnum].txq_len >= ah->txqs[qnum].txq_max)
ieee80211_stop_queue(hw, ah->txqs[qnum].qnum);
}
return 0;
}
const struct ieee80211_ops ath5k_hw_ops = {
.tx = ath5k_tx,
.start = ath5k_start,
.stop = ath5k_stop,
.add_interface = ath5k_add_interface,
/* .change_interface = not implemented */
.remove_interface = ath5k_remove_interface,
.config = ath5k_config,
.bss_info_changed = ath5k_bss_info_changed,
.prepare_multicast = ath5k_prepare_multicast,
.configure_filter = ath5k_configure_filter,
/* .set_tim = not implemented */
.set_key = ath5k_set_key,
/* .update_tkip_key = not implemented */
/* .hw_scan = not implemented */
.sw_scan_start = ath5k_sw_scan_start,
.sw_scan_complete = ath5k_sw_scan_complete,
.get_stats = ath5k_get_stats,
/* .get_tkip_seq = not implemented */
/* .set_frag_threshold = not implemented */
/* .set_rts_threshold = not implemented */
/* .sta_add = not implemented */
/* .sta_remove = not implemented */
/* .sta_notify = not implemented */
.conf_tx = ath5k_conf_tx,
.get_tsf = ath5k_get_tsf,
.set_tsf = ath5k_set_tsf,
.reset_tsf = ath5k_reset_tsf,
/* .tx_last_beacon = not implemented */
/* .ampdu_action = not needed */
.get_survey = ath5k_get_survey,
.set_coverage_class = ath5k_set_coverage_class,
/* .rfkill_poll = not implemented */
/* .flush = not implemented */
/* .channel_switch = not implemented */
/* .napi_poll = not implemented */
.set_antenna = ath5k_set_antenna,
.get_antenna = ath5k_get_antenna,
.set_ringparam = ath5k_set_ringparam,
.get_ringparam = ath5k_get_ringparam,
};

View file

@@ -0,0 +1,343 @@
/*
* Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/nl80211.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include "../ath.h"
#include "ath5k.h"
#include "debug.h"
#include "base.h"
#include "reg.h"
/* Known PCI ids */
static const struct pci_device_id ath5k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */
{ PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */
{ PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/
{ PCI_VDEVICE(ATHEROS, 0x0012) }, /* 5211 */
{ PCI_VDEVICE(ATHEROS, 0x0013) }, /* 5212 */
{ PCI_VDEVICE(3COM_2, 0x0013) }, /* 3com 5212 */
{ PCI_VDEVICE(3COM, 0x0013) }, /* 3com 3CRDAG675 5212 */
{ PCI_VDEVICE(ATHEROS, 0x1014) }, /* IBM minipci 5212 */
{ PCI_VDEVICE(ATHEROS, 0x0014) }, /* 5212 compatible */
{ PCI_VDEVICE(ATHEROS, 0x0015) }, /* 5212 compatible */
{ PCI_VDEVICE(ATHEROS, 0x0016) }, /* 5212 compatible */
{ PCI_VDEVICE(ATHEROS, 0x0017) }, /* 5212 compatible */
{ PCI_VDEVICE(ATHEROS, 0x0018) }, /* 5212 compatible */
{ PCI_VDEVICE(ATHEROS, 0x0019) }, /* 5212 compatible */
{ PCI_VDEVICE(ATHEROS, 0x001a) }, /* 2413 Griffin-lite */
{ PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */
{ PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */
{ PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */
{ PCI_VDEVICE(ATHEROS, 0xff1b) }, /* AR5BXB63 */
{ 0 }
};
MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
/* return bus cachesize in 4B word units */
static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz)
{
struct ath5k_hw *ah = (struct ath5k_hw *) common->priv;
u8 u8tmp;
pci_read_config_byte(ah->pdev, PCI_CACHE_LINE_SIZE, &u8tmp);
*csz = (int)u8tmp;
/*
* This check was put in to avoid "unpleasant" consequences if
* the bootrom has not fully initialized all PCI devices.
* Sometimes the cache line size register is not set
*/
if (*csz == 0)
*csz = L1_CACHE_BYTES >> 2; /* Use the default size */
}
/*
* Read from eeprom
*/
static bool
ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
{
struct ath5k_hw *ah = (struct ath5k_hw *) common->ah;
u32 status, timeout;
/*
* Initialize EEPROM access
*/
if (ah->ah_version == AR5K_AR5210) {
AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_EEAE);
(void)ath5k_hw_reg_read(ah, AR5K_EEPROM_BASE + (4 * offset));
} else {
ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE);
AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
AR5K_EEPROM_CMD_READ);
}
for (timeout = AR5K_TUNE_REGISTER_TIMEOUT; timeout > 0; timeout--) {
status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
if (status & AR5K_EEPROM_STAT_RDDONE) {
if (status & AR5K_EEPROM_STAT_RDERR)
return false;
*data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
0xffff);
return true;
}
usleep_range(15, 20);
}
return false;
}
int ath5k_hw_read_srev(struct ath5k_hw *ah)
{
ah->ah_mac_srev = ath5k_hw_reg_read(ah, AR5K_SREV);
return 0;
}
/*
* Read the MAC address from eeprom or platform_data
*/
static int ath5k_pci_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
{
u8 mac_d[ETH_ALEN] = {};
u32 total, offset;
u16 data;
int octet;
AR5K_EEPROM_READ(0x20, data);
for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
AR5K_EEPROM_READ(offset, data);
total += data;
mac_d[octet + 1] = data & 0xff;
mac_d[octet] = data >> 8;
octet += 2;
}
if (!total || total == 3 * 0xffff)
return -EINVAL;
memcpy(mac, mac_d, ETH_ALEN);
return 0;
}
/* Common ath_bus_opts structure */
static const struct ath_bus_ops ath_pci_bus_ops = {
.ath_bus_type = ATH_PCI,
.read_cachesize = ath5k_pci_read_cachesize,
.eeprom_read = ath5k_pci_eeprom_read,
.eeprom_read_mac = ath5k_pci_eeprom_read_mac,
};
/********************\
* PCI Initialization *
\********************/
static int
ath5k_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
void __iomem *mem;
struct ath5k_hw *ah;
struct ieee80211_hw *hw;
int ret;
u8 csz;
/*
* L0s needs to be disabled on all ath5k cards.
*
* For distributions shipping with CONFIG_PCIEASPM (this will be enabled
* by default in the future in 2.6.36) this will also mean both L1 and
* L0s will be disabled when a pre 1.1 PCIe device is detected. We do
* know L1 works correctly even for all ath5k pre 1.1 PCIe devices,
* but we cannot currently undo the effect of a blacklist; for
* details you can read pcie_aspm_sanity_check() and see how it adjusts
* the device link capability.
*
* It may be possible in the future to implement some PCI API to allow
* drivers to override blacklists for pre 1.1 PCIe but for now it is
* best to accept that both L0s and L1 will be disabled completely for
* distributions shipping with CONFIG_PCIEASPM rather than having this
* issue present. Motivation for adding this new API will be to help
* with power consumption for some of these devices.
*/
pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "can't enable device\n");
goto err;
}
/* XXX 32-bit addressing only */
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev, "32-bit DMA not available\n");
goto err_dis;
}
/*
* Cache line size is used to size and align various
* structures used to communicate with the hardware.
*/
pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
if (csz == 0) {
/*
* Linux 2.4.18 (at least) writes the cache line size
* register as a 16-bit wide register which is wrong.
* We must have this setup properly for rx buffer
* DMA to work so force a reasonable value here if it
* comes up zero.
*/
csz = L1_CACHE_BYTES >> 2;
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
}
/*
* The default setting of latency timer yields poor results,
* set it to the value used by other systems. It may be worth
* tweaking this setting more.
*/
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
/* Enable bus mastering */
pci_set_master(pdev);
/*
* Disable the RETRY_TIMEOUT register (0x41) to keep
* PCI Tx retries from interfering with C3 CPU state.
*/
pci_write_config_byte(pdev, 0x41, 0);
ret = pci_request_region(pdev, 0, "ath5k");
if (ret) {
dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
goto err_dis;
}
mem = pci_iomap(pdev, 0, 0);
if (!mem) {
dev_err(&pdev->dev, "cannot remap PCI memory region\n");
ret = -EIO;
goto err_reg;
}
/*
* Allocate hw (mac80211 main struct)
* and hw->priv (driver private data)
*/
hw = ieee80211_alloc_hw(sizeof(*ah), &ath5k_hw_ops);
if (hw == NULL) {
dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
ret = -ENOMEM;
goto err_map;
}
dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy));
ah = hw->priv;
ah->hw = hw;
ah->pdev = pdev;
ah->dev = &pdev->dev;
ah->irq = pdev->irq;
ah->devid = id->device;
ah->iobase = mem; /* So we can unmap it on detach */
/* Initialize */
ret = ath5k_init_ah(ah, &ath_pci_bus_ops);
if (ret)
goto err_free;
/* Set private data */
pci_set_drvdata(pdev, hw);
return 0;
err_free:
ieee80211_free_hw(hw);
err_map:
pci_iounmap(pdev, mem);
err_reg:
pci_release_region(pdev, 0);
err_dis:
pci_disable_device(pdev);
err:
return ret;
}
static void
ath5k_pci_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath5k_hw *ah = hw->priv;
ath5k_deinit_ah(ah);
pci_iounmap(pdev, ah->iobase);
pci_release_region(pdev, 0);
pci_disable_device(pdev);
ieee80211_free_hw(hw);
}
#ifdef CONFIG_PM_SLEEP
static int ath5k_pci_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath5k_hw *ah = hw->priv;
ath5k_led_off(ah);
return 0;
}
static int ath5k_pci_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath5k_hw *ah = hw->priv;
/*
* Suspend/Resume resets the PCI configuration space, so we have to
* re-disable the RETRY_TIMEOUT register (0x41) to keep
* PCI Tx retries from interfering with C3 CPU state
*/
pci_write_config_byte(pdev, 0x41, 0);
ath5k_led_enable(ah);
return 0;
}
static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
#define ATH5K_PM_OPS (&ath5k_pm_ops)
#else
#define ATH5K_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
static struct pci_driver ath5k_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = ath5k_pci_id_table,
.probe = ath5k_pci_probe,
.remove = ath5k_pci_remove,
.driver.pm = ATH5K_PM_OPS,
};
module_pci_driver(ath5k_pci_driver);

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,731 @@
/*
* Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/********************************************\
Queue Control Unit, DCF Control Unit Functions
\********************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
#include <linux/log2.h>
/**
* DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions
*
* Here we set up parameters for the 12 available TX queues. Note that
* on the various registers we can usually only map the first 10 of them so
* basically we have 10 queues to play with. Each queue has a matching
* QCU that controls when the queue will get triggered and multiple QCUs
* can be mapped to a single DCU that controls the various DFS parameters
* for the various queues. In our setup we have a 1:1 mapping between QCUs
* and DCUs allowing us to have different DFS settings for each queue.
*
* When a frame goes into a TX queue, QCU decides when it'll trigger a
* transmission based on various criteria (such as how much data is inside
* its buffer or -if it's a beacon queue- whether it's time to fire up the queue
* based on TSF etc), DCU adds backoff, IFSes etc and then a scheduler
* (arbitrator) decides the priority of each QCU based on its configuration
* (e.g. beacons are always transmitted when they leave DCU bypassing all other
* frames from other queues waiting to be transmitted). After a frame leaves
* the DCU it goes to PCU for further processing and then to PHY for
* the actual transmission.
*/
/******************\
* Helper functions *
\******************/
/**
* ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue
* @ah: The &struct ath5k_hw
* @queue: One of enum ath5k_tx_queue_id
*/
u32
ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
{
u32 pending;
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
/* Return if queue is declared inactive */
if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
return false;
/* XXX: How about AR5K_CFG_TXCNT ? */
if (ah->ah_version == AR5K_AR5210)
return false;
pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
pending &= AR5K_QCU_STS_FRMPENDCNT;
/* It's possible to have no frames pending even if TXE
* is set. To indicate that the queue has not stopped, return
* true */
if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
return true;
return pending;
}
/**
* ath5k_hw_release_tx_queue() - Set a transmit queue inactive
* @ah: The &struct ath5k_hw
* @queue: One of enum ath5k_tx_queue_id
*/
void
ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
return;
/* This queue will be skipped in further operations */
ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
/*For SIMR setup*/
AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
}
/**
* ath5k_cw_validate() - Make sure the given cw is valid
* @cw_req: The contention window value to check
*
* Make sure cw is a power of 2 minus 1 and smaller than 1024
*/
static u16
ath5k_cw_validate(u16 cw_req)
{
cw_req = min(cw_req, (u16)1023);
/* Check if cw_req + 1 a power of 2 */
if (is_power_of_2(cw_req + 1))
return cw_req;
/* Check if cw_req is a power of 2 */
if (is_power_of_2(cw_req))
return cw_req - 1;
/* If none of the above is correct
* find the closest power of 2 */
cw_req = (u16) roundup_pow_of_two(cw_req) - 1;
return cw_req;
}
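/*
 * Worked examples: a request of 20 is neither 2^n nor 2^n - 1, so it is
 * rounded up to the next power of two and 31 is returned; a request of
 * 4000 is first clamped to 1023, which already is 2^10 - 1 and is
 * returned unchanged.
 */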
/**
* ath5k_hw_get_tx_queueprops() - Get properties for a transmit queue
* @ah: The &struct ath5k_hw
* @queue: One of enum ath5k_tx_queue_id
* @queue_info: The &struct ath5k_txq_info to fill
*/
int
ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
struct ath5k_txq_info *queue_info)
{
memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
return 0;
}
/**
* ath5k_hw_set_tx_queueprops() - Set properties for a transmit queue
* @ah: The &struct ath5k_hw
* @queue: One of enum ath5k_tx_queue_id
* @qinfo: The &struct ath5k_txq_info to use
*
* Returns 0 on success or -EIO if queue is inactive
*/
int
ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
const struct ath5k_txq_info *qinfo)
{
struct ath5k_txq_info *qi;
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
qi = &ah->ah_txq[queue];
if (qi->tqi_type == AR5K_TX_QUEUE_INACTIVE)
return -EIO;
/* copy and validate values */
qi->tqi_type = qinfo->tqi_type;
qi->tqi_subtype = qinfo->tqi_subtype;
qi->tqi_flags = qinfo->tqi_flags;
/*
* According to the docs: Although the AIFS field is 8 bit wide,
* the maximum supported value is 0xFC. Setting it higher than that
* will cause the DCU to hang.
*/
qi->tqi_aifs = min(qinfo->tqi_aifs, (u8)0xFC);
qi->tqi_cw_min = ath5k_cw_validate(qinfo->tqi_cw_min);
qi->tqi_cw_max = ath5k_cw_validate(qinfo->tqi_cw_max);
qi->tqi_cbr_period = qinfo->tqi_cbr_period;
qi->tqi_cbr_overflow_limit = qinfo->tqi_cbr_overflow_limit;
qi->tqi_burst_time = qinfo->tqi_burst_time;
qi->tqi_ready_time = qinfo->tqi_ready_time;
/*XXX: Is this supported on 5210 ?*/
/*XXX: Is this correct for AR5K_WME_AC_VI,VO ???*/
if ((qinfo->tqi_type == AR5K_TX_QUEUE_DATA &&
((qinfo->tqi_subtype == AR5K_WME_AC_VI) ||
(qinfo->tqi_subtype == AR5K_WME_AC_VO))) ||
qinfo->tqi_type == AR5K_TX_QUEUE_UAPSD)
qi->tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
return 0;
}
/**
* ath5k_hw_setup_tx_queue() - Initialize a transmit queue
* @ah: The &struct ath5k_hw
* @queue_type: One of enum ath5k_tx_queue
* @queue_info: The &struct ath5k_txq_info to use
*
* Returns 0 on success, -EINVAL on invalid arguments
*/
int
ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
struct ath5k_txq_info *queue_info)
{
unsigned int queue;
int ret;
/*
* Get queue by type
*/
/* 5210 only has 2 queues */
if (ah->ah_capabilities.cap_queues.q_tx_num == 2) {
switch (queue_type) {
case AR5K_TX_QUEUE_DATA:
queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
break;
case AR5K_TX_QUEUE_BEACON:
case AR5K_TX_QUEUE_CAB:
queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
break;
default:
return -EINVAL;
}
} else {
switch (queue_type) {
case AR5K_TX_QUEUE_DATA:
queue = queue_info->tqi_subtype;
break;
case AR5K_TX_QUEUE_UAPSD:
queue = AR5K_TX_QUEUE_ID_UAPSD;
break;
case AR5K_TX_QUEUE_BEACON:
queue = AR5K_TX_QUEUE_ID_BEACON;
break;
case AR5K_TX_QUEUE_CAB:
queue = AR5K_TX_QUEUE_ID_CAB;
break;
default:
return -EINVAL;
}
}
/*
* Setup internal queue structure
*/
memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
ah->ah_txq[queue].tqi_type = queue_type;
if (queue_info != NULL) {
queue_info->tqi_type = queue_type;
ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info);
if (ret)
return ret;
}
/*
* We use ah_txq_status to hold a temp value for
* the secondary interrupt mask registers on 5211+;
* see ath5k_hw_reset_tx_queue
*/
AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);
return queue;
}
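/*
 * Minimal usage sketch of ath5k_hw_setup_tx_queue() and
 * ath5k_hw_reset_tx_queue() (illustrative only, not part of the driver);
 * it assumes AR5K_WME_AC_BE from ath5k.h and uses arbitrary example DCF
 * values.
 */
#if 0
static int example_setup_be_queue(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi = {
		.tqi_subtype	= AR5K_WME_AC_BE,	/* selects the BE QCU */
		.tqi_aifs	= 2,
		.tqi_cw_min	= 15,	/* must be 2^n - 1, see ath5k_cw_validate() */
		.tqi_cw_max	= 1023,
	};
	int q = ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_DATA, &qi);

	if (q < 0)
		return q;
	/* Program DCF settings and IMR bits for the new queue */
	return ath5k_hw_reset_tx_queue(ah, q);
}
#endif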
/*******************************\
* Single QCU/DCU initialization *
\*******************************/
/**
* ath5k_hw_set_tx_retry_limits() - Set tx retry limits on DCU
* @ah: The &struct ath5k_hw
* @queue: One of enum ath5k_tx_queue_id
*
* This function is used when initializing a queue, to set
* retry limits based on ah->ah_retry_* and the chipset used.
*/
void
ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
unsigned int queue)
{
/* Single data queue on AR5210 */
if (ah->ah_version == AR5K_AR5210) {
struct ath5k_txq_info *tq = &ah->ah_txq[queue];
if (queue > 0)
return;
ath5k_hw_reg_write(ah,
(tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
| AR5K_REG_SM(ah->ah_retry_long,
AR5K_NODCU_RETRY_LMT_SLG_RETRY)
| AR5K_REG_SM(ah->ah_retry_short,
AR5K_NODCU_RETRY_LMT_SSH_RETRY)
| AR5K_REG_SM(ah->ah_retry_long,
AR5K_NODCU_RETRY_LMT_LG_RETRY)
| AR5K_REG_SM(ah->ah_retry_short,
AR5K_NODCU_RETRY_LMT_SH_RETRY),
AR5K_NODCU_RETRY_LMT);
/* DCU on AR5211+ */
} else {
ath5k_hw_reg_write(ah,
AR5K_REG_SM(ah->ah_retry_long,
AR5K_DCU_RETRY_LMT_RTS)
| AR5K_REG_SM(ah->ah_retry_long,
AR5K_DCU_RETRY_LMT_STA_RTS)
| AR5K_REG_SM(max(ah->ah_retry_long, ah->ah_retry_short),
AR5K_DCU_RETRY_LMT_STA_DATA),
AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
}
}
/**
* ath5k_hw_reset_tx_queue() - Initialize a single hw queue
* @ah: The &struct ath5k_hw
* @queue: One of enum ath5k_tx_queue_id
*
* Set DCF properties for the given transmit queue on DCU
* and configures all queue-specific parameters.
*/
int
ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
struct ath5k_txq_info *tq;
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
tq = &ah->ah_txq[queue];
/* Skip if queue inactive or if we are on AR5210
* that doesn't have QCU/DCU */
if ((ah->ah_version == AR5K_AR5210) ||
(tq->tqi_type == AR5K_TX_QUEUE_INACTIVE))
return 0;
/*
* Set contention window (cw_min/cw_max)
* and arbitrated interframe space (aifs)...
*/
ath5k_hw_reg_write(ah,
AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS),
AR5K_QUEUE_DFS_LOCAL_IFS(queue));
/*
* Set tx retry limits for this queue
*/
ath5k_hw_set_tx_retry_limits(ah, queue);
/*
* Set misc registers
*/
/* Enable DCU to wait for next fragment from QCU */
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
AR5K_DCU_MISC_FRAG_WAIT);
/* On Maui and Spirit use the global seqnum on DCU */
if (ah->ah_mac_version < AR5K_SREV_AR5211)
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
AR5K_DCU_MISC_SEQNUM_CTL);
/* Constant bit rate period */
if (tq->tqi_cbr_period) {
ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
AR5K_QCU_CBRCFG_INTVAL) |
AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
AR5K_QCU_CBRCFG_ORN_THRES),
AR5K_QUEUE_CBRCFG(queue));
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_FRSHED_CBR);
if (tq->tqi_cbr_overflow_limit)
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_CBR_THRES_ENABLE);
}
/* Ready time interval */
if (tq->tqi_ready_time && (tq->tqi_type != AR5K_TX_QUEUE_CAB))
ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
AR5K_QCU_RDYTIMECFG_INTVAL) |
AR5K_QCU_RDYTIMECFG_ENABLE,
AR5K_QUEUE_RDYTIMECFG(queue));
if (tq->tqi_burst_time) {
ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
AR5K_DCU_CHAN_TIME_DUR) |
AR5K_DCU_CHAN_TIME_ENABLE,
AR5K_QUEUE_DFS_CHANNEL_TIME(queue));
if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_RDY_VEOL_POLICY);
}
/* Enable/disable Post frame backoff */
if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
AR5K_QUEUE_DFS_MISC(queue));
/* Enable/disable fragmentation burst backoff */
if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
AR5K_QUEUE_DFS_MISC(queue));
/*
* Set registers by queue type
*/
switch (tq->tqi_type) {
case AR5K_TX_QUEUE_BEACON:
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_FRSHED_DBA_GT |
AR5K_QCU_MISC_CBREXP_BCN_DIS |
AR5K_QCU_MISC_BCN_ENABLE);
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
AR5K_DCU_MISC_ARBLOCK_CTL_S) |
AR5K_DCU_MISC_ARBLOCK_IGNORE |
AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
AR5K_DCU_MISC_BCN_ENABLE);
break;
case AR5K_TX_QUEUE_CAB:
/* XXX: use BCN_SENT_GT, if we can figure out how */
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_FRSHED_DBA_GT |
AR5K_QCU_MISC_CBREXP_DIS |
AR5K_QCU_MISC_CBREXP_BCN_DIS);
ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
(AR5K_TUNE_SW_BEACON_RESP -
AR5K_TUNE_DMA_BEACON_RESP) -
AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
AR5K_QCU_RDYTIMECFG_ENABLE,
AR5K_QUEUE_RDYTIMECFG(queue));
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
AR5K_DCU_MISC_ARBLOCK_CTL_S));
break;
case AR5K_TX_QUEUE_UAPSD:
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_CBREXP_DIS);
break;
case AR5K_TX_QUEUE_DATA:
default:
break;
}
/* TODO: Handle frame compression */
/*
* Enable interrupts for this tx queue
* in the secondary interrupt mask registers
*/
if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);
if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);
if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);
if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);
/* Update secondary interrupt mask registers */
/* Filter out inactive queues */
ah->ah_txq_imr_txok &= ah->ah_txq_status;
ah->ah_txq_imr_txerr &= ah->ah_txq_status;
ah->ah_txq_imr_txurn &= ah->ah_txq_status;
ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
ah->ah_txq_imr_txeol &= ah->ah_txq_status;
ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
ah->ah_txq_imr_nofrm &= ah->ah_txq_status;
ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
AR5K_SIMR0_QCU_TXOK) |
AR5K_REG_SM(ah->ah_txq_imr_txdesc,
AR5K_SIMR0_QCU_TXDESC),
AR5K_SIMR0);
ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
AR5K_SIMR1_QCU_TXERR) |
AR5K_REG_SM(ah->ah_txq_imr_txeol,
AR5K_SIMR1_QCU_TXEOL),
AR5K_SIMR1);
/* Update SIMR2 but don't overwrite the rest of the SIMR2 settings */
AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
AR5K_REG_SM(ah->ah_txq_imr_txurn,
AR5K_SIMR2_QCU_TXURN));
ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
AR5K_SIMR3_QCBRORN) |
AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
AR5K_SIMR3_QCBRURN),
AR5K_SIMR3);
ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
AR5K_SIMR4_QTRIG), AR5K_SIMR4);
/* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);
/* If no queue has TXNOFRM enabled, disable the interrupt
* by setting AR5K_TXNOFRM to zero */
if (ah->ah_txq_imr_nofrm == 0)
ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);
/* Set QCU mask for this DCU to save power */
AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
return 0;
}
/**************************\
* Global QCU/DCU functions *
\**************************/
/**
* ath5k_hw_set_ifs_intervals() - Set global inter-frame spaces on DCU
* @ah: The &struct ath5k_hw
* @slot_time: Slot time in us
*
* Sets the global IFS intervals on DCU (also works on AR5210) for
* the given slot time and the current bwmode.
*/
int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
enum ieee80211_band band;
struct ieee80211_supported_band *sband;
struct ieee80211_rate *rate;
u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
u32 rate_flags, i;
if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
return -EINVAL;
sifs = ath5k_hw_get_default_sifs(ah);
sifs_clock = ath5k_hw_htoclock(ah, sifs - 2);
/* EIFS
* Txtime of ack at lowest rate + SIFS + DIFS
* (DIFS = SIFS + 2 * Slot time)
*
* Note: HAL has some predefined values for EIFS
* Turbo: (37 + 2 * 6)
* Default: (74 + 2 * 9)
* Half: (149 + 2 * 13)
* Quarter: (298 + 2 * 21)
*
* (74 + 2 * 6) for AR5210 default and turbo !
*
* According to the formula we have
* ack_tx_time = 25 for turbo and
* ack_tx_time = 42.5 * clock multiplier
* for default/half/quarter.
*
* This can't be right, 42 is what we would get
* from ath5k_hw_get_frame_dur_for_bwmode or
* ieee80211_generic_frame_duration for zero frame
* length and without SIFS !
*
* Also we have different lowest rate for 802.11a
*/
if (channel->band == IEEE80211_BAND_5GHZ)
band = IEEE80211_BAND_5GHZ;
else
band = IEEE80211_BAND_2GHZ;
switch (ah->ah_bwmode) {
case AR5K_BWMODE_5MHZ:
rate_flags = IEEE80211_RATE_SUPPORTS_5MHZ;
break;
case AR5K_BWMODE_10MHZ:
rate_flags = IEEE80211_RATE_SUPPORTS_10MHZ;
break;
default:
rate_flags = 0;
break;
}
sband = &ah->sbands[band];
rate = NULL;
for (i = 0; i < sband->n_bitrates; i++) {
if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
continue;
rate = &sband->bitrates[i];
break;
}
if (WARN_ON(!rate))
return -EINVAL;
ack_tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, false);
/* ack_tx_time includes an SIFS already */
eifs = ack_tx_time + sifs + 2 * slot_time;
eifs_clock = ath5k_hw_htoclock(ah, eifs);
/* Set IFS settings on AR5210 */
if (ah->ah_version == AR5K_AR5210) {
u32 pifs, pifs_clock, difs, difs_clock;
/* Set slot time */
ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);
/* Set EIFS */
eifs_clock = AR5K_REG_SM(eifs_clock, AR5K_IFS1_EIFS);
/* PIFS = Slot time + SIFS */
pifs = slot_time + sifs;
pifs_clock = ath5k_hw_htoclock(ah, pifs);
pifs_clock = AR5K_REG_SM(pifs_clock, AR5K_IFS1_PIFS);
/* DIFS = SIFS + 2 * Slot time */
difs = sifs + 2 * slot_time;
difs_clock = ath5k_hw_htoclock(ah, difs);
/* Set SIFS/DIFS */
ath5k_hw_reg_write(ah, (difs_clock <<
AR5K_IFS0_DIFS_S) | sifs_clock,
AR5K_IFS0);
/* Set PIFS/EIFS and preserve AR5K_INIT_CARR_SENSE_EN */
ath5k_hw_reg_write(ah, pifs_clock | eifs_clock |
(AR5K_INIT_CARR_SENSE_EN << AR5K_IFS1_CS_EN_S),
AR5K_IFS1);
return 0;
}
/* Set IFS slot time */
ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);
/* Set EIFS interval */
ath5k_hw_reg_write(ah, eifs_clock, AR5K_DCU_GBL_IFS_EIFS);
/* Set SIFS interval in usecs */
AR5K_REG_WRITE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC,
sifs);
/* Set SIFS interval in clock cycles */
ath5k_hw_reg_write(ah, sifs_clock, AR5K_DCU_GBL_IFS_SIFS);
return 0;
}
/**
* ath5k_hw_init_queues() - Initialize tx queues
* @ah: The &struct ath5k_hw
*
* Initializes all tx queues based on information on
* ah->ah_txq* set by the driver
*/
int
ath5k_hw_init_queues(struct ath5k_hw *ah)
{
int i, ret;
/* TODO: HW Compression support for data queues */
/* TODO: Burst prefetch for data queues */
/*
* Reset queues and start beacon timers at the end of the reset routine
* This also sets QCU mask on each DCU for 1:1 qcu to dcu mapping
* Note: If we want we can assign multiple qcus on one dcu.
*/
if (ah->ah_version != AR5K_AR5210)
for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
ret = ath5k_hw_reset_tx_queue(ah, i);
if (ret) {
ATH5K_ERR(ah,
"failed to reset TX queue #%d\n", i);
return ret;
}
}
else
/* No QCU/DCU on AR5210, just set tx
* retry limits. We set IFS parameters
* in ath5k_hw_set_ifs_intervals */
ath5k_hw_set_tx_retry_limits(ah, 0);
/* Set the turbo flag when operating on 40MHz */
if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
AR5K_REG_ENABLE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
AR5K_DCU_GBL_IFS_MISC_TURBO_MODE);
/* If we didn't set IFS timings through
* ath5k_hw_set_coverage_class make sure
* we set them here */
if (!ah->ah_coverage_class) {
unsigned int slot_time = ath5k_hw_get_default_slottime(ah);
ath5k_hw_set_ifs_intervals(ah, slot_time);
}
return 0;
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,853 @@
/*
* RF Buffer handling functions
*
* Copyright (c) 2009 Nick Kossifidis <mickflemm@gmail.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/**
* DOC: RF Buffer registers
*
* There are some special registers on the RF chip
* that control various operation settings related mostly to
* the analog parts (channel, gain adjustment etc).
*
* We don't write to those registers directly; instead
* we send a data packet to the chip, using a special register
* that holds all the settings we need. After we've sent the
* data packet, we write to another special register to notify the hw
* to apply the settings. This is done so that control registers
* can be dynamically programmed during operation and the settings
* are applied faster on the hw.
*
* We call each data packet an "RF Bank" and all the data we write
* (all RF Banks) "RF Buffer". This file holds initial RF Buffer
* data for the different RF chips, and various info to match RF
* Buffer offsets with specific RF registers so that we can access
* them. We tweak these settings in the rfregs_init function.
*
* Also check out reg.h and U.S. Patent 6677779 B1 (about buffer
* registers and control registers):
*
* http://www.google.com/patents?id=qNURAAAAEBAJ
*/
/**
* struct ath5k_ini_rfbuffer - Initial RF Buffer settings
* @rfb_bank: RF Bank number
* @rfb_ctrl_register: RF Buffer control register
* @rfb_mode_data: RF Buffer data for each mode
*
* Struct to hold default mode specific RF
* register values (RF Banks) for each chip.
*/
struct ath5k_ini_rfbuffer {
u8 rfb_bank;
u16 rfb_ctrl_register;
u32 rfb_mode_data[3];
};
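/*
 * Illustrative sketch (not driver code) of how a table of
 * struct ath5k_ini_rfbuffer entries could be pushed to the chip: each
 * entry's per-mode word is written to its control register, bank by bank.
 * The real logic, including the field tweaks described above, lives in
 * the rfregs_init code and may differ in detail.
 */
#if 0
static void example_write_rfbuffer(struct ath5k_hw *ah,
				   const struct ath5k_ini_rfbuffer *rfb,
				   unsigned int size, unsigned int mode)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		ath5k_hw_reg_write(ah, rfb[i].rfb_mode_data[mode],
				   rfb[i].rfb_ctrl_register);
}
#endif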
/**
* struct ath5k_rfb_field - An RF Buffer field (register/value)
* @len: Field length
* @pos: Offset on the raw packet
* @col: Used for shifting
*
* Struct to hold RF Buffer field
* infos used to access certain RF
* analog registers
*/
struct ath5k_rfb_field {
u8 len;
u16 pos;
u8 col;
};
/**
* struct ath5k_rf_reg - RF analog register definition
* @bank: RF Buffer Bank number
* @index: Register's index on ath5k_rf_regx_idx
* @field: The &struct ath5k_rfb_field
*
* We use this struct to define the set of RF registers
* on each chip that we want to tweak. Some RF registers
* are common between different chip versions so this saves
* us space and complexity because we can refer to an rf
* register by its index no matter what chip we work with
* as long as it has that register.
*/
struct ath5k_rf_reg {
u8 bank;
u8 index;
struct ath5k_rfb_field field;
};
/**
* enum ath5k_rf_regs_idx - Map RF registers to indexes
*
* We do this to handle common bits and make our
* life easier by using an index for each register
* instead of a full rfb_field
*/
enum ath5k_rf_regs_idx {
/* BANK 2 */
AR5K_RF_TURBO = 0,
/* BANK 6 */
AR5K_RF_OB_2GHZ,
AR5K_RF_OB_5GHZ,
AR5K_RF_DB_2GHZ,
AR5K_RF_DB_5GHZ,
AR5K_RF_FIXED_BIAS_A,
AR5K_RF_FIXED_BIAS_B,
AR5K_RF_PWD_XPD,
AR5K_RF_XPD_SEL,
AR5K_RF_XPD_GAIN,
AR5K_RF_PD_GAIN_LO,
AR5K_RF_PD_GAIN_HI,
AR5K_RF_HIGH_VC_CP,
AR5K_RF_MID_VC_CP,
AR5K_RF_LOW_VC_CP,
AR5K_RF_PUSH_UP,
AR5K_RF_PAD2GND,
AR5K_RF_XB2_LVL,
AR5K_RF_XB5_LVL,
AR5K_RF_PWD_ICLOBUF_2G,
AR5K_RF_PWD_84,
AR5K_RF_PWD_90,
AR5K_RF_PWD_130,
AR5K_RF_PWD_131,
AR5K_RF_PWD_132,
AR5K_RF_PWD_136,
AR5K_RF_PWD_137,
AR5K_RF_PWD_138,
AR5K_RF_PWD_166,
AR5K_RF_PWD_167,
AR5K_RF_DERBY_CHAN_SEL_MODE,
/* BANK 7 */
AR5K_RF_GAIN_I,
AR5K_RF_PLO_SEL,
AR5K_RF_RFGAIN_SEL,
AR5K_RF_RFGAIN_STEP,
AR5K_RF_WAIT_S,
AR5K_RF_WAIT_I,
AR5K_RF_MAX_TIME,
AR5K_RF_MIXVGA_OVR,
AR5K_RF_MIXGAIN_OVR,
AR5K_RF_MIXGAIN_STEP,
AR5K_RF_PD_DELAY_A,
AR5K_RF_PD_DELAY_B,
AR5K_RF_PD_DELAY_XR,
AR5K_RF_PD_PERIOD_A,
AR5K_RF_PD_PERIOD_B,
AR5K_RF_PD_PERIOD_XR,
};
/*******************\
* RF5111 (Sombrero) *
\*******************/
/* BANK 2 len pos col */
#define AR5K_RF5111_RF_TURBO { 1, 3, 0 }
/* BANK 6 len pos col */
#define AR5K_RF5111_OB_2GHZ { 3, 119, 0 }
#define AR5K_RF5111_DB_2GHZ { 3, 122, 0 }
#define AR5K_RF5111_OB_5GHZ { 3, 104, 0 }
#define AR5K_RF5111_DB_5GHZ { 3, 107, 0 }
#define AR5K_RF5111_PWD_XPD { 1, 95, 0 }
#define AR5K_RF5111_XPD_GAIN { 4, 96, 0 }
/* Access to PWD registers */
#define AR5K_RF5111_PWD(_n) { 1, (135 - _n), 3 }
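/* For example, AR5K_RF5111_PWD(84) expands to { 1, 51, 3 }: a 1-bit field
 * at buffer position 135 - 84 = 51 in column 3 (see struct ath5k_rfb_field
 * above). */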
/* BANK 7 len pos col */
#define AR5K_RF5111_GAIN_I { 6, 29, 0 }
#define AR5K_RF5111_PLO_SEL { 1, 4, 0 }
#define AR5K_RF5111_RFGAIN_SEL { 1, 36, 0 }
#define AR5K_RF5111_RFGAIN_STEP { 6, 37, 0 }
/* Only on AR5212 BaseBand and up */
#define AR5K_RF5111_WAIT_S { 5, 19, 0 }
#define AR5K_RF5111_WAIT_I { 5, 24, 0 }
#define AR5K_RF5111_MAX_TIME { 2, 49, 0 }
static const struct ath5k_rf_reg rf_regs_5111[] = {
{2, AR5K_RF_TURBO, AR5K_RF5111_RF_TURBO},
{6, AR5K_RF_OB_2GHZ, AR5K_RF5111_OB_2GHZ},
{6, AR5K_RF_DB_2GHZ, AR5K_RF5111_DB_2GHZ},
{6, AR5K_RF_OB_5GHZ, AR5K_RF5111_OB_5GHZ},
{6, AR5K_RF_DB_5GHZ, AR5K_RF5111_DB_5GHZ},
{6, AR5K_RF_PWD_XPD, AR5K_RF5111_PWD_XPD},
{6, AR5K_RF_XPD_GAIN, AR5K_RF5111_XPD_GAIN},
{6, AR5K_RF_PWD_84, AR5K_RF5111_PWD(84)},
{6, AR5K_RF_PWD_90, AR5K_RF5111_PWD(90)},
{7, AR5K_RF_GAIN_I, AR5K_RF5111_GAIN_I},
{7, AR5K_RF_PLO_SEL, AR5K_RF5111_PLO_SEL},
{7, AR5K_RF_RFGAIN_SEL, AR5K_RF5111_RFGAIN_SEL},
{7, AR5K_RF_RFGAIN_STEP, AR5K_RF5111_RFGAIN_STEP},
{7, AR5K_RF_WAIT_S, AR5K_RF5111_WAIT_S},
{7, AR5K_RF_WAIT_I, AR5K_RF5111_WAIT_I},
{7, AR5K_RF_MAX_TIME, AR5K_RF5111_MAX_TIME}
};
/* Default mode specific settings */
static const struct ath5k_ini_rfbuffer rfb_5111[] = {
/* BANK / C.R. A/XR B G */
{ 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 0, 0x989c, { 0x00380000, 0x00380000, 0x00380000 } },
{ 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 0, 0x989c, { 0x00000000, 0x000000c0, 0x00000080 } },
{ 0, 0x989c, { 0x000400f9, 0x000400ff, 0x000400fd } },
{ 0, 0x98d4, { 0x00000000, 0x00000004, 0x00000004 } },
{ 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
{ 2, 0x98d4, { 0x00000010, 0x00000010, 0x00000010 } },
{ 3, 0x98d8, { 0x00601068, 0x00601068, 0x00601068 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x10000000, 0x10000000, 0x10000000 } },
{ 6, 0x989c, { 0x04000000, 0x04000000, 0x04000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x0a000000, 0x00000000 } },
{ 6, 0x989c, { 0x003800c0, 0x023800c0, 0x003800c0 } },
{ 6, 0x989c, { 0x00020006, 0x00000006, 0x00020006 } },
{ 6, 0x989c, { 0x00000089, 0x00000089, 0x00000089 } },
{ 6, 0x989c, { 0x000000a0, 0x000000a0, 0x000000a0 } },
{ 6, 0x989c, { 0x00040007, 0x00040007, 0x00040007 } },
{ 6, 0x98d4, { 0x0000001a, 0x0000001a, 0x0000001a } },
{ 7, 0x989c, { 0x00000040, 0x00000040, 0x00000040 } },
{ 7, 0x989c, { 0x00000010, 0x00000010, 0x00000010 } },
{ 7, 0x989c, { 0x00000008, 0x00000008, 0x00000008 } },
{ 7, 0x989c, { 0x0000004f, 0x0000004f, 0x0000004f } },
{ 7, 0x989c, { 0x000000f1, 0x00000061, 0x000000f1 } },
{ 7, 0x989c, { 0x0000904f, 0x0000904c, 0x0000904f } },
{ 7, 0x989c, { 0x0000125a, 0x0000129a, 0x0000125a } },
{ 7, 0x98cc, { 0x0000000e, 0x0000000f, 0x0000000e } },
};
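/*
 * Illustrative sketch (not part of the driver): each entry of the mode
 * tables in this file carries one value per mode column (A/XR, B, G).
 * The struct and helper below only mirror that layout for the example;
 * the real struct ath5k_ini_rfbuffer lives elsewhere in ath5k and its
 * field names may differ.
 */
struct ath5k_rfb_entry_example {
	u8	bank;		/* RF buffer bank number */
	u16	ctrl_reg;	/* control register, e.g. 0x989c */
	u32	mode_data[3];	/* values for A/XR, B and G */
};

static inline u32
ath5k_rfb_mode_val_example(const struct ath5k_rfb_entry_example *tbl,
			unsigned int i, unsigned int mode /* 0=A/XR 1=B 2=G */)
{
	return tbl[i].mode_data[mode];
}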
/***********************\
* RF5112/RF2112 (Derby) *
\***********************/
/* BANK 2 (Common) len pos col */
#define AR5K_RF5112X_RF_TURBO { 1, 1, 2 }
/* BANK 7 (Common) len pos col */
#define AR5K_RF5112X_GAIN_I { 6, 14, 0 }
#define AR5K_RF5112X_MIXVGA_OVR { 1, 36, 0 }
#define AR5K_RF5112X_MIXGAIN_OVR { 2, 37, 0 }
#define AR5K_RF5112X_MIXGAIN_STEP { 4, 32, 0 }
#define AR5K_RF5112X_PD_DELAY_A { 4, 58, 0 }
#define AR5K_RF5112X_PD_DELAY_B { 4, 62, 0 }
#define AR5K_RF5112X_PD_DELAY_XR { 4, 66, 0 }
#define AR5K_RF5112X_PD_PERIOD_A { 4, 70, 0 }
#define AR5K_RF5112X_PD_PERIOD_B { 4, 74, 0 }
#define AR5K_RF5112X_PD_PERIOD_XR { 4, 78, 0 }
/* RFX112 (Derby 1) */
/* BANK 6 len pos col */
#define AR5K_RF5112_OB_2GHZ { 3, 269, 0 }
#define AR5K_RF5112_DB_2GHZ { 3, 272, 0 }
#define AR5K_RF5112_OB_5GHZ { 3, 261, 0 }
#define AR5K_RF5112_DB_5GHZ { 3, 264, 0 }
#define AR5K_RF5112_FIXED_BIAS_A { 1, 260, 0 }
#define AR5K_RF5112_FIXED_BIAS_B { 1, 259, 0 }
#define AR5K_RF5112_XPD_SEL { 1, 284, 0 }
#define AR5K_RF5112_XPD_GAIN { 2, 252, 0 }
/* Access to PWD registers */
#define AR5K_RF5112_PWD(_n) { 1, (302 - _n), 3 }
static const struct ath5k_rf_reg rf_regs_5112[] = {
{2, AR5K_RF_TURBO, AR5K_RF5112X_RF_TURBO},
{6, AR5K_RF_OB_2GHZ, AR5K_RF5112_OB_2GHZ},
{6, AR5K_RF_DB_2GHZ, AR5K_RF5112_DB_2GHZ},
{6, AR5K_RF_OB_5GHZ, AR5K_RF5112_OB_5GHZ},
{6, AR5K_RF_DB_5GHZ, AR5K_RF5112_DB_5GHZ},
{6, AR5K_RF_FIXED_BIAS_A, AR5K_RF5112_FIXED_BIAS_A},
{6, AR5K_RF_FIXED_BIAS_B, AR5K_RF5112_FIXED_BIAS_B},
{6, AR5K_RF_XPD_SEL, AR5K_RF5112_XPD_SEL},
{6, AR5K_RF_XPD_GAIN, AR5K_RF5112_XPD_GAIN},
{6, AR5K_RF_PWD_130, AR5K_RF5112_PWD(130)},
{6, AR5K_RF_PWD_131, AR5K_RF5112_PWD(131)},
{6, AR5K_RF_PWD_132, AR5K_RF5112_PWD(132)},
{6, AR5K_RF_PWD_136, AR5K_RF5112_PWD(136)},
{6, AR5K_RF_PWD_137, AR5K_RF5112_PWD(137)},
{6, AR5K_RF_PWD_138, AR5K_RF5112_PWD(138)},
{7, AR5K_RF_GAIN_I, AR5K_RF5112X_GAIN_I},
{7, AR5K_RF_MIXVGA_OVR, AR5K_RF5112X_MIXVGA_OVR},
{7, AR5K_RF_MIXGAIN_OVR, AR5K_RF5112X_MIXGAIN_OVR},
{7, AR5K_RF_MIXGAIN_STEP, AR5K_RF5112X_MIXGAIN_STEP},
{7, AR5K_RF_PD_DELAY_A, AR5K_RF5112X_PD_DELAY_A},
{7, AR5K_RF_PD_DELAY_B, AR5K_RF5112X_PD_DELAY_B},
{7, AR5K_RF_PD_DELAY_XR, AR5K_RF5112X_PD_DELAY_XR},
{7, AR5K_RF_PD_PERIOD_A, AR5K_RF5112X_PD_PERIOD_A},
{7, AR5K_RF_PD_PERIOD_B, AR5K_RF5112X_PD_PERIOD_B},
{7, AR5K_RF_PD_PERIOD_XR, AR5K_RF5112X_PD_PERIOD_XR},
};
/* Default mode specific settings */
static const struct ath5k_ini_rfbuffer rfb_5112[] = {
/* BANK / C.R. A/XR B G */
{ 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
{ 2, 0x98d0, { 0x03060408, 0x03060408, 0x03060408 } },
{ 3, 0x98dc, { 0x00a0c0c0, 0x00e0c0c0, 0x00e0c0c0 } },
{ 6, 0x989c, { 0x00a00000, 0x00a00000, 0x00a00000 } },
{ 6, 0x989c, { 0x000a0000, 0x000a0000, 0x000a0000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00660000, 0x00660000, 0x00660000 } },
{ 6, 0x989c, { 0x00db0000, 0x00db0000, 0x00db0000 } },
{ 6, 0x989c, { 0x00f10000, 0x00f10000, 0x00f10000 } },
{ 6, 0x989c, { 0x00120000, 0x00120000, 0x00120000 } },
{ 6, 0x989c, { 0x00120000, 0x00120000, 0x00120000 } },
{ 6, 0x989c, { 0x00730000, 0x00730000, 0x00730000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x000c0000, 0x000c0000, 0x000c0000 } },
{ 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
{ 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
{ 6, 0x989c, { 0x008b0000, 0x008b0000, 0x008b0000 } },
{ 6, 0x989c, { 0x00600000, 0x00600000, 0x00600000 } },
{ 6, 0x989c, { 0x000c0000, 0x000c0000, 0x000c0000 } },
{ 6, 0x989c, { 0x00840000, 0x00840000, 0x00840000 } },
{ 6, 0x989c, { 0x00640000, 0x00640000, 0x00640000 } },
{ 6, 0x989c, { 0x00200000, 0x00200000, 0x00200000 } },
{ 6, 0x989c, { 0x00240000, 0x00240000, 0x00240000 } },
{ 6, 0x989c, { 0x00250000, 0x00250000, 0x00250000 } },
{ 6, 0x989c, { 0x00110000, 0x00110000, 0x00110000 } },
{ 6, 0x989c, { 0x00110000, 0x00110000, 0x00110000 } },
{ 6, 0x989c, { 0x00510000, 0x00510000, 0x00510000 } },
{ 6, 0x989c, { 0x1c040000, 0x1c040000, 0x1c040000 } },
{ 6, 0x989c, { 0x000a0000, 0x000a0000, 0x000a0000 } },
{ 6, 0x989c, { 0x00a10000, 0x00a10000, 0x00a10000 } },
{ 6, 0x989c, { 0x00400000, 0x00400000, 0x00400000 } },
{ 6, 0x989c, { 0x03090000, 0x03090000, 0x03090000 } },
{ 6, 0x989c, { 0x06000000, 0x06000000, 0x06000000 } },
{ 6, 0x989c, { 0x000000b0, 0x000000a8, 0x000000a8 } },
{ 6, 0x989c, { 0x0000002e, 0x0000002e, 0x0000002e } },
{ 6, 0x989c, { 0x006c4a41, 0x006c4af1, 0x006c4a61 } },
{ 6, 0x989c, { 0x0050892a, 0x0050892b, 0x0050892b } },
{ 6, 0x989c, { 0x00842400, 0x00842400, 0x00842400 } },
{ 6, 0x989c, { 0x00c69200, 0x00c69200, 0x00c69200 } },
{ 6, 0x98d0, { 0x0002000c, 0x0002000c, 0x0002000c } },
{ 7, 0x989c, { 0x00000094, 0x00000094, 0x00000094 } },
{ 7, 0x989c, { 0x00000091, 0x00000091, 0x00000091 } },
{ 7, 0x989c, { 0x0000000a, 0x00000012, 0x00000012 } },
{ 7, 0x989c, { 0x00000080, 0x00000080, 0x00000080 } },
{ 7, 0x989c, { 0x000000c1, 0x000000c1, 0x000000c1 } },
{ 7, 0x989c, { 0x00000060, 0x00000060, 0x00000060 } },
{ 7, 0x989c, { 0x000000f0, 0x000000f0, 0x000000f0 } },
{ 7, 0x989c, { 0x00000022, 0x00000022, 0x00000022 } },
{ 7, 0x989c, { 0x00000092, 0x00000092, 0x00000092 } },
{ 7, 0x989c, { 0x000000d4, 0x000000d4, 0x000000d4 } },
{ 7, 0x989c, { 0x000014cc, 0x000014cc, 0x000014cc } },
{ 7, 0x989c, { 0x0000048c, 0x0000048c, 0x0000048c } },
{ 7, 0x98c4, { 0x00000003, 0x00000003, 0x00000003 } },
};
/* RFX112A (Derby 2) */
/* BANK 6 len pos col */
#define AR5K_RF5112A_OB_2GHZ { 3, 287, 0 }
#define AR5K_RF5112A_DB_2GHZ { 3, 290, 0 }
#define AR5K_RF5112A_OB_5GHZ { 3, 279, 0 }
#define AR5K_RF5112A_DB_5GHZ { 3, 282, 0 }
#define AR5K_RF5112A_FIXED_BIAS_A { 1, 278, 0 }
#define AR5K_RF5112A_FIXED_BIAS_B { 1, 277, 0 }
#define AR5K_RF5112A_XPD_SEL { 1, 302, 0 }
#define AR5K_RF5112A_PDGAINLO { 2, 270, 0 }
#define AR5K_RF5112A_PDGAINHI { 2, 257, 0 }
/* Access to PWD registers */
#define AR5K_RF5112A_PWD(_n) { 1, (306 - _n), 3 }
/* Voltage regulators */
#define AR5K_RF5112A_HIGH_VC_CP { 2, 90, 2 }
#define AR5K_RF5112A_MID_VC_CP { 2, 92, 2 }
#define AR5K_RF5112A_LOW_VC_CP { 2, 94, 2 }
#define AR5K_RF5112A_PUSH_UP { 1, 254, 2 }
/* Power consumption */
#define AR5K_RF5112A_PAD2GND { 1, 281, 1 }
#define AR5K_RF5112A_XB2_LVL { 2, 1, 3 }
#define AR5K_RF5112A_XB5_LVL { 2, 3, 3 }
static const struct ath5k_rf_reg rf_regs_5112a[] = {
{2, AR5K_RF_TURBO, AR5K_RF5112X_RF_TURBO},
{6, AR5K_RF_OB_2GHZ, AR5K_RF5112A_OB_2GHZ},
{6, AR5K_RF_DB_2GHZ, AR5K_RF5112A_DB_2GHZ},
{6, AR5K_RF_OB_5GHZ, AR5K_RF5112A_OB_5GHZ},
{6, AR5K_RF_DB_5GHZ, AR5K_RF5112A_DB_5GHZ},
{6, AR5K_RF_FIXED_BIAS_A, AR5K_RF5112A_FIXED_BIAS_A},
{6, AR5K_RF_FIXED_BIAS_B, AR5K_RF5112A_FIXED_BIAS_B},
{6, AR5K_RF_XPD_SEL, AR5K_RF5112A_XPD_SEL},
{6, AR5K_RF_PD_GAIN_LO, AR5K_RF5112A_PDGAINLO},
{6, AR5K_RF_PD_GAIN_HI, AR5K_RF5112A_PDGAINHI},
{6, AR5K_RF_PWD_130, AR5K_RF5112A_PWD(130)},
{6, AR5K_RF_PWD_131, AR5K_RF5112A_PWD(131)},
{6, AR5K_RF_PWD_132, AR5K_RF5112A_PWD(132)},
{6, AR5K_RF_PWD_136, AR5K_RF5112A_PWD(136)},
{6, AR5K_RF_PWD_137, AR5K_RF5112A_PWD(137)},
{6, AR5K_RF_PWD_138, AR5K_RF5112A_PWD(138)},
{6, AR5K_RF_PWD_166, AR5K_RF5112A_PWD(166)},
{6, AR5K_RF_PWD_167, AR5K_RF5112A_PWD(167)},
{6, AR5K_RF_HIGH_VC_CP, AR5K_RF5112A_HIGH_VC_CP},
{6, AR5K_RF_MID_VC_CP, AR5K_RF5112A_MID_VC_CP},
{6, AR5K_RF_LOW_VC_CP, AR5K_RF5112A_LOW_VC_CP},
{6, AR5K_RF_PUSH_UP, AR5K_RF5112A_PUSH_UP},
{6, AR5K_RF_PAD2GND, AR5K_RF5112A_PAD2GND},
{6, AR5K_RF_XB2_LVL, AR5K_RF5112A_XB2_LVL},
{6, AR5K_RF_XB5_LVL, AR5K_RF5112A_XB5_LVL},
{7, AR5K_RF_GAIN_I, AR5K_RF5112X_GAIN_I},
{7, AR5K_RF_MIXVGA_OVR, AR5K_RF5112X_MIXVGA_OVR},
{7, AR5K_RF_MIXGAIN_OVR, AR5K_RF5112X_MIXGAIN_OVR},
{7, AR5K_RF_MIXGAIN_STEP, AR5K_RF5112X_MIXGAIN_STEP},
{7, AR5K_RF_PD_DELAY_A, AR5K_RF5112X_PD_DELAY_A},
{7, AR5K_RF_PD_DELAY_B, AR5K_RF5112X_PD_DELAY_B},
{7, AR5K_RF_PD_DELAY_XR, AR5K_RF5112X_PD_DELAY_XR},
{7, AR5K_RF_PD_PERIOD_A, AR5K_RF5112X_PD_PERIOD_A},
{7, AR5K_RF_PD_PERIOD_B, AR5K_RF5112X_PD_PERIOD_B},
{7, AR5K_RF_PD_PERIOD_XR, AR5K_RF5112X_PD_PERIOD_XR},
};
/* Default mode specific settings */
static const struct ath5k_ini_rfbuffer rfb_5112a[] = {
/* BANK / C.R. A/XR B G */
{ 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
{ 2, 0x98d0, { 0x03060408, 0x03060408, 0x03060408 } },
{ 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
{ 6, 0x989c, { 0x0f000000, 0x0f000000, 0x0f000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00800000, 0x00800000, 0x00800000 } },
{ 6, 0x989c, { 0x002a0000, 0x002a0000, 0x002a0000 } },
{ 6, 0x989c, { 0x00010000, 0x00010000, 0x00010000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00180000, 0x00180000, 0x00180000 } },
{ 6, 0x989c, { 0x00600000, 0x006e0000, 0x006e0000 } },
{ 6, 0x989c, { 0x00c70000, 0x00c70000, 0x00c70000 } },
{ 6, 0x989c, { 0x004b0000, 0x004b0000, 0x004b0000 } },
{ 6, 0x989c, { 0x04480000, 0x04480000, 0x04480000 } },
{ 6, 0x989c, { 0x004c0000, 0x004c0000, 0x004c0000 } },
{ 6, 0x989c, { 0x00e40000, 0x00e40000, 0x00e40000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00fc0000, 0x00fc0000, 0x00fc0000 } },
{ 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
{ 6, 0x989c, { 0x043f0000, 0x043f0000, 0x043f0000 } },
{ 6, 0x989c, { 0x000c0000, 0x000c0000, 0x000c0000 } },
{ 6, 0x989c, { 0x02190000, 0x02190000, 0x02190000 } },
{ 6, 0x989c, { 0x00240000, 0x00240000, 0x00240000 } },
{ 6, 0x989c, { 0x00b40000, 0x00b40000, 0x00b40000 } },
{ 6, 0x989c, { 0x00990000, 0x00990000, 0x00990000 } },
{ 6, 0x989c, { 0x00500000, 0x00500000, 0x00500000 } },
{ 6, 0x989c, { 0x002a0000, 0x002a0000, 0x002a0000 } },
{ 6, 0x989c, { 0x00120000, 0x00120000, 0x00120000 } },
{ 6, 0x989c, { 0xc0320000, 0xc0320000, 0xc0320000 } },
{ 6, 0x989c, { 0x01740000, 0x01740000, 0x01740000 } },
{ 6, 0x989c, { 0x00110000, 0x00110000, 0x00110000 } },
{ 6, 0x989c, { 0x86280000, 0x86280000, 0x86280000 } },
{ 6, 0x989c, { 0x31840000, 0x31840000, 0x31840000 } },
{ 6, 0x989c, { 0x00f20080, 0x00f20080, 0x00f20080 } },
{ 6, 0x989c, { 0x00270019, 0x00270019, 0x00270019 } },
{ 6, 0x989c, { 0x00000003, 0x00000003, 0x00000003 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x000000b2, 0x000000b2, 0x000000b2 } },
{ 6, 0x989c, { 0x00b02084, 0x00b02084, 0x00b02084 } },
{ 6, 0x989c, { 0x004125a4, 0x004125a4, 0x004125a4 } },
{ 6, 0x989c, { 0x00119220, 0x00119220, 0x00119220 } },
{ 6, 0x989c, { 0x001a4800, 0x001a4800, 0x001a4800 } },
{ 6, 0x98d8, { 0x000b0230, 0x000b0230, 0x000b0230 } },
{ 7, 0x989c, { 0x00000094, 0x00000094, 0x00000094 } },
{ 7, 0x989c, { 0x00000091, 0x00000091, 0x00000091 } },
{ 7, 0x989c, { 0x00000012, 0x00000012, 0x00000012 } },
{ 7, 0x989c, { 0x00000080, 0x00000080, 0x00000080 } },
{ 7, 0x989c, { 0x000000d9, 0x000000d9, 0x000000d9 } },
{ 7, 0x989c, { 0x00000060, 0x00000060, 0x00000060 } },
{ 7, 0x989c, { 0x000000f0, 0x000000f0, 0x000000f0 } },
{ 7, 0x989c, { 0x000000a2, 0x000000a2, 0x000000a2 } },
{ 7, 0x989c, { 0x00000052, 0x00000052, 0x00000052 } },
{ 7, 0x989c, { 0x000000d4, 0x000000d4, 0x000000d4 } },
{ 7, 0x989c, { 0x000014cc, 0x000014cc, 0x000014cc } },
{ 7, 0x989c, { 0x0000048c, 0x0000048c, 0x0000048c } },
{ 7, 0x98c4, { 0x00000003, 0x00000003, 0x00000003 } },
};
/******************\
* RF2413 (Griffin) *
\******************/
/* BANK 2 len pos col */
#define AR5K_RF2413_RF_TURBO { 1, 1, 2 }
/* BANK 6 len pos col */
#define AR5K_RF2413_OB_2GHZ { 3, 168, 0 }
#define AR5K_RF2413_DB_2GHZ { 3, 165, 0 }
static const struct ath5k_rf_reg rf_regs_2413[] = {
{2, AR5K_RF_TURBO, AR5K_RF2413_RF_TURBO},
{6, AR5K_RF_OB_2GHZ, AR5K_RF2413_OB_2GHZ},
{6, AR5K_RF_DB_2GHZ, AR5K_RF2413_DB_2GHZ},
};
/* Default mode specific settings
* XXX: a/aTurbo ???
*/
static const struct ath5k_ini_rfbuffer rfb_2413[] = {
/* BANK / C.R. A/XR B G */
{ 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
{ 2, 0x98d0, { 0x02001408, 0x02001408, 0x02001408 } },
{ 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
{ 6, 0x989c, { 0xf0000000, 0xf0000000, 0xf0000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x03000000, 0x03000000, 0x03000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x40400000, 0x40400000, 0x40400000 } },
{ 6, 0x989c, { 0x65050000, 0x65050000, 0x65050000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00420000, 0x00420000, 0x00420000 } },
{ 6, 0x989c, { 0x00b50000, 0x00b50000, 0x00b50000 } },
{ 6, 0x989c, { 0x00030000, 0x00030000, 0x00030000 } },
{ 6, 0x989c, { 0x00f70000, 0x00f70000, 0x00f70000 } },
{ 6, 0x989c, { 0x009d0000, 0x009d0000, 0x009d0000 } },
{ 6, 0x989c, { 0x00220000, 0x00220000, 0x00220000 } },
{ 6, 0x989c, { 0x04220000, 0x04220000, 0x04220000 } },
{ 6, 0x989c, { 0x00230018, 0x00230018, 0x00230018 } },
{ 6, 0x989c, { 0x00280000, 0x00280060, 0x00280060 } },
{ 6, 0x989c, { 0x005000c0, 0x005000c3, 0x005000c3 } },
{ 6, 0x989c, { 0x0004007f, 0x0004007f, 0x0004007f } },
{ 6, 0x989c, { 0x00000458, 0x00000458, 0x00000458 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x0000c000, 0x0000c000, 0x0000c000 } },
{ 6, 0x98d8, { 0x00400230, 0x00400230, 0x00400230 } },
{ 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
{ 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
{ 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
};
/***************************\
* RF2315/RF2316 (Cobra SoC) *
\***************************/
/* BANK 2 len pos col */
#define AR5K_RF2316_RF_TURBO { 1, 1, 2 }
/* BANK 6 len pos col */
#define AR5K_RF2316_OB_2GHZ { 3, 178, 0 }
#define AR5K_RF2316_DB_2GHZ { 3, 175, 0 }
static const struct ath5k_rf_reg rf_regs_2316[] = {
{2, AR5K_RF_TURBO, AR5K_RF2316_RF_TURBO},
{6, AR5K_RF_OB_2GHZ, AR5K_RF2316_OB_2GHZ},
{6, AR5K_RF_DB_2GHZ, AR5K_RF2316_DB_2GHZ},
};
/* Default mode specific settings */
static const struct ath5k_ini_rfbuffer rfb_2316[] = {
/* BANK / C.R. A/XR B G */
{ 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
{ 2, 0x98d0, { 0x02001408, 0x02001408, 0x02001408 } },
{ 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0xc0000000, 0xc0000000, 0xc0000000 } },
{ 6, 0x989c, { 0x0f000000, 0x0f000000, 0x0f000000 } },
{ 6, 0x989c, { 0x02000000, 0x02000000, 0x02000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0xf8000000, 0xf8000000, 0xf8000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x95150000, 0x95150000, 0x95150000 } },
{ 6, 0x989c, { 0xc1000000, 0xc1000000, 0xc1000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00080000, 0x00080000, 0x00080000 } },
{ 6, 0x989c, { 0x00d50000, 0x00d50000, 0x00d50000 } },
{ 6, 0x989c, { 0x000e0000, 0x000e0000, 0x000e0000 } },
{ 6, 0x989c, { 0x00dc0000, 0x00dc0000, 0x00dc0000 } },
{ 6, 0x989c, { 0x00770000, 0x00770000, 0x00770000 } },
{ 6, 0x989c, { 0x008a0000, 0x008a0000, 0x008a0000 } },
{ 6, 0x989c, { 0x10880000, 0x10880000, 0x10880000 } },
{ 6, 0x989c, { 0x008c0060, 0x008c0060, 0x008c0060 } },
{ 6, 0x989c, { 0x00a00000, 0x00a00080, 0x00a00080 } },
{ 6, 0x989c, { 0x00400000, 0x0040000d, 0x0040000d } },
{ 6, 0x989c, { 0x00110400, 0x00110400, 0x00110400 } },
{ 6, 0x989c, { 0x00000060, 0x00000060, 0x00000060 } },
{ 6, 0x989c, { 0x00000001, 0x00000001, 0x00000001 } },
{ 6, 0x989c, { 0x00000b00, 0x00000b00, 0x00000b00 } },
{ 6, 0x989c, { 0x00000be8, 0x00000be8, 0x00000be8 } },
{ 6, 0x98c0, { 0x00010000, 0x00010000, 0x00010000 } },
{ 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
{ 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
{ 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
};
/******************************\
* RF5413/RF5424 (Eagle/Condor) *
\******************************/
/* BANK 6 len pos col */
#define AR5K_RF5413_OB_2GHZ { 3, 241, 0 }
#define AR5K_RF5413_DB_2GHZ { 3, 238, 0 }
#define AR5K_RF5413_OB_5GHZ { 3, 247, 0 }
#define AR5K_RF5413_DB_5GHZ { 3, 244, 0 }
#define AR5K_RF5413_PWD_ICLOBUF2G { 3, 131, 3 }
#define AR5K_RF5413_DERBY_CHAN_SEL_MODE { 1, 291, 2 }
static const struct ath5k_rf_reg rf_regs_5413[] = {
{6, AR5K_RF_OB_2GHZ, AR5K_RF5413_OB_2GHZ},
{6, AR5K_RF_DB_2GHZ, AR5K_RF5413_DB_2GHZ},
{6, AR5K_RF_OB_5GHZ, AR5K_RF5413_OB_5GHZ},
{6, AR5K_RF_DB_5GHZ, AR5K_RF5413_DB_5GHZ},
{6, AR5K_RF_PWD_ICLOBUF_2G, AR5K_RF5413_PWD_ICLOBUF2G},
{6, AR5K_RF_DERBY_CHAN_SEL_MODE, AR5K_RF5413_DERBY_CHAN_SEL_MODE},
};
/* Default mode specific settings */
static const struct ath5k_ini_rfbuffer rfb_5413[] = {
/* BANK / C.R. A/XR B G */
{ 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
{ 2, 0x98d0, { 0x00000008, 0x00000008, 0x00000008 } },
{ 3, 0x98dc, { 0x00a000c0, 0x00e000c0, 0x00e000c0 } },
{ 6, 0x989c, { 0x33000000, 0x33000000, 0x33000000 } },
{ 6, 0x989c, { 0x01000000, 0x01000000, 0x01000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x1f000000, 0x1f000000, 0x1f000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00b80000, 0x00b80000, 0x00b80000 } },
{ 6, 0x989c, { 0x00b70000, 0x00b70000, 0x00b70000 } },
{ 6, 0x989c, { 0x00840000, 0x00840000, 0x00840000 } },
{ 6, 0x989c, { 0x00980000, 0x00980000, 0x00980000 } },
{ 6, 0x989c, { 0x00c00000, 0x00c00000, 0x00c00000 } },
{ 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
{ 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
{ 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
{ 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
{ 6, 0x989c, { 0x00d70000, 0x00d70000, 0x00d70000 } },
{ 6, 0x989c, { 0x00610000, 0x00610000, 0x00610000 } },
{ 6, 0x989c, { 0x00fe0000, 0x00fe0000, 0x00fe0000 } },
{ 6, 0x989c, { 0x00de0000, 0x00de0000, 0x00de0000 } },
{ 6, 0x989c, { 0x007f0000, 0x007f0000, 0x007f0000 } },
{ 6, 0x989c, { 0x043d0000, 0x043d0000, 0x043d0000 } },
{ 6, 0x989c, { 0x00770000, 0x00770000, 0x00770000 } },
{ 6, 0x989c, { 0x00440000, 0x00440000, 0x00440000 } },
{ 6, 0x989c, { 0x00980000, 0x00980000, 0x00980000 } },
{ 6, 0x989c, { 0x00100080, 0x00100080, 0x00100080 } },
{ 6, 0x989c, { 0x0005c034, 0x0005c034, 0x0005c034 } },
{ 6, 0x989c, { 0x003100f0, 0x003100f0, 0x003100f0 } },
{ 6, 0x989c, { 0x000c011f, 0x000c011f, 0x000c011f } },
{ 6, 0x989c, { 0x00510040, 0x00510040, 0x00510040 } },
{ 6, 0x989c, { 0x005000da, 0x005000da, 0x005000da } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00004044, 0x00004044, 0x00004044 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x000060c0, 0x000060c0, 0x000060c0 } },
{ 6, 0x989c, { 0x00002c00, 0x00003600, 0x00003600 } },
{ 6, 0x98c8, { 0x00000403, 0x00040403, 0x00040403 } },
{ 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
{ 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
{ 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
};
/***************************\
* RF2425/RF2417 (Swan/Nala) *
* AR2317 (Spider SoC) *
\***************************/
/* BANK 2 len pos col */
#define AR5K_RF2425_RF_TURBO { 1, 1, 2 }
/* BANK 6 len pos col */
#define AR5K_RF2425_OB_2GHZ { 3, 193, 0 }
#define AR5K_RF2425_DB_2GHZ { 3, 190, 0 }
static const struct ath5k_rf_reg rf_regs_2425[] = {
{2, AR5K_RF_TURBO, AR5K_RF2425_RF_TURBO},
{6, AR5K_RF_OB_2GHZ, AR5K_RF2425_OB_2GHZ},
{6, AR5K_RF_DB_2GHZ, AR5K_RF2425_DB_2GHZ},
};
/* Default mode specific settings */
static const struct ath5k_ini_rfbuffer rfb_2425[] = {
/* BANK / C.R. A/XR B G */
{ 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
{ 2, 0x98d0, { 0x02001408, 0x02001408, 0x02001408 } },
{ 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
{ 6, 0x989c, { 0x10000000, 0x10000000, 0x10000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x002a0000, 0x002a0000, 0x002a0000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00100000, 0x00100000, 0x00100000 } },
{ 6, 0x989c, { 0x00020000, 0x00020000, 0x00020000 } },
{ 6, 0x989c, { 0x00730000, 0x00730000, 0x00730000 } },
{ 6, 0x989c, { 0x00f80000, 0x00f80000, 0x00f80000 } },
{ 6, 0x989c, { 0x00e70000, 0x00e70000, 0x00e70000 } },
{ 6, 0x989c, { 0x00140000, 0x00140000, 0x00140000 } },
{ 6, 0x989c, { 0x00910040, 0x00910040, 0x00910040 } },
{ 6, 0x989c, { 0x0007001a, 0x0007001a, 0x0007001a } },
{ 6, 0x989c, { 0x00410000, 0x00410000, 0x00410000 } },
{ 6, 0x989c, { 0x00810000, 0x00810060, 0x00810060 } },
{ 6, 0x989c, { 0x00020800, 0x00020803, 0x00020803 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00001660, 0x00001660, 0x00001660 } },
{ 6, 0x989c, { 0x00001688, 0x00001688, 0x00001688 } },
{ 6, 0x98c4, { 0x00000001, 0x00000001, 0x00000001 } },
{ 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
{ 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
{ 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
};
/*
* TODO: Handle the few differences with swan during
* bank modification and get rid of this
*/
static const struct ath5k_ini_rfbuffer rfb_2317[] = {
/* BANK / C.R. A/XR B G */
{ 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
{ 2, 0x98d0, { 0x02001408, 0x02001408, 0x02001408 } },
{ 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
{ 6, 0x989c, { 0x10000000, 0x10000000, 0x10000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x002a0000, 0x002a0000, 0x002a0000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00100000, 0x00100000, 0x00100000 } },
{ 6, 0x989c, { 0x00020000, 0x00020000, 0x00020000 } },
{ 6, 0x989c, { 0x00730000, 0x00730000, 0x00730000 } },
{ 6, 0x989c, { 0x00f80000, 0x00f80000, 0x00f80000 } },
{ 6, 0x989c, { 0x00e70000, 0x00e70000, 0x00e70000 } },
{ 6, 0x989c, { 0x00140100, 0x00140100, 0x00140100 } },
{ 6, 0x989c, { 0x00910040, 0x00910040, 0x00910040 } },
{ 6, 0x989c, { 0x0007001a, 0x0007001a, 0x0007001a } },
{ 6, 0x989c, { 0x00410000, 0x00410000, 0x00410000 } },
{ 6, 0x989c, { 0x00810000, 0x00810060, 0x00810060 } },
{ 6, 0x989c, { 0x00020800, 0x00020803, 0x00020803 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00001660, 0x00001660, 0x00001660 } },
{ 6, 0x989c, { 0x00009688, 0x00009688, 0x00009688 } },
{ 6, 0x98c4, { 0x00000001, 0x00000001, 0x00000001 } },
{ 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
{ 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
{ 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
};
/*
* TODO: Handle the few differences with swan during
* bank modification and get rid of this
*/
static const struct ath5k_ini_rfbuffer rfb_2417[] = {
/* BANK / C.R. A/XR B G */
{ 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
{ 2, 0x98d0, { 0x02001408, 0x02001408, 0x02001408 } },
{ 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
{ 6, 0x989c, { 0x10000000, 0x10000000, 0x10000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x002a0000, 0x002a0000, 0x002a0000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00100000, 0x00100000, 0x00100000 } },
{ 6, 0x989c, { 0x00020000, 0x00020000, 0x00020000 } },
{ 6, 0x989c, { 0x00730000, 0x00730000, 0x00730000 } },
{ 6, 0x989c, { 0x00f80000, 0x00f80000, 0x00f80000 } },
{ 6, 0x989c, { 0x00e70000, 0x80e70000, 0x80e70000 } },
{ 6, 0x989c, { 0x00140000, 0x00140000, 0x00140000 } },
{ 6, 0x989c, { 0x00910040, 0x00910040, 0x00910040 } },
{ 6, 0x989c, { 0x0007001a, 0x0207001a, 0x0207001a } },
{ 6, 0x989c, { 0x00410000, 0x00410000, 0x00410000 } },
{ 6, 0x989c, { 0x00810000, 0x00810060, 0x00810060 } },
{ 6, 0x989c, { 0x00020800, 0x00020803, 0x00020803 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
{ 6, 0x989c, { 0x00001660, 0x00001660, 0x00001660 } },
{ 6, 0x989c, { 0x00001688, 0x00001688, 0x00001688 } },
{ 6, 0x98c4, { 0x00000001, 0x00000001, 0x00000001 } },
{ 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
{ 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
{ 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
};

View file

@ -0,0 +1,534 @@
/*
* RF Gain optimization
*
* Copyright (c) 2004-2009 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/**
* struct ath5k_ini_rfgain - RF Gain table
* @rfg_register: RF Gain register address
* @rfg_value: Register values for 5GHz and 2GHz
*
* Mode-specific RF Gain table (64 bytes) for RF5111/5112
* (RF5110 only comes with AR5210 and only supports a/turbo a mode so initial
* RF Gain values are included in AR5K_AR5210_INI)
*/
struct ath5k_ini_rfgain {
u16 rfg_register;
u32 rfg_value[2]; /* [freq (see below)] */
};
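/*
 * Illustrative sketch (not part of the driver): writing one of the
 * band-indexed tables below to hardware. Index 0 of rfg_value holds the
 * 5GHz value and index 1 the 2GHz value (see the column comments). The
 * write callback stands in for the driver's own register accessor.
 */
static inline void
ath5k_rfgain_write_example(const struct ath5k_ini_rfgain *tbl, unsigned int n,
			unsigned int band /* 0: 5GHz, 1: 2GHz */,
			void (*reg_write)(u32 val, u16 reg))
{
	unsigned int i;

	for (i = 0; i < n; i++)
		reg_write(tbl[i].rfg_value[band], tbl[i].rfg_register);
}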
/* Initial RF Gain settings for RF5111 */
static const struct ath5k_ini_rfgain rfgain_5111[] = {
/* 5GHz 2GHz */
{ AR5K_RF_GAIN(0), { 0x000001a9, 0x00000000 } },
{ AR5K_RF_GAIN(1), { 0x000001e9, 0x00000040 } },
{ AR5K_RF_GAIN(2), { 0x00000029, 0x00000080 } },
{ AR5K_RF_GAIN(3), { 0x00000069, 0x00000150 } },
{ AR5K_RF_GAIN(4), { 0x00000199, 0x00000190 } },
{ AR5K_RF_GAIN(5), { 0x000001d9, 0x000001d0 } },
{ AR5K_RF_GAIN(6), { 0x00000019, 0x00000010 } },
{ AR5K_RF_GAIN(7), { 0x00000059, 0x00000044 } },
{ AR5K_RF_GAIN(8), { 0x00000099, 0x00000084 } },
{ AR5K_RF_GAIN(9), { 0x000001a5, 0x00000148 } },
{ AR5K_RF_GAIN(10), { 0x000001e5, 0x00000188 } },
{ AR5K_RF_GAIN(11), { 0x00000025, 0x000001c8 } },
{ AR5K_RF_GAIN(12), { 0x000001c8, 0x00000014 } },
{ AR5K_RF_GAIN(13), { 0x00000008, 0x00000042 } },
{ AR5K_RF_GAIN(14), { 0x00000048, 0x00000082 } },
{ AR5K_RF_GAIN(15), { 0x00000088, 0x00000178 } },
{ AR5K_RF_GAIN(16), { 0x00000198, 0x000001b8 } },
{ AR5K_RF_GAIN(17), { 0x000001d8, 0x000001f8 } },
{ AR5K_RF_GAIN(18), { 0x00000018, 0x00000012 } },
{ AR5K_RF_GAIN(19), { 0x00000058, 0x00000052 } },
{ AR5K_RF_GAIN(20), { 0x00000098, 0x00000092 } },
{ AR5K_RF_GAIN(21), { 0x000001a4, 0x0000017c } },
{ AR5K_RF_GAIN(22), { 0x000001e4, 0x000001bc } },
{ AR5K_RF_GAIN(23), { 0x00000024, 0x000001fc } },
{ AR5K_RF_GAIN(24), { 0x00000064, 0x0000000a } },
{ AR5K_RF_GAIN(25), { 0x000000a4, 0x0000004a } },
{ AR5K_RF_GAIN(26), { 0x000000e4, 0x0000008a } },
{ AR5K_RF_GAIN(27), { 0x0000010a, 0x0000015a } },
{ AR5K_RF_GAIN(28), { 0x0000014a, 0x0000019a } },
{ AR5K_RF_GAIN(29), { 0x0000018a, 0x000001da } },
{ AR5K_RF_GAIN(30), { 0x000001ca, 0x0000000e } },
{ AR5K_RF_GAIN(31), { 0x0000000a, 0x0000004e } },
{ AR5K_RF_GAIN(32), { 0x0000004a, 0x0000008e } },
{ AR5K_RF_GAIN(33), { 0x0000008a, 0x0000015e } },
{ AR5K_RF_GAIN(34), { 0x000001ba, 0x0000019e } },
{ AR5K_RF_GAIN(35), { 0x000001fa, 0x000001de } },
{ AR5K_RF_GAIN(36), { 0x0000003a, 0x00000009 } },
{ AR5K_RF_GAIN(37), { 0x0000007a, 0x00000049 } },
{ AR5K_RF_GAIN(38), { 0x00000186, 0x00000089 } },
{ AR5K_RF_GAIN(39), { 0x000001c6, 0x00000179 } },
{ AR5K_RF_GAIN(40), { 0x00000006, 0x000001b9 } },
{ AR5K_RF_GAIN(41), { 0x00000046, 0x000001f9 } },
{ AR5K_RF_GAIN(42), { 0x00000086, 0x00000039 } },
{ AR5K_RF_GAIN(43), { 0x000000c6, 0x00000079 } },
{ AR5K_RF_GAIN(44), { 0x000000c6, 0x000000b9 } },
{ AR5K_RF_GAIN(45), { 0x000000c6, 0x000001bd } },
{ AR5K_RF_GAIN(46), { 0x000000c6, 0x000001fd } },
{ AR5K_RF_GAIN(47), { 0x000000c6, 0x0000003d } },
{ AR5K_RF_GAIN(48), { 0x000000c6, 0x0000007d } },
{ AR5K_RF_GAIN(49), { 0x000000c6, 0x000000bd } },
{ AR5K_RF_GAIN(50), { 0x000000c6, 0x000000fd } },
{ AR5K_RF_GAIN(51), { 0x000000c6, 0x000000fd } },
{ AR5K_RF_GAIN(52), { 0x000000c6, 0x000000fd } },
{ AR5K_RF_GAIN(53), { 0x000000c6, 0x000000fd } },
{ AR5K_RF_GAIN(54), { 0x000000c6, 0x000000fd } },
{ AR5K_RF_GAIN(55), { 0x000000c6, 0x000000fd } },
{ AR5K_RF_GAIN(56), { 0x000000c6, 0x000000fd } },
{ AR5K_RF_GAIN(57), { 0x000000c6, 0x000000fd } },
{ AR5K_RF_GAIN(58), { 0x000000c6, 0x000000fd } },
{ AR5K_RF_GAIN(59), { 0x000000c6, 0x000000fd } },
{ AR5K_RF_GAIN(60), { 0x000000c6, 0x000000fd } },
{ AR5K_RF_GAIN(61), { 0x000000c6, 0x000000fd } },
{ AR5K_RF_GAIN(62), { 0x000000c6, 0x000000fd } },
{ AR5K_RF_GAIN(63), { 0x000000c6, 0x000000fd } },
};
/* Initial RF Gain settings for RF5112 */
static const struct ath5k_ini_rfgain rfgain_5112[] = {
/* 5GHz 2GHz */
{ AR5K_RF_GAIN(0), { 0x00000007, 0x00000007 } },
{ AR5K_RF_GAIN(1), { 0x00000047, 0x00000047 } },
{ AR5K_RF_GAIN(2), { 0x00000087, 0x00000087 } },
{ AR5K_RF_GAIN(3), { 0x000001a0, 0x000001a0 } },
{ AR5K_RF_GAIN(4), { 0x000001e0, 0x000001e0 } },
{ AR5K_RF_GAIN(5), { 0x00000020, 0x00000020 } },
{ AR5K_RF_GAIN(6), { 0x00000060, 0x00000060 } },
{ AR5K_RF_GAIN(7), { 0x000001a1, 0x000001a1 } },
{ AR5K_RF_GAIN(8), { 0x000001e1, 0x000001e1 } },
{ AR5K_RF_GAIN(9), { 0x00000021, 0x00000021 } },
{ AR5K_RF_GAIN(10), { 0x00000061, 0x00000061 } },
{ AR5K_RF_GAIN(11), { 0x00000162, 0x00000162 } },
{ AR5K_RF_GAIN(12), { 0x000001a2, 0x000001a2 } },
{ AR5K_RF_GAIN(13), { 0x000001e2, 0x000001e2 } },
{ AR5K_RF_GAIN(14), { 0x00000022, 0x00000022 } },
{ AR5K_RF_GAIN(15), { 0x00000062, 0x00000062 } },
{ AR5K_RF_GAIN(16), { 0x00000163, 0x00000163 } },
{ AR5K_RF_GAIN(17), { 0x000001a3, 0x000001a3 } },
{ AR5K_RF_GAIN(18), { 0x000001e3, 0x000001e3 } },
{ AR5K_RF_GAIN(19), { 0x00000023, 0x00000023 } },
{ AR5K_RF_GAIN(20), { 0x00000063, 0x00000063 } },
{ AR5K_RF_GAIN(21), { 0x00000184, 0x00000184 } },
{ AR5K_RF_GAIN(22), { 0x000001c4, 0x000001c4 } },
{ AR5K_RF_GAIN(23), { 0x00000004, 0x00000004 } },
{ AR5K_RF_GAIN(24), { 0x000001ea, 0x0000000b } },
{ AR5K_RF_GAIN(25), { 0x0000002a, 0x0000004b } },
{ AR5K_RF_GAIN(26), { 0x0000006a, 0x0000008b } },
{ AR5K_RF_GAIN(27), { 0x000000aa, 0x000001ac } },
{ AR5K_RF_GAIN(28), { 0x000001ab, 0x000001ec } },
{ AR5K_RF_GAIN(29), { 0x000001eb, 0x0000002c } },
{ AR5K_RF_GAIN(30), { 0x0000002b, 0x00000012 } },
{ AR5K_RF_GAIN(31), { 0x0000006b, 0x00000052 } },
{ AR5K_RF_GAIN(32), { 0x000000ab, 0x00000092 } },
{ AR5K_RF_GAIN(33), { 0x000001ac, 0x00000193 } },
{ AR5K_RF_GAIN(34), { 0x000001ec, 0x000001d3 } },
{ AR5K_RF_GAIN(35), { 0x0000002c, 0x00000013 } },
{ AR5K_RF_GAIN(36), { 0x0000003a, 0x00000053 } },
{ AR5K_RF_GAIN(37), { 0x0000007a, 0x00000093 } },
{ AR5K_RF_GAIN(38), { 0x000000ba, 0x00000194 } },
{ AR5K_RF_GAIN(39), { 0x000001bb, 0x000001d4 } },
{ AR5K_RF_GAIN(40), { 0x000001fb, 0x00000014 } },
{ AR5K_RF_GAIN(41), { 0x0000003b, 0x0000003a } },
{ AR5K_RF_GAIN(42), { 0x0000007b, 0x0000007a } },
{ AR5K_RF_GAIN(43), { 0x000000bb, 0x000000ba } },
{ AR5K_RF_GAIN(44), { 0x000001bc, 0x000001bb } },
{ AR5K_RF_GAIN(45), { 0x000001fc, 0x000001fb } },
{ AR5K_RF_GAIN(46), { 0x0000003c, 0x0000003b } },
{ AR5K_RF_GAIN(47), { 0x0000007c, 0x0000007b } },
{ AR5K_RF_GAIN(48), { 0x000000bc, 0x000000bb } },
{ AR5K_RF_GAIN(49), { 0x000000fc, 0x000001bc } },
{ AR5K_RF_GAIN(50), { 0x000000fc, 0x000001fc } },
{ AR5K_RF_GAIN(51), { 0x000000fc, 0x0000003c } },
{ AR5K_RF_GAIN(52), { 0x000000fc, 0x0000007c } },
{ AR5K_RF_GAIN(53), { 0x000000fc, 0x000000bc } },
{ AR5K_RF_GAIN(54), { 0x000000fc, 0x000000fc } },
{ AR5K_RF_GAIN(55), { 0x000000fc, 0x000000fc } },
{ AR5K_RF_GAIN(56), { 0x000000fc, 0x000000fc } },
{ AR5K_RF_GAIN(57), { 0x000000fc, 0x000000fc } },
{ AR5K_RF_GAIN(58), { 0x000000fc, 0x000000fc } },
{ AR5K_RF_GAIN(59), { 0x000000fc, 0x000000fc } },
{ AR5K_RF_GAIN(60), { 0x000000fc, 0x000000fc } },
{ AR5K_RF_GAIN(61), { 0x000000fc, 0x000000fc } },
{ AR5K_RF_GAIN(62), { 0x000000fc, 0x000000fc } },
{ AR5K_RF_GAIN(63), { 0x000000fc, 0x000000fc } },
};
/* Initial RF Gain settings for RF2413 */
static const struct ath5k_ini_rfgain rfgain_2413[] = {
{ AR5K_RF_GAIN(0), { 0x00000000, 0x00000000 } },
{ AR5K_RF_GAIN(1), { 0x00000000, 0x00000040 } },
{ AR5K_RF_GAIN(2), { 0x00000000, 0x00000080 } },
{ AR5K_RF_GAIN(3), { 0x00000000, 0x00000181 } },
{ AR5K_RF_GAIN(4), { 0x00000000, 0x000001c1 } },
{ AR5K_RF_GAIN(5), { 0x00000000, 0x00000001 } },
{ AR5K_RF_GAIN(6), { 0x00000000, 0x00000041 } },
{ AR5K_RF_GAIN(7), { 0x00000000, 0x00000081 } },
{ AR5K_RF_GAIN(8), { 0x00000000, 0x00000168 } },
{ AR5K_RF_GAIN(9), { 0x00000000, 0x000001a8 } },
{ AR5K_RF_GAIN(10), { 0x00000000, 0x000001e8 } },
{ AR5K_RF_GAIN(11), { 0x00000000, 0x00000028 } },
{ AR5K_RF_GAIN(12), { 0x00000000, 0x00000068 } },
{ AR5K_RF_GAIN(13), { 0x00000000, 0x00000189 } },
{ AR5K_RF_GAIN(14), { 0x00000000, 0x000001c9 } },
{ AR5K_RF_GAIN(15), { 0x00000000, 0x00000009 } },
{ AR5K_RF_GAIN(16), { 0x00000000, 0x00000049 } },
{ AR5K_RF_GAIN(17), { 0x00000000, 0x00000089 } },
{ AR5K_RF_GAIN(18), { 0x00000000, 0x00000190 } },
{ AR5K_RF_GAIN(19), { 0x00000000, 0x000001d0 } },
{ AR5K_RF_GAIN(20), { 0x00000000, 0x00000010 } },
{ AR5K_RF_GAIN(21), { 0x00000000, 0x00000050 } },
{ AR5K_RF_GAIN(22), { 0x00000000, 0x00000090 } },
{ AR5K_RF_GAIN(23), { 0x00000000, 0x00000191 } },
{ AR5K_RF_GAIN(24), { 0x00000000, 0x000001d1 } },
{ AR5K_RF_GAIN(25), { 0x00000000, 0x00000011 } },
{ AR5K_RF_GAIN(26), { 0x00000000, 0x00000051 } },
{ AR5K_RF_GAIN(27), { 0x00000000, 0x00000091 } },
{ AR5K_RF_GAIN(28), { 0x00000000, 0x00000178 } },
{ AR5K_RF_GAIN(29), { 0x00000000, 0x000001b8 } },
{ AR5K_RF_GAIN(30), { 0x00000000, 0x000001f8 } },
{ AR5K_RF_GAIN(31), { 0x00000000, 0x00000038 } },
{ AR5K_RF_GAIN(32), { 0x00000000, 0x00000078 } },
{ AR5K_RF_GAIN(33), { 0x00000000, 0x00000199 } },
{ AR5K_RF_GAIN(34), { 0x00000000, 0x000001d9 } },
{ AR5K_RF_GAIN(35), { 0x00000000, 0x00000019 } },
{ AR5K_RF_GAIN(36), { 0x00000000, 0x00000059 } },
{ AR5K_RF_GAIN(37), { 0x00000000, 0x00000099 } },
{ AR5K_RF_GAIN(38), { 0x00000000, 0x000000d9 } },
{ AR5K_RF_GAIN(39), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(40), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(41), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(42), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(43), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(44), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(45), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(46), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(47), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(48), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(49), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(50), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(51), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(52), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(53), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(54), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(55), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(56), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(57), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(58), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(59), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(60), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(61), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(62), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(63), { 0x00000000, 0x000000f9 } },
};
/* Initial RF Gain settings for AR2316 */
static const struct ath5k_ini_rfgain rfgain_2316[] = {
{ AR5K_RF_GAIN(0), { 0x00000000, 0x00000000 } },
{ AR5K_RF_GAIN(1), { 0x00000000, 0x00000040 } },
{ AR5K_RF_GAIN(2), { 0x00000000, 0x00000080 } },
{ AR5K_RF_GAIN(3), { 0x00000000, 0x000000c0 } },
{ AR5K_RF_GAIN(4), { 0x00000000, 0x000000e0 } },
{ AR5K_RF_GAIN(5), { 0x00000000, 0x000000e0 } },
{ AR5K_RF_GAIN(6), { 0x00000000, 0x00000128 } },
{ AR5K_RF_GAIN(7), { 0x00000000, 0x00000128 } },
{ AR5K_RF_GAIN(8), { 0x00000000, 0x00000128 } },
{ AR5K_RF_GAIN(9), { 0x00000000, 0x00000168 } },
{ AR5K_RF_GAIN(10), { 0x00000000, 0x000001a8 } },
{ AR5K_RF_GAIN(11), { 0x00000000, 0x000001e8 } },
{ AR5K_RF_GAIN(12), { 0x00000000, 0x00000028 } },
{ AR5K_RF_GAIN(13), { 0x00000000, 0x00000068 } },
{ AR5K_RF_GAIN(14), { 0x00000000, 0x000000a8 } },
{ AR5K_RF_GAIN(15), { 0x00000000, 0x000000e8 } },
{ AR5K_RF_GAIN(16), { 0x00000000, 0x000000e8 } },
{ AR5K_RF_GAIN(17), { 0x00000000, 0x00000130 } },
{ AR5K_RF_GAIN(18), { 0x00000000, 0x00000130 } },
{ AR5K_RF_GAIN(19), { 0x00000000, 0x00000170 } },
{ AR5K_RF_GAIN(20), { 0x00000000, 0x000001b0 } },
{ AR5K_RF_GAIN(21), { 0x00000000, 0x000001f0 } },
{ AR5K_RF_GAIN(22), { 0x00000000, 0x00000030 } },
{ AR5K_RF_GAIN(23), { 0x00000000, 0x00000070 } },
{ AR5K_RF_GAIN(24), { 0x00000000, 0x000000b0 } },
{ AR5K_RF_GAIN(25), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(26), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(27), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(28), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(29), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(30), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(31), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(32), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(33), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(34), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(35), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(36), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(37), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(38), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(39), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(40), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(41), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(42), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(43), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(44), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(45), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(46), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(47), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(48), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(49), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(50), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(51), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(52), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(53), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(54), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(55), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(56), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(57), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(58), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(59), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(60), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(61), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(62), { 0x00000000, 0x000000f0 } },
{ AR5K_RF_GAIN(63), { 0x00000000, 0x000000f0 } },
};
/* Initial RF Gain settings for RF5413 */
static const struct ath5k_ini_rfgain rfgain_5413[] = {
/* 5GHz 2GHz */
{ AR5K_RF_GAIN(0), { 0x00000000, 0x00000000 } },
{ AR5K_RF_GAIN(1), { 0x00000040, 0x00000040 } },
{ AR5K_RF_GAIN(2), { 0x00000080, 0x00000080 } },
{ AR5K_RF_GAIN(3), { 0x000001a1, 0x00000161 } },
{ AR5K_RF_GAIN(4), { 0x000001e1, 0x000001a1 } },
{ AR5K_RF_GAIN(5), { 0x00000021, 0x000001e1 } },
{ AR5K_RF_GAIN(6), { 0x00000061, 0x00000021 } },
{ AR5K_RF_GAIN(7), { 0x00000188, 0x00000061 } },
{ AR5K_RF_GAIN(8), { 0x000001c8, 0x00000188 } },
{ AR5K_RF_GAIN(9), { 0x00000008, 0x000001c8 } },
{ AR5K_RF_GAIN(10), { 0x00000048, 0x00000008 } },
{ AR5K_RF_GAIN(11), { 0x00000088, 0x00000048 } },
{ AR5K_RF_GAIN(12), { 0x000001a9, 0x00000088 } },
{ AR5K_RF_GAIN(13), { 0x000001e9, 0x00000169 } },
{ AR5K_RF_GAIN(14), { 0x00000029, 0x000001a9 } },
{ AR5K_RF_GAIN(15), { 0x00000069, 0x000001e9 } },
{ AR5K_RF_GAIN(16), { 0x000001d0, 0x00000029 } },
{ AR5K_RF_GAIN(17), { 0x00000010, 0x00000069 } },
{ AR5K_RF_GAIN(18), { 0x00000050, 0x00000190 } },
{ AR5K_RF_GAIN(19), { 0x00000090, 0x000001d0 } },
{ AR5K_RF_GAIN(20), { 0x000001b1, 0x00000010 } },
{ AR5K_RF_GAIN(21), { 0x000001f1, 0x00000050 } },
{ AR5K_RF_GAIN(22), { 0x00000031, 0x00000090 } },
{ AR5K_RF_GAIN(23), { 0x00000071, 0x00000171 } },
{ AR5K_RF_GAIN(24), { 0x000001b8, 0x000001b1 } },
{ AR5K_RF_GAIN(25), { 0x000001f8, 0x000001f1 } },
{ AR5K_RF_GAIN(26), { 0x00000038, 0x00000031 } },
{ AR5K_RF_GAIN(27), { 0x00000078, 0x00000071 } },
{ AR5K_RF_GAIN(28), { 0x00000199, 0x00000198 } },
{ AR5K_RF_GAIN(29), { 0x000001d9, 0x000001d8 } },
{ AR5K_RF_GAIN(30), { 0x00000019, 0x00000018 } },
{ AR5K_RF_GAIN(31), { 0x00000059, 0x00000058 } },
{ AR5K_RF_GAIN(32), { 0x00000099, 0x00000098 } },
{ AR5K_RF_GAIN(33), { 0x000000d9, 0x00000179 } },
{ AR5K_RF_GAIN(34), { 0x000000f9, 0x000001b9 } },
{ AR5K_RF_GAIN(35), { 0x000000f9, 0x000001f9 } },
{ AR5K_RF_GAIN(36), { 0x000000f9, 0x00000039 } },
{ AR5K_RF_GAIN(37), { 0x000000f9, 0x00000079 } },
{ AR5K_RF_GAIN(38), { 0x000000f9, 0x000000b9 } },
{ AR5K_RF_GAIN(39), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(40), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(41), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(42), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(43), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(44), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(45), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(46), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(47), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(48), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(49), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(50), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(51), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(52), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(53), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(54), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(55), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(56), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(57), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(58), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(59), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(60), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(61), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(62), { 0x000000f9, 0x000000f9 } },
{ AR5K_RF_GAIN(63), { 0x000000f9, 0x000000f9 } },
};
/* Initial RF Gain settings for RF2425 */
static const struct ath5k_ini_rfgain rfgain_2425[] = {
{ AR5K_RF_GAIN(0), { 0x00000000, 0x00000000 } },
{ AR5K_RF_GAIN(1), { 0x00000000, 0x00000040 } },
{ AR5K_RF_GAIN(2), { 0x00000000, 0x00000080 } },
{ AR5K_RF_GAIN(3), { 0x00000000, 0x00000181 } },
{ AR5K_RF_GAIN(4), { 0x00000000, 0x000001c1 } },
{ AR5K_RF_GAIN(5), { 0x00000000, 0x00000001 } },
{ AR5K_RF_GAIN(6), { 0x00000000, 0x00000041 } },
{ AR5K_RF_GAIN(7), { 0x00000000, 0x00000081 } },
{ AR5K_RF_GAIN(8), { 0x00000000, 0x00000188 } },
{ AR5K_RF_GAIN(9), { 0x00000000, 0x000001c8 } },
{ AR5K_RF_GAIN(10), { 0x00000000, 0x00000008 } },
{ AR5K_RF_GAIN(11), { 0x00000000, 0x00000048 } },
{ AR5K_RF_GAIN(12), { 0x00000000, 0x00000088 } },
{ AR5K_RF_GAIN(13), { 0x00000000, 0x00000189 } },
{ AR5K_RF_GAIN(14), { 0x00000000, 0x000001c9 } },
{ AR5K_RF_GAIN(15), { 0x00000000, 0x00000009 } },
{ AR5K_RF_GAIN(16), { 0x00000000, 0x00000049 } },
{ AR5K_RF_GAIN(17), { 0x00000000, 0x00000089 } },
{ AR5K_RF_GAIN(18), { 0x00000000, 0x000001b0 } },
{ AR5K_RF_GAIN(19), { 0x00000000, 0x000001f0 } },
{ AR5K_RF_GAIN(20), { 0x00000000, 0x00000030 } },
{ AR5K_RF_GAIN(21), { 0x00000000, 0x00000070 } },
{ AR5K_RF_GAIN(22), { 0x00000000, 0x00000171 } },
{ AR5K_RF_GAIN(23), { 0x00000000, 0x000001b1 } },
{ AR5K_RF_GAIN(24), { 0x00000000, 0x000001f1 } },
{ AR5K_RF_GAIN(25), { 0x00000000, 0x00000031 } },
{ AR5K_RF_GAIN(26), { 0x00000000, 0x00000071 } },
{ AR5K_RF_GAIN(27), { 0x00000000, 0x000001b8 } },
{ AR5K_RF_GAIN(28), { 0x00000000, 0x000001f8 } },
{ AR5K_RF_GAIN(29), { 0x00000000, 0x00000038 } },
{ AR5K_RF_GAIN(30), { 0x00000000, 0x00000078 } },
{ AR5K_RF_GAIN(31), { 0x00000000, 0x000000b8 } },
{ AR5K_RF_GAIN(32), { 0x00000000, 0x000001b9 } },
{ AR5K_RF_GAIN(33), { 0x00000000, 0x000001f9 } },
{ AR5K_RF_GAIN(34), { 0x00000000, 0x00000039 } },
{ AR5K_RF_GAIN(35), { 0x00000000, 0x00000079 } },
{ AR5K_RF_GAIN(36), { 0x00000000, 0x000000b9 } },
{ AR5K_RF_GAIN(37), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(38), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(39), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(40), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(41), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(42), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(43), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(44), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(45), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(46), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(47), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(48), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(49), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(50), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(51), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(52), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(53), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(54), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(55), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(56), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(57), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(58), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(59), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(60), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(61), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(62), { 0x00000000, 0x000000f9 } },
{ AR5K_RF_GAIN(63), { 0x00000000, 0x000000f9 } },
};
#define AR5K_GAIN_CRN_FIX_BITS_5111 4
#define AR5K_GAIN_CRN_FIX_BITS_5112 7
#define AR5K_GAIN_CRN_MAX_FIX_BITS AR5K_GAIN_CRN_FIX_BITS_5112
#define AR5K_GAIN_DYN_ADJUST_HI_MARGIN 15
#define AR5K_GAIN_DYN_ADJUST_LO_MARGIN 20
#define AR5K_GAIN_CCK_PROBE_CORR 5
#define AR5K_GAIN_CCK_OFDM_GAIN_DELTA 15
#define AR5K_GAIN_STEP_COUNT 10
/* Check if our current measurement has hit or crossed the edges of our
* current variable attenuation window (i.e. gain needs to be adjusted) */
#define AR5K_GAIN_CHECK_ADJUST(_g) \
((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high)
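/*
 * Illustrative note (not part of the driver): AR5K_GAIN_CHECK_ADJUST()
 * evaluates true once g_current has reached or crossed either edge of the
 * [g_low, g_high] window, i.e. when the gain should be re-adjusted. A
 * plain-function equivalent of the macro, with the three values passed in
 * explicitly:
 */
static inline bool
ath5k_gain_check_adjust_example(int g_current, int g_low, int g_high)
{
	return g_current <= g_low || g_current >= g_high;
}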
/**
* struct ath5k_gain_opt_step - An RF gain optimization step
* @gos_param: Set of parameters
* @gos_gain: Gain
*/
struct ath5k_gain_opt_step {
s8 gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS];
s8 gos_gain;
};
/**
* struct ath5k_gain_opt - RF Gain optimization ladder
* @go_default: The default step
* @go_steps_count: How many optimization steps
* @go_step: Array of &struct ath5k_gain_opt_step
*/
struct ath5k_gain_opt {
u8 go_default;
u8 go_steps_count;
const struct ath5k_gain_opt_step go_step[AR5K_GAIN_STEP_COUNT];
};
/*
* RF5111
* Parameters on gos_param:
* 1) Tx clip PHY register
* 2) PWD 90 RF register
* 3) PWD 84 RF register
* 4) RFGainSel RF register
*/
static const struct ath5k_gain_opt rfgain_opt_5111 = {
4,
9,
{
{ { 4, 1, 1, 1 }, 6 },
{ { 4, 0, 1, 1 }, 4 },
{ { 3, 1, 1, 1 }, 3 },
{ { 4, 0, 0, 1 }, 1 },
{ { 4, 1, 1, 0 }, 0 },
{ { 4, 0, 1, 0 }, -2 },
{ { 3, 1, 1, 0 }, -3 },
{ { 4, 0, 0, 0 }, -4 },
{ { 2, 1, 1, 0 }, -6 }
}
};
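/*
 * Illustrative sketch (not part of the driver): the steps above run from
 * the highest gos_gain down to the lowest, so a simple scan can locate the
 * first step that does not exceed a wanted gain value. This only shows how
 * the ladder data is laid out; it is not ath5k's actual gain algorithm.
 */
static inline u8
ath5k_gain_step_pick_example(const struct ath5k_gain_opt *go, s8 wanted_gain)
{
	u8 i;

	for (i = 0; i < go->go_steps_count; i++)
		if (go->go_step[i].gos_gain <= wanted_gain)
			return i;

	return go->go_steps_count - 1; /* fall back to the lowest-gain step */
}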
/*
* RF5112
* Parameters on gos_param:
* 1) Mixgain ovr RF register
* 2) PWD 138 RF register
* 3) PWD 137 RF register
* 4) PWD 136 RF register
* 5) PWD 132 RF register
* 6) PWD 131 RF register
* 7) PWD 130 RF register
*/
static const struct ath5k_gain_opt rfgain_opt_5112 = {
1,
8,
{
{ { 3, 0, 0, 0, 0, 0, 0 }, 6 },
{ { 2, 0, 0, 0, 0, 0, 0 }, 0 },
{ { 1, 0, 0, 0, 0, 0, 0 }, -3 },
{ { 0, 0, 0, 0, 0, 0, 0 }, -6 },
{ { 0, 1, 1, 0, 0, 0, 0 }, -8 },
{ { 0, 1, 1, 0, 1, 1, 0 }, -10 },
{ { 0, 1, 0, 1, 1, 1, 0 }, -13 },
{ { 0, 1, 0, 1, 1, 0, 1 }, -16 },
}
};

View file

@ -0,0 +1,116 @@
/*
* RFKILL support for ath5k
*
* Copyright (c) 2009 Tobias Doerffel <tobias.doerffel@gmail.com>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include "ath5k.h"
static inline void ath5k_rfkill_disable(struct ath5k_hw *ah)
{
	ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "rfkill disable (gpio:%d polarity:%d)\n",
		ah->rf_kill.gpio, ah->rf_kill.polarity);
	ath5k_hw_set_gpio_output(ah, ah->rf_kill.gpio);
	ath5k_hw_set_gpio(ah, ah->rf_kill.gpio, !ah->rf_kill.polarity);
}

static inline void ath5k_rfkill_enable(struct ath5k_hw *ah)
{
	ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "rfkill enable (gpio:%d polarity:%d)\n",
		ah->rf_kill.gpio, ah->rf_kill.polarity);
	ath5k_hw_set_gpio_output(ah, ah->rf_kill.gpio);
	ath5k_hw_set_gpio(ah, ah->rf_kill.gpio, ah->rf_kill.polarity);
}

static inline void ath5k_rfkill_set_intr(struct ath5k_hw *ah, bool enable)
{
	u32 curval;

	ath5k_hw_set_gpio_input(ah, ah->rf_kill.gpio);
	curval = ath5k_hw_get_gpio(ah, ah->rf_kill.gpio);
	ath5k_hw_set_gpio_intr(ah, ah->rf_kill.gpio, enable ?
					!!curval : !curval);
}

static bool
ath5k_is_rfkill_set(struct ath5k_hw *ah)
{
	/* configuring GPIO for input for some reason disables rfkill */
	/*ath5k_hw_set_gpio_input(ah, ah->rf_kill.gpio);*/
	return ath5k_hw_get_gpio(ah, ah->rf_kill.gpio) ==
						ah->rf_kill.polarity;
}

static void
ath5k_tasklet_rfkill_toggle(unsigned long data)
{
	struct ath5k_hw *ah = (void *)data;
	bool blocked;

	blocked = ath5k_is_rfkill_set(ah);
	wiphy_rfkill_set_hw_state(ah->hw->wiphy, blocked);
}

void
ath5k_rfkill_hw_start(struct ath5k_hw *ah)
{
	/* read rfkill GPIO configuration from EEPROM header */
	ah->rf_kill.gpio = ah->ah_capabilities.cap_eeprom.ee_rfkill_pin;
	ah->rf_kill.polarity = ah->ah_capabilities.cap_eeprom.ee_rfkill_pol;

	tasklet_init(&ah->rf_kill.toggleq, ath5k_tasklet_rfkill_toggle,
		(unsigned long)ah);

	ath5k_rfkill_disable(ah);

	/* enable interrupt for rfkill switch */
	if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header))
		ath5k_rfkill_set_intr(ah, true);
}

void
ath5k_rfkill_hw_stop(struct ath5k_hw *ah)
{
	/* disable interrupt for rfkill switch */
	if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header))
		ath5k_rfkill_set_intr(ah, false);

	tasklet_kill(&ah->rf_kill.toggleq);

	/* enable RFKILL when stopping HW so the Wi-Fi LED is turned off */
	ath5k_rfkill_enable(ah);
}

View file

@ -0,0 +1,122 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
#include <linux/pci.h>
#include "ath5k.h"
#include "reg.h"
#define SIMPLE_SHOW_STORE(name, get, set) \
static ssize_t ath5k_attr_show_##name(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct ieee80211_hw *hw = dev_get_drvdata(dev); \
struct ath5k_hw *ah = hw->priv; \
return snprintf(buf, PAGE_SIZE, "%d\n", get); \
} \
\
static ssize_t ath5k_attr_store_##name(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
struct ieee80211_hw *hw = dev_get_drvdata(dev); \
struct ath5k_hw *ah = hw->priv; \
int val, ret; \
\
ret = kstrtoint(buf, 10, &val); \
if (ret < 0) \
return ret; \
set(ah, val); \
return count; \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, \
ath5k_attr_show_##name, ath5k_attr_store_##name)
#define SIMPLE_SHOW(name, get) \
static ssize_t ath5k_attr_show_##name(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct ieee80211_hw *hw = dev_get_drvdata(dev); \
struct ath5k_hw *ah = hw->priv; \
return snprintf(buf, PAGE_SIZE, "%d\n", get); \
} \
static DEVICE_ATTR(name, S_IRUGO, ath5k_attr_show_##name, NULL)
/*** ANI ***/
SIMPLE_SHOW_STORE(ani_mode, ah->ani_state.ani_mode, ath5k_ani_init);
SIMPLE_SHOW_STORE(noise_immunity_level, ah->ani_state.noise_imm_level,
ath5k_ani_set_noise_immunity_level);
SIMPLE_SHOW_STORE(spur_level, ah->ani_state.spur_level,
ath5k_ani_set_spur_immunity_level);
SIMPLE_SHOW_STORE(firstep_level, ah->ani_state.firstep_level,
ath5k_ani_set_firstep_level);
SIMPLE_SHOW_STORE(ofdm_weak_signal_detection, ah->ani_state.ofdm_weak_sig,
ath5k_ani_set_ofdm_weak_signal_detection);
SIMPLE_SHOW_STORE(cck_weak_signal_detection, ah->ani_state.cck_weak_sig,
ath5k_ani_set_cck_weak_signal_detection);
SIMPLE_SHOW(spur_level_max, ah->ani_state.max_spur_level);
static ssize_t ath5k_attr_show_noise_immunity_level_max(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", ATH5K_ANI_MAX_NOISE_IMM_LVL);
}
static DEVICE_ATTR(noise_immunity_level_max, S_IRUGO,
ath5k_attr_show_noise_immunity_level_max, NULL);
static ssize_t ath5k_attr_show_firstep_level_max(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", ATH5K_ANI_MAX_FIRSTEP_LVL);
}
static DEVICE_ATTR(firstep_level_max, S_IRUGO,
ath5k_attr_show_firstep_level_max, NULL);
static struct attribute *ath5k_sysfs_entries_ani[] = {
&dev_attr_ani_mode.attr,
&dev_attr_noise_immunity_level.attr,
&dev_attr_spur_level.attr,
&dev_attr_firstep_level.attr,
&dev_attr_ofdm_weak_signal_detection.attr,
&dev_attr_cck_weak_signal_detection.attr,
&dev_attr_noise_immunity_level_max.attr,
&dev_attr_spur_level_max.attr,
&dev_attr_firstep_level_max.attr,
NULL
};
static struct attribute_group ath5k_attribute_group_ani = {
.name = "ani",
.attrs = ath5k_sysfs_entries_ani,
};
/*** register / unregister ***/
int
ath5k_sysfs_register(struct ath5k_hw *ah)
{
struct device *dev = ah->dev;
int err;
err = sysfs_create_group(&dev->kobj, &ath5k_attribute_group_ani);
if (err) {
ATH5K_ERR(ah, "failed to create sysfs group\n");
return err;
}
return 0;
}
void
ath5k_sysfs_unregister(struct ath5k_hw *ah)
{
struct device *dev = ah->dev;
sysfs_remove_group(&dev->kobj, &ath5k_attribute_group_ani);
}

View file

@ -0,0 +1,106 @@
#if !defined(__TRACE_ATH5K_H) || defined(TRACE_HEADER_MULTI_READ)
#define __TRACE_ATH5K_H
#include <linux/tracepoint.h>
#if !defined(CONFIG_ATH5K_TRACER) || defined(__CHECKER__)
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, ...) \
static inline void trace_ ## name(proto) {}
#endif
struct sk_buff;
struct ath5k_txq;
struct ath5k_tx_status;
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ath5k
TRACE_EVENT(ath5k_rx,
TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb),
TP_ARGS(priv, skb),
TP_STRUCT__entry(
__field(struct ath5k_hw *, priv)
__field(unsigned long, skbaddr)
__dynamic_array(u8, frame, skb->len)
),
TP_fast_assign(
__entry->priv = priv;
__entry->skbaddr = (unsigned long) skb;
memcpy(__get_dynamic_array(frame), skb->data, skb->len);
),
TP_printk(
"[%p] RX skb=%lx", __entry->priv, __entry->skbaddr
)
);
TRACE_EVENT(ath5k_tx,
TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb,
struct ath5k_txq *q),
TP_ARGS(priv, skb, q),
TP_STRUCT__entry(
__field(struct ath5k_hw *, priv)
__field(unsigned long, skbaddr)
__field(u8, qnum)
__dynamic_array(u8, frame, skb->len)
),
TP_fast_assign(
__entry->priv = priv;
__entry->skbaddr = (unsigned long) skb;
__entry->qnum = (u8) q->qnum;
memcpy(__get_dynamic_array(frame), skb->data, skb->len);
),
TP_printk(
"[%p] TX skb=%lx q=%d", __entry->priv, __entry->skbaddr,
__entry->qnum
)
);
TRACE_EVENT(ath5k_tx_complete,
TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb,
struct ath5k_txq *q, struct ath5k_tx_status *ts),
TP_ARGS(priv, skb, q, ts),
TP_STRUCT__entry(
__field(struct ath5k_hw *, priv)
__field(unsigned long, skbaddr)
__field(u8, qnum)
__field(u8, ts_status)
__field(s8, ts_rssi)
__field(u8, ts_antenna)
),
TP_fast_assign(
__entry->priv = priv;
__entry->skbaddr = (unsigned long) skb;
__entry->qnum = (u8) q->qnum;
__entry->ts_status = ts->ts_status;
__entry->ts_rssi = ts->ts_rssi;
__entry->ts_antenna = ts->ts_antenna;
),
TP_printk(
"[%p] TX end skb=%lx q=%d stat=%x rssi=%d ant=%x",
__entry->priv, __entry->skbaddr, __entry->qnum,
__entry->ts_status, __entry->ts_rssi, __entry->ts_antenna
)
);
#endif /* __TRACE_ATH5K_H */
#if defined(CONFIG_ATH5K_TRACER) && !defined(__CHECKER__)
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>
#endif
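/*
 * The TRACE_EVENT() definitions above generate trace_ath5k_rx(),
 * trace_ath5k_tx() and trace_ath5k_tx_complete(), which the driver is
 * expected to call from its RX, TX-queue and TX-completion paths
 * respectively. When CONFIG_ATH5K_TRACER is not set, the override at
 * the top of this file turns them into empty inline stubs.
 */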

View file

@ -0,0 +1,65 @@
config ATH6KL
tristate "Atheros mobile chipsets support"
depends on CFG80211
---help---
This module adds core support for wireless adapters based on
Atheros AR6003 and AR6004 chipsets. You still need separate
bus drivers for USB and SDIO to be able to use real devices.
If you choose to build it as a module, it will be called
ath6kl_core. Please note that AR6002 and AR6001 are not
supported by this driver.
config ATH6KL_SDIO
tristate "Atheros ath6kl SDIO support"
depends on ATH6KL
depends on MMC
---help---
This module adds support for wireless adapters based on
Atheros AR6003 and AR6004 chipsets running over SDIO. If you
choose to build it as a module, it will be called ath6kl_sdio.
Please note that AR6002 and AR6001 are not supported by this
driver.
config ATH6KL_USB
tristate "Atheros ath6kl USB support"
depends on ATH6KL
depends on USB
---help---
This module adds support for wireless adapters based on
Atheros AR6004 chipset and chipsets based on it running over
USB. If you choose to build it as a module, it will be
called ath6kl_usb.
config ATH6KL_DEBUG
bool "Atheros ath6kl debugging"
depends on ATH6KL
---help---
Enables ath6kl debug support, including the debug messages
enabled with the debug_mask module parameter and the debugfs
interface.
If unsure, say Y to make it easier to debug problems.
config ATH6KL_TRACING
bool "Atheros ath6kl tracing support"
depends on ATH6KL
depends on EVENT_TRACING
---help---
Select this to let ath6kl use the tracing infrastructure which,
for example, can be enabled with the help of trace-cmd. All debug
messages and commands are delivered using individually
enableable trace points.
If unsure, say Y to make it easier to debug problems.
config ATH6KL_REGDOMAIN
bool "Atheros ath6kl regdomain support"
depends on ATH6KL
depends on CFG80211_CERTIFICATION_ONUS
---help---
Enabling this makes it possible to change the regdomain in
the firmware. This can only be enabled if regulatory requirements
are taken into account.
If unsure, say N.

View file

@ -0,0 +1,49 @@
#------------------------------------------------------------------------------
# Copyright (c) 2004-2011 Atheros Communications Inc.
# Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
# All rights reserved.
#
#
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
#
#
# Author(s): ="Atheros"
#------------------------------------------------------------------------------
obj-$(CONFIG_ATH6KL) += ath6kl_core.o
ath6kl_core-y += debug.o
ath6kl_core-y += hif.o
ath6kl_core-y += htc_mbox.o
ath6kl_core-y += htc_pipe.o
ath6kl_core-y += bmi.o
ath6kl_core-y += cfg80211.o
ath6kl_core-y += init.o
ath6kl_core-y += main.o
ath6kl_core-y += txrx.o
ath6kl_core-y += wmi.o
ath6kl_core-y += core.o
ath6kl_core-y += recovery.o
ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath6kl_core-$(CONFIG_ATH6KL_TRACING) += trace.o
obj-$(CONFIG_ATH6KL_SDIO) += ath6kl_sdio.o
ath6kl_sdio-y += sdio.o
obj-$(CONFIG_ATH6KL_USB) += ath6kl_usb.o
ath6kl_usb-y += usb.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)

View file

@ -0,0 +1,548 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
#include "hif-ops.h"
#include "target.h"
#include "debug.h"
int ath6kl_bmi_done(struct ath6kl *ar)
{
int ret;
u32 cid = BMI_DONE;
if (ar->bmi.done_sent) {
ath6kl_dbg(ATH6KL_DBG_BMI, "bmi done skipped\n");
return 0;
}
ar->bmi.done_sent = true;
ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid));
if (ret) {
ath6kl_err("Unable to send bmi done: %d\n", ret);
return ret;
}
return 0;
}
int ath6kl_bmi_get_target_info(struct ath6kl *ar,
struct ath6kl_bmi_target_info *targ_info)
{
int ret;
u32 cid = BMI_GET_TARGET_INFO;
if (ar->bmi.done_sent) {
ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
return -EACCES;
}
ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid));
if (ret) {
ath6kl_err("Unable to send get target info: %d\n", ret);
return ret;
}
if (ar->hif_type == ATH6KL_HIF_TYPE_USB) {
ret = ath6kl_hif_bmi_read(ar, (u8 *)targ_info,
sizeof(*targ_info));
} else {
ret = ath6kl_hif_bmi_read(ar, (u8 *)&targ_info->version,
sizeof(targ_info->version));
}
if (ret) {
ath6kl_err("Unable to recv target info: %d\n", ret);
return ret;
}
if (le32_to_cpu(targ_info->version) == TARGET_VERSION_SENTINAL) {
/* Determine how many bytes are in the Target's targ_info */
ret = ath6kl_hif_bmi_read(ar,
(u8 *)&targ_info->byte_count,
sizeof(targ_info->byte_count));
if (ret) {
ath6kl_err("unable to read target info byte count: %d\n",
ret);
return ret;
}
/*
* The target's targ_info size must match the host's; a mismatch
* would require backwards-compatibility handling, so treat it as
* an error for now.
*/
if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) {
WARN_ON(1);
return -EINVAL;
}
/* Read the remainder of the targ_info */
ret = ath6kl_hif_bmi_read(ar,
((u8 *)targ_info) +
sizeof(targ_info->byte_count),
sizeof(*targ_info) -
sizeof(targ_info->byte_count));
if (ret) {
ath6kl_err("Unable to read target info (%d bytes): %d\n",
targ_info->byte_count, ret);
return ret;
}
}
ath6kl_dbg(ATH6KL_DBG_BMI, "target info (ver: 0x%x type: 0x%x)\n",
targ_info->version, targ_info->type);
return 0;
}
int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
{
u32 cid = BMI_READ_MEMORY;
int ret;
u32 offset;
u32 len_remain, rx_len;
u16 size;
if (ar->bmi.done_sent) {
ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
return -EACCES;
}
size = ar->bmi.max_data_size + sizeof(cid) + sizeof(addr) + sizeof(len);
if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
memset(ar->bmi.cmd_buf, 0, size);
ath6kl_dbg(ATH6KL_DBG_BMI,
"bmi read memory: device: addr: 0x%x, len: %d\n",
addr, len);
len_remain = len;
while (len_remain) {
rx_len = (len_remain < ar->bmi.max_data_size) ?
len_remain : ar->bmi.max_data_size;
offset = 0;
memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
offset += sizeof(cid);
memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
offset += sizeof(addr);
memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len));
offset += sizeof(len);
ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n",
ret);
return ret;
}
ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, rx_len);
if (ret) {
ath6kl_err("Unable to read from the device: %d\n",
ret);
return ret;
}
memcpy(&buf[len - len_remain], ar->bmi.cmd_buf, rx_len);
len_remain -= rx_len; addr += rx_len;
}
return 0;
}
int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
{
u32 cid = BMI_WRITE_MEMORY;
int ret;
u32 offset;
u32 len_remain, tx_len;
const u32 header = sizeof(cid) + sizeof(addr) + sizeof(len);
u8 aligned_buf[400];
u8 *src;
if (ar->bmi.done_sent) {
ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
return -EACCES;
}
if ((ar->bmi.max_data_size + header) > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
if (WARN_ON(ar->bmi.max_data_size > sizeof(aligned_buf)))
return -E2BIG;
memset(ar->bmi.cmd_buf, 0, ar->bmi.max_data_size + header);
ath6kl_dbg(ATH6KL_DBG_BMI,
"bmi write memory: addr: 0x%x, len: %d\n", addr, len);
len_remain = len;
while (len_remain) {
src = &buf[len - len_remain];
if (len_remain < (ar->bmi.max_data_size - header)) {
if (len_remain & 3) {
/* align it with 4 bytes */
len_remain = len_remain +
(4 - (len_remain & 3));
memcpy(aligned_buf, src, len_remain);
src = aligned_buf;
}
tx_len = len_remain;
} else {
tx_len = (ar->bmi.max_data_size - header);
}
offset = 0;
memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
offset += sizeof(cid);
memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
offset += sizeof(addr);
memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
offset += sizeof(tx_len);
memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len);
offset += tx_len;
ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n",
ret);
return ret;
}
len_remain -= tx_len; addr += tx_len;
}
return 0;
}
int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
{
u32 cid = BMI_EXECUTE;
int ret;
u32 offset;
u16 size;
if (ar->bmi.done_sent) {
ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
return -EACCES;
}
size = sizeof(cid) + sizeof(addr) + sizeof(param);
if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
memset(ar->bmi.cmd_buf, 0, size);
ath6kl_dbg(ATH6KL_DBG_BMI, "bmi execute: addr: 0x%x, param: %d)\n",
addr, *param);
offset = 0;
memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
offset += sizeof(cid);
memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
offset += sizeof(addr);
memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param));
offset += sizeof(*param);
ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n", ret);
return ret;
}
ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param));
if (ret) {
ath6kl_err("Unable to read from the device: %d\n", ret);
return ret;
}
memcpy(param, ar->bmi.cmd_buf, sizeof(*param));
return 0;
}
int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr)
{
u32 cid = BMI_SET_APP_START;
int ret;
u32 offset;
u16 size;
if (ar->bmi.done_sent) {
ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
return -EACCES;
}
size = sizeof(cid) + sizeof(addr);
if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
memset(ar->bmi.cmd_buf, 0, size);
ath6kl_dbg(ATH6KL_DBG_BMI, "bmi set app start: addr: 0x%x\n", addr);
offset = 0;
memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
offset += sizeof(cid);
memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
offset += sizeof(addr);
ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n", ret);
return ret;
}
return 0;
}
int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param)
{
u32 cid = BMI_READ_SOC_REGISTER;
int ret;
u32 offset;
u16 size;
if (ar->bmi.done_sent) {
ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
return -EACCES;
}
size = sizeof(cid) + sizeof(addr);
if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
memset(ar->bmi.cmd_buf, 0, size);
ath6kl_dbg(ATH6KL_DBG_BMI, "bmi read SOC reg: addr: 0x%x\n", addr);
offset = 0;
memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
offset += sizeof(cid);
memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
offset += sizeof(addr);
ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n", ret);
return ret;
}
ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param));
if (ret) {
ath6kl_err("Unable to read from the device: %d\n", ret);
return ret;
}
memcpy(param, ar->bmi.cmd_buf, sizeof(*param));
return 0;
}
int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param)
{
u32 cid = BMI_WRITE_SOC_REGISTER;
int ret;
u32 offset;
u16 size;
if (ar->bmi.done_sent) {
ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
return -EACCES;
}
size = sizeof(cid) + sizeof(addr) + sizeof(param);
if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
memset(ar->bmi.cmd_buf, 0, size);
ath6kl_dbg(ATH6KL_DBG_BMI,
"bmi write SOC reg: addr: 0x%x, param: %d\n",
addr, param);
offset = 0;
memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
offset += sizeof(cid);
memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
offset += sizeof(addr);
memcpy(&(ar->bmi.cmd_buf[offset]), &param, sizeof(param));
offset += sizeof(param);
ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n", ret);
return ret;
}
return 0;
}
int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
{
u32 cid = BMI_LZ_DATA;
int ret;
u32 offset;
u32 len_remain, tx_len;
const u32 header = sizeof(cid) + sizeof(len);
u16 size;
if (ar->bmi.done_sent) {
ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
return -EACCES;
}
size = ar->bmi.max_data_size + header;
if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
memset(ar->bmi.cmd_buf, 0, size);
ath6kl_dbg(ATH6KL_DBG_BMI, "bmi send LZ data: len: %d)\n",
len);
len_remain = len;
while (len_remain) {
tx_len = (len_remain < (ar->bmi.max_data_size - header)) ?
len_remain : (ar->bmi.max_data_size - header);
offset = 0;
memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
offset += sizeof(cid);
memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
offset += sizeof(tx_len);
memcpy(&(ar->bmi.cmd_buf[offset]), &buf[len - len_remain],
tx_len);
offset += tx_len;
ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n",
ret);
return ret;
}
len_remain -= tx_len;
}
return 0;
}
int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr)
{
u32 cid = BMI_LZ_STREAM_START;
int ret;
u32 offset;
u16 size;
if (ar->bmi.done_sent) {
ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
return -EACCES;
}
size = sizeof(cid) + sizeof(addr);
if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
memset(ar->bmi.cmd_buf, 0, size);
ath6kl_dbg(ATH6KL_DBG_BMI,
"bmi LZ stream start: addr: 0x%x)\n",
addr);
offset = 0;
memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
offset += sizeof(cid);
memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
offset += sizeof(addr);
ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to start LZ stream to the device: %d\n",
ret);
return ret;
}
return 0;
}
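/*
 * Download a possibly compressed image: open an LZ stream at the target
 * address, push the 4-byte-aligned bulk of the buffer, pad any trailing
 * bytes into a zero-filled word, then restart the stream at address 0,
 * which only serves to flush the target-side caches.
 */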
int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
{
int ret;
u32 last_word = 0;
u32 last_word_offset = len & ~0x3;
u32 unaligned_bytes = len & 0x3;
ret = ath6kl_bmi_lz_stream_start(ar, addr);
if (ret)
return ret;
if (unaligned_bytes) {
/* copy the last word into a zero padded buffer */
memcpy(&last_word, &buf[last_word_offset], unaligned_bytes);
}
ret = ath6kl_bmi_lz_data(ar, buf, last_word_offset);
if (ret)
return ret;
if (unaligned_bytes)
ret = ath6kl_bmi_lz_data(ar, (u8 *)&last_word, 4);
if (!ret) {
/* Close compressed stream and open a new (fake) one.
* This serves mainly to flush Target caches. */
ret = ath6kl_bmi_lz_stream_start(ar, 0x00);
}
return ret;
}
void ath6kl_bmi_reset(struct ath6kl *ar)
{
ar->bmi.done_sent = false;
}
int ath6kl_bmi_init(struct ath6kl *ar)
{
if (WARN_ON(ar->bmi.max_data_size == 0))
return -EINVAL;
/* cmd + addr + len + data_size */
ar->bmi.max_cmd_size = ar->bmi.max_data_size + (sizeof(u32) * 3);
ar->bmi.cmd_buf = kzalloc(ar->bmi.max_cmd_size, GFP_ATOMIC);
if (!ar->bmi.cmd_buf)
return -ENOMEM;
return 0;
}
void ath6kl_bmi_cleanup(struct ath6kl *ar)
{
kfree(ar->bmi.cmd_buf);
ar->bmi.cmd_buf = NULL;
}

View file

@ -0,0 +1,271 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef BMI_H
#define BMI_H
/*
* Bootloader Messaging Interface (BMI)
*
* BMI is a very simple messaging interface used during initialization
* to read memory, write memory, execute code, and to define an
* application entry PC.
*
* It is used to download an application to ATH6KL, to provide
* patches to code that is already resident on ATH6KL, and generally
* to examine and modify state. The Host has an opportunity to use
* BMI only once during bootup. Once the Host issues a BMI_DONE
* command, this opportunity ends.
*
* The Host writes BMI requests to mailbox0, and reads BMI responses
* from mailbox0. BMI requests all begin with a command
* (see below for specific commands), and are followed by
* command-specific data.
*
* Flow control:
* The Host can only issue a command once the Target gives it a
* "BMI Command Credit", using ATH6KL Counter #4. As soon as the
* Target has completed a command, it issues another BMI Command
* Credit (so the Host can issue the next command).
*
* BMI handles all required Target-side cache flushing.
*/
/* BMI Commands */
#define BMI_NO_COMMAND 0
#define BMI_DONE 1
/*
* Semantics: Host is done using BMI
* Request format:
* u32 command (BMI_DONE)
* Response format: none
*/
#define BMI_READ_MEMORY 2
/*
* Semantics: Host reads ATH6KL memory
* Request format:
* u32 command (BMI_READ_MEMORY)
* u32 address
* u32 length, at most BMI_DATASZ_MAX
* Response format:
* u8 data[length]
*/
#define BMI_WRITE_MEMORY 3
/*
* Semantics: Host writes ATH6KL memory
* Request format:
* u32 command (BMI_WRITE_MEMORY)
* u32 address
* u32 length, at most BMI_DATASZ_MAX
* u8 data[length]
* Response format: none
*/
#define BMI_EXECUTE 4
/*
* Semantics: Causes ATH6KL to execute code
* Request format:
* u32 command (BMI_EXECUTE)
* u32 address
* u32 parameter
* Response format:
* u32 return value
*/
#define BMI_SET_APP_START 5
/*
* Semantics: Set Target application starting address
* Request format:
* u32 command (BMI_SET_APP_START)
* u32 address
* Response format: none
*/
#define BMI_READ_SOC_REGISTER 6
/*
* Semantics: Read a 32-bit Target SOC register.
* Request format:
* u32 command (BMI_READ_REGISTER)
* u32 address
* Response format:
* u32 value
*/
#define BMI_WRITE_SOC_REGISTER 7
/*
* Semantics: Write a 32-bit Target SOC register.
* Request format:
* u32 command (BMI_WRITE_REGISTER)
* u32 address
* u32 value
*
* Response format: none
*/
#define BMI_GET_TARGET_ID 8
#define BMI_GET_TARGET_INFO 8
/*
* Semantics: Fetch the 4-byte Target information
* Request format:
* u32 command (BMI_GET_TARGET_ID/INFO)
* Response format1 (old firmware):
* u32 TargetVersionID
* Response format2 (newer firmware):
* u32 TARGET_VERSION_SENTINAL
* struct bmi_target_info;
*/
#define TARGET_VERSION_SENTINAL 0xffffffff
#define TARGET_TYPE_AR6003 3
#define TARGET_TYPE_AR6004 5
#define BMI_ROMPATCH_INSTALL 9
/*
* Semantics: Install a ROM Patch.
* Request format:
* u32 command (BMI_ROMPATCH_INSTALL)
* u32 Target ROM Address
* u32 Target RAM Address or Value (depending on Target Type)
* u32 Size, in bytes
* u32 Activate? 1-->activate;
* 0-->install but do not activate
* Response format:
* u32 PatchID
*/
#define BMI_ROMPATCH_UNINSTALL 10
/*
* Semantics: Uninstall a previously-installed ROM Patch,
* automatically deactivating, if necessary.
* Request format:
* u32 command (BMI_ROMPATCH_UNINSTALL)
* u32 PatchID
*
* Response format: none
*/
#define BMI_ROMPATCH_ACTIVATE 11
/*
* Semantics: Activate a list of previously-installed ROM Patches.
* Request format:
* u32 command (BMI_ROMPATCH_ACTIVATE)
* u32 rompatch_count
* u32 PatchID[rompatch_count]
*
* Response format: none
*/
#define BMI_ROMPATCH_DEACTIVATE 12
/*
* Semantics: Deactivate a list of active ROM Patches.
* Request format:
* u32 command (BMI_ROMPATCH_DEACTIVATE)
* u32 rompatch_count
* u32 PatchID[rompatch_count]
*
* Response format: none
*/
#define BMI_LZ_STREAM_START 13
/*
* Semantics: Begin an LZ-compressed stream of input
* which is to be uncompressed by the Target to an
* output buffer at address. The output buffer must
* be sufficiently large to hold the uncompressed
* output from the compressed input stream. This BMI
* command should be followed by a series of 1 or more
* BMI_LZ_DATA commands.
* u32 command (BMI_LZ_STREAM_START)
* u32 address
* Note: Not supported on all versions of ROM firmware.
*/
#define BMI_LZ_DATA 14
/*
* Semantics: Host writes ATH6KL memory with LZ-compressed
* data which is uncompressed by the Target. This command
* must be preceded by a BMI_LZ_STREAM_START command. A series
* of BMI_LZ_DATA commands are considered part of a single
* input stream until another BMI_LZ_STREAM_START is issued.
* Request format:
* u32 command (BMI_LZ_DATA)
* u32 length (of compressed data),
* at most BMI_DATASZ_MAX
* u8 CompressedData[length]
* Response format: none
* Note: Not supported on all versions of ROM firmware.
*/
#define BMI_COMMUNICATION_TIMEOUT 1000 /* in msec */
struct ath6kl;
struct ath6kl_bmi_target_info {
__le32 byte_count; /* size of this structure */
__le32 version; /* target version id */
__le32 type; /* target type */
} __packed;
#define ath6kl_bmi_write_hi32(ar, item, val) \
({ \
u32 addr; \
__le32 v; \
\
addr = ath6kl_get_hi_item_addr(ar, HI_ITEM(item)); \
v = cpu_to_le32(val); \
ath6kl_bmi_write(ar, addr, (u8 *) &v, sizeof(v)); \
})
#define ath6kl_bmi_read_hi32(ar, item, val) \
({ \
u32 addr, *check_type = val; \
__le32 tmp; \
int ret; \
\
(void) (check_type == val); \
addr = ath6kl_get_hi_item_addr(ar, HI_ITEM(item)); \
ret = ath6kl_bmi_read(ar, addr, (u8 *) &tmp, 4); \
if (!ret) \
*val = le32_to_cpu(tmp); \
ret; \
})
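/*
 * The two macros above wrap 32-bit "host interest" accesses: the item is
 * translated to a target address via ath6kl_get_hi_item_addr() and the
 * value is converted to/from little-endian. An illustrative call (the
 * item name here is only an example) would be:
 *
 *	ret = ath6kl_bmi_write_hi32(ar, hi_board_data, board_addr);
 */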
int ath6kl_bmi_init(struct ath6kl *ar);
void ath6kl_bmi_cleanup(struct ath6kl *ar);
void ath6kl_bmi_reset(struct ath6kl *ar);
int ath6kl_bmi_done(struct ath6kl *ar);
int ath6kl_bmi_get_target_info(struct ath6kl *ar,
struct ath6kl_bmi_target_info *targ_info);
int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len);
int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len);
int ath6kl_bmi_execute(struct ath6kl *ar,
u32 addr, u32 *param);
int ath6kl_bmi_set_app_start(struct ath6kl *ar,
u32 addr);
int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param);
int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param);
int ath6kl_bmi_lz_data(struct ath6kl *ar,
u8 *buf, u32 len);
int ath6kl_bmi_lz_stream_start(struct ath6kl *ar,
u32 addr);
int ath6kl_bmi_fast_download(struct ath6kl *ar,
u32 addr, u8 *buf, u32 len);
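/*
 * Illustrative sketch, not part of the driver: a minimal host-side BMI
 * sequence following the command semantics documented above. The load
 * address and image buffer are hypothetical placeholders supplied by
 * the caller.
 */
static inline int ath6kl_bmi_example_boot(struct ath6kl *ar, u32 load_addr,
					  u8 *image, u32 image_len)
{
	struct ath6kl_bmi_target_info targ_info;
	int ret;

	/* query target type/version before deciding what to download */
	ret = ath6kl_bmi_get_target_info(ar, &targ_info);
	if (ret)
		return ret;

	/* place the (uncompressed) image in target RAM */
	ret = ath6kl_bmi_write(ar, load_addr, image, image_len);
	if (ret)
		return ret;

	/* tell the target where execution starts once BMI is finished */
	ret = ath6kl_bmi_set_app_start(ar, load_addr);
	if (ret)
		return ret;

	/* BMI_DONE ends the BMI phase; no further BMI commands are accepted */
	return ath6kl_bmi_done(ar);
}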
#endif

File diff suppressed because it is too large

View file

@ -0,0 +1,65 @@
/*
* Copyright (c) 2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef ATH6KL_CFG80211_H
#define ATH6KL_CFG80211_H
enum ath6kl_cfg_suspend_mode {
ATH6KL_CFG_SUSPEND_DEEPSLEEP,
ATH6KL_CFG_SUSPEND_CUTPOWER,
ATH6KL_CFG_SUSPEND_WOW,
};
struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
enum nl80211_iftype type,
u8 fw_vif_idx, u8 nw_type);
void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
enum wmi_phy_mode mode);
void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted);
void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
u8 *bssid, u16 listen_intvl,
u16 beacon_intvl,
enum network_type nw_type,
u8 beacon_ie_len, u8 assoc_req_len,
u8 assoc_resp_len, u8 *assoc_info);
void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
u8 *bssid, u8 assoc_resp_len,
u8 *assoc_info, u16 proto_reason);
void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid,
bool ismcast);
int ath6kl_cfg80211_suspend(struct ath6kl *ar,
enum ath6kl_cfg_suspend_mode mode,
struct cfg80211_wowlan *wow);
int ath6kl_cfg80211_resume(struct ath6kl *ar);
void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif);
void ath6kl_cfg80211_stop(struct ath6kl_vif *vif);
void ath6kl_cfg80211_stop_all(struct ath6kl *ar);
int ath6kl_cfg80211_init(struct ath6kl *ar);
void ath6kl_cfg80211_cleanup(struct ath6kl *ar);
struct ath6kl *ath6kl_cfg80211_create(void);
void ath6kl_cfg80211_destroy(struct ath6kl *ar);
#endif /* ATH6KL_CFG80211_H */

View file

@ -0,0 +1,85 @@
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef COMMON_H
#define COMMON_H
#include <linux/netdevice.h>
#define ATH6KL_MAX_IE 256
__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
/*
* Reflects the version of binary interface exposed by ATH6KL target
* firmware. Needs to be incremented by 1 for any change in the firmware
* that requires upgrade of the driver on the host side for the change to
* work correctly
*/
#define ATH6KL_ABI_VERSION 1
#define SIGNAL_QUALITY_METRICS_NUM_MAX 2
enum {
SIGNAL_QUALITY_METRICS_SNR = 0,
SIGNAL_QUALITY_METRICS_RSSI,
SIGNAL_QUALITY_METRICS_ALL,
};
/*
* Data Path
*/
#define WMI_MAX_TX_DATA_FRAME_LENGTH \
(1500 + sizeof(struct wmi_data_hdr) + \
sizeof(struct ethhdr) + \
sizeof(struct ath6kl_llc_snap_hdr))
/* An A-MSDU frame; the maximum A-MSDU length of AR6003 is 3839 bytes */
#define WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH \
(3840 + sizeof(struct wmi_data_hdr) + \
sizeof(struct ethhdr) + \
sizeof(struct ath6kl_llc_snap_hdr))
#define EPPING_ALIGNMENT_PAD \
(((sizeof(struct htc_frame_hdr) + 3) & (~0x3)) \
- sizeof(struct htc_frame_hdr))
struct ath6kl_llc_snap_hdr {
u8 dsap;
u8 ssap;
u8 cntl;
u8 org_code[3];
__be16 eth_type;
} __packed;
enum crypto_type {
NONE_CRYPT = 0x01,
WEP_CRYPT = 0x02,
TKIP_CRYPT = 0x04,
AES_CRYPT = 0x08,
WAPI_CRYPT = 0x10,
};
struct htc_endpoint_credit_dist;
struct ath6kl;
struct ath6kl_htcap;
enum htc_credit_dist_reason;
struct ath6kl_htc_credit_info;
struct sk_buff *ath6kl_buf_alloc(int size);
#endif /* COMMON_H */

View file

@ -0,0 +1,362 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "hif-ops.h"
#include "htc-ops.h"
#include "cfg80211.h"
unsigned int debug_mask;
static unsigned int suspend_mode;
static unsigned int wow_mode;
static unsigned int uart_debug;
static unsigned int ath6kl_p2p;
static unsigned int testmode;
static unsigned int recovery_enable;
static unsigned int heart_beat_poll;
module_param(debug_mask, uint, 0644);
module_param(suspend_mode, uint, 0644);
module_param(wow_mode, uint, 0644);
module_param(uart_debug, uint, 0644);
module_param(ath6kl_p2p, uint, 0644);
module_param(testmode, uint, 0644);
module_param(recovery_enable, uint, 0644);
module_param(heart_beat_poll, uint, 0644);
MODULE_PARM_DESC(recovery_enable, "Enable recovery from firmware error");
MODULE_PARM_DESC(heart_beat_poll,
"Enable fw error detection periodic polling in msecs - Also set recovery_enable for this to be effective");
void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
{
ath6kl_htc_tx_complete(ar, skb);
}
EXPORT_SYMBOL(ath6kl_core_tx_complete);
void ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe)
{
ath6kl_htc_rx_complete(ar, skb, pipe);
}
EXPORT_SYMBOL(ath6kl_core_rx_complete);
int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
{
struct ath6kl_bmi_target_info targ_info;
struct wireless_dev *wdev;
int ret = 0, i;
switch (htc_type) {
case ATH6KL_HTC_TYPE_MBOX:
ath6kl_htc_mbox_attach(ar);
break;
case ATH6KL_HTC_TYPE_PIPE:
ath6kl_htc_pipe_attach(ar);
break;
default:
WARN_ON(1);
return -ENOMEM;
}
ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
if (!ar->ath6kl_wq)
return -ENOMEM;
ret = ath6kl_bmi_init(ar);
if (ret)
goto err_wq;
/*
* Turn on power to get the hardware (target) version and leave power
* on deliberately, as we will boot the hardware anyway within a few
* seconds.
*/
ret = ath6kl_hif_power_on(ar);
if (ret)
goto err_bmi_cleanup;
ret = ath6kl_bmi_get_target_info(ar, &targ_info);
if (ret)
goto err_power_off;
ar->version.target_ver = le32_to_cpu(targ_info.version);
ar->target_type = le32_to_cpu(targ_info.type);
ar->wiphy->hw_version = le32_to_cpu(targ_info.version);
ret = ath6kl_init_hw_params(ar);
if (ret)
goto err_power_off;
ar->htc_target = ath6kl_htc_create(ar);
if (!ar->htc_target) {
ret = -ENOMEM;
goto err_power_off;
}
ar->testmode = testmode;
ret = ath6kl_init_fetch_firmwares(ar);
if (ret)
goto err_htc_cleanup;
/* FIXME: we should free all firmwares in the error cases below */
/*
* Backwards compatibility support for older ar6004 firmware images
* which do not set these feature flags.
*/
if (ar->target_type == TARGET_TYPE_AR6004 &&
ar->fw_api <= 4) {
__set_bit(ATH6KL_FW_CAPABILITY_64BIT_RATES,
ar->fw_capabilities);
__set_bit(ATH6KL_FW_CAPABILITY_AP_INACTIVITY_MINS,
ar->fw_capabilities);
if (ar->hw.id == AR6004_HW_1_3_VERSION)
__set_bit(ATH6KL_FW_CAPABILITY_MAP_LP_ENDPOINT,
ar->fw_capabilities);
}
/* Indicate that WMI is enabled (although not ready yet) */
set_bit(WMI_ENABLED, &ar->flag);
ar->wmi = ath6kl_wmi_init(ar);
if (!ar->wmi) {
ath6kl_err("failed to initialize wmi\n");
ret = -EIO;
goto err_htc_cleanup;
}
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
/* setup access class priority mappings */
ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */
ar->ac_stream_pri_map[WMM_AC_BE] = 1;
ar->ac_stream_pri_map[WMM_AC_VI] = 2;
ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
/* allocate some buffers that handle larger AMSDU frames */
ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
ath6kl_cookie_init(ar);
ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
if (suspend_mode &&
suspend_mode >= WLAN_POWER_STATE_CUT_PWR &&
suspend_mode <= WLAN_POWER_STATE_WOW)
ar->suspend_mode = suspend_mode;
else
ar->suspend_mode = 0;
if (suspend_mode == WLAN_POWER_STATE_WOW &&
(wow_mode == WLAN_POWER_STATE_CUT_PWR ||
wow_mode == WLAN_POWER_STATE_DEEP_SLEEP))
ar->wow_suspend_mode = wow_mode;
else
ar->wow_suspend_mode = 0;
if (uart_debug)
ar->conf_flags |= ATH6KL_CONF_UART_DEBUG;
set_bit(FIRST_BOOT, &ar->flag);
ath6kl_debug_init(ar);
ret = ath6kl_init_hw_start(ar);
if (ret) {
ath6kl_err("Failed to start hardware: %d\n", ret);
goto err_rxbuf_cleanup;
}
/* give our connected endpoints some buffers */
ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
ret = ath6kl_cfg80211_init(ar);
if (ret)
goto err_rxbuf_cleanup;
ret = ath6kl_debug_init_fs(ar);
if (ret) {
wiphy_unregister(ar->wiphy);
goto err_rxbuf_cleanup;
}
for (i = 0; i < ar->vif_max; i++)
ar->avail_idx_map |= BIT(i);
rtnl_lock();
/* Add an initial station interface */
wdev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0,
INFRA_NETWORK);
rtnl_unlock();
if (!wdev) {
ath6kl_err("Failed to instantiate a network device\n");
ret = -ENOMEM;
wiphy_unregister(ar->wiphy);
goto err_rxbuf_cleanup;
}
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
__func__, wdev->netdev->name, wdev->netdev, ar);
ar->fw_recovery.enable = !!recovery_enable;
if (!ar->fw_recovery.enable)
return ret;
if (heart_beat_poll &&
test_bit(ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL,
ar->fw_capabilities))
ar->fw_recovery.hb_poll = heart_beat_poll;
ath6kl_recovery_init(ar);
return ret;
err_rxbuf_cleanup:
ath6kl_debug_cleanup(ar);
ath6kl_htc_flush_rx_buf(ar->htc_target);
ath6kl_cleanup_amsdu_rxbufs(ar);
ath6kl_wmi_shutdown(ar->wmi);
clear_bit(WMI_ENABLED, &ar->flag);
ar->wmi = NULL;
err_htc_cleanup:
ath6kl_htc_cleanup(ar->htc_target);
err_power_off:
ath6kl_hif_power_off(ar);
err_bmi_cleanup:
ath6kl_bmi_cleanup(ar);
err_wq:
destroy_workqueue(ar->ath6kl_wq);
return ret;
}
EXPORT_SYMBOL(ath6kl_core_init);
struct ath6kl *ath6kl_core_create(struct device *dev)
{
struct ath6kl *ar;
u8 ctr;
ar = ath6kl_cfg80211_create();
if (!ar)
return NULL;
ar->p2p = !!ath6kl_p2p;
ar->dev = dev;
ar->vif_max = 1;
ar->max_norm_iface = 1;
spin_lock_init(&ar->lock);
spin_lock_init(&ar->mcastpsq_lock);
spin_lock_init(&ar->list_lock);
init_waitqueue_head(&ar->event_wq);
sema_init(&ar->sem, 1);
INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);
INIT_LIST_HEAD(&ar->vif_list);
clear_bit(WMI_ENABLED, &ar->flag);
clear_bit(SKIP_SCAN, &ar->flag);
clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
ar->tx_pwr = 0;
ar->intra_bss = 1;
ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD;
ar->state = ATH6KL_STATE_OFF;
memset((u8 *)ar->sta_list, 0,
AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
/* Init the PS queues */
for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
spin_lock_init(&ar->sta_list[ctr].psq_lock);
skb_queue_head_init(&ar->sta_list[ctr].psq);
skb_queue_head_init(&ar->sta_list[ctr].apsdq);
ar->sta_list[ctr].mgmt_psq_len = 0;
INIT_LIST_HEAD(&ar->sta_list[ctr].mgmt_psq);
ar->sta_list[ctr].aggr_conn =
kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
if (!ar->sta_list[ctr].aggr_conn) {
ath6kl_err("Failed to allocate memory for sta aggregation information\n");
ath6kl_core_destroy(ar);
return NULL;
}
}
skb_queue_head_init(&ar->mcastpsq);
memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
return ar;
}
EXPORT_SYMBOL(ath6kl_core_create);
void ath6kl_core_cleanup(struct ath6kl *ar)
{
ath6kl_hif_power_off(ar);
ath6kl_recovery_cleanup(ar);
destroy_workqueue(ar->ath6kl_wq);
if (ar->htc_target)
ath6kl_htc_cleanup(ar->htc_target);
ath6kl_cookie_cleanup(ar);
ath6kl_cleanup_amsdu_rxbufs(ar);
ath6kl_bmi_cleanup(ar);
ath6kl_debug_cleanup(ar);
kfree(ar->fw_board);
kfree(ar->fw_otp);
vfree(ar->fw);
kfree(ar->fw_patch);
kfree(ar->fw_testscript);
ath6kl_cfg80211_cleanup(ar);
}
EXPORT_SYMBOL(ath6kl_core_cleanup);
void ath6kl_core_destroy(struct ath6kl *ar)
{
ath6kl_cfg80211_destroy(ar);
}
EXPORT_SYMBOL(ath6kl_core_destroy);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Core module for AR600x SDIO and USB devices.");
MODULE_LICENSE("Dual BSD/GPL");

View file

@ -0,0 +1,990 @@
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef CORE_H
#define CORE_H
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/sched.h>
#include <linux/circ_buf.h>
#include <net/cfg80211.h>
#include "htc.h"
#include "wmi.h"
#include "bmi.h"
#include "target.h"
#define MAX_ATH6KL 1
#define ATH6KL_MAX_RX_BUFFERS 16
#define ATH6KL_BUFFER_SIZE 1664
#define ATH6KL_MAX_AMSDU_RX_BUFFERS 4
#define ATH6KL_AMSDU_REFILL_THRESHOLD 3
#define ATH6KL_AMSDU_BUFFER_SIZE (WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH + 128)
#define MAX_MSDU_SUBFRAME_PAYLOAD_LEN 1508
#define MIN_MSDU_SUBFRAME_PAYLOAD_LEN 46
#define USER_SAVEDKEYS_STAT_INIT 0
#define USER_SAVEDKEYS_STAT_RUN 1
#define ATH6KL_TX_TIMEOUT 10
#define ATH6KL_MAX_ENDPOINTS 4
#define MAX_NODE_NUM 15
#define ATH6KL_APSD_ALL_FRAME 0xFFFF
#define ATH6KL_APSD_NUM_OF_AC 0x4
#define ATH6KL_APSD_FRAME_MASK 0xF
/* Extra bytes for htc header alignment */
#define ATH6KL_HTC_ALIGN_BYTES 3
/* MAX_HI_COOKIE_NUM are reserved for high priority traffic */
#define MAX_DEF_COOKIE_NUM 180
#define MAX_HI_COOKIE_NUM 18 /* 10% of MAX_DEF_COOKIE_NUM */
#define MAX_COOKIE_NUM (MAX_DEF_COOKIE_NUM + MAX_HI_COOKIE_NUM)
#define MAX_DEFAULT_SEND_QUEUE_DEPTH (MAX_DEF_COOKIE_NUM / WMM_NUM_AC)
#define DISCON_TIMER_INTVAL 10000 /* in msec */
/* Channel dwell time in fg scan */
#define ATH6KL_FG_SCAN_INTERVAL 50 /* in ms */
/* also includes the null byte */
#define ATH6KL_FIRMWARE_MAGIC "QCA-ATH6KL"
enum ath6kl_fw_ie_type {
ATH6KL_FW_IE_FW_VERSION = 0,
ATH6KL_FW_IE_TIMESTAMP = 1,
ATH6KL_FW_IE_OTP_IMAGE = 2,
ATH6KL_FW_IE_FW_IMAGE = 3,
ATH6KL_FW_IE_PATCH_IMAGE = 4,
ATH6KL_FW_IE_RESERVED_RAM_SIZE = 5,
ATH6KL_FW_IE_CAPABILITIES = 6,
ATH6KL_FW_IE_PATCH_ADDR = 7,
ATH6KL_FW_IE_BOARD_ADDR = 8,
ATH6KL_FW_IE_VIF_MAX = 9,
};
enum ath6kl_fw_capability {
ATH6KL_FW_CAPABILITY_HOST_P2P = 0,
ATH6KL_FW_CAPABILITY_SCHED_SCAN = 1,
/*
* Firmware is capable of supporting P2P mgmt operations on a
* station interface. After group formation, the station
* interface will become a P2P client/GO interface as the case may be
*/
ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
/*
* Firmware has support to cleanup inactive stations
* in AP mode.
*/
ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT,
/* Firmware has support to override rsn cap of rsn ie */
ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
/*
* Multicast support in WOW and host awake mode.
* Allow all multicast in host awake mode.
* Apply multicast filter in WOW mode.
*/
ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
/* Firmware supports enhanced bmiss detection */
ATH6KL_FW_CAPABILITY_BMISS_ENHANCE,
/*
* FW supports matching of SSID in scheduled scan
*/
ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST,
/* Firmware supports filtering BSS results by RSSI */
ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD,
/* FW sets mac_addr[4] ^= 0x80 for newly created interfaces */
ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR,
/* Firmware supports TX error rate notification */
ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY,
/* supports WMI_SET_REGDOMAIN_CMDID command */
ATH6KL_FW_CAPABILITY_REGDOMAIN,
/* Firmware supports sched scan decoupled from host sleep */
ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2,
/*
* Firmware capability for hang detection through heart beat
* challenge messages.
*/
ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL,
/* WMI_SET_TX_SELECT_RATES_CMDID uses 64 bit size rate table */
ATH6KL_FW_CAPABILITY_64BIT_RATES,
/* WMI_AP_CONN_INACT_CMDID uses minutes as units */
ATH6KL_FW_CAPABILITY_AP_INACTIVITY_MINS,
/* use low priority endpoint for all data */
ATH6KL_FW_CAPABILITY_MAP_LP_ENDPOINT,
/* ratetable is the 2 stream version (max MCS15) */
ATH6KL_FW_CAPABILITY_RATETABLE_MCS15,
/* firmware doesn't support IP checksumming */
ATH6KL_FW_CAPABILITY_NO_IP_CHECKSUM,
/* this needs to be last */
ATH6KL_FW_CAPABILITY_MAX,
};
#define ATH6KL_CAPABILITY_LEN (ALIGN(ATH6KL_FW_CAPABILITY_MAX, 32) / 32)
struct ath6kl_fw_ie {
__le32 id;
__le32 len;
u8 data[0];
};
enum ath6kl_hw_flags {
ATH6KL_HW_SDIO_CRC_ERROR_WAR = BIT(3),
};
#define ATH6KL_FW_API2_FILE "fw-2.bin"
#define ATH6KL_FW_API3_FILE "fw-3.bin"
#define ATH6KL_FW_API4_FILE "fw-4.bin"
#define ATH6KL_FW_API5_FILE "fw-5.bin"
/* AR6003 1.0 definitions */
#define AR6003_HW_1_0_VERSION 0x300002ba
/* AR6003 2.0 definitions */
#define AR6003_HW_2_0_VERSION 0x30000384
#define AR6003_HW_2_0_PATCH_DOWNLOAD_ADDRESS 0x57e910
#define AR6003_HW_2_0_FW_DIR "ath6k/AR6003/hw2.0"
#define AR6003_HW_2_0_OTP_FILE "otp.bin.z77"
#define AR6003_HW_2_0_FIRMWARE_FILE "athwlan.bin.z77"
#define AR6003_HW_2_0_TCMD_FIRMWARE_FILE "athtcmd_ram.bin"
#define AR6003_HW_2_0_PATCH_FILE "data.patch.bin"
#define AR6003_HW_2_0_BOARD_DATA_FILE AR6003_HW_2_0_FW_DIR "/bdata.bin"
#define AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE \
AR6003_HW_2_0_FW_DIR "/bdata.SD31.bin"
/* AR6003 3.0 definitions */
#define AR6003_HW_2_1_1_VERSION 0x30000582
#define AR6003_HW_2_1_1_FW_DIR "ath6k/AR6003/hw2.1.1"
#define AR6003_HW_2_1_1_OTP_FILE "otp.bin"
#define AR6003_HW_2_1_1_FIRMWARE_FILE "athwlan.bin"
#define AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE "athtcmd_ram.bin"
#define AR6003_HW_2_1_1_UTF_FIRMWARE_FILE "utf.bin"
#define AR6003_HW_2_1_1_TESTSCRIPT_FILE "nullTestFlow.bin"
#define AR6003_HW_2_1_1_PATCH_FILE "data.patch.bin"
#define AR6003_HW_2_1_1_BOARD_DATA_FILE AR6003_HW_2_1_1_FW_DIR "/bdata.bin"
#define AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE \
AR6003_HW_2_1_1_FW_DIR "/bdata.SD31.bin"
/* AR6004 1.0 definitions */
#define AR6004_HW_1_0_VERSION 0x30000623
#define AR6004_HW_1_0_FW_DIR "ath6k/AR6004/hw1.0"
#define AR6004_HW_1_0_FIRMWARE_FILE "fw.ram.bin"
#define AR6004_HW_1_0_BOARD_DATA_FILE AR6004_HW_1_0_FW_DIR "/bdata.bin"
#define AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE \
AR6004_HW_1_0_FW_DIR "/bdata.DB132.bin"
/* AR6004 1.1 definitions */
#define AR6004_HW_1_1_VERSION 0x30000001
#define AR6004_HW_1_1_FW_DIR "ath6k/AR6004/hw1.1"
#define AR6004_HW_1_1_FIRMWARE_FILE "fw.ram.bin"
#define AR6004_HW_1_1_BOARD_DATA_FILE AR6004_HW_1_1_FW_DIR "/bdata.bin"
#define AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE \
AR6004_HW_1_1_FW_DIR "/bdata.DB132.bin"
/* AR6004 1.2 definitions */
#define AR6004_HW_1_2_VERSION 0x300007e8
#define AR6004_HW_1_2_FW_DIR "ath6k/AR6004/hw1.2"
#define AR6004_HW_1_2_FIRMWARE_FILE "fw.ram.bin"
#define AR6004_HW_1_2_BOARD_DATA_FILE AR6004_HW_1_2_FW_DIR "/bdata.bin"
#define AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE \
AR6004_HW_1_2_FW_DIR "/bdata.bin"
/* AR6004 1.3 definitions */
#define AR6004_HW_1_3_VERSION 0x31c8088a
#define AR6004_HW_1_3_FW_DIR "ath6k/AR6004/hw1.3"
#define AR6004_HW_1_3_FIRMWARE_FILE "fw.ram.bin"
#define AR6004_HW_1_3_TCMD_FIRMWARE_FILE "utf.bin"
#define AR6004_HW_1_3_UTF_FIRMWARE_FILE "utf.bin"
#define AR6004_HW_1_3_TESTSCRIPT_FILE "nullTestFlow.bin"
#define AR6004_HW_1_3_BOARD_DATA_FILE AR6004_HW_1_3_FW_DIR "/bdata.bin"
#define AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE AR6004_HW_1_3_FW_DIR "/bdata.bin"
/* AR6004 3.0 definitions */
#define AR6004_HW_3_0_VERSION 0x31C809F8
#define AR6004_HW_3_0_FW_DIR "ath6k/AR6004/hw3.0"
#define AR6004_HW_3_0_FIRMWARE_FILE "fw.ram.bin"
#define AR6004_HW_3_0_TCMD_FIRMWARE_FILE "utf.bin"
#define AR6004_HW_3_0_UTF_FIRMWARE_FILE "utf.bin"
#define AR6004_HW_3_0_TESTSCRIPT_FILE "nullTestFlow.bin"
#define AR6004_HW_3_0_BOARD_DATA_FILE AR6004_HW_3_0_FW_DIR "/bdata.bin"
#define AR6004_HW_3_0_DEFAULT_BOARD_DATA_FILE AR6004_HW_3_0_FW_DIR "/bdata.bin"
/* Per STA data, used in AP mode */
#define STA_PS_AWAKE BIT(0)
#define STA_PS_SLEEP BIT(1)
#define STA_PS_POLLED BIT(2)
#define STA_PS_APSD_TRIGGER BIT(3)
#define STA_PS_APSD_EOSP BIT(4)
/* HTC TX packet tagging definitions */
#define ATH6KL_CONTROL_PKT_TAG HTC_TX_PACKET_TAG_USER_DEFINED
#define ATH6KL_DATA_PKT_TAG (ATH6KL_CONTROL_PKT_TAG + 1)
#define AR6003_CUST_DATA_SIZE 16
#define AGGR_WIN_IDX(x, y) ((x) % (y))
#define AGGR_INCR_IDX(x, y) AGGR_WIN_IDX(((x) + 1), (y))
#define AGGR_DCRM_IDX(x, y) AGGR_WIN_IDX(((x) - 1), (y))
#define ATH6KL_MAX_SEQ_NO 0xFFF
#define ATH6KL_NEXT_SEQ_NO(x) (((x) + 1) & ATH6KL_MAX_SEQ_NO)
#define NUM_OF_TIDS 8
#define AGGR_SZ_DEFAULT 8
#define AGGR_WIN_SZ_MIN 2
#define AGGR_WIN_SZ_MAX 8
#define TID_WINDOW_SZ(_x) ((_x) << 1)
#define AGGR_NUM_OF_FREE_NETBUFS 16
#define AGGR_RX_TIMEOUT 100 /* in ms */
#define WMI_TIMEOUT (2 * HZ)
#define MBOX_YIELD_LIMIT 99
#define ATH6KL_DEFAULT_LISTEN_INTVAL 100 /* in TUs */
#define ATH6KL_DEFAULT_BMISS_TIME 1500
#define ATH6KL_MAX_WOW_LISTEN_INTL 300 /* in TUs */
#define ATH6KL_MAX_BMISS_TIME 5000
/* configuration flags */
/*
* ATH6KL_CONF_IGNORE_ERP_BARKER: Ignore the Barker preamble bit in
* the ERP IE of the beacon when determining short preamble support
* for sending the (Re)Assoc req.
* ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN: Don't send to the host the
* power module state transition failure events which happen during
* scan.
*/
#define ATH6KL_CONF_IGNORE_ERP_BARKER BIT(0)
#define ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN BIT(1)
#define ATH6KL_CONF_ENABLE_11N BIT(2)
#define ATH6KL_CONF_ENABLE_TX_BURST BIT(3)
#define ATH6KL_CONF_UART_DEBUG BIT(4)
#define P2P_WILDCARD_SSID_LEN 7 /* DIRECT- */
enum wlan_low_pwr_state {
WLAN_POWER_STATE_ON,
WLAN_POWER_STATE_CUT_PWR,
WLAN_POWER_STATE_DEEP_SLEEP,
WLAN_POWER_STATE_WOW
};
enum sme_state {
SME_DISCONNECTED,
SME_CONNECTING,
SME_CONNECTED
};
struct skb_hold_q {
struct sk_buff *skb;
bool is_amsdu;
u16 seq_no;
};
struct rxtid {
bool aggr;
bool timer_mon;
u16 win_sz;
u16 seq_next;
u32 hold_q_sz;
struct skb_hold_q *hold_q;
struct sk_buff_head q;
/*
* lock mainly protects seq_next and hold_q. Movement of seq_next
* needs to be protected between aggr_timeout() and
* aggr_process_recv_frm(). hold_q will be holding the pending
* reorder frames and its access should also be protected.
* Some of the other fields like hold_q_sz, win_sz and aggr are
* initialized/reset when receiving addba/delba req, also while
* deleting aggr state all the pending buffers are flushed before
* resetting these fields, so there should not be any race in accessing
* these fields.
*/
spinlock_t lock;
};
struct rxtid_stats {
u32 num_into_aggr;
u32 num_dups;
u32 num_oow;
u32 num_mpdu;
u32 num_amsdu;
u32 num_delivered;
u32 num_timeouts;
u32 num_hole;
u32 num_bar;
};
struct aggr_info_conn {
u8 aggr_sz;
u8 timer_scheduled;
struct timer_list timer;
struct net_device *dev;
struct rxtid rx_tid[NUM_OF_TIDS];
struct rxtid_stats stat[NUM_OF_TIDS];
struct aggr_info *aggr_info;
};
struct aggr_info {
struct aggr_info_conn *aggr_conn;
struct sk_buff_head rx_amsdu_freeq;
};
struct ath6kl_wep_key {
u8 key_index;
u8 key_len;
u8 key[64];
};
#define ATH6KL_KEY_SEQ_LEN 8
struct ath6kl_key {
u8 key[WLAN_MAX_KEY_LEN];
u8 key_len;
u8 seq[ATH6KL_KEY_SEQ_LEN];
u8 seq_len;
u32 cipher;
};
struct ath6kl_node_mapping {
u8 mac_addr[ETH_ALEN];
u8 ep_id;
u8 tx_pend;
};
struct ath6kl_cookie {
struct sk_buff *skb;
u32 map_no;
struct htc_packet htc_pkt;
struct ath6kl_cookie *arc_list_next;
};
struct ath6kl_mgmt_buff {
struct list_head list;
u32 freq;
u32 wait;
u32 id;
bool no_cck;
size_t len;
u8 buf[0];
};
struct ath6kl_sta {
u16 sta_flags;
u8 mac[ETH_ALEN];
u8 aid;
u8 keymgmt;
u8 ucipher;
u8 auth;
u8 wpa_ie[ATH6KL_MAX_IE];
struct sk_buff_head psq;
/* protects psq, mgmt_psq, apsdq, and mgmt_psq_len fields */
spinlock_t psq_lock;
struct list_head mgmt_psq;
size_t mgmt_psq_len;
u8 apsd_info;
struct sk_buff_head apsdq;
struct aggr_info_conn *aggr_conn;
};
struct ath6kl_version {
u32 target_ver;
u32 wlan_ver;
u32 abi_ver;
};
struct ath6kl_bmi {
u32 cmd_credits;
bool done_sent;
u8 *cmd_buf;
u32 max_data_size;
u32 max_cmd_size;
};
struct target_stats {
u64 tx_pkt;
u64 tx_byte;
u64 tx_ucast_pkt;
u64 tx_ucast_byte;
u64 tx_mcast_pkt;
u64 tx_mcast_byte;
u64 tx_bcast_pkt;
u64 tx_bcast_byte;
u64 tx_rts_success_cnt;
u64 tx_pkt_per_ac[4];
u64 tx_err;
u64 tx_fail_cnt;
u64 tx_retry_cnt;
u64 tx_mult_retry_cnt;
u64 tx_rts_fail_cnt;
u64 rx_pkt;
u64 rx_byte;
u64 rx_ucast_pkt;
u64 rx_ucast_byte;
u64 rx_mcast_pkt;
u64 rx_mcast_byte;
u64 rx_bcast_pkt;
u64 rx_bcast_byte;
u64 rx_frgment_pkt;
u64 rx_err;
u64 rx_crc_err;
u64 rx_key_cache_miss;
u64 rx_decrypt_err;
u64 rx_dupl_frame;
u64 tkip_local_mic_fail;
u64 tkip_cnter_measures_invoked;
u64 tkip_replays;
u64 tkip_fmt_err;
u64 ccmp_fmt_err;
u64 ccmp_replays;
u64 pwr_save_fail_cnt;
u64 cs_bmiss_cnt;
u64 cs_low_rssi_cnt;
u64 cs_connect_cnt;
u64 cs_discon_cnt;
s32 tx_ucast_rate;
s32 rx_ucast_rate;
u32 lq_val;
u32 wow_pkt_dropped;
u16 wow_evt_discarded;
s16 noise_floor_calib;
s16 cs_rssi;
s16 cs_ave_beacon_rssi;
u8 cs_ave_beacon_snr;
u8 cs_last_roam_msec;
u8 cs_snr;
u8 wow_host_pkt_wakeups;
u8 wow_host_evt_wakeups;
u32 arp_received;
u32 arp_matched;
u32 arp_replied;
};
struct ath6kl_mbox_info {
u32 htc_addr;
u32 htc_ext_addr;
u32 htc_ext_sz;
u32 block_size;
u32 gmbox_addr;
u32 gmbox_sz;
};
/*
* 802.11i defines an extended IV for use with non-WEP ciphers.
* When the EXTIV bit is set in the key id byte an additional
* 4 bytes immediately follow the IV for TKIP. For CCMP the
* EXTIV bit is likewise set but the 8 bytes represent the
* CCMP header rather than IV+extended-IV.
*/
#define ATH6KL_KEYBUF_SIZE 16
#define ATH6KL_MICBUF_SIZE (8+8) /* space for both tx and rx */
#define ATH6KL_KEY_XMIT 0x01
#define ATH6KL_KEY_RECV 0x02
#define ATH6KL_KEY_DEFAULT 0x80 /* default xmit key */
/* Initial group key for AP mode */
struct ath6kl_req_key {
bool valid;
u8 key_index;
int key_type;
u8 key[WLAN_MAX_KEY_LEN];
u8 key_len;
};
enum ath6kl_hif_type {
ATH6KL_HIF_TYPE_SDIO,
ATH6KL_HIF_TYPE_USB,
};
enum ath6kl_htc_type {
ATH6KL_HTC_TYPE_MBOX,
ATH6KL_HTC_TYPE_PIPE,
};
/* Max number of filters that hw supports */
#define ATH6K_MAX_MC_FILTERS_PER_LIST 7
struct ath6kl_mc_filter {
struct list_head list;
char hw_addr[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE];
};
struct ath6kl_htcap {
bool ht_enable;
u8 ampdu_factor;
unsigned short cap_info;
};
/*
* Driver's maximum limit, note that some firmwares support only one vif
* and the runtime (current) limit must be checked from ar->vif_max.
*/
#define ATH6KL_VIF_MAX 3
/* vif flags info */
enum ath6kl_vif_state {
CONNECTED,
CONNECT_PEND,
WMM_ENABLED,
NETQ_STOPPED,
DTIM_EXPIRED,
CLEAR_BSSFILTER_ON_BEACON,
DTIM_PERIOD_AVAIL,
WLAN_ENABLED,
STATS_UPDATE_PEND,
HOST_SLEEP_MODE_CMD_PROCESSED,
NETDEV_MCAST_ALL_ON,
NETDEV_MCAST_ALL_OFF,
SCHED_SCANNING,
};
struct ath6kl_vif {
struct list_head list;
struct wireless_dev wdev;
struct net_device *ndev;
struct ath6kl *ar;
/* Lock to protect vif specific net_stats and flags */
spinlock_t if_lock;
u8 fw_vif_idx;
unsigned long flags;
int ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
u8 dot11_auth_mode;
u8 auth_mode;
u8 prwise_crypto;
u8 prwise_crypto_len;
u8 grp_crypto;
u8 grp_crypto_len;
u8 def_txkey_index;
u8 next_mode;
u8 nw_type;
u8 bssid[ETH_ALEN];
u8 req_bssid[ETH_ALEN];
u16 ch_hint;
u16 bss_ch;
struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
struct aggr_info *aggr_cntxt;
struct ath6kl_htcap htcap[IEEE80211_NUM_BANDS];
struct timer_list disconnect_timer;
struct timer_list sched_scan_timer;
struct cfg80211_scan_request *scan_req;
enum sme_state sme_state;
int reconnect_flag;
u32 last_roc_id;
u32 last_cancel_roc_id;
u32 send_action_id;
bool probe_req_report;
u16 assoc_bss_beacon_int;
u16 listen_intvl_t;
u16 bmiss_time_t;
u32 txe_intvl;
u16 bg_scan_period;
u8 assoc_bss_dtim_period;
struct net_device_stats net_stats;
struct target_stats target_stats;
struct wmi_connect_cmd profile;
u16 rsn_capab;
struct list_head mc_filter;
};
static inline struct ath6kl_vif *ath6kl_vif_from_wdev(struct wireless_dev *wdev)
{
return container_of(wdev, struct ath6kl_vif, wdev);
}
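/*
 * Illustrative sketch, not part of the original source: cfg80211 callbacks
 * that receive a struct wireless_dev can recover the owning ath6kl instance
 * through the embedded vif. The helper name below is hypothetical.
 */
static inline struct ath6kl *example_ar_from_wdev(struct wireless_dev *wdev)
{
	/* wdev is embedded in struct ath6kl_vif, which carries the ar pointer */
	return ath6kl_vif_from_wdev(wdev)->ar;
}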
#define WOW_LIST_ID 0
#define WOW_HOST_REQ_DELAY 500 /* ms */
#define ATH6KL_SCHED_SCAN_RESULT_DELAY 5000 /* ms */
/* Flag info */
enum ath6kl_dev_state {
WMI_ENABLED,
WMI_READY,
WMI_CTRL_EP_FULL,
TESTMODE,
DESTROY_IN_PROGRESS,
SKIP_SCAN,
ROAM_TBL_PEND,
FIRST_BOOT,
RECOVERY_CLEANUP,
};
enum ath6kl_state {
ATH6KL_STATE_OFF,
ATH6KL_STATE_ON,
ATH6KL_STATE_SUSPENDING,
ATH6KL_STATE_RESUMING,
ATH6KL_STATE_DEEPSLEEP,
ATH6KL_STATE_CUTPOWER,
ATH6KL_STATE_WOW,
ATH6KL_STATE_RECOVERY,
};
/* Fw error recovery */
#define ATH6KL_HB_RESP_MISS_THRES 5
enum ath6kl_fw_err {
ATH6KL_FW_ASSERT,
ATH6KL_FW_HB_RESP_FAILURE,
ATH6KL_FW_EP_FULL,
};
struct ath6kl {
struct device *dev;
struct wiphy *wiphy;
enum ath6kl_state state;
unsigned int testmode;
struct ath6kl_bmi bmi;
const struct ath6kl_hif_ops *hif_ops;
const struct ath6kl_htc_ops *htc_ops;
struct wmi *wmi;
int tx_pending[ENDPOINT_MAX];
int total_tx_data_pend;
struct htc_target *htc_target;
enum ath6kl_hif_type hif_type;
void *hif_priv;
struct list_head vif_list;
/* Lock to avoid race in vif_list entries among add/del/traverse */
spinlock_t list_lock;
u8 num_vif;
unsigned int vif_max;
u8 max_norm_iface;
u8 avail_idx_map;
/*
* Protects at least amsdu_rx_buffer_queue, ath6kl_alloc_cookie()
* calls, tx_pending and total_tx_data_pend.
*/
spinlock_t lock;
struct semaphore sem;
u8 lrssi_roam_threshold;
struct ath6kl_version version;
u32 target_type;
u8 tx_pwr;
struct ath6kl_node_mapping node_map[MAX_NODE_NUM];
u8 ibss_ps_enable;
bool ibss_if_active;
u8 node_num;
u8 next_ep_id;
struct ath6kl_cookie *cookie_list;
u32 cookie_count;
enum htc_endpoint_id ac2ep_map[WMM_NUM_AC];
bool ac_stream_active[WMM_NUM_AC];
u8 ac_stream_pri_map[WMM_NUM_AC];
u8 hiac_stream_active_pri;
u8 ep2ac_map[ENDPOINT_MAX];
enum htc_endpoint_id ctrl_ep;
struct ath6kl_htc_credit_info credit_state_info;
u32 connect_ctrl_flags;
u32 user_key_ctrl;
u8 usr_bss_filter;
struct ath6kl_sta sta_list[AP_MAX_NUM_STA];
u8 sta_list_index;
struct ath6kl_req_key ap_mode_bkey;
struct sk_buff_head mcastpsq;
u32 want_ch_switch;
u16 last_ch;
/*
* FIXME: protects access to mcastpsq but is actually useless as
* all skb_queue_*() functions provide serialisation themselves
*/
spinlock_t mcastpsq_lock;
u8 intra_bss;
struct wmi_ap_mode_stat ap_stats;
u8 ap_country_code[3];
struct list_head amsdu_rx_buffer_queue;
u8 rx_meta_ver;
enum wlan_low_pwr_state wlan_pwr_state;
u8 mac_addr[ETH_ALEN];
#define AR_MCAST_FILTER_MAC_ADDR_SIZE 4
struct {
void *rx_report;
size_t rx_report_len;
} tm;
struct ath6kl_hw {
u32 id;
const char *name;
u32 dataset_patch_addr;
u32 app_load_addr;
u32 app_start_override_addr;
u32 board_ext_data_addr;
u32 reserved_ram_size;
u32 board_addr;
u32 refclk_hz;
u32 uarttx_pin;
u32 testscript_addr;
enum wmi_phy_cap cap;
u32 flags;
struct ath6kl_hw_fw {
const char *dir;
const char *otp;
const char *fw;
const char *tcmd;
const char *patch;
const char *utf;
const char *testscript;
} fw;
const char *fw_board;
const char *fw_default_board;
} hw;
u16 conf_flags;
u16 suspend_mode;
u16 wow_suspend_mode;
wait_queue_head_t event_wq;
struct ath6kl_mbox_info mbox_info;
struct ath6kl_cookie cookie_mem[MAX_COOKIE_NUM];
unsigned long flag;
u8 *fw_board;
size_t fw_board_len;
u8 *fw_otp;
size_t fw_otp_len;
u8 *fw;
size_t fw_len;
u8 *fw_patch;
size_t fw_patch_len;
u8 *fw_testscript;
size_t fw_testscript_len;
unsigned int fw_api;
unsigned long fw_capabilities[ATH6KL_CAPABILITY_LEN];
struct workqueue_struct *ath6kl_wq;
struct dentry *debugfs_phy;
bool p2p;
bool wiphy_registered;
struct ath6kl_fw_recovery {
struct work_struct recovery_work;
unsigned long err_reason;
unsigned long hb_poll;
struct timer_list hb_timer;
u32 seq_num;
bool hb_pending;
u8 hb_misscnt;
bool enable;
} fw_recovery;
#ifdef CONFIG_ATH6KL_DEBUG
struct {
struct sk_buff_head fwlog_queue;
struct completion fwlog_completion;
bool fwlog_open;
u32 fwlog_mask;
unsigned int dbgfs_diag_reg;
u32 diag_reg_addr_wr;
u32 diag_reg_val_wr;
struct {
unsigned int invalid_rate;
} war_stats;
u8 *roam_tbl;
unsigned int roam_tbl_len;
u8 keepalive;
u8 disc_timeout;
} debug;
#endif /* CONFIG_ATH6KL_DEBUG */
};
static inline struct ath6kl *ath6kl_priv(struct net_device *dev)
{
return ((struct ath6kl_vif *) netdev_priv(dev))->ar;
}
static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar,
u32 item_offset)
{
u32 addr = 0;
if (ar->target_type == TARGET_TYPE_AR6003)
addr = ATH6KL_AR6003_HI_START_ADDR + item_offset;
else if (ar->target_type == TARGET_TYPE_AR6004)
addr = ATH6KL_AR6004_HI_START_ADDR + item_offset;
return addr;
}
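/*
 * Illustrative sketch, not part of the original source: the usual pattern
 * pairs ath6kl_get_hi_item_addr() with the diagnostic-window helpers
 * declared further below (mirroring ath6kl_hif_dump_fw_crash() in hif.c).
 * HI_ITEM() and TARG_VTOP() come from target.h; the function name here is
 * hypothetical.
 */
static inline int example_read_failure_state(struct ath6kl *ar, u32 *state)
{
	u32 address;

	/* offset within the host interest area -> target address */
	address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
	address = TARG_VTOP(ar->target_type, address);

	/* fetch the 32-bit value through the diagnostic window */
	return ath6kl_diag_read32(ar, address, state);
}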
int ath6kl_configure_target(struct ath6kl *ar);
void ath6kl_detect_error(unsigned long ptr);
void disconnect_timer_handler(unsigned long ptr);
void init_netdev(struct net_device *dev);
void ath6kl_cookie_init(struct ath6kl *ar);
void ath6kl_cookie_cleanup(struct ath6kl *ar);
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet);
void ath6kl_tx_complete(struct htc_target *context,
struct list_head *packet_queue);
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
struct htc_packet *packet);
void ath6kl_stop_txrx(struct ath6kl *ar);
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar);
int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value);
int ath6kl_diag_write(struct ath6kl *ar, u32 address, void *data, u32 length);
int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value);
int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length);
int ath6kl_read_fwlogs(struct ath6kl *ar);
void ath6kl_init_profile_info(struct ath6kl_vif *vif);
void ath6kl_tx_data_cleanup(struct ath6kl *ar);
struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar);
void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie);
int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev);
struct aggr_info *aggr_init(struct ath6kl_vif *vif);
void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
struct aggr_info_conn *aggr_conn);
void ath6kl_rx_refill(struct htc_target *target,
enum htc_endpoint_id endpoint);
void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count);
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
enum htc_endpoint_id endpoint,
int len);
void aggr_module_destroy(struct aggr_info *aggr_info);
void aggr_reset_state(struct aggr_info_conn *aggr_conn);
struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr);
struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid);
void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver,
enum wmi_phy_cap cap);
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
enum htc_endpoint_id eid);
void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel,
u8 *bssid, u16 listen_int,
u16 beacon_int, enum network_type net_type,
u8 beacon_ie_len, u8 assoc_req_len,
u8 assoc_resp_len, u8 *assoc_info);
void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel);
void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
u8 keymgmt, u8 ucipher, u8 auth,
u8 assoc_req_len, u8 *assoc_info, u8 apsd_info);
void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason,
u8 *bssid, u8 assoc_resp_len,
u8 *assoc_info, u16 prot_reason_status);
void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast);
void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr);
void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status);
void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len);
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active);
enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac);
void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid);
void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif);
void ath6kl_disconnect(struct ath6kl_vif *vif);
void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid);
void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
u8 win_sz);
void ath6kl_wakeup_event(void *dev);
void ath6kl_init_control_info(struct ath6kl_vif *vif);
struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar);
void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready);
int ath6kl_init_hw_start(struct ath6kl *ar);
int ath6kl_init_hw_stop(struct ath6kl *ar);
int ath6kl_init_fetch_firmwares(struct ath6kl *ar);
int ath6kl_init_hw_params(struct ath6kl *ar);
void ath6kl_check_wow_status(struct ath6kl *ar);
void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb);
void ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe);
struct ath6kl *ath6kl_core_create(struct device *dev);
int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type);
void ath6kl_core_cleanup(struct ath6kl *ar);
void ath6kl_core_destroy(struct ath6kl *ar);
/* Fw error recovery */
void ath6kl_init_hw_restart(struct ath6kl *ar);
void ath6kl_recovery_err_notify(struct ath6kl *ar, enum ath6kl_fw_err reason);
void ath6kl_recovery_hb_event(struct ath6kl *ar, u32 cookie);
void ath6kl_recovery_init(struct ath6kl *ar);
void ath6kl_recovery_cleanup(struct ath6kl *ar);
void ath6kl_recovery_suspend(struct ath6kl *ar);
void ath6kl_recovery_resume(struct ath6kl *ar);
#endif /* CORE_H */

File diff suppressed because it is too large

View file

@ -0,0 +1,144 @@
/*
* Copyright (c) 2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef DEBUG_H
#define DEBUG_H
#include "hif.h"
#include "trace.h"
enum ATH6K_DEBUG_MASK {
ATH6KL_DBG_CREDIT = BIT(0),
/* hole */
ATH6KL_DBG_WLAN_TX = BIT(2), /* wlan tx */
ATH6KL_DBG_WLAN_RX = BIT(3), /* wlan rx */
ATH6KL_DBG_BMI = BIT(4), /* bmi tracing */
ATH6KL_DBG_HTC = BIT(5),
ATH6KL_DBG_HIF = BIT(6),
ATH6KL_DBG_IRQ = BIT(7), /* interrupt processing */
/* hole */
/* hole */
ATH6KL_DBG_WMI = BIT(10), /* wmi tracing */
ATH6KL_DBG_TRC = BIT(11), /* generic func tracing */
ATH6KL_DBG_SCATTER = BIT(12), /* hif scatter tracing */
ATH6KL_DBG_WLAN_CFG = BIT(13), /* cfg80211 i/f file tracing */
ATH6KL_DBG_RAW_BYTES = BIT(14), /* dump tx/rx frames */
ATH6KL_DBG_AGGR = BIT(15), /* aggregation */
ATH6KL_DBG_SDIO = BIT(16),
ATH6KL_DBG_SDIO_DUMP = BIT(17),
ATH6KL_DBG_BOOT = BIT(18), /* driver init and fw boot */
ATH6KL_DBG_WMI_DUMP = BIT(19),
ATH6KL_DBG_SUSPEND = BIT(20),
ATH6KL_DBG_USB = BIT(21),
ATH6KL_DBG_USB_BULK = BIT(22),
ATH6KL_DBG_RECOVERY = BIT(23),
ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */
};
extern unsigned int debug_mask;
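/*
 * Illustrative sketch, not part of the original source: the categories above
 * are plain bit flags, so several log classes can be enabled at once by
 * OR'ing them into debug_mask (typically exposed as a module parameter).
 * The helper name is hypothetical.
 */
static inline void example_enable_boot_and_wmi_logs(void)
{
	debug_mask = ATH6KL_DBG_BOOT | ATH6KL_DBG_WMI;
}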
__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
__printf(1, 2) int ath6kl_info(const char *fmt, ...);
__printf(1, 2) int ath6kl_err(const char *fmt, ...);
__printf(1, 2) int ath6kl_warn(const char *fmt, ...);
enum ath6kl_war {
ATH6KL_WAR_INVALID_RATE,
};
#ifdef CONFIG_ATH6KL_DEBUG
void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...);
void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
const char *msg, const char *prefix,
const void *buf, size_t len);
void ath6kl_dump_registers(struct ath6kl_device *dev,
struct ath6kl_irq_proc_registers *irq_proc_reg,
struct ath6kl_irq_enable_reg *irq_en_reg);
void dump_cred_dist_stats(struct htc_target *target);
void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len);
void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war);
int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
size_t len);
void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive);
void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout);
void ath6kl_debug_init(struct ath6kl *ar);
int ath6kl_debug_init_fs(struct ath6kl *ar);
void ath6kl_debug_cleanup(struct ath6kl *ar);
#else
static inline int ath6kl_dbg(enum ATH6K_DEBUG_MASK dbg_mask,
const char *fmt, ...)
{
return 0;
}
static inline void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
const char *msg, const char *prefix,
const void *buf, size_t len)
{
}
static inline void ath6kl_dump_registers(struct ath6kl_device *dev,
struct ath6kl_irq_proc_registers *irq_proc_reg,
struct ath6kl_irq_enable_reg *irq_en_reg)
{
}
static inline void dump_cred_dist_stats(struct htc_target *target)
{
}
static inline void ath6kl_debug_fwlog_event(struct ath6kl *ar,
const void *buf, size_t len)
{
}
static inline void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war)
{
}
static inline int ath6kl_debug_roam_tbl_event(struct ath6kl *ar,
const void *buf, size_t len)
{
return 0;
}
static inline void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive)
{
}
static inline void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar,
u8 timeout)
{
}
static inline void ath6kl_debug_init(struct ath6kl *ar)
{
}
static inline int ath6kl_debug_init_fs(struct ath6kl *ar)
{
return 0;
}
static inline void ath6kl_debug_cleanup(struct ath6kl *ar)
{
}
#endif
#endif

View file

@ -0,0 +1,187 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef HIF_OPS_H
#define HIF_OPS_H
#include "hif.h"
#include "debug.h"
static inline int hif_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
u32 len, u32 request)
{
ath6kl_dbg(ATH6KL_DBG_HIF,
"hif %s sync addr 0x%x buf 0x%p len %d request 0x%x\n",
(request & HIF_WRITE) ? "write" : "read",
addr, buf, len, request);
return ar->hif_ops->read_write_sync(ar, addr, buf, len, request);
}
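/*
 * Illustrative sketch, not part of the original source: a synchronous
 * 4-byte register read through the HIF layer, mirroring how hif.c clears
 * the debug counter. COUNT_DEC_ADDRESS comes from target.h and
 * HIF_RD_SYNC_BYTE_INC from hif.h; the function name is hypothetical.
 */
static inline int example_read_count_dec(struct ath6kl *ar, u32 *val)
{
	return hif_read_write_sync(ar, COUNT_DEC_ADDRESS, (u8 *) val,
				   sizeof(*val), HIF_RD_SYNC_BYTE_INC);
}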
static inline int hif_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
u32 length, u32 request,
struct htc_packet *packet)
{
ath6kl_dbg(ATH6KL_DBG_HIF,
"hif write async addr 0x%x buf 0x%p len %d request 0x%x\n",
address, buffer, length, request);
return ar->hif_ops->write_async(ar, address, buffer, length,
request, packet);
}
static inline void ath6kl_hif_irq_enable(struct ath6kl *ar)
{
ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq enable\n");
return ar->hif_ops->irq_enable(ar);
}
static inline void ath6kl_hif_irq_disable(struct ath6kl *ar)
{
ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq disable\n");
return ar->hif_ops->irq_disable(ar);
}
static inline struct hif_scatter_req *hif_scatter_req_get(struct ath6kl *ar)
{
return ar->hif_ops->scatter_req_get(ar);
}
static inline void hif_scatter_req_add(struct ath6kl *ar,
struct hif_scatter_req *s_req)
{
return ar->hif_ops->scatter_req_add(ar, s_req);
}
static inline int ath6kl_hif_enable_scatter(struct ath6kl *ar)
{
return ar->hif_ops->enable_scatter(ar);
}
static inline int ath6kl_hif_scat_req_rw(struct ath6kl *ar,
struct hif_scatter_req *scat_req)
{
return ar->hif_ops->scat_req_rw(ar, scat_req);
}
static inline void ath6kl_hif_cleanup_scatter(struct ath6kl *ar)
{
return ar->hif_ops->cleanup_scatter(ar);
}
static inline int ath6kl_hif_suspend(struct ath6kl *ar,
struct cfg80211_wowlan *wow)
{
ath6kl_dbg(ATH6KL_DBG_HIF, "hif suspend\n");
return ar->hif_ops->suspend(ar, wow);
}
/*
* Read from the ATH6KL through its diagnostic window. No cooperation from
* the Target is required for this.
*/
static inline int ath6kl_hif_diag_read32(struct ath6kl *ar, u32 address,
u32 *value)
{
return ar->hif_ops->diag_read32(ar, address, value);
}
/*
* Write to the ATH6KL through its diagnostic window. No cooperation from
* the Target is required for this.
*/
static inline int ath6kl_hif_diag_write32(struct ath6kl *ar, u32 address,
__le32 value)
{
return ar->hif_ops->diag_write32(ar, address, value);
}
static inline int ath6kl_hif_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
{
return ar->hif_ops->bmi_read(ar, buf, len);
}
static inline int ath6kl_hif_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
{
return ar->hif_ops->bmi_write(ar, buf, len);
}
static inline int ath6kl_hif_resume(struct ath6kl *ar)
{
ath6kl_dbg(ATH6KL_DBG_HIF, "hif resume\n");
return ar->hif_ops->resume(ar);
}
static inline int ath6kl_hif_power_on(struct ath6kl *ar)
{
ath6kl_dbg(ATH6KL_DBG_HIF, "hif power on\n");
return ar->hif_ops->power_on(ar);
}
static inline int ath6kl_hif_power_off(struct ath6kl *ar)
{
ath6kl_dbg(ATH6KL_DBG_HIF, "hif power off\n");
return ar->hif_ops->power_off(ar);
}
static inline void ath6kl_hif_stop(struct ath6kl *ar)
{
ath6kl_dbg(ATH6KL_DBG_HIF, "hif stop\n");
ar->hif_ops->stop(ar);
}
static inline int ath6kl_hif_pipe_send(struct ath6kl *ar,
u8 pipe, struct sk_buff *hdr_buf,
struct sk_buff *buf)
{
ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe send\n");
return ar->hif_ops->pipe_send(ar, pipe, hdr_buf, buf);
}
static inline void ath6kl_hif_pipe_get_default(struct ath6kl *ar,
u8 *ul_pipe, u8 *dl_pipe)
{
ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe get default\n");
ar->hif_ops->pipe_get_default(ar, ul_pipe, dl_pipe);
}
static inline int ath6kl_hif_pipe_map_service(struct ath6kl *ar,
u16 service_id, u8 *ul_pipe,
u8 *dl_pipe)
{
ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe get default\n");
return ar->hif_ops->pipe_map_service(ar, service_id, ul_pipe, dl_pipe);
}
static inline u16 ath6kl_hif_pipe_get_free_queue_number(struct ath6kl *ar,
u8 pipe)
{
ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe get free queue number\n");
return ar->hif_ops->pipe_get_free_queue_number(ar, pipe);
}
#endif

View file

@ -0,0 +1,702 @@
/*
* Copyright (c) 2007-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "hif.h"
#include <linux/export.h>
#include "core.h"
#include "target.h"
#include "hif-ops.h"
#include "debug.h"
#include "trace.h"
#define MAILBOX_FOR_BLOCK_SIZE 1
#define ATH6KL_TIME_QUANTUM 10 /* in ms */
static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
bool from_dma)
{
u8 *buf;
int i;
buf = req->virt_dma_buf;
for (i = 0; i < req->scat_entries; i++) {
if (from_dma)
memcpy(req->scat_list[i].buf, buf,
req->scat_list[i].len);
else
memcpy(buf, req->scat_list[i].buf,
req->scat_list[i].len);
buf += req->scat_list[i].len;
}
return 0;
}
int ath6kl_hif_rw_comp_handler(void *context, int status)
{
struct htc_packet *packet = context;
ath6kl_dbg(ATH6KL_DBG_HIF, "hif rw completion pkt 0x%p status %d\n",
packet, status);
packet->status = status;
packet->completion(packet->context, packet);
return 0;
}
EXPORT_SYMBOL(ath6kl_hif_rw_comp_handler);
#define REG_DUMP_COUNT_AR6003 60
#define REGISTER_DUMP_LEN_MAX 60
static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
{
__le32 regdump_val[REGISTER_DUMP_LEN_MAX];
u32 i, address, regdump_addr = 0;
int ret;
if (ar->target_type != TARGET_TYPE_AR6003)
return;
/* the reg dump pointer is copied to the host interest area */
address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
address = TARG_VTOP(ar->target_type, address);
/* read RAM location through diagnostic window */
ret = ath6kl_diag_read32(ar, address, &regdump_addr);
if (ret || !regdump_addr) {
ath6kl_warn("failed to get ptr to register dump area: %d\n",
ret);
return;
}
ath6kl_dbg(ATH6KL_DBG_IRQ, "register dump data address 0x%x\n",
regdump_addr);
regdump_addr = TARG_VTOP(ar->target_type, regdump_addr);
/* fetch register dump data */
ret = ath6kl_diag_read(ar, regdump_addr, (u8 *)&regdump_val[0],
REG_DUMP_COUNT_AR6003 * (sizeof(u32)));
if (ret) {
ath6kl_warn("failed to get register dump: %d\n", ret);
return;
}
ath6kl_info("crash dump:\n");
ath6kl_info("hw 0x%x fw %s\n", ar->wiphy->hw_version,
ar->wiphy->fw_version);
BUILD_BUG_ON(REG_DUMP_COUNT_AR6003 % 4);
for (i = 0; i < REG_DUMP_COUNT_AR6003; i += 4) {
ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n",
i,
le32_to_cpu(regdump_val[i]),
le32_to_cpu(regdump_val[i + 1]),
le32_to_cpu(regdump_val[i + 2]),
le32_to_cpu(regdump_val[i + 3]));
}
}
static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
{
u32 dummy;
int ret;
ath6kl_warn("firmware crashed\n");
/*
* read counter to clear the interrupt, the debug error interrupt is
* counter 0.
*/
ret = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
(u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC);
if (ret)
ath6kl_warn("Failed to clear debug interrupt: %d\n", ret);
ath6kl_hif_dump_fw_crash(dev->ar);
ath6kl_read_fwlogs(dev->ar);
ath6kl_recovery_err_notify(dev->ar, ATH6KL_FW_ASSERT);
return ret;
}
/* mailbox recv message polling */
int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
int timeout)
{
struct ath6kl_irq_proc_registers *rg;
int status = 0, i;
u8 htc_mbox = 1 << HTC_MAILBOX;
for (i = timeout / ATH6KL_TIME_QUANTUM; i > 0; i--) {
/* this is the standard HIF way, load the reg table */
status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
(u8 *) &dev->irq_proc_reg,
sizeof(dev->irq_proc_reg),
HIF_RD_SYNC_BYTE_INC);
if (status) {
ath6kl_err("failed to read reg table\n");
return status;
}
/* check for MBOX data and valid lookahead */
if (dev->irq_proc_reg.host_int_status & htc_mbox) {
if (dev->irq_proc_reg.rx_lkahd_valid &
htc_mbox) {
/*
* Mailbox has a message and the look ahead
* is valid.
*/
rg = &dev->irq_proc_reg;
*lk_ahd =
le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
break;
}
}
/* delay a little */
mdelay(ATH6KL_TIME_QUANTUM);
ath6kl_dbg(ATH6KL_DBG_HIF, "hif retry mbox poll try %d\n", i);
}
if (i == 0) {
ath6kl_err("timeout waiting for recv message\n");
status = -ETIME;
/* check if the target asserted */
if (dev->irq_proc_reg.counter_int_status &
ATH6KL_TARGET_DEBUG_INTR_MASK)
/*
* Target failure handler will be called in case of
* an assert.
*/
ath6kl_hif_proc_dbg_intr(dev);
}
return status;
}
/*
* Disable packet reception (used in case the host runs out of buffers)
* using the interrupt enable registers through the host I/F
*/
int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx)
{
struct ath6kl_irq_enable_reg regs;
int status = 0;
ath6kl_dbg(ATH6KL_DBG_HIF, "hif rx %s\n",
enable_rx ? "enable" : "disable");
/* take the lock to protect interrupt enable shadows */
spin_lock_bh(&dev->lock);
if (enable_rx)
dev->irq_en_reg.int_status_en |=
SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
else
dev->irq_en_reg.int_status_en &=
~SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
spin_unlock_bh(&dev->lock);
status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
&regs.int_status_en,
sizeof(struct ath6kl_irq_enable_reg),
HIF_WR_SYNC_BYTE_INC);
return status;
}
int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
struct hif_scatter_req *scat_req, bool read)
{
int status = 0;
if (read) {
scat_req->req = HIF_RD_SYNC_BLOCK_FIX;
scat_req->addr = dev->ar->mbox_info.htc_addr;
} else {
scat_req->req = HIF_WR_ASYNC_BLOCK_INC;
scat_req->addr =
(scat_req->len > HIF_MBOX_WIDTH) ?
dev->ar->mbox_info.htc_ext_addr :
dev->ar->mbox_info.htc_addr;
}
ath6kl_dbg(ATH6KL_DBG_HIF,
"hif submit scatter request entries %d len %d mbox 0x%x %s %s\n",
scat_req->scat_entries, scat_req->len,
scat_req->addr, !read ? "async" : "sync",
(read) ? "rd" : "wr");
if (!read && scat_req->virt_scat) {
status = ath6kl_hif_cp_scat_dma_buf(scat_req, false);
if (status) {
scat_req->status = status;
scat_req->complete(dev->ar->htc_target, scat_req);
return 0;
}
}
status = ath6kl_hif_scat_req_rw(dev->ar, scat_req);
if (read) {
/* in sync mode, we can touch the scatter request */
scat_req->status = status;
if (!status && scat_req->virt_scat)
scat_req->status =
ath6kl_hif_cp_scat_dma_buf(scat_req, true);
}
return status;
}
static int ath6kl_hif_proc_counter_intr(struct ath6kl_device *dev)
{
u8 counter_int_status;
ath6kl_dbg(ATH6KL_DBG_IRQ, "counter interrupt\n");
counter_int_status = dev->irq_proc_reg.counter_int_status &
dev->irq_en_reg.cntr_int_status_en;
ath6kl_dbg(ATH6KL_DBG_IRQ,
"valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n",
counter_int_status);
/*
* NOTE: other modules like GMBOX may use the counter interrupt for
* credit flow control on other counters; we only need to check for
* the debug assertion counter interrupt.
*/
if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK)
return ath6kl_hif_proc_dbg_intr(dev);
return 0;
}
static int ath6kl_hif_proc_err_intr(struct ath6kl_device *dev)
{
int status;
u8 error_int_status;
u8 reg_buf[4];
ath6kl_dbg(ATH6KL_DBG_IRQ, "error interrupt\n");
error_int_status = dev->irq_proc_reg.error_int_status & 0x0F;
if (!error_int_status) {
WARN_ON(1);
return -EIO;
}
ath6kl_dbg(ATH6KL_DBG_IRQ,
"valid interrupt source(s) in ERROR_INT_STATUS: 0x%x\n",
error_int_status);
if (MS(ERROR_INT_STATUS_WAKEUP, error_int_status))
ath6kl_dbg(ATH6KL_DBG_IRQ, "error : wakeup\n");
if (MS(ERROR_INT_STATUS_RX_UNDERFLOW, error_int_status))
ath6kl_err("rx underflow\n");
if (MS(ERROR_INT_STATUS_TX_OVERFLOW, error_int_status))
ath6kl_err("tx overflow\n");
/* Clear the interrupt */
dev->irq_proc_reg.error_int_status &= ~error_int_status;
/* set W1C value to clear the interrupt, this hits the register first */
reg_buf[0] = error_int_status;
reg_buf[1] = 0;
reg_buf[2] = 0;
reg_buf[3] = 0;
status = hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS,
reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
WARN_ON(status);
return status;
}
static int ath6kl_hif_proc_cpu_intr(struct ath6kl_device *dev)
{
int status;
u8 cpu_int_status;
u8 reg_buf[4];
ath6kl_dbg(ATH6KL_DBG_IRQ, "cpu interrupt\n");
cpu_int_status = dev->irq_proc_reg.cpu_int_status &
dev->irq_en_reg.cpu_int_status_en;
if (!cpu_int_status) {
WARN_ON(1);
return -EIO;
}
ath6kl_dbg(ATH6KL_DBG_IRQ,
"valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n",
cpu_int_status);
/* Clear the interrupt */
dev->irq_proc_reg.cpu_int_status &= ~cpu_int_status;
/*
* Set up the register transfer buffer to hit the register 4 times;
* this is done to make the access 4-byte aligned, mitigating issues
* with host bus interconnects that restrict bus transfer lengths to
* a multiple of 4 bytes.
*/
/* set W1C value to clear the interrupt, this hits the register first */
reg_buf[0] = cpu_int_status;
/* the remaining bytes are set to zero and have no effect */
reg_buf[1] = 0;
reg_buf[2] = 0;
reg_buf[3] = 0;
status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS,
reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
WARN_ON(status);
return status;
}
/* process pending interrupts synchronously */
static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
{
struct ath6kl_irq_proc_registers *rg;
int status = 0;
u8 host_int_status = 0;
u32 lk_ahd = 0;
u8 htc_mbox = 1 << HTC_MAILBOX;
ath6kl_dbg(ATH6KL_DBG_IRQ, "proc_pending_irqs: (dev: 0x%p)\n", dev);
/*
* NOTE: HIF implementation guarantees that the context of this
* call allows us to perform SYNCHRONOUS I/O, that is we can block,
* sleep or call any API that can block or switch thread/task
* contexts. This is a fully schedulable context.
*/
/*
* Process pending interrupts only when int_status_en is set; reading
* the status registers otherwise would just add unnecessary bus
* transactions, and the target may be unresponsive at the time.
*/
if (dev->irq_en_reg.int_status_en) {
/*
* Read the first 28 bytes of the HTC register table. This
* will yield us the value of different int status
* registers and the lookahead registers.
*
* length = sizeof(int_status) + sizeof(cpu_int_status)
* + sizeof(error_int_status) +
* sizeof(counter_int_status) +
* sizeof(mbox_frame) + sizeof(rx_lkahd_valid)
* + sizeof(hole) + sizeof(rx_lkahd) +
* sizeof(int_status_en) +
* sizeof(cpu_int_status_en) +
* sizeof(err_int_status_en) +
* sizeof(cntr_int_status_en);
*/
status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
(u8 *) &dev->irq_proc_reg,
sizeof(dev->irq_proc_reg),
HIF_RD_SYNC_BYTE_INC);
if (status)
goto out;
ath6kl_dump_registers(dev, &dev->irq_proc_reg,
&dev->irq_en_reg);
trace_ath6kl_sdio_irq(&dev->irq_en_reg,
sizeof(dev->irq_en_reg));
/* Update only those registers that are enabled */
host_int_status = dev->irq_proc_reg.host_int_status &
dev->irq_en_reg.int_status_en;
/* Look at mbox status */
if (host_int_status & htc_mbox) {
/*
* Mask out the pending mbox value; we use "lookAhead" as
* the real flag for mbox processing.
*/
host_int_status &= ~htc_mbox;
if (dev->irq_proc_reg.rx_lkahd_valid &
htc_mbox) {
rg = &dev->irq_proc_reg;
lk_ahd = le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
if (!lk_ahd)
ath6kl_err("lookAhead is zero!\n");
}
}
}
if (!host_int_status && !lk_ahd) {
*done = true;
goto out;
}
if (lk_ahd) {
int fetched = 0;
ath6kl_dbg(ATH6KL_DBG_IRQ,
"pending mailbox msg, lk_ahd: 0x%X\n", lk_ahd);
/*
* Mailbox interrupt: the HTC layer may issue async
* requests to empty the mailbox. When emptying the recv
* mailbox we use the async handler above, called from the
* completion routine of the caller's read request. This can
* improve performance by reducing context switching when
* we rapidly pull packets.
*/
status = ath6kl_htc_rxmsg_pending_handler(dev->htc_cnxt,
lk_ahd, &fetched);
if (status)
goto out;
if (!fetched)
/*
* HTC could not pull any messages out due to lack
* of resources.
*/
dev->htc_cnxt->chk_irq_status_cnt = 0;
}
/* now handle the rest of them */
ath6kl_dbg(ATH6KL_DBG_IRQ,
"valid interrupt source(s) for other interrupts: 0x%x\n",
host_int_status);
if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
/* CPU Interrupt */
status = ath6kl_hif_proc_cpu_intr(dev);
if (status)
goto out;
}
if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
/* Error Interrupt */
status = ath6kl_hif_proc_err_intr(dev);
if (status)
goto out;
}
if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
/* Counter Interrupt */
status = ath6kl_hif_proc_counter_intr(dev);
out:
/*
* An optimization to bypass reading the IRQ status registers
* unnecessarily, which can re-wake the target: if upper layers
* determine that we are in a low-throughput mode, we can rely on
* taking another interrupt rather than re-checking the status
* registers, which can re-wake the target.
*
* NOTE: host interfaces that detect pending mbox messages at the HIF
* level cannot use this optimization due to possible side effects;
* SPI requires the host to drain all messages from the mailbox before
* exiting the ISR routine.
*/
if (!dev->htc_cnxt->chk_irq_status_cnt) {
	ath6kl_dbg(ATH6KL_DBG_IRQ,
		   "bypassing irq status re-check, forcing done\n");
	*done = true;
}
ath6kl_dbg(ATH6KL_DBG_IRQ,
"proc_pending_irqs: (done:%d, status=%d\n", *done, status);
return status;
}
/* interrupt handler, kicks off all interrupt processing */
int ath6kl_hif_intr_bh_handler(struct ath6kl *ar)
{
struct ath6kl_device *dev = ar->htc_target->dev;
unsigned long timeout;
int status = 0;
bool done = false;
/*
* Reset counter used to flag a re-scan of IRQ status registers on
* the target.
*/
dev->htc_cnxt->chk_irq_status_cnt = 0;
/*
* IRQ processing is synchronous, interrupt status registers can be
* re-read.
*/
timeout = jiffies + msecs_to_jiffies(ATH6KL_HIF_COMMUNICATION_TIMEOUT);
while (time_before(jiffies, timeout) && !done) {
status = proc_pending_irqs(dev, &done);
if (status)
break;
}
return status;
}
EXPORT_SYMBOL(ath6kl_hif_intr_bh_handler);
static int ath6kl_hif_enable_intrs(struct ath6kl_device *dev)
{
struct ath6kl_irq_enable_reg regs;
int status;
spin_lock_bh(&dev->lock);
/* Enable all but ATH6KL CPU interrupts */
dev->irq_en_reg.int_status_en =
SM(INT_STATUS_ENABLE_ERROR, 0x01) |
SM(INT_STATUS_ENABLE_CPU, 0x01) |
SM(INT_STATUS_ENABLE_COUNTER, 0x01);
/*
* NOTE: there are some cases where HIF can do detection of
* pending mbox messages itself; that is disabled for now.
*/
dev->irq_en_reg.int_status_en |= SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
/* Set up the CPU Interrupt status Register */
dev->irq_en_reg.cpu_int_status_en = 0;
/* Set up the Error Interrupt status Register */
dev->irq_en_reg.err_int_status_en =
SM(ERROR_STATUS_ENABLE_RX_UNDERFLOW, 0x01) |
SM(ERROR_STATUS_ENABLE_TX_OVERFLOW, 0x1);
/*
* Enable Counter interrupt status register to get fatal errors for
* debugging.
*/
dev->irq_en_reg.cntr_int_status_en = SM(COUNTER_INT_STATUS_ENABLE_BIT,
ATH6KL_TARGET_DEBUG_INTR_MASK);
memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
spin_unlock_bh(&dev->lock);
status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
&regs.int_status_en, sizeof(regs),
HIF_WR_SYNC_BYTE_INC);
if (status)
ath6kl_err("failed to update interrupt ctl reg err: %d\n",
status);
return status;
}
int ath6kl_hif_disable_intrs(struct ath6kl_device *dev)
{
struct ath6kl_irq_enable_reg regs;
spin_lock_bh(&dev->lock);
/* Disable all interrupts */
dev->irq_en_reg.int_status_en = 0;
dev->irq_en_reg.cpu_int_status_en = 0;
dev->irq_en_reg.err_int_status_en = 0;
dev->irq_en_reg.cntr_int_status_en = 0;
memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
spin_unlock_bh(&dev->lock);
return hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
&regs.int_status_en, sizeof(regs),
HIF_WR_SYNC_BYTE_INC);
}
/* enable device interrupts */
int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev)
{
int status = 0;
/*
* Make sure interrupts are disabled before unmasking at the HIF
* layer. The rationale here is that between device insertion
* (where we clear the interrupts the first time) and when HTC
* is finally ready to handle interrupts, other software can perform
* target "soft" resets. The ATH6KL interrupt enables are reset back
* to an "enabled" state when this happens.
*/
ath6kl_hif_disable_intrs(dev);
/* unmask the host controller interrupts */
ath6kl_hif_irq_enable(dev->ar);
status = ath6kl_hif_enable_intrs(dev);
return status;
}
/* disable all device interrupts */
int ath6kl_hif_mask_intrs(struct ath6kl_device *dev)
{
/*
* Mask the interrupt at the HIF layer to avoid any stray interrupt
* taken while we zero out our shadow registers in
* ath6kl_hif_disable_intrs().
*/
ath6kl_hif_irq_disable(dev->ar);
return ath6kl_hif_disable_intrs(dev);
}
int ath6kl_hif_setup(struct ath6kl_device *dev)
{
int status = 0;
spin_lock_init(&dev->lock);
/*
* NOTE: we actually get the block size of a mailbox other than 0,
* for SDIO the block size on mailbox 0 is artificially set to 1.
* So we use the block size that is set for the other 3 mailboxes.
*/
dev->htc_cnxt->block_sz = dev->ar->mbox_info.block_size;
/* must be a power of 2 */
if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) {
WARN_ON(1);
status = -EINVAL;
goto fail_setup;
}
/* assemble mask, used for padding to a block */
dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1;
ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n",
dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr);
status = ath6kl_hif_disable_intrs(dev);
fail_setup:
return status;
}
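/*
 * Illustrative sketch, not part of the original source: with block_sz a
 * power of two, the block_mask assembled above lets a transfer length be
 * rounded up to a whole number of mailbox blocks without a divide. The
 * helper name is hypothetical.
 */
static inline u32 example_pad_to_block(struct htc_target *htc_cnxt, u32 len)
{
	/* e.g. len = 100 with block_sz = 128 yields 128 */
	return (len + htc_cnxt->block_mask) & ~htc_cnxt->block_mask;
}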

View file

@ -0,0 +1,282 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef HIF_H
#define HIF_H
#include "common.h"
#include "core.h"
#include <linux/scatterlist.h>
#define BUS_REQUEST_MAX_NUM 64
#define HIF_MBOX_BLOCK_SIZE 128
#define HIF_MBOX0_BLOCK_SIZE 1
#define HIF_DMA_BUFFER_SIZE (32 * 1024)
#define CMD53_FIXED_ADDRESS 1
#define CMD53_INCR_ADDRESS 2
#define MAX_SCATTER_REQUESTS 4
#define MAX_SCATTER_ENTRIES_PER_REQ 16
#define MAX_SCATTER_REQ_TRANSFER_SIZE (32 * 1024)
#define MANUFACTURER_ID_AR6003_BASE 0x300
#define MANUFACTURER_ID_AR6004_BASE 0x400
/* SDIO manufacturer ID and Codes */
#define MANUFACTURER_ID_ATH6KL_BASE_MASK 0xFF00
#define MANUFACTURER_CODE 0x271 /* Atheros */
/* Mailbox address in SDIO address space */
#define HIF_MBOX_BASE_ADDR 0x800
#define HIF_MBOX_WIDTH 0x800
#define HIF_MBOX_END_ADDR (HTC_MAILBOX_NUM_MAX * HIF_MBOX_WIDTH - 1)
/* version 1 of the chip has only a 12K extended mbox range */
#define HIF_MBOX0_EXT_BASE_ADDR 0x4000
#define HIF_MBOX0_EXT_WIDTH (12*1024)
/* GMBOX addresses */
#define HIF_GMBOX_BASE_ADDR 0x7000
#define HIF_GMBOX_WIDTH 0x4000
/* interrupt mode register */
#define CCCR_SDIO_IRQ_MODE_REG 0xF0
/* mode to enable special 4-bit interrupt assertion without clock */
#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ (1 << 0)
/* HTC runs over mailbox 0 */
#define HTC_MAILBOX 0
#define ATH6KL_TARGET_DEBUG_INTR_MASK 0x01
/* FIXME: are these duplicates with MAX_SCATTER_ values in hif.h? */
#define ATH6KL_SCATTER_ENTRIES_PER_REQ 16
#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER (16 * 1024)
#define ATH6KL_SCATTER_REQS 4
#define ATH6KL_HIF_COMMUNICATION_TIMEOUT 1000
struct bus_request {
struct list_head list;
/* request data */
u32 address;
u8 *buffer;
u32 length;
u32 request;
struct htc_packet *packet;
int status;
/* this is a scatter request */
struct hif_scatter_req *scat_req;
};
/* direction of transfer (read/write) */
#define HIF_READ 0x00000001
#define HIF_WRITE 0x00000002
#define HIF_DIR_MASK (HIF_READ | HIF_WRITE)
/*
* emode - This indicates whether the command is to be executed in a
* blocking or non-blocking fashion (HIF_SYNCHRONOUS/
* HIF_ASYNCHRONOUS). The read/write data paths in HTC have been
* implemented using the asynchronous mode, allowing the bus
* driver to indicate the completion of operation through the
* registered callback routine. The requirement primarily comes
* from the contexts these operations get called from (a driver's
* transmit context or the ISR context in case of receive).
* Support for both of these modes is essential.
*/
#define HIF_SYNCHRONOUS 0x00000010
#define HIF_ASYNCHRONOUS 0x00000020
#define HIF_EMODE_MASK (HIF_SYNCHRONOUS | HIF_ASYNCHRONOUS)
/*
* dmode - An interface may support different kinds of commands based on
* the tradeoff between the amount of data it can carry and the
* setup time. Byte and Block modes are supported (HIF_BYTE_BASIS/
* HIF_BLOCK_BASIS). In case of the latter, the data is rounded off
* to the nearest block size by padding. The size of the block is
* configurable at compile time using the HIF_BLOCK_SIZE and is
* negotiated with the target during initialization after the
* ATH6KL interrupts are enabled.
*/
#define HIF_BYTE_BASIS 0x00000040
#define HIF_BLOCK_BASIS 0x00000080
#define HIF_DMODE_MASK (HIF_BYTE_BASIS | HIF_BLOCK_BASIS)
/*
* amode - This indicates if the address has to be incremented on ATH6KL
* after every read/write operation (HIF_FIXED_ADDRESS/
* HIF_INCREMENTAL_ADDRESS).
*/
#define HIF_FIXED_ADDRESS 0x00000100
#define HIF_INCREMENTAL_ADDRESS 0x00000200
#define HIF_AMODE_MASK (HIF_FIXED_ADDRESS | HIF_INCREMENTAL_ADDRESS)
#define HIF_WR_ASYNC_BYTE_INC \
(HIF_WRITE | HIF_ASYNCHRONOUS | \
HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
#define HIF_WR_ASYNC_BLOCK_INC \
(HIF_WRITE | HIF_ASYNCHRONOUS | \
HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
#define HIF_WR_SYNC_BYTE_FIX \
(HIF_WRITE | HIF_SYNCHRONOUS | \
HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
#define HIF_WR_SYNC_BYTE_INC \
(HIF_WRITE | HIF_SYNCHRONOUS | \
HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
#define HIF_WR_SYNC_BLOCK_INC \
(HIF_WRITE | HIF_SYNCHRONOUS | \
HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
#define HIF_RD_SYNC_BYTE_INC \
(HIF_READ | HIF_SYNCHRONOUS | \
HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
#define HIF_RD_SYNC_BYTE_FIX \
(HIF_READ | HIF_SYNCHRONOUS | \
HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
#define HIF_RD_ASYNC_BLOCK_FIX \
(HIF_READ | HIF_ASYNCHRONOUS | \
HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
#define HIF_RD_SYNC_BLOCK_FIX \
(HIF_READ | HIF_SYNCHRONOUS | \
HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
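/*
 * Illustrative sketch, not part of the original source: the composite
 * request words above decompose with the direction/emode/dmode masks,
 * e.g. this hypothetical helper tests for an asynchronous block-mode
 * write such as HIF_WR_ASYNC_BLOCK_INC.
 */
static inline bool example_is_async_block_write(u32 request)
{
	return (request & HIF_DIR_MASK) == HIF_WRITE &&
	       (request & HIF_EMODE_MASK) == HIF_ASYNCHRONOUS &&
	       (request & HIF_DMODE_MASK) == HIF_BLOCK_BASIS;
}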
struct hif_scatter_item {
u8 *buf;
int len;
struct htc_packet *packet;
};
struct hif_scatter_req {
struct list_head list;
/* address for the read/write operation */
u32 addr;
/* request flags */
u32 req;
/* total length of entire transfer */
u32 len;
bool virt_scat;
void (*complete) (struct htc_target *, struct hif_scatter_req *);
int status;
int scat_entries;
struct bus_request *busrequest;
struct scatterlist *sgentries;
/* bounce buffer for upper layers to copy to/from */
u8 *virt_dma_buf;
u32 scat_q_depth;
struct hif_scatter_item scat_list[0];
};
struct ath6kl_irq_proc_registers {
u8 host_int_status;
u8 cpu_int_status;
u8 error_int_status;
u8 counter_int_status;
u8 mbox_frame;
u8 rx_lkahd_valid;
u8 host_int_status2;
u8 gmbox_rx_avail;
__le32 rx_lkahd[2];
__le32 rx_gmbox_lkahd_alias[2];
} __packed;
struct ath6kl_irq_enable_reg {
u8 int_status_en;
u8 cpu_int_status_en;
u8 err_int_status_en;
u8 cntr_int_status_en;
} __packed;
struct ath6kl_device {
/* protects irq_proc_reg and irq_en_reg below */
spinlock_t lock;
struct ath6kl_irq_proc_registers irq_proc_reg;
struct ath6kl_irq_enable_reg irq_en_reg;
struct htc_target *htc_cnxt;
struct ath6kl *ar;
};
struct ath6kl_hif_ops {
int (*read_write_sync)(struct ath6kl *ar, u32 addr, u8 *buf,
u32 len, u32 request);
int (*write_async)(struct ath6kl *ar, u32 address, u8 *buffer,
u32 length, u32 request, struct htc_packet *packet);
void (*irq_enable)(struct ath6kl *ar);
void (*irq_disable)(struct ath6kl *ar);
struct hif_scatter_req *(*scatter_req_get)(struct ath6kl *ar);
void (*scatter_req_add)(struct ath6kl *ar,
struct hif_scatter_req *s_req);
int (*enable_scatter)(struct ath6kl *ar);
int (*scat_req_rw) (struct ath6kl *ar,
struct hif_scatter_req *scat_req);
void (*cleanup_scatter)(struct ath6kl *ar);
int (*suspend)(struct ath6kl *ar, struct cfg80211_wowlan *wow);
int (*resume)(struct ath6kl *ar);
int (*diag_read32)(struct ath6kl *ar, u32 address, u32 *value);
int (*diag_write32)(struct ath6kl *ar, u32 address, __le32 value);
int (*bmi_read)(struct ath6kl *ar, u8 *buf, u32 len);
int (*bmi_write)(struct ath6kl *ar, u8 *buf, u32 len);
int (*power_on)(struct ath6kl *ar);
int (*power_off)(struct ath6kl *ar);
void (*stop)(struct ath6kl *ar);
int (*pipe_send)(struct ath6kl *ar, u8 pipe, struct sk_buff *hdr_buf,
struct sk_buff *buf);
void (*pipe_get_default)(struct ath6kl *ar, u8 *pipe_ul, u8 *pipe_dl);
int (*pipe_map_service)(struct ath6kl *ar, u16 service_id, u8 *pipe_ul,
u8 *pipe_dl);
u16 (*pipe_get_free_queue_number)(struct ath6kl *ar, u8 pipe);
};
int ath6kl_hif_setup(struct ath6kl_device *dev);
int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev);
int ath6kl_hif_mask_intrs(struct ath6kl_device *dev);
int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev,
u32 *lk_ahd, int timeout);
int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx);
int ath6kl_hif_disable_intrs(struct ath6kl_device *dev);
int ath6kl_hif_rw_comp_handler(void *context, int status);
int ath6kl_hif_intr_bh_handler(struct ath6kl *ar);
/* Scatter Function and Definitions */
int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
struct hif_scatter_req *scat_req, bool read);
#endif

View file

@ -0,0 +1,113 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef HTC_OPS_H
#define HTC_OPS_H
#include "htc.h"
#include "debug.h"
static inline void *ath6kl_htc_create(struct ath6kl *ar)
{
return ar->htc_ops->create(ar);
}
static inline int ath6kl_htc_wait_target(struct htc_target *target)
{
return target->dev->ar->htc_ops->wait_target(target);
}
static inline int ath6kl_htc_start(struct htc_target *target)
{
return target->dev->ar->htc_ops->start(target);
}
static inline int ath6kl_htc_conn_service(struct htc_target *target,
struct htc_service_connect_req *req,
struct htc_service_connect_resp *resp)
{
return target->dev->ar->htc_ops->conn_service(target, req, resp);
}
static inline int ath6kl_htc_tx(struct htc_target *target,
struct htc_packet *packet)
{
return target->dev->ar->htc_ops->tx(target, packet);
}
static inline void ath6kl_htc_stop(struct htc_target *target)
{
return target->dev->ar->htc_ops->stop(target);
}
static inline void ath6kl_htc_cleanup(struct htc_target *target)
{
return target->dev->ar->htc_ops->cleanup(target);
}
static inline void ath6kl_htc_flush_txep(struct htc_target *target,
enum htc_endpoint_id endpoint,
u16 tag)
{
return target->dev->ar->htc_ops->flush_txep(target, endpoint, tag);
}
static inline void ath6kl_htc_flush_rx_buf(struct htc_target *target)
{
return target->dev->ar->htc_ops->flush_rx_buf(target);
}
static inline void ath6kl_htc_activity_changed(struct htc_target *target,
enum htc_endpoint_id endpoint,
bool active)
{
return target->dev->ar->htc_ops->activity_changed(target, endpoint,
active);
}
static inline int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
enum htc_endpoint_id endpoint)
{
return target->dev->ar->htc_ops->get_rxbuf_num(target, endpoint);
}
static inline int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
struct list_head *pktq)
{
return target->dev->ar->htc_ops->add_rxbuf_multiple(target, pktq);
}
static inline int ath6kl_htc_credit_setup(struct htc_target *target,
struct ath6kl_htc_credit_info *info)
{
return target->dev->ar->htc_ops->credit_setup(target, info);
}
static inline void ath6kl_htc_tx_complete(struct ath6kl *ar,
struct sk_buff *skb)
{
ar->htc_ops->tx_complete(ar, skb);
}
static inline void ath6kl_htc_rx_complete(struct ath6kl *ar,
struct sk_buff *skb, u8 pipe)
{
ar->htc_ops->rx_complete(ar, skb, pipe);
}
#endif

View file

@ -0,0 +1,677 @@
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef HTC_H
#define HTC_H
#include "common.h"
/* frame header flags */
/* send direction */
#define HTC_FLAGS_NEED_CREDIT_UPDATE (1 << 0)
#define HTC_FLAGS_SEND_BUNDLE (1 << 1)
#define HTC_FLAGS_TX_FIXUP_NETBUF (1 << 2)
/* receive direction */
#define HTC_FLG_RX_UNUSED (1 << 0)
#define HTC_FLG_RX_TRAILER (1 << 1)
/* Bundle count mask and shift */
#define HTC_FLG_RX_BNDL_CNT (0xF0)
#define HTC_FLG_RX_BNDL_CNT_S 4
#define HTC_HDR_LENGTH (sizeof(struct htc_frame_hdr))
#define HTC_MAX_PAYLOAD_LENGTH (4096 - sizeof(struct htc_frame_hdr))
/* HTC control message IDs */
#define HTC_MSG_READY_ID 1
#define HTC_MSG_CONN_SVC_ID 2
#define HTC_MSG_CONN_SVC_RESP_ID 3
#define HTC_MSG_SETUP_COMPLETE_ID 4
#define HTC_MSG_SETUP_COMPLETE_EX_ID 5
#define HTC_MAX_CTRL_MSG_LEN 256
#define HTC_VERSION_2P0 0x00
#define HTC_VERSION_2P1 0x01
#define HTC_SERVICE_META_DATA_MAX_LENGTH 128
#define HTC_CONN_FLGS_THRESH_LVL_QUAT 0x0
#define HTC_CONN_FLGS_THRESH_LVL_HALF 0x1
#define HTC_CONN_FLGS_THRESH_LVL_THREE_QUAT 0x2
#define HTC_CONN_FLGS_REDUCE_CRED_DRIB 0x4
#define HTC_CONN_FLGS_THRESH_MASK 0x3
/* disable credit flow control on a specific service */
#define HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL (1 << 3)
#define HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT 8
#define HTC_CONN_FLGS_SET_RECV_ALLOC_MASK 0xFF00U
/* connect response status codes */
#define HTC_SERVICE_SUCCESS 0
#define HTC_SERVICE_NOT_FOUND 1
#define HTC_SERVICE_FAILED 2
/* no resources (i.e. no more endpoints) */
#define HTC_SERVICE_NO_RESOURCES 3
/* specific service is not allowing any more endpoints */
#define HTC_SERVICE_NO_MORE_EP 4
/* report record IDs */
#define HTC_RECORD_NULL 0
#define HTC_RECORD_CREDITS 1
#define HTC_RECORD_LOOKAHEAD 2
#define HTC_RECORD_LOOKAHEAD_BUNDLE 3
#define HTC_SETUP_COMP_FLG_RX_BNDL_EN (1 << 0)
#define HTC_SETUP_COMP_FLG_DISABLE_TX_CREDIT_FLOW (1 << 1)
#define MAKE_SERVICE_ID(group, index) \
(int)(((int)group << 8) | (int)(index))
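/*
 * For example, WMI_DATA_BE_SVC below expands to
 * MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 1) = (1 << 8) | 1 = 0x101.
 */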
/* NOTE: service ID of 0x0000 is reserved and should never be used */
#define HTC_CTRL_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP, 1)
#define WMI_CONTROL_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 0)
#define WMI_DATA_BE_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 1)
#define WMI_DATA_BK_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 2)
#define WMI_DATA_VI_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 3)
#define WMI_DATA_VO_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 4)
#define WMI_MAX_SERVICES 5
#define WMM_NUM_AC 4
/* reserved and used to flush ALL packets */
#define HTC_TX_PACKET_TAG_ALL 0
#define HTC_SERVICE_TX_PACKET_TAG 1
#define HTC_TX_PACKET_TAG_USER_DEFINED (HTC_SERVICE_TX_PACKET_TAG + 9)
/* more packets on this endpoint are being fetched */
#define HTC_RX_FLAGS_INDICATE_MORE_PKTS (1 << 0)
/* TODO.. for BMI */
#define ENDPOINT1 0
/* TODO -remove me, but we have to fix BMI first */
#define HTC_MAILBOX_NUM_MAX 4
/* enable send bundle padding for this endpoint */
#define HTC_FLGS_TX_BNDL_PAD_EN (1 << 0)
#define HTC_EP_ACTIVE ((u32) (1u << 31))
/* HTC operational parameters */
#define HTC_TARGET_RESPONSE_TIMEOUT 2000 /* in ms */
#define HTC_TARGET_RESPONSE_POLL_WAIT 10
#define HTC_TARGET_RESPONSE_POLL_COUNT 200
#define HTC_TARGET_DEBUG_INTR_MASK 0x01
#define HTC_TARGET_CREDIT_INTR_MASK 0xF0
#define HTC_HOST_MAX_MSG_PER_BUNDLE 8
#define HTC_MIN_HTC_MSGS_TO_BUNDLE 2
/* packet flags */
#define HTC_RX_PKT_IGNORE_LOOKAHEAD (1 << 0)
#define HTC_RX_PKT_REFRESH_HDR (1 << 1)
#define HTC_RX_PKT_PART_OF_BUNDLE (1 << 2)
#define HTC_RX_PKT_NO_RECYCLE (1 << 3)
#define NUM_CONTROL_BUFFERS 8
#define NUM_CONTROL_TX_BUFFERS 2
#define NUM_CONTROL_RX_BUFFERS (NUM_CONTROL_BUFFERS - NUM_CONTROL_TX_BUFFERS)
#define HTC_RECV_WAIT_BUFFERS (1 << 0)
#define HTC_OP_STATE_STOPPING (1 << 0)
#define HTC_OP_STATE_SETUP_COMPLETE (1 << 1)
/*
* The frame header length and message formats defined herein were selected
* to accommodate optimal alignment for target processing. This reduces
* code size and improves performance. Any changes to the header length may
* alter the alignment and cause exceptions on the target. When adding to
* the message structures, ensure that fields are properly aligned.
*/
/* HTC frame header
*
* NOTE: do not remove or re-arrange the fields, these are minimally
* required to take advantage of 4-byte lookaheads in some hardware
* implementations.
*/
struct htc_frame_hdr {
u8 eid;
u8 flags;
/* length of data (including trailer) that follows the header */
__le16 payld_len;
/* end of 4-byte lookahead */
u8 ctrl[2];
} __packed;
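/*
 * Illustrative sketch, not part of the original source: because the first
 * four bytes of the header form the lookahead, the raw lookahead bytes can
 * be reinterpreted as a header to peek at the payload length before the
 * full frame is read. The helper name is hypothetical.
 */
static inline u16 example_payld_len_from_lkahd(const u8 *lk_ahd)
{
	const struct htc_frame_hdr *hdr = (const struct htc_frame_hdr *) lk_ahd;

	/* payld_len is little endian on the wire */
	return le16_to_cpu(hdr->payld_len);
}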
/* HTC ready message */
struct htc_ready_msg {
__le16 msg_id;
__le16 cred_cnt;
__le16 cred_sz;
u8 max_ep;
u8 pad;
} __packed;
/* extended HTC ready message */
struct htc_ready_ext_msg {
struct htc_ready_msg ver2_0_info;
u8 htc_ver;
u8 msg_per_htc_bndl;
} __packed;
/* connect service */
struct htc_conn_service_msg {
__le16 msg_id;
__le16 svc_id;
__le16 conn_flags;
u8 svc_meta_len;
u8 pad;
} __packed;
/* connect response */
struct htc_conn_service_resp {
__le16 msg_id;
__le16 svc_id;
u8 status;
u8 eid;
__le16 max_msg_sz;
u8 svc_meta_len;
u8 pad;
} __packed;
struct htc_setup_comp_msg {
__le16 msg_id;
} __packed;
/* extended setup completion message */
struct htc_setup_comp_ext_msg {
__le16 msg_id;
__le32 flags;
u8 msg_per_rxbndl;
u8 Rsvd[3];
} __packed;
struct htc_record_hdr {
u8 rec_id;
u8 len;
} __packed;
struct htc_credit_report {
u8 eid;
u8 credits;
} __packed;
/*
* NOTE: The lk_ahd array is guarded by pre_valid
* and post_valid guard bytes. The pre_valid byte must
* equal the inverse of the post_valid byte.
*/
struct htc_lookahead_report {
u8 pre_valid;
u8 lk_ahd[4];
u8 post_valid;
} __packed;
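/*
 * Illustrative sketch, not part of the original source: per the note above,
 * a lookahead report should only be trusted when the guard bytes are
 * bitwise inverses of each other. The helper name is hypothetical.
 */
static inline bool example_lkahd_report_valid(const struct htc_lookahead_report *rpt)
{
	return rpt->pre_valid == ((~rpt->post_valid) & 0xFF);
}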
struct htc_bundle_lkahd_rpt {
u8 lk_ahd[4];
} __packed;
/* Current service IDs */
enum htc_service_grp_ids {
RSVD_SERVICE_GROUP = 0,
WMI_SERVICE_GROUP = 1,
HTC_TEST_GROUP = 254,
HTC_SERVICE_GROUP_LAST = 255
};
/* ------ endpoint IDS ------ */
enum htc_endpoint_id {
ENDPOINT_UNUSED = -1,
ENDPOINT_0 = 0,
ENDPOINT_1 = 1,
ENDPOINT_2 = 2,
ENDPOINT_3,
ENDPOINT_4,
ENDPOINT_5,
ENDPOINT_6,
ENDPOINT_7,
ENDPOINT_8,
ENDPOINT_MAX,
};
struct htc_tx_packet_info {
u16 tag;
int cred_used;
u8 flags;
int seqno;
};
struct htc_rx_packet_info {
u32 exp_hdr;
u32 rx_flags;
u32 indicat_flags;
};
struct htc_target;
/* wrapper around endpoint-specific packets */
struct htc_packet {
struct list_head list;
/* caller's per packet specific context */
void *pkt_cntxt;
/*
* the true buffer start; the caller can store the real
* buffer start here. In receive callbacks, the HTC layer
* sets buf to the start of the payload past the header.
* This field allows the caller to reset buf when it recycles
* receive packets back to HTC.
*/
u8 *buf_start;
/*
* Pointer to the start of the buffer. In the transmit
* direction this points to the start of the payload. In the
* receive direction, however, the buffer when queued up
* points to the start of the HTC header but when returned
* to the caller points to the start of the payload
*/
u8 *buf;
u32 buf_len;
/* actual length of payload */
u32 act_len;
/* endpoint that this packet was sent/recv'd from */
enum htc_endpoint_id endpoint;
/* completion status */
int status;
union {
struct htc_tx_packet_info tx;
struct htc_rx_packet_info rx;
} info;
void (*completion) (struct htc_target *, struct htc_packet *);
struct htc_target *context;
/*
* optimization for network-oriented data: the HTC packet
* can pass along the network buffer corresponding to the HTC packet;
* lower layers may optimize the transfer knowing this is
* a network buffer
*/
struct sk_buff *skb;
};
enum htc_send_full_action {
HTC_SEND_FULL_KEEP = 0,
HTC_SEND_FULL_DROP = 1,
};
struct htc_ep_callbacks {
void (*tx_complete) (struct htc_target *, struct htc_packet *);
void (*rx) (struct htc_target *, struct htc_packet *);
void (*rx_refill) (struct htc_target *, enum htc_endpoint_id endpoint);
enum htc_send_full_action (*tx_full) (struct htc_target *,
struct htc_packet *);
struct htc_packet *(*rx_allocthresh) (struct htc_target *,
enum htc_endpoint_id, int);
void (*tx_comp_multi) (struct htc_target *, struct list_head *);
int rx_alloc_thresh;
int rx_refill_thresh;
};
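/*
 * Illustrative sketch, not part of the driver: tx_full is called when an
 * endpoint's TX queue is full and its return value tells HTC whether to
 * keep or drop the packet. The callback name and the tag policy below
 * are hypothetical.
 */
static inline enum htc_send_full_action example_tx_full(struct htc_target *target,
							struct htc_packet *packet)
{
	/* hypothetical policy: drop packets the caller tagged with 0 */
	if (packet->info.tx.tag == 0)
		return HTC_SEND_FULL_DROP;

	return HTC_SEND_FULL_KEEP;
}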
/* service connection information */
struct htc_service_connect_req {
u16 svc_id;
u16 conn_flags;
struct htc_ep_callbacks ep_cb;
int max_txq_depth;
u32 flags;
unsigned int max_rxmsg_sz;
};
/* service connection response information */
struct htc_service_connect_resp {
u8 buf_len;
u8 act_len;
enum htc_endpoint_id endpoint;
unsigned int len_max;
u8 resp_code;
};
/* endpoint distribution structure */
struct htc_endpoint_credit_dist {
struct list_head list;
/* Service ID (set by HTC) */
u16 svc_id;
/* endpoint for this distribution struct (set by HTC) */
enum htc_endpoint_id endpoint;
u32 dist_flags;
/*
* credits for normal operation, anything above this
* indicates the endpoint is over-subscribed.
*/
int cred_norm;
/* floor for credit distribution */
int cred_min;
int cred_assngd;
/* current credits available */
int credits;
/*
 * pending credits to distribute on this endpoint; this
 * is set by HTC when credit reports arrive. The credit
 * distribution function sets this to zero when it distributes
 * the credits.
 */
int cred_to_dist;
/*
 * the number of credits that the current pending TX packet needs
 * to transmit. This is set by HTC when the endpoint needs credits
 * in order to transmit.
 */
int seek_cred;
/* size in bytes of each credit */
int cred_sz;
/* credits required for a maximum-sized message */
int cred_per_msg;
/* reserved for HTC use */
struct htc_endpoint *htc_ep;
/*
 * current depth of the TX queue, i.e. messages waiting for credits.
 * This field is valid only when HTC_CREDIT_DIST_ACTIVITY_CHANGE
 * or HTC_CREDIT_DIST_SEND_COMPLETE is indicated on an endpoint
 * that has non-zero credits to recover.
 */
int txq_depth;
};
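/*
 * Illustrative sketch, not part of the driver: per the field comments
 * above, a credit distribution function typically folds the pending
 * cred_to_dist into the endpoint's available credits and then clears it.
 * The helper name is hypothetical.
 */
static inline void example_reap_pending_credits(struct htc_endpoint_credit_dist *ep_dist)
{
	ep_dist->credits += ep_dist->cred_to_dist;
	ep_dist->cred_to_dist = 0;
}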
/*
 * credit distribution code that is passed into the distribution function;
 * there are mandatory and optional codes that must be handled
 */
enum htc_credit_dist_reason {
HTC_CREDIT_DIST_SEND_COMPLETE = 0,
HTC_CREDIT_DIST_ACTIVITY_CHANGE = 1,
HTC_CREDIT_DIST_SEEK_CREDITS,
};
struct ath6kl_htc_credit_info {
int total_avail_credits;
int cur_free_credits;
/* list of lowest priority endpoints */
struct list_head lowestpri_ep_dist;
};
/* endpoint statistics */
struct htc_endpoint_stats {
/*
* number of times the host set the credit-low flag in a send
* message on this endpoint
*/
u32 cred_low_indicate;
u32 tx_issued;
u32 tx_pkt_bundled;
u32 tx_bundles;
u32 tx_dropped;
/* running count of total credit reports received for this endpoint */
u32 tx_cred_rpt;
/* credit reports received from this endpoint's RX packets */
u32 cred_rpt_from_rx;
/* credit reports received from RX packets of other endpoints */
u32 cred_rpt_from_other;
/* credit reports received from endpoint 0 RX packets */
u32 cred_rpt_ep0;
/* count of credits received via Rx packets on this endpoint */
u32 cred_from_rx;
/* count of credits received via another endpoint */
u32 cred_from_other;
/* count of credits received via endpoint 0 */
u32 cred_from_ep0;
/* count of consumed credits */
u32 cred_cosumd;
/* count of credits returned */
u32 cred_retnd;
u32 rx_pkts;
/* count of lookahead records found in Rx msg */
u32 rx_lkahds;
/* count of recv packets received in a bundle */
u32 rx_bundl;
/* count of number of bundled lookaheads */
u32 rx_bundle_lkahd;
/* count of the number of bundle indications from the HTC header */
u32 rx_bundle_from_hdr;
/* the number of times the recv allocation threshold was hit */
u32 rx_alloc_thresh_hit;
/* total number of bytes in packets that hit the recv allocation threshold */
u32 rxalloc_thresh_byte;
};
struct htc_endpoint {
enum htc_endpoint_id eid;
u16 svc_id;
struct list_head txq;
struct list_head rx_bufq;
struct htc_endpoint_credit_dist cred_dist;
struct htc_ep_callbacks ep_cb;
int max_txq_depth;
int len_max;
int tx_proc_cnt;
int rx_proc_cnt;
struct htc_target *target;
u8 seqno;
u32 conn_flags;
struct htc_endpoint_stats ep_st;
u16 tx_drop_packet_threshold;
struct {
u8 pipeid_ul;
u8 pipeid_dl;
struct list_head tx_lookup_queue;
bool tx_credit_flow_enabled;
} pipe;
};
struct htc_control_buffer {
struct htc_packet packet;
u8 *buf;
};
struct htc_pipe_txcredit_alloc {
u16 service_id;
u8 credit_alloc;
};
enum htc_send_queue_result {
HTC_SEND_QUEUE_OK = 0, /* packet was queued */
HTC_SEND_QUEUE_DROP = 1, /* this packet should be dropped */
};
struct ath6kl_htc_ops {
void* (*create)(struct ath6kl *ar);
int (*wait_target)(struct htc_target *target);
int (*start)(struct htc_target *target);
int (*conn_service)(struct htc_target *target,
struct htc_service_connect_req *req,
struct htc_service_connect_resp *resp);
int (*tx)(struct htc_target *target, struct htc_packet *packet);
void (*stop)(struct htc_target *target);
void (*cleanup)(struct htc_target *target);
void (*flush_txep)(struct htc_target *target,
enum htc_endpoint_id endpoint, u16 tag);
void (*flush_rx_buf)(struct htc_target *target);
void (*activity_changed)(struct htc_target *target,
enum htc_endpoint_id endpoint,
bool active);
int (*get_rxbuf_num)(struct htc_target *target,
enum htc_endpoint_id endpoint);
int (*add_rxbuf_multiple)(struct htc_target *target,
struct list_head *pktq);
int (*credit_setup)(struct htc_target *target,
struct ath6kl_htc_credit_info *cred_info);
int (*tx_complete)(struct ath6kl *ar, struct sk_buff *skb);
int (*rx_complete)(struct ath6kl *ar, struct sk_buff *skb, u8 pipe);
};
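/*
 * Illustrative sketch, not part of the driver: the mbox and pipe HTC
 * implementations are reached through this ops table, so a caller
 * dispatches through the function pointers rather than calling either
 * implementation directly. The wrapper name is hypothetical.
 */
static inline int example_htc_tx(const struct ath6kl_htc_ops *ops,
				 struct htc_target *target,
				 struct htc_packet *packet)
{
	return ops->tx(target, packet);
}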
struct ath6kl_device;
/* our HTC target state */
struct htc_target {
struct htc_endpoint endpoint[ENDPOINT_MAX];
/* contains struct htc_endpoint_credit_dist */
struct list_head cred_dist_list;
struct list_head free_ctrl_txbuf;
struct list_head free_ctrl_rxbuf;
struct ath6kl_htc_credit_info *credit_info;
int tgt_creds;
unsigned int tgt_cred_sz;
/* protects free_ctrl_txbuf and free_ctrl_rxbuf */
spinlock_t htc_lock;
/* FIXME: does this protect rx_bufq and endpoint structures or what? */
spinlock_t rx_lock;
/* protects endpoint->txq */
spinlock_t tx_lock;
struct ath6kl_device *dev;
u32 htc_flags;
u32 rx_st_flags;
enum htc_endpoint_id ep_waiting;
u8 htc_tgt_ver;
/* max messages per bundle for HTC */
int msg_per_bndl_max;
u32 tx_bndl_mask;
int rx_bndl_enable;
int max_rx_bndl_sz;
int max_tx_bndl_sz;
u32 block_sz;
u32 block_mask;
int max_scat_entries;
int max_xfer_szper_scatreq;
int chk_irq_status_cnt;
/* counts the number of Tx without bundling continuously per AC */
u32 ac_tx_count[WMM_NUM_AC];
struct {
struct htc_packet *htc_packet_pool;
u8 ctrl_response_buf[HTC_MAX_CTRL_MSG_LEN];
int ctrl_response_len;
bool ctrl_response_valid;
struct htc_pipe_txcredit_alloc txcredit_alloc[ENDPOINT_MAX];
} pipe;
};
int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
u32 msg_look_ahead, int *n_pkts);
static inline void set_htc_pkt_info(struct htc_packet *packet, void *context,
u8 *buf, unsigned int len,
enum htc_endpoint_id eid, u16 tag)
{
packet->pkt_cntxt = context;
packet->buf = buf;
packet->act_len = len;
packet->endpoint = eid;
packet->info.tx.tag = tag;
}
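/*
 * Illustrative sketch, not part of the driver: preparing a packet for
 * transmission with the helper above. The buffer, length, endpoint and
 * tag values are hypothetical.
 */
static inline void example_prep_tx_pkt(struct htc_packet *packet,
				       u8 *buf, unsigned int len)
{
	/* no per-packet context, data endpoint 2, caller-defined tag 1 */
	set_htc_pkt_info(packet, NULL, buf, len, ENDPOINT_2, 1);
}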
static inline void htc_rxpkt_reset(struct htc_packet *packet)
{
packet->buf = packet->buf_start;
packet->act_len = 0;
}
static inline void set_htc_rxpkt_info(struct htc_packet *packet, void *context,
u8 *buf, unsigned long len,
enum htc_endpoint_id eid)
{
packet->pkt_cntxt = context;
packet->buf = buf;
packet->buf_start = buf;
packet->buf_len = len;
packet->endpoint = eid;
}
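/*
 * Illustrative sketch, not part of the driver: a fresh receive buffer is
 * described with set_htc_rxpkt_info() and, after the completed packet has
 * been consumed, rewound with htc_rxpkt_reset() so buf points back at
 * buf_start for reuse. The helper name and endpoint choice are
 * hypothetical.
 */
static inline void example_setup_and_recycle_rx(struct htc_packet *packet,
						u8 *buf, unsigned long buf_len)
{
	/* describe the raw buffer; no per-packet context, control endpoint */
	set_htc_rxpkt_info(packet, NULL, buf, buf_len, ENDPOINT_0);

	/* ... packet is queued, completes and is consumed ... */

	/* rewind buf to buf_start and clear act_len before reuse */
	htc_rxpkt_reset(packet);
}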
static inline int get_queue_depth(struct list_head *queue)
{
struct list_head *tmp_list;
int depth = 0;
list_for_each(tmp_list, queue)
depth++;
return depth;
}
void ath6kl_htc_pipe_attach(struct ath6kl *ar);
void ath6kl_htc_mbox_attach(struct ath6kl *ar);
#endif
