diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/Makefile b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/Makefile index e3801d6a4461..01ba8166efd5 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/Makefile +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/Makefile @@ -8,6 +8,8 @@ CONFIG_BCMDHD_SDIO := y CONFIG_BCMDHD_OOB := y CONFIG_BCMDHD_PROPTXSTATUS := y CONFIG_BCMDHD_AG := y +#CONFIG_DHD_USE_STATIC_BUF := y +CONFIG_VTS_SUPPORT := y DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DBCMDRIVER -DSDTEST \ -DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DBCMFILEIMAGE \ @@ -15,7 +17,7 @@ DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DBCMDRIVER -DSDTEST \ -DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT -DSUPPORT_PM2_ONLY \ -DKEEP_ALIVE -DPKT_FILTER_SUPPORT -DPNO_SUPPORT -DDHDTCPACK_SUPPRESS \ -DDHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT -DRXFRAME_THREAD \ - -DTSQ_MULTIPLIER \ + -DTSQ_MULTIPLIER -DMFP \ -DBCMSDIOH_TXGLOM_EXT -DWL_EXT_IAPSTA \ -DENABLE_INSMOD_NO_FW_LOAD \ -Idrivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd \ @@ -58,11 +60,25 @@ endif ifneq ($(CONFIG_BCMDHD_PCIE),) DHDCFLAGS += \ -DPCIE_FULL_DONGLE -DBCMPCIE -DCUSTOM_DPC_PRIO_SETTING=-1 +ifeq ($(CONFIG_DHD_USE_STATIC_BUF),y) +DHDCFLAGS += -DDHD_USE_STATIC_CTRLBUF +endif DHDOFILES += dhd_pcie.o dhd_pcie_linux.o pcie_core.o dhd_flowring.o \ dhd_msgbuf.o endif +ifeq ($(CONFIG_VTS_SUPPORT),y) +DHDCFLAGS += \ + -DGSCAN_SUPPORT -DRTT_SUPPORT -DCUSTOM_FORCE_NODFS_FLAG \ + -DLINKSTAT_SUPPORT -DDEBUGABILITY -DDBG_PKT_MON -DKEEP_ALIVE -DPKT_FILTER_SUPPORT \ + -DAPF -DNDO_CONFIG_SUPPORT -DRSSI_MONITOR_SUPPORT -DDHDTCPACK_SUPPRESS -DDHD_WAKE_STATUS \ + -DCUSTOM_COUNTRY_CODE -DDHD_FW_COREDUMP -DEXPLICIT_DISCIF_CLEANUP + +DHDOFILES += dhd_debug_linux.o dhd_debug.o bcmxtlv.o \ + dhd_rtt.o bcm_app_utils.o +endif + obj-$(CONFIG_AP6XXX) += bcmdhd.o bcmdhd-objs += $(DHDOFILES) @@ -79,6 +95,7 @@ endif ifeq ($(CONFIG_DHD_USE_STATIC_BUF),y) obj-m += dhd_static_buf.o DHDCFLAGS += -DSTATIC_WL_PRIV_STRUCT 
-DENHANCED_STATIC_BUF +DHDCFLAGS += -DDHD_USE_STATIC_MEMDUMP -DCONFIG_DHD_USE_STATIC_BUF endif ifneq ($(CONFIG_WIRELESS_EXT),) @@ -87,7 +104,7 @@ DHDCFLAGS += -DSOFTAP -DWL_WIRELESS_EXT -DUSE_IW -DWL_ESCAN endif ifneq ($(CONFIG_CFG80211),) DHDOFILES += wl_cfg80211.o wl_cfgp2p.o wl_linux_mon.o wl_cfg_btcoex.o wl_cfgvendor.o -DHDOFILES += dhd_cfg80211.o dhd_cfg_vendor.o +DHDOFILES += dhd_cfg80211.o DHDCFLAGS += -DWL_CFG80211 -DWLP2P -DWL_CFG80211_STA_EVENT -DWL_ENABLE_P2P_IF #DHDCFLAGS += -DWL_IFACE_COMB_NUM_CHANNELS DHDCFLAGS += -DCUSTOM_ROAM_TRIGGER_SETTING=-65 diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/aiutils.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/aiutils.c index 493a2e08ab9c..f88b12336027 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/aiutils.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/aiutils.c @@ -2,7 +2,7 @@ * Misc utility routines for accessing chip-specific features * of the SiliconBackplane-based Broadcom chips. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: aiutils.c 607900 2015-12-22 13:38:53Z $ + * $Id: aiutils.c 625027 2016-03-15 08:20:18Z $ */ #include #include @@ -38,8 +38,8 @@ #include #include "siutils_priv.h" +#include -#define BCM47162_DMP() (0) #define BCM5357_DMP() (0) #define BCM53573_DMP() (0) #define BCM4707_DMP() (0) @@ -94,10 +94,12 @@ get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match) static uint32 get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh, - uint32 *sizel, uint32 *sizeh) + uint32 *sizel, uint32 *sizeh) { uint32 asd, sz, szd; + BCM_REFERENCE(ad); + asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID); if (((asd & ER_TAG1) != ER_ADD) || (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != 
sp) || @@ -127,11 +129,6 @@ get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, u return asd; } -static void -ai_hwfixup(si_info_t *sii) -{ -} - /* parse the enumeration rom to identify all cores */ void @@ -141,6 +138,9 @@ ai_scan(si_t *sih, void *regs, uint devid) si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; chipcregs_t *cc = (chipcregs_t *)regs; uint32 erombase, *eromptr, *eromlim; + axi_wrapper_t * axi_wrapper = sii->axi_wrapper; + + BCM_REFERENCE(devid); erombase = R_REG(sii->osh, &cc->eromptr); @@ -172,9 +172,11 @@ ai_scan(si_t *sih, void *regs, uint devid) return; } eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32)); + sii->axi_num_wrappers = 0; SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n", - regs, erombase, eromptr, eromlim)); + OSL_OBFUSCATE_BUF(regs), erombase, + OSL_OBFUSCATE_BUF(eromptr), OSL_OBFUSCATE_BUF(eromlim))); while (eromptr < eromlim) { uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp; uint32 mpd, asd, addrl, addrh, sizel, sizeh; @@ -187,7 +189,6 @@ ai_scan(si_t *sih, void *regs, uint devid) cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI); if (cia == (ER_END | ER_VALID)) { SI_VMSG(("Found END of erom after %d cores\n", sii->numcores)); - ai_hwfixup(sii); return; } @@ -209,13 +210,29 @@ #ifdef BCMDBG_SI SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, " "nsw = %d, nmp = %d & nsp = %d\n", - mfg, cid, crev, eromptr - 1, nmw, nsw, nmp, nsp)); + mfg, cid, crev, OSL_OBFUSCATE_BUF(eromptr - 1), nmw, nsw, nmp, nsp)); #else BCM_REFERENCE(crev); #endif - if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0)) - continue; + if (CHIPID(sih->chip) == BCM4347_CHIP_ID) { + /* 4347 has more entries for ARM core + * This should apply to all chips but crashes on router + * This is a temp fix to be further analyze + */ + if (nsp == 0) + continue; + } else { + /* Include Default slave 
wrapper for timeout monitoring */ + if ((nsp == 0) || +#if !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT) + ((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || +#endif /* !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT) */ + FALSE) { + continue; + } + } + if ((nmw + nsw == 0)) { /* A component which is not a core */ if (cid == OOB_ROUTER_CORE_ID) { @@ -321,6 +338,17 @@ ai_scan(si_t *sih, void *regs, uint devid) cores_info->wrapba[idx] = addrl; else if (i == 1) cores_info->wrapba2[idx] = addrl; + + + ASSERT(sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS); + axi_wrapper[sii->axi_num_wrappers].mfg = mfg; + axi_wrapper[sii->axi_num_wrappers].cid = cid; + axi_wrapper[sii->axi_num_wrappers].rev = crev; + axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_MASTER_WRAPPER; + axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl; + sii->axi_num_wrappers++; + SI_VMSG(("MASTER WRAPPER: %d, mfg:%x, cid:%x, rev:%x, addr:%x, size:%x\n", + sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel)); } /* And finally slave wrappers */ @@ -334,6 +362,7 @@ ai_scan(si_t *sih, void *regs, uint devid) ASSERT(sii->num_br < SI_MAXBR); sii->br_wrapba[sii->num_br++] = addrl; } + if (asd == 0) { SI_ERROR(("Missing descriptor for SW %d\n", i)); goto error; @@ -346,12 +375,29 @@ ai_scan(si_t *sih, void *regs, uint devid) cores_info->wrapba[idx] = addrl; else if ((nmw == 0) && (i == 1)) cores_info->wrapba2[idx] = addrl; + + /* Include all slave wrappers to the list to + * enable and monitor watchdog timeouts + */ + + ASSERT(sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS); + axi_wrapper[sii->axi_num_wrappers].mfg = mfg; + axi_wrapper[sii->axi_num_wrappers].cid = cid; + axi_wrapper[sii->axi_num_wrappers].rev = crev; + axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_SLAVE_WRAPPER; + axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl; + sii->axi_num_wrappers++; + + SI_VMSG(("SLAVE WRAPPER: %d, mfg:%x, cid:%x, rev:%x, addr:%x, size:%x\n", + sii->axi_num_wrappers, mfg, cid, crev, 
addrl, sizel)); } +#ifndef BCM_BACKPLANE_TIMEOUT /* Don't record bridges */ if (br) continue; +#endif /* Done with core */ sii->numcores++; @@ -370,13 +416,13 @@ error: /* This function changes the logical "focus" to the indicated core. * Return the current core's virtual address. */ -static void * +static volatile void * _ai_setcoreidx(si_t *sih, uint coreidx, uint use_wrap2) { si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint32 addr, wrap, wrap2; - void *regs; + volatile void *regs; if (coreidx >= MIN(sii->numcores, SI_MAXCORES)) return (NULL); @@ -385,11 +431,19 @@ _ai_setcoreidx(si_t *sih, uint coreidx, uint use_wrap2) wrap = cores_info->wrapba[coreidx]; wrap2 = cores_info->wrapba2[coreidx]; - /* - * If the user has provided an interrupt mask enabled function, - * then assert interrupts are disabled before switching the core. - */ - ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg)); +#ifdef BCM_BACKPLANE_TIMEOUT + /* No need to disable interrupts while entering/exiting APB bridge core */ + if ((cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID) && + (cores_info->coreid[sii->curidx] != APB_BRIDGE_CORE_ID)) +#endif /* BCM_BACKPLANE_TIMEOUT */ + { + /* + * If the user has provided an interrupt mask enabled function, + * then assert interrupts are disabled before switching the core. + */ + ASSERT((sii->intrsenabled_fn == NULL) || + !(*(sii)->intrsenabled_fn)((sii)->intr_arg)); + } switch (BUSTYPE(sih->bustype)) { case SI_BUS: @@ -415,8 +469,17 @@ _ai_setcoreidx(si_t *sih, uint coreidx, uint use_wrap2) break; case PCI_BUS: - /* point bar0 window */ - OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr); +#ifdef BCM_BACKPLANE_TIMEOUT + /* No need to set the BAR0 if core is APB Bridge. 
+ * This is to reduce 2 PCI writes while checkng for errlog + */ + if (cores_info->coreid[coreidx] != APB_BRIDGE_CORE_ID) +#endif /* BCM_BACKPLANE_TIMEOUT */ + { + /* point bar0 window */ + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr); + } + regs = sii->curmap; /* point bar0 2nd 4KB window to the primary wrapper */ if (use_wrap2) @@ -451,13 +514,13 @@ _ai_setcoreidx(si_t *sih, uint coreidx, uint use_wrap2) return regs; } -void * +volatile void * ai_setcoreidx(si_t *sih, uint coreidx) { return _ai_setcoreidx(sih, coreidx, 0); } -void * +volatile void * ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx) { return _ai_setcoreidx(sih, coreidx, 1); @@ -553,6 +616,9 @@ error: int ai_numaddrspaces(si_t *sih) { + + BCM_REFERENCE(sih); + return 2; } @@ -604,10 +670,6 @@ ai_flag(si_t *sih) si_info_t *sii = SI_INFO(sih); aidmp_t *ai; - if (BCM47162_DMP()) { - SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__)); - return sii->curidx; - } if (BCM5357_DMP()) { SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__)); return sii->curidx; @@ -650,10 +712,6 @@ ai_flag_alt(si_t *sih) si_info_t *sii = SI_INFO(sih); aidmp_t *ai; - if (BCM47162_DMP()) { - SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__)); - return sii->curidx; - } if (BCM5357_DMP()) { SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__)); return sii->curidx; @@ -679,6 +737,9 @@ ai_flag_alt(si_t *sih) void ai_setint(si_t *sih, int siflag) { + BCM_REFERENCE(sih); + BCM_REFERENCE(siflag); + } uint @@ -745,7 +806,7 @@ uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) { uint origidx = 0; - uint32 *r = NULL; + volatile uint32 *r = NULL; uint w; uint intr_val = 0; bool fast = FALSE; @@ -769,7 +830,7 @@ ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) SI_CORE_SIZE); ASSERT(GOODREGS(cores_info->regs[coreidx])); } - r = (uint32 *)((uchar *)cores_info->regs[coreidx] 
+ regoff); + r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff); } else if (BUSTYPE(sih->bustype) == PCI_BUS) { /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ @@ -777,17 +838,18 @@ ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) /* Chipc registers are mapped at 12KB */ fast = TRUE; - r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff); + r = (volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_CCREGS_OFFSET + regoff); } else if (sii->pub.buscoreidx == coreidx) { /* pci registers are at either in the last 2KB of an 8KB window * or, in pcie and pci rev 13 at 8KB */ fast = TRUE; if (SI_FAST(sii)) - r = (uint32 *)((char *)sii->curmap + + r = (volatile uint32 *)((volatile char *)sii->curmap + PCI_16KB0_PCIREGS_OFFSET + regoff); else - r = (uint32 *)((char *)sii->curmap + + r = (volatile uint32 *)((volatile char *)sii->curmap + ((regoff >= SBCONFIGOFF) ? PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + regoff); @@ -801,7 +863,8 @@ ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) origidx = si_coreidx(&sii->pub); /* switch core */ - r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff); + r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) + + regoff); } ASSERT(r != NULL); @@ -834,10 +897,10 @@ ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) * For accessing registers that would need a core switch, this function will return * NULL. 
*/ -uint32 * +volatile uint32 * ai_corereg_addr(si_t *sih, uint coreidx, uint regoff) { - uint32 *r = NULL; + volatile uint32 *r = NULL; bool fast = FALSE; si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; @@ -858,7 +921,7 @@ ai_corereg_addr(si_t *sih, uint coreidx, uint regoff) SI_CORE_SIZE); ASSERT(GOODREGS(cores_info->regs[coreidx])); } - r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff); + r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff); } else if (BUSTYPE(sih->bustype) == PCI_BUS) { /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ @@ -866,17 +929,18 @@ ai_corereg_addr(si_t *sih, uint coreidx, uint regoff) /* Chipc registers are mapped at 12KB */ fast = TRUE; - r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff); + r = (volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_CCREGS_OFFSET + regoff); } else if (sii->pub.buscoreidx == coreidx) { /* pci registers are at either in the last 2KB of an 8KB window * or, in pcie and pci rev 13 at 8KB */ fast = TRUE; if (SI_FAST(sii)) - r = (uint32 *)((char *)sii->curmap + + r = (volatile uint32 *)((volatile char *)sii->curmap + PCI_16KB0_PCIREGS_OFFSET + regoff); else - r = (uint32 *)((char *)sii->curmap + + r = (volatile uint32 *)((volatile char *)sii->curmap + ((regoff >= SBCONFIGOFF) ? 
PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + regoff); @@ -885,7 +949,7 @@ ai_corereg_addr(si_t *sih, uint coreidx, uint regoff) if (!fast) { ASSERT(sii->curidx == coreidx); - r = (uint32*) ((uchar*)sii->curmap + regoff); + r = (volatile uint32*) ((volatile uchar*)sii->curmap + regoff); } return (r); @@ -904,8 +968,9 @@ ai_core_disable(si_t *sih, uint32 bits) ai = sii->curwrap; /* if core is already in reset, just return */ - if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) + if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) { return; + } /* ensure there are no pending backplane operations */ SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300); @@ -942,24 +1007,17 @@ _ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits) aidmp_t *ai; volatile uint32 dummy; uint loop_counter = 10; -#ifdef CUSTOMER_HW4_DEBUG - printf("%s: bits: 0x%x, resetbits: 0x%x\n", __FUNCTION__, bits, resetbits); -#endif ASSERT(GOODREGS(sii->curwrap)); ai = sii->curwrap; + /* if core is already out of reset, just return */ + /* ensure there are no pending backplane operations */ SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300); -#ifdef CUSTOMER_HW4_DEBUG - printf("%s: resetstatus: %p dummy: %x\n", __FUNCTION__, &ai->resetstatus, dummy); -#endif /* put core into reset state */ -#ifdef CUSTOMER_HW4_DEBUG - printf("%s: resetctrl: %p\n", __FUNCTION__, &ai->resetctrl); -#endif W_REG(sii->osh, &ai->resetctrl, AIRC_RESET); OSL_DELAY(10); @@ -968,9 +1026,6 @@ _ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits) W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN)); dummy = R_REG(sii->osh, &ai->ioctrl); -#ifdef CUSTOMER_HW4_DEBUG - printf("%s: ioctrl: %p dummy: 0x%x\n", __FUNCTION__, &ai->ioctrl, dummy); -#endif BCM_REFERENCE(dummy); /* ensure there are no pending backplane operations */ @@ -984,10 +1039,6 @@ _ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits) /* take core out of reset */ W_REG(sii->osh, &ai->resetctrl, 0); -#ifdef 
CUSTOMER_HW4_DEBUG - printf("%s: loop_counter: %d resetstatus: %p resetctrl: %p\n", - __FUNCTION__, loop_counter, &ai->resetstatus, &ai->resetctrl); -#endif /* ensure there are no pending backplane operations */ SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300); @@ -996,9 +1047,6 @@ _ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits) W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN)); dummy = R_REG(sii->osh, &ai->ioctrl); -#ifdef CUSTOMER_HW4_DEBUG - printf("%s: ioctl: %p dummy: 0x%x\n", __FUNCTION__, &ai->ioctrl, dummy); -#endif BCM_REFERENCE(dummy); OSL_DELAY(1); } @@ -1026,12 +1074,6 @@ ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val) aidmp_t *ai; uint32 w; - - if (BCM47162_DMP()) { - SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0", - __FUNCTION__)); - return; - } if (BCM5357_DMP()) { SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n", __FUNCTION__)); @@ -1066,11 +1108,6 @@ ai_core_cflags(si_t *sih, uint32 mask, uint32 val) aidmp_t *ai; uint32 w; - if (BCM47162_DMP()) { - SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0", - __FUNCTION__)); - return 0; - } if (BCM5357_DMP()) { SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n", __FUNCTION__)); @@ -1107,11 +1144,6 @@ ai_core_sflags(si_t *sih, uint32 mask, uint32 val) aidmp_t *ai; uint32 w; - if (BCM47162_DMP()) { - SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0", - __FUNCTION__)); - return 0; - } if (BCM5357_DMP()) { SI_ERROR(("%s: Accessing USB20H DMP register (iostatus) on 5357\n", __FUNCTION__)); @@ -1148,43 +1180,78 @@ void ai_dumpregs(si_t *sih, struct bcmstrbuf *b) { si_info_t *sii = SI_INFO(sih); - si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; osl_t *osh; aidmp_t *ai; uint i; + uint32 prev_value = 0; + axi_wrapper_t * axi_wrapper = sii->axi_wrapper; + uint32 cfg_reg = 0; + uint bar0_win_offset = 0; osh = sii->osh; - for (i = 0; i < sii->numcores; i++) { - si_setcoreidx(&sii->pub, i); - ai = 
sii->curwrap; - bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]); - if (BCM47162_DMP()) { - bcm_bprintf(b, "Skipping mips74k in 47162a0\n"); - continue; + /* Save and restore wrapper access window */ + if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + if (PCIE_GEN2(sii)) { + cfg_reg = PCIE2_BAR0_CORE2_WIN2; + bar0_win_offset = PCIE2_BAR0_CORE2_WIN2_OFFSET; + } else { + cfg_reg = PCI_BAR0_WIN2; + bar0_win_offset = PCI_BAR0_WIN2_OFFSET; } - if (BCM5357_DMP()) { + + prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4); + + if (prev_value == ID32_INVALID) { + SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value)); + return; + } + } + + bcm_bprintf(b, "ChipNum:%x, ChipRev;%x, BusType:%x, BoardType:%x, BoardVendor:%x\n\n", + sih->chip, sih->chiprev, sih->bustype, sih->boardtype, sih->boardvendor); + + for (i = 0; i < sii->axi_num_wrappers; i++) { + + if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + /* Set BAR0 window to bridge wapper base address */ + OSL_PCI_WRITE_CONFIG(osh, + cfg_reg, 4, axi_wrapper[i].wrapper_addr); + + ai = (aidmp_t *) ((volatile uint8*)sii->curmap + bar0_win_offset); + } else { + ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr; + } + + bcm_bprintf(b, "core 0x%x: core_rev:%d, %s_WR ADDR:%x \n", axi_wrapper[i].cid, + axi_wrapper[i].rev, + axi_wrapper[i].wrapper_type == AI_SLAVE_WRAPPER ? 
"SLAVE" : "MASTER", + axi_wrapper[i].wrapper_addr); + + /* BCM5357_DMP() */ + if (((CHIPID(sih->chip) == BCM5357_CHIP_ID) || + (CHIPID(sih->chip) == BCM4749_CHIP_ID)) && + (sih->chippkg == BCM5357_PKG_ID) && + (axi_wrapper[i].cid == USB20H_CORE_ID)) { bcm_bprintf(b, "Skipping usb20h in 5357\n"); continue; } - if (BCM4707_DMP()) { + + /* BCM4707_DMP() */ + if (BCM4707_CHIP(CHIPID(sih->chip)) && + (axi_wrapper[i].cid == NS_CCB_CORE_ID)) { bcm_bprintf(b, "Skipping chipcommonb in 4707\n"); continue; } - if (PMU_DMP()) { - bcm_bprintf(b, "Skipping pmu core\n"); - continue; - } - - bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x" + bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x " "ioctrlwidth 0x%x iostatuswidth 0x%x\n" "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n" - "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x" + "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x " "errlogaddrlo 0x%x errlogaddrhi 0x%x\n" "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n" - "intstatus 0x%x config 0x%x itcr 0x%x\n", + "intstatus 0x%x config 0x%x itcr 0x%x\n\n", R_REG(osh, &ai->ioctrlset), R_REG(osh, &ai->ioctrlclear), R_REG(osh, &ai->ioctrl), @@ -1207,6 +1274,12 @@ ai_dumpregs(si_t *sih, struct bcmstrbuf *b) R_REG(osh, &ai->config), R_REG(osh, &ai->itcr)); } + + /* Restore the initial wrapper space */ + if (prev_value && cfg_reg) { + OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value); + } + } #endif @@ -1214,47 +1287,340 @@ ai_dumpregs(si_t *sih, struct bcmstrbuf *b) void ai_enable_backplane_timeouts(si_t *sih) { -#ifdef AXI_TIMEOUTS +#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT) si_info_t *sii = SI_INFO(sih); aidmp_t *ai; - int i; + uint32 i; + axi_wrapper_t * axi_wrapper = sii->axi_wrapper; + +#ifdef BCM_BACKPLANE_TIMEOUT + uint32 prev_value = 0; + osl_t *osh = sii->osh; + uint32 cfg_reg = 0; + uint32 offset = 0; +#endif /* BCM_BACKPLANE_TIMEOUT */ + + if ((sii->axi_num_wrappers == 0) || 
+#ifdef BCM_BACKPLANE_TIMEOUT + (!PCIE(sii)) || +#endif /* BCM_BACKPLANE_TIMEOUT */ + FALSE) { + SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n", + __FUNCTION__, sii->axi_num_wrappers, PCIE(sii), + BUSTYPE(sii->pub.bustype), sii->pub.buscoretype)); + return; + } + +#ifdef BCM_BACKPLANE_TIMEOUT + /* Save and restore the wrapper access window */ + if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + if (PCIE_GEN1(sii)) { + cfg_reg = PCI_BAR0_WIN2; + offset = PCI_BAR0_WIN2_OFFSET; + } else if (PCIE_GEN2(sii)) { + cfg_reg = PCIE2_BAR0_CORE2_WIN2; + offset = PCIE2_BAR0_CORE2_WIN2_OFFSET; + } + else { + osl_panic("!PCIE_GEN1 && !PCIE_GEN2\n"); + } + + prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4); + if (prev_value == ID32_INVALID) { + SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value)); + return; + } + } + +#endif /* BCM_BACKPLANE_TIMEOUT */ + + for (i = 0; i < sii->axi_num_wrappers; ++i) { + + if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) { + SI_VMSG(("SKIP ENABLE BPT: MFG:%x, CID:%x, ADDR:%x\n", + axi_wrapper[i].mfg, + axi_wrapper[i].cid, + axi_wrapper[i].wrapper_addr)); + continue; + } + +#ifdef BCM_BACKPLANE_TIMEOUT + if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + /* Set BAR0_CORE2_WIN2 to wapper base address */ + OSL_PCI_WRITE_CONFIG(osh, + cfg_reg, 4, axi_wrapper[i].wrapper_addr); + + /* set AI to BAR0 + Offset corresponding to Gen1 or gen2 */ + ai = (aidmp_t *) ((uint8*)sii->curmap + offset); + } + else +#endif /* BCM_BACKPLANE_TIMEOUT */ + { + ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr; + } - for (i = 0; i < sii->num_br; ++i) { - ai = (aidmp_t *) sii->br_wrapba[i]; W_REG(sii->osh, &ai->errlogctrl, (1 << AIELC_TO_ENAB_SHIFT) | ((AXI_TO_VAL << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK)); + + SI_VMSG(("ENABLED BPT: MFG:%x, CID:%x, ADDR:%x, ERR_CTRL:%x\n", + axi_wrapper[i].mfg, + axi_wrapper[i].cid, + axi_wrapper[i].wrapper_addr, + R_REG(sii->osh, &ai->errlogctrl))); } -#endif /* AXI_TIMEOUTS */ + +#ifdef 
BCM_BACKPLANE_TIMEOUT + /* Restore the initial wrapper space */ + if (prev_value) { + OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value); + } +#endif /* BCM_BACKPLANE_TIMEOUT */ + +#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */ } -void -ai_clear_backplane_to(si_t *sih) +#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT) + +/* slave error is ignored, so account for those cases */ +static uint32 si_ignore_errlog_cnt = 0; + +static bool +ai_ignore_errlog(si_info_t *sii, uint32 lo_addr, uint32 hi_addr, uint32 err_axi_id, uint32 errsts) { -#ifdef AXI_TIMEOUTS + uint32 axi_id; + + /* ignore the BT slave errors if the errlog is to chipcommon addr 0x190 */ + switch (CHIPID(sii->pub.chip)) { + case BCM4350_CHIP_ID: + axi_id = BCM4350_BT_AXI_ID; + break; + case BCM4345_CHIP_ID: + axi_id = BCM4345_BT_AXI_ID; + break; + default: + return FALSE; + } + + /* AXI ID check */ + if ((err_axi_id & AI_ERRLOGID_AXI_ID_MASK) != axi_id) + return FALSE; + + /* slave errors */ + if ((errsts & AIELS_TIMEOUT_MASK) != AIELS_SLAVE_ERR) + return FALSE; + + /* chipc reg 0x190 */ + if ((hi_addr != BT_CC_SPROM_BADREG_HI) || (lo_addr != BT_CC_SPROM_BADREG_LO)) + return FALSE; + + return TRUE; +} +#endif /* defined (AXI_TIMEOUTS) || defined (BCM_BACKPLANE_TIMEOUT) */ + +#ifdef BCM_BACKPLANE_TIMEOUT + +/* Function to return the APB bridge details corresponding to the core */ +bool +ai_get_apb_bridge(si_t * sih, uint32 coreidx, uint32 *apb_id, uint32 * apb_coreuinit) +{ + uint i; + uint32 core_base, core_end; si_info_t *sii = SI_INFO(sih); - aidmp_t *ai; - int i; - uint32 errlogstatus; + static uint32 coreidx_cached = 0, apb_id_cached = 0, apb_coreunit_cached = 0; + uint32 tmp_coreunit = 0; + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; - for (i = 0; i < sii->num_br; ++i) { - ai = (aidmp_t *) sii->br_wrapba[i]; - /* check for backplane timeout & clear backplane hang */ - errlogstatus = R_REG(sii->osh, &ai->errlogstatus); + if (coreidx >= MIN(sii->numcores, 
SI_MAXCORES)) + return FALSE; - if ((errlogstatus & AIELS_TIMEOUT_MASK) != 0) { - /* set ErrDone to clear the condition */ + /* Most of the time apb bridge query will be for d11 core. + * Maintain the last cache and return if found rather than iterating the table + */ + if (coreidx_cached == coreidx) { + *apb_id = apb_id_cached; + *apb_coreuinit = apb_coreunit_cached; + return TRUE; + } + + core_base = cores_info->coresba[coreidx]; + core_end = core_base + cores_info->coresba_size[coreidx]; + + for (i = 0; i < sii->numcores; i++) { + if (cores_info->coreid[i] == APB_BRIDGE_ID) { + uint32 apb_base; + uint32 apb_end; + + apb_base = cores_info->coresba[i]; + apb_end = apb_base + cores_info->coresba_size[i]; + + if ((core_base >= apb_base) && + (core_end <= apb_end)) { + /* Current core is attached to this APB bridge */ + *apb_id = apb_id_cached = APB_BRIDGE_ID; + *apb_coreuinit = apb_coreunit_cached = tmp_coreunit; + coreidx_cached = coreidx; + return TRUE; + } + /* Increment the coreunit */ + tmp_coreunit++; + } + } + + return FALSE; +} + +uint32 +ai_clear_backplane_to_fast(si_t *sih, void * addr) +{ + si_info_t *sii = SI_INFO(sih); + void * curmap = sii->curmap; + bool core_reg = FALSE; + + /* Use fast path only for core register access */ + if ((addr >= curmap) && (addr < (curmap + SI_CORE_SIZE))) { + /* address being accessed is within current core reg map */ + core_reg = TRUE; + } + + if (core_reg) { + uint32 apb_id, apb_coreuinit; + + if (ai_get_apb_bridge(sih, si_coreidx(&sii->pub), + &apb_id, &apb_coreuinit) == TRUE) { + /* Found the APB bridge corresponding to current core, + * Check for bus errors in APB wrapper + */ + return ai_clear_backplane_to_per_core(sih, + apb_id, apb_coreuinit, NULL); + } + } + + /* Default is to poll for errors on all slave wrappers */ + return si_clear_backplane_to(sih); +} +#endif /* BCM_BACKPLANE_TIMEOUT */ + +#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT) +/* + * API to clear the back plane timeout per core. 
+ * Caller may pass optional wrapper address. If present this will be used as + * the wrapper base address. If wrapper base address is provided then caller + * must provide the coreid also. + * If both coreid and wrapper is zero, then err status of current bridge + * will be verified. + */ +uint32 +ai_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void * wrap) +{ + int ret = AXI_WRAP_STS_NONE; + aidmp_t *ai = NULL; + uint32 errlog_status = 0; + si_info_t *sii = SI_INFO(sih); + uint32 errlog_lo = 0, errlog_hi = 0, errlog_id = 0, errlog_flags = 0; + uint32 current_coreidx = si_coreidx(sih); + uint32 target_coreidx = si_findcoreidx(sih, coreid, coreunit); + +#if defined(BCM_BACKPLANE_TIMEOUT) + si_axi_error_t * axi_error = &sih->err_info->axi_error[sih->err_info->count]; +#endif /* BCM_BACKPLANE_TIMEOUT */ + bool restore_core = FALSE; + + if ((sii->axi_num_wrappers == 0) || +#ifdef BCM_BACKPLANE_TIMEOUT + (!PCIE(sii)) || +#endif /* BCM_BACKPLANE_TIMEOUT */ + FALSE) { + SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n", + __FUNCTION__, sii->axi_num_wrappers, PCIE(sii), + BUSTYPE(sii->pub.bustype), sii->pub.buscoretype)); + return AXI_WRAP_STS_NONE; + } + + if (wrap != NULL) { + ai = (aidmp_t *)wrap; + } else if (coreid && (target_coreidx != current_coreidx)) { + + if (ai_setcoreidx(sih, target_coreidx) == NULL) { + /* Unable to set the core */ + SI_PRINT(("Set Code Failed: coreid:%x, unit:%d, target_coreidx:%d\n", + coreid, coreunit, target_coreidx)); + errlog_lo = target_coreidx; + ret = AXI_WRAP_STS_SET_CORE_FAIL; + goto end; + } + + restore_core = TRUE; + ai = (aidmp_t *)si_wrapperregs(sih); + } else { + /* Read error status of current wrapper */ + ai = (aidmp_t *)si_wrapperregs(sih); + + /* Update CoreID to current Core ID */ + coreid = si_coreid(sih); + } + + /* read error log status */ + errlog_status = R_REG(sii->osh, &ai->errlogstatus); + + if (errlog_status == ID32_INVALID) { + /* Do not try to peek further */ + 
SI_PRINT(("%s, errlogstatus:%x - Slave Wrapper:%x\n", + __FUNCTION__, errlog_status, coreid)); + ret = AXI_WRAP_STS_WRAP_RD_ERR; + errlog_lo = (uint32)&ai->errlogstatus; + goto end; + } + + if ((errlog_status & AIELS_TIMEOUT_MASK) != 0) { + uint32 tmp; + uint32 count = 0; + /* set ErrDone to clear the condition */ + W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK); + + /* SPINWAIT on errlogstatus timeout status bits */ + while ((tmp = R_REG(sii->osh, &ai->errlogstatus)) & AIELS_TIMEOUT_MASK) { + + if (tmp == ID32_INVALID) { + SI_PRINT(("%s: prev errlogstatus:%x, errlogstatus:%x\n", + __FUNCTION__, errlog_status, tmp)); + ret = AXI_WRAP_STS_WRAP_RD_ERR; + errlog_lo = (uint32)&ai->errlogstatus; + goto end; + } + /* + * Clear again, to avoid getting stuck in the loop, if a new error + * is logged after we cleared the first timeout + */ W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK); - /* SPINWAIT on errlogstatus timeout status bits */ - while (R_REG(sii->osh, &ai->errlogstatus) & AIELS_TIMEOUT_MASK) - ; - - /* only reset APB Bridge on timeout (not slave error, or dec error) */ - switch (errlogstatus & AIELS_TIMEOUT_MASK) { - case 0x1: - printf("AXI slave error"); + count++; + OSL_DELAY(10); + if ((10 * count) > AI_REG_READ_TIMEOUT) { + errlog_status = tmp; break; - case 0x2: + } + } + + errlog_lo = R_REG(sii->osh, &ai->errlogaddrlo); + errlog_hi = R_REG(sii->osh, &ai->errlogaddrhi); + errlog_id = R_REG(sii->osh, &ai->errlogid); + errlog_flags = R_REG(sii->osh, &ai->errlogflags); + + /* we are already in the error path, so OK to check for the slave error */ + if (ai_ignore_errlog(sii, errlog_lo, errlog_hi, errlog_id, + errlog_status)) { + si_ignore_errlog_cnt++; + goto end; + } + + /* only reset APB Bridge on timeout (not slave error, or dec error) */ + switch (errlog_status & AIELS_TIMEOUT_MASK) { + case AIELS_SLAVE_ERR: + SI_PRINT(("AXI slave error")); + ret = AXI_WRAP_STS_SLAVE_ERR; + break; + + case AIELS_TIMEOUT: /* reset APB Bridge */ OR_REG(sii->osh, 
&ai->resetctrl, AIRC_RESET); /* sync write */ @@ -1263,22 +1629,182 @@ ai_clear_backplane_to(si_t *sih) AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET)); /* sync write */ (void)R_REG(sii->osh, &ai->resetctrl); - printf("AXI timeout"); + SI_PRINT(("AXI timeout")); + ret = AXI_WRAP_STS_TIMEOUT; break; - case 0x3: - printf("AXI decode error"); + + case AIELS_DECODE: + SI_PRINT(("AXI decode error")); + ret = AXI_WRAP_STS_DECODE_ERR; break; default: - ; /* should be impossible */ - } - printf("; APB Bridge %d\n", i); - printf("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x", - R_REG(sii->osh, &ai->errlogaddrlo), - R_REG(sii->osh, &ai->errlogaddrhi), - R_REG(sii->osh, &ai->errlogid), - R_REG(sii->osh, &ai->errlogflags)); - printf(", status 0x%08x\n", errlogstatus); + ASSERT(0); /* should be impossible */ + } + + SI_PRINT(("\tCoreID: %x\n", coreid)); + SI_PRINT(("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x" + ", status 0x%08x\n", + errlog_lo, errlog_hi, errlog_id, errlog_flags, + errlog_status)); + } + +end: + +#if defined(BCM_BACKPLANE_TIMEOUT) + if (axi_error && (ret != AXI_WRAP_STS_NONE)) { + axi_error->error = ret; + axi_error->coreid = coreid; + axi_error->errlog_lo = errlog_lo; + axi_error->errlog_hi = errlog_hi; + axi_error->errlog_id = errlog_id; + axi_error->errlog_flags = errlog_flags; + axi_error->errlog_status = errlog_status; + sih->err_info->count++; + + if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) { + sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1; + SI_PRINT(("AXI Error log overflow\n")); } } -#endif /* AXI_TIMEOUTS */ +#endif /* BCM_BACKPLANE_TIMEOUT */ + + if (restore_core) { + if (ai_setcoreidx(sih, current_coreidx) == NULL) { + /* Unable to set the core */ + return ID32_INVALID; + } + } + + return ret; +} +#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */ + +/* + * This API polls all slave wrappers for errors and returns bit map of + * all reported errors. 
+ * return - bit map of + * AXI_WRAP_STS_NONE + * AXI_WRAP_STS_TIMEOUT + * AXI_WRAP_STS_SLAVE_ERR + * AXI_WRAP_STS_DECODE_ERR + * AXI_WRAP_STS_PCI_RD_ERR + * AXI_WRAP_STS_WRAP_RD_ERR + * AXI_WRAP_STS_SET_CORE_FAIL + * On timeout detection, corresponding bridge will be reset to + * unblock the bus. + * Error reported in each wrapper can be retrieved using the API + * si_get_axi_errlog_info() + */ +uint32 +ai_clear_backplane_to(si_t *sih) +{ + uint32 ret = 0; +#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT) + + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + uint32 i; + axi_wrapper_t * axi_wrapper = sii->axi_wrapper; + +#ifdef BCM_BACKPLANE_TIMEOUT + uint32 prev_value = 0; + osl_t *osh = sii->osh; + uint32 cfg_reg = 0; + uint32 offset = 0; + + if ((sii->axi_num_wrappers == 0) || (!PCIE(sii))) +#else + if (sii->axi_num_wrappers == 0) +#endif + { + SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n", + __FUNCTION__, sii->axi_num_wrappers, PCIE(sii), + BUSTYPE(sii->pub.bustype), sii->pub.buscoretype)); + return AXI_WRAP_STS_NONE; + } + +#ifdef BCM_BACKPLANE_TIMEOUT + /* Save and restore wrapper access window */ + if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + if (PCIE_GEN1(sii)) { + cfg_reg = PCI_BAR0_WIN2; + offset = PCI_BAR0_WIN2_OFFSET; + } else if (PCIE_GEN2(sii)) { + cfg_reg = PCIE2_BAR0_CORE2_WIN2; + offset = PCIE2_BAR0_CORE2_WIN2_OFFSET; + } + else { + osl_panic("!PCIE_GEN1 && !PCIE_GEN2\n"); + } + + prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4); + + if (prev_value == ID32_INVALID) { + si_axi_error_t * axi_error = + &sih->err_info->axi_error[sih->err_info->count]; + SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value)); + + axi_error->error = ret = AXI_WRAP_STS_PCI_RD_ERR; + axi_error->errlog_lo = cfg_reg; + sih->err_info->count++; + + if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) { + sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1; + SI_PRINT(("AXI Error log overflow\n")); + } + + return ret; + } + } +#endif /* 
BCM_BACKPLANE_TIMEOUT */ + + for (i = 0; i < sii->axi_num_wrappers; ++i) { + uint32 tmp; + + if (axi_wrapper[i].wrapper_type != AI_SLAVE_WRAPPER) { + continue; + } + +#ifdef BCM_BACKPLANE_TIMEOUT + + if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + /* Set BAR0_CORE2_WIN2 to bridge wrapper base address */ + OSL_PCI_WRITE_CONFIG(osh, + cfg_reg, 4, axi_wrapper[i].wrapper_addr); + + /* set AI to BAR0 + Offset corresponding to Gen1 or gen2 */ + ai = (aidmp_t *) ((uint8*)sii->curmap + offset); + } + else +#endif /* BCM_BACKPLANE_TIMEOUT */ + { + ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr; + } + + tmp = ai_clear_backplane_to_per_core(sih, axi_wrapper[i].cid, 0, (void*)ai); + + ret |= tmp; + } + +#ifdef BCM_BACKPLANE_TIMEOUT + /* Restore the initial wrapper space */ + if (prev_value) { + OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value); + } +#endif /* BCM_BACKPLANE_TIMEOUT */ + +#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */ + + return ret; +} + +uint +ai_num_slaveports(si_t *sih, uint coreidx) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint32 cib; + + cib = cores_info->cib[coreidx]; + return ((cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT); } diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcm_app_utils.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcm_app_utils.c index d138849d65c6..a5a7a5b23d3d 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcm_app_utils.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcm_app_utils.c @@ -3,7 +3,7 @@ * Contents are wifi-specific, used by any kernel or app-level * software that might want wifi things as it grows. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -26,7 +26,7 @@ * * <> * - * $Id: bcm_app_utils.c 547371 2015-04-08 12:51:39Z $ + * $Id: bcm_app_utils.c 623866 2016-03-09 11:58:34Z $ */ #include @@ -119,7 +119,7 @@ spec_to_chan(chanspec_t chspec) center_ch = CHSPEC_CHANNEL(chspec); - if (CHSPEC_IS20(chspec)) { + if (CHSPEC_BW_LE20(chspec)) { return center_ch; } else { /* the lower edge of the wide channel is half the bw from @@ -590,7 +590,11 @@ static const uint8 wlcntver11t_to_wlcntwlct[NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T] = IDX_IN_WL_CNT_VER_11_T(txprobersp), IDX_IN_WL_CNT_VER_11_T(rxprobersp), IDX_IN_WL_CNT_VER_11_T(txaction), - IDX_IN_WL_CNT_VER_11_T(rxaction) + IDX_IN_WL_CNT_VER_11_T(rxaction), + IDX_IN_WL_CNT_VER_11_T(ampdu_wds), + IDX_IN_WL_CNT_VER_11_T(txlost), + IDX_IN_WL_CNT_VER_11_T(txdatamcast), + IDX_IN_WL_CNT_VER_11_T(txdatabcast) }; /* Index conversion table from wl_cnt_ver_11_t to @@ -819,7 +823,7 @@ wl_copy_wlccnt(uint16 cntver, uint32 *dst, uint32 *src, uint8 src_max_idx) if (cntver == WL_CNT_VERSION_6) { for (i = 0; i < NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T; i++) { if (wlcntver6t_to_wlcntwlct[i] >= src_max_idx) { - /* src buffer does not have counters from here */ + /* src buffer does not have counters from here */ break; } dst[i] = src[wlcntver6t_to_wlcntwlct[i]]; @@ -827,7 +831,7 @@ wl_copy_wlccnt(uint16 cntver, uint32 *dst, uint32 *src, uint8 src_max_idx) } else { for (i = 0; i < NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T; i++) { if (wlcntver11t_to_wlcntwlct[i] >= src_max_idx) { - /* src buffer does not have counters from here */ + /* src buffer does not have counters from here */ break; } dst[i] = src[wlcntver11t_to_wlcntwlct[i]]; @@ -910,7 +914,7 @@ wl_cntbuf_to_xtlv_format(void *ctx, void *cntbuf, int buflen, uint32 corerev) BCM_REFERENCE(ctx); #endif - if 
(ver == WL_CNT_T_VERSION) { + if (ver >= WL_CNT_VERSION_XTLV) { /* Already in xtlv format. */ goto exit; } @@ -922,8 +926,8 @@ wl_cntbuf_to_xtlv_format(void *ctx, void *cntbuf, int buflen, uint32 corerev) wlccnt = (wl_cnt_wlc_t *)malloc(sizeof(*wlccnt)); macstat = (uint32 *)malloc(WL_CNT_MCST_STRUCT_SZ); #endif - if (!wlccnt) { - printf("wl_cntbuf_to_xtlv_format malloc fail!\n"); + if (!wlccnt || !macstat) { + printf("%s: malloc fail!\n", __FUNCTION__); res = BCME_NOMEM; goto exit; } @@ -939,12 +943,11 @@ wl_cntbuf_to_xtlv_format(void *ctx, void *cntbuf, int buflen, uint32 corerev) /* Exclude version and length fields in either wlc_cnt_ver_6_t or wlc_cnt_ver_11_t */ src_max_idx = (cntinfo->datalen - OFFSETOF(wl_cnt_info_t, data)) / sizeof(uint32); - if (src_max_idx > (uint8)(-1)) { printf("wlcntverXXt_to_wlcntwlct and src_max_idx need" " to be of uint16 instead of uint8\n" "Try updating wl utility to the latest.\n"); - res = BCME_ERROR; + src_max_idx = (uint8)(-1); } /* Copy wlc layer counters to wl_cnt_wlc_t */ @@ -987,7 +990,7 @@ wl_cntbuf_to_xtlv_format(void *ctx, void *cntbuf, int buflen, uint32 corerev) xtlv_desc[2].len = 0; xtlv_desc[2].ptr = NULL; - memset(cntbuf, 0, WL_CNTBUF_MAX_SIZE); + memset(cntbuf, 0, buflen); res = bcm_pack_xtlv_buf_from_mem(&xtlvbuf_p, &xtlvbuflen, xtlv_desc, BCM_XTLV_OPTION_ALIGN32); diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmevent.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmevent.c index 1746f47fd613..f8c2f161b0ba 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmevent.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmevent.c @@ -1,7 +1,7 @@ /* * bcmevent read-only data shared by kernel or app layers * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,16 +24,16 @@ * * <> 
* - * $Id: bcmevent.c 530174 2015-01-29 09:47:55Z $ + * $Id: bcmevent.c 707287 2017-06-27 06:44:29Z $ */ #include #include #include -#include -#include -#include - +#include +#include +#include +#include <802.11.h> /* Table of event name strings for UIs and debugging dumps */ typedef struct { @@ -81,6 +81,9 @@ static const bcmevent_name_str_t bcmevent_names[] = { BCMEVENT_NAME(WLC_E_PFN_NET_FOUND), BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE), BCMEVENT_NAME(WLC_E_PFN_NET_LOST), + BCMEVENT_NAME(WLC_E_JOIN_START), + BCMEVENT_NAME(WLC_E_ROAM_START), + BCMEVENT_NAME(WLC_E_ASSOC_START), #if defined(IBSS_PEER_DISCOVERY_EVENT) BCMEVENT_NAME(WLC_E_IBSS_ASSOC), #endif /* defined(IBSS_PEER_DISCOVERY_EVENT) */ @@ -100,6 +103,7 @@ static const bcmevent_name_str_t bcmevent_names[] = { BCMEVENT_NAME(WLC_E_P2P_DISC_LISTEN_COMPLETE), #endif BCMEVENT_NAME(WLC_E_RSSI), + BCMEVENT_NAME(WLC_E_PFN_SCAN_COMPLETE), BCMEVENT_NAME(WLC_E_EXTLOG_MSG), BCMEVENT_NAME(WLC_E_ACTION_FRAME), BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX), @@ -157,7 +161,7 @@ static const bcmevent_name_str_t bcmevent_names[] = { BCMEVENT_NAME(WLC_E_TXFAIL_THRESH), #ifdef GSCAN_SUPPORT BCMEVENT_NAME(WLC_E_PFN_GSCAN_FULL_RESULT), - BCMEVENT_NAME(WLC_E_PFN_SWC), + BCMEVENT_NAME(WLC_E_PFN_SSID_EXT), #endif /* GSCAN_SUPPORT */ #ifdef WLBSSLOAD_REPORT BCMEVENT_NAME(WLC_E_BSS_LOAD), @@ -172,6 +176,11 @@ static const bcmevent_name_str_t bcmevent_names[] = { BCMEVENT_NAME(WLC_E_CSA_FAILURE_IND), BCMEVENT_NAME(WLC_E_RMC_EVENT), BCMEVENT_NAME(WLC_E_DPSTA_INTF_IND), + BCMEVENT_NAME(WLC_E_ALLOW_CREDIT_BORROW), + BCMEVENT_NAME(WLC_E_MSCH), + BCMEVENT_NAME(WLC_E_ULP), + BCMEVENT_NAME(WLC_E_PSK_AUTH), + BCMEVENT_NAME(WLC_E_SDB_TRANSITION), }; @@ -228,3 +237,142 @@ wl_event_to_network_order(wl_event_msg_t * evt) evt->datalen = hton32(evt->datalen); evt->version = hton16(evt->version); } + +/* + * Validate if the event is proper and if valid copy event header to event. + * If proper event pointer is passed, to just validate, pass NULL to event. 
+ * + * Return values are + * BCME_OK - It is a BRCM event or BRCM dongle event + * BCME_NOTFOUND - Not BRCM, not an event, may be okay + * BCME_BADLEN - Bad length, should not process, just drop + */ +int +is_wlc_event_frame(void *pktdata, uint pktlen, uint16 exp_usr_subtype, + bcm_event_msg_u_t *out_event) +{ + uint16 evlen = 0; /* length in bcmeth_hdr */ + uint16 subtype; + uint16 usr_subtype; + bcm_event_t *bcm_event; + uint8 *pktend; + uint8 *evend; + int err = BCME_OK; + uint32 data_len = 0; /* data length in bcm_event */ + + pktend = (uint8 *)pktdata + pktlen; + bcm_event = (bcm_event_t *)pktdata; + + /* only care about 16-bit subtype / length versions */ + if ((uint8 *)&bcm_event->bcm_hdr < pktend) { + uint8 short_subtype = *(uint8 *)&bcm_event->bcm_hdr; + if (!(short_subtype & 0x80)) { + err = BCME_NOTFOUND; + goto done; + } + } + + /* must have both ether_header and bcmeth_hdr */ + if (pktlen < OFFSETOF(bcm_event_t, event)) { + err = BCME_BADLEN; + goto done; + } + + /* check length in bcmeth_hdr */ + + /* temporary - header length not always set properly. When the below + * !BCMDONGLEHOST is in all branches that use trunk DHD, the code + * under BCMDONGLEHOST can be removed. 
+ */ + evlen = (uint16)(pktend - (uint8 *)&bcm_event->bcm_hdr.version); + evend = (uint8 *)&bcm_event->bcm_hdr.version + evlen; + if (evend != pktend) { + err = BCME_BADLEN; + goto done; + } + + /* match on subtype, oui and usr subtype for BRCM events */ + subtype = ntoh16_ua((void *)&bcm_event->bcm_hdr.subtype); + if (subtype != BCMILCP_SUBTYPE_VENDOR_LONG) { + err = BCME_NOTFOUND; + goto done; + } + + if (bcmp(BRCM_OUI, &bcm_event->bcm_hdr.oui[0], DOT11_OUI_LEN)) { + err = BCME_NOTFOUND; + goto done; + } + + /* if it is a bcm_event or bcm_dngl_event_t, validate it */ + usr_subtype = ntoh16_ua((void *)&bcm_event->bcm_hdr.usr_subtype); + switch (usr_subtype) { + case BCMILCP_BCM_SUBTYPE_EVENT: + /* check that header length and pkt length are sufficient */ + if ((pktlen < sizeof(bcm_event_t)) || + (evend < ((uint8 *)bcm_event + sizeof(bcm_event_t)))) { + err = BCME_BADLEN; + goto done; + } + + /* ensure data length in event is not beyond the packet. */ + data_len = ntoh32_ua((void *)&bcm_event->event.datalen); + if ((sizeof(bcm_event_t) + data_len + + BCMILCP_BCM_SUBTYPE_EVENT_DATA_PAD) != pktlen) { + err = BCME_BADLEN; + goto done; + } + + if (exp_usr_subtype && (exp_usr_subtype != usr_subtype)) { + err = BCME_NOTFOUND; + goto done; + } + + if (out_event) { + /* ensure BRCM event pkt aligned */ + memcpy(&out_event->event, &bcm_event->event, sizeof(wl_event_msg_t)); + } + + break; + + case BCMILCP_BCM_SUBTYPE_DNGLEVENT: +#if defined(DNGL_EVENT_SUPPORT) + if ((pktlen < sizeof(bcm_dngl_event_t)) || + (evend < ((uint8 *)bcm_event + sizeof(bcm_dngl_event_t)))) { + err = BCME_BADLEN; + goto done; + } + + /* ensure data length in event is not beyond the packet. 
*/ + data_len = ntoh16_ua((void *)&((bcm_dngl_event_t *)pktdata)->dngl_event.datalen); + if ((sizeof(bcm_dngl_event_t) + data_len + + BCMILCP_BCM_SUBTYPE_EVENT_DATA_PAD) != pktlen) { + err = BCME_BADLEN; + goto done; + } + + if (exp_usr_subtype && (exp_usr_subtype != usr_subtype)) { + err = BCME_NOTFOUND; + goto done; + } + + if (out_event) { + /* ensure BRCM dngl event pkt aligned */ + memcpy(&out_event->dngl_event, &((bcm_dngl_event_t *)pktdata)->dngl_event, + sizeof(bcm_dngl_event_msg_t)); + } + + break; +#else + err = BCME_UNSUPPORTED; + break; +#endif + + default: + err = BCME_NOTFOUND; + goto done; + } + + BCM_REFERENCE(data_len); +done: + return err; +} diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmsdh.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmsdh.c index 400f441cc58d..a4144efe30bf 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmsdh.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmsdh.c @@ -2,7 +2,7 @@ * BCMSDH interface glue * implement bcmsdh API for SDIOH driver * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: bcmsdh.c 514727 2014-11-12 03:02:48Z $ + * $Id: bcmsdh.c 671319 2016-11-21 14:27:29Z $ */ /** @@ -47,12 +47,23 @@ #include /* SDIO device core hardware definitions. 
*/ #include /* SDIO Device and Protocol Specs */ +#if defined(BT_OVER_SDIO) +#include +#endif /* defined (BT_OVER_SDIO) */ + #define SDIOH_API_ACCESS_RETRY_LIMIT 2 const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL; /* local copy of bcm sd handler */ bcmsdh_info_t * l_bcmsdh = NULL; +#if defined(BT_OVER_SDIO) +struct sdio_func *func_f3 = NULL; +static f3intr_handler processf3intr = NULL; +static dhd_hang_notification process_dhd_hang_notification = NULL; +static dhd_hang_state_t g_dhd_hang_state = NO_HANG_STATE; +#endif /* defined (BT_OVER_SDIO) */ + #if defined(OOB_INTR_ONLY) && defined(HW_OOB) || defined(FORCE_WOWLAN) extern int @@ -65,6 +76,81 @@ bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable) } #endif +#if defined(BT_OVER_SDIO) +void bcmsdh_btsdio_process_hang_state(dhd_hang_state_t new_state) +{ + bool state_change = false; + + BCMSDH_ERROR(("%s: DHD hang state changed - [%d] -> [%d]\n", + __FUNCTION__, g_dhd_hang_state, new_state)); + + if (g_dhd_hang_state == new_state) + return; + + switch (g_dhd_hang_state) { + case NO_HANG_STATE: + if (HANG_START_STATE == new_state) + state_change = true; + break; + + case HANG_START_STATE: + if (HANG_RECOVERY_STATE == new_state || + NO_HANG_STATE == new_state) + state_change = true; + break; + + case HANG_RECOVERY_STATE: + if (NO_HANG_STATE == new_state) + state_change = true; + break; + + default: + BCMSDH_ERROR(("%s: Unhandled Hang state\n", __FUNCTION__)); + break; + } + + if (!state_change) { + BCMSDH_ERROR(("%s: Hang state cannot be changed\n", __FUNCTION__)); + return; + } + + g_dhd_hang_state = new_state; +} + +void bcmsdh_btsdio_process_f3_intr(void) +{ + if (processf3intr && (g_dhd_hang_state == NO_HANG_STATE)) + processf3intr(func_f3); +} + +void bcmsdh_btsdio_process_dhd_hang_notification(bool wifi_recovery_completed) +{ + bcmsdh_btsdio_process_hang_state(HANG_START_STATE); + + if (process_dhd_hang_notification) + process_dhd_hang_notification(func_f3, wifi_recovery_completed); + + /* WiFi was off, so 
HANG_RECOVERY_STATE is not needed */ + if (wifi_recovery_completed) + bcmsdh_btsdio_process_hang_state(NO_HANG_STATE); + else { + bcmsdh_btsdio_process_hang_state(HANG_RECOVERY_STATE); + } +} + +void bcmsdh_btsdio_interface_init(struct sdio_func *func, + f3intr_handler f3intr_fun, dhd_hang_notification hang_notification) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)l_bcmsdh; + BCMSDH_INFO(("%s: func %p \n", __FUNCTION__, func)); + func_f3 = func; + processf3intr = f3intr_fun; + sdioh_sdmmc_card_enable_func_f3(bcmsdh->sdioh, func); + process_dhd_hang_notification = hang_notification; + +} EXPORT_SYMBOL(bcmsdh_btsdio_interface_init); +#endif /* defined (BT_OVER_SDIO) */ + /* Attach BCMSDH layer to SDIO Host Controller Driver * * @param osh OSL Handle. @@ -89,6 +175,8 @@ bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva) bcmsdh->init_success = TRUE; *regsva = SI_ENUM_BASE; + bcmsdh_force_sbwad_calc(bcmsdh, FALSE); + /* Report the BAR, to fix if needed */ bcmsdh->sbwad = SI_ENUM_BASE; @@ -162,6 +250,10 @@ bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; SDIOH_API_RC status; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + ASSERT(bcmsdh); status = sdioh_interrupt_register(bcmsdh->sdioh, fn, argh); @@ -173,6 +265,10 @@ bcmsdh_intr_dereg(void *sdh) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; SDIOH_API_RC status; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + ASSERT(bcmsdh); status = sdioh_interrupt_deregister(bcmsdh->sdioh); @@ -239,7 +335,7 @@ bcmsdh_cfg_read(void *sdh, uint fnc_num, uint32 addr, int *err) fnc_num, addr, data)); return data; -} +} EXPORT_SYMBOL(bcmsdh_cfg_read); void bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err) @@ -269,7 +365,7 @@ bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err) BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__, fnc_num, addr, data)); -} +} EXPORT_SYMBOL(bcmsdh_cfg_write); uint32 bcmsdh_cfg_read_word(void 
*sdh, uint fnc_num, uint32 addr, int *err) @@ -356,6 +452,25 @@ bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length) return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); } +int +bcmsdh_cisaddr_read(void *sdh, uint func, uint8 *cisd, uint32 offset) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + func &= 0x7; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + ASSERT(cisd); + + status = sdioh_cisaddr_read(bcmsdh->sdioh, func, cisd, offset); + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + int bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set) @@ -386,13 +501,13 @@ bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set) } uint32 -bcmsdh_reg_read(void *sdh, uint32 addr, uint size) +bcmsdh_reg_read(void *sdh, uintptr addr, uint size) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; SDIOH_API_RC status; uint32 word = 0; - BCMSDH_INFO(("%s:fun = 1, addr = 0x%x\n", __FUNCTION__, addr)); + BCMSDH_INFO(("%s:fun = 1, addr = 0x%x\n", __FUNCTION__, (unsigned int)addr)); if (!bcmsdh) bcmsdh = l_bcmsdh; @@ -431,26 +546,27 @@ bcmsdh_reg_read(void *sdh, uint32 addr, uint size) } /* otherwise, bad sdio access or invalid size */ - BCMSDH_ERROR(("%s: error reading addr 0x%04x size %d\n", __FUNCTION__, addr, size)); + BCMSDH_ERROR(("%s: error reading addr 0x%x size %d\n", + __FUNCTION__, (unsigned int)addr, size)); return 0xFFFFFFFF; } uint32 -bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data) +bcmsdh_reg_write(void *sdh, uintptr addr, uint size, uint32 data) { bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; SDIOH_API_RC status; int err = 0; BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n", - __FUNCTION__, addr, size*8, data)); + __FUNCTION__, (unsigned int)addr, size*8, data)); if (!bcmsdh) bcmsdh = l_bcmsdh; ASSERT(bcmsdh->init_success); - if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))) { + if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, 
addr, bcmsdh->force_sbwad_calc))) { bcmsdh->regfail = TRUE; // terence 20130621: return err; } @@ -466,7 +582,7 @@ bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data) return 0; BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n", - __FUNCTION__, data, addr, size)); + __FUNCTION__, data, (unsigned int)addr, size)); return 0xFFFFFFFF; } @@ -604,7 +720,6 @@ bcmsdh_waitlockfree(void *sdh) return sdioh_waitlockfree(bcmsdh->sdioh); } - int bcmsdh_query_device(void *sdh) { @@ -655,6 +770,19 @@ bcmsdh_cur_sbwad(void *sdh) return (bcmsdh->sbwad); } +/* example usage: if force is TRUE, forces the bcmsdhsdio_set_sbaddr_window to + * calculate sbwad always instead of caching. + */ +void +bcmsdh_force_sbwad_calc(void *sdh, bool force) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + bcmsdh->force_sbwad_calc = force; +} + void bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev) { @@ -717,57 +845,3 @@ bcmsdh_set_mode(void *sdh, uint mode) bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; return (sdioh_set_mode(bcmsdh->sdioh, mode)); } - -#if defined(SWTXGLOM) -int -bcmsdh_send_swtxglom_buf(void *sdh, uint32 addr, uint fn, uint flags, - uint8 *buf, uint nbytes, void *pkt, - bcmsdh_cmplt_fn_t complete_fn, void *handle) -{ - bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; - SDIOH_API_RC status; - uint incr_fix; - uint width; - int err = 0; - - ASSERT(bcmsdh); - ASSERT(bcmsdh->init_success); - - BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n", - __FUNCTION__, fn, addr, nbytes)); - - /* Async not implemented yet */ - ASSERT(!(flags & SDIO_REQ_ASYNC)); - if (flags & SDIO_REQ_ASYNC) - return BCME_UNSUPPORTED; - - if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))) - return err; - - addr &= SBSDIO_SB_OFT_ADDR_MASK; - - incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; - width = (flags & SDIO_REQ_4BYTE) ? 
4 : 2; - if (width == 4) - addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; - - status = sdioh_request_swtxglom_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix, - SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt); - - return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); -} - -void -bcmsdh_glom_post(void *sdh, uint8 *frame, void *pkt, uint len) -{ - bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; - sdioh_glom_post(bcmsdh->sdioh, frame, pkt, len); -} - -void -bcmsdh_glom_clear(void *sdh) -{ - bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; - sdioh_glom_clear(bcmsdh->sdioh); -} -#endif /* SWTXGLOM */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmsdh_linux.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmsdh_linux.c old mode 100755 new mode 100644 index a0e5f2400c1e..5231c7739a75 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmsdh_linux.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmsdh_linux.c @@ -1,7 +1,7 @@ /* * SDIO access interface for drivers - linux specific (pci only) * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmsdh_linux.c 514727 2014-11-12 03:02:48Z $ + * $Id: bcmsdh_linux.c 672609 2016-11-29 07:00:46Z $ */ /** @@ -217,6 +217,29 @@ int bcmsdh_remove(bcmsdh_info_t *bcmsdh) return 0; } +#ifdef DHD_WAKE_STATUS +int bcmsdh_get_total_wake(bcmsdh_info_t *bcmsdh) +{ + return bcmsdh->total_wake_count; +} + +int bcmsdh_set_get_wake(bcmsdh_info_t *bcmsdh, int flag) +{ + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + unsigned long flags; + int ret; + + spin_lock_irqsave(&bcmsdh_osinfo->oob_irq_spinlock, flags); + + ret = bcmsdh->pkt_wake; + bcmsdh->total_wake_count += flag; + bcmsdh->pkt_wake = flag; + + spin_unlock_irqrestore(&bcmsdh_osinfo->oob_irq_spinlock, flags); + return ret; +} +#endif /* 
DHD_WAKE_STATUS */ + int bcmsdh_suspend(bcmsdh_info_t *bcmsdh) { bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; @@ -339,17 +362,16 @@ int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handl int type; bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; - SDLX_MSG(("%s: Enter\n", __FUNCTION__)); if (bcmsdh_osinfo->oob_irq_registered) { SDLX_MSG(("%s: irq is already registered\n", __FUNCTION__)); return -EBUSY; } + SDLX_MSG(("%s %s irq=%d flags=0x%X\n", __FUNCTION__, #ifdef HW_OOB - printf("%s: HW_OOB enabled\n", __FUNCTION__); + "HW_OOB", #else - printf("%s: SW_OOB enabled\n", __FUNCTION__); + "SW_OOB", #endif - SDLX_MSG(("%s OOB irq=%d flags=0x%X\n", __FUNCTION__, (int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags)); bcmsdh_osinfo->oob_irq_handler = oob_irq_handler; bcmsdh_osinfo->oob_irq_handler_context = oob_irq_handler_context; @@ -363,9 +385,9 @@ int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handl bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh); #endif /* defined(CONFIG_ARCH_ODIN) */ if (err) { + SDLX_MSG(("%s: request_irq failed with %d\n", __FUNCTION__, err)); bcmsdh_osinfo->oob_irq_enabled = FALSE; bcmsdh_osinfo->oob_irq_registered = FALSE; - SDLX_MSG(("%s: request_irq failed with %d\n", __FUNCTION__, err)); return err; } @@ -376,14 +398,12 @@ int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handl SDLX_MSG(("%s: disable_irq_wake\n", __FUNCTION__)); bcmsdh_osinfo->oob_irq_wake_enabled = FALSE; #else - SDLX_MSG(("%s: enable_irq_wake\n", __FUNCTION__)); err = enable_irq_wake(bcmsdh_osinfo->oob_irq_num); if (err) SDLX_MSG(("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err)); else bcmsdh_osinfo->oob_irq_wake_enabled = TRUE; #endif - return 0; } @@ -434,6 +454,9 @@ module_param(sd_hiok, uint, 0); extern uint sd_f2_blocksize; module_param(sd_f2_blocksize, int, 0); +extern uint sd_f1_blocksize; +module_param(sd_f1_blocksize, int, 0); + #ifdef BCMSDIOH_STD 
extern int sd_uhsimode; module_param(sd_uhsimode, int, 0); @@ -461,6 +484,10 @@ EXPORT_SYMBOL(bcmsdh_intr_dereg); EXPORT_SYMBOL(bcmsdh_intr_pending); #endif +#if defined(BT_OVER_SDIO) +EXPORT_SYMBOL(bcmsdh_btsdio_interface_init); +#endif /* defined (BT_OVER_SDIO) */ + EXPORT_SYMBOL(bcmsdh_devremove_reg); EXPORT_SYMBOL(bcmsdh_cfg_read); EXPORT_SYMBOL(bcmsdh_cfg_write); diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmsdh_sdmmc.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmsdh_sdmmc.c old mode 100755 new mode 100644 index 1605f65af5d0..ccfcce2c66ca --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmsdh_sdmmc.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmsdh_sdmmc.c @@ -1,7 +1,7 @@ /* * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmsdh_sdmmc.c 591104 2015-10-07 04:45:18Z $ + * $Id: bcmsdh_sdmmc.c 710913 2017-07-14 10:17:51Z $ */ #include @@ -38,13 +38,33 @@ #include /* ioctl/iovars */ #include +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0)) +#include +void +mmc_host_clk_hold(struct mmc_host *host) +{ + BCM_REFERENCE(host); + return; +} + +void +mmc_host_clk_release(struct mmc_host *host) +{ + BCM_REFERENCE(host); + return; +} +#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 8)) +#include +#else #include +#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0)) */ #include #include #include #include #include +#include #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) #include @@ -79,10 +99,21 @@ extern PBCMSDH_SDMMC_INSTANCE gInstance; #define CUSTOM_SDIO_F2_BLKSIZE DEFAULT_SDIO_F2_BLKSIZE #endif +#define DEFAULT_SDIO_F1_BLKSIZE 64 +#ifndef CUSTOM_SDIO_F1_BLKSIZE +#define 
CUSTOM_SDIO_F1_BLKSIZE DEFAULT_SDIO_F1_BLKSIZE +#endif + #define MAX_IO_RW_EXTENDED_BLK 511 uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */ uint sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE; +uint sd_f1_blocksize = CUSTOM_SDIO_F1_BLKSIZE; + +#if defined(BT_OVER_SDIO) +uint sd_f3_blocksize = 64; +#endif /* defined (BT_OVER_SDIO) */ + uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */ uint sd_power = 1; /* Default to SD Slot powered ON */ @@ -105,6 +136,18 @@ DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait); int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data); +void sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz); +uint sdmmc_get_clock_rate(sdioh_info_t *sd); +void sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div); +#if defined(BT_OVER_SDIO) +extern +void sdioh_sdmmc_card_enable_func_f3(sdioh_info_t *sd, struct sdio_func *func) +{ + sd->func[3] = func; + sd_info(("%s sd->func[3] %p\n", __FUNCTION__, sd->func[3])); +} +#endif /* defined (BT_OVER_SDIO) */ + static int sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd) { @@ -135,7 +178,7 @@ sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd) err_ret = sdio_enable_func(sd->func[1]); sdio_release_host(sd->func[1]); if (err_ret) { - sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret)); + sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x\n", err_ret)); } return FALSE; @@ -176,6 +219,11 @@ sdioh_attach(osl_t *osh, struct sdio_func *func) #ifdef GLOBAL_SDMMC_INSTANCE sd->func[func->num] = func; #endif + +#if defined(BT_OVER_SDIO) + sd->func[3] = NULL; +#endif /* defined (BT_OVER_SDIO) */ + sd->num_funcs = 2; sd->sd_blockmode = TRUE; sd->use_client_ints = TRUE; @@ -188,8 +236,8 @@ sdioh_attach(osl_t *osh, struct sdio_func *func) sdio_set_drvdata(sd->func[1], sd); sdio_claim_host(sd->func[1]); - sd->client_block_size[1] = 64; - err_ret = sdio_set_block_size(sd->func[1], 64); + sd->client_block_size[1] = sd_f1_blocksize; + err_ret = 
sdio_set_block_size(sd->func[1], sd_f1_blocksize); sdio_release_host(sd->func[1]); if (err_ret) { sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize(%d)\n", err_ret)); @@ -207,6 +255,8 @@ sdioh_attach(osl_t *osh, struct sdio_func *func) goto fail; } + sd->sd_clk_rate = sdmmc_get_clock_rate(sd); + printf("%s: sd clock rate = %u\n", __FUNCTION__, sd->sd_clk_rate); sdioh_sdmmc_card_enablefuncs(sd); sd_trace(("%s: Done\n", __FUNCTION__)); @@ -270,6 +320,9 @@ sdioh_enable_func_intr(sdioh_info_t *sd) /* Enable F1 and F2 interrupts, clear master enable */ reg &= ~INTR_CTL_MASTER_EN; reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN); +#if defined(BT_OVER_SDIO) + reg |= (INTR_CTL_FUNC3_EN); +#endif /* defined (BT_OVER_SDIO) */ sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err); sdio_release_host(sd->func[0]); @@ -300,6 +353,9 @@ sdioh_disable_func_intr(sdioh_info_t *sd) return SDIOH_API_RC_FAIL; } reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN); +#if defined(BT_OVER_SDIO) + reg &= ~INTR_CTL_FUNC3_EN; +#endif /* Disable master interrupt with the last function interrupt */ if (!(reg & 0xFE)) reg = 0; @@ -422,22 +478,20 @@ enum { }; const bcm_iovar_t sdioh_iovars[] = { - {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 }, - {"sd_blockmode", IOV_BLOCKMODE, 0, IOVT_BOOL, 0 }, - {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */ - {"sd_dma", IOV_DMA, 0, IOVT_BOOL, 0 }, - {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 }, - {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 }, - {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 }, - {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, - {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, - {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 }, - {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 }, - {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 }, - {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100}, - {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0 }, - {"sd_rxchain", IOV_RXCHAIN, 0, IOVT_BOOL, 0 }, - {NULL, 0, 0, 
0, 0 } + {"sd_msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0 }, + {"sd_blockmode", IOV_BLOCKMODE, 0, 0, IOVT_BOOL, 0 }, + {"sd_blocksize", IOV_BLOCKSIZE, 0, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */ + {"sd_dma", IOV_DMA, 0, 0, IOVT_BOOL, 0 }, + {"sd_ints", IOV_USEINTS, 0, 0, IOVT_BOOL, 0 }, + {"sd_numints", IOV_NUMINTS, 0, 0, IOVT_UINT32, 0 }, + {"sd_numlocalints", IOV_NUMLOCALINTS, 0, 0, IOVT_UINT32, 0 }, + {"sd_divisor", IOV_DIVISOR, 0, 0, IOVT_UINT32, 0 }, + {"sd_power", IOV_POWER, 0, 0, IOVT_UINT32, 0 }, + {"sd_clock", IOV_CLOCK, 0, 0, IOVT_UINT32, 0 }, + {"sd_mode", IOV_SDMODE, 0, 0, IOVT_UINT32, 100}, + {"sd_highspeed", IOV_HISPEED, 0, 0, IOVT_UINT32, 0 }, + {"sd_rxchain", IOV_RXCHAIN, 0, 0, IOVT_BOOL, 0 }, + {NULL, 0, 0, 0, 0, 0 } }; int @@ -593,7 +647,14 @@ sdioh_iovar_op(sdioh_info_t *si, const char *name, break; case IOV_SVAL(IOV_DIVISOR): - sd_divisor = int_val; + /* set the clock to divisor, if value is non-zero & power of 2 */ + if (int_val && !(int_val & (int_val - 1))) { + sd_divisor = int_val; + sdmmc_set_clock_divisor(si, sd_divisor); + } else { + DHD_ERROR(("%s: Invalid sd_divisor value, should be power of 2!\n", + __FUNCTION__)); + } break; case IOV_GVAL(IOV_POWER): @@ -641,74 +702,6 @@ sdioh_iovar_op(sdioh_info_t *si, const char *name, int_val = (int32)0; bcopy(&int_val, arg, val_size); break; - - case IOV_GVAL(IOV_HOSTREG): - { - sdreg_t *sd_ptr = (sdreg_t *)params; - - if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) { - sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset)); - bcmerror = BCME_BADARG; - break; - } - - sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__, - (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 
16 : 32), - sd_ptr->offset)); - if (sd_ptr->offset & 1) - int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */ - else if (sd_ptr->offset & 2) - int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */ - else - int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */ - - bcopy(&int_val, arg, sizeof(int_val)); - break; - } - - case IOV_SVAL(IOV_HOSTREG): - { - sdreg_t *sd_ptr = (sdreg_t *)params; - - if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) { - sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset)); - bcmerror = BCME_BADARG; - break; - } - - sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value, - (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32), - sd_ptr->offset)); - break; - } - - case IOV_GVAL(IOV_DEVREG): - { - sdreg_t *sd_ptr = (sdreg_t *)params; - uint8 data = 0; - - if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) { - bcmerror = BCME_SDIO_ERROR; - break; - } - - int_val = (int)data; - bcopy(&int_val, arg, sizeof(int_val)); - break; - } - - case IOV_SVAL(IOV_DEVREG): - { - sdreg_t *sd_ptr = (sdreg_t *)params; - uint8 data = (uint8)sd_ptr->value; - - if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) { - bcmerror = BCME_SDIO_ERROR; - break; - } - break; - } - default: bcmerror = BCME_UNSUPPORTED; break; @@ -795,7 +788,7 @@ sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length) return SDIOH_API_RC_FAIL; } - sd_trace(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func])); + sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func])); for (count = 0; count < length; count++) { offset = sd->func_cis_ptr[func] + count; @@ -811,6 +804,30 @@ sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length) return SDIOH_API_RC_SUCCESS; } +extern SDIOH_API_RC +sdioh_cisaddr_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 offset) +{ + uint32 foo; + + sd_trace(("%s: Func = %d\n", __FUNCTION__, 
func)); + + if (!sd->func_cis_ptr[func]) { + sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func)); + return SDIOH_API_RC_FAIL; + } + + sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func])); + + if (sdioh_sdmmc_card_regread (sd, 0, sd->func_cis_ptr[func]+offset, 1, &foo) < 0) { + sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + *cisd = (uint8)(foo & 0xff); + + return SDIOH_API_RC_SUCCESS; +} + extern SDIOH_API_RC sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte) { @@ -833,6 +850,52 @@ sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *by * as a special case. */ if (regaddr == SDIOD_CCCR_IOEN) { +#if defined(BT_OVER_SDIO) + do { + if (sd->func[3]) { + sd_info(("bcmsdh_sdmmc F3: *byte 0x%x\n", *byte)); + + if (*byte & SDIO_FUNC_ENABLE_3) { + sdio_claim_host(sd->func[3]); + + /* Set Function 3 Block Size */ + err_ret = sdio_set_block_size(sd->func[3], + sd_f3_blocksize); + if (err_ret) { + sd_err(("F3 blocksize set err%d\n", + err_ret)); + } + + /* Enable Function 3 */ + sd_info(("bcmsdh_sdmmc F3: enable F3 fn %p\n", + sd->func[3])); + err_ret = sdio_enable_func(sd->func[3]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: enable F3 err:%d\n", + err_ret)); + } + + sdio_release_host(sd->func[3]); + + break; + } else if (*byte & SDIO_FUNC_DISABLE_3) { + sdio_claim_host(sd->func[3]); + + /* Disable Function 3 */ + sd_info(("bcmsdh_sdmmc F3: disable F3 fn %p\n", + sd->func[3])); + err_ret = sdio_disable_func(sd->func[3]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Disable F3 err:%d\n", + err_ret)); + } + sdio_release_host(sd->func[3]); + sd->func[3] = NULL; + + break; + } + } +#endif /* defined (BT_OVER_SDIO) */ if (sd->func[2]) { sdio_claim_host(sd->func[2]); if (*byte & SDIO_FUNC_ENABLE_2) { @@ -852,7 +915,10 @@ sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *by } sdio_release_host(sd->func[2]); } - } 
+#if defined(BT_OVER_SDIO) + } while (0); +#endif /* defined (BT_OVER_SDIO) */ + } #if defined(MMC_SDIO_ABORT) /* to allow abort command through F1 */ else if (regaddr == SDIOD_CCCR_IOABORT) { @@ -922,388 +988,6 @@ sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *by return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); } -#if defined(SWTXGLOM) -static INLINE int sdioh_request_packet_align(uint pkt_len, uint write, uint func, int blk_size) -{ - /* Align Patch */ - if (!write || pkt_len < 32) - pkt_len = (pkt_len + 3) & 0xFFFFFFFC; - else if ((pkt_len > blk_size) && (pkt_len % blk_size)) { - if (func == SDIO_FUNC_2) { - sd_err(("%s: [%s] dhd_sdio must align %d bytes" - " packet larger than a %d bytes blk size by a blk size\n", - __FUNCTION__, write ? "W" : "R", pkt_len, blk_size)); - } - pkt_len += blk_size - (pkt_len % blk_size); - } -#ifdef CONFIG_MMC_MSM7X00A - if ((pkt_len % 64) == 32) { - sd_err(("%s: Rounding up TX packet +=32\n", __FUNCTION__)); - pkt_len += 32; - } -#endif /* CONFIG_MMC_MSM7X00A */ - return pkt_len; -} - -void -sdioh_glom_post(sdioh_info_t *sd, uint8 *frame, void *pkt, uint len) -{ - void *phead = sd->glom_info.glom_pkt_head; - void *ptail = sd->glom_info.glom_pkt_tail; - - BCM_REFERENCE(frame); - - ASSERT(!PKTLINK(pkt)); - if (!phead) { - ASSERT(!phead); - sd->glom_info.glom_pkt_head = sd->glom_info.glom_pkt_tail = pkt; - } - else { - ASSERT(ptail); - PKTSETNEXT(sd->osh, ptail, pkt); - sd->glom_info.glom_pkt_tail = pkt; - } - sd->glom_info.count++; -} - -void -sdioh_glom_clear(sdioh_info_t *sd) -{ - void *pnow, *pnext; - - pnext = sd->glom_info.glom_pkt_head; - - if (!pnext) { - sd_err(("sdioh_glom_clear: no first packet to clear!\n")); - return; - } - - while (pnext) { - pnow = pnext; - pnext = PKTNEXT(sd->osh, pnow); - PKTSETNEXT(sd->osh, pnow, NULL); - sd->glom_info.count--; - } - - sd->glom_info.glom_pkt_head = NULL; - sd->glom_info.glom_pkt_tail = NULL; - if (sd->glom_info.count != 0) { - 
sd_err(("sdioh_glom_clear: glom count mismatch!\n")); - sd->glom_info.count = 0; - } -} - -static SDIOH_API_RC -sdioh_request_swtxglom_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func, - uint addr, void *pkt) -{ - bool fifo = (fix_inc == SDIOH_DATA_FIX); - uint32 SGCount = 0; - int err_ret = 0; - void *pnext; - uint ttl_len, dma_len, lft_len, xfred_len, pkt_len; - uint blk_num; - int blk_size; - struct mmc_request mmc_req; - struct mmc_command mmc_cmd; - struct mmc_data mmc_dat; -#ifdef BCMSDIOH_TXGLOM - uint8 *localbuf = NULL; - uint local_plen = 0; - bool need_txglom = write && - (pkt == sd->glom_info.glom_pkt_tail) && - (sd->glom_info.glom_pkt_head != sd->glom_info.glom_pkt_tail); -#endif /* BCMSDIOH_TXGLOM */ - - sd_trace(("%s: Enter\n", __FUNCTION__)); - - ASSERT(pkt); - DHD_PM_RESUME_WAIT(sdioh_request_packet_wait); - DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); - - ttl_len = xfred_len = 0; -#ifdef BCMSDIOH_TXGLOM - if (need_txglom) { - pkt = sd->glom_info.glom_pkt_head; - } -#endif /* BCMSDIOH_TXGLOM */ - - /* at least 4 bytes alignment of skb buff is guaranteed */ - for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) - ttl_len += PKTLEN(sd->osh, pnext); - - blk_size = sd->client_block_size[func]; - if (((!write && sd->use_rxchain) || -#ifdef BCMSDIOH_TXGLOM - (need_txglom && sd->txglom_mode == SDPCM_TXGLOM_MDESC) || -#endif - 0) && (ttl_len >= blk_size)) { - blk_num = ttl_len / blk_size; - dma_len = blk_num * blk_size; - } else { - blk_num = 0; - dma_len = 0; - } - - lft_len = ttl_len - dma_len; - - sd_trace(("%s: %s %dB to func%d:%08x, %d blks with DMA, %dB leftover\n", - __FUNCTION__, write ? 
"W" : "R", - ttl_len, func, addr, blk_num, lft_len)); - - if (0 != dma_len) { - memset(&mmc_req, 0, sizeof(struct mmc_request)); - memset(&mmc_cmd, 0, sizeof(struct mmc_command)); - memset(&mmc_dat, 0, sizeof(struct mmc_data)); - - /* Set up DMA descriptors */ - for (pnext = pkt; - pnext && dma_len; - pnext = PKTNEXT(sd->osh, pnext)) { - pkt_len = PKTLEN(sd->osh, pnext); - - if (dma_len > pkt_len) - dma_len -= pkt_len; - else { - pkt_len = xfred_len = dma_len; - dma_len = 0; - pkt = pnext; - } - - sg_set_buf(&sd->sg_list[SGCount++], - (uint8*)PKTDATA(sd->osh, pnext), - pkt_len); - - if (SGCount >= SDIOH_SDMMC_MAX_SG_ENTRIES) { - sd_err(("%s: sg list entries exceed limit\n", - __FUNCTION__)); - return (SDIOH_API_RC_FAIL); - } - } - - mmc_dat.sg = sd->sg_list; - mmc_dat.sg_len = SGCount; - mmc_dat.blksz = blk_size; - mmc_dat.blocks = blk_num; - mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; - - mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */ - mmc_cmd.arg = write ? 1<<31 : 0; - mmc_cmd.arg |= (func & 0x7) << 28; - mmc_cmd.arg |= 1<<27; - mmc_cmd.arg |= fifo ? 0 : 1<<26; - mmc_cmd.arg |= (addr & 0x1FFFF) << 9; - mmc_cmd.arg |= blk_num & 0x1FF; - mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC; - - mmc_req.cmd = &mmc_cmd; - mmc_req.data = &mmc_dat; - - sdio_claim_host(sd->func[func]); - mmc_set_data_timeout(&mmc_dat, sd->func[func]->card); - mmc_wait_for_req(sd->func[func]->card->host, &mmc_req); - sdio_release_host(sd->func[func]); - - err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error; - if (0 != err_ret) { - sd_err(("%s:CMD53 %s failed with code %d\n", - __FUNCTION__, - write ? 
"write" : "read", - err_ret)); - } - if (!fifo) { - addr = addr + ttl_len - lft_len - dma_len; - } - } - - /* PIO mode */ - if (0 != lft_len) { - /* Claim host controller */ - sdio_claim_host(sd->func[func]); - for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) { - uint8 *buf = (uint8*)PKTDATA(sd->osh, pnext) + - xfred_len; - uint pad = 0; - pkt_len = PKTLEN(sd->osh, pnext); - if (0 != xfred_len) { - pkt_len -= xfred_len; - xfred_len = 0; - } -#ifdef BCMSDIOH_TXGLOM - if (need_txglom) { - if (!localbuf) { - uint prev_lft_len = lft_len; - lft_len = sdioh_request_packet_align(lft_len, write, - func, blk_size); - - if (lft_len > prev_lft_len) { - sd_err(("%s: padding is unexpected! lft_len %d," - " prev_lft_len %d %s\n", - __FUNCTION__, lft_len, prev_lft_len, - write ? "Write" : "Read")); - } - - localbuf = (uint8 *)MALLOC(sd->osh, lft_len); - if (localbuf == NULL) { - sd_err(("%s: %s TXGLOM: localbuf malloc FAILED\n", - __FUNCTION__, (write) ? "TX" : "RX")); - need_txglom = FALSE; - goto txglomfail; - } - } - bcopy(buf, (localbuf + local_plen), pkt_len); - local_plen += pkt_len; - - if (PKTNEXT(sd->osh, pnext)) { - continue; - } - - buf = localbuf; - pkt_len = local_plen; - } - -txglomfail: -#endif /* BCMSDIOH_TXGLOM */ - - if ( -#ifdef BCMSDIOH_TXGLOM - !need_txglom && -#endif - TRUE) { - pkt_len = sdioh_request_packet_align(pkt_len, write, - func, blk_size); - - pad = pkt_len - PKTLEN(sd->osh, pnext); - - if (pad > 0) { - if (func == SDIO_FUNC_2) { - sd_err(("%s: padding is unexpected! pkt_len %d," - " PKTLEN %d lft_len %d %s\n", - __FUNCTION__, pkt_len, PKTLEN(sd->osh, pnext), - lft_len, write ? "Write" : "Read")); - } - if (PKTTAILROOM(sd->osh, pkt) < pad) { - sd_info(("%s: insufficient tailroom %d, pad %d," - " lft_len %d pktlen %d, func %d %s\n", - __FUNCTION__, (int)PKTTAILROOM(sd->osh, pkt), - pad, lft_len, PKTLEN(sd->osh, pnext), func, - write ? 
"W" : "R")); - if (PKTPADTAILROOM(sd->osh, pkt, pad)) { - sd_err(("%s: padding error size %d.\n", - __FUNCTION__, pad)); - return SDIOH_API_RC_FAIL; - } - } - } - } - - if ((write) && (!fifo)) - err_ret = sdio_memcpy_toio( - sd->func[func], - addr, buf, pkt_len); - else if (write) - err_ret = sdio_memcpy_toio( - sd->func[func], - addr, buf, pkt_len); - else if (fifo) - err_ret = sdio_readsb( - sd->func[func], - buf, addr, pkt_len); - else - err_ret = sdio_memcpy_fromio( - sd->func[func], - buf, addr, pkt_len); - - if (err_ret) - sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=%d\n", - __FUNCTION__, - (write) ? "TX" : "RX", - pnext, SGCount, addr, pkt_len, err_ret)); - else - sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n", - __FUNCTION__, - (write) ? "TX" : "RX", - pnext, SGCount, addr, pkt_len)); - - if (!fifo) - addr += pkt_len; - SGCount ++; - } - sdio_release_host(sd->func[func]); - } -#ifdef BCMSDIOH_TXGLOM - if (localbuf) - MFREE(sd->osh, localbuf, lft_len); -#endif /* BCMSDIOH_TXGLOM */ - - sd_trace(("%s: Exit\n", __FUNCTION__)); - return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); -} - -/* - * This function takes a buffer or packet, and fixes everything up so that in the - * end, a DMA-able packet is created. - * - * A buffer does not have an associated packet pointer, and may or may not be aligned. - * A packet may consist of a single packet, or a packet chain. If it is a packet chain, - * then all the packets in the chain must be properly aligned. If the packet data is not - * aligned, then there may only be one packet, and in this case, it is copied to a new - * aligned packet. 
- * - */ -extern SDIOH_API_RC -sdioh_request_swtxglom_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func, - uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt) -{ - SDIOH_API_RC Status; - void *tmppkt; - void *orig_buf = NULL; - uint copylen = 0; - - sd_trace(("%s: Enter\n", __FUNCTION__)); - - DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait); - DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); - - if (pkt == NULL) { - /* Case 1: we don't have a packet. */ - orig_buf = buffer; - copylen = buflen_u; - } else if ((ulong)PKTDATA(sd->osh, pkt) & DMA_ALIGN_MASK) { - /* Case 2: We have a packet, but it is unaligned. - * in this case, we cannot have a chain. - */ - ASSERT(PKTNEXT(sd->osh, pkt) == NULL); - - orig_buf = PKTDATA(sd->osh, pkt); - copylen = PKTLEN(sd->osh, pkt); - } - - tmppkt = pkt; - if (copylen) { - tmppkt = PKTGET_STATIC(sd->osh, copylen, write ? TRUE : FALSE); - if (tmppkt == NULL) { - sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, copylen)); - return SDIOH_API_RC_FAIL; - } - /* For a write, copy the buffer data into the packet. */ - if (write) - bcopy(orig_buf, PKTDATA(sd->osh, tmppkt), copylen); - } - - Status = sdioh_request_swtxglom_packet(sd, fix_inc, write, func, addr, tmppkt); - - if (copylen) { - /* For a read, copy the packet data back to the buffer. */ - if (!write) - bcopy(PKTDATA(sd->osh, tmppkt), orig_buf, PKTLEN(sd->osh, tmppkt)); - PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE); - } - - return (Status); -} -#endif - uint sdioh_set_mode(sdioh_info_t *sd, uint mode) { @@ -1399,6 +1083,7 @@ sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint add return (((err_ret == 0)&&(err_ret2 == 0)) ? 
SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); } +#ifdef BCMSDIOH_TXGLOM static SDIOH_API_RC sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func, uint addr, void *pkt) @@ -1417,11 +1102,9 @@ sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func uint32 sg_count; struct sdio_func *sdio_func = sd->func[func]; struct mmc_host *host = sdio_func->card->host; -#ifdef BCMSDIOH_TXGLOM uint8 *localbuf = NULL; uint local_plen = 0; uint pkt_len = 0; -#endif /* BCMSDIOH_TXGLOM */ struct timespec now, before; sd_trace(("%s: Enter\n", __FUNCTION__)); @@ -1439,11 +1122,9 @@ sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func pkt_offset = 0; pnext = pkt; -#ifdef BCMSDIOH_TXGLOM ttl_len = 0; sg_count = 0; if(sd->txglom_mode == SDPCM_TXGLOM_MDESC) { -#endif while (pnext != NULL) { ttl_len = 0; sg_count = 0; @@ -1531,7 +1212,6 @@ sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func return SDIOH_API_RC_FAIL; } } -#ifdef BCMSDIOH_TXGLOM } else if(sd->txglom_mode == SDPCM_TXGLOM_CPY) { for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) { ttl_len += PKTLEN(sd->osh, pnext); @@ -1605,7 +1285,6 @@ txglomfail: if (localbuf) MFREE(sd->osh, localbuf, ttl_len); -#endif /* BCMSDIOH_TXGLOM */ if (sd_msglevel & SDH_COST_VAL) { getnstimeofday(&now); @@ -1616,6 +1295,7 @@ txglomfail: sd_trace(("%s: Exit\n", __FUNCTION__)); return SDIOH_API_RC_SUCCESS; } +#endif /* BCMSDIOH_TXGLOM */ static SDIOH_API_RC sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func, @@ -1699,12 +1379,13 @@ sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, u getnstimeofday(&before); if (pkt) { +#ifdef BCMSDIOH_TXGLOM /* packet chain, only used for tx/rx glom, all packets length * are aligned, total length is a block multiple */ if (PKTNEXT(sd->osh, pkt)) return sdioh_request_packet_chain(sd, fix_inc, write, func, addr, pkt); - +#endif /* BCMSDIOH_TXGLOM */ /* 
non-glom mode, ignore the buffer parameter and use the packet pointer * (this shouldn't happen) */ @@ -1804,7 +1485,10 @@ sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize sd_data(("%s: byte read data=0x%02x\n", __FUNCTION__, *data)); } else { - sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize); + if (sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize)) { + return BCME_SDIO_ERROR; + } + if (regsize == 2) *data &= 0xffff; @@ -2024,3 +1708,57 @@ sdioh_gpio_init(sdioh_info_t *sd) { return SDIOH_API_RC_FAIL; } + +uint +sdmmc_get_clock_rate(sdioh_info_t *sd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) + return 0; +#else + struct sdio_func *sdio_func = sd->func[0]; + struct mmc_host *host = sdio_func->card->host; + return mmc_host_clk_rate(host); +#endif +} + + +void +sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) + return; +#else + struct sdio_func *sdio_func = sd->func[0]; + struct mmc_host *host = sdio_func->card->host; + struct mmc_ios *ios = &host->ios; + + mmc_host_clk_hold(host); + DHD_INFO(("%s: Before change: sd clock rate is %u\n", __FUNCTION__, ios->clock)); + if (hz < host->f_min) { + DHD_ERROR(("%s: Intended rate is below min rate, setting to min\n", __FUNCTION__)); + hz = host->f_min; + } + + if (hz > host->f_max) { + DHD_ERROR(("%s: Intended rate exceeds max rate, setting to max\n", __FUNCTION__)); + hz = host->f_max; + } + ios->clock = hz; + host->ops->set_ios(host, ios); + DHD_ERROR(("%s: After change: sd clock rate is %u\n", __FUNCTION__, ios->clock)); + mmc_host_clk_release(host); +#endif +} + +void +sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div) +{ + uint hz; + uint old_div = sdmmc_get_clock_rate(sd); + if (old_div == sd_div) { + return; + } + + hz = sd->sd_clk_rate / sd_div; + sdmmc_set_clock_rate(sd, hz); +} diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmsdh_sdmmc_linux.c 
b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmsdh_sdmmc_linux.c index 741c50892102..cdb5af50fc70 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmsdh_sdmmc_linux.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmsdh_sdmmc_linux.c @@ -1,7 +1,7 @@ /* * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmsdh_sdmmc_linux.c 591173 2015-10-07 06:24:22Z $ + * $Id: bcmsdh_sdmmc_linux.c 644124 2016-06-17 07:59:34Z $ */ #include @@ -180,7 +180,7 @@ static int bcmsdh_sdmmc_probe(struct sdio_func *func, if (func == NULL) return -EINVAL; - sd_err(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__)); + sd_err(("%s: Enter num=%d\n", __FUNCTION__, func->num)); sd_info(("sdio_bcmsdh: func->class=%x\n", func->class)); sd_info(("sdio_vendor: 0x%04x\n", func->vendor)); sd_info(("sdio_device: 0x%04x\n", func->device)); @@ -226,7 +226,8 @@ static const struct sdio_device_id bcmsdh_sdmmc_ids[] = { { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4324) }, { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43239) }, { SDIO_DEVICE_CLASS(SDIO_CLASS_NONE) }, - { /* end: all zeroes */ }, + { 0, 0, 0, 0 /* end: all zeroes */ + }, }; MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids); @@ -266,9 +267,6 @@ static int bcmsdh_sdmmc_suspend(struct device *pdev) dhd_mmc_suspend = FALSE; return err; } -#if defined(OOB_INTR_ONLY) - bcmsdh_oob_intr_set(sdioh->bcmsdh, FALSE); -#endif smp_mb(); printf("%s Exit\n", __FUNCTION__); @@ -353,7 +351,7 @@ static struct sdio_driver bcmsdh_sdmmc_driver = { .pm = &bcmsdh_sdmmc_pm_ops, }, #endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */ -}; + }; struct sdos_info { sdioh_info_t *sd; @@ 
-399,12 +397,6 @@ MODULE_AUTHOR(AUTHOR); */ int bcmsdh_register_client_driver(void) { -#ifdef GLOBAL_SDMMC_INSTANCE - gInstance = kzalloc(sizeof(BCMSDH_SDMMC_INSTANCE), GFP_KERNEL); - if (!gInstance) - return -ENOMEM; -#endif - return sdio_register_driver(&bcmsdh_sdmmc_driver); } @@ -414,8 +406,4 @@ int bcmsdh_register_client_driver(void) void bcmsdh_unregister_client_driver(void) { sdio_unregister_driver(&bcmsdh_sdmmc_driver); -#ifdef GLOBAL_SDMMC_INSTANCE - if (gInstance) - kfree(gInstance); -#endif } diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmsdspi_linux.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmsdspi_linux.c index 139288e73ad8..b7091e5985f9 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmsdspi_linux.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmsdspi_linux.c @@ -1,7 +1,7 @@ /* * Broadcom SPI Host Controller Driver - Linux Per-port * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmspibrcm.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmspibrcm.c index 10d982e0f8d8..1bbff169f6ea 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmspibrcm.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmspibrcm.c @@ -1,7 +1,7 @@ /* * Broadcom BCMSDH to gSPI Protocol Conversion Layer * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmspibrcm.c 591086 2015-10-07 02:51:01Z $ + * $Id: bcmspibrcm.c 611787 2016-01-12 06:07:27Z $ */ #define HSMODE @@ -64,8 +64,6 @@ #define CMDLEN 4 -#define 
DWORDMODE_ON (sd->chip == BCM4329_CHIP_ID) && (sd->chiprev == 2) && (sd->dwordmode == TRUE) - /* Globals */ #if defined(DHD_DEBUG) uint sd_msglevel = SDH_ERROR_VAL; @@ -238,13 +236,6 @@ sdioh_interrupt_pending(sdioh_info_t *sd) } #endif -extern SDIOH_API_RC -sdioh_query_device(sdioh_info_t *sd) -{ - /* Return a BRCM ID appropriate to the dongle class */ - return (sd->num_funcs > 1) ? BCM4329_D11N_ID : BCM4318_D11G_ID; -} - /* Provide dstatus bits of spi-transaction for dhd layers. */ extern uint32 sdioh_get_dstatus(sdioh_info_t *sd) @@ -1529,22 +1520,6 @@ bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg, sd_trace(("spi cmd = 0x%x\n", cmd_arg)); - if (DWORDMODE_ON) { - spilen = GFIELD(cmd_arg, SPI_LEN); - if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_0) || - (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_1)) - dstatus_idx = spilen * 3; - - if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) && - (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) { - spilen = spilen << 2; - dstatus_idx = (spilen % 16) ? (16 - (spilen % 16)) : 0; - /* convert len to mod16 size */ - spilen = ROUNDUP(spilen, 16); - cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2)); - } - } - /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen * according to the wordlen mode(16/32bit) the device is in. 
*/ @@ -1567,17 +1542,6 @@ bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg, /* for Write, put the data into the output buffer */ if (GFIELD(cmd_arg, SPI_RW_FLAG) == 1) { /* We send len field of hw-header always a mod16 size, both from host and dongle */ - if (DWORDMODE_ON) { - if (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) { - ptr = (uint16 *)&data[0]; - templen = *ptr; - /* ASSERT(*ptr == ~*(ptr + 1)); */ - templen = ROUNDUP(templen, 16); - *ptr = templen; - sd_trace(("actual tx len = %d\n", (uint16)(~*(ptr+1)))); - } - } - if (datalen != 0) { for (i = 0; i < datalen/4; i++) { if (sd->wordlen == 4) { /* 32bit spid */ @@ -1640,25 +1604,6 @@ bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg, CMDLEN + resp_delay]); } } - - if ((DWORDMODE_ON) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) { - ptr = (uint16 *)&data[0]; - templen = *ptr; - buslen = len = ~(*(ptr + 1)); - buslen = ROUNDUP(buslen, 16); - /* populate actual len in hw-header */ - if (templen == buslen) - *ptr = len; - } - } - } - - /* Restore back the len field of the hw header */ - if (DWORDMODE_ON) { - if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) && - (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) { - ptr = (uint16 *)&data[0]; - *ptr = (uint16)(~*(ptr+1)); } } diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmutils.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmutils.c index 32ddc1f43bf5..05ee100fb492 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmutils.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmutils.c @@ -1,7 +1,7 @@ /* * Driver O/S-independent utility routines * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmutils.c 591286 2015-10-07 11:59:26Z $ + * $Id: bcmutils.c 699163 
2017-05-12 05:18:23Z $ */ #include @@ -54,12 +54,50 @@ #include #include -#include -#include -#include -#include -#include +#include +#include +#include +#include <802.1d.h> +#include <802.11.h> +#include +#include +#include +/* Look-up table to calculate head room present in a number */ +static const uint8 msb_table[] = { + 0, 1, 2, 2, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, + 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, + 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, + 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, + 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, +}; void *_bcmutils_dummy_fn = NULL; @@ -69,7 +107,6 @@ void *_bcmutils_dummy_fn = NULL; #ifdef BCMDRIVER - /* copy a pkt buffer chain into a buffer */ uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf) @@ -336,8 +373,16 @@ bcm_strtoul(const char *cp, char **endp, uint base) (value = bcm_isdigit(*cp) ? 
*cp-'0' : bcm_toupper(*cp)-'A'+10) < base) { result = result*base + value; /* Detected overflow */ - if (result < last_result && !minus) + if (result < last_result && !minus) { + if (endp) { + /* Go to the end of current number */ + while (bcm_isxdigit(*cp)) { + cp++; + } + *endp = DISCARD_QUAL(cp, char); + } return (ulong)-1; + } last_result = result; cp++; } @@ -798,11 +843,11 @@ pktsetprio(void *pkt, bool update_vtag) evh->vlan_tag = hton16(vlan_tag); rc |= PKTPRIO_UPD; } -#ifdef DHD_LOSSLESS_ROAMING +#if defined(EAPOL_PKT_PRIO) || defined(DHD_LOSSLESS_ROAMING) } else if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) { priority = PRIO_8021D_NC; rc = PKTPRIO_DSCP; -#endif /* DHD_LOSSLESS_ROAMING */ +#endif /* EAPOL_PKT_PRIO || DHD_LOSSLESS_ROAMING */ } else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) || (eh->ether_type == hton16(ETHER_TYPE_IPV6))) { uint8 *ip_body = pktdata + sizeof(struct ether_header); @@ -936,6 +981,73 @@ pktset8021xprio(void *pkt, int prio) } } +/* usr_prio range from low to high with usr_prio value */ +static bool +up_table_set(uint8 *up_table, uint8 usr_prio, uint8 low, uint8 high) +{ + int i; + + if (usr_prio > 7 || low > high || low >= UP_TABLE_MAX || high >= UP_TABLE_MAX) { + return FALSE; + } + + for (i = low; i <= high; i++) { + up_table[i] = usr_prio; + } + + return TRUE; +} + +/* set user priority table */ +int BCMFASTPATH +wl_set_up_table(uint8 *up_table, bcm_tlv_t *qos_map_ie) +{ + uint8 len; + + if (up_table == NULL || qos_map_ie == NULL) { + return BCME_ERROR; + } + + /* clear table to check table was set or not */ + memset(up_table, 0xff, UP_TABLE_MAX); + + /* length of QoS Map IE must be 16+n*2, n is number of exceptions */ + if (qos_map_ie != NULL && qos_map_ie->id == DOT11_MNG_QOS_MAP_ID && + (len = qos_map_ie->len) >= QOS_MAP_FIXED_LENGTH && + (len % 2) == 0) { + uint8 *except_ptr = (uint8 *)qos_map_ie->data; + uint8 except_len = len - QOS_MAP_FIXED_LENGTH; + uint8 *range_ptr = except_ptr + except_len; + int i; + + /* fill 
in ranges */ + for (i = 0; i < QOS_MAP_FIXED_LENGTH; i += 2) { + uint8 low = range_ptr[i]; + uint8 high = range_ptr[i + 1]; + if (low == 255 && high == 255) { + continue; + } + + if (!up_table_set(up_table, i / 2, low, high)) { + /* clear the table on failure */ + memset(up_table, 0xff, UP_TABLE_MAX); + return BCME_ERROR; + } + } + + /* update exceptions */ + for (i = 0; i < except_len; i += 2) { + uint8 dscp = except_ptr[i]; + uint8 usr_prio = except_ptr[i+1]; + + /* exceptions with invalid dscp/usr_prio are ignored */ + up_table_set(up_table, usr_prio, dscp, dscp); + } + } + + return BCME_OK; +} + /* The 0.5KB string table is not removed by compiler even though it's unused */ static char bcm_undeferrstr[32]; @@ -959,7 +1071,6 @@ bcmerrorstr(int bcmerror) } - /* iovar table lookup */ /* could mandate sorted tables and do a binary search */ const bcm_iovar_t* @@ -990,6 +1101,7 @@ int bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set) { int bcmerror = 0; + BCM_REFERENCE(arg); /* length check on io buf */ switch (vi->type) { @@ -1874,6 +1986,41 @@ bcm_parse_tlvs(void *buf, int buflen, uint key) return NULL; } +bcm_tlv_t * +bcm_parse_tlvs_dot11(void *buf, int buflen, uint key, bool id_ext) +{ + bcm_tlv_t *elt; + int totlen; + + elt = (bcm_tlv_t*)buf; + totlen = buflen; + + /* find tagged parameter */ + while (totlen >= TLV_HDR_LEN) { + int len = elt->len; + + do { + /* validate remaining totlen */ + if (totlen < (int)(len + TLV_HDR_LEN)) + break; + + if (id_ext) { + if (!DOT11_MNG_IE_ID_EXT_MATCH(elt, key)) + break; + } else if (elt->id != key) { + break; + } + + return (elt); + } while (0); + + elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN)); + totlen -= (len + TLV_HDR_LEN); + } + + return NULL; +} + /* * Traverse a string of 1-byte tag/1-byte length/variable-length value * triples, returning a pointer to the substring whose first element @@ -1883,7 +2030,8 @@ bcm_parse_tlvs(void *buf, int buflen, uint key) bcm_tlv_t * 
bcm_parse_tlvs_min_bodylen(void *buf, int buflen, uint key, int min_bodylen) { - bcm_tlv_t * ret = bcm_parse_tlvs(buf, buflen, key); + bcm_tlv_t * ret; + ret = bcm_parse_tlvs(buf, buflen, key); if (ret == NULL || ret->len < min_bodylen) { return NULL; } @@ -2022,7 +2170,7 @@ bcm_format_hex(char *str, const void *bytes, int len) /* pretty hex print a contiguous buffer */ void -prhex(const char *msg, uchar *buf, uint nbytes) +prhex(const char *msg, volatile uchar *buf, uint nbytes) { char line[128], *p; int len = sizeof(line); @@ -2166,7 +2314,7 @@ bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc } uint -bcm_mkiovar(const char *name, char *data, uint datalen, char *buf, uint buflen) +bcm_mkiovar(const char *name, const char *data, uint datalen, char *buf, uint buflen) { uint len; @@ -2178,8 +2326,10 @@ bcm_mkiovar(const char *name, char *data, uint datalen, char *buf, uint buflen) strncpy(buf, name, buflen); /* append data onto the end of the name string */ - memcpy(&buf[len], data, datalen); - len += datalen; + if (data && datalen != 0) { + memcpy(&buf[len], data, datalen); + len += datalen; + } return len; } @@ -2645,7 +2795,12 @@ bcm_bitprint32(const uint32 u32arg) { int i; for (i = NBITS(uint32) - 1; i >= 0; i--) { - isbitset(u32arg, i) ? printf("1") : printf("0"); + if (isbitset(u32arg, i)) { + printf("1"); + } else { + printf("0"); + } + if ((i % NBBY) == 0) printf(" "); } printf("\n"); @@ -3464,6 +3619,230 @@ bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset) (*r_hi) --; } +/* Does unsigned 64 bit fixed point multiplication */ +uint64 +fp_mult_64(uint64 val1, uint64 val2, uint8 nf1, uint8 nf2, uint8 nf_res) +{ + uint64 mult_out_tmp, mult_out, rnd_val; + uint8 shift_amt; + + shift_amt = nf1 + nf2 - nf_res; + /* 0.5 in 1.0.shift_amt */ + rnd_val = bcm_shl_64(1, (shift_amt - 1)); + rnd_val = (shift_amt == 0) ? 
0 : rnd_val; + mult_out_tmp = (uint64)((uint64)val1 * (uint64)val2) + (uint64)rnd_val; + mult_out = bcm_shr_64(mult_out_tmp, shift_amt); + + return mult_out; +} + + +/* Does unsigned 64 bit by 32 bit fixed point division */ +uint8 +fp_div_64(uint64 num, uint32 den, uint8 nf_num, uint8 nf_den, uint32 *div_out) +{ + uint8 shift_amt1, shift_amt2, shift_amt, nf_res, hd_rm_nr, hd_rm_dr; + uint32 num_hi, num_lo; + uint64 num_scale; + + /* Worst case shift possible */ + hd_rm_nr = fp_calc_head_room_64(num); + hd_rm_dr = fp_calc_head_room_32(den); + + /* (Nr / Dr) <= 2^32 */ + shift_amt1 = hd_rm_nr - hd_rm_dr - 1; + /* Shift <= 32 + N2 - N1 */ + shift_amt2 = 31 + nf_den - nf_num; + shift_amt = MINIMUM(shift_amt1, shift_amt2); + + /* Scale numerator */ + num_scale = bcm_shl_64(num, shift_amt); + + /* Do division */ + num_hi = (uint32)((uint64)num_scale >> 32) & MASK_32_BITS; + num_lo = (uint32)(num_scale & MASK_32_BITS); + bcm_uint64_divide(div_out, num_hi, num_lo, den); + + /* Result format */ + nf_res = nf_num - nf_den + shift_amt; + return nf_res; +} + +/* Finds the number of bits available for shifting in unsigned 64 bit number */ +uint8 +fp_calc_head_room_64(uint64 num) +{ + uint8 n_room_bits = 0, msb_pos; + uint32 num_hi, num_lo, x; + + num_hi = (uint32)((uint64)num >> 32) & MASK_32_BITS; + num_lo = (uint32)(num & MASK_32_BITS); + + if (num_hi > 0) { + x = num_hi; + n_room_bits = 0; + } else { + x = num_lo; + n_room_bits = 32; + } + + msb_pos = (x >> 16) ? ((x >> 24) ? (24 + msb_table[(x >> 24) & MASK_8_BITS]) + : (16 + msb_table[(x >> 16) & MASK_8_BITS])) + : ((x >> 8) ? (8 + msb_table[(x >> 8) & MASK_8_BITS]) + : msb_table[x & MASK_8_BITS]); + + return (n_room_bits + 32 - msb_pos); +} + +/* Finds the number of bits available for shifting in unsigned 32 bit number */ +uint8 +fp_calc_head_room_32(uint32 x) +{ + uint8 msb_pos; + + msb_pos = (x >> 16) ? ((x >> 24) ? (24 + msb_table[(x >> 24) & MASK_8_BITS]) + : (16 + msb_table[(x >> 16) & MASK_8_BITS])) + : ((x >> 8) ? 
(8 + msb_table[(x >> 8) & MASK_8_BITS]) + : msb_table[x & MASK_8_BITS]); + + return (32 - msb_pos); +} + +/* Does unsigned 64 bit fixed point floor */ +uint32 +fp_floor_64(uint64 num, uint8 floor_pos) +{ + uint32 floor_out; + + floor_out = (uint32)bcm_shr_64(num, floor_pos); + + return floor_out; +} + +/* Does unsigned 32 bit fixed point floor */ +uint32 +fp_floor_32(uint32 num, uint8 floor_pos) +{ + return num >> floor_pos; +} + +/* Does unsigned 64 bit fixed point rounding */ +uint32 +fp_round_64(uint64 num, uint8 rnd_pos) +{ + uint64 rnd_val, rnd_out_tmp; + uint32 rnd_out; + + /* 0.5 in 1.0.rnd_pos */ + rnd_val = bcm_shl_64(1, (rnd_pos - 1)); + rnd_val = (rnd_pos == 0) ? 0 : rnd_val; + rnd_out_tmp = num + rnd_val; + rnd_out = (uint32)bcm_shr_64(rnd_out_tmp, rnd_pos); + + return rnd_out; +} + +/* Does unsigned 32 bit fixed point rounding */ +uint32 +fp_round_32(uint32 num, uint8 rnd_pos) +{ + uint32 rnd_val, rnd_out_tmp; + + /* 0.5 in 1.0.rnd_pos */ + rnd_val = 1 << (rnd_pos - 1); + rnd_val = (rnd_pos == 0) ? 
0 : rnd_val; + rnd_out_tmp = num + rnd_val; + return (rnd_out_tmp >> rnd_pos); +} + +/* Does unsigned fixed point ceiling */ +uint32 +fp_ceil_64(uint64 num, uint8 ceil_pos) +{ + uint64 ceil_val, ceil_out_tmp; + uint32 ceil_out; + + /* 0.999 in 1.0.rnd_pos */ + ceil_val = bcm_shl_64(1, ceil_pos) - 1; + ceil_out_tmp = num + ceil_val; + ceil_out = (uint32)bcm_shr_64(ceil_out_tmp, ceil_pos); + + return ceil_out; +} + +/* Does left shift of unsigned 64 bit number */ +uint64 +bcm_shl_64(uint64 input, uint8 shift_amt) +{ + uint32 in_hi, in_lo; + uint32 masked_lo = 0; + uint32 mask; + uint64 shl_out; + + if (shift_amt == 0) { + return input; + } + + /* Get hi and lo part */ + in_hi = (uint32)((uint64)input >> 32) & MASK_32_BITS; + in_lo = (uint32)(input & MASK_32_BITS); + + if (shift_amt < 32) { + /* Extract bit which belongs to hi part after shifting */ + mask = ((uint32)~0) << (32 - shift_amt); + masked_lo = (in_lo & mask) >> (32 - shift_amt); + + /* Shift hi and lo and prepare output */ + in_hi = (in_hi << shift_amt) | masked_lo; + in_lo = in_lo << shift_amt; + } else { + /* Extract bit which belongs to hi part after shifting */ + shift_amt = shift_amt - 32; + + /* Shift hi and lo and prepare output */ + in_hi = in_lo << shift_amt; + in_lo = 0; + } + + shl_out = (((uint64)in_hi << 32) | in_lo); + return shl_out; +} + +/* Does right shift of unsigned 64 bit number */ +uint64 +bcm_shr_64(uint64 input, uint8 shift_amt) +{ + uint32 in_hi, in_lo; + uint32 masked_hi = 0; + uint32 mask; + uint64 shr_out; + + if (shift_amt == 0) { + return input; + } + + /* Get hi and lo part */ + in_hi = (uint32)((uint64)input >> 32) & MASK_32_BITS; + in_lo = (uint32)(input & MASK_32_BITS); + + if (shift_amt < 32) { + /* Extract bit which belongs to lo part after shifting */ + mask = (1 << shift_amt) - 1; + masked_hi = in_hi & mask; + + /* Shift hi and lo and prepare output */ + in_hi = (uint32)in_hi >> shift_amt; + in_lo = ((uint32)in_lo >> shift_amt) | (masked_hi << (32 - shift_amt)); + } 
else { + shift_amt = shift_amt - 32; + in_lo = in_hi >> shift_amt; + in_hi = 0; + } + + shr_out = (((uint64)in_hi << 32) | in_lo); + return shr_out; +} + #ifdef DEBUG_COUNTER #if (OSL_SYSUPTIME_SUPPORT == TRUE) void counter_printlog(counter_tbl_t *ctr_tbl) @@ -3572,3 +3951,156 @@ dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p) } #endif + +/* calculate partial checksum */ +static uint32 +ip_cksum_partial(uint32 sum, uint8 *val8, uint32 count) +{ + uint32 i; + uint16 *val16 = (uint16 *)val8; + + ASSERT(val8 != NULL); + /* partial chksum calculated on 16-bit values */ + ASSERT((count % 2) == 0); + + count /= 2; + + for (i = 0; i < count; i++) { + sum += *val16++; + } + return sum; +} + +/* calculate IP checksum */ +static uint16 +ip_cksum(uint32 sum, uint8 *val8, uint32 count) +{ + uint16 *val16 = (uint16 *)val8; + + ASSERT(val8 != NULL); + + while (count > 1) { + sum += *val16++; + count -= 2; + } + /* add left-over byte, if any */ + if (count > 0) { + sum += (*(uint8 *)val16); + } + + /* fold 32-bit sum to 16 bits */ + sum = (sum >> 16) + (sum & 0xffff); + sum += (sum >> 16); + return ((uint16)~sum); +} + +/* calculate IPv4 header checksum + * - input ip points to IP header in network order + * - output cksum is in network order + */ +uint16 +ipv4_hdr_cksum(uint8 *ip, int ip_len) +{ + uint32 sum = 0; + uint8 *ptr = ip; + + ASSERT(ip != NULL); + ASSERT(ip_len >= IPV4_MIN_HEADER_LEN); + + /* partial cksum skipping the hdr_chksum field */ + sum = ip_cksum_partial(sum, ptr, OFFSETOF(struct ipv4_hdr, hdr_chksum)); + ptr += OFFSETOF(struct ipv4_hdr, hdr_chksum) + 2; + + /* return calculated chksum */ + return ip_cksum(sum, ptr, ip_len - OFFSETOF(struct ipv4_hdr, src_ip)); +} + +/* calculate TCP header checksum using partial sum */ +static uint16 +tcp_hdr_chksum(uint32 sum, uint8 *tcp_hdr, uint16 tcp_len) +{ + uint8 *ptr = tcp_hdr; + + ASSERT(tcp_hdr != NULL); + ASSERT(tcp_len >= TCP_MIN_HEADER_LEN); + + /* partial TCP cksum skipping the chksum field */ + sum = 
ip_cksum_partial(sum, ptr, OFFSETOF(struct bcmtcp_hdr, chksum)); + ptr += OFFSETOF(struct bcmtcp_hdr, chksum) + 2; + + /* return calculated chksum */ + return ip_cksum(sum, ptr, tcp_len - OFFSETOF(struct bcmtcp_hdr, urg_ptr)); +} + +struct tcp_pseudo_hdr { + uint8 src_ip[IPV4_ADDR_LEN]; /* Source IP Address */ + uint8 dst_ip[IPV4_ADDR_LEN]; /* Destination IP Address */ + uint8 zero; + uint8 prot; + uint16 tcp_size; +}; + +/* calculate IPv4 TCP header checksum + * - input ip and tcp points to IP and TCP header in network order + * - output cksum is in network order + */ +uint16 +ipv4_tcp_hdr_cksum(uint8 *ip, uint8 *tcp, uint16 tcp_len) +{ + struct ipv4_hdr *ip_hdr = (struct ipv4_hdr *)ip; + struct tcp_pseudo_hdr tcp_ps; + uint32 sum = 0; + + ASSERT(ip != NULL); + ASSERT(tcp != NULL); + ASSERT(tcp_len >= TCP_MIN_HEADER_LEN); + + /* pseudo header cksum */ + memset(&tcp_ps, 0, sizeof(tcp_ps)); + memcpy(&tcp_ps.dst_ip, ip_hdr->dst_ip, IPV4_ADDR_LEN); + memcpy(&tcp_ps.src_ip, ip_hdr->src_ip, IPV4_ADDR_LEN); + tcp_ps.zero = 0; + tcp_ps.prot = ip_hdr->prot; + tcp_ps.tcp_size = hton16(tcp_len); + sum = ip_cksum_partial(sum, (uint8 *)&tcp_ps, sizeof(tcp_ps)); + + /* return calculated TCP header chksum */ + return tcp_hdr_chksum(sum, tcp, tcp_len); +} + +struct ipv6_pseudo_hdr { + uint8 saddr[IPV6_ADDR_LEN]; + uint8 daddr[IPV6_ADDR_LEN]; + uint16 payload_len; + uint8 zero; + uint8 next_hdr; +}; + +/* calculate IPv6 TCP header checksum + * - input ipv6 and tcp points to IPv6 and TCP header in network order + * - output cksum is in network order + */ +uint16 +ipv6_tcp_hdr_cksum(uint8 *ipv6, uint8 *tcp, uint16 tcp_len) +{ + struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)ipv6; + struct ipv6_pseudo_hdr ipv6_pseudo; + uint32 sum = 0; + + ASSERT(ipv6 != NULL); + ASSERT(tcp != NULL); + ASSERT(tcp_len >= TCP_MIN_HEADER_LEN); + + /* pseudo header cksum */ + memset((char *)&ipv6_pseudo, 0, sizeof(ipv6_pseudo)); + memcpy((char *)ipv6_pseudo.saddr, (char *)ipv6_hdr->saddr.addr, + 
sizeof(ipv6_pseudo.saddr)); + memcpy((char *)ipv6_pseudo.daddr, (char *)ipv6_hdr->daddr.addr, + sizeof(ipv6_pseudo.daddr)); + ipv6_pseudo.payload_len = ipv6_hdr->payload_len; + ipv6_pseudo.next_hdr = ipv6_hdr->nexthdr; + sum = ip_cksum_partial(sum, (uint8 *)&ipv6_pseudo, sizeof(ipv6_pseudo)); + + /* return calculated TCP header chksum */ + return tcp_hdr_chksum(sum, tcp, tcp_len); +} diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmwifi_channels.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmwifi_channels.c index be884cc33cc1..40fc3f7578fb 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmwifi_channels.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmwifi_channels.c @@ -3,7 +3,7 @@ * Contents are wifi-specific, used by any kernel or app-level * software that might want wifi things as it grows. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -26,7 +26,7 @@ * * <> * - * $Id: bcmwifi_channels.c 591285 2015-10-07 11:56:29Z $ + * $Id: bcmwifi_channels.c 612483 2016-01-14 03:44:27Z $ */ #include @@ -160,6 +160,42 @@ static const uint8 wf_5g_160m_chans[] = #define WF_NUM_5G_160M_CHANS \ (sizeof(wf_5g_160m_chans)/sizeof(uint8)) +/* opclass and channel information for US. 
Table E-1 */ +static const uint16 opclass_data[] = { + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_5)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_5)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_10)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_10)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_2G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_3G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_3G |((WL_CHANSPEC_BW_10)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_3G |((WL_CHANSPEC_BW_5)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_5)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_10)&WL_CHANSPEC_BW_MASK)), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_20)&WL_CHANSPEC_BW_MASK)), + 0, + 0, + 0, + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_LOWER), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_LOWER), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_LOWER), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_LOWER), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_LOWER), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_UPPER), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_UPPER), + (WL_CHANSPEC_BAND_5G 
|((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_UPPER), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_UPPER), + (WL_CHANSPEC_BAND_5G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_UPPER), + (WL_CHANSPEC_BAND_2G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_LOWER), + (WL_CHANSPEC_BAND_2G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_UPPER), +}; /* convert bandwidth from chanspec to MHz */ static uint @@ -788,10 +824,10 @@ wf_chspec_ctlchan(chanspec_t chspec) } /* given a chanspec, return the bandwidth string */ -char * +const char * wf_chspec_to_bw_str(chanspec_t chspec) { - return (char *)wf_chspec_bw_str[(CHSPEC_BW(chspec) >> WL_CHANSPEC_BW_SHIFT)]; + return wf_chspec_bw_str[(CHSPEC_BW(chspec) >> WL_CHANSPEC_BW_SHIFT)]; } /* @@ -1251,3 +1287,63 @@ wf_chspec_channel(chanspec_t chspec) } } #endif /* WL11AC_80P80 */ + +/* This routine returns the chanspec for a given operating class and + * channel number + */ +chanspec_t +wf_channel_create_chspec_frm_opclass(uint8 opclass, uint8 channel) +{ + chanspec_t chanspec = 0; + uint16 opclass_info = 0; + uint16 lookupindex = 0; + switch (opclass) { + case 115: + lookupindex = 1; + break; + case 124: + lookupindex = 3; + break; + case 125: + lookupindex = 5; + break; + case 81: + lookupindex = 12; + break; + case 116: + lookupindex = 22; + break; + case 119: + lookupindex = 23; + break; + case 126: + lookupindex = 25; + break; + case 83: + lookupindex = 32; + break; + case 84: + lookupindex = 33; + break; + default: + lookupindex = 12; + } + + if (lookupindex < 33) { + opclass_info = opclass_data[lookupindex-1]; + } + else { + opclass_info = opclass_data[11]; + } + chanspec = opclass_info | (uint16)channel; + return chanspec; +} + +/* This routine returns the opclass for a given chanspec */ +int +wf_channel_create_opclass_frm_chspec(chanspec_t chspec) +{ + BCM_REFERENCE(chspec); + /* TODO: Implement this function ! 
*/ + return 12; /* opclass 12 for basic 2G channels */ +} diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmwifi_channels.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmwifi_channels.h index 186c0e18ee92..28c6e2739303 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmwifi_channels.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmwifi_channels.h @@ -3,7 +3,7 @@ * This header file housing the define and function prototype use by * both the wl driver, tools & Apps. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -26,7 +26,7 @@ * * <> * - * $Id: bcmwifi_channels.h 591285 2015-10-07 11:56:29Z $ + * $Id: bcmwifi_channels.h 612483 2016-01-14 03:44:27Z $ */ #ifndef _bcmwifi_channels_h_ @@ -116,6 +116,9 @@ typedef struct { #define INVCHANSPEC 255 #define MAX_CHANSPEC 0xFFFF +#define WL_CHANNEL_BAND(ch) (((ch) <= CH_MAX_2G_CHANNEL) ? \ + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G) + /* channel defines */ #define LOWER_20_SB(channel) (((channel) > CH_10MHZ_APART) ? 
\ ((channel) - CH_10MHZ_APART) : 0) @@ -172,11 +175,18 @@ typedef struct { #define CHSPEC_BW(chspec) ((chspec) & WL_CHANSPEC_BW_MASK) #ifdef WL11N_20MHZONLY - +#ifdef WL11ULB +#define CHSPEC_IS2P5(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_2P5) +#define CHSPEC_IS5(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_5) +#define CHSPEC_IS10(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10) +#else #define CHSPEC_IS2P5(chspec) 0 #define CHSPEC_IS5(chspec) 0 #define CHSPEC_IS10(chspec) 0 +#endif #define CHSPEC_IS20(chspec) 1 +#define CHSPEC_IS20_2G(chspec) ((((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) && \ + CHSPEC_IS2G(chspec)) #ifndef CHSPEC_IS40 #define CHSPEC_IS40(chspec) 0 #endif @@ -197,6 +207,8 @@ typedef struct { #define CHSPEC_IS5(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_5) #define CHSPEC_IS10(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10) #define CHSPEC_IS20(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) +#define CHSPEC_IS20_5G(chspec) ((((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) && \ + CHSPEC_IS5G(chspec)) #ifndef CHSPEC_IS40 #define CHSPEC_IS40(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40) #endif @@ -472,7 +484,7 @@ extern uint8 wf_chspec_ctlchan(chanspec_t chspec); * * @return Returns the bandwidth string */ -extern char * wf_chspec_to_bw_str(chanspec_t chspec); +extern const char *wf_chspec_to_bw_str(chanspec_t chspec); /** * Return the primary (control) chanspec. 
@@ -628,4 +640,6 @@ extern chanspec_t wf_chspec_primary80_chspec(chanspec_t chspec); */ extern uint8 wf_chspec_channel(chanspec_t chspec); #endif +extern chanspec_t wf_channel_create_chspec_frm_opclass(uint8 opclass, uint8 channel); +extern int wf_channel_create_opclass_frm_chspec(chanspec_t chspec); #endif /* _bcmwifi_channels_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmwifi_rates.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmwifi_rates.h index 1329e9bc80da..542055df3dc1 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmwifi_rates.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmwifi_rates.h @@ -1,7 +1,7 @@ /* * Indices for 802.11 a/b/g/n/ac 1-3 chain symmetric transmit rates * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmwifi_rates.h 591285 2015-10-07 11:56:29Z $ + * $Id: bcmwifi_rates.h 612483 2016-01-14 03:44:27Z $ */ #ifndef _bcmwifi_rates_h_ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmxtlv.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmxtlv.c index 26cfb9ac264a..d6bef6fa2c37 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmxtlv.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/bcmxtlv.c @@ -1,7 +1,7 @@ /* * Driver O/S-independent utility routines * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmxtlv.c 527361 2015-01-17 01:48:34Z $ + * $Id: bcmxtlv.c 628611 2016-03-31 17:53:25Z $ */ #include @@ -215,7 +215,7 @@ bcm_unpack_xtlv_entry(uint8 **tlv_buf, uint16 xpct_type, 
uint16 xpct_len, void * /* copy tlv record to caller's buffer */ memcpy(dst, ptlv->data, ptlv->len); } - *tlv_buf += BCM_XTLV_SIZE(ptlv, opts); + *tlv_buf = (uint8*)(*tlv_buf) + BCM_XTLV_SIZE(ptlv, opts); return BCME_OK; } @@ -249,7 +249,7 @@ bcm_pack_xtlv_entry(uint8 **tlv_buf, uint16 *buflen, uint16 type, uint16 len, vo memcpy(ptlv->data, src, len); /* advance callers pointer to tlv buff */ - *tlv_buf += size; + *tlv_buf = (uint8*)(*tlv_buf) + size; /* decrement the len */ *buflen -= (uint16)size; return BCME_OK; @@ -289,7 +289,7 @@ bcm_unpack_xtlv_buf(void *ctx, uint8 *tlv_buf, uint16 buflen, bcm_xtlv_opts_t op if ((res = cbfn(ctx, ptlv->data, type, len)) != BCME_OK) break; - tlv_buf += size; + tlv_buf = (uint8*)tlv_buf + size; } return res; } @@ -317,7 +317,7 @@ bcm_pack_xtlv_buf(void *ctx, void *tlv_buf, uint16 buflen, bcm_xtlv_opts_t opts, while (more && (buf < endp)) { more = get_next(ctx, &tlv_id, &tlv_len); size = bcm_xtlv_size_for_data(tlv_len, opts); - if ((buf + size) >= endp) { + if ((buf + size) > endp) { res = BCME_BUFTOOSHORT; goto done; } @@ -349,7 +349,7 @@ bcm_pack_xtlv_buf_from_mem(void **tlv_buf, uint16 *buflen, xtlv_desc_t *items, uint8 *ptlv = (uint8 *)*tlv_buf; while (items->type != 0) { - if ((res = bcm_pack_xtlv_entry(&ptlv, + if ((items->len > 0) && (res = bcm_pack_xtlv_entry(&ptlv, buflen, items->type, items->len, items->ptr, opts) != BCME_OK)) { break; diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd.h index 7beb91e1cc90..a10a80fb8cbc 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd.h @@ -4,7 +4,7 @@ * Provides type definitions and function prototypes used to link the * DHD OS, bus, and protocol modules. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -27,7 +27,7 @@ * * <> * - * $Id: dhd.h 610267 2016-01-06 16:03:53Z $ + * $Id: dhd.h 711448 2017-07-18 08:27:03Z $ */ /**************** @@ -51,10 +51,12 @@ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK) #include #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */ -#include /* The kernel threading is sdio-specific */ struct task_struct; struct sched_param; +#if defined(BT_OVER_SDIO) +#include +#endif /* defined (BT_OVER_SDIO) */ int setScheduler(struct task_struct *p, int policy, struct sched_param *param); int get_scheduler_policy(struct task_struct *p); #define MAX_EVENT 16 @@ -63,6 +65,10 @@ int get_scheduler_policy(struct task_struct *p); #include #include +#include +#if defined(DUMP_IOCTL_IOV_LIST) || defined(DHD_DEBUG) +#include +#endif /* DUMP_IOCTL_IOV_LIST || DHD_DEBUG */ #include #if defined(BCMWDF) @@ -74,15 +80,10 @@ int get_scheduler_policy(struct task_struct *p); #define MAX_RESCHED_CNT 600 #endif /* DEBUG_DPC_THREAD_WATCHDOG */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) && LINUX_VERSION_CODE < \ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) && LINUX_VERSION_CODE < \ KERNEL_VERSION(3, 18, 0) || defined(CONFIG_BCMDHD_VENDOR_EXT)) #define WL_VENDOR_EXT_SUPPORT -#endif /* 3.13.0 <= LINUX_KERNEL_VERSION < 3.18.0 || CONFIG_BCMDHD_VENDOR_EXT */ -#if defined(CONFIG_ANDROID) && defined(WL_VENDOR_EXT_SUPPORT) -#if !defined(GSCAN_SUPPORT) -#define GSCAN_SUPPORT -#endif -#endif /* CONFIG_ANDROID && WL_VENDOR_EXT_SUPPORT */ +#endif /* 3.18 > KERNEL_VER >= 3.14 || defined(CONFIG_BCMDHD_VENDOR_EXT) */ #if defined(KEEP_ALIVE) /* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */ @@ 
-95,6 +96,8 @@ struct dhd_bus; struct dhd_prot; struct dhd_info; struct dhd_ioctl; +struct dhd_dbg; +struct dhd_ts; /* The level of bus communication with the dongle */ enum dhd_bus_state { @@ -116,21 +119,124 @@ enum dhd_bus_state { #define DHD_BUS_BUSY_IN_WD 0x08 #define DHD_BUS_BUSY_IN_IOVAR 0x10 #define DHD_BUS_BUSY_IN_DHD_IOVAR 0x20 -#define DHD_BUS_BUSY_IN_SUSPEND 0x40 -#define DHD_BUS_BUSY_IN_RESUME 0x80 +#define DHD_BUS_BUSY_SUSPEND_IN_PROGRESS 0x40 +#define DHD_BUS_BUSY_RESUME_IN_PROGRESS 0x80 #define DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS 0x100 #define DHD_BUS_BUSY_RPM_SUSPEND_DONE 0x200 #define DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS 0x400 #define DHD_BUS_BUSY_RPM_ALL (DHD_BUS_BUSY_RPM_SUSPEND_DONE | \ DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS | \ DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS) +#define DHD_BUS_BUSY_IN_CHECKDIED 0x800 + +#define DHD_BUS_BUSY_SET_IN_TX(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_TX +#define DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SEND_PKT +#define DHD_BUS_BUSY_SET_IN_DPC(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DPC +#define DHD_BUS_BUSY_SET_IN_WD(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_WD +#define DHD_BUS_BUSY_SET_IN_IOVAR(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_IOVAR +#define DHD_BUS_BUSY_SET_IN_DHD_IOVAR(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DHD_IOVAR +#define DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_SUSPEND_IN_PROGRESS +#define DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RESUME_IN_PROGRESS +#define DHD_BUS_BUSY_SET_RPM_SUSPEND_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS +#define DHD_BUS_BUSY_SET_RPM_SUSPEND_DONE(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_DONE +#define DHD_BUS_BUSY_SET_RPM_RESUME_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state |= 
DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS +#define DHD_BUS_BUSY_SET_IN_CHECKDIED(dhdp) \ + (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_CHECKDIED + +#define DHD_BUS_BUSY_CLEAR_IN_TX(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX +#define DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SEND_PKT +#define DHD_BUS_BUSY_CLEAR_IN_DPC(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DPC +#define DHD_BUS_BUSY_CLEAR_IN_WD(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_WD +#define DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_IOVAR +#define DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DHD_IOVAR +#define DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_SUSPEND_IN_PROGRESS +#define DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RESUME_IN_PROGRESS +#define DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS +#define DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_DONE(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_DONE +#define DHD_BUS_BUSY_CLEAR_RPM_RESUME_IN_PROGRESS(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS +#define DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(dhdp) \ + (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_CHECKDIED + +#define DHD_BUS_BUSY_CHECK_IN_TX(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_TX) +#define DHD_BUS_BUSY_CHECK_IN_SEND_PKT(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_SEND_PKT) +#define DHD_BUS_BUSY_CHECK_IN_DPC(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_DPC) +#define DHD_BUS_BUSY_CHECK_IN_WD(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_WD) +#define DHD_BUS_BUSY_CHECK_IN_IOVAR(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_IOVAR) +#define 
DHD_BUS_BUSY_CHECK_IN_DHD_IOVAR(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_DHD_IOVAR) +#define DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_SUSPEND_IN_PROGRESS) +#define DHD_BUS_BUSY_CHECK_RESUME_IN_PROGRESS(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RESUME_IN_PROGRESS) +#define DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS) +#define DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_DONE) +#define DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS) +#define DHD_BUS_BUSY_CHECK_RPM_ALL(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_ALL) +#define DHD_BUS_BUSY_CHECK_IN_CHECKDIED(dhdp) \ + ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_CHECKDIED) +#define DHD_BUS_BUSY_CHECK_IDLE(dhdp) \ + ((dhdp)->dhd_bus_busy_state == 0) + +#define DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp) \ + ((dhdp)->busstate == DHD_BUS_SUSPEND || DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp) || \ + DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(dhdp)) + +#define DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(dhdp) \ + (DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp) || \ + DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(dhdp)) + +#define DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp) \ + ((dhdp)->busstate == DHD_BUS_DOWN || (dhdp)->busstate == DHD_BUS_DOWN_IN_PROGRESS) + +/* Macro to print Ethernet Address as String + * expects both arguements as (char *) + */ +#define DHD_MAC_TO_STR(mac, str) (snprintf(str, ETHER_ADDR_STR_LEN, \ + "%02x:%02x:%02x:%02x:%02x:%02x\n", \ + (uchar)mac[0]&0xff, \ + (uchar)mac[1]&0xff, \ + (uchar)mac[2]&0xff, \ + (uchar)mac[3]&0xff, \ + (uchar)mac[4]&0xff, \ + (uchar)mac[5]&0xff)) + /* Download Types */ typedef enum download_type { FW, NVRAM, - CLM_BLOB, - CLMINFO + CLM_BLOB } download_type_t; @@ -138,6 +244,7 @@ typedef 
enum download_type { #define DHD_MAX_IFS 16 #define DHD_DEL_IF -0xE #define DHD_BAD_IF -0xF +#define DHD_EVENT_IF 0xFFFF /* Hack i/f to handle events from INFO Ring */ enum dhd_op_flags { /* Firmware requested operation mode */ @@ -146,6 +253,8 @@ enum dhd_op_flags { DHD_FLAG_P2P_MODE = (1 << (2)), /* P2P Only */ /* STA + P2P */ DHD_FLAG_CONCURR_SINGLE_CHAN_MODE = (DHD_FLAG_STA_MODE | DHD_FLAG_P2P_MODE), + /* STA + SoftAP */ + DHD_FLAG_CONCURR_STA_HOSTAP_MODE = (DHD_FLAG_STA_MODE | DHD_FLAG_HOSTAP_MODE), DHD_FLAG_CONCURR_MULTI_CHAN_MODE = (1 << (4)), /* STA + P2P */ /* Current P2P mode for P2P connection */ DHD_FLAG_P2P_GC_MODE = (1 << (5)), @@ -171,6 +280,8 @@ enum dhd_op_flags { #define DHD_SCAN_ASSOC_ACTIVE_TIME 40 /* ms: Embedded default Active setting from DHD */ #define DHD_SCAN_UNASSOC_ACTIVE_TIME 80 /* ms: Embedded def. Unassoc Active setting from DHD */ #define DHD_SCAN_PASSIVE_TIME 130 /* ms: Embedded default Passive setting from DHD */ +#define DHD_SCAN_HOME_TIME 45 /* ms: Embedded default Home time setting from DHD */ +#define DHD_SCAN_HOME_AWAY_TIME 100 /* ms: Embedded default Home Away time setting from DHD */ #ifndef POWERUP_MAX_RETRY #define POWERUP_MAX_RETRY 1 /* how many times we retry to power up the chip */ @@ -178,8 +289,12 @@ enum dhd_op_flags { #ifndef POWERUP_WAIT_MS #define POWERUP_WAIT_MS 2000 /* ms: time out in waiting wifi to come up */ #endif +/* + * MAX_NVRAMBUF_SIZE determines the size of the Buffer in the DHD that holds + * the NVRAM data. 
That is the size of the buffer pointed by bus->vars + * This also needs to be increased to 16K to support NVRAM size higher than 8K + */ #define MAX_NVRAMBUF_SIZE (16 * 1024) /* max nvram buf size */ -#define MAX_CLMINFO_BUF_SIZE (4 * 1024) /* max clminfo buf size */ #define MAX_CLM_BUF_SIZE (48 * 1024) /* max clm blob size */ #ifdef DHD_DEBUG #define DHD_JOIN_MAX_TIME_DEFAULT 10000 /* ms: Max time out for joining AP */ @@ -193,6 +308,8 @@ enum dhd_op_flags { #define FW_VER_STR_LEN 128 #define CLM_VER_STR_LEN 128 +#define BUS_API_REV_STR_LEN 128 +extern char bus_api_revision[]; enum dhd_bus_wake_state { WAKE_LOCK_OFF, @@ -210,6 +327,21 @@ enum dhd_bus_wake_state { WAKE_LOCK_SOFTAP_THREAD }; +#ifdef PCIE_INB_DW +enum dhd_bus_ds_state { + DW_DEVICE_DS_INVALID = -1, + DW_DEVICE_DS_DEV_SLEEP = 0, + DW_DEVICE_DS_DEV_SLEEP_PEND, + DW_DEVICE_DS_DISABLED_WAIT, + DW_DEVICE_DS_DEV_WAKE, + DW_DEVICE_DS_ACTIVE, + DW_DEVICE_HOST_SLEEP_WAIT, + DW_DEVICE_HOST_SLEEP, + DW_DEVICE_HOST_WAKE_WAIT, + DW_DEVICE_DS_D3_INFORM_WAIT +}; +#endif /* PCIE_INB_DW */ + enum dhd_prealloc_index { DHD_PREALLOC_PROT = 0, DHD_PREALLOC_RXBUF, @@ -221,12 +353,20 @@ enum dhd_prealloc_index { DHD_PREALLOC_DHD_INFO = 7, DHD_PREALLOC_DHD_WLFC_INFO = 8, DHD_PREALLOC_IF_FLOW_LKUP = 9, - DHD_PREALLOC_MEMDUMP_BUF = 10, + /* 10 */ DHD_PREALLOC_MEMDUMP_RAM = 11, DHD_PREALLOC_DHD_WLFC_HANGER = 12, DHD_PREALLOC_PKTID_MAP = 13, DHD_PREALLOC_PKTID_MAP_IOCTL = 14, - DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15 + DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15, + DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16, + DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17, + DHD_PREALLOC_STAT_REPORT_BUF = 18, + DHD_PREALLOC_WL_ESCAN_INFO = 19, + DHD_PREALLOC_FW_VERBOSE_RING = 20, + DHD_PREALLOC_FW_EVENT_RING = 21, + DHD_PREALLOC_DHD_EVENT_RING = 22, + DHD_PREALLOC_NAN_EVENT_RING = 23 }; enum dhd_dongledump_mode { @@ -243,11 +383,25 @@ enum dhd_dongledump_type { DUMP_TYPE_DONGLE_TRAP, DUMP_TYPE_MEMORY_CORRUPTION, DUMP_TYPE_PKTID_AUDIT_FAILURE, + DUMP_TYPE_PKTID_INVALID, 
DUMP_TYPE_SCAN_TIMEOUT, + DUMP_TYPE_JOIN_TIMEOUT, DUMP_TYPE_SCAN_BUSY, DUMP_TYPE_BY_SYSDUMP, DUMP_TYPE_BY_LIVELOCK, - DUMP_TYPE_AP_LINKUP_FAILURE + DUMP_TYPE_AP_LINKUP_FAILURE, + DUMP_TYPE_AP_ABNORMAL_ACCESS, + DUMP_TYPE_CFG_VENDOR_TRIGGERED, + DUMP_TYPE_RESUMED_ON_TIMEOUT_TX, + DUMP_TYPE_RESUMED_ON_TIMEOUT_RX, + DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR, + DUMP_TYPE_DONGLE_HOST_EVENT, + DUMP_TYPE_RESUMED_UNKNOWN, + DUMP_TYPE_TRANS_ID_MISMATCH, + DUMP_TYPE_HANG_ON_IFACE_OP_FAIL, +#ifdef SUPPORT_LINKDOWN_RECOVERY + DUMP_TYPE_READ_SHM_FAIL +#endif /* SUPPORT_LINKDOWN_RECOVERY */ }; enum dhd_hang_reason { @@ -256,12 +410,15 @@ enum dhd_hang_reason { HANG_REASON_DONGLE_TRAP = 0x8002, HANG_REASON_D3_ACK_TIMEOUT = 0x8003, HANG_REASON_BUS_DOWN = 0x8004, - HANG_REASON_PCIE_LINK_DOWN = 0x8005, HANG_REASON_MSGBUF_LIVELOCK = 0x8006, - HANG_REASON_P2P_IFACE_DEL_FAILURE = 0x8007, + HANG_REASON_IFACE_OP_FAILURE = 0x8007, HANG_REASON_HT_AVAIL_ERROR = 0x8008, HANG_REASON_PCIE_RC_LINK_UP_FAIL = 0x8009, - HANG_REASON_MAX = 0x800a + HANG_REASON_PCIE_PKTID_ERROR = 0x800A, + HANG_REASON_PCIE_LINK_DOWN = 0x8805, + HANG_REASON_INVALID_EVENT_OR_DATA = 0x8806, + HANG_REASON_UNKNOWN = 0x8807, + HANG_REASON_MAX = 0x8808 }; enum dhd_rsdb_scan_features { @@ -322,8 +479,55 @@ enum { TCPACK_SUP_HOLD, TCPACK_SUP_LAST_MODE }; + +#ifdef BCMSDIO +#define TCPACK_SUP_DEFAULT TCPACK_SUP_DELAYTX +#elif defined(BCMPCIE) +#define TCPACK_SUP_DEFAULT TCPACK_SUP_HOLD +#else +#define TCPACK_SUP_DEFAULT TCPACK_SUP_OFF +#endif /* BCMSDIO */ #endif /* DHDTCPACK_SUPPRESS */ +#if defined(TRAFFIC_MGMT_DWM) +#define DHD_DWM_TBL_SIZE 57 +/* DSCP WMM AC Mapping macros and structures */ +#define DHD_TRF_MGMT_DWM_FILTER_BIT 0x8 +#define DHD_TRF_MGMT_DWM_PRIO_BITS 0x7 +#define DHD_TRF_MGMT_DWM_FAVORED_BIT 0x10 +#define DHD_TRF_MGMT_DWM_PRIO(dwm_tbl_entry) ((dwm_tbl_entry) & DHD_TRF_MGMT_DWM_PRIO_BITS) +#define DHD_TRF_MGMT_DWM_IS_FAVORED_SET(dwm_tbl_entry) \ + ((dwm_tbl_entry) & DHD_TRF_MGMT_DWM_FAVORED_BIT) +#define 
DHD_TRF_MGMT_DWM_SET_FAVORED(dwm_tbl_entry) \ + ((dwm_tbl_entry) |= DHD_TRF_MGMT_DWM_FAVORED_BIT) +#define DHD_TRF_MGMT_DWM_IS_FILTER_SET(dwm_tbl_entry) \ + ((dwm_tbl_entry) & DHD_TRF_MGMT_DWM_FILTER_BIT) +#define DHD_TRF_MGMT_DWM_SET_FILTER(dwm_tbl_entry) \ + ((dwm_tbl_entry) |= DHD_TRF_MGMT_DWM_FILTER_BIT) + +typedef struct { + uint8 dhd_dwm_enabled; + uint8 dhd_dwm_tbl[DHD_DWM_TBL_SIZE]; +} dhd_trf_mgmt_dwm_tbl_t; +#endif + +#define DHD_NULL_CHK_AND_RET(cond) \ + if (!cond) { \ + DHD_ERROR(("%s " #cond " is NULL\n", __FUNCTION__)); \ + return; \ + } + +#define DHD_NULL_CHK_AND_RET_VAL(cond, value) \ + if (!cond) { \ + DHD_ERROR(("%s " #cond " is NULL\n", __FUNCTION__)); \ + return value; \ + } + +#define DHD_NULL_CHK_AND_GOTO(cond, label) \ + if (!cond) { \ + DHD_ERROR(("%s " #cond " is NULL\n", __FUNCTION__)); \ + goto label; \ + } /* * Accumulating the queue lengths of all flowring queues in a parent object, @@ -357,15 +561,6 @@ typedef uint32 cumm_ctr_t; #define DHD_CUMM_CTR_DECR(clen) \ ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL)); -/* DMA'ing r/w indices for rings supported */ -#ifdef BCM_INDX_TCM /* FW gets r/w indices in TCM */ -#define DMA_INDX_ENAB(dma_indxsup) 0 -#elif defined BCM_INDX_DMA /* FW gets r/w indices from Host memory */ -#define DMA_INDX_ENAB(dma_indxsup) 1 -#else /* r/w indices in TCM or host memory based on FW/Host agreement */ -#define DMA_INDX_ENAB(dma_indxsup) dma_indxsup -#endif /* BCM_INDX_TCM */ - #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) struct tdls_peer_node { uint8 addr[ETHER_ADDR_LEN]; @@ -383,6 +578,7 @@ typedef struct { struct dhd_log_dump_buf { spinlock_t lock; + unsigned int enable; unsigned int wraparound; unsigned long max; unsigned int remain; @@ -391,13 +587,72 @@ struct dhd_log_dump_buf char* buffer; }; -#define DHD_LOG_DUMP_BUFFER_SIZE (1024 * 1024) -#define DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE 256 - -extern void dhd_log_dump_print(const char *fmt, ...); +#define DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE 256 
+extern void dhd_log_dump_write(int type, const char *fmt, ...); extern char *dhd_log_dump_get_timestamp(void); #endif /* DHD_LOG_DUMP */ -#define DHD_COMMON_DUMP_PATH "/data/misc/wifi/log/" + +#if defined(CUSTOMER_HW2) +#define DHD_COMMON_DUMP_PATH "/data/misc/wifi/" +#else +#define DHD_COMMON_DUMP_PATH "/installmedia/" +#endif + +struct cntry_locales_custom { + char iso_abbrev[WLC_CNTRY_BUF_SZ]; /* ISO 3166-1 country abbreviation */ + char custom_locale[WLC_CNTRY_BUF_SZ]; /* Custom firmware locale */ + int32 custom_locale_rev; /* Custom local revisin default -1 */ +}; + +#ifdef REPORT_FATAL_TIMEOUTS +typedef struct timeout_info { + void *scan_timer_lock; + void *join_timer_lock; + void *cmd_timer_lock; + void *bus_timer_lock; + uint32 scan_timeout_val; + uint32 join_timeout_val; + uint32 cmd_timeout_val; + uint32 bus_timeout_val; + bool scan_timer_active; + bool join_timer_active; + bool cmd_timer_active; + bool bus_timer_active; + osl_timer_t *scan_timer; + osl_timer_t *join_timer; + osl_timer_t *cmd_timer; + osl_timer_t *bus_timer; + uint16 cmd_request_id; + uint32 cmd; + uint32 cmd_join_error; +} timeout_info_t; +#endif /* REPORT_FATAL_TIMEOUTS */ + +#ifdef HOFFLOAD_MODULES +/* Metadata structure containing module information */ +struct module_metadata { + void *data; /* module data */ + uint32_t size; /* module size */ + u64 data_addr; /* address of module data in host */ +}; +#endif + +#ifdef DMAMAP_STATS +typedef struct dmamap_stats { + uint64 txdata; + uint64 txdata_sz; + uint64 rxdata; + uint64 rxdata_sz; + uint64 ioctl_rx; + uint64 ioctl_rx_sz; + uint64 event_rx; + uint64 event_rx_sz; + uint64 info_rx; + uint64 info_rx_sz; + uint64 tsbuf_rx; + uint64 tsbuf_rx_sz; +} dma_stats_t; +#endif /* DMAMAP_STATS */ /* Common structure for module and instance linkage */ typedef struct dhd_pub { @@ -406,6 +661,7 @@ typedef struct dhd_pub { struct dhd_bus *bus; /* Bus module handle */ struct dhd_prot *prot; /* Protocol module handle */ struct dhd_info *info; /* Info 
module handle */ + struct dhd_dbg *dbg; /* Debugability module handle */ /* to NDIS developer, the structure dhd_common is redundant, * please do NOT merge it back from other branches !!! @@ -414,6 +670,9 @@ typedef struct dhd_pub { /* Internal dhd items */ bool up; /* Driver up/down (to OS) */ +#ifdef WL_CFG80211 + spinlock_t up_lock; /* Synchronization with CFG80211 down */ +#endif /* WL_CFG80211 */ bool txoff; /* Transmit flow-controlled */ bool dongle_reset; /* TRUE = DEVRESET put dongle into reset */ enum dhd_bus_state busstate; @@ -450,6 +709,11 @@ typedef struct dhd_pub { ulong tx_realloc; /* Number of tx packets we had to realloc for headroom */ ulong fc_packets; /* Number of flow control pkts recvd */ +#ifdef DMAMAP_STATS + /* DMA Mapping statistics */ + dma_stats_t dma_stats; +#endif /* DMAMAP_STATS */ + /* Last error return */ int bcmerror; uint tickcnt; @@ -481,8 +745,11 @@ typedef struct dhd_pub { wl_country_t dhd_cspec; /* Current Locale info */ #ifdef CUSTOM_COUNTRY_CODE - u32 dhd_cflags; + uint dhd_cflags; #endif /* CUSTOM_COUNTRY_CODE */ +#if defined(DHD_BLOB_EXISTENCE_CHECK) + bool is_blob; /* Checking for existance of Blob file */ +#endif /* DHD_BLOB_EXISTENCE_CHECK */ bool force_country_change; char eventmask[WL_EVENTING_MASK_LEN]; int op_mode; /* STA, HostAPD, WFD, SoftAP */ @@ -517,11 +784,14 @@ typedef struct dhd_pub { bool proptxstatus_txstatus_ignore; bool wlfc_rxpkt_chk; +#ifdef LIMIT_BORROW + bool wlfc_borrow_allowed; +#endif /* LIMIT_BORROW */ /* * implement below functions in each platform if needed. 
*/ /* platform specific function whether to skip flow control */ - bool (*skip_fc)(void); + bool (*skip_fc)(void * dhdp, uint8 ifx); /* platform specific function for wlfc_enable and wlfc_deinit */ void (*plat_init)(void *dhd); void (*plat_deinit)(void *dhd); @@ -538,7 +808,15 @@ typedef struct dhd_pub { void *rtt_state; #endif bool dongle_isolation; + bool is_pcie_watchdog_reset; bool dongle_trap_occured; /* flag for sending HANG event to upper layer */ + bool iovar_timeout_occured; /* flag to indicate iovar resumed on timeout */ +#ifdef PCIE_FULL_DONGLE + bool d3ack_timeout_occured; /* flag to indicate d3ack resumed on timeout */ +#endif /* PCIE_FULL_DONGLE */ +#ifdef BT_OVER_SDIO + bool is_bt_recovery_required; +#endif int hang_was_sent; int rxcnt_timeout; /* counter rxcnt timeout to send HANG */ int txcnt_timeout; /* counter txcnt timeout to send HANG */ @@ -547,6 +825,12 @@ typedef struct dhd_pub { #endif /* BCMPCIE */ bool hang_report; /* enable hang report by default */ uint16 hang_reason; /* reason codes for HANG event */ +#if defined(DHD_HANG_SEND_UP_TEST) + uint req_hang_type; +#endif /* DHD_HANG_SEND_UP_TEST */ +#if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG) + uint hang_counts; +#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */ #ifdef WLMEDIA_HTSF uint8 htsfdlystat_sz; /* Size of delay stats, max 255B */ #endif @@ -591,12 +875,21 @@ typedef struct dhd_pub { void *flowring_list_lock; /* per os lock for flowring list protection */ uint32 num_flow_rings; cumm_ctr_t cumm_ctr; /* cumm queue length placeholder */ + cumm_ctr_t l2cumm_ctr; /* level 2 cumm queue length placeholder */ uint32 d2h_sync_mode; /* D2H DMA completion sync mode */ uint8 flow_prio_map[NUMPRIO]; uint8 flow_prio_map_type; char enable_log[MAX_EVENT]; bool dma_d2h_ring_upd_support; bool dma_h2d_ring_upd_support; + bool dma_ring_upd_overwrite; /* host overwrites support setting */ + + bool idma_enable; + uint idma_inited; + bool idma_retention_ds; /* Implicit DMA memory retention */ + + bool 
ifrm_enable; /* implicit frm enable */ + uint ifrm_inited; /* implicit frm init */ #ifdef DHD_WMF bool wmf_ucast_igmp; @@ -607,43 +900,156 @@ typedef struct dhd_pub { bool wmf_ucast_upnp; #endif #endif /* DHD_WMF */ +#if defined(TRAFFIC_MGMT_DWM) + dhd_trf_mgmt_dwm_tbl_t dhd_tm_dwm_tbl; +#endif #ifdef DHD_L2_FILTER unsigned long l2_filter_cnt; /* for L2_FILTER ARP table timeout */ #endif /* DHD_L2_FILTER */ +#ifdef DHD_SSSR_DUMP + bool sssr_inited; + sssr_reg_info_t sssr_reg_info; + uint8 *sssr_mempool; + uint *sssr_d11_before[MAX_NUM_D11CORES]; + uint *sssr_d11_after[MAX_NUM_D11CORES]; + bool sssr_d11_outofreset[MAX_NUM_D11CORES]; + uint *sssr_vasip_buf_before; + uint *sssr_vasip_buf_after; +#endif /* DHD_SSSR_DUMP */ uint8 *soc_ram; uint32 soc_ram_length; uint32 memdump_type; #ifdef DHD_FW_COREDUMP uint32 memdump_enabled; + bool memdump_success; #endif /* DHD_FW_COREDUMP */ #ifdef PCIE_FULL_DONGLE #ifdef WLTDLS tdls_peer_tbl_t peer_tbl; #endif /* WLTDLS */ + uint8 tx_in_progress; #endif /* PCIE_FULL_DONGLE */ +#ifdef DHD_ULP + void *dhd_ulp; +#endif #ifdef CACHE_FW_IMAGES char *cached_fw; int cached_fw_length; char *cached_nvram; int cached_nvram_length; + char *cached_clm; + int cached_clm_length; #endif #ifdef WLTDLS uint32 tdls_mode; #endif +#ifdef GSCAN_SUPPORT + bool lazy_roam_enable; +#endif +#if defined(PKT_FILTER_SUPPORT) && defined(APF) + bool apf_set; +#endif /* PKT_FILTER_SUPPORT && APF */ +#ifdef DHD_WET + void *wet_info; +#endif + bool h2d_phase_supported; + bool force_dongletrap_on_bad_h2d_phase; + uint32 dongle_trap_data; + bool cto_enable; /* enable PCIE CTO Prevention and recovery */ + uint32 cto_threshold; /* PCIE CTO timeout threshold */ + bool fw_download_done; + trap_t last_trap_info; /* trap info from the last trap */ + uint8 rand_mac_oui[DOT11_OUI_LEN]; #ifdef DHD_LOSSLESS_ROAMING uint8 dequeue_prec_map; + uint8 prio_8021x; +#endif +#ifdef REPORT_FATAL_TIMEOUTS + timeout_info_t *timeout_info; +#endif /* REPORT_FATAL_TIMEOUTS */ + /* 
timesync link */ + struct dhd_ts *ts; + bool d2h_hostrdy_supported; +#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING) + bool d11_tx_status; +#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */ + uint16 ndo_version; /* ND offload version supported */ +#ifdef NDO_CONFIG_SUPPORT + bool ndo_enable; /* ND offload feature enable */ + bool ndo_host_ip_overflow; /* # of host ip addr exceed FW capacity */ + uint32 ndo_max_host_ip; /* # of host ip addr supported by FW */ +#endif /* NDO_CONFIG_SUPPORT */ +#if defined(DHD_EFI) && defined(DHD_LOG_DUMP) + uint8 log_capture_enable; +#endif /* DHD_EFI && DHD_LOG_DUMP */ + bool max_dtim_enable; /* use MAX bcn_li_dtim value in suspend mode */ +#ifdef PCIE_OOB + bool d2h_no_oob_dw; +#endif /* PCIE_OOB */ +#ifdef PCIE_INB_DW + bool d2h_inband_dw; + enum dhd_bus_ds_state ds_state; +#endif /* PCIE_INB_DW */ +#ifdef CUSTOM_SET_ANTNPM + uint32 mimo_ant_set; +#endif /* CUSTOM_SET_ANTNPM */ +#ifdef CUSTOM_SET_OCLOFF + bool ocl_off; +#endif /* CUSTOM_SET_OCLOFF */ +#ifdef HOFFLOAD_MODULES + struct module_metadata hmem; +#endif + bool wbtext_support; +#ifdef DUMP_IOCTL_IOV_LIST + /* dump iovar list */ + dll_t dump_iovlist_head; + uint8 dump_iovlist_len; +#endif /* DUMP_IOCTL_IOV_LIST */ +#ifdef DHD_DEBUG +/* memwaste feature */ + dll_t mw_list_head; /* memwaste list head */ + uint32 mw_id; /* memwaste list unique id */ +#endif /* DHD_DEBUG */ +#ifdef WLTDLS + spinlock_t tdls_lock; +#endif /* WLTDLS */ +#ifdef WLADPS_SEAK_AP_WAR + uint32 disabled_adps; +#endif /* WLADPS_SEAK_AP_WAR */ + bool ext_trap_data_supported; + uint32 *extended_trap_data; +#ifdef DHD_PKT_LOGGING + struct dhd_pktlog *pktlog; +#endif /* DHD_PKT_LOGGING */ +#if defined(STAT_REPORT) + void *stat_report_info; #endif - struct mutex wl_up_lock; - bool is_fw_download_done; -#ifdef DHD_LOG_DUMP - struct dhd_log_dump_buf dld_buf; - unsigned int dld_enable; -#endif /* DHD_LOG_DUMP */ char *clm_path; /* module_param: path to clm vars file */ char *conf_path; /* module_param: path to 
config vars file */ struct dhd_conf *conf; /* Bus module handle */ } dhd_pub_t; +typedef struct { + uint rxwake; + uint rcwake; +#ifdef DHD_WAKE_RX_STATUS + uint rx_bcast; + uint rx_arp; + uint rx_mcast; + uint rx_multi_ipv6; + uint rx_icmpv6; + uint rx_icmpv6_ra; + uint rx_icmpv6_na; + uint rx_icmpv6_ns; + uint rx_multi_ipv4; + uint rx_multi_other; + uint rx_ucast; +#endif /* DHD_WAKE_RX_STATUS */ +#ifdef DHD_WAKE_EVENT_STATUS + uint rc_event[WLC_E_LAST]; +#endif /* DHD_WAKE_EVENT_STATUS */ +} wake_counts_t; + #if defined(PCIE_FULL_DONGLE) /* Packet Tag for PCIE Full Dongle DHD */ @@ -751,16 +1157,25 @@ WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(dhd_workitem_context_t, dhd_get_dhd_workitem_ #ifdef PNO_SUPPORT int dhd_pno_clean(dhd_pub_t *dhd); #endif /* PNO_SUPPORT */ + +#ifdef HOFFLOAD_MODULES +void dhd_linux_get_modfw_address(dhd_pub_t *dhd); +#endif + /* * Wake locks are an Android power management concept. They are used by applications and services * to request CPU resources. */ extern int dhd_os_wake_lock(dhd_pub_t *pub); extern int dhd_os_wake_unlock(dhd_pub_t *pub); -extern int dhd_event_wake_lock(dhd_pub_t *pub); -extern int dhd_event_wake_unlock(dhd_pub_t *pub); extern int dhd_os_wake_lock_waive(dhd_pub_t *pub); extern int dhd_os_wake_lock_restore(dhd_pub_t *pub); +extern void dhd_event_wake_lock(dhd_pub_t *pub); +extern void dhd_event_wake_unlock(dhd_pub_t *pub); +extern void dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val); +extern void dhd_pm_wake_unlock(dhd_pub_t *pub); +extern void dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val); +extern void dhd_txfl_wake_unlock(dhd_pub_t *pub); extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub); extern int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val); extern int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val); @@ -769,10 +1184,6 @@ extern int dhd_os_wd_wake_lock(dhd_pub_t *pub); extern int dhd_os_wd_wake_unlock(dhd_pub_t *pub); extern void dhd_os_wake_lock_init(struct dhd_info *dhd); 
extern void dhd_os_wake_lock_destroy(struct dhd_info *dhd); -#ifdef BCMPCIE_OOB_HOST_WAKE -extern void dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val); -extern void dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub); -#endif /* BCMPCIE_OOB_HOST_WAKE */ #ifdef DHD_USE_SCAN_WAKELOCK extern void dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val); extern void dhd_os_scan_wake_unlock(dhd_pub_t *pub); @@ -799,55 +1210,126 @@ inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp) #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ } -#define DHD_OS_WAKE_LOCK(pub) dhd_os_wake_lock(pub) -#define DHD_OS_WAKE_UNLOCK(pub) dhd_os_wake_unlock(pub) -#define DHD_EVENT_WAKE_LOCK(pub) dhd_event_wake_lock(pub) -#define DHD_EVENT_WAKE_UNLOCK(pub) dhd_event_wake_unlock(pub) -#define DHD_OS_WAKE_LOCK_TIMEOUT(pub) dhd_os_wake_lock_timeout(pub) +#define PRINT_CALL_INFO(str) +#define PRINT_CALL_INFO_TIMEOUT(str, val) +#define DHD_OS_WAKE_LOCK(pub) \ + do { \ + PRINT_CALL_INFO("call wakelock"); \ + dhd_os_wake_lock(pub); \ + } while (0) +#define DHD_OS_WAKE_UNLOCK(pub) \ + do { \ + PRINT_CALL_INFO("call wake_unlock"); \ + dhd_os_wake_unlock(pub); \ + } while (0) +#define DHD_EVENT_WAKE_LOCK(pub) \ + do { \ + PRINT_CALL_INFO("call event_wake lock"); \ + dhd_event_wake_lock(pub); \ + } while (0) +#define DHD_EVENT_WAKE_UNLOCK(pub) \ + do { \ + PRINT_CALL_INFO("call event_wake unlock"); \ + dhd_event_wake_unlock(pub); \ + } while (0) +#define DHD_PM_WAKE_LOCK_TIMEOUT(pub, val) \ + do { \ + PRINT_CALL_INFO("call pm_wake_timeout enable"); \ + dhd_pm_wake_lock_timeout(pub, val); \ + } while (0) +#define DHD_PM_WAKE_UNLOCK(pub) \ + do { \ + PRINT_CALL_INFO("call pm_wake unlock"); \ + dhd_pm_wake_unlock(pub); \ + } while (0) +#define DHD_TXFL_WAKE_LOCK_TIMEOUT(pub, val) \ + do { \ + PRINT_CALL_INFO("call pm_wake_timeout enable"); \ + dhd_txfl_wake_lock_timeout(pub, val); \ + } while (0) +#define DHD_TXFL_WAKE_UNLOCK(pub) \ + do { \ + PRINT_CALL_INFO("call pm_wake unlock"); \ + 
dhd_txfl_wake_unlock(pub); \ + } while (0) +#define DHD_OS_WAKE_LOCK_TIMEOUT(pub) \ + do { \ + PRINT_CALL_INFO("call wake_lock_timeout"); \ + dhd_os_wake_lock_timeout(pub); \ + } while (0) #define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) \ - dhd_os_wake_lock_rx_timeout_enable(pub, val) + do { \ + PRINT_CALL_INFO_TIMEOUT("call wake_lock_rx_timeout_enable", val); \ + dhd_os_wake_lock_rx_timeout_enable(pub, val); \ + } while (0) #define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val) \ - dhd_os_wake_lock_ctrl_timeout_enable(pub, val) + do { \ + PRINT_CALL_INFO_TIMEOUT("call wake_lock_ctrl_timeout_enable", val); \ + dhd_os_wake_lock_ctrl_timeout_enable(pub, val); \ + } while (0) #define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub) \ - dhd_os_wake_lock_ctrl_timeout_cancel(pub) -#define DHD_OS_WAKE_LOCK_WAIVE(pub) dhd_os_wake_lock_waive(pub) -#define DHD_OS_WAKE_LOCK_RESTORE(pub) dhd_os_wake_lock_restore(pub) -#define DHD_OS_WAKE_LOCK_INIT(dhd) dhd_os_wake_lock_init(dhd); -#define DHD_OS_WAKE_LOCK_DESTROY(dhd) dhd_os_wake_lock_destroy(dhd); + do { \ + PRINT_CALL_INFO("call wake_lock_ctrl_timeout_cancel"); \ + dhd_os_wake_lock_ctrl_timeout_cancel(pub); \ + } while (0) +#define DHD_OS_WAKE_LOCK_WAIVE(pub) \ + do { \ + PRINT_CALL_INFO("call wake_lock_waive"); \ + dhd_os_wake_lock_waive(pub); \ + } while (0) +#define DHD_OS_WAKE_LOCK_RESTORE(pub) \ + do { \ + PRINT_CALL_INFO("call wake_lock_restore"); \ + dhd_os_wake_lock_restore(pub); \ + } while (0) +#define DHD_OS_WAKE_LOCK_INIT(dhd) \ + do { \ + PRINT_CALL_INFO("call wake_lock_init"); \ + dhd_os_wake_lock_init(dhd); \ + } while (0) +#define DHD_OS_WAKE_LOCK_DESTROY(dhd) \ + do { \ + PRINT_CALL_INFO("call wake_lock_destroy"); \ + dhd_os_wake_lock_destroy(dhd); \ + } while (0) #define DHD_OS_WD_WAKE_LOCK(pub) dhd_os_wd_wake_lock(pub) #define DHD_OS_WD_WAKE_UNLOCK(pub) dhd_os_wd_wake_unlock(pub) -#ifdef BCMPCIE_OOB_HOST_WAKE -#define OOB_WAKE_LOCK_TIMEOUT 500 -#define DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(pub, val) 
dhd_os_oob_irq_wake_lock_timeout(pub, val) -#define DHD_OS_OOB_IRQ_WAKE_UNLOCK(pub) dhd_os_oob_irq_wake_unlock(pub) -#endif /* BCMPCIE_OOB_HOST_WAKE */ #ifdef DHD_USE_SCAN_WAKELOCK #ifdef DHD_DEBUG_SCAN_WAKELOCK +#define PRINT_SCAN_CALL(str) printf("%s: %s %d\n", \ + str, __FUNCTION__, __LINE__) +#else +#define PRINT_SCAN_CALL(str) +#endif /* DHD_DEBUG_SCAN_WAKELOCK */ #define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) \ do { \ - printf("call wake_lock_scan: %s %d\n", \ - __FUNCTION__, __LINE__); \ + PRINT_SCAN_CALL("call wake_lock_scan"); \ dhd_os_scan_wake_lock_timeout(pub, val); \ } while (0) #define DHD_OS_SCAN_WAKE_UNLOCK(pub) \ do { \ - printf("call wake_unlock_scan: %s %d\n", \ - __FUNCTION__, __LINE__); \ + PRINT_SCAN_CALL("call wake_unlock_scan"); \ dhd_os_scan_wake_unlock(pub); \ } while (0) #else -#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_scan_wake_lock_timeout(pub, val) -#define DHD_OS_SCAN_WAKE_UNLOCK(pub) dhd_os_scan_wake_unlock(pub) -#endif /* DHD_DEBUG_SCAN_WAKELOCK */ -#else #define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) #define DHD_OS_SCAN_WAKE_UNLOCK(pub) #endif /* DHD_USE_SCAN_WAKELOCK */ + +#ifdef BCMPCIE_OOB_HOST_WAKE +#define OOB_WAKE_LOCK_TIMEOUT 500 +extern void dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val); +extern void dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub); +#define DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_oob_irq_wake_lock_timeout(pub, val) +#define DHD_OS_OOB_IRQ_WAKE_UNLOCK(pub) dhd_os_oob_irq_wake_unlock(pub) +#endif /* BCMPCIE_OOB_HOST_WAKE */ + #define DHD_PACKET_TIMEOUT_MS 500 #define DHD_EVENT_TIMEOUT_MS 1500 #define SCAN_WAKE_LOCK_TIMEOUT 10000 +#define MAX_TX_TIMEOUT 500 /* Enum for IOCTL recieved status */ typedef enum dhd_ioctl_recieved_status @@ -855,7 +1337,8 @@ typedef enum dhd_ioctl_recieved_status IOCTL_WAIT = 0, IOCTL_RETURN_ON_SUCCESS, IOCTL_RETURN_ON_TRAP, - IOCTL_RETURN_ON_BUS_STOP + IOCTL_RETURN_ON_BUS_STOP, + IOCTL_RETURN_ON_ERROR } dhd_ioctl_recieved_status_t; /* interface 
operations (register, remove) should be atomic, use this lock to prevent race @@ -882,7 +1365,10 @@ typedef enum dhd_attach_states DHD_ATTACH_STATE_WAKELOCKS_INIT = 0x40, DHD_ATTACH_STATE_CFG80211 = 0x80, DHD_ATTACH_STATE_EARLYSUSPEND_DONE = 0x100, - DHD_ATTACH_STATE_DONE = 0x200 + DHD_ATTACH_TIMESYNC_ATTACH_DONE = 0x200, + DHD_ATTACH_LOGTRACE_INIT = 0x400, + DHD_ATTACH_STATE_LB_ATTACH_DONE = 0x800, + DHD_ATTACH_STATE_DONE = 0x1000 } dhd_attach_states_t; /* Value -1 means we are unsuccessful in creating the kthread. */ @@ -920,18 +1406,21 @@ extern void dhd_store_conn_status(uint32 event, uint32 status, uint32 reason); extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec); -/* Receive frame for delivery to OS. Callee disposes of rxp. */ extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *rxp, int numpkt, uint8 chan); /* Return pointer to interface name */ extern char *dhd_ifname(dhd_pub_t *dhdp, int idx); +#ifdef DHD_UCODE_DOWNLOAD +/* Returns the ucode path */ +extern char *dhd_get_ucode_path(dhd_pub_t *dhdp); +#endif /* DHD_UCODE_DOWNLOAD */ + /* Request scheduling of the bus dpc */ extern void dhd_sched_dpc(dhd_pub_t *dhdp); /* Notify tx completion */ extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success); -extern void dhd_dpc_enable(dhd_pub_t *dhdp); #define WIFI_FEATURE_INFRA 0x0001 /* Basic infrastructure mode */ #define WIFI_FEATURE_INFRA_5G 0x0002 /* Support for 5 GHz Band */ @@ -950,14 +1439,43 @@ extern void dhd_dpc_enable(dhd_pub_t *dhdp); #define WIFI_FEATURE_EPR 0x4000 /* Enhanced power reporting */ #define WIFI_FEATURE_AP_STA 0x8000 /* Support for AP STA Concurrency */ #define WIFI_FEATURE_LINKSTAT 0x10000 /* Support for Linkstats */ +#define WIFI_FEATURE_LOGGER 0x20000 /* WiFi Logger */ +#define WIFI_FEATURE_HAL_EPNO 0x40000 /* WiFi PNO enhanced */ +#define WIFI_FEATURE_RSSI_MONITOR 0x80000 /* RSSI Monitor */ +#define WIFI_FEATURE_MKEEP_ALIVE 0x100000 /* WiFi mkeep_alive */ +#define 
WIFI_FEATURE_CONFIG_NDO 0x200000 /* ND offload configure */ +#define WIFI_FEATURE_TX_TRANSMIT_POWER 0x400000 /* Capture Tx transmit power levels */ +#define WIFI_FEATURE_INVALID 0xFFFFFFFF /* Invalid Feature */ #define MAX_FEATURE_SET_CONCURRRENT_GROUPS 3 extern int dhd_dev_get_feature_set(struct net_device *dev); -extern int *dhd_dev_get_feature_set_matrix(struct net_device *dev, int *num); +extern int dhd_dev_get_feature_set_matrix(struct net_device *dev, int num); +extern int dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui); #ifdef CUSTOM_FORCE_NODFS_FLAG extern int dhd_dev_set_nodfs(struct net_device *dev, uint nodfs); #endif /* CUSTOM_FORCE_NODFS_FLAG */ + +#ifdef NDO_CONFIG_SUPPORT +#ifndef NDO_MAX_HOST_IP_ENTRIES +#define NDO_MAX_HOST_IP_ENTRIES 10 +#endif /* NDO_MAX_HOST_IP_ENTRIES */ +extern int dhd_dev_ndo_cfg(struct net_device *dev, u8 enable); +extern int dhd_dev_ndo_update_inet6addr(struct net_device * dev); +#endif /* NDO_CONFIG_SUPPORT */ +extern int dhd_set_rand_mac_oui(dhd_pub_t *dhd); +#ifdef GSCAN_SUPPORT +extern int dhd_dev_set_lazy_roam_cfg(struct net_device *dev, + wlc_roam_exp_params_t *roam_param); +extern int dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable); +extern int dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev, + wl_bssid_pref_cfg_t *bssid_pref, uint32 flush); +extern int dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist, + uint32 len, uint32 flush); +extern int dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *whitelist, + uint32 len, uint32 flush); +#endif /* GSCAN_SUPPORT */ + /* OS independent layer functions */ extern void dhd_os_dhdiovar_lock(dhd_pub_t *pub); extern void dhd_os_dhdiovar_unlock(dhd_pub_t *pub); @@ -969,7 +1487,20 @@ extern unsigned int dhd_os_get_ioctl_resp_timeout(void); extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec); extern void dhd_os_ioctl_resp_lock(dhd_pub_t * pub); extern void 
dhd_os_ioctl_resp_unlock(dhd_pub_t * pub); -extern int dhd_wakeup_ioctl_event(dhd_pub_t *pub, dhd_ioctl_recieved_status_t reason); +#ifdef PCIE_FULL_DONGLE +extern void dhd_wakeup_ioctl_event(dhd_pub_t *pub, dhd_ioctl_recieved_status_t reason); +#else +static INLINE void dhd_wakeup_ioctl_event(dhd_pub_t *pub, dhd_ioctl_recieved_status_t reason) +{ printf("%s is NOT implemented for SDIO", __FUNCTION__); return; } +#endif +#ifdef SHOW_LOGTRACE +extern int dhd_os_read_file(void *file, char *buf, uint32 size); +extern int dhd_os_seek_file(void *file, int64 offset); +#endif /* SHOW_LOGTRACE */ + +extern void +dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr); +extern void wl_dhdpcie_dump_regs(void * context); #define DHD_OS_IOCTL_RESP_LOCK(x) #define DHD_OS_IOCTL_RESP_UNLOCK(x) @@ -977,6 +1508,11 @@ extern int dhd_wakeup_ioctl_event(dhd_pub_t *pub, dhd_ioctl_recieved_status_t re extern int dhd_os_get_image_block(char * buf, int len, void * image); extern int dhd_os_get_image_size(void * image); +#if defined(BT_OVER_SDIO) +extern int dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image); +extern void dhdsdio_bus_usr_cnt_inc(dhd_pub_t *pub); +extern void dhdsdio_bus_usr_cnt_dec(dhd_pub_t *pub); +#endif /* (BT_OVER_SDIO) */ extern void * dhd_os_open_image(char * filename); extern void dhd_os_close_image(void * image); extern void dhd_os_wd_timer(void *bus, uint wdtick); @@ -990,6 +1526,7 @@ extern void dhd_os_sdunlock_txq(dhd_pub_t * pub); extern void dhd_os_sdlock_rxq(dhd_pub_t * pub); extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub); extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub); +extern void dhd_os_tracelog(const char *format, ...); #ifdef DHDTCPACK_SUPPRESS extern unsigned long dhd_os_tcpacklock(dhd_pub_t *pub); extern void dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags); @@ -998,9 +1535,9 @@ extern void dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags); extern int 
dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr); extern int dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff); extern int dhd_custom_get_mac_address(void *adapter, unsigned char *buf); -#ifdef CUSTOM_COUNTRY_CODE +#if defined(CUSTOM_COUNTRY_CODE) extern void get_customized_country_code(void *adapter, char *country_iso_code, -wl_country_t *cspec, u32 flags); + wl_country_t *cspec, u32 flags); #else extern void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec); #endif /* CUSTOM_COUNTRY_CODE */ @@ -1022,19 +1559,16 @@ extern void dhd_set_cpucore(dhd_pub_t *dhd, int set); extern int dhd_keep_alive_onoff(dhd_pub_t *dhd); #endif /* KEEP_ALIVE */ -#ifdef SUPPORT_AP_POWERSAVE -extern int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable); -#endif - #if defined(DHD_FW_COREDUMP) void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size); #endif /* DHD_FW_COREDUMP */ +void dhd_schedule_sssr_dump(dhd_pub_t *dhdp); + #ifdef SUPPORT_AP_POWERSAVE extern int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable); #endif /* SUPPORT_AP_POWERSAVE */ - #ifdef PKT_FILTER_SUPPORT #define DHD_UNICAST_FILTER_NUM 0 #define DHD_BROADCAST_FILTER_NUM 1 @@ -1042,18 +1576,44 @@ extern int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable); #define DHD_MULTICAST6_FILTER_NUM 3 #define DHD_MDNS_FILTER_NUM 4 #define DHD_ARP_FILTER_NUM 5 -extern int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val); +#define DHD_BROADCAST_ARP_FILTER_NUM 6 +#define DHD_IP4BCAST_DROP_FILTER_NUM 7 +#define DISCARD_IPV4_MCAST "102 1 6 IP4_H:16 0xf0 0xe0" +#define DISCARD_IPV6_MCAST "103 1 6 IP6_H:24 0xff 0xff" +extern int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val); extern void dhd_enable_packet_filter(int value, dhd_pub_t *dhd); +extern int dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num); extern int net_os_enable_packet_filter(struct net_device *dev, int val); extern int 
net_os_rxfilter_add_remove(struct net_device *dev, int val, int num); extern int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val); #endif /* PKT_FILTER_SUPPORT */ -extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd); -extern bool dhd_support_sta_mode(dhd_pub_t *dhd); +#if defined(BCMPCIE) +extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval); +#else +extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd); +#endif /* OEM_ANDROID && BCMPCIE */ + +extern bool dhd_support_sta_mode(dhd_pub_t *dhd); extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size); +#ifdef RSSI_MONITOR_SUPPORT +extern int dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start, + int8 max_rssi, int8 min_rssi); +#endif /* RSSI_MONITOR_SUPPORT */ + +#ifdef DHDTCPACK_SUPPRESS +extern int dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable); +#endif /* DHDTCPACK_SUPPRESS */ + +#define DHD_RSSI_MONITOR_EVT_VERSION 1 +typedef struct { + uint8 version; + int8 cur_rssi; + struct ether_addr BSSID; +} dhd_rssi_monitor_evt_t; + typedef struct { uint32 limit; /* Expiration time (usec) */ uint32 increment; /* Current expiration increment (usec) */ @@ -1067,16 +1627,47 @@ typedef struct { char **fmts; char *raw_fmts; char *raw_sstr; + uint32 fmts_size; + uint32 raw_fmts_size; + uint32 raw_sstr_size; uint32 ramstart; uint32 rodata_start; uint32 rodata_end; char *rom_raw_sstr; + uint32 rom_raw_sstr_size; uint32 rom_ramstart; uint32 rom_rodata_start; uint32 rom_rodata_end; } dhd_event_log_t; #endif /* SHOW_LOGTRACE */ +#ifdef KEEP_ALIVE +extern int dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt, + uint16 ip_pkt_len, uint8* src_mac_addr, uint8* dst_mac_addr, uint32 period_msec); +extern int dhd_dev_stop_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id); +#endif /* KEEP_ALIVE */ + +#if defined(PKT_FILTER_SUPPORT) && defined(APF) +/* + * As per Google's current implementation, there will be 
only one APF filter. + * Therefore, userspace doesn't bother about filter id and because of that + * DHD has to manage the filter id. + */ +#define PKT_FILTER_APF_ID 200 +#define DHD_APF_LOCK(ndev) dhd_apf_lock(ndev) +#define DHD_APF_UNLOCK(ndev) dhd_apf_unlock(ndev) + +extern void dhd_apf_lock(struct net_device *dev); +extern void dhd_apf_unlock(struct net_device *dev); +extern int dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version); +extern int dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len); +extern int dhd_dev_apf_add_filter(struct net_device *ndev, u8* program, + uint32 program_len); +extern int dhd_dev_apf_enable_filter(struct net_device *ndev); +extern int dhd_dev_apf_disable_filter(struct net_device *ndev); +extern int dhd_dev_apf_delete_filter(struct net_device *ndev); +#endif /* PKT_FILTER_SUPPORT && APF */ + extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec); extern int dhd_timeout_expired(dhd_timeout_t *tmo); @@ -1086,12 +1677,12 @@ extern struct net_device * dhd_idx2net(void *pub, int ifidx); extern int net_os_send_hang_message(struct net_device *dev); extern int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num); extern bool dhd_wowl_cap(void *bus); - -extern int wl_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata, - wl_event_msg_t *, void **data_ptr, void *); +extern int wl_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata, uint pktlen, + wl_event_msg_t *, void **data_ptr, void *); +extern int wl_process_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata, uint pktlen, + wl_event_msg_t *, void **data_ptr, void *); extern void wl_event_to_host_order(wl_event_msg_t * evt); -extern int wl_host_event_get_data(void *pktdata, wl_event_msg_t *event, void **data_ptr); - +extern int wl_host_event_get_data(void *pktdata, uint pktlen, bcm_event_msg_u_t *evu); extern int dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len); extern int 
dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifindex); @@ -1106,8 +1697,10 @@ extern int dhd_event_ifadd(struct dhd_info *dhd, struct wl_event_data_if *ifeven char *name, uint8 *mac); extern int dhd_event_ifdel(struct dhd_info *dhd, struct wl_event_data_if *ifevent, char *name, uint8 *mac); -extern struct net_device* dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name, - uint8 *mac, uint8 bssidx, bool need_rtnl_lock, char *dngl_name); +extern int dhd_event_ifchange(struct dhd_info *dhd, struct wl_event_data_if *ifevent, + char *name, uint8 *mac); +extern struct net_device* dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name, + uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name); extern int dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock); extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name); extern void dhd_vif_del(struct dhd_info *dhd, int ifidx); @@ -1137,6 +1730,7 @@ extern uint dhd_bus_chip_id(dhd_pub_t *dhdp); extern uint dhd_bus_chiprev_id(dhd_pub_t *dhdp); extern uint dhd_bus_chippkg_id(dhd_pub_t *dhdp); #endif /* defined(BCMSDIO) || defined(BCMPCIE) */ +int dhd_bus_get_fw_mode(dhd_pub_t *dhdp); #if defined(KEEP_ALIVE) extern int dhd_keep_alive_onoff(dhd_pub_t *dhd); @@ -1148,12 +1742,35 @@ extern void dhd_os_spin_lock_deinit(osl_t *osh, void *lock); extern unsigned long dhd_os_spin_lock(void *lock); void dhd_os_spin_unlock(void *lock, unsigned long flags); +#ifdef DHD_EFI +extern int dhd_os_ds_enter_wait(dhd_pub_t * pub, uint * condition); +extern int dhd_os_ds_enter_wake(dhd_pub_t * pub); +#else +static INLINE int dhd_os_ds_enter_wait(dhd_pub_t * pub, uint * condition) +{ printf("%s is Not supported for this platform", __FUNCTION__); return 0; } +static INLINE int dhd_os_ds_enter_wake(dhd_pub_t * pub) +{ return 0; } +#endif /* DHD_EFI */ + +#ifdef PCIE_INB_DW +extern int dhd_os_ds_exit_wait(dhd_pub_t * pub, uint * condition); +extern int 
dhd_os_ds_exit_wake(dhd_pub_t * pub); +#endif /* PCIE_INB_DW */ +extern int dhd_os_busbusy_wake(dhd_pub_t * pub); +extern int dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition); +extern int dhd_os_busbusy_wait_negation(dhd_pub_t * pub, uint * condition); +extern int dhd_os_d3ack_wait(dhd_pub_t * pub, uint * condition); +extern int dhd_os_d3ack_wake(dhd_pub_t * pub); + /* * Manage sta objects in an interface. Interface is identified by an ifindex and * sta(s) within an interfaces are managed using a MacAddress of the sta. */ struct dhd_sta; +extern bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac); +extern struct dhd_sta *dhd_find_sta(void *pub, int ifidx, void *ea); extern struct dhd_sta *dhd_findadd_sta(void *pub, int ifidx, void *ea); +extern void dhd_del_all_sta(void *pub, int ifidx); extern void dhd_del_sta(void *pub, int ifidx, void *ea); extern int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx); extern int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val); @@ -1161,13 +1778,18 @@ extern int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val); extern int dhd_set_dev_def(dhd_pub_t *dhdp, uint32 idx, int val); #endif extern int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx); -extern int dhd_os_d3ack_wait(dhd_pub_t * pub, uint * condition); -extern int dhd_os_d3ack_wake(dhd_pub_t * pub); -extern int dhd_os_busbusy_wait_negation(dhd_pub_t * pub, uint * condition); -extern int dhd_os_busbusy_wake(dhd_pub_t * pub); +extern struct net_device *dhd_linux_get_primary_netdev(dhd_pub_t *dhdp); extern bool dhd_is_concurrent_mode(dhd_pub_t *dhd); -extern int dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set); +int dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len, + char *res_buf, uint res_len, int set); +extern int dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, + uint cmd_len, char **resptr, uint resp_len); + +#ifdef DHD_MCAST_REGEN 
+extern int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val); +#endif typedef enum cust_gpio_modes { WLAN_RESET_ON, WLAN_RESET_OFF, @@ -1175,6 +1797,11 @@ typedef enum cust_gpio_modes { WLAN_POWER_OFF } cust_gpio_modes_t; +typedef struct dmaxref_mem_map { + dhd_dma_buf_t *srcmem; + dhd_dma_buf_t *dstmem; +} dmaxref_mem_map_t; + extern int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag); extern int wl_iw_send_priv_event(struct net_device *dev, char *flag); /* @@ -1184,15 +1811,12 @@ extern int wl_iw_send_priv_event(struct net_device *dev, char *flag); /* Watchdog timer interval */ extern uint dhd_watchdog_ms; extern bool dhd_os_wd_timer_enabled(void *bus); - #ifdef DHD_PCIE_RUNTIMEPM extern uint dhd_runtimepm_ms; #endif /* DHD_PCIE_RUNTIMEPM */ -#if defined(DHD_DEBUG) /* Console output poll interval */ extern uint dhd_console_ms; -#endif /* defined(DHD_DEBUG) */ extern uint android_msg_level; extern uint config_msg_level; extern uint sd_msglevel; @@ -1248,6 +1872,13 @@ extern bool bcm_bprintf_bypass; /* Override to force tx queueing all the time */ extern uint dhd_force_tx_queueing; + +/* Default bcn_timeout value is 4 */ +#define DEFAULT_BCN_TIMEOUT_VALUE 4 +#ifndef CUSTOM_BCN_TIMEOUT_SETTING +#define CUSTOM_BCN_TIMEOUT_SETTING DEFAULT_BCN_TIMEOUT_VALUE +#endif + /* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */ #define DEFAULT_KEEP_ALIVE_VALUE 55000 /* msec */ #ifndef CUSTOM_KEEP_ALIVE_SETTING @@ -1298,6 +1929,10 @@ extern uint dhd_force_tx_queueing; #define CUSTOM_SUSPEND_BCN_LI_DTIM DEFAULT_SUSPEND_BCN_LI_DTIM #endif +#ifndef BCN_TIMEOUT_IN_SUSPEND +#define BCN_TIMEOUT_IN_SUSPEND 6 /* bcn timeout value in suspend mode */ +#endif + #ifndef CUSTOM_RXF_PRIO_SETTING #define CUSTOM_RXF_PRIO_SETTING MAX((CUSTOM_DPC_PRIO_SETTING - 1), 1) #endif @@ -1324,6 +1959,15 @@ extern uint dhd_force_tx_queueing; #define 
CUSTOM_ASSOC_RETRY_MAX DEFAULT_ASSOC_RETRY_MAX #endif /* DEFAULT_ASSOC_RETRY_MAX */ +#if defined(BCMSDIO) || defined(DISABLE_FRAMEBURST) +#define DEFAULT_FRAMEBURST_SET 0 +#else +#define DEFAULT_FRAMEBURST_SET 1 +#endif /* BCMSDIO */ + +#ifndef CUSTOM_FRAMEBURST_SET +#define CUSTOM_FRAMEBURST_SET DEFAULT_FRAMEBURST_SET +#endif /* CUSTOM_FRAMEBURST_SET */ #ifdef WLTDLS #ifndef CUSTOM_TDLS_IDLE_MODE_SETTING @@ -1337,15 +1981,25 @@ extern uint dhd_force_tx_queueing; #endif #endif /* WLTDLS */ -#define DEFAULT_BCN_TIMEOUT 8 +#if defined(VSDB) || defined(ROAM_ENABLE) +#define DEFAULT_BCN_TIMEOUT 8 +#else +#define DEFAULT_BCN_TIMEOUT 4 +#endif + #ifndef CUSTOM_BCN_TIMEOUT -#define CUSTOM_BCN_TIMEOUT DEFAULT_BCN_TIMEOUT +#define CUSTOM_BCN_TIMEOUT DEFAULT_BCN_TIMEOUT #endif #define MAX_DTIM_SKIP_BEACON_INTERVAL 100 /* max allowed associated AP beacon for DTIM skip */ #ifndef MAX_DTIM_ALLOWED_INTERVAL #define MAX_DTIM_ALLOWED_INTERVAL 600 /* max allowed total beacon interval for DTIM skip */ #endif + +#ifndef MIN_DTIM_FOR_ROAM_THRES_EXTEND +#define MIN_DTIM_FOR_ROAM_THRES_EXTEND 600 /* minimum dtim interval to extend roam threshold */ +#endif + #define NO_DTIM_SKIP 1 #ifdef SDTEST /* Echo packet generator (SDIO), pkts/s */ @@ -1360,15 +2014,61 @@ extern uint dhd_pktgen_len; /* optionally set by a module_param_string() */ #define MOD_PARAM_PATHLEN 2048 #define MOD_PARAM_INFOLEN 512 +#define MOD_PARAM_SRLEN 64 #ifdef SOFTAP extern char fw_path2[MOD_PARAM_PATHLEN]; #endif +#ifdef DHD_LEGACY_FILE_PATH +#define PLATFORM_PATH "/data/" +#elif defined(PLATFORM_SLP) +#define PLATFORM_PATH "/opt/etc/" +#else +#define PLATFORM_PATH "/data/misc/conn/" +#endif /* DHD_LEGACY_FILE_PATH */ + /* Flag to indicate if we should download firmware on driver load */ extern uint dhd_download_fw_on_driverload; extern int allow_delay_fwdl; +extern int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost); +extern int dhd_write_file(const char *filepath, char *buf, int buf_len); +extern int 
dhd_read_file(const char *filepath, char *buf, int buf_len); +extern int dhd_write_file_and_check(const char *filepath, char *buf, int buf_len); + +#ifdef READ_MACADDR +extern int dhd_set_macaddr_from_file(dhd_pub_t *dhdp); +#else +static INLINE int dhd_set_macaddr_from_file(dhd_pub_t *dhdp) { return 0; } +#endif /* READ_MACADDR */ +#ifdef WRITE_MACADDR +extern int dhd_write_macaddr(struct ether_addr *mac); +#else +static INLINE int dhd_write_macaddr(struct ether_addr *mac) { return 0; } +#endif /* WRITE_MACADDR */ +static INLINE int dhd_check_module_cid(dhd_pub_t *dhdp) { return 0; } +#ifdef GET_MAC_FROM_OTP +extern int dhd_check_module_mac(dhd_pub_t *dhdp); +#else +static INLINE int dhd_check_module_mac(dhd_pub_t *dhdp) { return 0; } +#endif /* GET_MAC_FROM_OTP */ + +#if defined(READ_MACADDR) || defined(WRITE_MACADDR) || defined(GET_MAC_FROM_OTP) +#define DHD_USE_CISINFO +#endif + +#ifdef DHD_USE_CISINFO +int dhd_read_cis(dhd_pub_t *dhdp); +void dhd_clear_cis(dhd_pub_t *dhdp); +#else +static INLINE int dhd_read_cis(dhd_pub_t *dhdp) { return 0; } +static INLINE void dhd_clear_cis(dhd_pub_t *dhdp) { } +#endif /* DHD_USE_CISINFO */ + +#define IBSS_COALESCE_DEFAULT 1 +#define IBSS_INITIAL_SCAN_ALLOWED_DEFAULT 1 + extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar); extern void dhd_wait_event_wakeup(dhd_pub_t*dhd); @@ -1394,13 +2094,24 @@ void dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx); int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac); int dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode); #ifdef PCIE_FULL_DONGLE -void dhd_tdls_update_peer_info(struct net_device *dev, bool connect_disconnect, uint8 *addr); +int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event); +int dhd_tdls_event_handler(dhd_pub_t *dhd_pub, wl_event_msg_t *event); +int dhd_free_tdls_peer_list(dhd_pub_t *dhd_pub); #endif /* PCIE_FULL_DONGLE */ #endif /* WLTDLS */ + /* Neighbor Discovery Offload Support */ 
extern int dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable); int dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipaddr, int idx); int dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx); + +/* Enhanced ND offload support */ +uint16 dhd_ndo_get_version(dhd_pub_t *dhdp); +int dhd_ndo_add_ip_with_type(dhd_pub_t *dhdp, char *ipv6addr, uint8 type, int idx); +int dhd_ndo_remove_ip_by_addr(dhd_pub_t *dhdp, char *ipv6addr, int idx); +int dhd_ndo_remove_ip_by_type(dhd_pub_t *dhdp, uint8 type, int idx); +int dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t *dhdp, int enable); + /* ioctl processing for nl80211 */ int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, struct dhd_ioctl *ioc, void *data_buf); @@ -1418,11 +2129,15 @@ int dhd_os_wlfc_unblock(dhd_pub_t *pub); extern const uint8 prio2fifo[]; #endif /* PROP_TXSTATUS */ +int dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size); +int dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size); +int dhd_common_socram_dump(dhd_pub_t *dhdp); + +int dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size); + uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail); void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size); -int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost); - #if defined(CONFIG_DHD_USE_STATIC_BUF) #define DHD_OS_PREALLOC(dhdpub, section, size) dhd_os_prealloc(dhdpub, section, size, FALSE) #define DHD_OS_PREFREE(dhdpub, addr, size) dhd_os_prefree(dhdpub, addr, size) @@ -1452,6 +2167,7 @@ extern int sec_get_param_wfa_cert(dhd_pub_t *dhd, int mode, uint* read_val); #define dhd_add_flowid(pub, ifidx, ac_prio, ea, flowid) do {} while (0) #define dhd_del_flowid(pub, ifidx, flowid) do {} while (0) +bool dhd_wet_chainable(dhd_pub_t *dhdp); extern unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub); extern void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags); @@ -1470,6 +2186,10 @@ extern void dhd_os_general_spin_unlock(dhd_pub_t *pub, 
unsigned long flags); #define DHD_GENERAL_UNLOCK(dhdp, flags) \ dhd_os_general_spin_unlock((dhdp), (flags)) +/* Enable DHD timer spin lock/unlock */ +#define DHD_TIMER_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_TIMER_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, (flags)) + /* Enable DHD flowring spin lock/unlock */ #define DHD_FLOWRING_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) #define DHD_FLOWRING_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) @@ -1482,6 +2202,27 @@ extern void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags); #define DHD_FLOWRING_LIST_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) #define DHD_FLOWRING_LIST_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) +#define DHD_SPIN_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_SPIN_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) + +#define DHD_BUS_INB_DW_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_BUS_INB_DW_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) + +/* Enable DHD TDLS peer list spin lock/unlock */ +#ifdef WLTDLS +#define DHD_TDLS_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_TDLS_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) +#endif /* WLTDLS */ + +#ifdef DBG_PKT_MON +/* Enable DHD PKT MON spin lock/unlock */ +#define DHD_PKT_MON_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_PKT_MON_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, (flags)) +#endif /* DBG_PKT_MON */ + +#define DHD_LINUX_GENERAL_LOCK(dhdp, flags) DHD_GENERAL_LOCK(dhdp, flags) +#define DHD_LINUX_GENERAL_UNLOCK(dhdp, flags) DHD_GENERAL_UNLOCK(dhdp, flags) + extern void dhd_dump_to_kernelog(dhd_pub_t *dhdp); @@ -1496,6 +2237,7 @@ extern int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx); extern int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val); #endif /* DHD_L2_FILTER */ + typedef struct wl_io_pport { dhd_pub_t *dhd_pub; uint ifidx; @@ -1505,13 +2247,14 @@ 
typedef struct wl_evt_pport { dhd_pub_t *dhd_pub; int *ifidx; void *pktdata; + uint data_len; void **data_ptr; void *raw_event; } wl_evt_pport_t; extern void *dhd_pub_shim(dhd_pub_t *dhd_pub); #ifdef DHD_FW_COREDUMP -void dhd_save_fwdump(dhd_pub_t *dhd_pub, void * buffer, uint32 length); +void* dhd_get_fwdump_buf(dhd_pub_t *dhd_pub, uint32 length); #endif /* DHD_FW_COREDUMP */ #if defined(SET_RPS_CPUS) @@ -1538,20 +2281,42 @@ int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t com void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length); -int dhd_download_clm_blob(dhd_pub_t *dhd, unsigned char *image, uint32 len); +int dhd_download_blob(dhd_pub_t *dhd, unsigned char *image, + uint32 len, char *iovar); int dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path); + +#ifdef SHOW_LOGTRACE +int dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size, + dhd_event_log_t *event_log); +int dhd_parse_map_file(osl_t *osh, void *file, uint32 *ramstart, + uint32 *rodata_start, uint32 *rodata_end); +#ifdef PCIE_FULL_DONGLE +int dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t *dhdp, void *pktbuf, + dhd_event_log_t *event_data); +#endif /* PCIE_FULL_DONGLE */ +#endif /* SHOW_LOGTRACE */ + #define dhd_is_device_removed(x) FALSE #define dhd_os_ind_firmware_stall(x) -#ifdef DHD_FW_COREDUMP +#if defined(DHD_FW_COREDUMP) extern void dhd_get_memdump_info(dhd_pub_t *dhd); -#endif /* DHD_FW_COREDUMP */ +#endif /* defined(DHD_FW_COREDUMP) */ #ifdef BCMASSERT_LOG extern void dhd_get_assert_info(dhd_pub_t *dhd); +#else +static INLINE void dhd_get_assert_info(dhd_pub_t *dhd) { } #endif /* BCMASSERT_LOG */ +#define DMAXFER_FREE(dhdp, dmap) dhd_schedule_dmaxfer_free(dhdp, dmap); +#if defined(PCIE_FULL_DONGLE) +extern void dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap); +void dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap); +#endif /* PCIE_FULL_DONGLE */ + +#define DHD_LB_STATS_NOOP do { /* noop */ } 
while (0) #if defined(DHD_LB_STATS) #include extern void dhd_lb_stats_init(dhd_pub_t *dhd); @@ -1561,30 +2326,31 @@ extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count); extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count); extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp); extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp); -#define DHD_LB_STATS_INIT(dhdp) dhd_lb_stats_init(dhdp) +#define DHD_LB_STATS_INIT(dhdp) dhd_lb_stats_init(dhdp) +#define DHD_LB_STATS_DEINIT(dhdp) dhd_lb_stats_deinit(dhdp) /* Reset is called from common layer so it takes dhd_pub_t as argument */ #define DHD_LB_STATS_RESET(dhdp) dhd_lb_stats_init(dhdp) -#define DHD_LB_STATS_CLR(x) (x) = 0U -#define DHD_LB_STATS_INCR(x) (x) = (x) + 1 -#define DHD_LB_STATS_ADD(x, c) (x) = (x) + (c) +#define DHD_LB_STATS_CLR(x) (x) = 0U +#define DHD_LB_STATS_INCR(x) (x) = (x) + 1 +#define DHD_LB_STATS_ADD(x, c) (x) = (x) + (c) #define DHD_LB_STATS_PERCPU_ARR_INCR(x) \ { \ int cpu = get_cpu(); put_cpu(); \ DHD_LB_STATS_INCR(x[cpu]); \ } -#define DHD_LB_STATS_UPDATE_NAPI_HISTO(dhdp, x) dhd_lb_stats_update_napi_histo(dhdp, x) -#define DHD_LB_STATS_UPDATE_TXC_HISTO(dhdp, x) dhd_lb_stats_update_txc_histo(dhdp, x) -#define DHD_LB_STATS_UPDATE_RXC_HISTO(dhdp, x) dhd_lb_stats_update_rxc_histo(dhdp, x) -#define DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhdp) dhd_lb_stats_txc_percpu_cnt_incr(dhdp) -#define DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhdp) dhd_lb_stats_rxc_percpu_cnt_incr(dhdp) +#define DHD_LB_STATS_UPDATE_NAPI_HISTO(dhdp, x) dhd_lb_stats_update_napi_histo(dhdp, x) +#define DHD_LB_STATS_UPDATE_TXC_HISTO(dhdp, x) dhd_lb_stats_update_txc_histo(dhdp, x) +#define DHD_LB_STATS_UPDATE_RXC_HISTO(dhdp, x) dhd_lb_stats_update_rxc_histo(dhdp, x) +#define DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhdp) dhd_lb_stats_txc_percpu_cnt_incr(dhdp) +#define DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhdp) dhd_lb_stats_rxc_percpu_cnt_incr(dhdp) #else /* !DHD_LB_STATS */ -#define DHD_LB_STATS_NOOP do { 
/* noop */ } while (0) -#define DHD_LB_STATS_INIT(dhdp) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_INIT(dhdp) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_DEINIT(dhdp) DHD_LB_STATS_NOOP #define DHD_LB_STATS_RESET(dhdp) DHD_LB_STATS_NOOP -#define DHD_LB_STATS_CLR(x) DHD_LB_STATS_NOOP -#define DHD_LB_STATS_INCR(x) DHD_LB_STATS_NOOP -#define DHD_LB_STATS_ADD(x, c) DHD_LB_STATS_NOOP -#define DHD_LB_STATS_PERCPU_ARR_INCR(x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_CLR(x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_INCR(x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_ADD(x, c) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_PERCPU_ARR_INCR(x) DHD_LB_STATS_NOOP #define DHD_LB_STATS_UPDATE_NAPI_HISTO(dhd, x) DHD_LB_STATS_NOOP #define DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, x) DHD_LB_STATS_NOOP #define DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, x) DHD_LB_STATS_NOOP @@ -1592,6 +2358,68 @@ extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp); #define DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhdp) DHD_LB_STATS_NOOP #endif /* !DHD_LB_STATS */ +#ifdef DHD_SSSR_DUMP +#define DHD_SSSR_MEMPOOL_SIZE (1024 * 1024) /* 1MB size */ +extern int dhd_sssr_mempool_init(dhd_pub_t *dhd); +extern void dhd_sssr_mempool_deinit(dhd_pub_t *dhd); +extern int dhd_sssr_dump_init(dhd_pub_t *dhd); +extern void dhd_sssr_dump_deinit(dhd_pub_t *dhd); +#define DHD_SSSR_MEMPOOL_INIT(dhdp) dhd_sssr_mempool_init(dhdp) +#define DHD_SSSR_MEMPOOL_DEINIT(dhdp) dhd_sssr_mempool_deinit(dhdp) +#define DHD_SSSR_DUMP_INIT(dhdp) dhd_sssr_dump_init(dhdp) +#define DHD_SSSR_DUMP_DEINIT(dhdp) dhd_sssr_dump_deinit(dhdp) +#else +#define DHD_SSSR_MEMPOOL_INIT(dhdp) do { /* noop */ } while (0) +#define DHD_SSSR_MEMPOOL_DEINIT(dhdp) do { /* noop */ } while (0) +#define DHD_SSSR_DUMP_INIT(dhdp) do { /* noop */ } while (0) +#define DHD_SSSR_DUMP_DEINIT(dhdp) do { /* noop */ } while (0) +#endif /* DHD_SSSR_DUMP */ + +#ifdef SHOW_LOGTRACE +void dhd_get_read_buf_ptr(dhd_pub_t *dhd_pub, trace_buf_info_t *read_buf_info); +#endif /* SHOW_LOGTRACE */ + +#ifdef BCMPCIE 
+extern int dhd_prot_debug_info_print(dhd_pub_t *dhd); +#else +#define dhd_prot_debug_info_print(x) +#endif /* BCMPCIE */ + +extern bool dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info); + +bool dhd_fw_download_status(dhd_pub_t * dhd_pub); + +/* Bitmask used for Join Timeout */ +#define WLC_SSID_MASK 0x01 +#define WLC_WPA_MASK 0x02 + +extern int dhd_start_join_timer(dhd_pub_t *pub); +extern int dhd_stop_join_timer(dhd_pub_t *pub); +extern int dhd_start_scan_timer(dhd_pub_t *pub); +extern int dhd_stop_scan_timer(dhd_pub_t *pub); +extern int dhd_start_cmd_timer(dhd_pub_t *pub); +extern int dhd_stop_cmd_timer(dhd_pub_t *pub); +extern int dhd_start_bus_timer(dhd_pub_t *pub); +extern int dhd_stop_bus_timer(dhd_pub_t *pub); +extern uint16 dhd_get_request_id(dhd_pub_t *pub); +extern int dhd_set_request_id(dhd_pub_t *pub, uint16 id, uint32 cmd); +extern void dhd_set_join_error(dhd_pub_t *pub, uint32 mask); +extern void dhd_clear_join_error(dhd_pub_t *pub, uint32 mask); +extern void dhd_get_scan_to_val(dhd_pub_t *pub, uint32 *to_val); +extern void dhd_set_scan_to_val(dhd_pub_t *pub, uint32 to_val); +extern void dhd_get_join_to_val(dhd_pub_t *pub, uint32 *to_val); +extern void dhd_set_join_to_val(dhd_pub_t *pub, uint32 to_val); +extern void dhd_get_cmd_to_val(dhd_pub_t *pub, uint32 *to_val); +extern void dhd_set_cmd_to_val(dhd_pub_t *pub, uint32 to_val); +extern void dhd_get_bus_to_val(dhd_pub_t *pub, uint32 *to_val); +extern void dhd_set_bus_to_val(dhd_pub_t *pub, uint32 to_val); +extern int dhd_start_timesync_timer(dhd_pub_t *pub); +extern int dhd_stop_timesync_timer(dhd_pub_t *pub); + +#ifdef DHD_PKTID_AUDIT_ENABLED +void dhd_pktid_error_handler(dhd_pub_t *dhdp); +#endif /* DHD_PKTID_AUDIT_ENABLED */ + #ifdef DHD_PCIE_RUNTIMEPM extern bool dhd_runtimepm_state(dhd_pub_t *dhd); extern bool dhd_runtime_bus_wake(struct dhd_bus *bus, bool wait, void *func_addr); @@ -1616,8 +2444,6 @@ do { \ #define DHD_ENABLE_RUNTIME_PM(dhdp) #endif /* DHD_PCIE_RUNTIMEPM */ -extern 
void dhd_memdump_work_schedule(dhd_pub_t *dhdp, unsigned long msecs); - /* * Enable this macro if you want to track the calls to wake lock * This records can be printed using the following command @@ -1632,14 +2458,63 @@ extern void dhd_memdump_work_schedule(dhd_pub_t *dhdp, unsigned long msecs); void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp); #endif -extern int dhd_prot_debug_info_print(dhd_pub_t *dhd); +extern bool dhd_query_bus_erros(dhd_pub_t *dhdp); + +extern void init_dhd_timeouts(dhd_pub_t *pub); +extern void deinit_dhd_timeouts(dhd_pub_t *pub); + +typedef enum timeout_resons { + DHD_REASON_COMMAND_TO, + DHD_REASON_JOIN_TO, + DHD_REASON_SCAN_TO, + DHD_REASON_OQS_TO +} timeout_reasons_t; + +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) +extern int dhd_bus_set_device_wake(struct dhd_bus *bus, bool val); +#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */ + +#ifdef DHD_EFI +extern void dhd_schedule_reset(dhd_pub_t *dhdp); +#else +static INLINE void dhd_schedule_reset(dhd_pub_t *dhdp) {;} +#endif #ifdef ENABLE_TEMP_THROTTLING -#define TEMP_THROTTLE_CONTROL_BIT 0xf //Enable all feature. 
+#ifndef TEMP_THROTTLE_CONTROL_BIT +#define TEMP_THROTTLE_CONTROL_BIT 0xd +#endif #endif /* ENABLE_TEMP_THROTTLING */ -#ifdef DHD_PKTID_AUDIT_ENABLED -void dhd_pktid_audit_fail_cb(dhd_pub_t *dhdp); -#endif /* DHD_PKTID_AUDIT_ENABLED */ +int dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size); +#ifdef REPORT_FATAL_TIMEOUTS +void dhd_send_trap_to_fw_for_timeout(dhd_pub_t * pub, timeout_reasons_t reason); +#endif +#if defined(CONFIG_64BIT) +#define DHD_SUPPORT_64BIT +#elif defined(DHD_EFI) +#define DHD_SUPPORT_64BIT +/* by default disabled for other platforms, can enable appropriate macro to enable 64 bit support */ +#endif /* (linux || LINUX) && CONFIG_64BIT */ + +#ifdef SET_PCIE_IRQ_CPU_CORE +extern void dhd_set_irq_cpucore(dhd_pub_t *dhdp, int set); +extern void set_irq_cpucore(unsigned int irq, int set); +#endif /* SET_PCIE_IRQ_CPU_CORE */ +#if defined(DHD_HANG_SEND_UP_TEST) +extern void dhd_make_hang_with_reason(struct net_device *dev, const char *string_num); +#endif /* DHD_HANG_SEND_UP_TEST */ + +#if defined(DHD_BLOB_EXISTENCE_CHECK) +extern void dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path); +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + +#ifdef DHD_WAKE_STATUS +wake_counts_t* dhd_get_wakecount(dhd_pub_t *dhdp); +#endif /* DHD_WAKE_STATUS */ + +#ifdef BCM_ASLR_HEAP +extern uint32 dhd_get_random_number(void); +#endif /* BCM_ASLR_HEAP */ #endif /* _dhd_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_bta.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_bta.c deleted file mode 100755 index dc24edbb5c30..000000000000 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_bta.c +++ /dev/null @@ -1,340 +0,0 @@ -/* - * BT-AMP support routines - * - * Copyright (C) 1999-2016, Broadcom Corporation - * - * Unless you and Broadcom execute a separate written software license - * agreement governing use of this software, this software is licensed to you - * under the terms of the GNU General Public License version 2 (the 
"GPL"), - * available at http://www.broadcom.com/licenses/GPLv2.php, with the - * following added to such license: - * - * As a special exception, the copyright holders of this software give you - * permission to link this software with independent modules, and to copy and - * distribute the resulting executable under terms of your choice, provided that - * you also meet, for each linked independent module, the terms and conditions of - * the license of that module. An independent module is a module which is not - * derived from this software. The special exception does not apply to any - * modifications of the software. - * - * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a license - * other than the GPL, without Broadcom's express prior written consent. - * - * - * <> - * - * $Id: dhd_bta.c 514727 2014-11-12 03:02:48Z $ - */ -#error "WLBTAMP is not defined" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - - -#ifdef SEND_HCI_CMD_VIA_IOCTL -#define BTA_HCI_CMD_MAX_LEN HCI_CMD_PREAMBLE_SIZE + HCI_CMD_DATA_SIZE - -/* Send HCI cmd via wl iovar HCI_cmd to the dongle. 
*/ -int -dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len) -{ - amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf; - uint8 buf[BTA_HCI_CMD_MAX_LEN + 16]; - uint len = sizeof(buf); - wl_ioctl_t ioc; - - if (cmd_len < HCI_CMD_PREAMBLE_SIZE) - return BCME_BADLEN; - - if ((uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE > cmd_len) - return BCME_BADLEN; - - len = bcm_mkiovar("HCI_cmd", - (char *)cmd, (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE, (char *)buf, len); - - - memset(&ioc, 0, sizeof(ioc)); - - ioc.cmd = WLC_SET_VAR; - ioc.buf = buf; - ioc.len = len; - ioc.set = TRUE; - - return dhd_wl_ioctl(pub, &ioc, ioc.buf, ioc.len); -} -#else /* !SEND_HCI_CMD_VIA_IOCTL */ - -static void -dhd_bta_flush_hcidata(dhd_pub_t *pub, uint16 llh) -{ - int prec; - struct pktq *q; - uint count = 0; - - q = dhd_bus_txq(pub->bus); - if (q == NULL) - return; - - DHD_BTA(("dhd: flushing HCI ACL data for logical link %u...\n", llh)); - - dhd_os_sdlock_txq(pub); - - /* Walk through the txq and toss all HCI ACL data packets */ - PKTQ_PREC_ITER(q, prec) { - void *head_pkt = NULL; - - while (pktq_ppeek(q, prec) != head_pkt) { - void *pkt = pktq_pdeq(q, prec); - int ifidx; - - dhd_prot_hdrpull(pub, &ifidx, pkt, NULL, NULL); - - if (PKTLEN(pub->osh, pkt) >= RFC1042_HDR_LEN) { - struct ether_header *eh = - (struct ether_header *)PKTDATA(pub->osh, pkt); - - if (ntoh16(eh->ether_type) < ETHER_TYPE_MIN) { - struct dot11_llc_snap_header *lsh = - (struct dot11_llc_snap_header *)&eh[1]; - - if (bcmp(lsh, BT_SIG_SNAP_MPROT, - DOT11_LLC_SNAP_HDR_LEN - 2) == 0 && - ntoh16(lsh->type) == BTA_PROT_L2CAP) { - amp_hci_ACL_data_t *ACL_data = - (amp_hci_ACL_data_t *)&lsh[1]; - uint16 handle = ltoh16(ACL_data->handle); - - if (HCI_ACL_DATA_HANDLE(handle) == llh) { - PKTFREE(pub->osh, pkt, TRUE); - count ++; - continue; - } - } - } - } - - dhd_prot_hdrpush(pub, ifidx, pkt); - - if (head_pkt == NULL) - head_pkt = pkt; - pktq_penq(q, prec, pkt); - } - } - - dhd_os_sdunlock_txq(pub); - - DHD_BTA(("dhd: flushed %u packet(s) 
for logical link %u...\n", count, llh)); -} - -/* Handle HCI cmd locally. - * Return 0: continue to send the cmd across SDIO - * < 0: stop, fail - * > 0: stop, succuess - */ -static int -_dhd_bta_docmd(dhd_pub_t *pub, amp_hci_cmd_t *cmd) -{ - int status = 0; - - switch (ltoh16_ua((uint8 *)&cmd->opcode)) { - case HCI_Enhanced_Flush: { - eflush_cmd_parms_t *cmdparms = (eflush_cmd_parms_t *)cmd->parms; - dhd_bta_flush_hcidata(pub, ltoh16_ua(cmdparms->llh)); - break; - } - default: - break; - } - - return status; -} - -/* Send HCI cmd encapsulated in BT-SIG frame via data channel to the dongle. */ -int -dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len) -{ - amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf; - struct ether_header *eh; - struct dot11_llc_snap_header *lsh; - osl_t *osh = pub->osh; - uint len; - void *p; - int status; - - if (cmd_len < HCI_CMD_PREAMBLE_SIZE) { - DHD_ERROR(("dhd_bta_docmd: short command, cmd_len %u\n", cmd_len)); - return BCME_BADLEN; - } - - if ((len = (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE) > cmd_len) { - DHD_ERROR(("dhd_bta_docmd: malformed command, len %u cmd_len %u\n", - len, cmd_len)); - /* return BCME_BADLEN; */ - } - - p = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + len, TRUE); - if (p == NULL) { - DHD_ERROR(("dhd_bta_docmd: out of memory\n")); - return BCME_NOMEM; - } - - - /* intercept and handle the HCI cmd locally */ - if ((status = _dhd_bta_docmd(pub, cmd)) > 0) - return 0; - else if (status < 0) - return status; - - /* copy in HCI cmd */ - PKTPULL(osh, p, pub->hdrlen + RFC1042_HDR_LEN); - bcopy(cmd, PKTDATA(osh, p), len); - - /* copy in partial Ethernet header with BT-SIG LLC/SNAP header */ - PKTPUSH(osh, p, RFC1042_HDR_LEN); - eh = (struct ether_header *)PKTDATA(osh, p); - bzero(eh->ether_dhost, ETHER_ADDR_LEN); - ETHER_SET_LOCALADDR(eh->ether_dhost); - bcopy(&pub->mac, eh->ether_shost, ETHER_ADDR_LEN); - eh->ether_type = hton16(len + DOT11_LLC_SNAP_HDR_LEN); - lsh = (struct dot11_llc_snap_header *)&eh[1]; - 
bcopy(BT_SIG_SNAP_MPROT, lsh, DOT11_LLC_SNAP_HDR_LEN - 2); - lsh->type = 0; - - return dhd_sendpkt(pub, 0, p); -} -#endif /* !SEND_HCI_CMD_VIA_IOCTL */ - -/* Send HCI ACL data to dongle via data channel */ -int -dhd_bta_tx_hcidata(dhd_pub_t *pub, void *data_buf, uint data_len) -{ - amp_hci_ACL_data_t *data = (amp_hci_ACL_data_t *)data_buf; - struct ether_header *eh; - struct dot11_llc_snap_header *lsh; - osl_t *osh = pub->osh; - uint len; - void *p; - - if (data_len < HCI_ACL_DATA_PREAMBLE_SIZE) { - DHD_ERROR(("dhd_bta_tx_hcidata: short data_buf, data_len %u\n", data_len)); - return BCME_BADLEN; - } - - if ((len = (uint)ltoh16(data->dlen) + HCI_ACL_DATA_PREAMBLE_SIZE) > data_len) { - DHD_ERROR(("dhd_bta_tx_hcidata: malformed hci data, len %u data_len %u\n", - len, data_len)); - /* return BCME_BADLEN; */ - } - - p = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + len, TRUE); - if (p == NULL) { - DHD_ERROR(("dhd_bta_tx_hcidata: out of memory\n")); - return BCME_NOMEM; - } - - - /* copy in HCI ACL data header and HCI ACL data */ - PKTPULL(osh, p, pub->hdrlen + RFC1042_HDR_LEN); - bcopy(data, PKTDATA(osh, p), len); - - /* copy in partial Ethernet header with BT-SIG LLC/SNAP header */ - PKTPUSH(osh, p, RFC1042_HDR_LEN); - eh = (struct ether_header *)PKTDATA(osh, p); - bzero(eh->ether_dhost, ETHER_ADDR_LEN); - bcopy(&pub->mac, eh->ether_shost, ETHER_ADDR_LEN); - eh->ether_type = hton16(len + DOT11_LLC_SNAP_HDR_LEN); - lsh = (struct dot11_llc_snap_header *)&eh[1]; - bcopy(BT_SIG_SNAP_MPROT, lsh, DOT11_LLC_SNAP_HDR_LEN - 2); - lsh->type = HTON16(BTA_PROT_L2CAP); - - return dhd_sendpkt(pub, 0, p); -} - -/* txcomplete callback */ -void -dhd_bta_tx_hcidata_complete(dhd_pub_t *dhdp, void *txp, bool success) -{ - uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, txp); - amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)(pktdata + RFC1042_HDR_LEN); - uint16 handle = ltoh16(ACL_data->handle); - uint16 llh = HCI_ACL_DATA_HANDLE(handle); - - wl_event_msg_t event; - uint8 
data[HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t)]; - amp_hci_event_t *evt; - num_completed_data_blocks_evt_parms_t *parms; - - uint16 len = HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t); - - /* update the event struct */ - memset(&event, 0, sizeof(event)); - event.version = hton16(BCM_EVENT_MSG_VERSION); - event.event_type = hton32(WLC_E_BTA_HCI_EVENT); - event.status = 0; - event.reason = 0; - event.auth_type = 0; - event.datalen = hton32(len); - event.flags = 0; - - /* generate Number of Completed Blocks event */ - evt = (amp_hci_event_t *)data; - evt->ecode = HCI_Number_of_Completed_Data_Blocks; - evt->plen = sizeof(num_completed_data_blocks_evt_parms_t); - - parms = (num_completed_data_blocks_evt_parms_t *)evt->parms; - htol16_ua_store(dhdp->maxdatablks, (uint8 *)&parms->num_blocks); - parms->num_handles = 1; - htol16_ua_store(llh, (uint8 *)&parms->completed[0].handle); - parms->completed[0].pkts = 1; - parms->completed[0].blocks = 1; - - dhd_sendup_event_common(dhdp, &event, data); -} - -/* event callback */ -void -dhd_bta_doevt(dhd_pub_t *dhdp, void *data_buf, uint data_len) -{ - amp_hci_event_t *evt = (amp_hci_event_t *)data_buf; - - ASSERT(dhdp); - ASSERT(evt); - - switch (evt->ecode) { - case HCI_Command_Complete: { - cmd_complete_parms_t *parms = (cmd_complete_parms_t *)evt->parms; - switch (ltoh16_ua((uint8 *)&parms->opcode)) { - case HCI_Read_Data_Block_Size: { - read_data_block_size_evt_parms_t *parms2 = - (read_data_block_size_evt_parms_t *)parms->parms; - dhdp->maxdatablks = ltoh16_ua((uint8 *)&parms2->data_block_num); - break; - } - } - break; - } - - case HCI_Flush_Occurred: { - flush_occurred_evt_parms_t *evt_parms = (flush_occurred_evt_parms_t *)evt->parms; - dhd_bta_flush_hcidata(dhdp, ltoh16_ua((uint8 *)&evt_parms->handle)); - break; - } - default: - break; - } -} diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_bus.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_bus.h 
index 3517d800cb0e..c785f1210997 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_bus.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_bus.h @@ -4,7 +4,7 @@ * Provides type definitions and function prototypes used to link the * DHD OS, bus, and protocol modules. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -27,7 +27,7 @@ * * <> * - * $Id: dhd_bus.h 602721 2015-11-27 10:32:48Z $ + * $Id: dhd_bus.h 698895 2017-05-11 02:55:17Z $ */ #ifndef _dhd_bus_h_ @@ -44,6 +44,9 @@ extern void dhd_bus_unregister(void); /* Download firmware image and nvram image */ extern int dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, char *fw_path, char *nv_path, char *clm_path, char *conf_path); +#if defined(BT_OVER_SDIO) +extern int dhd_bus_download_btfw(struct dhd_bus *bus, osl_t *osh, char *btfw_path); +#endif /* defined (BT_OVER_SDIO) */ /* Stop bus module: clear pending frames, disable data flow */ extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex); @@ -57,6 +60,11 @@ extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int *idletime); /* Set the Bus Idle Time */ extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time); +/* Size of Extended Trap data Buffer */ +#ifdef BCMPCIE +#define BCMPCIE_EXT_TRAP_DATA_MAXLEN 4096 +#endif + /* Send a data frame to the dongle. Callee disposes of txp. */ #ifdef BCMPCIE extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx); @@ -64,6 +72,7 @@ extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx); extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp); #endif +extern struct device * dhd_bus_to_dev(struct dhd_bus *bus); /* Send/receive a control message to/from the dongle. * Expects caller to enforce a single outstanding transaction. 
@@ -81,10 +90,8 @@ extern void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub); extern void dhd_bus_dev_pm_relax(dhd_pub_t *dhdpub); extern bool dhd_bus_dev_pm_enabled(dhd_pub_t *dhdpub); -#if defined(DHD_DEBUG) /* Device console input function */ extern int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen); -#endif /* defined(DHD_DEBUG) */ /* Deferred processing for the bus, return TRUE requests reschedule */ extern bool dhd_bus_dpc(struct dhd_bus *bus); @@ -112,7 +119,7 @@ extern void dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_par extern void *dhd_bus_pub(struct dhd_bus *bus); extern void *dhd_bus_txq(struct dhd_bus *bus); -extern void *dhd_bus_sih(struct dhd_bus *bus); +extern const void *dhd_bus_sih(struct dhd_bus *bus); extern uint dhd_bus_hdrlen(struct dhd_bus *bus); #ifdef BCMSDIO extern void dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val); @@ -133,6 +140,12 @@ extern void dhd_txglom_enable(dhd_pub_t *dhdp, bool enable); extern int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num, uint32 *slot_num); +#if defined(DHD_FW_COREDUMP) && (defined(BCMPCIE) || defined(BCMSDIO)) +extern int dhd_bus_mem_dump(dhd_pub_t *dhd); +#else +#define dhd_bus_mem_dump(x) +#endif /* DHD_FW_COREDUMP && (BCMPCIE || BCMSDIO) */ + #ifdef BCMPCIE enum { /* Scratch buffer confiuguration update */ @@ -151,6 +164,10 @@ enum { D2H_DMA_INDX_WR_UPD, /* update D2H WR index in D2H WR dma indices buf */ D2H_DMA_INDX_RD_UPD, /* update D2H RD index in D2H RD dma indices buf */ + /* DHD Indices array buffers and update for: H2D flow ring WR */ + H2D_IFRM_INDX_WR_BUF, /* update H2D WR dma indices buf base addr to dongle */ + H2D_IFRM_INDX_WR_UPD, /* update H2D WR dma indices buf base addr to dongle */ + /* H2D and D2H Mailbox data update */ H2D_MB_DATA, D2H_MB_DATA, @@ -165,20 +182,27 @@ enum { RING_WR_UPD, /* update ring write index from/to dongle */ TOTAL_LFRAG_PACKET_CNT, - MAX_HOST_RXBUFS + MAX_HOST_RXBUFS, + HOST_API_VERSION, + 
DNGL_TO_HOST_TRAP_ADDR, +#ifdef HOFFLOAD_MODULES + WRT_HOST_MODULE_ADDR +#endif }; typedef void (*dhd_mb_ring_t) (struct dhd_bus *, uint32); +typedef void (*dhd_mb_ring_2_t) (struct dhd_bus *, uint32, bool); extern void dhd_bus_cmn_writeshared(struct dhd_bus *bus, void * data, uint32 len, uint8 type, uint16 ringid); extern void dhd_bus_ringbell(struct dhd_bus *bus, uint32 value); +extern void dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake); extern void dhd_bus_cmn_readshared(struct dhd_bus *bus, void* data, uint8 type, uint16 ringid); extern uint32 dhd_bus_get_sharedflags(struct dhd_bus *bus); extern void dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count); extern void dhd_bus_start_queue(struct dhd_bus *bus); extern void dhd_bus_stop_queue(struct dhd_bus *bus); - extern dhd_mb_ring_t dhd_bus_get_mbintr_fn(struct dhd_bus *bus); +extern dhd_mb_ring_2_t dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus); extern void dhd_bus_write_flow_ring_states(struct dhd_bus *bus, void * data, uint16 flowid); extern void dhd_bus_read_flow_ring_states(struct dhd_bus *bus, @@ -194,6 +218,10 @@ extern uint32 dhd_bus_max_h2d_queues(struct dhd_bus *bus); extern int dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs); extern void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val); +#ifdef IDLE_TX_FLOW_MGMT +extern void dhd_bus_flow_ring_resume_response(struct dhd_bus *bus, uint16 flowid, int32 status); +#endif /* IDLE_TX_FLOW_MGMT */ + extern int dhdpcie_bus_clock_start(struct dhd_bus *bus); extern int dhdpcie_bus_clock_stop(struct dhd_bus *bus); @@ -204,11 +232,60 @@ extern void dhdpcie_bus_free_resource(struct dhd_bus *bus); extern bool dhdpcie_bus_dongle_attach(struct dhd_bus *bus); extern int dhd_bus_release_dongle(struct dhd_bus *bus); extern int dhd_bus_request_irq(struct dhd_bus *bus); +extern int dhdpcie_get_pcieirq(struct dhd_bus *bus, unsigned int *irq); + +extern void dhdpcie_cto_init(struct dhd_bus *bus, bool enable); 
#ifdef DHD_FW_COREDUMP -extern int dhd_bus_mem_dump(dhd_pub_t *dhd); +extern struct dhd_bus *g_dhd_bus; +extern int dhd_dongle_mem_dump(void); #endif /* DHD_FW_COREDUMP */ +#ifdef IDLE_TX_FLOW_MGMT +extern void dhd_bus_idle_tx_ring_suspend(dhd_pub_t *dhd, uint16 flow_ring_id); +#endif /* IDLE_TX_FLOW_MGMT */ +extern void dhd_bus_handle_mb_data(struct dhd_bus *bus, uint32 d2h_mb_data); #endif /* BCMPCIE */ + +/* dump the device trap informtation */ +extern void dhd_bus_dump_trap_info(struct dhd_bus *bus, struct bcmstrbuf *b); + +/* Function to set default min res mask */ +extern bool dhd_bus_set_default_min_res_mask(struct dhd_bus *bus); + +/* Function to reset PMU registers */ +extern void dhd_bus_pmu_reg_reset(dhd_pub_t *dhdp); + +#ifdef DHD_ULP +extern void dhd_bus_ulp_disable_console(dhd_pub_t *dhdp); +extern void dhd_bus_ucode_download(struct dhd_bus *bus); +#endif /* DHD_ULP */ +extern int dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read); + +#ifdef BT_OVER_SDIO +/* + * SDIO layer clock control functions exposed to be called from other layers. + * This is required especially in the case where the BUS is shared between + * BT and SDIO and we have to control the clock. 
The callers of this function + * are expected to hold the sdlock + */ +int __dhdsdio_clk_enable(struct dhd_bus *bus, bus_owner_t owner, int can_wait); +int __dhdsdio_clk_disable(struct dhd_bus *bus, bus_owner_t owner, int can_wait); +void dhdsdio_reset_bt_use_count(struct dhd_bus *bus); +#endif /* BT_OVER_SDIO */ +#ifdef BCMPCIE +extern void dhd_bus_dump_console_buffer(struct dhd_bus *bus); +#else +#define dhd_bus_dump_console_buffer(x) +#endif /* BCMPCIE */ + +extern uint16 dhd_get_chipid(dhd_pub_t *dhd); + +extern int dhd_get_idletime(dhd_pub_t *dhd); + +#ifdef DHD_WAKE_STATUS +extern wake_counts_t* dhd_bus_get_wakecount(dhd_pub_t *dhd); +extern int dhd_bus_get_bus_wake(dhd_pub_t * dhd); +#endif /* DHD_WAKE_STATUS */ #endif /* _dhd_bus_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_buzzz.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_buzzz.h index a5422d58d7ef..e349a3f451dd 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_buzzz.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_buzzz.h @@ -3,7 +3,7 @@ /* * Broadcom logging system - Empty implementaiton - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -26,7 +26,7 @@ * * <> * - * $Id: dhd_buzzz.h 591283 2015-10-07 11:52:00Z $ + * $Id$ */ #define dhd_buzzz_attach() do { /* noop */ } while (0) diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_cdc.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_cdc.c index b1fb4094e3db..11344de2a068 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_cdc.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_cdc.c @@ -1,7 +1,7 @@ /* * DHD Protocol Module for CDC and BDC. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_cdc.c 596022 2015-10-29 11:02:47Z $ + * $Id: dhd_cdc.c 699163 2017-05-12 05:18:23Z $ * * BDC is like CDC, except it includes a header for data packets to convey * packet priority over the bus, and flags (e.g. to indicate checksum status @@ -50,6 +50,10 @@ #include #endif +#ifdef DHD_ULP +#include +#endif /* DHD_ULP */ + #define RETRIES 2 /* # of retries to retrieve matching ioctl response */ #define BUS_HEADER_LEN (24+DHD_SDALIGN) /* Must be at least SDPCM_RESERVE @@ -222,7 +226,7 @@ dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 } if (cmd == WLC_SET_PM) { - DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, *(char *)buf)); + DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? 
*(char *)buf : 0)); } memset(msg, 0, sizeof(cdc_ioctl_t)); @@ -239,6 +243,13 @@ dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 if (buf) memcpy(prot->buf, buf, len); +#ifdef DHD_ULP + if (buf && (!strncmp(buf, "ulp", sizeof("ulp")))) { + /* force all the writes after this point to NOT to use cached sbwad value */ + dhd_ulp_disable_cached_sbwad(dhd); + } +#endif /* DHD_ULP */ + if ((ret = dhdcdc_msg(dhd)) < 0) { DHD_ERROR(("%s: dhdcdc_msg failed w/status %d\n", __FUNCTION__, ret)); goto done; @@ -257,6 +268,12 @@ dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 goto done; } +#ifdef DHD_ULP + /* For ulp prototyping temporary */ + if ((ret = dhd_ulp_check_ulp_request(dhd, buf)) < 0) + goto done; +#endif /* DHD_ULP */ + /* Check the ERROR flag */ if (flags & CDCF_IOC_ERROR) { @@ -279,7 +296,8 @@ dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len) static int error_cnt = 0; if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) { - DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + DHD_ERROR(("%s : bus is down. we have nothing to do - bs: %d, has: %d\n", + __FUNCTION__, dhd->busstate, dhd->hang_was_sent)); goto done; } @@ -295,7 +313,7 @@ dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len) ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd, (unsigned long)prot->lastcmd)); if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) { - DHD_TRACE(("iovar cmd=%s\n", (char*)buf)); + DHD_TRACE(("iovar cmd=%s\n", buf ? 
(char*)buf : "\0")); } goto done; } @@ -353,8 +371,10 @@ dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name, void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) { - if (!dhdp || !dhdp->prot) + if (!dhdp || !dhdp->prot) { return; + } + bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid); #ifdef PROP_TXSTATUS dhd_wlfc_dump(dhdp, strbuf); @@ -538,6 +558,11 @@ dhd_sync_with_dongle(dhd_pub_t *dhd) wlc_rev_info_t revinfo; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); +#ifdef DHD_FW_COREDUMP + /* Check the memdump capability */ + dhd_get_memdump_info(dhd); +#endif /* DHD_FW_COREDUMP */ + #ifdef BCMASSERT_LOG dhd_get_assert_info(dhd); #endif /* BCMASSERT_LOG */ @@ -549,12 +574,11 @@ dhd_sync_with_dongle(dhd_pub_t *dhd) goto done; + DHD_SSSR_DUMP_INIT(dhd); + dhd_process_cid_mac(dhd, TRUE); - ret = dhd_preinit_ioctls(dhd); - - if (!ret) - dhd_process_cid_mac(dhd, FALSE); + dhd_process_cid_mac(dhd, FALSE); /* Always assumes wl for now */ dhd->iswl = TRUE; diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_cfg80211.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_cfg80211.c index 390747fe101d..d01e7680142d 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_cfg80211.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_cfg80211.c @@ -1,7 +1,7 @@ /* * Linux cfg80211 driver - Dongle Host Driver (DHD) related * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_cfg80211.c 591285 2015-10-07 11:56:29Z $ + * $Id: dhd_cfg80211.c 699163 2017-05-12 05:18:23Z $ */ #include @@ -40,8 +40,6 @@ #include #endif -extern struct bcm_cfg80211 *g_bcm_cfg; - #ifdef PKT_FILTER_SUPPORT extern uint dhd_pkt_filter_enable; extern uint dhd_master_mode; @@ -126,20 +124,22 @@ s32 
dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg) return 0; } -struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, char *name, - uint8 *mac, uint8 bssidx, char *dngl_name) +struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, const char *name, + uint8 *mac, uint8 bssidx, const char *dngl_name) { return dhd_allocate_if(cfg->pub, ifidx, name, mac, bssidx, FALSE, dngl_name); } -int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev) +int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg, + int ifidx, struct net_device* ndev, bool rtnl_lock_reqd) { - return dhd_register_if(cfg->pub, ifidx, FALSE); + return dhd_register_if(cfg->pub, ifidx, rtnl_lock_reqd); } -int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev) +int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg, + int ifidx, struct net_device* ndev, bool rtnl_lock_reqd) { - return dhd_remove_if(cfg->pub, ifidx, FALSE); + return dhd_remove_if(cfg->pub, ifidx, rtnl_lock_reqd); } struct net_device * dhd_cfg80211_netdev_free(struct net_device *ndev) @@ -169,9 +169,9 @@ static s32 wl_dongle_up(struct net_device *ndev) { s32 err = 0; - u32 up = 0; + u32 local_up = 0; - err = wldev_ioctl(ndev, WLC_UP, &up, sizeof(up), true); + err = wldev_ioctl_set(ndev, WLC_UP, &local_up, sizeof(local_up)); if (unlikely(err)) { WL_ERR(("WLC_UP error (%d)\n", err)); } @@ -182,9 +182,9 @@ static s32 wl_dongle_down(struct net_device *ndev) { s32 err = 0; - u32 down = 0; + u32 local_down = 0; - err = wldev_ioctl(ndev, WLC_DOWN, &down, sizeof(down), true); + err = wldev_ioctl_set(ndev, WLC_DOWN, &local_down, sizeof(local_down)); if (unlikely(err)) { WL_ERR(("WLC_DOWN error (%d)\n", err)); } @@ -226,7 +226,7 @@ int dhd_cfgvendor_priv_string_handler(struct bcm_cfg80211 *cfg, struct wireless_ { struct net_device *ndev = NULL; dhd_pub_t *dhd; - dhd_ioctl_t ioc = { 0 }; + dhd_ioctl_t ioc = { 0, NULL, 0, 0, 0, 0, 0}; int ret = 
0; int8 index; @@ -255,6 +255,7 @@ int dhd_cfgvendor_priv_string_handler(struct bcm_cfg80211 *cfg, struct wireless_ ioc.len = nlioc->len; ioc.set = nlioc->set; ioc.driver = nlioc->magic; + ioc.buf = buf; ret = dhd_ioctl_process(dhd, index, &ioc, buf); if (ret) { WL_TRACE(("dhd_ioctl_process return err %d\n", ret)); diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_cfg80211.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_cfg80211.h index cae7cc9b247b..20923ce5fda8 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_cfg80211.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_cfg80211.h @@ -1,7 +1,7 @@ /* * Linux cfg80211 driver - Dongle Host Driver (DHD) related * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_cfg80211.h 591285 2015-10-07 11:56:29Z $ + * $Id: dhd_cfg80211.h 612483 2016-01-14 03:44:27Z $ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_cfg_vendor.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_cfg_vendor.c deleted file mode 100755 index c72f8299aadb..000000000000 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_cfg_vendor.c +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Linux cfg80211 vendor command/event handlers of DHD - * - * Copyright (C) 1999-2016, Broadcom Corporation - * - * Unless you and Broadcom execute a separate written software license - * agreement governing use of this software, this software is licensed to you - * under the terms of the GNU General Public License version 2 (the "GPL"), - * available at http://www.broadcom.com/licenses/GPLv2.php, with the - * following added to such license: - * - * As a special exception, the copyright holders of this software give you - * permission to link this software with 
independent modules, and to copy and - * distribute the resulting executable under terms of your choice, provided that - * you also meet, for each linked independent module, the terms and conditions of - * the license of that module. An independent module is a module which is not - * derived from this software. The special exception does not apply to any - * modifications of the software. - * - * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a license - * other than the GPL, without Broadcom's express prior written consent. - * - * - * <> - * - * $Id: dhd_cfg_vendor.c 525516 2015-01-09 23:12:53Z $ - */ - -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef VENDOR_EXT_SUPPORT -static int dhd_cfgvendor_priv_string_handler(struct wiphy *wiphy, - struct wireless_dev *wdev, const void *data, int len) -{ - const struct bcm_nlmsg_hdr *nlioc = data; - struct net_device *ndev = NULL; - struct bcm_cfg80211 *cfg; - struct sk_buff *reply; - void *buf = NULL, *cur; - dhd_pub_t *dhd; - dhd_ioctl_t ioc = { 0 }; - int ret = 0, ret_len, payload, msglen; - int maxmsglen = PAGE_SIZE - 0x100; - int8 index; - - WL_TRACE(("entry: cmd = %d\n", nlioc->cmd)); - DHD_ERROR(("entry: cmd = %d\n", nlioc->cmd)); - - cfg = wiphy_priv(wiphy); - dhd = cfg->pub; - - DHD_OS_WAKE_LOCK(dhd); - - /* send to dongle only if we are not waiting for reload already */ - if (dhd->hang_was_sent) { - WL_ERR(("HANG was sent up earlier\n")); - DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhd, DHD_EVENT_TIMEOUT_MS); - DHD_OS_WAKE_UNLOCK(dhd); - return OSL_ERROR(BCME_DONGLE_DOWN); - } - - len -= sizeof(struct bcm_nlmsg_hdr); - ret_len = nlioc->len; - if (ret_len > 0 || len > 0) { - if (len > DHD_IOCTL_MAXLEN) { - WL_ERR(("oversize input buffer %d\n", len)); - len = DHD_IOCTL_MAXLEN; - } - if (ret_len > DHD_IOCTL_MAXLEN) { - WL_ERR(("oversize return 
buffer %d\n", ret_len)); - ret_len = DHD_IOCTL_MAXLEN; - } - payload = max(ret_len, len) + 1; - buf = vzalloc(payload); - if (!buf) { - DHD_OS_WAKE_UNLOCK(dhd); - return -ENOMEM; - } - memcpy(buf, (void *)nlioc + nlioc->offset, len); - *(char *)(buf + len) = '\0'; - } - - ndev = wdev_to_wlc_ndev(wdev, cfg); - index = dhd_net2idx(dhd->info, ndev); - if (index == DHD_BAD_IF) { - WL_ERR(("Bad ifidx from wdev:%p\n", wdev)); - ret = BCME_ERROR; - goto done; - } - - ioc.cmd = nlioc->cmd; - ioc.len = nlioc->len; - ioc.set = nlioc->set; - ioc.driver = nlioc->magic; - ret = dhd_ioctl_process(dhd, index, &ioc, buf); - if (ret) { - WL_TRACE(("dhd_ioctl_process return err %d\n", ret)); - ret = OSL_ERROR(ret); - goto done; - } - - cur = buf; - while (ret_len > 0) { - msglen = nlioc->len > maxmsglen ? maxmsglen : ret_len; - ret_len -= msglen; - payload = msglen + sizeof(msglen); - reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload); - if (!reply) { - WL_ERR(("Failed to allocate reply msg\n")); - ret = -ENOMEM; - break; - } - - if (nla_put(reply, BCM_NLATTR_DATA, msglen, cur) || - nla_put_u16(reply, BCM_NLATTR_LEN, msglen)) { - kfree_skb(reply); - ret = -ENOBUFS; - break; - } - - ret = cfg80211_vendor_cmd_reply(reply); - if (ret) { - WL_ERR(("testmode reply failed:%d\n", ret)); - break; - } - cur += msglen; - } - -done: - vfree(buf); - DHD_OS_WAKE_UNLOCK(dhd); - return ret; -} - -const struct wiphy_vendor_command dhd_cfgvendor_cmds [] = { - { - { - .vendor_id = OUI_BRCM, - .subcmd = BRCM_VENDOR_SCMD_PRIV_STR - }, - .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, - .doit = dhd_cfgvendor_priv_string_handler - }, -}; - -int cfgvendor_attach(struct wiphy *wiphy) -{ - wiphy->vendor_commands = dhd_cfgvendor_cmds; - wiphy->n_vendor_commands = ARRAY_SIZE(dhd_cfgvendor_cmds); - - return 0; -} - -int cfgvendor_detach(struct wiphy *wiphy) -{ - wiphy->vendor_commands = NULL; - wiphy->n_vendor_commands = 0; - - return 0; -} -#endif /* VENDOR_EXT_SUPPORT */ diff 
--git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_common.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_common.c index 631cb4fb2372..19a9226ecd2c 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_common.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_common.c @@ -1,7 +1,7 @@ /* * Broadcom Dongle Host Driver (DHD), common DHD core. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_common.c 609263 2015-12-31 16:21:33Z $ + * $Id: dhd_common.c 710862 2017-07-14 07:43:59Z $ */ #include #include @@ -37,7 +37,11 @@ #include #include #include -#include +#include + +#ifdef PCIE_FULL_DONGLE +#include +#endif /* PCIE_FULL_DONGLE */ #ifdef SHOW_LOGTRACE #include @@ -52,6 +56,8 @@ #include #include #include +#include +#include #include #ifdef WL_CFG80211 @@ -60,6 +66,13 @@ #ifdef PNO_SUPPORT #include #endif +#ifdef RTT_SUPPORT +#include +#endif + +#ifdef DNGL_EVENT_SUPPORT +#include +#endif #define htod32(i) (i) #define htod16(i) (i) @@ -85,17 +98,27 @@ #ifdef DHD_PSTA #include #endif /* DHD_PSTA */ +#ifdef DHD_TIMESYNC +#include +#endif /* DHD_TIMESYNC */ +#ifdef DHD_WET +#include +#endif /* DHD_WET */ + +#if defined(BCMEMBEDIMAGE) && defined(DHD_EFI) +#include +#endif #ifdef WLMEDIA_HTSF extern void htsf_update(struct dhd_info *dhd, void *data); #endif -#ifdef DHD_LOG_DUMP -int dhd_msg_level = DHD_ERROR_VAL | DHD_MSGTRACE_VAL | DHD_FWLOG_VAL | DHD_EVENT_VAL; -#else +extern int is_wlc_event_frame(void *pktdata, uint pktlen, uint16 exp_usr_subtype, + bcm_event_msg_u_t *out_event); + +/* By default all logs are enabled */ int dhd_msg_level = DHD_ERROR_VAL | DHD_MSGTRACE_VAL | DHD_FWLOG_VAL; -#endif /* DHD_LOG_DUMP */ #if defined(WL_WLC_SHIM) @@ -103,21 +126,50 @@ int dhd_msg_level = 
DHD_ERROR_VAL | DHD_MSGTRACE_VAL | DHD_FWLOG_VAL; #else #endif /* WL_WLC_SHIM */ -#include +#ifdef DHD_ULP +#include +#endif /* DHD_ULP */ + +#ifdef DHD_DEBUG +#include +#endif /* DHD_DEBUG */ #ifdef SOFTAP char fw_path2[MOD_PARAM_PATHLEN]; extern bool softap_enabled; #endif +#ifdef REPORT_FATAL_TIMEOUTS +/* Default timeout value in ms */ +#define SCAN_TIMEOUT_DEFAULT 1 +#define JOIN_TIMEOUT_DEFAULT 7500 +#ifdef DHD_EFI +#define BUS_TIMEOUT_DEFAULT 8000000 /* 800ms, in units of 100ns */ +#define CMD_TIMEOUT_DEFAULT 15000000 /* 1.5sec, in units of 100ns */ +#else +#define BUS_TIMEOUT_DEFAULT 800 +#define CMD_TIMEOUT_DEFAULT 1200 +#endif /* DHD_EFI */ +#endif /* REPORT_FATAL_TIMEOUTS */ + +#ifdef SHOW_LOGTRACE +#define BYTES_AHEAD_NUM 11 /* address in map file is before these many bytes */ +#define READ_NUM_BYTES 1000 /* read map file each time this No. of bytes */ +#define GO_BACK_FILE_POS_NUM_BYTES 100 /* set file pos back to cur pos */ +static char *ramstart_str = "text_start"; /* string in mapfile has addr ramstart */ +static char *rodata_start_str = "rodata_start"; /* string in mapfile has addr rodata start */ +static char *rodata_end_str = "rodata_end"; /* string in mapfile has addr rodata end */ +#define RAMSTART_BIT 0x01 +#define RDSTART_BIT 0x02 +#define RDEND_BIT 0x04 +#define ALL_MAP_VAL (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT) +#endif /* SHOW_LOGTRACE */ + /* Last connection success/failure status */ uint32 dhd_conn_event; uint32 dhd_conn_status; uint32 dhd_conn_reason; -#if defined(SHOW_EVENTS) && defined(SHOW_LOGTRACE) -static int check_event_log_sequence_number(uint32 seq_no); -#endif /* defined(SHOW_EVENTS) && defined(SHOW_LOGTRACE) */ extern int dhd_iscan_request(void * dhdp, uint16 action); extern void dhd_ind_scan_confirm(void *h, bool status); extern int dhd_iscan_in_progress(void *h); @@ -130,6 +182,13 @@ extern int dhd_get_concurrent_capabilites(dhd_pub_t *dhd); extern int dhd_socram_dump(struct dhd_bus *bus); +#ifdef DNGL_EVENT_SUPPORT +static 
void dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event, + bcm_dngl_event_msg_t *dngl_event, size_t pktlen); +static int dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event, + size_t pktlen); +#endif /* DNGL_EVENT_SUPPORT */ + #define MAX_CHUNK_LEN 1408 /* 8 * 8 * 22 */ bool ap_cfg_running = FALSE; @@ -148,14 +207,19 @@ bool ap_fw_loaded = FALSE; #if defined(DHD_DEBUG) const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR; #else -const char dhd_version[] = "\nDongle Host Driver, version " EPI_VERSION_STR "\nCompiled from "; +const char dhd_version[] = "\nDongle Host Driver, version " EPI_VERSION_STR; #endif char fw_version[FW_VER_STR_LEN] = "\0"; char clm_version[CLM_VER_STR_LEN] = "\0"; +char bus_api_revision[BUS_API_REV_STR_LEN] = "\0"; + void dhd_set_timer(void *bus, uint wdtick); - +#if defined(TRAFFIC_MGMT_DWM) +static int traffic_mgmt_add_dwm_filter(dhd_pub_t *dhd, + trf_mgmt_filter_list_t * trf_mgmt_filter_list, int len); +#endif /* IOVar table */ enum { @@ -172,11 +236,12 @@ enum { IOV_LOGSTAMP, IOV_GPIOOB, IOV_IOCTLTIMEOUT, -#if defined(DHD_DEBUG) IOV_CONS, IOV_DCONSOLE_POLL, +#if defined(DHD_DEBUG) IOV_DHD_JOIN_TIMEOUT_DBG, IOV_SCAN_TIMEOUT, + IOV_MEM_DEBUG, #endif /* defined(DHD_DEBUG) */ #ifdef PROP_TXSTATUS IOV_PROPTXSTATUS_ENABLE, @@ -206,7 +271,11 @@ enum { #ifdef DHD_UCAST_UPNP IOV_WMF_UCAST_UPNP, #endif /* DHD_UCAST_UPNP */ + IOV_WMF_PSTA_DISABLE, #endif /* DHD_WMF */ +#if defined(TRAFFIC_MGMT_DWM) + IOV_TRAFFIC_MGMT_DWM, +#endif IOV_AP_ISOLATE, #ifdef DHD_L2_FILTER IOV_DHCP_UNICAST, @@ -214,117 +283,363 @@ enum { IOV_PROXY_ARP, IOV_GRAT_ARP, #endif /* DHD_L2_FILTER */ + IOV_DHD_IE, #ifdef DHD_PSTA IOV_PSTA, #endif /* DHD_PSTA */ +#ifdef DHD_WET + IOV_WET, + IOV_WET_HOST_IPV4, + IOV_WET_HOST_MAC, +#endif /* DHD_WET */ IOV_CFG80211_OPMODE, IOV_ASSERT_TYPE, IOV_LMTEST, - IOV_LAST +#ifdef DHD_MCAST_REGEN + IOV_MCAST_REGEN_BSS_ENABLE, +#endif +#ifdef SHOW_LOGTRACE + IOV_DUMP_TRACE_LOG, 
+#endif /* SHOW_LOGTRACE */ +#ifdef REPORT_FATAL_TIMEOUTS + IOV_SCAN_TO, + IOV_JOIN_TO, + IOV_CMD_TO, + IOV_OQS_TO, +#endif /* REPORT_FATAL_TIMEOUTS */ + IOV_DONGLE_TRAP_TYPE, + IOV_DONGLE_TRAP_INFO, + IOV_BPADDR, + IOV_LAST, +#if defined(DHD_EFI) && defined(DHD_LOG_DUMP) + IOV_LOG_CAPTURE_ENABLE, + IOV_LOG_DUMP +#endif /* DHD_EFI && DHD_LOG_DUMP */ }; const bcm_iovar_t dhd_iovars[] = { - {"version", IOV_VERSION, 0, IOVT_BUFFER, sizeof(dhd_version) }, - {"wlmsglevel", IOV_WLMSGLEVEL, 0, IOVT_UINT32, 0 }, + {"version", IOV_VERSION, 0, 0, IOVT_BUFFER, sizeof(dhd_version) }, + {"wlmsglevel", IOV_WLMSGLEVEL, 0, 0, IOVT_UINT32, 0 }, #ifdef DHD_DEBUG - {"msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 }, + {"msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0 }, + {"mem_debug", IOV_MEM_DEBUG, 0, 0, IOVT_BUFFER, 0 }, #endif /* DHD_DEBUG */ - {"bcmerrorstr", IOV_BCMERRORSTR, 0, IOVT_BUFFER, BCME_STRLEN }, - {"bcmerror", IOV_BCMERROR, 0, IOVT_INT8, 0 }, - {"wdtick", IOV_WDTICK, 0, IOVT_UINT32, 0 }, - {"dump", IOV_DUMP, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN }, -#ifdef DHD_DEBUG - {"cons", IOV_CONS, 0, IOVT_BUFFER, 0 }, - {"dconpoll", IOV_DCONSOLE_POLL, 0, IOVT_UINT32, 0 }, -#endif - {"clearcounts", IOV_CLEARCOUNTS, 0, IOVT_VOID, 0 }, - {"gpioob", IOV_GPIOOB, 0, IOVT_UINT32, 0 }, - {"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, IOVT_UINT32, 0 }, + {"bcmerrorstr", IOV_BCMERRORSTR, 0, 0, IOVT_BUFFER, BCME_STRLEN }, + {"bcmerror", IOV_BCMERROR, 0, 0, IOVT_INT8, 0 }, + {"wdtick", IOV_WDTICK, 0, 0, IOVT_UINT32, 0 }, + {"dump", IOV_DUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN }, + {"cons", IOV_CONS, 0, 0, IOVT_BUFFER, 0 }, + {"dconpoll", IOV_DCONSOLE_POLL, 0, 0, IOVT_UINT32, 0 }, + {"clearcounts", IOV_CLEARCOUNTS, 0, 0, IOVT_VOID, 0 }, + {"gpioob", IOV_GPIOOB, 0, 0, IOVT_UINT32, 0 }, + {"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, 0, IOVT_UINT32, 0 }, #ifdef PROP_TXSTATUS - {"proptx", IOV_PROPTXSTATUS_ENABLE, 0, IOVT_BOOL, 0 }, + {"proptx", IOV_PROPTXSTATUS_ENABLE, 0, 0, IOVT_BOOL, 0 }, /* set the proptxtstatus 
operation mode: 0 - Do not do any proptxtstatus flow control 1 - Use implied credit from a packet status 2 - Use explicit credit */ - {"ptxmode", IOV_PROPTXSTATUS_MODE, 0, IOVT_UINT32, 0 }, - {"proptx_opt", IOV_PROPTXSTATUS_OPT, 0, IOVT_UINT32, 0 }, - {"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, IOVT_BOOL, 0 }, - {"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, IOVT_BOOL, 0 }, - {"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, IOVT_BOOL, 0 }, - {"rxpkt_chk", IOV_PROPTXSTATUS_RXPKT_CHK, 0, IOVT_BOOL, 0 }, + {"ptxmode", IOV_PROPTXSTATUS_MODE, 0, 0, IOVT_UINT32, 0 }, + {"proptx_opt", IOV_PROPTXSTATUS_OPT, 0, 0, IOVT_UINT32, 0 }, + {"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, 0, IOVT_BOOL, 0 }, + {"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, 0, IOVT_BOOL, 0 }, + {"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, 0, IOVT_BOOL, 0 }, + {"rxpkt_chk", IOV_PROPTXSTATUS_RXPKT_CHK, 0, 0, IOVT_BOOL, 0 }, #endif /* PROP_TXSTATUS */ - {"bustype", IOV_BUS_TYPE, 0, IOVT_UINT32, 0}, + {"bustype", IOV_BUS_TYPE, 0, 0, IOVT_UINT32, 0}, #ifdef WLMEDIA_HTSF - {"pktdlystatsz", IOV_WLPKTDLYSTAT_SZ, 0, IOVT_UINT8, 0 }, + {"pktdlystatsz", IOV_WLPKTDLYSTAT_SZ, 0, 0, IOVT_UINT8, 0 }, #endif - {"changemtu", IOV_CHANGEMTU, 0, IOVT_UINT32, 0 }, - {"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, IOVT_BUFFER, + {"changemtu", IOV_CHANGEMTU, 0, 0, IOVT_UINT32, 0 }, + {"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, 0, IOVT_BUFFER, (WLHOST_REORDERDATA_MAXFLOWS + 1) }, #ifdef DHDTCPACK_SUPPRESS - {"tcpack_suppress", IOV_TCPACK_SUPPRESS, 0, IOVT_UINT8, 0 }, + {"tcpack_suppress", IOV_TCPACK_SUPPRESS, 0, 0, IOVT_UINT8, 0 }, #endif /* DHDTCPACK_SUPPRESS */ #ifdef DHD_WMF - {"wmf_bss_enable", IOV_WMF_BSS_ENAB, 0, IOVT_BOOL, 0 }, - {"wmf_ucast_igmp", IOV_WMF_UCAST_IGMP, 0, IOVT_BOOL, 0 }, - {"wmf_mcast_data_sendup", IOV_WMF_MCAST_DATA_SENDUP, 0, IOVT_BOOL, 0 }, + {"wmf_bss_enable", IOV_WMF_BSS_ENAB, 0, 0, IOVT_BOOL, 0 }, + {"wmf_ucast_igmp", IOV_WMF_UCAST_IGMP, 
0, 0, IOVT_BOOL, 0 }, + {"wmf_mcast_data_sendup", IOV_WMF_MCAST_DATA_SENDUP, 0, 0, IOVT_BOOL, 0 }, #ifdef WL_IGMP_UCQUERY - {"wmf_ucast_igmp_query", IOV_WMF_UCAST_IGMP_QUERY, (0), IOVT_BOOL, 0 }, + {"wmf_ucast_igmp_query", IOV_WMF_UCAST_IGMP_QUERY, (0), 0, IOVT_BOOL, 0 }, #endif /* WL_IGMP_UCQUERY */ #ifdef DHD_UCAST_UPNP - {"wmf_ucast_upnp", IOV_WMF_UCAST_UPNP, (0), IOVT_BOOL, 0 }, + {"wmf_ucast_upnp", IOV_WMF_UCAST_UPNP, (0), 0, IOVT_BOOL, 0 }, #endif /* DHD_UCAST_UPNP */ + {"wmf_psta_disable", IOV_WMF_PSTA_DISABLE, (0), 0, IOVT_BOOL, 0 }, #endif /* DHD_WMF */ +#if defined(TRAFFIC_MGMT_DWM) + {"trf_mgmt_filters_add", IOV_TRAFFIC_MGMT_DWM, (0), 0, IOVT_BUFFER, 0}, +#endif #ifdef DHD_L2_FILTER - {"dhcp_unicast", IOV_DHCP_UNICAST, (0), IOVT_BOOL, 0 }, + {"dhcp_unicast", IOV_DHCP_UNICAST, (0), 0, IOVT_BOOL, 0 }, #endif /* DHD_L2_FILTER */ - {"ap_isolate", IOV_AP_ISOLATE, (0), IOVT_BOOL, 0}, + {"ap_isolate", IOV_AP_ISOLATE, (0), 0, IOVT_BOOL, 0}, #ifdef DHD_L2_FILTER - {"block_ping", IOV_BLOCK_PING, (0), IOVT_BOOL, 0}, - {"proxy_arp", IOV_PROXY_ARP, (0), IOVT_BOOL, 0}, - {"grat_arp", IOV_GRAT_ARP, (0), IOVT_BOOL, 0}, + {"block_ping", IOV_BLOCK_PING, (0), 0, IOVT_BOOL, 0}, + {"proxy_arp", IOV_PROXY_ARP, (0), 0, IOVT_BOOL, 0}, + {"grat_arp", IOV_GRAT_ARP, (0), 0, IOVT_BOOL, 0}, #endif /* DHD_L2_FILTER */ + {"dhd_ie", IOV_DHD_IE, (0), 0, IOVT_BUFFER, 0}, #ifdef DHD_PSTA /* PSTA/PSR Mode configuration. 0: DIABLED 1: PSTA 2: PSR */ - {"psta", IOV_PSTA, 0, IOVT_UINT32, 0}, + {"psta", IOV_PSTA, 0, 0, IOVT_UINT32, 0}, #endif /* DHD PSTA */ - {"op_mode", IOV_CFG80211_OPMODE, 0, IOVT_UINT32, 0 }, - {"assert_type", IOV_ASSERT_TYPE, (0), IOVT_UINT32, 0}, - {"lmtest", IOV_LMTEST, 0, IOVT_UINT32, 0 }, - {NULL, 0, 0, 0, 0 } +#ifdef DHD_WET + /* WET Mode configuration. 
0: DIABLED 1: WET */ + {"wet", IOV_WET, 0, 0, IOVT_UINT32, 0}, + {"wet_host_ipv4", IOV_WET_HOST_IPV4, 0, 0, IOVT_UINT32, 0}, + {"wet_host_mac", IOV_WET_HOST_MAC, 0, 0, IOVT_BUFFER, 0}, +#endif /* DHD WET */ + {"op_mode", IOV_CFG80211_OPMODE, 0, 0, IOVT_UINT32, 0 }, + {"assert_type", IOV_ASSERT_TYPE, (0), 0, IOVT_UINT32, 0}, + {"lmtest", IOV_LMTEST, 0, 0, IOVT_UINT32, 0 }, +#ifdef DHD_MCAST_REGEN + {"mcast_regen_bss_enable", IOV_MCAST_REGEN_BSS_ENABLE, 0, 0, IOVT_BOOL, 0}, +#endif +#ifdef SHOW_LOGTRACE + {"dump_trace_buf", IOV_DUMP_TRACE_LOG, 0, 0, IOVT_BUFFER, sizeof(trace_buf_info_t) }, +#endif /* SHOW_LOGTRACE */ +#ifdef REPORT_FATAL_TIMEOUTS + {"scan_timeout", IOV_SCAN_TO, 0, 0, IOVT_UINT32, 0 }, + {"join_timeout", IOV_JOIN_TO, 0, 0, IOVT_UINT32, 0 }, + {"cmd_timeout", IOV_CMD_TO, 0, 0, IOVT_UINT32, 0 }, + {"oqs_timeout", IOV_OQS_TO, 0, 0, IOVT_UINT32, 0 }, +#endif /* REPORT_FATAL_TIMEOUTS */ + {"trap_type", IOV_DONGLE_TRAP_TYPE, 0, 0, IOVT_UINT32, 0 }, + {"trap_info", IOV_DONGLE_TRAP_INFO, 0, 0, IOVT_BUFFER, sizeof(trap_t) }, +#ifdef DHD_DEBUG + {"bpaddr", IOV_BPADDR, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) }, +#endif /* DHD_DEBUG */ +#if defined(DHD_EFI) && defined(DHD_LOG_DUMP) + {"log_capture_enable", IOV_LOG_CAPTURE_ENABLE, 0, 0, IOVT_UINT8, 0}, + {"log_dump", IOV_LOG_DUMP, 0, 0, IOVT_UINT8, 0}, +#endif /* DHD_EFI && DHD_LOG_DUMP */ + {NULL, 0, 0, 0, 0, 0 } }; #define DHD_IOVAR_BUF_SIZE 128 -#ifdef DHD_FW_COREDUMP -void dhd_save_fwdump(dhd_pub_t *dhd_pub, void * buffer, uint32 length) +bool +dhd_query_bus_erros(dhd_pub_t *dhdp) { - if (dhd_pub->soc_ram) { -#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) - DHD_OS_PREFREE(dhd_pub, dhd_pub->soc_ram, dhd_pub->soc_ram_length); -#else - MFREE(dhd_pub->osh, dhd_pub->soc_ram, dhd_pub->soc_ram_length); -#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ - dhd_pub->soc_ram = NULL; - dhd_pub->soc_ram_length = 0; + bool ret = FALSE; + + if (dhdp->dongle_reset) { + DHD_ERROR(("%s: 
Dongle Reset occurred, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; } + if (dhdp->dongle_trap_occured) { + DHD_ERROR(("%s: FW TRAP has occurred, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + dhdp->hang_reason = HANG_REASON_DONGLE_TRAP; + dhd_os_send_hang_message(dhdp); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */ + } + + if (dhdp->iovar_timeout_occured) { + DHD_ERROR(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } + +#ifdef PCIE_FULL_DONGLE + if (dhdp->d3ack_timeout_occured) { + DHD_ERROR(("%s: Resumed on timeout for previous D3ACK, cannot proceed\n", + __FUNCTION__)); + ret = TRUE; + } +#endif /* PCIE_FULL_DONGLE */ + + return ret; +} + +#ifdef DHD_SSSR_DUMP +int +dhd_sssr_mempool_init(dhd_pub_t *dhd) +{ + dhd->sssr_mempool = (uint8 *) MALLOCZ(dhd->osh, DHD_SSSR_MEMPOOL_SIZE); + if (dhd->sssr_mempool == NULL) { + DHD_ERROR(("%s: MALLOC of sssr_mempool failed\n", + __FUNCTION__)); + return BCME_ERROR; + } + return BCME_OK; +} + +void +dhd_sssr_mempool_deinit(dhd_pub_t *dhd) +{ + if (dhd->sssr_mempool) { + MFREE(dhd->osh, dhd->sssr_mempool, DHD_SSSR_MEMPOOL_SIZE); + dhd->sssr_mempool = NULL; + } +} + +int +dhd_get_sssr_reg_info(dhd_pub_t *dhd) +{ + int ret = BCME_ERROR; + + DHD_ERROR(("%s: get sssr_reg_info\n", __FUNCTION__)); + /* get sssr_reg_info from firmware */ + memset((void *)&dhd->sssr_reg_info, 0, sizeof(dhd->sssr_reg_info)); + if (bcm_mkiovar("sssr_reg_info", 0, 0, (char *)&dhd->sssr_reg_info, + sizeof(dhd->sssr_reg_info))) { + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, &dhd->sssr_reg_info, + sizeof(dhd->sssr_reg_info), FALSE, 0)) < 0) { + DHD_ERROR(("%s: dhd_wl_ioctl_cmd failed (error=%d)\n", __FUNCTION__, ret)); + } + } else { + DHD_ERROR(("%s: bcm_mkiovar failed\n", __FUNCTION__)); + } + + return ret; +} + +uint32 +dhd_get_sssr_bufsize(dhd_pub_t *dhd) +{ + int i; + uint32 sssr_bufsize = 0; + /* Sum up the save/restore region sizes of all D11 cores */
+ for (i = 0; i < MAX_NUM_D11CORES; i++) { + sssr_bufsize += dhd->sssr_reg_info.mac_regs[i].sr_size; + } + sssr_bufsize += dhd->sssr_reg_info.vasip_regs.vasip_sr_size; + + /* Double the size as different dumps will be saved before and after SR */ + sssr_bufsize = 2 * sssr_bufsize; + + return sssr_bufsize; +} + +int +dhd_sssr_dump_init(dhd_pub_t *dhd) +{ + int i; + uint32 sssr_bufsize; + uint32 mempool_used = 0; + + dhd->sssr_inited = FALSE; + + /* check if sssr mempool is allocated */ + if (dhd->sssr_mempool == NULL) { + DHD_ERROR(("%s: sssr_mempool is not allocated\n", + __FUNCTION__)); + return BCME_ERROR; + } + + /* Get SSSR reg info */ + if (dhd_get_sssr_reg_info(dhd) != BCME_OK) { + DHD_ERROR(("%s: dhd_get_sssr_reg_info failed\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* Validate structure version */ + if (dhd->sssr_reg_info.version != SSSR_REG_INFO_VER) { + DHD_ERROR(("%s: dhd->sssr_reg_info.version (%d : %d) mismatch\n", + __FUNCTION__, (int)dhd->sssr_reg_info.version, SSSR_REG_INFO_VER)); + return BCME_ERROR; + } + + /* Validate structure length */ + if (dhd->sssr_reg_info.length != sizeof(dhd->sssr_reg_info)) { + DHD_ERROR(("%s: dhd->sssr_reg_info.length (%d : %d) mismatch\n", + __FUNCTION__, (int)dhd->sssr_reg_info.length, + (int)sizeof(dhd->sssr_reg_info))); + return BCME_ERROR; + } + + /* validate fifo size */ + sssr_bufsize = dhd_get_sssr_bufsize(dhd); + if (sssr_bufsize > DHD_SSSR_MEMPOOL_SIZE) { + DHD_ERROR(("%s: sssr_bufsize(%d) is greater than sssr_mempool(%d)\n", + __FUNCTION__, (int)sssr_bufsize, DHD_SSSR_MEMPOOL_SIZE)); + return BCME_ERROR; + } + + /* init all pointers to NULL */ + for (i = 0; i < MAX_NUM_D11CORES; i++) { + dhd->sssr_d11_before[i] = NULL; + dhd->sssr_d11_after[i] = NULL; + } + dhd->sssr_vasip_buf_before = NULL; + dhd->sssr_vasip_buf_after = NULL; + + /* Allocate memory */ + for (i = 0; i < MAX_NUM_D11CORES; i++) { + if (dhd->sssr_reg_info.mac_regs[i].sr_size) { + dhd->sssr_d11_before[i] = (uint32
*)(dhd->sssr_mempool + mempool_used); + mempool_used += dhd->sssr_reg_info.mac_regs[i].sr_size; + + dhd->sssr_d11_after[i] = (uint32 *)(dhd->sssr_mempool + mempool_used); + mempool_used += dhd->sssr_reg_info.mac_regs[i].sr_size; + } + } + + if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) { + dhd->sssr_vasip_buf_before = (uint32 *)(dhd->sssr_mempool + mempool_used); + mempool_used += dhd->sssr_reg_info.vasip_regs.vasip_sr_size; + + dhd->sssr_vasip_buf_after = (uint32 *)(dhd->sssr_mempool + mempool_used); + mempool_used += dhd->sssr_reg_info.vasip_regs.vasip_sr_size; + } + + dhd->sssr_inited = TRUE; + + return BCME_OK; + +} + +void +dhd_sssr_dump_deinit(dhd_pub_t *dhd) +{ + int i; + + dhd->sssr_inited = FALSE; + /* init all pointers to NULL */ + for (i = 0; i < MAX_NUM_D11CORES; i++) { + dhd->sssr_d11_before[i] = NULL; + dhd->sssr_d11_after[i] = NULL; + } + dhd->sssr_vasip_buf_before = NULL; + dhd->sssr_vasip_buf_after = NULL; + + return; +} + +#endif /* DHD_SSSR_DUMP */ + +#ifdef DHD_FW_COREDUMP +void* dhd_get_fwdump_buf(dhd_pub_t *dhd_pub, uint32 length) +{ + if (!dhd_pub->soc_ram) { #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) - dhd_pub->soc_ram = (uint8*)DHD_OS_PREALLOC(dhd_pub, - DHD_PREALLOC_MEMDUMP_RAM, length); - memset(dhd_pub->soc_ram, 0, length); + dhd_pub->soc_ram = (uint8*)DHD_OS_PREALLOC(dhd_pub, + DHD_PREALLOC_MEMDUMP_RAM, length); #else - dhd_pub->soc_ram = (uint8*) MALLOCZ(dhd_pub->osh, length); + dhd_pub->soc_ram = (uint8*) MALLOC(dhd_pub->osh, length); #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + } + if (dhd_pub->soc_ram == NULL) { DHD_ERROR(("%s: Failed to allocate memory for fw crash snap shot.\n", __FUNCTION__)); - return; + dhd_pub->soc_ram_length = 0; + } else { + memset(dhd_pub->soc_ram, 0, length); + dhd_pub->soc_ram_length = length; } - dhd_pub->soc_ram_length = length; - memcpy(dhd_pub->soc_ram, buffer, length); + /* soc_ram free handled in dhd_{free,clear} */ + return dhd_pub->soc_ram; } 
#endif /* DHD_FW_COREDUMP */ @@ -332,6 +647,12 @@ void dhd_save_fwdump(dhd_pub_t *dhd_pub, void * buffer, uint32 length) * please do NOT merge it back from other branches !!! */ +int +dhd_common_socram_dump(dhd_pub_t *dhdp) +{ + return dhd_socram_dump(dhdp->bus); +} + static int dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen) { @@ -339,8 +660,10 @@ dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen) struct bcmstrbuf b; struct bcmstrbuf *strbuf = &b; - if (!dhdp || !dhdp->prot || !buf) + + if (!dhdp || !dhdp->prot || !buf) { return BCME_ERROR; + } bcm_binit(strbuf, buf, buflen); @@ -379,6 +702,27 @@ dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen) dhdp->tx_pktgetfail, dhdp->rx_pktgetfail); bcm_bprintf(strbuf, "\n"); +#ifdef DMAMAP_STATS + /* Add DMA MAP info */ + bcm_bprintf(strbuf, "DMA MAP stats: \n"); + bcm_bprintf(strbuf, "txdata: %lu size: %luK, rxdata: %lu size: %luK\n", + dhdp->dma_stats.txdata, KB(dhdp->dma_stats.txdata_sz), + dhdp->dma_stats.rxdata, KB(dhdp->dma_stats.rxdata_sz)); +#ifndef IOCTLRESP_USE_CONSTMEM + bcm_bprintf(strbuf, "IOCTL RX: %lu size: %luK ,", + dhdp->dma_stats.ioctl_rx, KB(dhdp->dma_stats.ioctl_rx_sz)); +#endif /* !IOCTLRESP_USE_CONSTMEM */ + bcm_bprintf(strbuf, "EVENT RX: %lu size: %luK, INFO RX: %lu size: %luK, " + "TSBUF RX: %lu size %luK\n", + dhdp->dma_stats.event_rx, KB(dhdp->dma_stats.event_rx_sz), + dhdp->dma_stats.info_rx, KB(dhdp->dma_stats.info_rx_sz), + dhdp->dma_stats.tsbuf_rx, KB(dhdp->dma_stats.tsbuf_rx_sz)); + bcm_bprintf(strbuf, "Total : %luK \n", + KB(dhdp->dma_stats.txdata_sz + dhdp->dma_stats.rxdata_sz + + dhdp->dma_stats.ioctl_rx_sz + dhdp->dma_stats.event_rx_sz + + dhdp->dma_stats.tsbuf_rx_sz)); +#endif /* DMAMAP_STATS */ + /* Add any prot info */ dhd_prot_dump(dhdp, strbuf); bcm_bprintf(strbuf, "\n"); @@ -390,7 +734,12 @@ dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen) #if defined(DHD_LB_STATS) dhd_lb_stats_dump(dhdp, strbuf); #endif /* DHD_LB_STATS */ - +#ifdef DHD_WET + if (dhd_get_wet_mode(dhdp)) { + 
bcm_bprintf(strbuf, "Wet Dump:\n"); + dhd_wet_dump(dhdp, strbuf); + } +#endif /* DHD_WET */ return (!strbuf->size ? BCME_BUFTOOSHORT : 0); } @@ -425,7 +774,7 @@ dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval, char iovbuf[WLC_IOCTL_SMLEN]; int ret = -1; - /* memset(iovbuf, 0, sizeof(iovbuf)); */ + memset(iovbuf, 0, sizeof(iovbuf)); if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) { ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, sizeof(iovbuf), set, ifidx); if (!ret) { @@ -446,13 +795,15 @@ int dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val, int cmd, uint8 set, int ifidx) { - char iovbuf[WLC_IOCTL_SMLEN]; + char iovbuf[WLC_IOCTL_SMLEN] = {0}; int ret = -1; int lval = htol32(val); + uint len; - /* memset(iovbuf, 0, sizeof(iovbuf)); */ - if (bcm_mkiovar(name, (char*)&lval, sizeof(lval), iovbuf, sizeof(iovbuf))) { - ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, sizeof(iovbuf), set, ifidx); + len = bcm_mkiovar(name, (char*)&lval, sizeof(lval), iovbuf, sizeof(iovbuf)); + + if (len) { + ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, len, set, ifidx); if (ret) { DHD_ERROR(("%s: set int iovar %s failed, ERR %d\n", __FUNCTION__, name, ret)); @@ -470,37 +821,73 @@ dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len) { int ret = BCME_ERROR; unsigned long flags; +#ifdef DUMP_IOCTL_IOV_LIST + dhd_iov_li_t *iov_li; +#endif /* DUMP_IOCTL_IOV_LIST */ +#ifdef KEEPIF_ON_DEVICE_RESET + if (ioc->cmd == WLC_GET_VAR) { + dbus_config_t config; + config.general_param = 0; + if (!strcmp(buf, "wowl_activate")) { + config.general_param = 2; /* 1 (TRUE) after decreased by 1 */ + } else if (!strcmp(buf, "wowl_clear")) { + config.general_param = 1; /* 0 (FALSE) after decreased by 1 */ + } + if (config.general_param) { + config.config_id = DBUS_CONFIG_ID_KEEPIF_ON_DEVRESET; + config.general_param--; + dbus_set_config(dhd_pub->dbus, &config); + } + } +#endif /* KEEPIF_ON_DEVICE_RESET */ if (dhd_os_proto_block(dhd_pub)) { #ifdef 
DHD_LOG_DUMP - int slen, i, val, rem; - long int lval; + int slen, i, val, rem, lval, min_len; char *pval, *pos, *msg; char tmp[64]; + + /* WLC_GET_VAR */ + if (ioc->cmd == WLC_GET_VAR) { + min_len = MIN(sizeof(tmp) - 1, strlen(buf)); + memset(tmp, 0, sizeof(tmp)); + bcopy(buf, tmp, min_len); + tmp[min_len] = '\0'; + } #endif /* DHD_LOG_DUMP */ - DHD_GENERAL_LOCK(dhd_pub, flags); - if (dhd_pub->busstate == DHD_BUS_DOWN || - dhd_pub->busstate == DHD_BUS_DOWN_IN_PROGRESS) { + DHD_LINUX_GENERAL_LOCK(dhd_pub, flags); + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub)) { +#ifdef DHD_EFI + DHD_INFO(("%s: returning as busstate=%d\n", + __FUNCTION__, dhd_pub->busstate)); +#else DHD_ERROR(("%s: returning as busstate=%d\n", __FUNCTION__, dhd_pub->busstate)); - DHD_GENERAL_UNLOCK(dhd_pub, flags); +#endif /* DHD_EFI */ + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); dhd_os_proto_unblock(dhd_pub); return -ENODEV; } - dhd_pub->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_IOVAR; - DHD_GENERAL_UNLOCK(dhd_pub, flags); + DHD_BUS_BUSY_SET_IN_IOVAR(dhd_pub); + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); -#ifdef DHD_LOG_DUMP - /* WLC_GET_VAR */ - if (ioc->cmd == WLC_GET_VAR) { - memset(tmp, 0, sizeof(tmp)); - bcopy(ioc->buf, tmp, strlen(ioc->buf) + 1); - } -#endif /* DHD_LOG_DUMP */ #ifdef DHD_PCIE_RUNTIMEPM dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_wl_ioctl); #endif /* DHD_PCIE_RUNTIMEPM */ + + DHD_LINUX_GENERAL_LOCK(dhd_pub, flags); + if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub)) { + DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n", + __FUNCTION__, dhd_pub->busstate, dhd_pub->dhd_bus_busy_state)); + DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub); + dhd_os_busbusy_wake(dhd_pub); + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); + dhd_os_proto_unblock(dhd_pub); + return -ENODEV; + } + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); + #if defined(WL_WLC_SHIM) { struct wl_shim_node *shim = dhd_pub_shim(dhd_pub); @@ -516,9 +903,70 @@ dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, 
wl_ioctl_t *ioc, void *buf, int len) } } #else +#ifdef DUMP_IOCTL_IOV_LIST + if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION && buf) { + if (!(iov_li = MALLOC(dhd_pub->osh, sizeof(*iov_li)))) { + DHD_ERROR(("iovar dump list item allocation Failed\n")); + } else { + iov_li->cmd = ioc->cmd; + bcopy((char *)buf, iov_li->buff, strlen((char *)buf)+1); + dhd_iov_li_append(dhd_pub, &dhd_pub->dump_iovlist_head, + &iov_li->list); + } + } +#endif /* DUMP_IOCTL_IOV_LIST */ ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len); +#ifdef DUMP_IOCTL_IOV_LIST + if (ret == -ETIMEDOUT) { + DHD_ERROR(("Last %d issued commands: Latest one is at bottom.\n", + IOV_LIST_MAX_LEN)); + dhd_iov_li_print(&dhd_pub->dump_iovlist_head); + } +#endif /* DUMP_IOCTL_IOV_LIST */ #endif /* defined(WL_WLC_SHIM) */ - +#ifdef DHD_LOG_DUMP + if (ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) { + lval = 0; + slen = strlen(buf) + 1; + msg = (char*)buf; + if (len >= slen + sizeof(lval)) { + if (ioc->cmd == WLC_GET_VAR) { + msg = tmp; + lval = *(int*)buf; + } else { + min_len = MIN(ioc->len - slen, sizeof(int)); + bcopy((msg + slen), &lval, min_len); + } + } + DHD_ERROR_MEM(("%s: cmd: %d, msg: %s, val: 0x%x, len: %d, set: %d\n", + ioc->cmd == WLC_GET_VAR ? 
"WLC_GET_VAR" : "WLC_SET_VAR", + ioc->cmd, msg, lval, ioc->len, ioc->set)); + } else { + slen = ioc->len; + if (buf != NULL) { + val = *(int*)buf; + pval = (char*)buf; + pos = tmp; + rem = sizeof(tmp); + memset(tmp, 0, sizeof(tmp)); + for (i = 0; i < slen; i++) { + if (rem <= 3) { + /* At least 2 byte required + 1 byte(NULL) */ + break; + } + pos += snprintf(pos, rem, "%02x ", pval[i]); + rem = sizeof(tmp) - (int)(pos - tmp); + } + /* Do not dump for WLC_GET_MAGIC and WLC_GET_VERSION */ + if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION) + DHD_ERROR_MEM(("WLC_IOCTL: cmd: %d, val: %d(%s), " + "len: %d, set: %d\n", + ioc->cmd, val, tmp, ioc->len, ioc->set)); + } else { + DHD_ERROR_MEM(("WLC_IOCTL: cmd: %d, buf is NULL\n", ioc->cmd)); + } + } +#endif /* DHD_LOG_DUMP */ if (ret && dhd_pub->up) { /* Send hang event only if dhd_open() was success */ dhd_os_check_hang(dhd_pub, ifidx, ret); @@ -531,49 +979,13 @@ dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len) dhd_pub->busstate = DHD_BUS_DOWN; } - DHD_GENERAL_LOCK(dhd_pub, flags); - dhd_pub->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_IOVAR; + DHD_LINUX_GENERAL_LOCK(dhd_pub, flags); + DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub); dhd_os_busbusy_wake(dhd_pub); - DHD_GENERAL_UNLOCK(dhd_pub, flags); + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); dhd_os_proto_unblock(dhd_pub); -#ifdef DHD_LOG_DUMP - if (ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) { - lval = 0; - slen = strlen(ioc->buf) + 1; - msg = (char*)ioc->buf; - if (ioc->cmd == WLC_GET_VAR) { - bcopy(msg, &lval, sizeof(long int)); - msg = tmp; - } else { - bcopy((msg + slen), &lval, sizeof(long int)); - } - DHD_ERROR_EX(("%s: cmd: %d, msg: %s, val: 0x%lx, len: %d, set: %d\n", - ioc->cmd == WLC_GET_VAR ? 
"WLC_GET_VAR" : "WLC_SET_VAR", - ioc->cmd, msg, lval, ioc->len, ioc->set)); - } else { - slen = ioc->len; - if (ioc->buf != NULL) { - val = *(int*)ioc->buf; - pval = (char*)ioc->buf; - pos = tmp; - rem = sizeof(tmp); - memset(tmp, 0, sizeof(tmp)); - for (i = 0; i < slen; i++) { - pos += snprintf(pos, rem, "%02x ", pval[i]); - rem = sizeof(tmp) - (int)(pos - tmp); - if (rem <= 0) { - break; - } - } - DHD_ERROR_EX(("WLC_IOCTL: cmd: %d, val: %d(%s), len: %d, set: %d\n", - ioc->cmd, val, tmp, ioc->len, ioc->set)); - } else { - DHD_ERROR_EX(("WLC_IOCTL: cmd: %d, buf is NULL\n", ioc->cmd)); - } - } -#endif /* DHD_LOG_DUMP */ } return ret; @@ -591,14 +1003,14 @@ uint wl_get_port_num(wl_io_pport_t *io_pport) * val - ponter to the IOVAR arguments */ static int -dhd_iovar_parse_bssidx(dhd_pub_t *dhd_pub, char *params, int *idx, char **val) +dhd_iovar_parse_bssidx(dhd_pub_t *dhd_pub, const char *params, uint32 *idx, const char **val) { char *prefix = "bsscfg:"; uint32 bssidx; if (!(strncmp(params, prefix, strlen(prefix)))) { /* per bss setting should be prefixed with 'bsscfg:' */ - char *p = (char *)params + strlen(prefix); + const char *p = params + strlen(prefix); /* Skip Name */ while (*p != '\0') @@ -632,21 +1044,141 @@ int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen) { DHD_TRACE(("%s \n", __FUNCTION__)); - return dhd_iovar(dhd, 0, "cons", msg, msglen, 1); + return dhd_iovar(dhd, 0, "cons", msg, msglen, NULL, 0, TRUE); } #endif /* DHD_DEBUG && BCMDHDUSB */ +#ifdef DHD_DEBUG +int +dhd_mem_debug(dhd_pub_t *dhd, char *msg, uint msglen) +{ + unsigned long int_arg = 0; + char *p; + char *end_ptr = NULL; + dhd_dbg_mwli_t *mw_li; + dll_t *item, *next; + /* check if mwalloc, mwquery or mwfree was supplied arguement with space */ + p = bcmstrstr(msg, " "); + if (p != NULL) { + /* space should be converted to null as separation flag for firmware */ + *p = '\0'; + /* store the argument in int_arg */ + int_arg = bcm_strtoul(p+1, &end_ptr, 10); + } + + if (!p && 
!strcmp(msg, "query")) { + /* let's query the list internally */ + if (dll_empty(dll_head_p(&dhd->mw_list_head))) { + DHD_ERROR(("memwaste list is empty, call mwalloc < size > to allocate\n")); + /* reset the id */ + dhd->mw_id = 0; + } else { + for (item = dll_head_p(&dhd->mw_list_head); + !dll_end(&dhd->mw_list_head, item); item = next) { + next = dll_next_p(item); + mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list); + DHD_ERROR(("item: \n", mw_li->id, mw_li->size)); + } + } + } else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "alloc")) { + int32 alloc_handle; + /* convert size into KB and append as integer */ + *((int32 *)(p+1)) = int_arg*1024; + *(p+1+sizeof(int32)) = '\0'; + + /* recalculated length -> 5 bytes for "alloc" + 4 bytes for size + + *1 bytes for null character + */ + msglen = strlen(msg) + sizeof(int32) + 1; + if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, msglen, FALSE, 0) < 0) { + DHD_ERROR(("IOCTL failed for memdebug alloc\n")); + } + + /* returned allocated handle from dongle, basically address of the allocated unit */ + alloc_handle = *((int32 *)msg); + + /* add a node in the list with tuple */ + if (alloc_handle == 0) { + DHD_ERROR(("Reuqested size could not be allocated\n")); + } else if (!(mw_li = MALLOC(dhd->osh, sizeof(*mw_li)))) { + DHD_ERROR(("mw list item allocation Failed\n")); + } else { + mw_li->id = dhd->mw_id++; + mw_li->handle = alloc_handle; + mw_li->size = int_arg; + /* append the node in the list */ + dll_append(&dhd->mw_list_head, &mw_li->list); + } + } else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "free")) { + /* inform dongle to free wasted chunk */ + int handle = 0; + int size = 0; + for (item = dll_head_p(&dhd->mw_list_head); + !dll_end(&dhd->mw_list_head, item); item = next) { + next = dll_next_p(item); + mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list); + + if (mw_li->id == (int)int_arg) { + handle = mw_li->handle; + size = mw_li->size; + dll_delete(item); + 
MFREE(dhd->osh, mw_li, sizeof(*mw_li)); + } + } + if (handle) { + int len; + /* append the free handle and the chunk size in first 8 bytes + * after the command and null character + */ + *((int32 *)(p+1)) = handle; + *((int32 *)((p+1)+sizeof(int32))) = size; + /* append null as terminator */ + *(p+1+2*sizeof(int32)) = '\0'; + /* recalculated length -> 4 bytes for "free" + 8 bytes for handle and size + * + 1 bytes for null character + */ + len = strlen(msg) + 2*sizeof(int32) + 1; + /* send iovar to free the chunk */ + if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, len, FALSE, 0) < 0) { + DHD_ERROR(("IOCTL failed for memdebug free\n")); + } + } else { + DHD_ERROR(("specified id does not exist\n")); + } + } else { + /* for all the wrong argument formats */ + return BCME_BADARG; + } + return 0; +} + +extern void +dhd_mw_list_delete(dhd_pub_t *dhd, dll_t *list_head) +{ + dll_t *item; + dhd_dbg_mwli_t *mw_li; + while (!(dll_empty(list_head))) { + item = dll_head_p(list_head); + mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list); + dll_delete(item); + MFREE(dhd->osh, mw_li, sizeof(*mw_li)); + } +} +#endif /* DHD_DEBUG */ + #ifdef PKT_STATICS extern pkt_statics_t tx_statics; extern void dhdsdio_txpktstatics(void); #endif + static int dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name, void *params, int plen, void *arg, int len, int val_size) { int bcmerror = 0; int32 int_val = 0; + uint32 dhd_ver_len, bus_api_rev_len; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name)); @@ -660,7 +1192,13 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch switch (actionid) { case IOV_GVAL(IOV_VERSION): /* Need to have checked buffer length */ - bcm_strncpy_s((char*)arg, len, dhd_version, len); + dhd_ver_len = strlen(dhd_version); + bus_api_rev_len = strlen(bus_api_revision); + if (dhd_ver_len) + bcm_strncpy_s((char*)arg, dhd_ver_len, dhd_version, 
dhd_ver_len); + if (bus_api_rev_len) + bcm_strncat_s((char*)arg + dhd_ver_len, bus_api_rev_len, bus_api_revision, + bus_api_rev_len); #ifdef PKT_STATICS memset((uint8*) &tx_statics, 0, sizeof(pkt_statics_t)); #endif @@ -747,7 +1285,6 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch bcmerror = dhd_dump(dhd_pub, arg, len); break; -#ifdef DHD_DEBUG case IOV_GVAL(IOV_DCONSOLE_POLL): int_val = (int32)dhd_console_ms; bcopy(&int_val, arg, val_size); @@ -761,7 +1298,6 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch if (len > 0) bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1); break; -#endif /* DHD_DEBUG */ case IOV_SVAL(IOV_CLEARCOUNTS): dhd_pub->tx_packets = dhd_pub->rx_packets = 0; @@ -781,7 +1317,9 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch /* clear proptxstatus related counters */ dhd_wlfc_clear_counts(dhd_pub); #endif /* PROP_TXSTATUS */ +#if defined(DHD_LB_STATS) DHD_LB_STATS_RESET(dhd_pub); +#endif /* DHD_LB_STATS */ break; @@ -799,7 +1337,6 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch break; } - #ifdef PROP_TXSTATUS case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE): { bool wlfc_enab = FALSE; @@ -948,9 +1485,9 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch case IOV_GVAL(IOV_WMF_BSS_ENAB): { uint32 bssidx; dhd_wmf_t *wmf; - char *val; + const char *val; - if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__)); bcmerror = BCME_BADARG; break; @@ -965,9 +1502,9 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch /* Enable/Disable WMF */ uint32 bssidx; dhd_wmf_t *wmf; - char *val; + const char *val; - if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + if 
(dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__)); bcmerror = BCME_BADARG; break; @@ -1049,14 +1586,53 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch bcmerror = BCME_RANGE; break; #endif /* DHD_UCAST_UPNP */ + + case IOV_GVAL(IOV_WMF_PSTA_DISABLE): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: ap isoalate: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + int_val = dhd_get_wmf_psta_disable(dhd_pub, bssidx); + bcopy(&int_val, arg, val_size); + break; + } + + case IOV_SVAL(IOV_WMF_PSTA_DISABLE): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + ASSERT(val); + bcopy(val, &int_val, sizeof(uint32)); + dhd_set_wmf_psta_disable(dhd_pub, bssidx, int_val); + break; + } #endif /* DHD_WMF */ +#if defined(TRAFFIC_MGMT_DWM) + case IOV_SVAL(IOV_TRAFFIC_MGMT_DWM): { + trf_mgmt_filter_list_t *trf_mgmt_filter_list = + (trf_mgmt_filter_list_t *)(arg); + bcmerror = traffic_mgmt_add_dwm_filter(dhd_pub, trf_mgmt_filter_list, len); + } + break; +#endif #ifdef DHD_L2_FILTER case IOV_GVAL(IOV_DHCP_UNICAST): { uint32 bssidx; - char *val; - if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + const char *val; + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n", __FUNCTION__, name)); bcmerror = BCME_BADARG; @@ -1068,8 +1644,8 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch } case IOV_SVAL(IOV_DHCP_UNICAST): { uint32 bssidx; - char *val; - if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + const char *val; 
+ if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n", __FUNCTION__, name)); bcmerror = BCME_BADARG; @@ -1081,9 +1657,9 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch } case IOV_GVAL(IOV_BLOCK_PING): { uint32 bssidx; - char *val; + const char *val; - if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__)); bcmerror = BCME_BADARG; break; @@ -1094,9 +1670,9 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch } case IOV_SVAL(IOV_BLOCK_PING): { uint32 bssidx; - char *val; + const char *val; - if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__)); bcmerror = BCME_BADARG; break; @@ -1107,9 +1683,9 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch } case IOV_GVAL(IOV_PROXY_ARP): { uint32 bssidx; - char *val; + const char *val; - if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__)); bcmerror = BCME_BADARG; break; @@ -1120,10 +1696,9 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch } case IOV_SVAL(IOV_PROXY_ARP): { uint32 bssidx; - char *val; - char iobuf[32]; + const char *val; - if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__)); bcmerror = BCME_BADARG; break; @@ -1133,11 +1708,8 @@ dhd_doiovar(dhd_pub_t *dhd_pub, 
const bcm_iovar_t *vi, uint32 actionid, const ch /* Issue a iovar request to WL to update the proxy arp capability bit * in the Extended Capability IE of beacons/probe responses. */ - bcm_mkiovar("proxy_arp_advertise", val, sizeof(int_val), iobuf, - sizeof(iobuf)); - bcmerror = dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_VAR, iobuf, - sizeof(iobuf), TRUE, bssidx); - + bcmerror = dhd_iovar(dhd_pub, bssidx, "proxy_arp_advertise", val, sizeof(int_val), + NULL, 0, TRUE); if (bcmerror == BCME_OK) { dhd_set_parp_status(dhd_pub, bssidx, int_val ? 1 : 0); } @@ -1145,9 +1717,9 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch } case IOV_GVAL(IOV_GRAT_ARP): { uint32 bssidx; - char *val; + const char *val; - if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__)); bcmerror = BCME_BADARG; break; @@ -1158,9 +1730,9 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch } case IOV_SVAL(IOV_GRAT_ARP): { uint32 bssidx; - char *val; + const char *val; - if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__)); bcmerror = BCME_BADARG; break; @@ -1170,11 +1742,23 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch break; } #endif /* DHD_L2_FILTER */ + case IOV_SVAL(IOV_DHD_IE): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: dhd ie: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + break; + } case IOV_GVAL(IOV_AP_ISOLATE): { uint32 bssidx; - char *val; + const char *val; - if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + if 
(dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { DHD_ERROR(("%s: ap isoalate: bad parameter\n", __FUNCTION__)); bcmerror = BCME_BADARG; break; @@ -1186,9 +1770,9 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch } case IOV_SVAL(IOV_AP_ISOLATE): { uint32 bssidx; - char *val; + const char *val; - if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) { DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__)); bcmerror = BCME_BADARG; break; @@ -1214,6 +1798,63 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch break; } #endif /* DHD_PSTA */ +#ifdef DHD_WET + case IOV_GVAL(IOV_WET): + int_val = dhd_get_wet_mode(dhd_pub); + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_WET): + if (int_val == 0 || int_val == 1) { + dhd_set_wet_mode(dhd_pub, int_val); + /* Delete the WET DB when disabled */ + if (!int_val) { + dhd_wet_sta_delete_list(dhd_pub); + } + } else { + bcmerror = BCME_RANGE; + } + break; + case IOV_SVAL(IOV_WET_HOST_IPV4): + dhd_set_wet_host_ipv4(dhd_pub, params, plen); + break; + case IOV_SVAL(IOV_WET_HOST_MAC): + dhd_set_wet_host_mac(dhd_pub, params, plen); + break; +#endif /* DHD_WET */ +#ifdef DHD_MCAST_REGEN + case IOV_GVAL(IOV_MCAST_REGEN_BSS_ENABLE): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + int_val = dhd_get_mcast_regen_bss_enable(dhd_pub, bssidx); + bcopy(&int_val, arg, val_size); + break; + } + + case IOV_SVAL(IOV_MCAST_REGEN_BSS_ENABLE): { + uint32 bssidx; + const char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + 
break; + } + + ASSERT(val); + bcopy(val, &int_val, sizeof(uint32)); + dhd_set_mcast_regen_bss_enable(dhd_pub, bssidx, int_val); + break; + } +#endif /* DHD_MCAST_REGEN */ + case IOV_GVAL(IOV_CFG80211_OPMODE): { int_val = (int32)dhd_pub->op_mode; bcopy(&int_val, arg, sizeof(int_val)); @@ -1237,6 +1878,7 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch break; +#if !defined(MACOSX_DHD) case IOV_GVAL(IOV_LMTEST): { *(uint32 *)arg = (uint32)lmtest; break; @@ -1253,7 +1895,147 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const ch } break; } +#endif +#ifdef SHOW_LOGTRACE + case IOV_GVAL(IOV_DUMP_TRACE_LOG): { + trace_buf_info_t *trace_buf_info; + + trace_buf_info = (trace_buf_info_t *)MALLOC(dhd_pub->osh, + sizeof(trace_buf_info_t)); + if (trace_buf_info != NULL) { + dhd_get_read_buf_ptr(dhd_pub, trace_buf_info); + memcpy((void*)arg, (void*)trace_buf_info, sizeof(trace_buf_info_t)); + MFREE(dhd_pub->osh, trace_buf_info, sizeof(trace_buf_info_t)); + } else { + DHD_ERROR(("Memory allocation Failed\n")); + bcmerror = BCME_NOMEM; + } + break; + } +#endif /* SHOW_LOGTRACE */ +#ifdef REPORT_FATAL_TIMEOUTS + case IOV_GVAL(IOV_SCAN_TO): { + dhd_get_scan_to_val(dhd_pub, (uint32 *)&int_val); + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_SCAN_TO): { + dhd_set_scan_to_val(dhd_pub, (uint32)int_val); + break; + } + case IOV_GVAL(IOV_JOIN_TO): { + dhd_get_join_to_val(dhd_pub, (uint32 *)&int_val); + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_JOIN_TO): { + dhd_set_join_to_val(dhd_pub, (uint32)int_val); + break; + } + case IOV_GVAL(IOV_CMD_TO): { + dhd_get_cmd_to_val(dhd_pub, (uint32 *)&int_val); + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_CMD_TO): { + dhd_set_cmd_to_val(dhd_pub, (uint32)int_val); + break; + } + case IOV_GVAL(IOV_OQS_TO): { + dhd_get_bus_to_val(dhd_pub, (uint32 *)&int_val); + bcopy(&int_val, arg, val_size); + break; + } + case 
IOV_SVAL(IOV_OQS_TO): { + dhd_set_bus_to_val(dhd_pub, (uint32)int_val); + break; + } +#endif /* REPORT_FATAL_TIMEOUTS */ + case IOV_GVAL(IOV_DONGLE_TRAP_TYPE): + if (dhd_pub->dongle_trap_occured) + int_val = ltoh32(dhd_pub->last_trap_info.type); + else + int_val = 0; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_DONGLE_TRAP_INFO): + { + struct bcmstrbuf strbuf; + bcm_binit(&strbuf, arg, len); + if (dhd_pub->dongle_trap_occured == FALSE) { + bcm_bprintf(&strbuf, "no trap recorded\n"); + break; + } + dhd_bus_dump_trap_info(dhd_pub->bus, &strbuf); + break; + } +#ifdef DHD_DEBUG +#if defined(BCMSDIO) || defined(BCMPCIE) + + case IOV_GVAL(IOV_BPADDR): + { + sdreg_t sdreg; + uint32 addr, size; + + memcpy(&sdreg, params, sizeof(sdreg)); + + addr = sdreg.offset; + size = sdreg.func; + + bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size, + (uint *)&int_val, TRUE); + + memcpy(arg, &int_val, sizeof(int32)); + + break; + } + + case IOV_SVAL(IOV_BPADDR): + { + sdreg_t sdreg; + uint32 addr, size; + + memcpy(&sdreg, params, sizeof(sdreg)); + + addr = sdreg.offset; + size = sdreg.func; + + bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size, + (uint *)&sdreg.value, + FALSE); + + break; + } +#endif /* BCMSDIO || BCMPCIE */ + case IOV_SVAL(IOV_MEM_DEBUG): + if (len > 0) { + bcmerror = dhd_mem_debug(dhd_pub, arg, len - 1); + } + break; +#endif /* DHD_DEBUG */ +#if defined(DHD_EFI) && defined(DHD_LOG_DUMP) + case IOV_GVAL(IOV_LOG_CAPTURE_ENABLE): + { + int_val = dhd_pub->log_capture_enable; + bcopy(&int_val, arg, val_size); + break; + } + + case IOV_SVAL(IOV_LOG_CAPTURE_ENABLE): + { + dhd_pub->log_capture_enable = (uint8)int_val; + break; + } + + case IOV_GVAL(IOV_LOG_DUMP): + { + dhd_prot_debug_info_print(dhd_pub); + dhd_bus_mem_dump(dhd_pub); + break; + } +#endif /* DHD_EFI && DHD_LOG_DUMP */ default: bcmerror = BCME_UNSUPPORTED; break; @@ -1472,7 +2254,7 @@ exit: } int -dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen) 
+dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void *buf, uint buflen) { int bcmerror = 0; unsigned long flags; @@ -1505,27 +2287,70 @@ dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen) char *arg; uint arglen; - DHD_GENERAL_LOCK(dhd_pub, flags); - if (dhd_pub->busstate == DHD_BUS_DOWN || - dhd_pub->busstate == DHD_BUS_DOWN_IN_PROGRESS) { + DHD_LINUX_GENERAL_LOCK(dhd_pub, flags); + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub)) { /* In platforms like FC19, the FW download is done via IOCTL * and should not return error for IOCTLs fired before FW * Download is done */ - if (dhd_pub->is_fw_download_done) { + if (dhd_fw_download_status(dhd_pub)) { DHD_ERROR(("%s: returning as busstate=%d\n", __FUNCTION__, dhd_pub->busstate)); - DHD_GENERAL_UNLOCK(dhd_pub, flags); + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); dhd_os_dhdiovar_unlock(dhd_pub); return -ENODEV; } } - dhd_pub->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DHD_IOVAR; - DHD_GENERAL_UNLOCK(dhd_pub, flags); + DHD_BUS_BUSY_SET_IN_DHD_IOVAR(dhd_pub); + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); + #ifdef DHD_PCIE_RUNTIMEPM dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_ioctl); #endif /* DHD_PCIE_RUNTIMEPM */ + DHD_LINUX_GENERAL_LOCK(dhd_pub, flags); + if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub)) { + /* If Suspend/Resume is tested via pcie_suspend IOVAR + * then continue to execute the IOVAR, return from here for + * other IOVARs, also include pciecfgreg and devreset to go + * through. 
+ */ +#ifdef DHD_EFI + if (bcmstricmp((char *)buf, "pcie_suspend") && + bcmstricmp((char *)buf, "pciecfgreg") && + bcmstricmp((char *)buf, "devreset") && + bcmstricmp((char *)buf, "sdio_suspend") && + bcmstricmp((char *)buf, "control_signal")) +#else + if (bcmstricmp((char *)buf, "pcie_suspend") && + bcmstricmp((char *)buf, "pciecfgreg") && + bcmstricmp((char *)buf, "devreset") && + bcmstricmp((char *)buf, "sdio_suspend")) +#endif /* DHD_EFI */ + { + DHD_ERROR(("%s: bus is in suspend(%d)" + "or suspending(0x%x) state\n", + __FUNCTION__, dhd_pub->busstate, + dhd_pub->dhd_bus_busy_state)); + DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub); + dhd_os_busbusy_wake(dhd_pub); + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); + dhd_os_dhdiovar_unlock(dhd_pub); + return -ENODEV; + } + } + /* During devreset ioctl, we call dhdpcie_advertise_bus_cleanup, + * which will wait for all the busy contexts to get over for + * particular time and call ASSERT if timeout happens. As during + * devreset ioctal, we made DHD_BUS_BUSY_SET_IN_DHD_IOVAR, + * to avoid ASSERT, clear the IOCTL busy state. "devreset" ioctl is + * not used in Production platforms but only used in FC19 setups. 
+ */ + if (!bcmstricmp((char *)buf, "devreset")) { + DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub); + } + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); + /* scan past the name to any arguments */ for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--) ; @@ -1537,7 +2362,6 @@ dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen) /* account for the NUL terminator */ arg++, arglen--; - /* call with the appropriate arguments */ if (ioc->cmd == DHD_GET_VAR) { bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen, @@ -1570,6 +2394,19 @@ dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen) bcmerror = dhd_bus_iovar_op(dhd_pub, buf, NULL, 0, arg, arglen, IOV_SET); } + if (bcmerror != BCME_UNSUPPORTED) { + goto unlock_exit; + } + +#ifdef DHD_TIMESYNC + /* check TS module */ + if (ioc->cmd == DHD_GET_VAR) + bcmerror = dhd_timesync_iovar_op(dhd_pub->ts, buf, arg, + arglen, buf, buflen, IOV_GET); + else + bcmerror = dhd_timesync_iovar_op(dhd_pub->ts, buf, + NULL, 0, arg, arglen, IOV_SET); +#endif /* DHD_TIMESYNC */ } goto unlock_exit; @@ -1580,435 +2417,15 @@ dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen) return bcmerror; unlock_exit: - DHD_GENERAL_LOCK(dhd_pub, flags); - dhd_pub->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DHD_IOVAR; + DHD_LINUX_GENERAL_LOCK(dhd_pub, flags); + DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub); dhd_os_busbusy_wake(dhd_pub); - DHD_GENERAL_UNLOCK(dhd_pub, flags); + DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags); dhd_os_dhdiovar_unlock(dhd_pub); return bcmerror; } #ifdef SHOW_EVENTS -#ifdef SHOW_LOGTRACE - -#define MAX_NO_OF_ARG 16 - -#define FMTSTR_SIZE 132 -#define SIZE_LOC_STR 50 -#define MIN_DLEN 4 -#define TAG_BYTES 12 -#define TAG_WORDS 3 -#define ROMSTR_SIZE 200 - - -static int -check_event_log_sequence_number(uint32 seq_no) -{ - int32 diff; - uint32 ret; - static uint32 logtrace_seqnum_prev = 0; - - diff = ntoh32(seq_no)-logtrace_seqnum_prev; - switch (diff) - { - case 0: - ret = -1; /* 
duplicate packet . drop */ - break; - - case 1: - ret =0; /* in order */ - break; - - default: - if ((ntoh32(seq_no) == 0) && - (logtrace_seqnum_prev == 0xFFFFFFFF) ) { /* in-order - Roll over */ - ret = 0; - } else { - - if (diff > 0) { - DHD_EVENT(("WLC_E_TRACE:" - "Event lost (log) seqnum %d nblost %d\n", - ntoh32(seq_no), (diff-1))); - } else { - DHD_EVENT(("WLC_E_TRACE:" - "Event Packets coming out of order!!\n")); - } - ret = 0; - } - } - - logtrace_seqnum_prev = ntoh32(seq_no); - - return ret; -} - -static void -dhd_eventmsg_print(dhd_pub_t *dhd_pub, void *event_data, void *raw_event_ptr, - uint datalen, const char *event_name) -{ - msgtrace_hdr_t hdr; - uint32 nblost; - uint8 count; - char *s, *p; - static uint32 seqnum_prev = 0; - uint32 *log_ptr = NULL; - uchar *buf; - event_log_hdr_t event_hdr; - uint32 i; - int32 j; - - dhd_event_log_t *raw_event = (dhd_event_log_t *) raw_event_ptr; - - char fmtstr_loc_buf[FMTSTR_SIZE] = {0}; - char (*str_buf)[SIZE_LOC_STR] = NULL; - char * str_tmpptr = NULL; - uint32 addr = 0; - uint32 **hdr_ptr = NULL; - uint32 h_i = 0; - uint32 hdr_ptr_len = 0; - - typedef union { - uint32 val; - char * addr; - } u_arg; - u_arg arg[MAX_NO_OF_ARG] = {{0}}; - char *c_ptr = NULL; - char rom_log_str[ROMSTR_SIZE] = {0}; - uint32 rom_str_len = 0; - - BCM_REFERENCE(arg); - - if (!DHD_FWLOG_ON()) - return; - - buf = (uchar *) event_data; - memcpy(&hdr, buf, MSGTRACE_HDRLEN); - - if (hdr.version != MSGTRACE_VERSION) { - DHD_EVENT(("\nMACEVENT: %s [unsupported version --> " - "dhd version:%d dongle version:%d]\n", - event_name, MSGTRACE_VERSION, hdr.version)); - /* Reset datalen to avoid display below */ - datalen = 0; - return; - } - - if (hdr.trace_type == MSGTRACE_HDR_TYPE_MSG) { - /* There are 2 bytes available at the end of data */ - buf[MSGTRACE_HDRLEN + ntoh16(hdr.len)] = '\0'; - - if (ntoh32(hdr.discarded_bytes) || ntoh32(hdr.discarded_printf)) { - DHD_FWLOG(("WLC_E_TRACE: [Discarded traces in dongle -->" - "discarded_bytes %d 
discarded_printf %d]\n", - ntoh32(hdr.discarded_bytes), - ntoh32(hdr.discarded_printf))); - } - - nblost = ntoh32(hdr.seqnum) - seqnum_prev - 1; - if (nblost > 0) { - DHD_FWLOG(("WLC_E_TRACE:" - "[Event lost (msg) --> seqnum %d nblost %d\n", - ntoh32(hdr.seqnum), nblost)); - } - seqnum_prev = ntoh32(hdr.seqnum); - - /* Display the trace buffer. Advance from - * \n to \n to avoid display big - * printf (issue with Linux printk ) - */ - p = (char *)&buf[MSGTRACE_HDRLEN]; - while (*p != '\0' && (s = strstr(p, "\n")) != NULL) { - *s = '\0'; - DHD_FWLOG(("[FWLOG] %s\n", p)); - p = s+1; - } - if (*p) - DHD_FWLOG(("[FWLOG] %s", p)); - - /* Reset datalen to avoid display below */ - datalen = 0; - - } else if (hdr.trace_type == MSGTRACE_HDR_TYPE_LOG) { - /* Let the standard event printing work for now */ - uint32 timestamp, seq, pktlen; - - if (check_event_log_sequence_number(hdr.seqnum)) { - - DHD_EVENT(("%s: WLC_E_TRACE:" - "[Event duplicate (log) %d] dropping!!\n", - __FUNCTION__, hdr.seqnum)); - return; /* drop duplicate events */ - } - - p = (char *)&buf[MSGTRACE_HDRLEN]; - datalen -= MSGTRACE_HDRLEN; - pktlen = ltoh16(*((uint16 *)p)); - seq = ltoh16(*((uint16 *)(p + 2))); - p += MIN_DLEN; - datalen -= MIN_DLEN; - timestamp = ltoh32(*((uint32 *)p)); - BCM_REFERENCE(pktlen); - BCM_REFERENCE(seq); - BCM_REFERENCE(timestamp); - - /* - * Allocating max possible number of event TAGs in the received buffer - * considering that each event requires minimum of TAG_BYTES. 
- */ - hdr_ptr_len = ((datalen/TAG_BYTES)+1) * sizeof(uint32*); - - if ((raw_event->fmts)) { - if (!(str_buf = MALLOCZ(dhd_pub->osh, (MAX_NO_OF_ARG * SIZE_LOC_STR)))) { - DHD_ERROR(("%s: malloc failed str_buf \n", __FUNCTION__)); - } - } - - if (!(hdr_ptr = MALLOCZ(dhd_pub->osh, hdr_ptr_len))) { - DHD_ERROR(("%s: malloc failed hdr_ptr \n", __FUNCTION__)); - } - - - DHD_MSGTRACE_LOG(("EVENT_LOG_HDR[No.%d]: timestamp 0x%08x length = %d\n", - seq, timestamp, pktlen)); - - /* (raw_event->fmts) has value */ - - log_ptr = (uint32 *) (p + datalen); - - /* Store all hdr pointer while parsing from last of the log buffer - * sample format of - * 001d3c54 00000064 00000064 001d3c54 001dba08 035d6ce1 0c540639 - * 001d3c54 00000064 00000064 035d6d89 0c580439 - * in above example 0c580439 -- 39 is tag , 04 is count, 580c is format number - * all these uint32 values comes in reverse order as group as EL data - * while decoding we can parse only from last to first - */ - - while (datalen > MIN_DLEN) { - log_ptr--; - datalen -= MIN_DLEN; - event_hdr.t = *log_ptr; - /* - * Check for partially overriten entries - */ - if (log_ptr - (uint32 *) p < event_hdr.count) { - break; - } - /* - * Check argument count (only when format is valid) - */ - if ((event_hdr.count > MAX_NO_OF_ARG) && - (event_hdr.fmt_num != 0xffff)) { - break; - } - /* - * Check for end of the Frame. - */ - if (event_hdr.tag == EVENT_LOG_TAG_NULL) { - continue; - } - log_ptr[0] = event_hdr.t; - if (h_i < (hdr_ptr_len / sizeof(uint32*))) { - hdr_ptr[h_i++] = log_ptr; - } - - /* Now place the header at the front - * and copy back. 
- */ - log_ptr -= event_hdr.count; - - c_ptr = NULL; - datalen = datalen - (event_hdr.count * MIN_DLEN); - } - datalen = 0; - - /* print all log using stored hdr pointer in reverse order of EL data - * which is actually print older log first and then other in order - */ - - for (j = (h_i-1); j >= 0; j--) { - if (!(hdr_ptr[j])) { - break; - } - - event_hdr.t = *hdr_ptr[j]; - - log_ptr = hdr_ptr[j]; - - /* Now place the header at the front - * and copy back. - */ - log_ptr -= event_hdr.count; - - if (event_hdr.tag == EVENT_LOG_TAG_ROM_PRINTF) { - - rom_str_len = ((event_hdr.count)-1) * sizeof(uint32); - - if (rom_str_len >= (ROMSTR_SIZE -1)) { - rom_str_len = ROMSTR_SIZE - 1; - } - - /* copy all ascii data for ROM printf to local string */ - memcpy(rom_log_str, log_ptr, rom_str_len); - /* add end of line at last */ - rom_log_str[rom_str_len] = '\0'; - - DHD_MSGTRACE_LOG(("EVENT_LOG_ROM[0x%08x]: %s", - log_ptr[event_hdr.count - 1], rom_log_str)); - - /* Add newline if missing */ - if (rom_log_str[strlen(rom_log_str) - 1] != '\n') { - DHD_EVENT(("\n")); - } - - memset(rom_log_str, 0, ROMSTR_SIZE); - - continue; - } - - /* - * Check For Special Time Stamp Packet - */ - if (event_hdr.tag == EVENT_LOG_TAG_TS) { - DHD_MSGTRACE_LOG(("EVENT_LOG_TS[0x%08x]: SYS:%08x CPU:%08x\n", - log_ptr[event_hdr.count-1], log_ptr[0], log_ptr[1])); - continue; - } - - /* Simply print out event dump buffer (fmt_num = 0xffff) */ - if (!str_buf || event_hdr.fmt_num == 0xffff) { - /* - * Print out raw value if unable to interpret - */ -#ifdef DHD_LOG_DUMP - char buf[256]; - char *pos = buf; - memset(buf, 0, sizeof(buf)); - pos += snprintf(pos, 256, -#else - DHD_MSGTRACE_LOG(( -#endif /* DHD_LOG_DUMP */ - "EVENT_LOG_BUF[0x%08x]: tag=%d len=%d fmt=%04x", - log_ptr[event_hdr.count-1], event_hdr.tag, - event_hdr.count, event_hdr.fmt_num -#ifdef DHD_LOG_DUMP -); -#else -)); -#endif /* DHD_LOG_DUMP */ - - for (count = 0; count < (event_hdr.count-1); count++) { -#ifdef DHD_LOG_DUMP - if (strlen(buf) 
>= (256 - 1)) { - DHD_MSGTRACE_LOG(("%s\n", buf)); - memset(buf, 0, sizeof(buf)); - pos = buf; - } - pos += snprintf(pos, (256 - (int)(pos-buf)), - " %08x", log_ptr[count]); -#else - if (count % 8 == 0) - DHD_MSGTRACE_LOG(("\n\t%08x", log_ptr[count])); - else - DHD_MSGTRACE_LOG((" %08x", log_ptr[count])); -#endif /* DHD_LOG_DUMP */ - } -#ifdef DHD_LOG_DUMP - DHD_MSGTRACE_LOG(("%s\n", buf)); -#else - DHD_MSGTRACE_LOG(("\n")); -#endif /* DHD_LOG_DUMP */ - continue; - } - - /* Copy the format string to parse %s and add "EVENT_LOG: */ - if ((event_hdr.fmt_num >> 2) < raw_event->num_fmts) { - snprintf(fmtstr_loc_buf, FMTSTR_SIZE, - "EVENT_LOG[0x%08x]: %s", log_ptr[event_hdr.count-1], - raw_event->fmts[event_hdr.fmt_num >> 2]); - c_ptr = fmtstr_loc_buf; - } else { - DHD_ERROR(("%s: fmt number out of range \n", __FUNCTION__)); - continue; - } - - for (count = 0; count < (event_hdr.count-1); count++) { - if (c_ptr != NULL) { - if ((c_ptr = strstr(c_ptr, "%")) != NULL) { - c_ptr++; - } - } - - if ((c_ptr != NULL) && (*c_ptr == 's')) { - if ((raw_event->raw_sstr) && - ((log_ptr[count] > raw_event->rodata_start) && - (log_ptr[count] < raw_event->rodata_end))) { - /* ram static string */ - addr = log_ptr[count] - raw_event->rodata_start; - str_tmpptr = raw_event->raw_sstr + addr; - memcpy(str_buf[count], str_tmpptr, SIZE_LOC_STR); - str_buf[count][SIZE_LOC_STR-1] = '\0'; - arg[count].addr = str_buf[count]; - } else if ((raw_event->rom_raw_sstr) && - ((log_ptr[count] > - raw_event->rom_rodata_start) && - (log_ptr[count] < - raw_event->rom_rodata_end))) { - /* rom static string */ - addr = log_ptr[count] - raw_event->rom_rodata_start; - str_tmpptr = raw_event->rom_raw_sstr + addr; - memcpy(str_buf[count], str_tmpptr, SIZE_LOC_STR); - str_buf[count][SIZE_LOC_STR-1] = '\0'; - arg[count].addr = str_buf[count]; - } else { - /* - * Dynamic string OR - * No data for static string. - * So store all string's address as string. 
- */ - snprintf(str_buf[count], SIZE_LOC_STR, "(s)0x%x", - log_ptr[count]); - arg[count].addr = str_buf[count]; - } - } else { - /* Other than string */ - arg[count].val = log_ptr[count]; - } - } - - DHD_MSGTRACE_LOG((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3], - arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10], - arg[11], arg[12], arg[13], arg[14], arg[15])); - - if (fmtstr_loc_buf[strlen(fmtstr_loc_buf) - 1] != '\n') { - /* Add newline if missing */ - DHD_MSGTRACE_LOG(("\n")); - } - - memset(fmtstr_loc_buf, 0, FMTSTR_SIZE); - - for (i = 0; i < MAX_NO_OF_ARG; i++) { - arg[i].addr = 0; - } - for (i = 0; i < MAX_NO_OF_ARG; i++) { - memset(str_buf[i], 0, SIZE_LOC_STR); - } - - } - DHD_MSGTRACE_LOG(("\n")); - - if (str_buf) { - MFREE(dhd_pub->osh, str_buf, (MAX_NO_OF_ARG * SIZE_LOC_STR)); - } - - if (hdr_ptr) { - MFREE(dhd_pub->osh, hdr_ptr, hdr_ptr_len); - } - } -} - -#endif /* SHOW_LOGTRACE */ - static void wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, void *raw_event_ptr, char *eventmask) @@ -2092,7 +2509,8 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, snprintf(err_msg, sizeof(err_msg), "AUTH unknown: %d", (int)auth_type); auth_str = err_msg; } - if (event_type == WLC_E_AUTH_IND) { + + if (event_type == WLC_E_AUTH_IND) { DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str)); } else if (status == WLC_E_STATUS_SUCCESS) { DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n", @@ -2113,13 +2531,21 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, case WLC_E_SET_SSID: if (status == WLC_E_STATUS_SUCCESS) { DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); - } else if (status == WLC_E_STATUS_FAIL) { - DHD_EVENT(("MACEVENT: %s, failed\n", event_name)); - } else if (status == WLC_E_STATUS_NO_NETWORKS) { - DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name)); +#ifdef REPORT_FATAL_TIMEOUTS + dhd_clear_join_error(dhd_pub, 
WLC_SSID_MASK); +#endif /* REPORT_FATAL_TIMEOUTS */ } else { - DHD_EVENT(("MACEVENT: %s, unexpected status %d\n", - event_name, (int)status)); +#ifdef REPORT_FATAL_TIMEOUTS + dhd_set_join_error(dhd_pub, WLC_SSID_MASK); +#endif /* REPORT_FATAL_TIMEOUTS */ + if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, failed\n", event_name)); + } else if (status == WLC_E_STATUS_NO_NETWORKS) { + DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name)); + } else { + DHD_EVENT(("MACEVENT: %s, unexpected status %d\n", + event_name, (int)status)); + } } break; @@ -2159,16 +2585,23 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, case WLC_E_ASSOC_REQ_IE: case WLC_E_ASSOC_RESP_IE: case WLC_E_PMKID_CACHE: - case WLC_E_SCAN_COMPLETE: DHD_EVENT(("MACEVENT: %s\n", event_name)); break; + case WLC_E_SCAN_COMPLETE: + DHD_EVENT(("MACEVENT: %s\n", event_name)); +#ifdef REPORT_FATAL_TIMEOUTS + dhd_stop_scan_timer(dhd_pub); +#endif /* REPORT_FATAL_TIMEOUTS */ + break; + case WLC_E_RSSI_LQM: case WLC_E_PFN_NET_FOUND: case WLC_E_PFN_NET_LOST: + case WLC_E_PFN_SCAN_COMPLETE: case WLC_E_PFN_SCAN_NONE: case WLC_E_PFN_SCAN_ALLGONE: case WLC_E_PFN_GSCAN_FULL_RESULT: - case WLC_E_PFN_SWC: + case WLC_E_PFN_SSID_EXT: DHD_EVENT(("PNOEVENT: %s\n", event_name)); break; @@ -2176,6 +2609,14 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, case WLC_E_PRUNE: DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n", event_name, (int)status, (int)reason)); +#ifdef REPORT_FATAL_TIMEOUTS + if ((status == WLC_E_STATUS_SUCCESS || status == WLC_E_STATUS_UNSOLICITED) && + (reason == WLC_E_SUP_OTHER)) { + dhd_clear_join_error(dhd_pub, WLC_WPA_MASK); + } else { + dhd_set_join_error(dhd_pub, WLC_WPA_MASK); + } +#endif /* REPORT_FATAL_TIMEOUTS */ break; #ifdef WIFI_ACT_FRAME @@ -2186,10 +2627,9 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, #ifdef SHOW_LOGTRACE case WLC_E_TRACE: - { - dhd_eventmsg_print(dhd_pub, 
event_data, raw_event_ptr, datalen, event_name); + DHD_EVENT(("MACEVENT: %s Logtrace\n", event_name)); + dhd_dbg_trace_evnt_handler(dhd_pub, event_data, raw_event_ptr, datalen); break; - } #endif /* SHOW_LOGTRACE */ case WLC_E_RSSI: @@ -2218,12 +2658,32 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, break; case WLC_E_ESCAN_RESULT: { -#ifndef DHD_IFDEBUG DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d \n", event_name, event_type, eabuf, (int)status)); -#endif } break; + case WLC_E_PSK_AUTH: + DHD_EVENT(("MACEVENT: %s, RA %s status %d Reason:%d\n", + event_name, eabuf, status, reason)); + break; + case WLC_E_IF: + { + struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data; + BCM_REFERENCE(ifevent); + + DHD_EVENT(("MACEVENT: %s, opcode:0x%d ifidx:%d\n", + event_name, ifevent->opcode, ifevent->ifidx)); + break; + } + +#ifdef SHOW_LOGTRACE + case WLC_E_MSCH: + { + wl_mschdbg_event_handler(dhd_pub, raw_event_ptr, reason, event_data, datalen); + break; + } +#endif /* SHOW_LOGTRACE */ + default: DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n", event_name, event_type, eabuf, (int)status, (int)reason, @@ -2236,13 +2696,139 @@ wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, buf = (uchar *) event_data; BCM_REFERENCE(buf); DHD_EVENT((" data (%d) : ", datalen)); - for (i = 0; i < datalen; i++) - DHD_EVENT((" 0x%02x ", *buf++)); + for (i = 0; i < datalen; i++) { + DHD_EVENT((" 0x%02x ", buf[i])); + } DHD_EVENT(("\n")); } } #endif /* SHOW_EVENTS */ +#ifdef DNGL_EVENT_SUPPORT +/* Check whether packet is a BRCM dngl event pkt. If it is, process event data. 
*/ +int +dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event, size_t pktlen) +{ + bcm_dngl_event_t *pvt_data = (bcm_dngl_event_t *)pktdata; + + dngl_host_event_process(dhdp, pvt_data, dngl_event, pktlen); + return BCME_OK; +} + +void +dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event, + bcm_dngl_event_msg_t *dngl_event, size_t pktlen) +{ + uint8 *p = (uint8 *)(event + 1); + uint16 type = ntoh16_ua((void *)&dngl_event->event_type); + uint16 datalen = ntoh16_ua((void *)&dngl_event->datalen); + uint16 version = ntoh16_ua((void *)&dngl_event->version); + + DHD_EVENT(("VERSION:%d, EVENT TYPE:%d, DATALEN:%d\n", version, type, datalen)); + if (datalen > (pktlen - sizeof(bcm_dngl_event_t) + ETHER_TYPE_LEN)) { + return; + } + if (version != BCM_DNGL_EVENT_MSG_VERSION) { + DHD_ERROR(("%s:version mismatch:%d:%d\n", __FUNCTION__, + version, BCM_DNGL_EVENT_MSG_VERSION)); + return; + } + switch (type) { + case DNGL_E_SOCRAM_IND: + { + bcm_dngl_socramind_t *socramind_ptr = (bcm_dngl_socramind_t *)p; + uint16 tag = ltoh32(socramind_ptr->tag); + uint16 taglen = ltoh32(socramind_ptr->length); + p = (uint8 *)socramind_ptr->value; + DHD_EVENT(("Tag:%d Len:%d Datalen:%d\n", tag, taglen, datalen)); + switch (tag) { + case SOCRAM_IND_ASSERT_TAG: + { + /* + * The payload consists of - + * null terminated function name padded till 32 bit boundary + + * Line number - (32 bits) + * Caller address (32 bits) + */ + char *fnname = (char *)p; + if (datalen < (ROUNDUP(strlen(fnname) + 1, sizeof(uint32)) + + sizeof(uint32) * 2)) { + DHD_ERROR(("Wrong length:%d\n", datalen)); + return; + } + DHD_EVENT(("ASSRT Function:%s ", p)); + p += ROUNDUP(strlen(p) + 1, sizeof(uint32)); + DHD_EVENT(("Line:%d ", *(uint32 *)p)); + p += sizeof(uint32); + DHD_EVENT(("Caller Addr:0x%x\n", *(uint32 *)p)); + break; + } + case SOCRAM_IND_TAG_HEALTH_CHECK: + { + bcm_dngl_healthcheck_t *dngl_hc = (bcm_dngl_healthcheck_t *)p; + DHD_EVENT(("SOCRAM_IND_HEALTHCHECK_TAG:%d 
Len:%d\n", + ltoh32(dngl_hc->top_module_tag), ltoh32(dngl_hc->top_module_len))); + if (DHD_EVENT_ON()) { + prhex("HEALTHCHECK", p, ltoh32(dngl_hc->top_module_len)); + } + p = (uint8 *)dngl_hc->value; + + switch (ltoh32(dngl_hc->top_module_tag)) { + case HEALTH_CHECK_TOP_LEVEL_MODULE_PCIEDEV_RTE: + { + bcm_dngl_pcie_hc_t *pcie_hc; + pcie_hc = (bcm_dngl_pcie_hc_t *)p; + BCM_REFERENCE(pcie_hc); + if (ltoh32(dngl_hc->top_module_len) < + sizeof(bcm_dngl_pcie_hc_t)) { + DHD_ERROR(("Wrong length:%d\n", + ltoh32(dngl_hc->top_module_len))); + return; + } + DHD_EVENT(("%d:PCIE HC error:%d flag:0x%x," + " control:0x%x\n", + ltoh32(pcie_hc->version), + ltoh32(pcie_hc->pcie_err_ind_type), + ltoh32(pcie_hc->pcie_flag), + ltoh32(pcie_hc->pcie_control_reg))); + break; + } + default: + DHD_ERROR(("%s:Unknown module TAG:%d\n", + __FUNCTION__, + ltoh32(dngl_hc->top_module_tag))); + break; + } + break; + } + default: + DHD_ERROR(("%s:Unknown TAG", __FUNCTION__)); + if (p && DHD_EVENT_ON()) { + prhex("SOCRAMIND", p, taglen); + } + break; + } + break; + } + default: + DHD_ERROR(("%s:Unknown DNGL Event Type:%d", __FUNCTION__, type)); + if (p && DHD_EVENT_ON()) { + prhex("SOCRAMIND", p, datalen); + } + break; + } +#ifdef DHD_FW_COREDUMP + dhdp->memdump_type = DUMP_TYPE_DONGLE_HOST_EVENT; +#endif /* DHD_FW_COREDUMP */ + if (dhd_socram_dump(dhdp->bus)) { + DHD_ERROR(("%s: socram dump failed\n", __FUNCTION__)); + } else { + /* Notify framework */ + dhd_dbg_send_urgent_evt(dhdp, p, datalen); + } +} +#endif /* DNGL_EVENT_SUPPORT */ + /* Stub for now. Will become real function as soon as shim * is being integrated to Android, Linux etc. 
*/ @@ -2253,17 +2839,22 @@ wl_event_process_default(wl_event_msg_t *event, struct wl_evt_pport *evt_pport) } int -wl_event_process(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, void **data_ptr, void *raw_event) +wl_event_process(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, + uint pktlen, void **data_ptr, void *raw_event) { wl_evt_pport_t evt_pport; wl_event_msg_t event; + bcm_event_msg_u_t evu; + int ret; /* make sure it is a BRCM event pkt and record event data */ - int ret = wl_host_event_get_data(pktdata, &event, data_ptr); + ret = wl_host_event_get_data(pktdata, pktlen, &evu); if (ret != BCME_OK) { return ret; } + memcpy(&event, &evu.event, sizeof(wl_event_msg_t)); + /* convert event from network order to host order */ wl_event_to_host_order(&event); @@ -2273,64 +2864,90 @@ wl_event_process(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, void **data_ptr, evt_pport.pktdata = pktdata; evt_pport.data_ptr = data_ptr; evt_pport.raw_event = raw_event; + evt_pport.data_len = pktlen; #if defined(WL_WLC_SHIM) && defined(WL_WLC_SHIM_EVENTS) { struct wl_shim_node *shim = dhd_pub_shim(dhd_pub); - ASSERT(shim); - ret = wl_shim_event_process(shim, &event, &evt_pport); + if (shim) { + ret = wl_shim_event_process(shim, &event, &evt_pport); + } else { + /* events can come even before shim is initialized + (when waiting for "wlc_ver" response) + * handle them in a non-shim way. + */ + DHD_ERROR(("%s: Events coming before shim initialization!\n", + __FUNCTION__)); + ret = wl_event_process_default(&event, &evt_pport); + } } #else ret = wl_event_process_default(&event, &evt_pport); -#endif +#endif /* WL_WLC_SHIM && WL_WLC_SHIM_EVENTS */ return ret; } /* Check whether packet is a BRCM event pkt. If it is, record event data. 
*/ int -wl_host_event_get_data(void *pktdata, wl_event_msg_t *event, void **data_ptr) +wl_host_event_get_data(void *pktdata, uint pktlen, bcm_event_msg_u_t *evu) { - bcm_event_t *pvt_data = (bcm_event_t *)pktdata; + int ret; - if (bcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) { - DHD_ERROR(("%s: mismatched OUI, bailing\n", __FUNCTION__)); - return BCME_ERROR; + ret = is_wlc_event_frame(pktdata, pktlen, 0, evu); + if (ret != BCME_OK) { + DHD_ERROR(("%s: Invalid event frame, err = %d\n", + __FUNCTION__, ret)); } - /* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */ - if (ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype) != BCMILCP_BCM_SUBTYPE_EVENT) { - DHD_ERROR(("%s: mismatched subtype, bailing\n", __FUNCTION__)); - return BCME_ERROR; - } - - *data_ptr = &pvt_data[1]; - - /* memcpy since BRCM event pkt may be unaligned. */ - memcpy(event, &pvt_data->event, sizeof(wl_event_msg_t)); - - return BCME_OK; + return ret; } int -wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, +wl_process_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen, wl_event_msg_t *event, void **data_ptr, void *raw_event) { - bcm_event_t *pvt_data; + bcm_event_t *pvt_data = (bcm_event_t *)pktdata; + bcm_event_msg_u_t evu; uint8 *event_data; uint32 type, status, datalen; uint16 flags; - int evlen; + uint evlen; + int ret; + uint16 usr_subtype; + char macstr[ETHER_ADDR_STR_LEN]; - /* make sure it is a BRCM event pkt and record event data */ - int ret = wl_host_event_get_data(pktdata, event, data_ptr); + BCM_REFERENCE(macstr); + + ret = wl_host_event_get_data(pktdata, pktlen, &evu); if (ret != BCME_OK) { return ret; } - pvt_data = (bcm_event_t *)pktdata; - event_data = *data_ptr; + usr_subtype = ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype); + switch (usr_subtype) { + case BCMILCP_BCM_SUBTYPE_EVENT: + memcpy(event, &evu.event, sizeof(wl_event_msg_t)); + *data_ptr = &pvt_data[1]; + break; + case BCMILCP_BCM_SUBTYPE_DNGLEVENT: +#ifdef 
DNGL_EVENT_SUPPORT + /* If it is a DNGL event process it first */ + if (dngl_host_event(dhd_pub, pktdata, &evu.dngl_event, pktlen) == BCME_OK) { + /* + * Return error purposely to prevent DNGL event being processed + * as BRCM event + */ + return BCME_ERROR; + } +#endif /* DNGL_EVENT_SUPPORT */ + return BCME_NOTFOUND; + default: + return BCME_NOTFOUND; + } + /* start wl_event_msg process */ + event_data = *data_ptr; type = ntoh32_ua((void *)&event->event_type); flags = ntoh16_ua((void *)&event->flags); status = ntoh32_ua((void *)&event->status); @@ -2351,7 +2968,38 @@ wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, case WLC_E_BCMC_CREDIT_SUPPORT: dhd_wlfc_BCMCCredit_support_event(dhd_pub); break; +#ifdef LIMIT_BORROW + case WLC_E_ALLOW_CREDIT_BORROW: + dhd_wlfc_disable_credit_borrow_event(dhd_pub, event_data); + break; +#endif /* LIMIT_BORROW */ +#endif /* PROP_TXSTATUS */ + + + case WLC_E_ULP: +#ifdef DHD_ULP + { + wl_ulp_event_t *ulp_evt = (wl_ulp_event_t *)event_data; + + /* Flush and disable console messages */ + if (ulp_evt->ulp_dongle_action == WL_ULP_DISABLE_CONSOLE) { +#ifdef DHD_ULP_NOT_USED + dhd_bus_ulp_disable_console(dhd_pub); +#endif /* DHD_ULP_NOT_USED */ + } + if (ulp_evt->ulp_dongle_action == WL_ULP_UCODE_DOWNLOAD) { + dhd_bus_ucode_download(dhd_pub->bus); + } + } +#endif /* DHD_ULP */ + break; + case WLC_E_TDLS_PEER_EVENT: +#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) + { + dhd_tdls_event_handler(dhd_pub, event); + } #endif + break; case WLC_E_IF: { @@ -2370,11 +3018,11 @@ wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, { uint8* ea = pvt_data->eth.ether_dhost; WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, " - "[%02x:%02x:%02x:%02x:%02x:%02x]\n", - ifevent->ifidx, - ((ifevent->opcode == WLC_E_IF_ADD) ? "ADD":"DEL"), - ((ifevent->role == 0) ? "STA":"AP "), - ea[0], ea[1], ea[2], ea[3], ea[4], ea[5])); + "[%02x:%02x:%02x:%02x:%02x:%02x]\n", + ifevent->ifidx, + ((ifevent->opcode == WLC_E_IF_ADD) ? 
"ADD":"DEL"), + ((ifevent->role == 0) ? "STA":"AP "), + ea[0], ea[1], ea[2], ea[3], ea[4], ea[5])); (void)ea; if (ifevent->opcode == WLC_E_IF_CHANGE) @@ -2403,18 +3051,29 @@ wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, return (BCME_ERROR); } } else if (ifevent->opcode == WLC_E_IF_DEL) { +#ifdef PCIE_FULL_DONGLE + /* Delete flowrings unconditionally for i/f delete */ + dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info, + event->ifname)); +#endif /* PCIE_FULL_DONGLE */ dhd_event_ifdel(dhd_pub->info, ifevent, event->ifname, event->addr.octet); + /* Return ifidx (for vitual i/f, it will be > 0) + * so that no other operations on deleted interface + * are carried out + */ + ret = ifevent->ifidx; + goto exit; } else if (ifevent->opcode == WLC_E_IF_CHANGE) { #ifdef WL_CFG80211 - wl_cfg80211_notify_ifchange(ifevent->ifidx, - event->ifname, event->addr.octet, ifevent->bssidx); + dhd_event_ifchange(dhd_pub->info, ifevent, event->ifname, + event->addr.octet); #endif /* WL_CFG80211 */ } } else { #if !defined(PROP_TXSTATUS) && !defined(PCIE_FULL_DONGLE) && defined(WL_CFG80211) - DHD_ERROR(("%s: Invalid ifidx %d for %s\n", - __FUNCTION__, ifevent->ifidx, event->ifname)); + DHD_INFO(("%s: Invalid ifidx %d for %s\n", + __FUNCTION__, ifevent->ifidx, event->ifname)); #endif /* !PROP_TXSTATUS && !PCIE_FULL_DONGLE && WL_CFG80211 */ } /* send up the if event: btamp user needs it */ @@ -2441,6 +3100,11 @@ wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, dhd_pno_event_handler(dhd_pub, event, (void *)event_data); break; #endif +#if defined(RTT_SUPPORT) + case WLC_E_PROXD: + dhd_rtt_event_handler(dhd_pub, event, (void *)event_data); + break; +#endif /* RTT_SUPPORT */ /* These are what external supplicant/authenticator wants */ case WLC_E_ASSOC_IND: case WLC_E_AUTH_IND: @@ -2456,60 +3120,91 @@ wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, DHD_ERROR(("%s: socram dump ERROR : \n", __FUNCTION__)); } break; +#endif +#ifdef DHD_WMF + 
case WLC_E_PSTA_PRIMARY_INTF_IND: + dhd_update_psta_interface_for_sta(dhd_pub, event->ifname, + (void *)(event->addr.octet), (void*) event_data); + break; #endif case WLC_E_LINK: #ifdef PCIE_FULL_DONGLE + DHD_EVENT(("%s: Link event %d, flags %x, status %x\n", + __FUNCTION__, type, flags, status)); if (dhd_update_interface_link_status(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info, - event->ifname), (uint8)flags) != BCME_OK) + event->ifname), (uint8)flags) != BCME_OK) { + DHD_ERROR(("%s: dhd_update_interface_link_status Failed.\n", + __FUNCTION__)); break; + } if (!flags) { + DHD_ERROR(("%s: Deleting all STA from assoc list and flowrings.\n", + __FUNCTION__)); + /* Delete all sta and flowrings */ + dhd_del_all_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info, event->ifname)); dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname)); } /* fall through */ -#endif +#endif /* PCIE_FULL_DONGLE */ case WLC_E_DEAUTH: case WLC_E_DEAUTH_IND: case WLC_E_DISASSOC: case WLC_E_DISASSOC_IND: - DHD_EVENT(("%s: Link event %d, flags %x, status %x\n", - __FUNCTION__, type, flags, status)); #ifdef PCIE_FULL_DONGLE if (type != WLC_E_LINK) { uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname); uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex); uint8 del_sta = TRUE; #ifdef WL_CFG80211 - if (role == WLC_E_IF_ROLE_STA && !wl_cfg80211_is_roam_offload() && - !wl_cfg80211_is_event_from_connected_bssid(event, *ifidx)) { + if (role == WLC_E_IF_ROLE_STA && + !wl_cfg80211_is_roam_offload(dhd_idx2net(dhd_pub, ifindex)) && + !wl_cfg80211_is_event_from_connected_bssid( + dhd_idx2net(dhd_pub, ifindex), event, *ifidx)) { del_sta = FALSE; } #endif /* WL_CFG80211 */ + DHD_EVENT(("%s: Link event %d, flags %x, status %x, role %d, del_sta %d\n", + __FUNCTION__, type, flags, status, role, del_sta)); if (del_sta) { + DHD_MAC_TO_STR((event->addr.octet), macstr); + DHD_EVENT(("%s: Deleting STA %s\n", __FUNCTION__, macstr)); + dhd_del_sta(dhd_pub, 
dhd_ifname2idx(dhd_pub->info, event->ifname), &event->addr.octet); - if (role == WLC_E_IF_ROLE_STA) { + /* Delete all flowrings for STA and P2P Client */ + if (role == WLC_E_IF_ROLE_STA || role == WLC_E_IF_ROLE_P2P_CLIENT) { dhd_flow_rings_delete(dhd_pub, ifindex); } else { dhd_flow_rings_delete_for_peer(dhd_pub, ifindex, - &event->addr.octet[0]); + (char *)&event->addr.octet[0]); } } } #endif /* PCIE_FULL_DONGLE */ /* fall through */ + default: *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname); /* push up to external supp/auth */ dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx); DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n", - __FUNCTION__, type, flags, status)); + __FUNCTION__, type, flags, status)); BCM_REFERENCE(flags); BCM_REFERENCE(status); break; } +#if defined(STBAP) + /* For routers, EAPD will be working on these events. + * Overwrite interface name to that event is pushed + * to host with its registered interface name + */ + memcpy(pvt_data->event.ifname, dhd_ifname(dhd_pub, *ifidx), IFNAMSIZ); +#endif + +exit: #ifdef SHOW_EVENTS if (DHD_FWLOG_ON() || DHD_EVENT_ON()) { @@ -2518,7 +3213,15 @@ wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, } #endif /* SHOW_EVENTS */ - return (BCME_OK); + return ret; +} + +int +wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen, + wl_event_msg_t *event, void **data_ptr, void *raw_event) +{ + return wl_process_host_event(dhd_pub, ifidx, pktdata, pktlen, event, data_ptr, + raw_event); } void @@ -2549,9 +3252,9 @@ dhd_print_buf(void *pbuf, int len, int bytes_per_line) #define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base)) #endif -#ifdef PKT_FILTER_SUPPORT +#if defined(PKT_FILTER_SUPPORT) || defined(DHD_PKT_LOGGING) /* Convert user's input in hex pattern to byte-size mask */ -static int +int wl_pattern_atoh(char *src, char *dst) { int i; @@ -2574,7 +3277,9 @@ wl_pattern_atoh(char *src, char *dst) } return i; } +#endif /* PKT_FILTER_SUPPORT || 
DHD_PKT_LOGGING */ +#ifdef PKT_FILTER_SUPPORT void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode) { @@ -2651,6 +3356,39 @@ fail: MFREE(dhd->osh, arg_org, strlen(arg) + 1); } +/* Packet filter section: extended filters have named offsets, add table here */ +typedef struct { + char *name; + uint16 base; +} wl_pfbase_t; + +static wl_pfbase_t basenames[] = { WL_PKT_FILTER_BASE_NAMES }; + +static int +wl_pkt_filter_base_parse(char *name) +{ + uint i; + char *bname, *uname; + + for (i = 0; i < ARRAYSIZE(basenames); i++) { + bname = basenames[i].name; + for (uname = name; *uname; bname++, uname++) { + if (*bname != bcm_toupper(*uname)) { + break; + } + } + if (!*uname && !*bname) { + break; + } + } + + if (i < ARRAYSIZE(basenames)) { + return basenames[i].base; + } else { + return -1; + } +} + void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg) { @@ -2662,7 +3400,7 @@ dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg) int rc; uint32 mask_size; uint32 pattern_size; - char *argv[8], * buf = 0; + char *argv[16], * buf = 0; int i = 0; char *arg_save = 0, *arg_org = 0; #define BUF_SIZE 2048 @@ -2681,7 +3419,7 @@ dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg) DHD_ERROR(("%s: malloc failed\n", __FUNCTION__)); goto fail; } - + memset(buf, 0, BUF_SIZE); memcpy(arg_save, arg, strlen(arg) + 1); if (strlen(arg) > BUF_SIZE) { @@ -2728,55 +3466,176 @@ dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg) /* Parse filter type. */ pkt_filter.type = htod32(strtoul(argv[i], NULL, 0)); - if (argv[++i] == NULL) { - DHD_ERROR(("Offset not provided\n")); + if ((pkt_filter.type == 0) || (pkt_filter.type == 1)) { + if (argv[++i] == NULL) { + DHD_ERROR(("Offset not provided\n")); + goto fail; + } + + /* Parse pattern filter offset. */ + pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Bitmask not provided\n")); + goto fail; + } + + /* Parse pattern filter mask. 
*/ + mask_size = + htod32(wl_pattern_atoh(argv[i], + (char *) pkt_filterp->u.pattern.mask_and_pattern)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Pattern not provided\n")); + goto fail; + } + + /* Parse pattern filter pattern. */ + pattern_size = + htod32(wl_pattern_atoh(argv[i], + (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size])); + + if (mask_size != pattern_size) { + DHD_ERROR(("Mask and pattern not the same size\n")); + goto fail; + } + + pkt_filter.u.pattern.size_bytes = mask_size; + buf_len += WL_PKT_FILTER_FIXED_LEN; + buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size); + + /* Keep-alive attributes are set in local variable (keep_alive_pkt), and + * then memcpy'ed into buffer (keep_alive_pktp) since there is no + * guarantee that the buffer is properly aligned. + */ + memcpy((char *)pkt_filterp, + &pkt_filter, + WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN); + } else if ((pkt_filter.type == 2) || (pkt_filter.type == 6)) { + int list_cnt = 0; + char *endptr = '\0'; + wl_pkt_filter_pattern_listel_t *pf_el = &pkt_filterp->u.patlist.patterns[0]; + + while (argv[++i] != NULL) { + /* Parse pattern filter base and offset. 
*/ + if (bcm_isdigit(*argv[i])) { + /* Numeric base */ + rc = strtoul(argv[i], &endptr, 0); + } else { + endptr = strchr(argv[i], ':'); + if (endptr) { + *endptr = '\0'; + rc = wl_pkt_filter_base_parse(argv[i]); + if (rc == -1) { + printf("Invalid base %s\n", argv[i]); + goto fail; + } + *endptr = ':'; + } else { + printf("Invalid [base:]offset format: %s\n", argv[i]); + goto fail; + } + } + + if (*endptr == ':') { + pkt_filter.u.patlist.patterns[0].base_offs = htod16(rc); + rc = strtoul(endptr + 1, &endptr, 0); + } else { + /* Must have had a numeric offset only */ + pkt_filter.u.patlist.patterns[0].base_offs = htod16(0); + } + + if (*endptr) { + printf("Invalid [base:]offset format: %s\n", argv[i]); + goto fail; + } + if (rc > 0x0000FFFF) { + printf("Offset too large\n"); + goto fail; + } + pkt_filter.u.patlist.patterns[0].rel_offs = htod16(rc); + + /* Clear match_flag (may be set in parsing which follows) */ + pkt_filter.u.patlist.patterns[0].match_flags = htod16(0); + + /* Parse pattern filter mask and pattern directly into ioctl buffer */ + if (argv[++i] == NULL) { + printf("Bitmask not provided\n"); + goto fail; + } + rc = wl_pattern_atoh(argv[i], (char*)pf_el->mask_and_data); + if (rc == -1) { + printf("Rejecting: %s\n", argv[i]); + goto fail; + } + mask_size = htod16(rc); + + if (argv[++i] == NULL) { + printf("Pattern not provided\n"); + goto fail; + } + + if (*argv[i] == '!') { + pkt_filter.u.patlist.patterns[0].match_flags = + htod16(WL_PKT_FILTER_MFLAG_NEG); + (argv[i])++; + } + if (argv[i] == '\0') { + printf("Pattern not provided\n"); + goto fail; + } + rc = wl_pattern_atoh(argv[i], (char*)&pf_el->mask_and_data[rc]); + if (rc == -1) { + printf("Rejecting: %s\n", argv[i]); + goto fail; + } + pattern_size = htod16(rc); + + if (mask_size != pattern_size) { + printf("Mask and pattern not the same size\n"); + goto fail; + } + + pkt_filter.u.patlist.patterns[0].size_bytes = mask_size; + + /* Account for the size of this pattern element */ + buf_len += 
WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc; + + /* And the pattern element fields that were put in a local for + * alignment purposes now get copied to the ioctl buffer. + */ + memcpy((char*)pf_el, &pkt_filter.u.patlist.patterns[0], + WL_PKT_FILTER_PATTERN_FIXED_LEN); + + /* Move to next element location in ioctl buffer */ + pf_el = (wl_pkt_filter_pattern_listel_t*) + ((uint8*)pf_el + WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc); + + /* Count list element */ + list_cnt++; + } + + /* Account for initial fixed size, and copy initial fixed fields */ + buf_len += WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN; + + /* Update list count and total size */ + pkt_filter.u.patlist.list_cnt = list_cnt; + pkt_filter.u.patlist.PAD1[0] = 0; + pkt_filter.u.patlist.totsize = buf + buf_len - (char*)pkt_filterp; + pkt_filter.u.patlist.totsize -= WL_PKT_FILTER_FIXED_LEN; + + memcpy((char *)pkt_filterp, &pkt_filter, + WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN); + } else { + DHD_ERROR(("Invalid filter type %d\n", pkt_filter.type)); goto fail; } - /* Parse pattern filter offset. */ - pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0)); - - if (argv[++i] == NULL) { - DHD_ERROR(("Bitmask not provided\n")); - goto fail; - } - - /* Parse pattern filter mask. */ - mask_size = - htod32(wl_pattern_atoh(argv[i], (char *) pkt_filterp->u.pattern.mask_and_pattern)); - - if (argv[++i] == NULL) { - DHD_ERROR(("Pattern not provided\n")); - goto fail; - } - - /* Parse pattern filter pattern. 
*/ - pattern_size = - htod32(wl_pattern_atoh(argv[i], - (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size])); - - if (mask_size != pattern_size) { - DHD_ERROR(("Mask and pattern not the same size\n")); - goto fail; - } - - pkt_filter.u.pattern.size_bytes = mask_size; - buf_len += WL_PKT_FILTER_FIXED_LEN; - buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size); - - /* Keep-alive attributes are set in local variable (keep_alive_pkt), and - ** then memcpy'ed into buffer (keep_alive_pktp) since there is no - ** guarantee that the buffer is properly aligned. - */ - memcpy((char *)pkt_filterp, - &pkt_filter, - WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN); - rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0); rc = rc >= 0 ? 0 : rc; if (rc) - DHD_ERROR(("%s: failed to add pktfilter %s, retcode = %d\n", + DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n", __FUNCTION__, arg, rc)); else DHD_TRACE(("%s: successfully added pktfilter %s\n", @@ -2790,7 +3649,8 @@ fail: MFREE(dhd->osh, buf, BUF_SIZE); } -void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id) +void +dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id) { int ret; @@ -2862,20 +3722,13 @@ void dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx) { int ret = 0; - int iov_len = 0; - char iovbuf[DHD_IOVAR_BUF_SIZE]; if (dhd == NULL) return; if (dhd->arp_version == 1) idx = 0; - iov_len = bcm_mkiovar("arp_table_clear", 0, 0, iovbuf, sizeof(iovbuf)); - if (!iov_len) { - DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", - __FUNCTION__, sizeof(iovbuf))); - return; - } - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx)) < 0) + ret = dhd_iovar(dhd, idx, "arp_table_clear", NULL, 0, NULL, 0, TRUE); + if (ret < 0) DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); } @@ -2883,46 +3736,29 @@ void dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx) { int ret = 0; - int iov_len = 0; - char iovbuf[DHD_IOVAR_BUF_SIZE]; if (dhd == NULL) return; if (dhd->arp_version 
== 1) idx = 0; - iov_len = bcm_mkiovar("arp_hostip_clear", 0, 0, iovbuf, sizeof(iovbuf)); - if (!iov_len) { - DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", - __FUNCTION__, sizeof(iovbuf))); - return; - } - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx)) < 0) + ret = dhd_iovar(dhd, idx, "arp_hostip_clear", NULL, 0, NULL, 0, TRUE); + if (ret < 0) DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); } void dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx) { - int iov_len = 0; - char iovbuf[DHD_IOVAR_BUF_SIZE]; - int retcode; - + int ret; if (dhd == NULL) return; if (dhd->arp_version == 1) idx = 0; - iov_len = bcm_mkiovar("arp_hostip", (char *)&ipaddr, - sizeof(ipaddr), iovbuf, sizeof(iovbuf)); - if (!iov_len) { - DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", - __FUNCTION__, sizeof(iovbuf))); - return; - } - retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx); - if (retcode) - DHD_ERROR(("%s: ARP ip addr add failed, retcode = %d\n", - __FUNCTION__, retcode)); + ret = dhd_iovar(dhd, idx, "arp_hostip", (char *)&ipaddr, sizeof(ipaddr), + NULL, 0, TRUE); + if (ret) + DHD_ERROR(("%s: ARP ip addr add failed, ret = %d\n", __FUNCTION__, ret)); else DHD_ARPOE(("%s: sARP H ipaddr entry added \n", __FUNCTION__)); @@ -2931,8 +3767,7 @@ dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx) int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx) { - int retcode, i; - int iov_len; + int ret, i; uint32 *ptr32 = buf; bool clr_bottom = FALSE; @@ -2942,13 +3777,11 @@ dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx) if (dhd->arp_version == 1) idx = 0; - iov_len = bcm_mkiovar("arp_hostip", 0, 0, buf, buflen); - BCM_REFERENCE(iov_len); - retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, buflen, FALSE, idx); - - if (retcode) { + ret = dhd_iovar(dhd, idx, "arp_hostip", NULL, 0, (char *)buf, buflen, + FALSE); + if (ret) { DHD_ERROR(("%s: ioctl 
WLC_GET_VAR error %d\n", - __FUNCTION__, retcode)); + __FUNCTION__, ret)); return -1; } @@ -3000,7 +3833,7 @@ int dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipv6addr, int idx) { int iov_len = 0; - char iovbuf[DHD_IOVAR_BUF_SIZE]; + char iovbuf[DHD_IOVAR_BUF_SIZE] = {0}; int retcode; if (dhd == NULL) @@ -3024,6 +3857,7 @@ dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipv6addr, int idx) return retcode; } + /* * Neighbor Discover Offload: enable NDO feature * Called by ipv6 event handler when interface goes down @@ -3032,7 +3866,7 @@ int dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx) { int iov_len = 0; - char iovbuf[DHD_IOVAR_BUF_SIZE]; + char iovbuf[DHD_IOVAR_BUF_SIZE] = {0}; int retcode; if (dhd == NULL) @@ -3057,6 +3891,210 @@ dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx) return retcode; } +/* Enhanced ND offload */ +uint16 +dhd_ndo_get_version(dhd_pub_t *dhdp) +{ + char iovbuf[DHD_IOVAR_BUF_SIZE]; + wl_nd_hostip_t ndo_get_ver; + int iov_len; + int retcode; + uint16 ver = 0; + + if (dhdp == NULL) { + return BCME_ERROR; + } + + memset(&iovbuf, 0, sizeof(iovbuf)); + ndo_get_ver.version = htod16(WL_ND_HOSTIP_IOV_VER); + ndo_get_ver.op_type = htod16(WL_ND_HOSTIP_OP_VER); + ndo_get_ver.length = htod32(WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16)); + ndo_get_ver.u.version = 0; + iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_get_ver, + WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16), iovbuf, sizeof(iovbuf)); + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return BCME_ERROR; + } + + retcode = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, iov_len, FALSE, 0); + if (retcode) { + DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode)); + /* ver iovar not supported. 
NDO version is 0 */ + ver = 0; + } else { + wl_nd_hostip_t *ndo_ver_ret = (wl_nd_hostip_t *)iovbuf; + + if ((dtoh16(ndo_ver_ret->version) == WL_ND_HOSTIP_IOV_VER) && + (dtoh16(ndo_ver_ret->op_type) == WL_ND_HOSTIP_OP_VER) && + (dtoh32(ndo_ver_ret->length) == WL_ND_HOSTIP_FIXED_LEN + + sizeof(uint16))) { + /* nd_hostip iovar version */ + ver = dtoh16(ndo_ver_ret->u.version); + } + + DHD_TRACE(("%s: successfully get version: %d\n", __FUNCTION__, ver)); + } + + return ver; +} + +int +dhd_ndo_add_ip_with_type(dhd_pub_t *dhdp, char *ipv6addr, uint8 type, int idx) +{ + char iovbuf[DHD_IOVAR_BUF_SIZE]; + wl_nd_hostip_t ndo_add_addr; + int iov_len; + int retcode; + + if (dhdp == NULL || ipv6addr == 0) { + return BCME_ERROR; + } + + /* wl_nd_hostip_t fixed param */ + ndo_add_addr.version = htod16(WL_ND_HOSTIP_IOV_VER); + ndo_add_addr.op_type = htod16(WL_ND_HOSTIP_OP_ADD); + ndo_add_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN); + /* wl_nd_host_ip_addr_t param for add */ + memcpy(&ndo_add_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN); + ndo_add_addr.u.host_ip.type = type; + + iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_add_addr, + WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf)); + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return BCME_ERROR; + } + + retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx); + if (retcode) { + DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode)); +#ifdef NDO_CONFIG_SUPPORT + if (retcode == BCME_NORESOURCE) { + /* number of host ip addr exceeds FW capacity, Deactivate ND offload */ + DHD_INFO(("%s: Host IP count exceed device capacity," + "ND offload deactivated\n", __FUNCTION__)); + dhdp->ndo_host_ip_overflow = TRUE; + dhd_ndo_enable(dhdp, 0); + } +#endif /* NDO_CONFIG_SUPPORT */ + } else { + DHD_TRACE(("%s: successfully added: %d\n", __FUNCTION__, retcode)); + } + + return retcode; +} + +int +dhd_ndo_remove_ip_by_addr(dhd_pub_t *dhdp, 
char *ipv6addr, int idx) +{ + char iovbuf[DHD_IOVAR_BUF_SIZE]; + wl_nd_hostip_t ndo_del_addr; + int iov_len; + int retcode; + + if (dhdp == NULL || ipv6addr == 0) { + return BCME_ERROR; + } + + /* wl_nd_hostip_t fixed param */ + ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER); + ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL); + ndo_del_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN); + /* wl_nd_host_ip_addr_t param for del */ + memcpy(&ndo_del_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN); + ndo_del_addr.u.host_ip.type = 0; /* don't care */ + + iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr, + WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf)); + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return BCME_ERROR; + } + + retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx); + if (retcode) { + DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode)); + } else { + DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode)); + } + + return retcode; +} + +int +dhd_ndo_remove_ip_by_type(dhd_pub_t *dhdp, uint8 type, int idx) +{ + char iovbuf[DHD_IOVAR_BUF_SIZE]; + wl_nd_hostip_t ndo_del_addr; + int iov_len; + int retcode; + + if (dhdp == NULL) { + return BCME_ERROR; + } + + /* wl_nd_hostip_t fixed param */ + ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER); + if (type == WL_ND_IPV6_ADDR_TYPE_UNICAST) { + ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_UC); + } else if (type == WL_ND_IPV6_ADDR_TYPE_ANYCAST) { + ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_AC); + } else { + return BCME_BADARG; + } + ndo_del_addr.length = htod32(WL_ND_HOSTIP_FIXED_LEN); + + iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr, WL_ND_HOSTIP_FIXED_LEN, + iovbuf, sizeof(iovbuf)); + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return BCME_ERROR; + } + + retcode = dhd_wl_ioctl_cmd(dhdp, 
WLC_SET_VAR, iovbuf, iov_len, TRUE, idx); + if (retcode) { + DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode)); + } else { + DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode)); + } + + return retcode; +} + +int +dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t *dhdp, int enable) +{ + char iovbuf[DHD_IOVAR_BUF_SIZE]; + int iov_len; + int retcode; + + if (dhdp == NULL) { + return BCME_ERROR; + } + + iov_len = bcm_mkiovar("nd_unsolicited_na_filter", (char *)&enable, sizeof(int), + iovbuf, sizeof(iovbuf)); + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return BCME_ERROR; + } + retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0); + if (retcode) + DHD_ERROR(("%s: failed to enable Unsolicited NA filter to %d, retcode = %d\n", + __FUNCTION__, enable, retcode)); + else { + DHD_TRACE(("%s: successfully enabled Unsolicited NA filter to %d\n", + __FUNCTION__, enable)); + } + + return retcode; +} /* @@ -3092,6 +4130,81 @@ bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval) } /* Function to estimate possible DTIM_SKIP value */ +#if defined(BCMPCIE) +int +dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval) +{ + int bcn_li_dtim = 1; /* deafult no dtim skip setting */ + int ret = -1; + int allowed_skip_dtim_cnt = 0; + + /* Check if associated */ + if (dhd_is_associated(dhd, 0, NULL) == FALSE) { + DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret)); + return bcn_li_dtim; + } + + if (dtim_period == NULL || bcn_interval == NULL) + return bcn_li_dtim; + + /* read associated AP beacon interval */ + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD, + bcn_interval, sizeof(*bcn_interval), FALSE, 0)) < 0) { + DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret)); + return bcn_li_dtim; + } + + /* read associated AP dtim setup */ + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD, + dtim_period, sizeof(*dtim_period), FALSE, 
0)) < 0) { + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); + return bcn_li_dtim; + } + + /* if not assocated just return */ + if (*dtim_period == 0) { + return bcn_li_dtim; + } + + if (dhd->max_dtim_enable) { + bcn_li_dtim = + (int) (MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval))); + if (bcn_li_dtim == 0) { + bcn_li_dtim = 1; + } + } else { + /* attemp to use platform defined dtim skip interval */ + bcn_li_dtim = dhd->suspend_bcn_li_dtim; + + /* check if sta listen interval fits into AP dtim */ + if (*dtim_period > CUSTOM_LISTEN_INTERVAL) { + /* AP DTIM to big for our Listen Interval : no dtim skiping */ + bcn_li_dtim = NO_DTIM_SKIP; + DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n", + __FUNCTION__, *dtim_period, CUSTOM_LISTEN_INTERVAL)); + return bcn_li_dtim; + } + + if (((*dtim_period) * (*bcn_interval) * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) { + allowed_skip_dtim_cnt = + MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval)); + bcn_li_dtim = + (allowed_skip_dtim_cnt != 0) ? 
allowed_skip_dtim_cnt : NO_DTIM_SKIP; + } + + if ((bcn_li_dtim * (*dtim_period)) > CUSTOM_LISTEN_INTERVAL) { + /* Round up dtim_skip to fit into STAs Listen Interval */ + bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / *dtim_period); + DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim)); + } + } + + DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n", + __FUNCTION__, *bcn_interval, bcn_li_dtim, *dtim_period, CUSTOM_LISTEN_INTERVAL)); + + return bcn_li_dtim; +} +#else /* OEM_ANDROID && BCMPCIE */ int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd) { @@ -3099,9 +4212,7 @@ dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd) int ret = -1; int dtim_period = 0; int ap_beacon = 0; -#ifndef ENABLE_MAX_DTIM_IN_SUSPEND int allowed_skip_dtim_cnt = 0; -#endif /* !ENABLE_MAX_DTIM_IN_SUSPEND */ /* Check if associated */ if (dhd_is_associated(dhd, 0, NULL) == FALSE) { DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret)); @@ -3122,41 +4233,43 @@ dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd) goto exit; } - /* if not assocated just eixt */ + /* if not assocated just exit */ if (dtim_period == 0) { goto exit; } -#ifdef ENABLE_MAX_DTIM_IN_SUSPEND - bcn_li_dtim = (int) (MAX_DTIM_ALLOWED_INTERVAL / (ap_beacon * dtim_period)); - if (bcn_li_dtim == 0) { - bcn_li_dtim = 1; - } - bcn_li_dtim = MAX(dhd->suspend_bcn_li_dtim, bcn_li_dtim); -#else /* ENABLE_MAX_DTIM_IN_SUSPEND */ - /* attemp to use platform defined dtim skip interval */ - bcn_li_dtim = dhd->suspend_bcn_li_dtim; + if (dhd->max_dtim_enable) { + bcn_li_dtim = (int) (MAX_DTIM_ALLOWED_INTERVAL / (ap_beacon * dtim_period)); + if (bcn_li_dtim == 0) { + bcn_li_dtim = 1; + } + bcn_li_dtim = MAX(dhd->suspend_bcn_li_dtim, bcn_li_dtim); + } else { + /* attemp to use platform defined dtim skip interval */ + bcn_li_dtim = dhd->suspend_bcn_li_dtim; - /* check if sta listen interval fits into AP dtim */ - if (dtim_period > CUSTOM_LISTEN_INTERVAL) { - /* AP DTIM to big for our Listen Interval : no dtim skiping */ - bcn_li_dtim = 
NO_DTIM_SKIP; - DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n", - __FUNCTION__, dtim_period, CUSTOM_LISTEN_INTERVAL)); - goto exit; - } + /* check if sta listen interval fits into AP dtim */ + if (dtim_period > CUSTOM_LISTEN_INTERVAL) { + /* AP DTIM to big for our Listen Interval : no dtim skiping */ + bcn_li_dtim = NO_DTIM_SKIP; + DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n", + __FUNCTION__, dtim_period, CUSTOM_LISTEN_INTERVAL)); + goto exit; + } - if ((dtim_period * ap_beacon * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) { - allowed_skip_dtim_cnt = MAX_DTIM_ALLOWED_INTERVAL / (dtim_period * ap_beacon); - bcn_li_dtim = (allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP; - } + if ((dtim_period * ap_beacon * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) { + allowed_skip_dtim_cnt = + MAX_DTIM_ALLOWED_INTERVAL / (dtim_period * ap_beacon); + bcn_li_dtim = + (allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP; + } - if ((bcn_li_dtim * dtim_period) > CUSTOM_LISTEN_INTERVAL) { - /* Round up dtim_skip to fit into STAs Listen Interval */ - bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_period); - DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim)); + if ((bcn_li_dtim * dtim_period) > CUSTOM_LISTEN_INTERVAL) { + /* Round up dtim_skip to fit into STAs Listen Interval */ + bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_period); + DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim)); + } } -#endif /* ENABLE_MAX_DTIM_IN_SUSPEND */ DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n", __FUNCTION__, ap_beacon, bcn_li_dtim, dtim_period, CUSTOM_LISTEN_INTERVAL)); @@ -3164,6 +4277,7 @@ dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd) exit: return bcn_li_dtim; } +#endif /* OEM_ANDROID && BCMPCIE */ /* Check if the mode supports STA MODE */ bool dhd_support_sta_mode(dhd_pub_t *dhd) @@ -3182,7 +4296,7 @@ int dhd_keep_alive_onoff(dhd_pub_t *dhd) { char buf[32] = {0}; const char *str; - wl_mkeep_alive_pkt_t 
mkeep_alive_pkt = {0}; + wl_mkeep_alive_pkt_t mkeep_alive_pkt = {0, 0, 0, 0, 0, {0}}; wl_mkeep_alive_pkt_t *mkeep_alive_pktp; int buf_len; int str_len; @@ -3218,6 +4332,82 @@ int dhd_keep_alive_onoff(dhd_pub_t *dhd) return res; } #endif /* defined(KEEP_ALIVE) */ + +#define CSCAN_TLV_TYPE_SSID_IE 'S' +/* + * SSIDs list parsing from cscan tlv list + */ +int +wl_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid, int max, int *bytes_left) +{ + char* str; + int idx = 0; + + if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) { + DHD_ERROR(("%s error paramters\n", __FUNCTION__)); + return -1; + } + str = *list_str; + while (*bytes_left > 0) { + + if (str[0] != CSCAN_TLV_TYPE_SSID_IE) { + *list_str = str; + DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0])); + return idx; + } + + /* Get proper CSCAN_TLV_TYPE_SSID_IE */ + *bytes_left -= 1; + str += 1; + ssid[idx].rssi_thresh = 0; + ssid[idx].flags = 0; + if (str[0] == 0) { + /* Broadcast SSID */ + ssid[idx].SSID_len = 0; + memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN); + *bytes_left -= 1; + str += 1; + + DHD_TRACE(("BROADCAST SCAN left=%d\n", *bytes_left)); + } + else if (str[0] <= DOT11_MAX_SSID_LEN) { + /* Get proper SSID size */ + ssid[idx].SSID_len = str[0]; + *bytes_left -= 1; + str += 1; + + /* Get SSID */ + if (ssid[idx].SSID_len > *bytes_left) { + DHD_ERROR(("%s out of memory range len=%d but left=%d\n", + __FUNCTION__, ssid[idx].SSID_len, *bytes_left)); + return -1; + } + + memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len); + + *bytes_left -= ssid[idx].SSID_len; + str += ssid[idx].SSID_len; + ssid[idx].hidden = TRUE; + + DHD_TRACE(("%s :size=%d left=%d\n", + (char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left)); + } + else { + DHD_ERROR(("### SSID size more that %d\n", str[0])); + return -1; + } + + if (idx++ > max) { + DHD_ERROR(("%s number of SSIDs more that %d\n", __FUNCTION__, idx)); + return -1; + } + } + + *list_str = str; + return idx; +} + +#if 
defined(WL_WIRELESS_EXT) /* Android ComboSCAN support */ /* @@ -3318,78 +4508,6 @@ wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list, return idx; } -/* - * SSIDs list parsing from cscan tlv list - */ -int -wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid, int max, int *bytes_left) -{ - char* str; - int idx = 0; - - if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) { - DHD_ERROR(("%s error paramters\n", __FUNCTION__)); - return -1; - } - str = *list_str; - while (*bytes_left > 0) { - - if (str[0] != CSCAN_TLV_TYPE_SSID_IE) { - *list_str = str; - DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0])); - return idx; - } - - /* Get proper CSCAN_TLV_TYPE_SSID_IE */ - *bytes_left -= 1; - str += 1; - - if (str[0] == 0) { - /* Broadcast SSID */ - ssid[idx].SSID_len = 0; - memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN); - *bytes_left -= 1; - str += 1; - - DHD_TRACE(("BROADCAST SCAN left=%d\n", *bytes_left)); - } - else if (str[0] <= DOT11_MAX_SSID_LEN) { - /* Get proper SSID size */ - ssid[idx].SSID_len = str[0]; - *bytes_left -= 1; - str += 1; - - /* Get SSID */ - if (ssid[idx].SSID_len > *bytes_left) { - DHD_ERROR(("%s out of memory range len=%d but left=%d\n", - __FUNCTION__, ssid[idx].SSID_len, *bytes_left)); - return -1; - } - - memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len); - - *bytes_left -= ssid[idx].SSID_len; - str += ssid[idx].SSID_len; - ssid[idx].hidden = TRUE; - - DHD_TRACE(("%s :size=%d left=%d\n", - (char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left)); - } - else { - DHD_ERROR(("### SSID size more that %d\n", str[0])); - return -1; - } - - if (idx++ > max) { - DHD_ERROR(("%s number of SSIDs more that %d\n", __FUNCTION__, idx)); - return -1; - } - } - - *list_str = str; - return idx; -} - /* Parse a comma-separated list from list_str into ssid array, starting * at index idx. Max specifies size of the ssid array. 
Parses ssids * and returns updated idx; if idx >= max not all fit, the excess have @@ -3471,6 +4589,56 @@ wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num) return num; } +#endif + +#if defined(TRAFFIC_MGMT_DWM) +static int traffic_mgmt_add_dwm_filter(dhd_pub_t *dhd, + trf_mgmt_filter_list_t * trf_mgmt_filter_list, int len) +{ + int ret = 0; + uint32 i; + trf_mgmt_filter_t *trf_mgmt_filter; + uint8 dwm_tbl_entry; + uint32 dscp = 0; + uint16 dwm_filter_enabled = 0; + + + /* Check parameter length is adequate */ + if (len < (OFFSETOF(trf_mgmt_filter_list_t, filter) + + trf_mgmt_filter_list->num_filters * sizeof(trf_mgmt_filter_t))) { + ret = BCME_BUFTOOSHORT; + return ret; + } + + bzero(&dhd->dhd_tm_dwm_tbl, sizeof(dhd_trf_mgmt_dwm_tbl_t)); + + for (i = 0; i < trf_mgmt_filter_list->num_filters; i++) { + trf_mgmt_filter = &trf_mgmt_filter_list->filter[i]; + + dwm_filter_enabled = (trf_mgmt_filter->flags & TRF_FILTER_DWM); + + if (dwm_filter_enabled) { + dscp = trf_mgmt_filter->dscp; + if (dscp >= DHD_DWM_TBL_SIZE) { + ret = BCME_BADARG; + return ret; + } + } + + dhd->dhd_tm_dwm_tbl.dhd_dwm_enabled = 1; + /* set WMM AC bits */ + dwm_tbl_entry = (uint8) trf_mgmt_filter->priority; + DHD_TRF_MGMT_DWM_SET_FILTER(dwm_tbl_entry); + + /* set favored bits */ + if (trf_mgmt_filter->flags & TRF_FILTER_FAVORED) + DHD_TRF_MGMT_DWM_SET_FAVORED(dwm_tbl_entry); + + dhd->dhd_tm_dwm_tbl.dhd_dwm_tbl[dscp] = dwm_tbl_entry; + } + return ret; +} +#endif /* Given filename and download type, returns a buffer pointer and length * for download to f/w. Type can be FW or NVRAM. 
@@ -3493,7 +4661,8 @@ int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t com len = dhd->cached_fw_length; buf = dhd->cached_fw; } - } else if (component == NVRAM) { + } + else if (component == NVRAM) { if (dhd->cached_nvram_length) { len = dhd->cached_nvram_length; buf = dhd->cached_nvram; @@ -3507,7 +4676,7 @@ int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t com } else { return ret; } -#endif +#endif /* CACHE_FW_IMAGES */ /* No Valid cache found on this call */ if (!len) { file_len = *length; @@ -3529,16 +4698,29 @@ int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t com } /* Download image */ - len = dhd_os_get_image_block(buf, file_len, image); +#if defined(BCMEMBEDIMAGE) && defined(DHD_EFI) + if (!image) { + memcpy(buf, nvram_arr, sizeof(nvram_arr)); + len = sizeof(nvram_arr); + } else { + len = dhd_os_get_image_block((char *)buf, file_len, image); + if ((len <= 0 || len > file_len)) { + MFREE(dhd->osh, buf, file_len); + goto err; + } + } +#else + len = dhd_os_get_image_block((char *)buf, file_len, image); if ((len <= 0 || len > file_len)) { MFREE(dhd->osh, buf, file_len); goto err; } +#endif /* DHD_EFI */ } ret = BCME_OK; *length = len; - *buffer = buf; + *buffer = (char *)buf; /* Cache if first call. 
*/ #ifdef CACHE_FW_IMAGES @@ -3547,7 +4729,8 @@ int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t com dhd->cached_fw = buf; dhd->cached_fw_length = len; } - } else if (component == NVRAM) { + } + else if (component == NVRAM) { if (!dhd->cached_nvram_length) { dhd->cached_nvram = buf; dhd->cached_nvram_length = len; @@ -3559,7 +4742,7 @@ int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t com dhd->cached_clm_length = len; } } -#endif +#endif /* CACHE_FW_IMAGES */ err: if (image) @@ -3585,9 +4768,9 @@ dhd_download_2_dongle(dhd_pub_t *dhd, char *iovar, uint16 flag, uint16 dload_typ dload_ptr->dload_type = dload_type; dload_ptr->len = htod32(len - dload_data_offset); dload_ptr->crc = 0; - len = len + 8 - (len%8); + len = ROUNDUP(len, 8); - iovar_len = bcm_mkiovar(iovar, dload_buf, + iovar_len = bcm_mkiovar(iovar, (char *)dload_buf, (uint)len, iovar_buf, sizeof(iovar_buf)); if (iovar_len == 0) { DHD_ERROR(("%s: insufficient buffer space passed to bcm_mkiovar for '%s' \n", @@ -3602,7 +4785,8 @@ dhd_download_2_dongle(dhd_pub_t *dhd, char *iovar, uint16 flag, uint16 dload_typ } int -dhd_download_clm_blob(dhd_pub_t *dhd, unsigned char *image, uint32 len) +dhd_download_blob(dhd_pub_t *dhd, unsigned char *image, + uint32 len, char *iovar) { int chunk_len; int size2alloc; @@ -3612,6 +4796,7 @@ dhd_download_clm_blob(dhd_pub_t *dhd, unsigned char *image, uint32 len) data_offset = OFFSETOF(wl_dload_data_t, data); size2alloc = data_offset + MAX_CHUNK_LEN; + size2alloc = ROUNDUP(size2alloc, 8); if ((new_buf = (unsigned char *)MALLOCZ(dhd->osh, size2alloc)) != NULL) { do { @@ -3627,7 +4812,7 @@ dhd_download_clm_blob(dhd_pub_t *dhd, unsigned char *image, uint32 len) if (len - chunk_len == 0) dl_flag |= DL_END; - err = dhd_download_2_dongle(dhd, "clmload", dl_flag, DL_TYPE_CLM, + err = dhd_download_2_dongle(dhd, iovar, dl_flag, DL_TYPE_CLM, new_buf, data_offset + chunk_len); dl_flag &= ~DL_BEGIN; @@ -3645,6 +4830,31 @@ exit: return 
err; } +int +dhd_check_current_clm_data(dhd_pub_t *dhd) +{ + char iovbuf[WLC_IOCTL_SMLEN] = {0}; + wl_country_t *cspec; + int err = BCME_OK; + + bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf)); + err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0); + if (err) { + DHD_ERROR(("%s: country code get failed\n", __FUNCTION__)); + return err; + } + + cspec = (wl_country_t *)iovbuf; + if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) == 0) { + DHD_ERROR(("%s: ----- This FW is not included CLM data -----\n", + __FUNCTION__)); + return FALSE; + } + DHD_ERROR(("%s: ----- This FW is included CLM data -----\n", + __FUNCTION__)); + return TRUE; +} + int dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path) { @@ -3652,8 +4862,8 @@ dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path) int len; unsigned char *imgbuf = NULL; int err = BCME_OK; - char iovbuf[WLC_IOCTL_SMLEN]; - wl_country_t *cspec; + char iovbuf[WLC_IOCTL_SMLEN] = {0}; + int status = FALSE; if (clm_path[0] != '\0') { if (strlen(clm_path) > MOD_PARAM_PATHLEN) { @@ -3675,29 +4885,43 @@ dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path) imgbuf = dhd_os_open_image((char *)clm_blob_path); if (imgbuf == NULL) { printf("%s: Ignore clm file %s\n", __FUNCTION__, clm_path); +#if defined(DHD_BLOB_EXISTENCE_CHECK) + if (dhd->is_blob) { + err = BCME_ERROR; + } else { + status = dhd_check_current_clm_data(dhd); + if (status == TRUE) { + err = BCME_OK; + } else { + err = BCME_ERROR; + } + } +#endif /* DHD_BLOB_EXISTENCE_CHECK */ goto exit; } len = dhd_os_get_image_size(imgbuf); if ((len > 0) && (len < MAX_CLM_BUF_SIZE) && imgbuf) { - bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf)); - err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0); - if (err) { - DHD_ERROR(("%s: country code get failed\n", __FUNCTION__)); - goto exit; - } - - cspec = (wl_country_t *)iovbuf; - if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) != 0) { + status = 
dhd_check_current_clm_data(dhd); + if (status == TRUE) { +#if defined(DHD_BLOB_EXISTENCE_CHECK) + if (dhd->op_mode != DHD_FLAG_MFG_MODE) { + if (dhd->is_blob) { + err = BCME_ERROR; + } + goto exit; + } +#else DHD_ERROR(("%s: CLM already exist in F/W, " "new CLM data will be added to the end of existing CLM data!\n", __FUNCTION__)); +#endif /* DHD_BLOB_EXISTENCE_CHECK */ } /* Found blob file. Download the file */ DHD_ERROR(("clm file download from %s \n", clm_blob_path)); - err = dhd_download_clm_blob(dhd, imgbuf, len); + err = dhd_download_blob(dhd, imgbuf, len, "clmload"); if (err) { DHD_ERROR(("%s: CLM download failed err=%d\n", __FUNCTION__, err)); /* Retrieve clmload_status and print */ @@ -3720,22 +4944,12 @@ dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path) } } else { DHD_INFO(("Skipping the clm download. len:%d memblk:%p \n", len, imgbuf)); -#ifdef DHD_USE_CLMINFO_PARSER - err = BCME_ERROR; - goto exit; -#endif /* DHD_USE_CLMINFO_PARSER */ } /* Verify country code */ - bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf)); - err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0); - if (err) { - DHD_ERROR(("%s: country code get failed\n", __FUNCTION__)); - goto exit; - } + status = dhd_check_current_clm_data(dhd); - cspec = (wl_country_t *)iovbuf; - if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) == 0) { + if (status != TRUE) { /* Country code not initialized or CLM download not proper */ DHD_ERROR(("country code not initialized\n")); err = BCME_ERROR; @@ -3756,6 +4970,11 @@ void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length) #endif MFREE(dhd->osh, buffer, length); } + +#if defined(DHD_8021X_DUMP) +#define EAP_PRINT(str) \ + DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: " str "\n", \ + ifname, direction ? 
"TX" : "RX")); /* Parse EAPOL 4 way handshake messages */ void dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction) @@ -3763,8 +4982,43 @@ dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction) unsigned char type; int pair, ack, mic, kerr, req, sec, install; unsigned short us_tmp; - type = dump_data[18]; - if (type == 2 || type == 254) { + + type = dump_data[15]; + if (type == 0) { + if ((dump_data[22] == 1) && (dump_data[18] == 1)) { + EAP_PRINT("EAP Packet, Request, Identity"); + } else if ((dump_data[22] == 1) && (dump_data[18] == 2)) { + EAP_PRINT("EAP Packet, Response, Identity"); + } else if (dump_data[22] == 254) { + if (dump_data[30] == 1) { + EAP_PRINT("EAP Packet, WSC Start"); + } else if (dump_data[30] == 4) { + if (dump_data[41] == 4) { + EAP_PRINT("EAP Packet, WPS M1"); + } else if (dump_data[41] == 5) { + EAP_PRINT("EAP Packet, WPS M2"); + } else if (dump_data[41] == 7) { + EAP_PRINT("EAP Packet, WPS M3"); + } else if (dump_data[41] == 8) { + EAP_PRINT("EAP Packet, WPS M4"); + } else if (dump_data[41] == 9) { + EAP_PRINT("EAP Packet, WPS M5"); + } else if (dump_data[41] == 10) { + EAP_PRINT("EAP Packet, WPS M6"); + } else if (dump_data[41] == 11) { + EAP_PRINT("EAP Packet, WPS M7"); + } else if (dump_data[41] == 12) { + EAP_PRINT("EAP Packet, WPS M8"); + } + } else if (dump_data[30] == 5) { + EAP_PRINT("EAP Packet, WSC Done"); + } + } else { + DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n", + ifname, direction ? 
"TX" : "RX", + dump_data[14], dump_data[15], dump_data[30])); + } + } else if (type == 3 && dump_data[18] == 2) { us_tmp = (dump_data[19] << 8) | dump_data[20]; pair = 0 != (us_tmp & 0x08); ack = 0 != (us_tmp & 0x80); @@ -3773,18 +5027,15 @@ dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction) req = 0 != (us_tmp & 0x800); sec = 0 != (us_tmp & 0x200); install = 0 != (us_tmp & 0x40); + if (!sec && !mic && ack && !install && pair && !kerr && !req) { - DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s] : M1 of 4way\n", - ifname, direction ? "TX" : "RX")); + EAP_PRINT("EAPOL Packet, 4-way handshake, M1"); } else if (pair && !install && !ack && mic && !sec && !kerr && !req) { - DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s] : M2 of 4way\n", - ifname, direction ? "TX" : "RX")); + EAP_PRINT("EAPOL Packet, 4-way handshake, M2"); } else if (pair && ack && mic && sec && !kerr && !req) { - DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s] : M3 of 4way\n", - ifname, direction ? "TX" : "RX")); + EAP_PRINT("EAPOL Packet, 4-way handshake, M3"); } else if (pair && !install && !ack && mic && sec && !req && !kerr) { - DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s] : M4 of 4way\n", - ifname, direction ? "TX" : "RX")); + EAP_PRINT("EAPOL Packet, 4-way handshake, M4"); } else { DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n", ifname, direction ? 
"TX" : "RX", @@ -3796,3 +5047,933 @@ dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction) dump_data[14], dump_data[15], dump_data[30])); } } +#endif /* DHD_8021X_DUMP */ + +#ifdef REPORT_FATAL_TIMEOUTS +void init_dhd_timeouts(dhd_pub_t *pub) +{ + pub->timeout_info = MALLOC(pub->osh, sizeof(timeout_info_t)); + if (pub->timeout_info == NULL) { + DHD_ERROR(("%s: Failed to alloc timeout_info\n", __FUNCTION__)); + } else { + DHD_INFO(("Initializing dhd_timeouts\n")); + pub->timeout_info->scan_timer_lock = dhd_os_spin_lock_init(pub->osh); + pub->timeout_info->join_timer_lock = dhd_os_spin_lock_init(pub->osh); + pub->timeout_info->bus_timer_lock = dhd_os_spin_lock_init(pub->osh); + pub->timeout_info->cmd_timer_lock = dhd_os_spin_lock_init(pub->osh); + pub->timeout_info->scan_timeout_val = SCAN_TIMEOUT_DEFAULT; + pub->timeout_info->join_timeout_val = JOIN_TIMEOUT_DEFAULT; + pub->timeout_info->cmd_timeout_val = CMD_TIMEOUT_DEFAULT; + pub->timeout_info->bus_timeout_val = BUS_TIMEOUT_DEFAULT; + pub->timeout_info->scan_timer_active = FALSE; + pub->timeout_info->join_timer_active = FALSE; + pub->timeout_info->cmd_timer_active = FALSE; + pub->timeout_info->bus_timer_active = FALSE; + pub->timeout_info->cmd_join_error = WLC_SSID_MASK; + pub->timeout_info->cmd_request_id = 0; + } +} + +void +deinit_dhd_timeouts(dhd_pub_t *pub) +{ + /* stop the join, scan bus, cmd timers + * as failing to do so may cause a kernel panic if + * an rmmod is done + */ + if (!pub->timeout_info) { + DHD_ERROR(("timeout_info pointer is NULL\n")); + ASSERT(0); + return; + } + if (dhd_stop_scan_timer(pub)) { + DHD_ERROR(("dhd_stop_scan_timer failed\n")); + ASSERT(0); + } + if (dhd_stop_bus_timer(pub)) { + DHD_ERROR(("dhd_stop_bus_timer failed\n")); + ASSERT(0); + } + if (dhd_stop_cmd_timer(pub)) { + DHD_ERROR(("dhd_stop_cmd_timer failed\n")); + ASSERT(0); + } + if (dhd_stop_join_timer(pub)) { + DHD_ERROR(("dhd_stop_join_timer failed\n")); + ASSERT(0); + } + + 
dhd_os_spin_lock_deinit(pub->osh, pub->timeout_info->scan_timer_lock); + dhd_os_spin_lock_deinit(pub->osh, pub->timeout_info->join_timer_lock); + dhd_os_spin_lock_deinit(pub->osh, pub->timeout_info->bus_timer_lock); + dhd_os_spin_lock_deinit(pub->osh, pub->timeout_info->cmd_timer_lock); + MFREE(pub->osh, pub->timeout_info, sizeof(timeout_info_t)); + pub->timeout_info = NULL; +} + +static void +dhd_cmd_timeout(void *ctx) +{ + dhd_pub_t *pub = (dhd_pub_t *)ctx; + unsigned long flags; + + if (!pub->timeout_info) { + DHD_ERROR(("DHD: timeout_info NULL\n")); + ASSERT(0); + return; + } + + DHD_TIMER_LOCK(pub->timeout_info->cmd_timer_lock, flags); + if (pub->timeout_info && pub->timeout_info->cmd_timer_active) { + DHD_ERROR(("\nERROR COMMAND TIMEOUT TO:%d\n", pub->timeout_info->cmd_timeout_val)); + DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags); +#ifdef PCIE_OOB + /* Assert device_wake so that UART_Rx is available */ + if (dhd_bus_set_device_wake(pub->bus, TRUE)) { + DHD_ERROR(("%s: dhd_bus_set_device_wake() failed\n", __FUNCTION__)); + ASSERT(0); + } +#endif /* PCIE_OOB */ + if (dhd_stop_cmd_timer(pub)) { + DHD_ERROR(("%s: dhd_stop_cmd_timer() failed\n", __FUNCTION__)); + ASSERT(0); + } + dhd_wakeup_ioctl_event(pub, IOCTL_RETURN_ON_ERROR); + if (!dhd_query_bus_erros(pub)) + dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_COMMAND_TO); + } else { + DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags); + } +} + +int +dhd_start_cmd_timer(dhd_pub_t *pub) +{ + int ret = BCME_OK; + unsigned long flags = 0; + uint32 cmd_to_ms; + + if (!pub->timeout_info) { + DHD_ERROR(("DHD: timeout_info NULL\n")); + ret = BCME_ERROR; + ASSERT(0); + goto exit_null; + } + DHD_TIMER_LOCK(pub->timeout_info->cmd_timer_lock, flags); + cmd_to_ms = pub->timeout_info->cmd_timeout_val; + + if (pub->timeout_info->cmd_timeout_val == 0) { + /* Disable Command timer timeout */ + DHD_INFO(("DHD: Command Timeout Disabled\n")); + goto exit; + } + if (pub->timeout_info->cmd_timer_active) { + 
DHD_ERROR(("%s:Timer already active\n", __FUNCTION__)); + ret = BCME_ERROR; + ASSERT(0); + } else { + pub->timeout_info->cmd_timer = osl_timer_init(pub->osh, + "cmd_timer", dhd_cmd_timeout, pub); + osl_timer_update(pub->osh, pub->timeout_info->cmd_timer, + cmd_to_ms, 0); + pub->timeout_info->cmd_timer_active = TRUE; + } + if (ret == BCME_OK) { + DHD_INFO(("%s Cmd Timer started\n", __FUNCTION__)); + } +exit: + DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags); +exit_null: + return ret; +} + +int +dhd_stop_cmd_timer(dhd_pub_t *pub) +{ + int ret = BCME_OK; + unsigned long flags = 0; + + if (!pub->timeout_info) { + DHD_ERROR(("DHD: timeout_info NULL\n")); + ret = BCME_ERROR; + ASSERT(0); + goto exit; + } + DHD_TIMER_LOCK(pub->timeout_info->cmd_timer_lock, flags); + + if (pub->timeout_info->cmd_timer_active) { + osl_timer_del(pub->osh, pub->timeout_info->cmd_timer); + pub->timeout_info->cmd_timer_active = FALSE; + } + else { + DHD_INFO(("DHD: CMD timer is not active\n")); + } + if (ret == BCME_OK) { + DHD_INFO(("%s Cmd Timer Stopped\n", __FUNCTION__)); + } + DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags); +exit: + return ret; +} + +static int +__dhd_stop_join_timer(dhd_pub_t *pub) +{ + int ret = BCME_OK; + if (!pub->timeout_info) { + DHD_ERROR(("DHD: timeout_info NULL\n")); + ASSERT(0); + return BCME_ERROR; + } + + if (pub->timeout_info->join_timer_active) { + osl_timer_del(pub->osh, pub->timeout_info->join_timer); + pub->timeout_info->join_timer_active = FALSE; + } else { + DHD_INFO(("DHD: JOIN timer is not active\n")); + } + if (ret == BCME_OK) { + DHD_INFO(("%s: Join Timer Stopped\n", __FUNCTION__)); + } + return ret; +} + +static void +dhd_join_timeout(void *ctx) +{ + dhd_pub_t *pub = (dhd_pub_t *)ctx; + unsigned long flags; + + if (!pub->timeout_info) { + DHD_ERROR(("DHD: timeout_info NULL\n")); + ASSERT(0); + return; + } + + DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags); + if (pub->timeout_info->join_timer_active) { + 
DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags); + if (dhd_stop_join_timer(pub)) { + DHD_ERROR(("%s: dhd_stop_join_timer() failed\n", __FUNCTION__)); + ASSERT(0); + } + if (pub->timeout_info->cmd_join_error) { + DHD_ERROR(("\nERROR JOIN TIMEOUT TO:%d:0x%x\n", + pub->timeout_info->join_timeout_val, + pub->timeout_info->cmd_join_error)); +#ifdef DHD_FW_COREDUMP + /* collect core dump and crash */ + pub->memdump_enabled = DUMP_MEMFILE_BUGON; + pub->memdump_type = DUMP_TYPE_JOIN_TIMEOUT; + dhd_bus_mem_dump(pub); +#endif /* DHD_FW_COREDUMP */ + + } + } else { + DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags); + } +} + +int +dhd_start_join_timer(dhd_pub_t *pub) +{ + int ret = BCME_OK; + unsigned long flags = 0; + uint32 join_to_ms; + + if (!pub->timeout_info) { + DHD_ERROR(("DHD: timeout_info NULL\n")); + ret = BCME_ERROR; + ASSERT(0); + goto exit; + } + + join_to_ms = pub->timeout_info->join_timeout_val; + DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags); + if (pub->timeout_info->join_timer_active) { + DHD_ERROR(("%s:Stoping active timer\n", __FUNCTION__)); + __dhd_stop_join_timer(pub); + } + if (pub->timeout_info->join_timeout_val == 0) { + /* Disable Join timer timeout */ + DHD_INFO(("DHD: Join Timeout Disabled\n")); + } else { + pub->timeout_info->join_timer = osl_timer_init(pub->osh, + "join_timer", dhd_join_timeout, pub); + osl_timer_update(pub->osh, pub->timeout_info->join_timer, join_to_ms, 0); + pub->timeout_info->join_timer_active = TRUE; + pub->timeout_info->cmd_join_error |= WLC_SSID_MASK; + } + if (ret == BCME_OK) { + DHD_INFO(("%s:Join Timer started 0x%x\n", __FUNCTION__, + pub->timeout_info->cmd_join_error)); + } + DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags); +exit: + return ret; +} + +int +dhd_stop_join_timer(dhd_pub_t *pub) +{ + int ret = BCME_OK; + unsigned long flags; + + DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags); + ret = __dhd_stop_join_timer(pub); + 
DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags); + return ret; +} + +static void +dhd_scan_timeout(void *ctx) +{ + dhd_pub_t *pub = (dhd_pub_t *)ctx; + unsigned long flags; + + if (pub->timeout_info == NULL) { + DHD_ERROR(("timeout_info pointer is NULL\n")); + ASSERT(0); + return; + } + + DHD_TIMER_LOCK(pub->timeout_info->scan_timer_lock, flags); + if (pub->timeout_info && pub->timeout_info->scan_timer_active) { + DHD_ERROR(("\nERROR SCAN TIMEOUT TO:%d\n", pub->timeout_info->scan_timeout_val)); + DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags); + dhd_stop_scan_timer(pub); + if (!dhd_query_bus_erros(pub)) + dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_SCAN_TO); + } else { + DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags); + } +} + +int +dhd_start_scan_timer(dhd_pub_t *pub) +{ + int ret = BCME_OK; + unsigned long flags = 0; + uint32 scan_to_ms; + + if (!pub->timeout_info) { + DHD_ERROR(("DHD: timeout_info NULL\n")); + ret = BCME_ERROR; + ASSERT(0); + goto exit_null; + } + DHD_TIMER_LOCK(pub->timeout_info->scan_timer_lock, flags); + scan_to_ms = pub->timeout_info->scan_timeout_val; + + if (pub->timeout_info->scan_timer_active) { + /* NOTE : New scan timeout value will be effective + * only once current scan is completed. 
+ */ + DHD_ERROR(("%s:Timer already active\n", __FUNCTION__)); + ret = BCME_ERROR; + goto exit; + } + + if (pub->timeout_info->scan_timeout_val == 0) { + /* Disable Scan timer timeout */ + DHD_INFO(("DHD: Scan Timeout Disabled\n")); + } else { + pub->timeout_info->scan_timer = osl_timer_init(pub->osh, "scan_timer", + dhd_scan_timeout, pub); + pub->timeout_info->scan_timer_active = TRUE; + osl_timer_update(pub->osh, pub->timeout_info->scan_timer, scan_to_ms, 0); + } + if (ret == BCME_OK) { + DHD_INFO(("%s Scan Timer started\n", __FUNCTION__)); + } +exit: + DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags); +exit_null: + return ret; +} + +int +dhd_stop_scan_timer(dhd_pub_t *pub) +{ + int ret = BCME_OK; + unsigned long flags = 0; + + if (!pub->timeout_info) { + DHD_ERROR(("DHD: timeout_info NULL\n")); + ret = BCME_ERROR; + ASSERT(0); + goto exit; + } + DHD_TIMER_LOCK(pub->timeout_info->scan_timer_lock, flags); + + if (pub->timeout_info->scan_timer_active) { + osl_timer_del(pub->osh, pub->timeout_info->scan_timer); + pub->timeout_info->scan_timer_active = FALSE; + } + else { + DHD_INFO(("DHD: SCAN timer is not active\n")); + } + + if (ret == BCME_OK) { + DHD_INFO(("%s Scan Timer Stopped\n", __FUNCTION__)); + } + DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags); +exit: + return ret; +} + +static void +dhd_bus_timeout(void *ctx) +{ + dhd_pub_t *pub = (dhd_pub_t *)ctx; + unsigned long flags; + + if (pub->timeout_info == NULL) { + DHD_ERROR(("timeout_info pointer is NULL\n")); + ASSERT(0); + return; + } + + DHD_TIMER_LOCK(pub->timeout_info->bus_timer_lock, flags); + if (pub->timeout_info->bus_timer_active) { + DHD_ERROR(("\nERROR BUS TIMEOUT TO:%d\n", pub->timeout_info->bus_timeout_val)); + DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags); +#ifdef PCIE_OOB + /* Assert device_wake so that UART_Rx is available */ + if (dhd_bus_set_device_wake(pub->bus, TRUE)) { + DHD_ERROR(("%s: dhd_bus_set_device_wake() failed\n", __FUNCTION__)); + ASSERT(0); 
+ } +#endif /* PCIE_OOB */ + if (dhd_stop_bus_timer(pub)) { + DHD_ERROR(("%s: dhd_stop_bus_timer() failed\n", __FUNCTION__)); + ASSERT(0); + } + if (!dhd_query_bus_erros(pub)) + dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_OQS_TO); + } else { + DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags); + } +} + +int +dhd_start_bus_timer(dhd_pub_t *pub) +{ + int ret = BCME_OK; + unsigned long flags = 0; + uint32 bus_to_ms; + + if (!pub->timeout_info) { + DHD_ERROR(("DHD: timeout_info NULL\n")); + ret = BCME_ERROR; + ASSERT(0); + goto exit_null; + } + DHD_TIMER_LOCK(pub->timeout_info->bus_timer_lock, flags); + bus_to_ms = pub->timeout_info->bus_timeout_val; + + if (pub->timeout_info->bus_timeout_val == 0) { + /* Disable Bus timer timeout */ + DHD_INFO(("DHD: Bus Timeout Disabled\n")); + goto exit; + } + if (pub->timeout_info->bus_timer_active) { + DHD_ERROR(("%s:Timer already active\n", __FUNCTION__)); + ret = BCME_ERROR; + ASSERT(0); + } else { + pub->timeout_info->bus_timer = osl_timer_init(pub->osh, + "bus_timer", dhd_bus_timeout, pub); + pub->timeout_info->bus_timer_active = TRUE; + osl_timer_update(pub->osh, pub->timeout_info->bus_timer, bus_to_ms, 0); + } + if (ret == BCME_OK) { + DHD_INFO(("%s: BUS Timer started\n", __FUNCTION__)); + } +exit: + DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags); +exit_null: + return ret; +} + +int +dhd_stop_bus_timer(dhd_pub_t *pub) +{ + int ret = BCME_OK; + unsigned long flags = 0; + + if (!pub->timeout_info) { + DHD_ERROR(("DHD: timeout_info NULL\n")); + ret = BCME_ERROR; + ASSERT(0); + goto exit; + } + DHD_TIMER_LOCK(pub->timeout_info->bus_timer_lock, flags); + + if (pub->timeout_info->bus_timer_active) { + osl_timer_del(pub->osh, pub->timeout_info->bus_timer); + pub->timeout_info->bus_timer_active = FALSE; + } + else { + DHD_INFO(("DHD: BUS timer is not active\n")); + } + if (ret == BCME_OK) { + DHD_INFO(("%s: Bus Timer Stopped\n", __FUNCTION__)); + } + DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, 
flags); +exit: + return ret; +} + +int +dhd_set_request_id(dhd_pub_t *pub, uint16 id, uint32 cmd) +{ + DHD_INFO(("%s: id:%d\n", __FUNCTION__, id)); + if (pub->timeout_info) { + pub->timeout_info->cmd_request_id = id; + pub->timeout_info->cmd = cmd; + return BCME_OK; + } else { + return BCME_ERROR; + } +} + +uint16 +dhd_get_request_id(dhd_pub_t *pub) +{ + if (pub->timeout_info) { + return (pub->timeout_info->cmd_request_id); + } else { + return 0; + } +} + +void +dhd_set_join_error(dhd_pub_t *pub, uint32 mask) +{ + DHD_INFO(("Setting join Error %d\n", mask)); + if (pub->timeout_info) { + pub->timeout_info->cmd_join_error |= mask; + } +} + +void +dhd_clear_join_error(dhd_pub_t *pub, uint32 mask) +{ + DHD_INFO(("clear join Error %d\n", mask)); + if (pub->timeout_info) { + pub->timeout_info->cmd_join_error &= ~mask; + } +} + +void +dhd_get_scan_to_val(dhd_pub_t *pub, uint32 *to_val) +{ + if (pub->timeout_info) { + *to_val = pub->timeout_info->scan_timeout_val; + } else { + *to_val = 0; + } +} + +void +dhd_set_scan_to_val(dhd_pub_t *pub, uint32 to_val) +{ + if (pub->timeout_info) { + DHD_INFO(("Setting TO val:%d\n", to_val)); + pub->timeout_info->scan_timeout_val = to_val; + } +} + +void +dhd_get_join_to_val(dhd_pub_t *pub, uint32 *to_val) +{ + if (pub->timeout_info) { + *to_val = pub->timeout_info->join_timeout_val; + } else { + *to_val = 0; + } +} + +void +dhd_set_join_to_val(dhd_pub_t *pub, uint32 to_val) +{ + if (pub->timeout_info) { + DHD_INFO(("Setting TO val:%d\n", to_val)); + pub->timeout_info->join_timeout_val = to_val; + } +} + +void +dhd_get_cmd_to_val(dhd_pub_t *pub, uint32 *to_val) +{ + if (pub->timeout_info) { + *to_val = pub->timeout_info->cmd_timeout_val; + } else { + *to_val = 0; + } +} + +void +dhd_set_cmd_to_val(dhd_pub_t *pub, uint32 to_val) +{ + if (pub->timeout_info) { + DHD_INFO(("Setting TO val:%d\n", to_val)); + pub->timeout_info->cmd_timeout_val = to_val; + } +} + +void +dhd_get_bus_to_val(dhd_pub_t *pub, uint32 *to_val) +{ + if 
(pub->timeout_info) { + *to_val = pub->timeout_info->bus_timeout_val; + } else { + *to_val = 0; + } +} + +void +dhd_set_bus_to_val(dhd_pub_t *pub, uint32 to_val) +{ + if (pub->timeout_info) { + DHD_INFO(("Setting TO val:%d\n", to_val)); + pub->timeout_info->bus_timeout_val = to_val; + } +} +#endif /* REPORT_FATAL_TIMEOUTS */ + +#ifdef SHOW_LOGTRACE +int +dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size, + dhd_event_log_t *event_log) +{ + logstr_header_t *hdr = NULL; + uint32 *lognums = NULL; + char *logstrs = NULL; + int ram_index = 0; + char **fmts; + int num_fmts = 0; + int32 i = 0; + + /* Remember header from the logstrs.bin file */ + hdr = (logstr_header_t *) (raw_fmts + logstrs_size - + sizeof(logstr_header_t)); + + if (hdr->log_magic == LOGSTRS_MAGIC) { + /* + * logstrs.bin start with header. + */ + num_fmts = hdr->rom_logstrs_offset / sizeof(uint32); + ram_index = (hdr->ram_lognums_offset - + hdr->rom_lognums_offset) / sizeof(uint32); + lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset]; + logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset]; + } else { + /* + * Legacy logstrs.bin format without header. + */ + num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32); + if (num_fmts == 0) { + /* Legacy ROM/RAM logstrs.bin format: + * - ROM 'lognums' section + * - RAM 'lognums' section + * - ROM 'logstrs' section. + * - RAM 'logstrs' section. + * + * 'lognums' is an array of indexes for the strings in the + * 'logstrs' section. The first uint32 is 0 (index of first + * string in ROM 'logstrs' section). + * + * The 4324b5 is the only ROM that uses this legacy format. Use the + * fixed number of ROM fmtnums to find the start of the RAM + * 'lognums' section. Use the fixed first ROM string ("Con\n") to + * find the ROM 'logstrs' section. 
+ */ + #define NUM_4324B5_ROM_FMTS 186 + #define FIRST_4324B5_ROM_LOGSTR "Con\n" + ram_index = NUM_4324B5_ROM_FMTS; + lognums = (uint32 *) raw_fmts; + num_fmts = ram_index; + logstrs = (char *) &raw_fmts[num_fmts << 2]; + while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) { + num_fmts++; + logstrs = (char *) &raw_fmts[num_fmts << 2]; + } + } else { + /* Legacy RAM-only logstrs.bin format: + * - RAM 'lognums' section + * - RAM 'logstrs' section. + * + * 'lognums' is an array of indexes for the strings in the + * 'logstrs' section. The first uint32 is an index to the + * start of 'logstrs'. Therefore, if this index is divided + * by 'sizeof(uint32)' it provides the number of logstr + * entries. + */ + ram_index = 0; + lognums = (uint32 *) raw_fmts; + logstrs = (char *) &raw_fmts[num_fmts << 2]; + } + } + fmts = MALLOC(osh, num_fmts * sizeof(char *)); + if (fmts == NULL) { + DHD_ERROR(("%s: Failed to allocate fmts memory\n", __FUNCTION__)); + return BCME_ERROR; + } + event_log->fmts_size = num_fmts * sizeof(char *); + + for (i = 0; i < num_fmts; i++) { + /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base + * (they are 0-indexed relative to 'rom_logstrs_offset'). + * + * RAM lognums are already indexed to point to the correct RAM logstrs (they + * are 0-indexed relative to the start of the logstrs.bin file). 
+ */ + if (i == ram_index) { + logstrs = raw_fmts; + } + fmts[i] = &logstrs[lognums[i]]; + } + event_log->fmts = fmts; + event_log->raw_fmts_size = logstrs_size; + event_log->raw_fmts = raw_fmts; + event_log->num_fmts = num_fmts; + + return BCME_OK; +} + +int dhd_parse_map_file(osl_t *osh, void *file, uint32 *ramstart, uint32 *rodata_start, + uint32 *rodata_end) +{ + char *raw_fmts = NULL; + uint32 read_size = READ_NUM_BYTES; + int error = 0; + char * cptr = NULL; + char c; + uint8 count = 0; + + *ramstart = 0; + *rodata_start = 0; + *rodata_end = 0; + + /* Allocate 1 byte more than read_size to terminate it with NULL */ + raw_fmts = MALLOC(osh, read_size + 1); + if (raw_fmts == NULL) { + DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__)); + goto fail; + } + + /* read ram start, rodata_start and rodata_end values from map file */ + while (count != ALL_MAP_VAL) + { + error = dhd_os_read_file(file, raw_fmts, read_size); + if (error < 0) { + DHD_ERROR(("%s: map file read failed err:%d \n", __FUNCTION__, + error)); + goto fail; + } + + /* End raw_fmts with NULL as strstr expects NULL terminated strings */ + raw_fmts[read_size] = '\0'; + + /* Get ramstart address */ + if ((cptr = strstr(raw_fmts, ramstart_str))) { + cptr = cptr - BYTES_AHEAD_NUM; + sscanf(cptr, "%x %c text_start", ramstart, &c); + count |= RAMSTART_BIT; + } + + /* Get ram rodata start address */ + if ((cptr = strstr(raw_fmts, rodata_start_str))) { + cptr = cptr - BYTES_AHEAD_NUM; + sscanf(cptr, "%x %c rodata_start", rodata_start, &c); + count |= RDSTART_BIT; + } + + /* Get ram rodata end address */ + if ((cptr = strstr(raw_fmts, rodata_end_str))) { + cptr = cptr - BYTES_AHEAD_NUM; + sscanf(cptr, "%x %c rodata_end", rodata_end, &c); + count |= RDEND_BIT; + } + + if (error < (int)read_size) { + /* + * since we reset file pos back to earlier pos by + * GO_BACK_FILE_POS_NUM_BYTES bytes we won't reach EOF. 
+ * The reason for this is if string is spreaded across + * bytes, the read function should not miss it. + * So if ret value is less than read_size, reached EOF don't read further + */ + break; + } + memset(raw_fmts, 0, read_size); + /* + * go back to predefined NUM of bytes so that we won't miss + * the string and addr even if it comes as splited in next read. + */ + dhd_os_seek_file(file, -GO_BACK_FILE_POS_NUM_BYTES); + } + + +fail: + if (raw_fmts) { + MFREE(osh, raw_fmts, read_size + 1); + raw_fmts = NULL; + } + if (count == ALL_MAP_VAL) + return BCME_OK; + else { + DHD_ERROR(("%s: readmap error 0X%x \n", __FUNCTION__, + count)); + return BCME_ERROR; + } + +} + +#ifdef PCIE_FULL_DONGLE +int +dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t *dhdp, void *pktbuf, + dhd_event_log_t *event_data) +{ + uint32 infobuf_version; + info_buf_payload_hdr_t *payload_hdr_ptr; + uint16 payload_hdr_type; + uint16 payload_hdr_length; + + DHD_TRACE(("%s:Enter\n", __FUNCTION__)); + + if (PKTLEN(dhdp->osh, pktbuf) < sizeof(uint32)) { + DHD_ERROR(("%s: infobuf too small for version field\n", + __FUNCTION__)); + goto exit; + } + infobuf_version = *((uint32 *)PKTDATA(dhdp->osh, pktbuf)); + PKTPULL(dhdp->osh, pktbuf, sizeof(uint32)); + if (infobuf_version != PCIE_INFOBUF_V1) { + DHD_ERROR(("%s: infobuf version %d is not PCIE_INFOBUF_V1\n", + __FUNCTION__, infobuf_version)); + goto exit; + } + + /* Version 1 infobuf has a single type/length (and then value) field */ + if (PKTLEN(dhdp->osh, pktbuf) < sizeof(info_buf_payload_hdr_t)) { + DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n", + __FUNCTION__)); + goto exit; + } + /* Process/parse the common info payload header (type/length) */ + payload_hdr_ptr = (info_buf_payload_hdr_t *)PKTDATA(dhdp->osh, pktbuf); + payload_hdr_type = ltoh16(payload_hdr_ptr->type); + payload_hdr_length = ltoh16(payload_hdr_ptr->length); + if (payload_hdr_type != PCIE_INFOBUF_V1_TYPE_LOGTRACE) { + DHD_ERROR(("%s: payload_hdr_type %d is not 
V1_TYPE_LOGTRACE\n", + __FUNCTION__, payload_hdr_type)); + goto exit; + } + PKTPULL(dhdp->osh, pktbuf, sizeof(info_buf_payload_hdr_t)); + + /* Validate that the specified length isn't bigger than the + * provided data. + */ + if (payload_hdr_length > PKTLEN(dhdp->osh, pktbuf)) { + DHD_ERROR(("%s: infobuf logtrace length is bigger" + " than actual buffer data\n", __FUNCTION__)); + goto exit; + } + dhd_dbg_trace_evnt_handler(dhdp, PKTDATA(dhdp->osh, pktbuf), + event_data, payload_hdr_length); + + return BCME_OK; + +exit: + return BCME_ERROR; +} +#endif /* PCIE_FULL_DONGLE */ +#endif /* SHOW_LOGTRACE */ + +#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) + +/* To handle the TDLS event in the dhd_common.c + */ +int dhd_tdls_event_handler(dhd_pub_t *dhd_pub, wl_event_msg_t *event) +{ + int ret = BCME_OK; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + ret = dhd_tdls_update_peer_info(dhd_pub, event); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + return ret; +} + +int dhd_free_tdls_peer_list(dhd_pub_t *dhd_pub) +{ + tdls_peer_node_t *cur = NULL, *prev = NULL; + if (!dhd_pub) + return BCME_ERROR; + cur = dhd_pub->peer_tbl.node; + + if ((dhd_pub->peer_tbl.node == NULL) && !dhd_pub->peer_tbl.tdls_peer_count) + return BCME_ERROR; + + while (cur != NULL) { + prev = cur; + cur = cur->next; + MFREE(dhd_pub->osh, prev, sizeof(tdls_peer_node_t)); + } + dhd_pub->peer_tbl.tdls_peer_count = 0; + dhd_pub->peer_tbl.node = NULL; + return BCME_OK; +} +#endif /* #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */ + +#ifdef DUMP_IOCTL_IOV_LIST +void +dhd_iov_li_append(dhd_pub_t *dhd, dll_t *list_head, dll_t *node) +{ + dll_t *item; + dhd_iov_li_t *iov_li; + dhd->dump_iovlist_len++; + + if (dhd->dump_iovlist_len == IOV_LIST_MAX_LEN+1) { + item = dll_head_p(list_head); + iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list); + dll_delete(item); + 
MFREE(dhd->osh, iov_li, sizeof(*iov_li)); + dhd->dump_iovlist_len--; + } + dll_append(list_head, node); +} + +void +dhd_iov_li_print(dll_t *list_head) +{ + dhd_iov_li_t *iov_li; + dll_t *item, *next; + uint8 index = 0; + for (item = dll_head_p(list_head); !dll_end(list_head, item); item = next) { + next = dll_next_p(item); + iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list); + index++; + DHD_ERROR(("%d:cmd_name = %s, cmd = %d.\n", index, iov_li->buff, iov_li->cmd)); + } +} + +void +dhd_iov_li_delete(dhd_pub_t *dhd, dll_t *list_head) +{ + dll_t *item; + dhd_iov_li_t *iov_li; + while (!(dll_empty(list_head))) { + item = dll_head_p(list_head); + iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list); + dll_delete(item); + MFREE(dhd->osh, iov_li, sizeof(*iov_li)); + } +} +#endif /* DUMP_IOCTL_IOV_LIST */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_config.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_config.c index 58927d709be5..39b91c2aaf21 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_config.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_config.c @@ -5,7 +5,7 @@ #include #include #include -#if defined(HW_OOB) || defined(FORCE_WOWLAN) +#if defined(HW_OOB) || defined(FORCE_WOWLAN) #include #include #include @@ -15,25 +15,25 @@ #include #include -/* message levels */ +/* message levels */ #define CONFIG_ERROR_LEVEL 0x0001 #define CONFIG_TRACE_LEVEL 0x0002 - + uint config_msg_level = CONFIG_ERROR_LEVEL; - + #define CONFIG_ERROR(x) \ - do { \ + do { \ if (config_msg_level & CONFIG_ERROR_LEVEL) { \ printk(KERN_ERR "CONFIG-ERROR) "); \ - printk x; \ - } \ - } while (0) + printk x; \ + } \ + } while (0) #define CONFIG_TRACE(x) \ - do { \ + do { \ if (config_msg_level & CONFIG_TRACE_LEVEL) { \ printk(KERN_ERR "CONFIG-TRACE) "); \ - printk x; \ - } \ + printk x; \ + } \ } while (0) #define MAXSZ_BUF 1000 @@ -59,6 +59,7 @@ uint config_msg_level = CONFIG_ERROR_LEVEL; #define CONFIG_BCM43341B0 
"config_43341b0.txt" #define CONFIG_BCM43241B4 "config_43241b4.txt" #define CONFIG_BCM4339A0 "config_4339a0.txt" +#define CONFIG_BCM43454C0 "config_43454c0.txt" #define CONFIG_BCM43455C0 "config_43455c0.txt" #define CONFIG_BCM43456C5 "config_43456c5.txt" #define CONFIG_BCM4354A1 "config_4354a1.txt" @@ -70,7 +71,7 @@ uint config_msg_level = CONFIG_ERROR_LEVEL; #endif #ifdef BCMSDIO -#define SBSDIO_CIS_SIZE_LIMIT 0x200 /* maximum bytes in one CIS */ +#define SBSDIO_CIS_SIZE_LIMIT 0x200 #define FW_BCM4330B2 "fw_RK903b2" #define FW_BCM4330B2_AG "fw_RK903_ag" @@ -98,11 +99,11 @@ uint config_msg_level = CONFIG_ERROR_LEVEL; #define FW_BCM4356A2 "fw_bcm4356a2_pcie_ag" #endif -#define htod32(i) i -#define htod16(i) i -#define dtoh32(i) i -#define dtoh16(i) i -#define htodchanspec(i) i +#define htod32(i) i +#define htod16(i) i +#define dtoh32(i) i +#define dtoh16(i) i +#define htodchanspec(i) i #define dtohchanspec(i) i #ifdef BCMSDIO @@ -156,6 +157,48 @@ dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, uint chip) } #endif +#define F0_BLOCK_SIZE 32 +int +dhd_conf_set_blksize(bcmsdh_info_t *sdh) +{ + int err = 0; + uint fn, numfn; + int32 blksize = 0, cur_blksize = 0; + uint8 cisd; + + numfn = bcmsdh_query_iofnum(sdh); + + for (fn = 0; fn <= numfn; fn++) { + if (!fn) + blksize = F0_BLOCK_SIZE; + else { + bcmsdh_cisaddr_read(sdh, fn, &cisd, 24); + blksize = cisd; + bcmsdh_cisaddr_read(sdh, fn, &cisd, 25); + blksize |= cisd << 8; + } +#ifdef CUSTOM_SDIO_F2_BLKSIZE + if (fn == 2 && blksize > CUSTOM_SDIO_F2_BLKSIZE) { + blksize = CUSTOM_SDIO_F2_BLKSIZE; + } +#endif + bcmsdh_iovar_op(sdh, "sd_blocksize", &fn, sizeof(int32), + &cur_blksize, sizeof(int32), FALSE); + if (cur_blksize != blksize) { + printf("%s: fn=%d, blksize=%d, cur_blksize=%d\n", __FUNCTION__, + fn, blksize, cur_blksize); + blksize |= (fn<<16); + if (bcmsdh_iovar_op(sdh, "sd_blocksize", NULL, 0, &blksize, + sizeof(blksize), TRUE) != BCME_OK) { + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize")); + err = 
-1; + } + } + } + + return err; +} + int dhd_conf_get_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, uint8 *mac) { @@ -348,12 +391,12 @@ dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *nv_path) #endif void -dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path, char *nv_path) +dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path, char *nv_path) { int fw_type, ag_type; uint chip, chiprev; - int i,j; - char fw_tail[20]; + int i, j; + char fw_tail[20]; chip = dhd->conf->chip; chiprev = dhd->conf->chiprev; @@ -378,12 +421,12 @@ dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path, char *nv_path) if (fw_path[i] == '/') break; i--; } - j = strlen(nv_path); - while (j > 0) { - if (nv_path[j] == '/') - break; - j--; - } + j = strlen(nv_path); + while (j > 0) { + if (nv_path[j] == '/') + break; + j--; + } #ifdef BAND_AG ag_type = FW_TYPE_AG; #else @@ -423,8 +466,8 @@ dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path, char *nv_path) strcpy(&fw_path[i+1], FW_BCM43362A0); else strcpy(&fw_path[i+1], FW_BCM43362A2); - if (!strstr(nv_path, "6476")) - strcpy(&nv_path[j + 1], "nvram_AP6210.txt"); + if (!strstr(nv_path, "6476")) + strcpy(&nv_path[j + 1], "nvram_AP6210.txt"); break; case BCM43430_CHIP_ID: if (chiprev == BCM43430A0_CHIP_REV) { @@ -432,15 +475,16 @@ dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path, char *nv_path) strcpy(&nv_path[j + 1], "nvram_ap6212.txt"); } else if (chiprev == BCM43430A1_CHIP_REV) { strcpy(&fw_path[i+1], FW_BCM43438A1); - strcpy(&nv_path[j + 1], "nvram_ap6212a.txt"); + strcpy(&nv_path[j + 1], "nvram_ap6212a.txt"); } else if (chiprev == BCM43430A2_CHIP_REV) { strcpy(&fw_path[i+1], FW_BCM43436B0); - strcpy(&nv_path[j + 1], "nvram_ap6236.txt"); - } + strcpy(&nv_path[j + 1], "nvram_ap6236.txt"); + } break; case BCM43012_CHIP_ID: if (chiprev == BCM43012B0_CHIP_REV) strcpy(&fw_path[i+1], FW_BCM43012B0); + break; case BCM4334_CHIP_ID: if (chiprev == BCM4334B1_CHIP_REV) strcpy(&fw_path[i+1], FW_BCM4334B1); 
@@ -453,7 +497,7 @@ dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path, char *nv_path) case BCM4324_CHIP_ID: if (chiprev == BCM43241B4_CHIP_REV) strcpy(&fw_path[i+1], FW_BCM43241B4); - strcpy(&nv_path[j + 1], "nvram_ap62x2.txt"); + strcpy(&nv_path[j + 1], "nvram_ap62x2.txt"); break; case BCM4335_CHIP_ID: if (chiprev == BCM4335A0_CHIP_REV) @@ -467,28 +511,28 @@ dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path, char *nv_path) break; case BCM4345_CHIP_ID: case BCM43454_CHIP_ID: - if (chiprev == BCM43455C0_CHIP_REV) { - strcpy(&fw_path[i+1], FW_BCM43455C0); - strcpy(&nv_path[j + 1], "nvram_ap6255.txt"); - } - else if (chiprev == BCM43455C5_CHIP_REV) { - strcpy(&fw_path[i+1], FW_BCM43456C5); - } - break; + if (chiprev == BCM43455C0_CHIP_REV) { + strcpy(&fw_path[i+1], FW_BCM43455C0); + strcpy(&nv_path[j + 1], "nvram_ap6255.txt"); + } + else if (chiprev == BCM43455C5_CHIP_REV) { + strcpy(&fw_path[i+1], FW_BCM43456C5); + } + break; case BCM4354_CHIP_ID: if (chiprev == BCM4354A1_CHIP_REV) { strcpy(&fw_path[i+1], FW_BCM4354A1); - strcpy(&nv_path[j + 1], "nvram_ap6354.txt"); + strcpy(&nv_path[j + 1], "nvram_ap6354.txt"); } else if (chiprev == BCM4356A2_CHIP_REV) { strcpy(&fw_path[i+1], FW_BCM4356A2); - strcpy(&nv_path[j + 1], "nvram_ap6356.txt"); - } + strcpy(&nv_path[j + 1], "nvram_ap6356.txt"); + } break; case BCM4356_CHIP_ID: case BCM4371_CHIP_ID: if (chiprev == BCM4356A2_CHIP_REV) strcpy(&fw_path[i+1], FW_BCM4356A2); - strcpy(&nv_path[j + 1], "nvram_ap6356.txt"); + strcpy(&nv_path[j + 1], "nvram_ap6356.txt"); break; case BCM43569_CHIP_ID: if (chiprev == BCM4358A3_CHIP_REV) @@ -588,7 +632,7 @@ dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path) /* find out the last '/' */ i = strlen(nv_path); - while (i > 0) { + while (i > 0) { if (nv_path[i] == '/') break; i--; } @@ -611,7 +655,7 @@ dhd_conf_set_path(dhd_pub_t *dhd, char *dst_name, char *dst_path, char *src_path /* find out the last '/' */ i = strlen(dst_path); - while (i > 0) { + while (i > 0) { 
if (dst_path[i] == '/') break; i--; } @@ -679,8 +723,11 @@ dhd_conf_set_conf_name_by_chip(dhd_pub_t *dhd, char *conf_path) if (chiprev == BCM4335A0_CHIP_REV) strcpy(&conf_path[i+1], CONFIG_BCM4339A0); break; - case BCM4345_CHIP_ID: case BCM43454_CHIP_ID: + if (chiprev == BCM43455C0_CHIP_REV) + strcpy(&conf_path[i+1], CONFIG_BCM43454C0); + break; + case BCM4345_CHIP_ID: if (chiprev == BCM43455C0_CHIP_REV) strcpy(&conf_path[i+1], CONFIG_BCM43455C0); else if (chiprev == BCM43455C5_CHIP_REV) @@ -873,10 +920,10 @@ dhd_conf_fix_country(dhd_pub_t *dhd) if (!(dhd && dhd->conf)) { return bcmerror; } - + memset(valid_chan_list, 0, sizeof(valid_chan_list)); - list = (wl_uint32_list_t *)(void *) valid_chan_list; - list->count = htod32(WL_NUMCHANNELS); + list = (wl_uint32_list_t *)(void *) valid_chan_list; + list->count = htod32(WL_NUMCHANNELS); if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, valid_chan_list, sizeof(valid_chan_list), FALSE, 0)) < 0) { CONFIG_ERROR(("%s: get channels failed with %d\n", __FUNCTION__, bcmerror)); } @@ -1168,7 +1215,7 @@ dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id) void dhd_conf_discard_pkt_filter(dhd_pub_t *dhd) -{ +{ dhd->pktfilter_count = 6; dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = NULL; dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF"; @@ -1416,14 +1463,13 @@ dhd_conf_read_log_level(dhd_pub_t *dhd, char *full_param, uint len_param) return false; return true; -} - +} + void dhd_conf_read_wme_ac_value(wme_param_t *wme, char *pick, int ac_val) { char *pick_tmp, *pch; - /* Process WMM parameters */ pick_tmp = pick; pch = bcmstrstr(pick_tmp, "aifsn "); if (pch) { @@ -1450,7 +1496,7 @@ dhd_conf_read_wme_ac_value(wme_param_t *wme, char *pick, int ac_val) } } - + bool dhd_conf_read_wme_ac_params(dhd_pub_t *dhd, char *full_param, uint len_param) { @@ -1460,7 +1506,6 @@ dhd_conf_read_wme_ac_params(dhd_pub_t *dhd, char *full_param, uint len_param) // wme_ac_sta_be=aifsn 1 ecwmin 2 ecwmax 3 txop 0x5e 
// wme_ac_sta_vo=aifsn 1 ecwmin 1 ecwmax 1 txop 0x5e - /* Process WMM parameters */ if (!strncmp("force_wme_ac=", full_param, len_param)) { conf->force_wme_ac = (int)simple_strtol(data, NULL, 10); printf("%s: force_wme_ac = %d\n", __FUNCTION__, conf->force_wme_ac); @@ -1916,7 +1961,7 @@ dhd_conf_read_sdio_params(dhd_pub_t *dhd, char *full_param, uint len_param) printf("%s: dhd_slpauto = %d\n", __FUNCTION__, dhd_slpauto); } else if (!strncmp("kso_enable=", full_param, len_param)) { - if (!strncmp(data, "1", 1)) + if (!strncmp(data, "0", 1)) dhd_slpauto = FALSE; else dhd_slpauto = TRUE; @@ -1964,13 +2009,6 @@ dhd_conf_read_sdio_params(dhd_pub_t *dhd, char *full_param, uint len_param) conf->txglomsize = SDPCM_MAXGLOM_SIZE; printf("%s: txglomsize = %d\n", __FUNCTION__, conf->txglomsize); } - else if (!strncmp("swtxglom=", full_param, len_param)) { - if (!strncmp(data, "0", 1)) - conf->swtxglom = FALSE; - else - conf->swtxglom = TRUE; - printf("%s: swtxglom = %d\n", __FUNCTION__, conf->swtxglom); - } else if (!strncmp("txglom_ext=", full_param, len_param)) { if (!strncmp(data, "0", 1)) conf->txglom_ext = FALSE; @@ -2330,10 +2368,10 @@ dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path) continue; #endif /* BCMSDIO */ else if (dhd_conf_read_pm_params(dhd, pick, len_param)) - continue; - else if (dhd_conf_read_others(dhd, pick, len_param)) - continue; - else + continue; + else if (dhd_conf_read_others(dhd, pick, len_param)) + continue; + else continue; } @@ -2392,18 +2430,7 @@ dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable) struct dhd_conf *conf = dhd->conf; if (enable) { -#if defined(SWTXGLOM) - if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID || - conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID || - conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) { - // 43362/4330/4334/43340/43341/43241 must use 1.88.45.x swtxglom if txglom_ext is true, since 1.201.59 not support swtxglom - conf->swtxglom = TRUE; - 
conf->txglom_ext = TRUE; - } - if (conf->chip == BCM43362_CHIP_ID && conf->bus_txglom == 0) { - conf->bus_txglom = 1; // improve tcp tx tput. and cpu idle for 43362 only - } -#elif defined(BCMSDIOH_TXGLOM_EXT) +#if defined(BCMSDIOH_TXGLOM_EXT) if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID || conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID || conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) { @@ -2418,8 +2445,9 @@ dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable) conf->txglomsize = 0; conf->deferred_tx_len = 0; } - printf("%s: swtxglom=%d, txglom_ext=%d, txglom_bucket_size=%d\n", __FUNCTION__, - conf->swtxglom, conf->txglom_ext, conf->txglom_bucket_size); + if (conf->txglom_ext) + printf("%s: txglom_ext=%d, txglom_bucket_size=%d\n", __FUNCTION__, + conf->txglom_ext, conf->txglom_bucket_size); printf("%s: txglomsize=%d, deferred_tx_len=%d, bus_txglom=%d\n", __FUNCTION__, conf->txglomsize, conf->deferred_tx_len, conf->bus_txglom); printf("%s: tx_in_rx=%d, txinrx_thres=%d, dhd_txminmax=%d\n", __FUNCTION__, @@ -2443,7 +2471,7 @@ dhd_conf_preinit(dhd_pub_t *dhd) dhd_conf_free_chip_nv_path_list(&conf->nv_by_chip); #endif memset(&conf->country_list, 0, sizeof(conf_country_list_t)); - conf->band = WLC_BAND_AUTO; + conf->band = -1; conf->mimo_bw_cap = -1; conf->bw_cap_2g = -1; conf->bw_cap_5g = -1; @@ -2480,9 +2508,9 @@ dhd_conf_preinit(dhd_pub_t *dhd) conf->roam_delta[0] = 15; #endif conf->roam_delta[1] = WLC_BAND_ALL; -#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC +#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC conf->fullroamperiod = 60; -#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */ +#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */ conf->fullroamperiod = 120; #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */ #ifdef CUSTOM_KEEP_ALIVE_SETTING @@ -2502,7 +2530,7 @@ dhd_conf_preinit(dhd_pub_t *dhd) #endif conf->srl = -1; conf->lrl = -1; - conf->bcn_timeout = 15; + conf->bcn_timeout = 16; conf->spect = -1; conf->txbf = -1; conf->lpc = -1; @@ 
-2515,7 +2543,7 @@ dhd_conf_preinit(dhd_pub_t *dhd) conf->tx_max_offset = 0; conf->txglomsize = SDPCM_DEFGLOM_SIZE; conf->dhd_poll = -1; - conf->txctl_tmo_fix = FALSE; + conf->txctl_tmo_fix = 5; conf->tx_in_rx = TRUE; conf->txglom_mode = SDPCM_TXGLOM_CPY; conf->deferred_tx_len = 0; @@ -2540,12 +2568,12 @@ dhd_conf_preinit(dhd_pub_t *dhd) conf->dhcpc_enable = -1; conf->dhcpd_enable = -1; #endif -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) conf->tsq = 10; #else conf->tsq = 0; #endif -#ifdef DHDTCPACK_SUPPRESS +#ifdef DHDTCPACK_SUPPRESS conf->tcpack_sup_mode = TCPACK_SUP_OFF; #endif conf->pktprio8021x = -1; @@ -2556,11 +2584,6 @@ dhd_conf_preinit(dhd_pub_t *dhd) memset(conf->iapsta_init, 0, sizeof(conf->iapsta_init)); memset(conf->iapsta_config, 0, sizeof(conf->iapsta_config)); memset(conf->iapsta_enable, 0, sizeof(conf->iapsta_enable)); -#endif -#ifdef BCMSDIO - if (conf->chip == BCM43430_CHIP_ID || conf->chip == BCM4345_CHIP_ID) { - conf->txctl_tmo_fix = 1; - } #endif if (conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID || conf->chip == BCM4371_CHIP_ID || conf->chip == BCM43569_CHIP_ID || @@ -2575,51 +2598,12 @@ dhd_conf_preinit(dhd_pub_t *dhd) #ifdef BCMSDIO conf->dhd_txminmax = -1; conf->txinrx_thres = 128; - conf->sd_f2_blocksize = 256; + conf->sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE; conf->oob_enabled_later = TRUE; #endif } #ifdef BCMSDIO -#if defined(SWTXGLOM) - if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID || - conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID || - conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) { - conf->swtxglom = FALSE; // disabled by default - conf->txglom_ext = TRUE; // enabled by default - conf->use_rxchain = 0; // use_rxchain have been disabled if swtxglom enabled - conf->txglomsize = 16; - } else { - conf->swtxglom = FALSE; // use 1.201.59.x txglom by default - conf->txglom_ext = FALSE; - } - - if (conf->chip 
== BCM43362_CHIP_ID) { - conf->txglom_bucket_size = 1680; // fixed value, don't change - conf->tx_in_rx = FALSE; - conf->tx_max_offset = 1; - } - if (conf->chip == BCM4330_CHIP_ID) { - conf->txglom_bucket_size = 1680; // fixed value, don't change - conf->tx_in_rx = FALSE; - conf->tx_max_offset = 0; - } - if (conf->chip == BCM4334_CHIP_ID) { - conf->txglom_bucket_size = 1684; // fixed value, don't change - conf->tx_in_rx = TRUE; // improve tcp tx tput. and cpu idle - conf->tx_max_offset = 0; // reduce udp tx: dhdsdio_readframes: got unlikely tx max 109 with tx_seq 110 - } - if (conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID) { - conf->txglom_bucket_size = 1684; // fixed value, don't change - conf->tx_in_rx = TRUE; // improve tcp tx tput. and cpu idle - conf->tx_max_offset = 1; - } - if (conf->chip == BCM4324_CHIP_ID) { - conf->txglom_bucket_size = 1684; // fixed value, don't change - conf->tx_in_rx = TRUE; // improve tcp tx tput. and cpu idle - conf->tx_max_offset = 0; - } -#endif #if defined(BCMSDIOH_TXGLOM_EXT) if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID || conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID || diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_config.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_config.h index 29411dd505e2..6d875e3935c0 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_config.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_config.h @@ -1,272 +1,254 @@ - -#ifndef _dhd_config_ -#define _dhd_config_ - -#include -#include -#include -#include -#include - -#define FW_PATH_AUTO_SELECT 1 -//#define CONFIG_PATH_AUTO_SELECT -extern char firmware_path[MOD_PARAM_PATHLEN]; + +#ifndef _dhd_config_ +#define _dhd_config_ + +#include +#include +#include +#include +#include <802.11.h> + +#define FW_PATH_AUTO_SELECT 1 +//#define CONFIG_PATH_AUTO_SELECT +extern char firmware_path[MOD_PARAM_PATHLEN]; extern uint dhd_rxbound; extern uint 
dhd_txbound; -#ifdef BCMSDIO -#define TXGLOM_RECV_OFFSET 8 -extern uint dhd_doflow; -extern uint dhd_slpauto; - -#define BCM43362A0_CHIP_REV 0 -#define BCM43362A2_CHIP_REV 1 -#define BCM43430A0_CHIP_REV 0 -#define BCM43430A1_CHIP_REV 1 -#define BCM43430A2_CHIP_REV 2 -#define BCM43012B0_CHIP_REV 1 -#define BCM4330B2_CHIP_REV 4 -#define BCM4334B1_CHIP_REV 3 -#define BCM43341B0_CHIP_REV 2 -#define BCM43241B4_CHIP_REV 5 -#define BCM4335A0_CHIP_REV 2 -#define BCM4339A0_CHIP_REV 1 -#define BCM43455C0_CHIP_REV 6 -#define BCM43455C5_CHIP_REV 9 -#define BCM4354A1_CHIP_REV 1 -#define BCM4359B1_CHIP_REV 5 -#define BCM4359C0_CHIP_REV 9 -#endif -#define BCM4356A2_CHIP_REV 2 -#define BCM4358A3_CHIP_REV 3 - -/* mac range */ -typedef struct wl_mac_range { - uint32 oui; - uint32 nic_start; - uint32 nic_end; -} wl_mac_range_t; - -/* mac list */ -typedef struct wl_mac_list { - int count; - wl_mac_range_t *mac; - char name[MOD_PARAM_PATHLEN]; /* path */ -} wl_mac_list_t; - -/* mac list head */ -typedef struct wl_mac_list_ctrl { - int count; - struct wl_mac_list *m_mac_list_head; -} wl_mac_list_ctrl_t; - -/* chip_nv_path */ -typedef struct wl_chip_nv_path { - uint chip; - uint chiprev; - char name[MOD_PARAM_PATHLEN]; /* path */ -} wl_chip_nv_path_t; - -/* chip_nv_path list head */ -typedef struct wl_chip_nv_path_list_ctrl { - int count; - struct wl_chip_nv_path *m_chip_nv_path_head; -} wl_chip_nv_path_list_ctrl_t; - -/* channel list */ -typedef struct wl_channel_list { - /* in - # of channels, out - # of entries */ - uint32 count; - /* variable length channel list */ - uint32 channel[WL_NUMCHANNELS]; -} wl_channel_list_t; - -typedef struct wmes_param { - int aifsn[AC_COUNT]; - int ecwmin[AC_COUNT]; - int ecwmax[AC_COUNT]; - int txop[AC_COUNT]; -} wme_param_t; - -#ifdef PKT_FILTER_SUPPORT -#define DHD_CONF_FILTER_MAX 8 -/* filter list */ -#define PKT_FILTER_LEN 300 -typedef struct conf_pkt_filter_add { - /* in - # of channels, out - # of entries */ - uint32 count; - /* variable length 
filter list */ - char filter[DHD_CONF_FILTER_MAX][PKT_FILTER_LEN]; -} conf_pkt_filter_add_t; - -/* pkt_filter_del list */ -typedef struct conf_pkt_filter_del { - /* in - # of channels, out - # of entries */ - uint32 count; - /* variable length filter list */ - uint32 id[DHD_CONF_FILTER_MAX]; -} conf_pkt_filter_del_t; -#endif - -#define CONFIG_COUNTRY_LIST_SIZE 100 -/* country list */ -typedef struct conf_country_list { - uint32 count; - wl_country_t cspec[CONFIG_COUNTRY_LIST_SIZE]; -} conf_country_list_t; - -typedef struct dhd_conf { - uint chip; /* chip number */ - uint chiprev; /* chip revision */ - wl_mac_list_ctrl_t fw_by_mac; /* Firmware auto selection by MAC */ - wl_mac_list_ctrl_t nv_by_mac; /* NVRAM auto selection by MAC */ - wl_chip_nv_path_list_ctrl_t nv_by_chip; /* NVRAM auto selection by chip */ - conf_country_list_t country_list; /* Country list */ - int band; /* Band, b:2.4G only, otherwise for auto */ - int mimo_bw_cap; /* Bandwidth, 0:HT20ALL, 1: HT40ALL, 2:HT20IN2G_HT40PIN5G */ - int bw_cap_2g; /* Bandwidth, 1:20MHz, 3: 20/40MHz, 7:20/40/80MHz */ - int bw_cap_5g; /* Bandwidth, 1:20MHz, 3: 20/40MHz, 7:20/40/80MHz */ - wl_country_t cspec; /* Country */ - wl_channel_list_t channels; /* Support channels */ - uint roam_off; /* Roaming, 0:enable, 1:disable */ - uint roam_off_suspend; /* Roaming in suspend, 0:enable, 1:disable */ - int roam_trigger[2]; /* The RSSI threshold to trigger roaming */ - int roam_scan_period[2]; /* Roaming scan period */ - int roam_delta[2]; /* Roaming candidate qualification delta */ - int fullroamperiod; /* Full Roaming period */ - uint keep_alive_period; /* The perioid in ms to send keep alive packet */ - int force_wme_ac; - wme_param_t wme_sta; /* WME parameters */ - wme_param_t wme_ap; /* WME parameters */ - int stbc; /* STBC for Tx/Rx */ - int phy_oclscdenable; /* phy_oclscdenable */ -#ifdef PKT_FILTER_SUPPORT - conf_pkt_filter_add_t pkt_filter_add; /* Packet filter add */ - conf_pkt_filter_del_t pkt_filter_del; /* Packet 
filter add */ - conf_pkt_filter_add_t magic_pkt_filter_add; /* Magic Packet filter add */ -#endif - int srl; /* short retry limit */ - int lrl; /* long retry limit */ - uint bcn_timeout; /* beacon timeout */ - int spect; - int txbf; - int lpc; - int disable_proptx; -#ifdef BCMSDIO - int bus_txglom; /* bus:txglom */ - int use_rxchain; - bool bus_rxglom; /* bus:rxglom */ +#ifdef BCMSDIO +#define TXGLOM_RECV_OFFSET 8 +extern uint dhd_doflow; +extern uint dhd_slpauto; + +#define BCM43362A0_CHIP_REV 0 +#define BCM43362A2_CHIP_REV 1 +#define BCM43430A0_CHIP_REV 0 +#define BCM43430A1_CHIP_REV 1 +#define BCM43430A2_CHIP_REV 2 +#define BCM43012B0_CHIP_REV 1 +#define BCM4330B2_CHIP_REV 4 +#define BCM4334B1_CHIP_REV 3 +#define BCM43341B0_CHIP_REV 2 +#define BCM43241B4_CHIP_REV 5 +#define BCM4335A0_CHIP_REV 2 +#define BCM4339A0_CHIP_REV 1 +#define BCM43455C0_CHIP_REV 6 +#define BCM43455C5_CHIP_REV 9 +#define BCM4354A1_CHIP_REV 1 +#define BCM4359B1_CHIP_REV 5 +#define BCM4359C0_CHIP_REV 9 +#endif +#define BCM4356A2_CHIP_REV 2 +#define BCM4358A3_CHIP_REV 3 + +typedef struct wl_mac_range { + uint32 oui; + uint32 nic_start; + uint32 nic_end; +} wl_mac_range_t; + +typedef struct wl_mac_list { + int count; + wl_mac_range_t *mac; + char name[MOD_PARAM_PATHLEN]; +} wl_mac_list_t; + +typedef struct wl_mac_list_ctrl { + int count; + struct wl_mac_list *m_mac_list_head; +} wl_mac_list_ctrl_t; + +typedef struct wl_chip_nv_path { + uint chip; + uint chiprev; + char name[MOD_PARAM_PATHLEN]; +} wl_chip_nv_path_t; + +typedef struct wl_chip_nv_path_list_ctrl { + int count; + struct wl_chip_nv_path *m_chip_nv_path_head; +} wl_chip_nv_path_list_ctrl_t; + +typedef struct wl_channel_list { + uint32 count; + uint32 channel[WL_NUMCHANNELS]; +} wl_channel_list_t; + +typedef struct wmes_param { + int aifsn[AC_COUNT]; + int ecwmin[AC_COUNT]; + int ecwmax[AC_COUNT]; + int txop[AC_COUNT]; +} wme_param_t; + +#ifdef PKT_FILTER_SUPPORT +#define DHD_CONF_FILTER_MAX 8 +#define PKT_FILTER_LEN 300 +typedef 
struct conf_pkt_filter_add { + uint32 count; + char filter[DHD_CONF_FILTER_MAX][PKT_FILTER_LEN]; +} conf_pkt_filter_add_t; + +typedef struct conf_pkt_filter_del { + uint32 count; + uint32 id[DHD_CONF_FILTER_MAX]; +} conf_pkt_filter_del_t; +#endif + +#define CONFIG_COUNTRY_LIST_SIZE 100 +typedef struct conf_country_list { + uint32 count; + wl_country_t cspec[CONFIG_COUNTRY_LIST_SIZE]; +} conf_country_list_t; + +typedef struct dhd_conf { + uint chip; + uint chiprev; + wl_mac_list_ctrl_t fw_by_mac; + wl_mac_list_ctrl_t nv_by_mac; + wl_chip_nv_path_list_ctrl_t nv_by_chip; + conf_country_list_t country_list; + int band; + int mimo_bw_cap; + int bw_cap_2g; + int bw_cap_5g; + wl_country_t cspec; + wl_channel_list_t channels; + uint roam_off; + uint roam_off_suspend; + int roam_trigger[2]; + int roam_scan_period[2]; + int roam_delta[2]; + int fullroamperiod; + uint keep_alive_period; + int force_wme_ac; + wme_param_t wme_sta; + wme_param_t wme_ap; + int stbc; + int phy_oclscdenable; +#ifdef PKT_FILTER_SUPPORT + conf_pkt_filter_add_t pkt_filter_add; + conf_pkt_filter_del_t pkt_filter_del; + conf_pkt_filter_add_t magic_pkt_filter_add; +#endif + int srl; + int lrl; + uint bcn_timeout; + int spect; + int txbf; + int lpc; + int disable_proptx; +#ifdef BCMSDIO + int bus_txglom; + int use_rxchain; + bool bus_rxglom; bool txglom_ext; /* Only for 43362/4330/43340/43341/43241 */ - /* terence 20161011: - 1) conf->tx_max_offset = 1 to fix credict issue in adaptivity testing - 2) conf->tx_max_offset = 1 will cause to UDP Tx not work in rxglom supported, - but not happened in sw txglom - */ - int tx_max_offset; - uint txglomsize; - int dhd_poll; - /* terence 20161011: conf->txctl_tmo_fix = 1 to fix for "sched: RT throttling activated, " - this issue happened in tx tput. 
and tx cmd at the same time in inband interrupt mode - */ - bool txctl_tmo_fix; - bool tx_in_rx; // Skip tx before rx, in order to get more glomed in tx - bool txglom_mode; - uint deferred_tx_len; - bool swtxglom; /* SW TXGLOM */ + /* terence 20161011: + 1) conf->tx_max_offset = 1 to fix credict issue in adaptivity testing + 2) conf->tx_max_offset = 1 will cause to UDP Tx not work in rxglom supported, + but not happened in sw txglom + */ + int tx_max_offset; + uint txglomsize; + int dhd_poll; + int txctl_tmo_fix; + bool tx_in_rx; + bool txglom_mode; + uint deferred_tx_len; /*txglom_bucket_size: * 43362/4330: 1680 * 43340/43341/43241: 1684 */ int txglom_bucket_size; - int txinrx_thres; - int dhd_txminmax; // -1=DATABUFCNT(bus) - uint sd_f2_blocksize; - bool oob_enabled_later; -#endif - int ampdu_ba_wsize; - int ampdu_hostreorder; - int dpc_cpucore; - int rxf_cpucore; - int frameburst; - bool deepsleep; - int pm; - int pm_in_suspend; - int pm2_sleep_ret; -#ifdef DHDTCPACK_SUPPRESS - uint8 tcpack_sup_mode; -#endif - int pktprio8021x; - int rsdb_mode; - int vhtmode; - int num_different_channels; - int xmit_in_suspend; - int ap_in_suspend; -#ifdef SUSPEND_EVENT - bool suspend_eventmask_enable; - char suspend_eventmask[WL_EVENTING_MASK_LEN]; - char resume_eventmask[WL_EVENTING_MASK_LEN]; -#endif -#ifdef IDHCP - int dhcpc_enable; - int dhcpd_enable; - struct ipv4_addr dhcpd_ip_addr; - struct ipv4_addr dhcpd_ip_mask; - struct ipv4_addr dhcpd_ip_start; - struct ipv4_addr dhcpd_ip_end; -#endif -#ifdef IAPSTA_PREINIT - char iapsta_init[50]; - char iapsta_config[300]; - char iapsta_enable[50]; -#endif - int autocountry; - int tsq; -} dhd_conf_t; - -#ifdef BCMSDIO -int dhd_conf_get_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, uint8 *mac); -void dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *fw_path); -void dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *nv_path); -#if defined(HW_OOB) || defined(FORCE_WOWLAN) -void 
dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, uint chip); -#endif -void dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable); -#endif + int txinrx_thres; + int dhd_txminmax; // -1=DATABUFCNT(bus) + uint sd_f2_blocksize; + bool oob_enabled_later; +#endif + int ampdu_ba_wsize; + int ampdu_hostreorder; + int dpc_cpucore; + int rxf_cpucore; + int frameburst; + bool deepsleep; + int pm; + int pm_in_suspend; + int pm2_sleep_ret; +#ifdef DHDTCPACK_SUPPRESS + uint8 tcpack_sup_mode; +#endif + int pktprio8021x; + int rsdb_mode; + int vhtmode; + int num_different_channels; + int xmit_in_suspend; + int ap_in_suspend; +#ifdef SUSPEND_EVENT + bool suspend_eventmask_enable; + char suspend_eventmask[WL_EVENTING_MASK_LEN]; + char resume_eventmask[WL_EVENTING_MASK_LEN]; +#endif +#ifdef IDHCP + int dhcpc_enable; + int dhcpd_enable; + struct ipv4_addr dhcpd_ip_addr; + struct ipv4_addr dhcpd_ip_mask; + struct ipv4_addr dhcpd_ip_start; + struct ipv4_addr dhcpd_ip_end; +#endif +#ifdef IAPSTA_PREINIT + char iapsta_init[50]; + char iapsta_config[300]; + char iapsta_enable[50]; +#endif + int autocountry; + int tsq; +} dhd_conf_t; + +#ifdef BCMSDIO +int dhd_conf_get_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, uint8 *mac); +void dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *fw_path); +void dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *nv_path); +#if defined(HW_OOB) || defined(FORCE_WOWLAN) +void dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, uint chip); +#endif +void dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable); +int dhd_conf_set_blksize(bcmsdh_info_t *sdh); +#endif void dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path, char *nv_path); -void dhd_conf_set_clm_name_by_chip(dhd_pub_t *dhd, char *clm_path); -void dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path); -void dhd_conf_set_path(dhd_pub_t *dhd, char *dst_name, char *dst_path, char *src_path); -#ifdef CONFIG_PATH_AUTO_SELECT -void 
dhd_conf_set_conf_name_by_chip(dhd_pub_t *dhd, char *conf_path); -#endif -int dhd_conf_set_intiovar(dhd_pub_t *dhd, uint cmd, char *name, int val, int def, bool down); -int dhd_conf_get_iovar(dhd_pub_t *dhd, int cmd, char *name, char *buf, int len, int ifidx); -int dhd_conf_set_bufiovar(dhd_pub_t *dhd, uint cmd, char *name, char *buf, int len, bool down); -uint dhd_conf_get_band(dhd_pub_t *dhd); -int dhd_conf_set_country(dhd_pub_t *dhd); -int dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec); -int dhd_conf_get_country_from_config(dhd_pub_t *dhd, wl_country_t *cspec); -int dhd_conf_fix_country(dhd_pub_t *dhd); -bool dhd_conf_match_channel(dhd_pub_t *dhd, uint32 channel); -int dhd_conf_set_roam(dhd_pub_t *dhd); -void dhd_conf_set_bw_cap(dhd_pub_t *dhd); -void dhd_conf_set_wme(dhd_pub_t *dhd, int mode); -void dhd_conf_add_pkt_filter(dhd_pub_t *dhd); -bool dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id); -void dhd_conf_discard_pkt_filter(dhd_pub_t *dhd); -int dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path); -int dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev); -uint dhd_conf_get_chip(void *context); -uint dhd_conf_get_chiprev(void *context); -int dhd_conf_get_pm(dhd_pub_t *dhd); -#ifdef PROP_TXSTATUS -int dhd_conf_get_disable_proptx(dhd_pub_t *dhd); -#endif -int dhd_conf_get_ap_mode_in_suspend(dhd_pub_t *dhd); -int dhd_conf_set_ap_in_suspend(dhd_pub_t *dhd, int suspend); -int dhd_conf_preinit(dhd_pub_t *dhd); -int dhd_conf_reset(dhd_pub_t *dhd); -int dhd_conf_attach(dhd_pub_t *dhd); -void dhd_conf_detach(dhd_pub_t *dhd); -void *dhd_get_pub(struct net_device *dev); -void *dhd_get_conf(struct net_device *dev); -#endif /* _dhd_config_ */ +void dhd_conf_set_clm_name_by_chip(dhd_pub_t *dhd, char *clm_path); +void dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path); +void dhd_conf_set_path(dhd_pub_t *dhd, char *dst_name, char *dst_path, char *src_path); +#ifdef CONFIG_PATH_AUTO_SELECT +void dhd_conf_set_conf_name_by_chip(dhd_pub_t *dhd, 
char *conf_path); +#endif +int dhd_conf_set_intiovar(dhd_pub_t *dhd, uint cmd, char *name, int val, int def, bool down); +int dhd_conf_get_iovar(dhd_pub_t *dhd, int cmd, char *name, char *buf, int len, int ifidx); +int dhd_conf_set_bufiovar(dhd_pub_t *dhd, uint cmd, char *name, char *buf, int len, bool down); +uint dhd_conf_get_band(dhd_pub_t *dhd); +int dhd_conf_set_country(dhd_pub_t *dhd); +int dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec); +int dhd_conf_get_country_from_config(dhd_pub_t *dhd, wl_country_t *cspec); +int dhd_conf_fix_country(dhd_pub_t *dhd); +bool dhd_conf_match_channel(dhd_pub_t *dhd, uint32 channel); +int dhd_conf_set_roam(dhd_pub_t *dhd); +void dhd_conf_set_bw_cap(dhd_pub_t *dhd); +void dhd_conf_set_wme(dhd_pub_t *dhd, int mode); +void dhd_conf_add_pkt_filter(dhd_pub_t *dhd); +bool dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id); +void dhd_conf_discard_pkt_filter(dhd_pub_t *dhd); +int dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path); +int dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev); +uint dhd_conf_get_chip(void *context); +uint dhd_conf_get_chiprev(void *context); +int dhd_conf_get_pm(dhd_pub_t *dhd); +#ifdef PROP_TXSTATUS +int dhd_conf_get_disable_proptx(dhd_pub_t *dhd); +#endif +int dhd_conf_get_ap_mode_in_suspend(dhd_pub_t *dhd); +int dhd_conf_set_ap_in_suspend(dhd_pub_t *dhd, int suspend); +int dhd_conf_preinit(dhd_pub_t *dhd); +int dhd_conf_reset(dhd_pub_t *dhd); +int dhd_conf_attach(dhd_pub_t *dhd); +void dhd_conf_detach(dhd_pub_t *dhd); +void *dhd_get_pub(struct net_device *dev); +void *dhd_get_conf(struct net_device *dev); +#endif /* _dhd_config_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_custom_gpio.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_custom_gpio.c index cbe4af515c4d..0aa51ca97e24 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_custom_gpio.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_custom_gpio.c @@ -1,7 +1,7 @@ /* * 
Customer code to add GPIO control during WLAN start/stop * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_custom_gpio.c 591129 2015-10-07 05:22:14Z $ + * $Id: dhd_custom_gpio.c 664997 2016-10-14 11:56:35Z $ */ #include @@ -36,9 +36,6 @@ #include #include -#if defined(WL_WIRELESS_EXT) -#include -#endif #define WL_ERROR(x) printf x #define WL_TRACE(x) @@ -132,14 +129,6 @@ dhd_custom_get_mac_address(void *adapter, unsigned char *buf) } #endif /* GET_CUSTOM_MAC_ENABLE */ -#if !defined(WL_WIRELESS_EXT) -struct cntry_locales_custom { - char iso_abbrev[WLC_CNTRY_BUF_SZ]; /* ISO 3166-1 country abbreviation */ - char custom_locale[WLC_CNTRY_BUF_SZ]; /* Custom firmware locale */ - int32 custom_locale_rev; /* Custom local revisin default -1 */ -}; -#endif /* WL_WIRELESS_EXT */ - /* Customized Locale table : OPTIONAL feature */ const struct cntry_locales_custom translate_custom_table[] = { /* Table should be filled out based on custom platform regulatory requirement */ @@ -259,7 +248,7 @@ const struct cntry_locales_custom translate_custom_table[] = { void #ifdef CUSTOM_COUNTRY_CODE get_customized_country_code(void *adapter, char *country_iso_code, - wl_country_t *cspec, u32 flags) + wl_country_t *cspec, u32 flags) #else get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec) #endif /* CUSTOM_COUNTRY_CODE */ @@ -271,11 +260,11 @@ get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t if (!cspec) return; #ifdef CUSTOM_COUNTRY_CODE - cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code, - flags); + cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code, flags); #else cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code); #endif /* 
CUSTOM_COUNTRY_CODE */ + if (cloc_ptr) { strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ); cspec->rev = cloc_ptr->custom_locale_rev; diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_custom_memprealloc.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_custom_memprealloc.c new file mode 100644 index 000000000000..1f593a488f9d --- /dev/null +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_custom_memprealloc.c @@ -0,0 +1,513 @@ +/* + * Platform Dependent file for usage of Preallocted Memory + * + * Copyright (C) 1999-2017, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * <> + * + * $Id: dhd_custom_memprealloc.c 707595 2017-06-28 08:28:30Z $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM + +#define WLAN_STATIC_SCAN_BUF0 5 +#define WLAN_STATIC_SCAN_BUF1 6 +#define WLAN_STATIC_DHD_INFO_BUF 7 +#define WLAN_STATIC_DHD_WLFC_BUF 8 +#define WLAN_STATIC_DHD_IF_FLOW_LKUP 9 +#define WLAN_STATIC_DHD_MEMDUMP_RAM 11 +#define WLAN_STATIC_DHD_WLFC_HANGER 12 +#define WLAN_STATIC_DHD_PKTID_MAP 13 +#define WLAN_STATIC_DHD_PKTID_IOCTL_MAP 14 +#define WLAN_STATIC_DHD_LOG_DUMP_BUF 15 +#define WLAN_STATIC_DHD_LOG_DUMP_BUF_EX 16 +#define WLAN_STATIC_DHD_PKTLOG_DUMP_BUF 17 +#define WLAN_STATIC_STAT_REPORT_BUF 18 + +#define WLAN_SCAN_BUF_SIZE (64 * 1024) + +#if defined(CONFIG_64BIT) +#define WLAN_DHD_INFO_BUF_SIZE (32 * 1024) +#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024) +#define WLAN_DHD_IF_FLOW_LKUP_SIZE (64 * 1024) +#else +#define WLAN_DHD_INFO_BUF_SIZE (32 * 1024) +#define WLAN_DHD_WLFC_BUF_SIZE (16 * 1024) +#define WLAN_DHD_IF_FLOW_LKUP_SIZE (20 * 1024) +#endif /* CONFIG_64BIT */ +#define WLAN_DHD_MEMDUMP_SIZE (1536 * 1024) + +#define PREALLOC_WLAN_SEC_NUM 4 +#define PREALLOC_WLAN_BUF_NUM 160 +#define PREALLOC_WLAN_SECTION_HEADER 24 + +#ifdef CONFIG_BCMDHD_PCIE +#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1) +#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2) +#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4) + +#define WLAN_SECTION_SIZE_0 (PREALLOC_WLAN_BUF_NUM * 128) +#define WLAN_SECTION_SIZE_1 0 +#define WLAN_SECTION_SIZE_2 0 +#define WLAN_SECTION_SIZE_3 (PREALLOC_WLAN_BUF_NUM * 1024) + +#define DHD_SKB_1PAGE_BUF_NUM 0 +#define DHD_SKB_2PAGE_BUF_NUM 128 +#define DHD_SKB_4PAGE_BUF_NUM 0 + +#else +#define DHD_SKB_HDRSIZE 336 +#define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE) +#define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE) +#define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE) + +#define 
WLAN_SECTION_SIZE_0 (PREALLOC_WLAN_BUF_NUM * 128) +#define WLAN_SECTION_SIZE_1 (PREALLOC_WLAN_BUF_NUM * 128) +#define WLAN_SECTION_SIZE_2 (PREALLOC_WLAN_BUF_NUM * 512) +#define WLAN_SECTION_SIZE_3 (PREALLOC_WLAN_BUF_NUM * 1024) + +#define DHD_SKB_1PAGE_BUF_NUM 8 +#define DHD_SKB_2PAGE_BUF_NUM 8 +#define DHD_SKB_4PAGE_BUF_NUM 1 +#endif /* CONFIG_BCMDHD_PCIE */ + +#define WLAN_SKB_1_2PAGE_BUF_NUM ((DHD_SKB_1PAGE_BUF_NUM) + \ + (DHD_SKB_2PAGE_BUF_NUM)) +#define WLAN_SKB_BUF_NUM ((WLAN_SKB_1_2PAGE_BUF_NUM) + \ + (DHD_SKB_4PAGE_BUF_NUM)) + +#define WLAN_MAX_PKTID_ITEMS (8192) +#define WLAN_DHD_PKTID_MAP_HDR_SIZE (20 + 4*(WLAN_MAX_PKTID_ITEMS + 1)) +#define WLAN_DHD_PKTID_MAP_ITEM_SIZE (32) +#define WLAN_DHD_PKTID_MAP_SIZE ((WLAN_DHD_PKTID_MAP_HDR_SIZE) + \ + ((WLAN_MAX_PKTID_ITEMS+1) * WLAN_DHD_PKTID_MAP_ITEM_SIZE)) + +#define WLAN_MAX_PKTID_IOCTL_ITEMS (32) +#define WLAN_DHD_PKTID_IOCTL_MAP_HDR_SIZE (20 + 4*(WLAN_MAX_PKTID_IOCTL_ITEMS + 1)) +#define WLAN_DHD_PKTID_IOCTL_MAP_ITEM_SIZE (32) +#define WLAN_DHD_PKTID_IOCTL_MAP_SIZE ((WLAN_DHD_PKTID_IOCTL_MAP_HDR_SIZE) + \ + ((WLAN_MAX_PKTID_IOCTL_ITEMS+1) * WLAN_DHD_PKTID_IOCTL_MAP_ITEM_SIZE)) + +#define DHD_LOG_DUMP_BUF_SIZE (1024 * 1024) +#define DHD_LOG_DUMP_BUF_EX_SIZE (8 * 1024) + +#define DHD_PKTLOG_DUMP_BUF_SIZE (64 * 1024) + +#define DHD_STAT_REPORT_BUF_SIZE (128 * 1024) + +#define WLAN_DHD_WLFC_HANGER_MAXITEMS 3072 +#define WLAN_DHD_WLFC_HANGER_ITEM_SIZE 32 +#define WLAN_DHD_WLFC_HANGER_SIZE ((WLAN_DHD_WLFC_HANGER_ITEM_SIZE) + \ + ((WLAN_DHD_WLFC_HANGER_MAXITEMS) * (WLAN_DHD_WLFC_HANGER_ITEM_SIZE))) + +static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM]; + +struct wlan_mem_prealloc { + void *mem_ptr; + unsigned long size; +}; + +static struct wlan_mem_prealloc wlan_mem_array[PREALLOC_WLAN_SEC_NUM] = { + {NULL, (WLAN_SECTION_SIZE_0 + PREALLOC_WLAN_SECTION_HEADER)}, + {NULL, (WLAN_SECTION_SIZE_1 + PREALLOC_WLAN_SECTION_HEADER)}, + {NULL, (WLAN_SECTION_SIZE_2 + PREALLOC_WLAN_SECTION_HEADER)}, + {NULL, 
(WLAN_SECTION_SIZE_3 + PREALLOC_WLAN_SECTION_HEADER)} +}; + +static void *wlan_static_scan_buf0 = NULL; +static void *wlan_static_scan_buf1 = NULL; +static void *wlan_static_dhd_info_buf = NULL; +static void *wlan_static_dhd_wlfc_buf = NULL; +static void *wlan_static_if_flow_lkup = NULL; +static void *wlan_static_dhd_memdump_ram = NULL; +static void *wlan_static_dhd_wlfc_hanger = NULL; +static void *wlan_static_dhd_pktid_map = NULL; +static void *wlan_static_dhd_pktid_ioctl_map = NULL; +static void *wlan_static_dhd_log_dump_buf = NULL; +static void *wlan_static_dhd_log_dump_buf_ex = NULL; +static void *wlan_static_dhd_pktlog_dump_buf = NULL; +static void *wlan_static_stat_report_buf = NULL; + +#define GET_STATIC_BUF(section, config_size, req_size, buf) ({\ + void *__ret; \ + if (req_size > config_size) {\ + pr_err("request " #section " size(%lu) is bigger than" \ + " static size(%d)\n", \ + req_size, config_size); \ + __ret = NULL; \ + } else { __ret = buf;} \ + __ret; \ +}) + +void +*dhd_wlan_mem_prealloc(int section, unsigned long size) +{ + if (section == PREALLOC_WLAN_SEC_NUM) { + return wlan_static_skb; + } + + if (section == WLAN_STATIC_SCAN_BUF0) { + return wlan_static_scan_buf0; + } + + if (section == WLAN_STATIC_SCAN_BUF1) { + return wlan_static_scan_buf1; + } + + if (section == WLAN_STATIC_DHD_INFO_BUF) { + if (size > WLAN_DHD_INFO_BUF_SIZE) { + pr_err("request DHD_INFO size(%lu) is bigger than" + " static size(%d).\n", size, + WLAN_DHD_INFO_BUF_SIZE); + return NULL; + } + return wlan_static_dhd_info_buf; + } + + if (section == WLAN_STATIC_DHD_WLFC_BUF) { + if (size > WLAN_DHD_WLFC_BUF_SIZE) { + pr_err("request DHD_WLFC size(%lu) is bigger than" + " static size(%d).\n", + size, WLAN_DHD_WLFC_BUF_SIZE); + return NULL; + } + return wlan_static_dhd_wlfc_buf; + } + + if (section == WLAN_STATIC_DHD_WLFC_HANGER) { + if (size > WLAN_DHD_WLFC_HANGER_SIZE) { + pr_err("request DHD_WLFC_HANGER size(%lu) is bigger than" + " static size(%d).\n", + size, 
WLAN_DHD_WLFC_HANGER_SIZE); + return NULL; + } + return wlan_static_dhd_wlfc_hanger; + } + + if (section == WLAN_STATIC_DHD_IF_FLOW_LKUP) { + if (size > WLAN_DHD_IF_FLOW_LKUP_SIZE) { + pr_err("request DHD_IF_FLOW_LKUP size(%lu) is bigger than" + " static size(%d).\n", + size, WLAN_DHD_IF_FLOW_LKUP_SIZE); + return NULL; + } + return wlan_static_if_flow_lkup; + } + + if (section == WLAN_STATIC_DHD_MEMDUMP_RAM) { + if (size > WLAN_DHD_MEMDUMP_SIZE) { + pr_err("request DHD_MEMDUMP_RAM size(%lu) is bigger" + " than static size(%d).\n", + size, WLAN_DHD_MEMDUMP_SIZE); + return NULL; + } + return wlan_static_dhd_memdump_ram; + } + + if (section == WLAN_STATIC_DHD_PKTID_MAP) { + if (size > WLAN_DHD_PKTID_MAP_SIZE) { + pr_err("request DHD_PKTID_MAP size(%lu) is bigger than" + " static size(%d).\n", + size, WLAN_DHD_PKTID_MAP_SIZE); + return NULL; + } + return wlan_static_dhd_pktid_map; + } + + + if (section == WLAN_STATIC_DHD_PKTID_IOCTL_MAP) { + if (size > WLAN_DHD_PKTID_IOCTL_MAP_SIZE) { + pr_err("request DHD_PKTID_IOCTL_MAP size(%lu) is bigger than" + " static size(%d).\n", + size, WLAN_DHD_PKTID_IOCTL_MAP_SIZE); + return NULL; + } + return wlan_static_dhd_pktid_ioctl_map; + } + + if (section == WLAN_STATIC_DHD_LOG_DUMP_BUF) { + if (size > DHD_LOG_DUMP_BUF_SIZE) { + pr_err("request DHD_LOG_DUMP_BUF size(%lu) is bigger than" + " static size(%d).\n", + size, DHD_LOG_DUMP_BUF_SIZE); + return NULL; + } + return wlan_static_dhd_log_dump_buf; + } + + if (section == WLAN_STATIC_DHD_LOG_DUMP_BUF_EX) { + if (size > DHD_LOG_DUMP_BUF_EX_SIZE) { + pr_err("request DHD_LOG_DUMP_BUF_EX size(%lu) is bigger than" + " static size(%d).\n", + size, DHD_LOG_DUMP_BUF_EX_SIZE); + return NULL; + } + return wlan_static_dhd_log_dump_buf_ex; + } + + if (section == WLAN_STATIC_DHD_PKTLOG_DUMP_BUF) { + if (size > DHD_PKTLOG_DUMP_BUF_SIZE) { + pr_err("request DHD_PKTLOG_DUMP_BUF size(%lu) is bigger than" + " static size(%d).\n", + size, DHD_PKTLOG_DUMP_BUF_SIZE); + return NULL; + } + return
wlan_static_dhd_pktlog_dump_buf; + } + + if (section == WLAN_STATIC_STAT_REPORT_BUF) { + return GET_STATIC_BUF(WLAN_STATIC_STAT_REPORT_BUF, + DHD_STAT_REPORT_BUF_SIZE, size, wlan_static_stat_report_buf); + } + + if ((section < 0) || (section >= PREALLOC_WLAN_SEC_NUM)) { + return NULL; + } + + if (wlan_mem_array[section].size < size) { + return NULL; + } + + return wlan_mem_array[section].mem_ptr; +} +EXPORT_SYMBOL(dhd_wlan_mem_prealloc); + +int +dhd_init_wlan_mem(void) +{ + int i; + int j; + + for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) { + wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE); + if (!wlan_static_skb[i]) { + goto err_skb_alloc; + } + } + + for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) { + wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE); + if (!wlan_static_skb[i]) { + goto err_skb_alloc; + } + } + +#if !defined(CONFIG_BCMDHD_PCIE) + wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE); + if (!wlan_static_skb[i]) { + goto err_skb_alloc; + } +#endif /* !CONFIG_BCMDHD_PCIE */ + + for (i = 0; i < PREALLOC_WLAN_SEC_NUM; i++) { + if (wlan_mem_array[i].size > 0) { + wlan_mem_array[i].mem_ptr = + kmalloc(wlan_mem_array[i].size, GFP_KERNEL); + + if (!wlan_mem_array[i].mem_ptr) { + goto err_mem_alloc; + } + } + } + + wlan_static_scan_buf0 = kmalloc(WLAN_SCAN_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_scan_buf0) { + pr_err("Failed to alloc wlan_static_scan_buf0\n"); + goto err_mem_alloc; + } + + wlan_static_scan_buf1 = kmalloc(WLAN_SCAN_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_scan_buf1) { + pr_err("Failed to alloc wlan_static_scan_buf1\n"); + goto err_mem_alloc; + } + + wlan_static_dhd_log_dump_buf = kmalloc(DHD_LOG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_log_dump_buf) { + pr_err("Failed to alloc wlan_static_dhd_log_dump_buf\n"); + goto err_mem_alloc; + } + + wlan_static_dhd_log_dump_buf_ex = kmalloc(DHD_LOG_DUMP_BUF_EX_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_log_dump_buf_ex) { + pr_err("Failed to alloc 
wlan_static_dhd_log_dump_buf_ex\n"); + goto err_mem_alloc; + } + + wlan_static_dhd_info_buf = kmalloc(WLAN_DHD_INFO_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_info_buf) { + pr_err("Failed to alloc wlan_static_dhd_info_buf\n"); + goto err_mem_alloc; + } + +#ifdef CONFIG_BCMDHD_PCIE + wlan_static_if_flow_lkup = kmalloc(WLAN_DHD_IF_FLOW_LKUP_SIZE, + GFP_KERNEL); + if (!wlan_static_if_flow_lkup) { + pr_err("Failed to alloc wlan_static_if_flow_lkup\n"); + goto err_mem_alloc; + } + +#ifdef CONFIG_BCMDHD_PREALLOC_PKTIDMAP + wlan_static_dhd_pktid_map = kmalloc(WLAN_DHD_PKTID_MAP_SIZE, + GFP_KERNEL); + if (!wlan_static_dhd_pktid_map) { + pr_err("Failed to alloc wlan_static_dhd_pktid_map\n"); + goto err_mem_alloc; + } + + wlan_static_dhd_pktid_ioctl_map = kmalloc(WLAN_DHD_PKTID_IOCTL_MAP_SIZE, + GFP_KERNEL); + if (!wlan_static_dhd_pktid_ioctl_map) { + pr_err("Failed to alloc wlan_static_dhd_pktid_ioctl_map\n"); + goto err_mem_alloc; + } +#endif /* CONFIG_BCMDHD_PREALLOC_PKTIDMAP */ +#else + wlan_static_dhd_wlfc_buf = kmalloc(WLAN_DHD_WLFC_BUF_SIZE, + GFP_KERNEL); + if (!wlan_static_dhd_wlfc_buf) { + pr_err("Failed to alloc wlan_static_dhd_wlfc_buf\n"); + goto err_mem_alloc; + } + + wlan_static_dhd_wlfc_hanger = kmalloc(WLAN_DHD_WLFC_HANGER_SIZE, + GFP_KERNEL); + if (!wlan_static_dhd_wlfc_hanger) { + pr_err("Failed to alloc wlan_static_dhd_wlfc_hanger\n"); + goto err_mem_alloc; + } +#endif /* CONFIG_BCMDHD_PCIE */ + +#ifdef CONFIG_BCMDHD_PREALLOC_MEMDUMP + wlan_static_dhd_memdump_ram = kmalloc(WLAN_DHD_MEMDUMP_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_memdump_ram) { + pr_err("Failed to alloc wlan_static_dhd_memdump_ram\n"); + goto err_mem_alloc; + } +#endif /* CONFIG_BCMDHD_PREALLOC_MEMDUMP */ + + wlan_static_dhd_pktlog_dump_buf = kmalloc(DHD_PKTLOG_DUMP_BUF_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_pktlog_dump_buf) { + pr_err("Failed to alloc wlan_static_dhd_pktlog_dump_buf\n"); + goto err_mem_alloc; + } + + wlan_static_stat_report_buf = kmalloc(DHD_STAT_REPORT_BUF_SIZE, 
GFP_KERNEL); + if (!wlan_static_stat_report_buf) { + pr_err("Failed to alloc wlan_static_stat_report_buf\n"); + goto err_mem_alloc; + } + + pr_err("%s: WIFI MEM Allocated\n", __FUNCTION__); + return 0; + +err_mem_alloc: +#ifdef CONFIG_BCMDHD_PREALLOC_MEMDUMP + if (wlan_static_dhd_memdump_ram) { + kfree(wlan_static_dhd_memdump_ram); + } + +#endif /* CONFIG_BCMDHD_PREALLOC_MEMDUMP */ + +#ifdef CONFIG_BCMDHD_PCIE + if (wlan_static_if_flow_lkup) { + kfree(wlan_static_if_flow_lkup); + } + +#ifdef CONFIG_BCMDHD_PREALLOC_PKTIDMAP + if (wlan_static_dhd_pktid_map) { + kfree(wlan_static_dhd_pktid_map); + } + + if (wlan_static_dhd_pktid_ioctl_map) { + kfree(wlan_static_dhd_pktid_ioctl_map); + } +#endif /* CONFIG_BCMDHD_PREALLOC_PKTIDMAP */ +#else + if (wlan_static_dhd_wlfc_buf) { + kfree(wlan_static_dhd_wlfc_buf); + } + + if (wlan_static_dhd_wlfc_hanger) { + kfree(wlan_static_dhd_wlfc_hanger); + } +#endif /* CONFIG_BCMDHD_PCIE */ + if (wlan_static_dhd_info_buf) { + kfree(wlan_static_dhd_info_buf); + } + + if (wlan_static_dhd_log_dump_buf) { + kfree(wlan_static_dhd_log_dump_buf); + } + + if (wlan_static_dhd_log_dump_buf_ex) { + kfree(wlan_static_dhd_log_dump_buf_ex); + } + + if (wlan_static_scan_buf1) { + kfree(wlan_static_scan_buf1); + } + + if (wlan_static_scan_buf0) { + kfree(wlan_static_scan_buf0); + } + + if (wlan_static_dhd_pktlog_dump_buf) { + kfree(wlan_static_dhd_pktlog_dump_buf); + } + + if (wlan_static_stat_report_buf) { + kfree(wlan_static_stat_report_buf); + } + + pr_err("Failed to mem_alloc for WLAN\n"); + + for (j = 0; j < i; j++) { + kfree(wlan_mem_array[j].mem_ptr); + } + + i = WLAN_SKB_BUF_NUM; + +err_skb_alloc: + pr_err("Failed to skb_alloc for WLAN\n"); + for (j = 0; j < i; j++) { + dev_kfree_skb(wlan_static_skb[j]); + } + + return -ENOMEM; +} +EXPORT_SYMBOL(dhd_init_wlan_mem); +#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_custom_msm.c 
b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_custom_msm.c new file mode 100644 index 000000000000..27399d0d0588 --- /dev/null +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_custom_msm.c @@ -0,0 +1,253 @@ +/* + * Platform Dependent file for Qualcomm MSM/APQ + * + * Copyright (C) 1999-2017, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * <> + * + * $Id: dhd_custom_msm.c 674523 2016-12-09 04:05:27Z $ + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) +#include +#endif /* CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 */ + +#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM +extern int dhd_init_wlan_mem(void); +extern void *dhd_wlan_mem_prealloc(int section, unsigned long size); +#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */ + +#define WIFI_TURNON_DELAY 200 +static int wlan_reg_on = -1; +#define DHD_DT_COMPAT_ENTRY "android,bcmdhd_wlan" +#ifdef CUSTOMER_HW2 +#define WIFI_WL_REG_ON_PROPNAME "wl_reg_on" +#else +#define WIFI_WL_REG_ON_PROPNAME "wlan-en-gpio" +#endif /* CUSTOMER_HW2 */ + +#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) +#define MSM_PCIE_CH_NUM 0 +#else +#define MSM_PCIE_CH_NUM 1 +#endif /* CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 */ + +#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE +static int wlan_host_wake_up = -1; +static int wlan_host_wake_irq = 0; +#ifdef CUSTOMER_HW2 +#define WIFI_WLAN_HOST_WAKE_PROPNAME "wl_host_wake" +#else +#define WIFI_WLAN_HOST_WAKE_PROPNAME "wlan-host-wake-gpio" +#endif /* CUSTOMER_HW2 */ +#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */ + +int __init +dhd_wifi_init_gpio(void) +{ + char *wlan_node = DHD_DT_COMPAT_ENTRY; + struct device_node *root_node = NULL; + + root_node = of_find_compatible_node(NULL, NULL, wlan_node); + if (!root_node) { + WARN(1, "failed to get device node of BRCM WLAN\n"); + return -ENODEV; + } + + /* ========== WLAN_PWR_EN ============ */ + wlan_reg_on = of_get_named_gpio(root_node, WIFI_WL_REG_ON_PROPNAME, 0); + printk(KERN_INFO "%s: gpio_wlan_power : %d\n", __FUNCTION__, wlan_reg_on); + + if (gpio_request_one(wlan_reg_on, GPIOF_OUT_INIT_LOW, "WL_REG_ON")) { + printk(KERN_ERR "%s: Faiiled to request gpio %d for WL_REG_ON\n", + __FUNCTION__, wlan_reg_on); + } else { + printk(KERN_ERR "%s: 
gpio_request WL_REG_ON done - WLAN_EN: GPIO %d\n", + __FUNCTION__, wlan_reg_on); + } + + if (gpio_direction_output(wlan_reg_on, 1)) { + printk(KERN_ERR "%s: WL_REG_ON failed to pull up\n", __FUNCTION__); + } else { + printk(KERN_ERR "%s: WL_REG_ON is pulled up\n", __FUNCTION__); + } + + if (gpio_get_value(wlan_reg_on)) { + printk(KERN_INFO "%s: Initial WL_REG_ON: [%d]\n", + __FUNCTION__, gpio_get_value(wlan_reg_on)); + } + + /* Wait for WIFI_TURNON_DELAY due to power stability */ + msleep(WIFI_TURNON_DELAY); + +#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE + /* ========== WLAN_HOST_WAKE ============ */ + wlan_host_wake_up = of_get_named_gpio(root_node, WIFI_WLAN_HOST_WAKE_PROPNAME, 0); + printk(KERN_INFO "%s: gpio_wlan_host_wake : %d\n", __FUNCTION__, wlan_host_wake_up); + +#ifndef CUSTOMER_HW2 + if (gpio_request_one(wlan_host_wake_up, GPIOF_IN, "WLAN_HOST_WAKE")) { + printk(KERN_ERR "%s: Failed to request gpio %d for WLAN_HOST_WAKE\n", + __FUNCTION__, wlan_host_wake_up); + return -ENODEV; + } else { + printk(KERN_ERR "%s: gpio_request WLAN_HOST_WAKE done" + " - WLAN_HOST_WAKE: GPIO %d\n", + __FUNCTION__, wlan_host_wake_up); + } +#endif /* !CUSTOMER_HW2 */ + + gpio_direction_input(wlan_host_wake_up); + wlan_host_wake_irq = gpio_to_irq(wlan_host_wake_up); +#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */ + +#if defined(CONFIG_BCM4359) || defined(CONFIG_BCM4361) + printk(KERN_INFO "%s: Call msm_pcie_enumerate\n", __FUNCTION__); + msm_pcie_enumerate(MSM_PCIE_CH_NUM); +#endif /* CONFIG_BCM4359 || CONFIG_BCM4361 */ + + return 0; +} + +int +dhd_wlan_power(int onoff) +{ + printk(KERN_INFO"------------------------------------------------"); + printk(KERN_INFO"------------------------------------------------\n"); + printk(KERN_INFO"%s Enter: power %s\n", __func__, onoff ? 
"on" : "off"); + + if (onoff) { + if (gpio_direction_output(wlan_reg_on, 1)) { + printk(KERN_ERR "%s: WL_REG_ON is failed to pull up\n", __FUNCTION__); + return -EIO; + } + if (gpio_get_value(wlan_reg_on)) { + printk(KERN_INFO"WL_REG_ON on-step-2 : [%d]\n", + gpio_get_value(wlan_reg_on)); + } else { + printk("[%s] gpio value is 0. We need reinit.\n", __func__); + if (gpio_direction_output(wlan_reg_on, 1)) { + printk(KERN_ERR "%s: WL_REG_ON is " + "failed to pull up\n", __func__); + } + } + } else { + if (gpio_direction_output(wlan_reg_on, 0)) { + printk(KERN_ERR "%s: WL_REG_ON is failed to pull down\n", __FUNCTION__); + return -EIO; + } + if (gpio_get_value(wlan_reg_on)) { + printk(KERN_INFO"WL_REG_ON on-step-2 : [%d]\n", + gpio_get_value(wlan_reg_on)); + } + } + return 0; +} +EXPORT_SYMBOL(dhd_wlan_power); + +static int +dhd_wlan_reset(int onoff) +{ + return 0; +} + +static int +dhd_wlan_set_carddetect(int val) +{ + return 0; +} + +struct resource dhd_wlan_resources = { + .name = "bcmdhd_wlan_irq", + .start = 0, /* Dummy */ + .end = 0, /* Dummy */ + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE | +#ifdef CONFIG_BCMDHD_PCIE + IORESOURCE_IRQ_HIGHEDGE, +#else + IORESOURCE_IRQ_HIGHLEVEL, +#endif /* CONFIG_BCMDHD_PCIE */ +}; +EXPORT_SYMBOL(dhd_wlan_resources); + +struct wifi_platform_data dhd_wlan_control = { + .set_power = dhd_wlan_power, + .set_reset = dhd_wlan_reset, + .set_carddetect = dhd_wlan_set_carddetect, +#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM + .mem_prealloc = dhd_wlan_mem_prealloc, +#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */ +}; +EXPORT_SYMBOL(dhd_wlan_control); + +int __init +dhd_wlan_init(void) +{ + int ret; + + printk(KERN_INFO"%s: START.......\n", __FUNCTION__); + ret = dhd_wifi_init_gpio(); + if (ret < 0) { + printk(KERN_ERR "%s: failed to initiate GPIO, ret=%d\n", + __FUNCTION__, ret); + goto fail; + } + +#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE + dhd_wlan_resources.start = wlan_host_wake_irq; + dhd_wlan_resources.end = wlan_host_wake_irq; +#endif
/* CONFIG_BCMDHD_OOB_HOST_WAKE */ + +#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM + ret = dhd_init_wlan_mem(); + if (ret < 0) { + printk(KERN_ERR "%s: failed to alloc reserved memory," + " ret=%d\n", __FUNCTION__, ret); + } +#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */ + +fail: + printk(KERN_INFO"%s: FINISH.......\n", __FUNCTION__); + return ret; +} +#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) +#if defined(CONFIG_DEFERRED_INITCALLS) +deferred_module_init(dhd_wlan_init); +#else +late_initcall(dhd_wlan_init); +#endif /* CONFIG_DEFERRED_INITCALLS */ +#else +device_initcall(dhd_wlan_init); +#endif /* CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_dbg.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_dbg.h index 2c32acf0aa7d..3f90c522936f 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_dbg.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_dbg.h @@ -1,7 +1,7 @@ /* * Debug/trace/assert driver definitions for Dongle Host Driver. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,31 +24,65 @@ * * <> * - * $Id: dhd_dbg.h 598059 2015-11-07 07:31:52Z $ + * $Id: dhd_dbg.h 667145 2016-10-26 04:27:53Z $ */ #ifndef _dhd_dbg_ #define _dhd_dbg_ -#define USE_NET_RATELIMIT 1 + +#if defined(DHD_EFI) && defined(DHD_LOG_DUMP) +extern void dhd_log_dump_print(const char *fmt, ...); +extern void dhd_log_dump_print_drv(const char *fmt, ...); +#endif #if defined(DHD_DEBUG) + #ifdef DHD_LOG_DUMP -extern void dhd_log_dump_print(const char *fmt, ...); +extern void dhd_log_dump_write(int type, const char *fmt, ...); extern char *dhd_log_dump_get_timestamp(void); +#ifndef _DHD_LOG_DUMP_DEFINITIONS_ +#define _DHD_LOG_DUMP_DEFINITIONS_ +#define DLD_BUF_TYPE_GENERAL 0 +#define DLD_BUF_TYPE_SPECIAL 1 +#define DHD_LOG_DUMP_WRITE(fmt, ...) dhd_log_dump_write(DLD_BUF_TYPE_GENERAL, fmt, ##__VA_ARGS__) +#define DHD_LOG_DUMP_WRITE_EX(fmt, ...) 
dhd_log_dump_write(DLD_BUF_TYPE_SPECIAL, fmt, ##__VA_ARGS__) +#endif /* !_DHD_LOG_DUMP_DEFINITIONS_ */ + +#ifdef DHD_EFI #define DHD_ERROR(args) \ do { \ if (dhd_msg_level & DHD_ERROR_VAL) { \ printf args; \ - dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ - dhd_log_dump_print args; \ + dhd_log_dump_print_drv("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \ + dhd_log_dump_print_drv args; \ } \ } while (0) -#else +#define DHD_INFO(args) \ +do { \ + if (dhd_msg_level & DHD_INFO_VAL) { \ + printf args; \ + dhd_log_dump_print_drv("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \ + dhd_log_dump_print_drv args; \ + } \ +} while (0) +#else /* DHD_EFI */ +#define DHD_ERROR(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + printf args; \ + DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) +#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0) +#endif /* DHD_EFI */ +#else /* DHD_LOG_DUMP */ #define DHD_ERROR(args) do {if (dhd_msg_level & DHD_ERROR_VAL) printf args;} while (0) +#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0) #endif /* DHD_LOG_DUMP */ #define DHD_TRACE(args) do {if (dhd_msg_level & DHD_TRACE_VAL) printf args;} while (0) -#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0) + #define DHD_DATA(args) do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0) #define DHD_CTL(args) do {if (dhd_msg_level & DHD_CTL_VAL) printf args;} while (0) #define DHD_TIMER(args) do {if (dhd_msg_level & DHD_TIMER_VAL) printf args;} while (0) @@ -57,48 +91,97 @@ do { \ #define DHD_INTR(args) do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0) #define DHD_GLOM(args) do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0) #ifdef DHD_LOG_DUMP +#ifndef DHD_EFI #define DHD_EVENT(args) \ do { \ if (dhd_msg_level & DHD_EVENT_VAL) { \ printf 
args; \ - dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ - dhd_log_dump_print args; \ + DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ + DHD_LOG_DUMP_WRITE args; \ } \ } while (0) #else +#define DHD_EVENT(args) \ +do { \ + if (dhd_msg_level & DHD_EVENT_VAL) { \ + dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \ + dhd_log_dump_print args; \ + } \ +} while (0) +#endif /* !DHD_EFI */ +#else #define DHD_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0) #endif /* DHD_LOG_DUMP */ -#define DHD_BTA(args) do {if (dhd_msg_level & DHD_BTA_VAL) printf args;} while (0) #define DHD_ISCAN(args) do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0) #define DHD_ARPOE(args) do {if (dhd_msg_level & DHD_ARPOE_VAL) printf args;} while (0) #define DHD_REORDER(args) do {if (dhd_msg_level & DHD_REORDER_VAL) printf args;} while (0) #define DHD_PNO(args) do {if (dhd_msg_level & DHD_PNO_VAL) printf args;} while (0) +#define DHD_RTT(args) do {if (dhd_msg_level & DHD_RTT_VAL) printf args;} while (0) +#define DHD_PKT_MON(args) do {if (dhd_msg_level & DHD_PKT_MON_VAL) printf args;} while (0) #ifdef DHD_LOG_DUMP +#ifndef DHD_EFI #define DHD_MSGTRACE_LOG(args) \ do { \ if (dhd_msg_level & DHD_MSGTRACE_VAL) { \ printf args; \ - dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ - dhd_log_dump_print args; \ + DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ + DHD_LOG_DUMP_WRITE args; \ } \ } while (0) #else +#define DHD_MSGTRACE_LOG(args) \ +do { \ + if (dhd_msg_level & DHD_MSGTRACE_VAL) { \ + dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \ + dhd_log_dump_print args; \ + } \ +} while (0) +#endif /* !DHD_EFI */ +#else #define DHD_MSGTRACE_LOG(args) do {if (dhd_msg_level & DHD_MSGTRACE_VAL) printf args;} while (0) #endif /* DHD_LOG_DUMP */ + +#if defined(DHD_LOG_DUMP) && defined(DHD_EFI) +#define 
DHD_FWLOG(args) DHD_MSGTRACE_LOG(args) +#else + #define DHD_FWLOG(args) do {if (dhd_msg_level & DHD_FWLOG_VAL) printf args;} while (0) -#define DHD_RTT(args) do {if (dhd_msg_level & DHD_RTT_VAL) printf args;} while (0) -#define DHD_IOV_INFO(args) do {if (dhd_msg_level & DHD_IOV_INFO_VAL) printf args;} while (0) +#endif /* DHD_LOG_DUMP & DHD_EFI */ +#define DHD_DBGIF(args) do {if (dhd_msg_level & DHD_DBGIF_VAL) printf args;} while (0) #ifdef DHD_LOG_DUMP -#define DHD_ERROR_EX(args) \ +#ifdef DHD_EFI +#define DHD_ERROR_MEM(args) \ do { \ if (dhd_msg_level & DHD_ERROR_VAL) { \ - dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ - dhd_log_dump_print args; \ + dhd_log_dump_print_drv("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \ + dhd_log_dump_print_drv args; \ } \ } while (0) +#define DHD_ERROR_EX(args) DHD_ERROR(args) #else -#define DHD_ERROR_EX(args) DHD_ERROR(args) +#define DHD_ERROR_MEM(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + if (dhd_msg_level & DHD_ERROR_MEM_VAL) { \ + printf args; \ + } \ + DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \ + DHD_LOG_DUMP_WRITE args; \ + } \ +} while (0) +#define DHD_ERROR_EX(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + printf args; \ + DHD_LOG_DUMP_WRITE_EX("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \ + DHD_LOG_DUMP_WRITE_EX args; \ + } \ +} while (0) +#endif /* DHD_EFI */ +#else +#define DHD_ERROR_MEM(args) DHD_ERROR(args) +#define DHD_ERROR_EX(args) DHD_ERROR(args) #endif /* DHD_LOG_DUMP */ #ifdef CUSTOMER_HW4_DEBUG @@ -120,20 +203,48 @@ do { \ #define DHD_INTR_ON() (dhd_msg_level & DHD_INTR_VAL) #define DHD_GLOM_ON() (dhd_msg_level & DHD_GLOM_VAL) #define DHD_EVENT_ON() (dhd_msg_level & DHD_EVENT_VAL) -#define DHD_BTA_ON() (dhd_msg_level & DHD_BTA_VAL) #define DHD_ISCAN_ON() (dhd_msg_level & DHD_ISCAN_VAL) #define DHD_ARPOE_ON() (dhd_msg_level & DHD_ARPOE_VAL) #define DHD_REORDER_ON() (dhd_msg_level & 
DHD_REORDER_VAL) #define DHD_NOCHECKDIED_ON() (dhd_msg_level & DHD_NOCHECKDIED_VAL) #define DHD_PNO_ON() (dhd_msg_level & DHD_PNO_VAL) +#define DHD_RTT_ON() (dhd_msg_level & DHD_RTT_VAL) +#define DHD_MSGTRACE_ON() (dhd_msg_level & DHD_MSGTRACE_VAL) #define DHD_FWLOG_ON() (dhd_msg_level & DHD_FWLOG_VAL) -#define DHD_IOV_INFO_ON() (dhd_msg_level & DHD_IOV_INFO_VAL) +#define DHD_DBGIF_ON() (dhd_msg_level & DHD_DBGIF_VAL) +#define DHD_PKT_MON_ON() (dhd_msg_level & DHD_PKT_MON_VAL) +#define DHD_PKT_MON_DUMP_ON() (dhd_msg_level & DHD_PKT_MON_DUMP_VAL) #else /* defined(BCMDBG) || defined(DHD_DEBUG) */ -#define DHD_ERROR(args) do {printf args;} while (0) +#if defined(DHD_EFI) +extern void dhd_log_dump_print_drv(const char *fmt, ...); +extern char *dhd_log_dump_get_timestamp(void); +#define DHD_ERROR(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + printf args; \ + dhd_log_dump_print_drv("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \ + dhd_log_dump_print_drv args; \ + } \ +} while (0) +#define DHD_INFO(args) \ +do { \ + if (dhd_msg_level & DHD_INFO_VAL) { \ + printf args; \ + dhd_log_dump_print_drv("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \ + dhd_log_dump_print_drv args; \ + } \ +} while (0) +#define DHD_TRACE(args) +#else /* DHD_EFI */ + +#define DHD_ERROR(args) do {if (dhd_msg_level & DHD_ERROR_VAL) \ + printf args;} while (0) #define DHD_TRACE(args) #define DHD_INFO(args) +#endif + #define DHD_DATA(args) #define DHD_CTL(args) #define DHD_TIMER(args) @@ -141,16 +252,55 @@ do { \ #define DHD_BYTES(args) #define DHD_INTR(args) #define DHD_GLOM(args) + +#if defined(DHD_EFI) && defined(DHD_LOG_DUMP) +#define DHD_EVENT(args) \ +do { \ + if (dhd_msg_level & DHD_EVENT_VAL) { \ + dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \ + dhd_log_dump_print args; \ + } \ +} while (0) +#else #define DHD_EVENT(args) -#define DHD_BTA(args) +#endif /* DHD_EFI && DHD_LOG_DUMP */ + #define DHD_ISCAN(args) #define 
DHD_ARPOE(args) #define DHD_REORDER(args) #define DHD_PNO(args) +#define DHD_RTT(args) +#define DHD_PKT_MON(args) + +#if defined(DHD_EFI) && defined(DHD_LOG_DUMP) +#define DHD_MSGTRACE_LOG(args) \ +do { \ + if (dhd_msg_level & DHD_MSGTRACE_VAL) { \ + dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \ + dhd_log_dump_print args; \ + } \ +} while (0) +#define DHD_FWLOG(args) DHD_MSGTRACE_LOG(args) +#else #define DHD_MSGTRACE_LOG(args) #define DHD_FWLOG(args) -#define DHD_IOV_INFO(args) -#define DHD_ERROR_EX(args) DHD_ERROR(args) +#endif /* DHD_EFI && DHD_LOG_DUMP */ + +#define DHD_DBGIF(args) + +#if defined(DHD_EFI) && defined(DHD_LOG_DUMP) +#define DHD_ERROR_MEM(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \ + dhd_log_dump_print args; \ + } \ +} while (0) +#define DHD_ERROR_EX(args) DHD_ERROR(args) +#else +#define DHD_ERROR_MEM(args) DHD_ERROR(args) +#define DHD_ERROR_EX(args) DHD_ERROR(args) +#endif /* DHD_EFI && DHD_LOG_DUMP */ #ifdef CUSTOMER_HW4_DEBUG #define DHD_TRACE_HW4 DHD_ERROR @@ -171,15 +321,17 @@ do { \ #define DHD_INTR_ON() 0 #define DHD_GLOM_ON() 0 #define DHD_EVENT_ON() 0 -#define DHD_BTA_ON() 0 #define DHD_ISCAN_ON() 0 #define DHD_ARPOE_ON() 0 #define DHD_REORDER_ON() 0 #define DHD_NOCHECKDIED_ON() 0 #define DHD_PNO_ON() 0 +#define DHD_RTT_ON() 0 +#define DHD_PKT_MON_ON() 0 +#define DHD_PKT_MON_DUMP_ON() 0 +#define DHD_MSGTRACE_ON() 0 #define DHD_FWLOG_ON() 0 -#define DHD_IOV_INFO_ON() 0 - +#define DHD_DBGIF_ON() 0 #endif #define DHD_LOG(args) diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_debug.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_debug.c new file mode 100644 index 000000000000..cd4c6f7e75f0 --- /dev/null +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_debug.c @@ -0,0 +1,2335 @@ +/* + * DHD debugability support + * + * <> + * + * Copyright (C) 1999-2017, Broadcom Corporation + * + * 
Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: dhd_debug.c 711908 2017-07-20 10:37:34Z $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#if defined(DHD_EFI) +#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) +#define container_of(ptr, type, member) \ + ((type *)((char *)(ptr) - offsetof(type, member))) +#endif + +#define DBGRING_FLUSH_THRESHOLD(ring) (ring->ring_size / 3) +#define RING_STAT_TO_STATUS(ring, status) \ + do { \ + strncpy(status.name, ring->name, \ + sizeof(status.name) - 1); \ + status.ring_id = ring->id; \ + status.ring_buffer_byte_size = ring->ring_size; \ + status.written_bytes = ring->stat.written_bytes; \ + status.written_records = ring->stat.written_records; \ + status.read_bytes = ring->stat.read_bytes; \ + status.verbose_level = ring->log_level; \ + } while (0) + +#define DHD_PKT_INFO DHD_ERROR +struct map_table { + uint16 fw_id; + uint16 host_id; + char *desc; +}; + +struct map_table event_map[] = { + {WLC_E_AUTH, WIFI_EVENT_AUTH_COMPLETE, "AUTH_COMPLETE"}, + {WLC_E_ASSOC, WIFI_EVENT_ASSOC_COMPLETE, "ASSOC_COMPLETE"}, + {TRACE_FW_AUTH_STARTED, WIFI_EVENT_FW_AUTH_STARTED, "AUTH STARTED"}, + {TRACE_FW_ASSOC_STARTED, WIFI_EVENT_FW_ASSOC_STARTED, "ASSOC STARTED"}, + {TRACE_FW_RE_ASSOC_STARTED, WIFI_EVENT_FW_RE_ASSOC_STARTED, "REASSOC STARTED"}, + {TRACE_G_SCAN_STARTED, WIFI_EVENT_G_SCAN_STARTED, "GSCAN STARTED"}, + {WLC_E_PFN_SCAN_COMPLETE, WIFI_EVENT_G_SCAN_COMPLETE, "GSCAN COMPLETE"}, + {WLC_E_DISASSOC, WIFI_EVENT_DISASSOCIATION_REQUESTED, "DIASSOC REQUESTED"}, + {WLC_E_REASSOC, WIFI_EVENT_RE_ASSOCIATION_REQUESTED, "REASSOC REQUESTED"}, + {TRACE_ROAM_SCAN_STARTED, WIFI_EVENT_ROAM_REQUESTED, "ROAM REQUESTED"}, + {WLC_E_BEACON_FRAME_RX, WIFI_EVENT_BEACON_RECEIVED, "BEACON Received"}, + {TRACE_ROAM_SCAN_STARTED, WIFI_EVENT_ROAM_SCAN_STARTED, "ROAM SCAN STARTED"}, + {TRACE_ROAM_SCAN_COMPLETE, WIFI_EVENT_ROAM_SCAN_COMPLETE, "ROAM SCAN COMPLETED"}, + {TRACE_ROAM_AUTH_STARTED, 
WIFI_EVENT_ROAM_AUTH_STARTED, "ROAM AUTH STARTED"}, + {WLC_E_AUTH, WIFI_EVENT_ROAM_AUTH_COMPLETE, "ROAM AUTH COMPLETED"}, + {TRACE_FW_RE_ASSOC_STARTED, WIFI_EVENT_ROAM_ASSOC_STARTED, "ROAM ASSOC STARTED"}, + {WLC_E_ASSOC, WIFI_EVENT_ROAM_ASSOC_COMPLETE, "ROAM ASSOC COMPLETED"}, + {TRACE_ROAM_SCAN_COMPLETE, WIFI_EVENT_ROAM_SCAN_COMPLETE, "ROAM SCAN COMPLETED"}, + {TRACE_BT_COEX_BT_SCO_START, WIFI_EVENT_BT_COEX_BT_SCO_START, "BT SCO START"}, + {TRACE_BT_COEX_BT_SCO_STOP, WIFI_EVENT_BT_COEX_BT_SCO_STOP, "BT SCO STOP"}, + {TRACE_BT_COEX_BT_SCAN_START, WIFI_EVENT_BT_COEX_BT_SCAN_START, "BT COEX SCAN START"}, + {TRACE_BT_COEX_BT_SCAN_STOP, WIFI_EVENT_BT_COEX_BT_SCAN_STOP, "BT COEX SCAN STOP"}, + {TRACE_BT_COEX_BT_HID_START, WIFI_EVENT_BT_COEX_BT_HID_START, "BT HID START"}, + {TRACE_BT_COEX_BT_HID_STOP, WIFI_EVENT_BT_COEX_BT_HID_STOP, "BT HID STOP"}, + {WLC_E_EAPOL_MSG, WIFI_EVENT_FW_EAPOL_FRAME_RECEIVED, "FW EAPOL PKT RECEIVED"}, + {TRACE_FW_EAPOL_FRAME_TRANSMIT_START, WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_START, + "FW EAPOL PKT TRANSMITED"}, + {TRACE_FW_EAPOL_FRAME_TRANSMIT_STOP, WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_STOP, + "FW EAPOL PKT TX STOPPED"}, + {TRACE_BLOCK_ACK_NEGOTIATION_COMPLETE, WIFI_EVENT_BLOCK_ACK_NEGOTIATION_COMPLETE, + "BLOCK ACK NEGO COMPLETED"}, +}; + +struct map_table event_tag_map[] = { + {TRACE_TAG_VENDOR_SPECIFIC, WIFI_TAG_VENDOR_SPECIFIC, "VENDOR SPECIFIC DATA"}, + {TRACE_TAG_BSSID, WIFI_TAG_BSSID, "BSSID"}, + {TRACE_TAG_ADDR, WIFI_TAG_ADDR, "ADDR_0"}, + {TRACE_TAG_SSID, WIFI_TAG_SSID, "SSID"}, + {TRACE_TAG_STATUS, WIFI_TAG_STATUS, "STATUS"}, + {TRACE_TAG_CHANNEL_SPEC, WIFI_TAG_CHANNEL_SPEC, "CHANSPEC"}, + {TRACE_TAG_WAKE_LOCK_EVENT, WIFI_TAG_WAKE_LOCK_EVENT, "WAKELOCK EVENT"}, + {TRACE_TAG_ADDR1, WIFI_TAG_ADDR1, "ADDR_1"}, + {TRACE_TAG_ADDR2, WIFI_TAG_ADDR2, "ADDR_2"}, + {TRACE_TAG_ADDR3, WIFI_TAG_ADDR3, "ADDR_3"}, + {TRACE_TAG_ADDR4, WIFI_TAG_ADDR4, "ADDR_4"}, + {TRACE_TAG_TSF, WIFI_TAG_TSF, "TSF"}, + {TRACE_TAG_IE, WIFI_TAG_IE, "802.11 IE"}, + 
{TRACE_TAG_INTERFACE, WIFI_TAG_INTERFACE, "INTERFACE"}, + {TRACE_TAG_REASON_CODE, WIFI_TAG_REASON_CODE, "REASON CODE"}, + {TRACE_TAG_RATE_MBPS, WIFI_TAG_RATE_MBPS, "RATE"}, +}; + +/* define log level per ring type */ +struct log_level_table fw_verbose_level_map[] = { + {1, EVENT_LOG_TAG_PCI_ERROR, EVENT_LOG_SET_BUS, "PCI_ERROR"}, + {1, EVENT_LOG_TAG_PCI_WARN, EVENT_LOG_SET_BUS, "PCI_WARN"}, + {2, EVENT_LOG_TAG_PCI_INFO, EVENT_LOG_SET_BUS, "PCI_INFO"}, + {3, EVENT_LOG_TAG_PCI_DBG, EVENT_LOG_SET_BUS, "PCI_DEBUG"}, + {3, EVENT_LOG_TAG_BEACON_LOG, EVENT_LOG_SET_WL, "BEACON_LOG"}, + {2, EVENT_LOG_TAG_WL_ASSOC_LOG, EVENT_LOG_SET_WL, "ASSOC_LOG"}, + {2, EVENT_LOG_TAG_WL_ROAM_LOG, EVENT_LOG_SET_WL, "ROAM_LOG"}, + {1, EVENT_LOG_TAG_TRACE_WL_INFO, EVENT_LOG_SET_WL, "WL_INFO"}, + {1, EVENT_LOG_TAG_TRACE_BTCOEX_INFO, EVENT_LOG_SET_WL, "BTCOEX_INFO"}, +#ifdef CUSTOMER_HW4_DEBUG + {3, EVENT_LOG_TAG_SCAN_WARN, EVENT_LOG_SET_WL, "SCAN_WARN"}, +#else + {1, EVENT_LOG_TAG_SCAN_WARN, EVENT_LOG_SET_WL, "SCAN_WARN"}, +#endif /* CUSTOMER_HW4_DEBUG */ + {1, EVENT_LOG_TAG_SCAN_ERROR, EVENT_LOG_SET_WL, "SCAN_ERROR"}, + {2, EVENT_LOG_TAG_SCAN_TRACE_LOW, EVENT_LOG_SET_WL, "SCAN_TRACE_LOW"}, + {2, EVENT_LOG_TAG_SCAN_TRACE_HIGH, EVENT_LOG_SET_WL, "SCAN_TRACE_HIGH"} +}; + +struct log_level_table fw_event_level_map[] = { + {1, EVENT_LOG_TAG_TRACE_WL_INFO, EVENT_LOG_SET_WL, "WL_INFO"}, + {1, EVENT_LOG_TAG_TRACE_BTCOEX_INFO, EVENT_LOG_SET_WL, "BTCOEX_INFO"}, +#ifdef CUSTOMER_HW4_DEBUG + {3, EVENT_LOG_TAG_BEACON_LOG, EVENT_LOG_SET_WL, "BEACON LOG"}, +#else + {2, EVENT_LOG_TAG_BEACON_LOG, EVENT_LOG_SET_WL, "BEACON LOG"}, +#endif /* CUSTOMER_HW4_DEBUG */ +}; + +struct map_table nan_event_map[] = { + {TRACE_NAN_CLUSTER_STARTED, NAN_EVENT_CLUSTER_STARTED, "NAN_CLUSTER_STARTED"}, + {TRACE_NAN_CLUSTER_JOINED, NAN_EVENT_CLUSTER_JOINED, "NAN_CLUSTER_JOINED"}, + {TRACE_NAN_CLUSTER_MERGED, NAN_EVENT_CLUSTER_MERGED, "NAN_CLUSTER_MERGED"}, + {TRACE_NAN_ROLE_CHANGED, NAN_EVENT_ROLE_CHANGED, "NAN_ROLE_CHANGED"}, 
+ {TRACE_NAN_SCAN_COMPLETE, NAN_EVENT_SCAN_COMPLETE, "NAN_SCAN_COMPLETE"}, + {TRACE_NAN_STATUS_CHNG, NAN_EVENT_STATUS_CHNG, "NAN_STATUS_CHNG"}, +}; + +struct log_level_table nan_event_level_map[] = { + {1, EVENT_LOG_TAG_NAN_ERROR, 0, "NAN_ERROR"}, + {2, EVENT_LOG_TAG_NAN_INFO, 0, "NAN_INFO"}, + {3, EVENT_LOG_TAG_NAN_DBG, 0, "NAN_DEBUG"}, +}; + +struct map_table nan_evt_tag_map[] = { + {TRACE_TAG_BSSID, WIFI_TAG_BSSID, "BSSID"}, + {TRACE_TAG_ADDR, WIFI_TAG_ADDR, "ADDR_0"}, +}; + +/* reference tab table */ +uint ref_tag_tbl[EVENT_LOG_TAG_MAX + 1] = {0}; + +typedef struct dhddbg_loglist_item { + dll_t list; + event_log_hdr_t *hdr; +} loglist_item_t; + +typedef struct dhbdbg_pending_item { + dll_t list; + dhd_dbg_ring_status_t ring_status; + dhd_dbg_ring_entry_t *ring_entry; +} pending_item_t; + +/* trace log entry header user space processing */ +struct tracelog_header { + int magic_num; + int buf_size; + int seq_num; +}; +#define TRACE_LOG_MAGIC_NUMBER 0xEAE47C06 + +int +dhd_dbg_ring_pull_single(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len, + bool strip_header) +{ + dhd_dbg_ring_t *ring; + dhd_dbg_ring_entry_t *r_entry; + uint32 rlen; + char *buf; + + if (!dhdp || !dhdp->dbg) { + return 0; + } + + ring = &dhdp->dbg->dbg_rings[ring_id]; + + if (ring->state != RING_ACTIVE) { + return 0; + } + + if (ring->rp == ring->wp) { + return 0; + } + + r_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf + ring->rp); + + /* Boundary Check */ + rlen = ENTRY_LENGTH(r_entry); + if ((ring->rp + rlen) > ring->ring_size) { + DHD_ERROR(("%s: entry len %d is out of boundary of ring size %d," + " current ring %d[%s] - rp=%d\n", __FUNCTION__, rlen, + ring->ring_size, ring->id, ring->name, ring->rp)); + return 0; + } + + if (strip_header) { + rlen = r_entry->len; + buf = (char *)r_entry + DBG_RING_ENTRY_SIZE; + } else { + rlen = ENTRY_LENGTH(r_entry); + buf = (char *)r_entry; + } + if (rlen > buf_len) { + DHD_ERROR(("%s: buf len %d is too small for entry len %d\n", + 
__FUNCTION__, buf_len, rlen)); + DHD_ERROR(("%s: ring %d[%s] - ring size=%d, wp=%d, rp=%d\n", + __FUNCTION__, ring->id, ring->name, ring->ring_size, + ring->wp, ring->rp)); + ASSERT(0); + return 0; + } + + memcpy(data, buf, rlen); + /* update ring context */ + ring->rp += ENTRY_LENGTH(r_entry); + /* skip padding if there is one */ + if (ring->tail_padded && ((ring->rp + ring->rem_len) == ring->ring_size)) { + DHD_DBGIF(("%s: RING%d[%s] Found padding, rp=%d, wp=%d\n", + __FUNCTION__, ring->id, ring->name, ring->rp, ring->wp)); + ring->rp = 0; + ring->tail_padded = FALSE; + ring->rem_len = 0; + } + if (ring->rp >= ring->ring_size) { + DHD_ERROR(("%s: RING%d[%s] rp pointed out of ring boundary," + " rp=%d, ring_size=%d\n", __FUNCTION__, ring->id, + ring->name, ring->rp, ring->ring_size)); + ASSERT(0); + } + ring->stat.read_bytes += ENTRY_LENGTH(r_entry); + DHD_DBGIF(("%s RING%d[%s]read_bytes %d, wp=%d, rp=%d\n", __FUNCTION__, + ring->id, ring->name, ring->stat.read_bytes, ring->wp, ring->rp)); + + return rlen; +} + +int +dhd_dbg_ring_pull(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len) +{ + int32 r_len, total_r_len = 0; + dhd_dbg_ring_t *ring; + + if (!dhdp || !dhdp->dbg) + return 0; + ring = &dhdp->dbg->dbg_rings[ring_id]; + if (ring->state != RING_ACTIVE) + return 0; + + while (buf_len > 0) { + r_len = dhd_dbg_ring_pull_single(dhdp, ring_id, data, buf_len, FALSE); + if (r_len == 0) + break; + data = (uint8 *)data + r_len; + buf_len -= r_len; + total_r_len += r_len; + } + + return total_r_len; +} + +int +dhd_dbg_ring_push(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_entry_t *hdr, void *data) +{ + unsigned long flags; + uint32 pending_len; + uint32 w_len; + uint32 avail_size; + dhd_dbg_ring_t *ring; + dhd_dbg_ring_entry_t *w_entry, *r_entry; + + if (!dhdp || !dhdp->dbg) { + return BCME_BADADDR; + } + + ring = &dhdp->dbg->dbg_rings[ring_id]; + + if (ring->state != RING_ACTIVE) { + return BCME_OK; + } + + flags = dhd_os_spin_lock(ring->lock); + + w_len = 
ENTRY_LENGTH(hdr); + + if (w_len > ring->ring_size) { + dhd_os_spin_unlock(ring->lock, flags); + return BCME_ERROR; + } + + /* Claim the space */ + do { + avail_size = DBG_RING_CHECK_WRITE_SPACE(ring->rp, ring->wp, ring->ring_size); + if (avail_size <= w_len) { + /* Prepare the space */ + if (ring->rp <= ring->wp) { + ring->tail_padded = TRUE; + ring->rem_len = ring->ring_size - ring->wp; + DHD_DBGIF(("%s: RING%d[%s] Insuffient tail space," + " rp=%d, wp=%d, rem_len=%d, ring_size=%d," + " avail_size=%d, w_len=%d\n", __FUNCTION__, + ring->id, ring->name, ring->rp, ring->wp, + ring->rem_len, ring->ring_size, avail_size, + w_len)); + + /* 0 pad insufficient tail space */ + memset((uint8 *)ring->ring_buf + ring->wp, 0, ring->rem_len); + if (ring->rp == ring->wp) { + ring->rp = 0; + } + ring->wp = 0; + } else { + /* Not enough space for new entry, free some up */ + r_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf + + ring->rp); + ring->rp += ENTRY_LENGTH(r_entry); + /* skip padding if there is one */ + if (ring->tail_padded && + ((ring->rp + ring->rem_len) == ring->ring_size)) { + DHD_DBGIF(("%s: RING%d[%s] Found padding," + " avail_size=%d, w_len=%d\n", __FUNCTION__, + ring->id, ring->name, avail_size, w_len)); + ring->rp = 0; + ring->tail_padded = FALSE; + ring->rem_len = 0; + } + if (ring->rp >= ring->ring_size) { + DHD_ERROR(("%s: RING%d[%s] rp points out of boundary," + " ring->rp = %d, ring->ring_size=%d\n", + __FUNCTION__, ring->id, ring->name, ring->rp, + ring->ring_size)); + ASSERT(0); + } + ring->stat.read_bytes += ENTRY_LENGTH(r_entry); + DHD_DBGIF(("%s: RING%d[%s] read_bytes %d, wp=%d, rp=%d\n", + __FUNCTION__, ring->id, ring->name, ring->stat.read_bytes, + ring->wp, ring->rp)); + } + } else { + break; + } + } while (TRUE); + + w_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf + ring->wp); + /* header */ + memcpy(w_entry, hdr, DBG_RING_ENTRY_SIZE); + w_entry->len = hdr->len; + /* payload */ + memcpy((char *)w_entry + DBG_RING_ENTRY_SIZE, 
data, w_entry->len); + /* update write pointer */ + ring->wp += w_len; + if (ring->wp >= ring->ring_size) { + DHD_ERROR(("%s: RING%d[%s] wp pointed out of ring boundary, " + "wp=%d, ring_size=%d\n", __FUNCTION__, ring->id, + ring->name, ring->wp, ring->ring_size)); + ASSERT(0); + } + /* update statistics */ + ring->stat.written_records++; + ring->stat.written_bytes += w_len; + DHD_DBGIF(("%s : RING%d[%s] written_records %d, written_bytes %d, read_bytes=%d," + " ring->threshold=%d, wp=%d, rp=%d\n", __FUNCTION__, ring->id, ring->name, + ring->stat.written_records, ring->stat.written_bytes, ring->stat.read_bytes, + ring->threshold, ring->wp, ring->rp)); + + /* Calculate current pending size */ + if (ring->stat.written_bytes > ring->stat.read_bytes) { + pending_len = ring->stat.written_bytes - ring->stat.read_bytes; + } else if (ring->stat.written_bytes < ring->stat.read_bytes) { + pending_len = 0xFFFFFFFF - ring->stat.read_bytes + ring->stat.written_bytes; + } else { + pending_len = 0; + } + + /* if the current pending size is bigger than threshold */ + if (ring->threshold > 0 && + (pending_len >= ring->threshold) && ring->sched_pull) { + dhdp->dbg->pullreq(dhdp->dbg->private, ring->id); + ring->sched_pull = FALSE; + } + dhd_os_spin_unlock(ring->lock, flags); + return BCME_OK; +} + +static int +dhd_dbg_msgtrace_seqchk(uint32 *prev, uint32 cur) +{ + /* normal case including wrap around */ + if ((cur == 0 && *prev == 0xFFFFFFFF) || ((cur - *prev) == 1)) { + goto done; + } else if (cur == *prev) { + DHD_EVENT(("%s duplicate trace\n", __FUNCTION__)); + return -1; + } else if (cur > *prev) { + DHD_EVENT(("%s lost %d packets\n", __FUNCTION__, cur - *prev)); + } else { + DHD_EVENT(("%s seq out of order, dhd %d, dongle %d\n", + __FUNCTION__, *prev, cur)); + } +done: + *prev = cur; + return 0; +} + +#ifndef MACOSX_DHD +static void +dhd_dbg_msgtrace_msg_parser(void *event_data) +{ + msgtrace_hdr_t *hdr; + char *data, *s; + static uint32 seqnum_prev = 0; + + hdr = 
(msgtrace_hdr_t *)event_data; + data = (char *)event_data + MSGTRACE_HDRLEN; + + /* There are 2 bytes available at the end of data */ + data[ntoh16(hdr->len)] = '\0'; + + if (ntoh32(hdr->discarded_bytes) || ntoh32(hdr->discarded_printf)) { + DHD_DBGIF(("WLC_E_TRACE: [Discarded traces in dongle -->" + "discarded_bytes %d discarded_printf %d]\n", + ntoh32(hdr->discarded_bytes), + ntoh32(hdr->discarded_printf))); + } + + if (dhd_dbg_msgtrace_seqchk(&seqnum_prev, ntoh32(hdr->seqnum))) + return; + + /* Display the trace buffer. Advance from + * \n to \n to avoid display big + * printf (issue with Linux printk ) + */ + while (*data != '\0' && (s = strstr(data, "\n")) != NULL) { + *s = '\0'; + DHD_FWLOG(("[FWLOG] %s\n", data)); + data = s+1; + } + if (*data) + DHD_FWLOG(("[FWLOG] %s", data)); +} +#endif /* MACOSX_DHD */ +#ifdef SHOW_LOGTRACE +static const uint8 * +event_get_tlv(uint16 id, const char* tlvs, uint tlvs_len) +{ + const uint8 *pos = (const uint8 *)tlvs; + const uint8 *end = pos + tlvs_len; + const tlv_log *tlv; + int rest; + + while (pos + 1 < end) { + if (pos + 4 + pos[1] > end) + break; + tlv = (const tlv_log *) pos; + if (tlv->tag == id) + return pos; + rest = tlv->len % 4; /* padding values */ + pos += 4 + tlv->len + rest; + } + return NULL; +} + +#define DATA_UNIT_FOR_LOG_CNT 4 +/* #pragma used as a WAR to fix build failure, + * ignore dropping of 'const' qualifier in tlv_data assignment + * this pragma disables the warning only for the following function + */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif +static int +dhd_dbg_nan_event_handler(dhd_pub_t *dhdp, event_log_hdr_t *hdr, uint32 *data) +{ + int ret = BCME_OK; + wl_event_log_id_ver_t nan_hdr; + log_nan_event_t *evt_payload; + uint16 evt_payload_len = 0, tot_payload_len = 0; + dhd_dbg_ring_entry_t msg_hdr; + bool evt_match = FALSE; + event_log_hdr_t *ts_hdr; + uint32 *ts_data; + char *tlvs, *dest_tlvs; + 
tlv_log *tlv_data; + int tlv_len = 0; + int i = 0, evt_idx = 0; + char eaddr_buf[ETHER_ADDR_STR_LEN]; + + BCM_REFERENCE(eaddr_buf); + + nan_hdr.t = *data; + DHD_DBGIF(("%s: version %u event %x\n", __FUNCTION__, nan_hdr.version, + nan_hdr.event)); + + if (nan_hdr.version != DIAG_VERSION) { + DHD_ERROR(("Event payload version %u mismatch with current version %u\n", + nan_hdr.version, DIAG_VERSION)); + return BCME_VERSION; + } + + /* nan event log should at least contain a wl_event_log_id_ver_t + * header and a arm cycle count + */ + if (hdr->count < NAN_EVENT_LOG_MIN_LENGTH) { + return BCME_BADLEN; + } + + memset(&msg_hdr, 0, sizeof(dhd_dbg_ring_entry_t)); + ts_hdr = (event_log_hdr_t *)((uint8 *)data - sizeof(event_log_hdr_t)); + if (ts_hdr->tag == EVENT_LOG_TAG_TS) { + ts_data = (uint32 *)ts_hdr - ts_hdr->count; + msg_hdr.timestamp = (uint64)ts_data[0]; + msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP; + } + msg_hdr.type = DBG_RING_ENTRY_NAN_EVENT_TYPE; + for (i = 0; i < ARRAYSIZE(nan_event_map); i++) { + if (nan_event_map[i].fw_id == nan_hdr.event) { + evt_match = TRUE; + evt_idx = i; + break; + } + } + if (evt_match) { + DHD_DBGIF(("%s : event (%s)\n", __FUNCTION__, nan_event_map[evt_idx].desc)); + /* payload length for nan event data */ + evt_payload_len = sizeof(log_nan_event_t) + + (hdr->count - 2) * DATA_UNIT_FOR_LOG_CNT; + if ((evt_payload = MALLOC(dhdp->osh, evt_payload_len)) == NULL) { + DHD_ERROR(("Memory allocation failed for nan evt log (%u)\n", + evt_payload_len)); + return BCME_NOMEM; + } + evt_payload->version = NAN_EVENT_VERSION; + evt_payload->event = nan_event_map[evt_idx].host_id; + dest_tlvs = (char *)evt_payload->tlvs; + tot_payload_len = sizeof(log_nan_event_t); + tlvs = (char *)(&data[1]); + tlv_len = (hdr->count - 2) * DATA_UNIT_FOR_LOG_CNT; + for (i = 0; i < ARRAYSIZE(nan_evt_tag_map); i++) { + tlv_data = (tlv_log *)event_get_tlv(nan_evt_tag_map[i].fw_id, + tlvs, tlv_len); + if (tlv_data) { + DHD_DBGIF(("NAN evt tlv.tag(%s), tlv.len : 
%d, tlv.data : ", + nan_evt_tag_map[i].desc, tlv_data->len)); + memcpy(dest_tlvs, tlv_data, sizeof(tlv_log) + tlv_data->len); + tot_payload_len += tlv_data->len + sizeof(tlv_log); + switch (tlv_data->tag) { + case TRACE_TAG_BSSID: + case TRACE_TAG_ADDR: + DHD_DBGIF(("%s\n", + bcm_ether_ntoa( + (const struct ether_addr *)tlv_data->value, + eaddr_buf))); + break; + default: + if (DHD_DBGIF_ON()) { + prhex(NULL, &tlv_data->value[0], + tlv_data->len); + } + break; + } + dest_tlvs += tlv_data->len + sizeof(tlv_log); + } + } + msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_BINARY; + msg_hdr.len = tot_payload_len; + dhd_dbg_ring_push(dhdp, NAN_EVENT_RING_ID, &msg_hdr, evt_payload); + MFREE(dhdp->osh, evt_payload, evt_payload_len); + } + return ret; +} + +static int +dhd_dbg_custom_evnt_handler(dhd_pub_t *dhdp, event_log_hdr_t *hdr, uint32 *data) +{ + int i = 0, match_idx = 0; + int payload_len, tlv_len; + uint16 tot_payload_len = 0; + int ret = BCME_OK; + int log_level; + wl_event_log_id_ver_t wl_log_id; + dhd_dbg_ring_entry_t msg_hdr; + log_conn_event_t *event_data; + bool evt_match = FALSE; + event_log_hdr_t *ts_hdr; + uint32 *ts_data; + char *tlvs, *dest_tlvs; + tlv_log *tlv_data; + static uint64 ts_saved = 0; + char eabuf[ETHER_ADDR_STR_LEN]; + char chanbuf[CHANSPEC_STR_LEN]; + + BCM_REFERENCE(eabuf); + BCM_REFERENCE(chanbuf); + /* get a event type and version */ + wl_log_id.t = *data; + if (wl_log_id.version != DIAG_VERSION) + return BCME_VERSION; + + /* custom event log should at least contain a wl_event_log_id_ver_t + * header and a arm cycle count + */ + if (hdr->count < NAN_EVENT_LOG_MIN_LENGTH) { + return BCME_BADLEN; + } + + ts_hdr = (event_log_hdr_t *)((uint8 *)data - sizeof(event_log_hdr_t)); + if (ts_hdr->tag == EVENT_LOG_TAG_TS) { + ts_data = (uint32 *)ts_hdr - ts_hdr->count; + ts_saved = (uint64)ts_data[0]; + } + memset(&msg_hdr, 0, sizeof(dhd_dbg_ring_entry_t)); + msg_hdr.timestamp = ts_saved; + + DHD_DBGIF(("Android Event ver %d, payload %d words, ts %llu\n", 
+ (*data >> 16), hdr->count - 1, ts_saved)); + + /* Perform endian convertion */ + for (i = 0; i < hdr->count; i++) { + /* *(data + i) = ntoh32(*(data + i)); */ + DHD_DATA(("%08x ", *(data + i))); + } + DHD_DATA(("\n")); + msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP; + msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_BINARY; + msg_hdr.type = DBG_RING_ENTRY_EVENT_TYPE; + + /* convert the data to log_conn_event_t format */ + for (i = 0; i < ARRAYSIZE(event_map); i++) { + if (event_map[i].fw_id == wl_log_id.event) { + evt_match = TRUE; + match_idx = i; + break; + } + } + if (evt_match) { + log_level = dhdp->dbg->dbg_rings[FW_EVENT_RING_ID].log_level; + /* filter the data based on log_level */ + for (i = 0; i < ARRAYSIZE(fw_event_level_map); i++) { + if ((fw_event_level_map[i].tag == hdr->tag) && + (fw_event_level_map[i].log_level > log_level)) { + return BCME_OK; + } + } + DHD_DBGIF(("%s : event (%s)\n", __FUNCTION__, event_map[match_idx].desc)); + /* get the payload length for event data (skip : log header + timestamp) */ + payload_len = sizeof(log_conn_event_t) + DATA_UNIT_FOR_LOG_CNT * (hdr->count - 2); + event_data = MALLOC(dhdp->osh, payload_len); + if (!event_data) { + DHD_ERROR(("failed to allocate the log_conn_event_t with length(%d)\n", + payload_len)); + return BCME_NOMEM; + } + event_data->event = event_map[match_idx].host_id; + dest_tlvs = (char *)event_data->tlvs; + tot_payload_len = sizeof(log_conn_event_t); + tlvs = (char *)(&data[1]); + tlv_len = (hdr->count - 2) * DATA_UNIT_FOR_LOG_CNT; + for (i = 0; i < ARRAYSIZE(event_tag_map); i++) { + tlv_data = (tlv_log *)event_get_tlv(event_tag_map[i].fw_id, + tlvs, tlv_len); + if (tlv_data) { + DHD_DBGIF(("tlv.tag(%s), tlv.len : %d, tlv.data : ", + event_tag_map[i].desc, tlv_data->len)); + memcpy(dest_tlvs, tlv_data, sizeof(tlv_log) + tlv_data->len); + tot_payload_len += tlv_data->len + sizeof(tlv_log); + switch (tlv_data->tag) { + case TRACE_TAG_BSSID: + case TRACE_TAG_ADDR: + case TRACE_TAG_ADDR1: + case 
TRACE_TAG_ADDR2: + case TRACE_TAG_ADDR3: + case TRACE_TAG_ADDR4: + DHD_DBGIF(("%s\n", + bcm_ether_ntoa((const struct ether_addr *)tlv_data->value, + eabuf))); + break; + case TRACE_TAG_SSID: + DHD_DBGIF(("%s\n", tlv_data->value)); + break; + case TRACE_TAG_STATUS: + DHD_DBGIF(("%d\n", ltoh32_ua(&tlv_data->value[0]))); + break; + case TRACE_TAG_REASON_CODE: + DHD_DBGIF(("%d\n", ltoh16_ua(&tlv_data->value[0]))); + break; + case TRACE_TAG_RATE_MBPS: + DHD_DBGIF(("%d Kbps\n", + ltoh16_ua(&tlv_data->value[0]) * 500)); + break; + case TRACE_TAG_CHANNEL_SPEC: + DHD_DBGIF(("%s\n", + wf_chspec_ntoa( + ltoh16_ua(&tlv_data->value[0]), chanbuf))); + break; + default: + if (DHD_DBGIF_ON()) { + prhex(NULL, &tlv_data->value[0], tlv_data->len); + } + } + dest_tlvs += tlv_data->len + sizeof(tlv_log); + } + } + msg_hdr.len = tot_payload_len; + dhd_dbg_ring_push(dhdp, FW_EVENT_RING_ID, &msg_hdr, event_data); + MFREE(dhdp->osh, event_data, payload_len); + } + return ret; +} +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + +/* To identify format of types %Ns where N >= 0 is a number */ +bool +check_valid_string_format(char *curr_ptr) +{ + char *next_ptr; + if ((next_ptr = bcmstrstr(curr_ptr, "s")) != NULL) { + /* Default %s format */ + if (curr_ptr == next_ptr) { + return TRUE; + } + + /* Verify each charater between '%' and 's' is a valid number */ + while (curr_ptr < next_ptr) { + if (bcm_isdigit(*curr_ptr) == FALSE) { + return FALSE; + } + curr_ptr++; + } + + return TRUE; + } else { + return FALSE; + } +} + +#define MAX_NO_OF_ARG 16 +#define FMTSTR_SIZE 132 +#define ROMSTR_SIZE 200 +#define SIZE_LOC_STR 50 +static uint64 verboselog_ts_saved = 0; +static void +dhd_dbg_verboselog_handler(dhd_pub_t *dhdp, event_log_hdr_t *hdr, + void *raw_event_ptr) +{ + event_log_hdr_t *ts_hdr; + uint32 *log_ptr = (uint32 *)hdr - hdr->count; + char fmtstr_loc_buf[ROMSTR_SIZE] = { 0 }; + uint32 rom_str_len = 0; + uint32 *ts_data; + + if (!raw_event_ptr) { + 
return; + } + + /* Get time stamp if it's updated */ + ts_hdr = (event_log_hdr_t *)((char *)log_ptr - sizeof(event_log_hdr_t)); + if (ts_hdr->tag == EVENT_LOG_TAG_TS) { + ts_data = (uint32 *)ts_hdr - ts_hdr->count; + verboselog_ts_saved = (uint64)ts_data[0]; + DHD_MSGTRACE_LOG(("EVENT_LOG_TS[0x%08x]: SYS:%08x CPU:%08x\n", + ts_data[ts_hdr->count - 1], ts_data[0], ts_data[1])); + } + + if (hdr->tag == EVENT_LOG_TAG_ROM_PRINTF) { + rom_str_len = (hdr->count - 1) * sizeof(uint32); + if (rom_str_len >= (ROMSTR_SIZE -1)) + rom_str_len = ROMSTR_SIZE - 1; + + /* copy all ascii data for ROM printf to local string */ + memcpy(fmtstr_loc_buf, log_ptr, rom_str_len); + /* add end of line at last */ + fmtstr_loc_buf[rom_str_len] = '\0'; + + DHD_MSGTRACE_LOG(("EVENT_LOG_ROM[0x%08x]: %s", + log_ptr[hdr->count - 1], fmtstr_loc_buf)); + + /* Add newline if missing */ + if (fmtstr_loc_buf[strlen(fmtstr_loc_buf) - 1] != '\n') + DHD_MSGTRACE_LOG(("\n")); + + return; + } + + if (hdr->tag == EVENT_LOG_TAG_MSCHPROFILE || hdr->tag == EVENT_LOG_TAG_MSCHPROFILE_TLV) { + wl_mschdbg_verboselog_handler(dhdp, raw_event_ptr, hdr->tag, log_ptr); + return; + } + + /* print the message out in a logprint */ + dhd_dbg_verboselog_printf(dhdp, hdr, raw_event_ptr, log_ptr); +} + +void +dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, event_log_hdr_t *hdr, + void *raw_event_ptr, uint32 *log_ptr) +{ + dhd_event_log_t *raw_event = (dhd_event_log_t *)raw_event_ptr; + uint16 count; + int log_level, id; + char fmtstr_loc_buf[ROMSTR_SIZE] = { 0 }; + char (*str_buf)[SIZE_LOC_STR] = NULL; + char *str_tmpptr = NULL; + uint32 addr = 0; + typedef union { + uint32 val; + char * addr; + } u_arg; + u_arg arg[MAX_NO_OF_ARG] = {{0}}; + char *c_ptr = NULL; + + BCM_REFERENCE(arg); + + if (!raw_event) { + return; + } + + /* print the message out in a logprint */ + if (!(raw_event->fmts) || hdr->fmt_num == 0xffff) { + if (dhdp->dbg) { + log_level = dhdp->dbg->dbg_rings[FW_VERBOSE_RING_ID].log_level; + for (id = 0; id < 
ARRAYSIZE(fw_verbose_level_map); id++) { + if ((fw_verbose_level_map[id].tag == hdr->tag) && + (fw_verbose_level_map[id].log_level > log_level)) + return; + } + } + + DHD_EVENT(("%d.%d EL:tag=%d len=%d fmt=0x%x", + (uint32)verboselog_ts_saved / 1000, + (uint32)verboselog_ts_saved % 1000, + hdr->tag, + hdr->count, + hdr->fmt_num)); + + for (count = 0; count < (hdr->count-1); count++) { + if (count % 8 == 0) + DHD_EVENT(("\n\t%08x", log_ptr[count])); + else + DHD_EVENT((" %08x", log_ptr[count])); + } + DHD_EVENT(("\n")); + + return; + } + + str_buf = MALLOCZ(dhdp->osh, (MAX_NO_OF_ARG * SIZE_LOC_STR)); + if (!str_buf) { + DHD_ERROR(("%s: malloc failed str_buf\n", __FUNCTION__)); + return; + } + + if ((hdr->fmt_num >> 2) < raw_event->num_fmts) { + if (hdr->tag == EVENT_LOG_TAG_MSCHPROFILE) { + snprintf(fmtstr_loc_buf, FMTSTR_SIZE, "%s", + raw_event->fmts[hdr->fmt_num >> 2]); + hdr->count++; + } else { + snprintf(fmtstr_loc_buf, FMTSTR_SIZE, "CONSOLE_E: %6d.%3d %s", + log_ptr[hdr->count-1]/1000, (log_ptr[hdr->count - 1] % 1000), + raw_event->fmts[hdr->fmt_num >> 2]); + } + c_ptr = fmtstr_loc_buf; + } else { + DHD_ERROR(("%s: fmt number out of range \n", __FUNCTION__)); + goto exit; + } + + for (count = 0; count < (hdr->count - 1); count++) { + if (c_ptr != NULL) + if ((c_ptr = bcmstrstr(c_ptr, "%")) != NULL) + c_ptr++; + + if (c_ptr != NULL) { + if (check_valid_string_format(c_ptr)) { + if ((raw_event->raw_sstr) && + ((log_ptr[count] > raw_event->rodata_start) && + (log_ptr[count] < raw_event->rodata_end))) { + /* ram static string */ + addr = log_ptr[count] - raw_event->rodata_start; + str_tmpptr = raw_event->raw_sstr + addr; + memcpy(str_buf[count], str_tmpptr, + SIZE_LOC_STR); + str_buf[count][SIZE_LOC_STR-1] = '\0'; + arg[count].addr = str_buf[count]; + } else if ((raw_event->rom_raw_sstr) && + ((log_ptr[count] > + raw_event->rom_rodata_start) && + (log_ptr[count] < + raw_event->rom_rodata_end))) { + /* rom static string */ + addr = log_ptr[count] - 
raw_event->rom_rodata_start; + str_tmpptr = raw_event->rom_raw_sstr + addr; + memcpy(str_buf[count], str_tmpptr, + SIZE_LOC_STR); + str_buf[count][SIZE_LOC_STR-1] = '\0'; + arg[count].addr = str_buf[count]; + } else { + /* + * Dynamic string OR + * No data for static string. + * So store all string's address as string. + */ + snprintf(str_buf[count], SIZE_LOC_STR, + "(s)0x%x", log_ptr[count]); + arg[count].addr = str_buf[count]; + } + } else { + /* Other than string */ + arg[count].val = log_ptr[count]; + } + } + } + + /* Print FW logs */ + DHD_FWLOG((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3], + arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10], + arg[11], arg[12], arg[13], arg[14], arg[15])); + +exit: + MFREE(dhdp->osh, str_buf, (MAX_NO_OF_ARG * SIZE_LOC_STR)); +} + +static void +dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, void *event_data, + void *raw_event_ptr, uint datalen) +{ + msgtrace_hdr_t *hdr; + char *data; + int id; + uint32 log_hdr_len = sizeof(event_log_hdr_t); + uint32 log_pyld_len; + static uint32 seqnum_prev = 0; + event_log_hdr_t *log_hdr; + bool msg_processed = FALSE; + uint32 *log_ptr = NULL; + dll_t list_head, *cur; + loglist_item_t *log_item; + int32 nan_evt_ring_log_level = 0; + dhd_dbg_ring_entry_t msg_hdr; + char *logbuf; + struct tracelog_header *logentry_header; + + /* log trace event consists of: + * msgtrace header + * event log block header + * event log payload + */ + if (datalen <= MSGTRACE_HDRLEN + EVENT_LOG_BLOCK_HDRLEN) { + return; + } + hdr = (msgtrace_hdr_t *)event_data; + data = (char *)event_data + MSGTRACE_HDRLEN; + datalen -= MSGTRACE_HDRLEN; + + if (dhd_dbg_msgtrace_seqchk(&seqnum_prev, ntoh32(hdr->seqnum))) + return; + + /* Save the whole message to event log ring */ + memset(&msg_hdr, 0, sizeof(dhd_dbg_ring_entry_t)); + logbuf = VMALLOC(dhdp->osh, sizeof(*logentry_header) + datalen); + if (logbuf == NULL) + return; + logentry_header = (struct tracelog_header *)logbuf; + logentry_header->magic_num = 
TRACE_LOG_MAGIC_NUMBER; + logentry_header->buf_size = datalen; + logentry_header->seq_num = hdr->seqnum; + msg_hdr.type = DBG_RING_ENTRY_DATA_TYPE; + + if ((sizeof(*logentry_header) + datalen) > PAYLOAD_MAX_LEN) { + DHD_ERROR(("%s:Payload len=%u exceeds max len\n", __FUNCTION__, + ((uint)sizeof(*logentry_header) + datalen))); + VMFREE(dhdp->osh, logbuf, sizeof(*logentry_header) + datalen); + return; + } + + msg_hdr.len = sizeof(*logentry_header) + datalen; + memcpy(logbuf + sizeof(*logentry_header), data, datalen); + dhd_dbg_ring_push(dhdp, FW_VERBOSE_RING_ID, &msg_hdr, logbuf); + VMFREE(dhdp->osh, logbuf, sizeof(*logentry_header) + datalen); + + /* Print sequence number, originating set and length of received + * event log buffer. Refer to event log buffer structure in + * event_log.h + */ + DHD_MSGTRACE_LOG(("EVENT_LOG_HDR[0x%x]: Set: 0x%08x length = %d\n", + ltoh16(*((uint16 *)(data+2))), ltoh32(*((uint32 *)(data + 4))), + ltoh16(*((uint16 *)(data))))); + data += EVENT_LOG_BLOCK_HDRLEN; + datalen -= EVENT_LOG_BLOCK_HDRLEN; + + /* start parsing from the tail of packet + * Sameple format of a meessage + * 001d3c54 00000064 00000064 001d3c54 001dba08 035d6ce1 0c540639 + * 001d3c54 00000064 00000064 035d6d89 0c580439 + * 0x0c580439 -- 39 is tag, 04 is count, 580c is format number + * all these uint32 values comes in reverse order as group as EL data + * while decoding we can only parse from last to first + * |<- datalen ->| + * |----(payload and maybe more logs)----|event_log_hdr_t| + * data log_hdr + */ + dll_init(&list_head); + while (datalen > log_hdr_len) { + log_hdr = (event_log_hdr_t *)(data + datalen - log_hdr_len); + /* skip zero padding at end of frame */ + if (log_hdr->tag == EVENT_LOG_TAG_NULL) { + datalen -= log_hdr_len; + continue; + } + /* Check argument count, any event log should contain at least + * one argument (4 bytes) for arm cycle count and up to 16 + * arguments when the format is valid + */ + if (log_hdr->count == 0) { + break; + } + if 
((log_hdr->count > MAX_NO_OF_ARG) && (log_hdr->fmt_num != 0xffff)) { + break; + } + + log_pyld_len = log_hdr->count * DATA_UNIT_FOR_LOG_CNT; + /* log data should not cross the event data boundary */ + if ((char *)log_hdr - data < log_pyld_len) + break; + /* skip 4 bytes time stamp packet */ + if (log_hdr->tag == EVENT_LOG_TAG_TS) { + datalen -= log_pyld_len + log_hdr_len; + continue; + } + if (!(log_item = MALLOC(dhdp->osh, sizeof(*log_item)))) { + DHD_ERROR(("%s allocating log list item failed\n", + __FUNCTION__)); + break; + } + log_item->hdr = log_hdr; + dll_insert(&log_item->list, &list_head); + datalen -= (log_pyld_len + log_hdr_len); + } + + while (!dll_empty(&list_head)) { + msg_processed = FALSE; + cur = dll_head_p(&list_head); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + log_item = (loglist_item_t *)container_of(cur, loglist_item_t, list); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + log_hdr = log_item->hdr; + log_ptr = (uint32 *)log_hdr - log_hdr->count; + dll_delete(cur); + MFREE(dhdp->osh, log_item, sizeof(*log_item)); + + /* Before DHD debugability is implemented WLC_E_TRACE had been + * used to carry verbose logging from firmware. We need to + * be able to handle those messages even without a initialized + * debug layer. 
+ */ + if (dhdp->dbg) { + /* check the data for NAN event ring; keeping first as small table */ + /* process only user configured to log */ + nan_evt_ring_log_level = dhdp->dbg->dbg_rings[NAN_EVENT_RING_ID].log_level; + if (dhdp->dbg->dbg_rings[NAN_EVENT_RING_ID].log_level) { + for (id = 0; id < ARRAYSIZE(nan_event_level_map); id++) { + if (nan_event_level_map[id].tag == log_hdr->tag) { + /* dont process if tag log level is greater + * than ring log level + */ + if (nan_event_level_map[id].log_level > + nan_evt_ring_log_level) { + msg_processed = TRUE; + break; + } + /* In case of BCME_VERSION error, + * this is not NAN event type data + */ + if (dhd_dbg_nan_event_handler(dhdp, + log_hdr, log_ptr) != BCME_VERSION) { + msg_processed = TRUE; + } + break; + } + } + } + if (!msg_processed) { + /* check the data for event ring */ + for (id = 0; id < ARRAYSIZE(fw_event_level_map); id++) { + if (fw_event_level_map[id].tag == log_hdr->tag) { + /* In case of BCME_VERSION error, + * this is not event type data + */ + if (dhd_dbg_custom_evnt_handler(dhdp, + log_hdr, log_ptr) != BCME_VERSION) { + msg_processed = TRUE; + } + break; + } + } + } + } + if (!msg_processed) + dhd_dbg_verboselog_handler(dhdp, log_hdr, raw_event_ptr); + + } +} +#else /* !SHOW_LOGTRACE */ +static INLINE void dhd_dbg_verboselog_handler(dhd_pub_t *dhdp, + event_log_hdr_t *hdr, void *raw_event_ptr) {}; +static INLINE void dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, + void *event_data, void *raw_event_ptr, uint datalen) {}; +#endif /* SHOW_LOGTRACE */ +#ifndef MACOSX_DHD +void +dhd_dbg_trace_evnt_handler(dhd_pub_t *dhdp, void *event_data, + void *raw_event_ptr, uint datalen) +{ + msgtrace_hdr_t *hdr; + + hdr = (msgtrace_hdr_t *)event_data; + + if (hdr->version != MSGTRACE_VERSION) { + DHD_DBGIF(("%s unsupported MSGTRACE version, dhd %d, dongle %d\n", + __FUNCTION__, MSGTRACE_VERSION, hdr->version)); + return; + } + + if (hdr->trace_type == MSGTRACE_HDR_TYPE_MSG) + dhd_dbg_msgtrace_msg_parser(event_data); 
+ else if (hdr->trace_type == MSGTRACE_HDR_TYPE_LOG) + dhd_dbg_msgtrace_log_parser(dhdp, event_data, raw_event_ptr, datalen); +} +#endif /* MACOSX_DHD */ +static int +dhd_dbg_ring_init(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring, uint16 id, uint8 *name, + uint32 ring_sz, int section) +{ + void *buf; + unsigned long flags; +#ifdef CONFIG_DHD_USE_STATIC_BUF + buf = DHD_OS_PREALLOC(dhdp, section, ring_sz); +#else + buf = MALLOCZ(dhdp->osh, ring_sz); +#endif + if (!buf) + return BCME_NOMEM; + + ring->lock = dhd_os_spin_lock_init(dhdp->osh); + + flags = dhd_os_spin_lock(ring->lock); + ring->id = id; + strncpy(ring->name, name, DBGRING_NAME_MAX); + ring->name[DBGRING_NAME_MAX - 1] = 0; + ring->ring_size = ring_sz; + ring->wp = ring->rp = 0; + ring->ring_buf = buf; + ring->threshold = DBGRING_FLUSH_THRESHOLD(ring); + ring->state = RING_SUSPEND; + ring->sched_pull = TRUE; + ring->rem_len = 0; + dhd_os_spin_unlock(ring->lock, flags); + + return BCME_OK; +} + +static void +dhd_dbg_ring_deinit(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring) +{ + void *buf; + uint32 ring_sz; + unsigned long flags; + + if (!ring->ring_buf) + return; + + flags = dhd_os_spin_lock(ring->lock); + ring->id = 0; + ring->name[0] = 0; + ring_sz = ring->ring_size; + ring->ring_size = 0; + ring->wp = ring->rp = 0; + buf = ring->ring_buf; + ring->ring_buf = NULL; + memset(&ring->stat, 0, sizeof(ring->stat)); + ring->threshold = 0; + ring->state = RING_STOP; + dhd_os_spin_unlock(ring->lock, flags); + + dhd_os_spin_lock_deinit(dhdp->osh, ring->lock); +#ifndef CONFIG_DHD_USE_STATIC_BUF + MFREE(dhdp->osh, buf, ring_sz); +#endif +} + +uint8 +dhd_dbg_find_sets_by_tag(uint16 tag) +{ + uint i; + uint8 sets = 0; + + for (i = 0; i < ARRAYSIZE(fw_verbose_level_map); i++) { + if (fw_verbose_level_map[i].tag == tag) { + sets |= fw_verbose_level_map[i].sets; + } + } + + for (i = 0; i < ARRAYSIZE(fw_event_level_map); i++) { + if (fw_event_level_map[i].tag == tag) { + sets |= fw_event_level_map[i].sets; + } + } + + return sets; +} + 
+/* + * dhd_dbg_set_event_log_tag : modify the state of an event log tag + */ +void +dhd_dbg_set_event_log_tag(dhd_pub_t *dhdp, uint16 tag, uint8 set) +{ + wl_el_tag_params_t pars; + char *cmd = "event_log_tag_control"; + char iovbuf[WLC_IOCTL_SMLEN] = { 0 }; + int ret; + + memset(&pars, 0, sizeof(pars)); + pars.tag = tag; + pars.set = dhd_dbg_find_sets_by_tag(tag); + pars.flags = set ? EVENT_LOG_TAG_FLAG_LOG : EVENT_LOG_TAG_FLAG_NONE; + + if (!bcm_mkiovar(cmd, (char *)&pars, sizeof(pars), iovbuf, sizeof(iovbuf))) { + DHD_ERROR(("%s mkiovar failed\n", __FUNCTION__)); + return; + } + + ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + if (ret) { + DHD_ERROR(("%s set log tag iovar failed %d\n", __FUNCTION__, ret)); + } +} + +int +dhd_dbg_set_configuration(dhd_pub_t *dhdp, int ring_id, int log_level, int flags, uint32 threshold) +{ + dhd_dbg_ring_t *ring; + uint8 set = 1; + unsigned long lock_flags; + int i, array_len = 0; + struct log_level_table *log_level_tbl = NULL; + if (!dhdp || !dhdp->dbg) + return BCME_BADADDR; + + ring = &dhdp->dbg->dbg_rings[ring_id]; + + if (ring->state == RING_STOP) + return BCME_UNSUPPORTED; + + lock_flags = dhd_os_spin_lock(ring->lock); + if (log_level == 0) + ring->state = RING_SUSPEND; + else + ring->state = RING_ACTIVE; + ring->log_level = log_level; + + ring->threshold = MIN(threshold, DBGRING_FLUSH_THRESHOLD(ring)); + dhd_os_spin_unlock(ring->lock, lock_flags); + if (log_level > 0) + set = TRUE; + + if (ring->id == FW_EVENT_RING_ID) { + log_level_tbl = fw_event_level_map; + array_len = ARRAYSIZE(fw_event_level_map); + } else if (ring->id == FW_VERBOSE_RING_ID) { + log_level_tbl = fw_verbose_level_map; + array_len = ARRAYSIZE(fw_verbose_level_map); + } else if (ring->id == NAN_EVENT_RING_ID) { + log_level_tbl = nan_event_level_map; + array_len = ARRAYSIZE(nan_event_level_map); + } + + for (i = 0; i < array_len; i++) { + if (log_level == 0 || (log_level_tbl[i].log_level > log_level)) { + /* clear the 
reference per ring */ + ref_tag_tbl[log_level_tbl[i].tag] &= ~(1 << ring_id); + } else { + /* set the reference per ring */ + ref_tag_tbl[log_level_tbl[i].tag] |= (1 << ring_id); + } + set = (ref_tag_tbl[log_level_tbl[i].tag])? 1 : 0; + DHD_DBGIF(("%s TAG(%s) is %s for the ring(%s)\n", __FUNCTION__, + log_level_tbl[i].desc, (set)? "SET" : "CLEAR", ring->name)); + dhd_dbg_set_event_log_tag(dhdp, log_level_tbl[i].tag, set); + } + return BCME_OK; +} + +/* +* dhd_dbg_get_ring_status : get the ring status from the coresponding ring buffer +* Return: An error code or 0 on success. +*/ + +int +dhd_dbg_get_ring_status(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_status_t *dbg_ring_status) +{ + int ret = BCME_OK; + int id = 0; + dhd_dbg_t *dbg; + dhd_dbg_ring_t *dbg_ring; + dhd_dbg_ring_status_t ring_status; + if (!dhdp || !dhdp->dbg) + return BCME_BADADDR; + dbg = dhdp->dbg; + + memset(&ring_status, 0, sizeof(dhd_dbg_ring_status_t)); + for (id = DEBUG_RING_ID_INVALID + 1; id < DEBUG_RING_ID_MAX; id++) { + dbg_ring = &dbg->dbg_rings[id]; + if (VALID_RING(dbg_ring->id) && (dbg_ring->id == ring_id)) { + RING_STAT_TO_STATUS(dbg_ring, ring_status); + *dbg_ring_status = ring_status; + break; + } + } + if (!VALID_RING(id)) { + DHD_ERROR(("%s : cannot find the ring_id : %d\n", __FUNCTION__, ring_id)); + ret = BCME_NOTFOUND; + } + return ret; +} + +/* +* dhd_dbg_find_ring_id : return ring_id based on ring_name +* Return: An invalid ring id for failure or valid ring id on success. 
+*/ + +int +dhd_dbg_find_ring_id(dhd_pub_t *dhdp, char *ring_name) +{ + int id; + dhd_dbg_t *dbg; + dhd_dbg_ring_t *ring; + + if (!dhdp || !dhdp->dbg) + return BCME_BADADDR; + + dbg = dhdp->dbg; + for (id = DEBUG_RING_ID_INVALID + 1; id < DEBUG_RING_ID_MAX; id++) { + ring = &dbg->dbg_rings[id]; + if (!strncmp((char *)ring->name, ring_name, sizeof(ring->name) - 1)) + break; + } + return id; +} + +/* +* dhd_dbg_get_priv : get the private data of dhd dbugability module +* Return : An NULL on failure or valid data address +*/ +void * +dhd_dbg_get_priv(dhd_pub_t *dhdp) +{ + if (!dhdp || !dhdp->dbg) + return NULL; + return dhdp->dbg->private; +} + +/* +* dhd_dbg_start : start and stop All of Ring buffers +* Return: An error code or 0 on success. +*/ +int +dhd_dbg_start(dhd_pub_t *dhdp, bool start) +{ + int ret = BCME_OK; + int ring_id; + dhd_dbg_t *dbg; + dhd_dbg_ring_t *dbg_ring; + if (!dhdp) + return BCME_BADARG; + dbg = dhdp->dbg; + + for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) { + dbg_ring = &dbg->dbg_rings[ring_id]; + if (!start) { + if (VALID_RING(dbg_ring->id)) { + /* Initialize the information for the ring */ + dbg_ring->state = RING_SUSPEND; + dbg_ring->log_level = 0; + dbg_ring->rp = dbg_ring->wp = 0; + dbg_ring->threshold = 0; + memset(&dbg_ring->stat, 0, sizeof(struct ring_statistics)); + memset(dbg_ring->ring_buf, 0, dbg_ring->ring_size); + } + } + } + return ret; +} + +/* + * dhd_dbg_send_urgent_evt: send the health check evt to Upper layer + * + * Return: An error code or 0 on success. 
+ */ + +int +dhd_dbg_send_urgent_evt(dhd_pub_t *dhdp, const void *data, const uint32 len) +{ + dhd_dbg_t *dbg; + int ret = BCME_OK; + if (!dhdp || !dhdp->dbg) + return BCME_BADADDR; + + dbg = dhdp->dbg; + if (dbg->urgent_notifier) { + dbg->urgent_notifier(dhdp, data, len); + } + return ret; +} + +#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING) +uint32 +__dhd_dbg_pkt_hash(uintptr_t pkt, uint32 pktid) +{ + uint32 __pkt; + uint32 __pktid; + + __pkt = ((int)pkt) >= 0 ? (2 * pkt) : (-2 * pkt - 1); + __pktid = ((int)pktid) >= 0 ? (2 * pktid) : (-2 * pktid - 1); + + return (__pkt >= __pktid ? (__pkt * __pkt + __pkt + __pktid) : + (__pkt + __pktid * __pktid)); +} + +#define __TIMESPEC_TO_US(ts) \ + (((uint32)(ts).tv_sec * USEC_PER_SEC) + ((ts).tv_nsec / NSEC_PER_USEC)) + +uint32 +__dhd_dbg_driver_ts_usec(void) +{ + struct timespec ts; + + get_monotonic_boottime(&ts); + return ((uint32)(__TIMESPEC_TO_US(ts))); +} + +wifi_tx_packet_fate +__dhd_dbg_map_tx_status_to_pkt_fate(uint16 status) +{ + wifi_tx_packet_fate pkt_fate; + + switch (status) { + case WLFC_CTL_PKTFLAG_DISCARD: + pkt_fate = TX_PKT_FATE_ACKED; + break; + case WLFC_CTL_PKTFLAG_D11SUPPRESS: + /* intensional fall through */ + case WLFC_CTL_PKTFLAG_WLSUPPRESS: + pkt_fate = TX_PKT_FATE_FW_QUEUED; + break; + case WLFC_CTL_PKTFLAG_TOSSED_BYWLC: + pkt_fate = TX_PKT_FATE_FW_DROP_INVALID; + break; + case WLFC_CTL_PKTFLAG_DISCARD_NOACK: + pkt_fate = TX_PKT_FATE_SENT; + break; + default: + pkt_fate = TX_PKT_FATE_FW_DROP_OTHER; + break; + } + + return pkt_fate; +} +#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */ + +#ifdef DBG_PKT_MON +static int +__dhd_dbg_free_tx_pkts(dhd_pub_t *dhdp, dhd_dbg_tx_info_t *tx_pkts, + uint16 pkt_count) +{ + uint16 count; + + DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__)); + count = 0; + while ((count < pkt_count) && tx_pkts) { + if (tx_pkts->info.pkt) + PKTFREE(dhdp->osh, tx_pkts->info.pkt, TRUE); + tx_pkts++; + count++; + } + + return BCME_OK; +} + +static int 
+__dhd_dbg_free_rx_pkts(dhd_pub_t *dhdp, dhd_dbg_rx_info_t *rx_pkts, + uint16 pkt_count) +{ + uint16 count; + + DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__)); + count = 0; + while ((count < pkt_count) && rx_pkts) { + if (rx_pkts->info.pkt) + PKTFREE(dhdp->osh, rx_pkts->info.pkt, TRUE); + rx_pkts++; + count++; + } + + return BCME_OK; +} + +void +__dhd_dbg_dump_pkt_info(dhd_pub_t *dhdp, dhd_dbg_pkt_info_t *info) +{ + if (DHD_PKT_MON_DUMP_ON()) { + DHD_PKT_MON(("payload type = %d\n", info->payload_type)); + DHD_PKT_MON(("driver ts = %u\n", info->driver_ts)); + DHD_PKT_MON(("firmware ts = %u\n", info->firmware_ts)); + DHD_PKT_MON(("packet hash = %u\n", info->pkt_hash)); + DHD_PKT_MON(("packet length = %zu\n", info->pkt_len)); + DHD_PKT_MON(("packet address = %p\n", info->pkt)); + DHD_PKT_MON(("packet data = \n")); + if (DHD_PKT_MON_ON()) { + prhex(NULL, PKTDATA(dhdp->osh, info->pkt), info->pkt_len); + } + } +} + +void +__dhd_dbg_dump_tx_pkt_info(dhd_pub_t *dhdp, dhd_dbg_tx_info_t *tx_pkt, + uint16 count) +{ + if (DHD_PKT_MON_DUMP_ON()) { + DHD_PKT_MON(("\nTX (count: %d)\n", ++count)); + DHD_PKT_MON(("packet fate = %d\n", tx_pkt->fate)); + __dhd_dbg_dump_pkt_info(dhdp, &tx_pkt->info); + } +} + +void +__dhd_dbg_dump_rx_pkt_info(dhd_pub_t *dhdp, dhd_dbg_rx_info_t *rx_pkt, + uint16 count) +{ + if (DHD_PKT_MON_DUMP_ON()) { + DHD_PKT_MON(("\nRX (count: %d)\n", ++count)); + DHD_PKT_MON(("packet fate = %d\n", rx_pkt->fate)); + __dhd_dbg_dump_pkt_info(dhdp, &rx_pkt->info); + } +} + +int +dhd_dbg_attach_pkt_monitor(dhd_pub_t *dhdp, + dbg_mon_tx_pkts_t tx_pkt_mon, + dbg_mon_tx_status_t tx_status_mon, + dbg_mon_rx_pkts_t rx_pkt_mon) +{ + + dhd_dbg_tx_report_t *tx_report = NULL; + dhd_dbg_rx_report_t *rx_report = NULL; + dhd_dbg_tx_info_t *tx_pkts = NULL; + dhd_dbg_rx_info_t *rx_pkts = NULL; + dhd_dbg_pkt_mon_state_t tx_pkt_state; + dhd_dbg_pkt_mon_state_t tx_status_state; + dhd_dbg_pkt_mon_state_t rx_pkt_state; + gfp_t kflags; + uint32 alloc_len; + int ret = BCME_OK; + 
unsigned long flags; + + DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__)); + if (!dhdp || !dhdp->dbg) { + DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__, + dhdp, (dhdp ? dhdp->dbg : NULL))); + return -EINVAL; + } + + DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags); + tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state; + tx_status_state = dhdp->dbg->pkt_mon.tx_pkt_state; + rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state; + + if (PKT_MON_ATTACHED(tx_pkt_state) || PKT_MON_ATTACHED(tx_status_state) || + PKT_MON_ATTACHED(rx_pkt_state)) { + DHD_PKT_MON(("%s(): packet monitor is already attached, " + "tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n", + __FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state)); + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + /* return success as the intention was to initialize packet monitor */ + return BCME_OK; + } + + kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; + + /* allocate and initialize tx packet monitoring */ + alloc_len = sizeof(*tx_report); + tx_report = (dhd_dbg_tx_report_t *)kzalloc(alloc_len, kflags); + if (unlikely(!tx_report)) { + DHD_ERROR(("%s(): could not allocate memory for - " + "dhd_dbg_tx_report_t\n", __FUNCTION__)); + ret = -ENOMEM; + goto fail; + } + + alloc_len = (sizeof(*tx_pkts) * MAX_FATE_LOG_LEN); + tx_pkts = (dhd_dbg_tx_info_t *)kzalloc(alloc_len, kflags); + if (unlikely(!tx_pkts)) { + DHD_ERROR(("%s(): could not allocate memory for - " + "dhd_dbg_tx_info_t\n", __FUNCTION__)); + ret = -ENOMEM; + goto fail; + } + dhdp->dbg->pkt_mon.tx_report = tx_report; + dhdp->dbg->pkt_mon.tx_report->tx_pkts = tx_pkts; + dhdp->dbg->pkt_mon.tx_pkt_mon = tx_pkt_mon; + dhdp->dbg->pkt_mon.tx_status_mon = tx_status_mon; + dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_ATTACHED; + dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_ATTACHED; + + /* allocate and initialze rx packet monitoring */ + alloc_len = sizeof(*rx_report); + rx_report = (dhd_dbg_rx_report_t *)kzalloc(alloc_len, kflags); + if 
(unlikely(!rx_report)) { + DHD_ERROR(("%s(): could not allocate memory for - " + "dhd_dbg_rx_report_t\n", __FUNCTION__)); + ret = -ENOMEM; + goto fail; + } + + alloc_len = (sizeof(*rx_pkts) * MAX_FATE_LOG_LEN); + rx_pkts = (dhd_dbg_rx_info_t *)kzalloc(alloc_len, kflags); + if (unlikely(!rx_pkts)) { + DHD_ERROR(("%s(): could not allocate memory for - " + "dhd_dbg_rx_info_t\n", __FUNCTION__)); + ret = -ENOMEM; + goto fail; + } + dhdp->dbg->pkt_mon.rx_report = rx_report; + dhdp->dbg->pkt_mon.rx_report->rx_pkts = rx_pkts; + dhdp->dbg->pkt_mon.rx_pkt_mon = rx_pkt_mon; + dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_ATTACHED; + + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + DHD_PKT_MON(("%s(): packet monitor attach succeeded\n", __FUNCTION__)); + return ret; + +fail: + /* tx packet monitoring */ + if (tx_pkts) { + kfree(tx_pkts); + } + if (tx_report) { + kfree(tx_report); + } + dhdp->dbg->pkt_mon.tx_report = NULL; + dhdp->dbg->pkt_mon.tx_report->tx_pkts = NULL; + dhdp->dbg->pkt_mon.tx_pkt_mon = NULL; + dhdp->dbg->pkt_mon.tx_status_mon = NULL; + dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_DETACHED; + dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_DETACHED; + + /* rx packet monitoring */ + if (rx_pkts) { + kfree(rx_pkts); + } + if (rx_report) { + kfree(rx_report); + } + dhdp->dbg->pkt_mon.rx_report = NULL; + dhdp->dbg->pkt_mon.rx_report->rx_pkts = NULL; + dhdp->dbg->pkt_mon.rx_pkt_mon = NULL; + dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_DETACHED; + + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + DHD_ERROR(("%s(): packet monitor attach failed\n", __FUNCTION__)); + return ret; +} + +int +dhd_dbg_start_pkt_monitor(dhd_pub_t *dhdp) +{ + dhd_dbg_tx_report_t *tx_report; + dhd_dbg_rx_report_t *rx_report; + dhd_dbg_pkt_mon_state_t tx_pkt_state; + dhd_dbg_pkt_mon_state_t tx_status_state; + dhd_dbg_pkt_mon_state_t rx_pkt_state; + unsigned long flags; + + DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__)); + if (!dhdp || !dhdp->dbg) { + DHD_PKT_MON(("%s(): dhdp=%p, 
dhdp->dbg=%p\n", __FUNCTION__, + dhdp, (dhdp ? dhdp->dbg : NULL))); + return -EINVAL; + } + + DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags); + tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state; + tx_status_state = dhdp->dbg->pkt_mon.tx_status_state; + rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state; + + if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state) || + PKT_MON_DETACHED(rx_pkt_state)) { + DHD_PKT_MON(("%s(): packet monitor is not yet enabled, " + "tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n", + __FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state)); + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + return -EINVAL; + } + + dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STARTING; + dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STARTING; + dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STARTING; + + tx_report = dhdp->dbg->pkt_mon.tx_report; + rx_report = dhdp->dbg->pkt_mon.rx_report; + if (!tx_report || !rx_report) { + DHD_PKT_MON(("%s(): tx_report=%p, rx_report=%p\n", + __FUNCTION__, tx_report, rx_report)); + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + return -EINVAL; + } + + + tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state; + tx_status_state = dhdp->dbg->pkt_mon.tx_status_state; + rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state; + + /* Safe to free packets as state pkt_state is STARTING */ + __dhd_dbg_free_tx_pkts(dhdp, tx_report->tx_pkts, tx_report->pkt_pos); + + __dhd_dbg_free_rx_pkts(dhdp, rx_report->rx_pkts, rx_report->pkt_pos); + + /* reset array postion */ + tx_report->pkt_pos = 0; + tx_report->status_pos = 0; + dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STARTED; + dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STARTED; + + rx_report->pkt_pos = 0; + dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STARTED; + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + + DHD_PKT_MON(("%s(): packet monitor started\n", __FUNCTION__)); + return BCME_OK; +} + +int +dhd_dbg_monitor_tx_pkts(dhd_pub_t *dhdp, void *pkt, uint32 pktid) +{ + 
dhd_dbg_tx_report_t *tx_report; + dhd_dbg_tx_info_t *tx_pkts; + dhd_dbg_pkt_mon_state_t tx_pkt_state; + uint32 pkt_hash, driver_ts; + uint16 pkt_pos; + unsigned long flags; + + if (!dhdp || !dhdp->dbg) { + DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__, + dhdp, (dhdp ? dhdp->dbg : NULL))); + return -EINVAL; + } + + DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags); + tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state; + if (PKT_MON_STARTED(tx_pkt_state)) { + tx_report = dhdp->dbg->pkt_mon.tx_report; + pkt_pos = tx_report->pkt_pos; + + if (!PKT_MON_PKT_FULL(pkt_pos)) { + tx_pkts = tx_report->tx_pkts; + pkt_hash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid); + driver_ts = __dhd_dbg_driver_ts_usec(); + + tx_pkts[pkt_pos].info.pkt = PKTDUP(dhdp->osh, pkt); + tx_pkts[pkt_pos].info.pkt_len = PKTLEN(dhdp->osh, pkt); + tx_pkts[pkt_pos].info.pkt_hash = pkt_hash; + tx_pkts[pkt_pos].info.driver_ts = driver_ts; + tx_pkts[pkt_pos].info.firmware_ts = 0U; + tx_pkts[pkt_pos].info.payload_type = FRAME_TYPE_ETHERNET_II; + tx_pkts[pkt_pos].fate = TX_PKT_FATE_DRV_QUEUED; + + tx_report->pkt_pos++; + } else { + dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STOPPED; + DHD_PKT_MON(("%s(): tx pkt logging stopped, reached " + "max limit\n", __FUNCTION__)); + } + } + + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + return BCME_OK; +} + +int +dhd_dbg_monitor_tx_status(dhd_pub_t *dhdp, void *pkt, uint32 pktid, + uint16 status) +{ + dhd_dbg_tx_report_t *tx_report; + dhd_dbg_tx_info_t *tx_pkt; + dhd_dbg_pkt_mon_state_t tx_status_state; + wifi_tx_packet_fate pkt_fate; + uint32 pkt_hash, temp_hash; + uint16 pkt_pos, status_pos; + int16 count; + bool found = FALSE; + unsigned long flags; + + if (!dhdp || !dhdp->dbg) { + DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__, + dhdp, (dhdp ? 
dhdp->dbg : NULL))); + return -EINVAL; + } + + DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags); + tx_status_state = dhdp->dbg->pkt_mon.tx_status_state; + if (PKT_MON_STARTED(tx_status_state)) { + tx_report = dhdp->dbg->pkt_mon.tx_report; + pkt_pos = tx_report->pkt_pos; + status_pos = tx_report->status_pos; + + if (!PKT_MON_STATUS_FULL(pkt_pos, status_pos)) { + pkt_hash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid); + pkt_fate = __dhd_dbg_map_tx_status_to_pkt_fate(status); + + /* best bet (in-order tx completion) */ + count = status_pos; + tx_pkt = (((dhd_dbg_tx_info_t *)tx_report->tx_pkts) + status_pos); + while ((count < pkt_pos) && tx_pkt) { + temp_hash = tx_pkt->info.pkt_hash; + if (temp_hash == pkt_hash) { + tx_pkt->fate = pkt_fate; + tx_report->status_pos++; + found = TRUE; + break; + } + tx_pkt++; + count++; + } + + /* search until beginning (handles out-of-order completion) */ + if (!found) { + count = status_pos - 1; + tx_pkt = (((dhd_dbg_tx_info_t *)tx_report->tx_pkts) + count); + while ((count >= 0) && tx_pkt) { + temp_hash = tx_pkt->info.pkt_hash; + if (temp_hash == pkt_hash) { + tx_pkt->fate = pkt_fate; + tx_report->status_pos++; + found = TRUE; + break; + } + tx_pkt--; + count--; + } + + if (!found) { + /* still couldn't match tx_status */ + DHD_ERROR(("%s(): couldn't match tx_status, pkt_pos=%u, " + "status_pos=%u, pkt_fate=%u\n", __FUNCTION__, + pkt_pos, status_pos, pkt_fate)); + } + } + } else { + dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STOPPED; + DHD_PKT_MON(("%s(): tx_status logging stopped, reached " + "max limit\n", __FUNCTION__)); + } + } + + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + return BCME_OK; +} + +int +dhd_dbg_monitor_rx_pkts(dhd_pub_t *dhdp, void *pkt) +{ + dhd_dbg_rx_report_t *rx_report; + dhd_dbg_rx_info_t *rx_pkts; + dhd_dbg_pkt_mon_state_t rx_pkt_state; + uint32 driver_ts; + uint16 pkt_pos; + unsigned long flags; + + if (!dhdp || !dhdp->dbg) { + DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__, + dhdp, 
(dhdp ? dhdp->dbg : NULL))); + return -EINVAL; + } + + DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags); + rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state; + if (PKT_MON_STARTED(rx_pkt_state)) { + rx_report = dhdp->dbg->pkt_mon.rx_report; + pkt_pos = rx_report->pkt_pos; + + if (!PKT_MON_PKT_FULL(pkt_pos)) { + rx_pkts = rx_report->rx_pkts; + driver_ts = __dhd_dbg_driver_ts_usec(); + + rx_pkts[pkt_pos].info.pkt = PKTDUP(dhdp->osh, pkt); + rx_pkts[pkt_pos].info.pkt_len = PKTLEN(dhdp->osh, pkt); + rx_pkts[pkt_pos].info.pkt_hash = 0U; + rx_pkts[pkt_pos].info.driver_ts = driver_ts; + rx_pkts[pkt_pos].info.firmware_ts = 0U; + rx_pkts[pkt_pos].info.payload_type = FRAME_TYPE_ETHERNET_II; + rx_pkts[pkt_pos].fate = RX_PKT_FATE_SUCCESS; + + rx_report->pkt_pos++; + } else { + dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STOPPED; + DHD_PKT_MON(("%s(): rx pkt logging stopped, reached " + "max limit\n", __FUNCTION__)); + } + } + + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + return BCME_OK; +} + +int +dhd_dbg_stop_pkt_monitor(dhd_pub_t *dhdp) +{ + dhd_dbg_pkt_mon_state_t tx_pkt_state; + dhd_dbg_pkt_mon_state_t tx_status_state; + dhd_dbg_pkt_mon_state_t rx_pkt_state; + unsigned long flags; + + DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__)); + if (!dhdp || !dhdp->dbg) { + DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__, + dhdp, (dhdp ? 
dhdp->dbg : NULL))); + return -EINVAL; + } + + DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags); + tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state; + tx_status_state = dhdp->dbg->pkt_mon.tx_status_state; + rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state; + + if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state) || + PKT_MON_DETACHED(rx_pkt_state)) { + DHD_PKT_MON(("%s(): packet monitor is not yet enabled, " + "tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n", + __FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state)); + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + return -EINVAL; + } + dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_STOPPED; + dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_STOPPED; + dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_STOPPED; + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + + DHD_PKT_MON(("%s(): packet monitor stopped\n", __FUNCTION__)); + return BCME_OK; +} + +#define __COPY_TO_USER(to, from, n) \ + do { \ + int __ret; \ + __ret = copy_to_user((void __user *)(to), (void *)(from), \ + (unsigned long)(n)); \ + if (unlikely(__ret)) { \ + DHD_ERROR(("%s():%d: copy_to_user failed, ret=%d\n", \ + __FUNCTION__, __LINE__, __ret)); \ + return __ret; \ + } \ + } while (0); + +int +dhd_dbg_monitor_get_tx_pkts(dhd_pub_t *dhdp, void __user *user_buf, + uint16 req_count, uint16 *resp_count) +{ + dhd_dbg_tx_report_t *tx_report; + dhd_dbg_tx_info_t *tx_pkt; + wifi_tx_report_t *ptr; + compat_wifi_tx_report_t *cptr; + dhd_dbg_pkt_mon_state_t tx_pkt_state; + dhd_dbg_pkt_mon_state_t tx_status_state; + uint16 pkt_count, count; + unsigned long flags; + + DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__)); + BCM_REFERENCE(ptr); + BCM_REFERENCE(cptr); + + if (!dhdp || !dhdp->dbg) { + DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__, + dhdp, (dhdp ? 
dhdp->dbg : NULL))); + return -EINVAL; + } + + DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags); + tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state; + tx_status_state = dhdp->dbg->pkt_mon.tx_status_state; + if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state)) { + DHD_PKT_MON(("%s(): packet monitor is not yet enabled, " + "tx_pkt_state=%d, tx_status_state=%d\n", __FUNCTION__, + tx_pkt_state, tx_status_state)); + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + return -EINVAL; + } + + count = 0; + tx_report = dhdp->dbg->pkt_mon.tx_report; + tx_pkt = tx_report->tx_pkts; + pkt_count = MIN(req_count, tx_report->status_pos); + +#ifdef CONFIG_COMPAT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)) + if (in_compat_syscall()) +#else + if (is_compat_task()) +#endif + { + cptr = (compat_wifi_tx_report_t *)user_buf; + while ((count < pkt_count) && tx_pkt && cptr) { + compat_wifi_tx_report_t *comp_ptr = compat_ptr((uintptr_t) cptr); + compat_dhd_dbg_pkt_info_t compat_tx_pkt; + __dhd_dbg_dump_tx_pkt_info(dhdp, tx_pkt, count); + __COPY_TO_USER(&comp_ptr->fate, &tx_pkt->fate, sizeof(tx_pkt->fate)); + + compat_tx_pkt.payload_type = tx_pkt->info.payload_type; + compat_tx_pkt.pkt_len = tx_pkt->info.pkt_len; + compat_tx_pkt.driver_ts = tx_pkt->info.driver_ts; + compat_tx_pkt.firmware_ts = tx_pkt->info.firmware_ts; + compat_tx_pkt.pkt_hash = tx_pkt->info.pkt_hash; + __COPY_TO_USER(&comp_ptr->frame_inf.payload_type, + &compat_tx_pkt.payload_type, + OFFSETOF(compat_dhd_dbg_pkt_info_t, pkt_hash)); + __COPY_TO_USER(comp_ptr->frame_inf.frame_content.ethernet_ii, + PKTDATA(dhdp->osh, tx_pkt->info.pkt), tx_pkt->info.pkt_len); + + cptr++; + tx_pkt++; + count++; + } + } else +#endif /* CONFIG_COMPAT */ + + { + ptr = (wifi_tx_report_t *)user_buf; + while ((count < pkt_count) && tx_pkt && ptr) { + __dhd_dbg_dump_tx_pkt_info(dhdp, tx_pkt, count); + __COPY_TO_USER(&ptr->fate, &tx_pkt->fate, sizeof(tx_pkt->fate)); + __COPY_TO_USER(&ptr->frame_inf.payload_type, + 
&tx_pkt->info.payload_type, + OFFSETOF(dhd_dbg_pkt_info_t, pkt_hash)); + __COPY_TO_USER(ptr->frame_inf.frame_content.ethernet_ii, + PKTDATA(dhdp->osh, tx_pkt->info.pkt), tx_pkt->info.pkt_len); + + ptr++; + tx_pkt++; + count++; + } + } + *resp_count = pkt_count; + + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + if (!pkt_count) { + DHD_ERROR(("%s(): no tx_status in tx completion messages, " + "make sure that 'd11status' is enabled in firmware, " + "status_pos=%u\n", __FUNCTION__, pkt_count)); + } + + return BCME_OK; +} + +int +dhd_dbg_monitor_get_rx_pkts(dhd_pub_t *dhdp, void __user *user_buf, + uint16 req_count, uint16 *resp_count) +{ + dhd_dbg_rx_report_t *rx_report; + dhd_dbg_rx_info_t *rx_pkt; + wifi_rx_report_t *ptr; + compat_wifi_rx_report_t *cptr; + dhd_dbg_pkt_mon_state_t rx_pkt_state; + uint16 pkt_count, count; + unsigned long flags; + + DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__)); + BCM_REFERENCE(ptr); + BCM_REFERENCE(cptr); + + if (!dhdp || !dhdp->dbg) { + DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__, + dhdp, (dhdp ? 
dhdp->dbg : NULL))); + return -EINVAL; + } + + DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags); + rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state; + if (PKT_MON_DETACHED(rx_pkt_state)) { + DHD_PKT_MON(("%s(): packet fetch is not allowed , " + "rx_pkt_state=%d\n", __FUNCTION__, rx_pkt_state)); + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + return -EINVAL; + } + + count = 0; + rx_report = dhdp->dbg->pkt_mon.rx_report; + rx_pkt = rx_report->rx_pkts; + pkt_count = MIN(req_count, rx_report->pkt_pos); + +#ifdef CONFIG_COMPAT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)) + if (in_compat_syscall()) +#else + if (is_compat_task()) +#endif + { + cptr = (compat_wifi_rx_report_t *)user_buf; + while ((count < pkt_count) && rx_pkt && cptr) { + compat_wifi_rx_report_t *comp_ptr = compat_ptr((uintptr_t) cptr); + compat_dhd_dbg_pkt_info_t compat_rx_pkt; + __dhd_dbg_dump_rx_pkt_info(dhdp, rx_pkt, count); + __COPY_TO_USER(&comp_ptr->fate, &rx_pkt->fate, sizeof(rx_pkt->fate)); + + compat_rx_pkt.payload_type = rx_pkt->info.payload_type; + compat_rx_pkt.pkt_len = rx_pkt->info.pkt_len; + compat_rx_pkt.driver_ts = rx_pkt->info.driver_ts; + compat_rx_pkt.firmware_ts = rx_pkt->info.firmware_ts; + compat_rx_pkt.pkt_hash = rx_pkt->info.pkt_hash; + __COPY_TO_USER(&comp_ptr->frame_inf.payload_type, + &compat_rx_pkt.payload_type, + OFFSETOF(compat_dhd_dbg_pkt_info_t, pkt_hash)); + __COPY_TO_USER(comp_ptr->frame_inf.frame_content.ethernet_ii, + PKTDATA(dhdp->osh, rx_pkt->info.pkt), rx_pkt->info.pkt_len); + + cptr++; + rx_pkt++; + count++; + } + } else +#endif /* CONFIG_COMPAT */ + { + ptr = (wifi_rx_report_t *)user_buf; + while ((count < pkt_count) && rx_pkt && ptr) { + __dhd_dbg_dump_rx_pkt_info(dhdp, rx_pkt, count); + + __COPY_TO_USER(&ptr->fate, &rx_pkt->fate, sizeof(rx_pkt->fate)); + __COPY_TO_USER(&ptr->frame_inf.payload_type, + &rx_pkt->info.payload_type, + OFFSETOF(dhd_dbg_pkt_info_t, pkt_hash)); + __COPY_TO_USER(ptr->frame_inf.frame_content.ethernet_ii, + PKTDATA(dhdp->osh, 
rx_pkt->info.pkt), rx_pkt->info.pkt_len); + + ptr++; + rx_pkt++; + count++; + } + } + + *resp_count = pkt_count; + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + + return BCME_OK; +} + +int +dhd_dbg_detach_pkt_monitor(dhd_pub_t *dhdp) +{ + dhd_dbg_tx_report_t *tx_report; + dhd_dbg_rx_report_t *rx_report; + dhd_dbg_pkt_mon_state_t tx_pkt_state; + dhd_dbg_pkt_mon_state_t tx_status_state; + dhd_dbg_pkt_mon_state_t rx_pkt_state; + unsigned long flags; + + DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__)); + if (!dhdp || !dhdp->dbg) { + DHD_PKT_MON(("%s(): dhdp=%p, dhdp->dbg=%p\n", __FUNCTION__, + dhdp, (dhdp ? dhdp->dbg : NULL))); + return -EINVAL; + } + + DHD_PKT_MON_LOCK(dhdp->dbg->pkt_mon_lock, flags); + tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state; + tx_status_state = dhdp->dbg->pkt_mon.tx_status_state; + rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state; + + if (PKT_MON_DETACHED(tx_pkt_state) || PKT_MON_DETACHED(tx_status_state) || + PKT_MON_DETACHED(rx_pkt_state)) { + DHD_PKT_MON(("%s(): packet monitor is already detached, " + "tx_pkt_state=%d, tx_status_state=%d, rx_pkt_state=%d\n", + __FUNCTION__, tx_pkt_state, tx_status_state, rx_pkt_state)); + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + return -EINVAL; + } + + tx_report = dhdp->dbg->pkt_mon.tx_report; + rx_report = dhdp->dbg->pkt_mon.rx_report; + + /* free and de-initalize tx packet monitoring */ + dhdp->dbg->pkt_mon.tx_pkt_state = PKT_MON_DETACHED; + dhdp->dbg->pkt_mon.tx_status_state = PKT_MON_DETACHED; + if (tx_report) { + if (tx_report->tx_pkts) { + __dhd_dbg_free_tx_pkts(dhdp, tx_report->tx_pkts, + tx_report->pkt_pos); + kfree(tx_report->tx_pkts); + dhdp->dbg->pkt_mon.tx_report->tx_pkts = NULL; + } + kfree(tx_report); + dhdp->dbg->pkt_mon.tx_report = NULL; + } + dhdp->dbg->pkt_mon.tx_pkt_mon = NULL; + dhdp->dbg->pkt_mon.tx_status_mon = NULL; + + /* free and de-initalize rx packet monitoring */ + dhdp->dbg->pkt_mon.rx_pkt_state = PKT_MON_DETACHED; + if (rx_report) { + if 
(rx_report->rx_pkts) { + __dhd_dbg_free_rx_pkts(dhdp, rx_report->rx_pkts, + rx_report->pkt_pos); + kfree(rx_report->rx_pkts); + dhdp->dbg->pkt_mon.rx_report->rx_pkts = NULL; + } + kfree(rx_report); + dhdp->dbg->pkt_mon.rx_report = NULL; + } + dhdp->dbg->pkt_mon.rx_pkt_mon = NULL; + + DHD_PKT_MON_UNLOCK(dhdp->dbg->pkt_mon_lock, flags); + DHD_PKT_MON(("%s(): packet monitor detach succeeded\n", __FUNCTION__)); + return BCME_OK; +} +#endif /* DBG_PKT_MON */ + +/* + * dhd_dbg_attach: initialziation of dhd dbugability module + * + * Return: An error code or 0 on success. + */ +int +dhd_dbg_attach(dhd_pub_t *dhdp, dbg_pullreq_t os_pullreq, + dbg_urgent_noti_t os_urgent_notifier, void *os_priv) +{ + dhd_dbg_t *dbg; + int ret, ring_id; + + dbg = MALLOCZ(dhdp->osh, sizeof(dhd_dbg_t)); + if (!dbg) + return BCME_NOMEM; + + ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[FW_VERBOSE_RING_ID], FW_VERBOSE_RING_ID, + (uint8 *)FW_VERBOSE_RING_NAME, FW_VERBOSE_RING_SIZE, DHD_PREALLOC_FW_VERBOSE_RING); + if (ret) + goto error; + + ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[FW_EVENT_RING_ID], FW_EVENT_RING_ID, + (uint8 *)FW_EVENT_RING_NAME, FW_EVENT_RING_SIZE, DHD_PREALLOC_FW_EVENT_RING); + if (ret) + goto error; + + ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[DHD_EVENT_RING_ID], DHD_EVENT_RING_ID, + (uint8 *)DHD_EVENT_RING_NAME, DHD_EVENT_RING_SIZE, DHD_PREALLOC_DHD_EVENT_RING); + if (ret) + goto error; + + ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[NAN_EVENT_RING_ID], NAN_EVENT_RING_ID, + (uint8 *)NAN_EVENT_RING_NAME, NAN_EVENT_RING_SIZE, DHD_PREALLOC_NAN_EVENT_RING); + if (ret) + goto error; + + dbg->private = os_priv; + dbg->pullreq = os_pullreq; + dbg->urgent_notifier = os_urgent_notifier; + dhdp->dbg = dbg; + + return BCME_OK; + +error: + for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) { + if (VALID_RING(dbg->dbg_rings[ring_id].id)) { + dhd_dbg_ring_deinit(dhdp, &dbg->dbg_rings[ring_id]); + } + } + MFREE(dhdp->osh, dhdp->dbg, 
sizeof(dhd_dbg_t)); + + return ret; +} + +/* + * dhd_dbg_detach: clean up dhd dbugability module + */ +void +dhd_dbg_detach(dhd_pub_t *dhdp) +{ + int ring_id; + dhd_dbg_t *dbg; + if (!dhdp->dbg) + return; + dbg = dhdp->dbg; + for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) { + if (VALID_RING(dbg->dbg_rings[ring_id].id)) { + dhd_dbg_ring_deinit(dhdp, &dbg->dbg_rings[ring_id]); + } + } + MFREE(dhdp->osh, dhdp->dbg, sizeof(dhd_dbg_t)); +} diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_debug.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_debug.h new file mode 100644 index 000000000000..93371137101e --- /dev/null +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_debug.h @@ -0,0 +1,868 @@ +/* + * DHD debugability header file + * + * <> + * + * Copyright (C) 1999-2017, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: dhd_debug.h 705824 2017-06-19 13:58:39Z $ + */ + +#ifndef _dhd_debug_h_ +#define _dhd_debug_h_ +#include +#include + +enum { + DEBUG_RING_ID_INVALID = 0, + FW_VERBOSE_RING_ID, + FW_EVENT_RING_ID, + DHD_EVENT_RING_ID, + NAN_EVENT_RING_ID, + /* add new id here */ + DEBUG_RING_ID_MAX +}; + +enum { + /* Feature set */ + DBG_MEMORY_DUMP_SUPPORTED = (1 << (0)), /* Memory dump of FW */ + DBG_PER_PACKET_TX_RX_STATUS_SUPPORTED = (1 << (1)), /* PKT Status */ + DBG_CONNECT_EVENT_SUPPORTED = (1 << (2)), /* Connectivity Event */ + DBG_POWER_EVENT_SUPOORTED = (1 << (3)), /* POWER of Driver */ + DBG_WAKE_LOCK_SUPPORTED = (1 << (4)), /* WAKE LOCK of Driver */ + DBG_VERBOSE_LOG_SUPPORTED = (1 << (5)), /* verbose log of FW */ + DBG_HEALTH_CHECK_SUPPORTED = (1 << (6)), /* monitor the health of FW */ + DBG_DRIVER_DUMP_SUPPORTED = (1 << (7)), /* dumps driver state */ + DBG_PACKET_FATE_SUPPORTED = (1 << (8)), /* tracks connection packets' fate */ + DBG_NAN_EVENT_SUPPORTED = (1 << (9)), /* NAN Events */ +}; + +enum { + /* set for binary entries */ + DBG_RING_ENTRY_FLAGS_HAS_BINARY = (1 << (0)), + /* set if 64 bits timestamp is present */ + DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP = (1 << (1)) +}; + +#define DBGRING_NAME_MAX 32 +/* firmware verbose ring, ring id 1 */ +#define FW_VERBOSE_RING_NAME "fw_verbose" +#define FW_VERBOSE_RING_SIZE (64 * 1024) +/* firmware event ring, ring id 2 */ +#define FW_EVENT_RING_NAME "fw_event" +#define FW_EVENT_RING_SIZE (64 * 1024) +/* DHD connection event ring, ring id 3 */ +#define DHD_EVENT_RING_NAME "dhd_event" +#define DHD_EVENT_RING_SIZE (64 * 1024) + +/* NAN event ring, ring id 4 */ +#define NAN_EVENT_RING_NAME "nan_event" +#define NAN_EVENT_RING_SIZE (64 * 1024) + +#define TLV_LOG_SIZE(tlv) ((tlv) ? (sizeof(tlv_log) + (tlv)->len) : 0) + +#define TLV_LOG_NEXT(tlv) \ + ((tlv) ? 
((tlv_log *)((uint8 *)tlv + TLV_LOG_SIZE(tlv))) : 0) + +#define DBG_RING_STATUS_SIZE (sizeof(dhd_dbg_ring_status_t)) + +#define VALID_RING(id) \ + ((id > DEBUG_RING_ID_INVALID) && (id < DEBUG_RING_ID_MAX)) + +#ifdef DEBUGABILITY +#define DBG_RING_ACTIVE(dhdp, ring_id) \ + ((dhdp)->dbg->dbg_rings[(ring_id)].state == RING_ACTIVE) +#else +#define DBG_RING_ACTIVE(dhdp, ring_id) 0 +#endif /* DEBUGABILITY */ + +#define TXACTIVESZ(r, w, d) (((r) <= (w)) ? ((w) - (r)) : ((d) - (r) + (w))) +#define DBG_RING_READ_AVAIL_SPACE(w, r, d) (((w) >= (r)) ? ((w) - (r)) : ((d) - (r))) +#define DBG_RING_WRITE_SPACE_AVAIL_CONT(r, w, d) (((w) >= (r)) ? ((d) - (w)) : ((r) - (w))) +#define DBG_RING_WRITE_SPACE_AVAIL(r, w, d) (d - (TXACTIVESZ(r, w, d))) +#define DBG_RING_CHECK_WRITE_SPACE(r, w, d) \ + MIN(DBG_RING_WRITE_SPACE_AVAIL(r, w, d), DBG_RING_WRITE_SPACE_AVAIL_CONT(r, w, d)) + +enum { + /* driver receive association command from kernel */ + WIFI_EVENT_ASSOCIATION_REQUESTED = 0, + WIFI_EVENT_AUTH_COMPLETE, + WIFI_EVENT_ASSOC_COMPLETE, + /* received firmware event indicating auth frames are sent */ + WIFI_EVENT_FW_AUTH_STARTED, + /* received firmware event indicating assoc frames are sent */ + WIFI_EVENT_FW_ASSOC_STARTED, + /* received firmware event indicating reassoc frames are sent */ + WIFI_EVENT_FW_RE_ASSOC_STARTED, + WIFI_EVENT_DRIVER_SCAN_REQUESTED, + WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND, + WIFI_EVENT_DRIVER_SCAN_COMPLETE, + WIFI_EVENT_G_SCAN_STARTED, + WIFI_EVENT_G_SCAN_COMPLETE, + WIFI_EVENT_DISASSOCIATION_REQUESTED, + WIFI_EVENT_RE_ASSOCIATION_REQUESTED, + WIFI_EVENT_ROAM_REQUESTED, + /* received beacon from AP (event enabled only in verbose mode) */ + WIFI_EVENT_BEACON_RECEIVED, + /* firmware has triggered a roam scan (not g-scan) */ + WIFI_EVENT_ROAM_SCAN_STARTED, + /* firmware has completed a roam scan (not g-scan) */ + WIFI_EVENT_ROAM_SCAN_COMPLETE, + /* firmware has started searching for roam candidates (with reason =xx) */ + WIFI_EVENT_ROAM_SEARCH_STARTED, + /* 
firmware has stopped searching for roam candidates (with reason =xx) */ + WIFI_EVENT_ROAM_SEARCH_STOPPED, + WIFI_EVENT_UNUSED_0, + /* received channel switch anouncement from AP */ + WIFI_EVENT_CHANNEL_SWITCH_ANOUNCEMENT, + /* fw start transmit eapol frame, with EAPOL index 1-4 */ + WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_START, + /* fw gives up eapol frame, with rate, success/failure and number retries */ + WIFI_EVENT_FW_EAPOL_FRAME_TRANSMIT_STOP, + /* kernel queue EAPOL for transmission in driver with EAPOL index 1-4 */ + WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED, + /* with rate, regardless of the fact that EAPOL frame is accepted or + * rejected by firmware + */ + WIFI_EVENT_FW_EAPOL_FRAME_RECEIVED, + WIFI_EVENT_UNUSED_1, + /* with rate, and eapol index, driver has received */ + /* EAPOL frame and will queue it up to wpa_supplicant */ + WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED, + /* with success/failure, parameters */ + WIFI_EVENT_BLOCK_ACK_NEGOTIATION_COMPLETE, + WIFI_EVENT_BT_COEX_BT_SCO_START, + WIFI_EVENT_BT_COEX_BT_SCO_STOP, + /* for paging/scan etc..., when BT starts transmiting twice per BT slot */ + WIFI_EVENT_BT_COEX_BT_SCAN_START, + WIFI_EVENT_BT_COEX_BT_SCAN_STOP, + WIFI_EVENT_BT_COEX_BT_HID_START, + WIFI_EVENT_BT_COEX_BT_HID_STOP, + /* firmware sends auth frame in roaming to next candidate */ + WIFI_EVENT_ROAM_AUTH_STARTED, + /* firmware receive auth confirm from ap */ + WIFI_EVENT_ROAM_AUTH_COMPLETE, + /* firmware sends assoc/reassoc frame in */ + WIFI_EVENT_ROAM_ASSOC_STARTED, + /* firmware receive assoc/reassoc confirm from ap */ + WIFI_EVENT_ROAM_ASSOC_COMPLETE, + /* firmware sends stop G_SCAN */ + WIFI_EVENT_G_SCAN_STOP, + /* firmware indicates G_SCAN scan cycle started */ + WIFI_EVENT_G_SCAN_CYCLE_STARTED, + /* firmware indicates G_SCAN scan cycle completed */ + WIFI_EVENT_G_SCAN_CYCLE_COMPLETED, + /* firmware indicates G_SCAN scan start for a particular bucket */ + WIFI_EVENT_G_SCAN_BUCKET_STARTED, + /* firmware indicates G_SCAN scan completed 
for particular bucket */ + WIFI_EVENT_G_SCAN_BUCKET_COMPLETED, + /* Event received from firmware about G_SCAN scan results being available */ + WIFI_EVENT_G_SCAN_RESULTS_AVAILABLE, + /* Event received from firmware with G_SCAN capabilities */ + WIFI_EVENT_G_SCAN_CAPABILITIES, + /* Event received from firmware when eligible candidate is found */ + WIFI_EVENT_ROAM_CANDIDATE_FOUND, + /* Event received from firmware when roam scan configuration gets enabled or disabled */ + WIFI_EVENT_ROAM_SCAN_CONFIG, + /* firmware/driver timed out authentication */ + WIFI_EVENT_AUTH_TIMEOUT, + /* firmware/driver timed out association */ + WIFI_EVENT_ASSOC_TIMEOUT, + /* firmware/driver encountered allocation failure */ + WIFI_EVENT_MEM_ALLOC_FAILURE, + /* driver added a PNO network in firmware */ + WIFI_EVENT_DRIVER_PNO_ADD, + /* driver removed a PNO network in firmware */ + WIFI_EVENT_DRIVER_PNO_REMOVE, + /* driver received PNO networks found indication from firmware */ + WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND, + /* driver triggered a scan for PNO networks */ + WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED, + /* driver received scan results of PNO networks */ + WIFI_EVENT_DRIVER_PNO_SCAN_RESULT_FOUND, + /* driver updated scan results from PNO candidates to cfg */ + WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE +}; + +enum { + WIFI_TAG_VENDOR_SPECIFIC = 0, /* take a byte stream as parameter */ + WIFI_TAG_BSSID, /* takes a 6 bytes MAC address as parameter */ + WIFI_TAG_ADDR, /* takes a 6 bytes MAC address as parameter */ + WIFI_TAG_SSID, /* takes a 32 bytes SSID address as parameter */ + WIFI_TAG_STATUS, /* takes an integer as parameter */ + WIFI_TAG_CHANNEL_SPEC, /* takes one or more wifi_channel_spec as parameter */ + WIFI_TAG_WAKE_LOCK_EVENT, /* takes a wake_lock_event struct as parameter */ + WIFI_TAG_ADDR1, /* takes a 6 bytes MAC address as parameter */ + WIFI_TAG_ADDR2, /* takes a 6 bytes MAC address as parameter */ + WIFI_TAG_ADDR3, /* takes a 6 bytes MAC address as parameter */ + WIFI_TAG_ADDR4, /* 
takes a 6 bytes MAC address as parameter */ + WIFI_TAG_TSF, /* take a 64 bits TSF value as parameter */ + WIFI_TAG_IE, + /* take one or more specific 802.11 IEs parameter, IEs are in turn + * indicated in TLV format as per 802.11 spec + */ + WIFI_TAG_INTERFACE, /* take interface name as parameter */ + WIFI_TAG_REASON_CODE, /* take a reason code as per 802.11 as parameter */ + WIFI_TAG_RATE_MBPS, /* take a wifi rate in 0.5 mbps */ + WIFI_TAG_REQUEST_ID, /* take an integer as parameter */ + WIFI_TAG_BUCKET_ID, /* take an integer as parameter */ + WIFI_TAG_GSCAN_PARAMS, /* takes a wifi_scan_cmd_params struct as parameter */ + WIFI_TAG_GSCAN_CAPABILITIES, /* takes a wifi_gscan_capabilities struct as parameter */ + WIFI_TAG_SCAN_ID, /* take an integer as parameter */ + WIFI_TAG_RSSI, /* takes s16 as parameter */ + WIFI_TAG_CHANNEL, /* takes u16 as parameter */ + WIFI_TAG_LINK_ID, /* take an integer as parameter */ + WIFI_TAG_LINK_ROLE, /* take an integer as parameter */ + WIFI_TAG_LINK_STATE, /* take an integer as parameter */ + WIFI_TAG_LINK_TYPE, /* take an integer as parameter */ + WIFI_TAG_TSCO, /* take an integer as parameter */ + WIFI_TAG_RSCO, /* take an integer as parameter */ + WIFI_TAG_EAPOL_MESSAGE_TYPE /* take an integer as parameter */ +}; + +/* NAN events */ +typedef enum { + NAN_EVENT_INVALID = 0, + NAN_EVENT_CLUSTER_STARTED = 1, + NAN_EVENT_CLUSTER_JOINED = 2, + NAN_EVENT_CLUSTER_MERGED = 3, + NAN_EVENT_ROLE_CHANGED = 4, + NAN_EVENT_SCAN_COMPLETE = 5, + NAN_EVENT_STATUS_CHNG = 6, + /* ADD new events before this line */ + NAN_EVENT_MAX +} nan_event_id_t; + +typedef struct { + uint16 tag; + uint16 len; /* length of value */ + uint8 value[0]; +} tlv_log; + +typedef struct per_packet_status_entry { + uint8 flags; + uint8 tid; /* transmit or received tid */ + uint16 MCS; /* modulation and bandwidth */ + /* + * TX: RSSI of ACK for that packet + * RX: RSSI of packet + */ + uint8 rssi; + uint8 num_retries; /* number of attempted retries */ + uint16 
last_transmit_rate; /* last transmit rate in .5 mbps */ + /* transmit/reeive sequence for that MPDU packet */ + uint16 link_layer_transmit_sequence; + /* + * TX: firmware timestamp (us) when packet is queued within firmware buffer + * for SDIO/HSIC or into PCIe buffer + * RX : firmware receive timestamp + */ + uint64 firmware_entry_timestamp; + /* + * firmware timestamp (us) when packet start contending for the + * medium for the first time, at head of its AC queue, + * or as part of an MPDU or A-MPDU. This timestamp is not updated + * for each retry, only the first transmit attempt. + */ + uint64 start_contention_timestamp; + /* + * fimrware timestamp (us) when packet is successfully transmitted + * or aborted because it has exhausted its maximum number of retries + */ + uint64 transmit_success_timestamp; + /* + * packet data. The length of packet data is determined by the entry_size field of + * the wifi_ring_buffer_entry structure. It is expected that first bytes of the + * packet, or packet headers only (up to TCP or RTP/UDP headers) will be copied into the ring + */ + uint8 *data; +} per_packet_status_entry_t; + +#define PACKED_STRUCT __attribute__ ((packed)) +typedef struct log_conn_event { + uint16 event; + tlv_log *tlvs; + /* + * separate parameter structure per event to be provided and optional data + * the event_data is expected to include an official android part, with some + * parameter as transmit rate, num retries, num scan result found etc... + * as well, event_data can include a vendor proprietary part which is + * understood by the developer only. + */ +} PACKED_STRUCT log_conn_event_t; + +/* + * Ring buffer name for power events ring. 
note that power event are extremely frequents + * and thus should be stored in their own ring/file so as not to clobber connectivity events + */ + +typedef struct wake_lock_event { + uint32 status; /* 0 taken, 1 released */ + uint32 reason; /* reason why this wake lock is taken */ + char *name; /* null terminated */ +} wake_lock_event_t; + +typedef struct wifi_power_event { + uint16 event; + tlv_log *tlvs; +} wifi_power_event_t; + +#define NAN_EVENT_VERSION 1 +typedef struct log_nan_event { + uint8 version; + uint8 pad; + uint16 event; + tlv_log *tlvs; +} log_nan_event_t; + +/* entry type */ +enum { + DBG_RING_ENTRY_EVENT_TYPE = 1, + DBG_RING_ENTRY_PKT_TYPE, + DBG_RING_ENTRY_WAKE_LOCK_EVENT_TYPE, + DBG_RING_ENTRY_POWER_EVENT_TYPE, + DBG_RING_ENTRY_DATA_TYPE, + DBG_RING_ENTRY_NAN_EVENT_TYPE +}; + +typedef struct dhd_dbg_ring_entry { + uint16 len; /* payload length excluding the header */ + uint8 flags; + uint8 type; /* Per ring specific */ + uint64 timestamp; /* present if has_timestamp bit is set. 
*/ +} PACKED_STRUCT dhd_dbg_ring_entry_t; + +#define DBG_RING_ENTRY_SIZE (sizeof(dhd_dbg_ring_entry_t)) + +#define ENTRY_LENGTH(hdr) ((hdr)->len + DBG_RING_ENTRY_SIZE) + +#define PAYLOAD_MAX_LEN 65535 + +typedef struct dhd_dbg_ring_status { + uint8 name[DBGRING_NAME_MAX]; + uint32 flags; + int ring_id; /* unique integer representing the ring */ + /* total memory size allocated for the buffer */ + uint32 ring_buffer_byte_size; + uint32 verbose_level; + /* number of bytes that was written to the buffer by driver */ + uint32 written_bytes; + /* number of bytes that was read from the buffer by user land */ + uint32 read_bytes; + /* number of records that was read from the buffer by user land */ + uint32 written_records; +} dhd_dbg_ring_status_t; + +struct log_level_table { + int log_level; + uint16 tag; + uint8 sets; + char *desc; +}; + +#ifdef DEBUGABILITY +#define DBG_EVENT_LOG(dhdp, connect_state) \ +{ \ + do { \ + uint16 state = connect_state; \ + if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) \ + dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID, \ + &state, sizeof(state)); \ + } while (0); \ +} +#else +#define DBG_EVENT_LOG(dhdp, connect_state) +#endif /* DEBUGABILITY */ + + +#define MD5_PREFIX_LEN 4 +#define MAX_FATE_LOG_LEN 32 + +#define MAX_FRAME_LEN_ETHERNET 1518 +#define MAX_FRAME_LEN_80211_MGMT 2352 /* 802.11-2012 Fig. 8-34 */ + +typedef enum { + /* Sent over air and ACKed. */ + TX_PKT_FATE_ACKED, + + /* Sent over air but not ACKed. (Normal for broadcast/multicast.) */ + TX_PKT_FATE_SENT, + + /* Queued within firmware, but not yet sent over air. */ + TX_PKT_FATE_FW_QUEUED, + + /* + * Dropped by firmware as invalid. E.g. bad source address, + * bad checksum, or invalid for current state. + */ + TX_PKT_FATE_FW_DROP_INVALID, + + /* Dropped by firmware due to lack of buffer space. */ + TX_PKT_FATE_FW_DROP_NOBUFS, + + /* + * Dropped by firmware for any other reason. Includes + * frames that were sent by driver to firmware, but + * unaccounted for by firmware. 
+ */ + TX_PKT_FATE_FW_DROP_OTHER, + + /* Queued within driver, not yet sent to firmware. */ + TX_PKT_FATE_DRV_QUEUED, + + /* + * Dropped by driver as invalid. E.g. bad source address, + * or invalid for current state. + */ + TX_PKT_FATE_DRV_DROP_INVALID, + + /* Dropped by driver due to lack of buffer space. */ + TX_PKT_FATE_DRV_DROP_NOBUFS, + + /* Dropped by driver for any other reason. */ + TX_PKT_FATE_DRV_DROP_OTHER, + + } wifi_tx_packet_fate; + +typedef enum { + /* Valid and delivered to network stack (e.g., netif_rx()). */ + RX_PKT_FATE_SUCCESS, + + /* Queued within firmware, but not yet sent to driver. */ + RX_PKT_FATE_FW_QUEUED, + + /* Dropped by firmware due to host-programmable filters. */ + RX_PKT_FATE_FW_DROP_FILTER, + + /* + * Dropped by firmware as invalid. E.g. bad checksum, + * decrypt failed, or invalid for current state. + */ + RX_PKT_FATE_FW_DROP_INVALID, + + /* Dropped by firmware due to lack of buffer space. */ + RX_PKT_FATE_FW_DROP_NOBUFS, + + /* Dropped by firmware for any other reason. */ + RX_PKT_FATE_FW_DROP_OTHER, + + /* Queued within driver, not yet delivered to network stack. */ + RX_PKT_FATE_DRV_QUEUED, + + /* Dropped by driver due to filter rules. */ + RX_PKT_FATE_DRV_DROP_FILTER, + + /* Dropped by driver as invalid. E.g. not permitted in current state. */ + RX_PKT_FATE_DRV_DROP_INVALID, + + /* Dropped by driver due to lack of buffer space. */ + RX_PKT_FATE_DRV_DROP_NOBUFS, + + /* Dropped by driver for any other reason. */ + RX_PKT_FATE_DRV_DROP_OTHER, + + } wifi_rx_packet_fate; + +typedef enum { + FRAME_TYPE_UNKNOWN, + FRAME_TYPE_ETHERNET_II, + FRAME_TYPE_80211_MGMT, + } frame_type; + +typedef struct wifi_frame_info { + /* + * The type of MAC-layer frame that this frame_info holds. + * - For data frames, use FRAME_TYPE_ETHERNET_II. + * - For management frames, use FRAME_TYPE_80211_MGMT. + * - If the type of the frame is unknown, use FRAME_TYPE_UNKNOWN. 
+ */ + frame_type payload_type; + + /* + * The number of bytes included in |frame_content|. If the frame + * contents are missing (e.g. RX frame dropped in firmware), + * |frame_len| should be set to 0. + */ + size_t frame_len; + + /* + * Host clock when this frame was received by the driver (either + * outbound from the host network stack, or inbound from the + * firmware). + * - The timestamp should be taken from a clock which includes time + * the host spent suspended (e.g. ktime_get_boottime()). + * - If no host timestamp is available (e.g. RX frame was dropped in + * firmware), this field should be set to 0. + */ + uint32 driver_timestamp_usec; + + /* + * Firmware clock when this frame was received by the firmware + * (either outbound from the host, or inbound from a remote + * station). + * - The timestamp should be taken from a clock which includes time + * firmware spent suspended (if applicable). + * - If no firmware timestamp is available (e.g. TX frame was + * dropped by driver), this field should be set to 0. + * - Consumers of |frame_info| should _not_ assume any + * synchronization between driver and firmware clocks. + */ + uint32 firmware_timestamp_usec; + + /* + * Actual frame content. + * - Should be provided for TX frames originated by the host. + * - Should be provided for RX frames received by the driver. + * - Optionally provided for TX frames originated by firmware. (At + * discretion of HAL implementation.) + * - Optionally provided for RX frames dropped in firmware. (At + * discretion of HAL implementation.) + * - If frame content is not provided, |frame_len| should be set + * to 0. + */ + union { + char ethernet_ii[MAX_FRAME_LEN_ETHERNET]; + char ieee_80211_mgmt[MAX_FRAME_LEN_80211_MGMT]; + } frame_content; +} wifi_frame_info_t; + +typedef struct wifi_tx_report { + /* + * Prefix of MD5 hash of |frame_inf.frame_content|. 
If frame + * content is not provided, prefix of MD5 hash over the same data + * that would be in frame_content, if frame content were provided. + */ + char md5_prefix[MD5_PREFIX_LEN]; + wifi_tx_packet_fate fate; + wifi_frame_info_t frame_inf; +} wifi_tx_report_t; + +typedef struct wifi_rx_report { + /* + * Prefix of MD5 hash of |frame_inf.frame_content|. If frame + * content is not provided, prefix of MD5 hash over the same data + * that would be in frame_content, if frame content were provided. + */ + char md5_prefix[MD5_PREFIX_LEN]; + wifi_rx_packet_fate fate; + wifi_frame_info_t frame_inf; +} wifi_rx_report_t; + +typedef struct compat_wifi_frame_info { + frame_type payload_type; + + uint32 frame_len; + + uint32 driver_timestamp_usec; + + uint32 firmware_timestamp_usec; + + union { + char ethernet_ii[MAX_FRAME_LEN_ETHERNET]; + char ieee_80211_mgmt[MAX_FRAME_LEN_80211_MGMT]; + } frame_content; +} compat_wifi_frame_info_t; + + +typedef struct compat_wifi_tx_report { + char md5_prefix[MD5_PREFIX_LEN]; + wifi_tx_packet_fate fate; + compat_wifi_frame_info_t frame_inf; +} compat_wifi_tx_report_t; + +typedef struct compat_wifi_rx_report { + char md5_prefix[MD5_PREFIX_LEN]; + wifi_rx_packet_fate fate; + compat_wifi_frame_info_t frame_inf; +} compat_wifi_rx_report_t; + + +/* + * Packet logging - internal data + */ + +typedef enum dhd_dbg_pkt_mon_state { + PKT_MON_INVALID = 0, + PKT_MON_ATTACHED, + PKT_MON_STARTING, + PKT_MON_STARTED, + PKT_MON_STOPPING, + PKT_MON_STOPPED, + PKT_MON_DETACHED, + } dhd_dbg_pkt_mon_state_t; + +typedef struct dhd_dbg_pkt_info { + frame_type payload_type; + size_t pkt_len; + uint32 driver_ts; + uint32 firmware_ts; + uint32 pkt_hash; + void *pkt; +} dhd_dbg_pkt_info_t; + +typedef struct compat_dhd_dbg_pkt_info { + frame_type payload_type; + uint32 pkt_len; + uint32 driver_ts; + uint32 firmware_ts; + uint32 pkt_hash; + void *pkt; +} compat_dhd_dbg_pkt_info_t; + +typedef struct dhd_dbg_tx_info +{ + wifi_tx_packet_fate fate; + dhd_dbg_pkt_info_t 
info; +} dhd_dbg_tx_info_t; + +typedef struct dhd_dbg_rx_info +{ + wifi_rx_packet_fate fate; + dhd_dbg_pkt_info_t info; +} dhd_dbg_rx_info_t; + +typedef struct dhd_dbg_tx_report +{ + dhd_dbg_tx_info_t *tx_pkts; + uint16 pkt_pos; + uint16 status_pos; +} dhd_dbg_tx_report_t; + +typedef struct dhd_dbg_rx_report +{ + dhd_dbg_rx_info_t *rx_pkts; + uint16 pkt_pos; +} dhd_dbg_rx_report_t; + +typedef void (*dbg_pullreq_t)(void *os_priv, const int ring_id); +typedef void (*dbg_urgent_noti_t) (dhd_pub_t *dhdp, const void *data, const uint32 len); +typedef int (*dbg_mon_tx_pkts_t) (dhd_pub_t *dhdp, void *pkt, uint32 pktid); +typedef int (*dbg_mon_tx_status_t) (dhd_pub_t *dhdp, void *pkt, + uint32 pktid, uint16 status); +typedef int (*dbg_mon_rx_pkts_t) (dhd_pub_t *dhdp, void *pkt); + +typedef struct dhd_dbg_pkt_mon +{ + dhd_dbg_tx_report_t *tx_report; + dhd_dbg_rx_report_t *rx_report; + dhd_dbg_pkt_mon_state_t tx_pkt_state; + dhd_dbg_pkt_mon_state_t tx_status_state; + dhd_dbg_pkt_mon_state_t rx_pkt_state; + + /* call backs */ + dbg_mon_tx_pkts_t tx_pkt_mon; + dbg_mon_tx_status_t tx_status_mon; + dbg_mon_rx_pkts_t rx_pkt_mon; +} dhd_dbg_pkt_mon_t; + +enum dbg_ring_state { + RING_STOP = 0, /* ring is not initialized */ + RING_ACTIVE, /* ring is live and logging */ + RING_SUSPEND /* ring is initialized but not logging */ +}; + +struct ring_statistics { + /* number of bytes that was written to the buffer by driver */ + uint32 written_bytes; + /* number of bytes that was read from the buffer by user land */ + uint32 read_bytes; + /* number of records that was written to the buffer by driver */ + uint32 written_records; +}; + +typedef struct dhd_dbg_ring { + int id; /* ring id */ + uint8 name[DBGRING_NAME_MAX]; /* name string */ + uint32 ring_size; /* numbers of item in ring */ + uint32 wp; /* write pointer */ + uint32 rp; /* read pointer */ + uint32 log_level; /* log_level */ + uint32 threshold; /* threshold bytes */ + void * ring_buf; /* pointer of actually ring buffer */ + void 
* lock; /* spin lock for ring access */ + struct ring_statistics stat; /* statistics */ + enum dbg_ring_state state; /* ring state enum */ + bool tail_padded; /* writer does not have enough space */ + uint32 rem_len; /* number of bytes from wp_pad to end */ + bool sched_pull; /* schedule reader immediately */ +} dhd_dbg_ring_t; + +typedef struct dhd_dbg { + dhd_dbg_ring_t dbg_rings[DEBUG_RING_ID_MAX]; + void *private; /* os private_data */ + dhd_dbg_pkt_mon_t pkt_mon; + void *pkt_mon_lock; /* spin lock for packet monitoring */ + dbg_pullreq_t pullreq; + dbg_urgent_noti_t urgent_notifier; +} dhd_dbg_t; + +#define PKT_MON_ATTACHED(state) \ + (((state) > PKT_MON_INVALID) && ((state) < PKT_MON_DETACHED)) +#define PKT_MON_DETACHED(state) \ + (((state) == PKT_MON_INVALID) || ((state) == PKT_MON_DETACHED)) +#define PKT_MON_STARTED(state) ((state) == PKT_MON_STARTED) +#define PKT_MON_STOPPED(state) ((state) == PKT_MON_STOPPED) +#define PKT_MON_NOT_OPERATIONAL(state) \ + (((state) != PKT_MON_STARTED) && ((state) != PKT_MON_STOPPED)) +#define PKT_MON_SAFE_TO_FREE(state) \ + (((state) == PKT_MON_STARTING) || ((state) == PKT_MON_STOPPED)) +#define PKT_MON_PKT_FULL(pkt_count) ((pkt_count) >= MAX_FATE_LOG_LEN) +#define PKT_MON_STATUS_FULL(pkt_count, status_count) \ + (((status_count) >= (pkt_count)) || ((status_count) >= MAX_FATE_LOG_LEN)) + +#ifdef DBG_PKT_MON +#define DHD_DBG_PKT_MON_TX(dhdp, pkt, pktid) \ + do { \ + if ((dhdp) && (dhdp)->dbg && (dhdp)->dbg->pkt_mon.tx_pkt_mon && (pkt)) { \ + (dhdp)->dbg->pkt_mon.tx_pkt_mon((dhdp), (pkt), (pktid)); \ + } \ + } while (0); +#define DHD_DBG_PKT_MON_TX_STATUS(dhdp, pkt, pktid, status) \ + do { \ + if ((dhdp) && (dhdp)->dbg && (dhdp)->dbg->pkt_mon.tx_status_mon && (pkt)) { \ + (dhdp)->dbg->pkt_mon.tx_status_mon((dhdp), (pkt), (pktid), (status)); \ + } \ + } while (0); +#define DHD_DBG_PKT_MON_RX(dhdp, pkt) \ + do { \ + if ((dhdp) && (dhdp)->dbg && (dhdp)->dbg->pkt_mon.rx_pkt_mon && (pkt)) { \ + if (ntoh16((pkt)->protocol) != 
ETHER_TYPE_BRCM) { \ + (dhdp)->dbg->pkt_mon.rx_pkt_mon((dhdp), (pkt)); \ + } \ + } \ + } while (0); + +#define DHD_DBG_PKT_MON_START(dhdp) \ + dhd_os_dbg_start_pkt_monitor((dhdp)); +#define DHD_DBG_PKT_MON_STOP(dhdp) \ + dhd_os_dbg_stop_pkt_monitor((dhdp)); +#else +#define DHD_DBG_PKT_MON_TX(dhdp, pkt, pktid) +#define DHD_DBG_PKT_MON_TX_STATUS(dhdp, pkt, pktid, status) +#define DHD_DBG_PKT_MON_RX(dhdp, pkt) +#define DHD_DBG_PKT_MON_START(dhdp) +#define DHD_DBG_PKT_MON_STOP(dhdp) +#endif /* DBG_PKT_MON */ + +#ifdef DUMP_IOCTL_IOV_LIST +typedef struct dhd_iov_li { + dll_t list; + uint32 cmd; + char buff[100]; +} dhd_iov_li_t; + +#define IOV_LIST_MAX_LEN 5 +#endif /* DUMP_IOCTL_IOV_LIST */ + +#ifdef DHD_DEBUG +typedef struct { + dll_t list; + uint32 id; /* wasted chunk id */ + uint32 handle; /* wasted chunk handle */ + uint32 size; /* wasted chunk size */ +} dhd_dbg_mwli_t; +#endif /* DHD_DEBUG */ + +/* dhd_dbg functions */ +extern void dhd_dbg_trace_evnt_handler(dhd_pub_t *dhdp, void *event_data, + void *raw_event_ptr, uint datalen); +extern int dhd_dbg_attach(dhd_pub_t *dhdp, dbg_pullreq_t os_pullreq, + dbg_urgent_noti_t os_urgent_notifier, void *os_priv); +extern void dhd_dbg_detach(dhd_pub_t *dhdp); +extern int dhd_dbg_start(dhd_pub_t *dhdp, bool start); +extern int dhd_dbg_set_configuration(dhd_pub_t *dhdp, int ring_id, + int log_level, int flags, uint32 threshold); +extern int dhd_dbg_get_ring_status(dhd_pub_t *dhdp, int ring_id, + dhd_dbg_ring_status_t *dbg_ring_status); +extern int dhd_dbg_ring_push(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_entry_t *hdr, void *data); +extern int dhd_dbg_ring_pull(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len); +extern int dhd_dbg_ring_pull_single(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len, + bool strip_header); +extern int dhd_dbg_find_ring_id(dhd_pub_t *dhdp, char *ring_name); +extern void *dhd_dbg_get_priv(dhd_pub_t *dhdp); +extern int dhd_dbg_send_urgent_evt(dhd_pub_t *dhdp, const void *data, const 
uint32 len); +extern void dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, event_log_hdr_t *hdr, + void *raw_event_ptr, uint32 *log_ptr); + +#ifdef DBG_PKT_MON +extern int dhd_dbg_attach_pkt_monitor(dhd_pub_t *dhdp, + dbg_mon_tx_pkts_t tx_pkt_mon, + dbg_mon_tx_status_t tx_status_mon, + dbg_mon_rx_pkts_t rx_pkt_mon); +extern int dhd_dbg_start_pkt_monitor(dhd_pub_t *dhdp); +extern int dhd_dbg_monitor_tx_pkts(dhd_pub_t *dhdp, void *pkt, uint32 pktid); +extern int dhd_dbg_monitor_tx_status(dhd_pub_t *dhdp, void *pkt, + uint32 pktid, uint16 status); +extern int dhd_dbg_monitor_rx_pkts(dhd_pub_t *dhdp, void *pkt); +extern int dhd_dbg_stop_pkt_monitor(dhd_pub_t *dhdp); +extern int dhd_dbg_monitor_get_tx_pkts(dhd_pub_t *dhdp, void __user *user_buf, + uint16 req_count, uint16 *resp_count); +extern int dhd_dbg_monitor_get_rx_pkts(dhd_pub_t *dhdp, void __user *user_buf, + uint16 req_count, uint16 *resp_count); +extern int dhd_dbg_detach_pkt_monitor(dhd_pub_t *dhdp); +#endif /* DBG_PKT_MON */ + +/* os wrapper function */ +extern int dhd_os_dbg_attach(dhd_pub_t *dhdp); +extern void dhd_os_dbg_detach(dhd_pub_t *dhdp); +extern int dhd_os_dbg_register_callback(int ring_id, + void (*dbg_ring_sub_cb)(void *ctx, const int ring_id, const void *data, + const uint32 len, const dhd_dbg_ring_status_t dbg_ring_status)); +extern int dhd_os_dbg_register_urgent_notifier(dhd_pub_t *dhdp, + void (*urgent_noti)(void *ctx, const void *data, const uint32 len, const uint32 fw_len)); + +extern int dhd_os_start_logging(dhd_pub_t *dhdp, char *ring_name, int log_level, + int flags, int time_intval, int threshold); +extern int dhd_os_reset_logging(dhd_pub_t *dhdp); +extern int dhd_os_suppress_logging(dhd_pub_t *dhdp, bool suppress); + +extern int dhd_os_get_ring_status(dhd_pub_t *dhdp, int ring_id, + dhd_dbg_ring_status_t *dbg_ring_status); +extern int dhd_os_trigger_get_ring_data(dhd_pub_t *dhdp, char *ring_name); +extern int dhd_os_push_push_ring_data(dhd_pub_t *dhdp, int ring_id, void *data, int32 
data_len); +extern int dhd_os_dbg_get_feature(dhd_pub_t *dhdp, int32 *features); + +#ifdef DBG_PKT_MON +extern int dhd_os_dbg_attach_pkt_monitor(dhd_pub_t *dhdp); +extern int dhd_os_dbg_start_pkt_monitor(dhd_pub_t *dhdp); +extern int dhd_os_dbg_monitor_tx_pkts(dhd_pub_t *dhdp, void *pkt, + uint32 pktid); +extern int dhd_os_dbg_monitor_tx_status(dhd_pub_t *dhdp, void *pkt, + uint32 pktid, uint16 status); +extern int dhd_os_dbg_monitor_rx_pkts(dhd_pub_t *dhdp, void *pkt); +extern int dhd_os_dbg_stop_pkt_monitor(dhd_pub_t *dhdp); +extern int dhd_os_dbg_monitor_get_tx_pkts(dhd_pub_t *dhdp, + void __user *user_buf, uint16 req_count, uint16 *resp_count); +extern int dhd_os_dbg_monitor_get_rx_pkts(dhd_pub_t *dhdp, + void __user *user_buf, uint16 req_count, uint16 *resp_count); +extern int dhd_os_dbg_detach_pkt_monitor(dhd_pub_t *dhdp); +#endif /* DBG_PKT_MON */ + +#ifdef DUMP_IOCTL_IOV_LIST +extern void dhd_iov_li_append(dhd_pub_t *dhd, dll_t *list_head, dll_t *node); +extern void dhd_iov_li_print(dll_t *list_head); +extern void dhd_iov_li_delete(dhd_pub_t *dhd, dll_t *list_head); +#endif /* DUMP_IOCTL_IOV_LIST */ + +#ifdef DHD_DEBUG +extern void dhd_mw_list_delete(dhd_pub_t *dhd, dll_t *list_head); +#endif /* DHD_DEBUG */ +#endif /* _dhd_debug_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_debug_linux.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_debug_linux.c new file mode 100644 index 000000000000..7137529ce463 --- /dev/null +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_debug_linux.c @@ -0,0 +1,512 @@ +/* + * DHD debugability Linux os layer + * + * <> + * + * Copyright (C) 1999-2017, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added 
to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dhd_debug_linux.c 710862 2017-07-14 07:43:59Z $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +typedef void (*dbg_ring_send_sub_t)(void *ctx, const int ring_id, const void *data, + const uint32 len, const dhd_dbg_ring_status_t ring_status); +typedef void (*dbg_urgent_noti_sub_t)(void *ctx, const void *data, + const uint32 len, const uint32 fw_len); + +static dbg_ring_send_sub_t ring_send_sub_cb[DEBUG_RING_ID_MAX]; +static dbg_urgent_noti_sub_t urgent_noti_sub_cb; +typedef struct dhd_dbg_os_ring_info { + dhd_pub_t *dhdp; + int ring_id; + int log_level; + unsigned long interval; + struct delayed_work work; + uint64 tsoffset; +} linux_dbgring_info_t; + +struct log_level_table dhd_event_map[] = { + {1, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED, 0, "DRIVER EAPOL TX REQ"}, + {1, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED, 0, "DRIVER EAPOL RX"}, + {2, WIFI_EVENT_DRIVER_SCAN_REQUESTED, 0, "SCAN_REQUESTED"}, + {2, WIFI_EVENT_DRIVER_SCAN_COMPLETE, 0, "SCAN COMPELETE"}, + {3, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND, 0, "SCAN RESULT FOUND"}, + {2, WIFI_EVENT_DRIVER_PNO_ADD, 0, "PNO ADD"}, + {2, WIFI_EVENT_DRIVER_PNO_REMOVE, 0, "PNO REMOVE"}, + {2, 
WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND, 0, "PNO NETWORK FOUND"}, + {2, WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED, 0, "PNO SCAN_REQUESTED"}, + {1, WIFI_EVENT_DRIVER_PNO_SCAN_RESULT_FOUND, 0, "PNO SCAN RESULT FOUND"}, + {1, WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE, 0, "PNO SCAN COMPELETE"} +}; + +static void +debug_data_send(dhd_pub_t *dhdp, int ring_id, const void *data, const uint32 len, + const dhd_dbg_ring_status_t ring_status) +{ + struct net_device *ndev; + dbg_ring_send_sub_t ring_sub_send; + ndev = dhd_linux_get_primary_netdev(dhdp); + if (!ndev) + return; + if (ring_send_sub_cb[ring_id]) { + ring_sub_send = ring_send_sub_cb[ring_id]; + ring_sub_send(ndev, ring_id, data, len, ring_status); + } +} + +static void +dhd_os_dbg_urgent_notifier(dhd_pub_t *dhdp, const void *data, const uint32 len) +{ + struct net_device *ndev; + ndev = dhd_linux_get_primary_netdev(dhdp); + if (!ndev) + return; + if (urgent_noti_sub_cb) { + urgent_noti_sub_cb(ndev, data, len, dhdp->soc_ram_length); + } +} + +static void +dbg_ring_poll_worker(struct work_struct *work) +{ + struct delayed_work *d_work = to_delayed_work(work); + bool sched = TRUE; + dhd_dbg_ring_t *ring; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + linux_dbgring_info_t *ring_info = + container_of(d_work, linux_dbgring_info_t, work); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + dhd_pub_t *dhdp = ring_info->dhdp; + int ringid = ring_info->ring_id; + dhd_dbg_ring_status_t ring_status; + void *buf; + dhd_dbg_ring_entry_t *hdr; + uint32 buflen, rlen; + unsigned long flags; + + ring = &dhdp->dbg->dbg_rings[ringid]; + flags = dhd_os_spin_lock(ring->lock); + dhd_dbg_get_ring_status(dhdp, ringid, &ring_status); + + if (ring->wp > ring->rp) { + buflen = ring->wp - ring->rp; + } else if (ring->wp < ring->rp) { + buflen = ring->ring_size - ring->rp + ring->wp; + } else { + goto exit; + } + + if (buflen > 
ring->ring_size) { + goto exit; + } + + buf = MALLOCZ(dhdp->osh, buflen); + if (!buf) { + DHD_ERROR(("%s failed to allocate read buf\n", __FUNCTION__)); + sched = FALSE; + goto exit; + } + + rlen = dhd_dbg_ring_pull(dhdp, ringid, buf, buflen); + if (!ring->sched_pull) { + ring->sched_pull = TRUE; + } + + hdr = (dhd_dbg_ring_entry_t *)buf; + while (rlen > 0) { + ring_status.read_bytes += ENTRY_LENGTH(hdr); + /* offset fw ts to host ts */ + hdr->timestamp += ring_info->tsoffset; + debug_data_send(dhdp, ringid, hdr, ENTRY_LENGTH(hdr), + ring_status); + rlen -= ENTRY_LENGTH(hdr); + hdr = (dhd_dbg_ring_entry_t *)((char *)hdr + ENTRY_LENGTH(hdr)); + } + MFREE(dhdp->osh, buf, buflen); + +exit: + if (sched) { + /* retrigger the work at same interval */ + if ((ring_status.written_bytes == ring_status.read_bytes) && + (ring_info->interval)) { + schedule_delayed_work(d_work, ring_info->interval); + } + } + + dhd_os_spin_unlock(ring->lock, flags); + + return; +} + +int +dhd_os_dbg_register_callback(int ring_id, dbg_ring_send_sub_t callback) +{ + if (!VALID_RING(ring_id)) + return BCME_RANGE; + + ring_send_sub_cb[ring_id] = callback; + return BCME_OK; +} + +int +dhd_os_dbg_register_urgent_notifier(dhd_pub_t *dhdp, dbg_urgent_noti_sub_t urgent_noti_sub) +{ + if (!dhdp || !urgent_noti_sub) + return BCME_BADARG; + urgent_noti_sub_cb = urgent_noti_sub; + + return BCME_OK; +} + +int +dhd_os_start_logging(dhd_pub_t *dhdp, char *ring_name, int log_level, + int flags, int time_intval, int threshold) +{ + int ret = BCME_OK; + int ring_id; + linux_dbgring_info_t *os_priv, *ring_info; + uint32 ms; + + ring_id = dhd_dbg_find_ring_id(dhdp, ring_name); + if (!VALID_RING(ring_id)) + return BCME_UNSUPPORTED; + + DHD_DBGIF(("%s , log_level : %d, time_intval : %d, threshod %d Bytes\n", + __FUNCTION__, log_level, time_intval, threshold)); + + /* change the configuration */ + ret = dhd_dbg_set_configuration(dhdp, ring_id, log_level, flags, threshold); + if (ret) { + 
DHD_ERROR(("dhd_set_configuration is failed : %d\n", ret)); + return ret; + } + + os_priv = dhd_dbg_get_priv(dhdp); + if (!os_priv) + return BCME_ERROR; + ring_info = &os_priv[ring_id]; + ring_info->log_level = log_level; + if (ring_id == FW_VERBOSE_RING_ID || ring_id == FW_EVENT_RING_ID) { + ring_info->tsoffset = local_clock(); + if (dhd_wl_ioctl_get_intiovar(dhdp, "rte_timesync", &ms, WLC_GET_VAR, + FALSE, 0)) + DHD_ERROR(("%s rte_timesync failed\n", __FUNCTION__)); + do_div(ring_info->tsoffset, 1000000); + ring_info->tsoffset -= ms; + } + if (time_intval == 0 || log_level == 0) { + ring_info->interval = 0; + cancel_delayed_work_sync(&ring_info->work); + } else { + ring_info->interval = msecs_to_jiffies(time_intval * MSEC_PER_SEC); + cancel_delayed_work_sync(&ring_info->work); + schedule_delayed_work(&ring_info->work, ring_info->interval); + } + + return ret; +} + +int +dhd_os_reset_logging(dhd_pub_t *dhdp) +{ + int ret = BCME_OK; + int ring_id; + linux_dbgring_info_t *os_priv, *ring_info; + + os_priv = dhd_dbg_get_priv(dhdp); + if (!os_priv) + return BCME_ERROR; + + /* Stop all rings */ + for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) { + DHD_DBGIF(("%s: Stop ring buffer %d\n", __FUNCTION__, ring_id)); + + ring_info = &os_priv[ring_id]; + /* cancel any pending work */ + cancel_delayed_work_sync(&ring_info->work); + /* log level zero makes stop logging on that ring */ + ring_info->log_level = 0; + ring_info->interval = 0; + /* change the configuration */ + ret = dhd_dbg_set_configuration(dhdp, ring_id, 0, 0, 0); + if (ret) { + DHD_ERROR(("dhd_set_configuration is failed : %d\n", ret)); + return ret; + } + } + return ret; +} + +#define SUPPRESS_LOG_LEVEL 1 +int +dhd_os_suppress_logging(dhd_pub_t *dhdp, bool suppress) +{ + int ret = BCME_OK; + int max_log_level; + int enable = (suppress) ? 
0 : 1; + linux_dbgring_info_t *os_priv; + + os_priv = dhd_dbg_get_priv(dhdp); + if (!os_priv) + return BCME_ERROR; + + max_log_level = MAX(os_priv[FW_VERBOSE_RING_ID].log_level, + os_priv[FW_EVENT_RING_ID].log_level); + if (max_log_level == SUPPRESS_LOG_LEVEL) { + /* suppress the logging in FW not to wake up host while device in suspend mode */ + ret = dhd_iovar(dhdp, 0, "logtrace", (char *)&enable, sizeof(enable), NULL, 0, + TRUE); + if (ret < 0 && (ret != BCME_UNSUPPORTED)) { + DHD_ERROR(("logtrace is failed : %d\n", ret)); + } + } + + return ret; +} + +int +dhd_os_get_ring_status(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_status_t *dbg_ring_status) +{ + return dhd_dbg_get_ring_status(dhdp, ring_id, dbg_ring_status); +} + +int +dhd_os_trigger_get_ring_data(dhd_pub_t *dhdp, char *ring_name) +{ + int ret = BCME_OK; + int ring_id; + linux_dbgring_info_t *os_priv, *ring_info; + ring_id = dhd_dbg_find_ring_id(dhdp, ring_name); + if (!VALID_RING(ring_id)) + return BCME_UNSUPPORTED; + os_priv = dhd_dbg_get_priv(dhdp); + if (os_priv) { + ring_info = &os_priv[ring_id]; + if (ring_info->interval) { + cancel_delayed_work_sync(&ring_info->work); + } + schedule_delayed_work(&ring_info->work, 0); + } else { + DHD_ERROR(("%s : os_priv is NULL\n", __FUNCTION__)); + ret = BCME_ERROR; + } + return ret; +} + +int +dhd_os_push_push_ring_data(dhd_pub_t *dhdp, int ring_id, void *data, int32 data_len) +{ + int ret = BCME_OK, i; + dhd_dbg_ring_entry_t msg_hdr; + log_conn_event_t *event_data = (log_conn_event_t *)data; + linux_dbgring_info_t *os_priv, *ring_info = NULL; + + if (!VALID_RING(ring_id)) + return BCME_UNSUPPORTED; + os_priv = dhd_dbg_get_priv(dhdp); + + if (os_priv) { + ring_info = &os_priv[ring_id]; + } else + return BCME_NORESOURCE; + + memset(&msg_hdr, 0, sizeof(dhd_dbg_ring_entry_t)); + + if (ring_id == DHD_EVENT_RING_ID) { + msg_hdr.type = DBG_RING_ENTRY_EVENT_TYPE; + msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP; + msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_BINARY; 
+ msg_hdr.timestamp = local_clock(); + /* convert to ms */ + do_div(msg_hdr.timestamp, 1000000); + msg_hdr.len = data_len; + /* filter the event for higher log level with current log level */ + for (i = 0; i < ARRAYSIZE(dhd_event_map); i++) { + if ((dhd_event_map[i].tag == event_data->event) && + dhd_event_map[i].log_level > ring_info->log_level) { + return ret; + } + } + } + ret = dhd_dbg_ring_push(dhdp, ring_id, &msg_hdr, event_data); + if (ret) { + DHD_ERROR(("%s : failed to push data into the ring (%d) with ret(%d)\n", + __FUNCTION__, ring_id, ret)); + } + + return ret; +} + +#ifdef DBG_PKT_MON +int +dhd_os_dbg_attach_pkt_monitor(dhd_pub_t *dhdp) +{ + return dhd_dbg_attach_pkt_monitor(dhdp, dhd_os_dbg_monitor_tx_pkts, + dhd_os_dbg_monitor_tx_status, dhd_os_dbg_monitor_rx_pkts); +} + +int +dhd_os_dbg_start_pkt_monitor(dhd_pub_t *dhdp) +{ + return dhd_dbg_start_pkt_monitor(dhdp); +} + +int +dhd_os_dbg_monitor_tx_pkts(dhd_pub_t *dhdp, void *pkt, uint32 pktid) +{ + return dhd_dbg_monitor_tx_pkts(dhdp, pkt, pktid); +} + +int +dhd_os_dbg_monitor_tx_status(dhd_pub_t *dhdp, void *pkt, uint32 pktid, + uint16 status) +{ + return dhd_dbg_monitor_tx_status(dhdp, pkt, pktid, status); +} + +int +dhd_os_dbg_monitor_rx_pkts(dhd_pub_t *dhdp, void *pkt) +{ + return dhd_dbg_monitor_rx_pkts(dhdp, pkt); +} + +int +dhd_os_dbg_stop_pkt_monitor(dhd_pub_t *dhdp) +{ + return dhd_dbg_stop_pkt_monitor(dhdp); +} + +int +dhd_os_dbg_monitor_get_tx_pkts(dhd_pub_t *dhdp, void __user *user_buf, + uint16 req_count, uint16 *resp_count) +{ + return dhd_dbg_monitor_get_tx_pkts(dhdp, user_buf, req_count, resp_count); +} + +int +dhd_os_dbg_monitor_get_rx_pkts(dhd_pub_t *dhdp, void __user *user_buf, + uint16 req_count, uint16 *resp_count) +{ + return dhd_dbg_monitor_get_rx_pkts(dhdp, user_buf, req_count, resp_count); +} + +int +dhd_os_dbg_detach_pkt_monitor(dhd_pub_t *dhdp) +{ + return dhd_dbg_detach_pkt_monitor(dhdp); +} +#endif /* DBG_PKT_MON */ + +int +dhd_os_dbg_get_feature(dhd_pub_t *dhdp, int32 
*features) +{ + int ret = BCME_OK; + *features = 0; + *features |= DBG_MEMORY_DUMP_SUPPORTED; + if (FW_SUPPORTED(dhdp, logtrace)) { + *features |= DBG_CONNECT_EVENT_SUPPORTED; + *features |= DBG_VERBOSE_LOG_SUPPORTED; + } + if (FW_SUPPORTED(dhdp, hchk)) { + *features |= DBG_HEALTH_CHECK_SUPPORTED; + } +#ifdef DBG_PKT_MON + if (FW_SUPPORTED(dhdp, d11status)) { + *features |= DBG_PACKET_FATE_SUPPORTED; + } +#endif /* DBG_PKT_MON */ + return ret; +} + +static void +dhd_os_dbg_pullreq(void *os_priv, int ring_id) +{ + linux_dbgring_info_t *ring_info; + + ring_info = &((linux_dbgring_info_t *)os_priv)[ring_id]; + cancel_delayed_work(&ring_info->work); + schedule_delayed_work(&ring_info->work, 0); +} + +int +dhd_os_dbg_attach(dhd_pub_t *dhdp) +{ + int ret = BCME_OK; + linux_dbgring_info_t *os_priv, *ring_info; + int ring_id; + + /* os_dbg data */ + os_priv = MALLOCZ(dhdp->osh, sizeof(*os_priv) * DEBUG_RING_ID_MAX); + if (!os_priv) + return BCME_NOMEM; + + for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; + ring_id++) { + ring_info = &os_priv[ring_id]; + INIT_DELAYED_WORK(&ring_info->work, dbg_ring_poll_worker); + ring_info->dhdp = dhdp; + ring_info->ring_id = ring_id; + } + + ret = dhd_dbg_attach(dhdp, dhd_os_dbg_pullreq, dhd_os_dbg_urgent_notifier, os_priv); + if (ret) + MFREE(dhdp->osh, os_priv, sizeof(*os_priv) * DEBUG_RING_ID_MAX); + + return ret; +} + +void +dhd_os_dbg_detach(dhd_pub_t *dhdp) +{ + linux_dbgring_info_t *os_priv, *ring_info; + int ring_id; + /* free os_dbg data */ + os_priv = dhd_dbg_get_priv(dhdp); + if (!os_priv) + return; + /* abort pending any job */ + for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) { + ring_info = &os_priv[ring_id]; + if (ring_info->interval) { + ring_info->interval = 0; + cancel_delayed_work_sync(&ring_info->work); + } + } + MFREE(dhdp->osh, os_priv, sizeof(*os_priv) * DEBUG_RING_ID_MAX); + + return dhd_dbg_detach(dhdp); +} diff --git 
a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_flowring.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_flowring.c index 759dd0ed8bcf..17a9dc314ea0 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_flowring.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_flowring.c @@ -4,7 +4,7 @@ * Flow rings are transmit traffic (=propagating towards antenna) related entities * * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -27,7 +27,7 @@ * * <> * - * $Id: dhd_flowring.c 591285 2015-10-07 11:56:29Z $ + * $Id: dhd_flowring.c 710862 2017-07-14 07:43:59Z $ */ @@ -36,8 +36,8 @@ #include #include -#include -#include +#include +#include #include #include @@ -46,12 +46,11 @@ #include #include #include -#include +#include <802.1d.h> #include #include #include - static INLINE int dhd_flow_queue_throttle(flow_queue_t *queue); static INLINE uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, @@ -67,11 +66,11 @@ int BCMFASTPATH dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt); #define FLOW_QUEUE_PKT_NEXT(p) PKTLINK(p) #define FLOW_QUEUE_PKT_SETNEXT(p, x) PKTSETLINK((p), (x)) -#ifdef DHD_LOSSLESS_ROAMING +#if defined(EAPOL_PKT_PRIO) || defined(DHD_LOSSLESS_ROAMING) const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 7 }; #else const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 }; -#endif +#endif /* EAPOL_PKT_PRIO || DHD_LOSSLESS_ROAMING */ const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; /** Queue overflow throttle. Return value: TRUE if throttle needs to be applied */ @@ -114,13 +113,12 @@ dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid) /* Flow ring's queue management functions */ -/** Initialize a flow ring's queue, called on driver initialization. */ +/** Reinitialize a flow ring's queue. 
*/ void -dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max) +dhd_flow_queue_reinit(dhd_pub_t *dhdp, flow_queue_t *queue, int max) { ASSERT((queue != NULL) && (max > 0)); - dll_init(&queue->list); queue->head = queue->tail = NULL; queue->len = 0; @@ -129,11 +127,22 @@ dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max) DHD_FLOW_QUEUE_SET_MAX(queue, max); DHD_FLOW_QUEUE_SET_THRESHOLD(queue, max); DHD_FLOW_QUEUE_SET_CLEN(queue, &dhdp->cumm_ctr); + DHD_FLOW_QUEUE_SET_L2CLEN(queue, &dhdp->l2cumm_ctr); queue->failures = 0U; queue->cb = &dhd_flow_queue_overflow; } +/** Initialize a flow ring's queue, called on driver initialization. */ +void +dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max) +{ + ASSERT((queue != NULL) && (max > 0)); + + dll_init(&queue->list); + dhd_flow_queue_reinit(dhdp, queue, max); +} + /** Register an enqueue overflow callback handler */ void dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb) @@ -172,6 +181,8 @@ dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt) queue->len++; /* increment parent's cummulative length */ DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue)); + /* increment grandparent's cummulative length */ + DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue)); done: return ret; @@ -199,6 +210,8 @@ dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue) queue->len--; /* decrement parent's cummulative length */ DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_CLEN_PTR(queue)); + /* decrement grandparent's cummulative length */ + DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue)); FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); /* dettach packet from queue */ @@ -219,12 +232,15 @@ dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt) queue->len++; /* increment parent's cummulative length */ DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue)); + /* increment grandparent's cummulative length */ + DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue)); } /** Fetch the 
backup queue for a flowring, and assign flow control thresholds */ void dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid, - int queue_budget, int cumm_threshold, void *cumm_ctr) + int queue_budget, int cumm_threshold, void *cumm_ctr, + int l2cumm_threshold, void *l2cumm_ctr) { flow_queue_t * queue; @@ -232,6 +248,8 @@ dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid, ASSERT(queue_budget > 1); ASSERT(cumm_threshold > 1); ASSERT(cumm_ctr != (void*)NULL); + ASSERT(l2cumm_threshold > 1); + ASSERT(l2cumm_ctr != (void*)NULL); queue = dhd_flow_queue(dhdp, flowid); @@ -240,6 +258,10 @@ dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid, /* Set the queue's parent threshold and cummulative counter */ DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold); DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr); + + /* Set the queue's grandparent threshold and cummulative counter */ + DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold); + DHD_FLOW_QUEUE_SET_L2CLEN(queue, l2cumm_ctr); } /** Initializes data structures of multiple flow rings */ @@ -260,7 +282,7 @@ dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings) /* Construct a 16bit flowid allocator */ flowid_allocator = id16_map_init(dhdp->osh, - num_flow_rings - FLOW_RING_COMMON, FLOWID_RESERVED); + num_flow_rings - dhdp->bus->max_cmn_rings, FLOWID_RESERVED); if (flowid_allocator == NULL) { DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__)); return BCME_NOMEM; @@ -276,11 +298,15 @@ dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings) /* Initialize flow ring table state */ DHD_CUMM_CTR_INIT(&dhdp->cumm_ctr); + DHD_CUMM_CTR_INIT(&dhdp->l2cumm_ctr); bzero((uchar *)flow_ring_table, flow_ring_table_sz); for (idx = 0; idx < num_flow_rings; idx++) { flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED; flow_ring_table[idx].flowid = (uint16)idx; flow_ring_table[idx].lock = dhd_os_spin_lock_init(dhdp->osh); +#ifdef IDLE_TX_FLOW_MGMT + flow_ring_table[idx].last_active_ts = 
OSL_SYSUPTIME(); +#endif /* IDLE_TX_FLOW_MGMT */ if (flow_ring_table[idx].lock == NULL) { DHD_ERROR(("%s: Failed to init spinlock for queue!\n", __FUNCTION__)); goto fail; @@ -391,7 +417,9 @@ void dhd_flow_rings_deinit(dhd_pub_t *dhdp) ASSERT(DHD_FLOW_QUEUE_EMPTY(&flow_ring_table[idx].queue)); /* Deinit flow ring queue locks before destroying flow ring table */ - dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock); + if (flow_ring_table[idx].lock != NULL) { + dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock); + } flow_ring_table[idx].lock = NULL; } @@ -421,8 +449,10 @@ void dhd_flow_rings_deinit(dhd_pub_t *dhdp) lock = dhdp->flowid_lock; dhdp->flowid_lock = NULL; - DHD_FLOWID_UNLOCK(lock, flags); - dhd_os_spin_lock_deinit(dhdp->osh, lock); + if (lock) { + DHD_FLOWID_UNLOCK(lock, flags); + dhd_os_spin_lock_deinit(dhdp->osh, lock); + } dhd_os_spin_lock_deinit(dhdp->osh, dhdp->flowring_list_lock); dhdp->flowring_list_lock = NULL; @@ -445,13 +475,20 @@ dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex) #ifdef WLTDLS bool is_tdls_destination(dhd_pub_t *dhdp, uint8 *da) { - tdls_peer_node_t *cur = dhdp->peer_tbl.node; + unsigned long flags; + tdls_peer_node_t *cur = NULL; + + DHD_TDLS_LOCK(&dhdp->tdls_lock, flags); + cur = dhdp->peer_tbl.node; + while (cur != NULL) { if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) { + DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags); return TRUE; } cur = cur->next; } + DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags); return FALSE; } #endif /* WLTDLS */ @@ -471,7 +508,8 @@ dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da) ASSERT(if_flow_lkup); - if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) { + if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) || + (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) { #ifdef WLTDLS if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da)) && is_tdls_destination(dhdp, da)) { @@ -488,6 +526,7 @@ dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, 
char *sa, char *da) return FLOWID_INVALID; } #endif /* WLTDLS */ + /* For STA non TDLS dest and WDS dest flow ring id is mapped based on prio only */ cur = if_flow_lkup[ifindex].fl_hash[prio]; if (cur) { DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); @@ -530,7 +569,11 @@ dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da) uint16 flowid; unsigned long flags; - fl_hash_node = (flow_hash_info_t *) MALLOC(dhdp->osh, sizeof(flow_hash_info_t)); + fl_hash_node = (flow_hash_info_t *) MALLOCZ(dhdp->osh, sizeof(flow_hash_info_t)); + if (fl_hash_node == NULL) { + DHD_ERROR(("%s: flow_hash_info_t memory allocation failed \n", __FUNCTION__)); + return FLOWID_INVALID; + } memcpy(fl_hash_node->flow_info.da, da, sizeof(fl_hash_node->flow_info.da)); DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); @@ -552,8 +595,9 @@ dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da) DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; - if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) { - /* For STA non TDLS dest we allocate entry based on prio only */ + if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) || + (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) { + /* For STA non TDLS dest and WDS dest we allocate entry based on prio only */ #ifdef WLTDLS if (dhdp->peer_tbl.tdls_peer_count && (is_tdls_destination(dhdp, da))) { @@ -600,9 +644,9 @@ dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex, flow_ring_table_t *flow_ring_table; unsigned long flags; int ret; + bool is_sta_assoc; DHD_INFO(("%s\n", __FUNCTION__)); - if (!dhdp->flow_ring_table) { return BCME_ERROR; } @@ -618,7 +662,18 @@ dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex, if (!if_flow_lkup[ifindex].status) return BCME_ERROR; - + BCM_REFERENCE(is_sta_assoc); +#if defined(PCIE_FULL_DONGLE) + is_sta_assoc = dhd_sta_associated(dhdp, ifindex, (uint8 *)da); + DHD_ERROR(("%s: multi %x ifindex %d role %x assoc %d\n", __FUNCTION__, + 
ETHER_ISMULTI(da), ifindex, if_flow_lkup[ifindex].role, + is_sta_assoc)); + if (!ETHER_ISMULTI(da) && + ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_AP) || + (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_P2P_GO)) && + (!is_sta_assoc)) + return BCME_ERROR; +#endif /* (linux || LINUX) && PCIE_FULL_DONGLE */ id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da); if (id == FLOWID_INVALID) { @@ -627,11 +682,11 @@ dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex, return BCME_ERROR; } + ASSERT(id < dhdp->num_flow_rings); + /* register this flowid in dhd_pub */ dhd_add_flowid(dhdp, ifindex, prio, da, id); - ASSERT(id < dhdp->num_flow_rings); - flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id]; DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); @@ -642,7 +697,13 @@ dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex, flow_ring_node->flow_info.tid = prio; flow_ring_node->flow_info.ifindex = ifindex; flow_ring_node->active = TRUE; - flow_ring_node->status = FLOW_RING_STATUS_PENDING; + flow_ring_node->status = FLOW_RING_STATUS_CREATE_PENDING; + +#ifdef DEVICE_TX_STUCK_DETECT + flow_ring_node->tx_cmpl = flow_ring_node->tx_cmpl_prev = OSL_SYSUPTIME(); + flow_ring_node->stuck_count = 0; +#endif /* DEVICE_TX_STUCK_DETECT */ + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); /* Create and inform device about the new flow */ @@ -677,19 +738,23 @@ dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex, * active is made TRUE when a flow_ring_node gets allocated and is made * FALSE when the flow ring gets removed and does not reflect the True state * of the Flow ring. + * In case if IDLE_TX_FLOW_MGMT is defined, we have to handle two more flowring + * states. If the flow_ring_node's status is FLOW_RING_STATUS_SUSPENDED, the flowid + * is to be returned and from dhd_bus_txdata, the flowring would be resumed again. + * The status FLOW_RING_STATUS_RESUME_PENDING, is equivalent to + * FLOW_RING_STATUS_CREATE_PENDING. 
*/ - if (flow_ring_node->status == FLOW_RING_STATUS_OPEN || - flow_ring_node->status == FLOW_RING_STATUS_PENDING) { - *flowid = id; - ret = BCME_OK; - } else { + if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING || + flow_ring_node->status == FLOW_RING_STATUS_CLOSED) { *flowid = FLOWID_INVALID; ret = BCME_ERROR; + } else { + *flowid = id; + ret = BCME_OK; } DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); return ret; - } /* Flow Id found in the hash */ } /* dhd_flowid_lookup */ @@ -715,7 +780,7 @@ dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void *pktbuf) return BCME_ERROR; } - if (dhd_flowid_lookup(dhdp, ifindex, prio, eh->ether_shost, eh->ether_dhost, + if (dhd_flowid_lookup(dhdp, ifindex, prio, (char *)eh->ether_shost, (char *)eh->ether_dhost, &flowid) != BCME_OK) { return BCME_ERROR; } @@ -782,7 +847,7 @@ dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid) } /* dhd_flowid_free */ /** - * Delete all Flow rings associated with the given interface. Is called when e.g. the dongle + * Delete all Flow rings associated with the given interface. Is called when eg the dongle * indicates that a wireless link has gone down. 
*/ void @@ -791,6 +856,32 @@ dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex) uint32 id; flow_ring_table_t *flow_ring_table; + DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex)); + + ASSERT(ifindex < DHD_MAX_IFS); + if (ifindex >= DHD_MAX_IFS) + return; + + if (!dhdp->flow_ring_table) + return; + + flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table; + for (id = 0; id < dhdp->num_flow_rings; id++) { + if (flow_ring_table[id].active && + (flow_ring_table[id].flow_info.ifindex == ifindex) && + (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) { + dhd_bus_flow_ring_delete_request(dhdp->bus, + (void *) &flow_ring_table[id]); + } + } +} + +void +dhd_flow_rings_flush(dhd_pub_t *dhdp, uint8 ifindex) +{ + uint32 id; + flow_ring_table_t *flow_ring_table; + DHD_INFO(("%s: ifindex %u\n", __FUNCTION__, ifindex)); ASSERT(ifindex < DHD_MAX_IFS); @@ -799,17 +890,19 @@ dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex) if (!dhdp->flow_ring_table) return; - flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table; - for (id = 0; id < dhdp->num_flow_rings; id++) { + + for (id = 0; id <= dhdp->num_flow_rings; id++) { if (flow_ring_table[id].active && - (flow_ring_table[id].flow_info.ifindex == ifindex)) { - dhd_bus_flow_ring_delete_request(dhdp->bus, + (flow_ring_table[id].flow_info.ifindex == ifindex) && + (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) { + dhd_bus_flow_ring_flush_request(dhdp->bus, (void *) &flow_ring_table[id]); } } } + /** Delete flow ring(s) for given peer address. Related to AP/AWDL/TDLS functionality. 
*/ void dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr) @@ -831,8 +924,8 @@ dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr) if (flow_ring_table[id].active && (flow_ring_table[id].flow_info.ifindex == ifindex) && (!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) && - (flow_ring_table[id].status != FLOW_RING_STATUS_DELETE_PENDING)) { - DHD_INFO(("%s: deleting flowid %d\n", + (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) { + DHD_ERROR(("%s: deleting flowid %d\n", __FUNCTION__, flow_ring_table[id].flowid)); dhd_bus_flow_ring_delete_request(dhdp->bus, (void *) &flow_ring_table[id]); @@ -852,7 +945,7 @@ dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex, if (ifindex >= DHD_MAX_IFS) return; - DHD_INFO(("%s: ifindex %u op %u role is %u \n", + DHD_ERROR(("%s: ifindex %u op %u role is %u \n", __FUNCTION__, ifindex, op, role)); if (!dhdp->flowid_allocator) { DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__)); @@ -866,13 +959,19 @@ dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex, if_flow_lkup[ifindex].role = role; - if (role != WLC_E_IF_ROLE_STA) { + if (role == WLC_E_IF_ROLE_WDS) { + /** + * WDS role does not send WLC_E_LINK event after interface is up. + * So to create flowrings for WDS, make status as TRUE in WLC_E_IF itself. + * same is true while making the status as FALSE. + * TODO: Fix FW to send WLC_E_LINK for WDS role aswell. So that all the + * interfaces are handled uniformly. 
+ */ if_flow_lkup[ifindex].status = TRUE; DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d \n", __FUNCTION__, ifindex, role)); - /* Create Mcast Flow */ } - } else if (op == WLC_E_IF_DEL) { + } else if ((op == WLC_E_IF_DEL) && (role == WLC_E_IF_ROLE_WDS)) { if_flow_lkup[ifindex].status = FALSE; DHD_INFO(("%s: cleanup all Flow rings for ifindex %d role is %d \n", __FUNCTION__, ifindex, role)); @@ -891,17 +990,17 @@ dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, uint8 status) if (ifindex >= DHD_MAX_IFS) return BCME_BADARG; - DHD_INFO(("%s: ifindex %d status %d\n", __FUNCTION__, ifindex, status)); + DHD_ERROR(("%s: ifindex %d status %d\n", __FUNCTION__, ifindex, status)); DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; - if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) { - if (status) - if_flow_lkup[ifindex].status = TRUE; - else - if_flow_lkup[ifindex].status = FALSE; + if (status) { + if_flow_lkup[ifindex].status = TRUE; + } else { + if_flow_lkup[ifindex].status = FALSE; } + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); return BCME_OK; @@ -944,7 +1043,7 @@ int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map) /** Inform firmware on updated flow priority mapping, called on IOVAR */ int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set) { - uint8 iovbuf[24]; + uint8 iovbuf[24] = {0}; if (!set) { bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char*)iovbuf, sizeof(iovbuf)); if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) { diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_flowring.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_flowring.h index 7c36de5459bf..d5faf0aa8cf3 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_flowring.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_flowring.h @@ -6,7 +6,7 @@ * Provides type definitions and function prototypes used to create, delete and manage flow rings at * high 
level. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -29,7 +29,7 @@ * * <> * - * $Id: dhd_flowring.h 591285 2015-10-07 11:56:29Z $ + * $Id: dhd_flowring.h 672438 2016-11-28 12:35:24Z $ */ @@ -50,40 +50,28 @@ #define FLOWID_RESERVED (FLOW_RING_COMMON) #define FLOW_RING_STATUS_OPEN 0 -#define FLOW_RING_STATUS_PENDING 1 +#define FLOW_RING_STATUS_CREATE_PENDING 1 #define FLOW_RING_STATUS_CLOSED 2 #define FLOW_RING_STATUS_DELETE_PENDING 3 #define FLOW_RING_STATUS_FLUSH_PENDING 4 -#define FLOW_RING_STATUS_STA_FREEING 5 +#ifdef IDLE_TX_FLOW_MGMT +#define FLOW_RING_STATUS_SUSPENDED 5 +#define FLOW_RING_STATUS_RESUME_PENDING 6 +#endif /* IDLE_TX_FLOW_MGMT */ +#define FLOW_RING_STATUS_STA_FREEING 7 + +#ifdef DHD_EFI +#define DHD_FLOWRING_RX_BUFPOST_PKTSZ 1600 +#else #define DHD_FLOWRING_RX_BUFPOST_PKTSZ 2048 +#endif #define DHD_FLOW_PRIO_AC_MAP 0 #define DHD_FLOW_PRIO_TID_MAP 1 +/* Flow ring prority map for lossless roaming */ #define DHD_FLOW_PRIO_LLR_MAP 2 -/* Pkttag not compatible with PROP_TXSTATUS or WLFC */ -typedef struct dhd_pkttag_fr { - uint16 flowid; - uint16 ifid; - int dataoff; - dmaaddr_t physaddr; - uint32 pa_len; - -} dhd_pkttag_fr_t; - -#define DHD_PKTTAG_SET_FLOWID(tag, flow) ((tag)->flowid = (uint16)(flow)) -#define DHD_PKTTAG_SET_IFID(tag, idx) ((tag)->ifid = (uint16)(idx)) -#define DHD_PKTTAG_SET_DATAOFF(tag, offset) ((tag)->dataoff = (int)(offset)) -#define DHD_PKTTAG_SET_PA(tag, pa) ((tag)->physaddr = (pa)) -#define DHD_PKTTAG_SET_PA_LEN(tag, palen) ((tag)->pa_len = (palen)) - -#define DHD_PKTTAG_FLOWID(tag) ((tag)->flowid) -#define DHD_PKTTAG_IFID(tag) ((tag)->ifid) -#define DHD_PKTTAG_DATAOFF(tag) ((tag)->dataoff) -#define DHD_PKTTAG_PA(tag) ((tag)->physaddr) -#define DHD_PKTTAG_PA_LEN(tag) ((tag)->pa_len) - /* Hashing a MacAddress for lkup into 
a per interface flow hash table */ #define DHD_FLOWRING_HASH_SIZE 256 #define DHD_FLOWRING_HASHINDEX(ea, prio) \ @@ -94,6 +82,7 @@ typedef struct dhd_pkttag_fr { #define DHD_IF_ROLE_AP(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_AP) #define DHD_IF_ROLE_STA(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_STA) #define DHD_IF_ROLE_P2PGO(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_GO) +#define DHD_IF_ROLE_WDS(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_WDS) #define DHD_FLOW_RING(dhdp, flowid) \ (flow_ring_node_t *)&(((flow_ring_node_t *)((dhdp)->flow_ring_table))[flowid]) @@ -118,11 +107,14 @@ typedef struct flow_queue { void * clen_ptr; /* parent's cummulative length counter */ uint32 failures; /* enqueue failures due to queue overflow */ flow_queue_cb_t cb; /* callback invoked on threshold crossing */ + uint32 l2threshold; /* grandparent's (level 2) cummulative length threshold */ + void * l2clen_ptr; /* grandparent's (level 2) cummulative length counter */ } flow_queue_t; #define DHD_FLOW_QUEUE_LEN(queue) ((int)(queue)->len) #define DHD_FLOW_QUEUE_MAX(queue) ((int)(queue)->max) #define DHD_FLOW_QUEUE_THRESHOLD(queue) ((int)(queue)->threshold) +#define DHD_FLOW_QUEUE_L2THRESHOLD(queue) ((int)(queue)->l2threshold) #define DHD_FLOW_QUEUE_EMPTY(queue) ((queue)->len == 0) #define DHD_FLOW_QUEUE_FAILURES(queue) ((queue)->failures) @@ -146,9 +138,38 @@ typedef struct flow_queue { #define DHD_FLOW_QUEUE_SET_CLEN(queue, parent_clen_ptr) \ ((queue)->clen_ptr) = (void *)(parent_clen_ptr) +/* Queue's level 2 cummulative threshold. */ +#define DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold) \ + ((queue)->l2threshold) = ((l2cumm_threshold) - 1) + +/* Queue's level 2 cummulative length object accessor. 
*/ +#define DHD_FLOW_QUEUE_L2CLEN_PTR(queue) ((queue)->l2clen_ptr) + +/* Set a queue's level 2 cumm_len point to a grandparent's cumm_ctr_t cummulative length */ +#define DHD_FLOW_QUEUE_SET_L2CLEN(queue, grandparent_clen_ptr) \ + ((queue)->l2clen_ptr) = (void *)(grandparent_clen_ptr) + /* see wlfc_proto.h for tx status details */ #define DHD_FLOWRING_MAXSTATUS_MSGS 5 #define DHD_FLOWRING_TXSTATUS_CNT_UPDATE(bus, flowid, txstatus) + +/* Pkttag not compatible with PROP_TXSTATUS or WLFC */ +typedef struct dhd_pkttag_fr { + uint16 flowid; + uint16 ifid; + int dataoff; + dmaaddr_t physaddr; + uint32 pa_len; +} dhd_pkttag_fr_t; + +#define DHD_PKTTAG_SET_IFID(tag, idx) ((tag)->ifid = (uint16)(idx)) +#define DHD_PKTTAG_SET_PA(tag, pa) ((tag)->physaddr = (pa)) +#define DHD_PKTTAG_SET_PA_LEN(tag, palen) ((tag)->pa_len = (palen)) +#define DHD_PKTTAG_IFID(tag) ((tag)->ifid) +#define DHD_PKTTAG_PA(tag) ((tag)->physaddr) +#define DHD_PKTTAG_PA_LEN(tag) ((tag)->pa_len) + + /** each flow ring is dedicated to a tid/sa/da combination */ typedef struct flow_info { uint8 tid; @@ -171,6 +192,22 @@ typedef struct flow_ring_node { flow_info_t flow_info; void *prot_info; void *lock; /* lock for flowring access protection */ + +#ifdef IDLE_TX_FLOW_MGMT + uint64 last_active_ts; /* contains last active timestamp */ +#endif /* IDLE_TX_FLOW_MGMT */ +#ifdef DEVICE_TX_STUCK_DETECT + /* Time stamp(msec) when last time a Tx packet completion is received on this flow ring */ + uint32 tx_cmpl; + /* + * Holds the tx_cmpl which was read during the previous + * iteration of the stuck detection algo + */ + uint32 tx_cmpl_prev; + /* counter to decide if this particlur flow is stuck or not */ + uint32 stuck_count; +#endif /* DEVICE_TX_STUCK_DETECT */ + } flow_ring_node_t; typedef flow_ring_node_t flow_ring_table_t; @@ -200,13 +237,15 @@ extern flow_ring_node_t * dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid); extern flow_queue_t * dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid); extern void 
dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max); +extern void dhd_flow_queue_reinit(dhd_pub_t *dhdp, flow_queue_t *queue, int max); extern void dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb); extern int dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt); extern void * dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue); extern void dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt); extern void dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid, - int queue_budget, int cumm_threshold, void *cumm_ctr); + int queue_budget, int cumm_threshold, void *cumm_ctr, + int l2cumm_threshold, void *l2cumm_ctr); extern int dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings); extern void dhd_flow_rings_deinit(dhd_pub_t *dhdp); @@ -217,6 +256,7 @@ extern int dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, extern void dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid); extern void dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex); +extern void dhd_flow_rings_flush(dhd_pub_t *dhdp, uint8 ifindex); extern void dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr); diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_gpio.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_gpio.c index 187a14922ce8..8746c804e784 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_gpio.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_gpio.c @@ -26,7 +26,7 @@ static int gpio_wl_host_wake = -1; // WL_HOST_WAKE is output pin of WLAN module #endif static int -dhd_wlan_set_power(bool on +dhd_wlan_set_power(int on #ifdef BUS_POWER_RESTORE , wifi_adapter_info_t *adapter #endif /* BUS_POWER_RESTORE */ @@ -65,6 +65,8 @@ dhd_wlan_set_power(bool on } #endif /* BCMPCIE */ #endif /* BUS_POWER_RESTORE */ + /* Lets customer power to get stable */ + mdelay(100); } else { #if defined(BUS_POWER_RESTORE) #if 
defined(BCMSDIO) @@ -102,7 +104,7 @@ static int dhd_wlan_set_reset(int onoff) return 0; } -static int dhd_wlan_set_carddetect(bool present) +static int dhd_wlan_set_carddetect(int present) { int err = 0; @@ -166,14 +168,6 @@ static int dhd_wlan_get_mac_addr(unsigned char *buf) return err; } -#if !defined(WL_WIRELESS_EXT) -struct cntry_locales_custom { - char iso_abbrev[WLC_CNTRY_BUF_SZ]; /* ISO 3166-1 country abbreviation */ - char custom_locale[WLC_CNTRY_BUF_SZ]; /* Custom firmware locale */ - int32 custom_locale_rev; /* Custom local revisin default -1 */ -}; -#endif - static struct cntry_locales_custom brcm_wlan_translate_custom_table[] = { /* Table should be filled out based on custom platform regulatory requirement */ {"", "XT", 49}, /* Universal if Country code is unknown or empty */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_ip.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_ip.c index 971e4ca8fe7d..ee7d105d2317 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_ip.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_ip.c @@ -1,7 +1,7 @@ /* * IP Packet Parser Module. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,15 +24,15 @@ * * <> * - * $Id: dhd_ip.c 569132 2015-07-07 09:09:33Z $ + * $Id: dhd_ip.c 700317 2017-05-18 15:13:29Z $ */ #include #include -#include -#include -#include -#include +#include +#include +#include <802.3.h> +#include #include #include @@ -42,7 +42,7 @@ #ifdef DHDTCPACK_SUPPRESS #include #include -#include +#include #endif /* DHDTCPACK_SUPPRESS */ /* special values */ @@ -209,7 +209,6 @@ _tdata_psh_info_pool_deq(tcpack_sup_module_t *tcpack_sup_mod) return tdata_psh_info; } -#ifdef BCMSDIO static int _tdata_psh_info_pool_init(dhd_pub_t *dhdp, tcpack_sup_module_t *tcpack_sup_mod) { @@ -287,7 +286,6 @@ static void _tdata_psh_info_pool_deinit(dhd_pub_t *dhdp, return; } -#endif /* BCMSDIO */ static void dhd_tcpack_send(ulong data) { @@ -340,89 +338,137 @@ int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode) { int ret = BCME_OK; unsigned long flags; + tcpack_sup_module_t *tcpack_sup_module; + uint8 invalid_mode = FALSE; + int prev_mode; + int i = 0; flags = dhd_os_tcpacklock(dhdp); + tcpack_sup_module = dhdp->tcpack_sup_module; + prev_mode = dhdp->tcpack_sup_mode; - if (dhdp->tcpack_sup_mode == mode) { + /* Check a new mode */ + if (prev_mode == mode) { DHD_ERROR(("%s %d: already set to %d\n", __FUNCTION__, __LINE__, mode)); goto exit; } - if (mode >= TCPACK_SUP_LAST_MODE || -#ifndef BCMSDIO - mode == TCPACK_SUP_DELAYTX || -#endif /* !BCMSDIO */ - FALSE) { - DHD_ERROR(("%s %d: Invalid mode %d\n", __FUNCTION__, __LINE__, mode)); + invalid_mode |= (mode >= TCPACK_SUP_LAST_MODE); +#ifdef BCMSDIO + invalid_mode |= (mode == TCPACK_SUP_HOLD); +#endif /* BCMSDIO */ +#ifdef BCMPCIE + invalid_mode |= ((mode == TCPACK_SUP_REPLACE) || (mode == TCPACK_SUP_DELAYTX)); +#endif /* BCMPCIE */ + + if (invalid_mode) { + 
DHD_ERROR(("%s %d: Invalid TCP ACK Suppress mode %d\n", + __FUNCTION__, __LINE__, mode)); ret = BCME_BADARG; goto exit; } - DHD_TRACE(("%s: %d -> %d\n", + DHD_TRACE(("%s: TCP ACK Suppress mode %d -> mode %d\n", __FUNCTION__, dhdp->tcpack_sup_mode, mode)); -#ifdef BCMSDIO - /* Old tcpack_sup_mode is TCPACK_SUP_DELAYTX */ - if (dhdp->tcpack_sup_mode == TCPACK_SUP_DELAYTX) { - tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module; - /* We won't need tdata_psh_info pool and tcpddata_info_tbl anymore */ - _tdata_psh_info_pool_deinit(dhdp, tcpack_sup_mod); - tcpack_sup_mod->tcpdata_info_cnt = 0; - bzero(tcpack_sup_mod->tcpdata_info_tbl, - sizeof(tcpdata_info_t) * TCPDATA_INFO_MAXNUM); - /* For half duplex bus interface, tx precedes rx by default */ - if (dhdp->bus) - dhd_bus_set_dotxinrx(dhdp->bus, TRUE); + /* Pre-process routines to change a new mode as per previous mode */ + switch (prev_mode) { + case TCPACK_SUP_OFF: + if (tcpack_sup_module == NULL) { + tcpack_sup_module = MALLOC(dhdp->osh, sizeof(tcpack_sup_module_t)); + if (tcpack_sup_module == NULL) { + DHD_ERROR(("%s[%d]: Failed to allocate the new memory for " + "tcpack_sup_module\n", __FUNCTION__, __LINE__)); + dhdp->tcpack_sup_mode = TCPACK_SUP_OFF; + ret = BCME_NOMEM; + goto exit; + } + dhdp->tcpack_sup_module = tcpack_sup_module; + } + bzero(tcpack_sup_module, sizeof(tcpack_sup_module_t)); + break; + case TCPACK_SUP_DELAYTX: + if (tcpack_sup_module) { + /* We won't need tdata_psh_info pool and + * tcpddata_info_tbl anymore + */ + _tdata_psh_info_pool_deinit(dhdp, tcpack_sup_module); + tcpack_sup_module->tcpdata_info_cnt = 0; + bzero(tcpack_sup_module->tcpdata_info_tbl, + sizeof(tcpdata_info_t) * TCPDATA_INFO_MAXNUM); + } + + /* For half duplex bus interface, tx precedes rx by default */ + if (dhdp->bus) { + dhd_bus_set_dotxinrx(dhdp->bus, TRUE); + } + + if (tcpack_sup_module == NULL) { + DHD_ERROR(("%s[%d]: tcpack_sup_module should not be NULL\n", + __FUNCTION__, __LINE__)); + dhdp->tcpack_sup_mode = 
TCPACK_SUP_OFF; + goto exit; + } + break; } -#endif /* BCMSDIO */ + + /* Update a new mode */ dhdp->tcpack_sup_mode = mode; - if (mode == TCPACK_SUP_OFF) { - ASSERT(dhdp->tcpack_sup_module != NULL); - /* Clean up timer/data structure for any remaining/pending packet or timer. */ - dhd_tcpack_info_tbl_clean(dhdp); - MFREE(dhdp->osh, dhdp->tcpack_sup_module, sizeof(tcpack_sup_module_t)); - dhdp->tcpack_sup_module = NULL; - goto exit; - } - - if (dhdp->tcpack_sup_module == NULL) { - tcpack_sup_module_t *tcpack_sup_mod = - MALLOC(dhdp->osh, sizeof(tcpack_sup_module_t)); - if (tcpack_sup_mod == NULL) { - DHD_ERROR(("%s %d: No MEM\n", __FUNCTION__, __LINE__)); - dhdp->tcpack_sup_mode = TCPACK_SUP_OFF; - ret = BCME_NOMEM; - goto exit; - } - bzero(tcpack_sup_mod, sizeof(tcpack_sup_module_t)); - dhdp->tcpack_sup_module = tcpack_sup_mod; - } - -#ifdef BCMSDIO - if (mode == TCPACK_SUP_DELAYTX) { - ret = _tdata_psh_info_pool_init(dhdp, dhdp->tcpack_sup_module); - if (ret != BCME_OK) - DHD_ERROR(("%s %d: pool init fail with %d\n", __FUNCTION__, __LINE__, ret)); - else if (dhdp->bus) - dhd_bus_set_dotxinrx(dhdp->bus, FALSE); - } -#endif /* BCMSDIO */ - - if (mode == TCPACK_SUP_HOLD) { - int i; - tcpack_sup_module_t *tcpack_sup_mod = - (tcpack_sup_module_t *)dhdp->tcpack_sup_module; - dhdp->tcpack_sup_ratio = CUSTOM_TCPACK_SUPP_RATIO; - dhdp->tcpack_sup_delay = CUSTOM_TCPACK_DELAY_TIME; - for (i = 0; i < TCPACK_INFO_MAXNUM; i++) - { - tcpack_sup_mod->tcpack_info_tbl[i].dhdp = dhdp; - init_timer(&tcpack_sup_mod->tcpack_info_tbl[i].timer); - tcpack_sup_mod->tcpack_info_tbl[i].timer.data = - (ulong)&tcpack_sup_mod->tcpack_info_tbl[i]; - tcpack_sup_mod->tcpack_info_tbl[i].timer.function = dhd_tcpack_send; - } + /* Process for a new mode */ + switch (mode) { + case TCPACK_SUP_OFF: + ASSERT(tcpack_sup_module != NULL); + /* Clean up timer/data structure for + * any remaining/pending packet or timer. 
+ */ + if (tcpack_sup_module) { + /* Check if previous mode is TCAPACK_SUP_HOLD */ + if (prev_mode == TCPACK_SUP_HOLD) { + for (i = 0; i < TCPACK_INFO_MAXNUM; i++) { + tcpack_info_t *tcpack_info_tbl = + &tcpack_sup_module->tcpack_info_tbl[i]; + del_timer(&tcpack_info_tbl->timer); + if (tcpack_info_tbl->pkt_in_q) { + PKTFREE(dhdp->osh, + tcpack_info_tbl->pkt_in_q, TRUE); + tcpack_info_tbl->pkt_in_q = NULL; + } + } + } + MFREE(dhdp->osh, tcpack_sup_module, sizeof(tcpack_sup_module_t)); + dhdp->tcpack_sup_module = NULL; + } else { + DHD_ERROR(("%s[%d]: tcpack_sup_module should not be NULL\n", + __FUNCTION__, __LINE__)); + } + break; + case TCPACK_SUP_REPLACE: + /* There is nothing to configure for this mode */ + break; + case TCPACK_SUP_DELAYTX: + ret = _tdata_psh_info_pool_init(dhdp, tcpack_sup_module); + if (ret != BCME_OK) { + DHD_ERROR(("%s %d: pool init fail with %d\n", + __FUNCTION__, __LINE__, ret)); + break; + } + if (dhdp->bus) { + dhd_bus_set_dotxinrx(dhdp->bus, FALSE); + } + break; + case TCPACK_SUP_HOLD: + dhdp->tcpack_sup_ratio = CUSTOM_TCPACK_SUPP_RATIO; + dhdp->tcpack_sup_delay = CUSTOM_TCPACK_DELAY_TIME; + for (i = 0; i < TCPACK_INFO_MAXNUM; i++) { + tcpack_info_t *tcpack_info_tbl = + &tcpack_sup_module->tcpack_info_tbl[i]; + tcpack_info_tbl->dhdp = dhdp; + init_timer(&tcpack_info_tbl->timer); + tcpack_info_tbl->timer.data = (ulong)tcpack_info_tbl; + tcpack_info_tbl->timer.function = dhd_tcpack_send; + } + break; } exit: @@ -1182,7 +1228,7 @@ dhd_tcpack_hold(dhd_pub_t *dhdp, void *pkt, int ifidx) for (i = 0; i < TCPACK_INFO_MAXNUM; i++) { void *oldpkt; /* TCPACK packet that is already in txq or DelayQ */ uint8 *old_ether_hdr, *old_ip_hdr, *old_tcp_hdr; - uint32 old_ip_hdr_len, old_tcp_hdr_len; + uint32 old_ip_hdr_len; uint32 old_tcpack_num; /* TCP ACK number of old TCPACK packet in Q */ if ((oldpkt = tcpack_info_tbl[i].pkt_in_q) == NULL) { @@ -1204,7 +1250,6 @@ dhd_tcpack_hold(dhd_pub_t *dhdp, void *pkt, int ifidx) old_ip_hdr = old_ether_hdr + 
ETHER_HDR_LEN; old_ip_hdr_len = IPV4_HLEN(old_ip_hdr); old_tcp_hdr = old_ip_hdr + old_ip_hdr_len; - old_tcp_hdr_len = 4 * TCP_HDRLEN(old_tcp_hdr[TCP_HLEN_OFFSET]); DHD_TRACE(("%s %d: oldpkt %p[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR " TCP port %d %d\n", __FUNCTION__, __LINE__, oldpkt, i, diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_ip.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_ip.h index a72976b07ccf..240d852151f4 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_ip.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_ip.h @@ -3,7 +3,7 @@ * * Provides type definitions and function prototypes used to parse ip packet. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -26,7 +26,7 @@ * * <> * - * $Id: dhd_ip.h 537119 2015-02-25 04:24:14Z $ + * $Id: dhd_ip.h 536854 2015-02-24 13:17:29Z $ */ #ifndef _dhd_ip_h_ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux.c index 7f3320b1ac27..2a198a7abc73 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux.c @@ -2,7 +2,7 @@ * Broadcom Dongle Host Driver (DHD), Linux-specific network interface * Basically selected code segments from usb-cdc.c and usb-rndis.c * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: dhd_linux.c 609723 2016-01-05 08:40:45Z $ + * $Id: dhd_linux.c 710862 2017-07-14 07:43:59Z $ */ #include @@ -36,7 +36,6 @@ #include #endif /* SHOW_LOGTRACE */ - 
#include #include #include @@ -66,15 +65,19 @@ #include #include -#include -#include -#include -#include + +#include +#include +#include +#include <802.3.h> #include #include #include #include +#ifdef DHD_WET +#include +#endif /* DHD_WET */ #ifdef PCIE_FULL_DONGLE #include #endif @@ -85,6 +88,7 @@ #include #endif #include +#include #ifdef CONFIG_HAS_WAKELOCK #include #endif @@ -97,17 +101,24 @@ #ifdef RTT_SUPPORT #include #endif +#ifdef DHD_TIMESYNC +#include +#endif /* DHD_TIMESYNC */ #ifdef CONFIG_COMPAT #include #endif +#if defined(CONFIG_SOC_EXYNOS8895) +#include +#endif /* CONFIG_SOC_EXYNOS8895 */ + #ifdef DHD_WMF #include #endif /* DHD_WMF */ #ifdef DHD_L2_FILTER -#include +#include #include #include #endif /* DHD_L2_FILTER */ @@ -120,7 +131,13 @@ #ifdef DHDTCPACK_SUPPRESS #include #endif /* DHDTCPACK_SUPPRESS */ - +#include +#ifdef DHD_PKT_LOGGING +#include +#endif /* DHD_PKT_LOGGING */ +#if defined(STAT_REPORT) +#include +#endif /* STAT_REPORT */ #ifdef DHD_DEBUG_PAGEALLOC typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len); void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len); @@ -128,6 +145,19 @@ extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle); #endif /* DHD_DEBUG_PAGEALLOC */ +#if defined(DHD_LB) +#if !defined(PCIE_FULL_DONGLE) +#error "DHD Loadbalancing only supported on PCIE_FULL_DONGLE" +#endif /* !PCIE_FULL_DONGLE */ +#endif /* DHD_LB */ + +#if defined(DHD_LB_RXP) || defined(DHD_LB_RXC) || defined(DHD_LB_TXC) || \ + defined(DHD_LB_STATS) +#if !defined(DHD_LB) +#error "DHD loadbalance derivatives are supported only if DHD_LB is defined" +#endif /* !DHD_LB */ +#endif /* DHD_LB_RXP || DHD_LB_RXC || DHD_LB_TXC || DHD_LB_STATS */ + #if defined(DHD_LB) /* Dynamic CPU selection for load balancing */ #include @@ -139,19 +169,38 @@ extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle); #if !defined(DHD_LB_PRIMARY_CPUS) #define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids 
mask */ #endif - #if !defined(DHD_LB_SECONDARY_CPUS) #define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */ #endif -#define HIST_BIN_SIZE 8 +#define HIST_BIN_SIZE 9 -#if defined(DHD_LB_RXP) static void dhd_rx_napi_dispatcher_fn(struct work_struct * work); -#endif /* DHD_LB_RXP */ +#if defined(DHD_LB_TXP) +static void dhd_lb_tx_handler(unsigned long data); +static void dhd_tx_dispatcher_work(struct work_struct * work); +static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp); +static void dhd_lb_tx_dispatch(dhd_pub_t *dhdp); + +/* Pkttag not compatible with PROP_TXSTATUS or WLFC */ +typedef struct dhd_tx_lb_pkttag_fr { + struct net_device *net; + int ifidx; +} dhd_tx_lb_pkttag_fr_t; + +#define DHD_LB_TX_PKTTAG_SET_NETDEV(tag, netdevp) ((tag)->net = netdevp) +#define DHD_LB_TX_PKTTAG_NETDEV(tag) ((tag)->net) + +#define DHD_LB_TX_PKTTAG_SET_IFIDX(tag, ifidx) ((tag)->ifidx = ifidx) +#define DHD_LB_TX_PKTTAG_IFIDX(tag) ((tag)->ifidx) +#endif /* DHD_LB_TXP */ #endif /* DHD_LB */ +#ifdef HOFFLOAD_MODULES +#include +#endif + #ifdef WLMEDIA_HTSF #include #include @@ -178,6 +227,18 @@ typedef struct histo_ { static histo_t vi_d1, vi_d2, vi_d3, vi_d4; #endif /* WLMEDIA_HTSF */ +#ifdef WL_MONITOR +#include +#include +#endif + +#define htod32(i) (i) +#define htod16(i) (i) +#define dtoh32(i) (i) +#define dtoh16(i) (i) +#define htodchanspec(i) (i) +#define dtohchanspec(i) (i) + #ifdef STBLINUX #ifdef quote_str #undef quote_str @@ -192,21 +253,27 @@ static char *driver_target = "driver_target: "quote_str(BRCM_DRIVER_TARGET); #endif /* STBLINUX */ + #if defined(SOFTAP) extern bool ap_cfg_running; extern bool ap_fw_loaded; #endif + +#ifdef DHD_8021X_DUMP extern void dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction); +#endif /* DHD_8021X_DUMP */ #ifdef FIX_CPU_MIN_CLOCK #include #endif /* FIX_CPU_MIN_CLOCK */ + #ifdef SET_RANDOM_MAC_SOFTAP #ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL #define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11 #endif static u32 vendor_oui = 
CONFIG_DHD_SET_RANDOM_MAC_VAL; #endif /* SET_RANDOM_MAC_SOFTAP */ + #ifdef ENABLE_ADAPTIVE_SCHED #define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */ #ifndef CUSTOM_CPUFREQ_THRESH @@ -251,7 +318,7 @@ static bool dhd_inetaddr_notifier_registered = FALSE; #endif /* ARP_OFFLOAD_SUPPORT */ #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) -static int dhd_inet6addr_notifier_call(struct notifier_block *this, +int dhd_inet6addr_notifier_call(struct notifier_block *this, unsigned long event, void *ptr); static struct notifier_block dhd_inet6addr_notifier = { .notifier_call = dhd_inet6addr_notifier_call @@ -278,8 +345,16 @@ static void dhd_hang_process(void *dhd_info, void *event_data, u8 event); MODULE_LICENSE("GPL and additional rights"); #endif /* LinuxVer */ +#ifdef CONFIG_BCM_DETECT_CONSECUTIVE_HANG +#define MAX_CONSECUTIVE_HANG_COUNTS 5 +#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */ + #include +#ifdef DHD_ULP +#include +#endif /* DHD_ULP */ + #ifdef BCM_FD_AGGR #define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE) #else @@ -291,7 +366,7 @@ MODULE_LICENSE("GPL and additional rights"); #endif /* BCM_FD_AGGR */ #ifdef PROP_TXSTATUS -extern bool dhd_wlfc_skip_fc(void); +extern bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx); extern void dhd_wlfc_plat_init(void *dhd); extern void dhd_wlfc_plat_deinit(void *dhd); #endif /* PROP_TXSTATUS */ @@ -326,7 +401,15 @@ extern wl_iw_extra_params_t g_wl_iw_params; #endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */ #endif /* CONFIG_PARTIALSUSPEND_SLP */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) +#include +#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */ + +#if defined(BCMPCIE) +extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval); +#else extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd); +#endif /* OEM_ANDROID && BCMPCIE */ #ifdef PKT_FILTER_SUPPORT extern void 
dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg); @@ -334,26 +417,39 @@ extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id); #endif - -#ifdef READ_MACADDR -extern int dhd_read_macaddr(struct dhd_info *dhd); -#else -static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; } -#endif -#ifdef WRITE_MACADDR -extern int dhd_write_macaddr(struct ether_addr *mac); -#else -static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; } -#endif +#if defined(PKT_FILTER_SUPPORT) && defined(APF) +static int __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id, + u8* program, uint32 program_len); +static int __dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id, + uint32 mode, uint32 enable); +static int __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id); +#endif /* PKT_FILTER_SUPPORT && APF */ +static INLINE int argos_register_notifier_init(struct net_device *net) { return 0;} +static INLINE int argos_register_notifier_deinit(void) { return 0;} +#if defined(BT_OVER_SDIO) +extern void wl_android_set_wifi_on_flag(bool enable); +#endif /* BT_OVER_SDIO */ + + +#if defined(TRAFFIC_MGMT_DWM) +void traffic_mgmt_pkt_set_prio(dhd_pub_t *dhdp, void * pktbuf); +#endif #ifdef DHD_FW_COREDUMP static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event); #endif /* DHD_FW_COREDUMP */ #ifdef DHD_LOG_DUMP +#define DLD_BUFFER_NUM 2 +/* [0]: General, [1]: Special */ +struct dhd_log_dump_buf g_dld_buf[DLD_BUFFER_NUM]; +static const int dld_buf_size[] = { + (1024 * 1024), /* DHD_LOG_DUMP_BUFFER_SIZE */ + (8 * 1024) /* DHD_LOG_DUMP_BUFFER_EX_SIZE */ +}; static void dhd_log_dump_init(dhd_pub_t *dhd); static void dhd_log_dump_deinit(dhd_pub_t *dhd); static void dhd_log_dump(void *handle, void *event_info, u8 event); @@ -361,6 +457,13 @@ void dhd_schedule_log_dump(dhd_pub_t *dhdp); static int do_dhd_log_dump(dhd_pub_t *dhdp); #endif /* 
DHD_LOG_DUMP */ +#ifdef DHD_DEBUG_UART +#include +#define DHD_DEBUG_UART_EXEC_PATH "/system/bin/wldu" +static void dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event); +static void dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd); +#endif /* DHD_DEBUG_UART */ + static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused); static struct notifier_block dhd_reboot_notifier = { .notifier_call = dhd_reboot_callback, @@ -371,6 +474,13 @@ static struct notifier_block dhd_reboot_notifier = { static int is_reboot = 0; #endif /* BCMPCIE */ +#if defined(BT_OVER_SDIO) +#include "dhd_bt_interface.h" +dhd_pub_t *g_dhd_pub = NULL; +#endif /* defined (BT_OVER_SDIO) */ + +atomic_t exit_in_progress = ATOMIC_INIT(0); + typedef struct dhd_if_event { struct list_head list; wl_event_data_if_t event; @@ -396,6 +506,9 @@ typedef struct dhd_if { struct net_device_stats stats; #ifdef DHD_WMF dhd_wmf_t wmf; /* per bsscfg wmf setting */ + bool wmf_psta_disable; /* enable/disable MC pkt to each mac + * of MC group behind PSTA + */ #endif /* DHD_WMF */ #ifdef PCIE_FULL_DONGLE struct list_head sta_list; /* sll of associated stations */ @@ -409,11 +522,16 @@ typedef struct dhd_if { bool parp_discard; bool parp_allnode; arp_table_t *phnd_arp_table; -/* for Per BSS modification */ + /* for Per BSS modification */ bool dhcp_unicast; bool block_ping; bool grat_arp; #endif /* DHD_L2_FILTER */ +#ifdef DHD_MCAST_REGEN + bool mcast_regen_bss_enable; +#endif + bool rx_pkt_chainable; /* set all rx packet to chainable config by default */ + cumm_ctr_t cumm_ctr; /* cummulative queue length of child flowrings */ } dhd_if_t; #ifdef WLMEDIA_HTSF @@ -446,16 +564,27 @@ static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0; struct ipv6_work_info_t { uint8 if_idx; - char ipv6_addr[16]; + char ipv6_addr[IPV6_ADDR_LEN]; unsigned long event; }; +static void dhd_process_daemon_msg(struct sk_buff *skb); +static void dhd_destroy_to_notifier_skt(void); +static int 
dhd_create_to_notifier_skt(void); +static struct sock *nl_to_event_sk = NULL; +int sender_pid = 0; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) +struct netlink_kernel_cfg g_cfg = { + .groups = 1, + .input = dhd_process_daemon_msg, +}; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */ -#ifdef DHD_DEBUG typedef struct dhd_dump { uint8 *buf; int bufsize; } dhd_dump_t; -#endif /* DHD_DEBUG */ + /* When Perimeter locks are deployed, any blocking calls must be preceeded * with a PERIM UNLOCK and followed by a PERIM LOCK. @@ -476,6 +605,9 @@ typedef struct dhd_info { char nv_path[PATH_MAX]; /* path to nvram vars file */ char clm_path[PATH_MAX]; /* path to clm vars file */ char conf_path[PATH_MAX]; /* path to config vars file */ +#ifdef DHD_UCODE_DOWNLOAD + char uc_path[PATH_MAX]; /* path to ucode image */ +#endif /* DHD_UCODE_DOWNLOAD */ /* serialize dhd iovars */ struct mutex dhd_iovar_mutex; @@ -503,6 +635,7 @@ typedef struct dhd_info { struct tasklet_struct tasklet; spinlock_t sdlock; spinlock_t txqlock; + spinlock_t rxqlock; spinlock_t dhd_lock; struct semaphore sdsem; @@ -520,6 +653,8 @@ typedef struct dhd_info { struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */ struct wake_lock wl_wdwake; /* Wifi wd wakelock */ struct wake_lock wl_evtwake; /* Wifi event wakelock */ + struct wake_lock wl_pmwake; /* Wifi pm handler wakelock */ + struct wake_lock wl_txflwake; /* Wifi tx flow wakelock */ #ifdef BCMPCIE_OOB_HOST_WAKE struct wake_lock wl_intrwake; /* Host wakeup wakelock */ #endif /* BCMPCIE_OOB_HOST_WAKE */ @@ -534,10 +669,12 @@ typedef struct dhd_info { */ struct mutex dhd_net_if_mutex; struct mutex dhd_suspend_mutex; -#endif +#if defined(PKT_FILTER_SUPPORT) && defined(APF) + struct mutex dhd_apf_mutex; +#endif /* PKT_FILTER_SUPPORT && APF */ +#endif spinlock_t wakelock_spinlock; spinlock_t wakelock_evt_spinlock; - uint32 wakelock_event_counter; uint32 wakelock_counter; int wakelock_wd_counter; int wakelock_rx_timeout_enable; @@ -588,6 +725,9 
@@ typedef struct dhd_info { #ifdef DHD_PSTA uint32 psta_mode; /* PSTA or PSR */ #endif /* DHD_PSTA */ +#ifdef DHD_WET + uint32 wet_mode; +#endif /* DHD_WET */ #ifdef DHD_DEBUG dhd_dump_t *dump; struct timer_list join_timer; @@ -611,12 +751,11 @@ typedef struct dhd_info { /* Tasklet to handle Tx Completion packet freeing */ struct tasklet_struct tx_compl_tasklet; - atomic_t tx_compl_cpu; - + atomic_t tx_compl_cpu; /* Tasklet to handle RxBuf Post during Rx completion */ struct tasklet_struct rx_compl_tasklet; - atomic_t rx_compl_cpu; + atomic_t rx_compl_cpu; /* Napi struct for handling rx packet sendup. Packets are removed from * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then @@ -626,37 +765,83 @@ typedef struct dhd_info { struct sk_buff_head rx_pend_queue ____cacheline_aligned; struct sk_buff_head rx_napi_queue ____cacheline_aligned; struct napi_struct rx_napi_struct ____cacheline_aligned; - atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */ + atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */ struct net_device *rx_napi_netdev; /* netdev of primary interface */ struct work_struct rx_napi_dispatcher_work; - struct work_struct tx_compl_dispatcher_work; - struct work_struct rx_compl_dispatcher_work; + struct work_struct tx_compl_dispatcher_work; + struct work_struct tx_dispatcher_work; + /* Number of times DPC Tasklet ran */ uint32 dhd_dpc_cnt; - /* Number of times NAPI processing got scheduled */ uint32 napi_sched_cnt; - /* Number of times NAPI processing ran on each available core */ - uint32 napi_percpu_run_cnt[NR_CPUS]; - + uint32 *napi_percpu_run_cnt; /* Number of times RX Completions got scheduled */ uint32 rxc_sched_cnt; /* Number of times RX Completion ran on each available core */ - uint32 rxc_percpu_run_cnt[NR_CPUS]; - + uint32 *rxc_percpu_run_cnt; /* Number of times TX Completions got scheduled */ uint32 txc_sched_cnt; /* Number of times TX Completions ran on each available core */ - uint32 
txc_percpu_run_cnt[NR_CPUS]; - + uint32 *txc_percpu_run_cnt; /* CPU status */ /* Number of times each CPU came online */ - uint32 cpu_online_cnt[NR_CPUS]; - + uint32 *cpu_online_cnt; /* Number of times each CPU went offline */ - uint32 cpu_offline_cnt[NR_CPUS]; + uint32 *cpu_offline_cnt; + + /* Number of times TX processing run on each core */ + uint32 *txp_percpu_run_cnt; + /* Number of times TX start run on each core */ + uint32 *tx_start_percpu_run_cnt; + + /* Tx load balancing */ + + /* TODO: Need to see if batch processing is really required in case of TX + * processing. In case of RX the Dongle can send a bunch of rx completions, + * hence we took a 3 queue approach + * enque - adds the skbs to rx_pend_queue + * dispatch - uses a lock and adds the list of skbs from pend queue to + * napi queue + * napi processing - copies the pend_queue into a local queue and works + * on it. + * But for TX its going to be 1 skb at a time, so we are just thinking + * of using only one queue and use the lock supported skb queue functions + * to add and process it. If its in-efficient we'll re-visit the queue + * design. + */ + + /* When the NET_TX tries to send a TX packet put it into tx_pend_queue */ + /* struct sk_buff_head tx_pend_queue ____cacheline_aligned; */ + /* + * From the Tasklet that actually sends out data + * copy the list tx_pend_queue into tx_active_queue. There by we need + * to spinlock to only perform the copy the rest of the code ie to + * construct the tx_pend_queue and the code to process tx_active_queue + * can be lockless. 
The concept is borrowed as is from RX processing + */ + /* struct sk_buff_head tx_active_queue ____cacheline_aligned; */ + + /* Control TXP in runtime, enable by default */ + atomic_t lb_txp_active; + + /* + * When the NET_TX tries to send a TX packet put it into tx_pend_queue + * For now, the processing tasklet will also direcly operate on this + * queue + */ + struct sk_buff_head tx_pend_queue ____cacheline_aligned; + + /* cpu on which the DHD Tx is happenning */ + atomic_t tx_cpu; + + /* CPU on which the Network stack is calling the DHD's xmit function */ + atomic_t net_tx_cpu; + + /* Tasklet context from which the DHD's TX processing happens */ + struct tasklet_struct tx_tasklet; /* * Consumer Histogram - NAPI RX Packet processing @@ -674,29 +859,61 @@ typedef struct dhd_info { * iteration 2 - 30 (the bin counter 2^5 increments to 1) * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2) */ - uint32 napi_rx_hist[NR_CPUS][HIST_BIN_SIZE]; - uint32 txc_hist[NR_CPUS][HIST_BIN_SIZE]; - uint32 rxc_hist[NR_CPUS][HIST_BIN_SIZE]; + uint32 *napi_rx_hist[HIST_BIN_SIZE]; + uint32 *txc_hist[HIST_BIN_SIZE]; + uint32 *rxc_hist[HIST_BIN_SIZE]; #endif /* DHD_LB */ +#ifdef SHOW_LOGTRACE + struct work_struct event_log_dispatcher_work; +#endif /* SHOW_LOGTRACE */ + #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ - struct kobject dhd_kobj; -#ifdef SUPPORT_SENSORHUB - uint32 shub_enable; -#endif /* SUPPORT_SENSORHUB */ +#ifdef SHOW_LOGTRACE + struct sk_buff_head evt_trace_queue ____cacheline_aligned; +#endif + struct timer_list timesync_timer; +#if defined(BT_OVER_SDIO) + char btfw_path[PATH_MAX]; +#endif /* defined (BT_OVER_SDIO) */ - struct delayed_work dhd_memdump_work; +#ifdef WL_MONITOR + struct net_device *monitor_dev; /* monitor pseudo device */ + struct sk_buff *monitor_skb; + uint monitor_len; + uint monitor_type; /* monitor pseudo device */ + monitor_info_t *monitor_info; +#endif 
/* WL_MONITOR */ + uint32 shub_enable; +#if defined(BT_OVER_SDIO) + struct mutex bus_user_lock; /* lock for sdio bus apis shared between WLAN & BT */ + int bus_user_count; /* User counts of sdio bus shared between WLAN & BT */ +#endif /* BT_OVER_SDIO */ +#ifdef DHD_DEBUG_UART + bool duart_execute; +#endif +#ifdef PCIE_INB_DW + wait_queue_head_t ds_exit_wait; +#endif /* PCIE_INB_DW */ } dhd_info_t; +#ifdef WL_MONITOR +#define MONPKT_EXTRA_LEN 48 +#endif + #define DHDIF_FWDER(dhdif) FALSE -/* Flag to indicate if we should download firmware on driver load */ -uint dhd_download_fw_on_driverload = TRUE; - +#if defined(BT_OVER_SDIO) +/* Flag to indicate if driver is initialized */ +uint dhd_driver_init_done = TRUE; +#else /* Flag to indicate if driver is initialized */ uint dhd_driver_init_done = FALSE; +#endif +/* Flag to indicate if we should download firmware on driver load */ +uint dhd_download_fw_on_driverload = TRUE; /* Definitions to provide path to the firmware and nvram * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt" @@ -705,6 +922,12 @@ char firmware_path[MOD_PARAM_PATHLEN]; char nvram_path[MOD_PARAM_PATHLEN]; char clm_path[MOD_PARAM_PATHLEN]; char config_path[MOD_PARAM_PATHLEN]; +#ifdef DHD_UCODE_DOWNLOAD +char ucode_path[MOD_PARAM_PATHLEN]; +#endif /* DHD_UCODE_DOWNLOAD */ + +module_param_string(clm_path, clm_path, MOD_PARAM_PATHLEN, 0660); + /* backup buffer for firmware and nvram path */ char fw_bak_path[MOD_PARAM_PATHLEN]; @@ -716,12 +939,6 @@ module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444); int op_mode = 0; int disable_proptx = 0; module_param(op_mode, int, 0644); - -#if defined(DHD_LB_RXP) -static int dhd_napi_weight = 32; -module_param(dhd_napi_weight, int, 0644); -#endif /* DHD_LB_RXP */ - extern int wl_control_wl_start(struct net_device *dev); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC) struct semaphore dhd_registration_sem; @@ -732,6 +949,7 @@ static void 
dhd_ifadd_event_handler(void *handle, void *event_info, u8 event); static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event); static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event); static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event); + #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event); #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ @@ -739,6 +957,15 @@ static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event); extern void dhd_netdev_free(struct net_device *ndev); #endif /* WL_CFG80211 */ +#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER)) +/* update rx_pkt_chainable state of dhd interface */ +static void dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx); +#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */ + +#ifdef HOFFLOAD_MODULES +char dhd_hmem_module_string[MOD_PARAM_SRLEN]; +module_param_string(dhd_hmem_module_string, dhd_hmem_module_string, MOD_PARAM_SRLEN, 0660); +#endif /* Error bits */ module_param(dhd_msg_level, int, 0); #if defined(WL_WIRELESS_EXT) @@ -758,10 +985,10 @@ module_param(dhd_arp_enable, uint, 0); /* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */ #ifdef ENABLE_ARP_SNOOP_MODE -uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP; +uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY; #else uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY; -#endif /* ENABLE_ARP_SNOOP_MODE */ +#endif /* ENABLE_ARP_SNOOP_MODE */ module_param(dhd_arp_mode, uint, 0); #endif /* ARP_OFFLOAD_SUPPORT */ @@ -771,8 +998,10 @@ module_param(disable_proptx, int, 0644); /* load firmware and/or nvram values from the filesystem */ module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660); module_param_string(nvram_path, nvram_path, 
MOD_PARAM_PATHLEN, 0660); -module_param_string(clm_path, clm_path, MOD_PARAM_PATHLEN, 0660); module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0); +#ifdef DHD_UCODE_DOWNLOAD +module_param_string(ucode_path, ucode_path, MOD_PARAM_PATHLEN, 0660); +#endif /* DHD_UCODE_DOWNLOAD */ /* Watchdog interval */ @@ -789,8 +1018,9 @@ uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS; /* Console poll interval */ uint dhd_console_ms = 0; module_param(dhd_console_ms, uint, 0644); -#endif /* defined(DHD_DEBUG) */ - +#else +uint dhd_console_ms = 0; +#endif /* DHD_DEBUG */ uint dhd_slpauto = TRUE; module_param(dhd_slpauto, uint, 0); @@ -824,23 +1054,107 @@ module_param(dhd_dpc_prio, int, 0); int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING; module_param(dhd_rxf_prio, int, 0); -int passive_channel_skip = 0; -module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR)); - #if !defined(BCMDHDUSB) extern int dhd_dongle_ramsize; module_param(dhd_dongle_ramsize, int, 0); #endif /* BCMDHDUSB */ +#ifdef WL_CFG80211 +int passive_channel_skip = 0; +module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR)); +#endif /* WL_CFG80211 */ + /* Keep track of number of instances */ static int dhd_found = 0; static int instance_base = 0; /* Starting instance number */ module_param(instance_base, int, 0644); +#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE) +static int dhd_napi_weight = 32; +module_param(dhd_napi_weight, int, 0644); +#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */ + +#ifdef PCIE_FULL_DONGLE +extern int h2d_max_txpost; +module_param(h2d_max_txpost, int, 0644); +#endif /* PCIE_FULL_DONGLE */ + +#ifdef DHD_DHCP_DUMP +struct bootp_fmt { + struct iphdr ip_header; + struct udphdr udp_header; + uint8 op; + uint8 htype; + uint8 hlen; + uint8 hops; + uint32 transaction_id; + uint16 secs; + uint16 flags; + uint32 client_ip; + uint32 assigned_ip; + uint32 server_ip; + uint32 relay_ip; + uint8 hw_address[16]; + uint8 server_name[64]; + uint8 file_name[128]; + uint8 options[312]; +}; + +static 
const uint8 bootp_magic_cookie[4] = { 99, 130, 83, 99 }; +static const char dhcp_ops[][10] = { + "NA", "REQUEST", "REPLY" +}; +static const char dhcp_types[][10] = { + "NA", "DISCOVER", "OFFER", "REQUEST", "DECLINE", "ACK", "NAK", "RELEASE", "INFORM" +}; +static void dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx); +#endif /* DHD_DHCP_DUMP */ + +#ifdef DHD_ICMP_DUMP +#include +static void dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx); +#endif /* DHD_ICMP_DUMP */ + /* Functions to manage sysfs interface for dhd */ static int dhd_sysfs_init(dhd_info_t *dhd); static void dhd_sysfs_exit(dhd_info_t *dhd); +#ifdef SHOW_LOGTRACE +#if defined(CUSTOMER_HW4_DEBUG) +static char *logstrs_path = PLATFORM_PATH"logstrs.bin"; +static char *st_str_file_path = PLATFORM_PATH"rtecdc.bin"; +static char *map_file_path = PLATFORM_PATH"rtecdc.map"; +static char *rom_st_str_file_path = PLATFORM_PATH"roml.bin"; +static char *rom_map_file_path = PLATFORM_PATH"roml.map"; +#elif defined(CUSTOMER_HW2) +static char *logstrs_path = "/data/misc/wifi/logstrs.bin"; +static char *st_str_file_path = "/data/misc/wifi/rtecdc.bin"; +static char *map_file_path = "/data/misc/wifi/rtecdc.map"; +static char *rom_st_str_file_path = "/data/misc/wifi/roml.bin"; +static char *rom_map_file_path = "/data/misc/wifi/roml.map"; +#else +static char *logstrs_path = "/installmedia/logstrs.bin"; +static char *st_str_file_path = "/installmedia/rtecdc.bin"; +static char *map_file_path = "/installmedia/rtecdc.map"; +static char *rom_st_str_file_path = "/installmedia/roml.bin"; +static char *rom_map_file_path = "/installmedia/roml.map"; +#endif /* CUSTOMER_HW4_DEBUG || CUSTOMER_HW2 */ +static char *ram_file_str = "rtecdc"; +static char *rom_file_str = "roml"; + +module_param(logstrs_path, charp, S_IRUGO); +module_param(st_str_file_path, charp, S_IRUGO); +module_param(map_file_path, charp, S_IRUGO); +module_param(rom_st_str_file_path, charp, S_IRUGO); +module_param(rom_map_file_path, charp, S_IRUGO); + +static int 
dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp); +static int dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start, + uint32 *rodata_end); +static int dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file, + char *map_file); +#endif /* SHOW_LOGTRACE */ + #if defined(DHD_LB) static void @@ -850,6 +1164,8 @@ dhd_lb_set_default_cpus(dhd_info_t *dhd) atomic_set(&dhd->rx_napi_cpu, 1); atomic_set(&dhd->rx_compl_cpu, 2); atomic_set(&dhd->tx_compl_cpu, 2); + atomic_set(&dhd->tx_cpu, 2); + atomic_set(&dhd->net_tx_cpu, 0); } static void @@ -866,14 +1182,17 @@ static int dhd_cpumasks_init(dhd_info_t *dhd) { int id; - uint32 cpus; + uint32 cpus, num_cpus = num_possible_cpus(); int ret = 0; + DHD_ERROR(("%s CPU masks primary(big)=0x%x secondary(little)=0x%x\n", __FUNCTION__, + DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS)); + if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) || - !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) || - !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) || - !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) || - !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) { + !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) { DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__)); ret = -ENOMEM; goto fail; @@ -883,14 +1202,19 @@ dhd_cpumasks_init(dhd_info_t *dhd) cpumask_clear(dhd->cpumask_primary); cpumask_clear(dhd->cpumask_secondary); + if (num_cpus > 32) { + DHD_ERROR(("%s max cpus must be 32, %d too big\n", __FUNCTION__, num_cpus)); + ASSERT(0); + } + cpus = DHD_LB_PRIMARY_CPUS; - for (id = 0; id < NR_CPUS; id++) { + for (id = 0; id < num_cpus; id++) { if (isset(&cpus, id)) cpumask_set_cpu(id, dhd->cpumask_primary); } cpus = DHD_LB_SECONDARY_CPUS; - for (id = 0; id < 
NR_CPUS; id++) { + for (id = 0; id < num_cpus; id++) { if (isset(&cpus, id)) cpumask_set_cpu(id, dhd->cpumask_secondary); } @@ -931,6 +1255,7 @@ void dhd_select_cpu_candidacy(dhd_info_t *dhd) uint32 secondary_available_cpus; /* count of secondary available cpus */ uint32 napi_cpu = 0; /* cpu selected for napi rx processing */ uint32 compl_cpu = 0; /* cpu selected for completion jobs */ + uint32 tx_cpu = 0; /* cpu selected for tx processing job */ cpumask_clear(dhd->cpumask_primary_new); cpumask_clear(dhd->cpumask_secondary_new); @@ -954,13 +1279,18 @@ void dhd_select_cpu_candidacy(dhd_info_t *dhd) /* If no further CPU is available, * cpumask_next returns >= nr_cpu_ids */ - compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new); + tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new); + if (tx_cpu >= nr_cpu_ids) + tx_cpu = 0; + + /* In case there are no more CPUs, do completions & Tx in same CPU */ + compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_primary_new); if (compl_cpu >= nr_cpu_ids) - compl_cpu = 0; + compl_cpu = tx_cpu; } - DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d\n", - __FUNCTION__, napi_cpu, compl_cpu)); + DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n", + __FUNCTION__, napi_cpu, compl_cpu, tx_cpu)); /* -- Now check for the CPUs from the secondary mask -- */ secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new); @@ -974,11 +1304,19 @@ void dhd_select_cpu_candidacy(dhd_info_t *dhd) */ if (napi_cpu == 0) { napi_cpu = cpumask_first(dhd->cpumask_secondary_new); - compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new); + tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new); + compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new); + } else if (tx_cpu == 0) { + tx_cpu = cpumask_first(dhd->cpumask_secondary_new); + compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new); } else if (compl_cpu == 0) { compl_cpu = cpumask_first(dhd->cpumask_secondary_new); } + /* If 
no CPU was available for tx processing, choose CPU 0 */ + if (tx_cpu >= nr_cpu_ids) + tx_cpu = 0; + /* If no CPU was available for completion, choose CPU 0 */ if (compl_cpu >= nr_cpu_ids) compl_cpu = 0; @@ -986,18 +1324,23 @@ void dhd_select_cpu_candidacy(dhd_info_t *dhd) if ((primary_available_cpus == 0) && (secondary_available_cpus == 0)) { /* No CPUs available from primary or secondary mask */ - napi_cpu = 0; + napi_cpu = 1; compl_cpu = 0; + tx_cpu = 2; } - DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d\n", - __FUNCTION__, napi_cpu, compl_cpu)); + DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n", + __FUNCTION__, napi_cpu, compl_cpu, tx_cpu)); + ASSERT(napi_cpu < nr_cpu_ids); ASSERT(compl_cpu < nr_cpu_ids); + ASSERT(tx_cpu < nr_cpu_ids); atomic_set(&dhd->rx_napi_cpu, napi_cpu); atomic_set(&dhd->tx_compl_cpu, compl_cpu); atomic_set(&dhd->rx_compl_cpu, compl_cpu); + atomic_set(&dhd->tx_cpu, tx_cpu); + return; } @@ -1009,13 +1352,27 @@ void dhd_select_cpu_candidacy(dhd_info_t *dhd) int dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { - unsigned int cpu = (unsigned int)(long)hcpu; + unsigned long int cpu = (unsigned long int)hcpu; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + + if (!dhd || !(dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE)) { + DHD_INFO(("%s(): LB data is not initialized yet.\n", + __FUNCTION__)); + return NOTIFY_BAD; + } switch (action) { case CPU_ONLINE: + case CPU_ONLINE_FROZEN: DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]); cpumask_set_cpu(cpu, dhd->cpumask_curr_avail); dhd_select_cpu_candidacy(dhd); @@ -1038,10 +1395,11 @@ dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) void 
dhd_lb_stats_init(dhd_pub_t *dhdp) { dhd_info_t *dhd; - int i, j; + int i, j, num_cpus = num_possible_cpus(); + int alloc_size = sizeof(uint32) * num_cpus; if (dhdp == NULL) { - DHD_ERROR(("%s(): Invalid argument dhdp is NULL \n", + DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n", __FUNCTION__)); return; } @@ -1054,73 +1412,237 @@ void dhd_lb_stats_init(dhd_pub_t *dhdp) DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt); DHD_LB_STATS_CLR(dhd->napi_sched_cnt); + + dhd->napi_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->napi_percpu_run_cnt) { + DHD_ERROR(("%s(): napi_percpu_run_cnt malloc failed \n", + __FUNCTION__)); + return; + } + for (i = 0; i < num_cpus; i++) + DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]); + DHD_LB_STATS_CLR(dhd->rxc_sched_cnt); + + dhd->rxc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->rxc_percpu_run_cnt) { + DHD_ERROR(("%s(): rxc_percpu_run_cnt malloc failed \n", + __FUNCTION__)); + return; + } + for (i = 0; i < num_cpus; i++) + DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]); + DHD_LB_STATS_CLR(dhd->txc_sched_cnt); - for (i = 0; i < NR_CPUS; i++) { - DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]); - DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]); + dhd->txc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->txc_percpu_run_cnt) { + DHD_ERROR(("%s(): txc_percpu_run_cnt malloc failed \n", + __FUNCTION__)); + return; + } + for (i = 0; i < num_cpus; i++) DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]); + dhd->cpu_online_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->cpu_online_cnt) { + DHD_ERROR(("%s(): cpu_online_cnt malloc failed \n", + __FUNCTION__)); + return; + } + for (i = 0; i < num_cpus; i++) DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]); + + dhd->cpu_offline_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->cpu_offline_cnt) { + DHD_ERROR(("%s(): cpu_offline_cnt malloc failed \n", + __FUNCTION__)); + return; + } + for (i = 0; i < num_cpus; i++) 
DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]); + + dhd->txp_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->txp_percpu_run_cnt) { + DHD_ERROR(("%s(): txp_percpu_run_cnt malloc failed \n", + __FUNCTION__)); + return; + } + for (i = 0; i < num_cpus; i++) + DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]); + + dhd->tx_start_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->tx_start_percpu_run_cnt) { + DHD_ERROR(("%s(): tx_start_percpu_run_cnt malloc failed \n", + __FUNCTION__)); + return; + } + for (i = 0; i < num_cpus; i++) + DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]); + + for (j = 0; j < HIST_BIN_SIZE; j++) { + dhd->napi_rx_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->napi_rx_hist[j]) { + DHD_ERROR(("%s(): dhd->napi_rx_hist[%d] malloc failed \n", + __FUNCTION__, j)); + return; + } + for (i = 0; i < num_cpus; i++) { + DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]); + } + } +#ifdef DHD_LB_TXC + for (j = 0; j < HIST_BIN_SIZE; j++) { + dhd->txc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->txc_hist[j]) { + DHD_ERROR(("%s(): dhd->txc_hist[%d] malloc failed \n", + __FUNCTION__, j)); + return; + } + for (i = 0; i < num_cpus; i++) { + DHD_LB_STATS_CLR(dhd->txc_hist[j][i]); + } + } +#endif /* DHD_LB_TXC */ +#ifdef DHD_LB_RXC + for (j = 0; j < HIST_BIN_SIZE; j++) { + dhd->rxc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size); + if (!dhd->rxc_hist[j]) { + DHD_ERROR(("%s(): dhd->rxc_hist[%d] malloc failed \n", + __FUNCTION__, j)); + return; + } + for (i = 0; i < num_cpus; i++) { + DHD_LB_STATS_CLR(dhd->rxc_hist[j][i]); + } + } +#endif /* DHD_LB_RXC */ + return; +} + +void dhd_lb_stats_deinit(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + int j, num_cpus = num_possible_cpus(); + int alloc_size = sizeof(uint32) * num_cpus; + + if (dhdp == NULL) { + DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n", + __FUNCTION__)); + return; } - for (i = 0; i < NR_CPUS; i++) { - for (j = 0; j < HIST_BIN_SIZE; j++) { - 
DHD_LB_STATS_CLR(dhd->napi_rx_hist[i][j]); - DHD_LB_STATS_CLR(dhd->txc_hist[i][j]); - DHD_LB_STATS_CLR(dhd->rxc_hist[i][j]); + dhd = dhdp->info; + if (dhd == NULL) { + DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__)); + return; + } + + if (dhd->napi_percpu_run_cnt) { + MFREE(dhdp->osh, dhd->napi_percpu_run_cnt, alloc_size); + dhd->napi_percpu_run_cnt = NULL; + } + if (dhd->rxc_percpu_run_cnt) { + MFREE(dhdp->osh, dhd->rxc_percpu_run_cnt, alloc_size); + dhd->rxc_percpu_run_cnt = NULL; + } + if (dhd->txc_percpu_run_cnt) { + MFREE(dhdp->osh, dhd->txc_percpu_run_cnt, alloc_size); + dhd->txc_percpu_run_cnt = NULL; + } + if (dhd->cpu_online_cnt) { + MFREE(dhdp->osh, dhd->cpu_online_cnt, alloc_size); + dhd->cpu_online_cnt = NULL; + } + if (dhd->cpu_offline_cnt) { + MFREE(dhdp->osh, dhd->cpu_offline_cnt, alloc_size); + dhd->cpu_offline_cnt = NULL; + } + + if (dhd->txp_percpu_run_cnt) { + MFREE(dhdp->osh, dhd->txp_percpu_run_cnt, alloc_size); + dhd->txp_percpu_run_cnt = NULL; + } + if (dhd->tx_start_percpu_run_cnt) { + MFREE(dhdp->osh, dhd->tx_start_percpu_run_cnt, alloc_size); + dhd->tx_start_percpu_run_cnt = NULL; + } + + for (j = 0; j < HIST_BIN_SIZE; j++) { + if (dhd->napi_rx_hist[j]) { + MFREE(dhdp->osh, dhd->napi_rx_hist[j], alloc_size); + dhd->napi_rx_hist[j] = NULL; } +#ifdef DHD_LB_TXC + if (dhd->txc_hist[j]) { + MFREE(dhdp->osh, dhd->txc_hist[j], alloc_size); + dhd->txc_hist[j] = NULL; + } +#endif /* DHD_LB_TXC */ +#ifdef DHD_LB_RXC + if (dhd->rxc_hist[j]) { + MFREE(dhdp->osh, dhd->rxc_hist[j], alloc_size); + dhd->rxc_hist[j] = NULL; + } +#endif /* DHD_LB_RXC */ } return; } static void dhd_lb_stats_dump_histo( - struct bcmstrbuf *strbuf, uint32 (*hist)[HIST_BIN_SIZE]) + struct bcmstrbuf *strbuf, uint32 **hist) { int i, j; - uint32 per_cpu_total[NR_CPUS] = {0}; + uint32 *per_cpu_total; uint32 total = 0; + uint32 num_cpus = num_possible_cpus(); + + per_cpu_total = (uint32 *)kmalloc(sizeof(uint32) * num_cpus, GFP_ATOMIC); + if (!per_cpu_total) { + 
DHD_ERROR(("%s(): dhd->per_cpu_total malloc failed \n", __FUNCTION__)); + return; + } + bzero(per_cpu_total, sizeof(uint32) * num_cpus); bcm_bprintf(strbuf, "CPU: \t\t"); - for (i = 0; i < num_possible_cpus(); i++) + for (i = 0; i < num_cpus; i++) bcm_bprintf(strbuf, "%d\t", i); bcm_bprintf(strbuf, "\nBin\n"); for (i = 0; i < HIST_BIN_SIZE; i++) { - bcm_bprintf(strbuf, "%d:\t\t", 1<<(i+1)); - for (j = 0; j < num_possible_cpus(); j++) { - bcm_bprintf(strbuf, "%d\t", hist[j][i]); + bcm_bprintf(strbuf, "%d:\t\t", 1<cpu_online_cnt); - bcm_bprintf(strbuf, "cpu_offline_cnt:\n"); + bcm_bprintf(strbuf, "\ncpu_offline_cnt:\n"); dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt); bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n", dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt, dhd->txc_sched_cnt); + #ifdef DHD_LB_RXP - bcm_bprintf(strbuf, "napi_percpu_run_cnt:\n"); + bcm_bprintf(strbuf, "\nnapi_percpu_run_cnt:\n"); dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt); bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n"); dhd_lb_stats_dump_histo(strbuf, dhd->napi_rx_hist); #endif /* DHD_LB_RXP */ #ifdef DHD_LB_RXC - bcm_bprintf(strbuf, "rxc_percpu_run_cnt:\n"); + bcm_bprintf(strbuf, "\nrxc_percpu_run_cnt:\n"); dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt); bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n"); dhd_lb_stats_dump_histo(strbuf, dhd->rxc_hist); #endif /* DHD_LB_RXC */ - #ifdef DHD_LB_TXC - bcm_bprintf(strbuf, "txc_percpu_run_cnt:\n"); + bcm_bprintf(strbuf, "\ntxc_percpu_run_cnt:\n"); dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt); bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n"); dhd_lb_stats_dump_histo(strbuf, dhd->txc_hist); #endif /* DHD_LB_TXC */ + +#ifdef DHD_LB_TXP + bcm_bprintf(strbuf, "\ntxp_percpu_run_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, dhd->txp_percpu_run_cnt); + + bcm_bprintf(strbuf, "\ntx_start_percpu_run_cnt:\n"); + 
dhd_lb_stats_dump_cpu_array(strbuf, dhd->tx_start_percpu_run_cnt); +#endif /* DHD_LB_TXP */ + + bcm_bprintf(strbuf, "\nCPU masks primary(big)=0x%x secondary(little)=0x%x\n", + DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS); + + bcm_bprintf(strbuf, "napi_cpu %x tx_cpu %x\n", + atomic_read(&dhd->rx_napi_cpu), atomic_read(&dhd->tx_cpu)); + } -static void dhd_lb_stats_update_histo(uint32 *bin, uint32 count) +/* Given a number 'n' returns 'm' that is next larger power of 2 after n */ +static inline uint32 next_larger_power2(uint32 num) +{ + num--; + num |= (num >> 1); + num |= (num >> 2); + num |= (num >> 4); + num |= (num >> 8); + num |= (num >> 16); + + return (num + 1); +} + +static void dhd_lb_stats_update_histo(uint32 **bin, uint32 count, uint32 cpu) { uint32 bin_power; - uint32 *p = NULL; - + uint32 *p; bin_power = next_larger_power2(count); switch (bin_power) { - case 0: break; - case 1: /* Fall through intentionally */ - case 2: p = bin + 0; break; - case 4: p = bin + 1; break; - case 8: p = bin + 2; break; - case 16: p = bin + 3; break; - case 32: p = bin + 4; break; - case 64: p = bin + 5; break; - case 128: p = bin + 6; break; - default : p = bin + 7; break; + case 1: p = bin[0] + cpu; break; + case 2: p = bin[1] + cpu; break; + case 4: p = bin[2] + cpu; break; + case 8: p = bin[3] + cpu; break; + case 16: p = bin[4] + cpu; break; + case 32: p = bin[5] + cpu; break; + case 64: p = bin[6] + cpu; break; + case 128: p = bin[7] + cpu; break; + default : p = bin[8] + cpu; break; } - if (p) - *p = *p + 1; + + *p = *p + 1; return; } @@ -1205,7 +1753,7 @@ extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count) cpu = get_cpu(); put_cpu(); - dhd_lb_stats_update_histo(&dhd->napi_rx_hist[cpu][0], count); + dhd_lb_stats_update_histo(dhd->napi_rx_hist, count, cpu); return; } @@ -1217,7 +1765,7 @@ extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count) cpu = get_cpu(); put_cpu(); - dhd_lb_stats_update_histo(&dhd->txc_hist[cpu][0], count); + 
dhd_lb_stats_update_histo(dhd->txc_hist, count, cpu); return; } @@ -1229,7 +1777,7 @@ extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count) cpu = get_cpu(); put_cpu(); - dhd_lb_stats_update_histo(&dhd->rxc_hist[cpu][0], count); + dhd_lb_stats_update_histo(dhd->rxc_hist, count, cpu); return; } @@ -1245,10 +1793,9 @@ extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp) dhd_info_t *dhd = dhdp->info; DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt); } - #endif /* DHD_LB_STATS */ -#endif /* DHD_LB */ +#endif /* DHD_LB */ #if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF) int g_frameburst = 1; @@ -1294,10 +1841,11 @@ static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapsh uint dhd_roam_disable = 0; #ifdef BCMDBGFS -extern int dhd_dbg_init(dhd_pub_t *dhdp); -extern void dhd_dbg_remove(void); +extern void dhd_dbgfs_init(dhd_pub_t *dhdp); +extern void dhd_dbgfs_remove(void); #endif + /* Control radio state */ uint dhd_radio_up = 1; @@ -1310,6 +1858,9 @@ module_param_string(iface_name, iface_name, IFNAMSIZ, 0); /* IOCTL response timeout */ int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT; +/* DS Exit response timeout */ +int ds_exit_timeout_msec = DS_EXIT_TIMEOUT; + /* Idle timeout for backplane clock */ int dhd_idletime = DHD_IDLETIME_TICKS; module_param(dhd_idletime, int, 0); @@ -1400,17 +1951,26 @@ static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol); static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol); #endif /* TOE */ -static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata, - wl_event_msg_t *event_ptr, void **data_ptr); +static int dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen, + wl_event_msg_t *event_ptr, void **data_ptr); #if defined(CONFIG_PM_SLEEP) static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored) { int ret = NOTIFY_DONE; bool suspend = FALSE; + +#if defined(STRICT_GCC_WARNINGS) && 
defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif BCM_REFERENCE(dhdinfo); + BCM_REFERENCE(suspend); switch (action) { case PM_HIBERNATION_PREPARE: @@ -1424,16 +1984,15 @@ static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, voi break; } -#if defined(SUPPORT_P2P_GO_PS) -#ifdef PROP_TXSTATUS +#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) if (suspend) { DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub); dhd_wlfc_suspend(&dhdinfo->pub); DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub); - } else + } else { dhd_wlfc_resume(&dhdinfo->pub); -#endif /* PROP_TXSTATUS */ -#endif /* defined(SUPPORT_P2P_GO_PS) */ + } +#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \ KERNEL_VERSION(2, 6, 39)) @@ -1463,6 +2022,7 @@ typedef struct dhd_dev_priv { dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */ dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */ int ifidx; /* interface index */ + void * lkup; } dhd_dev_priv_t; #define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t)) @@ -1470,7 +2030,11 @@ typedef struct dhd_dev_priv { #define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd) #define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp) #define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx) +#define DHD_DEV_LKUP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup) +#if defined(DHD_OF_SUPPORT) +extern int dhd_wlan_init(void); +#endif /* defined(DHD_OF_SUPPORT) */ /** Clear the dhd net_device's private structure. 
*/ static inline void dhd_dev_priv_clear(struct net_device * dev) @@ -1481,6 +2045,7 @@ dhd_dev_priv_clear(struct net_device * dev) dev_priv->dhd = (dhd_info_t *)NULL; dev_priv->ifp = (dhd_if_t *)NULL; dev_priv->ifidx = DHD_BAD_IF; + dev_priv->lkup = (void *)NULL; } /** Setup the dhd net_device's private structure. */ @@ -1515,6 +2080,9 @@ dhd_info_t dhd_info_null = { #ifdef DHDTCPACK_SUPPRESS .tcpack_sup_mode = TCPACK_SUP_REPLACE, #endif /* DHDTCPACK_SUPPRESS */ +#if defined(TRAFFIC_MGMT_DWM) + .dhd_tm_dwm_tbl = { .dhd_dwm_enabled = TRUE }, +#endif .up = FALSE, .busstate = DHD_BUS_DOWN } @@ -1668,18 +2236,23 @@ dhd_if_del_sta_list(dhd_if_t *ifp) unsigned long flags; DHD_IF_STA_LIST_LOCK(ifp, flags); - +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { #if defined(BCM_GMAC3) if (ifp->fwdh) { /* Remove sta from WOFA forwarder. */ - fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (wofa_t)sta); + fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (uintptr_t)sta); } #endif /* BCM_GMAC3 */ list_del(&sta->list); dhd_sta_free(&ifp->info->pub, sta); } - +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif DHD_IF_STA_LIST_UNLOCK(ifp, flags); return; @@ -1699,7 +2272,7 @@ dhd_if_flush_sta(dhd_if_t * ifp) list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { /* Remove any sta entry from WOFA forwarder. 
*/ - fwder_flush(ifp->fwdh, (wofa_t)sta); + fwder_flush(ifp->fwdh, (uintptr_t)sta); } DHD_IF_STA_LIST_UNLOCK(ifp, flags); @@ -1844,14 +2417,21 @@ dhd_find_sta(void *pub, int ifidx, void *ea) return DHD_STA_NULL; DHD_IF_STA_LIST_LOCK(ifp, flags); - +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif list_for_each_entry(sta, &ifp->sta_list, list) { if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) { + DHD_INFO(("%s: found STA " MACDBG "\n", + __FUNCTION__, MAC2STRDBG((char *)ea))); DHD_IF_STA_LIST_UNLOCK(ifp, flags); return sta; } } - +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif DHD_IF_STA_LIST_UNLOCK(ifp, flags); return DHD_STA_NULL; @@ -1881,6 +2461,9 @@ dhd_add_sta(void *pub, int ifidx, void *ea) /* link the sta and the dhd interface */ sta->ifp = ifp; sta->ifidx = ifidx; +#ifdef DHD_WMF + sta->psta_prim = NULL; +#endif INIT_LIST_HEAD(&sta->list); DHD_IF_STA_LIST_LOCK(ifp, flags); @@ -1891,7 +2474,7 @@ dhd_add_sta(void *pub, int ifidx, void *ea) if (ifp->fwdh) { ASSERT(ISALIGNED(ea, 2)); /* Add sta to WOFA forwarder. */ - fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta); + fwder_reassoc(ifp->fwdh, (uint16 *)ea, (uintptr_t)sta); } #endif /* BCM_GMAC3 */ @@ -1900,6 +2483,50 @@ dhd_add_sta(void *pub, int ifidx, void *ea) return sta; } +/** Delete all STAs from the interface's STA list. */ +void +dhd_del_all_sta(void *pub, int ifidx) +{ + dhd_sta_t *sta, *next; + dhd_if_t *ifp; + unsigned long flags; + + ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); + if (ifp == NULL) + return; + + DHD_IF_STA_LIST_LOCK(ifp, flags); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { +#if defined(BCM_GMAC3) + if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. 
*/ + ASSERT(ISALIGNED(sta->ea.octet, 2)); + fwder_deassoc(ifp->fwdh, (uint16 *)sta->ea.octet, (uintptr_t)sta); + } +#endif /* BCM_GMAC3 */ + + list_del(&sta->list); + dhd_sta_free(&ifp->info->pub, sta); +#ifdef DHD_L2_FILTER + if (ifp->parp_enable) { + /* clear Proxy ARP cache of specific Ethernet Address */ + bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, + ifp->phnd_arp_table, FALSE, + sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt); + } +#endif /* DHD_L2_FILTER */ + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + + return; +} + /** Delete STA from the interface's STA list. */ void dhd_del_sta(void *pub, int ifidx, void *ea) @@ -1907,6 +2534,7 @@ dhd_del_sta(void *pub, int ifidx, void *ea) dhd_sta_t *sta, *next; dhd_if_t *ifp; unsigned long flags; + char macstr[ETHER_ADDR_STR_LEN]; ASSERT(ea != NULL); ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); @@ -1914,20 +2542,27 @@ dhd_del_sta(void *pub, int ifidx, void *ea) return; DHD_IF_STA_LIST_LOCK(ifp, flags); - +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) { #if defined(BCM_GMAC3) if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. 
*/ ASSERT(ISALIGNED(ea, 2)); - fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta); + fwder_deassoc(ifp->fwdh, (uint16 *)ea, (uintptr_t)sta); } #endif /* BCM_GMAC3 */ + DHD_MAC_TO_STR(((char *)ea), macstr); + DHD_ERROR(("%s: Deleting STA %s\n", __FUNCTION__, macstr)); list_del(&sta->list); dhd_sta_free(&ifp->info->pub, sta); } } - +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif DHD_IF_STA_LIST_UNLOCK(ifp, flags); #ifdef DHD_L2_FILTER if (ifp->parp_enable) { @@ -2006,24 +2641,25 @@ static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_ static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {} static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {} dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; } +dhd_sta_t *dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; } void dhd_del_sta(void *pub, int ifidx, void *ea) {} #endif /* PCIE_FULL_DONGLE */ + #if defined(DHD_LB) -#if defined(DHD_LB_TXC) || defined(DHD_LB_RXC) +#if defined(DHD_LB_TXC) || defined(DHD_LB_RXC) || defined(DHD_LB_TXP) /** * dhd_tasklet_schedule - Function that runs in IPI context of the destination * CPU and schedules a tasklet. * @tasklet: opaque pointer to the tasklet */ -static INLINE void +INLINE void dhd_tasklet_schedule(void *tasklet) { tasklet_schedule((struct tasklet_struct *)tasklet); } - /** * dhd_tasklet_schedule_on - Executes the passed takslet in a given CPU * @tasklet: tasklet to be scheduled @@ -2033,15 +2669,30 @@ dhd_tasklet_schedule(void *tasklet) * smp_call_function_single with no wait and the tasklet_schedule function * will be invoked to schedule the specified tasklet on the requested CPU. 
*/ -static void +INLINE void dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu) { const int wait = 0; smp_call_function_single(on_cpu, dhd_tasklet_schedule, (void *)tasklet, wait); } -#endif /* DHD_LB_TXC || DHD_LB_RXC */ +/** + * dhd_work_schedule_on - Executes the passed work in a given CPU + * @work: work to be scheduled + * @on_cpu: cpu core id + * + * If the requested cpu is online, then an IPI is sent to this cpu via the + * schedule_work_on and the work function + * will be invoked to schedule the specified work on the requested CPU. + */ + +INLINE void +dhd_work_schedule_on(struct work_struct *work, int on_cpu) +{ + schedule_work_on(on_cpu, work); +} +#endif /* DHD_LB_TXC || DHD_LB_RXC || DHD_LB_TXP */ #if defined(DHD_LB_TXC) /** @@ -2091,10 +2742,8 @@ static void dhd_tx_compl_dispatcher_fn(struct work_struct * work) dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu); put_online_cpus(); } - #endif /* DHD_LB_TXC */ - #if defined(DHD_LB_RXC) /** * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet @@ -2122,33 +2771,104 @@ dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp) */ curr_cpu = get_cpu(); put_cpu(); - on_cpu = atomic_read(&dhd->rx_compl_cpu); if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) { dhd_tasklet_schedule(&dhd->rx_compl_tasklet); } else { - schedule_work(&dhd->rx_compl_dispatcher_work); + dhd_rx_compl_dispatcher_fn(dhdp); } } -static void dhd_rx_compl_dispatcher_fn(struct work_struct * work) +static void dhd_rx_compl_dispatcher_fn(dhd_pub_t *dhdp) { - struct dhd_info *dhd = - container_of(work, struct dhd_info, rx_compl_dispatcher_work); + struct dhd_info *dhd = dhdp->info; int cpu; - get_online_cpus(); - cpu = atomic_read(&dhd->tx_compl_cpu); + preempt_disable(); + cpu = atomic_read(&dhd->rx_compl_cpu); if (!cpu_online(cpu)) dhd_tasklet_schedule(&dhd->rx_compl_tasklet); - else + else { dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu); - put_online_cpus(); + } + preempt_enable(); } - #endif /* 
DHD_LB_RXC */ +#if defined(DHD_LB_TXP) +static void dhd_tx_dispatcher_work(struct work_struct * work) +{ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + struct dhd_info *dhd = + container_of(work, struct dhd_info, tx_dispatcher_work); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + dhd_tasklet_schedule(&dhd->tx_tasklet); +} + +static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp) +{ + int cpu; + int net_tx_cpu; + dhd_info_t *dhd = dhdp->info; + + preempt_disable(); + cpu = atomic_read(&dhd->tx_cpu); + net_tx_cpu = atomic_read(&dhd->net_tx_cpu); + + /* + * Now if the NET_TX has pushed the packet in the same + * CPU that is chosen for Tx processing, seperate it out + * i.e run the TX processing tasklet in compl_cpu + */ + if (net_tx_cpu == cpu) + cpu = atomic_read(&dhd->tx_compl_cpu); + + if (!cpu_online(cpu)) { + /* + * Ooohh... but the Chosen CPU is not online, + * Do the job in the current CPU itself. + */ + dhd_tasklet_schedule(&dhd->tx_tasklet); + } else { + /* + * Schedule tx_dispatcher_work to on the cpu which + * in turn will schedule tx_tasklet. + */ + dhd_work_schedule_on(&dhd->tx_dispatcher_work, cpu); + } + preempt_enable(); +} + +/** + * dhd_lb_tx_dispatch - load balance by dispatching the tx_tasklet + * on another cpu. The tx_tasklet will take care of actually putting + * the skbs into appropriate flow ring and ringing H2D interrupt + * + * @dhdp: pointer to dhd_pub object + */ +static void +dhd_lb_tx_dispatch(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + int curr_cpu; + + curr_cpu = get_cpu(); + put_cpu(); + + /* Record the CPU in which the TX request from Network stack came */ + atomic_set(&dhd->net_tx_cpu, curr_cpu); + + /* Schedule the work to dispatch ... 
*/ + dhd_tx_dispatcher_fn(dhdp); + +} +#endif /* DHD_LB_TXP */ #if defined(DHD_LB_RXP) /** @@ -2175,11 +2895,18 @@ dhd_napi_poll(struct napi_struct *napi, int budget) int processed = 0; struct sk_buff_head rx_process_queue; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif dhd = container_of(napi, struct dhd_info, rx_napi_struct); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + DHD_INFO(("%s napi_queue<%d> budget<%d>\n", __FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget)); - - __skb_queue_head_init(&rx_process_queue); + __skb_queue_head_init(&rx_process_queue); /* extract the entire rx_napi_queue into local rx_process_queue */ spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags); @@ -2249,7 +2976,6 @@ static INLINE int dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu) { int wait = 0; /* asynchronous IPI */ - DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n", __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu)); @@ -2291,16 +3017,25 @@ dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu) */ static void dhd_rx_napi_dispatcher_fn(struct work_struct * work) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif struct dhd_info *dhd = container_of(work, struct dhd_info, rx_napi_dispatcher_work); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif int cpu; get_online_cpus(); cpu = atomic_read(&dhd->rx_napi_cpu); + if (!cpu_online(cpu)) dhd_napi_schedule(dhd); else dhd_napi_schedule_on(dhd, cpu); + put_online_cpus(); } @@ -2340,7 +3075,6 @@ dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp) put_cpu(); on_cpu = atomic_read(&dhd->rx_napi_cpu); - if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) { dhd_napi_schedule(dhd); } else { @@ -2365,17 +3099,6 @@ dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx) #endif /* DHD_LB */ 
-static void dhd_memdump_work_handler(struct work_struct * work) -{ - struct dhd_info *dhd = - container_of(work, struct dhd_info, dhd_memdump_work.work); - - BCM_REFERENCE(dhd); -#ifdef BCMPCIE - dhd_prot_collect_memdump(&dhd->pub); -#endif -} - /** Returns dhd iflist index corresponding the the bssidx provided by apps */ int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx) @@ -2471,31 +3194,33 @@ static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp) int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost) { - dhd_info_t *dhd = (dhd_info_t *)dhdp->info; - if (prepost) { /* pre process */ - dhd_read_macaddr(dhd); + dhd_read_cis(dhdp); + dhd_check_module_cid(dhdp); + dhd_check_module_mac(dhdp); + dhd_set_macaddr_from_file(dhdp); } else { /* post process */ - dhd_write_macaddr(&dhd->pub.mac); + dhd_write_macaddr(&dhdp->mac); + dhd_clear_cis(dhdp); } return 0; } // terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed -#if defined(PKT_FILTER_SUPPORT) &&defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER) +#if defined(PKT_FILTER_SUPPORT) && defined(ARP_OFFLOAD_SUPPORT) +#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER static bool -_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode) +_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode_param) { bool _apply = FALSE; /* In case of IBSS mode, apply arp pkt filter */ - if (op_mode & DHD_FLAG_IBSS_MODE) { + if (op_mode_param & DHD_FLAG_IBSS_MODE) { _apply = TRUE; goto exit; } /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */ - if ((dhd->arp_version == 1) && - (op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) { + if (op_mode_param & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE)) { _apply = TRUE; goto exit; } @@ -2503,11 +3228,11 @@ _turn_on_arp_filter(dhd_pub_t *dhd, int op_mode) exit: return _apply; } -#endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */ +#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */ -void dhd_set_packet_filter(dhd_pub_t *dhd) +void 
+dhd_set_packet_filter(dhd_pub_t *dhd) { -#ifdef PKT_FILTER_SUPPORT int i; DHD_TRACE(("%s: enter\n", __FUNCTION__)); @@ -2516,16 +3241,14 @@ void dhd_set_packet_filter(dhd_pub_t *dhd) dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]); } } -#endif /* PKT_FILTER_SUPPORT */ } -void dhd_enable_packet_filter(int value, dhd_pub_t *dhd) +void +dhd_enable_packet_filter(int value, dhd_pub_t *dhd) { -#ifdef PKT_FILTER_SUPPORT int i; DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value)); - if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) { DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__)); return; @@ -2550,24 +3273,96 @@ void dhd_enable_packet_filter(int value, dhd_pub_t *dhd) value, dhd_master_mode); } } -#endif /* PKT_FILTER_SUPPORT */ } +int +dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num) +{ + char *filterp = NULL; + int filter_id = 0; + + switch (num) { + case DHD_BROADCAST_FILTER_NUM: + filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF"; + filter_id = 101; + break; + case DHD_MULTICAST4_FILTER_NUM: + filter_id = 102; + if (FW_SUPPORTED((dhdp), pf6)) { + if (dhdp->pktfilter[num] != NULL) { + dhd_pktfilter_offload_delete(dhdp, filter_id); + dhdp->pktfilter[num] = NULL; + } + if (!add_remove) { + filterp = DISCARD_IPV4_MCAST; + add_remove = 1; + break; + } + } + filterp = "102 0 0 0 0xFFFFFF 0x01005E"; + break; + case DHD_MULTICAST6_FILTER_NUM: + filter_id = 103; + if (FW_SUPPORTED((dhdp), pf6)) { + if (dhdp->pktfilter[num] != NULL) { + dhd_pktfilter_offload_delete(dhdp, filter_id); + dhdp->pktfilter[num] = NULL; + } + if (!add_remove) { + filterp = DISCARD_IPV6_MCAST; + add_remove = 1; + break; + } + } + filterp = "103 0 0 0 0xFFFF 0x3333"; + break; + case DHD_MDNS_FILTER_NUM: + filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB"; + filter_id = 104; + break; + case DHD_ARP_FILTER_NUM: + filterp = "105 0 0 12 0xFFFF 0x0806"; + filter_id = 105; + break; + case DHD_BROADCAST_ARP_FILTER_NUM: + filterp = "106 0 0 0 
0xFFFFFFFFFFFF0000000000000806" + " 0xFFFFFFFFFFFF0000000000000806"; + filter_id = 106; + break; + default: + return -EINVAL; + } + + /* Add filter */ + if (add_remove) { + dhdp->pktfilter[num] = filterp; + dhd_pktfilter_offload_set(dhdp, dhdp->pktfilter[num]); + } else { /* Delete filter */ + if (dhdp->pktfilter[num]) { + dhd_pktfilter_offload_delete(dhdp, filter_id); + dhdp->pktfilter[num] = NULL; + } + } + + return 0; +} +#endif /* PKT_FILTER_SUPPORT */ + static int dhd_set_suspend(int value, dhd_pub_t *dhd) { int power_mode = PM_MAX; #ifdef SUPPORT_SENSORHUB - uint32 shub_msreq; + shub_control_t shub_ctl; #endif /* SUPPORT_SENSORHUB */ /* wl_pkt_filter_enable_t enable_parm; */ - char iovbuf[32]; int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */ + int ret = 0; #ifdef DHD_USE_EARLYSUSPEND #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND int bcn_timeout = 0; #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */ #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND - int roam_time_thresh = 0; /* (ms) */ + int roam_time_thresh = 0; /* (ms) */ #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */ #ifndef ENABLE_FW_ROAM_SUSPEND uint roamvar = dhd->conf->roam_off_suspend; @@ -2576,13 +3371,15 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd) int bcn_li_bcn; #endif /* ENABLE_BCN_LI_BCN_WAKEUP */ uint nd_ra_filter = 0; - int ret = 0; #endif /* DHD_USE_EARLYSUSPEND */ #ifdef PASS_ALL_MCAST_PKTS struct dhd_info *dhdinfo; uint32 allmulti; uint i; #endif /* PASS_ALL_MCAST_PKTS */ +#ifdef ENABLE_IPMCAST_FILTER + int ipmcast_l2filter; +#endif /* ENABLE_IPMCAST_FILTER */ #ifdef DYNAMIC_SWOOB_DURATION #ifndef CUSTOM_INTR_WIDTH #define CUSTOM_INTR_WIDTH 100 @@ -2590,6 +3387,18 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd) #endif /* CUSTOM_INTR_WIDTH */ #endif /* DYNAMIC_SWOOB_DURATION */ +#if defined(BCMPCIE) + int lpas = 0; + int dtim_period = 0; + int bcn_interval = 0; + int bcn_to_dly = 0; +#ifndef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND + int bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING; 
+#else + bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING; +#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */ +#endif /* OEM_ANDROID && BCMPCIE */ + if (!dhd) return -ENODEV; @@ -2612,47 +3421,53 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd) power_mode = dhd->conf->pm; else power_mode = PM_FAST; + if (dhd->up) { if (value && dhd->in_suspend) { #ifdef PKT_FILTER_SUPPORT dhd->early_suspended = 1; #endif /* Kernel suspended */ - DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__)); - -#ifdef SUPPORT_SENSORHUB - shub_msreq = 1; - if (dhd->info->shub_enable == 1) { - bcm_mkiovar("shub_msreq", (char *)&shub_msreq, 4, - iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, - iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { - DHD_ERROR(("%s Sensor Hub move/stop start: failed %d\n", - __FUNCTION__, ret)); - } - } -#endif /* SUPPORT_SENSORHUB */ + DHD_ERROR(("%s: force extra suspend setting\n", __FUNCTION__)); if (dhd->conf->pm_in_suspend >= 0) power_mode = dhd->conf->pm_in_suspend; dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, - sizeof(power_mode), TRUE, 0); + sizeof(power_mode), TRUE, 0); #ifdef PKT_FILTER_SUPPORT /* Enable packet filter, * only allow unicast packet to send up */ dhd_enable_packet_filter(1, dhd); +#ifdef APF + dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd)); +#endif /* APF */ #endif /* PKT_FILTER_SUPPORT */ +#ifdef SUPPORT_SENSORHUB + shub_ctl.enable = 1; + shub_ctl.cmd = 0x000; + shub_ctl.op_mode = 1; + shub_ctl.interval = 0; + if (dhd->info->shub_enable == 1) { + ret = dhd_iovar(dhd, 0, "shub_msreq", + (char *)&shub_ctl, sizeof(shub_ctl), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s SensorHub MS start: failed %d\n", + __FUNCTION__, ret)); + } + } +#endif /* SUPPORT_SENSORHUB */ + + #ifdef PASS_ALL_MCAST_PKTS allmulti = 0; - bcm_mkiovar("allmulti", (char *)&allmulti, 4, - iovbuf, sizeof(iovbuf)); for (i = 0; i < DHD_MAX_IFS; i++) { if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) - dhd_wl_ioctl_cmd(dhd, 
WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, i); + dhd_iovar(dhd, i, "allmulti", (char *)&allmulti, + sizeof(allmulti), NULL, 0, TRUE); + } #endif /* PASS_ALL_MCAST_PKTS */ @@ -2666,53 +3481,107 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd) bcn_li_dtim = 0; } else #endif /* WLTDLS */ +#if defined(BCMPCIE) + bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd, &dtim_period, + &bcn_interval); + dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim, + sizeof(bcn_li_dtim), NULL, 0, TRUE); + + if ((bcn_li_dtim * dtim_period * bcn_interval) >= + MIN_DTIM_FOR_ROAM_THRES_EXTEND) { + /* + * Increase max roaming threshold from 2 secs to 8 secs + * the real roam threshold is MIN(max_roam_threshold, + * bcn_timeout/2) + */ + lpas = 1; + dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL, + 0, TRUE); + + bcn_to_dly = 1; + /* + * if bcn_to_dly is 1, the real roam threshold is + * MIN(max_roam_threshold, bcn_timeout -1); + * notify link down event after roaming procedure complete + * if we hit bcn_timeout while we are in roaming progress. 
+ */ + dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly, + sizeof(bcn_to_dly), NULL, 0, TRUE); + /* Increase beacon timeout to 6 secs or use bigger one */ + bcn_timeout = max(bcn_timeout, BCN_TIMEOUT_IN_SUSPEND); + dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, + sizeof(bcn_timeout), NULL, 0, TRUE); + } +#else bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd); - bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim, - 4, iovbuf, sizeof(iovbuf)); - if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), - TRUE, 0) < 0) - DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__)); + if (dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim, + sizeof(bcn_li_dtim), NULL, 0, TRUE) < 0) + DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__)); +#endif /* OEM_ANDROID && BCMPCIE */ #ifdef DHD_USE_EARLYSUSPEND #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND; - bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, - 4, iovbuf, sizeof(iovbuf)); - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, + sizeof(bcn_timeout), NULL, 0, TRUE); #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */ #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND; - bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh, - 4, iovbuf, sizeof(iovbuf)); - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + dhd_iovar(dhd, 0, "roam_time_thresh", (char *)&roam_time_thresh, + sizeof(roam_time_thresh), NULL, 0, TRUE); #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */ #ifndef ENABLE_FW_ROAM_SUSPEND /* Disable firmware roaming during suspend */ - bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf)); - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), + NULL, 0, TRUE); #endif /* ENABLE_FW_ROAM_SUSPEND */ #ifdef ENABLE_BCN_LI_BCN_WAKEUP bcn_li_bcn = 0; - 
bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn, - 4, iovbuf, sizeof(iovbuf)); - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, + sizeof(bcn_li_bcn), NULL, 0, TRUE); #endif /* ENABLE_BCN_LI_BCN_WAKEUP */ - if (FW_SUPPORTED(dhd, ndoe)) { +#ifdef NDO_CONFIG_SUPPORT + if (dhd->ndo_enable) { + if (!dhd->ndo_host_ip_overflow) { + /* enable ND offload on suspend */ + ret = dhd_ndo_enable(dhd, 1); + if (ret < 0) { + DHD_ERROR(("%s: failed to enable NDO\n", + __FUNCTION__)); + } + } else { + DHD_INFO(("%s: NDO disabled on suspend due to" + "HW capacity\n", __FUNCTION__)); + } + } +#endif /* NDO_CONFIG_SUPPORT */ +#ifndef APF + if (FW_SUPPORTED(dhd, ndoe)) +#else + if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf)) +#endif /* APF */ + { /* enable IPv6 RA filter in firmware during suspend */ nd_ra_filter = 1; - bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4, - iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) + ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable", + (char *)&nd_ra_filter, sizeof(nd_ra_filter), + NULL, 0, TRUE); + if (ret < 0) DHD_ERROR(("failed to set nd_ra_filter (%d)\n", ret)); } + dhd_os_suppress_logging(dhd, TRUE); +#ifdef ENABLE_IPMCAST_FILTER + ipmcast_l2filter = 1; + ret = dhd_iovar(dhd, 0, "ipmcast_l2filter", + (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter), + NULL, 0, TRUE); +#endif /* ENABLE_IPMCAST_FILTER */ #ifdef DYNAMIC_SWOOB_DURATION intr_width = CUSTOM_INTR_WIDTH; - bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4, - iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width, + sizeof(intr_width), NULL, 0, TRUE); + if (ret < 0) { DHD_ERROR(("failed to set intr_width (%d)\n", ret)); } #endif /* DYNAMIC_SWOOB_DURATION */ @@ -2724,87 +3593,131 @@ static int 
dhd_set_suspend(int value, dhd_pub_t *dhd) dhd->early_suspended = 0; #endif /* Kernel resumed */ - DHD_ERROR(("%s: Remove extra suspend setting\n", __FUNCTION__)); + DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__)); #ifdef SUPPORT_SENSORHUB - shub_msreq = 0; + shub_ctl.enable = 1; + shub_ctl.cmd = 0x000; + shub_ctl.op_mode = 0; + shub_ctl.interval = 0; if (dhd->info->shub_enable == 1) { - bcm_mkiovar("shub_msreq", (char *)&shub_msreq, - 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, - iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { - DHD_ERROR(("%s Sensor Hub move/stop stop:" - "failed %d\n", __FUNCTION__, ret)); + ret = dhd_iovar(dhd, 0, "shub_msreq", + (char *)&shub_ctl, sizeof(shub_ctl), + NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s SensorHub MS stop: failed %d\n", + __FUNCTION__, ret)); } } #endif /* SUPPORT_SENSORHUB */ - #ifdef DYNAMIC_SWOOB_DURATION intr_width = 0; - bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4, - iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width, + sizeof(intr_width), NULL, 0, TRUE); + if (ret < 0) { DHD_ERROR(("failed to set intr_width (%d)\n", ret)); } #endif /* DYNAMIC_SWOOB_DURATION */ +#ifndef SUPPORT_PM2_ONLY + power_mode = PM_FAST; dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, - sizeof(power_mode), TRUE, 0); + sizeof(power_mode), TRUE, 0); +#endif /* SUPPORT_PM2_ONLY */ #ifdef PKT_FILTER_SUPPORT /* disable pkt filter */ dhd_enable_packet_filter(0, dhd); +#ifdef APF + dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd)); +#endif /* APF */ #endif /* PKT_FILTER_SUPPORT */ #ifdef PASS_ALL_MCAST_PKTS allmulti = 1; - bcm_mkiovar("allmulti", (char *)&allmulti, 4, - iovbuf, sizeof(iovbuf)); for (i = 0; i < DHD_MAX_IFS; i++) { if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 
i); + dhd_iovar(dhd, i, "allmulti", (char *)&allmulti, + sizeof(allmulti), NULL, 0, TRUE); } #endif /* PASS_ALL_MCAST_PKTS */ +#if defined(BCMPCIE) + /* restore pre-suspend setting */ + ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim, + sizeof(bcn_li_dtim), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s:bcn_li_ditm fail:%d\n", __FUNCTION__, ret)); + } + dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL, 0, + TRUE); + + dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly, + sizeof(bcn_to_dly), NULL, 0, TRUE); + + dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, + sizeof(bcn_timeout), NULL, 0, TRUE); +#else /* restore pre-suspend setting for dtim_skip */ - bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim, - 4, iovbuf, sizeof(iovbuf)); - - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim, + sizeof(bcn_li_dtim), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s:bcn_li_ditm fail:%d\n", __FUNCTION__, ret)); + } +#endif /* OEM_ANDROID && BCMPCIE */ #ifdef DHD_USE_EARLYSUSPEND #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND bcn_timeout = CUSTOM_BCN_TIMEOUT; - bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, - 4, iovbuf, sizeof(iovbuf)); - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, + sizeof(bcn_timeout), NULL, 0, TRUE); #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */ #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND roam_time_thresh = 2000; - bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh, - 4, iovbuf, sizeof(iovbuf)); - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + dhd_iovar(dhd, 0, "roam_time_thresh", (char *)&roam_time_thresh, + sizeof(roam_time_thresh), NULL, 0, TRUE); + #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */ #ifndef ENABLE_FW_ROAM_SUSPEND roamvar = dhd_roam_disable; - bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf)); - 
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), + NULL, 0, TRUE); #endif /* ENABLE_FW_ROAM_SUSPEND */ #ifdef ENABLE_BCN_LI_BCN_WAKEUP bcn_li_bcn = 1; - bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn, - 4, iovbuf, sizeof(iovbuf)); - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, + sizeof(bcn_li_bcn), NULL, 0, TRUE); #endif /* ENABLE_BCN_LI_BCN_WAKEUP */ - if (FW_SUPPORTED(dhd, ndoe)) { +#ifdef NDO_CONFIG_SUPPORT + if (dhd->ndo_enable) { + /* Disable ND offload on resume */ + ret = dhd_ndo_enable(dhd, 0); + if (ret < 0) { + DHD_ERROR(("%s: failed to disable NDO\n", + __FUNCTION__)); + } + } +#endif /* NDO_CONFIG_SUPPORT */ +#ifndef APF + if (FW_SUPPORTED(dhd, ndoe)) +#else + if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf)) +#endif /* APF */ + { /* disable IPv6 RA filter in firmware during suspend */ nd_ra_filter = 0; - bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4, - iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) + ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable", + (char *)&nd_ra_filter, sizeof(nd_ra_filter), + NULL, 0, TRUE); + if (ret < 0) { DHD_ERROR(("failed to set nd_ra_filter (%d)\n", ret)); + } } + dhd_os_suppress_logging(dhd, FALSE); +#ifdef ENABLE_IPMCAST_FILTER + ipmcast_l2filter = 0; + ret = dhd_iovar(dhd, 0, "ipmcast_l2filter", + (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter), + NULL, 0, TRUE); +#endif /* ENABLE_IPMCAST_FILTER */ #endif /* DHD_USE_EARLYSUSPEND */ /* terence 2017029: Reject in early suspend */ @@ -3054,7 +3967,7 @@ _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN); if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) { DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n", - dhd_ifname(&dhd->pub, ifidx), cnt)); + 
dhd_ifname(&dhd->pub, ifidx), cnt)); return; } @@ -3070,6 +3983,10 @@ _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) netif_addr_lock_bh(dev); #endif /* LINUX >= 2.6.27 */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif netdev_for_each_mc_addr(ha, dev) { if (!cnt) break; @@ -3077,9 +3994,12 @@ _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) bufp += ETHER_ADDR_LEN; cnt--; } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif #else /* LINUX < 2.6.35 */ for (mclist = dev->mc_list; (mclist && (cnt > 0)); - cnt--, mclist = mclist->next) { + cnt--, mclist = mclist->next) { memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN); bufp += ETHER_ADDR_LEN; } @@ -3108,35 +4028,14 @@ _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) * were trying to set some addresses and dongle rejected it... */ - buflen = sizeof("allmulti") + sizeof(allmulti); - if (!(buf = MALLOC(dhd->pub.osh, buflen))) { - DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx))); - return; - } allmulti = htol32(allmulti); - - if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) { - DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n", - dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen)); - MFREE(dhd->pub.osh, buf, buflen); - return; - } - - - memset(&ioc, 0, sizeof(ioc)); - ioc.cmd = WLC_SET_VAR; - ioc.buf = buf; - ioc.len = buflen; - ioc.set = TRUE; - - ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + ret = dhd_iovar(&dhd->pub, ifidx, "allmulti", (char *)&allmulti, + sizeof(allmulti), NULL, 0, TRUE); if (ret < 0) { DHD_ERROR(("%s: set allmulti %d failed\n", dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti))); } - MFREE(dhd->pub.osh, buf, buflen); - /* Finally, pick up the PROMISC flag as well, like the NIC driver does */ allmulti = 
(dev->flags & IFF_PROMISC) ? TRUE : FALSE; @@ -3159,21 +4058,10 @@ _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) int _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr) { - char buf[32]; - wl_ioctl_t ioc; int ret; - if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) { - DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx))); - return -1; - } - memset(&ioc, 0, sizeof(ioc)); - ioc.cmd = WLC_SET_VAR; - ioc.buf = buf; - ioc.len = 32; - ioc.set = TRUE; - - ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + ret = dhd_iovar(&dhd->pub, ifidx, "cur_etheraddr", (char *)addr, + ETHER_ADDR_LEN, NULL, 0, TRUE); if (ret < 0) { DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx))); } else { @@ -3190,6 +4078,52 @@ extern struct net_device *ap_net_dev; extern tsk_ctl_t ap_eth_ctl; /* ap netdev heper thread ctl */ #endif +#ifdef DHD_WMF +void dhd_update_psta_interface_for_sta(dhd_pub_t* dhdp, char* ifname, void* ea, + void* event_data) +{ + struct wl_psta_primary_intf_event *psta_prim_event = + (struct wl_psta_primary_intf_event*)event_data; + dhd_sta_t *psta_interface = NULL; + dhd_sta_t *sta = NULL; + uint8 ifindex; + ASSERT(ifname); + ASSERT(psta_prim_event); + ASSERT(ea); + + ifindex = (uint8)dhd_ifname2idx(dhdp->info, ifname); + sta = dhd_find_sta(dhdp, ifindex, ea); + if (sta != NULL) { + psta_interface = dhd_find_sta(dhdp, ifindex, + (void *)(psta_prim_event->prim_ea.octet)); + if (psta_interface != NULL) { + sta->psta_prim = psta_interface; + } + } +} + +/* Get wmf_psta_disable configuration configuration */ +int dhd_get_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + return ifp->wmf_psta_disable; +} + +/* Set wmf_psta_disable configuration configuration */ +int dhd_set_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t 
*ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + ifp->wmf_psta_disable = val; + return 0; +} +#endif /* DHD_WMF */ + #ifdef DHD_PSTA /* Get psta/psr configuration configuration */ int dhd_get_psta_mode(dhd_pub_t *dhdp) @@ -3206,6 +4140,74 @@ int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val) } #endif /* DHD_PSTA */ +#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER)) +static void +dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + if ( +#ifdef DHD_L2_FILTER + (ifp->block_ping) || +#endif +#ifdef DHD_WET + (dhd->wet_mode) || +#endif +#ifdef DHD_MCAST_REGEN + (ifp->mcast_regen_bss_enable) || +#endif + FALSE) { + ifp->rx_pkt_chainable = FALSE; + } +} +#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */ + +#ifdef DHD_WET +/* Get wet configuration configuration */ +int dhd_get_wet_mode(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + return (int)dhd->wet_mode; +} + +/* Set wet configuration configuration */ +int dhd_set_wet_mode(dhd_pub_t *dhdp, uint32 val) +{ + dhd_info_t *dhd = dhdp->info; + dhd->wet_mode = val; + dhd_update_rx_pkt_chainable_state(dhdp, 0); + return 0; +} +#endif /* DHD_WET */ + +#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) +int32 dhd_role_to_nl80211_iftype(int32 role) +{ + switch (role) { + case WLC_E_IF_ROLE_STA: + return NL80211_IFTYPE_STATION; + case WLC_E_IF_ROLE_AP: + return NL80211_IFTYPE_AP; + case WLC_E_IF_ROLE_WDS: + return NL80211_IFTYPE_WDS; + case WLC_E_IF_ROLE_P2P_GO: + return NL80211_IFTYPE_P2P_GO; + case WLC_E_IF_ROLE_P2P_CLIENT: + return NL80211_IFTYPE_P2P_CLIENT; + case WLC_E_IF_ROLE_IBSS: + case WLC_E_IF_ROLE_NAN: + return NL80211_IFTYPE_ADHOC; + default: + return NL80211_IFTYPE_UNSPECIFIED; + } +} +#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + static void dhd_ifadd_event_handler(void *handle, void 
*event_info, u8 event) { @@ -3214,10 +4216,9 @@ dhd_ifadd_event_handler(void *handle, void *event_info, u8 event) struct net_device *ndev; int ifidx, bssidx; int ret; -#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) - struct wireless_dev *vwdev, *primary_wdev; - struct net_device *primary_ndev; -#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */ +#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + struct wl_if_event_info info; +#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ if (event != DHD_WQ_WORK_IF_ADD) { DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); @@ -3242,6 +4243,23 @@ dhd_ifadd_event_handler(void *handle, void *event_info, u8 event) bssidx = if_event->event.bssidx; DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx)); + +#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + if (if_event->event.ifidx > 0) { + bzero(&info, sizeof(info)); + info.ifidx = if_event->event.ifidx; + info.bssidx = if_event->event.bssidx; + info.role = if_event->event.role; + strncpy(info.name, if_event->name, IFNAMSIZ); + if (wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net, + &info, if_event->mac, NULL, true) != NULL) { + /* Do the post interface create ops */ + DHD_ERROR(("Post ifcreate ops done. 
Returning \n")); + goto done; + } + } +#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + /* This path is for non-android case */ /* The interface name in host and in event msg are same */ /* if name in event msg is used to create dongle if list on host */ @@ -3252,22 +4270,6 @@ dhd_ifadd_event_handler(void *handle, void *event_info, u8 event) goto done; } -#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) - vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL); - if (unlikely(!vwdev)) { - DHD_ERROR(("Could not allocate wireless device\n")); - goto done; - } - primary_ndev = dhd->pub.info->iflist[0]->net; - primary_wdev = ndev_to_wdev(primary_ndev); - vwdev->wiphy = primary_wdev->wiphy; - vwdev->iftype = if_event->event.role; - vwdev->netdev = ndev; - ndev->ieee80211_ptr = vwdev; - SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy)); - DHD_ERROR(("virtual interface(%s) is created\n", if_event->name)); -#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */ - DHD_PERIM_UNLOCK(&dhd->pub); ret = dhd_register_if(&dhd->pub, ifidx, TRUE); DHD_PERIM_LOCK(&dhd->pub); @@ -3276,16 +4278,12 @@ dhd_ifadd_event_handler(void *handle, void *event_info, u8 event) dhd_remove_if(&dhd->pub, ifidx, TRUE); goto done; } -#ifdef PCIE_FULL_DONGLE +#ifndef PCIE_FULL_DONGLE /* Turn on AP isolation in the firmware for interfaces operating in AP mode */ if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) { - char iovbuf[WLC_IOCTL_SMLEN]; uint32 var_int = 1; - - memset(iovbuf, 0, sizeof(iovbuf)); - bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf)); - ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx); - + ret = dhd_iovar(&dhd->pub, ifidx, "ap_isolate", (char *)&var_int, sizeof(var_int), + NULL, 0, TRUE); if (ret != BCME_OK) { DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__)); dhd_remove_if(&dhd->pub, ifidx, TRUE); @@ -3332,9 +4330,23 
@@ dhd_ifdel_event_handler(void *handle, void *event_info, u8 event) DHD_TRACE(("Removing interface with idx %d\n", ifidx)); DHD_PERIM_UNLOCK(&dhd->pub); +#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + if (if_event->event.ifidx > 0) { + /* Do the post interface del ops */ + if (wl_cfg80211_post_ifdel(dhd->pub.info->iflist[ifidx]->net, true) == 0) { + DHD_TRACE(("Post ifdel ops done. Returning \n")); + DHD_PERIM_LOCK(&dhd->pub); + goto done; + } + } +#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + dhd_remove_if(&dhd->pub, ifidx, TRUE); DHD_PERIM_LOCK(&dhd->pub); +#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) +done: +#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t)); DHD_PERIM_UNLOCK(&dhd->pub); @@ -3403,8 +4415,8 @@ static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event) { dhd_info_t *dhd = handle; - dhd_if_t *ifp = event_info; - int ifidx; + int ifidx = (int)((long int)event_info); + dhd_if_t *ifp = NULL; if (event != DHD_WQ_WORK_SET_MCAST_LIST) { DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); @@ -3420,6 +4432,13 @@ dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event) DHD_OS_WAKE_LOCK(&dhd->pub); DHD_PERIM_LOCK(&dhd->pub); + ifp = dhd->iflist[ifidx]; + + if (ifp == NULL || !dhd->pub.up) { + DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__)); + goto done; + } + #ifdef SOFTAP { bool in_ap = FALSE; @@ -3475,7 +4494,7 @@ dhd_set_mac_address(struct net_device *dev, void *addr) dhdif->set_macaddress = TRUE; dhd_net_if_unlock_local(dhd); dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC, - dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW); + dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW); return ret; } @@ -3490,14 +4509,24 @@ dhd_set_multicast_list(struct net_device *dev) return; 
dhd->iflist[ifidx]->set_multicast = TRUE; - dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx], - DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW); + dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)((long int)ifidx), + DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WQ_WORK_PRIORITY_LOW); // terence 20160907: fix for not able to set mac when wlan0 is down dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx], - DHD_WQ_WORK_SET_MAC, dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW); + DHD_WQ_WORK_SET_MAC, dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW); } +#ifdef DHD_UCODE_DOWNLOAD +/* Get ucode path */ +char * +dhd_get_ucode_path(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + return dhd->uc_path; +} +#endif /* DHD_UCODE_DOWNLOAD */ + #ifdef PROP_TXSTATUS int dhd_os_wlfc_block(dhd_pub_t *pub) @@ -3610,11 +4639,11 @@ int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p) if (ifp->fwdh) { struct ether_header *eh = (struct ether_header *)PKTDATA(dhdp->osh, p); uint16 * da = (uint16 *)(eh->ether_dhost); - wofa_t wofa; + uintptr_t wofa_data; ASSERT(ISALIGNED(da, 2)); - wofa = fwder_lookup(ifp->fwdh->mate, da, ifp->idx); - if (wofa == FWDER_WOFA_INVALID) { /* Unknown MAC address */ + wofa_data = fwder_lookup(ifp->fwdh->mate, da, ifp->idx); + if (wofa_data == WOFA_DATA_INVALID) { /* Unknown MAC address */ if (fwder_transmit(ifp->fwdh, skb, 1, skb->dev) == FWDER_SUCCESS) { return BCME_OK; } @@ -3672,12 +4701,9 @@ __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) int ret = BCME_OK; dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); struct ether_header *eh = NULL; -#ifdef DHD_L2_FILTER +#if defined(DHD_L2_FILTER) dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx); -#endif -#ifdef DHD_8021X_DUMP - struct net_device *ndev; -#endif /* DHD_8021X_DUMP */ +#endif /* Reject if down */ if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) { @@ -3689,8 +4715,12 @@ __dhd_sendpkt(dhd_pub_t 
*dhdp, int ifidx, void *pktbuf) #ifdef PCIE_FULL_DONGLE if (dhdp->busstate == DHD_BUS_SUSPEND) { DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__)); - PKTFREE(dhdp->osh, pktbuf, TRUE); - return -EBUSY; + PKTCFREE(dhdp->osh, pktbuf, TRUE); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) + return -ENODEV; +#else + return NETDEV_TX_BUSY; +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) */ } #endif /* PCIE_FULL_DONGLE */ @@ -3739,67 +4769,56 @@ __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) if (ETHER_ISMULTI(eh->ether_dhost)) dhdp->tx_multicast++; - if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) - atomic_inc(&dhd->pend_8021x_cnt); -#ifdef DHD_DHCP_DUMP - if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) { - uint16 dump_hex; - uint16 source_port; - uint16 dest_port; - uint16 udp_port_pos; - uint8 *ptr8 = (uint8 *)&pktdata[ETHER_HDR_LEN]; - uint8 ip_header_len = (*ptr8 & 0x0f)<<2; - struct net_device *net; - char *ifname; - - net = dhd_idx2net(dhdp, ifidx); - ifname = net ? 
net->name : "N/A"; - udp_port_pos = ETHER_HDR_LEN + ip_header_len; - source_port = (pktdata[udp_port_pos] << 8) | pktdata[udp_port_pos+1]; - dest_port = (pktdata[udp_port_pos+2] << 8) | pktdata[udp_port_pos+3]; - if (source_port == 0x0044 || dest_port == 0x0044) { - dump_hex = (pktdata[udp_port_pos+249] << 8) | - pktdata[udp_port_pos+250]; - if (dump_hex == 0x0101) { - DHD_ERROR(("DHCP[%s] - DISCOVER [TX]", ifname)); - } else if (dump_hex == 0x0102) { - DHD_ERROR(("DHCP[%s] - OFFER [TX]", ifname)); - } else if (dump_hex == 0x0103) { - DHD_ERROR(("DHCP[%s] - REQUEST [TX]", ifname)); - } else if (dump_hex == 0x0105) { - DHD_ERROR(("DHCP[%s] - ACK [TX]", ifname)); - } else { - DHD_ERROR(("DHCP[%s] - 0x%X [TX]", ifname, dump_hex)); - } + if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) { #ifdef DHD_LOSSLESS_ROAMING - if (dhdp->dequeue_prec_map != (uint8)ALLPRIO) { - DHD_ERROR(("/%d", dhdp->dequeue_prec_map)); - } + uint8 prio = (uint8)PKTPRIO(pktbuf); + + /* back up 802.1x's priority */ + dhdp->prio_8021x = prio; #endif /* DHD_LOSSLESS_ROAMING */ - DHD_ERROR(("\n")); - } else if (source_port == 0x0043 || dest_port == 0x0043) { - DHD_ERROR(("DHCP[%s] - BOOTP [RX]\n", ifname)); - } + DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED); + atomic_inc(&dhd->pend_8021x_cnt); +#if defined(DHD_8021X_DUMP) + dhd_dump_eapol_4way_message(dhd_ifname(dhdp, ifidx), pktdata, TRUE); +#endif /* DHD_8021X_DUMP */ } + + if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) { +#ifdef DHD_DHCP_DUMP + dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE); #endif /* DHD_DHCP_DUMP */ +#ifdef DHD_ICMP_DUMP + dhd_icmp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE); +#endif /* DHD_ICMP_DUMP */ + } } else { PKTCFREE(dhdp->osh, pktbuf, TRUE); return BCME_ERROR; } - /* Look into the packet and update the packet priority */ -#ifndef PKTPRIO_OVERRIDE - if (PKTPRIO(pktbuf) == 0) -#endif /* !PKTPRIO_OVERRIDE */ { -#ifdef QOS_MAP_SET - pktsetprio_qms(pktbuf, wl_get_up_table(), FALSE); + /* Look 
into the packet and update the packet priority */ +#ifndef PKTPRIO_OVERRIDE + if (PKTPRIO(pktbuf) == 0) +#endif /* !PKTPRIO_OVERRIDE */ + { +#if defined(QOS_MAP_SET) + pktsetprio_qms(pktbuf, wl_get_up_table(dhdp, ifidx), FALSE); #else - pktsetprio(pktbuf, FALSE); + pktsetprio(pktbuf, FALSE); #endif /* QOS_MAP_SET */ + } } +#if defined(TRAFFIC_MGMT_DWM) + traffic_mgmt_pkt_set_prio(dhdp, pktbuf); + +#ifdef BCM_GMAC3 + DHD_PKT_SET_DATAOFF(pktbuf, 0); +#endif /* BCM_GMAC3 */ +#endif + #ifdef PCIE_FULL_DONGLE /* * Lkup the per interface hash table, for a matching flowring. If one is not @@ -3877,15 +4896,14 @@ dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) unsigned long flags; DHD_GENERAL_LOCK(dhdp, flags); - if (dhdp->busstate == DHD_BUS_DOWN || - dhdp->busstate == DHD_BUS_DOWN_IN_PROGRESS) { + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) { DHD_ERROR(("%s: returning as busstate=%d\n", __FUNCTION__, dhdp->busstate)); DHD_GENERAL_UNLOCK(dhdp, flags); PKTCFREE(dhdp->osh, pktbuf, TRUE); return -ENODEV; } - dhdp->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SEND_PKT; + DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp); DHD_GENERAL_UNLOCK(dhdp, flags); #ifdef DHD_PCIE_RUNTIMEPM @@ -3897,17 +4915,60 @@ dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) } #endif /* DHD_PCIE_RUNTIMEPM */ + DHD_GENERAL_LOCK(dhdp, flags); + if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) { + DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n", + __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state)); + DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp); + dhd_os_busbusy_wake(dhdp); + DHD_GENERAL_UNLOCK(dhdp, flags); + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return -ENODEV; + } + DHD_GENERAL_UNLOCK(dhdp, flags); + ret = __dhd_sendpkt(dhdp, ifidx, pktbuf); #ifdef DHD_PCIE_RUNTIMEPM exit: #endif DHD_GENERAL_LOCK(dhdp, flags); - dhdp->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SEND_PKT; + DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp); + dhd_os_busbusy_wake(dhdp); DHD_GENERAL_UNLOCK(dhdp, flags); return ret; } 
+#if defined(DHD_LB_TXP) + +int BCMFASTPATH +dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net, + int ifidx, void *skb) +{ + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->tx_start_percpu_run_cnt); + + /* If the feature is disabled run-time do TX from here */ + if (atomic_read(&dhd->lb_txp_active) == 0) { + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt); + return __dhd_sendpkt(&dhd->pub, ifidx, skb); + } + + /* Store the address of net device and interface index in the Packet tag */ + DHD_LB_TX_PKTTAG_SET_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), net); + DHD_LB_TX_PKTTAG_SET_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), ifidx); + + /* Enqueue the skb into tx_pend_queue */ + skb_queue_tail(&dhd->tx_pend_queue, skb); + + DHD_TRACE(("%s(): Added skb %p for netdev %p \r\n", __FUNCTION__, skb, net)); + + /* Dispatch the Tx job to be processed by the tx_tasklet */ + dhd_lb_tx_dispatch(&dhd->pub); + + return NETDEV_TX_OK; +} +#endif /* DHD_LB_TXP */ + int BCMFASTPATH dhd_start_xmit(struct sk_buff *skb, struct net_device *net) { @@ -3930,6 +4991,10 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + if (dhd_query_bus_erros(&dhd->pub)) { + return -ENODEV; + } + /* terence 2017029: Reject in early suspend */ if (!dhd->pub.conf->xmit_in_suspend && dhd->pub.early_suspended) { dhd_txflowcontrol(&dhd->pub, ALL_INTERFACES, ON); @@ -3940,12 +5005,9 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) #endif } - -#ifdef PCIE_FULL_DONGLE DHD_GENERAL_LOCK(&dhd->pub, flags); - dhd->pub.dhd_bus_busy_state |= DHD_BUS_BUSY_IN_TX; + DHD_BUS_BUSY_SET_IN_TX(&dhd->pub); DHD_GENERAL_UNLOCK(&dhd->pub, flags); -#endif /* PCIE_FULL_DONGLE */ #ifdef DHD_PCIE_RUNTIMEPM if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) { @@ -3955,7 +5017,7 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) if (!dhdpcie_is_resume_done(&dhd->pub)) { dhd_bus_stop_queue(dhd->pub.bus); } - dhd->pub.dhd_bus_busy_state &= 
~DHD_BUS_BUSY_IN_TX; + DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub); dhd_os_busbusy_wake(&dhd->pub); DHD_GENERAL_UNLOCK(&dhd->pub, flags); #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) @@ -3967,9 +5029,16 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) #endif /* DHD_PCIE_RUNTIMEPM */ DHD_GENERAL_LOCK(&dhd->pub, flags); + if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) { + DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n", + __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state)); + DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub); #ifdef PCIE_FULL_DONGLE - if (dhd->pub.busstate == DHD_BUS_SUSPEND) { - dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + /* Stop tx queues if suspend is in progress */ + if (DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(&dhd->pub)) { + dhd_bus_stop_queue(dhd->pub.bus); + } +#endif /* PCIE_FULL_DONGLE */ dhd_os_busbusy_wake(&dhd->pub); DHD_GENERAL_UNLOCK(&dhd->pub, flags); #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) @@ -3978,14 +5047,19 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) return NETDEV_TX_BUSY; #endif } -#endif /* PCIE_FULL_DONGLE */ DHD_OS_WAKE_LOCK(&dhd->pub); DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); + +#if defined(DHD_HANG_SEND_UP_TEST) + if (dhd->pub.req_hang_type == HANG_REASON_BUS_DOWN) { + dhd->pub.busstate = DHD_BUS_DOWN; + } +#endif /* DHD_HANG_SEND_UP_TEST */ + /* Reject if down */ - if (dhd->pub.hang_was_sent || dhd->pub.busstate == DHD_BUS_DOWN || - dhd->pub.busstate == DHD_BUS_DOWN_IN_PROGRESS) { + if (dhd->pub.hang_was_sent || DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub)) { DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n", __FUNCTION__, dhd->pub.up, dhd->pub.busstate)); netif_stop_queue(net); @@ -3995,13 +5069,11 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) dhd->pub.hang_reason = HANG_REASON_BUS_DOWN; net_os_send_hang_message(net); } -#ifdef PCIE_FULL_DONGLE - dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + 
DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub); dhd_os_busbusy_wake(&dhd->pub); -#endif /* PCIE_FULL_DONGLE */ + DHD_GENERAL_UNLOCK(&dhd->pub, flags); DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); DHD_OS_WAKE_UNLOCK(&dhd->pub); - DHD_GENERAL_UNLOCK(&dhd->pub, flags); #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) return -ENODEV; #else @@ -4011,24 +5083,21 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) ifp = DHD_DEV_IFP(net); ifidx = DHD_DEV_IFIDX(net); - BUZZZ_LOG(START_XMIT_BGN, 2, (uint32)ifidx, (uintptr)skb); - if (ifidx == DHD_BAD_IF) { DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx)); netif_stop_queue(net); -#ifdef PCIE_FULL_DONGLE - dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub); dhd_os_busbusy_wake(&dhd->pub); -#endif /* PCIE_FULL_DONGLE */ + DHD_GENERAL_UNLOCK(&dhd->pub, flags); DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); DHD_OS_WAKE_UNLOCK(&dhd->pub); - DHD_GENERAL_UNLOCK(&dhd->pub, flags); #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) return -ENODEV; #else return NETDEV_TX_BUSY; #endif } + DHD_GENERAL_UNLOCK(&dhd->pub, flags); ASSERT(ifidx == dhd_net2idx(dhd, net)); @@ -4089,6 +5158,19 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) } } #endif +#ifdef DHD_WET + /* wet related packet proto manipulation should be done in DHD + since dongle doesn't have complete payload + */ + if (WET_ENABLED(&dhd->pub) && + (dhd_wet_send_proc(dhd->pub.wet_info, pktbuf, &pktbuf) < 0)) { + DHD_INFO(("%s:%s: wet send proc failed\n", + __FUNCTION__, dhd_ifname(&dhd->pub, ifidx))); + PKTFREE(dhd->pub.osh, pktbuf, FALSE); + ret = -EFAULT; + goto done; + } +#endif /* DHD_WET */ #ifdef DHD_WMF eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf); @@ -4116,9 +5198,7 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) #endif /* DHD_IGMP_UCQUERY */ if (ucast_convert) { dhd_sta_t *sta; -#ifdef PCIE_FULL_DONGLE unsigned long flags; -#endif struct list_head snapshot_list; 
struct list_head *wmf_ucforward_list; @@ -4131,6 +5211,10 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) /* Convert upnp/igmp query to unicast for each assoc STA */ list_for_each_entry(sta, wmf_ucforward_list, list) { + /* Skip sending to proxy interfaces of proxySTA */ + if (sta->psta_prim != NULL && !ifp->wmf_psta_disable) { + continue; + } if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) { ret = WMF_NOP; break; @@ -4139,12 +5223,10 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) } DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, wmf_ucforward_list); -#ifdef PCIE_FULL_DONGLE DHD_GENERAL_LOCK(&dhd->pub, flags); - dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub); dhd_os_busbusy_wake(&dhd->pub); DHD_GENERAL_UNLOCK(&dhd->pub, flags); -#endif /* PCIE_FULL_DONGLE */ DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); DHD_OS_WAKE_UNLOCK(&dhd->pub); @@ -4165,12 +5247,11 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) /* Either taken by WMF or we should drop it. * Exiting send path */ -#ifdef PCIE_FULL_DONGLE + DHD_GENERAL_LOCK(&dhd->pub, flags); - dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub); dhd_os_busbusy_wake(&dhd->pub); DHD_GENERAL_UNLOCK(&dhd->pub, flags); -#endif /* PCIE_FULL_DONGLE */ DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); DHD_OS_WAKE_UNLOCK(&dhd->pub); return NETDEV_TX_OK; @@ -4208,19 +5289,21 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) } #endif /* DHDTCPACK_SUPPRESS */ - /* no segmented SKB support (Kernel-3.18.y) */ - if ((PKTLINK(skb) != NULL) && (PKTLINK(skb) == skb)) { - PKTSETLINK(skb, NULL); - } - + /* + * If Load Balance is enabled queue the packet + * else send directly from here. 
+ */ +#if defined(DHD_LB_TXP) + ret = dhd_lb_sendpkt(dhd, net, ifidx, pktbuf); +#else ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf); +#endif done: if (ret) { ifp->stats.tx_dropped++; dhd->pub.tx_dropped++; } else { - #ifdef PROP_TXSTATUS /* tx_packets counter can counted only when wlfc is disabled */ if (!dhd_wlfc_is_supported(&dhd->pub)) @@ -4232,17 +5315,13 @@ done: } } -#ifdef PCIE_FULL_DONGLE + DHD_GENERAL_LOCK(&dhd->pub, flags); - dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub); dhd_os_busbusy_wake(&dhd->pub); DHD_GENERAL_UNLOCK(&dhd->pub, flags); -#endif /* PCIE_FULL_DONGLE */ - DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); DHD_OS_WAKE_UNLOCK(&dhd->pub); - BUZZZ_LOG(START_XMIT_END, 0); - /* Return ok: we always eat the packet */ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) return 0; @@ -4304,6 +5383,204 @@ dhd_is_rxthread_enabled(dhd_pub_t *dhdp) } #endif /* DHD_WMF */ +#ifdef DHD_MCAST_REGEN +/* + * Description: This function is called to do the reverse translation + * + * Input eh - pointer to the ethernet header + */ +int32 +dhd_mcast_reverse_translation(struct ether_header *eh) +{ + uint8 *iph; + uint32 dest_ip; + + iph = (uint8 *)eh + ETHER_HDR_LEN; + dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET))); + + /* Only IP packets are handled */ + if (eh->ether_type != hton16(ETHER_TYPE_IP)) + return BCME_ERROR; + + /* Non-IPv4 multicast packets are not handled */ + if (IP_VER(iph) != IP_VER_4) + return BCME_ERROR; + + /* + * The packet has a multicast IP and unicast MAC. 
That means + * we have to do the reverse translation + */ + if (IPV4_ISMULTI(dest_ip) && !ETHER_ISMULTI(&eh->ether_dhost)) { + ETHER_FILL_MCAST_ADDR_FROM_IP(eh->ether_dhost, dest_ip); + return BCME_OK; + } + + return BCME_ERROR; +} +#endif /* MCAST_REGEN */ + +#ifdef SHOW_LOGTRACE +static int +dhd_event_logtrace_pkt_process(dhd_pub_t *dhdp, struct sk_buff * skb) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + int ret = BCME_OK; + uint datalen; + bcm_event_msg_u_t evu; + void *data = NULL; + void *pktdata = NULL; + bcm_event_t *pvt_data; + uint pktlen; + + DHD_TRACE(("%s:Enter\n", __FUNCTION__)); + + /* In dhd_rx_frame, header is stripped using skb_pull + * of size ETH_HLEN, so adjust pktlen accordingly + */ + pktlen = skb->len + ETH_HLEN; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) + pktdata = (void *)skb_mac_header(skb); +#else + pktdata = (void *)skb->mac.raw; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */ + + ret = wl_host_event_get_data(pktdata, pktlen, &evu); + + if (ret != BCME_OK) { + DHD_ERROR(("%s: wl_host_event_get_data err = %d\n", + __FUNCTION__, ret)); + goto exit; + } + + datalen = ntoh32(evu.event.datalen); + + pvt_data = (bcm_event_t *)pktdata; + data = &pvt_data[1]; + + dhd_dbg_trace_evnt_handler(dhdp, data, &dhd->event_data, datalen); + +exit: + return ret; +} + +static void +dhd_event_logtrace_process(struct work_struct * work) +{ +/* Ignore compiler warnings due to -Werror=cast-qual */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + struct dhd_info *dhd = + container_of(work, struct dhd_info, event_log_dispatcher_work); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + + dhd_pub_t *dhdp; + struct sk_buff *skb; + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + dhdp = &dhd->pub; + + if (!dhdp) { + DHD_ERROR(("%s: dhd pub is null \n", 
__FUNCTION__)); + return; + } + + DHD_TRACE(("%s:Enter\n", __FUNCTION__)); + + /* Run while(1) loop till all skbs are dequeued */ + while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) { +#ifdef PCIE_FULL_DONGLE + int ifid; + ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb)); + if (ifid == DHD_EVENT_IF) { + dhd_event_logtrace_infobuf_pkt_process(dhdp, skb, &dhd->event_data); + /* For sending skb to network layer, convert it to Native PKT + * after that assign skb->dev with Primary interface n/w device + * as for infobuf events, we are sending special DHD_EVENT_IF + */ +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhdp->osh, skb, FALSE); +#else + PKTFREE(dhdp->osh, skb, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + continue; + } + else { + dhd_event_logtrace_pkt_process(dhdp, skb); + } +#else + dhd_event_logtrace_pkt_process(dhdp, skb); +#endif /* PCIE_FULL_DONGLE */ + + /* Free skb buffer here if DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT + * macro is defined the Info Ring event and WLC_E_TRACE event is freed in DHD + * else it is always sent up to network layers. 
+ */ +#ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhdp->osh, skb, FALSE); +#else + PKTFREE(dhdp->osh, skb, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ +#else /* !DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */ + /* Do not call netif_recieve_skb as this workqueue scheduler is not from NAPI + * Also as we are not in INTR context, do not call netif_rx, instead call + * netif_rx_ni (for kerenl >= 2.6) which does netif_rx, disables irq, raise + * NET_IF_RX softirq and enables interrupts back + */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) + netif_rx_ni(skb); +#else + { + ulong flags; + netif_rx(skb); + local_irq_save(flags); + RAISE_RX_SOFTIRQ(); + local_irq_restore(flags); + } +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ +#endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */ + } +} + +void +dhd_event_logtrace_enqueue(dhd_pub_t *dhdp, int ifidx, void *pktbuf) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + +#ifdef PCIE_FULL_DONGLE + /* Add ifidx in the PKTTAG */ + DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pktbuf), ifidx); +#endif /* PCIE_FULL_DONGLE */ + skb_queue_tail(&dhd->evt_trace_queue, pktbuf); + + schedule_work(&dhd->event_log_dispatcher_work); +} + +void +dhd_event_logtrace_flush_queue(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + struct sk_buff *skb; + + while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) { +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhdp->osh, skb, FALSE); +#else + PKTFREE(dhdp->osh, skb, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + } +} +#endif /* SHOW_LOGTRACE */ + /** Called when a frame is received by the dongle on interface 'ifidx' */ void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) @@ -4320,11 +5597,20 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) int tout_ctrl = 0; void *skbhead = NULL; void *skbprev = NULL; -#if defined(DHD_RX_DUMP) || 
defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP) - char *dump_data; uint16 protocol; - char *ifname; -#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */ +#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP) || \ + defined(DHD_ICMP_DUMP) || defined(DHD_WAKE_STATUS) + unsigned char *dump_data; +#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP || DHD_ICMP_DUMP || DHD_WAKE_STATUS */ +#ifdef DHD_MCAST_REGEN + uint8 interface_role; + if_flow_lkup_t *if_flow_lkup; + unsigned long flags; +#endif +#ifdef DHD_WAKE_STATUS + int pkt_wake = 0; + wake_counts_t *wcp = NULL; +#endif /* DHD_WAKE_STATUS */ DHD_TRACE(("%s: Enter\n", __FUNCTION__)); @@ -4334,6 +5620,44 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) pnext = PKTNEXT(dhdp->osh, pktbuf); PKTSETNEXT(dhdp->osh, pktbuf, NULL); + /* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a + * special ifidx of DHD_EVENT_IF. This is just internal to dhd to get the data from + * dhd_msgbuf.c:dhd_prot_infobuf_cmplt_process() to here (dhd_rx_frame). + */ + if (ifidx == DHD_EVENT_IF) { + /* Event msg printing is called from dhd_rx_frame which is in Tasklet + * context in case of PCIe FD, in case of other bus this will be from + * DPC context. If we get bunch of events from Dongle then printing all + * of them from Tasklet/DPC context that too in data path is costly. + * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as + * events with type WLC_E_TRACE. 
+ * We'll print this console logs from the WorkQueue context by enqueing SKB + * here and Dequeuing will be done in WorkQueue and will be freed only if + * DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT is defined + */ +#ifdef SHOW_LOGTRACE + dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf); +#else /* !SHOW_LOGTRACE */ + /* If SHOW_LOGTRACE not defined and ifidx is DHD_EVENT_IF, + * free the PKT here itself + */ +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE); +#else + PKTFREE(dhdp->osh, pktbuf, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ +#endif /* SHOW_LOGTRACE */ + continue; + } +#ifdef DHD_WAKE_STATUS + pkt_wake = dhd_bus_get_bus_wake(dhdp); + wcp = dhd_bus_get_wakecount(dhdp); + if (wcp == NULL) { + /* If wakeinfo count buffer is null do not update wake count values */ + pkt_wake = 0; + } +#endif /* DHD_WAKE_STATUS */ + ifp = dhd->iflist[ifidx]; if (ifp == NULL) { DHD_ERROR(("%s: ifp is NULL. drop packet\n", @@ -4359,7 +5683,6 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) continue; } - #ifdef PROP_TXSTATUS if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) { /* WLFC may send header only packet when @@ -4396,6 +5719,36 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) } } #endif /* DHD_L2_FILTER */ + +#ifdef DHD_MCAST_REGEN + DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); + if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; + ASSERT(if_flow_lkup); + + interface_role = if_flow_lkup[ifidx].role; + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + + if (ifp->mcast_regen_bss_enable && (interface_role != WLC_E_IF_ROLE_WDS) && + !DHD_IF_ROLE_AP(dhdp, ifidx) && + ETHER_ISUCAST(eh->ether_dhost)) { + if (dhd_mcast_reverse_translation(eh) == BCME_OK) { +#ifdef DHD_PSTA + /* Change bsscfg to primary bsscfg for unicast-multicast packets */ + if ((dhd_get_psta_mode(dhdp) == DHD_MODE_PSTA) || + (dhd_get_psta_mode(dhdp) == DHD_MODE_PSR)) { + if (ifidx != 0) { + /* Let the primary in PSTA interface 
handle this + * frame after unicast to Multicast conversion + */ + ifp = dhd_get_ifp(dhdp, 0); + ASSERT(ifp); + } + } + } +#endif /* PSTA */ + } +#endif /* MCAST_REGEN */ + #ifdef DHD_WMF /* WMF processing for multicast packets */ if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) { @@ -4428,6 +5781,16 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) ASSERT(ifp); skb->dev = ifp->net; +#ifdef DHD_WET + /* wet related packet proto manipulation should be done in DHD + * since dongle doesn't have complete payload + */ + if (WET_ENABLED(&dhd->pub) && (dhd_wet_recv_proc(dhd->pub.wet_info, + pktbuf) < 0)) { + DHD_INFO(("%s:%s: wet recv proc failed\n", + __FUNCTION__, dhd_ifname(dhdp, ifidx))); + } +#endif /* DHD_WET */ #ifdef DHD_PSTA if (PSR_ENABLED(dhdp) && (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) { @@ -4465,49 +5828,30 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) eth = skb->data; len = skb->len; -#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP) +#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP) || \ + defined(DHD_ICMP_DUMP) || defined(DHD_WAKE_STATUS) dump_data = skb->data; - protocol = (dump_data[12] << 8) | dump_data[13]; - ifname = skb->dev ? 
skb->dev->name : "N/A"; -#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */ -#ifdef DHD_8021X_DUMP - if (protocol == ETHER_TYPE_802_1X) { - dhd_dump_eapol_4way_message(ifname, dump_data, FALSE); - } -#endif /* DHD_8021X_DUMP */ -#ifdef DHD_DHCP_DUMP - if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) { - uint16 dump_hex; - uint16 source_port; - uint16 dest_port; - uint16 udp_port_pos; - uint8 *ptr8 = (uint8 *)&dump_data[ETHER_HDR_LEN]; - uint8 ip_header_len = (*ptr8 & 0x0f)<<2; +#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP || DHD_ICMP_DUMP || DHD_WAKE_STATUS */ - udp_port_pos = ETHER_HDR_LEN + ip_header_len; - source_port = (dump_data[udp_port_pos] << 8) | dump_data[udp_port_pos+1]; - dest_port = (dump_data[udp_port_pos+2] << 8) | dump_data[udp_port_pos+3]; - if (source_port == 0x0044 || dest_port == 0x0044) { - dump_hex = (dump_data[udp_port_pos+249] << 8) | - dump_data[udp_port_pos+250]; - if (dump_hex == 0x0101) { - DHD_ERROR(("DHCP[%s] - DISCOVER [RX]\n", ifname)); - } else if (dump_hex == 0x0102) { - DHD_ERROR(("DHCP[%s] - OFFER [RX]\n", ifname)); - } else if (dump_hex == 0x0103) { - DHD_ERROR(("DHCP[%s] - REQUEST [RX]\n", ifname)); - } else if (dump_hex == 0x0105) { - DHD_ERROR(("DHCP[%s] - ACK [RX]\n", ifname)); - } else { - DHD_ERROR(("DHCP[%s] - 0x%X [RX]\n", ifname, dump_hex)); - } - } else if (source_port == 0x0043 || dest_port == 0x0043) { - DHD_ERROR(("DHCP[%s] - BOOTP [RX]\n", ifname)); - } + protocol = (skb->data[12] << 8) | skb->data[13]; + if (protocol == ETHER_TYPE_802_1X) { + DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED); +#ifdef DHD_8021X_DUMP + dhd_dump_eapol_4way_message(dhd_ifname(dhdp, ifidx), dump_data, FALSE); +#endif /* DHD_8021X_DUMP */ } + + if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) { +#ifdef DHD_DHCP_DUMP + dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE); #endif /* DHD_DHCP_DUMP */ -#if defined(DHD_RX_DUMP) - DHD_ERROR(("RX DUMP[%s] - %s\n", ifname, 
_get_packet_type_str(protocol))); +#ifdef DHD_ICMP_DUMP + dhd_icmp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE); +#endif /* DHD_ICMP_DUMP */ + } +#ifdef DHD_RX_DUMP + DHD_ERROR(("RX DUMP[%s] - %s\n", + dhd_ifname(dhdp, ifidx), _get_packet_type_str(protocol))); if (protocol != ETHER_TYPE_BRCM) { if (dump_data[0] == 0xFF) { DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__)); @@ -4534,6 +5878,11 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) #endif /* DHD_RX_FULL_DUMP */ } #endif /* DHD_RX_DUMP */ +#if defined(DHD_WAKE_STATUS) && defined(DHD_WAKEPKT_DUMP) + if (pkt_wake) { + prhex("[wakepkt_dump]", (char*)dump_data, MIN(len, 32)); + } +#endif /* DHD_WAKE_STATUS && DHD_WAKEPKT_DUMP */ skb->protocol = eth_type_trans(skb, skb->dev); @@ -4548,31 +5897,110 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) #ifdef WLMEDIA_HTSF dhd_htsf_addrxts(dhdp, pktbuf); #endif +#ifdef DBG_PKT_MON + DHD_DBG_PKT_MON_RX(dhdp, skb); +#endif /* DBG_PKT_MON */ +#ifdef DHD_PKT_LOGGING + DHD_PKTLOG_RX(dhdp, skb); +#endif /* DHD_PKT_LOGGING */ /* Strip header, count, deliver upward */ skb_pull(skb, ETH_HLEN); /* Process special event packets and then discard them */ memset(&event, 0, sizeof(event)); + if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) { - dhd_wl_host_event(dhd, &ifidx, + bcm_event_msg_u_t evu; + int ret_event; + int event_type; + + ret_event = wl_host_event_get_data( #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) skb_mac_header(skb), #else skb->mac.raw, #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */ - &event, - &data); + len, &evu); + + if (ret_event != BCME_OK) { + DHD_ERROR(("%s: wl_host_event_get_data err = %d\n", + __FUNCTION__, ret_event)); +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE); +#else + PKTFREE(dhdp->osh, pktbuf, FALSE); +#endif + continue; + } + + memcpy(&event, &evu.event, sizeof(wl_event_msg_t)); + event_type = ntoh32_ua((void *)&event.event_type); +#ifdef 
SHOW_LOGTRACE + /* Event msg printing is called from dhd_rx_frame which is in Tasklet + * context in case of PCIe FD, in case of other bus this will be from + * DPC context. If we get bunch of events from Dongle then printing all + * of them from Tasklet/DPC context that too in data path is costly. + * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as + * events with type WLC_E_TRACE. + * We'll print this console logs from the WorkQueue context by enqueing SKB + * here and Dequeuing will be done in WorkQueue and will be freed only if + * DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT is defined + */ + if (event_type == WLC_E_TRACE) { + DHD_TRACE(("%s: WLC_E_TRACE\n", __FUNCTION__)); + dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf); + continue; + } +#endif /* SHOW_LOGTRACE */ + + ret_event = dhd_wl_host_event(dhd, ifidx, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) + skb_mac_header(skb), +#else + skb->mac.raw, +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */ + len, &event, &data); wl_event_to_host_order(&event); if (!tout_ctrl) tout_ctrl = DHD_PACKET_TIMEOUT_MS; #if defined(PNO_SUPPORT) - if (event.event_type == WLC_E_PFN_NET_FOUND) { + if (event_type == WLC_E_PFN_NET_FOUND) { /* enforce custom wake lock to garantee that Kernel not suspended */ tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS; } #endif /* PNO_SUPPORT */ + if (numpkt != 1) { + DHD_TRACE(("%s: Got BRCM event packet in a chained packet.\n", + __FUNCTION__)); + } + +#ifdef DHD_WAKE_STATUS + if (unlikely(pkt_wake)) { +#ifdef DHD_WAKE_EVENT_STATUS + if (event.event_type < WLC_E_LAST) { + wcp->rc_event[event.event_type]++; + wcp->rcwake++; + pkt_wake = 0; + } +#endif /* DHD_WAKE_EVENT_STATUS */ + } +#endif /* DHD_WAKE_STATUS */ + + /* For delete virtual interface event, wl_host_event returns positive + * i/f index, do not proceed. just free the pkt. + */ + if ((event_type == WLC_E_IF) && (ret_event > 0)) { + DHD_ERROR(("%s: interface is deleted. 
Free event packet\n", + __FUNCTION__)); +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE); +#else + PKTFREE(dhdp->osh, pktbuf, FALSE); +#endif + continue; + } #ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT #ifdef DHD_USE_STATIC_CTRLBUF @@ -4581,6 +6009,30 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) PKTFREE(dhdp->osh, pktbuf, FALSE); #endif /* DHD_USE_STATIC_CTRLBUF */ continue; +#else + /* + * For the event packets, there is a possibility + * of ifidx getting modifed.Thus update the ifp + * once again. + */ + ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]); + ifp = dhd->iflist[ifidx]; +#ifndef PROP_TXSTATUS_VSDB + if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED))) +#else + if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED) && + dhd->pub.up)) +#endif /* PROP_TXSTATUS_VSDB */ + { + DHD_ERROR(("%s: net device is NOT registered. drop event packet\n", + __FUNCTION__)); +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE); +#else + PKTFREE(dhdp->osh, pktbuf, FALSE); +#endif + continue; + } #endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */ } else { tout_rx = DHD_PACKET_TIMEOUT_MS; @@ -4588,10 +6040,58 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) #ifdef PROP_TXSTATUS dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb)); #endif /* PROP_TXSTATUS */ - } - ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]); - ifp = dhd->iflist[ifidx]; +#ifdef DHD_WAKE_STATUS + if (unlikely(pkt_wake)) { + wcp->rxwake++; +#ifdef DHD_WAKE_RX_STATUS +#define ETHER_ICMP6_HEADER 20 +#define ETHER_IPV6_SADDR (ETHER_ICMP6_HEADER + 2) +#define ETHER_IPV6_DAADR (ETHER_IPV6_SADDR + IPV6_ADDR_LEN) +#define ETHER_ICMPV6_TYPE (ETHER_IPV6_DAADR + IPV6_ADDR_LEN) + + if (ntoh16(skb->protocol) == ETHER_TYPE_ARP) /* ARP */ + wcp->rx_arp++; + if (dump_data[0] == 0xFF) { /* Broadcast */ + wcp->rx_bcast++; + } else if (dump_data[0] & 0x01) { /* 
Multicast */ + wcp->rx_mcast++; + if (ntoh16(skb->protocol) == ETHER_TYPE_IPV6) { + wcp->rx_multi_ipv6++; + if ((skb->len > ETHER_ICMP6_HEADER) && + (dump_data[ETHER_ICMP6_HEADER] == IPPROTO_ICMPV6)) { + wcp->rx_icmpv6++; + if (skb->len > ETHER_ICMPV6_TYPE) { + switch (dump_data[ETHER_ICMPV6_TYPE]) { + case NDISC_ROUTER_ADVERTISEMENT: + wcp->rx_icmpv6_ra++; + break; + case NDISC_NEIGHBOUR_ADVERTISEMENT: + wcp->rx_icmpv6_na++; + break; + case NDISC_NEIGHBOUR_SOLICITATION: + wcp->rx_icmpv6_ns++; + break; + } + } + } + } else if (dump_data[2] == 0x5E) { + wcp->rx_multi_ipv4++; + } else { + wcp->rx_multi_other++; + } + } else { /* Unicast */ + wcp->rx_ucast++; + } +#undef ETHER_ICMP6_HEADER +#undef ETHER_IPV6_SADDR +#undef ETHER_IPV6_DAADR +#undef ETHER_ICMPV6_TYPE +#endif /* DHD_WAKE_RX_STATUS */ + pkt_wake = 0; + } +#endif /* DHD_WAKE_STATUS */ + } if (ifp->net) ifp->net->last_rx = jiffies; @@ -4607,11 +6107,11 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__); DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); -#if defined(DHD_LB) && defined(DHD_LB_RXP) +#if defined(DHD_LB_RXP) netif_receive_skb(skb); -#else +#else /* !defined(DHD_LB_RXP) */ netif_rx(skb); -#endif /* !defined(DHD_LB) && !defined(DHD_LB_RXP) */ +#endif /* !defined(DHD_LB_RXP) */ DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); } else { if (dhd->rxthread_enabled) { @@ -4631,11 +6131,11 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__); -#if defined(DHD_LB) && defined(DHD_LB_RXP) +#if defined(DHD_LB_RXP) DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); netif_receive_skb(skb); DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); -#else +#else /* !defined(DHD_LB_RXP) */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); 
netif_rx_ni(skb); @@ -4649,7 +6149,7 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) RAISE_RX_SOFTIRQ(); local_irq_restore(flags); #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ -#endif /* !defined(DHD_LB) && !defined(DHD_LB_RXP) */ +#endif /* !defined(DHD_LB_RXP) */ } } } @@ -4659,7 +6159,6 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx); DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl); - DHD_OS_WAKE_LOCK_TIMEOUT(dhdp); } void @@ -4678,11 +6177,13 @@ dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success) dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL); + eh = (struct ether_header *)PKTDATA(dhdp->osh, txp); type = ntoh16(eh->ether_type); - if ((type == ETHER_TYPE_802_1X) && (dhd_get_pend_8021x_cnt(dhd) > 0)) + if ((type == ETHER_TYPE_802_1X) && (dhd_get_pend_8021x_cnt(dhd) > 0)) { atomic_dec(&dhd->pend_8021x_cnt); + } #ifdef PROP_TXSTATUS if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) { @@ -4710,22 +6211,34 @@ dhd_get_stats(struct net_device *net) DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + if (!dhd) { + DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__)); + goto error; + } + ifidx = dhd_net2idx(dhd, net); if (ifidx == DHD_BAD_IF) { DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__)); - - memset(&net->stats, 0, sizeof(net->stats)); - return &net->stats; + goto error; } ifp = dhd->iflist[ifidx]; - ASSERT(dhd && ifp); + + if (!ifp) { + ASSERT(ifp); + DHD_ERROR(("%s: ifp is NULL\n", __FUNCTION__)); + goto error; + } if (dhd->pub.up) { /* Use the protocol to get dongle stats */ dhd_prot_dstats(&dhd->pub); } return &ifp->stats; + +error: + memset(&net->stats, 0, sizeof(net->stats)); + return &net->stats; } static int @@ -4748,8 +6261,8 @@ dhd_watchdog_thread(void *data) unsigned long flags; unsigned long jiffies_at_start = jiffies; unsigned long time_lapse; - DHD_OS_WD_WAKE_LOCK(&dhd->pub); + SMP_RD_BARRIER_DEPENDS(); if 
(tsk->terminated) { break; @@ -4759,6 +6272,11 @@ dhd_watchdog_thread(void *data) DHD_TIMER(("%s:\n", __FUNCTION__)); dhd_bus_watchdog(&dhd->pub); +#ifdef DHD_TIMESYNC + /* Call the timesync module watchdog */ + dhd_timesync_watchdog(&dhd->pub); +#endif /* DHD_TIMESYNC */ + DHD_GENERAL_LOCK(&dhd->pub, flags); /* Count the tick for reference */ dhd->pub.tickcnt++; @@ -4794,11 +6312,6 @@ static void dhd_watchdog(ulong data) return; } - if (dhd->pub.busstate == DHD_BUS_SUSPEND) { - DHD_ERROR(("%s wd while suspend in progress \n", __FUNCTION__)); - return; - } - if (dhd->thr_wdt_ctl.thr_pid >= 0) { up(&dhd->thr_wdt_ctl.sema); return; @@ -4807,6 +6320,12 @@ static void dhd_watchdog(ulong data) DHD_OS_WD_WAKE_LOCK(&dhd->pub); /* Call the bus module watchdog */ dhd_bus_watchdog(&dhd->pub); + +#ifdef DHD_TIMESYNC + /* Call the timesync module watchdog */ + dhd_timesync_watchdog(&dhd->pub); +#endif /* DHD_TIMESYNC */ + DHD_GENERAL_LOCK(&dhd->pub, flags); /* Count the tick for reference */ dhd->pub.tickcnt++; @@ -4889,8 +6408,10 @@ void dhd_runtime_pm_disable(dhd_pub_t *dhdp) void dhd_runtime_pm_enable(dhd_pub_t *dhdp) { - dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms); - DHD_ERROR(("DHD Runtime PM Enabled \n")); + if (dhd_get_idletime(dhdp)) { + dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms); + DHD_ERROR(("DHD Runtime PM Enabled \n")); + } } #endif /* DHD_PCIE_RUNTIMEPM */ @@ -5023,16 +6544,6 @@ dhd_rxf_thread(void *data) setScheduler(current, SCHED_FIFO, ¶m); } - DAEMONIZE("dhd_rxf"); - /* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below */ - -#ifdef CUSTOM_RXF_CPUCORE - /* change rxf thread to other cpu core */ - set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_RXF_CPUCORE)); -#endif - - /* signal: thread has started */ - complete(&tsk->completed); #ifdef CUSTOM_SET_CPUCORE dhd->pub.current_rxf = current; #endif /* CUSTOM_SET_CPUCORE */ @@ -5096,31 +6607,24 @@ dhd_rxf_thread(void *data) #ifdef BCMPCIE void dhd_dpc_enable(dhd_pub_t *dhdp) { +#if 
defined(DHD_LB_RXP) || defined(DHD_LB_TXP) dhd_info_t *dhd; if (!dhdp || !dhdp->info) return; dhd = dhdp->info; +#endif /* DHD_LB_RXP || DHD_LB_TXP */ -#ifdef DHD_LB #ifdef DHD_LB_RXP __skb_queue_head_init(&dhd->rx_pend_queue); #endif /* DHD_LB_RXP */ -#ifdef DHD_LB_TXC - if (atomic_read(&dhd->tx_compl_tasklet.count) == 1) - tasklet_enable(&dhd->tx_compl_tasklet); -#endif /* DHD_LB_TXC */ -#ifdef DHD_LB_RXC - if (atomic_read(&dhd->rx_compl_tasklet.count) == 1) - tasklet_enable(&dhd->rx_compl_tasklet); -#endif /* DHD_LB_RXC */ -#endif /* DHD_LB */ - if (atomic_read(&dhd->tasklet.count) == 1) - tasklet_enable(&dhd->tasklet); + +#ifdef DHD_LB_TXP + skb_queue_head_init(&dhd->tx_pend_queue); +#endif /* DHD_LB_TXP */ } #endif /* BCMPCIE */ - #ifdef BCMPCIE void dhd_dpc_kill(dhd_pub_t *dhdp) @@ -5138,25 +6642,52 @@ dhd_dpc_kill(dhd_pub_t *dhdp) } if (dhd->thr_dpc_ctl.thr_pid < 0) { - tasklet_disable(&dhd->tasklet); tasklet_kill(&dhd->tasklet); DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__)); } -#if defined(DHD_LB) + +#ifdef DHD_LB #ifdef DHD_LB_RXP + cancel_work_sync(&dhd->rx_napi_dispatcher_work); __skb_queue_purge(&dhd->rx_pend_queue); #endif /* DHD_LB_RXP */ +#ifdef DHD_LB_TXP + cancel_work_sync(&dhd->tx_dispatcher_work); + skb_queue_purge(&dhd->tx_pend_queue); +#endif /* DHD_LB_TXP */ + /* Kill the Load Balancing Tasklets */ #if defined(DHD_LB_TXC) - tasklet_disable(&dhd->tx_compl_tasklet); tasklet_kill(&dhd->tx_compl_tasklet); #endif /* DHD_LB_TXC */ #if defined(DHD_LB_RXC) - tasklet_disable(&dhd->rx_compl_tasklet); tasklet_kill(&dhd->rx_compl_tasklet); #endif /* DHD_LB_RXC */ +#if defined(DHD_LB_TXP) + tasklet_kill(&dhd->tx_tasklet); +#endif /* DHD_LB_TXP */ #endif /* DHD_LB */ } + +void +dhd_dpc_tasklet_kill(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + + if (!dhdp) { + return; + } + + dhd = dhdp->info; + + if (!dhd) { + return; + } + + if (dhd->thr_dpc_ctl.thr_pid < 0) { + tasklet_kill(&dhd->tasklet); + } +} #endif /* BCMPCIE */ static void @@ -5172,8 +6703,10 @@ 
dhd_dpc(ulong data) */ /* Call bus dpc unless it indicated down (then clean stop) */ if (dhd->pub.busstate != DHD_BUS_DOWN) { +#if defined(DHD_LB_STATS) && defined(PCIE_FULL_DONGLE) + DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt); +#endif /* DHD_LB_STATS && PCIE_FULL_DONGLE */ if (dhd_bus_dpc(dhd->pub.bus)) { - DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt); tasklet_schedule(&dhd->tasklet); } } else { @@ -5258,24 +6791,15 @@ dhd_sched_rxf(dhd_pub_t *dhdp, void *skb) static int dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol) { - wl_ioctl_t ioc; char buf[32]; int ret; - memset(&ioc, 0, sizeof(ioc)); + ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", NULL, 0, (char *)&buf, sizeof(buf), FALSE); - ioc.cmd = WLC_GET_VAR; - ioc.buf = buf; - ioc.len = (uint)sizeof(buf); - ioc.set = FALSE; - - strncpy(buf, "toe_ol", sizeof(buf) - 1); - buf[sizeof(buf) - 1] = '\0'; - if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { - /* Check for older dongle image that doesn't support toe_ol */ + if (ret < 0) { if (ret == -EIO) { - DHD_ERROR(("%s: toe not supported by device\n", - dhd_ifname(&dhd->pub, ifidx))); + DHD_ERROR(("%s: toe not supported by device\n", dhd_ifname(&dhd->pub, + ifidx))); return -EOPNOTSUPP; } @@ -5291,37 +6815,20 @@ dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol) static int dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol) { - wl_ioctl_t ioc; - char buf[32]; int toe, ret; - memset(&ioc, 0, sizeof(ioc)); - - ioc.cmd = WLC_SET_VAR; - ioc.buf = buf; - ioc.len = (uint)sizeof(buf); - ioc.set = TRUE; - /* Set toe_ol as requested */ - - strncpy(buf, "toe_ol", sizeof(buf) - 1); - buf[sizeof(buf) - 1] = '\0'; - memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32)); - - if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { + ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", (char *)&toe_ol, sizeof(toe_ol), NULL, 0, TRUE); + if (ret < 0) { DHD_ERROR(("%s: could not set toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret)); return ret; } /* 
Enable toe globally only if any components are enabled. */ - toe = (toe_ol != 0); - - strcpy(buf, "toe"); - memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32)); - - if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { + ret = dhd_iovar(&dhd->pub, ifidx, "toe", (char *)&toe, sizeof(toe), NULL, 0, TRUE); + if (ret < 0) { DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret)); return ret; } @@ -5333,29 +6840,24 @@ dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol) #if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE) void dhd_set_scb_probe(dhd_pub_t *dhd) { - int ret = 0; wl_scb_probe_t scb_probe; - char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)]; - - memset(&scb_probe, 0, sizeof(wl_scb_probe_t)); + int ret; if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { return; } - bcm_mkiovar("scb_probe", NULL, 0, iovbuf, sizeof(iovbuf)); - - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "scb_probe", NULL, 0, + (char *)&scb_probe, sizeof(scb_probe), FALSE); + if (ret < 0) { DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__)); } - memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t)); - scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE; - bcm_mkiovar("scb_probe", (char *)&scb_probe, - sizeof(wl_scb_probe_t), iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "scb_probe", (char *)&scb_probe, sizeof(scb_probe), + NULL, 0, TRUE); + if (ret < 0) { DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__)); return; } @@ -5492,8 +6994,6 @@ dhd_ethtool(dhd_info_t *dhd, void *uaddr) static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error) { - dhd_info_t *dhd; - if (!dhdp) { DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); return FALSE; @@ -5502,9 +7002,8 @@ static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error) if (!dhdp->up) 
return FALSE; - dhd = (dhd_info_t *)dhdp->info; #if !defined(BCMPCIE) - if (dhd->thr_dpc_ctl.thr_pid < 0) { + if (dhdp->info->thr_dpc_ctl.thr_pid < 0) { DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__)); return FALSE; } @@ -5537,33 +7036,361 @@ static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error) return FALSE; } +#ifdef WL_MONITOR +bool +dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx) +{ + return (dhd->info->monitor_type != 0); +} + +void +dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; +#ifdef HOST_RADIOTAP_CONV + uint16 len = 0, offset = 0; + monitor_pkt_info_t pkt_info; + memcpy(&pkt_info.marker, &msg->marker, sizeof(msg->marker)); + memcpy(&pkt_info.ts, &msg->ts, sizeof(monitor_pkt_ts_t)); + + if (!dhd->monitor_skb) { + if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE)) == NULL) + return; + } + + len = bcmwifi_monitor(dhd->monitor_info, &pkt_info, PKTDATA(dhdp->osh, pkt), + PKTLEN(dhdp->osh, pkt), PKTDATA(dhdp->osh, dhd->monitor_skb), &offset); + + if (dhd->monitor_type && dhd->monitor_dev) + dhd->monitor_skb->dev = dhd->monitor_dev; + else { + PKTFREE(dhdp->osh, pkt, FALSE); + dev_kfree_skb(dhd->monitor_skb); + return; + } + + PKTFREE(dhdp->osh, pkt, FALSE); + + if (!len) { + return; + } + + skb_put(dhd->monitor_skb, len); + skb_pull(dhd->monitor_skb, offset); + + dhd->monitor_skb->protocol = eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev); +#else + uint8 amsdu_flag = (msg->flags & BCMPCIE_PKT_FLAGS_MONITOR_MASK) >> + BCMPCIE_PKT_FLAGS_MONITOR_SHIFT; + switch (amsdu_flag) { + case BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU: + default: + if (!dhd->monitor_skb) { + if ((dhd->monitor_skb = PKTTONATIVE(dhdp->osh, pkt)) == NULL) + return; + } + + if (dhd->monitor_type && dhd->monitor_dev) + dhd->monitor_skb->dev = dhd->monitor_dev; + else { + PKTFREE(dhdp->osh, pkt, FALSE); + dhd->monitor_skb = NULL; + return; + } + + 
dhd->monitor_skb->protocol = + eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev); + dhd->monitor_len = 0; + break; + case BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT: + if (!dhd->monitor_skb) { + if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE)) == NULL) + return; + dhd->monitor_len = 0; + } + if (dhd->monitor_type && dhd->monitor_dev) + dhd->monitor_skb->dev = dhd->monitor_dev; + else { + PKTFREE(dhdp->osh, pkt, FALSE); + dev_kfree_skb(dhd->monitor_skb); + return; + } + memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb), + PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt)); + + dhd->monitor_len = PKTLEN(dhdp->osh, pkt); + PKTFREE(dhdp->osh, pkt, FALSE); + return; + case BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT: + memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len, + PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt)); + dhd->monitor_len += PKTLEN(dhdp->osh, pkt); + + PKTFREE(dhdp->osh, pkt, FALSE); + return; + case BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT: + memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len, + PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt)); + dhd->monitor_len += PKTLEN(dhdp->osh, pkt); + + PKTFREE(dhdp->osh, pkt, FALSE); + skb_put(dhd->monitor_skb, dhd->monitor_len); + dhd->monitor_skb->protocol = + eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev); + dhd->monitor_len = 0; + break; + } + +#endif /* HOST_RADIOTAP_CONV */ + if (in_interrupt()) { + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + netif_rx(dhd->monitor_skb); + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + } else { + /* If the receive is not processed inside an ISR, + * the softirqd must be woken explicitly to service + * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled + * by netif_rx_ni(), but in earlier kernels, we need + * to do it manually. 
+ */ + bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + netif_rx_ni(dhd->monitor_skb); + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); +#else + ulong flags; + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + netif_rx(dhd->monitor_skb); + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + local_irq_save(flags); + RAISE_RX_SOFTIRQ(); + local_irq_restore(flags); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ + } + + dhd->monitor_skb = NULL; +} + +typedef struct dhd_mon_dev_priv { + struct net_device_stats stats; +} dhd_mon_dev_priv_t; + +#define DHD_MON_DEV_PRIV_SIZE (sizeof(dhd_mon_dev_priv_t)) +#define DHD_MON_DEV_PRIV(dev) ((dhd_mon_dev_priv_t *)DEV_PRIV(dev)) +#define DHD_MON_DEV_STATS(dev) (((dhd_mon_dev_priv_t *)DEV_PRIV(dev))->stats) + +static int +dhd_monitor_start(struct sk_buff *skb, struct net_device *dev) +{ + PKTFREE(NULL, skb, FALSE); + return 0; +} + +static int +dhd_monitor_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + return 0; +} + +static struct net_device_stats* +dhd_monitor_get_stats(struct net_device *dev) +{ + return &DHD_MON_DEV_STATS(dev); +} + +static const struct net_device_ops netdev_monitor_ops = +{ + .ndo_start_xmit = dhd_monitor_start, + .ndo_get_stats = dhd_monitor_get_stats, + .ndo_do_ioctl = dhd_monitor_ioctl +}; + +static void +dhd_add_monitor_if(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + struct net_device *dev; + char *devname; + + if (event != DHD_WQ_WORK_IF_ADD) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + dev = alloc_etherdev(DHD_MON_DEV_PRIV_SIZE); + if (!dev) { + DHD_ERROR(("%s: alloc wlif failed\n", __FUNCTION__)); + return; + } + + devname = "radiotap"; + + 
snprintf(dev->name, sizeof(dev->name), "%s%u", devname, dhd->unit); + +#ifndef ARPHRD_IEEE80211_PRISM /* From Linux 2.4.18 */ +#define ARPHRD_IEEE80211_PRISM 802 +#endif + +#ifndef ARPHRD_IEEE80211_RADIOTAP +#define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */ +#endif /* ARPHRD_IEEE80211_RADIOTAP */ + + dev->type = ARPHRD_IEEE80211_RADIOTAP; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) + dev->hard_start_xmit = dhd_monitor_start; + dev->do_ioctl = dhd_monitor_ioctl; + dev->get_stats = dhd_monitor_get_stats; +#else + dev->netdev_ops = &netdev_monitor_ops; +#endif + + if (register_netdev(dev)) { + DHD_ERROR(("%s, register_netdev failed for %s\n", + __FUNCTION__, dev->name)); + free_netdev(dev); + } + + bcmwifi_monitor_create(&dhd->monitor_info); + dhd->monitor_dev = dev; +} + +static void +dhd_del_monitor_if(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + + if (event != DHD_WQ_WORK_IF_DEL) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + if (dhd->monitor_dev) { + unregister_netdev(dhd->monitor_dev); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) + MFREE(dhd->osh, dhd->monitor_dev->priv, DHD_MON_DEV_PRIV_SIZE); + MFREE(dhd->osh, dhd->monitor_dev, sizeof(struct net_device)); +#else + free_netdev(dhd->monitor_dev); +#endif /* 2.6.24 */ + + dhd->monitor_dev = NULL; + } + + if (dhd->monitor_info) { + bcmwifi_monitor_delete(dhd->monitor_info); + dhd->monitor_info = NULL; + } +} + +static void +dhd_set_monitor(dhd_pub_t *dhd, int ifidx, int val) +{ + dhd_info_t *info = dhd->info; + + DHD_TRACE(("%s: val %d\n", __FUNCTION__, val)); + if ((val && info->monitor_dev) || (!val && !info->monitor_dev)) { + DHD_ERROR(("%s: Mismatched params, return\n", __FUNCTION__)); + return; + } + + /* Delete monitor */ + if (!val) { + info->monitor_type = val; + dhd_deferred_schedule_work(info->dhd_deferred_wq, 
NULL, DHD_WQ_WORK_IF_DEL, + dhd_del_monitor_if, DHD_WQ_WORK_PRIORITY_LOW); + return; + } + + /* Add monitor */ + info->monitor_type = val; + dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL, DHD_WQ_WORK_IF_ADD, + dhd_add_monitor_if, DHD_WQ_WORK_PRIORITY_LOW); +} +#endif /* WL_MONITOR */ + int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf) { int bcmerror = BCME_OK; int buflen = 0; struct net_device *net; +#ifdef REPORT_FATAL_TIMEOUTS + if (ioc->cmd == WLC_SET_WPA_AUTH) { + int wpa_auth; + + wpa_auth = *((int *)ioc->buf); + DHD_INFO(("wpa_auth:%d\n", wpa_auth)); + if (wpa_auth != WPA_AUTH_DISABLED) { + /* If AP is with security then enable WLC_E_PSK_SUP event checking */ + dhd_set_join_error(pub, WLC_WPA_MASK); + } else { + /* If AP is with open then disable WLC_E_PSK_SUP event checking */ + dhd_clear_join_error(pub, WLC_WPA_MASK); + } + } + + if (ioc->cmd == WLC_SET_AUTH) { + int auth; + auth = *((int *)ioc->buf); + DHD_INFO(("Auth:%d\n", auth)); + + if (auth != WL_AUTH_OPEN_SYSTEM) { + /* If AP is with security then enable WLC_E_PSK_SUP event checking */ + dhd_set_join_error(pub, WLC_WPA_MASK); + } else { + /* If AP is with open then disable WLC_E_PSK_SUP event checking */ + dhd_clear_join_error(pub, WLC_WPA_MASK); + } + } +#endif /* REPORT_FATAL_TIMEOUTS */ net = dhd_idx2net(pub, ifidx); if (!net) { bcmerror = BCME_BADARG; goto done; } - if (data_buf) - buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN); - /* check for local dhd ioctl and handle it */ if (ioc->driver == DHD_IOCTL_MAGIC) { + /* This is a DHD IOVAR, truncate buflen to DHD_IOCTL_MAXLEN */ + if (data_buf) + buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN); bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen); if (bcmerror) pub->bcmerror = bcmerror; goto done; } + /* This is a WL IOVAR, truncate buflen to WLC_IOCTL_MAXLEN */ + if (data_buf) + buflen = MIN(ioc->len, WLC_IOCTL_MAXLEN); + /* send to dongle (must be up, and wl). 
*/ if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) { - if (allow_delay_fwdl) { - int ret = dhd_bus_start(pub); + if ((!pub->dongle_trap_occured) && allow_delay_fwdl) { + int ret; + if (atomic_read(&exit_in_progress)) { + DHD_ERROR(("%s module exit in progress\n", __func__)); + bcmerror = BCME_DONGLE_DOWN; + goto done; + } + ret = dhd_bus_start(pub); if (ret != 0) { DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret)); bcmerror = BCME_DONGLE_DOWN; @@ -5654,30 +7481,24 @@ int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_bu #endif goto done; } - -#ifdef DHD_DEBUG - if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION) { - if (ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) { - /* Print IOVAR Information */ - DHD_IOV_INFO(("%s: IOVAR_INFO name = %s set = %d\n", - __FUNCTION__, (char *)data_buf, ioc->set)); - if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) { - prhex(NULL, data_buf + strlen(data_buf) + 1, - buflen - strlen(data_buf) - 1); - } - } else { - /* Print IOCTL Information */ - DHD_IOV_INFO(("%s: IOCTL_INFO cmd = %d set = %d\n", - __FUNCTION__, ioc->cmd, ioc->set)); - if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) { - prhex(NULL, data_buf, buflen); - } - } - } -#endif /* DHD_DEBUG */ - bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen); +#ifdef WL_MONITOR + /* Intercept monitor ioctl here, add/del monitor if */ + if (bcmerror == BCME_OK && ioc->cmd == WLC_SET_MONITOR) { + dhd_set_monitor(pub, ifidx, *(int32*)data_buf); + } +#endif + +#ifdef REPORT_FATAL_TIMEOUTS + if (ioc->cmd == WLC_SCAN && bcmerror == 0) { + dhd_start_scan_timer(pub); + } + if (ioc->cmd == WLC_SET_SSID && bcmerror == 0) { + dhd_start_join_timer(pub); + } +#endif /* REPORT_FATAL_TIMEOUTS */ + done: dhd_check_hang(net, pub, bcmerror); @@ -5689,27 +7510,28 @@ dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) { dhd_info_t *dhd = DHD_DEV_INFO(net); dhd_ioctl_t 
ioc; + int bcmerror = 0; int ifidx; int ret; void *local_buf = NULL; + void __user *ioc_buf_user = NULL; u16 buflen = 0; + if (atomic_read(&exit_in_progress)) { + DHD_ERROR(("%s module exit in progress\n", __func__)); + bcmerror = BCME_DONGLE_DOWN; + return OSL_ERROR(bcmerror); + } + DHD_OS_WAKE_LOCK(&dhd->pub); DHD_PERIM_LOCK(&dhd->pub); /* Interface up check for built-in type */ if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) { DHD_ERROR(("%s: Interface is down \n", __FUNCTION__)); - ret = BCME_NOTUP; - goto exit; - } - - /* send to dongle only if we are not waiting for reload already */ - if (dhd->pub.hang_was_sent) { - DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__)); - DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS); - ret = BCME_DONGLE_DOWN; - goto exit; + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return OSL_ERROR(BCME_NOTUP); } ifidx = dhd_net2idx(dhd, net); @@ -5717,8 +7539,9 @@ dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) if (ifidx == DHD_BAD_IF) { DHD_ERROR(("%s: BAD IF\n", __FUNCTION__)); - ret = -1; - goto exit; + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return -1; } #if defined(WL_WIRELESS_EXT) @@ -5726,26 +7549,33 @@ dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) { /* may recurse, do NOT lock */ ret = wl_iw_ioctl(net, ifr, cmd); - goto exit; + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return ret; } #endif /* defined(WL_WIRELESS_EXT) */ #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) if (cmd == SIOCETHTOOL) { ret = dhd_ethtool(dhd, (void*)ifr->ifr_data); - goto exit; + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return ret; } #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */ if (cmd == SIOCDEVPRIVATE+1) { ret = wl_android_priv_cmd(net, ifr, cmd); dhd_check_hang(net, &dhd->pub, ret); - goto exit; + 
DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return ret; } if (cmd != SIOCDEVPRIVATE) { - ret = -EOPNOTSUPP; - goto exit; + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return -EOPNOTSUPP; } memset(&ioc, 0, sizeof(ioc)); @@ -5759,7 +7589,7 @@ dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) { compat_wl_ioctl_t compat_ioc; if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) { - ret = BCME_BADADDR; + bcmerror = BCME_BADADDR; goto done; } ioc.cmd = compat_ioc.cmd; @@ -5767,7 +7597,7 @@ dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) memset(&ioc, 0, sizeof(ioc)); /* Copy the ioc control structure part of ioctl request */ if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) { - ret = BCME_BADADDR; + bcmerror = BCME_BADADDR; goto done; } ioc.cmd &= ~WLC_SPEC_FLAG; /* Clear the FLAG */ @@ -5775,7 +7605,7 @@ dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) /* To differentiate between wl and dhd read 4 more byes */ if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t), sizeof(uint)) != 0)) { - ret = BCME_BADADDR; + bcmerror = BCME_BADADDR; goto done; } @@ -5788,7 +7618,7 @@ dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) /* To differentiate between wl and dhd read 4 more byes */ if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t), sizeof(uint)) != 0)) { - ret = BCME_BADADDR; + bcmerror = BCME_BADADDR; goto done; } } /* ioc.cmd & WLC_SPEC_FLAG */ @@ -5797,49 +7627,70 @@ dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) { /* Copy the ioc control structure part of ioctl request */ if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) { - ret = BCME_BADADDR; + bcmerror = BCME_BADADDR; goto done; } #ifdef CONFIG_COMPAT ioc.cmd &= ~WLC_SPEC_FLAG; /* make sure it was clear when it isn't a compat task*/ #endif + /* To differentiate between wl and dhd read 4 
more byes */ if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t), sizeof(uint)) != 0)) { - ret = BCME_BADADDR; + bcmerror = BCME_BADADDR; goto done; } } if (!capable(CAP_NET_ADMIN)) { - ret = BCME_EPERM; + bcmerror = BCME_EPERM; goto done; } + /* Take backup of ioc.buf and restore later */ + ioc_buf_user = ioc.buf; + if (ioc.len > 0) { buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN); if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) { - ret = BCME_NOMEM; + bcmerror = BCME_NOMEM; goto done; } DHD_PERIM_UNLOCK(&dhd->pub); if (copy_from_user(local_buf, ioc.buf, buflen)) { DHD_PERIM_LOCK(&dhd->pub); - ret = BCME_BADADDR; + bcmerror = BCME_BADADDR; goto done; } DHD_PERIM_LOCK(&dhd->pub); - *(char *)(local_buf + buflen) = '\0'; + *((char *)local_buf + buflen) = '\0'; + + /* For some platforms accessing userspace memory + * of ioc.buf is causing kernel panic, so to avoid that + * make ioc.buf pointing to kernel space memory local_buf + */ + ioc.buf = local_buf; } - ret = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf); + /* Skip all the non DHD iovars (wl iovars) after f/w hang */ + if (ioc.driver != DHD_IOCTL_MAGIC && dhd->pub.hang_was_sent) { + DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__)); + DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS); + bcmerror = BCME_DONGLE_DOWN; + goto done; + } - if (!ret && buflen && local_buf && ioc.buf) { + bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf); + + /* Restore back userspace pointer to ioc.buf */ + ioc.buf = ioc_buf_user; + + if (!bcmerror && buflen && local_buf && ioc.buf) { DHD_PERIM_UNLOCK(&dhd->pub); if (copy_to_user(ioc.buf, local_buf, buflen)) - ret = -EFAULT; + bcmerror = -EFAULT; DHD_PERIM_LOCK(&dhd->pub); } @@ -5847,11 +7698,10 @@ done: if (local_buf) MFREE(dhd->pub.osh, local_buf, buflen+1); -exit: DHD_PERIM_UNLOCK(&dhd->pub); DHD_OS_WAKE_UNLOCK(&dhd->pub); - return OSL_ERROR(ret); + return OSL_ERROR(bcmerror); } @@ -5911,6 +7761,219 @@ static void 
dhd_rollback_cpu_freq(dhd_info_t *dhd) } #endif /* FIX_CPU_MIN_CLOCK */ +#if defined(BT_OVER_SDIO) + +void +dhdsdio_bus_usr_cnt_inc(dhd_pub_t *dhdp) +{ + dhdp->info->bus_user_count++; +} + +void +dhdsdio_bus_usr_cnt_dec(dhd_pub_t *dhdp) +{ + dhdp->info->bus_user_count--; +} + +/* Return values: + * Success: Returns 0 + * Failure: Returns -1 or errono code + */ +int +dhd_bus_get(wlan_bt_handle_t handle, bus_owner_t owner) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + int ret = 0; + + mutex_lock(&dhd->bus_user_lock); + ++dhd->bus_user_count; + if (dhd->bus_user_count < 0) { + DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__)); + ret = -1; + goto exit; + } + + if (dhd->bus_user_count == 1) { + + dhd->pub.hang_was_sent = 0; + + /* First user, turn on WL_REG, start the bus */ + DHD_ERROR(("%s(): First user Turn On WL_REG & start the bus", __FUNCTION__)); + + if (!wifi_platform_set_power(dhd->adapter, TRUE, WIFI_TURNON_DELAY)) { + /* Enable F1 */ + ret = dhd_bus_resume(dhdp, 0); + if (ret) { + DHD_ERROR(("%s(): Failed to enable F1, err=%d\n", + __FUNCTION__, ret)); + goto exit; + } + } + + dhd_update_fw_nv_path(dhd); + /* update firmware and nvram path to sdio bus */ + dhd_bus_update_fw_nv_path(dhd->pub.bus, + dhd->fw_path, dhd->nv_path); + /* download the firmware, Enable F2 */ + /* TODO: Should be done only in case of FW switch */ + ret = dhd_bus_devreset(dhdp, FALSE); + dhd_bus_resume(dhdp, 1); + if (!ret) { + if (dhd_sync_with_dongle(&dhd->pub) < 0) { + DHD_ERROR(("%s(): Sync with dongle failed!!\n", __FUNCTION__)); + ret = -EFAULT; + } + } else { + DHD_ERROR(("%s(): Failed to download, err=%d\n", __FUNCTION__, ret)); + } + } else { + DHD_ERROR(("%s(): BUS is already acquired, just increase the count %d \r\n", + __FUNCTION__, dhd->bus_user_count)); + } +exit: + mutex_unlock(&dhd->bus_user_lock); + return ret; +} +EXPORT_SYMBOL(dhd_bus_get); + +/* Return values: + * Success: Returns 0 + * 
Failure: Returns -1 or errono code + */ +int +dhd_bus_put(wlan_bt_handle_t handle, bus_owner_t owner) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + int ret = 0; + BCM_REFERENCE(owner); + + mutex_lock(&dhd->bus_user_lock); + --dhd->bus_user_count; + if (dhd->bus_user_count < 0) { + DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__)); + dhd->bus_user_count = 0; + ret = -1; + goto exit; + } + + if (dhd->bus_user_count == 0) { + /* Last user, stop the bus and turn Off WL_REG */ + DHD_ERROR(("%s(): There are no owners left Trunf Off WL_REG & stop the bus \r\n", + __FUNCTION__)); +#ifdef PROP_TXSTATUS + if (dhd->pub.wlfc_enabled) { + dhd_wlfc_deinit(&dhd->pub); + } +#endif /* PROP_TXSTATUS */ +#ifdef PNO_SUPPORT + if (dhd->pub.pno_state) { + dhd_pno_deinit(&dhd->pub); + } +#endif /* PNO_SUPPORT */ +#ifdef RTT_SUPPORT + if (dhd->pub.rtt_state) { + dhd_rtt_deinit(&dhd->pub); + } +#endif /* RTT_SUPPORT */ + ret = dhd_bus_devreset(dhdp, TRUE); + if (!ret) { + dhd_bus_suspend(dhdp); + wifi_platform_set_power(dhd->adapter, FALSE, WIFI_TURNOFF_DELAY); + } + } else { + DHD_ERROR(("%s(): Other owners using bus, decrease the count %d \r\n", + __FUNCTION__, dhd->bus_user_count)); + } +exit: + mutex_unlock(&dhd->bus_user_lock); + return ret; +} +EXPORT_SYMBOL(dhd_bus_put); + +int +dhd_net_bus_get(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return dhd_bus_get(&dhd->pub, WLAN_MODULE); +} + +int +dhd_net_bus_put(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return dhd_bus_put(&dhd->pub, WLAN_MODULE); +} + +/* + * Function to enable the Bus Clock + * Returns BCME_OK on success and BCME_xxx on failure + * + * This function is not callable from non-sleepable context + */ +int dhd_bus_clk_enable(wlan_bt_handle_t handle, bus_owner_t owner) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + + int ret; + + dhd_os_sdlock(dhdp); + /* + * The second argument is TRUE, that means, 
we expect + * the function to "wait" until the clocks are really + * available + */ + ret = __dhdsdio_clk_enable(dhdp->bus, owner, TRUE); + dhd_os_sdunlock(dhdp); + + return ret; +} +EXPORT_SYMBOL(dhd_bus_clk_enable); + +/* + * Function to disable the Bus Clock + * Returns BCME_OK on success and BCME_xxx on failure + * + * This function is not callable from non-sleepable context + */ +int dhd_bus_clk_disable(wlan_bt_handle_t handle, bus_owner_t owner) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + + int ret; + + dhd_os_sdlock(dhdp); + /* + * The second argument is TRUE, that means, we expect + * the function to "wait" until the clocks are really + * disabled + */ + ret = __dhdsdio_clk_disable(dhdp->bus, owner, TRUE); + dhd_os_sdunlock(dhdp); + + return ret; +} +EXPORT_SYMBOL(dhd_bus_clk_disable); + +/* + * Function to reset bt_use_count counter to zero. + * + * This function is not callable from non-sleepable context + */ +void dhd_bus_reset_bt_use_count(wlan_bt_handle_t handle) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + + /* take the lock and reset bt use count */ + dhd_os_sdlock(dhdp); + dhdsdio_reset_bt_use_count(dhdp->bus); + dhd_os_sdunlock(dhdp); +} +EXPORT_SYMBOL(dhd_bus_reset_bt_use_count); + +#endif /* BT_OVER_SDIO */ + #define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */ int dhd_deepsleep(dhd_info_t *dhd, int flag) { @@ -5987,6 +8050,9 @@ static int dhd_stop(struct net_device *net) { int ifidx = 0; +#ifdef WL_CFG80211 + unsigned long flags = 0; +#endif /* WL_CFG80211 */ dhd_info_t *dhd = DHD_DEV_INFO(net); DHD_OS_WAKE_LOCK(&dhd->pub); DHD_PERIM_LOCK(&dhd->pub); @@ -6001,6 +8067,13 @@ dhd_stop(struct net_device *net) if (dhd->pub.up == 0) { goto exit; } +#if defined(DHD_HANG_SEND_UP_TEST) + if (dhd->pub.req_hang_type) { + DHD_ERROR(("%s, Clear HANG test request 0x%x\n", + __FUNCTION__, dhd->pub.req_hang_type)); + dhd->pub.req_hang_type = 0; + } +#endif /* DHD_HANG_SEND_UP_TEST */ dhd_if_flush_sta(DHD_DEV_IFP(net)); @@ -6017,12 +8090,18 @@ 
dhd_stop(struct net_device *net) /* Set state and stop OS transmissions */ netif_stop_queue(net); +#ifdef WL_CFG80211 + spin_lock_irqsave(&dhd->pub.up_lock, flags); dhd->pub.up = 0; + spin_unlock_irqrestore(&dhd->pub.up_lock, flags); +#else + dhd->pub.up = 0; +#endif /* WL_CFG80211 */ #ifdef WL_CFG80211 if (ifidx == 0) { dhd_if_t *ifp; - wl_cfg80211_down(NULL); + wl_cfg80211_down(net); ifp = dhd->iflist[0]; ASSERT(ifp && ifp->net); @@ -6034,9 +8113,8 @@ dhd_stop(struct net_device *net) if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) && (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) { int i; - #ifdef WL_CFG80211_P2P_DEV_IF - wl_cfg80211_del_p2p_wdev(); + wl_cfg80211_del_p2p_wdev(net); #endif /* WL_CFG80211_P2P_DEV_IF */ dhd_net_if_lock_local(dhd); @@ -6046,7 +8124,6 @@ dhd_stop(struct net_device *net) if (ifp && ifp->net) { dhd_if_del_sta_list(ifp); } - #ifdef ARP_OFFLOAD_SUPPORT if (dhd_inetaddr_notifier_registered) { dhd_inetaddr_notifier_registered = FALSE; @@ -6065,15 +8142,26 @@ dhd_stop(struct net_device *net) // terence 20161024: remove this to prevent dev_close() get stuck in dhd_hang_process cancel_work_sync(dhd->dhd_deferred_wq); #endif -#if defined(DHD_LB) && defined(DHD_LB_RXP) + +#ifdef SHOW_LOGTRACE + /* Wait till event_log_dispatcher_work finishes */ + cancel_work_sync(&dhd->event_log_dispatcher_work); +#endif /* SHOW_LOGTRACE */ + +#if defined(DHD_LB_RXP) __skb_queue_purge(&dhd->rx_pend_queue); -#endif /* DHD_LB && DHD_LB_RXP */ +#endif /* DHD_LB_RXP */ + +#if defined(DHD_LB_TXP) + skb_queue_purge(&dhd->tx_pend_queue); +#endif /* DHD_LB_TXP */ } -#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS) + argos_register_notifier_deinit(); +#ifdef DHDTCPACK_SUPPRESS dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF); -#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */ -#if defined(DHD_LB) && defined(DHD_LB_RXP) +#endif /* DHDTCPACK_SUPPRESS */ +#if defined(DHD_LB_RXP) if (ifp->net == dhd->rx_napi_netdev) { DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n", 
__FUNCTION__, &dhd->rx_napi_struct, net, net->name)); @@ -6082,20 +8170,58 @@ dhd_stop(struct net_device *net) netif_napi_del(&dhd->rx_napi_struct); dhd->rx_napi_netdev = NULL; } -#endif /* DHD_LB && DHD_LB_RXP */ - +#endif /* DHD_LB_RXP */ } #endif /* WL_CFG80211 */ + DHD_SSSR_DUMP_DEINIT(&dhd->pub); + #ifdef PROP_TXSTATUS dhd_wlfc_cleanup(&dhd->pub, NULL, 0); #endif +#ifdef SHOW_LOGTRACE + if (!dhd_download_fw_on_driverload) { + /* Release the skbs from queue for WLC_E_TRACE event */ + dhd_event_logtrace_flush_queue(&dhd->pub); + if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) { + if (dhd->event_data.fmts) { + MFREE(dhd->pub.osh, dhd->event_data.fmts, + dhd->event_data.fmts_size); + dhd->event_data.fmts = NULL; + } + if (dhd->event_data.raw_fmts) { + MFREE(dhd->pub.osh, dhd->event_data.raw_fmts, + dhd->event_data.raw_fmts_size); + dhd->event_data.raw_fmts = NULL; + } + if (dhd->event_data.raw_sstr) { + MFREE(dhd->pub.osh, dhd->event_data.raw_sstr, + dhd->event_data.raw_sstr_size); + dhd->event_data.raw_sstr = NULL; + } + if (dhd->event_data.rom_raw_sstr) { + MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr, + dhd->event_data.rom_raw_sstr_size); + dhd->event_data.rom_raw_sstr = NULL; + } + dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT; + } + } +#endif /* SHOW_LOGTRACE */ +#ifdef APF + dhd_dev_apf_delete_filter(net); +#endif /* APF */ + /* Stop the protocol module */ dhd_prot_stop(&dhd->pub); OLD_MOD_DEC_USE_COUNT; exit: if (ifidx == 0 && !dhd_download_fw_on_driverload) { +#if defined(BT_OVER_SDIO) + dhd_bus_put(&dhd->pub, WLAN_MODULE); + wl_android_set_wifi_on_flag(FALSE); +#else wl_android_wifi_off(net, TRUE); #ifdef WL_EXT_IAPSTA wl_android_ext_dettach_netdev(); @@ -6103,6 +8229,7 @@ exit: } else { if (dhd->pub.conf->deepsleep) dhd_deepsleep(dhd, 1); +#endif /* BT_OVER_SDIO */ } dhd->pub.hang_was_sent = 0; @@ -6114,7 +8241,7 @@ exit: } #ifdef BCMDBGFS - dhd_dbg_remove(); + dhd_dbgfs_remove(); #endif DHD_PERIM_UNLOCK(&dhd->pub); @@ -6138,26 +8265,14 @@ extern bool 
g_first_broadcast_scan; #ifdef WL11U static int dhd_interworking_enable(dhd_pub_t *dhd) { - char iovbuf[WLC_IOCTL_SMLEN]; uint32 enable = true; int ret = BCME_OK; - bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf)); - ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + ret = dhd_iovar(dhd, 0, "interworking", (char *)&enable, sizeof(enable), NULL, 0, TRUE); if (ret < 0) { DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__, ret)); } - if (ret == BCME_OK) { - /* basic capabilities for HS20 REL2 */ - uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF; - bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf)); - ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); - if (ret < 0) { - DHD_ERROR(("%s: set wnm returned (%d)\n", __FUNCTION__, ret)); - } - } - return ret; } #endif /* WL11u */ @@ -6184,9 +8299,11 @@ dhd_open(struct net_device *net) wifi_adapter_info_t *adapter = NULL; #endif - if (!dhd_download_fw_on_driverload && !dhd_driver_init_done) { - DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__)); - return -1; + if (!dhd_download_fw_on_driverload) { + if (!dhd_driver_init_done) { + DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__)); + return -1; + } } printf("%s: Enter %p\n", __FUNCTION__, net); @@ -6199,13 +8316,28 @@ dhd_open(struct net_device *net) #endif #endif /* MULTIPLE_SUPPLICANT */ /* Init wakelock */ - if (!dhd_download_fw_on_driverload && - !(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { - DHD_OS_WAKE_LOCK_INIT(dhd); - dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT; + if (!dhd_download_fw_on_driverload) { + if (!(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + DHD_OS_WAKE_LOCK_INIT(dhd); + dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT; + } +#ifdef SHOW_LOGTRACE + skb_queue_head_init(&dhd->evt_trace_queue); + + if (!(dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT)) { + ret = 
dhd_init_logstrs_array(dhd->pub.osh, &dhd->event_data); + if (ret == BCME_OK) { + dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data, + st_str_file_path, map_file_path); + dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data, + rom_st_str_file_path, rom_map_file_path); + dhd->dhd_state |= DHD_ATTACH_LOGTRACE_INIT; + } + } +#endif /* SHOW_LOGTRACE */ } -#ifdef PREVENT_REOPEN_DURING_HANG +#if defined(PREVENT_REOPEN_DURING_HANG) /* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */ if (dhd->pub.hang_was_sent == 1) { DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__)); @@ -6227,6 +8359,11 @@ dhd_open(struct net_device *net) dhd->pub.dongle_trap_occured = 0; dhd->pub.hang_was_sent = 0; dhd->pub.hang_reason = 0; + dhd->pub.iovar_timeout_occured = 0; +#ifdef PCIE_FULL_DONGLE + dhd->pub.d3ack_timeout_occured = 0; +#endif /* PCIE_FULL_DONGLE */ + #ifdef DHD_LOSSLESS_ROAMING dhd->pub.dequeue_prec_map = ALLPRIO; #endif @@ -6266,7 +8403,12 @@ dhd_open(struct net_device *net) #if defined(USE_INITIAL_SHORT_DWELL_TIME) g_first_broadcast_scan = TRUE; #endif +#if defined(BT_OVER_SDIO) + ret = dhd_bus_get(&dhd->pub, WLAN_MODULE); + wl_android_set_wifi_on_flag(TRUE); +#else ret = wl_android_wifi_on(net); +#endif /* BT_OVER_SDIO */ if (ret != 0) { DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n", __FUNCTION__, ret)); @@ -6349,6 +8491,14 @@ dhd_open(struct net_device *net) } #endif /* BCM_FD_AGGR */ +#ifdef BT_OVER_SDIO + if (dhd->pub.is_bt_recovery_required) { + DHD_ERROR(("%s: Send Hang Notification 2 to BT\n", __FUNCTION__)); + bcmsdh_btsdio_process_dhd_hang_notification(TRUE); + } + dhd->pub.is_bt_recovery_required = FALSE; +#endif + /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */ memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN); @@ -6361,8 +8511,28 @@ dhd_open(struct net_device *net) } #endif /* TOE */ +#if defined(DHD_LB_RXP) + __skb_queue_head_init(&dhd->rx_pend_queue); + if 
(dhd->rx_napi_netdev == NULL) { + dhd->rx_napi_netdev = dhd->iflist[ifidx]->net; + memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct)); + netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct, + dhd_napi_poll, dhd_napi_weight); + DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n", + __FUNCTION__, &dhd->rx_napi_struct, net, net->name)); + napi_enable(&dhd->rx_napi_struct); + DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__)); + skb_queue_head_init(&dhd->rx_napi_queue); + } /* rx_napi_netdev == NULL */ +#endif /* DHD_LB_RXP */ + +#if defined(DHD_LB_TXP) + /* Use the variant that uses locks */ + skb_queue_head_init(&dhd->tx_pend_queue); +#endif /* DHD_LB_TXP */ + #if defined(WL_CFG80211) - if (unlikely(wl_cfg80211_up(NULL))) { + if (unlikely(wl_cfg80211_up(net))) { DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__)); ret = -1; goto exit; @@ -6381,34 +8551,12 @@ dhd_open(struct net_device *net) register_inet6addr_notifier(&dhd_inet6addr_notifier); } #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ -#ifdef DHD_LB - DHD_LB_STATS_INIT(&dhd->pub); -#ifdef DHD_LB_RXP - __skb_queue_head_init(&dhd->rx_pend_queue); -#endif /* DHD_LB_RXP */ -#endif /* DHD_LB */ } -#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS) -#if defined(SET_RPS_CPUS) - dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF); -#else - dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD); -#endif -#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */ -#if defined(DHD_LB) && defined(DHD_LB_RXP) - if (dhd->rx_napi_netdev == NULL) { - dhd->rx_napi_netdev = dhd->iflist[ifidx]->net; - memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct)); - netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct, - dhd_napi_poll, dhd_napi_weight); - DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n", - __FUNCTION__, &dhd->rx_napi_struct, net, net->name)); - napi_enable(&dhd->rx_napi_struct); - DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__)); - skb_queue_head_init(&dhd->rx_napi_queue); - } 
-#endif /* DHD_LB && DHD_LB_RXP */ + argos_register_notifier_init(net); +#if defined(DHDTCPACK_SUPPRESS) + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DEFAULT); +#endif /* DHDTCPACK_SUPPRESS */ #if defined(NUM_SCB_MAX_PROBE) dhd_set_scb_probe(&dhd->pub); #endif /* NUM_SCB_MAX_PROBE */ @@ -6422,7 +8570,7 @@ dhd_open(struct net_device *net) OLD_MOD_INC_USE_COUNT; #ifdef BCMDBGFS - dhd_dbg_init(&dhd->pub); + dhd_dbgfs_init(&dhd->pub); #endif exit: @@ -6484,7 +8632,8 @@ dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, ui { #ifdef WL_CFG80211 - if (wl_cfg80211_notify_ifadd(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK) + if (wl_cfg80211_notify_ifadd(dhd_linux_get_primary_netdev(&dhdinfo->pub), + ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK) return BCME_OK; #endif @@ -6506,7 +8655,7 @@ dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, ui strncpy(if_event->name, name, IFNAMSIZ); if_event->name[IFNAMSIZ - 1] = '\0'; dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, - DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WORK_PRIORITY_LOW); + DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WQ_WORK_PRIORITY_LOW); } return BCME_OK; @@ -6518,7 +8667,8 @@ dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, ui dhd_if_event_t *if_event; #ifdef WL_CFG80211 - if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK) + if (wl_cfg80211_notify_ifdel(dhd_linux_get_primary_netdev(&dhdinfo->pub), + ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK) return BCME_OK; #endif /* WL_CFG80211 */ @@ -6536,18 +8686,28 @@ dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, ui strncpy(if_event->name, name, IFNAMSIZ); if_event->name[IFNAMSIZ - 1] = '\0'; dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL, - dhd_ifdel_event_handler, DHD_WORK_PRIORITY_LOW); + dhd_ifdel_event_handler, 
DHD_WQ_WORK_PRIORITY_LOW); return BCME_OK; } +int +dhd_event_ifchange(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac) +{ +#ifdef WL_CFG80211 + wl_cfg80211_notify_ifchange(dhd_linux_get_primary_netdev(&dhdinfo->pub), + ifevent->ifidx, name, mac, ifevent->bssidx); +#endif /* WL_CFG80211 */ + return BCME_OK; +} + /* unregister and free the existing net_device interface (if any) in iflist and * allocate a new one. the slot is reused. this function does NOT register the * new interface to linux kernel. dhd_register_if does the job */ struct net_device* -dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name, - uint8 *mac, uint8 bssidx, bool need_rtnl_lock, char *dngl_name) +dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name, + uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name) { dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info; dhd_if_t *ifp; @@ -6557,7 +8717,17 @@ dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name, if (ifp != NULL) { if (ifp->net != NULL) { - DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name)); + DHD_ERROR(("%s: free existing IF %s ifidx:%d \n", + __FUNCTION__, ifp->net->name, ifidx)); + + if (ifidx == 0) { + /* For primary ifidx (0), there shouldn't be + * any netdev present already. 
+ */ + DHD_ERROR(("Primary ifidx populated already\n")); + ASSERT(0); + return NULL; + } dhd_dev_priv_clear(ifp->net); /* clear net_device private */ @@ -6587,6 +8757,12 @@ dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name, ifp->info = dhdinfo; ifp->idx = ifidx; ifp->bssidx = bssidx; +#ifdef DHD_MCAST_REGEN + ifp->mcast_regen_bss_enable = FALSE; +#endif + /* set to TRUE rx_pkt_chainable at alloc time */ + ifp->rx_pkt_chainable = TRUE; + if (mac != NULL) memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN); @@ -6620,7 +8796,7 @@ dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name, /* initialize the dongle provided if name */ if (dngl_name) strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ); - else + else if (name) strncpy(ifp->dngl_name, name, IFNAMSIZ); #ifdef PCIE_FULL_DONGLE @@ -6632,13 +8808,24 @@ dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name, #ifdef DHD_L2_FILTER ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh); ifp->parp_allnode = TRUE; -#endif +#endif /* DHD_L2_FILTER */ + + + DHD_CUMM_CTR_INIT(&ifp->cumm_ctr); + return ifp->net; fail: - if (ifp != NULL) { if (ifp->net != NULL) { +#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE) + if (ifp->net == dhdinfo->rx_napi_netdev) { + napi_disable(&dhdinfo->rx_napi_struct); + netif_napi_del(&dhdinfo->rx_napi_struct); + skb_queue_purge(&dhdinfo->rx_napi_queue); + dhdinfo->rx_napi_netdev = NULL; + } +#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */ dhd_dev_priv_clear(ifp->net); free_netdev(ifp->net); ifp->net = NULL; @@ -6646,7 +8833,6 @@ fail: MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp)); ifp = NULL; } - dhdinfo->iflist[ifidx] = NULL; return NULL; } @@ -6659,6 +8845,9 @@ dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock) { dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info; dhd_if_t *ifp; +#ifdef PCIE_FULL_DONGLE + if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdpub->if_flow_lkup; +#endif /* PCIE_FULL_DONGLE */ ifp = dhdinfo->iflist[ifidx]; @@ -6666,6 +8855,7 @@ dhd_remove_if(dhd_pub_t 
*dhdpub, int ifidx, bool need_rtnl_lock) if (ifp->net != NULL) { DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx)); + dhdinfo->iflist[ifidx] = NULL; /* in unregister_netdev case, the interface gets freed by net->destructor * (which is set to free_netdev) */ @@ -6679,18 +8869,12 @@ dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock) #if defined(SET_RPS_CPUS) custom_rps_map_clear(ifp->net->_rx); #endif /* SET_RPS_CPUS */ -#if defined(SET_RPS_CPUS) -#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE)) - dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF); -#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */ -#endif if (need_rtnl_lock) unregister_netdev(ifp->net); else unregister_netdevice(ifp->net); } ifp->net = NULL; - dhdinfo->iflist[ifidx] = NULL; } #ifdef DHD_WMF dhd_wmf_cleanup(dhdpub, ifidx); @@ -6702,7 +8886,15 @@ dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock) ifp->phnd_arp_table = NULL; #endif /* DHD_L2_FILTER */ + dhd_if_del_sta_list(ifp); +#ifdef PCIE_FULL_DONGLE + /* Delete flowrings of WDS interface */ + if (if_flow_lkup[ifidx].role == WLC_E_IF_ROLE_WDS) { + dhd_flow_rings_delete(dhdpub, ifidx); + } +#endif /* PCIE_FULL_DONGLE */ + DHD_CUMM_CTR_INIT(&ifp->cumm_ctr); MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp)); ifp = NULL; @@ -6711,6 +8903,7 @@ dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock) return BCME_OK; } + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) static struct net_device_ops dhd_ops_pri = { .ndo_open = dhd_open, @@ -6745,47 +8938,38 @@ extern void debugger_init(void *bus_handle); #ifdef SHOW_LOGTRACE -static char *logstrs_path = "/root/logstrs.bin"; -static char *st_str_file_path = "/root/rtecdc.bin"; -static char *map_file_path = "/root/rtecdc.map"; -static char *rom_st_str_file_path = "/root/roml.bin"; -static char *rom_map_file_path = "/root/roml.map"; +int +dhd_os_read_file(void *file, char *buf, uint32 size) +{ + struct file *filep = (struct file *)file; -#define 
BYTES_AHEAD_NUM 11 /* address in map file is before these many bytes */ -#define READ_NUM_BYTES 1000 /* read map file each time this No. of bytes */ -#define GO_BACK_FILE_POS_NUM_BYTES 100 /* set file pos back to cur pos */ -static char *ramstart_str = "text_start"; /* string in mapfile has addr ramstart */ -static char *rodata_start_str = "rodata_start"; /* string in mapfile has addr rodata start */ -static char *rodata_end_str = "rodata_end"; /* string in mapfile has addr rodata end */ -static char *ram_file_str = "rtecdc"; -static char *rom_file_str = "roml"; -#define RAMSTART_BIT 0x01 -#define RDSTART_BIT 0x02 -#define RDEND_BIT 0x04 -#define ALL_MAP_VAL (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT) + if (!file || !buf) + return -1; -module_param(logstrs_path, charp, S_IRUGO); -module_param(st_str_file_path, charp, S_IRUGO); -module_param(map_file_path, charp, S_IRUGO); -module_param(rom_st_str_file_path, charp, S_IRUGO); -module_param(rom_map_file_path, charp, S_IRUGO); + return vfs_read(filep, buf, size, &filep->f_pos); +} -static void -dhd_init_logstrs_array(dhd_event_log_t *temp) +int +dhd_os_seek_file(void *file, int64 offset) +{ + struct file *filep = (struct file *)file; + if (!file) + return -1; + + /* offset can be -ve */ + filep->f_pos = filep->f_pos + offset; + + return 0; +} + +static int +dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp) { struct file *filep = NULL; struct kstat stat; mm_segment_t fs; char *raw_fmts = NULL; int logstrs_size = 0; - - logstr_header_t *hdr = NULL; - uint32 *lognums = NULL; - char *logstrs = NULL; - int ram_index = 0; - char **fmts; - int num_fmts = 0; - uint32 i = 0; int error = 0; fs = get_fs(); @@ -6804,128 +8988,50 @@ dhd_init_logstrs_array(dhd_event_log_t *temp) } logstrs_size = (int) stat.size; - raw_fmts = kmalloc(logstrs_size, GFP_KERNEL); + if (logstrs_size == 0) { + DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__)); + goto fail1; + } + + raw_fmts = MALLOC(osh, logstrs_size); if (raw_fmts == 
NULL) { DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__)); goto fail; } if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) { - DHD_ERROR(("%s: Failed to read file %s", __FUNCTION__, logstrs_path)); + DHD_ERROR(("%s: Failed to read file %s\n", __FUNCTION__, logstrs_path)); goto fail; } - /* Remember header from the logstrs.bin file */ - hdr = (logstr_header_t *) (raw_fmts + logstrs_size - - sizeof(logstr_header_t)); - - if (hdr->log_magic == LOGSTRS_MAGIC) { - /* - * logstrs.bin start with header. - */ - num_fmts = hdr->rom_logstrs_offset / sizeof(uint32); - ram_index = (hdr->ram_lognums_offset - - hdr->rom_lognums_offset) / sizeof(uint32); - lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset]; - logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset]; - } else { - /* - * Legacy logstrs.bin format without header. - */ - num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32); - if (num_fmts == 0) { - /* Legacy ROM/RAM logstrs.bin format: - * - ROM 'lognums' section - * - RAM 'lognums' section - * - ROM 'logstrs' section. - * - RAM 'logstrs' section. - * - * 'lognums' is an array of indexes for the strings in the - * 'logstrs' section. The first uint32 is 0 (index of first - * string in ROM 'logstrs' section). - * - * The 4324b5 is the only ROM that uses this legacy format. Use the - * fixed number of ROM fmtnums to find the start of the RAM - * 'lognums' section. Use the fixed first ROM string ("Con\n") to - * find the ROM 'logstrs' section. - */ - #define NUM_4324B5_ROM_FMTS 186 - #define FIRST_4324B5_ROM_LOGSTR "Con\n" - ram_index = NUM_4324B5_ROM_FMTS; - lognums = (uint32 *) raw_fmts; - num_fmts = ram_index; - logstrs = (char *) &raw_fmts[num_fmts << 2]; - while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) { - num_fmts++; - logstrs = (char *) &raw_fmts[num_fmts << 2]; - } - } else { - /* Legacy RAM-only logstrs.bin format: - * - RAM 'lognums' section - * - RAM 'logstrs' section. 
- * - * 'lognums' is an array of indexes for the strings in the - * 'logstrs' section. The first uint32 is an index to the - * start of 'logstrs'. Therefore, if this index is divided - * by 'sizeof(uint32)' it provides the number of logstr - * entries. - */ - ram_index = 0; - lognums = (uint32 *) raw_fmts; - logstrs = (char *) &raw_fmts[num_fmts << 2]; - } - } - fmts = kmalloc(num_fmts * sizeof(char *), GFP_KERNEL); - if (fmts == NULL) { - DHD_ERROR(("Failed to allocate fmts memory\n")); - goto fail; + if (dhd_parse_logstrs_file(osh, raw_fmts, logstrs_size, temp) + == BCME_OK) { + filp_close(filep, NULL); + set_fs(fs); + return BCME_OK; } - for (i = 0; i < num_fmts; i++) { - /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base - * (they are 0-indexed relative to 'rom_logstrs_offset'). - * - * RAM lognums are already indexed to point to the correct RAM logstrs (they - * are 0-indexed relative to the start of the logstrs.bin file). - */ - if (i == ram_index) { - logstrs = raw_fmts; - } - fmts[i] = &logstrs[lognums[i]]; - } - temp->fmts = fmts; - temp->raw_fmts = raw_fmts; - temp->num_fmts = num_fmts; - filp_close(filep, NULL); - set_fs(fs); - return; fail: if (raw_fmts) { - kfree(raw_fmts); + MFREE(osh, raw_fmts, logstrs_size); raw_fmts = NULL; } + +fail1: if (!IS_ERR(filep)) filp_close(filep, NULL); + set_fs(fs); temp->fmts = NULL; - return; + return BCME_ERROR; } static int -dhd_read_map(char *fname, uint32 *ramstart, uint32 *rodata_start, - uint32 *rodata_end) +dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start, + uint32 *rodata_end) { struct file *filep = NULL; mm_segment_t fs; - char *raw_fmts = NULL; - uint32 read_size = READ_NUM_BYTES; - int error = 0; - char * cptr = NULL; - char c; - uint8 count = 0; - - *ramstart = 0; - *rodata_start = 0; - *rodata_end = 0; + int err = BCME_ERROR; if (fname == NULL) { DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__)); @@ -6941,86 +9047,21 @@ dhd_read_map(char *fname, uint32 
*ramstart, uint32 *rodata_start, goto fail; } - /* Allocate 1 byte more than read_size to terminate it with NULL */ - raw_fmts = kmalloc(read_size + 1, GFP_KERNEL); - if (raw_fmts == NULL) { - DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__)); + if ((err = dhd_parse_map_file(osh, filep, ramstart, + rodata_start, rodata_end)) < 0) goto fail; - } - - /* read ram start, rodata_start and rodata_end values from map file */ - - while (count != ALL_MAP_VAL) - { - error = vfs_read(filep, raw_fmts, read_size, (&filep->f_pos)); - if (error < 0) { - DHD_ERROR(("%s: read failed %s err:%d \n", __FUNCTION__, - map_file_path, error)); - goto fail; - } - - if (error < read_size) { - /* - * since we reset file pos back to earlier pos by - * GO_BACK_FILE_POS_NUM_BYTES bytes we won't reach EOF. - * So if ret value is less than read_size, reached EOF don't read further - */ - break; - } - /* End raw_fmts with NULL as strstr expects NULL terminated strings */ - raw_fmts[read_size] = '\0'; - - /* Get ramstart address */ - if ((cptr = strstr(raw_fmts, ramstart_str))) { - cptr = cptr - BYTES_AHEAD_NUM; - sscanf(cptr, "%x %c text_start", ramstart, &c); - count |= RAMSTART_BIT; - } - - /* Get ram rodata start address */ - if ((cptr = strstr(raw_fmts, rodata_start_str))) { - cptr = cptr - BYTES_AHEAD_NUM; - sscanf(cptr, "%x %c rodata_start", rodata_start, &c); - count |= RDSTART_BIT; - } - - /* Get ram rodata end address */ - if ((cptr = strstr(raw_fmts, rodata_end_str))) { - cptr = cptr - BYTES_AHEAD_NUM; - sscanf(cptr, "%x %c rodata_end", rodata_end, &c); - count |= RDEND_BIT; - } - memset(raw_fmts, 0, read_size); - /* - * go back to predefined NUM of bytes so that we won't miss - * the string and addr even if it comes as splited in next read. 
- */ - filep->f_pos = filep->f_pos - GO_BACK_FILE_POS_NUM_BYTES; - } - - DHD_ERROR(("---ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n", - *ramstart, *rodata_start, *rodata_end)); - - DHD_ERROR(("readmap over \n")); fail: - if (raw_fmts) { - kfree(raw_fmts); - raw_fmts = NULL; - } if (!IS_ERR(filep)) filp_close(filep, NULL); set_fs(fs); - if (count == ALL_MAP_VAL) { - return BCME_OK; - } - DHD_ERROR(("readmap error 0X%x \n", count)); - return BCME_ERROR; + + return err; } -static void -dhd_init_static_strs_array(dhd_event_log_t *temp, char *str_file, char *map_file) +static int +dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file, char *map_file) { struct file *filep = NULL; mm_segment_t fs; @@ -7033,12 +9074,16 @@ dhd_init_static_strs_array(dhd_event_log_t *temp, char *str_file, char *map_file uint32 rodata_end = 0; uint32 logfilebase = 0; - error = dhd_read_map(map_file, &ramstart, &rodata_start, &rodata_end); - if (error == BCME_ERROR) { + error = dhd_read_map(osh, map_file, &ramstart, &rodata_start, &rodata_end); + if (error != BCME_OK) { DHD_ERROR(("readmap Error!! \n")); /* don't do event log parsing in actual case */ - temp->raw_sstr = NULL; - return; + if (strstr(str_file, ram_file_str) != NULL) { + temp->raw_sstr = NULL; + } else if (strstr(str_file, rom_file_str) != NULL) { + temp->rom_raw_sstr = NULL; + } + return error; } DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n", ramstart, rodata_start, rodata_end)); @@ -7055,7 +9100,12 @@ dhd_init_static_strs_array(dhd_event_log_t *temp, char *str_file, char *map_file /* Full file size is huge. 
Just read required part */ logstrs_size = rodata_end - rodata_start; - raw_fmts = kmalloc(logstrs_size, GFP_KERNEL); + if (logstrs_size == 0) { + DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__)); + goto fail1; + } + + raw_fmts = MALLOC(osh, logstrs_size); if (raw_fmts == NULL) { DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__)); goto fail; @@ -7077,11 +9127,13 @@ dhd_init_static_strs_array(dhd_event_log_t *temp, char *str_file, char *map_file if (strstr(str_file, ram_file_str) != NULL) { temp->raw_sstr = raw_fmts; + temp->raw_sstr_size = logstrs_size; temp->ramstart = ramstart; temp->rodata_start = rodata_start; temp->rodata_end = rodata_end; } else if (strstr(str_file, rom_file_str) != NULL) { temp->rom_raw_sstr = raw_fmts; + temp->rom_raw_sstr_size = logstrs_size; temp->rom_ramstart = ramstart; temp->rom_rodata_start = rodata_start; temp->rom_rodata_end = rodata_end; @@ -7090,21 +9142,27 @@ dhd_init_static_strs_array(dhd_event_log_t *temp, char *str_file, char *map_file filp_close(filep, NULL); set_fs(fs); - return; + return BCME_OK; + fail: if (raw_fmts) { - kfree(raw_fmts); + MFREE(osh, raw_fmts, logstrs_size); raw_fmts = NULL; } + +fail1: if (!IS_ERR(filep)) filp_close(filep, NULL); + set_fs(fs); + if (strstr(str_file, ram_file_str) != NULL) { temp->raw_sstr = NULL; } else if (strstr(str_file, rom_file_str) != NULL) { temp->rom_raw_sstr = NULL; } - return; + + return error; } #endif /* SHOW_LOGTRACE */ @@ -7119,6 +9177,9 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) uint32 bus_type = -1; uint32 bus_num = -1; uint32 slot_num = -1; +#ifdef SHOW_LOGTRACE + int ret; +#endif /* SHOW_LOGTRACE */ wifi_adapter_info_t *adapter = NULL; dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT; @@ -7139,7 +9200,7 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) dhd = MALLOC(osh, sizeof(dhd_info_t)); if (dhd == NULL) { DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__)); - goto fail; + goto dhd_null_flag; 
} } memset(dhd, 0, sizeof(dhd_info_t)); @@ -7148,7 +9209,17 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */ dhd->pub.osh = osh; +#ifdef DUMP_IOCTL_IOV_LIST + dll_init(&(dhd->pub.dump_iovlist_head)); +#endif /* DUMP_IOCTL_IOV_LIST */ dhd->adapter = adapter; +#ifdef DHD_DEBUG + dll_init(&(dhd->pub.mw_list_head)); +#endif /* DHD_DEBUG */ +#ifdef BT_OVER_SDIO + dhd->pub.is_bt_recovery_required = FALSE; + mutex_init(&dhd->bus_user_lock); +#endif /* BT_OVER_SDIO */ #ifdef GET_CUSTOM_MAC_ENABLE wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet); @@ -7164,7 +9235,9 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) #endif /* CUSTOM_COUNTRY_CODE */ dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID; dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID; - +#ifdef DHD_WET + dhd->pub.wet_info = dhd_get_wet_info(&dhd->pub); +#endif /* DHD_WET */ /* Initialize thread based operation and lock */ sema_init(&dhd->sdsem, 1); @@ -7202,7 +9275,7 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) len = strlen(if_name); ch = if_name[len - 1]; if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2)) - strcat(if_name, "%d"); + strncat(if_name, "%d", 2); } /* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */ @@ -7225,6 +9298,14 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) mutex_init(&dhd->dhd_iovar_mutex); sema_init(&dhd->proto_sem, 1); +#ifdef DHD_ULP + if (!(dhd_ulp_init(osh, &dhd->pub))) + goto fail; +#endif /* DHD_ULP */ + +#if defined(DHD_HANG_SEND_UP_TEST) + dhd->pub.req_hang_type = 0; +#endif /* DHD_HANG_SEND_UP_TEST */ #ifdef PROP_TXSTATUS spin_lock_init(&dhd->wlfc_spinlock); @@ -7248,6 +9329,9 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) /* Initialize other structure content */ init_waitqueue_head(&dhd->ioctl_resp_wait); init_waitqueue_head(&dhd->d3ack_wait); +#ifdef PCIE_INB_DW + 
init_waitqueue_head(&dhd->ds_exit_wait); +#endif /* PCIE_INB_DW */ init_waitqueue_head(&dhd->ctrl_wait); init_waitqueue_head(&dhd->dhd_bus_busy_state_wait); dhd->pub.dhd_bus_busy_state = 0; @@ -7255,8 +9339,12 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) /* Initialize the spinlocks */ spin_lock_init(&dhd->sdlock); spin_lock_init(&dhd->txqlock); + spin_lock_init(&dhd->rxqlock); spin_lock_init(&dhd->dhd_lock); spin_lock_init(&dhd->rxf_lock); +#ifdef WLTDLS + spin_lock_init(&dhd->pub.tdls_lock); +#endif /* WLTDLS */ #if defined(RXFRAME_THREAD) dhd->rxthread_enabled = TRUE; #endif /* defined(RXFRAME_THREAD) */ @@ -7269,7 +9357,7 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) spin_lock_init(&dhd->wakelock_spinlock); spin_lock_init(&dhd->wakelock_evt_spinlock); DHD_OS_WAKE_LOCK_INIT(dhd); - dhd->wakelock_wd_counter = 0; + dhd->wakelock_counter = 0; #ifdef CONFIG_HAS_WAKELOCK // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake"); @@ -7279,7 +9367,10 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) mutex_init(&dhd->dhd_net_if_mutex); mutex_init(&dhd->dhd_suspend_mutex); -#endif +#if defined(PKT_FILTER_SUPPORT) && defined(APF) + mutex_init(&dhd->dhd_apf_mutex); +#endif /* PKT_FILTER_SUPPORT && APF */ +#endif dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT; /* Attach and link in the protocol */ @@ -7289,7 +9380,17 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) } dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH; +#ifdef DHD_TIMESYNC + /* attach the timesync module */ + if (dhd_timesync_attach(&dhd->pub) != 0) { + DHD_ERROR(("dhd_timesync_attach failed\n")); + goto fail; + } + dhd_state |= DHD_ATTACH_TIMESYNC_ATTACH_DONE; +#endif /* DHD_TIMESYNC */ + #ifdef WL_CFG80211 + spin_lock_init(&dhd->pub.up_lock); /* Attach and link in the cfg80211 */ if 
(unlikely(wl_cfg80211_attach(net, &dhd->pub))) { DHD_ERROR(("wl_cfg80211_attach failed\n")); @@ -7312,16 +9413,38 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) dhd_state |= DHD_ATTACH_STATE_WL_ATTACH; } #ifdef WL_ESCAN - wl_escan_attach(net, (void *)&dhd->pub); + wl_escan_attach(net, &dhd->pub); #endif /* WL_ESCAN */ #endif /* defined(WL_WIRELESS_EXT) */ #ifdef SHOW_LOGTRACE - dhd_init_logstrs_array(&dhd->event_data); - dhd_init_static_strs_array(&dhd->event_data, st_str_file_path, map_file_path); - dhd_init_static_strs_array(&dhd->event_data, rom_st_str_file_path, rom_map_file_path); + ret = dhd_init_logstrs_array(osh, &dhd->event_data); + if (ret == BCME_OK) { + dhd_init_static_strs_array(osh, &dhd->event_data, st_str_file_path, map_file_path); + dhd_init_static_strs_array(osh, &dhd->event_data, rom_st_str_file_path, + rom_map_file_path); + dhd_state |= DHD_ATTACH_LOGTRACE_INIT; + } #endif /* SHOW_LOGTRACE */ +#ifdef DEBUGABILITY + /* attach debug if support */ + if (dhd_os_dbg_attach(&dhd->pub)) { + DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__)); + goto fail; + } + +#ifdef DBG_PKT_MON + dhd->pub.dbg->pkt_mon_lock = dhd_os_spin_lock_init(dhd->pub.osh); +#ifdef DBG_PKT_MON_INIT_DEFAULT + dhd_os_dbg_attach_pkt_monitor(&dhd->pub); +#endif /* DBG_PKT_MON_INIT_DEFAULT */ +#endif /* DBG_PKT_MON */ +#endif /* DEBUGABILITY */ +#ifdef DHD_PKT_LOGGING + dhd_os_attach_pktlog(&dhd->pub); +#endif /* DHD_PKT_LOGGING */ + if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) { DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA)); goto fail; @@ -7363,6 +9486,9 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) #ifdef DEBUGGER debugger_init((void *) bus); #endif +#ifdef SHOW_LOGTRACE + skb_queue_head_init(&dhd->evt_trace_queue); +#endif /* SHOW_LOGTRACE */ /* Set up the bottom half handler */ if (dhd_dpc_prio >= 0) { @@ -7427,55 +9553,52 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) 
cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER); #endif #ifdef DHDTCPACK_SUPPRESS -#ifdef BCMSDIO - dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX); -#elif defined(BCMPCIE) - dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD); -#else - dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF); -#endif /* BCMSDIO */ + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DEFAULT); #endif /* DHDTCPACK_SUPPRESS */ #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ - dhd_state |= DHD_ATTACH_STATE_DONE; - dhd->dhd_state = dhd_state; - dhd_found++; #ifdef DHD_DEBUG_PAGEALLOC register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub); #endif /* DHD_DEBUG_PAGEALLOC */ #if defined(DHD_LB) - DHD_ERROR(("DHD LOAD BALANCING Enabled\n")); dhd_lb_set_default_cpus(dhd); /* Initialize the CPU Masks */ - if (dhd_cpumasks_init(dhd) == 0) { - + if (dhd_cpumasks_init(dhd) == 0) { /* Now we have the current CPU maps, run through candidacy */ dhd_select_cpu_candidacy(dhd); - /* - * If we are able to initialize CPU masks, lets register to the - * CPU Hotplug framework to change the CPU for each job dynamically - * using candidacy algorithm. - */ + * If we are able to initialize CPU masks, lets register to the + * CPU Hotplug framework to change the CPU for each job dynamically + * using candidacy algorithm. 
+ */ dhd->cpu_notifier.notifier_call = dhd_cpu_callback; - register_cpu_notifier(&dhd->cpu_notifier); /* Register a callback */ + register_hotcpu_notifier(&dhd->cpu_notifier); /* Register a callback */ } else { /* - * We are unable to initialize CPU masks, so candidacy algorithm - * won't run, but still Load Balancing will be honoured based - * on the CPUs allocated for a given job statically during init - */ + * We are unable to initialize CPU masks, so candidacy algorithm + * won't run, but still Load Balancing will be honoured based + * on the CPUs allocated for a given job statically during init + */ dhd->cpu_notifier.notifier_call = NULL; - DHD_ERROR(("%s(): dhd_cpumasks_init failed CPUs for JOB would be static\n", + DHD_ERROR(("%s():dhd_cpumasks_init failed CPUs for JOB would be static\n", __FUNCTION__)); } +#ifdef DHD_LB_TXP +#ifdef DHD_LB_TXP_DEFAULT_ENAB + /* Trun ON the feature by default */ + atomic_set(&dhd->lb_txp_active, 1); +#else + /* Trun OFF the feature by default */ + atomic_set(&dhd->lb_txp_active, 0); +#endif /* DHD_LB_TXP_DEFAULT_ENAB */ +#endif /* DHD_LB_TXP */ DHD_LB_STATS_INIT(&dhd->pub); @@ -7490,25 +9613,52 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) #if defined(DHD_LB_RXC) tasklet_init(&dhd->rx_compl_tasklet, dhd_lb_rx_compl_handler, (ulong)(&dhd->pub)); - INIT_WORK(&dhd->rx_compl_dispatcher_work, dhd_rx_compl_dispatcher_fn); DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__)); #endif /* DHD_LB_RXC */ #if defined(DHD_LB_RXP) - __skb_queue_head_init(&dhd->rx_pend_queue); + __skb_queue_head_init(&dhd->rx_pend_queue); skb_queue_head_init(&dhd->rx_napi_queue); - /* Initialize the work that dispatches NAPI job to a given core */ INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn); DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__)); #endif /* DHD_LB_RXP */ +#if defined(DHD_LB_TXP) + INIT_WORK(&dhd->tx_dispatcher_work, dhd_tx_dispatcher_work); + 
skb_queue_head_init(&dhd->tx_pend_queue); + /* Initialize the work that dispatches TX job to a given core */ + tasklet_init(&dhd->tx_tasklet, + dhd_lb_tx_handler, (ulong)(dhd)); + DHD_INFO(("%s load balance init tx_pend_queue\n", __FUNCTION__)); +#endif /* DHD_LB_TXP */ + + dhd_state |= DHD_ATTACH_STATE_LB_ATTACH_DONE; #endif /* DHD_LB */ - INIT_DELAYED_WORK(&dhd->dhd_memdump_work, dhd_memdump_work_handler); +#ifdef SHOW_LOGTRACE + INIT_WORK(&dhd->event_log_dispatcher_work, dhd_event_logtrace_process); +#endif /* SHOW_LOGTRACE */ + + DHD_SSSR_MEMPOOL_INIT(&dhd->pub); + +#ifdef REPORT_FATAL_TIMEOUTS + init_dhd_timeouts(&dhd->pub); +#endif /* REPORT_FATAL_TIMEOUTS */ +#ifdef BCMPCIE + dhd->pub.extended_trap_data = MALLOCZ(osh, BCMPCIE_EXT_TRAP_DATA_MAXLEN); + if (dhd->pub.extended_trap_data == NULL) { + DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__)); + } +#endif /* BCMPCIE */ (void)dhd_sysfs_init(dhd); + dhd_state |= DHD_ATTACH_STATE_DONE; + dhd->dhd_state = dhd_state; + + dhd_found++; + return &dhd->pub; fail: @@ -7519,19 +9669,10 @@ fail: dhd_detach(&dhd->pub); dhd_free(&dhd->pub); } - +dhd_null_flag: return NULL; } -#include - -void dhd_memdump_work_schedule(dhd_pub_t *dhdp, unsigned long msecs) -{ - dhd_info_t *dhd = (dhd_info_t*)dhdp->info; - - schedule_delayed_work(&dhd->dhd_memdump_work, msecs_to_jiffies(msecs)); -} - int dhd_get_fw_mode(dhd_info_t *dhdinfo) { if (strstr(dhdinfo->fw_path, "_apsta") != NULL) @@ -7546,6 +9687,11 @@ int dhd_get_fw_mode(dhd_info_t *dhdinfo) return DHD_FLAG_STA_MODE; } +int dhd_bus_get_fw_mode(dhd_pub_t *dhdp) +{ + return dhd_get_fw_mode(dhdp->info); +} + extern int rkwifi_set_firmware(char *fw, char *nvram); bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo) { @@ -7557,9 +9703,15 @@ bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo) const char *nv = NULL; const char *clm = NULL; const char *conf = NULL; +#ifdef DHD_UCODE_DOWNLOAD + int uc_len; + const char *uc = NULL; +#endif /* DHD_UCODE_DOWNLOAD */ char 
firmware[100] = {0}; char nvram[100] = {0}; wifi_adapter_info_t *adapter = dhdinfo->adapter; + int fw_path_len = sizeof(dhdinfo->fw_path); + int nv_path_len = sizeof(dhdinfo->nv_path); /* Update firmware and nvram path. The path may be from adapter info or module parameter @@ -7618,24 +9770,56 @@ bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo) clm = clm_path; if (config_path[0] != '\0') conf = config_path; +#ifdef DHD_UCODE_DOWNLOAD + if (ucode_path[0] != '\0') + uc = ucode_path; +#endif /* DHD_UCODE_DOWNLOAD */ if (fw && fw[0] != '\0') { fw_len = strlen(fw); - if (fw_len >= sizeof(dhdinfo->fw_path)) { + if (fw_len >= fw_path_len) { DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n")); return FALSE; } - strncpy(dhdinfo->fw_path, fw, sizeof(dhdinfo->fw_path)); + strncpy(dhdinfo->fw_path, fw, fw_path_len); if (dhdinfo->fw_path[fw_len-1] == '\n') dhdinfo->fw_path[fw_len-1] = '\0'; } if (nv && nv[0] != '\0') { nv_len = strlen(nv); - if (nv_len >= sizeof(dhdinfo->nv_path)) { + if (nv_len >= nv_path_len) { DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n")); return FALSE; } - strncpy(dhdinfo->nv_path, nv, sizeof(dhdinfo->nv_path)); + memset(dhdinfo->nv_path, 0, nv_path_len); + strncpy(dhdinfo->nv_path, nv, nv_path_len); +#ifdef DHD_USE_SINGLE_NVRAM_FILE + /* Remove "_net" or "_mfg" tag from current nvram path */ + { + char *nvram_tag = "nvram_"; + char *ext_tag = ".txt"; + char *sp_nvram = strnstr(dhdinfo->nv_path, nvram_tag, nv_path_len); + bool valid_buf = sp_nvram && ((uint32)(sp_nvram + strlen(nvram_tag) + + strlen(ext_tag) - dhdinfo->nv_path) <= nv_path_len); + if (valid_buf) { + char *sp = sp_nvram + strlen(nvram_tag) - 1; + uint32 padding_size = (uint32)(dhdinfo->nv_path + + nv_path_len - sp); + memset(sp, 0, padding_size); + strncat(dhdinfo->nv_path, ext_tag, strlen(ext_tag)); + nv_len = strlen(dhdinfo->nv_path); + DHD_INFO(("%s: new nvram path = %s\n", + __FUNCTION__, dhdinfo->nv_path)); + } else if (sp_nvram) { + DHD_ERROR(("%s: 
buffer space for nvram path is not enough\n", + __FUNCTION__)); + return FALSE; + } else { + DHD_ERROR(("%s: Couldn't find the nvram tag. current" + " nvram path = %s\n", __FUNCTION__, dhdinfo->nv_path)); + } + } +#endif /* DHD_USE_SINGLE_NVRAM_FILE */ if (dhdinfo->nv_path[nv_len-1] == '\n') dhdinfo->nv_path[nv_len-1] = '\0'; } @@ -7659,6 +9843,18 @@ bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo) if (dhdinfo->conf_path[conf_len-1] == '\n') dhdinfo->conf_path[conf_len-1] = '\0'; } +#ifdef DHD_UCODE_DOWNLOAD + if (uc && uc[0] != '\0') { + uc_len = strlen(uc); + if (uc_len >= sizeof(dhdinfo->uc_path)) { + DHD_ERROR(("uc path len exceeds max len of dhdinfo->uc_path\n")); + return FALSE; + } + strncpy(dhdinfo->uc_path, uc, sizeof(dhdinfo->uc_path)); + if (dhdinfo->uc_path[uc_len-1] == '\n') + dhdinfo->uc_path[uc_len-1] = '\0'; + } +#endif /* DHD_UCODE_DOWNLOAD */ #if 0 /* clear the path in module parameter */ @@ -7669,6 +9865,10 @@ bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo) config_path[0] = '\0'; } #endif +#ifdef DHD_UCODE_DOWNLOAD + ucode_path[0] = '\0'; + DHD_ERROR(("ucode path: %s\n", dhdinfo->uc_path)); +#endif /* DHD_UCODE_DOWNLOAD */ #ifndef BCMEMBEDIMAGE /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */ @@ -7685,20 +9885,80 @@ bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo) return TRUE; } +#if defined(BT_OVER_SDIO) +extern bool dhd_update_btfw_path(dhd_info_t *dhdinfo, char* btfw_path) +{ + int fw_len; + const char *fw = NULL; + wifi_adapter_info_t *adapter = dhdinfo->adapter; + + + /* Update bt firmware path. The path may be from adapter info or module parameter + * The path from adapter info is used for initialization only (as it won't change). + * + * The btfw_path module parameter may be changed by the system at run + * time. When it changes we need to copy it to dhdinfo->btfw_path. Also Android private + * command may change dhdinfo->btfw_path. As such we need to clear the path info in + * module parameter after it is copied. 
We won't update the path until the module parameter + * is changed again (first character is not '\0') + */ + + /* set default firmware and nvram path for built-in type driver */ + if (!dhd_download_fw_on_driverload) { +#ifdef CONFIG_BCMDHD_BTFW_PATH + fw = CONFIG_BCMDHD_BTFW_PATH; +#endif /* CONFIG_BCMDHD_FW_PATH */ + } + + /* check if we need to initialize the path */ + if (dhdinfo->btfw_path[0] == '\0') { + if (adapter && adapter->btfw_path && adapter->btfw_path[0] != '\0') + fw = adapter->btfw_path; + } + + /* Use module parameter if it is valid, EVEN IF the path has not been initialized + */ + if (btfw_path[0] != '\0') + fw = btfw_path; + + if (fw && fw[0] != '\0') { + fw_len = strlen(fw); + if (fw_len >= sizeof(dhdinfo->btfw_path)) { + DHD_ERROR(("fw path len exceeds max len of dhdinfo->btfw_path\n")); + return FALSE; + } + strncpy(dhdinfo->btfw_path, fw, sizeof(dhdinfo->btfw_path)); + if (dhdinfo->btfw_path[fw_len-1] == '\n') + dhdinfo->btfw_path[fw_len-1] = '\0'; + } + + /* clear the path in module parameter */ + btfw_path[0] = '\0'; + + if (dhdinfo->btfw_path[0] == '\0') { + DHD_ERROR(("bt firmware path not found\n")); + return FALSE; + } + + return TRUE; +} +#endif /* defined (BT_OVER_SDIO) */ + + #ifdef CUSTOMER_HW4_DEBUG bool dhd_validate_chipid(dhd_pub_t *dhdp) { uint chipid = dhd_bus_chip_id(dhdp); uint config_chipid; -#ifdef BCM4359_CHIP +#ifdef BCM4361_CHIP + config_chipid = BCM4361_CHIP_ID; +#elif defined(BCM4359_CHIP) config_chipid = BCM4359_CHIP_ID; #elif defined(BCM4358_CHIP) config_chipid = BCM4358_CHIP_ID; #elif defined(BCM4354_CHIP) config_chipid = BCM4354_CHIP_ID; -#elif defined(BCM4356_CHIP) - config_chipid = BCM4356_CHIP_ID; #elif defined(BCM4339_CHIP) config_chipid = BCM4339_CHIP_ID; #elif defined(BCM43349_CHIP) @@ -7711,10 +9971,14 @@ bool dhd_validate_chipid(dhd_pub_t *dhdp) config_chipid = BCM4330_CHIP_ID; #elif defined(BCM43430_CHIP) config_chipid = BCM43430_CHIP_ID; -#elif defined(BCM4334W_CHIP) - config_chipid = BCM43342_CHIP_ID; 
+#elif defined(BCM43018_CHIP) + config_chipid = BCM43018_CHIP_ID; #elif defined(BCM43455_CHIP) config_chipid = BCM4345_CHIP_ID; +#elif defined(BCM4334W_CHIP) + config_chipid = BCM43342_CHIP_ID; +#elif defined(BCM43454_CHIP) + config_chipid = BCM43454_CHIP_ID; #elif defined(BCM43012_CHIP_) config_chipid = BCM43012_CHIP_ID; #else @@ -7727,16 +9991,55 @@ bool dhd_validate_chipid(dhd_pub_t *dhdp) return FALSE; #endif /* BCM4354_CHIP */ +#ifdef SUPPORT_MULTIPLE_CHIP_4345X + if (config_chipid == BCM43454_CHIP_ID || config_chipid == BCM4345_CHIP_ID) { + return TRUE; + } +#endif /* SUPPORT_MULTIPLE_CHIP_4345X */ #if defined(BCM4359_CHIP) if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) { return TRUE; } #endif /* BCM4359_CHIP */ +#if defined(BCM4361_CHIP) + if (chipid == BCM4347_CHIP_ID && config_chipid == BCM4361_CHIP_ID) { + return TRUE; + } +#endif /* BCM4361_CHIP */ return config_chipid == chipid; } #endif /* CUSTOMER_HW4_DEBUG */ +#if defined(BT_OVER_SDIO) +wlan_bt_handle_t dhd_bt_get_pub_hndl(void) +{ + DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub)); + /* assuming that dhd_pub_t type pointer is available from a global variable */ + return (wlan_bt_handle_t) g_dhd_pub; +} EXPORT_SYMBOL(dhd_bt_get_pub_hndl); + +int dhd_download_btfw(wlan_bt_handle_t handle, char* btfw_path) +{ + int ret = -1; + dhd_pub_t *dhdp = (dhd_pub_t *)handle; + dhd_info_t *dhd = (dhd_info_t*)dhdp->info; + + + /* Download BT firmware image to the dongle */ + if (dhd->pub.busstate == DHD_BUS_DATA && dhd_update_btfw_path(dhd, btfw_path)) { + DHD_INFO(("%s: download btfw from: %s\n", __FUNCTION__, dhd->btfw_path)); + ret = dhd_bus_download_btfw(dhd->pub.bus, dhd->pub.osh, dhd->btfw_path); + if (ret < 0) { + DHD_ERROR(("%s: failed to download btfw from: %s\n", + __FUNCTION__, dhd->btfw_path)); + return ret; + } + } + return ret; +} EXPORT_SYMBOL(dhd_download_btfw); +#endif /* defined (BT_OVER_SDIO) */ + int dhd_bus_start(dhd_pub_t *dhdp) { @@ -7744,20 +10047,31 @@ 
dhd_bus_start(dhd_pub_t *dhdp) dhd_info_t *dhd = (dhd_info_t*)dhdp->info; unsigned long flags; +#if defined(DHD_DEBUG) && defined(BCMSDIO) + int fw_download_start = 0, fw_download_end = 0, f2_sync_start = 0, f2_sync_end = 0; +#endif /* DHD_DEBUG && BCMSDIO */ ASSERT(dhd); DHD_TRACE(("Enter %s:\n", __FUNCTION__)); DHD_PERIM_LOCK(dhdp); - +#ifdef HOFFLOAD_MODULES + dhd_linux_get_modfw_address(dhdp); +#endif /* try to download image and nvram to the dongle */ if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) { /* Indicate FW Download has not yet done */ - dhd->pub.is_fw_download_done = FALSE; + dhd->pub.fw_download_done = FALSE; DHD_INFO(("%s download fw %s, nv %s, conf %s\n", __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path)); +#if defined(DHD_DEBUG) && defined(BCMSDIO) + fw_download_start = OSL_SYSUPTIME(); +#endif /* DHD_DEBUG && BCMSDIO */ ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh, dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path); +#if defined(DHD_DEBUG) && defined(BCMSDIO) + fw_download_end = OSL_SYSUPTIME(); +#endif /* DHD_DEBUG && BCMSDIO */ if (ret < 0) { DHD_ERROR(("%s: failed to download firmware %s\n", __FUNCTION__, dhd->fw_path)); @@ -7765,32 +10079,38 @@ dhd_bus_start(dhd_pub_t *dhdp) return ret; } /* Indicate FW Download has succeeded */ - dhd->pub.is_fw_download_done = TRUE; + dhd->pub.fw_download_done = TRUE; } if (dhd->pub.busstate != DHD_BUS_LOAD) { DHD_PERIM_UNLOCK(dhdp); return -ENETDOWN; } +#ifdef BCMSDIO dhd_os_sdlock(dhdp); +#endif /* BCMSDIO */ /* Start the watchdog timer */ dhd->pub.tickcnt = 0; dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms); - DHD_ENABLE_RUNTIME_PM(&dhd->pub); /* Bring up the bus */ if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) { DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret)); +#ifdef BCMSDIO dhd_os_sdunlock(dhdp); +#endif /* BCMSDIO */ DHD_PERIM_UNLOCK(dhdp); return ret; } + + DHD_ENABLE_RUNTIME_PM(&dhd->pub); + +#ifdef DHD_ULP + 
dhd_ulp_set_ulp_state(dhdp, DHD_ULP_DISABLED); +#endif /* DHD_ULP */ #if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE) -#if defined(BCMPCIE_OOB_HOST_WAKE) - dhd_os_sdunlock(dhdp); -#endif /* BCMPCIE_OOB_HOST_WAKE */ /* Host registration for OOB interrupt */ if (dhd_bus_oob_intr_register(dhdp)) { /* deactivate timer and wait for the handler to finish */ @@ -7800,17 +10120,14 @@ dhd_bus_start(dhd_pub_t *dhdp) DHD_GENERAL_UNLOCK(&dhd->pub, flags); del_timer_sync(&dhd->timer); - dhd_os_sdunlock(dhdp); #endif /* !BCMPCIE_OOB_HOST_WAKE */ DHD_DISABLE_RUNTIME_PM(&dhd->pub); DHD_PERIM_UNLOCK(dhdp); - DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__)); return -ENODEV; } #if defined(BCMPCIE_OOB_HOST_WAKE) - dhd_os_sdlock(dhdp); dhd_bus_oob_intr_set(dhdp, TRUE); #else /* Enable oob at firmware */ @@ -7828,7 +10145,9 @@ dhd_bus_start(dhd_pub_t *dhdp) DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__, max_h2d_rings)); if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) { +#ifdef BCMSDIO dhd_os_sdunlock(dhdp); +#endif /* BCMSDIO */ DHD_PERIM_UNLOCK(dhdp); return ret; } @@ -7836,18 +10155,12 @@ dhd_bus_start(dhd_pub_t *dhdp) #endif /* PCIE_FULL_DONGLE */ /* Do protocol initialization necessary for IOCTL/IOVAR */ -#ifdef PCIE_FULL_DONGLE - dhd_os_sdunlock(dhdp); -#endif /* PCIE_FULL_DONGLE */ ret = dhd_prot_init(&dhd->pub); if (unlikely(ret) != BCME_OK) { DHD_PERIM_UNLOCK(dhdp); DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); return ret; } -#ifdef PCIE_FULL_DONGLE - dhd_os_sdlock(dhdp); -#endif /* PCIE_FULL_DONGLE */ /* If bus is not ready, can't come up */ if (dhd->pub.busstate != DHD_BUS_DATA) { @@ -7857,26 +10170,41 @@ dhd_bus_start(dhd_pub_t *dhdp) del_timer_sync(&dhd->timer); DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__)); DHD_DISABLE_RUNTIME_PM(&dhd->pub); +#ifdef BCMSDIO dhd_os_sdunlock(dhdp); +#endif /* BCMSDIO */ DHD_PERIM_UNLOCK(dhdp); - DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); return -ENODEV; } 
+#ifdef BCMSDIO dhd_os_sdunlock(dhdp); +#endif /* BCMSDIO */ /* Bus is ready, query any dongle information */ +#if defined(DHD_DEBUG) && defined(BCMSDIO) + f2_sync_start = OSL_SYSUPTIME(); +#endif /* DHD_DEBUG && BCMSDIO */ if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) { DHD_GENERAL_LOCK(&dhd->pub, flags); dhd->wd_timer_valid = FALSE; DHD_GENERAL_UNLOCK(&dhd->pub, flags); del_timer_sync(&dhd->timer); DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__)); - DHD_DISABLE_RUNTIME_PM(&dhd->pub); DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); DHD_PERIM_UNLOCK(dhdp); return ret; } +#if defined(CONFIG_SOC_EXYNOS8895) + DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__)); + exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI); +#endif /* CONFIG_SOC_EXYNOS8895 */ + +#if defined(DHD_DEBUG) && defined(BCMSDIO) + f2_sync_end = OSL_SYSUPTIME(); + DHD_ERROR(("Time taken for FW download and F2 ready is: %d msec\n", + (fw_download_end - fw_download_start) + (f2_sync_end - f2_sync_start))); +#endif /* DHD_DEBUG && BCMSDIO */ #ifdef ARP_OFFLOAD_SUPPORT if (dhd->pend_ipaddr) { @@ -7887,6 +10215,9 @@ dhd_bus_start(dhd_pub_t *dhdp) } #endif /* ARP_OFFLOAD_SUPPORT */ +#if defined(TRAFFIC_MGMT_DWM) + bzero(&dhd->pub.dhd_tm_dwm_tbl, sizeof(dhd_trf_mgmt_dwm_tbl_t)); +#endif DHD_PERIM_UNLOCK(dhdp); return 0; } @@ -7894,7 +10225,6 @@ dhd_bus_start(dhd_pub_t *dhdp) #ifdef WLTDLS int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac) { - char iovbuf[WLC_IOCTL_SMLEN]; uint32 tdls = tdls_on; int ret = 0; uint32 tdls_auto_op = 0; @@ -7907,8 +10237,8 @@ int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_ad if (dhd->tdls_enable == tdls_on) goto auto_mode; - bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "tdls_enable", (char *)&tdls, sizeof(tdls), NULL, 0, TRUE); + if (ret < 0) { DHD_ERROR(("%s: tdls 
%d failed %d\n", __FUNCTION__, tdls, ret)); goto exit; } @@ -7916,31 +10246,29 @@ int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_ad auto_mode: tdls_auto_op = auto_on; - bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op), - iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op), NULL, + 0, TRUE); + if (ret < 0) { DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret)); goto exit; } if (tdls_auto_op) { - bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time, - sizeof(tdls_idle_time), iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "tdls_idle_time", (char *)&tdls_idle_time, + sizeof(tdls_idle_time), NULL, 0, TRUE); + if (ret < 0) { DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret)); goto exit; } - bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "tdls_rssi_high", (char *)&tdls_rssi_high, + sizeof(tdls_rssi_high), NULL, 0, TRUE); + if (ret < 0) { DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret)); goto exit; } - bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "tdls_rssi_low", (char *)&tdls_rssi_low, + sizeof(tdls_rssi_low), NULL, 0, TRUE); + if (ret < 0) { DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret)); goto exit; } @@ -7960,10 +10288,10 @@ int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct e ret = BCME_ERROR; return ret; } + int dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode) { - char 
iovbuf[WLC_IOCTL_SMLEN]; int ret = 0; bool auto_on = false; uint32 mode = wfd_mode; @@ -7983,12 +10311,8 @@ dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode) return ret; } - - bcm_mkiovar("tdls_wfd_mode", (char *)&mode, sizeof(mode), - iovbuf, sizeof(iovbuf)); - if (((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) && - (ret != BCME_UNSUPPORTED)) { + ret = dhd_iovar(dhd, 0, "tdls_wfd_mode", (char *)&mode, sizeof(mode), NULL, 0, TRUE); + if ((ret < 0) && (ret != BCME_UNSUPPORTED)) { DHD_ERROR(("%s: tdls_wfd_mode faile_wfd_mode %d\n", __FUNCTION__, ret)); return ret; } @@ -8003,59 +10327,71 @@ dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode) return ret; } #ifdef PCIE_FULL_DONGLE -void dhd_tdls_update_peer_info(struct net_device *dev, bool connect, uint8 *da) +int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event) { - dhd_info_t *dhd = DHD_DEV_INFO(dev); - dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub; - tdls_peer_node_t *cur = dhdp->peer_tbl.node; + dhd_pub_t *dhd_pub = dhdp; + tdls_peer_node_t *cur = dhd_pub->peer_tbl.node; tdls_peer_node_t *new = NULL, *prev = NULL; - dhd_if_t *dhdif; - uint8 sa[ETHER_ADDR_LEN]; - int ifidx = dhd_net2idx(dhd, dev); + int ifindex = dhd_ifname2idx(dhd_pub->info, event->ifname); + uint8 *da = (uint8 *)&event->addr.octet[0]; + bool connect = FALSE; + uint32 reason = ntoh32(event->reason); + unsigned long flags; - if (ifidx == DHD_BAD_IF) - return; - - dhdif = dhd->iflist[ifidx]; - memcpy(sa, dhdif->mac_addr, ETHER_ADDR_LEN); + if (reason == WLC_E_TDLS_PEER_CONNECTED) + connect = TRUE; + else if (reason == WLC_E_TDLS_PEER_DISCONNECTED) + connect = FALSE; + else + { + DHD_ERROR(("%s: TDLS Event reason is unknown\n", __FUNCTION__)); + return BCME_ERROR; + } + if (ifindex == DHD_BAD_IF) + return BCME_ERROR; if (connect) { while (cur != NULL) { if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) { DHD_ERROR(("%s: TDLS Peer exist already %d\n", __FUNCTION__, __LINE__)); - return; + return BCME_ERROR; } cur 
= cur->next; } - new = MALLOC(dhdp->osh, sizeof(tdls_peer_node_t)); + new = MALLOC(dhd_pub->osh, sizeof(tdls_peer_node_t)); if (new == NULL) { DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__)); - return; + return BCME_ERROR; } memcpy(new->addr, da, ETHER_ADDR_LEN); - new->next = dhdp->peer_tbl.node; - dhdp->peer_tbl.node = new; - dhdp->peer_tbl.tdls_peer_count++; + DHD_TDLS_LOCK(&dhdp->tdls_lock, flags); + new->next = dhd_pub->peer_tbl.node; + dhd_pub->peer_tbl.node = new; + dhd_pub->peer_tbl.tdls_peer_count++; + DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags); } else { while (cur != NULL) { if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) { - dhd_flow_rings_delete_for_peer(dhdp, ifidx, da); + dhd_flow_rings_delete_for_peer(dhd_pub, (uint8)ifindex, da); + DHD_TDLS_LOCK(&dhdp->tdls_lock, flags); if (prev) prev->next = cur->next; else - dhdp->peer_tbl.node = cur->next; - MFREE(dhdp->osh, cur, sizeof(tdls_peer_node_t)); - dhdp->peer_tbl.tdls_peer_count--; - return; + dhd_pub->peer_tbl.node = cur->next; + MFREE(dhd_pub->osh, cur, sizeof(tdls_peer_node_t)); + dhd_pub->peer_tbl.tdls_peer_count--; + DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags); + return BCME_OK; } prev = cur; cur = cur->next; } DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__)); } + return BCME_OK; } #endif /* PCIE_FULL_DONGLE */ #endif @@ -8099,9 +10435,9 @@ dhd_get_concurrent_capabilites(dhd_pub_t *dhd) } else { /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */ memset(buf, 0, sizeof(buf)); - bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), - FALSE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "p2p", NULL, 0, (char *)&buf, + sizeof(buf), FALSE); + if (ret < 0) { DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret)); return 0; } else { @@ -8115,9 +10451,11 @@ dhd_get_concurrent_capabilites(dhd_pub_t *dhd) if (FW_SUPPORTED(dhd, rsdb)) { ret |= DHD_FLAG_RSDB_MODE; } +#ifdef WL_SUPPORT_MULTIP2P if 
(FW_SUPPORTED(dhd, mp2p)) { ret |= DHD_FLAG_MP2P_MODE; } +#endif /* WL_SUPPORT_MULTIP2P */ #if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF) return ret; #else @@ -8136,38 +10474,36 @@ dhd_get_concurrent_capabilites(dhd_pub_t *dhd) #define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK 0 int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable) { - char iovbuf[128]; int32 pps = RXCHAIN_PWRSAVE_PPS; int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME; int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK; + int ret; if (enable) { - bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf)); - if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, - iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) { + ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_enable", (char *)&enable, sizeof(enable), + NULL, 0, TRUE); + if (ret != BCME_OK) { DHD_ERROR(("Failed to enable AP power save\n")); } - bcm_mkiovar("rxchain_pwrsave_pps", (char *)&pps, 4, iovbuf, sizeof(iovbuf)); - if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, - iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) { + ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_pps", (char *)&pps, sizeof(pps), NULL, 0, + TRUE); + if (ret != BCME_OK) { DHD_ERROR(("Failed to set pps\n")); } - bcm_mkiovar("rxchain_pwrsave_quiet_time", (char *)&quiet_time, - 4, iovbuf, sizeof(iovbuf)); - if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, - iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) { + ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_quiet_time", (char *)&quiet_time, + sizeof(quiet_time), NULL, 0, TRUE); + if (ret != BCME_OK) { DHD_ERROR(("Failed to set quiet time\n")); } - bcm_mkiovar("rxchain_pwrsave_stas_assoc_check", (char *)&stas_assoc_check, - 4, iovbuf, sizeof(iovbuf)); - if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, - iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) { + ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_stas_assoc_check", + (char *)&stas_assoc_check, sizeof(stas_assoc_check), NULL, 0, TRUE); + if (ret != BCME_OK) { DHD_ERROR(("Failed to set stas assoc check\n")); } 
} else { - bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf)); - if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, - iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) { + ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_enable", (char *)&enable, sizeof(enable), + NULL, 0, TRUE); + if (ret != BCME_OK) { DHD_ERROR(("Failed to disable AP power save\n")); } } @@ -8177,6 +10513,62 @@ int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable) #endif /* SUPPORT_AP_POWERSAVE */ + + +#if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD) +int +dhd_enable_adps(dhd_pub_t *dhd, uint8 on) +{ + int i; + int len; + int ret = BCME_OK; + + bcm_iov_buf_t *iov_buf = NULL; + wl_adps_params_v1_t *data = NULL; + char buf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */ + + len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data); + iov_buf = kmalloc(len, GFP_KERNEL); + if (iov_buf == NULL) { + DHD_ERROR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__, len)); + ret = BCME_NOMEM; + goto exit; + } + + iov_buf->version = WL_ADPS_IOV_VER; + iov_buf->len = sizeof(*data); + iov_buf->id = WL_ADPS_IOV_MODE; + + data = (wl_adps_params_v1_t *)iov_buf->data; + data->version = ADPS_SUB_IOV_VERSION_1; + data->length = sizeof(*data); + data->mode = on; + + for (i = 1; i <= MAX_BANDS; i++) { + data->band = i; + bcm_mkiovar("adps", (char *)iov_buf, len, buf, sizeof(buf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0)) < 0) { + if (ret == BCME_UNSUPPORTED) { + DHD_ERROR(("%s adps is not supported\n", __FUNCTION__)); + ret = BCME_OK; + goto exit; + } + else { + DHD_ERROR(("%s fail to set adps %s for band %d (%d)\n", + __FUNCTION__, on ? 
"On" : "Off", i, ret)); + goto exit; + } + } + } + +exit: + if (iov_buf) { + kfree(iov_buf); + } + return ret; +} +#endif /* WLADPS || WLADPS_PRIVATE_CMD */ + int dhd_preinit_ioctls(dhd_pub_t *dhd) { @@ -8191,6 +10583,7 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) eventmsgs_ext_t *eventmask_msg = NULL; char* iov_buf = NULL; int ret2 = 0; + uint32 wnm_cap = 0; #if defined(CUSTOM_AMPDU_BA_WSIZE) uint32 ampdu_ba_wsize = 0; #endif @@ -8203,9 +10596,8 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #if defined(CUSTOM_AMSDU_AGGSF) int32 amsdu_aggsf = 0; #endif -#ifdef SUPPORT_SENSORHUB - int32 shub_enable = 0; -#endif /* SUPPORT_SENSORHUB */ + shub_control_t shub_ctl; + #if defined(BCMSDIO) #ifdef PROP_TXSTATUS int wlfc_enable = TRUE; @@ -8215,17 +10607,14 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #endif /* DISABLE_11N */ #endif /* PROP_TXSTATUS */ #endif -#ifdef PCIE_FULL_DONGLE +#ifndef PCIE_FULL_DONGLE uint32 wl_ap_isolate; #endif /* PCIE_FULL_DONGLE */ - -#if defined(BCMSDIO) - /* by default frame burst is enabled for PCIe and disabled for SDIO dongles */ - uint32 frameburst = 0; -#else - uint32 frameburst = 1; -#endif /* BCMSDIO */ - + uint32 frameburst = CUSTOM_FRAMEBURST_SET; + uint wnm_bsstrans_resp = 0; +#ifdef SUPPORT_SET_CAC + uint32 cac = 1; +#endif /* SUPPORT_SET_CAC */ #ifdef DHD_ENABLE_LPC uint32 lpc = 1; #endif /* DHD_ENABLE_LPC */ @@ -8238,6 +10627,7 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) uint32 credall = 1; #endif uint bcn_timeout = dhd->conf->bcn_timeout; + uint scancache_enab = TRUE; #ifdef ENABLE_BCN_LI_BCN_WAKEUP uint32 bcn_li_bcn = 1; #endif /* ENABLE_BCN_LI_BCN_WAKEUP */ @@ -8251,6 +10641,9 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) char buf[WLC_IOCTL_SMLEN]; char *ptr; uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */ +#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE) + wl_el_tag_params_t *el_tag = NULL; +#endif /* DHD_8021X_DUMP */ #ifdef ROAM_ENABLE uint roamvar = 0; int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, 
WLC_BAND_ALL}; @@ -8261,6 +10654,9 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */ int roam_fullscan_period = 120; #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */ +#ifdef DISABLE_BCNLOSS_ROAM + uint roam_bcnloss_off = 1; +#endif /* DISABLE_BCNLOSS_ROAM */ #else #ifdef DISABLE_BUILTIN_ROAM uint roamvar = 1; @@ -8271,7 +10667,6 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) uint dtim = 1; #endif #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211)) - uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */ struct ether_addr p2p_ea; #endif #ifdef SOFTAP_UAPSD_OFF @@ -8295,14 +10690,26 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #ifdef USE_WL_TXBF uint32 txbf = 1; #endif /* USE_WL_TXBF */ +#ifdef DISABLE_TXBFR + uint32 txbf_bfr_cap = 0; +#endif /* DISABLE_TXBFR */ #if defined(PROP_TXSTATUS) #ifdef USE_WFA_CERT_CONF uint32 proptx = 0; #endif /* USE_WFA_CERT_CONF */ #endif /* PROP_TXSTATUS */ +#if defined(SUPPORT_5G_1024QAM_VHT) + uint32 vht_features = 0; /* init to 0, will be set based on each support */ +#endif +#ifdef DISABLE_11N_PROPRIETARY_RATES + uint32 ht_features = 0; +#endif /* DISABLE_11N_PROPRIETARY_RATES */ #ifdef CUSTOM_PSPRETEND_THR uint32 pspretend_thr = CUSTOM_PSPRETEND_THR; #endif +#ifdef CUSTOM_EVENT_PM_WAKE + uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE; +#endif /* CUSTOM_EVENT_PM_WAKE */ uint32 rsdb_mode = 0; #ifdef ENABLE_TEMP_THROTTLING wl_temp_control_t temp_control; @@ -8310,17 +10717,25 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #ifdef DISABLE_PRUNED_SCAN uint32 scan_features = 0; #endif /* DISABLE_PRUNED_SCAN */ -#ifdef CUSTOM_EVENT_PM_WAKE - uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE; -#endif /* CUSTOM_EVENT_PM_WAKE */ #ifdef PKT_FILTER_SUPPORT dhd_pkt_filter_enable = TRUE; +#ifdef APF + dhd->apf_set = FALSE; +#endif /* APF */ #endif /* PKT_FILTER_SUPPORT */ #ifdef WLTDLS dhd->tdls_enable = FALSE; dhd_tdls_set_mode(dhd, false); #endif /* WLTDLS */ dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM; +#ifdef 
ENABLE_MAX_DTIM_IN_SUSPEND + dhd->max_dtim_enable = TRUE; +#else + dhd->max_dtim_enable = FALSE; +#endif /* ENABLE_MAX_DTIM_IN_SUSPEND */ +#ifdef CUSTOM_SET_OCLOFF + dhd->ocl_off = FALSE; +#endif /* CUSTOM_SET_OCLOFF */ DHD_TRACE(("Enter %s\n", __FUNCTION__)); dhd_conf_set_intiovar(dhd, WLC_SET_BAND, "WLC_SET_BAND", dhd->conf->band, 0, FALSE); @@ -8328,8 +10743,13 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) printf("%s: Set tcpack_sup_mode %d\n", __FUNCTION__, dhd->conf->tcpack_sup_mode); dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode); #endif - dhd->op_mode = 0; + +#if defined(CUSTOM_COUNTRY_CODE) && defined(CUSTOMER_HW2) + /* clear AP flags */ + dhd->dhd_cflags &= ~WLAN_PLAT_AP_FLAG; +#endif /* CUSTOM_COUNTRY_CODE && CUSTOMER_HW2 */ + #ifdef CUSTOMER_HW4_DEBUG if (!dhd_validate_chipid(dhd)) { DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) is mismatched\n", @@ -8342,6 +10762,7 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #endif /* CUSTOMER_HW4_DEBUG */ if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) || (op_mode == DHD_FLAG_MFG_MODE)) { + dhd->op_mode = DHD_FLAG_MFG_MODE; #ifdef DHD_PCIE_RUNTIMEPM /* Disable RuntimePM in mfg mode */ DHD_DISABLE_RUNTIME_PM(dhd); @@ -8406,10 +10827,9 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) { uint32 cap_buf_size = sizeof(dhd->fw_capabilities); memset(dhd->fw_capabilities, 0, cap_buf_size); - bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, cap_buf_size - 1); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities, - (cap_buf_size - 1), FALSE, 0)) < 0) - { + ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, (cap_buf_size - 1), + FALSE); + if (ret < 0) { DHD_ERROR(("%s: Get Capability failed (error=%d)\n", __FUNCTION__, ret)); return 0; @@ -8443,21 +10863,13 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) iovbuf[4] = (unsigned char)(rand_mac >> 8); iovbuf[5] = (unsigned char)(rand_mac >> 16); - bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf)); - ret = dhd_wl_ioctl_cmd(dhd, 
WLC_SET_VAR, buf, sizeof(buf), TRUE, 0); + ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&iovbuf, ETHER_ADDR_LEN, NULL, 0, + TRUE); if (ret < 0) { DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret)); } else memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN); #endif /* SET_RANDOM_MAC_SOFTAP */ -#if !defined(AP) && defined(WL_CFG80211) - /* Turn off MPC in AP mode */ - bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) { - DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret)); - } -#endif #ifdef USE_DYNAMIC_F2_BLKSIZE dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY); #endif /* USE_DYNAMIC_F2_BLKSIZE */ @@ -8465,13 +10877,17 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) dhd_set_ap_powersave(dhd, 0, TRUE); #endif /* SUPPORT_AP_POWERSAVE */ #ifdef SOFTAP_UAPSD_OFF - bcm_mkiovar("wme_apsd", (char *)&wme_apsd, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "wme_apsd", (char *)&wme_apsd, sizeof(wme_apsd), NULL, 0, + TRUE); + if (ret < 0) { DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n", __FUNCTION__, ret)); } #endif /* SOFTAP_UAPSD_OFF */ +#if defined(CUSTOM_COUNTRY_CODE) && defined(CUSTOMER_HW2) + /* set AP flag for specific country code of SOFTAP */ + dhd->dhd_cflags |= WLAN_PLAT_AP_FLAG | WLAN_PLAT_NODFS_FLAG; +#endif /* CUSTOM_COUNTRY_CODE && CUSTOMER_HW2 */ } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) || (op_mode == DHD_FLAG_MFG_MODE)) { #if defined(ARP_OFFLOAD_SUPPORT) @@ -8486,9 +10902,9 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #endif /* USE_DYNAMIC_F2_BLKSIZE */ if (FW_SUPPORTED(dhd, rsdb)) { rsdb_mode = 0; - bcm_mkiovar("rsdb_mode", (char *)&rsdb_mode, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, - iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, 
"rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode), + NULL, 0, TRUE); + if (ret < 0) { DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n", __FUNCTION__, ret)); } @@ -8520,31 +10936,28 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) /* Check if we are enabling p2p */ if (dhd->op_mode & DHD_FLAG_P2P_MODE) { - bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, - iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0, + TRUE); + if (ret < 0) DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret)); - } #if defined(SOFTAP_AND_GC) - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP, - (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) { - DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret)); - } + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP, + (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) { + DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret)); + } #endif memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN); ETHER_SET_LOCALADDR(&p2p_ea); - bcm_mkiovar("p2p_da_override", (char *)&p2p_ea, - ETHER_ADDR_LEN, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, - iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "p2p_da_override", (char *)&p2p_ea, sizeof(p2p_ea), + NULL, 0, TRUE); + if (ret < 0) DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret)); - } else { + else DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n")); - } } #else - (void)concurrent_mode; + (void)concurrent_mode; #endif } #ifdef BCMSDIO @@ -8552,27 +10965,23 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) dhdsdio_func_blocksize(dhd, 2, dhd->conf->sd_f2_blocksize); #endif -#ifdef RSDB_MODE_FROM_FILE +#if defined(RSDB_MODE_FROM_FILE) (void)dhd_rsdb_mode_from_file(dhd); -#endif /* RSDB_MODE_FROM_FILE */ +#endif #ifdef DISABLE_PRUNED_SCAN if (FW_SUPPORTED(dhd, rsdb)) { - memset(iovbuf, 0, sizeof(iovbuf)); - bcm_mkiovar("scan_features", (char 
*)&scan_features, - 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, - iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features, + sizeof(scan_features), iovbuf, sizeof(iovbuf), FALSE); + if (ret < 0) { DHD_ERROR(("%s get scan_features is failed ret=%d\n", __FUNCTION__, ret)); } else { memcpy(&scan_features, iovbuf, 4); scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM; - memset(iovbuf, 0, sizeof(iovbuf)); - bcm_mkiovar("scan_features", (char *)&scan_features, - 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, - iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features, + sizeof(scan_features), NULL, 0, TRUE); + if (ret < 0) { DHD_ERROR(("%s set scan_features is failed ret=%d\n", __FUNCTION__, ret)); } @@ -8582,18 +10991,34 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n", dhd->op_mode, MAC2STRDBG(dhd->mac.octet))); - #if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA) +#ifdef CUSTOMER_HW2 +#if defined(DHD_BLOB_EXISTENCE_CHECK) + if (!dhd->pub.is_blob) +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + { + /* get a ccode and revision for the country code */ +#if defined(CUSTOM_COUNTRY_CODE) + get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev, + &dhd->dhd_cspec, dhd->dhd_cflags); +#else + get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev, + &dhd->dhd_cspec); +#endif /* CUSTOM_COUNTRY_CODE */ + } +#endif /* CUSTOMER_HW2 */ + +#if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA) if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE) dhd->info->rxthread_enabled = FALSE; else dhd->info->rxthread_enabled = TRUE; - #endif +#endif /* Set Country code */ if (dhd->dhd_cspec.ccode[0] != 0) { printf("Set country %s, revision %d\n", dhd->dhd_cspec.ccode, dhd->dhd_cspec.rev); - bcm_mkiovar("country", (char *)&dhd->dhd_cspec, - 
sizeof(wl_country_t), iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + ret = dhd_iovar(dhd, 0, "country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t), + NULL, 0, TRUE); + if (ret < 0) printf("%s: country code setting failed %d\n", __FUNCTION__, ret); } else { dhd_conf_set_country(dhd); @@ -8604,8 +11029,9 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) /* Set Listen Interval */ - bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + ret = dhd_iovar(dhd, 0, "assoc_listen", (char *)&listen_interval, sizeof(listen_interval), + NULL, 0, TRUE); + if (ret < 0) DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret)); #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM) @@ -8615,10 +11041,13 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) } #endif /* USE_WFA_CERT_CONF */ /* Disable built-in roaming to allowed ext supplicant to take care of roaming */ - bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf)); - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, 0, TRUE); #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */ #if defined(ROAM_ENABLE) +#ifdef DISABLE_BCNLOSS_ROAM + dhd_iovar(dhd, 0, "roam_bcnloss_off", (char *)&roam_bcnloss_off, sizeof(roam_bcnloss_off), + NULL, 0, TRUE); +#endif /* DISABLE_BCNLOSS_ROAM */ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger, sizeof(roam_trigger), TRUE, 0)) < 0) DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret)); @@ -8628,18 +11057,20 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta, sizeof(roam_delta), TRUE, 0)) < 0) DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret)); - bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, 4, iovbuf, sizeof(iovbuf)); - if 
((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + ret = dhd_iovar(dhd, 0, "fullroamperiod", (char *)&roam_fullscan_period, + sizeof(roam_fullscan_period), NULL, 0, TRUE); + if (ret < 0) DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret)); #endif /* ROAM_ENABLE */ dhd_conf_set_roam(dhd); #ifdef CUSTOM_EVENT_PM_WAKE - bcm_mkiovar("const_awake_thresh", (char *)&pm_awake_thresh, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "const_awake_thresh", (char *)&pm_awake_thresh, + sizeof(pm_awake_thresh), NULL, 0, TRUE); + if (ret < 0) { DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret)); } -#endif /* CUSTOM_EVENT_PM_WAKE */ +#endif /* CUSTOM_EVENT_PM_WAKE */ #ifdef WLTDLS #ifdef ENABLE_TDLS_AUTO_MODE /* by default TDLS on and auto mode on */ @@ -8652,9 +11083,8 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #ifdef DHD_ENABLE_LPC /* Set lpc 1 */ - bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE); + if (ret < 0) { DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret)); if (ret == BCME_NOTDOWN) { @@ -8663,29 +11093,43 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) (char *)&wl_down, sizeof(wl_down), TRUE, 0); DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc)); - bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf)); - ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE); DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret)); } } #endif /* DHD_ENABLE_LPC */ - dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "lpc", dhd->conf->lpc, 0, FALSE); + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "lpc", dhd->conf->lpc, 0, TRUE); + +#ifdef 
WLADPS +#ifdef WLADPS_SEAK_AP_WAR + dhd->disabled_adps = FALSE; +#endif /* WLADPS_SEAK_AP_WAR */ + if (dhd->op_mode & DHD_FLAG_STA_MODE) { +#ifdef ADPS_MODE_FROM_FILE + dhd_adps_mode_from_file(dhd); +#else + if ((ret = dhd_enable_adps(dhd, ADPS_ENABLE)) != BCME_OK) { + DHD_ERROR(("%s dhd_enable_adps failed %d\n", + __FUNCTION__, ret)); + } +#endif /* ADPS_MODE_FROM_FILE */ + } +#endif /* WLADPS */ /* Set PowerSave mode */ if (dhd->conf->pm >= 0) power_mode = dhd->conf->pm; - dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0); + (void) dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0); dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "pm2_sleep_ret", dhd->conf->pm2_sleep_ret, 0, FALSE); #if defined(BCMSDIO) /* Match Host and Dongle rx alignment */ - bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf)); - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + dhd_iovar(dhd, 0, "bus:txglomalign", (char *)&dongle_align, sizeof(dongle_align), + NULL, 0, TRUE); #if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL) /* enable credall to reduce the chance of no bus credit happened. 
*/ - bcm_mkiovar("bus:credall", (char *)&credall, 4, iovbuf, sizeof(iovbuf)); - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + dhd_iovar(dhd, 0, "bus:credall", (char *)&credall, sizeof(credall), NULL, 0, TRUE); #endif #ifdef USE_WFA_CERT_CONF @@ -8695,23 +11139,19 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #endif /* USE_WFA_CERT_CONF */ if (glom != DEFAULT_GLOM_VALUE) { DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom)); - bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf)); - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + dhd_iovar(dhd, 0, "bus:txglom", (char *)&glom, sizeof(glom), NULL, 0, TRUE); } #endif /* defined(BCMSDIO) */ /* Setup timeout if Beacons are lost and roam is off to report link down */ - bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf)); - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, sizeof(bcn_timeout), NULL, 0, TRUE); + /* Setup assoc_retry_max count to reconnect target AP in dongle */ - bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf)); - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + dhd_iovar(dhd, 0, "assoc_retry_max", (char *)&retry_max, sizeof(retry_max), NULL, 0, TRUE); + #if defined(AP) && !defined(WLP2P) - /* Turn off MPC in AP mode */ - bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf)); - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); - bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf)); - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0, TRUE); + #endif /* defined(AP) && !defined(WLP2P) */ /* 0:HT20 in ALL, 1:HT40 in ALL, 2: HT20 in 2G HT40 in 5G */ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "mimo_bw_cap", dhd->conf->mimo_bw_cap, 0, TRUE); @@ -8763,14 +11203,27 @@ 
dhd_preinit_ioctls(dhd_pub_t *dhd) #endif /* defined(KEEP_ALIVE) */ #ifdef USE_WL_TXBF - bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) { - DHD_ERROR(("%s Set txbf returned (%d)\n", __FUNCTION__, ret)); - } + ret = dhd_iovar(dhd, 0, "txbf", (char *)&txbf, sizeof(txbf), NULL, 0, TRUE); + if (ret < 0) + DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret)); + #endif /* USE_WL_TXBF */ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "txbf", dhd->conf->txbf, 0, FALSE); + ret = dhd_iovar(dhd, 0, "scancache", (char *)&scancache_enab, sizeof(scancache_enab), NULL, + 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Set scancache failed %d\n", __FUNCTION__, ret)); + } + +#ifdef DISABLE_TXBFR + ret = dhd_iovar(dhd, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap, sizeof(txbf_bfr_cap), NULL, + 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Clear txbf_bfr_cap failed %d\n", __FUNCTION__, ret)); + } +#endif /* DISABLE_TXBFR */ + #ifdef USE_WFA_CERT_CONF #ifdef USE_WL_FRAMEBURST if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) { @@ -8778,7 +11231,7 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) } #endif /* USE_WL_FRAMEBURST */ #ifdef DISABLE_FRAMEBURST_VSDB - g_frameburst = frameburst; + g_frameburst = frameburst; #endif /* DISABLE_FRAMEBURST_VSDB */ #endif /* USE_WFA_CERT_CONF */ #ifdef DISABLE_WL_FRAMEBURST_SOFTAP @@ -8793,21 +11246,6 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret)); } dhd_conf_set_intiovar(dhd, WLC_SET_FAKEFRAG, "WLC_SET_FAKEFRAG", dhd->conf->frameburst, 0, FALSE); -#if defined(CUSTOM_AMPDU_BA_WSIZE) - /* Set ampdu ba wsize to 64 or 16 */ -#ifdef CUSTOM_AMPDU_BA_WSIZE - ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE; -#endif - if (ampdu_ba_wsize != 0) { - bcm_mkiovar("ampdu_ba_wsize", (char *)&du_ba_wsize, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), 
TRUE, 0)) < 0) { - DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n", - __FUNCTION__, ampdu_ba_wsize, ret)); - } - } -#endif - dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_ba_wsize", dhd->conf->ampdu_ba_wsize, 1, FALSE); iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL); if (iov_buf == NULL) { @@ -8815,26 +11253,44 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) ret = BCME_NOMEM; goto done; } + + +#if defined(CUSTOM_AMPDU_BA_WSIZE) + /* Set ampdu ba wsize to 64 or 16 */ +#ifdef CUSTOM_AMPDU_BA_WSIZE + ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE; +#endif + if (ampdu_ba_wsize != 0) { + ret = dhd_iovar(dhd, 0, "ampdu_ba_wsize", (char *)&ampdu_ba_wsize, + sizeof(ampdu_ba_wsize), NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n", + __FUNCTION__, ampdu_ba_wsize, ret)); + } + } +#endif + dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_ba_wsize", dhd->conf->ampdu_ba_wsize, 1, FALSE); + #ifdef ENABLE_TEMP_THROTTLING if (dhd->op_mode & DHD_FLAG_STA_MODE) { memset(&temp_control, 0, sizeof(temp_control)); temp_control.enable = 1; temp_control.control_bit = TEMP_THROTTLE_CONTROL_BIT; - bcm_mkiovar("temp_throttle_control", (char *)&temp_control, - sizeof(wl_temp_control_t), iov_buf, WLC_IOCTL_SMLEN); - ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iov_buf, WLC_IOCTL_SMLEN, TRUE, 0); + ret = dhd_iovar(dhd, 0, "temp_throttle_control", (char *)&temp_control, + sizeof(temp_control), NULL, 0, TRUE); if (ret < 0) { DHD_ERROR(("%s Set temp_throttle_control to %d failed \n", __FUNCTION__, ret)); } } #endif /* ENABLE_TEMP_THROTTLING */ + #if defined(CUSTOM_AMPDU_MPDU) ampdu_mpdu = CUSTOM_AMPDU_MPDU; if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) { - bcm_mkiovar("ampdu_mpdu", (char *)&ampdu_mpdu, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "ampdu_mpdu", (char *)&ampdu_mpdu, sizeof(ampdu_mpdu), + NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s Set ampdu_mpdu to %d
failed %d\n", __FUNCTION__, CUSTOM_AMPDU_MPDU, ret)); } @@ -8844,9 +11300,9 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #if defined(CUSTOM_AMPDU_RELEASE) ampdu_release = CUSTOM_AMPDU_RELEASE; if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) { - bcm_mkiovar("ampdu_release", (char *)&ampdu_release, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "ampdu_release", (char *)&ampdu_release, + sizeof(ampdu_release), NULL, 0, TRUE); + if (ret < 0) { DHD_ERROR(("%s Set ampdu_release to %d failed %d\n", __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret)); } @@ -8856,8 +11312,8 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #if defined(CUSTOM_AMSDU_AGGSF) amsdu_aggsf = CUSTOM_AMSDU_AGGSF; if (amsdu_aggsf != 0) { - bcm_mkiovar("amsdu_aggsf", (char *)&amsdu_aggsf, 4, iovbuf, sizeof(iovbuf)); - ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + ret = dhd_iovar(dhd, 0, "amsdu_aggsf", (char *)&amsdu_aggsf, sizeof(amsdu_aggsf), + NULL, 0, TRUE); if (ret < 0) { DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n", __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret)); @@ -8865,26 +11321,70 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) } #endif /* CUSTOM_AMSDU_AGGSF */ +#if defined(SUPPORT_5G_1024QAM_VHT) +#ifdef SUPPORT_5G_1024QAM_VHT + if (dhd_get_chipid(dhd) == BCM4361_CHIP_ID) { + vht_features |= 0x6; /* 5G 1024 QAM support */ + } +#endif /* SUPPORT_5G_1024QAM_VHT */ + if (vht_features) { + ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features), + NULL, 0, TRUE); + if (ret < 0) { + DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret)); + + if (ret == BCME_NOTDOWN) { + uint wl_down = 1; + ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, + (char *)&wl_down, sizeof(wl_down), TRUE, 0); + DHD_ERROR(("%s vht_features fail WL_DOWN : %d," + " vht_features = 0x%x\n", + __FUNCTION__, ret, vht_features)); + + ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, +
sizeof(vht_features), NULL, 0, TRUE); + DHD_ERROR(("%s vht_features set. ret --> %d\n", __FUNCTION__, ret)); + } + } + } +#endif +#ifdef DISABLE_11N_PROPRIETARY_RATES + ret = dhd_iovar(dhd, 0, "ht_features", (char *)&ht_features, sizeof(ht_features), NULL, 0, + TRUE); + if (ret < 0) { + DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__, ret)); + } +#endif /* DISABLE_11N_PROPRIETARY_RATES */ #ifdef CUSTOM_PSPRETEND_THR /* Turn off MPC in AP mode */ - bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4, - iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "pspretend_threshold", (char *)&pspretend_thr, + sizeof(pspretend_thr), NULL, 0, TRUE); + if (ret < 0) { DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n", __FUNCTION__, ret)); } #endif - bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, - sizeof(iovbuf), TRUE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "buf_key_b4_m4", (char *)&buf_key_b4_m4, sizeof(buf_key_b4_m4), + NULL, 0, TRUE); + if (ret < 0) { DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret)); } +#ifdef SUPPORT_SET_CAC + bcm_mkiovar("cac", (char *)&cac, sizeof(cac), iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Failed to set cac to %d, %d\n", __FUNCTION__, cac, ret)); + } +#endif /* SUPPORT_SET_CAC */ +#ifdef DHD_ULP + /* Get the required details from dongle during preinit ioctl */ + dhd_ulp_preinit(dhd); +#endif /* DHD_ULP */ /* Read event_msgs mask */ - bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, + sizeof(iovbuf), FALSE); + if (ret < 0) { DHD_ERROR(("%s 
read Event mask failed %d\n", __FUNCTION__, ret)); goto done; } @@ -8904,6 +11404,7 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) setbit(eventmask, WLC_E_DISASSOC_IND); setbit(eventmask, WLC_E_DISASSOC); setbit(eventmask, WLC_E_JOIN); + setbit(eventmask, WLC_E_BSSID); setbit(eventmask, WLC_E_START); setbit(eventmask, WLC_E_ASSOC_IND); setbit(eventmask, WLC_E_PSK_SUP); @@ -8911,6 +11412,9 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) setbit(eventmask, WLC_E_MIC_ERROR); setbit(eventmask, WLC_E_ASSOC_REQ_IE); setbit(eventmask, WLC_E_ASSOC_RESP_IE); +#ifdef LIMIT_BORROW + setbit(eventmask, WLC_E_ALLOW_CREDIT_BORROW); +#endif #ifndef WL_CFG80211 setbit(eventmask, WLC_E_PMKID_CACHE); setbit(eventmask, WLC_E_TXFAIL); @@ -8931,18 +11435,20 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #endif /* PNO_SUPPORT */ /* enable dongle roaming event */ setbit(eventmask, WLC_E_ROAM); - setbit(eventmask, WLC_E_BSSID); #ifdef WLTDLS setbit(eventmask, WLC_E_TDLS_PEER_EVENT); #endif /* WLTDLS */ #ifdef WL_ESCAN setbit(eventmask, WLC_E_ESCAN_RESULT); #endif /* WL_ESCAN */ +#ifdef RTT_SUPPORT + setbit(eventmask, WLC_E_PROXD); +#endif /* RTT_SUPPORT */ #ifdef WL_CFG80211 setbit(eventmask, WLC_E_ESCAN_RESULT); setbit(eventmask, WLC_E_AP_STARTED); + setbit(eventmask, WLC_E_ACTION_FRAME_RX); if (dhd->op_mode & DHD_FLAG_P2P_MODE) { - setbit(eventmask, WLC_E_ACTION_FRAME_RX); setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE); } #endif /* WL_CFG80211 */ @@ -8960,22 +11466,29 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */ setbit(eventmask, WLC_E_CSA_COMPLETE_IND); -#ifdef DHD_LOSSLESS_ROAMING - setbit(eventmask, WLC_E_ROAM_PREP); +#ifdef DHD_WMF + setbit(eventmask, WLC_E_PSTA_PRIMARY_INTF_IND); #endif #ifdef CUSTOM_EVENT_PM_WAKE setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT); -#endif /* CUSTOM_EVENT_PM_WAKE */ +#endif /* CUSTOM_EVENT_PM_WAKE */ +#ifdef DHD_LOSSLESS_ROAMING + setbit(eventmask, WLC_E_ROAM_PREP); +#endif #if defined(PCIE_FULL_DONGLE) && 
defined(DHD_LOSSLESS_ROAMING) dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP); #endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */ +#if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) + dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP); +#endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */ + #ifdef SUSPEND_EVENT bcopy(eventmask, dhd->conf->resume_eventmask, WL_EVENTING_MASK_LEN); #endif /* Write updated Event mask */ - bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, NULL, 0, TRUE); + if (ret < 0) { DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret)); goto done; } @@ -8993,27 +11506,41 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY; /* Read event_msgs_ext mask */ - bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, WLC_IOCTL_SMLEN); - ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, WLC_IOCTL_SMLEN, FALSE, 0); + ret2 = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, + WLC_IOCTL_SMLEN, FALSE); + if (ret2 == 0) { /* event_msgs_ext must be supported */ bcopy(iov_buf, eventmask_msg, msglen); +#ifdef RSSI_MONITOR_SUPPORT + setbit(eventmask_msg->mask, WLC_E_RSSI_LQM); +#endif /* RSSI_MONITOR_SUPPORT */ #ifdef GSCAN_SUPPORT setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT); setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE); - setbit(eventmask_msg->mask, WLC_E_PFN_SWC); + setbit(eventmask_msg->mask, WLC_E_PFN_SSID_EXT); + setbit(eventmask_msg->mask, WLC_E_ROAM_EXP_EVENT); #endif /* GSCAN_SUPPORT */ + setbit(eventmask_msg->mask, WLC_E_RSSI_LQM); #ifdef BT_WIFI_HANDOVER setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ); #endif /* BT_WIFI_HANDOVER */ +#ifdef DBG_PKT_MON + setbit(eventmask_msg->mask, WLC_E_ROAM_PREP); +#endif /* 
DBG_PKT_MON */ +#ifdef DHD_ULP + setbit(eventmask_msg->mask, WLC_E_ULP); +#endif +#ifdef ENABLE_TEMP_THROTTLING + setbit(eventmask_msg->mask, WLC_E_TEMP_THROTTLE); +#endif /* ENABLE_TEMP_THROTTLING */ /* Write updated Event mask */ eventmask_msg->ver = EVENTMSGS_VER; eventmask_msg->command = EVENTMSGS_SET_MASK; eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY; - bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, - msglen, iov_buf, WLC_IOCTL_SMLEN); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, - iov_buf, WLC_IOCTL_SMLEN, TRUE, 0)) < 0) { + ret = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, NULL, 0, + TRUE); + if (ret < 0) { DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret)); goto done; } @@ -9027,6 +11554,23 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) goto done; } +#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE) + /* Enabling event log trace for EAP events */ + el_tag = (wl_el_tag_params_t *)kmalloc(sizeof(wl_el_tag_params_t), GFP_KERNEL); + if (el_tag == NULL) { + DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", + (int)sizeof(wl_el_tag_params_t))); + ret = BCME_NOMEM; + goto done; + } + el_tag->tag = EVENT_LOG_TAG_4WAYHANDSHAKE; + el_tag->set = 1; + el_tag->flags = EVENT_LOG_TAG_FLAG_LOG; + bcm_mkiovar("event_log_tag_control", (char *)el_tag, + sizeof(*el_tag), iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* DHD_8021X_DUMP */ + dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time, sizeof(scan_assoc_time), TRUE, 0); dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time, @@ -9056,8 +11600,14 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) if (dhd_master_mode) { dhd->pktfilter_count = 6; dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL; - dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL; - dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL; + if (!FW_SUPPORTED(dhd, pf6)) { + dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = 
NULL; + dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL; + } else { + /* Immediately pkt filter TYPE 6 Discard IPv4/IPv6 Multicast Packet */ + dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = DISCARD_IPV4_MCAST; + dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = DISCARD_IPV6_MCAST; + } /* apply APP pktfilter */ dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806"; @@ -9067,6 +11617,14 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */ dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL; + dhd->pktfilter[DHD_BROADCAST_ARP_FILTER_NUM] = NULL; + if (FW_SUPPORTED(dhd, pf6)) { + /* Immediately pkt filter TYPE 6 Dicard Broadcast IP packet */ + dhd->pktfilter[DHD_IP4BCAST_DROP_FILTER_NUM] = + "107 1 6 IP4_H:16 0xf0 !0xe0 IP4_H:19 0xff 0xff"; + dhd->pktfilter_count = 8; + } + #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER dhd->pktfilter_count = 4; /* Setup filter to block broadcast and NAT Keepalive packets */ @@ -9090,35 +11648,18 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) dhd_set_packet_filter(dhd); #endif /* PKT_FILTER_SUPPORT */ #ifdef DISABLE_11N - bcm_mkiovar("nmode", (char *)&nmode, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + ret = dhd_iovar(dhd, 0, "nmode", (char *)&nmode, sizeof(nmode), NULL, 0, TRUE); + if (ret < 0) DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret)); #endif /* DISABLE_11N */ #ifdef ENABLE_BCN_LI_BCN_WAKEUP - bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn, 4, iovbuf, sizeof(iovbuf)); - dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, sizeof(bcn_li_bcn), NULL, 0, TRUE); #endif /* ENABLE_BCN_LI_BCN_WAKEUP */ - /* query for 'ver' to get version info from firmware */ - memset(buf, 0, sizeof(buf)); - ptr = buf; - bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0) - 
DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret)); - else { - bcmstrtok(&ptr, "\n", 0); - /* Print fw version info */ - DHD_ERROR(("Firmware version = %s\n", buf)); - strncpy(fw_version, buf, FW_VER_STR_LEN); - dhd_set_version_info(dhd, buf); -#ifdef WRITE_WLANINFO - sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path); -#endif /* WRITE_WLANINFO */ - } /* query for 'clmver' to get clm version info from firmware */ memset(buf, 0, sizeof(buf)); - bcm_mkiovar("clmver", (char *)&buf, 4, buf, sizeof(buf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0) + ret = dhd_iovar(dhd, 0, "clmver", NULL, 0, buf, sizeof(buf), FALSE); + if (ret < 0) DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret)); else { char *clmver_temp_buf = NULL; @@ -9133,11 +11674,31 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) memset(clm_version, 0, CLM_VER_STR_LEN); strncpy(clm_version, clmver_temp_buf, MIN(strlen(clmver_temp_buf), CLM_VER_STR_LEN - 1)); - DHD_ERROR((" clm = %s\n", clm_version)); } } } + /* query for 'ver' to get version info from firmware */ + memset(buf, 0, sizeof(buf)); + ptr = buf; + ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE); + if (ret < 0) + DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret)); + else { + bcmstrtok(&ptr, "\n", 0); + strncpy(fw_version, buf, FW_VER_STR_LEN); + fw_version[FW_VER_STR_LEN-1] = '\0'; +#if defined(BCMSDIO) || defined(BCMPCIE) + dhd_set_version_info(dhd, buf); +#endif /* BCMSDIO || BCMPCIE */ +#ifdef WRITE_WLANINFO + sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path, clm_version); +#endif /* WRITE_WLANINFO */ + } +#ifdef GEN_SOFTAP_INFO_FILE + sec_save_softap_info(); +#endif /* GEN_SOFTAP_INFO_FILE */ + #if defined(BCMSDIO) dhd_txglom_enable(dhd, dhd->conf->bus_rxglom); // terence 20151210: set bus:txglom after dhd_txglom_enable since it's possible changed in dhd_conf_set_txglom_params @@ -9166,17 +11727,20 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) hostreorder = 0; } +#if defined(PROP_TXSTATUS) 
#ifdef USE_WFA_CERT_CONF if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) { DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx)); wlfc_enable = proptx; } #endif /* USE_WFA_CERT_CONF */ +#endif /* PROP_TXSTATUS */ #ifndef DISABLE_11N ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0); - bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, iovbuf, sizeof(iovbuf)); - if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder, sizeof(hostreorder), + NULL, 0, TRUE); + if (ret2 < 0) { DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2)); if (ret2 != BCME_UNSUPPORTED) ret = ret2; @@ -9188,9 +11752,8 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n", __FUNCTION__, ret2, hostreorder)); - bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, - iovbuf, sizeof(iovbuf)); - ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder, + sizeof(hostreorder), NULL, 0, TRUE); DHD_ERROR(("%s wl ampdu_hostreorder. 
ret --> %d\n", __FUNCTION__, ret2)); if (ret2 != BCME_UNSUPPORTED) ret = ret2; @@ -9217,12 +11780,13 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #endif /* PROP_TXSTATUS */ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", dhd->conf->ampdu_hostreorder, 0, TRUE); #endif /* BCMSDIO || BCMBUS */ -#ifdef PCIE_FULL_DONGLE +#ifndef PCIE_FULL_DONGLE /* For FD we need all the packets at DHD to handle intra-BSS forwarding */ if (FW_SUPPORTED(dhd, ap)) { wl_ap_isolate = AP_ISOLATE_SENDUP_ALL; - bcm_mkiovar("ap_isolate", (char *)&wl_ap_isolate, 4, iovbuf, sizeof(iovbuf)); - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + ret = dhd_iovar(dhd, 0, "ap_isolate", (char *)&wl_ap_isolate, sizeof(wl_ap_isolate), + NULL, 0, TRUE); + if (ret < 0) DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret)); } #endif /* PCIE_FULL_DONGLE */ @@ -9231,6 +11795,14 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) dhd_pno_init(dhd); } #endif +#ifdef RTT_SUPPORT + if (!dhd->rtt_state) { + ret = dhd_rtt_init(dhd); + if (ret < 0) { + DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__)); + } + } +#endif #ifdef WL11U dhd_interworking_enable(dhd); #endif /* WL11U */ @@ -9239,54 +11811,225 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) #endif #ifdef SUPPORT_SENSORHUB - bcm_mkiovar("shub", (char *)&shub_enable, 4, iovbuf, sizeof(iovbuf)); - if ((dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), - FALSE, 0)) < 0) { + DHD_ERROR(("%s: SensorHub enabled %d\n", + __FUNCTION__, dhd->info->shub_enable)); + ret2 = dhd_iovar(dhd, 0, "shub", NULL, 0, + (char *)&shub_ctl, sizeof(shub_ctl), FALSE); + if (ret2 < 0) { DHD_ERROR(("%s failed to get shub hub enable information %d\n", - __FUNCTION__, ret)); + __FUNCTION__, ret2)); dhd->info->shub_enable = 0; } else { - memcpy(&shub_enable, iovbuf, sizeof(uint32)); - dhd->info->shub_enable = shub_enable; + dhd->info->shub_enable = shub_ctl.enable; DHD_ERROR(("%s: checking sensorhub enable %d\n", __FUNCTION__, dhd->info->shub_enable)); } +#else 
+ DHD_ERROR(("%s: SensorHub diabled %d\n", + __FUNCTION__, dhd->info->shub_enable)); + dhd->info->shub_enable = FALSE; + shub_ctl.enable = FALSE; + ret2 = dhd_iovar(dhd, 0, "shub", (char *)&shub_ctl, sizeof(shub_ctl), + NULL, 0, TRUE); + if (ret2 < 0) { + DHD_ERROR(("%s failed to set ShubHub disable\n", + __FUNCTION__)); + } #endif /* SUPPORT_SENSORHUB */ + + +#ifdef NDO_CONFIG_SUPPORT + dhd->ndo_enable = FALSE; + dhd->ndo_host_ip_overflow = FALSE; + dhd->ndo_max_host_ip = NDO_MAX_HOST_IP_ENTRIES; +#endif /* NDO_CONFIG_SUPPORT */ + + /* ND offload version supported */ + dhd->ndo_version = dhd_ndo_get_version(dhd); + if (dhd->ndo_version > 0) { + DHD_INFO(("%s: ndo version %d\n", __FUNCTION__, dhd->ndo_version)); + +#ifdef NDO_CONFIG_SUPPORT + /* enable Unsolicited NA filter */ + ret = dhd_ndo_unsolicited_na_filter_enable(dhd, 1); + if (ret < 0) { + DHD_ERROR(("%s failed to enable Unsolicited NA filter\n", __FUNCTION__)); + } +#endif /* NDO_CONFIG_SUPPORT */ + } + + /* check dongle supports wbtext or not */ + dhd->wbtext_support = FALSE; + if (dhd_wl_ioctl_get_intiovar(dhd, "wnm_bsstrans_resp", &wnm_bsstrans_resp, + WLC_GET_VAR, FALSE, 0) != BCME_OK) { + DHD_ERROR(("failed to get wnm_bsstrans_resp\n")); + } + if (wnm_bsstrans_resp == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) { + dhd->wbtext_support = TRUE; + } +#ifndef WBTEXT + /* driver can turn off wbtext feature through makefile */ + if (dhd->wbtext_support) { + if (dhd_wl_ioctl_set_intiovar(dhd, "wnm_bsstrans_resp", + WL_BSSTRANS_POLICY_ROAM_ALWAYS, + WLC_SET_VAR, FALSE, 0) != BCME_OK) { + DHD_ERROR(("failed to disable WBTEXT\n")); + } + } +#endif /* !WBTEXT */ + + /* WNM capabilities */ + wnm_cap = 0 +#ifdef WL11U + | WL_WNM_BSSTRANS | WL_WNM_NOTIF +#endif +#ifdef WBTEXT + | WL_WNM_BSSTRANS | WL_WNM_MAXIDLE +#endif + ; + if (dhd_iovar(dhd, 0, "wnm", (char *)&wnm_cap, sizeof(wnm_cap), NULL, 0, TRUE) < 0) { + DHD_ERROR(("failed to set WNM capabilities\n")); + } + done: if (eventmask_msg) kfree(eventmask_msg); if 
(iov_buf) kfree(iov_buf); - +#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE) + if (el_tag) + kfree(el_tag); +#endif /* DHD_8021X_DUMP */ return ret; } int -dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set) +dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len, char *res_buf, + uint res_len, int set) { - char buf[strlen(name) + 1 + cmd_len]; - int len = sizeof(buf); + char *buf = NULL; + int input_len; wl_ioctl_t ioc; int ret; - len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len); + if (res_len > WLC_IOCTL_MAXLEN || param_len > WLC_IOCTL_MAXLEN) + return BCME_BADARG; + + input_len = strlen(name) + 1 + param_len; + if (input_len > WLC_IOCTL_MAXLEN) + return BCME_BADARG; + + buf = NULL; + if (set) { + if (res_buf || res_len != 0) { + DHD_ERROR(("%s: SET wrong arguemnet\n", __FUNCTION__)); + ret = BCME_BADARG; + goto exit; + } + buf = kzalloc(input_len, GFP_KERNEL); + if (!buf) { + DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__)); + ret = BCME_NOMEM; + goto exit; + } + ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len); + if (!ret) { + ret = BCME_NOMEM; + goto exit; + } + + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = input_len; + ioc.set = set; + + ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len); + } else { + if (!res_buf || !res_len) { + DHD_ERROR(("%s: GET failed. 
resp_buf NULL or length 0.\n", __FUNCTION__)); + ret = BCME_BADARG; + goto exit; + } + + if (res_len < input_len) { + DHD_INFO(("%s: res_len(%d) < input_len(%d)\n", __FUNCTION__, + res_len, input_len)); + buf = kzalloc(input_len, GFP_KERNEL); + if (!buf) { + DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__)); + ret = BCME_NOMEM; + goto exit; + } + ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len); + if (!ret) { + ret = BCME_NOMEM; + goto exit; + } + + ioc.cmd = WLC_GET_VAR; + ioc.buf = buf; + ioc.len = input_len; + ioc.set = set; + + ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len); + + if (ret == BCME_OK) { + memcpy(res_buf, buf, res_len); + } + } else { + memset(res_buf, 0, res_len); + ret = bcm_mkiovar(name, param_buf, param_len, res_buf, res_len); + if (!ret) { + ret = BCME_NOMEM; + goto exit; + } + + ioc.cmd = WLC_GET_VAR; + ioc.buf = res_buf; + ioc.len = res_len; + ioc.set = set; + + ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len); + } + } +exit: + kfree(buf); + return ret; +} + +int +dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, + uint cmd_len, char **resptr, uint resp_len) +{ + int len = resp_len; + int ret; + char *buf = *resptr; + wl_ioctl_t ioc; + if (resp_len > WLC_IOCTL_MAXLEN) + return BCME_BADARG; + + memset(buf, 0, resp_len); + + ret = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len); + if (ret == 0) { + return BCME_BUFTOOSHORT; + } memset(&ioc, 0, sizeof(ioc)); - ioc.cmd = set? 
WLC_SET_VAR : WLC_GET_VAR; + ioc.cmd = WLC_GET_VAR; ioc.buf = buf; ioc.len = len; - ioc.set = set; + ioc.set = 0; ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len); - if (!set && ret >= 0) - memcpy(cmd_buf, buf, cmd_len); return ret; } + int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx) { struct dhd_info *dhd = dhdp->info; @@ -9409,10 +12152,10 @@ static int dhd_inetaddr_notifier_call(struct notifier_block *this, if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev) break; } - if (idx < DHD_MAX_IFS) { + if (idx < DHD_MAX_IFS) DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net, dhd->iflist[idx]->name, dhd->iflist[idx]->idx)); - } else { + else { DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label)); idx = 0; } @@ -9447,11 +12190,15 @@ static int dhd_inetaddr_notifier_call(struct notifier_block *this, #ifdef AOE_IP_ALIAS_SUPPORT DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n", __FUNCTION__)); - aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx); -#else - dhd_aoe_hostip_clr(&dhd->pub, idx); - dhd_aoe_arp_clr(&dhd->pub, idx); + if ((dhd_pub->op_mode & DHD_FLAG_HOSTAP_MODE) || + (ifa->ifa_dev->dev != dhd_linux_get_primary_netdev(dhd_pub))) { + aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx); + } else #endif /* AOE_IP_ALIAS_SUPPORT */ + { + dhd_aoe_hostip_clr(&dhd->pub, idx); + dhd_aoe_arp_clr(&dhd->pub, idx); + } break; default: @@ -9469,65 +12216,92 @@ static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event) { struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data; - dhd_pub_t *pub = &((dhd_info_t *)dhd_info)->pub; - int ret; + dhd_info_t *dhd = (dhd_info_t *)dhd_info; + dhd_pub_t *dhdp; + int ret; + + if (!dhd) { + DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__)); + goto done; + } + dhdp = &dhd->pub; if (event != DHD_WQ_WORK_IPV6_NDO) { - DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); - return; + DHD_ERROR(("%s: unexpected 
event\n", __FUNCTION__)); + goto done; } if (!ndo_work) { - DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__)); - return; - } - - if (!pub) { - DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__)); - return; - } - - if (ndo_work->if_idx) { - DHD_ERROR(("%s: idx %d \n", __FUNCTION__, ndo_work->if_idx)); + DHD_ERROR(("%s: ipv6 work info is not initialized\n", __FUNCTION__)); return; } switch (ndo_work->event) { case NETDEV_UP: - DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n ", __FUNCTION__)); - ret = dhd_ndo_enable(pub, TRUE); +#ifndef NDO_CONFIG_SUPPORT + DHD_TRACE(("%s: Enable NDO \n ", __FUNCTION__)); + ret = dhd_ndo_enable(dhdp, TRUE); if (ret < 0) { DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret)); } - - ret = dhd_ndo_add_ip(pub, &ndo_work->ipv6_addr[0], ndo_work->if_idx); +#endif /* !NDO_CONFIG_SUPPORT */ + DHD_TRACE(("%s: Add a host ip for NDO\n", __FUNCTION__)); + if (dhdp->ndo_version > 0) { + /* inet6 addr notifier called only for unicast address */ + ret = dhd_ndo_add_ip_with_type(dhdp, &ndo_work->ipv6_addr[0], + WL_ND_IPV6_ADDR_TYPE_UNICAST, ndo_work->if_idx); + } else { + ret = dhd_ndo_add_ip(dhdp, &ndo_work->ipv6_addr[0], + ndo_work->if_idx); + } if (ret < 0) { - DHD_ERROR(("%s: Adding host ip for NDO failed %d\n", + DHD_ERROR(("%s: Adding a host ip for NDO failed %d\n", __FUNCTION__, ret)); } break; case NETDEV_DOWN: - DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__)); - ret = dhd_ndo_remove_ip(pub, ndo_work->if_idx); + if (dhdp->ndo_version > 0) { + DHD_TRACE(("%s: Remove a host ip for NDO\n", __FUNCTION__)); + ret = dhd_ndo_remove_ip_by_addr(dhdp, + &ndo_work->ipv6_addr[0], ndo_work->if_idx); + } else { + DHD_TRACE(("%s: Clear host ip table for NDO \n", __FUNCTION__)); + ret = dhd_ndo_remove_ip(dhdp, ndo_work->if_idx); + } if (ret < 0) { DHD_ERROR(("%s: Removing host ip for NDO failed %d\n", __FUNCTION__, ret)); goto done; } - - ret = dhd_ndo_enable(pub, FALSE); +#ifdef NDO_CONFIG_SUPPORT + if 
(dhdp->ndo_host_ip_overflow) { + ret = dhd_dev_ndo_update_inet6addr( + dhd_idx2net(dhdp, ndo_work->if_idx)); + if ((ret < 0) && (ret != BCME_NORESOURCE)) { + DHD_ERROR(("%s: Updating host ip for NDO failed %d\n", + __FUNCTION__, ret)); + goto done; + } + } +#else /* !NDO_CONFIG_SUPPORT */ + DHD_TRACE(("%s: Disable NDO\n ", __FUNCTION__)); + ret = dhd_ndo_enable(dhdp, FALSE); if (ret < 0) { DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret)); goto done; } +#endif /* NDO_CONFIG_SUPPORT */ break; + default: DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__)); break; } done: /* free ndo_work. alloced while scheduling the work */ - kfree(ndo_work); + if (ndo_work) { + kfree(ndo_work); + } return; } @@ -9537,16 +12311,13 @@ done: * is assigned with ipv6 address. * Handles only primary interface */ -static int dhd_inet6addr_notifier_call(struct notifier_block *this, - unsigned long event, - void *ptr) +int dhd_inet6addr_notifier_call(struct notifier_block *this, unsigned long event, void *ptr) { dhd_info_t *dhd; - dhd_pub_t *dhd_pub; + dhd_pub_t *dhdp; struct inet6_ifaddr *inet6_ifa = ptr; - struct in6_addr *ipv6_addr = &inet6_ifa->addr; struct ipv6_work_info_t *ndo_info; - int idx = 0; /* REVISIT */ + int idx; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) /* Filter notifications meant for non Broadcom devices */ @@ -9556,15 +12327,21 @@ static int dhd_inet6addr_notifier_call(struct notifier_block *this, #endif /* LINUX_VERSION_CODE */ dhd = DHD_DEV_INFO(inet6_ifa->idev->dev); - if (!dhd) + if (!dhd) { return NOTIFY_DONE; + } + dhdp = &dhd->pub; - if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev) + /* Supports only primary interface */ + idx = dhd_net2idx(dhd, inet6_ifa->idev->dev); + if (idx != 0) { return NOTIFY_DONE; - dhd_pub = &dhd->pub; + } - if (!FW_SUPPORTED(dhd_pub, ndoe)) + /* FW capability */ + if (!FW_SUPPORTED(dhdp, ndoe)) { return NOTIFY_DONE; + } ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct 
ipv6_work_info_t), GFP_ATOMIC); if (!ndo_info) { @@ -9572,13 +12349,14 @@ static int dhd_inet6addr_notifier_call(struct notifier_block *this, return NOTIFY_DONE; } + /* fill up ndo_info */ ndo_info->event = event; ndo_info->if_idx = idx; - memcpy(&ndo_info->ipv6_addr[0], ipv6_addr, IPV6_ADDR_LEN); + memcpy(ndo_info->ipv6_addr, &inet6_ifa->addr, IPV6_ADDR_LEN); /* defer the work to thread as it may block kernel */ dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO, - dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW); + dhd_inet6_work_handler, DHD_WQ_WORK_PRIORITY_LOW); return NOTIFY_DONE; } #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ @@ -9594,6 +12372,11 @@ dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock) DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx)); + if (dhd == NULL || dhd->iflist[ifidx] == NULL) { + DHD_ERROR(("%s: Invalid Interface\n", __FUNCTION__)); + return BCME_ERROR; + } + ASSERT(dhd && dhd->iflist[ifidx]); ifp = dhd->iflist[ifidx]; net = ifp->net; @@ -9653,7 +12436,7 @@ dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock) net->get_wireless_stats = dhd_get_wireless_stats; #endif /* WIRELESS_EXT < 19 */ #if WIRELESS_EXT > 12 - net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def; + net->wireless_handlers = &wl_iw_handler_def; #endif /* WIRELESS_EXT > 12 */ #endif /* defined(WL_WIRELESS_EXT) */ @@ -9700,19 +12483,32 @@ dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock) #endif /* BCMLXSDMMC */ if (!dhd_download_fw_on_driverload) { #ifdef WL_CFG80211 - wl_terminate_event_handler(); + wl_terminate_event_handler(net); #endif /* WL_CFG80211 */ -#if defined(DHD_LB) && defined(DHD_LB_RXP) +#if defined(DHD_LB_RXP) __skb_queue_purge(&dhd->rx_pend_queue); -#endif /* DHD_LB && DHD_LB_RXP */ -#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS) +#endif /* DHD_LB_RXP */ + +#if defined(DHD_LB_TXP) + skb_queue_purge(&dhd->tx_pend_queue); +#endif /* DHD_LB_TXP */ + +#ifdef 
SHOW_LOGTRACE + /* Release the skbs from queue for WLC_E_TRACE event */ + dhd_event_logtrace_flush_queue(dhdp); +#endif /* SHOW_LOGTRACE */ + +#ifdef DHDTCPACK_SUPPRESS dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF); -#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */ +#endif /* DHDTCPACK_SUPPRESS */ dhd_net_bus_devreset(net, TRUE); #ifdef BCMLXSDMMC dhd_net_bus_suspend(net); #endif /* BCMLXSDMMC */ wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY); +#if defined(BT_OVER_SDIO) + dhd->bus_user_count--; +#endif /* BT_OVER_SDIO */ } } #endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */ @@ -9742,7 +12538,7 @@ dhd_bus_detach(dhd_pub_t *dhdp) * In case of Android cfg80211 driver, the bus is down in dhd_stop, * calling stop again will cuase SD read/write errors. */ - if (dhd->pub.busstate != DHD_BUS_DOWN) { + if (dhd->pub.busstate != DHD_BUS_DOWN && dhd_download_fw_on_driverload) { /* Stop the protocol module */ dhd_prot_stop(&dhd->pub); @@ -9764,7 +12560,12 @@ void dhd_detach(dhd_pub_t *dhdp) unsigned long flags; int timer_valid = FALSE; struct net_device *dev; - +#ifdef WL_CFG80211 + struct bcm_cfg80211 *cfg = NULL; +#endif +#ifdef HOFFLOAD_MODULES + struct module_metadata *hmem = NULL; +#endif if (!dhdp) return; @@ -9797,7 +12598,9 @@ void dhd_detach(dhd_pub_t *dhdp) */ OSL_SLEEP(100); } - +#ifdef DHD_WET + dhd_free_wet_info(&dhd->pub, dhd->pub.wet_info); +#endif /* DHD_WET */ #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ @@ -9812,8 +12615,18 @@ void dhd_detach(dhd_pub_t *dhdp) #endif /* DHD_WLFC_THREAD */ #endif /* PROP_TXSTATUS */ - if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) { +#ifdef DHD_TIMESYNC + if (dhd->dhd_state & DHD_ATTACH_TIMESYNC_ATTACH_DONE) { + dhd_timesync_detach(dhdp); + } +#endif /* DHD_TIMESYNC */ +#ifdef WL_CFG80211 + if (dev) { + wl_cfg80211_down(dev); + } +#endif /* WL_CFG80211 */ + if (dhd->dhd_state & 
DHD_ATTACH_STATE_PROT_ATTACH) { dhd_bus_detach(dhdp); #ifdef BCMPCIE if (is_reboot == SYS_RESTART) { @@ -9828,7 +12641,7 @@ void dhd_detach(dhd_pub_t *dhdp) #ifndef PCIE_FULL_DONGLE if (dhdp->prot) dhd_prot_detach(dhdp); -#endif +#endif /* !PCIE_FULL_DONGLE */ } #ifdef ARP_OFFLOAD_SUPPORT @@ -9856,10 +12669,14 @@ void dhd_detach(dhd_pub_t *dhdp) wl_iw_detach(); } #ifdef WL_ESCAN - wl_escan_detach(); + wl_escan_detach(dhdp); #endif /* WL_ESCAN */ #endif /* defined(WL_WIRELESS_EXT) */ +#ifdef DHD_ULP + dhd_ulp_deinit(dhd->pub.osh, dhdp); +#endif /* DHD_ULP */ + /* delete all interfaces, start with virtual */ if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) { int i = 1; @@ -9868,8 +12685,9 @@ void dhd_detach(dhd_pub_t *dhdp) /* Cleanup virtual interfaces */ dhd_net_if_lock_local(dhd); for (i = 1; i < DHD_MAX_IFS; i++) { - if (dhd->iflist[i]) + if (dhd->iflist[i]) { dhd_remove_if(&dhd->pub, i, TRUE); + } } dhd_net_if_unlock_local(dhd); @@ -9881,19 +12699,28 @@ void dhd_detach(dhd_pub_t *dhdp) +#ifdef WL_CFG80211 + cfg = wl_get_cfg(ifp->net); +#endif /* in unregister_netdev case, the interface gets freed by net->destructor * (which is set to free_netdev) */ if (ifp->net->reg_state == NETREG_UNINITIALIZED) { free_netdev(ifp->net); } else { + argos_register_notifier_deinit(); #ifdef SET_RPS_CPUS custom_rps_map_clear(ifp->net->_rx); #endif /* SET_RPS_CPUS */ netif_tx_disable(ifp->net); unregister_netdev(ifp->net); } +#ifdef PCIE_FULL_DONGLE + ifp->net = DHD_NET_DEV_NULL; +#else ifp->net = NULL; +#endif /* PCIE_FULL_DONGLE */ + #ifdef DHD_WMF dhd_wmf_cleanup(dhdp, 0); #endif /* DHD_WMF */ @@ -9904,6 +12731,7 @@ void dhd_detach(dhd_pub_t *dhdp) ifp->phnd_arp_table = NULL; #endif /* DHD_L2_FILTER */ + dhd_if_del_sta_list(ifp); MFREE(dhd->pub.osh, ifp, sizeof(*ifp)); @@ -9936,55 +12764,116 @@ void dhd_detach(dhd_pub_t *dhdp) if (dhd->thr_dpc_ctl.thr_pid >= 0) { PROC_STOP(&dhd->thr_dpc_ctl); - } else { + } else + { tasklet_kill(&dhd->tasklet); -#ifdef DHD_LB_RXP - 
__skb_queue_purge(&dhd->rx_pend_queue); -#endif /* DHD_LB_RXP */ } } -#if defined(DHD_LB) - /* Kill the Load Balancing Tasklets */ -#if defined(DHD_LB_TXC) - tasklet_disable(&dhd->tx_compl_tasklet); - tasklet_kill(&dhd->tx_compl_tasklet); +#ifdef DHD_LB + if (dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE) { + /* Clear the flag first to avoid calling the cpu notifier */ + dhd->dhd_state &= ~DHD_ATTACH_STATE_LB_ATTACH_DONE; + + /* Kill the Load Balancing Tasklets */ +#ifdef DHD_LB_RXP + cancel_work_sync(&dhd->rx_napi_dispatcher_work); + __skb_queue_purge(&dhd->rx_pend_queue); +#endif /* DHD_LB_RXP */ +#ifdef DHD_LB_TXP + cancel_work_sync(&dhd->tx_dispatcher_work); + tasklet_kill(&dhd->tx_tasklet); + __skb_queue_purge(&dhd->tx_pend_queue); +#endif /* DHD_LB_TXP */ +#ifdef DHD_LB_TXC + cancel_work_sync(&dhd->tx_compl_dispatcher_work); + tasklet_kill(&dhd->tx_compl_tasklet); #endif /* DHD_LB_TXC */ -#if defined(DHD_LB_RXC) - tasklet_disable(&dhd->rx_compl_tasklet); - tasklet_kill(&dhd->rx_compl_tasklet); +#ifdef DHD_LB_RXC + tasklet_kill(&dhd->rx_compl_tasklet); #endif /* DHD_LB_RXC */ - if (dhd->cpu_notifier.notifier_call != NULL) - unregister_cpu_notifier(&dhd->cpu_notifier); - dhd_cpumasks_deinit(dhd); + + if (dhd->cpu_notifier.notifier_call != NULL) { + unregister_cpu_notifier(&dhd->cpu_notifier); + } + dhd_cpumasks_deinit(dhd); + DHD_LB_STATS_DEINIT(&dhd->pub); + } #endif /* DHD_LB */ + DHD_SSSR_MEMPOOL_DEINIT(&dhd->pub); + #ifdef DHD_LOG_DUMP dhd_log_dump_deinit(&dhd->pub); #endif /* DHD_LOG_DUMP */ #ifdef WL_CFG80211 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) { - wl_cfg80211_detach(NULL); - dhd_monitor_uninit(); + if (!cfg) { + DHD_ERROR(("cfg NULL!\n")); + ASSERT(0); + } else { + wl_cfg80211_detach(cfg); + dhd_monitor_uninit(); + } } #endif - /* free deferred work queue */ - dhd_deferred_work_deinit(dhd->dhd_deferred_wq); - dhd->dhd_deferred_wq = NULL; +#ifdef DEBUGABILITY + if (dhdp->dbg) { +#ifdef DBG_PKT_MON + dhd_os_dbg_detach_pkt_monitor(dhdp); + 
dhd_os_spin_lock_deinit(dhd->pub.osh, dhd->pub.dbg->pkt_mon_lock); +#endif /* DBG_PKT_MON */ + dhd_os_dbg_detach(dhdp); + } +#endif /* DEBUGABILITY */ #ifdef SHOW_LOGTRACE - if (dhd->event_data.fmts) - kfree(dhd->event_data.fmts); - if (dhd->event_data.raw_fmts) - kfree(dhd->event_data.raw_fmts); - if (dhd->event_data.raw_sstr) - kfree(dhd->event_data.raw_sstr); -#endif /* SHOW_LOGTRACE */ +#ifdef DHD_PKT_LOGGING + dhd_os_detach_pktlog(dhdp); +#endif /* DHD_PKT_LOGGING */ + /* Release the skbs from queue for WLC_E_TRACE event */ + dhd_event_logtrace_flush_queue(dhdp); + if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) { + if (dhd->event_data.fmts) { + MFREE(dhd->pub.osh, dhd->event_data.fmts, + dhd->event_data.fmts_size); + dhd->event_data.fmts = NULL; + } + if (dhd->event_data.raw_fmts) { + MFREE(dhd->pub.osh, dhd->event_data.raw_fmts, + dhd->event_data.raw_fmts_size); + dhd->event_data.raw_fmts = NULL; + } + if (dhd->event_data.raw_sstr) { + MFREE(dhd->pub.osh, dhd->event_data.raw_sstr, + dhd->event_data.raw_sstr_size); + dhd->event_data.raw_sstr = NULL; + } + if (dhd->event_data.rom_raw_sstr) { + MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr, + dhd->event_data.rom_raw_sstr_size); + dhd->event_data.rom_raw_sstr = NULL; + } + dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT; + } +#endif /* SHOW_LOGTRACE */ +#ifdef BCMPCIE + if (dhdp->extended_trap_data) + { + MFREE(dhdp->osh, dhdp->extended_trap_data, BCMPCIE_EXT_TRAP_DATA_MAXLEN); + dhdp->extended_trap_data = NULL; + } +#endif /* BCMPCIE */ #ifdef PNO_SUPPORT if (dhdp->pno_state) dhd_pno_deinit(dhdp); #endif +#ifdef RTT_SUPPORT + if (dhdp->rtt_state) { + dhd_rtt_deinit(dhdp); + } +#endif #if defined(CONFIG_PM_SLEEP) if (dhd_pm_notifier_registered) { unregister_pm_notifier(&dhd->pm_notifier); @@ -9998,14 +12887,14 @@ void dhd_detach(dhd_pub_t *dhdp) dhd->new_freq = NULL; cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER); #endif - if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) { - 
DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter)); #ifdef CONFIG_HAS_WAKELOCK - dhd->wakelock_wd_counter = 0; - wake_lock_destroy(&dhd->wl_wdwake); + dhd->wakelock_wd_counter = 0; + wake_lock_destroy(&dhd->wl_wdwake); // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry wake_lock_destroy(&dhd->wl_wifi); #endif /* CONFIG_HAS_WAKELOCK */ + if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) { + DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter)); DHD_OS_WAKE_LOCK_DESTROY(dhd); } @@ -10017,14 +12906,46 @@ void dhd_detach(dhd_pub_t *dhdp) #endif /* DHDTCPACK_SUPPRESS */ #ifdef PCIE_FULL_DONGLE - dhd_flow_rings_deinit(dhdp); - if (dhdp->prot) - dhd_prot_detach(dhdp); + dhd_flow_rings_deinit(dhdp); + if (dhdp->prot) + dhd_prot_detach(dhdp); #endif +#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) + dhd_free_tdls_peer_list(dhdp); +#endif + +#ifdef HOFFLOAD_MODULES + hmem = &dhdp->hmem; + dhd_free_module_memory(dhdp->bus, hmem); +#endif /* HOFFLOAD_MODULES */ +#if defined(BT_OVER_SDIO) + mutex_destroy(&dhd->bus_user_lock); +#endif /* BT_OVER_SDIO */ +#ifdef DUMP_IOCTL_IOV_LIST + dhd_iov_li_delete(dhdp, &(dhdp->dump_iovlist_head)); +#endif /* DUMP_IOCTL_IOV_LIST */ +#ifdef DHD_DEBUG + /* memory waste feature list initilization */ + dhd_mw_list_delete(dhdp, &(dhdp->mw_list_head)); +#endif /* DHD_DEBUG */ +#ifdef WL_MONITOR + dhd_del_monitor_if(dhd, NULL, DHD_WQ_WORK_IF_DEL); +#endif /* WL_MONITOR */ + + /* Prefer adding de-init code above this comment unless necessary. + * The idea is to cancel work queue, sysfs and flags at the end. 
+ */ + dhd_deferred_work_deinit(dhd->dhd_deferred_wq); + dhd->dhd_deferred_wq = NULL; + +#ifdef SHOW_LOGTRACE + /* Wait till event_log_dispatcher_work finishes */ + cancel_work_sync(&dhd->event_log_dispatcher_work); +#endif /* SHOW_LOGTRACE */ dhd_sysfs_exit(dhd); - dhd->pub.is_fw_download_done = FALSE; + dhd->pub.fw_download_done = FALSE; dhd_conf_detach(dhdp); } @@ -10075,11 +12996,17 @@ dhd_free(dhd_pub_t *dhdp) dhdp->cached_nvram = NULL; } #endif - /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */ - if (dhd && - dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE)) - MFREE(dhd->pub.osh, dhd, sizeof(*dhd)); - dhd = NULL; + if (dhd) { +#ifdef REPORT_FATAL_TIMEOUTS + deinit_dhd_timeouts(&dhd->pub); +#endif /* REPORT_FATAL_TIMEOUTS */ + + /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */ + if (dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, + DHD_PREALLOC_DHD_INFO, 0, FALSE)) + MFREE(dhd->pub.osh, dhd, sizeof(*dhd)); + dhd = NULL; + } } } @@ -10139,9 +13066,10 @@ dhd_module_cleanup(void) static void dhd_module_exit(void) { - dhd_buzzz_detach(); + atomic_set(&exit_in_progress, 1); dhd_module_cleanup(); unregister_reboot_notifier(&dhd_reboot_notifier); + dhd_destroy_to_notifier_skt(); } static int @@ -10152,8 +13080,6 @@ dhd_module_init(void) printf("%s: in %s\n", __FUNCTION__, dhd_version); - dhd_buzzz_attach(); - DHD_PERIM_RADIO_INIT(); @@ -10172,8 +13098,7 @@ dhd_module_init(void) if (!err) { register_reboot_notifier(&dhd_reboot_notifier); break; - } - else { + } else { DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n", __FUNCTION__, retry)); strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN); @@ -10183,6 +13108,8 @@ dhd_module_init(void) } } while (retry--); + dhd_create_to_notifier_skt(); + if (err) { DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__)); } else { @@ -10239,14 +13166,15 @@ module_exit(rockchip_wifi_exit_module_rkwifi); #endif #if 0 #if LINUX_VERSION_CODE >= 
KERNEL_VERSION(2, 6, 0) -#if defined(CONFIG_DEFERRED_INITCALLS) +#if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH) #if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \ - defined(CONFIG_ARCH_MSM8996) + defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8895) || \ + defined(CONFIG_ARCH_MSM8998) deferred_module_init_sync(dhd_module_init); #else deferred_module_init(dhd_module_init); #endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 || - * CONFIG_ARCH_MSM8996 + * CONFIG_ARCH_MSM8996 || CONFIG_SOC_EXYNOS8895 || CONFIG_ARCH_MSM8998 */ #elif defined(USE_LATE_INITCALL_SYNC) late_initcall_sync(dhd_module_init); @@ -10378,6 +13306,40 @@ dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition) return timeout; } +#ifdef PCIE_INB_DW +int +dhd_os_ds_exit_wait(dhd_pub_t *pub, uint *condition) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + int timeout; + + /* Convert timeout in millsecond to jiffies */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + timeout = msecs_to_jiffies(ds_exit_timeout_msec); +#else + timeout = ds_exit_timeout_msec * HZ / 1000; +#endif + + DHD_PERIM_UNLOCK(pub); + + timeout = wait_event_timeout(dhd->ds_exit_wait, (*condition), timeout); + + DHD_PERIM_LOCK(pub); + + return timeout; +} + +int +dhd_os_ds_exit_wake(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + wake_up(&dhd->ds_exit_wait); + return 0; +} + +#endif /* PCIE_INB_DW */ + int dhd_os_d3ack_wake(dhd_pub_t *pub) { @@ -10409,7 +13371,35 @@ dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition) return timeout; } -int INLINE +/* + * Wait until the condition *var == condition is met. 
+ * Returns 0 if the @condition evaluated to false after the timeout elapsed + * Returns 1 if the @condition evaluated to true + */ +int +dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + int timeout; + + /* Convert timeout in millsecond to jiffies */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT); +#else + timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000; +#endif + + timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, (*var == condition), timeout); + + return timeout; +} + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)) +/* Fix compilation error for FC11 */ +INLINE +#endif +int dhd_os_busbusy_wake(dhd_pub_t *pub) { dhd_info_t *dhd = (dhd_info_t *)(pub->info); @@ -10446,14 +13436,11 @@ dhd_os_wd_timer(void *bus, uint wdtick) return; } - DHD_OS_WD_WAKE_LOCK(pub); DHD_GENERAL_LOCK(pub, flags); /* don't start the wd until fw is loaded */ if (pub->busstate == DHD_BUS_DOWN) { DHD_GENERAL_UNLOCK(pub, flags); - if (!wdtick) - DHD_OS_WD_WAKE_UNLOCK(pub); return; } @@ -10462,19 +13449,16 @@ dhd_os_wd_timer(void *bus, uint wdtick) dhd->wd_timer_valid = FALSE; DHD_GENERAL_UNLOCK(pub, flags); del_timer_sync(&dhd->timer); - DHD_OS_WD_WAKE_UNLOCK(pub); return; } if (wdtick) { - DHD_OS_WD_WAKE_LOCK(pub); dhd_watchdog_ms = (uint)wdtick; /* Re arm the timer, at last watchdog period */ mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms)); dhd->wd_timer_valid = TRUE; } DHD_GENERAL_UNLOCK(pub, flags); - DHD_OS_WD_WAKE_UNLOCK(pub); } #ifdef DHD_PCIE_RUNTIMEPM @@ -10495,8 +13479,7 @@ dhd_os_runtimepm_timer(void *bus, uint tick) DHD_GENERAL_LOCK(pub, flags); /* don't start the RPM until fw is loaded */ - if (pub->busstate == DHD_BUS_DOWN || - pub->busstate == DHD_BUS_DOWN_IN_PROGRESS) { + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(pub)) { DHD_GENERAL_UNLOCK(pub, flags); return; } @@ -10572,8 +13555,9 @@ dhd_os_get_image_block(char 
*buf, int len, void *image) int rdlen; int size; - if (!image) + if (!image) { return 0; + } size = i_size_read(file_inode(fp)); rdlen = kernel_read(fp, fp->f_pos, buf, MIN(len, size)); @@ -10582,8 +13566,9 @@ dhd_os_get_image_block(char *buf, int len, void *image) return -EIO; } - if (rdlen > 0) + if (rdlen > 0) { fp->f_pos += rdlen; + } return rdlen; } @@ -10602,6 +13587,35 @@ dhd_os_get_image_size(void *image) return size; } +#if defined(BT_OVER_SDIO) +int +dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image) +{ + struct file *fp = (struct file *)image; + int rd_len; + uint str_len = 0; + char *str_end = NULL; + + if (!image) + return 0; + + rd_len = kernel_read(fp, fp->f_pos, str, len); + str_end = strnchr(str, len, '\n'); + if (str_end == NULL) { + goto err; + } + str_len = (uint)(str_end - str); + + /* Advance file pointer past the string length */ + fp->f_pos += str_len + 1; + bzero(str_end, rd_len - str_len); + +err: + return str_len; +} +#endif /* defined (BT_OVER_SDIO) */ + + void dhd_os_close_image(void *image) { @@ -10656,11 +13670,19 @@ dhd_os_sdunlock_txq(dhd_pub_t *pub) void dhd_os_sdlock_rxq(dhd_pub_t *pub) { + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + spin_lock_bh(&dhd->rxqlock); } void dhd_os_sdunlock_rxq(dhd_pub_t *pub) { + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + spin_unlock_bh(&dhd->rxqlock); } static void @@ -10715,7 +13737,7 @@ dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags) if (dhd) { #ifdef BCMSDIO - spin_unlock_bh(&dhd->tcpack_lock); // terence 20160519 + spin_unlock_bh(&dhd->tcpack_lock); #else spin_unlock_irqrestore(&dhd->tcpack_lock, flags); #endif /* BCMSDIO */ @@ -10760,16 +13782,21 @@ dhd_get_wireless_stats(struct net_device *dev) #endif /* defined(WL_WIRELESS_EXT) */ static int -dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata, +dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen, wl_event_msg_t *event, void **data) { int bcmerror = 0; +#ifdef 
WL_CFG80211 + unsigned long flags = 0; +#endif /* WL_CFG80211 */ ASSERT(dhd != NULL); #ifdef SHOW_LOGTRACE - bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, &dhd->event_data); + bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data, + &dhd->event_data); #else - bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, NULL); + bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data, + NULL); #endif /* SHOW_LOGTRACE */ if (bcmerror != BCME_OK) @@ -10781,20 +13808,25 @@ dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata, * Wireless ext is on primary interface only */ - ASSERT(dhd->iflist[*ifidx] != NULL); - ASSERT(dhd->iflist[*ifidx]->net != NULL); + ASSERT(dhd->iflist[ifidx] != NULL); + ASSERT(dhd->iflist[ifidx]->net != NULL); - if (dhd->iflist[*ifidx]->net) { - wl_iw_event(dhd->iflist[*ifidx]->net, event, *data); + if (dhd->iflist[ifidx]->net) { + wl_iw_event(dhd->iflist[ifidx]->net, event, *data); } } #endif /* defined(WL_WIRELESS_EXT) */ #ifdef WL_CFG80211 - ASSERT(dhd->iflist[*ifidx] != NULL); - ASSERT(dhd->iflist[*ifidx]->net != NULL); - if (dhd->iflist[*ifidx]->net) - wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data); + ASSERT(dhd->iflist[ifidx] != NULL); + ASSERT(dhd->iflist[ifidx]->net != NULL); + if (dhd->iflist[ifidx]->net) { + spin_lock_irqsave(&dhd->pub.up_lock, flags); + if (dhd->pub.up) { + wl_cfg80211_event(dhd->iflist[ifidx]->net, event, *data); + } + spin_unlock_irqrestore(&dhd->pub.up_lock, flags); + } #endif /* defined(WL_CFG80211) */ return (bcmerror); @@ -10804,11 +13836,8 @@ dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata, void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data) { - switch (ntoh32(event->event_type)) { - - default: - break; - } + /* Just return from here */ + return; } #ifdef LOG_INTO_TCPDUMP @@ -10863,8 +13892,7 @@ dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len) } else { netif_rx_ni(skb); } - } 
- else { + } else { /* Could not allocate a sk_buf */ DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__)); } @@ -10913,13 +13941,24 @@ dhd_net_bus_devreset(struct net_device *dev, uint8 flag) DHD_TRACE(("%s: wl down failed\n", __FUNCTION__)); } #ifdef PROP_TXSTATUS - if (dhd->pub.wlfc_enabled) + if (dhd->pub.wlfc_enabled) { dhd_wlfc_deinit(&dhd->pub); + } #endif /* PROP_TXSTATUS */ #ifdef PNO_SUPPORT - if (dhd->pub.pno_state) - dhd_pno_deinit(&dhd->pub); + if (dhd->pub.pno_state) { + dhd_pno_deinit(&dhd->pub); + } #endif +#ifdef RTT_SUPPORT + if (dhd->pub.rtt_state) { + dhd_rtt_deinit(&dhd->pub); + } +#endif /* RTT_SUPPORT */ + +#if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT) + dhd_os_dbg_detach_pkt_monitor(&dhd->pub); +#endif /* DBG_PKT_MON */ } #ifdef BCMSDIO @@ -11000,57 +14039,49 @@ int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val) return 0; } +int net_os_set_max_dtim_enable(struct net_device *dev, int val) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (dhd) { + DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n", + __FUNCTION__, (val ? 
"Enable" : "Disable"))); + if (val) { + dhd->pub.max_dtim_enable = TRUE; + } else { + dhd->pub.max_dtim_enable = FALSE; + } + } else { + return -1; + } + + return 0; +} + #ifdef PKT_FILTER_SUPPORT int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num) { -#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER - return 0; -#else - dhd_info_t *dhd = DHD_DEV_INFO(dev); - char *filterp = NULL; - int filter_id = 0; int ret = 0; +#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER + dhd_info_t *dhd = DHD_DEV_INFO(dev); + if (!dhd_master_mode) add_remove = !add_remove; DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num)); - if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) - return ret; - if (num >= dhd->pub.pktfilter_count) - return -EINVAL; - switch (num) { - case DHD_BROADCAST_FILTER_NUM: - filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF"; - filter_id = 101; - break; - case DHD_MULTICAST4_FILTER_NUM: - filterp = "102 0 0 0 0xFFFFFF 0x01005E"; - filter_id = 102; - break; - case DHD_MULTICAST6_FILTER_NUM: - filterp = "103 0 0 0 0xFFFF 0x3333"; - filter_id = 103; - break; - case DHD_MDNS_FILTER_NUM: - filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB"; - filter_id = 104; - break; - default: - return -EINVAL; + if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) { + return 0; } - /* Add filter */ - if (add_remove) { - dhd->pub.pktfilter[num] = filterp; - dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]); - } else { /* Delete filter */ - if (dhd->pub.pktfilter[num] != NULL) { - dhd_pktfilter_offload_delete(&dhd->pub, filter_id); - dhd->pub.pktfilter[num] = NULL; - } + + if (num >= dhd->pub.pktfilter_count) { + return -EINVAL; } + + ret = dhd_packet_filter_add_remove(&dhd->pub, add_remove, num); +#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */ + return ret; -#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */ } int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val) @@ -11102,15 +14133,6 @@ dhd_dev_get_feature_set(struct net_device *dev) dhd_pub_t *dhd = 
(&ptr->pub); int feature_set = 0; -#ifdef DYNAMIC_SWOOB_DURATION -#ifndef CUSTOM_INTR_WIDTH -#define CUSTOM_INTR_WIDTH 100 - int intr_width = 0; -#endif /* CUSTOM_INTR_WIDTH */ -#endif /* DYNAMIC_SWOOB_DURATION */ - if (!dhd) - return feature_set; - if (FW_SUPPORTED(dhd, sta)) feature_set |= WIFI_FEATURE_INFRA; if (FW_SUPPORTED(dhd, dualband)) @@ -11130,78 +14152,92 @@ dhd_dev_get_feature_set(struct net_device *dev) feature_set |= WIFI_FEATURE_D2D_RTT; } #ifdef RTT_SUPPORT + feature_set |= WIFI_FEATURE_D2D_RTT; feature_set |= WIFI_FEATURE_D2AP_RTT; #endif /* RTT_SUPPORT */ #ifdef LINKSTAT_SUPPORT feature_set |= WIFI_FEATURE_LINKSTAT; #endif /* LINKSTAT_SUPPORT */ - /* Supports STA + STA always */ - feature_set |= WIFI_FEATURE_ADDITIONAL_STA; + #ifdef PNO_SUPPORT if (dhd_is_pno_supported(dhd)) { feature_set |= WIFI_FEATURE_PNO; - feature_set |= WIFI_FEATURE_BATCH_SCAN; #ifdef GSCAN_SUPPORT feature_set |= WIFI_FEATURE_GSCAN; + feature_set |= WIFI_FEATURE_HAL_EPNO; #endif /* GSCAN_SUPPORT */ } #endif /* PNO_SUPPORT */ +#ifdef RSSI_MONITOR_SUPPORT + if (FW_SUPPORTED(dhd, rssi_mon)) { + feature_set |= WIFI_FEATURE_RSSI_MONITOR; + } +#endif /* RSSI_MONITOR_SUPPORT */ #ifdef WL11U feature_set |= WIFI_FEATURE_HOTSPOT; #endif /* WL11U */ +#ifdef NDO_CONFIG_SUPPORT + feature_set |= WIFI_FEATURE_CONFIG_NDO; +#endif /* NDO_CONFIG_SUPPORT */ +#ifdef KEEP_ALIVE + feature_set |= WIFI_FEATURE_MKEEP_ALIVE; +#endif /* KEEP_ALIVE */ + return feature_set; } - -int *dhd_dev_get_feature_set_matrix(struct net_device *dev, int *num) +int +dhd_dev_get_feature_set_matrix(struct net_device *dev, int num) { - int feature_set_full, mem_needed; - int *ret; - - *num = 0; - mem_needed = sizeof(int) * MAX_FEATURE_SET_CONCURRRENT_GROUPS; - ret = (int *) kmalloc(mem_needed, GFP_KERNEL); - if (!ret) { - DHD_ERROR(("%s: failed to allocate %d bytes\n", __FUNCTION__, - mem_needed)); - return ret; - } + int feature_set_full; + int ret = 0; feature_set_full = dhd_dev_get_feature_set(dev); - ret[0] = 
(feature_set_full & WIFI_FEATURE_INFRA) | - (feature_set_full & WIFI_FEATURE_INFRA_5G) | - (feature_set_full & WIFI_FEATURE_NAN) | - (feature_set_full & WIFI_FEATURE_D2D_RTT) | - (feature_set_full & WIFI_FEATURE_D2AP_RTT) | - (feature_set_full & WIFI_FEATURE_PNO) | - (feature_set_full & WIFI_FEATURE_BATCH_SCAN) | - (feature_set_full & WIFI_FEATURE_GSCAN) | - (feature_set_full & WIFI_FEATURE_HOTSPOT) | - (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA) | - (feature_set_full & WIFI_FEATURE_EPR); + /* Common feature set for all interface */ + ret = (feature_set_full & WIFI_FEATURE_INFRA) | + (feature_set_full & WIFI_FEATURE_INFRA_5G) | + (feature_set_full & WIFI_FEATURE_D2D_RTT) | + (feature_set_full & WIFI_FEATURE_D2AP_RTT) | + (feature_set_full & WIFI_FEATURE_RSSI_MONITOR) | + (feature_set_full & WIFI_FEATURE_EPR); - ret[1] = (feature_set_full & WIFI_FEATURE_INFRA) | - (feature_set_full & WIFI_FEATURE_INFRA_5G) | - /* Not yet verified NAN with P2P */ - /* (feature_set_full & WIFI_FEATURE_NAN) | */ - (feature_set_full & WIFI_FEATURE_P2P) | - (feature_set_full & WIFI_FEATURE_D2AP_RTT) | - (feature_set_full & WIFI_FEATURE_D2D_RTT) | - (feature_set_full & WIFI_FEATURE_EPR); + /* Specific feature group for each interface */ + switch (num) { + case 0: + ret |= (feature_set_full & WIFI_FEATURE_P2P) | + /* Not supported yet */ + /* (feature_set_full & WIFI_FEATURE_NAN) | */ + (feature_set_full & WIFI_FEATURE_TDLS) | + (feature_set_full & WIFI_FEATURE_PNO) | + (feature_set_full & WIFI_FEATURE_HAL_EPNO) | + (feature_set_full & WIFI_FEATURE_BATCH_SCAN) | + (feature_set_full & WIFI_FEATURE_GSCAN) | + (feature_set_full & WIFI_FEATURE_HOTSPOT) | + (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA); + break; - ret[2] = (feature_set_full & WIFI_FEATURE_INFRA) | - (feature_set_full & WIFI_FEATURE_INFRA_5G) | - (feature_set_full & WIFI_FEATURE_NAN) | - (feature_set_full & WIFI_FEATURE_D2D_RTT) | - (feature_set_full & WIFI_FEATURE_D2AP_RTT) | - (feature_set_full & WIFI_FEATURE_TDLS) | - 
(feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL) | - (feature_set_full & WIFI_FEATURE_EPR); - *num = MAX_FEATURE_SET_CONCURRRENT_GROUPS; + case 1: + ret |= (feature_set_full & WIFI_FEATURE_P2P); + /* Not yet verified NAN with P2P */ + /* (feature_set_full & WIFI_FEATURE_NAN) | */ + break; + + case 2: + ret |= (feature_set_full & WIFI_FEATURE_NAN) | + (feature_set_full & WIFI_FEATURE_TDLS) | + (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL); + break; + + default: + ret = WIFI_FEATURE_INVALID; + DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__, num)); + break; + } return ret; } + #ifdef CUSTOM_FORCE_NODFS_FLAG int dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs) @@ -11216,6 +14252,244 @@ dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs) return 0; } #endif /* CUSTOM_FORCE_NODFS_FLAG */ + +#ifdef NDO_CONFIG_SUPPORT +int +dhd_dev_ndo_cfg(struct net_device *dev, u8 enable) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + dhd_pub_t *dhdp = &dhd->pub; + int ret = 0; + + if (enable) { + /* enable ND offload feature (will be enabled in FW on suspend) */ + dhdp->ndo_enable = TRUE; + + /* Update changes of anycast address & DAD failed address */ + ret = dhd_dev_ndo_update_inet6addr(dev); + if ((ret < 0) && (ret != BCME_NORESOURCE)) { + DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__, ret)); + return ret; + } + } else { + /* disable ND offload feature */ + dhdp->ndo_enable = FALSE; + + /* disable ND offload in FW */ + ret = dhd_ndo_enable(dhdp, 0); + if (ret < 0) { + DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__, ret)); + } + } + return ret; +} + +/* #pragma used as a WAR to fix build failure, +* ignore dropping of 'const' qualifier in 'list_entry' macro +* this pragma disables the warning only for the following function +*/ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" + +static int +dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev *inet6) +{ + struct inet6_ifaddr *ifa; 
+ struct ifacaddr6 *acaddr = NULL; + int addr_count = 0; + + /* lock */ + read_lock_bh(&inet6->lock); + + /* Count valid unicast address */ + list_for_each_entry(ifa, &inet6->addr_list, if_list) { + if ((ifa->flags & IFA_F_DADFAILED) == 0) { + addr_count++; + } + } + + /* Count anycast address */ + acaddr = inet6->ac_list; + while (acaddr) { + addr_count++; + acaddr = acaddr->aca_next; + } + + /* unlock */ + read_unlock_bh(&inet6->lock); + + return addr_count; +} + +int +dhd_dev_ndo_update_inet6addr(struct net_device *dev) +{ + dhd_info_t *dhd; + dhd_pub_t *dhdp; + struct inet6_dev *inet6; + struct inet6_ifaddr *ifa; + struct ifacaddr6 *acaddr = NULL; + struct in6_addr *ipv6_addr = NULL; + int cnt, i; + int ret = BCME_OK; + + /* + * this function evaulates host ip address in struct inet6_dev + * unicast addr in inet6_dev->addr_list + * anycast addr in inet6_dev->ac_list + * while evaluating inet6_dev, read_lock_bh() is required to prevent + * access on null(freed) pointer. + */ + + if (dev) { + inet6 = dev->ip6_ptr; + if (!inet6) { + DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__)); + return BCME_ERROR; + } + + dhd = DHD_DEV_INFO(dev); + if (!dhd) { + DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__)); + return BCME_ERROR; + } + dhdp = &dhd->pub; + + if (dhd_net2idx(dhd, dev) != 0) { + DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__)); + return BCME_ERROR; + } + } else { + DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* Check host IP overflow */ + cnt = dhd_dev_ndo_get_valid_inet6addr_count(inet6); + if (cnt > dhdp->ndo_max_host_ip) { + if (!dhdp->ndo_host_ip_overflow) { + dhdp->ndo_host_ip_overflow = TRUE; + /* Disable ND offload in FW */ + DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__)); + ret = dhd_ndo_enable(dhdp, 0); + } + + return ret; + } + + /* + * Allocate ipv6 addr buffer to store addresses to be added/removed. + * driver need to lock inet6_dev while accessing structure. 
but, driver + * cannot use ioctl while inet6_dev locked since it requires scheduling + * hence, copy addresses to the buffer and do ioctl after unlock. + */ + ipv6_addr = (struct in6_addr *)MALLOC(dhdp->osh, + sizeof(struct in6_addr) * dhdp->ndo_max_host_ip); + if (!ipv6_addr) { + DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__)); + return BCME_NOMEM; + } + + /* Find DAD failed unicast address to be removed */ + cnt = 0; + read_lock_bh(&inet6->lock); + list_for_each_entry(ifa, &inet6->addr_list, if_list) { + /* DAD failed unicast address */ + if ((ifa->flags & IFA_F_DADFAILED) && + (cnt < dhdp->ndo_max_host_ip)) { + memcpy(&ipv6_addr[cnt], &ifa->addr, sizeof(struct in6_addr)); + cnt++; + } + } + read_unlock_bh(&inet6->lock); + + /* Remove DAD failed unicast address */ + for (i = 0; i < cnt; i++) { + DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__)); + ret = dhd_ndo_remove_ip_by_addr(dhdp, (char *)&ipv6_addr[i], 0); + if (ret < 0) { + goto done; + } + } + + /* Remove all anycast address */ + ret = dhd_ndo_remove_ip_by_type(dhdp, WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0); + if (ret < 0) { + goto done; + } + + /* + * if ND offload was disabled due to host ip overflow, + * attempt to add valid unicast address. 
+ */ + if (dhdp->ndo_host_ip_overflow) { + /* Find valid unicast address */ + cnt = 0; + read_lock_bh(&inet6->lock); + list_for_each_entry(ifa, &inet6->addr_list, if_list) { + /* valid unicast address */ + if (!(ifa->flags & IFA_F_DADFAILED) && + (cnt < dhdp->ndo_max_host_ip)) { + memcpy(&ipv6_addr[cnt], &ifa->addr, + sizeof(struct in6_addr)); + cnt++; + } + } + read_unlock_bh(&inet6->lock); + + /* Add valid unicast address */ + for (i = 0; i < cnt; i++) { + ret = dhd_ndo_add_ip_with_type(dhdp, + (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_UNICAST, 0); + if (ret < 0) { + goto done; + } + } + } + + /* Find anycast address */ + cnt = 0; + read_lock_bh(&inet6->lock); + acaddr = inet6->ac_list; + while (acaddr) { + if (cnt < dhdp->ndo_max_host_ip) { + memcpy(&ipv6_addr[cnt], &acaddr->aca_addr, sizeof(struct in6_addr)); + cnt++; + } + acaddr = acaddr->aca_next; + } + read_unlock_bh(&inet6->lock); + + /* Add anycast address */ + for (i = 0; i < cnt; i++) { + ret = dhd_ndo_add_ip_with_type(dhdp, + (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0); + if (ret < 0) { + goto done; + } + } + + /* Now All host IP addr were added successfully */ + if (dhdp->ndo_host_ip_overflow) { + dhdp->ndo_host_ip_overflow = FALSE; + if (dhdp->in_suspend) { + /* drvier is in (early) suspend state, need to enable ND offload in FW */ + DHD_INFO(("%s: enable NDO\n", __FUNCTION__)); + ret = dhd_ndo_enable(dhdp, 1); + } + } + +done: + if (ipv6_addr) { + MFREE(dhdp->osh, ipv6_addr, sizeof(struct in6_addr) * dhdp->ndo_max_host_ip); + } + + return ret; +} +#pragma GCC diagnostic pop + +#endif /* NDO_CONFIG_SUPPORT */ + #ifdef PNO_SUPPORT /* Linux wrapper to call common dhd_pno_stop_for_ssid */ int @@ -11225,6 +14499,7 @@ dhd_dev_pno_stop_for_ssid(struct net_device *dev) return (dhd_pno_stop_for_ssid(&dhd->pub)); } + /* Linux wrapper to call common dhd_pno_set_for_ssid */ int dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid, @@ -11260,6 +14535,7 @@ 
dhd_dev_pno_stop_for_batch(struct net_device *dev) dhd_info_t *dhd = DHD_DEV_INFO(dev); return (dhd_pno_stop_for_batch(&dhd->pub)); } + /* Linux wrapper to call common dhd_dev_pno_set_for_batch */ int dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params) @@ -11267,6 +14543,7 @@ dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *b dhd_info_t *dhd = DHD_DEV_INFO(dev); return (dhd_pno_set_for_batch(&dhd->pub, batch_params)); } + /* Linux wrapper to call common dhd_dev_pno_get_for_batch */ int dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize) @@ -11274,39 +14551,49 @@ dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize) dhd_info_t *dhd = DHD_DEV_INFO(dev); return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL)); } -/* Linux wrapper to call common dhd_pno_set_mac_oui */ -int -dhd_dev_pno_set_mac_oui(struct net_device *dev, uint8 *oui) -{ - dhd_info_t *dhd = DHD_DEV_INFO(dev); - return (dhd_pno_set_mac_oui(&dhd->pub, oui)); -} #endif /* PNO_SUPPORT */ #if defined(PNO_SUPPORT) #ifdef GSCAN_SUPPORT +bool +dhd_dev_is_legacy_pno_enabled(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_is_legacy_pno_enabled(&dhd->pub)); +} + +int +dhd_dev_set_epno(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + if (!dhd) { + return BCME_ERROR; + } + return dhd_pno_set_epno(&dhd->pub); +} +int +dhd_dev_flush_fw_epno(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + if (!dhd) { + return BCME_ERROR; + } + return dhd_pno_flush_fw_epno(&dhd->pub); +} + /* Linux wrapper to call common dhd_pno_set_cfg_gscan */ int dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, - void *buf, uint8 flush) + void *buf, bool flush) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush)); } -/* Linux wrapper to call 
common dhd_pno_get_gscan */ -void * -dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, - void *info, uint32 *len) -{ - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - - return (dhd_pno_get_gscan(&dhd->pub, type, info, len)); -} - /* Linux wrapper to call common dhd_wait_batch_results_complete */ -void +int dhd_dev_wait_batch_results_complete(struct net_device *dev) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); @@ -11315,7 +14602,7 @@ dhd_dev_wait_batch_results_complete(struct net_device *dev) } /* Linux wrapper to call common dhd_pno_lock_batch_results */ -void +int dhd_dev_pno_lock_access_batch_results(struct net_device *dev) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); @@ -11349,15 +14636,6 @@ dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag) return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag)); } -/* Linux wrapper to call common dhd_handle_swc_evt */ -void * -dhd_dev_swc_scan_event(struct net_device *dev, const void *data, int *send_evt_bytes) -{ - dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - - return (dhd_handle_swc_evt(&dhd->pub, data, send_evt_bytes)); -} - /* Linux wrapper to call common dhd_handle_hotlist_scan_evt */ void * dhd_dev_hotlist_scan_event(struct net_device *dev, @@ -11371,11 +14649,11 @@ dhd_dev_hotlist_scan_event(struct net_device *dev, /* Linux wrapper to call common dhd_process_full_gscan_result */ void * dhd_dev_process_full_gscan_result(struct net_device *dev, -const void *data, int *send_evt_bytes) +const void *data, uint32 len, int *send_evt_bytes) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - return (dhd_process_full_gscan_result(&dhd->pub, data, send_evt_bytes)); + return (dhd_process_full_gscan_result(&dhd->pub, data, len, send_evt_bytes)); } void @@ -11404,9 +14682,250 @@ dhd_dev_retrieve_batch_scan(struct net_device *dev) return (dhd_retreive_batch_scan_results(&dhd->pub)); } + +/* Linux wrapper to call common 
dhd_pno_process_epno_result */ +void * dhd_dev_process_epno_result(struct net_device *dev, + const void *data, uint32 event, int *send_evt_bytes) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_process_epno_result(&dhd->pub, data, event, send_evt_bytes)); +} + +int +dhd_dev_set_lazy_roam_cfg(struct net_device *dev, + wlc_roam_exp_params_t *roam_param) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + wl_roam_exp_cfg_t roam_exp_cfg; + int err; + + if (!roam_param) { + return BCME_BADARG; + } + + DHD_ERROR(("a_band_boost_thr %d a_band_penalty_thr %d\n", + roam_param->a_band_boost_threshold, roam_param->a_band_penalty_threshold)); + DHD_ERROR(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n", + roam_param->a_band_boost_factor, roam_param->a_band_penalty_factor, + roam_param->cur_bssid_boost)); + DHD_ERROR(("alert_roam_trigger_thr %d a_band_max_boost %d\n", + roam_param->alert_roam_trigger_threshold, roam_param->a_band_max_boost)); + + memcpy(&roam_exp_cfg.params, roam_param, sizeof(*roam_param)); + roam_exp_cfg.version = ROAM_EXP_CFG_VERSION; + roam_exp_cfg.flags = ROAM_EXP_CFG_PRESENT; + if (dhd->pub.lazy_roam_enable) { + roam_exp_cfg.flags |= ROAM_EXP_ENABLE_FLAG; + } + err = dhd_iovar(&dhd->pub, 0, "roam_exp_params", + (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0, + TRUE); + if (err < 0) { + DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err)); + } + return err; +} + +int +dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable) +{ + int err; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + wl_roam_exp_cfg_t roam_exp_cfg; + + memset(&roam_exp_cfg, 0, sizeof(roam_exp_cfg)); + roam_exp_cfg.version = ROAM_EXP_CFG_VERSION; + if (enable) { + roam_exp_cfg.flags = ROAM_EXP_ENABLE_FLAG; + } + + err = dhd_iovar(&dhd->pub, 0, "roam_exp_params", + (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0, + TRUE); + if (err < 0) { + DHD_ERROR(("%s : Failed to execute 
roam_exp_params %d\n", __FUNCTION__, err)); + } else { + dhd->pub.lazy_roam_enable = (enable != 0); + } + return err; +} + +int +dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev, + wl_bssid_pref_cfg_t *bssid_pref, uint32 flush) +{ + int err; + int len; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + bssid_pref->version = BSSID_PREF_LIST_VERSION; + /* By default programming bssid pref flushes out old values */ + bssid_pref->flags = (flush && !bssid_pref->count) ? ROAM_EXP_CLEAR_BSSID_PREF: 0; + len = sizeof(wl_bssid_pref_cfg_t); + len += (bssid_pref->count - 1) * sizeof(wl_bssid_pref_list_t); + err = dhd_iovar(&(dhd->pub), 0, "roam_exp_bssid_pref", (char *)bssid_pref, + len, NULL, 0, TRUE); + if (err != BCME_OK) { + DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err)); + } + return err; +} + +int +dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist, + uint32 len, uint32 flush) +{ + int err; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + int macmode; + + if (blacklist) { + err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACLIST, (char *)blacklist, + len, TRUE, 0); + if (err != BCME_OK) { + DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__, err)); + return err; + } + } + /* By default programming blacklist flushes out old values */ + macmode = (flush && !blacklist) ? 
WLC_MACMODE_DISABLED : WLC_MACMODE_DENY; + err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACMODE, (char *)&macmode, + sizeof(macmode), TRUE, 0); + if (err != BCME_OK) { + DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__, err)); + } + return err; +} + +int +dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *ssid_whitelist, + uint32 len, uint32 flush) +{ + int err; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + wl_ssid_whitelist_t whitelist_ssid_flush; + + if (!ssid_whitelist) { + if (flush) { + ssid_whitelist = &whitelist_ssid_flush; + ssid_whitelist->ssid_count = 0; + } else { + DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__)); + return BCME_BADARG; + } + } + ssid_whitelist->version = SSID_WHITELIST_VERSION; + ssid_whitelist->flags = flush ? ROAM_EXP_CLEAR_SSID_WHITELIST : 0; + err = dhd_iovar(&(dhd->pub), 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist, + len, NULL, 0, TRUE); + if (err != BCME_OK) { + DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err)); + } + return err; +} #endif /* GSCAN_SUPPORT */ + +#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) +/* Linux wrapper to call common dhd_pno_get_gscan */ +void * +dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, + void *info, uint32 *len) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_get_gscan(&dhd->pub, type, info, len)); +} +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ #endif + +#ifdef RSSI_MONITOR_SUPPORT +int +dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start, + int8 max_rssi, int8 min_rssi) +{ + int err; + wl_rssi_monitor_cfg_t rssi_monitor; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + rssi_monitor.version = RSSI_MONITOR_VERSION; + rssi_monitor.max_rssi = max_rssi; + rssi_monitor.min_rssi = min_rssi; + rssi_monitor.flags = start ? 
0: RSSI_MONITOR_STOP; + err = dhd_iovar(&(dhd->pub), 0, "rssi_monitor", (char *)&rssi_monitor, + sizeof(rssi_monitor), NULL, 0, TRUE); + if (err < 0 && err != BCME_UNSUPPORTED) { + DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err)); + } + return err; +} +#endif /* RSSI_MONITOR_SUPPORT */ + +#ifdef DHDTCPACK_SUPPRESS +int dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable) +{ + int err; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + err = dhd_tcpack_suppress_set(&(dhd->pub), enable); + if (err != BCME_OK) { + DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err)); + } + return err; +} +#endif /* DHDTCPACK_SUPPRESS */ + +int +dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + dhd_pub_t *dhdp = &dhd->pub; + + if (!dhdp || !oui) { + DHD_ERROR(("NULL POINTER : %s\n", + __FUNCTION__)); + return BCME_ERROR; + } + if (ETHER_ISMULTI(oui)) { + DHD_ERROR(("Expected unicast OUI\n")); + return BCME_ERROR; + } else { + uint8 *rand_mac_oui = dhdp->rand_mac_oui; + memcpy(rand_mac_oui, oui, DOT11_OUI_LEN); + DHD_ERROR(("Random MAC OUI to be used - %02x:%02x:%02x\n", rand_mac_oui[0], + rand_mac_oui[1], rand_mac_oui[2])); + } + return BCME_OK; +} + +int +dhd_set_rand_mac_oui(dhd_pub_t *dhd) +{ + int err; + wl_pfn_macaddr_cfg_t wl_cfg; + uint8 *rand_mac_oui = dhd->rand_mac_oui; + + memset(&wl_cfg.macaddr, 0, ETHER_ADDR_LEN); + memcpy(&wl_cfg.macaddr, rand_mac_oui, DOT11_OUI_LEN); + wl_cfg.version = WL_PFN_MACADDR_CFG_VER; + if (ETHER_ISNULLADDR(&wl_cfg.macaddr)) { + wl_cfg.flags = 0; + } else { + wl_cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK); + } + + DHD_ERROR(("Setting rand mac oui to FW - %02x:%02x:%02x\n", rand_mac_oui[0], + rand_mac_oui[1], rand_mac_oui[2])); + + err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&wl_cfg, sizeof(wl_cfg), NULL, 0, TRUE); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_macaddr 
%d\n", __FUNCTION__, err)); + } + return err; +} + #ifdef RTT_SUPPORT +#ifdef WL_CFG80211 /* Linux wrapper to call common dhd_pno_set_cfg_gscan */ int dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf) @@ -11415,6 +14934,7 @@ dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf) return (dhd_rtt_set_cfg(&dhd->pub, buf)); } + int dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt) { @@ -11422,6 +14942,7 @@ dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt)); } + int dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn) { @@ -11429,6 +14950,7 @@ dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_co return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn)); } + int dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn) { @@ -11445,8 +14967,575 @@ dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa) return (dhd_rtt_capability(&dhd->pub, capa)); } +int +dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + return (dhd_rtt_avail_channel(&dhd->pub, channel_info)); +} + +int +dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + return (dhd_rtt_enable_responder(&dhd->pub, channel_info)); +} + +int dhd_dev_rtt_cancel_responder(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + return (dhd_rtt_cancel_responder(&dhd->pub)); +} +#endif /* WL_CFG80211 */ #endif /* RTT_SUPPORT */ +#ifdef KEEP_ALIVE +#define KA_TEMP_BUF_SIZE 512 +#define KA_FRAME_SIZE 300 + +int +dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt, + uint16 ip_pkt_len, uint8* src_mac, uint8* dst_mac, uint32 period_msec) +{ 
+ const int ETHERTYPE_LEN = 2; + char *pbuf = NULL; + const char *str; + wl_mkeep_alive_pkt_t mkeep_alive_pkt; + wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL; + int buf_len = 0; + int str_len = 0; + int res = BCME_ERROR; + int len_bytes = 0; + int i = 0; + + /* ether frame to have both max IP pkt (256 bytes) and ether header */ + char *pmac_frame = NULL; + char *pmac_frame_begin = NULL; + + /* + * The mkeep_alive packet is for STA interface only; if the bss is configured as AP, + * dongle shall reject a mkeep_alive request. + */ + if (!dhd_support_sta_mode(dhd_pub)) + return res; + + DHD_TRACE(("%s execution\n", __FUNCTION__)); + + if ((pbuf = kzalloc(KA_TEMP_BUF_SIZE, GFP_KERNEL)) == NULL) { + DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE)); + res = BCME_NOMEM; + return res; + } + + if ((pmac_frame = kzalloc(KA_FRAME_SIZE, GFP_KERNEL)) == NULL) { + DHD_ERROR(("failed to allocate mac_frame with size %d\n", KA_FRAME_SIZE)); + res = BCME_NOMEM; + goto exit; + } + pmac_frame_begin = pmac_frame; + + /* + * Get current mkeep-alive status. 
+ */ + res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id, sizeof(mkeep_alive_id), pbuf, + KA_TEMP_BUF_SIZE, FALSE); + if (res < 0) { + DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res)); + goto exit; + } else { + /* Check available ID whether it is occupied */ + mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf; + if (dtoh32(mkeep_alive_pktp->period_msec != 0)) { + DHD_ERROR(("%s: Get mkeep_alive failed, ID %u is in use.\n", + __FUNCTION__, mkeep_alive_id)); + + /* Current occupied ID info */ + DHD_ERROR(("%s: mkeep_alive\n", __FUNCTION__)); + DHD_ERROR((" Id : %d\n" + " Period: %d msec\n" + " Length: %d\n" + " Packet: 0x", + mkeep_alive_pktp->keep_alive_id, + dtoh32(mkeep_alive_pktp->period_msec), + dtoh16(mkeep_alive_pktp->len_bytes))); + + for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) { + DHD_ERROR(("%02x", mkeep_alive_pktp->data[i])); + } + DHD_ERROR(("\n")); + + res = BCME_NOTFOUND; + goto exit; + } + } + + /* Request the specified ID */ + memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t)); + memset(pbuf, 0, KA_TEMP_BUF_SIZE); + str = "mkeep_alive"; + str_len = strlen(str); + strncpy(pbuf, str, str_len); + pbuf[str_len] = '\0'; + + mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (pbuf + str_len + 1); + mkeep_alive_pkt.period_msec = htod32(period_msec); + buf_len = str_len + 1; + mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION); + mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN); + + /* ID assigned */ + mkeep_alive_pkt.keep_alive_id = mkeep_alive_id; + + buf_len += WL_MKEEP_ALIVE_FIXED_LEN; + + /* + * Build up Ethernet Frame + */ + + /* Mapping dest mac addr */ + memcpy(pmac_frame, dst_mac, ETHER_ADDR_LEN); + pmac_frame += ETHER_ADDR_LEN; + + /* Mapping src mac addr */ + memcpy(pmac_frame, src_mac, ETHER_ADDR_LEN); + pmac_frame += ETHER_ADDR_LEN; + + /* Mapping Ethernet type (ETHERTYPE_IP: 0x0800) */ + *(pmac_frame++) = 0x08; + *(pmac_frame++) = 0x00; + + /* Mapping IP pkt */ + memcpy(pmac_frame, ip_pkt, 
ip_pkt_len); + pmac_frame += ip_pkt_len; + + /* + * Length of ether frame (assume to be all hexa bytes) + * = src mac + dst mac + ether type + ip pkt len + */ + len_bytes = ETHER_ADDR_LEN*2 + ETHERTYPE_LEN + ip_pkt_len; + memcpy(mkeep_alive_pktp->data, pmac_frame_begin, len_bytes); + buf_len += len_bytes; + mkeep_alive_pkt.len_bytes = htod16(len_bytes); + + /* + * Keep-alive attributes are set in local variable (mkeep_alive_pkt), and + * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no + * guarantee that the buffer is properly aligned. + */ + memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN); + + res = dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_VAR, pbuf, buf_len, TRUE, 0); +exit: + kfree(pmac_frame_begin); + kfree(pbuf); + return res; +} + +int +dhd_dev_stop_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id) +{ + char *pbuf; + wl_mkeep_alive_pkt_t mkeep_alive_pkt; + wl_mkeep_alive_pkt_t *mkeep_alive_pktp; + int res = BCME_ERROR; + int i; + + /* + * The mkeep_alive packet is for STA interface only; if the bss is configured as AP, + * dongle shall reject a mkeep_alive request. + */ + if (!dhd_support_sta_mode(dhd_pub)) + return res; + + DHD_TRACE(("%s execution\n", __FUNCTION__)); + + /* + * Get current mkeep-alive status. Skip ID 0 which is being used for NULL pkt. 
+ */ + if ((pbuf = kmalloc(KA_TEMP_BUF_SIZE, GFP_KERNEL)) == NULL) { + DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE)); + return res; + } + + res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id, + sizeof(mkeep_alive_id), pbuf, KA_TEMP_BUF_SIZE, FALSE); + if (res < 0) { + DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res)); + goto exit; + } else { + /* Check occupied ID */ + mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf; + DHD_INFO(("%s: mkeep_alive\n", __FUNCTION__)); + DHD_INFO((" Id : %d\n" + " Period: %d msec\n" + " Length: %d\n" + " Packet: 0x", + mkeep_alive_pktp->keep_alive_id, + dtoh32(mkeep_alive_pktp->period_msec), + dtoh16(mkeep_alive_pktp->len_bytes))); + + for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) { + DHD_INFO(("%02x", mkeep_alive_pktp->data[i])); + } + DHD_INFO(("\n")); + } + + /* Make it stop if available */ + if (dtoh32(mkeep_alive_pktp->period_msec != 0)) { + DHD_INFO(("stop mkeep_alive on ID %d\n", mkeep_alive_id)); + memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t)); + + mkeep_alive_pkt.period_msec = 0; + mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION); + mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN); + mkeep_alive_pkt.keep_alive_id = mkeep_alive_id; + + res = dhd_iovar(dhd_pub, 0, "mkeep_alive", + (char *)&mkeep_alive_pkt, + WL_MKEEP_ALIVE_FIXED_LEN, NULL, 0, TRUE); + } else { + DHD_ERROR(("%s: ID %u does not exist.\n", __FUNCTION__, mkeep_alive_id)); + res = BCME_NOTFOUND; + } +exit: + kfree(pbuf); + return res; +} +#endif /* KEEP_ALIVE */ + +#if defined(PKT_FILTER_SUPPORT) && defined(APF) +static void _dhd_apf_lock_local(dhd_info_t *dhd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + if (dhd) { + mutex_lock(&dhd->dhd_apf_mutex); + } +#endif +} + +static void _dhd_apf_unlock_local(dhd_info_t *dhd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + if (dhd) { + mutex_unlock(&dhd->dhd_apf_mutex); + } +#endif +} + +static int 
+__dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id, + u8* program, uint32 program_len) +{ + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + wl_pkt_filter_t * pkt_filterp; + wl_apf_program_t *apf_program; + char *buf; + u32 cmd_len, buf_len; + int ifidx, ret; + gfp_t kflags; + char cmd[] = "pkt_filter_add"; + + ifidx = dhd_net2idx(dhd, ndev); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__)); + return -ENODEV; + } + + cmd_len = sizeof(cmd); + + /* Check if the program_len is more than the expected len + * and if the program is NULL return from here. + */ + if ((program_len > WL_APF_PROGRAM_MAX_SIZE) || (program == NULL)) { + DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n", + __FUNCTION__, program_len, program)); + return -EINVAL; + } + buf_len = cmd_len + WL_PKT_FILTER_FIXED_LEN + + WL_APF_PROGRAM_FIXED_LEN + program_len; + + kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; + buf = kzalloc(buf_len, kflags); + if (unlikely(!buf)) { + DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len)); + return -ENOMEM; + } + + memcpy(buf, cmd, cmd_len); + + pkt_filterp = (wl_pkt_filter_t *) (buf + cmd_len); + pkt_filterp->id = htod32(filter_id); + pkt_filterp->negate_match = htod32(FALSE); + pkt_filterp->type = htod32(WL_PKT_FILTER_TYPE_APF_MATCH); + + apf_program = &pkt_filterp->u.apf_program; + apf_program->version = htod16(WL_APF_INTERNAL_VERSION); + apf_program->instr_len = htod16(program_len); + memcpy(apf_program->instrs, program, program_len); + + ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx); + if (unlikely(ret)) { + DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n", + __FUNCTION__, filter_id, ret)); + } + + if (buf) { + kfree(buf); + } + return ret; +} + +static int +__dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id, + uint32 mode, uint32 enable) +{ + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + 
wl_pkt_filter_enable_t * pkt_filterp; + char *buf; + u32 cmd_len, buf_len; + int ifidx, ret; + gfp_t kflags; + char cmd[] = "pkt_filter_enable"; + + ifidx = dhd_net2idx(dhd, ndev); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__)); + return -ENODEV; + } + + cmd_len = sizeof(cmd); + buf_len = cmd_len + sizeof(*pkt_filterp); + + kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; + buf = kzalloc(buf_len, kflags); + if (unlikely(!buf)) { + DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len)); + return -ENOMEM; + } + + memcpy(buf, cmd, cmd_len); + + pkt_filterp = (wl_pkt_filter_enable_t *) (buf + cmd_len); + pkt_filterp->id = htod32(filter_id); + pkt_filterp->enable = htod32(enable); + + ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx); + if (unlikely(ret)) { + DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n", + __FUNCTION__, filter_id, ret)); + goto exit; + } + + ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_mode", dhd_master_mode, + WLC_SET_VAR, TRUE, ifidx); + if (unlikely(ret)) { + DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n", + __FUNCTION__, filter_id, ret)); + } + +exit: + if (buf) { + kfree(buf); + } + return ret; +} + +static int +__dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ifidx, ret; + + ifidx = dhd_net2idx(dhd, ndev); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__)); + return -ENODEV; + } + + ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_delete", + htod32(filter_id), WLC_SET_VAR, TRUE, ifidx); + if (unlikely(ret)) { + DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n", + __FUNCTION__, filter_id, ret)); + } + + return ret; +} + +void dhd_apf_lock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + _dhd_apf_lock_local(dhd); +} + +void dhd_apf_unlock(struct net_device *dev) +{ + dhd_info_t *dhd = 
DHD_DEV_INFO(dev); + _dhd_apf_unlock_local(dhd); +} + +int +dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version) +{ + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ifidx, ret; + + if (!FW_SUPPORTED(dhdp, apf)) { + DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__)); + + /* + * Notify Android framework that APF is not supported by setting + * version as zero. + */ + *version = 0; + return BCME_OK; + } + + ifidx = dhd_net2idx(dhd, ndev); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__)); + return -ENODEV; + } + + ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_ver", version, + WLC_GET_VAR, FALSE, ifidx); + if (unlikely(ret)) { + DHD_ERROR(("%s: failed to get APF version, ret=%d\n", + __FUNCTION__, ret)); + } + + return ret; +} + +int +dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ifidx, ret; + + if (!FW_SUPPORTED(dhdp, apf)) { + DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__)); + *max_len = 0; + return BCME_OK; + } + + ifidx = dhd_net2idx(dhd, ndev); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s bad ifidx\n", __FUNCTION__)); + return -ENODEV; + } + + ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_size_limit", max_len, + WLC_GET_VAR, FALSE, ifidx); + if (unlikely(ret)) { + DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n", + __FUNCTION__, ret)); + } + + return ret; +} + +int +dhd_dev_apf_add_filter(struct net_device *ndev, u8* program, + uint32 program_len) +{ + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ret; + + DHD_APF_LOCK(ndev); + + /* delete, if filter already exists */ + if (dhdp->apf_set) { + ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID); + if (unlikely(ret)) { + goto exit; + } + dhdp->apf_set = FALSE; + } + + ret = __dhd_apf_add_filter(ndev, PKT_FILTER_APF_ID, program, program_len); + if (ret) { + goto exit; + } + 
dhdp->apf_set = TRUE; + + if (dhdp->in_suspend && dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) { + /* Driver is still in (early) suspend state, enable APF filter back */ + ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID, + PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE); + } +exit: + DHD_APF_UNLOCK(ndev); + + return ret; +} + +int +dhd_dev_apf_enable_filter(struct net_device *ndev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ret = 0; + + DHD_APF_LOCK(ndev); + + if (dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) { + ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID, + PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE); + } + + DHD_APF_UNLOCK(ndev); + + return ret; +} + +int +dhd_dev_apf_disable_filter(struct net_device *ndev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ret = 0; + + DHD_APF_LOCK(ndev); + + if (dhdp->apf_set) { + ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID, + PKT_FILTER_MODE_FORWARD_ON_MATCH, FALSE); + } + + DHD_APF_UNLOCK(ndev); + + return ret; +} + +int +dhd_dev_apf_delete_filter(struct net_device *ndev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(ndev); + dhd_pub_t *dhdp = &dhd->pub; + int ret = 0; + + DHD_APF_LOCK(ndev); + + if (dhdp->apf_set) { + ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID); + if (!ret) { + dhdp->apf_set = FALSE; + } + } + + DHD_APF_UNLOCK(ndev); + + return ret; +} +#endif /* PKT_FILTER_SUPPORT && APF */ + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) static void dhd_hang_process(void *dhd_info, void *event_info, u8 event) { @@ -11457,10 +15546,18 @@ static void dhd_hang_process(void *dhd_info, void *event_info, u8 event) dev = dhd->iflist[0]->net; if (dev) { - // terence 20161024: let wlan0 down when hang happened + /* + * For HW2, dev_close need to be done to recover + * from upper layer after hang. 
For Interposer skip + * dev_close so that dhd iovars can be used to take + * socramdump after crash, also skip for HW4 as + * handling of hang event is different + */ +#if !defined(CUSTOMER_HW2_INTERPOSER) rtnl_lock(); dev_close(dev); rtnl_unlock(); +#endif #if defined(WL_WIRELESS_EXT) wl_iw_send_priv_event(dev, "HANG"); #endif @@ -11486,10 +15583,39 @@ int dhd_os_send_hang_message(dhd_pub_t *dhdp) { int ret = 0; if (dhdp) { +#if defined(DHD_HANG_SEND_UP_TEST) + if (dhdp->req_hang_type) { + DHD_ERROR(("%s, Clear HANG test request 0x%x\n", + __FUNCTION__, dhdp->req_hang_type)); + dhdp->req_hang_type = 0; + } +#endif /* DHD_HANG_SEND_UP_TEST */ + if (!dhdp->hang_was_sent) { +#if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG) + dhdp->hang_counts++; + if (dhdp->hang_counts >= MAX_CONSECUTIVE_HANG_COUNTS) { + DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n", + __func__, dhdp->hang_counts)); + BUG_ON(1); + } +#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */ +#ifdef DHD_DEBUG_UART + /* If PCIe lane has broken, execute the debug uart application + * to gether a ramdump data from dongle via uart + */ + if (!dhdp->info->duart_execute) { + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, + (void *)dhdp, DHD_WQ_WORK_DEBUG_UART_DUMP, + dhd_debug_uart_exec_rd, DHD_WQ_WORK_PRIORITY_HIGH); + } +#endif /* DHD_DEBUG_UART */ dhdp->hang_was_sent = 1; +#ifdef BT_OVER_SDIO + dhdp->is_bt_recovery_required = TRUE; +#endif dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp, - DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WORK_PRIORITY_HIGH); + DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WQ_WORK_PRIORITY_HIGH); DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d s=%d\n", __FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate)); } @@ -11505,16 +15631,22 @@ int net_os_send_hang_message(struct net_device *dev) if (dhd) { /* Report FW problem when enabled */ if (dhd->pub.hang_report) { +#ifdef BT_OVER_SDIO + if (netif_running(dev)) { +#endif /* 
BT_OVER_SDIO */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) - ret = dhd_os_send_hang_message(&dhd->pub); + ret = dhd_os_send_hang_message(&dhd->pub); #else - ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED); + ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED); #endif +#ifdef BT_OVER_SDIO + } + DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__)); + bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev)); +#endif /* BT_OVER_SDIO */ } else { DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n", __FUNCTION__)); - /* Enforce bus down to stop any future traffic */ - dhd->pub.busstate = DHD_BUS_DOWN; } } return ret; @@ -11563,24 +15695,36 @@ bool dhd_force_country_change(struct net_device *dev) return dhd->pub.force_country_change; return FALSE; } + void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code, wl_country_t *cspec) { dhd_info_t *dhd = DHD_DEV_INFO(dev); -#ifdef CUSTOM_COUNTRY_CODE - get_customized_country_code(dhd->adapter, country_iso_code, cspec, +#if defined(DHD_BLOB_EXISTENCE_CHECK) + if (!dhd->pub.is_blob) +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + { +#if defined(CUSTOM_COUNTRY_CODE) + get_customized_country_code(dhd->adapter, country_iso_code, cspec, dhd->pub.dhd_cflags); #else - get_customized_country_code(dhd->adapter, country_iso_code, cspec); + get_customized_country_code(dhd->adapter, country_iso_code, cspec); #endif /* CUSTOM_COUNTRY_CODE */ + } + + BCM_REFERENCE(dhd); } void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify) { dhd_info_t *dhd = DHD_DEV_INFO(dev); +#ifdef WL_CFG80211 + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); +#endif + if (dhd && dhd->pub.up) { memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t)); #ifdef WL_CFG80211 - wl_update_wiphybands(NULL, notify); + wl_update_wiphybands(cfg, notify); #endif } } @@ -11588,9 +15732,12 @@ void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notif void 
dhd_bus_band_set(struct net_device *dev, uint band) { dhd_info_t *dhd = DHD_DEV_INFO(dev); +#ifdef WL_CFG80211 + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); +#endif if (dhd && dhd->pub.up) { #ifdef WL_CFG80211 - wl_update_wiphybands(NULL, true); + wl_update_wiphybands(cfg, true); #endif } } @@ -11752,6 +15899,52 @@ dhd_wait_pend8021x(struct net_device *dev) return pend; } +#if defined(DHD_DEBUG) +int write_file(const char * file_name, uint32 flags, uint8 *buf, int size) +{ + int ret = 0; + struct file *fp = NULL; + mm_segment_t old_fs; + loff_t pos = 0; + /* change to KERNEL_DS address limit */ + old_fs = get_fs(); + set_fs(KERNEL_DS); + + /* open file to write */ + fp = filp_open(file_name, flags, 0664); + if (IS_ERR(fp)) { + DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp))); + ret = -1; + goto exit; + } + + /* Write buf to file */ + ret = vfs_write(fp, buf, size, &pos); + if (ret < 0) { + DHD_ERROR(("write file error, err = %d\n", ret)); + goto exit; + } + + /* Sync file from filesystem to physical media */ + ret = vfs_fsync(fp, 0); + if (ret < 0) { + DHD_ERROR(("sync file error, error = %d\n", ret)); + goto exit; + } + ret = BCME_OK; + +exit: + /* close file before return */ + if (!IS_ERR(fp)) + filp_close(fp, current->files); + + /* restore previous address limit */ + set_fs(old_fs); + + return ret; +} +#endif + #ifdef DHD_DEBUG static void dhd_convert_memdump_type_to_str(uint32 type, char *buf) @@ -11774,9 +15967,15 @@ dhd_convert_memdump_type_to_str(uint32 type, char *buf) case DUMP_TYPE_PKTID_AUDIT_FAILURE: type_str = "PKTID_AUDIT_Fail"; break; + case DUMP_TYPE_PKTID_INVALID: + type_str = "PKTID_INVALID"; + break; case DUMP_TYPE_SCAN_TIMEOUT: type_str = "SCAN_timeout"; break; + case DUMP_TYPE_JOIN_TIMEOUT: + type_str = "JOIN_timeout"; + break; case DUMP_TYPE_SCAN_BUSY: type_str = "SCAN_Busy"; break; @@ -11789,6 +15988,35 @@ dhd_convert_memdump_type_to_str(uint32 type, char *buf) case DUMP_TYPE_AP_LINKUP_FAILURE: type_str = "BY_AP_LINK_FAILURE"; break; 
+ case DUMP_TYPE_AP_ABNORMAL_ACCESS: + type_str = "INVALID_ACCESS"; + break; + case DUMP_TYPE_CFG_VENDOR_TRIGGERED: + type_str = "CFG_VENDOR_TRIGGERED"; + break; + case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX: + type_str = "ERROR_RX_TIMED_OUT"; + break; + case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX: + type_str = "ERROR_TX_TIMED_OUT"; + break; + case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR: + type_str = "BY_INVALID_RING_RDWR"; + break; + case DUMP_TYPE_DONGLE_HOST_EVENT: + type_str = "BY_DONGLE_HOST_EVENT"; + break; + case DUMP_TYPE_TRANS_ID_MISMATCH: + type_str = "BY_TRANS_ID_MISMATCH"; + break; + case DUMP_TYPE_HANG_ON_IFACE_OP_FAIL: + type_str = "HANG_IFACE_OP_FAIL"; + break; +#ifdef SUPPORT_LINKDOWN_RECOVERY + case DUMP_TYPE_READ_SHM_FAIL: + type_str = "READ_SHM_FAIL"; + break; +#endif /* SUPPORT_LINKDOWN_RECOVERY */ default: type_str = "Unknown_type"; break; @@ -11799,39 +16027,37 @@ dhd_convert_memdump_type_to_str(uint32 type, char *buf) } int -write_to_file(dhd_pub_t *dhd, uint8 *buf, int size) +write_dump_to_file(dhd_pub_t *dhd, uint8 *buf, int size, char *fname) { int ret = 0; - struct file *fp = NULL; - mm_segment_t old_fs; - loff_t pos = 0; char memdump_path[128]; char memdump_type[32]; struct timeval curtime; uint32 file_mode; - /* change to KERNEL_DS address limit */ - old_fs = get_fs(); - set_fs(KERNEL_DS); - /* Init file name */ memset(memdump_path, 0, sizeof(memdump_path)); memset(memdump_type, 0, sizeof(memdump_type)); do_gettimeofday(&curtime); dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type); #ifdef CUSTOMER_HW4_DEBUG - snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld", - DHD_COMMON_DUMP_PATH "mem_dump", memdump_type, + snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld", + DHD_COMMON_DUMP_PATH, fname, memdump_type, (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec); file_mode = O_CREAT | O_WRONLY | O_SYNC; #elif defined(CUSTOMER_HW2) - snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld", - 
"/data/misc/wifi/mem_dump", memdump_type, + snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld", + "/data/misc/wifi/", fname, memdump_type, (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec); file_mode = O_CREAT | O_WRONLY | O_SYNC; +#elif (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__)) + snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld", + "/data/misc/wifi/", fname, memdump_type, + (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec); + file_mode = O_CREAT | O_WRONLY; #else - snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld", - "/installmedia/mem_dump", memdump_type, + snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld", + "/installmedia/", fname, memdump_type, (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec); /* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are * calling BUG_ON immediately after collecting the socram dump. @@ -11839,37 +16065,26 @@ write_to_file(dhd_pub_t *dhd, uint8 *buf, int size) * file instead of caching it. O_TRUNC flag ensures that file will be re-written * instead of appending. 
*/ - file_mode = O_CREAT | O_WRONLY | O_DIRECT | O_SYNC | O_TRUNC; + file_mode = O_CREAT | O_WRONLY | O_SYNC; + { + struct file *fp = filp_open(memdump_path, file_mode, 0664); + /* Check if it is live Brix image having /installmedia, else use /data */ + if (IS_ERR(fp)) { + DHD_ERROR(("open file %s, try /data/\n", memdump_path)); + snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld", + "/data/", fname, memdump_type, + (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec); + } else { + filp_close(fp, NULL); + } + } #endif /* CUSTOMER_HW4_DEBUG */ /* print SOCRAM dump file path */ - DHD_ERROR(("%s: memdump_path = %s\n", __FUNCTION__, memdump_path)); + DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__, memdump_path)); - /* open file to write */ - fp = filp_open(memdump_path, file_mode, 0644); - if (IS_ERR(fp)) { - ret = PTR_ERR(fp); - printf("%s: open file error, err = %d\n", __FUNCTION__, ret); - goto exit; - } - - /* Write buf to file */ - fp->f_op->write(fp, buf, size, &pos); - -exit: - /* close file before return */ - if (!ret) - filp_close(fp, current->files); - - /* restore previous address limit */ - set_fs(old_fs); - - /* free buf before return */ -#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) - DHD_OS_PREFREE(dhd, buf, size); -#else - MFREE(dhd->osh, buf, size); -#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + /* Write file */ + ret = write_file(memdump_path, file_mode, buf, size); return ret; } @@ -11881,7 +16096,7 @@ int dhd_os_wake_lock_timeout(dhd_pub_t *pub) unsigned long flags; int ret = 0; - if (dhd) { + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { spin_lock_irqsave(&dhd->wakelock_spinlock, flags); ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ? 
dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable; @@ -11915,7 +16130,7 @@ int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val) dhd_info_t *dhd = (dhd_info_t *)(pub->info); unsigned long flags; - if (dhd) { + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { spin_lock_irqsave(&dhd->wakelock_spinlock, flags); if (val > dhd->wakelock_rx_timeout_enable) dhd->wakelock_rx_timeout_enable = val; @@ -11929,7 +16144,7 @@ int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val) dhd_info_t *dhd = (dhd_info_t *)(pub->info); unsigned long flags; - if (dhd) { + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { spin_lock_irqsave(&dhd->wakelock_spinlock, flags); if (val > dhd->wakelock_ctrl_timeout_enable) dhd->wakelock_ctrl_timeout_enable = val; @@ -11943,7 +16158,7 @@ int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub) dhd_info_t *dhd = (dhd_info_t *)(pub->info); unsigned long flags; - if (dhd) { + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { spin_lock_irqsave(&dhd->wakelock_spinlock, flags); dhd->wakelock_ctrl_timeout_enable = 0; #ifdef CONFIG_HAS_WAKELOCK @@ -11993,7 +16208,6 @@ struct hlist_head wklock_history[32] = { [0 ... 
31] = HLIST_HEAD_INIT }; #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ int trace_wklock_onoff = 1; - typedef enum dhd_wklock_type { DHD_WAKE_LOCK, DHD_WAKE_UNLOCK, @@ -12008,7 +16222,6 @@ struct wk_trace_record { struct hlist_node wklock_node; /* hash node */ }; - static struct wk_trace_record *find_wklock_entry(unsigned long addr) { struct wk_trace_record *wklock_info; @@ -12027,6 +16240,7 @@ static struct wk_trace_record *find_wklock_entry(unsigned long addr) return NULL; } + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) #define HASH_ADD(hashtable, node, key) \ do { \ @@ -12085,20 +16299,20 @@ static inline void dhd_wk_lock_rec_dump(void) { switch (wklock_info->lock_type) { case DHD_WAKE_LOCK: - DHD_ERROR(("wakelock lock : %pS lock_counter : %llu\n", - (void *)wklock_info->addr, wklock_info->counter)); + printk("wakelock lock : %pS lock_counter : %llu \n", + (void *)wklock_info->addr, wklock_info->counter); break; case DHD_WAKE_UNLOCK: - DHD_ERROR(("wakelock unlock : %pS, unlock_counter : %llu\n", - (void *)wklock_info->addr, wklock_info->counter)); + printk("wakelock unlock : %pS, unlock_counter : %llu \n", + (void *)wklock_info->addr, wklock_info->counter); break; case DHD_WAIVE_LOCK: - DHD_ERROR(("wakelock waive : %pS before_waive : %llu\n", - (void *)wklock_info->addr, wklock_info->counter)); + printk("wakelock waive : %pS before_waive : %llu \n", + (void *)wklock_info->addr, wklock_info->counter); break; case DHD_RESTORE_LOCK: - DHD_ERROR(("wakelock restore : %pS, after_waive : %llu\n", - (void *)wklock_info->addr, wklock_info->counter)); + printk("wakelock restore : %pS, after_waive : %llu \n", + (void *)wklock_info->addr, wklock_info->counter); break; } } @@ -12156,11 +16370,11 @@ void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp) dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); unsigned long flags; - DHD_ERROR((KERN_ERR"DHD Printing wl_wake Lock/Unlock Record \r\n")); + printk(KERN_ERR"DHD Printing wl_wake Lock/Unlock Record \r\n"); 
spin_lock_irqsave(&dhd->wakelock_spinlock, flags); dhd_wk_lock_rec_dump(); spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); - DHD_ERROR((KERN_ERR"Event wakelock counter %u\n", dhd->wakelock_event_counter)); + } #else #define STORE_WKLOCK_RECORD(wklock_type) @@ -12172,7 +16386,7 @@ int dhd_os_wake_lock(dhd_pub_t *pub) unsigned long flags; int ret = 0; - if (dhd) { + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { spin_lock_irqsave(&dhd->wakelock_spinlock, flags); if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) { #ifdef CONFIG_HAS_WAKELOCK @@ -12194,27 +16408,41 @@ int dhd_os_wake_lock(dhd_pub_t *pub) return ret; } -int dhd_event_wake_lock(dhd_pub_t *pub) +void dhd_event_wake_lock(dhd_pub_t *pub) { dhd_info_t *dhd = (dhd_info_t *)(pub->info); - unsigned long flags; - int ret = 0; if (dhd) { - spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags); - if (dhd->wakelock_event_counter == 0) { #ifdef CONFIG_HAS_WAKELOCK - wake_lock(&dhd->wl_evtwake); + wake_lock(&dhd->wl_evtwake); #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) - dhd_bus_dev_pm_stay_awake(pub); + dhd_bus_dev_pm_stay_awake(pub); #endif - } - dhd->wakelock_event_counter++; - ret = dhd->wakelock_event_counter; - spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags); } +} - return ret; +void +dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val)); + } +#endif /* CONFIG_HAS_WAKE_LOCK */ +} + +void +dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val)); + } +#endif /* CONFIG_HAS_WAKE_LOCK */ } int net_os_wake_lock(struct net_device *dev) @@ -12234,7 +16462,7 @@ int dhd_os_wake_unlock(dhd_pub_t *pub) int ret = 0; dhd_os_wake_lock_timeout(pub); - if (dhd) { 
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { spin_lock_irqsave(&dhd->wakelock_spinlock, flags); if (dhd->wakelock_counter > 0) { @@ -12258,29 +16486,45 @@ int dhd_os_wake_unlock(dhd_pub_t *pub) return ret; } -int dhd_event_wake_unlock(dhd_pub_t *pub) +void dhd_event_wake_unlock(dhd_pub_t *pub) { dhd_info_t *dhd = (dhd_info_t *)(pub->info); - unsigned long flags; - int ret = 0; if (dhd) { - spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags); - - if (dhd->wakelock_event_counter > 0) { - dhd->wakelock_event_counter--; - if (dhd->wakelock_event_counter == 0) { #ifdef CONFIG_HAS_WAKELOCK - wake_unlock(&dhd->wl_evtwake); + wake_unlock(&dhd->wl_evtwake); #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) - dhd_bus_dev_pm_relax(pub); + dhd_bus_dev_pm_relax(pub); #endif - } - ret = dhd->wakelock_event_counter; - } - spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags); } - return ret; +} + +void dhd_pm_wake_unlock(dhd_pub_t *pub) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + /* if wl_pmwake is active, unlock it */ + if (wake_lock_active(&dhd->wl_pmwake)) { + wake_unlock(&dhd->wl_pmwake); + } + } +#endif /* CONFIG_HAS_WAKELOCK */ +} + +void dhd_txfl_wake_unlock(dhd_pub_t *pub) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + /* if wl_txflwake is active, unlock it */ + if (wake_lock_active(&dhd->wl_txflwake)) { + wake_unlock(&dhd->wl_txflwake); + } + } +#endif /* CONFIG_HAS_WAKELOCK */ } int dhd_os_check_wakelock(dhd_pub_t *pub) @@ -12309,13 +16553,13 @@ int dhd_os_check_wakelock(dhd_pub_t *pub) int dhd_os_check_wakelock_all(dhd_pub_t *pub) { -#ifdef CONFIG_HAS_WAKELOCK - int l1, l2, l3, l4, l7; +#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \ + KERNEL_VERSION(2, 6, 36))) +#if defined(CONFIG_HAS_WAKELOCK) + int l1, l2, l3, l4, l7, l8, l9; int l5 = 0, l6 = 0; int c, lock_active; #endif /* 
CONFIG_HAS_WAKELOCK */ -#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \ - KERNEL_VERSION(2, 6, 36))) dhd_info_t *dhd; if (!pub) { @@ -12333,27 +16577,29 @@ dhd_os_check_wakelock_all(dhd_pub_t *pub) l2 = wake_lock_active(&dhd->wl_wdwake); l3 = wake_lock_active(&dhd->wl_rxwake); l4 = wake_lock_active(&dhd->wl_ctrlwake); + l7 = wake_lock_active(&dhd->wl_evtwake); #ifdef BCMPCIE_OOB_HOST_WAKE l5 = wake_lock_active(&dhd->wl_intrwake); #endif /* BCMPCIE_OOB_HOST_WAKE */ #ifdef DHD_USE_SCAN_WAKELOCK l6 = wake_lock_active(&dhd->wl_scanwake); #endif /* DHD_USE_SCAN_WAKELOCK */ - l7 = wake_lock_active(&dhd->wl_evtwake); - lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7); + l8 = wake_lock_active(&dhd->wl_pmwake); + l9 = wake_lock_active(&dhd->wl_txflwake); + lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7 || l8 || l9); /* Indicate to the Host to avoid going to suspend if internal locks are up */ - if (dhd && lock_active) { + if (lock_active) { DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d " - "ctl-%d intr-%d scan-%d evt-%d\n", - __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7)); + "ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d\n", + __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7, l8, l9)); return 1; } #elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) { return 1; } -#endif /* CONFIG_HAS_WAKELOCK */ +#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */ return 0; } @@ -12473,7 +16719,7 @@ int dhd_os_wake_lock_waive(dhd_pub_t *pub) unsigned long flags; int ret = 0; - if (dhd) { + if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { spin_lock_irqsave(&dhd->wakelock_spinlock, flags); /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */ @@ -12501,6 +16747,8 @@ int dhd_os_wake_lock_restore(dhd_pub_t *pub) if (!dhd) return 0; + if ((dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) == 0) + return 0; 
spin_lock_irqsave(&dhd->wakelock_spinlock, flags); @@ -12542,7 +16790,6 @@ exit: void dhd_os_wake_lock_init(struct dhd_info *dhd) { DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__)); - dhd->wakelock_event_counter = 0; dhd->wakelock_counter = 0; dhd->wakelock_rx_timeout_enable = 0; dhd->wakelock_ctrl_timeout_enable = 0; @@ -12551,6 +16798,8 @@ void dhd_os_wake_lock_init(struct dhd_info *dhd) wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake"); wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake"); wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake"); + wake_lock_init(&dhd->wl_pmwake, WAKE_LOCK_SUSPEND, "wlan_pm_wake"); + wake_lock_init(&dhd->wl_txflwake, WAKE_LOCK_SUSPEND, "wlan_txfl_wake"); #ifdef BCMPCIE_OOB_HOST_WAKE wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake"); #endif /* BCMPCIE_OOB_HOST_WAKE */ @@ -12567,7 +16816,6 @@ void dhd_os_wake_lock_destroy(struct dhd_info *dhd) { DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__)); #ifdef CONFIG_HAS_WAKELOCK - dhd->wakelock_event_counter = 0; dhd->wakelock_counter = 0; dhd->wakelock_rx_timeout_enable = 0; dhd->wakelock_ctrl_timeout_enable = 0; @@ -12575,6 +16823,8 @@ void dhd_os_wake_lock_destroy(struct dhd_info *dhd) wake_lock_destroy(&dhd->wl_rxwake); wake_lock_destroy(&dhd->wl_ctrlwake); wake_lock_destroy(&dhd->wl_evtwake); + wake_lock_destroy(&dhd->wl_pmwake); + wake_lock_destroy(&dhd->wl_txflwake); #ifdef BCMPCIE_OOB_HOST_WAKE wake_lock_destroy(&dhd->wl_intrwake); #endif /* BCMPCIE_OOB_HOST_WAKE */ @@ -12594,13 +16844,14 @@ bool dhd_os_check_if_up(dhd_pub_t *pub) return pub->up; } +#if defined(BCMSDIO) || defined(BCMPCIE) /* function to collect firmware, chip id and chip version info */ void dhd_set_version_info(dhd_pub_t *dhdp, char *fw) { int i; i = snprintf(info_string, sizeof(info_string), - " Driver: %s\n Firmware: %s ", EPI_VERSION_STR, fw); + " Driver: %s\n Firmware: %s\n CLM: %s ", EPI_VERSION_STR, fw, 
clm_version); printf("%s\n", info_string); if (!dhdp) @@ -12610,6 +16861,7 @@ void dhd_set_version_info(dhd_pub_t *dhdp, char *fw) "\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp), dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp)); } +#endif /* BCMSDIO || BCMPCIE */ int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd) { @@ -12682,13 +16934,15 @@ void dhd_wlfc_plat_deinit(void *dhd) return; } -bool dhd_wlfc_skip_fc(void) +bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx) { #ifdef SKIP_WLFC_ON_CONCURRENT -#ifdef WL_CFG80211 +#ifdef WL_CFG80211 + struct net_device * net = dhd_idx2net((dhd_pub_t *)dhdp, idx); + if (net) /* enable flow control in vsdb mode */ - return !(wl_cfg80211_is_concurrent_mode()); + return !(wl_cfg80211_is_concurrent_mode(net)); #else return TRUE; /* skip flow control */ #endif /* WL_CFG80211 */ @@ -12696,6 +16950,7 @@ bool dhd_wlfc_skip_fc(void) #else return FALSE; #endif /* SKIP_WLFC_ON_CONCURRENT */ + return FALSE; } #endif /* PROP_TXSTATUS */ @@ -12802,7 +17057,7 @@ static const struct file_operations dhd_dbg_state_ops = { .llseek = dhd_debugfs_lseek }; -static void dhd_dbg_create(void) +static void dhd_dbgfs_create(void) { if (g_dbgfs.debugfs_dir) { g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir, @@ -12810,7 +17065,7 @@ static void dhd_dbg_create(void) } } -void dhd_dbg_init(dhd_pub_t *dhdp) +void dhd_dbgfs_init(dhd_pub_t *dhdp) { g_dbgfs.dhdp = dhdp; g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */ @@ -12821,12 +17076,12 @@ void dhd_dbg_init(dhd_pub_t *dhdp) return; } - dhd_dbg_create(); + dhd_dbgfs_create(); return; } -void dhd_dbg_remove(void) +void dhd_dbgfs_remove(void) { debugfs_remove(g_dbgfs.debugfs_mem); debugfs_remove(g_dbgfs.debugfs_dir); @@ -12935,6 +17190,7 @@ void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf) } else { return; } + if (htsf_ts->magic == HTSFMAGIC) { htsf_ts->tE0 = dhd_get_htsf(dhd, 0); htsf_ts->cE0 = get_cycles(); @@ -12977,9 +17233,9 @@ 
uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx) t = get_cycles(); cur_cycle = t; - if (cur_cycle > dhd->htsf.last_cycle) { + if (cur_cycle > dhd->htsf.last_cycle) delta = cur_cycle - dhd->htsf.last_cycle; - } else { + else { delta = cur_cycle + (0xFFFFFFFF - dhd->htsf.last_cycle); } @@ -13034,7 +17290,6 @@ static void dhd_dump_latency(void) static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx) { - wl_ioctl_t ioc; char buf[32]; int ret; uint32 s1, s2; @@ -13044,18 +17299,11 @@ dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx) uint32 high; } tsf_buf; - memset(&ioc, 0, sizeof(ioc)); memset(&tsf_buf, 0, sizeof(tsf_buf)); - ioc.cmd = WLC_GET_VAR; - ioc.buf = buf; - ioc.len = (uint)sizeof(buf); - ioc.set = FALSE; - - strncpy(buf, "tsf", sizeof(buf) - 1); - buf[sizeof(buf) - 1] = '\0'; s1 = dhd_get_htsf(dhd, 0); - if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { + ret = dhd_iovar(&dhd->pub, ifidx, "tsf", NULL, 0, buf, sizeof(buf), FALSE); + if (ret < 0) { if (ret == -EIO) { DHD_ERROR(("%s: tsf is not supported by device\n", dhd_ifname(&dhd->pub, ifidx))); @@ -13202,7 +17450,7 @@ void dhd_set_cpucore(dhd_pub_t *dhd, int set) } while (e_rxf < 0); } #ifdef DHD_OF_SUPPORT - interrupt_set_cpucore(set); + interrupt_set_cpucore(set, DPC_CPUCORE, PRIMARY_CPUCORE); #endif /* DHD_OF_SUPPORT */ DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set)); @@ -13210,6 +17458,40 @@ void dhd_set_cpucore(dhd_pub_t *dhd, int set) } #endif /* CUSTOM_SET_CPUCORE */ +#ifdef DHD_MCAST_REGEN +/* Get interface specific ap_isolate configuration */ +int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + return ifp->mcast_regen_bss_enable; +} + +/* Set interface specific mcast_regen configuration */ +int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < 
DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ifp->mcast_regen_bss_enable = val; + + /* Disable rx_pkt_chain feature for interface, if mcast_regen feature + * is enabled + */ + dhd_update_rx_pkt_chainable_state(dhdp, idx); + return BCME_OK; +} +#endif /* DHD_MCAST_REGEN */ + /* Get interface specific ap_isolate configuration */ int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx) { @@ -13233,24 +17515,26 @@ int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val) ifp = dhd->iflist[idx]; - ifp->ap_isolate = val; + if (ifp) + ifp->ap_isolate = val; return 0; } #ifdef DHD_FW_COREDUMP - +#if defined(CONFIG_X86) +#define MEMDUMPINFO_LIVE "/installmedia/.memdump.info" +#define MEMDUMPINFO_INST "/data/.memdump.info" +#endif /* CONFIG_X86 && OEM_ANDROID */ #ifdef CUSTOMER_HW4_DEBUG -#ifdef PLATFORM_SLP -#define MEMDUMPINFO "/opt/etc/.memdump.info" -#else -#define MEMDUMPINFO "/data/.memdump.info" -#endif /* PLATFORM_SLP */ +#define MEMDUMPINFO PLATFORM_PATH".memdump.info" #elif defined(CUSTOMER_HW2) #define MEMDUMPINFO "/data/misc/wifi/.memdump.info" +#elif (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__)) +#define MEMDUMPINFO "/data/misc/wifi/.memdump.info" #else -#define MEMDUMPINFO "/installmedia/.memdump.info" +#define MEMDUMPINFO "/data/misc/wifi/.memdump.info" #endif /* CUSTOMER_HW4_DEBUG */ void dhd_get_memdump_info(dhd_pub_t *dhd) @@ -13264,29 +17548,50 @@ void dhd_get_memdump_info(dhd_pub_t *dhd) fp = filp_open(filepath, O_RDONLY, 0); if (IS_ERR(fp)) { DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); - goto done; - } else { - ret = kernel_read(fp, 0, (char *)&mem_val, 4); - if (ret < 0) { - DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret)); - filp_close(fp, NULL); +#if defined(CONFIG_X86) + /* Check if it is Live Brix Image */ + if (strcmp(filepath, MEMDUMPINFO_LIVE) != 0) { goto done; } - - mem_val = bcm_atoi((char *)&mem_val); - - DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, mem_val)); - filp_close(fp, NULL); + /* 
Try if it is Installed Brix Image */ + filepath = MEMDUMPINFO_INST; + DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath)); + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); + goto done; + } +#else /* Non Brix Android platform */ + goto done; +#endif /* CONFIG_X86 && OEM_ANDROID */ } + /* Handle success case */ + ret = kernel_read(fp, 0, (char *)&mem_val, 4); + if (ret < 0) { + DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret)); + filp_close(fp, NULL); + goto done; + } + + mem_val = bcm_atoi((char *)&mem_val); + + filp_close(fp, NULL); + +#ifdef DHD_INIT_DEFAULT_MEMDUMP + if (mem_val == 0 || mem_val == DUMP_MEMFILE_MAX) + mem_val = DUMP_MEMFILE_BUGON; +#endif /* DHD_INIT_DEFAULT_MEMDUMP */ + done: #ifdef CUSTOMER_HW4_DEBUG dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_DISABLED; #else - dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_MEMFILE_BUGON; + dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? 
mem_val : DUMP_MEMFILE; #endif /* CUSTOMER_HW4_DEBUG */ -} + DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, dhd->memdump_enabled)); +} void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size) { @@ -13316,8 +17621,9 @@ void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size) } #endif /* DHD_LOG_DUMP */ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump, - DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WORK_PRIORITY_HIGH); + DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH); } + static void dhd_mem_dump(void *handle, void *event_info, u8 event) { @@ -13334,21 +17640,101 @@ dhd_mem_dump(void *handle, void *event_info, u8 event) return; } - if (write_to_file(&dhd->pub, dump->buf, dump->bufsize)) { + if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) { DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__)); + dhd->pub.memdump_success = FALSE; } if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON && #ifdef DHD_LOG_DUMP dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP && -#endif - TRUE) { +#endif /* DHD_LOG_DUMP */ +#ifdef DHD_DEBUG_UART + dhd->pub.memdump_success == TRUE && +#endif /* DHD_DEBUG_UART */ + dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) { + +#ifdef SHOW_LOGTRACE + /* Wait till event_log_dispatcher_work finishes */ + cancel_work_sync(&dhd->event_log_dispatcher_work); +#endif /* SHOW_LOGTRACE */ + BUG_ON(1); } MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t)); } #endif /* DHD_FW_COREDUMP */ +#ifdef DHD_SSSR_DUMP + +static void +dhd_sssr_dump(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + dhd_pub_t *dhdp; + int i; + char before_sr_dump[128]; + char after_sr_dump[128]; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + dhdp = &dhd->pub; + + for (i = 0; i < MAX_NUM_D11CORES; i++) { + /* Init file name */ + memset(before_sr_dump, 0, sizeof(before_sr_dump)); + memset(after_sr_dump, 0, 
sizeof(after_sr_dump)); + + snprintf(before_sr_dump, sizeof(before_sr_dump), "%s_%d_%s", + "sssr_core", i, "before_SR"); + snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%d_%s", + "sssr_core", i, "after_SR"); + + if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i]) { + if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i], + dhdp->sssr_reg_info.mac_regs[i].sr_size, before_sr_dump)) { + DHD_ERROR(("%s: writing SSSR MAIN dump before to the file failed\n", + __FUNCTION__)); + } + } + if (dhdp->sssr_d11_after[i] && dhdp->sssr_d11_outofreset[i]) { + if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_after[i], + dhdp->sssr_reg_info.mac_regs[i].sr_size, after_sr_dump)) { + DHD_ERROR(("%s: writing SSSR AUX dump after to the file failed\n", + __FUNCTION__)); + } + } + } + + if (dhdp->sssr_vasip_buf_before) { + if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_vasip_buf_before, + dhdp->sssr_reg_info.vasip_regs.vasip_sr_size, "sssr_vasip_before_SR")) { + DHD_ERROR(("%s: writing SSSR VASIP dump before to the file failed\n", + __FUNCTION__)); + } + } + + if (dhdp->sssr_vasip_buf_after) { + if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_vasip_buf_after, + dhdp->sssr_reg_info.vasip_regs.vasip_sr_size, "sssr_vasip_after_SR")) { + DHD_ERROR(("%s: writing SSSR VASIP dump after to the file failed\n", + __FUNCTION__)); + } + } + +} + +void +dhd_schedule_sssr_dump(dhd_pub_t *dhdp) +{ + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL, + DHD_WQ_WORK_SSSR_DUMP, dhd_sssr_dump, DHD_WQ_WORK_PRIORITY_HIGH); +} +#endif /* DHD_SSSR_DUMP */ + #ifdef DHD_LOG_DUMP static void dhd_log_dump(void *handle, void *event_info, u8 event) @@ -13370,34 +17756,35 @@ void dhd_schedule_log_dump(dhd_pub_t *dhdp) { dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void*)NULL, DHD_WQ_WORK_DHD_LOG_DUMP, - dhd_log_dump, DHD_WORK_PRIORITY_HIGH); + dhd_log_dump, DHD_WQ_WORK_PRIORITY_HIGH); } static int do_dhd_log_dump(dhd_pub_t *dhdp) { - int ret = 0; + int ret = 0, i = 0; 
struct file *fp = NULL; mm_segment_t old_fs; loff_t pos = 0; + unsigned int wr_size = 0; char dump_path[128]; - char common_info[1024]; struct timeval curtime; uint32 file_mode; unsigned long flags = 0; + struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0]; + + const char *pre_strs = + "-------------------- General log ---------------------------\n"; + + const char *post_strs = + "-------------------- Specific log --------------------------\n"; if (!dhdp) { return -1; } - /* Building the additional information like DHD, F/W version */ - memset(common_info, 0, sizeof(common_info)); - snprintf(common_info, sizeof(common_info), - "---------- Common information ----------\n" - "DHD version: %s\n" - "F/W version: %s\n" - "----------------------------------------\n", - dhd_version, fw_version); + DHD_ERROR(("DHD version: %s\n", dhd_version)); + DHD_ERROR(("F/W version: %s\n", fw_version)); /* change to KERNEL_DS address limit */ old_fs = get_fs(); @@ -13412,31 +17799,67 @@ do_dhd_log_dump(dhd_pub_t *dhdp) file_mode = O_CREAT | O_WRONLY | O_SYNC; DHD_ERROR(("debug_dump_path = %s\n", dump_path)); - fp = filp_open(dump_path, file_mode, 0644); + fp = filp_open(dump_path, file_mode, 0664); if (IS_ERR(fp)) { ret = PTR_ERR(fp); DHD_ERROR(("open file error, err = %d\n", ret)); - ret = -1; goto exit; } - fp->f_op->write(fp, common_info, strlen(common_info), &pos); - if (dhdp->dld_buf.wraparound) { - fp->f_op->write(fp, dhdp->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE, &pos); - } else { - fp->f_op->write(fp, dhdp->dld_buf.buffer, - (int)(dhdp->dld_buf.present - dhdp->dld_buf.front), &pos); + ret = vfs_write(fp, pre_strs, strlen(pre_strs), &pos); + if (ret < 0) { + DHD_ERROR(("write file error, err = %d\n", ret)); + goto exit; } - /* re-init dhd_log_dump_buf structure */ - spin_lock_irqsave(&dhdp->dld_buf.lock, flags); - dhdp->dld_buf.wraparound = 0; - dhdp->dld_buf.present = dhdp->dld_buf.front; - dhdp->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE; - bzero(dhdp->dld_buf.buffer, 
DHD_LOG_DUMP_BUFFER_SIZE); - spin_unlock_irqrestore(&dhdp->dld_buf.lock, flags); + do { + unsigned int buf_size = (unsigned int)(dld_buf->max - + (unsigned long)dld_buf->buffer); + if (dld_buf->wraparound) { + wr_size = buf_size; + } else { + if (!dld_buf->buffer[0]) { /* print log if buf is empty. */ + DHD_ERROR_EX(("Buffer is empty. No event/log.\n")); + } + wr_size = (unsigned int)(dld_buf->present - dld_buf->front); + } + + ret = vfs_write(fp, dld_buf->buffer, wr_size, &pos); + if (ret < 0) { + DHD_ERROR(("write file error, err = %d\n", ret)); + goto exit; + } + + /* re-init dhd_log_dump_buf structure */ + spin_lock_irqsave(&dld_buf->lock, flags); + dld_buf->wraparound = 0; + dld_buf->present = dld_buf->front; + dld_buf->remain = buf_size; + bzero(dld_buf->buffer, buf_size); + spin_unlock_irqrestore(&dld_buf->lock, flags); + ret = BCME_OK; + + if (++i < DLD_BUFFER_NUM) { + dld_buf = &g_dld_buf[i]; + } else { + break; + } + + ret = vfs_write(fp, post_strs, strlen(post_strs), &pos); + if (ret < 0) { + DHD_ERROR(("write file error, err = %d\n", ret)); + goto exit; + } + } while (1); + exit: - if (!ret) { +#if defined(STAT_REPORT) + if (!IS_ERR(fp) && ret >= 0) { + wl_stat_report_file_save(dhdp, fp); + } +#endif /* STAT_REPORT */ + + if (!IS_ERR(fp)) { filp_close(fp, NULL); } set_fs(old_fs); @@ -13445,13 +17868,10 @@ exit: } #endif /* DHD_LOG_DUMP */ + #ifdef BCMASSERT_LOG #ifdef CUSTOMER_HW4_DEBUG -#ifdef PLATFORM_SLP -#define ASSERTINFO "/opt/etc/.assert.info" -#else -#define ASSERTINFO "/data/.assert.info" -#endif /* PLATFORM_SLP */ +#define ASSERTINFO PLATFORM_PATH".assert.info" #elif defined(CUSTOMER_HW2) #define ASSERTINFO "/data/misc/wifi/.assert.info" #else @@ -13461,6 +17881,7 @@ void dhd_get_assert_info(dhd_pub_t *dhd) { struct file *fp = NULL; char *filepath = ASSERTINFO; + int mem_val = -1; /* * Read assert info from the file @@ -13473,20 +17894,106 @@ void dhd_get_assert_info(dhd_pub_t *dhd) if (IS_ERR(fp)) { DHD_ERROR(("%s: File [%s] doesn't exist\n", 
__FUNCTION__, filepath)); } else { - int mem_val = 0; int ret = kernel_read(fp, 0, (char *)&mem_val, 4); if (ret < 0) { DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret)); } else { mem_val = bcm_atoi((char *)&mem_val); DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val)); - g_assert_type = mem_val; } filp_close(fp, NULL); } +#ifdef CUSTOMER_HW4_DEBUG + /* By default. set to 1, No Kernel Panic */ + g_assert_type = (mem_val >= 0) ? mem_val : 1; +#else + /* By default. set to 0, Kernel Panic */ + g_assert_type = (mem_val >= 0) ? mem_val : 0; +#endif } #endif /* BCMASSERT_LOG */ +/* + * This call is to get the memdump size so that, + * halutil can alloc that much buffer in user space. + */ +int +dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size) +{ + int ret = BCME_OK; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + dhd_pub_t *dhdp = &dhd->pub; + + if (dhdp->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: bus is down\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) { + DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n", + __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state)); + return BCME_ERROR; + } + + ret = dhd_common_socram_dump(dhdp); + if (ret == BCME_OK) { + *dump_size = dhdp->soc_ram_length; + } + return ret; +} + +/* + * This is to get the actual memdup after getting the memdump size + */ +int +dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size) +{ + int ret = BCME_OK; + int orig_len = 0; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + dhd_pub_t *dhdp = &dhd->pub; + if (buf == NULL) + return BCME_ERROR; + orig_len = *size; + if (dhdp->soc_ram) { + if (orig_len >= dhdp->soc_ram_length) { + memcpy(*buf, dhdp->soc_ram, dhdp->soc_ram_length); + /* reset the storage of dump */ + memset(dhdp->soc_ram, 0, dhdp->soc_ram_length); + *size = dhdp->soc_ram_length; + } else { + ret = BCME_BUFTOOSHORT; + DHD_ERROR(("The 
length of the buffer is too short" + " to save the memory dump with %d\n", dhdp->soc_ram_length)); + } + } else { + DHD_ERROR(("socram_dump is not ready to get\n")); + ret = BCME_NOTREADY; + } + return ret; +} + +int +dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size) +{ + char *fw_str; + + if (size == 0) + return BCME_BADARG; + + fw_str = strstr(info_string, "Firmware: "); + if (fw_str == NULL) { + return BCME_ERROR; + } + + memset(*buf, 0, size); + if (dhd_ver) { + strncpy(*buf, dhd_version, size - 1); + } else { + strncpy(*buf, fw_str, size - 1); + } + return BCME_OK; +} #ifdef DHD_WMF /* Returns interface specific WMF configuration */ @@ -13502,13 +18009,49 @@ dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx) } #endif /* DHD_WMF */ +#if defined(TRAFFIC_MGMT_DWM) +void traffic_mgmt_pkt_set_prio(dhd_pub_t *dhdp, void * pktbuf) +{ + struct ether_header *eh; + struct ethervlan_header *evh; + uint8 *pktdata, *ip_body; + uint8 dwm_filter; + uint8 tos_tc = 0; + uint8 dscp = 0; + pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf); + eh = (struct ether_header *) pktdata; + ip_body = NULL; + + if (dhdp->dhd_tm_dwm_tbl.dhd_dwm_enabled) { + if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) { + evh = (struct ethervlan_header *)eh; + if ((evh->ether_type == hton16(ETHER_TYPE_IP)) || + (evh->ether_type == hton16(ETHER_TYPE_IPV6))) { + ip_body = pktdata + sizeof(struct ethervlan_header); + } + } else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) || + (eh->ether_type == hton16(ETHER_TYPE_IPV6))) { + ip_body = pktdata + sizeof(struct ether_header); + } + if (ip_body) { + tos_tc = IP_TOS46(ip_body); + dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT; + } + + if (dscp < DHD_DWM_TBL_SIZE) { + dwm_filter = dhdp->dhd_tm_dwm_tbl.dhd_dwm_tbl[dscp]; + if (DHD_TRF_MGMT_DWM_IS_FILTER_SET(dwm_filter)) { + PKTSETPRIO(pktbuf, DHD_TRF_MGMT_DWM_PRIO(dwm_filter)); + } + } + } +} +#endif -#if defined(DHD_L2_FILTER) bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 
*mac) { return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE; } -#endif #ifdef DHD_L2_FILTER arp_table_t* @@ -13554,7 +18097,7 @@ int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val) */ ifp->parp_enable = val; ifp->parp_discard = val; - ifp->parp_allnode = !val; + ifp->parp_allnode = val; /* Flush ARP entries when disabled */ if (val == FALSE) { @@ -13643,7 +18186,10 @@ int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val) ASSERT(ifp); ifp->block_ping = val; - + /* Disable rx_pkt_chain feature for interface if block_ping option is + * enabled + */ + dhd_update_rx_pkt_chainable_state(dhdp, idx); return BCME_OK; } @@ -13827,22 +18373,573 @@ dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len) EXPORT_SYMBOL(dhd_page_corrupt_cb); #endif /* DHD_DEBUG_PAGEALLOC */ -#ifdef DHD_PKTID_AUDIT_ENABLED +#if defined(BCMPCIE) && defined(DHD_PKTID_AUDIT_ENABLED) void -dhd_pktid_audit_fail_cb(dhd_pub_t *dhdp) +dhd_pktid_error_handler(dhd_pub_t *dhdp) { DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__)); DHD_OS_WAKE_LOCK(dhdp); dhd_dump_to_kernelog(dhdp); -#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP) - /* Load the dongle side dump to host memory and then BUG_ON() */ - dhdp->memdump_enabled = DUMP_MEMFILE_BUGON; +#ifdef DHD_FW_COREDUMP + /* Load the dongle side dump to host memory */ + if (dhdp->memdump_enabled == DUMP_DISABLED) { + dhdp->memdump_enabled = DUMP_MEMFILE; + } dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE; dhd_bus_mem_dump(dhdp); -#endif /* BCMPCIE && DHD_FW_COREDUMP */ +#endif /* DHD_FW_COREDUMP */ + dhdp->hang_reason = HANG_REASON_PCIE_PKTID_ERROR; + dhd_os_check_hang(dhdp, 0, -EREMOTEIO); DHD_OS_WAKE_UNLOCK(dhdp); } -#endif /* DHD_PKTID_AUDIT_ENABLED */ +#endif /* BCMPCIE && DHD_PKTID_AUDIT_ENABLED */ + +struct net_device * +dhd_linux_get_primary_netdev(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + + if (dhd->iflist[0] && dhd->iflist[0]->net) + return dhd->iflist[0]->net; + else + return NULL; +} + 
+#ifdef DHD_DHCP_DUMP +static void +dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx) +{ + struct bootp_fmt *b = (struct bootp_fmt *) &pktdata[ETHER_HDR_LEN]; + struct iphdr *h = &b->ip_header; + uint8 *ptr, *opt, *end = (uint8 *) b + ntohs(b->ip_header.tot_len); + int dhcp_type = 0, len, opt_len; + + /* check IP header */ + if (h->ihl != 5 || h->version != 4 || h->protocol != IPPROTO_UDP) { + return; + } + + /* check UDP port for bootp (67, 68) */ + if (b->udp_header.source != htons(67) && b->udp_header.source != htons(68) && + b->udp_header.dest != htons(67) && b->udp_header.dest != htons(68)) { + return; + } + + /* check header length */ + if (ntohs(h->tot_len) < ntohs(b->udp_header.len) + sizeof(struct iphdr)) { + return; + } + + len = ntohs(b->udp_header.len) - sizeof(struct udphdr); + opt_len = len + - (sizeof(*b) - sizeof(struct iphdr) - sizeof(struct udphdr) - sizeof(b->options)); + + /* parse bootp options */ + if (opt_len >= 4 && !memcmp(b->options, bootp_magic_cookie, 4)) { + ptr = &b->options[4]; + while (ptr < end && *ptr != 0xff) { + opt = ptr++; + if (*opt == 0) { + continue; + } + ptr += *ptr + 1; + if (ptr >= end) { + break; + } + /* 53 is dhcp type */ + if (*opt == 53) { + if (opt[1]) { + dhcp_type = opt[2]; + DHD_ERROR(("DHCP[%s] - %s [%s] [%s]\n", + ifname, dhcp_types[dhcp_type], + tx ? "TX" : "RX", dhcp_ops[b->op])); + break; + } + } + } + } +} +#endif /* DHD_DHCP_DUMP */ + +#ifdef DHD_ICMP_DUMP +static void +dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx) +{ + uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN]; + struct iphdr *iph = (struct iphdr *)pkt; + struct icmphdr *icmph; + + /* check IP header */ + if (iph->ihl != 5 || iph->version != 4 || iph->protocol != IP_PROT_ICMP) { + return; + } + + icmph = (struct icmphdr *)((uint8 *)pkt + sizeof(struct iphdr)); + if (icmph->type == ICMP_ECHO) { + DHD_ERROR(("PING REQUEST[%s] [%s] : SEQNUM=%d\n", + ifname, tx ? 
"TX" : "RX", ntoh16(icmph->un.echo.sequence))); + } else if (icmph->type == ICMP_ECHOREPLY) { + DHD_ERROR(("PING REPLY[%s] [%s] : SEQNUM=%d\n", + ifname, tx ? "TX" : "RX", ntoh16(icmph->un.echo.sequence))); + } else { + DHD_ERROR(("ICMP [%s] [%s] : TYPE=%d, CODE=%d\n", + ifname, tx ? "TX" : "RX", icmph->type, icmph->code)); + } +} +#endif /* DHD_ICMP_DUMP */ + +#ifdef SHOW_LOGTRACE +void +dhd_get_read_buf_ptr(dhd_pub_t *dhd_pub, trace_buf_info_t *trace_buf_info) +{ + dhd_dbg_ring_status_t ring_status; + uint32 rlen; + + rlen = dhd_dbg_ring_pull_single(dhd_pub, FW_VERBOSE_RING_ID, trace_buf_info->buf, + TRACE_LOG_BUF_MAX_SIZE, TRUE); + trace_buf_info->size = rlen; + trace_buf_info->availability = NEXT_BUF_NOT_AVAIL; + if (rlen == 0) { + trace_buf_info->availability = BUF_NOT_AVAILABLE; + return; + } + dhd_dbg_get_ring_status(dhd_pub, FW_VERBOSE_RING_ID, &ring_status); + if (ring_status.written_bytes != ring_status.read_bytes) { + trace_buf_info->availability = NEXT_BUF_AVAIL; + } +} +#endif /* SHOW_LOGTRACE */ + +bool +dhd_fw_download_status(dhd_pub_t * dhd_pub) +{ + return dhd_pub->fw_download_done; +} + +int +dhd_create_to_notifier_skt(void) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + /* Kernel 3.7 onwards this API accepts only 3 arguments. 
*/ + /* Kernel version 3.6 is a special case which accepts 4 arguments */ + nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, &g_cfg); +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) + /* Kernel version 3.5 and below use this old API format */ + nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, 0, + dhd_process_daemon_msg, NULL, THIS_MODULE); +#else + nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, THIS_MODULE, &g_cfg); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */ + if (!nl_to_event_sk) + { + printf("Error creating socket.\n"); + return -1; + } + DHD_INFO(("nl_to socket created successfully...\n")); + return 0; +} + +void +dhd_destroy_to_notifier_skt(void) +{ + DHD_INFO(("Destroying nl_to socket\n")); + if (nl_to_event_sk) { + netlink_kernel_release(nl_to_event_sk); + } +} + +static void +dhd_recv_msg_from_daemon(struct sk_buff *skb) +{ + struct nlmsghdr *nlh; + bcm_to_info_t *cmd; + + nlh = (struct nlmsghdr *)skb->data; + cmd = (bcm_to_info_t *)nlmsg_data(nlh); + if ((cmd->magic == BCM_TO_MAGIC) && (cmd->reason == REASON_DAEMON_STARTED)) { + sender_pid = ((struct nlmsghdr *)(skb->data))->nlmsg_pid; + DHD_INFO(("DHD Daemon Started\n")); + } +} + +int +dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size) +{ + struct nlmsghdr *nlh; + struct sk_buff *skb_out; + + if (!nl_to_event_sk) { + DHD_INFO(("No socket available\n")); + return -1; + } + + BCM_REFERENCE(skb); + if (sender_pid == 0) { + DHD_INFO(("Invalid PID 0\n")); + return -1; + } + + if ((skb_out = nlmsg_new(size, 0)) == NULL) { + DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__)); + return -1; + } + nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0); + NETLINK_CB(skb_out).dst_group = 0; /* Unicast */ + memcpy(nlmsg_data(nlh), (char *)data, size); + + if ((nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) { + DHD_INFO(("Error sending message\n")); + } + return 0; +} + + +static void +dhd_process_daemon_msg(struct sk_buff 
*skb) +{ + bcm_to_info_t to_info; + + to_info.magic = BCM_TO_MAGIC; + to_info.reason = REASON_DAEMON_STARTED; + to_info.trap = NO_TRAP; + + dhd_recv_msg_from_daemon(skb); + dhd_send_msg_to_daemon(skb, &to_info, sizeof(to_info)); +} + +#ifdef REPORT_FATAL_TIMEOUTS +static void +dhd_send_trap_to_fw(dhd_pub_t * pub, int reason, int trap) +{ + bcm_to_info_t to_info; + + to_info.magic = BCM_TO_MAGIC; + to_info.reason = reason; + to_info.trap = trap; + + DHD_ERROR(("Sending Event reason:%d trap:%d\n", reason, trap)); + dhd_send_msg_to_daemon(NULL, (void *)&to_info, sizeof(bcm_to_info_t)); +} + +void +dhd_send_trap_to_fw_for_timeout(dhd_pub_t * pub, timeout_reasons_t reason) +{ + int to_reason; + int trap = NO_TRAP; + switch (reason) { + case DHD_REASON_COMMAND_TO: + to_reason = REASON_COMMAND_TO; + trap = DO_TRAP; + break; + case DHD_REASON_JOIN_TO: + to_reason = REASON_JOIN_TO; + break; + case DHD_REASON_SCAN_TO: + to_reason = REASON_SCAN_TO; + break; + case DHD_REASON_OQS_TO: + to_reason = REASON_OQS_TO; + trap = DO_TRAP; + break; + default: + to_reason = REASON_UNKOWN; + } + dhd_send_trap_to_fw(pub, to_reason, trap); +} +#endif /* REPORT_FATAL_TIMEOUTS */ + +#ifdef DHD_LOG_DUMP +void +dhd_log_dump_init(dhd_pub_t *dhd) +{ + struct dhd_log_dump_buf *dld_buf; + int i = 0; +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + int prealloc_idx = DHD_PREALLOC_DHD_LOG_DUMP_BUF; +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + + for (i = 0; i < DLD_BUFFER_NUM; i++) { + dld_buf = &g_dld_buf[i]; + spin_lock_init(&dld_buf->lock); +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + dld_buf->buffer = DHD_OS_PREALLOC(dhd, prealloc_idx++, dld_buf_size[i]); +#else + dld_buf->buffer = kmalloc(dld_buf_size[i], GFP_KERNEL); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + + if (!dld_buf->buffer) { + dld_buf->buffer = kmalloc(dld_buf_size[i], GFP_KERNEL); + DHD_ERROR(("Try to allocate memory using 
kmalloc().\n")); + + if (!dld_buf->buffer) { + DHD_ERROR(("Failed to allocate memory for dld_buf[%d].\n", i)); + goto fail; + } + } + + dld_buf->wraparound = 0; + dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i]; + dld_buf->present = dld_buf->front = dld_buf->buffer; + dld_buf->remain = dld_buf_size[i]; + dld_buf->enable = 1; + } + return; + +fail: + for (i = 0; i < DLD_BUFFER_NUM; i++) { + if (dld_buf[i].buffer) { + kfree(dld_buf[i].buffer); + } + } +} + +void +dhd_log_dump_deinit(dhd_pub_t *dhd) +{ + struct dhd_log_dump_buf *dld_buf; + int i = 0; + + for (i = 0; i < DLD_BUFFER_NUM; i++) { + dld_buf = &g_dld_buf[i]; + dld_buf->enable = 0; +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + DHD_OS_PREFREE(dhd, dld_buf->buffer, dld_buf_size[i]); +#else + kfree(dld_buf->buffer); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + } +} + +void +dhd_log_dump_write(int type, const char *fmt, ...) +{ + int len = 0; + char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, }; + va_list args; + unsigned long flags = 0; + struct dhd_log_dump_buf *dld_buf = NULL; + + switch (type) + { + case DLD_BUF_TYPE_GENERAL: + dld_buf = &g_dld_buf[type]; + break; + case DLD_BUF_TYPE_SPECIAL: + dld_buf = &g_dld_buf[type]; + break; + default: + DHD_ERROR(("%s: Unknown DHD_LOG_DUMP_BUF_TYPE(%d).\n", + __FUNCTION__, type)); + return; + } + + if (dld_buf->enable != 1) { + return; + } + + va_start(args, fmt); + + len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args); + /* Non ANSI C99 compliant returns -1, + * ANSI compliant return len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE + */ + if (len < 0) { + return; + } + + if (len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) { + len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1; + tmp_buf[len] = '\0'; + } + + /* make a critical section to eliminate race conditions */ + spin_lock_irqsave(&dld_buf->lock, flags); + if (dld_buf->remain < len) { + dld_buf->wraparound = 1; + dld_buf->present = 
dld_buf->front; + dld_buf->remain = dld_buf_size[type]; + } + + strncpy(dld_buf->present, tmp_buf, len); + dld_buf->remain -= len; + dld_buf->present += len; + spin_unlock_irqrestore(&dld_buf->lock, flags); + + /* double check invalid memory operation */ + ASSERT((unsigned long)dld_buf->present <= dld_buf->max); + va_end(args); +} + +char* +dhd_log_dump_get_timestamp(void) +{ + static char buf[16]; + u64 ts_nsec; + unsigned long rem_nsec; + + ts_nsec = local_clock(); + rem_nsec = do_div(ts_nsec, 1000000000); + snprintf(buf, sizeof(buf), "%5lu.%06lu", + (unsigned long)ts_nsec, rem_nsec / 1000); + + return buf; +} +#endif /* DHD_LOG_DUMP */ + +int +dhd_write_file(const char *filepath, char *buf, int buf_len) +{ + struct file *fp = NULL; + mm_segment_t old_fs; + int ret = 0; + + /* change to KERNEL_DS address limit */ + old_fs = get_fs(); + set_fs(KERNEL_DS); + + /* File is always created. */ + fp = filp_open(filepath, O_RDWR | O_CREAT, 0664); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: Couldn't open file '%s' err %ld\n", + __FUNCTION__, filepath, PTR_ERR(fp))); + ret = BCME_ERROR; + } else { + if (fp->f_mode & FMODE_WRITE) { + ret = vfs_write(fp, buf, buf_len, &fp->f_pos); + if (ret < 0) { + DHD_ERROR(("%s: Couldn't write file '%s'\n", + __FUNCTION__, filepath)); + ret = BCME_ERROR; + } else { + ret = BCME_OK; + } + } + filp_close(fp, NULL); + } + + /* restore previous address limit */ + set_fs(old_fs); + + return ret; +} + +int +dhd_read_file(const char *filepath, char *buf, int buf_len) +{ + struct file *fp = NULL; + mm_segment_t old_fs; + int ret; + + /* change to KERNEL_DS address limit */ + old_fs = get_fs(); + set_fs(KERNEL_DS); + + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + set_fs(old_fs); + DHD_ERROR(("%s: File %s doesn't exist\n", __FUNCTION__, filepath)); + return BCME_ERROR; + } + + ret = kernel_read(fp, 0, buf, buf_len); + filp_close(fp, NULL); + + /* restore previous address limit */ + set_fs(old_fs); + + /* Return the number of bytes read */ 
+ if (ret > 0) { + /* Success to read */ + ret = 0; + } else { + DHD_ERROR(("%s: Couldn't read the file %s, ret=%d\n", + __FUNCTION__, filepath, ret)); + ret = BCME_ERROR; + } + + return ret; +} + +int +dhd_write_file_and_check(const char *filepath, char *buf, int buf_len) +{ + int ret; + + ret = dhd_write_file(filepath, buf, buf_len); + if (ret < 0) { + return ret; + } + + /* Read the file again and check if the file size is not zero */ + memset(buf, 0, buf_len); + ret = dhd_read_file(filepath, buf, buf_len); + + return ret; +} + +#ifdef DHD_LB_TXP +#define DHD_LB_TXBOUND 64 +/* + * Function that performs the TX processing on a given CPU + */ +bool +dhd_lb_tx_process(dhd_info_t *dhd) +{ + struct sk_buff *skb; + int cnt = 0; + struct net_device *net; + int ifidx; + bool resched = FALSE; + + DHD_TRACE(("%s(): TX Processing \r\n", __FUNCTION__)); + if (dhd == NULL) { + DHD_ERROR((" Null pointer DHD \r\n")); + return resched; + } + + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt); + + /* Base Loop to perform the actual Tx */ + do { + skb = skb_dequeue(&dhd->tx_pend_queue); + if (skb == NULL) { + DHD_TRACE(("Dequeued a Null Packet \r\n")); + break; + } + cnt++; + + net = DHD_LB_TX_PKTTAG_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb)); + ifidx = DHD_LB_TX_PKTTAG_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb)); + + BCM_REFERENCE(net); + DHD_TRACE(("Processing skb %p for net %p index %d \r\n", skb, + net, ifidx)); + + __dhd_sendpkt(&dhd->pub, ifidx, skb); + + if (cnt >= DHD_LB_TXBOUND) { + resched = TRUE; + break; + } + + } while (1); + + DHD_INFO(("%s(): Processed %d packets \r\n", __FUNCTION__, cnt)); + + return resched; +} + +void +dhd_lb_tx_handler(unsigned long data) +{ + dhd_info_t *dhd = (dhd_info_t *)data; + + if (dhd_lb_tx_process(dhd)) { + dhd_tasklet_schedule(&dhd->tx_tasklet); + } +} + +#endif /* DHD_LB_TXP */ /* ---------------------------------------------------------------------------- * Infrastructure code for sysfs interface support for DHD @@ -13903,6 
+19000,45 @@ wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count) } #endif /* DHD_TRACE_WAKE_LOCK */ +#if defined(DHD_LB_TXP) +static ssize_t +show_lbtxp(struct dhd_info *dev, char *buf) +{ + ssize_t ret = 0; + unsigned long onoff; + dhd_info_t *dhd = (dhd_info_t *)dev; + + onoff = atomic_read(&dhd->lb_txp_active); + ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", + onoff); + return ret; +} + +static ssize_t +lbtxp_onoff(struct dhd_info *dev, const char *buf, size_t count) +{ + unsigned long onoff; + dhd_info_t *dhd = (dhd_info_t *)dev; + int i; + + onoff = bcm_strtoul(buf, NULL, 10); + + sscanf(buf, "%lu", &onoff); + if (onoff != 0 && onoff != 1) { + return -EINVAL; + } + atomic_set(&dhd->lb_txp_active, onoff); + + /* Since the scheme is changed clear the counters */ + for (i = 0; i < NR_CPUS; i++) { + DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]); + DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]); + } + + return count; +} + +#endif /* DHD_LB_TXP */ /* * Generic Attribute Structure for DHD. 
* If we have to add a new sysfs entry under /sys/bcm-dhd/, we have @@ -13923,11 +19059,19 @@ static struct dhd_attr dhd_attr_wklock = __ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff); #endif /* defined(DHD_TRACE_WAKE_LOCK */ +#if defined(DHD_LB_TXP) +static struct dhd_attr dhd_attr_lbtxp = + __ATTR(lbtxp, 0660, show_lbtxp, lbtxp_onoff); +#endif /* DHD_LB_TXP */ + /* Attribute object that gets registered with "bcm-dhd" kobject tree */ static struct attribute *default_attrs[] = { #if defined(DHD_TRACE_WAKE_LOCK) &dhd_attr_wklock.attr, -#endif +#endif /* DHD_TRACE_WAKE_LOCK */ +#if defined(DHD_LB_TXP) + &dhd_attr_lbtxp.attr, +#endif /* DHD_LB_TXP */ NULL }; @@ -13940,8 +19084,15 @@ static struct attribute *default_attrs[] = { */ static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif dhd_info_t *dhd = to_dhd(kobj); struct dhd_attr *d_attr = to_attr(attr); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif int ret; if (d_attr->show) @@ -13952,7 +19103,6 @@ static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf) return ret; } - /* * bcm-dhd kobject show function, the "attr" attribute specifices to which * node under "bcm-dhd" the store function is called. 
@@ -13960,8 +19110,15 @@ static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf) static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif dhd_info_t *dhd = to_dhd(kobj); struct dhd_attr *d_attr = to_attr(attr); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif int ret; if (d_attr->store) @@ -14022,108 +19179,305 @@ static void dhd_sysfs_exit(dhd_info_t *dhd) kobject_put(&dhd->dhd_kobj); } -#ifdef DHD_LOG_DUMP -void -dhd_log_dump_init(dhd_pub_t *dhd) +#ifdef DHD_DEBUG_UART +bool +dhd_debug_uart_is_running(struct net_device *dev) { - spin_lock_init(&dhd->dld_buf.lock); -#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) - dhd->dld_buf.buffer = DHD_OS_PREALLOC(dhd, - DHD_PREALLOC_DHD_LOG_DUMP_BUF, DHD_LOG_DUMP_BUFFER_SIZE); -#else - dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL); -#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + dhd_info_t *dhd = DHD_DEV_INFO(dev); - if (!dhd->dld_buf.buffer) { - dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL); - DHD_ERROR(("Try to allocate memory using kmalloc().\n")); + if (dhd->duart_execute) { + return TRUE; + } - if (!dhd->dld_buf.buffer) { - DHD_ERROR(("Failed to allocate memory for dld_buf.\n")); - return; + return FALSE; +} + +static void +dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event) +{ + dhd_pub_t *dhdp = handle; + dhd_debug_uart_exec(dhdp, "rd"); +} + +static void +dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd) +{ + int ret; + + char *argv[] = {DHD_DEBUG_UART_EXEC_PATH, cmd, NULL}; + char *envp[] = {"HOME=/", "TERM=linux", "PATH=/sbin:/system/bin", NULL}; + +#ifdef DHD_FW_COREDUMP + if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON) +#endif + { + if (dhdp->hang_reason == 
HANG_REASON_PCIE_LINK_DOWN || +#ifdef DHD_FW_COREDUMP + dhdp->memdump_success == FALSE || +#endif + FALSE) { + dhdp->info->duart_execute = TRUE; + DHD_ERROR(("DHD: %s - execute %s %s\n", + __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd)); + ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC); + DHD_ERROR(("DHD: %s - %s %s ret = %d\n", + __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd, ret)); + dhdp->info->duart_execute = FALSE; + +#ifdef DHD_LOG_DUMP + if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) +#endif + { + BUG_ON(1); + } } } - - dhd->dld_buf.wraparound = 0; - dhd->dld_buf.max = (unsigned long)dhd->dld_buf.buffer + DHD_LOG_DUMP_BUFFER_SIZE; - dhd->dld_buf.present = dhd->dld_buf.buffer; - dhd->dld_buf.front = dhd->dld_buf.buffer; - dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE; - dhd->dld_enable = 1; } +#endif /* DHD_DEBUG_UART */ +#if defined(DHD_BLOB_EXISTENCE_CHECK) void -dhd_log_dump_deinit(dhd_pub_t *dhd) +dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path) { - dhd->dld_enable = 0; -#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) - DHD_OS_PREFREE(dhd, - dhd->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE); + struct file *fp; + char *filepath = CONFIG_BCMDHD_CLM_PATH; + + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: ----- blob file dosen't exist -----\n", __FUNCTION__)); + dhdp->is_blob = FALSE; + } else { + DHD_ERROR(("%s: ----- blob file exist -----\n", __FUNCTION__)); + dhdp->is_blob = TRUE; +#if defined(CONCATE_BLOB) + strncat(fw_path, "_blob", strlen("_blob")); #else - kfree(dhd->dld_buf.buffer); -#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + BCM_REFERENCE(fw_path); +#endif /* SKIP_CONCATE_BLOB */ + filp_close(fp, NULL); + } +} +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + +#if defined(PCIE_FULL_DONGLE) +/** test / loopback */ +void +dmaxfer_free_dmaaddr_handler(void *handle, void *event_info, u8 event) +{ + dmaxref_mem_map_t *dmmap = (dmaxref_mem_map_t *)event_info; + dhd_info_t 
*dhd_info = (dhd_info_t *)handle; + dhd_pub_t *dhdp = &dhd_info->pub; + + if (event != DHD_WQ_WORK_DMA_LB_MEM_REL) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if ((dhd_info == NULL) || (dhdp == NULL)) { + DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__)); + return; + } + + if (dmmap == NULL) { + DHD_ERROR(("%s: dmmap is null\n", __FUNCTION__)); + return; + } + dmaxfer_free_prev_dmaaddr(dhdp, dmmap); +} + + +void +dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap) +{ + dhd_info_t *dhd_info = dhdp->info; + + dhd_deferred_schedule_work(dhd_info->dhd_deferred_wq, (void *)dmmap, + DHD_WQ_WORK_DMA_LB_MEM_REL, dmaxfer_free_dmaaddr_handler, DHD_WQ_WORK_PRIORITY_LOW); +} +#endif /* PCIE_FULL_DONGLE */ +/* ---------------------------- End of sysfs implementation ------------------------------------- */ +#ifdef HOFFLOAD_MODULES +void +dhd_linux_get_modfw_address(dhd_pub_t *dhd) +{ + const char* module_name = NULL; + const struct firmware *module_fw; + struct module_metadata *hmem = &dhd->hmem; + + if (dhd_hmem_module_string[0] != '\0') { + module_name = dhd_hmem_module_string; + } else { + DHD_ERROR(("%s No module image name specified\n", __FUNCTION__)); + return; + } + if (request_firmware(&module_fw, module_name, dhd_bus_to_dev(dhd->bus))) { + DHD_ERROR(("modules.img not available\n")); + return; + } + if (!dhd_alloc_module_memory(dhd->bus, module_fw->size, hmem)) { + release_firmware(module_fw); + return; + } + memcpy(hmem->data, module_fw->data, module_fw->size); + release_firmware(module_fw); +} +#endif /* HOFFLOAD_MODULES */ + +#ifdef SET_PCIE_IRQ_CPU_CORE +void +dhd_set_irq_cpucore(dhd_pub_t *dhdp, int set) +{ + unsigned int irq; + if (!dhdp) { + DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__)); + return; + } + + if (!dhdp->bus) { + DHD_ERROR(("%s : dhd->bus is NULL\n", __FUNCTION__)); + return; + } + + if (dhdpcie_get_pcieirq(dhdp->bus, &irq)) { + return; + } + + set_irq_cpucore(irq, set); +} +#endif /* 
SET_PCIE_IRQ_CPU_CORE */ + +#if defined(DHD_HANG_SEND_UP_TEST) +void +dhd_make_hang_with_reason(struct net_device *dev, const char *string_num) +{ + dhd_info_t *dhd = NULL; + dhd_pub_t *dhdp = NULL; + uint reason = HANG_REASON_MAX; + char buf[WLC_IOCTL_SMLEN] = {0, }; + uint32 fw_test_code = 0; + dhd = DHD_DEV_INFO(dev); + + if (dhd) { + dhdp = &dhd->pub; + } + + if (!dhd || !dhdp) { + return; + } + + reason = (uint) bcm_strtoul(string_num, NULL, 0); + DHD_ERROR(("Enter %s, reason=0x%x\n", __FUNCTION__, reason)); + + if (reason == 0) { + if (dhdp->req_hang_type) { + DHD_ERROR(("%s, Clear HANG test request 0x%x\n", + __FUNCTION__, dhdp->req_hang_type)); + dhdp->req_hang_type = 0; + return; + } else { + DHD_ERROR(("%s, No requested HANG test\n", __FUNCTION__)); + return; + } + } else if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) { + DHD_ERROR(("Invalid HANG request, reason 0x%x\n", reason)); + return; + } + + if (dhdp->req_hang_type != 0) { + DHD_ERROR(("Already HANG requested for test\n")); + return; + } + + switch (reason) { + case HANG_REASON_IOCTL_RESP_TIMEOUT: + DHD_ERROR(("Make HANG!!!: IOCTL response timeout(0x%x)\n", reason)); + dhdp->req_hang_type = reason; + fw_test_code = 102; /* resumed on timeour */ + bcm_mkiovar("bus:disconnect", (void *)&fw_test_code, 4, buf, sizeof(buf)); + dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0); + break; + case HANG_REASON_DONGLE_TRAP: + DHD_ERROR(("Make HANG!!!: Dongle trap (0x%x)\n", reason)); + dhdp->req_hang_type = reason; + fw_test_code = 99; /* dongle trap */ + bcm_mkiovar("bus:disconnect", (void *)&fw_test_code, 4, buf, sizeof(buf)); + dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0); + break; + case HANG_REASON_D3_ACK_TIMEOUT: + DHD_ERROR(("Make HANG!!!: D3 ACK timeout (0x%x)\n", reason)); + dhdp->req_hang_type = reason; + break; + case HANG_REASON_BUS_DOWN: + DHD_ERROR(("Make HANG!!!: BUS down(0x%x)\n", reason)); + dhdp->req_hang_type = reason; + break; + case 
HANG_REASON_PCIE_LINK_DOWN: + case HANG_REASON_MSGBUF_LIVELOCK: + dhdp->req_hang_type = 0; + DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason)); + break; + case HANG_REASON_IFACE_OP_FAILURE: + DHD_ERROR(("Make HANG!!!: P2P inrerface delete failure(0x%x)\n", reason)); + dhdp->req_hang_type = reason; + break; + case HANG_REASON_HT_AVAIL_ERROR: + dhdp->req_hang_type = 0; + DHD_ERROR(("PCIe does not support requested HANG(0x%x)\n", reason)); + break; + case HANG_REASON_PCIE_RC_LINK_UP_FAIL: + DHD_ERROR(("Make HANG!!!:Link Up(0x%x)\n", reason)); + dhdp->req_hang_type = reason; + break; + default: + dhdp->req_hang_type = 0; + DHD_ERROR(("Unknown HANG request (0x%x)\n", reason)); + break; + } +} +#endif /* DHD_HANG_SEND_UP_TEST */ +#ifdef DHD_WAKE_STATUS +wake_counts_t* +dhd_get_wakecount(dhd_pub_t *dhdp) +{ + return dhd_bus_get_wakecount(dhdp); +} +#endif /* DHD_WAKE_STATUS */ + +#ifdef BCM_ASLR_HEAP +uint32 +dhd_get_random_number(void) +{ + uint32 rand = 0; + get_random_bytes_arch(&rand, sizeof(rand)); + return rand; +} +#endif /* BCM_ASLR_HEAP */ + +#ifdef DHD_PKT_LOGGING +void +dhd_pktlog_dump(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + if (dhd_pktlog_write_file(&dhd->pub)) { + DHD_ERROR(("%s: writing pktlog dump to the file failed\n", __FUNCTION__)); + return; + } } void -dhd_log_dump_print(const char *fmt, ...) 
+dhd_schedule_pktlog_dump(dhd_pub_t *dhdp) { - int len = 0; - char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, }; - va_list args; - dhd_pub_t *dhd = NULL; - unsigned long flags = 0; - - if (wl_get_bcm_cfg80211_ptr()) { - dhd = (dhd_pub_t*)(wl_get_bcm_cfg80211_ptr()->pub); - } - - if (!dhd || dhd->dld_enable != 1) { - return; - } - - va_start(args, fmt); - - len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args); - if (len < 0) { - return; - } - - /* make a critical section to eliminate race conditions */ - spin_lock_irqsave(&dhd->dld_buf.lock, flags); - if (dhd->dld_buf.remain < len) { - dhd->dld_buf.wraparound = 1; - dhd->dld_buf.present = dhd->dld_buf.front; - dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE; - } - - strncpy(dhd->dld_buf.present, tmp_buf, len); - dhd->dld_buf.remain -= len; - dhd->dld_buf.present += len; - spin_unlock_irqrestore(&dhd->dld_buf.lock, flags); - - /* double check invalid memory operation */ - ASSERT((unsigned long)dhd->dld_buf.present <= dhd->dld_buf.max); - va_end(args); + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, + (void*)NULL, DHD_WQ_WORK_PKTLOG_DUMP, + dhd_pktlog_dump, DHD_WQ_WORK_PRIORITY_HIGH); } - -char* -dhd_log_dump_get_timestamp(void) -{ - static char buf[16]; - u64 ts_nsec; - unsigned long rem_nsec; - - ts_nsec = local_clock(); - rem_nsec = do_div(ts_nsec, 1000000000); - snprintf(buf, sizeof(buf), "%5lu.%06lu", - (unsigned long)ts_nsec, rem_nsec / 1000); - - return buf; -} - -#endif /* DHD_LOG_DUMP */ - -/* ---------------------------- End of sysfs implementation ------------------------------------- */ +#endif /* DHD_PKT_LOGGING */ void *dhd_get_pub(struct net_device *dev) { diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux.h index 79290db34dd2..4651dc51f69a 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux.h @@ -1,7 
+1,7 @@ /* * DHD Linux header file (dhd_linux exports for cfg80211 and other components) * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_linux.h 591285 2015-10-07 11:56:29Z $ + * $Id: dhd_linux.h 699532 2017-05-15 11:00:39Z $ */ /* wifi platform functions for power, interrupt and pre-alloc, either @@ -65,6 +65,9 @@ typedef struct wifi_adapter_info { uint bus_type; uint bus_num; uint slot_num; +#if defined(BT_OVER_SDIO) + const char *btfw_path; +#endif /* defined (BT_OVER_SDIO) */ #ifdef BUS_POWER_RESTORE #if defined(BCMSDIO) struct sdio_func *sdio_func; @@ -76,16 +79,16 @@ typedef struct wifi_adapter_info { #endif } wifi_adapter_info_t; -#define WLAN_PLAT_NODFS_FLAG 0x01 +#define WLAN_PLAT_NODFS_FLAG 0x01 #define WLAN_PLAT_AP_FLAG 0x02 struct wifi_platform_data { #ifdef BUS_POWER_RESTORE - int (*set_power)(bool val, wifi_adapter_info_t *adapter); + int (*set_power)(int val, wifi_adapter_info_t *adapter); #else - int (*set_power)(bool val); + int (*set_power)(int val); #endif int (*set_reset)(int val); - int (*set_carddetect)(bool val); + int (*set_carddetect)(int val); void *(*mem_prealloc)(int section, unsigned long size); int (*get_mac_addr)(unsigned char *buf); #if defined(CUSTOM_COUNTRY_CODE) @@ -109,6 +112,9 @@ typedef struct dhd_sta { struct list_head list; /* link into dhd_if::sta_list */ int idx; /* index of self in dhd_pub::sta_pool[] */ int ifidx; /* index of interface in dhd */ +#ifdef DHD_WMF + struct dhd_sta *psta_prim; /* primary index of psta interface */ +#endif /* DHD_WMF */ } dhd_sta_t; typedef dhd_sta_t dhd_sta_pool_t; @@ -122,7 +128,7 @@ int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *ir int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf); #ifdef 
CUSTOM_COUNTRY_CODE void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode, - u32 flags); + u32 flags); #else void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode); #endif /* CUSTOM_COUNTRY_CODE */ @@ -134,5 +140,30 @@ bool dhd_update_fw_nv_path(struct dhd_info *dhdinfo); #ifdef DHD_WMF dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx); +int dhd_get_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx); +int dhd_set_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx, int val); +void dhd_update_psta_interface_for_sta(dhd_pub_t *dhdp, char* ifname, + void* mac_addr, void* event_data); #endif /* DHD_WMF */ +#if defined(BT_OVER_SDIO) +int dhd_net_bus_get(struct net_device *dev); +int dhd_net_bus_put(struct net_device *dev); +#endif /* BT_OVER_SDIO */ +#ifdef HOFFLOAD_MODULES +extern void dhd_free_module_memory(struct dhd_bus *bus, struct module_metadata *hmem); +extern void* dhd_alloc_module_memory(struct dhd_bus *bus, uint32_t size, + struct module_metadata *hmem); +#endif /* HOFFLOAD_MODULES */ +#if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD) +#define ADPS_ENABLE 1 +#define ADPS_DISABLE 0 +typedef struct bcm_iov_buf { + uint16 version; + uint16 len; + uint16 id; + uint16 data[1]; +} bcm_iov_buf_t; + +int dhd_enable_adps(dhd_pub_t *dhd, uint8 on); +#endif /* WLADPS || WLADPS_PRIVATE_CMD */ #endif /* __DHD_LINUX_H__ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux_platdev.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux_platdev.c index fed0d0c1ed1a..7be2fa30d1eb 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux_platdev.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux_platdev.c @@ -1,7 +1,7 @@ /* * Linux platform device for DHD WLAN adapter * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this 
software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_linux_platdev.c 591285 2015-10-07 11:56:29Z $ + * $Id: dhd_linux_platdev.c 662397 2016-09-29 10:15:08Z $ */ #include #include @@ -71,14 +71,21 @@ extern struct resource dhd_wlan_resources; extern struct wifi_platform_data dhd_wlan_control; #else static bool dts_enabled = FALSE; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmissing-field-initializers" +#endif struct resource dhd_wlan_resources = {0}; -extern struct wifi_platform_data dhd_wlan_control; -#endif /* !defind(DHD_OF_SUPPORT) */ +struct wifi_platform_data dhd_wlan_control = {0}; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif +#endif /* CONFIG_OF && !defined(CONFIG_ARCH_MSM) */ #endif /* !defind(CONFIG_DTS) */ static int dhd_wifi_platform_load(void); -extern void* wl_cfg80211_get_dhdp(void); +extern void* wl_cfg80211_get_dhdp(struct net_device *dev); #ifdef ENABLE_4335BT_WAR extern int bcm_bt_lock(int cookie); @@ -150,6 +157,14 @@ int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *ir int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec) { int err = 0; +#ifndef CONFIG_DTS + struct wifi_platform_data *plat_data; +#endif +#ifdef BT_OVER_SDIO + if (is_power_on == on) { + return -EINVAL; + } +#endif /* BT_OVER_SDIO */ #ifdef CONFIG_DTS if (on) { err = regulator_enable(wifi_regulator); @@ -162,8 +177,6 @@ int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long if (err < 0) DHD_ERROR(("%s: regulator enable/disable failed", __FUNCTION__)); #else - struct wifi_platform_data *plat_data; - if (!adapter || !adapter->wifi_plat_data) return -EINVAL; plat_data = adapter->wifi_plat_data; @@ -285,6 +298,9 @@ static int wifi_plat_dev_drv_probe(struct platform_device *pdev) if (resource) { adapter->irq_num = resource->start; adapter->intr_flags = 
resource->flags & IRQF_TRIGGER_MASK; +#ifdef DHD_ISR_NO_SUSPEND + adapter->intr_flags |= IRQF_NO_SUSPEND; +#endif } #ifdef CONFIG_DTS @@ -397,7 +413,14 @@ static struct platform_driver wifi_platform_dev_driver_legacy = { static int wifi_platdev_match(struct device *dev, void *data) { char *name = (char*)data; - struct platform_device *pdev = to_platform_device(dev); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + const struct platform_device *pdev = to_platform_device(dev); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif if (strcmp(pdev->name, name) == 0) { DHD_ERROR(("found wifi platform device %s\n", name)); @@ -478,6 +501,9 @@ static int wifi_ctrlfunc_register_drv(void) #endif adapter->irq_num = resource->start; adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK; +#ifdef DHD_ISR_NO_SUSPEND + adapter->intr_flags |= IRQF_NO_SUSPEND; +#endif wifi_plat_dev_probe_ret = dhd_wifi_platform_load(); } #endif /* !defined(CONFIG_DTS) */ @@ -657,7 +683,7 @@ static int dhd_wifi_platform_load_pcie(void) } } while (retry--); - if (!retry) { + if (retry < 0) { DHD_ERROR(("failed to power up %s, max retry reached**\n", adapter->name)); return -ENODEV; @@ -726,12 +752,18 @@ static int dhd_wifi_platform_load_sdio(void) return -EINVAL; #if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD) + sema_init(&dhd_registration_sem, 0); +#endif + if (dhd_wifi_platdata == NULL) { DHD_ERROR(("DHD wifi platform data is required for Android build\n")); - return -EINVAL; + DHD_ERROR(("DHD registeing bus directly\n")); + /* x86 bring-up PC needs no power-up operations */ + err = dhd_bus_register(); + return err; } - sema_init(&dhd_registration_sem, 0); +#if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD) /* power up all adapters */ for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { bool chip_up = FALSE; @@ -756,12 +788,12 @@ static int 
dhd_wifi_platform_load_sdio(void) } err = wifi_platform_set_power(adapter, TRUE, WIFI_TURNON_DELAY); if (err) { + dhd_bus_unreg_sdio_notify(); /* WL_REG_ON state unknown, Power off forcely */ wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); continue; } else { wifi_platform_bus_enumerate(adapter, TRUE); - err = 0; } if (down_timeout(&dhd_chipup_sem, msecs_to_jiffies(POWERUP_WAIT_MS)) == 0) { @@ -790,7 +822,6 @@ static int dhd_wifi_platform_load_sdio(void) goto fail; } - /* * Wait till MMC sdio_register_driver callback called and made driver attach. * It's needed to make sync up exit from dhd insmod and @@ -812,11 +843,6 @@ fail: wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); wifi_platform_bus_enumerate(adapter, FALSE); } -#else - - /* x86 bring-up PC needs no power-up operations */ - err = dhd_bus_register(); - #endif return err; diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux_sched.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux_sched.c index 66eb8940ba3f..88c0cce635bd 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux_sched.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux_sched.c @@ -1,7 +1,7 @@ /* * Expose some of the kernel scheduler routines * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux_wq.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux_wq.c index d2513cc4ab0d..1cba8d1400c4 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux_wq.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux_wq.c @@ -2,7 +2,7 @@ * Broadcom Dongle Host Driver (DHD), Generic work queue framework * Generic interface to handle dhd deferred work events * - * Copyright (C) 
1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: dhd_linux_wq.c 514727 2014-11-12 03:02:48Z $ + * $Id: dhd_linux_wq.c 641330 2016-06-02 06:55:00Z $ */ #include @@ -46,35 +46,43 @@ #include #include -struct dhd_deferred_event_t { - u8 event; /* holds the event */ - void *event_data; /* Holds event specific data */ +typedef struct dhd_deferred_event { + u8 event; /* holds the event */ + void *event_data; /* holds event specific data */ event_handler_t event_handler; -}; -#define DEFRD_EVT_SIZE sizeof(struct dhd_deferred_event_t) + unsigned long pad; /* for memory alignment to power of 2 */ +} dhd_deferred_event_t; + +#define DEFRD_EVT_SIZE (sizeof(dhd_deferred_event_t)) + +/* + * work events may occur simultaneously. + * can hold upto 64 low priority events and 16 high priority events + */ +#define DHD_PRIO_WORK_FIFO_SIZE (16 * DEFRD_EVT_SIZE) +#define DHD_WORK_FIFO_SIZE (64 * DEFRD_EVT_SIZE) + +#define DHD_FIFO_HAS_FREE_SPACE(fifo) \ + ((fifo) && (kfifo_avail(fifo) >= DEFRD_EVT_SIZE)) +#define DHD_FIFO_HAS_ENOUGH_DATA(fifo) \ + ((fifo) && (kfifo_len(fifo) >= DEFRD_EVT_SIZE)) struct dhd_deferred_wq { - struct work_struct deferred_work; /* should be the first member */ + struct work_struct deferred_work; /* should be the first member */ - /* - * work events may occur simultaneously. 
- * Can hold upto 64 low priority events and 4 high priority events - */ -#define DHD_PRIO_WORK_FIFO_SIZE (4 * sizeof(struct dhd_deferred_event_t)) -#define DHD_WORK_FIFO_SIZE (64 * sizeof(struct dhd_deferred_event_t)) - struct kfifo *prio_fifo; - struct kfifo *work_fifo; - u8 *prio_fifo_buf; - u8 *work_fifo_buf; - spinlock_t work_lock; - void *dhd_info; /* review: does it require */ + struct kfifo *prio_fifo; + struct kfifo *work_fifo; + u8 *prio_fifo_buf; + u8 *work_fifo_buf; + spinlock_t work_lock; + void *dhd_info; /* review: does it require */ }; static inline struct kfifo* dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock) { struct kfifo *fifo; - gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC; + gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC; #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) fifo = kfifo_init(buf, size, flags, lock); @@ -104,10 +112,10 @@ static void dhd_deferred_work_handler(struct work_struct *data); void* dhd_deferred_work_init(void *dhd_info) { - struct dhd_deferred_wq *work = NULL; - u8* buf; - unsigned long fifo_size = 0; - gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC; + struct dhd_deferred_wq *work = NULL; + u8* buf; + unsigned long fifo_size = 0; + gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC; if (!dhd_info) { DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__)); @@ -116,9 +124,8 @@ dhd_deferred_work_init(void *dhd_info) work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq), flags); - if (!work) { - DHD_ERROR(("%s: work queue creation failed \n", __FUNCTION__)); + DHD_ERROR(("%s: work queue creation failed\n", __FUNCTION__)); goto return_null; } @@ -129,10 +136,12 @@ dhd_deferred_work_init(void *dhd_info) /* allocate buffer to hold prio events */ fifo_size = DHD_PRIO_WORK_FIFO_SIZE; - fifo_size = is_power_of_2(fifo_size)? fifo_size : roundup_pow_of_two(fifo_size); + fifo_size = is_power_of_2(fifo_size) ? 
fifo_size : + roundup_pow_of_two(fifo_size); buf = (u8*)kzalloc(fifo_size, flags); if (!buf) { - DHD_ERROR(("%s: prio work fifo allocation failed \n", __FUNCTION__)); + DHD_ERROR(("%s: prio work fifo allocation failed\n", + __FUNCTION__)); goto return_null; } @@ -145,10 +154,11 @@ dhd_deferred_work_init(void *dhd_info) /* allocate buffer to hold work events */ fifo_size = DHD_WORK_FIFO_SIZE; - fifo_size = is_power_of_2(fifo_size)? fifo_size : roundup_pow_of_two(fifo_size); + fifo_size = is_power_of_2(fifo_size) ? fifo_size : + roundup_pow_of_two(fifo_size); buf = (u8*)kzalloc(fifo_size, flags); if (!buf) { - DHD_ERROR(("%s: work fifo allocation failed \n", __FUNCTION__)); + DHD_ERROR(("%s: work fifo allocation failed\n", __FUNCTION__)); goto return_null; } @@ -160,13 +170,13 @@ dhd_deferred_work_init(void *dhd_info) } work->dhd_info = dhd_info; - DHD_ERROR(("%s: work queue initialized \n", __FUNCTION__)); + DHD_ERROR(("%s: work queue initialized\n", __FUNCTION__)); return work; return_null: - - if (work) + if (work) { dhd_deferred_work_deinit(work); + } return NULL; } @@ -178,7 +188,8 @@ dhd_deferred_work_deinit(void *work) if (!deferred_work) { - DHD_ERROR(("%s: deferred work has been freed alread \n", __FUNCTION__)); + DHD_ERROR(("%s: deferred work has been freed already\n", + __FUNCTION__)); return; } @@ -189,15 +200,31 @@ dhd_deferred_work_deinit(void *work) * free work event fifo. 
* kfifo_free frees locally allocated fifo buffer */ - if (deferred_work->prio_fifo) + if (deferred_work->prio_fifo) { dhd_kfifo_free(deferred_work->prio_fifo); + } - if (deferred_work->work_fifo) + if (deferred_work->work_fifo) { dhd_kfifo_free(deferred_work->work_fifo); + } kfree(deferred_work); } +/* select kfifo according to priority */ +static inline struct kfifo * +dhd_deferred_work_select_kfifo(struct dhd_deferred_wq *deferred_wq, + u8 priority) +{ + if (priority == DHD_WQ_WORK_PRIORITY_HIGH) { + return deferred_wq->prio_fifo; + } else if (priority == DHD_WQ_WORK_PRIORITY_LOW) { + return deferred_wq->work_fifo; + } else { + return NULL; + } +} + /* * Prepares event to be queued * Schedules the event @@ -206,21 +233,29 @@ int dhd_deferred_schedule_work(void *workq, void *event_data, u8 event, event_handler_t event_handler, u8 priority) { - struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *) workq; - struct dhd_deferred_event_t deferred_event; - int status; + struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)workq; + struct kfifo *fifo; + dhd_deferred_event_t deferred_event; + int bytes_copied = 0; if (!deferred_wq) { - DHD_ERROR(("%s: work queue not initialized \n", __FUNCTION__)); + DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__)); ASSERT(0); return DHD_WQ_STS_UNINITIALIZED; } if (!event || (event >= DHD_MAX_WQ_EVENTS)) { - DHD_ERROR(("%s: Unknown event \n", __FUNCTION__)); + DHD_ERROR(("%s: unknown event, event=%d\n", __FUNCTION__, + event)); return DHD_WQ_STS_UNKNOWN_EVENT; } + if (!priority || (priority >= DHD_WQ_MAX_PRIORITY)) { + DHD_ERROR(("%s: unknown priority, priority=%d\n", + __FUNCTION__, priority)); + return DHD_WQ_STS_UNKNOWN_PRIORITY; + } + /* * default element size is 1, which can be changed * using kfifo_esize(). 
Older kernel(FC11) doesn't support @@ -234,28 +269,29 @@ dhd_deferred_schedule_work(void *workq, void *event_data, u8 event, deferred_event.event_data = event_data; deferred_event.event_handler = event_handler; - if (priority == DHD_WORK_PRIORITY_HIGH) { - status = kfifo_in_spinlocked(deferred_wq->prio_fifo, &deferred_event, - DEFRD_EVT_SIZE, &deferred_wq->work_lock); - } else { - status = kfifo_in_spinlocked(deferred_wq->work_fifo, &deferred_event, + fifo = dhd_deferred_work_select_kfifo(deferred_wq, priority); + if (DHD_FIFO_HAS_FREE_SPACE(fifo)) { + bytes_copied = kfifo_in_spinlocked(fifo, &deferred_event, DEFRD_EVT_SIZE, &deferred_wq->work_lock); } - - if (!status) { + if (bytes_copied != DEFRD_EVT_SIZE) { + DHD_ERROR(("%s: failed to schedule deferred work, " + "priority=%d, bytes_copied=%d\n", __FUNCTION__, + priority, bytes_copied)); return DHD_WQ_STS_SCHED_FAILED; } schedule_work((struct work_struct *)deferred_wq); return DHD_WQ_STS_OK; } -static int -dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq, struct dhd_deferred_event_t *event) +static bool +dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq, + dhd_deferred_event_t *event) { - int status = 0; + int bytes_copied = 0; if (!deferred_wq) { - DHD_ERROR(("%s: work queue not initialized \n", __FUNCTION__)); + DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__)); return DHD_WQ_STS_UNINITIALIZED; } @@ -268,17 +304,36 @@ dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq, struct dhd_deferred_ ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1); ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1); - /* first read priorit event fifo */ - status = kfifo_out_spinlocked(deferred_wq->prio_fifo, event, - DEFRD_EVT_SIZE, &deferred_wq->work_lock); - - if (!status) { - /* priority fifo is empty. 
Now read low prio work fifo */ - status = kfifo_out_spinlocked(deferred_wq->work_fifo, event, - DEFRD_EVT_SIZE, &deferred_wq->work_lock); + /* handle priority work */ + if (DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->prio_fifo)) { + bytes_copied = kfifo_out_spinlocked(deferred_wq->prio_fifo, + event, DEFRD_EVT_SIZE, &deferred_wq->work_lock); } - return status; + /* handle normal work if priority work doesn't have enough data */ + if ((bytes_copied != DEFRD_EVT_SIZE) && + DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->work_fifo)) { + bytes_copied = kfifo_out_spinlocked(deferred_wq->work_fifo, + event, DEFRD_EVT_SIZE, &deferred_wq->work_lock); + } + + return (bytes_copied == DEFRD_EVT_SIZE); +} + +static inline void +dhd_deferred_dump_work_event(dhd_deferred_event_t *work_event) +{ + if (!work_event) { + DHD_ERROR(("%s: work_event is null\n", __FUNCTION__)); + return; + } + + DHD_ERROR(("%s: work_event->event = %d\n", __FUNCTION__, + work_event->event)); + DHD_ERROR(("%s: work_event->event_data = %p\n", __FUNCTION__, + work_event->event_data)); + DHD_ERROR(("%s: work_event->event_handler = %p\n", __FUNCTION__, + work_event->event_handler)); } /* @@ -287,9 +342,8 @@ dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq, struct dhd_deferred_ static void dhd_deferred_work_handler(struct work_struct *work) { - struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work; - struct dhd_deferred_event_t work_event; - int status; + struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work; + dhd_deferred_event_t work_event; if (!deferred_work) { DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__)); @@ -297,24 +351,29 @@ dhd_deferred_work_handler(struct work_struct *work) } do { - status = dhd_get_scheduled_work(deferred_work, &work_event); - DHD_TRACE(("%s: event to handle %d \n", __FUNCTION__, status)); - if (!status) { - DHD_TRACE(("%s: No event to handle %d \n", __FUNCTION__, status)); + if (!dhd_get_scheduled_work(deferred_work, &work_event)) { + 
DHD_TRACE(("%s: no event to handle\n", __FUNCTION__)); break; } - if (work_event.event > DHD_MAX_WQ_EVENTS) { - DHD_TRACE(("%s: Unknown event %d \n", __FUNCTION__, work_event.event)); - break; + if (work_event.event >= DHD_MAX_WQ_EVENTS) { + DHD_ERROR(("%s: unknown event\n", __FUNCTION__)); + dhd_deferred_dump_work_event(&work_event); + ASSERT(work_event.event < DHD_MAX_WQ_EVENTS); + continue; } + if (work_event.event_handler) { work_event.event_handler(deferred_work->dhd_info, work_event.event_data, work_event.event); } else { - DHD_ERROR(("%s: event not defined %d\n", __FUNCTION__, work_event.event)); + DHD_ERROR(("%s: event handler is null\n", + __FUNCTION__)); + dhd_deferred_dump_work_event(&work_event); + ASSERT(work_event.event_handler != NULL); } } while (1); + return; } diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux_wq.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux_wq.h index e6197b26f211..6dc41a5dc3a3 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux_wq.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_linux_wq.h @@ -2,7 +2,7 @@ * Broadcom Dongle Host Driver (DHD), Generic work queue framework * Generic interface to handle dhd deferred work events * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: dhd_linux_wq.h 597512 2015-11-05 11:37:36Z $ + * $Id: dhd_linux_wq.h 704361 2017-06-13 08:50:38Z $ */ #ifndef _dhd_linux_wq_h_ #define _dhd_linux_wq_h_ @@ -41,15 +41,23 @@ enum _wq_event { DHD_WQ_WORK_HANG_MSG, DHD_WQ_WORK_SOC_RAM_DUMP, DHD_WQ_WORK_DHD_LOG_DUMP, - + DHD_WQ_WORK_INFORM_DHD_MON, + DHD_WQ_WORK_EVENT_LOGTRACE, + DHD_WQ_WORK_DMA_LB_MEM_REL, + DHD_WQ_WORK_DEBUG_UART_DUMP, + DHD_WQ_WORK_SSSR_DUMP, + DHD_WQ_WORK_PKTLOG_DUMP, DHD_MAX_WQ_EVENTS }; /* * 
Work event priority */ -#define DHD_WORK_PRIORITY_LOW 0 -#define DHD_WORK_PRIORITY_HIGH 1 +enum wq_priority { + DHD_WQ_WORK_PRIORITY_LOW = 1, + DHD_WQ_WORK_PRIORITY_HIGH, + DHD_WQ_MAX_PRIORITY +}; /* * Error definitions @@ -59,6 +67,7 @@ enum _wq_event { #define DHD_WQ_STS_UNINITIALIZED -2 #define DHD_WQ_STS_SCHED_FAILED -3 #define DHD_WQ_STS_UNKNOWN_EVENT -4 +#define DHD_WQ_STS_UNKNOWN_PRIORITY -5 typedef void (*event_handler_t)(void *handle, void *event_data, u8 event); diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_log.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_log.c deleted file mode 100755 index a498197d65aa..000000000000 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_log.c +++ /dev/null @@ -1,58 +0,0 @@ -/* - * DHD logging module for internal debug - * - * $Copyright Open Broadcom Corporation$ - * - * $Id: dhd_sdio.c 281456 2011-09-02 01:49:45Z $ - */ - -#include -#include - -#include -#include -#include - -#include - -void dhd_blog(char *cp, int size) -{ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)) - static struct socket * _udpSocket = NULL; - struct sockaddr_in _saAddr; - struct iovec iov; - struct msghdr msg; - if (sock_create(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &_udpSocket) >= 0) - { - - { - memset(&_saAddr, 0, sizeof(_saAddr)); - _saAddr.sin_family = AF_INET; - _saAddr.sin_port = htons(7651); - _saAddr.sin_addr.s_addr = in_aton("10.19.74.43"); - - iov.iov_base = cp; - iov.iov_len = size; - - msg.msg_name = &_saAddr; - msg.msg_namelen = sizeof(struct sockaddr_in); - msg.msg_iov = &iov; - msg.msg_iovlen = 1; - msg.msg_control = NULL; - msg.msg_controllen = 0; - msg.msg_flags = 0; - - { - mm_segment_t fs = get_fs(); - set_fs(get_ds()); - - sock_sendmsg(_udpSocket, &msg, size); - - set_fs(fs); - } - } - - sock_release(_udpSocket); - } -#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ -} diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_mschdbg.c 
b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_mschdbg.c new file mode 100644 index 000000000000..c1032f203766 --- /dev/null +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_mschdbg.c @@ -0,0 +1,747 @@ +/* + * DHD debugability support + * + * <> + * + * Copyright (C) 1999-2017, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: dhd_mschdbg.c 639872 2016-05-25 05:39:30Z $ + */ +#ifdef SHOW_LOGTRACE +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +static const char *head_log = ""; +#define MSCH_EVENT_HEAD(space) \ + do { \ + MSCH_EVENT(("%s_E: ", head_log)); \ + if (space > 0) { \ + int ii; \ + for (ii = 0; ii < space; ii += 4) MSCH_EVENT((" ")); \ + } \ + } while (0) +#define MSCH_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0) + +static uint64 solt_start_time[4], req_start_time[4], profiler_start_time[4]; +static uint32 solt_chanspec[4] = {0, }, req_start[4] = {0, }; +static bool lastMessages = FALSE; + +#define US_PRE_SEC 1000000 + +static void dhd_mschdbg_us_to_sec(uint32 time_h, uint32 time_l, uint32 *sec, uint32 *remain) +{ + uint64 cur_time = ((uint64)(ntoh32(time_h)) << 32) | ntoh32(time_l); + uint64 r, u = 0; + + r = cur_time; + while (time_h != 0) { + u += (uint64)((0xffffffff / US_PRE_SEC)) * time_h; + r = cur_time - u * US_PRE_SEC; + time_h = (uint32)(r >> 32); + } + + *sec = (uint32)(u + ((uint32)(r) / US_PRE_SEC)); + *remain = (uint32)(r) % US_PRE_SEC; +} + +static char *dhd_mschdbg_display_time(uint32 time_h, uint32 time_l) +{ + static char display_time[32]; + uint32 s, ss; + + if (time_h == 0xffffffff && time_l == 0xffffffff) { + snprintf(display_time, 31, "-1"); + } else { + dhd_mschdbg_us_to_sec(time_h, time_l, &s, &ss); + snprintf(display_time, 31, "%d.%06d", s, ss); + } + return display_time; +} + +static void +dhd_mschdbg_chanspec_list(int sp, char *data, uint16 ptr, uint16 chanspec_cnt) +{ + int i, cnt = (int)ntoh16(chanspec_cnt); + uint16 *chanspec_list = (uint16 *)(data + ntoh16(ptr)); + char buf[CHANSPEC_STR_LEN]; + chanspec_t c; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT((":")); + for (i = 0; i < cnt; i++) { + c = (chanspec_t)ntoh16(chanspec_list[i]); + MSCH_EVENT((" %s", wf_chspec_ntoa(c, buf))); + } + MSCH_EVENT(("\n")); +} + +static void 
+dhd_mschdbg_elem_list(int sp, char *title, char *data, uint16 ptr, uint16 list_cnt) +{ + int i, cnt = (int)ntoh16(list_cnt); + uint32 *list = (uint32 *)(data + ntoh16(ptr)); + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("%s_list: ", title)); + for (i = 0; i < cnt; i++) { + MSCH_EVENT(("0x%08x->", ntoh32(list[i]))); + } + MSCH_EVENT(("null\n")); +} + +static void +dhd_mschdbg_req_param_profiler_event_data(int sp, int ver, char *data, uint16 ptr) +{ + int sn = sp + 4; + msch_req_param_profiler_event_data_t *p = + (msch_req_param_profiler_event_data_t *)(data + ntoh16(ptr)); + uint32 type, flags; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("\n")); + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("req_type: ")); + + type = p->req_type; + if (type < 4) { + char *req_type[] = {"fixed", "start-flexible", "duration-flexible", + "both-flexible"}; + MSCH_EVENT(("%s", req_type[type])); + } + else + MSCH_EVENT(("unknown(%d)", type)); + + flags = ntoh16(p->flags); + if (flags & WL_MSCH_REQ_FLAGS_CHAN_CONTIGUOUS) + MSCH_EVENT((", CHAN_CONTIGUOUS")); + if (flags & WL_MSCH_REQ_FLAGS_MERGE_CONT_SLOTS) + MSCH_EVENT((", MERGE_CONT_SLOTS")); + if (flags & WL_MSCH_REQ_FLAGS_PREMTABLE) + MSCH_EVENT((", PREMTABLE")); + if (flags & WL_MSCH_REQ_FLAGS_PREMT_CURTS) + MSCH_EVENT((", PREMT_CURTS")); + if (flags & WL_MSCH_REQ_FLAGS_PREMT_IMMEDIATE) + MSCH_EVENT((", PREMT_IMMEDIATE")); + MSCH_EVENT((", priority: %d\n", p->priority)); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("start-time: %s, duration: %d(us), interval: %d(us)\n", + dhd_mschdbg_display_time(p->start_time_h, p->start_time_l), + ntoh32(p->duration), ntoh32(p->interval))); + + if (type == WL_MSCH_RT_DUR_FLEX) { + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("dur_flex: %d(us)\n", ntoh32(p->flex.dur_flex))); + } else if (type == WL_MSCH_RT_BOTH_FLEX) { + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("min_dur: %d(us), max_away_dur: %d(us)\n", + ntoh32(p->flex.bf.min_dur), ntoh32(p->flex.bf.max_away_dur))); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("hi_prio_time: %s, hi_prio_interval: 
%d(us)\n", + dhd_mschdbg_display_time(p->flex.bf.hi_prio_time_h, + p->flex.bf.hi_prio_time_l), + ntoh32(p->flex.bf.hi_prio_interval))); + } +} + +static void +dhd_mschdbg_timeslot_profiler_event_data(int sp, int ver, char *title, char *data, + uint16 ptr, bool empty) +{ + int s, sn = sp + 4; + msch_timeslot_profiler_event_data_t *p = + (msch_timeslot_profiler_event_data_t *)(data + ntoh16(ptr)); + char *state[] = {"NONE", "CHN_SW", "ONCHAN_FIRE", "OFF_CHN_PREP", + "OFF_CHN_DONE", "TS_COMPLETE"}; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("<%s timeslot>: ", title)); + if (empty) { + MSCH_EVENT((" null\n")); + return; + } + else + MSCH_EVENT(("0x%08x\n", ntoh32(p->p_timeslot))); + + s = (int)(ntoh32(p->state)); + if (s > 5) s = 0; + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("id: %d, state[%d]: %s, chan_ctxt: [0x%08x]\n", + ntoh32(p->timeslot_id), ntoh32(p->state), state[s], ntoh32(p->p_chan_ctxt))); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("fire_time: %s", + dhd_mschdbg_display_time(p->fire_time_h, p->fire_time_l))); + + MSCH_EVENT((", pre_start_time: %s", + dhd_mschdbg_display_time(p->pre_start_time_h, p->pre_start_time_l))); + + MSCH_EVENT((", end_time: %s", + dhd_mschdbg_display_time(p->end_time_h, p->end_time_l))); + + MSCH_EVENT((", sch_dur: %s\n", + dhd_mschdbg_display_time(p->sch_dur_h, p->sch_dur_l))); +} + +static void +dhd_mschdbg_req_timing_profiler_event_data(int sp, int ver, char *title, char *data, + uint16 ptr, bool empty) +{ + int sn = sp + 4; + msch_req_timing_profiler_event_data_t *p = + (msch_req_timing_profiler_event_data_t *)(data + ntoh16(ptr)); + uint32 type; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("<%s req_timing>: ", title)); + if (empty) { + MSCH_EVENT((" null\n")); + return; + } + else + MSCH_EVENT(("0x%08x (prev 0x%08x, next 0x%08x)\n", + ntoh32(p->p_req_timing), ntoh32(p->p_prev), ntoh32(p->p_next))); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("flags:")); + type = ntoh16(p->flags); + if ((type & 0x7f) == 0) + MSCH_EVENT((" NONE")); + else { + if (type & 
WL_MSCH_RC_FLAGS_ONCHAN_FIRE) + MSCH_EVENT((" ONCHAN_FIRE")); + if (type & WL_MSCH_RC_FLAGS_START_FIRE_DONE) + MSCH_EVENT((" START_FIRE")); + if (type & WL_MSCH_RC_FLAGS_END_FIRE_DONE) + MSCH_EVENT((" END_FIRE")); + if (type & WL_MSCH_RC_FLAGS_ONFIRE_DONE) + MSCH_EVENT((" ONFIRE_DONE")); + if (type & WL_MSCH_RC_FLAGS_SPLIT_SLOT_START) + MSCH_EVENT((" SPLIT_SLOT_START")); + if (type & WL_MSCH_RC_FLAGS_SPLIT_SLOT_END) + MSCH_EVENT((" SPLIT_SLOT_END")); + if (type & WL_MSCH_RC_FLAGS_PRE_ONFIRE_DONE) + MSCH_EVENT((" PRE_ONFIRE_DONE")); + } + MSCH_EVENT(("\n")); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("pre_start_time: %s", + dhd_mschdbg_display_time(p->pre_start_time_h, p->pre_start_time_l))); + + MSCH_EVENT((", start_time: %s", + dhd_mschdbg_display_time(p->start_time_h, p->start_time_l))); + + MSCH_EVENT((", end_time: %s\n", + dhd_mschdbg_display_time(p->end_time_h, p->end_time_l))); + + if (p->p_timeslot && (p->timeslot_ptr == 0)) { + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("<%s timeslot>: 0x%08x\n", title, ntoh32(p->p_timeslot))); + } else + dhd_mschdbg_timeslot_profiler_event_data(sn, ver, title, data, p->timeslot_ptr, + (p->timeslot_ptr == 0)); +} + +static void +dhd_mschdbg_chan_ctxt_profiler_event_data(int sp, int ver, char *data, uint16 ptr, bool empty) +{ + int sn = sp + 4; + msch_chan_ctxt_profiler_event_data_t *p = + (msch_chan_ctxt_profiler_event_data_t *)(data + ntoh16(ptr)); + chanspec_t c; + char buf[CHANSPEC_STR_LEN]; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT((": ")); + if (empty) { + MSCH_EVENT((" null\n")); + return; + } + else + MSCH_EVENT(("0x%08x (prev 0x%08x, next 0x%08x)\n", + ntoh32(p->p_chan_ctxt), ntoh32(p->p_prev), ntoh32(p->p_next))); + + c = (chanspec_t)ntoh16(p->chanspec); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("channel: %s, bf_sch_pending: %s, bf_skipped: %d\n", + wf_chspec_ntoa(c, buf), p->bf_sch_pending? 
"TRUE" : "FALSE", + ntoh32(p->bf_skipped_count))); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("bf_link: prev 0x%08x, next 0x%08x\n", + ntoh32(p->bf_link_prev), ntoh32(p->bf_link_next))); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("onchan_time: %s", + dhd_mschdbg_display_time(p->onchan_time_h, p->onchan_time_l))); + MSCH_EVENT((", actual_onchan_dur: %s", + dhd_mschdbg_display_time(p->actual_onchan_dur_h, p->actual_onchan_dur_l))); + MSCH_EVENT((", pend_onchan_dur: %s\n", + dhd_mschdbg_display_time(p->pend_onchan_dur_h, p->pend_onchan_dur_l))); + + dhd_mschdbg_elem_list(sn, "req_entity", data, p->req_entity_list_ptr, + p->req_entity_list_cnt); + dhd_mschdbg_elem_list(sn, "bf_entity", data, p->bf_entity_list_ptr, + p->bf_entity_list_cnt); +} + +static void +dhd_mschdbg_req_entity_profiler_event_data(int sp, int ver, char *data, uint16 ptr, bool empty) +{ + int sn = sp + 4; + msch_req_entity_profiler_event_data_t *p = + (msch_req_entity_profiler_event_data_t *)(data + ntoh16(ptr)); + char buf[CHANSPEC_STR_LEN]; + chanspec_t c; + uint32 flags; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT((": ")); + if (empty) { + MSCH_EVENT((" null\n")); + return; + } + else + MSCH_EVENT(("0x%08x (prev 0x%08x, next 0x%08x)\n", + ntoh32(p->p_req_entity), ntoh32(p->req_hdl_link_prev), + ntoh32(p->req_hdl_link_next))); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("req_hdl: [0x%08x]\n", ntoh32(p->p_req_hdl))); + + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("chan_ctxt_link: prev 0x%08x, next 0x%08x\n", + ntoh32(p->chan_ctxt_link_prev), ntoh32(p->chan_ctxt_link_next))); + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("rt_specific_link: prev 0x%08x, next 0x%08x\n", + ntoh32(p->rt_specific_link_prev), ntoh32(p->rt_specific_link_next))); + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("start_fixed_link: prev 0x%08x, next 0x%08x\n", + ntoh32(p->start_fixed_link_prev), ntoh32(p->start_fixed_link_next))); + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("both_flex_list: prev 0x%08x, next 0x%08x\n", + ntoh32(p->both_flex_list_prev), 
ntoh32(p->both_flex_list_next))); + + c = (chanspec_t)ntoh16(p->chanspec); + MSCH_EVENT_HEAD(sn); + if (ver >= 2) { + MSCH_EVENT(("channel: %s, onchan Id %d, current chan Id %d, priority %d", + wf_chspec_ntoa(c, buf), ntoh16(p->onchan_chn_idx), ntoh16(p->cur_chn_idx), + ntoh16(p->priority))); + flags = ntoh32(p->flags); + if (flags & WL_MSCH_ENTITY_FLAG_MULTI_INSTANCE) + MSCH_EVENT((" : MULTI_INSTANCE\n")); + else + MSCH_EVENT(("\n")); + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("actual_start_time: %s, ", + dhd_mschdbg_display_time(p->actual_start_time_h, p->actual_start_time_l))); + MSCH_EVENT(("curts_fire_time: %s, ", + dhd_mschdbg_display_time(p->curts_fire_time_h, p->curts_fire_time_l))); + } else { + MSCH_EVENT(("channel: %s, priority %d, ", wf_chspec_ntoa(c, buf), + ntoh16(p->priority))); + } + MSCH_EVENT(("bf_last_serv_time: %s\n", + dhd_mschdbg_display_time(p->bf_last_serv_time_h, p->bf_last_serv_time_l))); + + dhd_mschdbg_req_timing_profiler_event_data(sn, ver, "current", data, p->cur_slot_ptr, + (p->cur_slot_ptr == 0)); + dhd_mschdbg_req_timing_profiler_event_data(sn, ver, "pending", data, p->pend_slot_ptr, + (p->pend_slot_ptr == 0)); + + if (p->p_chan_ctxt && (p->chan_ctxt_ptr == 0)) { + MSCH_EVENT_HEAD(sn); + MSCH_EVENT((": 0x%08x\n", ntoh32(p->p_chan_ctxt))); + } + else + dhd_mschdbg_chan_ctxt_profiler_event_data(sn, ver, data, p->chan_ctxt_ptr, + (p->chan_ctxt_ptr == 0)); +} + +static void +dhd_mschdbg_req_handle_profiler_event_data(int sp, int ver, char *data, uint16 ptr, bool empty) +{ + int sn = sp + 4; + msch_req_handle_profiler_event_data_t *p = + (msch_req_handle_profiler_event_data_t *)(data + ntoh16(ptr)); + uint32 flags; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT((": ")); + if (empty) { + MSCH_EVENT((" null\n")); + return; + } + else + MSCH_EVENT(("0x%08x (prev 0x%08x, next 0x%08x)\n", + ntoh32(p->p_req_handle), ntoh32(p->p_prev), ntoh32(p->p_next))); + + dhd_mschdbg_elem_list(sn, "req_entity", data, p->req_entity_list_ptr, + p->req_entity_list_cnt); + 
MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("cb_func: [0x%08x], cb_func: [0x%08x]", + ntoh32(p->cb_func), ntoh32(p->cb_ctxt))); + if (ver < 2) { + MSCH_EVENT((", chan_cnt: %d", ntoh16(p->chan_cnt))); + } + flags = ntoh32(p->flags); + if (flags & WL_MSCH_REQ_HDL_FLAGS_NEW_REQ) + MSCH_EVENT((", NEW_REQ")); + MSCH_EVENT(("\n")); + + dhd_mschdbg_req_param_profiler_event_data(sn, ver, data, p->req_param_ptr); + + if (ver >= 2) { + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("req_time: %s\n", + dhd_mschdbg_display_time(p->req_time_h, p->req_time_l))); + MSCH_EVENT_HEAD(sn); + MSCH_EVENT(("chan_cnt: %d, chan idx %d, last chan idx %d\n", + ntoh16(p->chan_cnt), ntoh16(p->chan_idx), ntoh16(p->last_chan_idx))); + if (p->chanspec_list && p->chanspec_cnt) { + dhd_mschdbg_chanspec_list(sn, data, p->chanspec_list, p->chanspec_cnt); + } + } +} + +static void +dhd_mschdbg_profiler_profiler_event_data(int sp, int ver, char *data, uint16 ptr) +{ + msch_profiler_profiler_event_data_t *p = + (msch_profiler_profiler_event_data_t *)(data + ntoh16(ptr)); + uint32 flags; + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("free list: req_hdl 0x%08x, req_entity 0x%08x," + " chan_ctxt 0x%08x, chanspec 0x%08x\n", + ntoh32(p->free_req_hdl_list), ntoh32(p->free_req_entity_list), + ntoh32(p->free_chan_ctxt_list), ntoh32(p->free_chanspec_list))); + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("alloc count: chanspec %d, req_entity %d, req_hdl %d, " + "chan_ctxt %d, timeslot %d\n", + ntoh16(p->msch_chanspec_alloc_cnt), ntoh16(p->msch_req_entity_alloc_cnt), + ntoh16(p->msch_req_hdl_alloc_cnt), ntoh16(p->msch_chan_ctxt_alloc_cnt), + ntoh16(p->msch_timeslot_alloc_cnt))); + + dhd_mschdbg_elem_list(sp, "req_hdl", data, p->msch_req_hdl_list_ptr, + p->msch_req_hdl_list_cnt); + dhd_mschdbg_elem_list(sp, "chan_ctxt", data, p->msch_chan_ctxt_list_ptr, + p->msch_chan_ctxt_list_cnt); + dhd_mschdbg_elem_list(sp, "req_timing", data, p->msch_req_timing_list_ptr, + p->msch_req_timing_list_cnt); + dhd_mschdbg_elem_list(sp, "start_fixed", data, 
p->msch_start_fixed_list_ptr, + p->msch_start_fixed_list_cnt); + dhd_mschdbg_elem_list(sp, "both_flex_req_entity", data, + p->msch_both_flex_req_entity_list_ptr, + p->msch_both_flex_req_entity_list_cnt); + dhd_mschdbg_elem_list(sp, "start_flex", data, p->msch_start_flex_list_ptr, + p->msch_start_flex_list_cnt); + dhd_mschdbg_elem_list(sp, "both_flex", data, p->msch_both_flex_list_ptr, + p->msch_both_flex_list_cnt); + + if (p->p_cur_msch_timeslot && (p->cur_msch_timeslot_ptr == 0)) { + MSCH_EVENT_HEAD(sp); + MSCH_EVENT((": 0x%08x\n", + ntoh32(p->p_cur_msch_timeslot))); + } else + dhd_mschdbg_timeslot_profiler_event_data(sp, ver, "cur_msch", data, + p->cur_msch_timeslot_ptr, (p->cur_msch_timeslot_ptr == 0)); + + if (p->p_next_timeslot && (p->next_timeslot_ptr == 0)) { + MSCH_EVENT_HEAD(sp); + MSCH_EVENT((": 0x%08x\n", + ntoh32(p->p_next_timeslot))); + } else + dhd_mschdbg_timeslot_profiler_event_data(sp, ver, "next", data, + p->next_timeslot_ptr, (p->next_timeslot_ptr == 0)); + + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("ts_id: %d, ", ntoh32(p->ts_id))); + flags = ntoh32(p->flags); + if (flags & WL_MSCH_STATE_IN_TIEMR_CTXT) + MSCH_EVENT(("IN_TIEMR_CTXT, ")); + if (flags & WL_MSCH_STATE_SCHD_PENDING) + MSCH_EVENT(("SCHD_PENDING, ")); + MSCH_EVENT(("slotskip_flags: %d, cur_armed_timeslot: 0x%08x\n", + (ver >= 2)? 
ntoh32(p->slotskip_flag) : 0, ntoh32(p->cur_armed_timeslot))); + MSCH_EVENT_HEAD(sp); + MSCH_EVENT(("flex_list_cnt: %d, service_interval: %d, " + "max_lo_prio_interval: %d\n", + ntoh16(p->flex_list_cnt), ntoh32(p->service_interval), + ntoh32(p->max_lo_prio_interval))); +} + +static void dhd_mschdbg_dump_data(dhd_pub_t *dhdp, void *raw_event_ptr, int type, + char *data, int len) +{ + uint64 t = 0, tt = 0; + uint32 s = 0, ss = 0; + int wlc_index, ver; + + ver = (type & WL_MSCH_PROFILER_VER_MASK) >> WL_MSCH_PROFILER_VER_SHIFT; + wlc_index = (type & WL_MSCH_PROFILER_WLINDEX_MASK) >> WL_MSCH_PROFILER_WLINDEX_SHIFT; + if (wlc_index >= 4) + return; + + type &= WL_MSCH_PROFILER_TYPE_MASK; + if (type <= WL_MSCH_PROFILER_PROFILE_END) { + msch_profiler_event_data_t *pevent = (msch_profiler_event_data_t *)data; + tt = ((uint64)(ntoh32(pevent->time_hi)) << 32) | ntoh32(pevent->time_lo); + dhd_mschdbg_us_to_sec(pevent->time_hi, pevent->time_lo, &s, &ss); + } + + if (lastMessages && (type != WL_MSCH_PROFILER_MESSAGE) && + (type != WL_MSCH_PROFILER_EVENT_LOG)) { + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("\n")); + lastMessages = FALSE; + } + + switch (type) { + case WL_MSCH_PROFILER_START: + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d START\n", s, ss)); + break; + + case WL_MSCH_PROFILER_EXIT: + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d EXIT\n", s, ss)); + break; + + case WL_MSCH_PROFILER_REQ: + { + msch_req_profiler_event_data_t *p = (msch_req_profiler_event_data_t *)data; + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("\n")); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("===============================\n")); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d [wl%d] REGISTER:\n", s, ss, wlc_index)); + dhd_mschdbg_req_param_profiler_event_data(4, ver, data, p->req_param_ptr); + dhd_mschdbg_chanspec_list(4, data, p->chanspec_ptr, p->chanspec_cnt); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("===============================\n")); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("\n")); + } + break; + + case 
WL_MSCH_PROFILER_CALLBACK: + { + msch_callback_profiler_event_data_t *p = + (msch_callback_profiler_event_data_t *)data; + char buf[CHANSPEC_STR_LEN]; + chanspec_t chanspec; + uint16 cbtype; + + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d [wl%d] CALLBACK: ", s, ss, wlc_index)); + chanspec = (chanspec_t)ntoh16(p->chanspec); + MSCH_EVENT(("req_hdl[0x%08x], channel %s --", + ntoh32(p->p_req_hdl), wf_chspec_ntoa(chanspec, buf))); + + cbtype = ntoh16(p->type); + if (cbtype & WL_MSCH_CT_ON_CHAN) + MSCH_EVENT((" ON_CHAN")); + if (cbtype & WL_MSCH_CT_OFF_CHAN) + MSCH_EVENT((" OFF_CHAN")); + if (cbtype & WL_MSCH_CT_REQ_START) + MSCH_EVENT((" REQ_START")); + if (cbtype & WL_MSCH_CT_REQ_END) + MSCH_EVENT((" REQ_END")); + if (cbtype & WL_MSCH_CT_SLOT_START) + MSCH_EVENT((" SLOT_START")); + if (cbtype & WL_MSCH_CT_SLOT_SKIP) + MSCH_EVENT((" SLOT_SKIP")); + if (cbtype & WL_MSCH_CT_SLOT_END) + MSCH_EVENT((" SLOT_END")); + if (cbtype & WL_MSCH_CT_OFF_CHAN_DONE) + MSCH_EVENT((" OFF_CHAN_DONE")); + if (cbtype & WL_MSCH_CT_PARTIAL) + MSCH_EVENT((" PARTIAL")); + if (cbtype & WL_MSCH_CT_PRE_ONCHAN) + MSCH_EVENT((" PRE_ONCHAN")); + if (cbtype & WL_MSCH_CT_PRE_REQ_START) + MSCH_EVENT((" PRE_REQ_START")); + + if (cbtype & WL_MSCH_CT_REQ_START) { + req_start[wlc_index] = 1; + req_start_time[wlc_index] = tt; + } else if (cbtype & WL_MSCH_CT_REQ_END) { + if (req_start[wlc_index]) { + MSCH_EVENT((" : REQ duration %d", + (uint32)(tt - req_start_time[wlc_index]))); + req_start[wlc_index] = 0; + } + } + + if (cbtype & WL_MSCH_CT_SLOT_START) { + solt_chanspec[wlc_index] = p->chanspec; + solt_start_time[wlc_index] = tt; + } else if (cbtype & WL_MSCH_CT_SLOT_END) { + if (p->chanspec == solt_chanspec[wlc_index]) { + MSCH_EVENT((" : SLOT duration %d", + (uint32)(tt - solt_start_time[wlc_index]))); + solt_chanspec[wlc_index] = 0; + } + } + MSCH_EVENT(("\n")); + + if (cbtype & (WL_MSCH_CT_ON_CHAN | WL_MSCH_CT_SLOT_SKIP)) { + MSCH_EVENT_HEAD(4); + if (cbtype & WL_MSCH_CT_ON_CHAN) { + MSCH_EVENT(("ID %d 
onchan idx %d cur_chan_seq_start %s ", + ntoh32(p->timeslot_id), ntoh32(p->onchan_idx), + dhd_mschdbg_display_time(p->cur_chan_seq_start_time_h, + p->cur_chan_seq_start_time_l))); + } + t = ((uint64)(ntoh32(p->start_time_h)) << 32) | + ntoh32(p->start_time_l); + MSCH_EVENT(("start %s ", + dhd_mschdbg_display_time(p->start_time_h, + p->start_time_l))); + tt = ((uint64)(ntoh32(p->end_time_h)) << 32) | ntoh32(p->end_time_l); + MSCH_EVENT(("end %s duration %d\n", + dhd_mschdbg_display_time(p->end_time_h, p->end_time_l), + (p->end_time_h == 0xffffffff && p->end_time_l == 0xffffffff)? + -1 : (int)(tt - t))); + } + + } + break; + + case WL_MSCH_PROFILER_EVENT_LOG: + { + while (len > 0) { + msch_event_log_profiler_event_data_t *p = + (msch_event_log_profiler_event_data_t *)data; + int size = WL_MSCH_EVENT_LOG_HEAD_SIZE + p->hdr.count * sizeof(uint32); + data += size; + len -= size; + dhd_mschdbg_us_to_sec(p->time_hi, p->time_lo, &s, &ss); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d [wl%d]: ", s, ss, p->hdr.tag)); + p->hdr.tag = EVENT_LOG_TAG_MSCHPROFILE; + p->hdr.fmt_num = ntoh16(p->hdr.fmt_num); + dhd_dbg_verboselog_printf(dhdp, &p->hdr, raw_event_ptr, p->data); + } + lastMessages = TRUE; + break; + } + + case WL_MSCH_PROFILER_MESSAGE: + { + msch_message_profiler_event_data_t *p = (msch_message_profiler_event_data_t *)data; + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d [wl%d]: %s", s, ss, wlc_index, p->message)); + lastMessages = TRUE; + break; + } + + case WL_MSCH_PROFILER_PROFILE_START: + profiler_start_time[wlc_index] = tt; + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("-------------------------------\n")); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d [wl%d] PROFILE DATA:\n", s, ss, wlc_index)); + dhd_mschdbg_profiler_profiler_event_data(4, ver, data, 0); + break; + + case WL_MSCH_PROFILER_PROFILE_END: + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d [wl%d] PROFILE END: take time %d\n", s, ss, + wlc_index, (uint32)(tt - profiler_start_time[wlc_index]))); + 
MSCH_EVENT_HEAD(0); + MSCH_EVENT(("-------------------------------\n")); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("\n")); + break; + + case WL_MSCH_PROFILER_REQ_HANDLE: + dhd_mschdbg_req_handle_profiler_event_data(4, ver, data, 0, FALSE); + break; + + case WL_MSCH_PROFILER_REQ_ENTITY: + dhd_mschdbg_req_entity_profiler_event_data(4, ver, data, 0, FALSE); + break; + + case WL_MSCH_PROFILER_CHAN_CTXT: + dhd_mschdbg_chan_ctxt_profiler_event_data(4, ver, data, 0, FALSE); + break; + + case WL_MSCH_PROFILER_REQ_TIMING: + dhd_mschdbg_req_timing_profiler_event_data(4, ver, "msch", data, 0, FALSE); + break; + + default: + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("[wl%d] ERROR: unsupported EVENT reason code:%d; ", + wlc_index, type)); + break; + } +} + +void +wl_mschdbg_event_handler(dhd_pub_t *dhdp, void *raw_event_ptr, int type, void *data, int len) +{ + head_log = "MSCH"; + dhd_mschdbg_dump_data(dhdp, raw_event_ptr, type, (char *)data, len); +} + +void +wl_mschdbg_verboselog_handler(dhd_pub_t *dhdp, void *raw_event_ptr, int tag, uint32 *log_ptr) +{ + head_log = "CONSOLE"; + if (tag == EVENT_LOG_TAG_MSCHPROFILE) { + msch_event_log_profiler_event_data_t *p = + (msch_event_log_profiler_event_data_t *)log_ptr; + uint32 s, ss; + dhd_mschdbg_us_to_sec(p->time_hi, p->time_lo, &s, &ss); + MSCH_EVENT_HEAD(0); + MSCH_EVENT(("%06d.%06d [wl%d]: ", s, ss, p->hdr.tag)); + p->hdr.tag = EVENT_LOG_TAG_MSCHPROFILE; + p->hdr.fmt_num = ntoh16(p->hdr.fmt_num); + dhd_dbg_verboselog_printf(dhdp, &p->hdr, raw_event_ptr, p->data); + } else { + msch_collect_tlv_t *p = (msch_collect_tlv_t *)log_ptr; + int type = ntoh16(p->type); + int len = ntoh16(p->size); + dhd_mschdbg_dump_data(dhdp, raw_event_ptr, type, p->value, len); + } +} +#endif /* SHOW_LOGTRACE */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_bta.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_mschdbg.h old mode 100755 new mode 100644 similarity index 67% rename from 
drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_bta.h rename to drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_mschdbg.h index df9d1f91b9ce..749e1119f421 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_bta.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_mschdbg.h @@ -1,7 +1,9 @@ /* - * BT-AMP support routines + * DHD debugability header file * - * Copyright (C) 1999-2016, Broadcom Corporation + * <> + * + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -21,22 +23,17 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * - * - * <> - * - * $Id: dhd_bta.h 514727 2014-11-12 03:02:48Z $ + * $Id: dhd_mschdbg.h 571265 2015-07-14 20:50:18Z $ */ -#ifndef __dhd_bta_h__ -#define __dhd_bta_h__ -struct dhd_pub; +#ifndef _dhd_mschdbg_h_ +#define _dhd_mschdbg_h_ -extern int dhd_bta_docmd(struct dhd_pub *pub, void *cmd_buf, uint cmd_len); +#ifdef SHOW_LOGTRACE +extern void wl_mschdbg_event_handler(dhd_pub_t *dhdp, void *raw_event_ptr, int type, + void *data, int len); +extern void wl_mschdbg_verboselog_handler(dhd_pub_t *dhdp, void *raw_event_ptr, int tag, + uint32 *log_ptr); +#endif /* SHOW_LOGTRACE */ -extern void dhd_bta_doevt(struct dhd_pub *pub, void *data_buf, uint data_len); - -extern int dhd_bta_tx_hcidata(struct dhd_pub *pub, void *data_buf, uint data_len); -extern void dhd_bta_tx_hcidata_complete(struct dhd_pub *dhdp, void *txp, bool success); - - -#endif /* __dhd_bta_h__ */ +#endif /* _dhd_mschdbg_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_msgbuf.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_msgbuf.c index 3bd9a45d7906..e602d24f2e6a 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_msgbuf.c +++ 
b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_msgbuf.c @@ -3,7 +3,7 @@ * Provides type definitions and function prototypes used to link the * DHD OS, bus, and protocol modules. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -26,7 +26,7 @@ * * <> * - * $Id: dhd_msgbuf.c 605475 2015-12-10 12:49:49Z $ + * $Id: dhd_msgbuf.c 704361 2017-06-13 08:50:38Z $ */ @@ -45,22 +45,34 @@ #include #include - +#include #include #include #include #include +#ifdef DHD_TIMESYNC +#include +#endif /* DHD_TIMESYNC */ #if defined(DHD_LB) #include #include -#define DHD_LB_WORKQ_SZ (8192) +#define DHD_LB_WORKQ_SZ (8192) #define DHD_LB_WORKQ_SYNC (16) #define DHD_LB_WORK_SCHED (DHD_LB_WORKQ_SYNC * 2) #endif /* DHD_LB */ +#include +#include + +#ifdef DHD_PKT_LOGGING +#include +#endif /* DHD_PKT_LOGGING */ + +extern char dhd_version[]; +extern char fw_version[]; /** * Host configures a soft doorbell for d2h rings, by specifying a 32bit host @@ -90,7 +102,6 @@ #define RX_DMA_OFFSET 8 /* Mem2mem DMA inserts an extra 8 */ #define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN) -#define FLOWRING_SIZE (H2DRING_TXPOST_MAX_ITEM * H2DRING_TXPOST_ITEMSIZE) /* flags for ioctl pending status */ #define MSGBUF_IOCTL_ACK_PENDING (1<<0) @@ -101,12 +112,21 @@ #define DMA_D2H_SCRATCH_BUF_LEN 8 #define DMA_XFER_LEN_LIMIT 0x400000 +#ifdef BCM_HOST_BUF +#ifndef DMA_HOST_BUFFER_LEN +#define DMA_HOST_BUFFER_LEN 0x200000 +#endif +#endif /* BCM_HOST_BUF */ + #define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192 -#define DHD_FLOWRING_MAX_EVENTBUF_POST 8 +#define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D 1 +#define DHD_FLOWRING_MAX_EVENTBUF_POST 32 #define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8 +#define DHD_H2D_INFORING_MAX_BUF_POST 32 +#define DHD_MAX_TSBUF_POST 8 -#define DHD_PROT_FUNCS 37 +#define DHD_PROT_FUNCS 
41 /* Length of buffer in host for bus throughput measurement */ #define DHD_BUS_TPUT_BUF_LEN 2048 @@ -117,7 +137,9 @@ #define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48 #define RING_NAME_MAX_LENGTH 24 - +#define CTRLSUB_HOSTTS_MEESAGE_SIZE 1024 +/* Giving room before ioctl_trans_id rollsover. */ +#define BUFFER_BEFORE_ROLLOVER 300 struct msgbuf_ring; /* ring context for common and flow rings */ @@ -141,12 +163,10 @@ struct msgbuf_ring; /* ring context for common and flow rings */ * * Dongle advertizes host side sync mechanism requirements. */ -#define PCIE_D2H_SYNC -#if defined(PCIE_D2H_SYNC) #define PCIE_D2H_SYNC_WAIT_TRIES (512UL) -#define PCIE_D2H_SYNC_NUM_OF_STEPS (3UL) -#define PCIE_D2H_SYNC_DELAY (50UL) /* in terms of usecs */ +#define PCIE_D2H_SYNC_NUM_OF_STEPS (5UL) +#define PCIE_D2H_SYNC_DELAY (100UL) /* in terms of usecs */ /** * Custom callback attached based upon D2H DMA Sync mode advertized by dongle. @@ -156,8 +176,6 @@ struct msgbuf_ring; /* ring context for common and flow rings */ */ typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring, volatile cmn_msg_hdr_t *msg, int msglen); -#endif /* PCIE_D2H_SYNC */ - /* * +---------------------------------------------------------------------------- @@ -211,8 +229,9 @@ typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring, #define DHD_FLOWRING_START_FLOWID BCMPCIE_H2D_COMMON_MSGRINGS /* Determine whether a ringid belongs to a TxPost flowring */ -#define DHD_IS_FLOWRING(ringid) \ - ((ringid) >= BCMPCIE_COMMON_MSGRINGS) +#define DHD_IS_FLOWRING(ringid, max_flow_rings) \ + ((ringid) >= BCMPCIE_COMMON_MSGRINGS && \ + (ringid) < ((max_flow_rings) + BCMPCIE_COMMON_MSGRINGS)) /* Convert a H2D TxPost FlowId to a MsgBuf RingId */ #define DHD_FLOWID_TO_RINGID(flowid) \ @@ -227,14 +246,24 @@ typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring, * any array of H2D rings. */ #define DHD_H2D_RING_OFFSET(ringid) \ - ((DHD_IS_FLOWRING(ringid)) ? 
DHD_RINGID_TO_FLOWID(ringid) : (ringid)) + (((ringid) >= BCMPCIE_COMMON_MSGRINGS) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid)) + +/* Convert a H2D MsgBuf Flowring Id to an offset index into the H2D DMA indices array + * This may be used for IFRM. + */ +#define DHD_H2D_FRM_FLOW_RING_OFFSET(ringid) \ + ((ringid) - BCMPCIE_COMMON_MSGRINGS) /* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array * This may be used for the D2H DMA WR index array or D2H DMA RD index array or * any array of D2H rings. + * d2h debug ring is located at the end, i.e. after all the tx flow rings and h2d debug ring + * max_h2d_rings: total number of h2d rings */ -#define DHD_D2H_RING_OFFSET(ringid) \ - ((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS) +#define DHD_D2H_RING_OFFSET(ringid, max_h2d_rings) \ + ((ringid) > (max_h2d_rings) ? \ + ((ringid) - max_h2d_rings) : \ + ((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS)) /* Convert a D2H DMA Indices Offset to a RingId */ #define DHD_D2H_RINGID(offset) \ @@ -264,6 +293,8 @@ typedef struct dhd_dmaxfer { uint32 destdelay; uint32 len; bool in_progress; + uint64 start_usec; + uint32 d11_lpbk; } dhd_dmaxfer_t; /** @@ -293,7 +324,15 @@ typedef struct msgbuf_ring { /* # of messages on ring not yet announced to dongle */ uint16 pend_items_count; #endif /* TXP_FLUSH_NITEMS */ + + uint8 ring_type; + uint8 n_completion_ids; + bool create_pending; + uint16 create_req_id; + uint8 current_phase; + uint16 compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED]; uchar name[RING_NAME_MAX_LENGTH]; + uint32 ring_mem_allocated; } msgbuf_ring_t; #define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va) @@ -303,6 +342,11 @@ typedef struct msgbuf_ring { +/* This can be overwritten by module parameter defined in dhd_linux.c + * or by dhd iovar h2d_max_txpost. + */ +int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM; + /** DHD protocol handle. Is an opaque type to other DHD software layers. 
*/ typedef struct dhd_prot { osl_t *osh; /* OSL handle */ @@ -310,12 +354,16 @@ typedef struct dhd_prot { uint16 max_rxbufpost; uint16 max_eventbufpost; uint16 max_ioctlrespbufpost; + uint16 max_tsbufpost; + uint16 max_infobufpost; + uint16 infobufpost; uint16 cur_event_bufs_posted; uint16 cur_ioctlresp_bufs_posted; + uint16 cur_ts_bufs_posted; /* Flow control mechanism based on active transmits pending */ uint16 active_tx_count; /* increments on every packet tx, and decrements on tx_status */ - uint16 max_tx_count; + uint16 h2d_max_txpost; uint16 txp_threshold; /* optimization to write "n" tx items at a time to ring */ /* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */ @@ -324,6 +372,8 @@ typedef struct dhd_prot { msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */ msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */ msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */ + msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */ + msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */ msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowings */ dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */ @@ -332,6 +382,7 @@ typedef struct dhd_prot { uint32 rx_dataoffset; dhd_mb_ring_t mb_ring_fn; /* called when dongle needs to be notified of new msg */ + dhd_mb_ring_2_t mb_2_ring_fn; /* called when dongle needs to be notified of new msg */ /* ioctl related resources */ uint8 ioctl_state; @@ -350,24 +401,26 @@ typedef struct dhd_prot { dhd_dma_buf_t h2d_dma_indx_rd_buf; /* Array of H2D RD indices */ dhd_dma_buf_t d2h_dma_indx_wr_buf; /* Array of D2H WR indices */ dhd_dma_buf_t d2h_dma_indx_rd_buf; /* Array of D2H RD indices */ + dhd_dma_buf_t h2d_ifrm_indx_wr_buf; /* Array of H2D WR indices for ifrm */ dhd_dma_buf_t host_bus_throughput_buf; /* bus throughput measure buffer */ dhd_dma_buf_t *flowring_buf; /* pool of flow ring buf */ uint32 flowring_num; -#if 
defined(PCIE_D2H_SYNC) d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */ ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */ ulong d2h_sync_wait_tot; /* total wait loops */ -#endif /* PCIE_D2H_SYNC */ dhd_dmaxfer_t dmaxfer; /* for test/DMA loopback */ uint16 ioctl_seq_no; uint16 data_seq_no; uint16 ioctl_trans_id; - void *pktid_map_handle; /* a pktid maps to a packet and its metadata */ + void *pktid_ctrl_map; /* a pktid maps to a packet and its metadata */ + void *pktid_rx_map; /* pktid map for rx path */ + void *pktid_tx_map; /* pktid map for tx path */ + void *rx_lock; /* rx pktid map and rings access protection */ bool metadata_dbg; void *pktid_map_handle_ioctl; @@ -380,7 +433,7 @@ typedef struct dhd_prot { /* Host's soft doorbell configuration */ bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS]; #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */ -#if defined(DHD_LB) + /* Work Queues to be used by the producer and the consumer, and threshold * when the WRITE index must be synced to consumer's workq */ @@ -392,9 +445,22 @@ typedef struct dhd_prot { uint32 rx_compl_prod_sync ____cacheline_aligned; bcm_workq_t rx_compl_prod, rx_compl_cons; #endif /* DHD_LB_RXC */ -#endif /* DHD_LB */ + + dhd_dma_buf_t fw_trap_buf; /* firmware trap buffer */ + + uint32 host_ipc_version; /* Host sypported IPC rev */ + uint32 device_ipc_version; /* FW supported IPC rev */ + uint32 active_ipc_version; /* Host advertised IPC rev */ + dhd_dma_buf_t hostts_req_buf; /* For holding host timestamp request buf */ + bool hostts_req_buf_inuse; + bool rx_ts_log_enabled; + bool tx_ts_log_enabled; } dhd_prot_t; +extern void dhd_schedule_dmaxfer_free(dhd_pub_t* dhdp, dmaxref_mem_map_t *dmmap); + +static atomic_t dhd_msgbuf_rxbuf_post_event_bufs_running = ATOMIC_INIT(0); + /* Convert a dmaaddr_t to a base_addr with htol operations */ static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa); @@ -410,6 +476,7 @@ static int 
dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring); static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring); static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring); +static void dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf); /* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */ static int dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd); @@ -436,13 +503,12 @@ static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *p, uint16 len); static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring); -/* Allocate DMA-able memory for saving H2D/D2H WR/RD indices */ static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type, dhd_dma_buf_t *dma_buf, uint32 bufsz); /* Set/Get a RD or WR index in the array of indices */ /* See also: dhd_prot_dma_indx_init() */ -static void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, +void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid); static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid); @@ -461,14 +527,16 @@ static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void *buf, int ifidx); /* Post buffers for Rx, control ioctl response and events */ -static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post); +static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msgid, uint32 max_to_post); static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub); static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub); static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid); static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid); +static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub); static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt); + /* D2H Message 
handling */ static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len); @@ -479,7 +547,6 @@ static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg); static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg); static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg); static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg); -static void dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void *msg); static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg); /* Loopback test with dongle */ @@ -492,10 +559,24 @@ static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg); static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg); static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg); static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg); +static void dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg); + +/* Monitor Mode */ +#ifdef WL_MONITOR +extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx); +extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx); +#endif /* WL_MONITOR */ /* Configure a soft doorbell per D2H ring */ static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd); -static void dhd_prot_d2h_ring_config_cmplt_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg); +static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf); +static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf); +static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf); +static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf); +static void dhd_prot_detach_info_rings(dhd_pub_t *dhd); +static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, 
void* buf); typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg); @@ -521,39 +602,44 @@ static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = { NULL, dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */ NULL, - dhd_prot_rxcmplt_process, /* MSG_TYPE_RX_CMPLT */ + NULL, /* MSG_TYPE_RX_CMPLT use dedicated handler */ NULL, dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */ NULL, /* MSG_TYPE_FLOW_RING_RESUME */ - NULL, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */ + dhd_prot_process_flow_ring_resume_response, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */ NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */ - NULL, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */ + dhd_prot_process_flow_ring_suspend_response, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */ NULL, /* MSG_TYPE_INFO_BUF_POST */ - NULL, /* MSG_TYPE_INFO_BUF_CMPLT */ + dhd_prot_process_infobuf_complete, /* MSG_TYPE_INFO_BUF_CMPLT */ NULL, /* MSG_TYPE_H2D_RING_CREATE */ NULL, /* MSG_TYPE_D2H_RING_CREATE */ - NULL, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */ - NULL, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */ + dhd_prot_process_h2d_ring_create_complete, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */ + dhd_prot_process_d2h_ring_create_complete, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */ NULL, /* MSG_TYPE_H2D_RING_CONFIG */ NULL, /* MSG_TYPE_D2H_RING_CONFIG */ NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */ - dhd_prot_d2h_ring_config_cmplt_process, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */ + dhd_prot_process_d2h_ring_config_complete, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */ NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */ - NULL, /* MSG_TYPE_D2H_MAILBOX_DATA */ + dhd_prot_process_d2h_mb_data, /* MSG_TYPE_D2H_MAILBOX_DATA */ + NULL, /* MSG_TYPE_TIMSTAMP_BUFPOST */ + NULL, /* MSG_TYPE_HOSTTIMSTAMP */ + dhd_prot_process_d2h_host_ts_complete, /* MSG_TYPE_HOSTTIMSTAMP_CMPLT */ + dhd_prot_process_fw_timestamp, /* MSG_TYPE_FIRMWARE_TIMESTAMP */ }; #ifdef DHD_RX_CHAINING #define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \ - (!ETHER_ISNULLDEST(((struct ether_header 
*)(evh))->ether_dhost) && \ - !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \ - !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \ - !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \ - ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \ - ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \ - (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))) && \ - dhd_l2_filter_chainable((dhd), (evh), (ifidx))) + (dhd_wet_chainable(dhd) && \ + dhd_rx_pkt_chainable((dhd), (ifidx)) && \ + !ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \ + !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \ + !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \ + !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \ + ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \ + ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \ + (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6)))) static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain); static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx); @@ -565,8 +651,6 @@ static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd); static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd); -#if defined(PCIE_D2H_SYNC) /* avoids problems related to host CPU cache */ - /** * D2H DMA to completion callback handlers. Based on the mode advertised by the * dongle through the PCIE shared region, the appropriate callback will be @@ -575,8 +659,8 @@ static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd); * does not require host participation, then a noop callback handler will be * bound that simply returns the msg_type. 
*/ -static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring, - uint32 tries, uchar *msg, int msglen); +static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, + uint32 tries, volatile uchar *msg, int msglen); static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring, volatile cmn_msg_hdr_t *msg, int msglen); static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring, @@ -584,26 +668,40 @@ static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring, static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring, volatile cmn_msg_hdr_t *msg, int msglen); static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd); +static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create); +static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create); +static uint16 dhd_get_max_flow_rings(dhd_pub_t *dhd); -void dhd_prot_collect_memdump(dhd_pub_t *dhd) +bool +dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info) { - DHD_ERROR(("%s(): Collecting mem dump now \r\n", __FUNCTION__)); -#ifdef DHD_FW_COREDUMP - if (dhd->memdump_enabled) { - /* collect core dump */ - dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK; - dhd_bus_mem_dump(dhd); - } -#endif /* DHD_FW_COREDUMP */ -#ifdef SUPPORT_LINKDOWN_RECOVERY -#ifdef CONFIG_ARCH_MSM - dhd->bus->no_cfg_restore = 1; -#endif /* CONFIG_ARCH_MSM */ - dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK; - dhd_os_send_hang_message(dhd); -#endif /* SUPPORT_LINKDOWN_RECOVERY */ -} + msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)prot_info; + uint16 rd, wr; + bool ret; + if (dhd->dma_d2h_ring_upd_support) { + wr = flow_ring->wr; + } else { + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx); + } + if (dhd->dma_h2d_ring_upd_support) { + rd = flow_ring->rd; + } else { + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx); + } + ret = (wr == rd) ? 
TRUE : FALSE; + return ret; +} +uint16 +dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd) +{ + return (uint16)h2d_max_txpost; +} +void +dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost) +{ + h2d_max_txpost = max_txpost; +} /** * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has * not completed, a livelock condition occurs. Host will avert this livelock by @@ -614,18 +712,20 @@ void dhd_prot_collect_memdump(dhd_pub_t *dhd) * */ static void -dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 tries, - uchar *msg, int msglen) +dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, uint32 tries, + volatile uchar *msg, int msglen) { - uint32 seqnum = ring->seqnum; - - DHD_ERROR(("LIVELOCK DHD<%p> name<%s> seqnum<%u:%u> tries<%u> max<%lu> tot<%lu>" - "dma_buf va<%p> msg<%p> curr_rd<%d>\n", - dhd, ring->name, seqnum, seqnum% D2H_EPOCH_MODULO, tries, + uint32 ring_seqnum = ring->seqnum; + DHD_ERROR(( + "LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>" + " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d>\n", + dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum% D2H_EPOCH_MODULO, tries, dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot, ring->dma_buf.va, msg, ring->curr_rd)); - prhex("D2H MsgBuf Failure", (uchar *)msg, msglen); - dhd_dump_to_kernelog(dhd); + prhex("D2H MsgBuf Failure", (volatile uchar *)msg, msglen); + + dhd_bus_dump_console_buffer(dhd->bus); + dhd_prot_debug_info_print(dhd); #ifdef DHD_FW_COREDUMP if (dhd->memdump_enabled) { @@ -634,6 +734,9 @@ dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 tries, dhd_bus_mem_dump(dhd); } #endif /* DHD_FW_COREDUMP */ + + dhd_schedule_reset(dhd); + #ifdef SUPPORT_LINKDOWN_RECOVERY #ifdef CONFIG_ARCH_MSM dhd->bus->no_cfg_restore = 1; @@ -654,8 +757,9 @@ dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 tries; uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO; int 
num_words = msglen / sizeof(uint32); /* num of 32bit words */ - volatile uint32 *marker = (uint32 *)msg + (num_words - 1); /* last word */ + volatile uint32 *marker = (volatile uint32 *)msg + (num_words - 1); /* last word */ dhd_prot_t *prot = dhd->prot; + uint32 msg_seqnum; uint32 step = 0; uint32 delay = PCIE_D2H_SYNC_DELAY; uint32 total_tries = 0; @@ -683,8 +787,8 @@ dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring, * unwanted extra delay introdcued in normal conditions. */ for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) { - for (tries = 1; tries <= PCIE_D2H_SYNC_WAIT_TRIES; tries++) { - uint32 msg_seqnum = *marker; + for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) { + msg_seqnum = *marker; if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */ ring->seqnum++; /* next expected sequence number */ goto dma_completed; @@ -697,21 +801,20 @@ dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring, OSL_CACHE_INV(msg, msglen); /* invalidate and try again */ OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */ -#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) - /* For ARM there is no pause in cpu_relax, so add extra delay */ - OSL_DELAY(delay * step); -#endif /* defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) */ - } /* for PCIE_D2H_SYNC_WAIT_TRIES */ - } /* for number of steps */ + OSL_DELAY(delay * step); /* Add stepper delay */ - dhd_prot_d2h_sync_livelock(dhd, ring, total_tries, (uchar *)msg, msglen); + } /* for PCIE_D2H_SYNC_WAIT_TRIES */ + } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */ + + dhd_prot_d2h_sync_livelock(dhd, msg_seqnum, ring, total_tries, + (volatile uchar *) msg, msglen); ring->seqnum++; /* skip this message ... 
leak of a pktid */ return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */ dma_completed: - prot->d2h_sync_wait_tot += total_tries; + prot->d2h_sync_wait_tot += tries; return msg->msg_type; } @@ -736,7 +839,6 @@ dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring, ASSERT(msglen == ring->item_len); BCM_REFERENCE(delay); - /* * For retries we have to make some sort of stepper algorithm. * We see that every time when the Dongle comes out of the D3 @@ -757,7 +859,7 @@ dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring, * unwanted extra delay introdcued in normal conditions. */ for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) { - for (tries = 1; tries <= PCIE_D2H_SYNC_WAIT_TRIES; tries++) { + for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) { prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, num_words); if (prot_checksum == 0U) { /* checksum is OK */ if (msg->epoch == ring_seqnum) { @@ -773,22 +875,21 @@ dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring, OSL_CACHE_INV(msg, msglen); /* invalidate and try again */ OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */ -#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) - /* For ARM there is no pause in cpu_relax, so add extra delay */ - OSL_DELAY(delay * step); -#endif /* defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) */ + OSL_DELAY(delay * step); /* Add stepper delay */ } /* for PCIE_D2H_SYNC_WAIT_TRIES */ - } /* for number of steps */ + } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */ - dhd_prot_d2h_sync_livelock(dhd, ring, total_tries, (uchar *)msg, msglen); + DHD_ERROR(("%s: prot_checksum = 0x%x\n", __FUNCTION__, prot_checksum)); + dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries, + (volatile uchar *) msg, msglen); ring->seqnum++; /* skip this message ... 
leak of a pktid */ return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */ dma_completed: - prot->d2h_sync_wait_tot += total_tries; + prot->d2h_sync_wait_tot += tries; return msg->msg_type; } @@ -804,6 +905,17 @@ dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring, return msg->msg_type; } +INLINE void +dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason) +{ + /* To synchronize with the previous memory operations call wmb() */ + OSL_SMP_WMB(); + dhd->prot->ioctl_received = reason; + /* Call another wmb() to make sure before waking up the other event value gets updated */ + OSL_SMP_WMB(); + dhd_os_ioctl_resp_wake(dhd); +} + /** * dhd_prot_d2h_sync_init - Setup the host side DMA sync mode based on what * dongle advertizes. @@ -816,32 +928,26 @@ dhd_prot_d2h_sync_init(dhd_pub_t *dhd) prot->d2h_sync_wait_tot = 0UL; prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL; + prot->d2hring_ctrl_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT; + prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL; + prot->d2hring_tx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT; + prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL; + prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT; if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) { prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum; + DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__)); } else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) { prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum; + DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__)); } else { prot->d2h_sync_cb = dhd_prot_d2h_sync_none; + DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__)); } } -#endif /* PCIE_D2H_SYNC */ - -int INLINE -dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason) -{ - /* To synchronize with the previous memory operations call wmb() */ - OSL_SMP_WMB(); - dhd->prot->ioctl_received = reason; - /* Call another wmb() to make 
sure before waking up the other event value gets updated */ - OSL_SMP_WMB(); - dhd_os_ioctl_resp_wake(dhd); - return 0; -} - /** * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum */ @@ -850,7 +956,10 @@ dhd_prot_h2d_sync_init(dhd_pub_t *dhd) { dhd_prot_t *prot = dhd->prot; prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL; + prot->h2dring_rxp_subn.current_phase = 0; + prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL; + prot->h2dring_ctrl_subn.current_phase = 0; } /* +----------------- End of PCIE DHD H2D DMA SYNC ------------------------+ */ @@ -878,20 +987,19 @@ dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa) static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf) { - uint32 base, end; /* dongle uses 32bit ptr arithmetic */ - + uint32 pa_lowaddr, end; /* dongle uses 32bit ptr arithmetic */ ASSERT(dma_buf); - base = PHYSADDRLO(dma_buf->pa); - ASSERT(base); - ASSERT(ISALIGNED(base, DMA_ALIGN_LEN)); + pa_lowaddr = PHYSADDRLO(dma_buf->pa); + ASSERT(PHYSADDRLO(dma_buf->pa) || PHYSADDRHI(dma_buf->pa)); + ASSERT(ISALIGNED(pa_lowaddr, DMA_ALIGN_LEN)); ASSERT(dma_buf->len != 0); /* test 32bit offset arithmetic over dma buffer for loss of carry-over */ - end = (base + dma_buf->len); /* end address */ + end = (pa_lowaddr + dma_buf->len); /* end address */ - if ((end & 0xFFFFFFFF) < (base & 0xFFFFFFFF)) { /* exclude carryover */ + if ((end & 0xFFFFFFFF) < (pa_lowaddr & 0xFFFFFFFF)) { /* exclude carryover */ DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n", - __FUNCTION__, base, dma_buf->len)); + __FUNCTION__, pa_lowaddr, dma_buf->len)); return BCME_ERROR; } @@ -908,6 +1016,8 @@ dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len) { uint32 dma_pad = 0; osl_t *osh = dhd->osh; + uint16 dma_align = DMA_ALIGN_LEN; + ASSERT(dma_buf != NULL); ASSERT(dma_buf->va == NULL); @@ -918,7 +1028,7 @@ dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len) */ dma_pad = (buf_len % 
DHD_DMA_PAD) ? DHD_DMA_PAD : 0; dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad, - DMA_ALIGN_LEN, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah); + dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah); if (dma_buf->va == NULL) { DHD_ERROR(("%s: buf_len %d, no memory available\n", @@ -944,9 +1054,8 @@ dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len) static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf) { - if ((dma_buf == NULL) || (dma_buf->va == NULL)) { + if ((dma_buf == NULL) || (dma_buf->va == NULL)) return; - } (void)dhd_dma_buf_audit(dhd, dma_buf); @@ -966,9 +1075,8 @@ dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf) ASSERT(dma_buf); - if (dma_buf->va == NULL) { + if (dma_buf->va == NULL) return; /* Allow for free invocation, when alloc failed */ - } /* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */ (void)dhd_dma_buf_audit(dhd, dma_buf); @@ -1014,7 +1122,9 @@ dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf, * +---------------------------------------------------------------------------+ */ #define DHD_PCIE_PKTID -#define MAX_PKTID_ITEMS (3072) /* Maximum number of pktids supported */ +#define MAX_CTRL_PKTID (1024) /* Maximum number of pktids supported */ +#define MAX_RX_PKTID (1024) +#define MAX_TX_PKTID (3072 * 2) /* On Router, the pktptr serves as a pktid. 
*/ @@ -1029,40 +1139,53 @@ typedef enum dhd_pkttype { PKTTYPE_DATA_RX, PKTTYPE_IOCTL_RX, PKTTYPE_EVENT_RX, + PKTTYPE_INFO_RX, /* dhd_prot_pkt_free no check, if pktid reserved and no space avail case */ - PKTTYPE_NO_CHECK + PKTTYPE_NO_CHECK, + PKTTYPE_TSBUF_RX } dhd_pkttype_t; #define DHD_PKTID_INVALID (0U) #define DHD_IOCTL_REQ_PKTID (0xFFFE) #define DHD_FAKE_PKTID (0xFACE) +#define DHD_H2D_DBGRING_REQ_PKTID 0xFFFD +#define DHD_D2H_DBGRING_REQ_PKTID 0xFFFC +#define DHD_H2D_HOSTTS_REQ_PKTID 0xFFFB -#define DHD_PKTID_FREE_LOCKER (FALSE) -#define DHD_PKTID_RSV_LOCKER (TRUE) +#define IS_FLOWRING(ring) \ + ((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0)) typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */ /* Construct a packet id mapping table, returning an opaque map handle */ -static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index); +static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items); /* Destroy a packet id mapping table, freeing all packets active in the table */ static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map); -#define PKTID_MAP_HANDLE (0) -#define PKTID_MAP_HANDLE_IOCTL (1) - -#define DHD_NATIVE_TO_PKTID_INIT(dhd, items, index) dhd_pktid_map_init((dhd), (items), (index)) +#define DHD_NATIVE_TO_PKTID_INIT(dhd, items) dhd_pktid_map_init((dhd), (items)) +#define DHD_NATIVE_TO_PKTID_RESET(dhd, map) dhd_pktid_map_reset((dhd), (map)) #define DHD_NATIVE_TO_PKTID_FINI(dhd, map) dhd_pktid_map_fini((dhd), (map)) +#define DHD_NATIVE_TO_PKTID_FINI_IOCTL(osh, map) dhd_pktid_map_fini_ioctl((osh), (map)) + +#ifdef MACOSX_DHD +#undef DHD_PCIE_PKTID +#define DHD_PCIE_PKTID 1 +#endif /* MACOSX_DHD */ #if defined(DHD_PCIE_PKTID) - +#if defined(MACOSX_DHD) || defined(DHD_EFI) +#define IOCTLRESP_USE_CONSTMEM +static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf); +static int alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t 
*retbuf); +#endif /* Determine number of pktids that are available */ static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle); /* Allocate a unique pktid against which a pkt and some metadata is saved */ static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, - void *pkt); + void *pkt, dhd_pkttype_t pkttype); static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma, void *dmah, void *secdma, dhd_pkttype_t pkttype); @@ -1137,51 +1260,36 @@ static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map, } while (0) #endif /* !USE_DHD_PKTID_LOCK */ -/* Packet metadata saved in packet id mapper */ - -/* The Locker can be 3 states - * LOCKER_IS_FREE - Locker is free and can be allocated - * LOCKER_IS_BUSY - Locker is assigned and is being used, values in the - * locker (buffer address, len, phy addr etc) are populated - * with valid values - * LOCKER_IS_RSVD - The locker is reserved for future use, but the values - * in the locker are not valid. Especially pkt should be - * NULL in this state. When the user wants to re-use the - * locker dhd_pktid_map_free can be called with a flag - * to reserve the pktid for future use, which will clear - * the contents of the locker. 
When the user calls - * dhd_pktid_map_save the locker would move to LOCKER_IS_BUSY - */ typedef enum dhd_locker_state { LOCKER_IS_FREE, LOCKER_IS_BUSY, LOCKER_IS_RSVD } dhd_locker_state_t; +/* Packet metadata saved in packet id mapper */ + typedef struct dhd_pktid_item { dhd_locker_state_t state; /* tag a locker to be free, busy or reserved */ - uint8 dir; /* dma map direction (Tx=flush or Rx=invalidate) */ - dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */ - uint16 len; /* length of mapped packet's buffer */ - void *pkt; /* opaque native pointer to a packet */ - dmaaddr_t pa; /* physical address of mapped packet's buffer */ - void *dmah; /* handle to OS specific DMA map */ - void *secdma; + uint8 dir; /* dma map direction (Tx=flush or Rx=invalidate) */ + dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */ + uint16 len; /* length of mapped packet's buffer */ + void *pkt; /* opaque native pointer to a packet */ + dmaaddr_t pa; /* physical address of mapped packet's buffer */ + void *dmah; /* handle to OS specific DMA map */ + void *secdma; } dhd_pktid_item_t; +typedef uint32 dhd_pktid_key_t; + typedef struct dhd_pktid_map { uint32 items; /* total items in map */ uint32 avail; /* total available items */ int failures; /* lockers unavailable count */ - /* Spinlock to protect dhd_pktid_map in process/tasklet context */ - void *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */ - #if defined(DHD_PKTID_AUDIT_ENABLED) - void *pktid_audit_lock; + void *pktid_audit_lock; struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */ #endif /* DHD_PKTID_AUDIT_ENABLED */ - - uint32 keys[MAX_PKTID_ITEMS + 1]; /* stack of unique pkt ids */ + dhd_pktid_key_t *keys; /* map_items +1 unique pkt ids */ dhd_pktid_item_t lockers[0]; /* metadata storage */ } dhd_pktid_map_t; @@ -1194,47 +1302,46 @@ typedef struct dhd_pktid_map { * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID. 
*/ +#define DHD_PKTID_FREE_LOCKER (FALSE) +#define DHD_PKTID_RSV_LOCKER (TRUE) + #define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t)) #define DHD_PKIDMAP_ITEMS(items) (items) #define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \ - (DHD_PKTID_ITEM_SZ * ((items) + 1))) + (DHD_PKTID_ITEM_SZ * ((items) + 1))) +#define DHD_PKTIDMAP_KEYS_SZ(items) (sizeof(dhd_pktid_key_t) * ((items) + 1)) -#define DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, map) dhd_pktid_map_fini_ioctl((dhd), (map)) +#define DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, map) dhd_pktid_map_reset_ioctl((dhd), (map)) /* Convert a packet to a pktid, and save pkt pointer in busy locker */ -#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt) dhd_pktid_map_reserve((dhd), (map), (pkt)) - +#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) \ + dhd_pktid_map_reserve((dhd), (map), (pkt), (pkttype)) /* Reuse a previously reserved locker to save packet params */ #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \ dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \ - (uint8)(dir), (void *)(dmah), (void *)(secdma), \ - (dhd_pkttype_t)(pkttype)) - + (uint8)(dir), (void *)(dmah), (void *)(secdma), \ + (dhd_pkttype_t)(pkttype)) /* Convert a packet to a pktid, and save packet params in locker */ #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \ dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \ - (uint8)(dir), (void *)(dmah), (void *)(secdma), \ - (dhd_pkttype_t)(pkttype)) + (uint8)(dir), (void *)(dmah), (void *)(secdma), \ + (dhd_pkttype_t)(pkttype)) /* Convert pktid to a packet, and free the locker */ #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \ dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \ - (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \ - (void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER) + (dmaaddr_t *)&(pa), (uint32 *)&(len), (void 
**)&(dmah), \ + (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER) /* Convert the pktid to a packet, empty locker, but keep it reserved */ #define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \ dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \ - (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \ - (void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER) + (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \ + (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER) #define DHD_PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map) #if defined(DHD_PKTID_AUDIT_ENABLED) - -static int dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid, - const int test_for, const char *errmsg); - /** * dhd_pktid_audit - Use the mwbmap to audit validity of a pktid. */ @@ -1243,8 +1350,6 @@ dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid, const int test_for, const char *errmsg) { #define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: " - - const uint32 max_pktid_items = (MAX_PKTID_ITEMS); struct bcm_mwbmap *handle; uint32 flags; bool ignore_audit; @@ -1270,7 +1375,7 @@ dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid, return BCME_OK; } - if ((pktid == DHD_PKTID_INVALID) || (pktid > max_pktid_items)) { + if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) { DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid)); /* lock is released in "error" */ goto error; @@ -1323,7 +1428,7 @@ error: DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags); /* May insert any trap mechanism here ! 
*/ - dhd_pktid_audit_fail_cb(dhd); + dhd_pktid_error_handler(dhd); return BCME_ERROR; } @@ -1331,16 +1436,29 @@ error: #define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \ dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__) -#endif /* DHD_PKTID_AUDIT_ENABLED */ +static int +dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map, uint32 pktid, + const int test_for, void *msg, uint32 msg_len, const char * func) +{ + int ret = 0; + ret = DHD_PKTID_AUDIT(dhdp, map, pktid, test_for); + if (ret == BCME_ERROR) { + prhex(func, (uchar *)msg, msg_len); + } + return ret; +} +#define DHD_PKTID_AUDIT_RING_DEBUG(dhdp, map, pktid, test_for, msg, msg_len) \ + dhd_pktid_audit_ring_debug((dhdp), (dhd_pktid_map_t *)(map), \ + (pktid), (test_for), msg, msg_len, __FUNCTION__) -/* +------------------ End of PCIE DHD PKTID AUDIT ------------------------+ */ +#endif /* DHD_PKTID_AUDIT_ENABLED */ /** * +---------------------------------------------------------------------------+ * Packet to Packet Id mapper using a paradigm. * - * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_PKTID_ITEMS]. + * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_xxx_PKTID]. * * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique * packet id is returned. 
This unique packet id may be used to retrieve the @@ -1359,66 +1477,50 @@ error: /** Allocate and initialize a mapper of num_items */ static dhd_pktid_map_handle_t * -dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index) +dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items) { - void *osh; + void* osh; uint32 nkey; dhd_pktid_map_t *map; uint32 dhd_pktid_map_sz; uint32 map_items; -#ifdef DHD_USE_STATIC_PKTIDMAP - uint32 section; -#endif /* DHD_USE_STATIC_PKTIDMAP */ + uint32 map_keys_sz; osh = dhd->osh; - ASSERT((num_items >= 1) && (num_items <= MAX_PKTID_ITEMS)); dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items); -#ifdef DHD_USE_STATIC_PKTIDMAP - if (index == PKTID_MAP_HANDLE) { - section = DHD_PREALLOC_PKTID_MAP; - } else { - section = DHD_PREALLOC_PKTID_MAP_IOCTL; - } - - map = (dhd_pktid_map_t *)DHD_OS_PREALLOC(dhd, section, dhd_pktid_map_sz); -#else - map = (dhd_pktid_map_t *)MALLOC(osh, dhd_pktid_map_sz); -#endif /* DHD_USE_STATIC_PKTIDMAP */ - + map = (dhd_pktid_map_t *)VMALLOC(osh, dhd_pktid_map_sz); if (map == NULL) { DHD_ERROR(("%s:%d: MALLOC failed for size %d\n", __FUNCTION__, __LINE__, dhd_pktid_map_sz)); - goto error; + return (dhd_pktid_map_handle_t *)NULL; } - bzero(map, dhd_pktid_map_sz); - /* Initialize the lock that protects this structure */ - map->pktid_lock = DHD_PKTID_LOCK_INIT(osh); - if (map->pktid_lock == NULL) { - DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__)); - goto error; - } - map->items = num_items; map->avail = num_items; map_items = DHD_PKIDMAP_ITEMS(map->items); -#if defined(DHD_PKTID_AUDIT_ENABLED) - /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */ - map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1); - if (map->pktid_audit == (struct bcm_mwbmap *)NULL) { - DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__)); + map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items); + map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz); + if (map->keys == NULL) { + 
DHD_ERROR(("%s:%d: MALLOC failed for map->keys size %d\n", + __FUNCTION__, __LINE__, map_keys_sz)); goto error; - } else { - DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n", - __FUNCTION__, __LINE__, map_items + 1)); } - map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh); - +#if defined(DHD_PKTID_AUDIT_ENABLED) + /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */ + map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1); + if (map->pktid_audit == (struct bcm_mwbmap *)NULL) { + DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__)); + goto error; + } else { + DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n", + __FUNCTION__, __LINE__, map_items + 1)); + } + map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh); #endif /* DHD_PKTID_AUDIT_ENABLED */ for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */ @@ -1428,8 +1530,8 @@ dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index) map->lockers[nkey].len = 0; } - /* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be busy */ - map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; + /* Reserve pktid #0, i.e. 
DHD_PKTID_INVALID to be inuse */ + map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; /* tag locker #0 as inuse */ map->lockers[DHD_PKTID_INVALID].pkt = NULL; /* bzero: redundant */ map->lockers[DHD_PKTID_INVALID].len = 0; @@ -1441,9 +1543,7 @@ dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index) return (dhd_pktid_map_handle_t *)map; /* opaque handle */ error: - if (map) { - #if defined(DHD_PKTID_AUDIT_ENABLED) if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */ @@ -1452,13 +1552,11 @@ error: DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock); } #endif /* DHD_PKTID_AUDIT_ENABLED */ - - if (map->pktid_lock) - DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock); - - MFREE(osh, map, dhd_pktid_map_sz); + if (map->keys) { + MFREE(osh, map->keys, map_keys_sz); + } + VMFREE(osh, map, dhd_pktid_map_sz); } - return (dhd_pktid_map_handle_t *)NULL; } @@ -1467,159 +1565,169 @@ error: * Freeing implies: unmapping the buffers and freeing the native packet * This could have been a callback registered with the pktid mapper. 
*/ - static void -dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) +dhd_pktid_map_reset(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) { void *osh; uint32 nkey; dhd_pktid_map_t *map; - uint32 dhd_pktid_map_sz; dhd_pktid_item_t *locker; uint32 map_items; uint32 flags; - - if (handle == NULL) { - return; - } + bool data_tx = FALSE; map = (dhd_pktid_map_t *)handle; - flags = DHD_PKTID_LOCK(map->pktid_lock); + DHD_GENERAL_LOCK(dhd, flags); osh = dhd->osh; - dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items); - - nkey = 1; /* skip reserved KEY #0, and start from 1 */ - locker = &map->lockers[nkey]; - map_items = DHD_PKIDMAP_ITEMS(map->items); + /* skip reserved KEY #0, and start from 1 */ - for (; nkey <= map_items; nkey++, locker++) { - - if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */ - - locker->state = LOCKER_IS_FREE; /* force open the locker */ - -#if defined(DHD_PKTID_AUDIT_ENABLED) - DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */ -#endif /* DHD_PKTID_AUDIT_ENABLED */ - - { /* This could be a callback registered with dhd_pktid_map */ - DMA_UNMAP(osh, locker->pa, locker->len, - locker->dir, 0, DHD_DMAH_NULL); - dhd_prot_packet_free(dhd, (ulong*)locker->pkt, - locker->pkttype, TRUE); + for (nkey = 1; nkey <= map_items; nkey++) { + if (map->lockers[nkey].state == LOCKER_IS_BUSY) { + locker = &map->lockers[nkey]; + locker->state = LOCKER_IS_FREE; + data_tx = (locker->pkttype == PKTTYPE_DATA_TX); + if (data_tx) { + dhd->prot->active_tx_count--; } + +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */ +#endif /* DHD_PKTID_AUDIT_RING */ + + { + if (SECURE_DMA_ENAB(dhd->osh)) + SECURE_DMA_UNMAP(osh, locker->pa, + locker->len, locker->dir, 0, + locker->dmah, locker->secdma, 0); + else + DMA_UNMAP(osh, locker->pa, locker->len, + locker->dir, 0, locker->dmah); + } + dhd_prot_packet_free(dhd, (ulong*)locker->pkt, + locker->pkttype, data_tx); } -#if 
defined(DHD_PKTID_AUDIT_ENABLED) else { +#ifdef DHD_PKTID_AUDIT_RING DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE); +#endif /* DHD_PKTID_AUDIT_RING */ } -#endif /* DHD_PKTID_AUDIT_ENABLED */ - - locker->pkt = NULL; /* clear saved pkt */ - locker->len = 0; + map->keys[nkey] = nkey; /* populate with unique keys */ } -#if defined(DHD_PKTID_AUDIT_ENABLED) - if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { - bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */ - map->pktid_audit = (struct bcm_mwbmap *)NULL; - if (map->pktid_audit_lock) { - DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock); - } - } -#endif /* DHD_PKTID_AUDIT_ENABLED */ - - DHD_PKTID_UNLOCK(map->pktid_lock, flags); - DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock); - -#ifdef DHD_USE_STATIC_PKTIDMAP - DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz); -#else - MFREE(osh, handle, dhd_pktid_map_sz); -#endif /* DHD_USE_STATIC_PKTIDMAP */ + map->avail = map_items; + memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items); + DHD_GENERAL_UNLOCK(dhd, flags); } #ifdef IOCTLRESP_USE_CONSTMEM /** Called in detach scenario. Releasing IOCTL buffers. 
*/ static void -dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) +dhd_pktid_map_reset_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) { uint32 nkey; dhd_pktid_map_t *map; - uint32 dhd_pktid_map_sz; dhd_pktid_item_t *locker; uint32 map_items; uint32 flags; - osl_t *osh = dhd->osh; - - if (handle == NULL) { - return; - } map = (dhd_pktid_map_t *)handle; - flags = DHD_PKTID_LOCK(map->pktid_lock); - - dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items); - - nkey = 1; /* skip reserved KEY #0, and start from 1 */ - locker = &map->lockers[nkey]; + DHD_GENERAL_LOCK(dhd, flags); map_items = DHD_PKIDMAP_ITEMS(map->items); + /* skip reserved KEY #0, and start from 1 */ + for (nkey = 1; nkey <= map_items; nkey++) { + if (map->lockers[nkey].state == LOCKER_IS_BUSY) { + dhd_dma_buf_t retbuf; - for (; nkey <= map_items; nkey++, locker++) { - - if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */ - - locker->state = LOCKER_IS_FREE; /* force open the locker */ - -#if defined(DHD_PKTID_AUDIT_ENABLED) +#ifdef DHD_PKTID_AUDIT_RING DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */ -#endif /* DHD_PKTID_AUDIT_ENABLED */ +#endif /* DHD_PKTID_AUDIT_RING */ - { - dhd_dma_buf_t retbuf; - retbuf.va = locker->pkt; - retbuf.len = locker->len; - retbuf.pa = locker->pa; - retbuf.dmah = locker->dmah; - retbuf.secdma = locker->secdma; + locker = &map->lockers[nkey]; + retbuf.va = locker->pkt; + retbuf.len = locker->len; + retbuf.pa = locker->pa; + retbuf.dmah = locker->dmah; + retbuf.secdma = locker->secdma; - /* This could be a callback registered with dhd_pktid_map */ - DHD_PKTID_UNLOCK(map->pktid_lock, flags); - free_ioctl_return_buffer(dhd, &retbuf); - flags = DHD_PKTID_LOCK(map->pktid_lock); - } + /* This could be a callback registered with dhd_pktid_map */ + DHD_GENERAL_UNLOCK(dhd, flags); + free_ioctl_return_buffer(dhd, &retbuf); + DHD_GENERAL_LOCK(dhd, flags); } -#if defined(DHD_PKTID_AUDIT_ENABLED) else { +#ifdef 
DHD_PKTID_AUDIT_RING DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE); +#endif /* DHD_PKTID_AUDIT_RING */ } -#endif /* DHD_PKTID_AUDIT_ENABLED */ - - locker->pkt = NULL; /* clear saved pkt */ - locker->len = 0; + map->keys[nkey] = nkey; /* populate with unique keys */ } + map->avail = map_items; + memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items); + DHD_GENERAL_UNLOCK(dhd, flags); +} +#endif /* IOCTLRESP_USE_CONSTMEM */ + + +/** + * Free the pktid map. + */ +static void +dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) +{ + dhd_pktid_map_t *map; + uint32 dhd_pktid_map_sz; + uint32 map_keys_sz; + + /* Free any pending packets */ + dhd_pktid_map_reset(dhd, handle); + + map = (dhd_pktid_map_t *)handle; + dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items); + map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items); + #if defined(DHD_PKTID_AUDIT_ENABLED) if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { - bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */ + bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */ map->pktid_audit = (struct bcm_mwbmap *)NULL; if (map->pktid_audit_lock) { - DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock); + DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock); + } + } +#endif /* DHD_PKTID_AUDIT_ENABLED */ + MFREE(dhd->osh, map->keys, map_keys_sz); + VMFREE(dhd->osh, handle, dhd_pktid_map_sz); +} +#ifdef IOCTLRESP_USE_CONSTMEM +static void +dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) +{ + dhd_pktid_map_t *map; + uint32 dhd_pktid_map_sz; + uint32 map_keys_sz; + + /* Free any pending packets */ + dhd_pktid_map_reset_ioctl(dhd, handle); + + map = (dhd_pktid_map_t *)handle; + dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items); + map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items); + +#if defined(DHD_PKTID_AUDIT_ENABLED) + if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { + bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */ + 
map->pktid_audit = (struct bcm_mwbmap *)NULL; + if (map->pktid_audit_lock) { + DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock); } } #endif /* DHD_PKTID_AUDIT_ENABLED */ - DHD_PKTID_UNLOCK(map->pktid_lock, flags); - DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock); - -#ifdef DHD_USE_STATIC_PKTIDMAP - DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz); -#else - MFREE(osh, handle, dhd_pktid_map_sz); -#endif /* DHD_USE_STATIC_PKTIDMAP */ + MFREE(dhd->osh, map->keys, map_keys_sz); + VMFREE(dhd->osh, handle, dhd_pktid_map_sz); } #endif /* IOCTLRESP_USE_CONSTMEM */ @@ -1628,28 +1736,26 @@ static INLINE uint32 BCMFASTPATH dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle) { dhd_pktid_map_t *map; - uint32 flags; uint32 avail; ASSERT(handle != NULL); map = (dhd_pktid_map_t *)handle; - flags = DHD_PKTID_LOCK(map->pktid_lock); avail = map->avail; - DHD_PKTID_UNLOCK(map->pktid_lock, flags); return avail; } /** - * Allocate locker, save pkt contents, and return the locker's numbered key. - * dhd_pktid_map_alloc() is not reentrant, and is the caller's responsibility. - * Caller must treat a returned value DHD_PKTID_INVALID as a failure case, - * implying a depleted pool of pktids. + * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not + * yet populated. Invoke the pktid save api to populate the packet parameters + * into the locker. This function is not reentrant, and is the caller's + * responsibility. Caller must treat a returned value DHD_PKTID_INVALID as + * a failure case, implying a depleted pool of pktids. 
*/ - static INLINE uint32 -__dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt) +dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, + void *pkt, dhd_pkttype_t pkttype) { uint32 nkey; dhd_pktid_map_t *map; @@ -1658,7 +1764,7 @@ __dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pk ASSERT(handle != NULL); map = (dhd_pktid_map_t *)handle; - if (map->avail <= 0) { /* no more pktids to allocate */ + if ((int)(map->avail) <= 0) { /* no more pktids to allocate */ map->failures++; DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__)); return DHD_PKTID_INVALID; /* failed alloc request */ @@ -1666,45 +1772,32 @@ __dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pk ASSERT(map->avail <= map->items); nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */ + + if ((map->avail > map->items) || (nkey > map->items)) { + map->failures++; + DHD_ERROR(("%s:%d: failed to allocate a new pktid," + " map->avail<%u>, nkey<%u>, pkttype<%u>\n", + __FUNCTION__, __LINE__, map->avail, nkey, + pkttype)); + return DHD_PKTID_INVALID; /* failed alloc request */ + } + locker = &map->lockers[nkey]; /* save packet metadata in locker */ map->avail--; locker->pkt = pkt; /* pkt is saved, other params not yet saved. */ locker->len = 0; locker->state = LOCKER_IS_BUSY; /* reserve this locker */ -#if defined(DHD_PKTID_AUDIT_MAP) - DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_ALLOC); /* Audit duplicate alloc */ -#endif /* DHD_PKTID_AUDIT_MAP */ - ASSERT(nkey != DHD_PKTID_INVALID); return nkey; /* return locker's numbered key */ } - -/** - * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not - * yet populated. Invoke the pktid save api to populate the packet parameters - * into the locker. - * Wrapper that takes the required lock when called directly. 
+/* + * dhd_pktid_map_save - Save a packet's parameters into a locker + * corresponding to a previously reserved unique numbered key. */ -static INLINE uint32 -dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt) -{ - dhd_pktid_map_t *map; - uint32 flags; - uint32 ret; - - ASSERT(handle != NULL); - map = (dhd_pktid_map_t *)handle; - flags = DHD_PKTID_LOCK(map->pktid_lock); - ret = __dhd_pktid_map_reserve(dhd, handle, pkt); - DHD_PKTID_UNLOCK(map->pktid_lock, flags); - - return ret; -} - static INLINE void -__dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt, +dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma, dhd_pkttype_t pkttype) { @@ -1714,17 +1807,26 @@ __dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt, ASSERT(handle != NULL); map = (dhd_pktid_map_t *)handle; - ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items))); + if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) { + DHD_ERROR(("%s:%d: Error! 
saving invalid pktid<%u> pkttype<%u>\n", + __FUNCTION__, __LINE__, nkey, pkttype)); +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_PKTID_INVALID; + dhd_bus_mem_dump(dhd); + } +#else + ASSERT(0); +#endif /* DHD_FW_COREDUMP */ + return; + } locker = &map->lockers[nkey]; ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) || ((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL))); -#if defined(DHD_PKTID_AUDIT_MAP) - DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_ALLOC); /* apriori, reservation */ -#endif /* DHD_PKTID_AUDIT_MAP */ - /* store contents in locker */ locker->dir = dir; locker->pa = pa; @@ -1736,27 +1838,6 @@ __dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt, locker->state = LOCKER_IS_BUSY; /* make this locker busy */ } -/** - * dhd_pktid_map_save - Save a packet's parameters into a locker corresponding - * to a previously reserved unique numbered key. - * Wrapper that takes the required lock when called directly. - */ -static INLINE void -dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt, - uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma, - dhd_pkttype_t pkttype) -{ - dhd_pktid_map_t *map; - uint32 flags; - - ASSERT(handle != NULL); - map = (dhd_pktid_map_t *)handle; - flags = DHD_PKTID_LOCK(map->pktid_lock); - __dhd_pktid_map_save(dhd, handle, pkt, nkey, pa, len, - dir, dmah, secdma, pkttype); - DHD_PKTID_UNLOCK(map->pktid_lock, flags); -} - /** * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet * contents into the corresponding locker. Return the numbered key. 
@@ -1767,25 +1848,13 @@ dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt, dhd_pkttype_t pkttype) { uint32 nkey; - uint32 flags; - dhd_pktid_map_t *map; - ASSERT(handle != NULL); - map = (dhd_pktid_map_t *)handle; - - flags = DHD_PKTID_LOCK(map->pktid_lock); - - nkey = __dhd_pktid_map_reserve(dhd, handle, pkt); + nkey = dhd_pktid_map_reserve(dhd, handle, pkt, pkttype); if (nkey != DHD_PKTID_INVALID) { - __dhd_pktid_map_save(dhd, handle, pkt, nkey, pa, + dhd_pktid_map_save(dhd, handle, pkt, nkey, pa, len, dir, dmah, secdma, pkttype); -#if defined(DHD_PKTID_AUDIT_MAP) - DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_ALLOC); /* apriori, reservation */ -#endif /* DHD_PKTID_AUDIT_MAP */ } - DHD_PKTID_UNLOCK(map->pktid_lock, flags); - return nkey; } @@ -1797,21 +1866,32 @@ dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt, */ static void * BCMFASTPATH dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey, - dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, - dhd_pkttype_t pkttype, bool rsv_locker) + dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, dhd_pkttype_t pkttype, + bool rsv_locker) { dhd_pktid_map_t *map; dhd_pktid_item_t *locker; void * pkt; - uint32 flags; + unsigned long long locker_addr; ASSERT(handle != NULL); map = (dhd_pktid_map_t *)handle; - flags = DHD_PKTID_LOCK(map->pktid_lock); - - ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items))); + if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) { + DHD_ERROR(("%s:%d: Error! 
Try to free invalid pktid<%u>, pkttype<%d>\n", + __FUNCTION__, __LINE__, nkey, pkttype)); +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_PKTID_INVALID; + dhd_bus_mem_dump(dhd); + } +#else + ASSERT(0); +#endif /* DHD_FW_COREDUMP */ + return NULL; + } locker = &map->lockers[nkey]; @@ -1819,12 +1899,19 @@ dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey, DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */ #endif /* DHD_PKTID_AUDIT_MAP */ - if (locker->state == LOCKER_IS_FREE) { /* Debug check for cloned numbered key */ - DHD_ERROR(("%s:%d: Error! freeing invalid pktid<%u>\n", - __FUNCTION__, __LINE__, nkey)); - ASSERT(locker->state != LOCKER_IS_FREE); - - DHD_PKTID_UNLOCK(map->pktid_lock, flags); + /* Debug check for cloned numbered key */ + if (locker->state == LOCKER_IS_FREE) { + DHD_ERROR(("%s:%d: Error! freeing already freed invalid pktid<%u>\n", + __FUNCTION__, __LINE__, nkey)); +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_PKTID_INVALID; + dhd_bus_mem_dump(dhd); + } +#else + ASSERT(0); +#endif /* DHD_FW_COREDUMP */ return NULL; } @@ -1834,12 +1921,26 @@ dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey, */ if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) { - DHD_PKTID_UNLOCK(map->pktid_lock, flags); - DHD_ERROR(("%s:%d: Error! 
Invalid Buffer Free for pktid<%u> \n", __FUNCTION__, __LINE__, nkey)); - ASSERT(locker->pkttype == pkttype); - +#ifdef BCMDMA64OSL + PHYSADDRTOULONG(locker->pa, locker_addr); +#else + locker_addr = PHYSADDRLO(locker->pa); +#endif /* BCMDMA64OSL */ + DHD_ERROR(("%s:%d: locker->state <%d>, locker->pkttype <%d>," + "pkttype <%d> locker->pa <0x%llx> \n", + __FUNCTION__, __LINE__, locker->state, locker->pkttype, + pkttype, locker_addr)); +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_PKTID_INVALID; + dhd_bus_mem_dump(dhd); + } +#else + ASSERT(0); +#endif /* DHD_FW_COREDUMP */ return NULL; } @@ -1865,7 +1966,6 @@ dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey, locker->pkt = NULL; /* Clear pkt */ locker->len = 0; - DHD_PKTID_UNLOCK(map->pktid_lock, flags); return pkt; } @@ -1899,7 +1999,7 @@ static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pkt dhd_pkttype_t pkttype); static dhd_pktid_map_handle_t * -dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index) +dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items) { osl_t *osh = dhd->osh; pktlists_t *handle = NULL; @@ -1962,9 +2062,8 @@ dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map) pktlists_t *handle = (pktlists_t *) map; ASSERT(handle != NULL); - if (handle == (pktlists_t *)NULL) { + if (handle == (pktlists_t *)NULL) return; - } if (handle->ctrl_pkt_list) { PKTLIST_FINI(handle->ctrl_pkt_list); @@ -2037,7 +2136,7 @@ dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32, return pktptr32; } -#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt) DHD_PKTID32(pkt) +#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) DHD_PKTID32(pkt) #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \ ({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \ @@ -2106,6 +2205,10 @@ dhd_prot_attach(dhd_pub_t *dhd) /* DMAing ring completes 
supported? FALSE by default */ dhd->dma_d2h_ring_upd_support = FALSE; dhd->dma_h2d_ring_upd_support = FALSE; + dhd->dma_ring_upd_overwrite = FALSE; + + dhd->idma_inited = 0; + dhd->ifrm_inited = 0; /* Common Ring Allocations */ @@ -2171,8 +2274,20 @@ dhd_prot_attach(dhd_pub_t *dhd) goto fail; } + /* Host TS request buffer one buffer for now */ + if (dhd_dma_buf_alloc(dhd, &prot->hostts_req_buf, CTRLSUB_HOSTTS_MEESAGE_SIZE)) { + goto fail; + } + prot->hostts_req_buf_inuse = FALSE; + /* Scratch buffer for dma rx offset */ - if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN)) { +#ifdef BCM_HOST_BUF + if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, + ROUNDUP(DMA_D2H_SCRATCH_BUF_LEN, 16) + DMA_HOST_BUFFER_LEN)) +#else + if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN)) +#endif /* BCM_HOST_BUF */ + { goto fail; } @@ -2185,7 +2300,28 @@ dhd_prot_attach(dhd_pub_t *dhd) dhd_rxchain_reset(&prot->rxchain); #endif -#if defined(DHD_LB) + prot->rx_lock = dhd_os_spin_lock_init(dhd->osh); + + prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_CTRL_PKTID); + if (prot->pktid_ctrl_map == NULL) { + goto fail; + } + + prot->pktid_rx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_RX_PKTID); + if (prot->pktid_rx_map == NULL) + goto fail; + + prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_TX_PKTID); + if (prot->pktid_tx_map == NULL) + goto fail; + +#ifdef IOCTLRESP_USE_CONSTMEM + prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd, + DHD_FLOWRING_MAX_IOCTLRESPBUF_POST); + if (prot->pktid_map_handle_ioctl == NULL) { + goto fail; + } +#endif /* IOCTLRESP_USE_CONSTMEM */ /* Initialize the work queues to be used by the Load Balancing logic */ #if defined(DHD_LB_TXC) @@ -2203,7 +2339,7 @@ dhd_prot_attach(dhd_pub_t *dhd) #if defined(DHD_LB_RXC) { void *buffer; - buffer = MALLOC(dhd->osh, sizeof(uint32) * DHD_LB_WORKQ_SZ); + buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ); 
bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons, buffer, DHD_LB_WORKQ_SZ); prot->rx_compl_prod_sync = 0; @@ -2211,8 +2347,11 @@ dhd_prot_attach(dhd_pub_t *dhd) __FUNCTION__, buffer, DHD_LB_WORKQ_SZ)); } #endif /* DHD_LB_RXC */ - -#endif /* DHD_LB */ + /* Initialize trap buffer */ + if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, BCMPCIE_EXT_TRAP_DATA_MAXLEN)) { + DHD_ERROR(("%s: dhd_init_trap_buffer falied\n", __FUNCTION__)); + goto fail; + } return BCME_OK; @@ -2227,6 +2366,93 @@ fail: return BCME_NOMEM; } /* dhd_prot_attach */ +void +dhd_set_host_cap(dhd_pub_t *dhd) +{ + uint32 data = 0; + dhd_prot_t *prot = dhd->prot; + + if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) { + if (dhd->h2d_phase_supported) { + + data |= HOSTCAP_H2D_VALID_PHASE; + + if (dhd->force_dongletrap_on_bad_h2d_phase) { + data |= HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE; + } + } + if (prot->host_ipc_version > prot->device_ipc_version) { + prot->active_ipc_version = prot->device_ipc_version; + } else { + prot->active_ipc_version = prot->host_ipc_version; + } + + data |= prot->active_ipc_version; + + if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) { + + DHD_INFO(("Advertise Hostready Capability\n")); + + data |= HOSTCAP_H2D_ENABLE_HOSTRDY; + } +#ifdef PCIE_INB_DW + if (dhdpcie_bus_get_pcie_inband_dw_supported(dhd->bus)) { + DHD_INFO(("Advertise Inband-DW Capability\n")); + data |= HOSTCAP_DS_INBAND_DW; + data |= HOSTCAP_DS_NO_OOB_DW; + dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_INB); + } else +#endif /* PCIE_INB_DW */ +#ifdef PCIE_OOB + if (dhdpcie_bus_get_pcie_oob_dw_supported(dhd->bus)) { + dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_OOB); + } else +#endif /* PCIE_OOB */ + { + /* Disable DS altogether */ + data |= HOSTCAP_DS_NO_OOB_DW; + dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE); + } + + if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) { + + DHD_ERROR(("IDMA inited\n")); + data |= HOSTCAP_H2D_IDMA; + dhd->idma_inited = TRUE; + } + + if 
(dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) { + DHD_ERROR(("IFRM Inited\n")); + data |= HOSTCAP_H2D_IFRM; + dhd->ifrm_inited = TRUE; + dhd->dma_h2d_ring_upd_support = FALSE; + dhd_prot_dma_indx_free(dhd); + } + + /* Indicate support for TX status metadata */ + data |= HOSTCAP_TXSTATUS_METADATA; + + /* Indicate support for extended trap data */ + data |= HOSTCAP_EXTENDED_TRAP_DATA; + + DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n", + __FUNCTION__, + prot->active_ipc_version, prot->host_ipc_version, + prot->device_ipc_version)); + + dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0); + dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa, + sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0); + } +#ifdef HOFFLOAD_MODULES + dhd_bus_cmn_writeshared(dhd->bus, &dhd->hmem.data_addr, + sizeof(dhd->hmem.data_addr), WRT_HOST_MODULE_ADDR, 0); +#endif + +#ifdef DHD_TIMESYNC + dhd_timesync_notify_ipc_rev(dhd->ts, prot->active_ipc_version); +#endif /* DHD_TIMESYNC */ +} /** * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has @@ -2240,43 +2466,18 @@ dhd_prot_init(dhd_pub_t *dhd) { sh_addr_t base_addr; dhd_prot_t *prot = dhd->prot; + int ret = 0; - /* PKTID handle INIT */ - if (prot->pktid_map_handle != NULL) { - DHD_ERROR(("%s: pktid_map_handle already set!\n", __FUNCTION__)); - ASSERT(0); - return BCME_ERROR; - } + /** + * A user defined value can be assigned to global variable h2d_max_txpost via + * 1. DHD IOVAR h2d_max_txpost, before firmware download + * 2. module parameter h2d_max_txpost + * prot->h2d_max_txpost is assigned with H2DRING_TXPOST_MAX_ITEM, + * if user has not defined any buffers by one of the above methods. 
+ */ + prot->h2d_max_txpost = (uint16)h2d_max_txpost; -#ifdef IOCTLRESP_USE_CONSTMEM - if (prot->pktid_map_handle_ioctl != NULL) { - DHD_ERROR(("%s: pktid_map_handle_ioctl already set!\n", __FUNCTION__)); - ASSERT(0); - return BCME_ERROR; - } -#endif /* IOCTLRESP_USE_CONSTMEM */ - - prot->pktid_map_handle = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_ITEMS, PKTID_MAP_HANDLE); - if (prot->pktid_map_handle == NULL) { - DHD_ERROR(("%s: Unable to map packet id's\n", __FUNCTION__)); - ASSERT(0); - return BCME_NOMEM; - } - -#ifdef IOCTLRESP_USE_CONSTMEM - prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd, - DHD_FLOWRING_MAX_IOCTLRESPBUF_POST, PKTID_MAP_HANDLE_IOCTL); - if (prot->pktid_map_handle_ioctl == NULL) { - DHD_ERROR(("%s: Unable to map ioctl response buffers\n", __FUNCTION__)); - ASSERT(0); - return BCME_NOMEM; - } -#endif /* IOCTLRESP_USE_CONSTMEM */ - - /* Max pkts in ring */ - prot->max_tx_count = H2DRING_TXPOST_MAX_ITEM; - - DHD_INFO(("%s:%d: MAX_TX_COUNT = %d\n", __FUNCTION__, __LINE__, prot->max_tx_count)); + DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost)); /* Read max rx packets supported by dongle */ dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0); @@ -2290,6 +2491,8 @@ dhd_prot_init(dhd_pub_t *dhd) /* Initialize. bzero() would blow away the dma pointers. 
*/ prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST; prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST; + prot->max_infobufpost = DHD_H2D_INFORING_MAX_BUF_POST; + prot->max_tsbufpost = DHD_MAX_TSBUF_POST; prot->cur_ioctlresp_bufs_posted = 0; prot->active_tx_count = 0; @@ -2299,7 +2502,8 @@ dhd_prot_init(dhd_pub_t *dhd) prot->cur_event_bufs_posted = 0; prot->ioctl_state = 0; prot->curr_ioctl_cmd = 0; - prot->ioctl_received = IOCTL_WAIT; + prot->cur_ts_bufs_posted = 0; + prot->infobufpost = 0; prot->dmaxfer.srcmem.va = NULL; prot->dmaxfer.dstmem.va = NULL; @@ -2310,26 +2514,53 @@ dhd_prot_init(dhd_pub_t *dhd) prot->tx_metadata_offset = 0; prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT; - prot->ioctl_trans_id = 0; + /* To catch any rollover issues fast, starting with higher ioctl_trans_id */ + prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER; + prot->ioctl_state = 0; + prot->ioctl_status = 0; + prot->ioctl_resplen = 0; + prot->ioctl_received = IOCTL_WAIT; /* Register the interrupt function upfront */ /* remove corerev checks in data path */ prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus); + prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus); + /* Initialize Common MsgBuf Rings */ + prot->device_ipc_version = dhd->bus->api.fw_rev; + prot->host_ipc_version = PCIE_SHARED_VERSION; + + /* Init the host API version */ + dhd_set_host_cap(dhd); + dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn); dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn); dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln); + + /* Make it compatibile with pre-rev7 Firmware */ + if (prot->active_ipc_version < PCIE_SHARED_VERSION_7) { + prot->d2hring_tx_cpln.item_len = + D2HRING_TXCMPLT_ITEMSIZE_PREREV7; + prot->d2hring_rx_cpln.item_len = + D2HRING_RXCMPLT_ITEMSIZE_PREREV7; + } dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln); dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln); -#if defined(PCIE_D2H_SYNC) dhd_prot_d2h_sync_init(dhd); 
-#endif /* PCIE_D2H_SYNC */ dhd_prot_h2d_sync_init(dhd); +#ifdef PCIE_INB_DW + /* Set the initial DS state */ + if (INBAND_DW_ENAB(dhd->bus)) { + dhdpcie_bus_set_pcie_inband_dw_state(dhd->bus, + DW_DEVICE_DS_ACTIVE); + } +#endif /* PCIE_INB_DW */ + /* init the scratch buffer */ dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa); dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), @@ -2340,7 +2571,7 @@ dhd_prot_init(dhd_pub_t *dhd) /* If supported by the host, indicate the memory block * for completion writes / submission reads to shared space */ - if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) { + if (dhd->dma_d2h_ring_upd_support) { dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa); dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), D2H_DMA_INDX_WR_BUF, 0); @@ -2349,15 +2580,19 @@ dhd_prot_init(dhd_pub_t *dhd) H2D_DMA_INDX_RD_BUF, 0); } - if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) { + if (dhd->dma_h2d_ring_upd_support || IDMA_ENAB(dhd)) { dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa); dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), H2D_DMA_INDX_WR_BUF, 0); dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa); dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), D2H_DMA_INDX_RD_BUF, 0); + } + /* Signal to the dongle that common ring init is complete */ + dhd_bus_hostready(dhd->bus); + /* * If the DMA-able buffers for flowring needs to come from a specific * contiguous memory region, then setup prot->flowrings_dma_buf here. 
@@ -2370,6 +2605,24 @@ dhd_prot_init(dhd_pub_t *dhd) return BCME_ERROR; } + /* If IFRM is enabled, wait for FW to setup the DMA channel */ + if (IFRM_ENAB(dhd)) { + dhd_base_addr_htolpa(&base_addr, prot->h2d_ifrm_indx_wr_buf.pa); + dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), + H2D_IFRM_INDX_WR_BUF, 0); + } + + /* See if info rings could be created */ + if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) { + if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) { + /* For now log and proceed, further clean up action maybe necessary + * when we have more clarity. + */ + DHD_ERROR(("%s Info rings couldn't be created: Err Code%d", + __FUNCTION__, ret)); + } + } + /* Host should configure soft doorbells if needed ... here */ /* Post to dongle host configured soft doorbells */ @@ -2378,6 +2631,8 @@ dhd_prot_init(dhd_pub_t *dhd) /* Post buffers for packet reception and ioctl/event responses */ dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */ dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd); + /* Fix re-entry problem without general lock */ + atomic_set(&dhd_msgbuf_rxbuf_post_event_bufs_running, 0); dhd_msgbuf_rxbuf_post_event_bufs(dhd); return BCME_OK; @@ -2388,8 +2643,7 @@ dhd_prot_init(dhd_pub_t *dhd) * dhd_prot_detach - PCIE FD protocol layer destructor. 
* Unlink, frees allocated protocol memory (including dhd_prot) */ -void -dhd_prot_detach(dhd_pub_t *dhd) +void dhd_prot_detach(dhd_pub_t *dhd) { dhd_prot_t *prot = dhd->prot; @@ -2399,9 +2653,11 @@ dhd_prot_detach(dhd_pub_t *dhd) /* free up all DMA-able buffers allocated during prot attach/init */ dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf); - dhd_dma_buf_free(dhd, &prot->retbuf); /* ioctl return buffer */ + dhd_dma_buf_free(dhd, &prot->retbuf); dhd_dma_buf_free(dhd, &prot->ioctbuf); dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf); + dhd_dma_buf_free(dhd, &prot->hostts_req_buf); + dhd_dma_buf_free(dhd, &prot->fw_trap_buf); /* DMA-able buffers for DMAing H2D/D2H WR/RD indices */ dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf); @@ -2409,6 +2665,8 @@ dhd_prot_detach(dhd_pub_t *dhd) dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf); dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf); + dhd_dma_buf_free(dhd, &prot->h2d_ifrm_indx_wr_buf); + /* Common MsgBuf Rings */ dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn); dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn); @@ -2419,26 +2677,41 @@ dhd_prot_detach(dhd_pub_t *dhd) /* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */ dhd_prot_flowrings_pool_detach(dhd); - DHD_NATIVE_TO_PKTID_FINI(dhd, dhd->prot->pktid_map_handle); + /* detach info rings */ + dhd_prot_detach_info_rings(dhd); + + /* if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs use pktid_map_handle_ioctl + * handler and PKT memory is allocated using alloc_ioctl_return_buffer(), Otherwise + * they will be part of pktid_ctrl_map handler and PKT memory is allocated using + * PKTGET_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKGET. + * Similarly for freeing PKT buffers DHD_NATIVE_TO_PKTID_FINI will be used + * which calls PKTFREE_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKFREE. 
+ * Else if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs will be freed using + * DHD_NATIVE_TO_PKTID_FINI_IOCTL which calls free_ioctl_return_buffer. + */ + DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_ctrl_map); + DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_rx_map); + DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map); +#ifdef IOCTLRESP_USE_CONSTMEM + DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl); +#endif + + dhd_os_spin_lock_deinit(dhd->osh, prot->rx_lock); #ifndef CONFIG_DHD_USE_STATIC_BUF MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t)); #endif /* CONFIG_DHD_USE_STATIC_BUF */ -#if defined(DHD_LB) #if defined(DHD_LB_TXC) - if (prot->tx_compl_prod.buffer) { + if (prot->tx_compl_prod.buffer) MFREE(dhd->osh, prot->tx_compl_prod.buffer, - sizeof(void*) * DHD_LB_WORKQ_SZ); - } + sizeof(void*) * DHD_LB_WORKQ_SZ); #endif /* DHD_LB_TXC */ #if defined(DHD_LB_RXC) - if (prot->rx_compl_prod.buffer) { + if (prot->rx_compl_prod.buffer) MFREE(dhd->osh, prot->rx_compl_prod.buffer, - sizeof(void*) * DHD_LB_WORKQ_SZ); - } + sizeof(void*) * DHD_LB_WORKQ_SZ); #endif /* DHD_LB_RXC */ -#endif /* DHD_LB */ dhd->prot = NULL; } @@ -2446,12 +2719,12 @@ dhd_prot_detach(dhd_pub_t *dhd) /** - * dhd_prot_reset - Reset the protocol layer without freeing any objects. This - * may be invoked to soft reboot the dongle, without having to detach and attach - * the entire protocol layer. + * dhd_prot_reset - Reset the protocol layer without freeing any objects. + * This may be invoked to soft reboot the dongle, without having to + * detach and attach the entire protocol layer. * - * After dhd_prot_reset(), dhd_prot_init() may be invoked without going through - * a dhd_prot_attach() phase. + * After dhd_prot_reset(), dhd_prot_init() may be invoked + * without going througha dhd_prot_attach() phase. 
*/ void dhd_prot_reset(dhd_pub_t *dhd) @@ -2466,15 +2739,33 @@ dhd_prot_reset(dhd_pub_t *dhd) dhd_prot_flowrings_pool_reset(dhd); + /* Reset Common MsgBuf Rings */ dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn); dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn); dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln); dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln); dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln); + /* Reset info rings */ + if (prot->h2dring_info_subn) { + dhd_prot_ring_reset(dhd, prot->h2dring_info_subn); + } + + if (prot->d2hring_info_cpln) { + dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln); + } + + /* Reset all DMA-able buffers allocated during prot attach */ + dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf); dhd_dma_buf_reset(dhd, &prot->retbuf); dhd_dma_buf_reset(dhd, &prot->ioctbuf); - dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf); + dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf); + dhd_dma_buf_reset(dhd, &prot->hostts_req_buf); + dhd_dma_buf_reset(dhd, &prot->fw_trap_buf); + + dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf); + + /* Reset all DMA-able buffers for DMAing H2D/D2H WR/RD indices */ dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf); dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf); dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf); @@ -2494,7 +2785,8 @@ dhd_prot_reset(dhd_pub_t *dhd) prot->ioctl_state = 0; prot->curr_ioctl_cmd = 0; prot->ioctl_received = IOCTL_WAIT; - prot->ioctl_trans_id = 0; + /* To catch any rollover issues fast, starting with higher ioctl_trans_id */ + prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER; /* dhd_flow_rings_init is located at dhd_bus_start, * so when stopping bus, flowrings shall be deleted @@ -2503,126 +2795,76 @@ dhd_prot_reset(dhd_pub_t *dhd) dhd_flow_rings_deinit(dhd); } - if (prot->pktid_map_handle) { - DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_map_handle); - prot->pktid_map_handle = NULL; - } - + /* Reset PKTID map */ + 
DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map); + DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map); + DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_tx_map); #ifdef IOCTLRESP_USE_CONSTMEM - if (prot->pktid_map_handle_ioctl) { - DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl); - prot->pktid_map_handle_ioctl = NULL; - } + DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, prot->pktid_map_handle_ioctl); #endif /* IOCTLRESP_USE_CONSTMEM */ +#ifdef DMAMAP_STATS + dhd->dma_stats.txdata = dhd->dma_stats.txdata_sz = 0; + dhd->dma_stats.rxdata = dhd->dma_stats.rxdata_sz = 0; +#ifndef IOCTLRESP_USE_CONSTMEM + dhd->dma_stats.ioctl_rx = dhd->dma_stats.ioctl_rx_sz = 0; +#endif /* IOCTLRESP_USE_CONSTMEM */ + dhd->dma_stats.event_rx = dhd->dma_stats.event_rx_sz = 0; + dhd->dma_stats.info_rx = dhd->dma_stats.info_rx_sz = 0; + dhd->dma_stats.tsbuf_rx = dhd->dma_stats.tsbuf_rx_sz = 0; +#endif /* DMAMAP_STATS */ } /* dhd_prot_reset */ +#if defined(DHD_LB_RXP) +#define DHD_LB_DISPATCH_RX_PROCESS(dhdp) dhd_lb_dispatch_rx_process(dhdp) +#else /* !DHD_LB_RXP */ +#define DHD_LB_DISPATCH_RX_PROCESS(dhdp) do { /* noop */ } while (0) +#endif /* !DHD_LB_RXP */ -void -dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset) -{ - dhd_prot_t *prot = dhd->prot; - prot->rx_dataoffset = rx_offset; -} +#if defined(DHD_LB_RXC) +#define DHD_LB_DISPATCH_RX_COMPL(dhdp) dhd_lb_dispatch_rx_compl(dhdp) +#else /* !DHD_LB_RXC */ +#define DHD_LB_DISPATCH_RX_COMPL(dhdp) do { /* noop */ } while (0) +#endif /* !DHD_LB_RXC */ -/** - * Initialize protocol: sync w/dongle state. - * Sets dongle media info (iswl, drv_version, mac address). 
- */ -int -dhd_sync_with_dongle(dhd_pub_t *dhd) -{ - int ret = 0; - wlc_rev_info_t revinfo; +#if defined(DHD_LB_TXC) +#define DHD_LB_DISPATCH_TX_COMPL(dhdp) dhd_lb_dispatch_tx_compl(dhdp) +#else /* !DHD_LB_TXC */ +#define DHD_LB_DISPATCH_TX_COMPL(dhdp) do { /* noop */ } while (0) +#endif /* !DHD_LB_TXC */ - DHD_TRACE(("%s: Enter\n", __FUNCTION__)); - - dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT); - - - -#ifdef DHD_FW_COREDUMP - /* Check the memdump capability */ - dhd_get_memdump_info(dhd); -#endif /* DHD_FW_COREDUMP */ -#ifdef BCMASSERT_LOG - dhd_get_assert_info(dhd); -#endif /* BCMASSERT_LOG */ - - /* Get the device rev info */ - memset(&revinfo, 0, sizeof(revinfo)); - ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0); - if (ret < 0) { - DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__)); - goto done; - } - DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__, - revinfo.deviceid, revinfo.vendorid, revinfo.chipnum)); - - dhd_process_cid_mac(dhd, TRUE); - - ret = dhd_preinit_ioctls(dhd); - - if (!ret) { - dhd_process_cid_mac(dhd, FALSE); - } - - /* Always assumes wl for now */ - dhd->iswl = TRUE; -done: - return ret; -} /* dhd_sync_with_dongle */ - #if defined(DHD_LB) - /* DHD load balancing: deferral of work to another online CPU */ - /* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */ extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp); extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp); extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp); - extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx); +#if defined(DHD_LB_RXP) /** - * dhd_lb_dispatch - load balance by dispatch work to other CPU cores - * Note: rx_compl_tasklet is dispatched explicitly. 
+ * dhd_lb_dispatch_rx_process - load balance by dispatch Rx processing work + * to other CPU cores */ static INLINE void -dhd_lb_dispatch(dhd_pub_t *dhdp, uint16 ring_idx) +dhd_lb_dispatch_rx_process(dhd_pub_t *dhdp) { - switch (ring_idx) { + dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */ +} +#endif /* DHD_LB_RXP */ #if defined(DHD_LB_TXC) - case BCMPCIE_D2H_MSGRING_TX_COMPLETE: - bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */ - dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */ - break; -#endif /* DHD_LB_TXC */ - - case BCMPCIE_D2H_MSGRING_RX_COMPLETE: - { -#if defined(DHD_LB_RXC) - dhd_prot_t *prot = dhdp->prot; - /* Schedule the takslet only if we have to */ - if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) { - /* flush WR index */ - bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod); - dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */ - } -#endif /* DHD_LB_RXC */ -#if defined(DHD_LB_RXP) - dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */ -#endif /* DHD_LB_RXP */ - break; - } - default: - break; - } +/** + * dhd_lb_dispatch_tx_compl - load balance by dispatch Tx complition work + * to other CPU cores + */ +static INLINE void +dhd_lb_dispatch_tx_compl(dhd_pub_t *dhdp, uint16 ring_idx) +{ + bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */ + dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */ } - -#if defined(DHD_LB_TXC) /** * DHD load balanced tx completion tasklet handler, that will perform the * freeing of packets on the selected CPU. 
Packet pointers are delivered to @@ -2640,6 +2882,10 @@ dhd_lb_tx_compl_handler(unsigned long data) bcm_workq_t *workq = &prot->tx_compl_cons; uint32 count = 0; + int curr_cpu; + curr_cpu = get_cpu(); + put_cpu(); + DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd); while (1) { @@ -2661,7 +2907,6 @@ dhd_lb_tx_compl_handler(unsigned long data) pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt)); DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0); - #if defined(BCMPCIE) dhd_txcomplete(dhd, pkt, true); #endif @@ -2677,6 +2922,23 @@ dhd_lb_tx_compl_handler(unsigned long data) #endif /* DHD_LB_TXC */ #if defined(DHD_LB_RXC) + +/** + * dhd_lb_dispatch_rx_compl - load balance by dispatch rx complition work + * to other CPU cores + */ +static INLINE void +dhd_lb_dispatch_rx_compl(dhd_pub_t *dhdp) +{ + dhd_prot_t *prot = dhdp->prot; + /* Schedule the takslet only if we have to */ + if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) { + /* flush WR index */ + bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod); + dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */ + } +} + void dhd_lb_rx_compl_handler(unsigned long data) { @@ -2689,9 +2951,184 @@ dhd_lb_rx_compl_handler(unsigned long data) bcm_workq_cons_sync(workq); } #endif /* DHD_LB_RXC */ - #endif /* DHD_LB */ +void +dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset) +{ + dhd_prot_t *prot = dhd->prot; + prot->rx_dataoffset = rx_offset; +} + +static int +dhd_check_create_info_rings(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + int ret = BCME_ERROR; + uint16 ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS; + + if (prot->h2dring_info_subn && prot->d2hring_info_cpln) { + return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */ + } + + if (prot->h2dring_info_subn == NULL) { + prot->h2dring_info_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t)); + + if (prot->h2dring_info_subn == NULL) { + DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n", + __FUNCTION__)); + 
return BCME_NOMEM; + } + + DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__)); + ret = dhd_prot_ring_attach(dhd, prot->h2dring_info_subn, "h2dinfo", + H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE, + ringid); + if (ret != BCME_OK) { + DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n", + __FUNCTION__)); + goto err; + } + } + + if (prot->d2hring_info_cpln == NULL) { + prot->d2hring_info_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t)); + + if (prot->d2hring_info_cpln == NULL) { + DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n", + __FUNCTION__)); + return BCME_NOMEM; + } + + /* create the debug info completion ring next to debug info submit ring + * ringid = id next to debug info submit ring + */ + ringid = ringid + 1; + + DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__)); + ret = dhd_prot_ring_attach(dhd, prot->d2hring_info_cpln, "d2hinfo", + D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE, + ringid); + if (ret != BCME_OK) { + DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n", + __FUNCTION__)); + dhd_prot_ring_detach(dhd, prot->h2dring_info_subn); + goto err; + } + } + + return ret; +err: + MFREE(prot->osh, prot->h2dring_info_subn, sizeof(msgbuf_ring_t)); + prot->h2dring_info_subn = NULL; + + if (prot->d2hring_info_cpln) { + MFREE(prot->osh, prot->d2hring_info_cpln, sizeof(msgbuf_ring_t)); + prot->d2hring_info_cpln = NULL; + } + return ret; +} /* dhd_check_create_info_rings */ + +int +dhd_prot_init_info_rings(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + int ret = BCME_OK; + + if ((ret = dhd_check_create_info_rings(dhd)) != BCME_OK) { + DHD_ERROR(("%s: info rings aren't created! 
\n", + __FUNCTION__)); + return ret; + } + + if ((prot->d2hring_info_cpln->inited) || (prot->d2hring_info_cpln->create_pending)) { + DHD_INFO(("Info completion ring was created!\n")); + return ret; + } + + DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx)); + ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln); + if (ret != BCME_OK) + return ret; + + prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL; + + DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx)); + prot->h2dring_info_subn->n_completion_ids = 1; + prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx; + + ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn); + + /* Note that there is no way to delete d2h or h2d ring deletion incase either fails, + * so can not cleanup if one ring was created while the other failed + */ + return ret; +} /* dhd_prot_init_info_rings */ + +static void +dhd_prot_detach_info_rings(dhd_pub_t *dhd) +{ + if (dhd->prot->h2dring_info_subn) { + dhd_prot_ring_detach(dhd, dhd->prot->h2dring_info_subn); + MFREE(dhd->prot->osh, dhd->prot->h2dring_info_subn, sizeof(msgbuf_ring_t)); + dhd->prot->h2dring_info_subn = NULL; + } + if (dhd->prot->d2hring_info_cpln) { + dhd_prot_ring_detach(dhd, dhd->prot->d2hring_info_cpln); + MFREE(dhd->prot->osh, dhd->prot->d2hring_info_cpln, sizeof(msgbuf_ring_t)); + dhd->prot->d2hring_info_cpln = NULL; + } +} + +/** + * Initialize protocol: sync w/dongle state. + * Sets dongle media info (iswl, drv_version, mac address). 
+ */ +int dhd_sync_with_dongle(dhd_pub_t *dhd) +{ + int ret = 0; + wlc_rev_info_t revinfo; + + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT); + + /* Post ts buffer after shim layer is attached */ + ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd); + + +#ifdef DHD_FW_COREDUMP + /* Check the memdump capability */ + dhd_get_memdump_info(dhd); +#endif /* DHD_FW_COREDUMP */ +#ifdef BCMASSERT_LOG + dhd_get_assert_info(dhd); +#endif /* BCMASSERT_LOG */ + + /* Get the device rev info */ + memset(&revinfo, 0, sizeof(revinfo)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0); + if (ret < 0) { + DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__)); + goto done; + } + DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__, + revinfo.deviceid, revinfo.vendorid, revinfo.chipnum)); + + DHD_SSSR_DUMP_INIT(dhd); + + dhd_process_cid_mac(dhd, TRUE); + ret = dhd_preinit_ioctls(dhd); + dhd_process_cid_mac(dhd, FALSE); + + /* Always assumes wl for now */ + dhd->iswl = TRUE; +done: + return ret; +} /* dhd_sync_with_dongle */ + + #define DHD_DBG_SHOW_METADATA 0 #if DHD_DBG_SHOW_METADATA @@ -2790,7 +3227,9 @@ dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send) { if (pkt) { if (pkttype == PKTTYPE_IOCTL_RX || - pkttype == PKTTYPE_EVENT_RX) { + pkttype == PKTTYPE_EVENT_RX || + pkttype == PKTTYPE_INFO_RX || + pkttype == PKTTYPE_TSBUF_RX) { #ifdef DHD_USE_STATIC_CTRLBUF PKTFREE_STATIC(dhd->osh, pkt, send); #else @@ -2802,6 +3241,7 @@ dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send) } } +/* dhd_prot_packet_get should be called only for items having pktid_ctrl_map handle */ static INLINE void * BCMFASTPATH dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid) { @@ -2813,25 +3253,45 @@ dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid #ifdef DHD_PCIE_PKTID if (free_pktid) { - PKTBUF = 
DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, + PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa, len, dmah, secdma, pkttype); } else { - PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_map_handle, + PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_ctrl_map, pktid, pa, len, dmah, secdma, pkttype); } #else - PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, pktid, pa, + PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa, len, dmah, secdma, pkttype); #endif /* DHD_PCIE_PKTID */ - if (PKTBUF) { { - if (SECURE_DMA_ENAB(dhd->osh)) { + if (SECURE_DMA_ENAB(dhd->osh)) SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah, secdma, 0); - } else { + else DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah); +#ifdef DMAMAP_STATS + switch (pkttype) { +#ifndef IOCTLRESP_USE_CONSTMEM + case PKTTYPE_IOCTL_RX: + dhd->dma_stats.ioctl_rx--; + dhd->dma_stats.ioctl_rx_sz -= len; + break; +#endif /* IOCTLRESP_USE_CONSTMEM */ + case PKTTYPE_EVENT_RX: + dhd->dma_stats.event_rx--; + dhd->dma_stats.event_rx_sz -= len; + break; + case PKTTYPE_INFO_RX: + dhd->dma_stats.info_rx--; + dhd->dma_stats.info_rx_sz -= len; + break; + case PKTTYPE_TSBUF_RX: + dhd->dma_stats.tsbuf_rx--; + dhd->dma_stats.tsbuf_rx_sz -= len; + break; } +#endif /* DMAMAP_STATS */ } } @@ -2848,7 +3308,42 @@ dhd_prot_ioctl_ret_buffer_get(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbu return; } -#endif /* IOCTLRESP_USE_CONSTMEM */ +#endif + +#ifdef PCIE_INB_DW +static int +dhd_prot_inc_hostactive_devwake_assert(dhd_bus_t *bus) +{ + unsigned long flags = 0; + + if (INBAND_DW_ENAB(bus)) { + DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); + bus->host_active_cnt++; + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + if (dhd_bus_set_device_wake(bus, TRUE) != BCME_OK) { + DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); + bus->host_active_cnt--; + dhd_bus_inb_ack_pending_ds_req(bus); + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + return BCME_ERROR; + } + } + 
+ return BCME_OK; +} + +static void +dhd_prot_dec_hostactive_ack_pending_dsreq(dhd_bus_t *bus) +{ + unsigned long flags = 0; + if (INBAND_DW_ENAB(bus)) { + DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); + bus->host_active_cnt--; + dhd_bus_inb_ack_pending_ds_req(bus); + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + } +} +#endif /* PCIE_INB_DW */ static void BCMFASTPATH dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid) @@ -2893,88 +3388,105 @@ dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid) static int BCMFASTPATH dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid) { - void *p; + void *p, **pktbuf; uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; uint8 *rxbuf_post_tmp; host_rxbuf_post_t *rxbuf_post; void *msg_start; - dmaaddr_t pa; - uint32 pktlen; - uint8 i = 0; - uint16 alloced = 0; + dmaaddr_t pa, *pktbuf_pa; + uint32 *pktlen; + uint16 i = 0, alloced = 0; unsigned long flags; uint32 pktid; dhd_prot_t *prot = dhd->prot; msgbuf_ring_t *ring = &prot->h2dring_rxp_subn; + void *lcl_buf; + uint16 lcl_buf_size; - DHD_GENERAL_LOCK(dhd, flags); +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ - /* Claim space for exactly 'count' no of messages, for mitigation purpose */ - msg_start = (void *) - dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE); - - DHD_GENERAL_UNLOCK(dhd, flags); - - if (msg_start == NULL) { - DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__)); - return -1; + /* allocate a local buffer to store pkt buffer va, pa and length */ + lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) * + RX_BUF_BURST; + lcl_buf = MALLOC(dhd->osh, lcl_buf_size); + if (!lcl_buf) { + DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__)); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return 0; } - /* if msg_start != NULL, we should have alloced space for 
atleast 1 item */ - ASSERT(alloced > 0); + pktbuf = lcl_buf; + pktbuf_pa = (dmaaddr_t *)((uint8 *)pktbuf + sizeof(void *) * RX_BUF_BURST); + pktlen = (uint32 *)((uint8 *)pktbuf_pa + sizeof(dmaaddr_t) * RX_BUF_BURST); - rxbuf_post_tmp = (uint8*)msg_start; - - /* loop through each allocated message in the rxbuf post msgbuf_ring */ - for (i = 0; i < alloced; i++) { - rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp; - /* Create a rx buffer */ + for (i = 0; i < count; i++) { if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) { DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__)); dhd->rx_pktgetfail++; break; } - pktlen = PKTLEN(dhd->osh, p); + pktlen[i] = PKTLEN(dhd->osh, p); if (SECURE_DMA_ENAB(dhd->osh)) { - DHD_GENERAL_LOCK(dhd, flags); - pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, + pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p, 0, ring->dma_buf.secdma, 0); - DHD_GENERAL_UNLOCK(dhd, flags); - } else { - pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0); } +#ifndef BCM_SECURE_DMA + else + pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p, 0); +#endif /* #ifndef BCM_SECURE_DMA */ if (PHYSADDRISZERO(pa)) { - if (SECURE_DMA_ENAB(dhd->osh)) { - DHD_GENERAL_LOCK(dhd, flags); - SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL, - ring->dma_buf.secdma, 0); - DHD_GENERAL_UNLOCK(dhd, flags); - } else { - DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); - } - PKTFREE(dhd->osh, p, FALSE); DHD_ERROR(("Invalid phyaddr 0\n")); ASSERT(0); break; } +#ifdef DMAMAP_STATS + dhd->dma_stats.rxdata++; + dhd->dma_stats.rxdata_sz += pktlen[i]; +#endif /* DMAMAP_STATS */ PKTPULL(dhd->osh, p, prot->rx_metadata_offset); - pktlen = PKTLEN(dhd->osh, p); + pktlen[i] = PKTLEN(dhd->osh, p); + pktbuf[i] = p; + pktbuf_pa[i] = pa; + } - /* Common msg header */ - rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST; - rxbuf_post->cmn_hdr.if_id = 0; - rxbuf_post->cmn_hdr.epoch = 
ring->seqnum % H2D_EPOCH_MODULO; - ring->seqnum++; + /* only post what we have */ + count = i; + + /* grab the rx lock to allocate pktid and post on ring */ + DHD_SPIN_LOCK(prot->rx_lock, flags); + + /* Claim space for exactly 'count' no of messages, for mitigation purpose */ + msg_start = (void *) + dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE); + if (msg_start == NULL) { + DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__)); + goto cleanup; + } + /* if msg_start != NULL, we should have alloced space for atleast 1 item */ + ASSERT(alloced > 0); + + rxbuf_post_tmp = (uint8*)msg_start; + + for (i = 0; i < alloced; i++) { + rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp; + p = pktbuf[i]; + pa = pktbuf_pa[i]; #if defined(DHD_LB_RXC) if (use_rsv_pktid == TRUE) { bcm_workq_t *workq = &prot->rx_compl_cons; int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ); + if (elem_ix == BCM_RING_EMPTY) { - DHD_ERROR(("%s rx_compl_cons ring is empty\n", __FUNCTION__)); + DHD_INFO(("%s rx_compl_cons ring is empty\n", __FUNCTION__)); pktid = DHD_PKTID_INVALID; goto alloc_pkt_id; } else { @@ -2982,49 +3494,36 @@ dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid) pktid = *elem; } + rxbuf_post->cmn_hdr.request_id = htol32(pktid); + /* Now populate the previous locker with valid information */ if (pktid != DHD_PKTID_INVALID) { - rxbuf_post->cmn_hdr.request_id = htol32(pktid); - DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_map_handle, p, pktid, - pa, pktlen, DMA_RX, NULL, ring->dma_buf.secdma, + DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_rx_map, + p, pktid, pa, pktlen[i], DMA_RX, NULL, NULL, PKTTYPE_DATA_RX); } } else -#endif /* DHD_LB_RXC */ +#endif /* ! 
DHD_LB_RXC */ { #if defined(DHD_LB_RXC) alloc_pkt_id: -#endif +#endif /* DHD_LB_RXC */ + pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_rx_map, p, pa, + pktlen[i], DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX); #if defined(DHD_PCIE_PKTID) - /* get the lock before calling DHD_NATIVE_TO_PKTID */ - DHD_GENERAL_LOCK(dhd, flags); -#endif - pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_map_handle, p, pa, - pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX); - -#if defined(DHD_PCIE_PKTID) - /* free lock */ - DHD_GENERAL_UNLOCK(dhd, flags); - if (pktid == DHD_PKTID_INVALID) { - - if (SECURE_DMA_ENAB(dhd->osh)) { - DHD_GENERAL_LOCK(dhd, flags); - SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL, - ring->dma_buf.secdma, 0); - DHD_GENERAL_UNLOCK(dhd, flags); - } else { - DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); - } - - PKTFREE(dhd->osh, p, FALSE); - DHD_ERROR(("Pktid pool depleted.\n")); break; } #endif /* DHD_PCIE_PKTID */ } - rxbuf_post->data_buf_len = htol16((uint16)pktlen); + /* Common msg header */ + rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST; + rxbuf_post->cmn_hdr.if_id = 0; + rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + rxbuf_post->cmn_hdr.flags = ring->current_phase; + ring->seqnum++; + rxbuf_post->data_buf_len = htol16((uint16)pktlen[i]); rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); rxbuf_post->data_buf_addr.low_addr = htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset); @@ -3039,8 +3538,8 @@ alloc_pkt_id: rxbuf_post->metadata_buf_addr.low_addr = 0; } -#if defined(DHD_PKTID_AUDIT_RING) - DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pktid, DHD_DUPLICATE_ALLOC); +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT(dhd, prot->pktid_rx_map, pktid, DHD_DUPLICATE_ALLOC); #endif /* DHD_PKTID_AUDIT_RING */ rxbuf_post->cmn_hdr.request_id = htol32(pktid); @@ -3050,22 +3549,235 @@ alloc_pkt_id: } if (i < alloced) { - if (ring->wr < (alloced - i)) { + if (ring->wr < (alloced - i)) 
ring->wr = ring->max_items - (alloced - i); - } else { + else ring->wr -= (alloced - i); + + if (ring->wr == 0) { + DHD_INFO(("%s: flipping the phase now\n", ring->name)); + ring->current_phase = ring->current_phase ? + 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT; } alloced = i; } - /* Update ring's WR index and ring doorbell to dongle */ + /* update ring's WR index and ring doorbell to dongle */ if (alloced > 0) { + unsigned long flags1; + DHD_GENERAL_LOCK(dhd, flags1); dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced); + DHD_GENERAL_UNLOCK(dhd, flags1); } + DHD_SPIN_UNLOCK(prot->rx_lock, flags); + +cleanup: + for (i = alloced; i < count; i++) { + p = pktbuf[i]; + pa = pktbuf_pa[i]; + + if (SECURE_DMA_ENAB(dhd->osh)) + SECURE_DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, + DHD_DMAH_NULL, ring->dma_buf.secdma, 0); + else + DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL); + PKTFREE(dhd->osh, p, FALSE); + } + + MFREE(dhd->osh, lcl_buf, lcl_buf_size); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif return alloced; -} /* dhd_prot_rxbuf_post */ +} /* dhd_prot_rxbufpost */ + +static int +dhd_prot_infobufpost(dhd_pub_t *dhd) +{ + unsigned long flags; + uint32 pktid; + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring = prot->h2dring_info_subn; + uint16 alloced = 0; + uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; + uint32 pktlen; + info_buf_post_msg_t *infobuf_post; + uint8 *infobuf_post_tmp; + void *p; + void* msg_start; + uint8 i = 0; + dmaaddr_t pa; + int16 count; + + if (ring == NULL) + return 0; + + if (ring->inited != TRUE) + return 0; + if (prot->max_infobufpost == 0) + return 0; + + count = prot->max_infobufpost - prot->infobufpost; + + if (count <= 0) { + DHD_INFO(("%s: Cannot post more than max info resp buffers\n", + __FUNCTION__)); + return 0; + } + +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ + + DHD_GENERAL_LOCK(dhd, 
flags); + /* Claim space for exactly 'count' no of messages, for mitigation purpose */ + msg_start = (void *) dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, FALSE); + DHD_GENERAL_UNLOCK(dhd, flags); + + if (msg_start == NULL) { + DHD_INFO(("%s:%d: infobufpost Msgbuf Not available\n", __FUNCTION__, __LINE__)); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return -1; + } + + /* if msg_start != NULL, we should have alloced space for atleast 1 item */ + ASSERT(alloced > 0); + + infobuf_post_tmp = (uint8*) msg_start; + + /* loop through each allocated message in the host ring */ + for (i = 0; i < alloced; i++) { + infobuf_post = (info_buf_post_msg_t *) infobuf_post_tmp; + /* Create a rx buffer */ +#ifdef DHD_USE_STATIC_CTRLBUF + p = PKTGET_STATIC(dhd->osh, pktsz, FALSE); +#else + p = PKTGET(dhd->osh, pktsz, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + if (p == NULL) { + DHD_ERROR(("%s:%d: PKTGET for infobuf failed\n", __FUNCTION__, __LINE__)); + dhd->rx_pktgetfail++; + break; + } + pktlen = PKTLEN(dhd->osh, p); + if (SECURE_DMA_ENAB(dhd->osh)) { + pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, + DMA_RX, p, 0, ring->dma_buf.secdma, 0); + } +#ifndef BCM_SECURE_DMA + else + pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0); +#endif /* #ifndef BCM_SECURE_DMA */ + if (PHYSADDRISZERO(pa)) { + if (SECURE_DMA_ENAB(dhd->osh)) { + SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL, + ring->dma_buf.secdma, 0); + } + else + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhd->osh, p, FALSE); +#else + PKTFREE(dhd->osh, p, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + DHD_ERROR(("Invalid phyaddr 0\n")); + ASSERT(0); + break; + } +#ifdef DMAMAP_STATS + dhd->dma_stats.info_rx++; + dhd->dma_stats.info_rx_sz += pktlen; +#endif /* DMAMAP_STATS */ + pktlen = PKTLEN(dhd->osh, p); + + /* Common msg header */ + infobuf_post->cmn_hdr.msg_type = 
MSG_TYPE_INFO_BUF_POST; + infobuf_post->cmn_hdr.if_id = 0; + infobuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + infobuf_post->cmn_hdr.flags = ring->current_phase; + ring->seqnum++; + +#if defined(DHD_PCIE_PKTID) + /* get the lock before calling DHD_NATIVE_TO_PKTID */ + DHD_GENERAL_LOCK(dhd, flags); +#endif /* DHD_PCIE_PKTID */ + + pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa, + pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_INFO_RX); + + +#if defined(DHD_PCIE_PKTID) + /* free lock */ + DHD_GENERAL_UNLOCK(dhd, flags); + + if (pktid == DHD_PKTID_INVALID) { + if (SECURE_DMA_ENAB(dhd->osh)) { + DHD_GENERAL_LOCK(dhd, flags); + SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0, + ring->dma_buf.secdma, 0); + DHD_GENERAL_UNLOCK(dhd, flags); + } else + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0); + +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhd->osh, p, FALSE); +#else + PKTFREE(dhd->osh, p, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__)); + break; + } +#endif /* DHD_PCIE_PKTID */ + + infobuf_post->host_buf_len = htol16((uint16)pktlen); + infobuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); + infobuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); + +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC); +#endif /* DHD_PKTID_AUDIT_RING */ + + DHD_INFO(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n", + infobuf_post->cmn_hdr.request_id, infobuf_post->host_buf_addr.low_addr, + infobuf_post->host_buf_addr.high_addr)); + + infobuf_post->cmn_hdr.request_id = htol32(pktid); + /* Move rxbuf_post_tmp to next item */ + infobuf_post_tmp = infobuf_post_tmp + ring->item_len; + } + + if (i < alloced) { + if (ring->wr < (alloced - i)) + ring->wr = ring->max_items - (alloced - i); + else + ring->wr -= (alloced - i); + + alloced = i; + if (alloced && ring->wr == 0) { + DHD_INFO(("%s: flipping the phase now\n", ring->name)); + 
ring->current_phase = ring->current_phase ? + 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT; + } + } + + /* Update the write pointer in TCM & ring bell */ + if (alloced > 0) { + prot->infobufpost += alloced; + DHD_INFO(("allocated %d buffers for info ring\n", alloced)); + DHD_GENERAL_LOCK(dhd, flags); + dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced); + DHD_GENERAL_UNLOCK(dhd, flags); + } +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return alloced; +} /* dhd_prot_infobufpost */ #ifdef IOCTLRESP_USE_CONSTMEM static int @@ -3092,10 +3804,6 @@ free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf) dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0; retbuf->len = IOCT_RETBUF_SIZE; retbuf->_alloced = retbuf->len + dma_pad; - /* JIRA:SWWLAN-70021 The pa value would be overwritten by the dongle. - * Need to reassign before free to pass the check in dhd_dma_buf_audit(). - */ - retbuf->pa = DMA_MAP(dhd->osh, retbuf->va, retbuf->len, DMA_RX, NULL, NULL); } dhd_dma_buf_free(dhd, retbuf); @@ -3104,7 +3812,7 @@ free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf) #endif /* IOCTLRESP_USE_CONSTMEM */ static int -dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf) +dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, uint8 msg_type) { void *p; uint16 pktsz; @@ -3119,16 +3827,34 @@ dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf) uint32 pktid; void *map_handle; msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + bool non_ioctl_resp_buf = 0; + dhd_pkttype_t buf_type; if (dhd->busstate == DHD_BUS_DOWN) { DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__)); return -1; } - memset(&retbuf, 0, sizeof(dhd_dma_buf_t)); - if (event_buf) { - /* Allocate packet for event buffer post */ + if (msg_type == MSG_TYPE_IOCTLRESP_BUF_POST) + buf_type = PKTTYPE_IOCTL_RX; + else if (msg_type == MSG_TYPE_EVENT_BUF_POST) + buf_type = PKTTYPE_EVENT_RX; + else if (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST) + buf_type = PKTTYPE_TSBUF_RX; + 
else { + DHD_ERROR(("invalid message type to be posted to Ctrl ring %d\n", msg_type)); + return -1; + } + + + if ((msg_type == MSG_TYPE_EVENT_BUF_POST) || (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST)) + non_ioctl_resp_buf = TRUE; + else + non_ioctl_resp_buf = FALSE; + + if (non_ioctl_resp_buf) { + /* Allocate packet for not ioctl resp buffer post */ pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; } else { /* Allocate packet for ctrl/ioctl buffer post */ @@ -3136,7 +3862,7 @@ dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf) } #ifdef IOCTLRESP_USE_CONSTMEM - if (!event_buf) { + if (!non_ioctl_resp_buf) { if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) { DHD_ERROR(("Could not allocate IOCTL response buffer\n")); return -1; @@ -3156,7 +3882,7 @@ dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf) #endif /* DHD_USE_STATIC_CTRLBUF */ if (p == NULL) { DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n", - __FUNCTION__, __LINE__, event_buf ? + __FUNCTION__, __LINE__, non_ioctl_resp_buf ? "EVENT" : "IOCTL RESP")); dhd->rx_pktgetfail++; return -1; @@ -3169,16 +3895,44 @@ dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf) pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0, ring->dma_buf.secdma, 0); DHD_GENERAL_UNLOCK(dhd, flags); - } else { - pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0); } +#ifndef BCM_SECURE_DMA + else + pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0); +#endif /* #ifndef BCM_SECURE_DMA */ if (PHYSADDRISZERO(pa)) { DHD_ERROR(("Invalid physaddr 0\n")); ASSERT(0); goto free_pkt_return; } + +#ifdef DMAMAP_STATS + switch (buf_type) { +#ifndef IOCTLRESP_USE_CONSTMEM + case PKTTYPE_IOCTL_RX: + dhd->dma_stats.ioctl_rx++; + dhd->dma_stats.ioctl_rx_sz += pktlen; + break; +#endif /* !IOCTLRESP_USE_CONSTMEM */ + case PKTTYPE_EVENT_RX: + dhd->dma_stats.event_rx++; + dhd->dma_stats.event_rx_sz += pktlen; + break; + case PKTTYPE_TSBUF_RX: + dhd->dma_stats.tsbuf_rx++; + dhd->dma_stats.tsbuf_rx_sz += pktlen; 
+ break; + default: + break; + } +#endif /* DMAMAP_STATS */ + } +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ DHD_GENERAL_LOCK(dhd, flags); @@ -3191,7 +3945,7 @@ dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf) __FUNCTION__, __LINE__)); #ifdef IOCTLRESP_USE_CONSTMEM - if (event_buf) + if (non_ioctl_resp_buf) #endif /* IOCTLRESP_USE_CONSTMEM */ { if (SECURE_DMA_ENAB(dhd->osh)) { @@ -3207,24 +3961,20 @@ dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf) } /* CMN msg header */ - if (event_buf) { - rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_EVENT_BUF_POST; - } else { - rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_IOCTLRESP_BUF_POST; - } + rxbuf_post->cmn_hdr.msg_type = msg_type; #ifdef IOCTLRESP_USE_CONSTMEM - if (!event_buf) { + if (!non_ioctl_resp_buf) { map_handle = dhd->prot->pktid_map_handle_ioctl; - pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, - DMA_RX, dmah, ring->dma_buf.secdma, PKTTYPE_IOCTL_RX); + pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX, dmah, + ring->dma_buf.secdma, buf_type); } else #endif /* IOCTLRESP_USE_CONSTMEM */ { - map_handle = dhd->prot->pktid_map_handle; - pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, + map_handle = dhd->prot->pktid_ctrl_map; + pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma, - event_buf ? PKTTYPE_EVENT_RX : PKTTYPE_IOCTL_RX); + buf_type); } if (pktid == DHD_PKTID_INVALID) { @@ -3232,31 +3982,40 @@ dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf) ring->wr = ring->max_items - 1; } else { ring->wr--; + if (ring->wr == 0) { + ring->current_phase = ring->current_phase ? 
0 : + BCMPCIE_CMNHDR_PHASE_BIT_INIT; + } } DHD_GENERAL_UNLOCK(dhd, flags); DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); + DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__)); goto free_pkt_return; } -#if defined(DHD_PKTID_AUDIT_RING) +#ifdef DHD_PKTID_AUDIT_RING DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC); #endif /* DHD_PKTID_AUDIT_RING */ rxbuf_post->cmn_hdr.request_id = htol32(pktid); rxbuf_post->cmn_hdr.if_id = 0; - rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; ring->seqnum++; + rxbuf_post->cmn_hdr.flags = ring->current_phase; #if defined(DHD_PCIE_PKTID) if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) { if (ring->wr == 0) { ring->wr = ring->max_items - 1; } else { - ring->wr--; + if (ring->wr == 0) { + ring->current_phase = ring->current_phase ? 0 : + BCMPCIE_CMNHDR_PHASE_BIT_INIT; + } } DHD_GENERAL_UNLOCK(dhd, flags); #ifdef IOCTLRESP_USE_CONSTMEM - if (event_buf) + if (non_ioctl_resp_buf) #endif /* IOCTLRESP_USE_CONSTMEM */ { if (SECURE_DMA_ENAB(dhd->osh)) { @@ -3264,15 +4023,13 @@ dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf) SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL, ring->dma_buf.secdma, 0); DHD_GENERAL_UNLOCK(dhd, flags); - } else { + } else DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); - } } goto free_pkt_return; } #endif /* DHD_PCIE_PKTID */ - rxbuf_post->cmn_hdr.flags = 0; #ifndef IOCTLRESP_USE_CONSTMEM rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p)); #else @@ -3285,30 +4042,36 @@ dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf) dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1); DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return 1; free_pkt_return: #ifdef IOCTLRESP_USE_CONSTMEM - if (!event_buf) { + if (!non_ioctl_resp_buf) { free_ioctl_return_buffer(dhd, &retbuf); } else -#endif /* 
IOCTLRESP_USE_CONSTMEM */ +#endif { - dhd_prot_packet_free(dhd, p, - event_buf ? PKTTYPE_EVENT_RX : PKTTYPE_IOCTL_RX, - FALSE); + dhd_prot_packet_free(dhd, p, buf_type, FALSE); } +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return -1; } /* dhd_prot_rxbufpost_ctrl */ static uint16 -dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post) +dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type, uint32 max_to_post) { uint32 i = 0; int32 ret_val; - DHD_INFO(("max to post %d, event %d \n", max_to_post, event_buf)); + DHD_INFO(("max to post %d, event %d \n", max_to_post, msg_type)); if (dhd->busstate == DHD_BUS_DOWN) { DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__)); @@ -3316,13 +4079,12 @@ dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_pos } while (i < max_to_post) { - ret_val = dhd_prot_rxbufpost_ctrl(dhd, event_buf); - if (ret_val < 0) { + ret_val = dhd_prot_rxbufpost_ctrl(dhd, msg_type); + if (ret_val < 0) break; - } i++; } - DHD_INFO(("posted %d buffers to event_pool/ioctl_resp_pool %d\n", i, event_buf)); + DHD_INFO(("posted %d buffers of type %d\n", i, msg_type)); return (uint16)i; } @@ -3340,7 +4102,7 @@ dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd) return; } prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, - FALSE, max_to_post); + MSG_TYPE_IOCTLRESP_BUF_POST, max_to_post); } static void @@ -3349,23 +4111,62 @@ dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd) dhd_prot_t *prot = dhd->prot; int max_to_post; + /* Use atomic variable to avoid re-entry */ + if (atomic_read(&dhd_msgbuf_rxbuf_post_event_bufs_running) > 0) { + return; + } + atomic_inc(&dhd_msgbuf_rxbuf_post_event_bufs_running); + max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted; if (max_to_post <= 0) { - DHD_INFO(("%s: Cannot post more than max event buffers\n", + DHD_ERROR(("%s: Cannot post more than max event buffers\n", __FUNCTION__)); return; } 
prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, - TRUE, max_to_post); + MSG_TYPE_EVENT_BUF_POST, max_to_post); + + atomic_dec(&dhd_msgbuf_rxbuf_post_event_bufs_running); } -/** called when DHD needs to check for 'receive complete' messages from the dongle */ -bool BCMFASTPATH -dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound) +static int +dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd) { +#ifdef DHD_TIMESYNC + dhd_prot_t *prot = dhd->prot; + int max_to_post; + + if (prot->active_ipc_version < 7) { + DHD_ERROR(("no ts buffers to device ipc rev is %d, needs to be atleast 7\n", + prot->active_ipc_version)); + return 0; + } + + max_to_post = prot->max_tsbufpost - prot->cur_ts_bufs_posted; + if (max_to_post <= 0) { + DHD_INFO(("%s: Cannot post more than max ts buffers\n", + __FUNCTION__)); + return 0; + } + + prot->cur_ts_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, + MSG_TYPE_TIMSTAMP_BUFPOST, max_to_post); +#endif /* DHD_TIMESYNC */ + return 0; +} + +bool BCMFASTPATH +dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound) +{ + dhd_prot_t *prot = dhd->prot; bool more = TRUE; uint n = 0; - msgbuf_ring_t *ring = &dhd->prot->d2hring_rx_cpln; + msgbuf_ring_t *ring = prot->d2hring_info_cpln; + + if (ring == NULL) + return FALSE; + if (ring->inited != TRUE) + return FALSE; /* Process all the messages - DTOH direction */ while (!dhd_is_device_removed(dhd)) { @@ -3377,7 +4178,7 @@ dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound) break; } - /* Get the address of the next message to be read from ring */ + /* Get the message from ring */ msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); if (msg_addr == NULL) { more = FALSE; @@ -3388,8 +4189,8 @@ dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound) OSL_PREFETCH(msg_addr); if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) { - DHD_ERROR(("%s: process %s msg addr %p len %d\n", - __FUNCTION__, ring->name, msg_addr, msg_len)); + DHD_ERROR(("%s: Error at 
process rxpl msgbuf of len %d\n", + __FUNCTION__, msg_len)); } /* Update read pointer */ @@ -3405,6 +4206,222 @@ dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound) return more; } +/** called when DHD needs to check for 'receive complete' messages from the dongle */ +bool BCMFASTPATH +dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound) +{ + bool more = FALSE; + uint n = 0; + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring = &prot->d2hring_rx_cpln; + uint16 item_len = ring->item_len; + host_rxbuf_cmpl_t *msg = NULL; + uint8 *msg_addr; + uint32 msg_len; + uint16 pkt_cnt, pkt_cnt_newidx; + unsigned long flags; + dmaaddr_t pa; + uint32 len; + void *dmah; + void *secdma; + int ifidx = 0, if_newidx = 0; + void *pkt, *pktqhead = NULL, *prevpkt = NULL, *pkt_newidx, *nextpkt; + uint32 pktid; + int i; + uint8 sync; + + while (1) { + if (dhd_is_device_removed(dhd)) + break; + + if (dhd->hang_was_sent) + break; + + pkt_cnt = 0; + pktqhead = pkt_newidx = NULL; + pkt_cnt_newidx = 0; + + DHD_SPIN_LOCK(prot->rx_lock, flags); + + /* Get the address of the next message to be read from ring */ + msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); + if (msg_addr == NULL) { + DHD_SPIN_UNLOCK(prot->rx_lock, flags); + break; + } + + while (msg_len > 0) { + msg = (host_rxbuf_cmpl_t *)msg_addr; + + /* Wait until DMA completes, then fetch msg_type */ + sync = prot->d2h_sync_cb(dhd, ring, &msg->cmn_hdr, item_len); + /* + * Update the curr_rd to the current index in the ring, from where + * the work item is fetched. This way if the fetched work item + * fails in LIVELOCK, we can print the exact read index in the ring + * that shows up the corrupted work item. 
+ */ + if ((ring->curr_rd + 1) >= ring->max_items) { + ring->curr_rd = 0; + } else { + ring->curr_rd += 1; + } + + if (!sync) { + msg_len -= item_len; + msg_addr += item_len; + continue; + } + + pktid = ltoh32(msg->cmn_hdr.request_id); + +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_rx_map, pktid, + DHD_DUPLICATE_FREE, msg, D2HRING_RXCMPLT_ITEMSIZE); +#endif /* DHD_PKTID_AUDIT_RING */ + + pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_rx_map, pktid, pa, + len, dmah, secdma, PKTTYPE_DATA_RX); + if (!pkt) { + msg_len -= item_len; + msg_addr += item_len; + continue; + } + + if (SECURE_DMA_ENAB(dhd->osh)) + SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, + dmah, secdma, 0); + else + DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah); + +#ifdef DMAMAP_STATS + dhd->dma_stats.rxdata--; + dhd->dma_stats.rxdata_sz -= len; +#endif /* DMAMAP_STATS */ + DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, " + "pktdata %p, metalen %d\n", + ltoh32(msg->cmn_hdr.request_id), + ltoh16(msg->data_offset), + ltoh16(msg->data_len), msg->cmn_hdr.if_id, + msg->cmn_hdr.flags, PKTDATA(dhd->osh, pkt), + ltoh16(msg->metadata_len))); + + pkt_cnt++; + msg_len -= item_len; + msg_addr += item_len; + +#if DHD_DBG_SHOW_METADATA + if (prot->metadata_dbg && prot->rx_metadata_offset && + msg->metadata_len) { + uchar *ptr; + ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset); + /* header followed by data */ + bcm_print_bytes("rxmetadata", ptr, msg->metadata_len); + dhd_prot_print_metadata(dhd, ptr, msg->metadata_len); + } +#endif /* DHD_DBG_SHOW_METADATA */ + + /* data_offset from buf start */ + if (ltoh16(msg->data_offset)) { + /* data offset given from dongle after split rx */ + PKTPULL(dhd->osh, pkt, ltoh16(msg->data_offset)); + } + else if (prot->rx_dataoffset) { + /* DMA RX offset updated through shared area */ + PKTPULL(dhd->osh, pkt, prot->rx_dataoffset); + } + /* Actual length of the packet */ + PKTSETLEN(dhd->osh, pkt, 
ltoh16(msg->data_len)); +#if defined(WL_MONITOR) + if (dhd_monitor_enabled(dhd, ifidx) && + (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11)) { + dhd_rx_mon_pkt(dhd, msg, pkt, ifidx); + continue; + } +#endif + + if (!pktqhead) { + pktqhead = prevpkt = pkt; + ifidx = msg->cmn_hdr.if_id; + } else { + if (ifidx != msg->cmn_hdr.if_id) { + pkt_newidx = pkt; + if_newidx = msg->cmn_hdr.if_id; + pkt_cnt--; + pkt_cnt_newidx = 1; + break; + } else { + PKTSETNEXT(dhd->osh, prevpkt, pkt); + prevpkt = pkt; + } + } + +#ifdef DHD_TIMESYNC + if (dhd->prot->rx_ts_log_enabled) { + ts_timestamp_t *ts = (ts_timestamp_t *)&msg->ts; + dhd_timesync_log_rx_timestamp(dhd->ts, ifidx, ts->low, ts->high); + } +#endif /* DHD_TIMESYNC */ + } + + /* roll back read pointer for unprocessed message */ + if (msg_len > 0) { + if (ring->rd < msg_len / item_len) + ring->rd = ring->max_items - msg_len / item_len; + else + ring->rd -= msg_len / item_len; + } + + /* Update read pointer */ + dhd_prot_upd_read_idx(dhd, ring); + + DHD_SPIN_UNLOCK(prot->rx_lock, flags); + + pkt = pktqhead; + for (i = 0; pkt && i < pkt_cnt; i++, pkt = nextpkt) { + nextpkt = PKTNEXT(dhd->osh, pkt); + PKTSETNEXT(dhd->osh, pkt, NULL); +#ifdef DHD_LB_RXP + dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx); +#elif defined(DHD_RX_CHAINING) + dhd_rxchain_frame(dhd, pkt, ifidx); +#else + dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1); +#endif /* DHD_LB_RXP */ + } + + if (pkt_newidx) { +#ifdef DHD_LB_RXP + dhd_lb_rx_pkt_enqueue(dhd, pkt_newidx, if_newidx); +#elif defined(DHD_RX_CHAINING) + dhd_rxchain_frame(dhd, pkt_newidx, if_newidx); +#else + dhd_bus_rx_frame(dhd->bus, pkt_newidx, if_newidx, 1); +#endif /* DHD_LB_RXP */ + } + + pkt_cnt += pkt_cnt_newidx; + + /* Post another set of rxbufs to the device */ + dhd_prot_return_rxbuf(dhd, 0, pkt_cnt); + + /* After batch processing, check RX bound */ + n += pkt_cnt; + if (n >= bound) { + more = TRUE; + break; + } + } + + /* Call lb_dispatch only if packets are queued */ + if (n) { + 
DHD_LB_DISPATCH_RX_COMPL(dhd); + DHD_LB_DISPATCH_RX_PROCESS(dhd); + } + + return more; +} + /** * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring) */ @@ -3413,8 +4430,12 @@ dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring) { msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring; + if (ring == NULL) { + DHD_ERROR(("%s: NULL txflowring. exiting...\n", __FUNCTION__)); + return; + } /* Update read pointer */ - if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) { + if (dhd->dma_d2h_ring_upd_support) { ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx); } @@ -3468,9 +4489,45 @@ dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound) } } + DHD_LB_DISPATCH_TX_COMPL(dhd); + return more; } +int BCMFASTPATH +dhd_prot_process_trapbuf(dhd_pub_t *dhd) +{ + uint32 data; + dhd_dma_buf_t *trap_addr = &dhd->prot->fw_trap_buf; + + /* Interrupts can come in before this struct + * has been initialized. + */ + if (trap_addr->va == NULL) { + DHD_ERROR(("%s: trap_addr->va is NULL\n", __FUNCTION__)); + return 0; + } + + OSL_CACHE_INV((void *)trap_addr->va, sizeof(uint32)); + data = *(uint32 *)(trap_addr->va); + + if (data & D2H_DEV_FWHALT) { + DHD_ERROR(("Firmware trapped and trap_data is 0x%04x\n", data)); + if (data & D2H_DEV_EXT_TRAP_DATA) + { + if (dhd->extended_trap_data) { + OSL_CACHE_INV((void *)trap_addr->va, + BCMPCIE_EXT_TRAP_DATA_MAXLEN); + memcpy(dhd->extended_trap_data, (uint32 *)trap_addr->va, + BCMPCIE_EXT_TRAP_DATA_MAXLEN); + } + DHD_ERROR(("Extended trap data available\n")); + } + return data; + } + return 0; +} + /** called when DHD needs to check for 'ioctl complete' messages from the dongle */ int BCMFASTPATH dhd_prot_process_ctrlbuf(dhd_pub_t *dhd) @@ -3495,7 +4552,6 @@ dhd_prot_process_ctrlbuf(dhd_pub_t *dhd) /* Prefetch data to populate the cache */ OSL_PREFETCH(msg_addr); - if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) { DHD_ERROR(("%s: process %s msg addr %p 
len %d\n", __FUNCTION__, ring->name, msg_addr, msg_len)); @@ -3516,7 +4572,7 @@ dhd_prot_process_ctrlbuf(dhd_pub_t *dhd) static int BCMFASTPATH dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len) { - int buf_len = len; + uint32 buf_len = len; uint16 item_len; uint8 msg_type; cmn_msg_hdr_t *msg = NULL; @@ -3525,7 +4581,7 @@ dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 ASSERT(ring); item_len = ring->item_len; if (item_len == 0) { - DHD_ERROR(("%s: ringidx %d item_len %d buf_len %d\n", + DHD_ERROR(("%s: ringidx %d, item_len %d buf_len %d \n", __FUNCTION__, ring->idx, item_len, buf_len)); return BCME_ERROR; } @@ -3538,6 +4594,9 @@ dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 msg = (cmn_msg_hdr_t *)buf; + /* Wait until DMA completes, then fetch msg_type */ + msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len); + /* * Update the curr_rd to the current index in the ring, from where * the work item is fetched. 
This way if the fetched work item @@ -3550,13 +4609,6 @@ dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 ring->curr_rd += 1; } -#if defined(PCIE_D2H_SYNC) - /* Wait until DMA completes, then fetch msg_type */ - msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len); -#else - msg_type = msg->msg_type; -#endif /* !PCIE_D2H_SYNC */ - /* Prefetch data to populate the cache */ OSL_PREFETCH(buf + item_len); @@ -3570,7 +4622,7 @@ dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 ASSERT(msg_type < DHD_PROT_FUNCS); if (msg_type >= DHD_PROT_FUNCS) { - DHD_ERROR(("%s: msg_type %d item_len %d buf_len %d\n", + DHD_ERROR(("%s: msg_type %d, item_len %d buf_len %d\n", __FUNCTION__, msg_type, item_len, buf_len)); ret = BCME_ERROR; goto done; @@ -3593,9 +4645,7 @@ done: #ifdef DHD_RX_CHAINING dhd_rxchain_commit(dhd); #endif -#if defined(DHD_LB) - dhd_lb_dispatch(dhd, ring->idx); -#endif + return ret; } /* dhd_prot_process_msgtype */ @@ -3609,10 +4659,50 @@ dhd_prot_noop(dhd_pub_t *dhd, void *msg) static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg) { - pcie_ring_status_t *ring_status = (pcie_ring_status_t *)msg; + pcie_ring_status_t *ring_status = (pcie_ring_status_t *) msg; + uint32 request_id = ltoh32(ring_status->cmn_hdr.request_id); + uint16 status = ltoh16(ring_status->compl_hdr.status); + uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id); + DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n", - ring_status->cmn_hdr.request_id, ring_status->compl_hdr.status, - ring_status->compl_hdr.flow_ring_id, ring_status->write_idx)); + request_id, status, ring_id, ltoh16(ring_status->write_idx))); + + if (ltoh16(ring_status->compl_hdr.ring_id) != BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) + return; + if (status == BCMPCIE_BAD_PHASE) { + /* bad phase report from */ + DHD_ERROR(("Bad phase\n")); + } + if (status != BCMPCIE_BADOPTION) + return; + + if (request_id == 
DHD_H2D_DBGRING_REQ_PKTID) { + if (dhd->prot->h2dring_info_subn != NULL) { + if (dhd->prot->h2dring_info_subn->create_pending == TRUE) { + DHD_ERROR(("H2D ring create failed for info ring\n")); + dhd->prot->h2dring_info_subn->create_pending = FALSE; + } + else + DHD_ERROR(("ring create ID for a ring, create not pending\n")); + } else { + DHD_ERROR(("%s info submit ring doesn't exist\n", __FUNCTION__)); + } + } + else if (request_id == DHD_D2H_DBGRING_REQ_PKTID) { + if (dhd->prot->d2hring_info_cpln != NULL) { + if (dhd->prot->d2hring_info_cpln->create_pending == TRUE) { + DHD_ERROR(("D2H ring create failed for info ring\n")); + dhd->prot->d2hring_info_cpln->create_pending = FALSE; + } + else + DHD_ERROR(("ring create ID for info ring, create not pending\n")); + } else { + DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__)); + } + } + else { + DHD_ERROR(("don;t know how to pair with original request\n")); + } /* How do we track this to pair it with ??? */ return; } @@ -3637,20 +4727,20 @@ dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg) static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg) { - uint32 pktid; ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg; unsigned long flags; +#ifdef DHD_PKTID_AUDIT_RING + uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id); - pktid = ltoh32(ioct_ack->cmn_hdr.request_id); - -#if defined(DHD_PKTID_AUDIT_RING) - /* Skip DHD_IOCTL_REQ_PKTID = 0xFFFE */ + /* Skip audit for ADHD_IOCTL_REQ_PKTID = 0xFFFE */ if (pktid != DHD_IOCTL_REQ_PKTID) { - if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid, - DHD_TEST_IS_ALLOC) == BCME_ERROR) { - prhex("dhd_prot_ioctack_process:", - (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); - } +#ifndef IOCTLRESP_USE_CONSTMEM + DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid, + DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE); +#else + DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_map_handle_ioctl, pktid, + DHD_TEST_IS_ALLOC, msg, 
D2HRING_CTRL_CMPLT_ITEMSIZE); +#endif /* !IOCTLRESP_USE_CONSTMEM */ } #endif /* DHD_PKTID_AUDIT_RING */ @@ -3672,6 +4762,11 @@ dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg) if (ioct_ack->compl_hdr.status != 0) { DHD_ERROR(("got an error status for the ioctl request...need to handle that\n")); } +#ifdef REPORT_FATAL_TIMEOUTS + else { + dhd_stop_bus_timer(dhd); + } +#endif /* REPORT_FATAL_TIMEOUTS */ } /** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */ @@ -3684,26 +4779,22 @@ dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg) void *pkt; unsigned long flags; dhd_dma_buf_t retbuf; +#ifdef REPORT_FATAL_TIMEOUTS + uint16 dhd_xt_id; +#endif memset(&retbuf, 0, sizeof(dhd_dma_buf_t)); pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id); -#if defined(DHD_PKTID_AUDIT_RING) - { - int ret; +#ifdef DHD_PKTID_AUDIT_RING #ifndef IOCTLRESP_USE_CONSTMEM - ret = DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pkt_id, - DHD_DUPLICATE_FREE); + DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id, + DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE); #else - ret = DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle_ioctl, pkt_id, - DHD_DUPLICATE_FREE); + DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id, + DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE); #endif /* !IOCTLRESP_USE_CONSTMEM */ - if (ret == BCME_ERROR) { - prhex("dhd_prot_ioctcmplt_process:", - (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); - } - } #endif /* DHD_PKTID_AUDIT_RING */ DHD_GENERAL_LOCK(dhd, flags); @@ -3712,10 +4803,14 @@ dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg) DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n", __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id)); prhex("dhd_prot_ioctcmplt_process:", - (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); DHD_GENERAL_UNLOCK(dhd, flags); return; } + + /* Clear Response pending bit */ + prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING; + 
#ifndef IOCTLRESP_USE_CONSTMEM pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE); #else @@ -3723,9 +4818,10 @@ dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg) pkt = retbuf.va; #endif /* !IOCTLRESP_USE_CONSTMEM */ if (!pkt) { - prot->ioctl_state = 0; DHD_GENERAL_UNLOCK(dhd, flags); DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__)); + prhex("dhd_prot_ioctcmplt_process:", + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); return; } DHD_GENERAL_UNLOCK(dhd, flags); @@ -3733,11 +4829,37 @@ dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg) prot->ioctl_resplen = ltoh16(ioct_resp->resp_len); prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status); xt_id = ltoh16(ioct_resp->trans_id); - if (xt_id != prot->ioctl_trans_id) { + + if (xt_id != prot->ioctl_trans_id || prot->curr_ioctl_cmd != ioct_resp->cmd) { + DHD_ERROR(("%s: transaction id(%d %d) or cmd(%d %d) mismatch\n", + __FUNCTION__, xt_id, prot->ioctl_trans_id, + prot->curr_ioctl_cmd, ioct_resp->cmd)); +#ifdef REPORT_FATAL_TIMEOUTS + dhd_stop_cmd_timer(dhd); +#endif /* REPORT_FATAL_TIMEOUTS */ + dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_ERROR); + dhd_prot_debug_info_print(dhd); +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_TRANS_ID_MISMATCH; + dhd_bus_mem_dump(dhd); + } +#else ASSERT(0); +#endif /* DHD_FW_COREDUMP */ + dhd_schedule_reset(dhd); goto exit; } - +#ifdef REPORT_FATAL_TIMEOUTS + dhd_xt_id = dhd_get_request_id(dhd); + if (xt_id == dhd_xt_id) { + dhd_stop_cmd_timer(dhd); + } else { + DHD_ERROR(("%s: Cmd timer not stopped received xt_id %d stored xt_id %d", + __FUNCTION__, xt_id, dhd_xt_id)); + } +#endif /* REPORT_FATAL_TIMEOUTS */ DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n", pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen)); @@ -3759,6 +4881,13 @@ exit: #else free_ioctl_return_buffer(dhd, &retbuf); #endif /* !IOCTLRESP_USE_CONSTMEM */ + + /* Post another ioctl buf to the 
device */ + if (prot->cur_ioctlresp_bufs_posted > 0) { + prot->cur_ioctlresp_bufs_posted--; + } + + dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd); } /** called on MSG_TYPE_TX_STATUS message received from dongle */ @@ -3769,24 +4898,37 @@ dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg) host_txbuf_cmpl_t * txstatus; unsigned long flags; uint32 pktid; - void *pkt = NULL; + void *pkt; dmaaddr_t pa; uint32 len; void *dmah; void *secdma; + bool pkt_fate; +#ifdef DEVICE_TX_STUCK_DETECT + flow_ring_node_t *flow_ring_node; + uint16 flowid; +#endif /* DEVICE_TX_STUCK_DETECT */ + + + txstatus = (host_txbuf_cmpl_t *)msg; +#ifdef DEVICE_TX_STUCK_DETECT + flowid = txstatus->compl_hdr.flow_ring_id; + flow_ring_node = DHD_FLOW_RING(dhd, flowid); + /** + * Since we got a completion message on this flowid, + * update tx_cmpl time stamp + */ + flow_ring_node->tx_cmpl = OSL_SYSUPTIME(); +#endif /* DEVICE_TX_STUCK_DETECT */ /* locks required to protect circular buffer accesses */ DHD_GENERAL_LOCK(dhd, flags); - - txstatus = (host_txbuf_cmpl_t *)msg; pktid = ltoh32(txstatus->cmn_hdr.request_id); + pkt_fate = TRUE; -#if defined(DHD_PKTID_AUDIT_RING) - if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid, - DHD_DUPLICATE_FREE) == BCME_ERROR) { - prhex("dhd_prot_txstatus_process:", - (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE); - } +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid, + DHD_DUPLICATE_FREE, msg, D2HRING_TXCMPLT_ITEMSIZE); #endif /* DHD_PKTID_AUDIT_RING */ DHD_INFO(("txstatus for pktid 0x%04x\n", pktid)); @@ -3795,22 +4937,22 @@ dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg) /* Release the Lock when no more tx packets are pending */ if (prot->active_tx_count == 0) - DHD_OS_WAKE_UNLOCK(dhd); - + DHD_TXFL_WAKE_UNLOCK(dhd); } else { DHD_ERROR(("Extra packets are freed\n")); } ASSERT(pktid != 0); - #if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA) { int elem_ix; void **elem; bcm_workq_t *workq; + dmaaddr_t pa; + uint32 pa_len; 
- pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, - pktid, pa, len, dmah, secdma, PKTTYPE_DATA_TX); + pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, + pktid, pa, pa_len, dmah, secdma, PKTTYPE_DATA_TX); workq = &prot->tx_compl_prod; /* @@ -3823,7 +4965,7 @@ dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg) elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ); DHD_PKTTAG_SET_PA((dhd_pkttag_fr_t *)PKTTAG(pkt), pa); - DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), len); + DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), pa_len); if (elem_ix == BCM_RING_FULL) { DHD_ERROR(("tx_compl_prod BCM_RING_FULL\n")); @@ -3842,24 +4984,19 @@ dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg) } DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n", - __FUNCTION__, pkt, prot->tx_compl_prod_sync)); + __FUNCTION__, pkt, prot->tx_compl_prod_sync)); DHD_GENERAL_UNLOCK(dhd, flags); + return; - } + } workq_ring_full: #endif /* !DHD_LB_TXC */ - /* - * We can come here if no DHD_LB_TXC is enabled and in case where DHD_LB_TXC is - * defined but the tx_compl queue is full. - */ - if (pkt == NULL) { - pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, - pktid, pa, len, dmah, secdma, PKTTYPE_DATA_TX); - } + pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid, + pa, len, dmah, secdma, PKTTYPE_DATA_TX); if (pkt) { if (SECURE_DMA_ENAB(dhd->osh)) { @@ -3871,11 +5008,28 @@ workq_ring_full: SECURE_DMA_UNMAP(dhd->osh, (uint) pa, (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah, secdma, offset); - } else { + } else DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah); +#ifdef DMAMAP_STATS + dhd->dma_stats.txdata--; + dhd->dma_stats.txdata_sz -= len; +#endif /* DMAMAP_STATS */ +#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING) + if (dhd->d11_tx_status) { + uint16 tx_status; + + tx_status = ltoh16(txstatus->compl_hdr.status) & + WLFC_CTL_PKTFLAG_MASK; + pkt_fate = (tx_status == WLFC_CTL_PKTFLAG_DISCARD) ? 
TRUE : FALSE; + + DHD_DBG_PKT_MON_TX_STATUS(dhd, pkt, pktid, tx_status); +#ifdef DHD_PKT_LOGGING + DHD_PKTLOG_TXS(dhd, pkt, pktid, tx_status); +#endif /* DHD_PKT_LOGGING */ } +#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */ #if defined(BCMPCIE) - dhd_txcomplete(dhd, pkt, true); + dhd_txcomplete(dhd, pkt, pkt_fate); #endif #if DHD_DBG_SHOW_METADATA @@ -3891,9 +5045,21 @@ workq_ring_full: dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len); } #endif /* DHD_DBG_SHOW_METADATA */ + DHD_GENERAL_UNLOCK(dhd, flags); PKTFREE(dhd->osh, pkt, TRUE); + DHD_GENERAL_LOCK(dhd, flags); DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id, txstatus->tx_status); + +#ifdef DHD_TIMESYNC + if (dhd->prot->tx_ts_log_enabled) { + ts_timestamp_t *ts = (ts_timestamp_t *)&(txstatus->ts); + dhd_timesync_log_tx_timestamp(dhd->ts, + txstatus->compl_hdr.flow_ring_id, + txstatus->cmn_hdr.if_id, + ts->low, ts->high); + } +#endif /* DHD_TIMESYNC */ } DHD_GENERAL_UNLOCK(dhd, flags); @@ -3917,12 +5083,9 @@ dhd_prot_event_process(dhd_pub_t *dhd, void *msg) evnt = (wlevent_req_msg_t *)msg; bufid = ltoh32(evnt->cmn_hdr.request_id); -#if defined(DHD_PKTID_AUDIT_RING) - if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, bufid, - DHD_DUPLICATE_FREE) == BCME_ERROR) { - prhex("dhd_prot_event_process:", - (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); - } +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid, + DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE); #endif /* DHD_PKTID_AUDIT_RING */ buflen = ltoh16(evnt->event_data_len); @@ -3930,9 +5093,8 @@ dhd_prot_event_process(dhd_pub_t *dhd, void *msg) ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr); /* Post another rxbuf to the device */ - if (prot->cur_event_bufs_posted) { + if (prot->cur_event_bufs_posted) prot->cur_event_bufs_posted--; - } dhd_msgbuf_rxbuf_post_event_bufs(dhd); /* locks required to protect pktid_map */ @@ -3941,108 +5103,69 @@ dhd_prot_event_process(dhd_pub_t *dhd, void 
*msg) DHD_GENERAL_UNLOCK(dhd, flags); if (!pkt) { + DHD_ERROR(("%s: pkt is NULL for pktid %d\n", __FUNCTION__, bufid)); return; } /* DMA RX offset updated through shared area */ - if (dhd->prot->rx_dataoffset) { + if (dhd->prot->rx_dataoffset) PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); - } PKTSETLEN(dhd->osh, pkt, buflen); dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1); } -/** called on MSG_TYPE_RX_CMPLT message received from dongle */ +/** called on MSG_TYPE_INFO_BUF_CMPLT message received from dongle */ static void BCMFASTPATH -dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void *msg) +dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf) { - host_rxbuf_cmpl_t *rxcmplt_h; - uint16 data_offset; /* offset at which data starts */ - void *pkt; - unsigned long flags; - uint ifidx; + info_buf_resp_t *resp; uint32 pktid; -#if defined(DHD_LB_RXC) - const bool free_pktid = FALSE; -#else - const bool free_pktid = TRUE; -#endif /* DHD_LB_RXC */ + uint16 buflen; + void * pkt; + unsigned long flags; - /* RXCMPLT HDR */ - rxcmplt_h = (host_rxbuf_cmpl_t *)msg; + resp = (info_buf_resp_t *)buf; + pktid = ltoh32(resp->cmn_hdr.request_id); + buflen = ltoh16(resp->info_data_len); - /* offset from which data starts is populated in rxstatus0 */ - data_offset = ltoh16(rxcmplt_h->data_offset); - - pktid = ltoh32(rxcmplt_h->cmn_hdr.request_id); - -#if defined(DHD_PKTID_AUDIT_RING) - if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid, - DHD_DUPLICATE_FREE) == BCME_ERROR) { - prhex("dhd_prot_rxcmplt_process:", - (uchar *)msg, D2HRING_RXCMPLT_ITEMSIZE); - } +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid, + DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE); #endif /* DHD_PKTID_AUDIT_RING */ - DHD_GENERAL_LOCK(dhd, flags); - pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_DATA_RX, free_pktid); - DHD_GENERAL_UNLOCK(dhd, flags); + DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n", + pktid, buflen, 
resp->cmn_hdr.flags, ltoh16(resp->seqnum), + dhd->prot->rx_dataoffset)); - if (!pkt) { + if (!dhd->prot->infobufpost) { + DHD_ERROR(("infobuf posted are zero, but there is a completion\n")); return; } - /* Post another set of rxbufs to the device */ - dhd_prot_return_rxbuf(dhd, pktid, 1); + dhd->prot->infobufpost--; + dhd_prot_infobufpost(dhd); - DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, pktdata %p, metalen %d\n", - ltoh32(rxcmplt_h->cmn_hdr.request_id), data_offset, ltoh16(rxcmplt_h->data_len), - rxcmplt_h->cmn_hdr.if_id, rxcmplt_h->cmn_hdr.flags, PKTDATA(dhd->osh, pkt), - ltoh16(rxcmplt_h->metadata_len))); -#if DHD_DBG_SHOW_METADATA - if (dhd->prot->metadata_dbg && - dhd->prot->rx_metadata_offset && rxcmplt_h->metadata_len) { - uchar *ptr; - ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->rx_metadata_offset); - /* header followed by data */ - bcm_print_bytes("rxmetadata", ptr, rxcmplt_h->metadata_len); - dhd_prot_print_metadata(dhd, ptr, rxcmplt_h->metadata_len); - } -#endif /* DHD_DBG_SHOW_METADATA */ + DHD_GENERAL_LOCK(dhd, flags); + pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE); + DHD_GENERAL_UNLOCK(dhd, flags); - if (rxcmplt_h->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) { - DHD_INFO(("D11 frame rxed \n")); - } + if (!pkt) + return; - /* data_offset from buf start */ - if (data_offset) { - /* data offset given from dongle after split rx */ - PKTPULL(dhd->osh, pkt, data_offset); /* data offset */ - } else { - /* DMA RX offset updated through shared area */ - if (dhd->prot->rx_dataoffset) { - PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); - } - } - /* Actual length of the packet */ - PKTSETLEN(dhd->osh, pkt, ltoh16(rxcmplt_h->data_len)); + /* DMA RX offset updated through shared area */ + if (dhd->prot->rx_dataoffset) + PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); - ifidx = rxcmplt_h->cmn_hdr.if_id; + PKTSETLEN(dhd->osh, pkt, buflen); -#if defined(DHD_LB_RXP) - dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx); -#else /* ! 
DHD_LB_RXP */ -#ifdef DHD_RX_CHAINING - /* Chain the packets */ - dhd_rxchain_frame(dhd, pkt, ifidx); -#else /* ! DHD_RX_CHAINING */ - /* offset from which data starts is populated in rxstatus0 */ - dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1); -#endif /* ! DHD_RX_CHAINING */ -#endif /* ! DHD_LB_RXP */ -} /* dhd_prot_rxcmplt_process */ + /* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a + * special ifidx of -1. This is just internal to dhd to get the data to + * dhd_linux.c:dhd_rx_frame() from here (dhd_prot_infobuf_cmplt_process). + */ + dhd_bus_rx_frame(dhd->bus, pkt, DHD_EVENT_IF /* ifidx HACK */, 1); +} /** Stop protocol: sync w/dongle state. */ void dhd_prot_stop(dhd_pub_t *dhd) @@ -4097,20 +5220,24 @@ dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx) } flowid = DHD_PKT_GET_FLOWID(PKTBUF); - flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid]; ring = (msgbuf_ring_t *)flow_ring_node->prot_info; +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ DHD_GENERAL_LOCK(dhd, flags); /* Create a unique 32-bit packet id */ - pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_map_handle, PKTBUF); + pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map, + PKTBUF, PKTTYPE_DATA_TX); #if defined(DHD_PCIE_PKTID) if (pktid == DHD_PKTID_INVALID) { - DHD_ERROR(("Pktid pool depleted.\n")); + DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__)); /* * If we return error here, the caller would queue the packet * again. So we'll just free the skb allocated in DMA Zone. @@ -4125,18 +5252,19 @@ dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx) txdesc = (host_txbuf_post_t *) dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); if (txdesc == NULL) { -#if defined(DHD_PCIE_PKTID) - void *dmah; - void *secdma; - /* Free up the PKTID. physaddr and pktlen will be garbage. 
*/ - DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, pktid, - pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK); -#endif /* DHD_PCIE_PKTID */ DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n", __FUNCTION__, __LINE__, prot->active_tx_count)); - goto err_no_res_pktfree; + goto err_free_pktid; } +#ifdef DBG_PKT_MON + DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid); +#endif /* DBG_PKT_MON */ +#ifdef DHD_PKT_LOGGING + DHD_PKTLOG_TX(dhd, PKTBUF, pktid); +#endif /* DHD_PKT_LOGGING */ + + /* Extract the data pointer and length information */ pktdata = PKTDATA(dhd->osh, PKTBUF); pktlen = PKTLEN(dhd->osh, PKTBUF); @@ -4153,29 +5281,35 @@ dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx) int offset = 0; BCM_REFERENCE(offset); - if (prot->tx_metadata_offset) { + if (prot->tx_metadata_offset) offset = prot->tx_metadata_offset + ETHER_HDR_LEN; - } pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0, ring->dma_buf.secdma, offset); - } else { + } +#ifndef BCM_SECURE_DMA + else pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0); - } +#endif /* #ifndef BCM_SECURE_DMA */ - if ((PHYSADDRHI(pa) == 0) && (PHYSADDRLO(pa) == 0)) { - DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n")); + if (PHYSADDRISZERO(pa)) { + DHD_ERROR(("%s: Something really bad, unless 0 is " + "a valid phyaddr for pa\n", __FUNCTION__)); ASSERT(0); + goto err_rollback_idx; } +#ifdef DMAMAP_STATS + dhd->dma_stats.txdata++; + dhd->dma_stats.txdata_sz += pktlen; +#endif /* DMAMAP_STATS */ /* No need to lock. 
Save the rest of the packet's metadata */ - DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_map_handle, PKTBUF, pktid, + DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_tx_map, PKTBUF, pktid, pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX); #ifdef TXP_FLUSH_NITEMS - if (ring->pend_items_count == 0) { + if (ring->pend_items_count == 0) ring->start_addr = (void *)txdesc; - } ring->pend_items_count++; #endif @@ -4184,6 +5318,7 @@ dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx) /* Common message hdr */ txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST; txdesc->cmn_hdr.if_id = ifidx; + txdesc->cmn_hdr.flags = ring->current_phase; txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3; prio = (uint8)PKTPRIO(PKTBUF); @@ -4201,10 +5336,9 @@ dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx) /* Handle Tx metadata */ headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF); - if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset)) { + if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset)) DHD_ERROR(("No headroom for Metadata tx %d %d\n", prot->tx_metadata_offset, headroom)); - } if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) { DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset)); @@ -4216,14 +5350,41 @@ dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx) meta_pa = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF), prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF, 0, ring->dma_buf.secdma); - } else { + } +#ifndef BCM_SECURE_DMA + else meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), prot->tx_metadata_offset, DMA_RX, PKTBUF, 0); - } +#endif /* #ifndef BCM_SECURE_DMA */ if (PHYSADDRISZERO(meta_pa)) { - DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n")); + /* Unmap the data pointer to a DMA-able address */ + if (SECURE_DMA_ENAB(dhd->osh)) { + + int offset = 0; + BCM_REFERENCE(offset); + + if (prot->tx_metadata_offset) { + offset = prot->tx_metadata_offset + 
ETHER_HDR_LEN; + } + + SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, + DMA_TX, 0, DHD_DMAH_NULL, ring->dma_buf.secdma, offset); + } +#ifndef BCM_SECURE_DMA + else { + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_TX, 0, DHD_DMAH_NULL); + } +#endif /* #ifndef BCM_SECURE_DMA */ +#ifdef TXP_FLUSH_NITEMS + /* update pend_items_count */ + ring->pend_items_count--; +#endif /* TXP_FLUSH_NITEMS */ + + DHD_ERROR(("%s: Something really bad, unless 0 is " + "a valid phyaddr for meta_pa\n", __FUNCTION__)); ASSERT(0); + goto err_rollback_idx; } /* Adjust the data pointer back to original value */ @@ -4238,9 +5399,8 @@ dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx) txdesc->metadata_buf_addr.low_addr = 0; } -#if defined(DHD_PKTID_AUDIT_RING) - DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pktid, - DHD_DUPLICATE_ALLOC); +#ifdef DHD_PKTID_AUDIT_RING + DHD_PKTID_AUDIT(dhd, prot->pktid_tx_map, pktid, DHD_DUPLICATE_ALLOC); #endif /* DHD_PKTID_AUDIT_RING */ txdesc->cmn_hdr.request_id = htol32(pktid); @@ -4267,18 +5427,49 @@ dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx) * Take a wake lock, do not sleep if we have atleast one packet * to finish. */ - if (prot->active_tx_count == 1) - DHD_OS_WAKE_LOCK(dhd); + if (prot->active_tx_count >= 1) + DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT); DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return BCME_OK; +err_rollback_idx: + /* roll back write pointer for unprocessed message */ + if (ring->wr == 0) { + ring->wr = ring->max_items - 1; + } else { + ring->wr--; + if (ring->wr == 0) { + DHD_INFO(("%s: flipping the phase now\n", ring->name)); + ring->current_phase = ring->current_phase ? + 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT; + } + } + +err_free_pktid: +#if defined(DHD_PCIE_PKTID) + { + void *dmah; + void *secdma; + /* Free up the PKTID. physaddr and pktlen will be garbage. 
*/ + DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid, + pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK); + } + err_no_res_pktfree: +#endif /* DHD_PCIE_PKTID */ DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif return BCME_NORESOURCE; } /* dhd_prot_txdata */ @@ -4363,19 +5554,18 @@ dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt) #endif /* DHD_LB_RXC */ - if (prot->rxbufpost >= rxcnt) { - prot->rxbufpost -= rxcnt; + prot->rxbufpost -= (uint16)rxcnt; } else { /* ASSERT(0); */ prot->rxbufpost = 0; } #if !defined(DHD_LB_RXC) - if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) { + if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */ - } #endif /* !DHD_LB_RXC */ + return; } /* called before an ioctl is sent to the dongle */ @@ -4404,7 +5594,8 @@ int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int uint8 action; if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) { - DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + DHD_ERROR(("%s : bus is down. we have nothing to do - bs: %d, has: %d\n", + __FUNCTION__, dhd->busstate, dhd->hang_was_sent)); goto done; } @@ -4416,14 +5607,13 @@ int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int DHD_TRACE(("%s: Enter\n", __FUNCTION__)); if (ioc->cmd == WLC_SET_PM) { - DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, *(char *)buf)); + DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? 
*(char *)buf : 0)); } ASSERT(len <= WLC_IOCTL_MAXLEN); - if (len > WLC_IOCTL_MAXLEN) { + if (len > WLC_IOCTL_MAXLEN) goto done; - } action = ioc->set; @@ -4433,16 +5623,15 @@ int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); } else { ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); - if (ret > 0) { + if (ret > 0) ioc->used = ret; - } } /* Too many programs assume ioctl() returns 0 on success */ if (ret >= 0) { ret = 0; } else { - DHD_ERROR(("%s: status ret value is %d \n", __FUNCTION__, ret)); + DHD_INFO(("%s: status ret value is %d \n", __FUNCTION__, ret)); dhd->dongle_error = ret; } @@ -4452,9 +5641,8 @@ int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int int slen, val = 0; slen = strlen("wme_dp") + 1; - if (len >= (int)(slen + sizeof(int))) { + if (len >= (int)(slen + sizeof(int))) bcopy(((char *)buf + slen), &val, sizeof(int)); - } dhd->wme_dp = (uint8) ltoh32(val); } @@ -4483,6 +5671,11 @@ dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len) msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN); msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE); +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ + DHD_GENERAL_LOCK(dhd, flags); ioct_rqst = (ioct_reqst_hdr_t *) @@ -4490,6 +5683,9 @@ dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len) if (ioct_rqst == NULL) { DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif return 0; } @@ -4509,12 +5705,16 @@ dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len) ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK; ioct_rqst->msg.if_id = 0; + ioct_rqst->msg.flags = ring->current_phase; bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen); /* update ring's WR index and ring doorbell to dongle */ dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1); DHD_GENERAL_UNLOCK(dhd, 
flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif return 0; } @@ -4522,22 +5722,62 @@ dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len) /** test / loopback */ void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer) { - if (dmaxfer == NULL) { + if (dmaxfer == NULL) return; - } dhd_dma_buf_free(dhd, &dmaxfer->srcmem); dhd_dma_buf_free(dhd, &dmaxfer->dstmem); } +/** test / loopback */ +int +dhd_prepare_schedule_dmaxfer_free(dhd_pub_t *dhdp) +{ + dhd_prot_t *prot = dhdp->prot; + dhd_dmaxfer_t *dmaxfer = &prot->dmaxfer; + dmaxref_mem_map_t *dmap = NULL; + + dmap = MALLOCZ(dhdp->osh, sizeof(dmaxref_mem_map_t)); + if (!dmap) { + DHD_ERROR(("%s: dmap alloc failed\n", __FUNCTION__)); + goto mem_alloc_fail; + } + dmap->srcmem = &(dmaxfer->srcmem); + dmap->dstmem = &(dmaxfer->dstmem); + + DMAXFER_FREE(dhdp, dmap); + return BCME_OK; + +mem_alloc_fail: + if (dmap) { + MFREE(dhdp->osh, dmap, sizeof(dmaxref_mem_map_t)); + dmap = NULL; + } + return BCME_NOMEM; +} /* dhd_prepare_schedule_dmaxfer_free */ + + +/** test / loopback */ +void +dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap) +{ + + dhd_dma_buf_free(dhdp, dmmap->srcmem); + dhd_dma_buf_free(dhdp, dmmap->dstmem); + + MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t)); + dmmap = NULL; + +} /* dmaxfer_free_prev_dmaaddr */ + + /** test / loopback */ int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer) { uint i; - if (!dmaxfer) { + if (!dmaxfer) return BCME_ERROR; - } /* First free up existing buffers */ dmaxfer_free_dmaaddr(dhd, dmaxfer); @@ -4569,20 +5809,35 @@ static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg) { dhd_prot_t *prot = dhd->prot; + uint64 end_usec; + pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg; + BCM_REFERENCE(cmplt); + DHD_INFO(("DMA status: %d\n", cmplt->compl_hdr.status)); OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len); if (prot->dmaxfer.srcmem.va && 
prot->dmaxfer.dstmem.va) { if (memcmp(prot->dmaxfer.srcmem.va, prot->dmaxfer.dstmem.va, prot->dmaxfer.len)) { - bcm_print_bytes("XFER SRC: ", + prhex("XFER SRC: ", prot->dmaxfer.srcmem.va, prot->dmaxfer.len); - bcm_print_bytes("XFER DST: ", + prhex("XFER DST: ", prot->dmaxfer.dstmem.va, prot->dmaxfer.len); - } else { - DHD_INFO(("DMA successful\n")); + DHD_ERROR(("DMA failed\n")); + } + else { + if (prot->dmaxfer.d11_lpbk) { + DHD_ERROR(("DMA successful with d11 loopback\n")); + } else { + DHD_ERROR(("DMA successful without d11 loopback\n")); + } } } - dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer); + end_usec = OSL_SYSUPTIME_US(); + dhd_prepare_schedule_dmaxfer_free(dhd); + end_usec -= prot->dmaxfer.start_usec; + DHD_ERROR(("DMA loopback %d bytes in %llu usec, %u kBps\n", + prot->dmaxfer.len, end_usec, + (prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)(end_usec + 1)))); dhd->prot->dmaxfer.in_progress = FALSE; } @@ -4592,7 +5847,7 @@ dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg) * by a spinlock. 
*/ int -dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay) +dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay, uint d11_lpbk) { unsigned long flags; int ret = BCME_OK; @@ -4614,6 +5869,11 @@ dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay) return ret; } +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ + DHD_GENERAL_LOCK(dhd, flags); dmap = (pcie_dma_xfer_params_t *) @@ -4623,6 +5883,9 @@ dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay) dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer); prot->dmaxfer.in_progress = FALSE; DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif return BCME_NOMEM; } @@ -4630,6 +5893,7 @@ dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay) dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER; dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID); dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + dmap->cmn_hdr.flags = ring->current_phase; ring->seqnum++; dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa)); @@ -4639,12 +5903,19 @@ dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay) dmap->xfer_len = htol32(prot->dmaxfer.len); dmap->srcdelay = htol32(prot->dmaxfer.srcdelay); dmap->destdelay = htol32(prot->dmaxfer.destdelay); + prot->dmaxfer.d11_lpbk = d11_lpbk ? 
1 : 0; + dmap->flags = (prot->dmaxfer.d11_lpbk << PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT) + & PCIE_DMA_XFER_FLG_D11_LPBK_MASK; /* update ring's WR index and ring doorbell to dongle */ + prot->dmaxfer.start_usec = OSL_SYSUPTIME_US(); dhd_prot_ring_write_complete(dhd, ring, dmap, 1); DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif - DHD_ERROR(("DMA Started...\n")); + DHD_INFO(("DMA Started...\n")); return BCME_OK; } /* dhdmsgbuf_dmaxfer_req */ @@ -4654,28 +5925,75 @@ static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action) { int ret = 0; + uint copylen = 0; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); - /* Respond "bcmerror" and "bcmerrorstr" with local cache */ if (cmd == WLC_GET_VAR && buf) { - if (!strcmp((char *)buf, "bcmerrorstr")) - { - strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN); + if (!len || !*(uint8 *)buf) { + DHD_ERROR(("%s(): Zero length bailing\n", __FUNCTION__)); + ret = BCME_BADARG; goto done; } - else if (!strcmp((char *)buf, "bcmerror")) - { - *(int *)buf = dhd->dongle_error; + + /* Respond "bcmerror" and "bcmerrorstr" with local cache */ + copylen = MIN(len, BCME_STRLEN); + + if ((len >= strlen("bcmerrorstr")) && + (!strcmp((char *)buf, "bcmerrorstr"))) { + + strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen); + *(uint8 *)((uint8 *)buf + (copylen - 1)) = '\0'; + + goto done; + } else if ((len >= strlen("bcmerror")) && + !strcmp((char *)buf, "bcmerror")) { + + *(uint32 *)(uint32 *)buf = dhd->dongle_error; + goto done; } } - ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx); DHD_CTL(("query_ioctl: ACTION %d ifdix %d cmd %d len %d \n", - action, ifidx, cmd, len)); + action, ifidx, cmd, len)); +#ifdef REPORT_FATAL_TIMEOUTS + /* + * These timers "should" be started before sending H2D interrupt. + * Think of the scenario where H2D interrupt is fired and the Dongle + * responds back immediately. 
From the DPC we would stop the cmd, bus + * timers. But the process context could have switched out leading to + * a situation where the timers are Not started yet, but are actually stopped. + * + * Disable preemption from the time we start the timer until we are done + * with seding H2D interrupts. + */ + OSL_DISABLE_PREEMPTION(dhd->osh); + dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd); + dhd_start_cmd_timer(dhd); + dhd_start_bus_timer(dhd); +#endif /* REPORT_FATAL_TIMEOUTS */ + + ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx); + +#ifdef REPORT_FATAL_TIMEOUTS + /* For some reason if we fail to ring door bell, stop the timers */ + if (ret < 0) { + DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__)); + dhd_stop_cmd_timer(dhd); + dhd_stop_bus_timer(dhd); + OSL_ENABLE_PREEMPTION(dhd->osh); + goto done; + } + OSL_ENABLE_PREEMPTION(dhd->osh); +#else + if (ret < 0) { + DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__)); + goto done; + } +#endif /* REPORT_FATAL_TIMEOUTS */ /* wait for IOCTL completion message from dongle and get first fragment */ ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf); @@ -4698,52 +6016,107 @@ dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf) DHD_TRACE(("%s: Enter\n", __FUNCTION__)); - if (dhd->dongle_reset) { + if (dhd_query_bus_erros(dhd)) { ret = -EIO; goto out; } - if (prot->cur_ioctlresp_bufs_posted) { - prot->cur_ioctlresp_bufs_posted--; + timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received); + +#ifdef DHD_RECOVER_TIMEOUT + if (prot->ioctl_received == 0) { + uint32 intstatus = 0; + uint32 intmask = 0; + intstatus = si_corereg(dhd->bus->sih, + dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0); + intmask = si_corereg(dhd->bus->sih, + dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0); + if ((intstatus) && (!intmask) && (timeleft == 0) && (!dhd_query_bus_erros(dhd))) + { + DHD_ERROR(("%s: iovar timeout trying again intstatus=%x intmask=%x\n", + 
__FUNCTION__, intstatus, intmask)); + DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters\r\n")); + DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n" + "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n" + "dpc_return_busdown_count=%lu\n", + dhd->bus->resume_intr_enable_count, dhd->bus->dpc_intr_enable_count, + dhd->bus->isr_intr_disable_count, + dhd->bus->suspend_intr_disable_count, + dhd->bus->dpc_return_busdown_count)); + + dhd_prot_process_ctrlbuf(dhd); + + timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received); + /* Enable Back Interrupts using IntMask */ + dhdpcie_bus_intr_enable(dhd->bus); + } } +#endif /* DHD_RECOVER_TIMEOUT */ - dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd); + if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) { + uint32 intstatus; - timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received); - if (timeleft == 0) { dhd->rxcnt_timeout++; dhd->rx_ctlerrs++; + dhd->iovar_timeout_occured = TRUE; DHD_ERROR(("%s: resumed on timeout rxcnt_timeout %d ioctl_cmd %d " "trans_id %d state %d busstate=%d ioctl_received=%d\n", __FUNCTION__, dhd->rxcnt_timeout, prot->curr_ioctl_cmd, prot->ioctl_trans_id, prot->ioctl_state, dhd->busstate, prot->ioctl_received)); + if (prot->curr_ioctl_cmd == WLC_SET_VAR || + prot->curr_ioctl_cmd == WLC_GET_VAR) { + char iovbuf[32]; + int i; + int dump_size = 128; + uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va; + memset(iovbuf, 0, sizeof(iovbuf)); + strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1); + iovbuf[sizeof(iovbuf) - 1] = '\0'; + DHD_ERROR(("Current IOVAR (%s): %s\n", + prot->curr_ioctl_cmd == WLC_SET_VAR ? 
+ "WLC_SET_VAR" : "WLC_GET_VAR", iovbuf)); + DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n")); + for (i = 0; i < dump_size; i++) { + DHD_ERROR(("%02X ", ioctl_buf[i])); + if ((i % 32) == 31) { + DHD_ERROR(("\n")); + } + } + DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n")); + } + /* Check the PCIe link status by reading intstatus register */ + intstatus = si_corereg(dhd->bus->sih, + dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0); + if (intstatus == (uint32)-1) { + DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__)); + dhd->bus->is_linkdown = TRUE; + } + + dhd_bus_dump_console_buffer(dhd->bus); dhd_prot_debug_info_print(dhd); #ifdef DHD_FW_COREDUMP - /* As soon as FW TRAP occurs, FW dump will be collected from dhdpcie_checkdied */ - if (dhd->memdump_enabled && !dhd->dongle_trap_occured) { + /* Collect socram dump */ + if (dhd->memdump_enabled) { /* collect core dump */ dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT; dhd_bus_mem_dump(dhd); } #endif /* DHD_FW_COREDUMP */ - if (dhd->rxcnt_timeout >= MAX_CNTL_RX_TIMEOUT) { #ifdef SUPPORT_LINKDOWN_RECOVERY #ifdef CONFIG_ARCH_MSM - dhd->bus->no_cfg_restore = 1; + dhd->bus->no_cfg_restore = 1; #endif /* CONFIG_ARCH_MSM */ #endif /* SUPPORT_LINKDOWN_RECOVERY */ - DHD_ERROR(("%s: timeout > MAX_CNTL_TX_TIMEOUT\n", __FUNCTION__)); - } ret = -ETIMEDOUT; goto out; } else { if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) { DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n", __FUNCTION__, prot->ioctl_received)); - ret = -ECONNABORTED; + ret = -EINVAL; goto out; } dhd->rxcnt_timeout = 0; @@ -4752,25 +6125,13 @@ dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf) __FUNCTION__, prot->ioctl_resplen)); } - if (dhd->dongle_trap_occured) { -#ifdef SUPPORT_LINKDOWN_RECOVERY -#ifdef CONFIG_ARCH_MSM - dhd->bus->no_cfg_restore = 1; -#endif /* CONFIG_ARCH_MSM */ -#endif /* SUPPORT_LINKDOWN_RECOVERY */ - DHD_ERROR(("%s: TRAP occurred!!\n", __FUNCTION__)); - ret = -EREMOTEIO; - 
goto out; - } - - if (dhd->prot->ioctl_resplen > len) { + if (dhd->prot->ioctl_resplen > len) dhd->prot->ioctl_resplen = (uint16)len; - } - if (buf) { + if (buf) bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen); - } ret = (int)(dhd->prot->ioctl_status); + out: DHD_GENERAL_LOCK(dhd, flags); dhd->prot->ioctl_state = 0; @@ -4801,14 +6162,50 @@ dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, u return -EIO; } - /* Fill up msgbuf for ioctl req */ - ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx); - DHD_CTL(("ACTION %d ifdix %d cmd %d len %d \n", action, ifidx, cmd, len)); +#ifdef REPORT_FATAL_TIMEOUTS + /* + * These timers "should" be started before sending H2D interrupt. + * Think of the scenario where H2D interrupt is fired and the Dongle + * responds back immediately. From the DPC we would stop the cmd, bus + * timers. But the process context could have switched out leading to + * a situation where the timers are Not started yet, but are actually stopped. + * + * Disable preemption from the time we start the timer until we are done + * with sending H2D interrupts. 
+ */ + OSL_DISABLE_PREEMPTION(dhd->osh); + dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd); + dhd_start_cmd_timer(dhd); + dhd_start_bus_timer(dhd); +#endif /* REPORT_FATAL_TIMEOUTS */ + + /* Fill up msgbuf for ioctl req */ + ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx); + +#ifdef REPORT_FATAL_TIMEOUTS + /* For some reason if we fail to ring door bell, stop the timers */ + if (ret < 0) { + DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__)); + dhd_stop_cmd_timer(dhd); + dhd_stop_bus_timer(dhd); + OSL_ENABLE_PREEMPTION(dhd->osh); + goto done; + } + + OSL_ENABLE_PREEMPTION(dhd->osh); +#else + if (ret < 0) { + DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__)); + goto done; + } +#endif /* REPORT_FATAL_TIMEOUTS */ + ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf); +done: return ret; } @@ -4820,7 +6217,7 @@ int dhd_prot_ctl_complete(dhd_pub_t *dhd) /** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */ int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name, - void *params, int plen, void *arg, int len, bool set) + void *params, int plen, void *arg, int len, bool set) { return BCME_UNSUPPORTED; } @@ -4829,7 +6226,6 @@ int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name, void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b) { -#if defined(PCIE_D2H_SYNC) if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) bcm_bprintf(b, "\nd2h_sync: SEQNUM:"); else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) @@ -4838,12 +6234,13 @@ void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b) bcm_bprintf(b, "\nd2h_sync: NONE:"); bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n", dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot); -#endif /* PCIE_D2H_SYNC */ bcm_bprintf(b, "\nDongle DMA Indices: h2d %d d2h %d index size %d bytes\n", - DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support), - DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support), + dhd->dma_h2d_ring_upd_support, + 
dhd->dma_d2h_ring_upd_support, dhd->prot->rw_index_sz); + bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n", + h2d_max_txpost, dhd->prot->h2d_max_txpost); } /* Update local copy of dongle statistics */ @@ -4870,6 +6267,11 @@ dhd_post_dummy_msg(dhd_pub_t *dhd) dhd_prot_t *prot = dhd->prot; msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ + DHD_GENERAL_LOCK(dhd, flags); hevent = (hostevent_hdr_t *) @@ -4877,6 +6279,9 @@ dhd_post_dummy_msg(dhd_pub_t *dhd) if (hevent == NULL) { DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif return -1; } @@ -4885,6 +6290,7 @@ dhd_post_dummy_msg(dhd_pub_t *dhd) ring->seqnum++; hevent->msg.msg_type = MSG_TYPE_HOST_EVNT; hevent->msg.if_id = 0; + hevent->msg.flags = ring->current_phase; /* Event payload */ hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD); @@ -4894,6 +6300,9 @@ dhd_post_dummy_msg(dhd_pub_t *dhd) */ dhd_prot_ring_write_complete(dhd, ring, hevent, 1); DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif return 0; } @@ -4913,10 +6322,18 @@ dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring, if (ret_buf == NULL) { /* if alloc failed , invalidate cached read ptr */ - if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) { + if (dhd->dma_d2h_ring_upd_support) { ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx); } else { dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx); +#ifdef SUPPORT_LINKDOWN_RECOVERY + /* Check if ring->rd is valid */ + if (ring->rd >= ring->max_items) { + dhd->bus->read_shm_fail = TRUE; + DHD_ERROR(("%s: Invalid rd idx=%d\n", ring->name, ring->rd)); + return NULL; + } +#endif /* SUPPORT_LINKDOWN_RECOVERY */ } /* Try allocating once more */ @@ -4928,6 +6345,11 @@ 
dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring, } } + if (ret_buf == HOST_RING_BASE(ring)) { + DHD_INFO(("%s: setting the phase now\n", ring->name)); + ring->current_phase = ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT; + } + /* Return alloced space */ return ret_buf; } @@ -4949,6 +6371,10 @@ dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx uint16 alloced = 0; msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + if (dhd_query_bus_erros(dhd)) { + return -EIO; + } + rqstlen = len; resplen = len; @@ -4958,11 +6384,19 @@ dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx /* so making the assumption that input length can never be more than 1.5k */ rqstlen = MIN(rqstlen, MSGBUF_MAX_MSG_SIZE); +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ + DHD_GENERAL_LOCK(dhd, flags); if (prot->ioctl_state) { DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state)); DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif return BCME_BUSY; } else { prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING; @@ -4977,13 +6411,16 @@ dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx prot->curr_ioctl_cmd = 0; prot->ioctl_received = IOCTL_WAIT; DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif return -1; } /* Common msg buf hdr */ ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ; ioct_rqst->cmn_hdr.if_id = (uint8)ifidx; - ioct_rqst->cmn_hdr.flags = 0; + ioct_rqst->cmn_hdr.flags = ring->current_phase; ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID); ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; ring->seqnum++; @@ -5001,15 +6438,13 @@ dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx 
/* copy ioct payload */ ioct_buf = (void *) prot->ioctbuf.va; - if (buf) { + if (buf) memcpy(ioct_buf, buf, len); - } OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len); - if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN)) { + if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN)) DHD_ERROR(("host ioct address unaligned !!!!! \n")); - } DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n", ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len, @@ -5018,6 +6453,9 @@ dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx /* update ring's WR index and ring doorbell to dongle */ dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1); DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif return 0; } /* dhd_fillup_ioct_reqst */ @@ -5041,6 +6479,7 @@ dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name, int dma_buf_alloced = BCME_NOMEM; uint32 dma_buf_len = max_items * item_len; dhd_prot_t *prot = dhd->prot; + uint16 max_flowrings = dhd->bus->max_tx_flowrings; ASSERT(ring); ASSERT(name); @@ -5056,7 +6495,7 @@ dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name, ring->item_len = item_len; /* A contiguous space may be reserved for all flowrings */ - if (DHD_IS_FLOWRING(ringid) && (prot->flowrings_dma_buf.va)) { + if (DHD_IS_FLOWRING(ringid, max_flowrings) && (prot->flowrings_dma_buf.va)) { /* Carve out from the contiguous DMA-able flowring buffer */ uint16 flowid; uint32 base_offset; @@ -5169,6 +6608,8 @@ dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring) ring->rd = ring->wr = 0; ring->curr_rd = 0; + ring->inited = FALSE; + ring->create_pending = FALSE; } @@ -5180,6 +6621,7 @@ static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring) { dhd_prot_t *prot = dhd->prot; + uint16 max_flowrings = dhd->bus->max_tx_flowrings; ASSERT(ring); ring->inited = FALSE; @@ -5187,18 +6629,18 @@ dhd_prot_ring_detach(dhd_pub_t *dhd, 
msgbuf_ring_t *ring) #ifdef BCM_SECURE_DMA if (SECURE_DMA_ENAB(prot->osh)) { - SECURE_DMA_UNMAP_ALL(prot->osh, ring->dma_buf.secdma); if (ring->dma_buf.secdma) { + SECURE_DMA_UNMAP_ALL(prot->osh, ring->dma_buf.secdma); MFREE(prot->osh, ring->dma_buf.secdma, sizeof(sec_cma_info_t)); + ring->dma_buf.secdma = NULL; } - ring->dma_buf.secdma = NULL; } #endif /* BCM_SECURE_DMA */ /* If the DMA-able buffer was carved out of a pre-reserved contiguous * memory, then simply stop using it. */ - if (DHD_IS_FLOWRING(ring->idx) && (prot->flowrings_dma_buf.va)) { + if (DHD_IS_FLOWRING(ring->idx, max_flowrings) && (prot->flowrings_dma_buf.va)) { (void)dhd_dma_buf_audit(dhd, &ring->dma_buf); memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t)); } else { @@ -5250,24 +6692,31 @@ dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring) * +---------------------------------------------------------------------------- */ -/* Fetch number of H2D flowrings given the total number of h2d rings */ -#define DHD_FLOWRINGS_POOL_TOTAL(h2d_rings_total) \ - ((h2d_rings_total) - BCMPCIE_H2D_COMMON_MSGRINGS) - /* Conversion of a flowid to a flowring pool index */ #define DHD_FLOWRINGS_POOL_OFFSET(flowid) \ ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS) /* Fetch the msgbuf_ring_t from the flowring pool given a flowid */ #define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \ - (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + DHD_FLOWRINGS_POOL_OFFSET(flowid) + (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \ + DHD_FLOWRINGS_POOL_OFFSET(flowid) /* Traverse each flowring in the flowring pool, assigning ring and flowid */ -#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) \ +#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \ for ((flowid) = DHD_FLOWRING_START_FLOWID, \ - (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \ - (flowid) < (prot)->h2d_rings_total; \ - (flowid)++, (ring)++) + (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \ + (flowid) < ((total_flowrings) + 
DHD_FLOWRING_START_FLOWID); \ + (ring)++, (flowid)++) + +/* Fetch number of H2D flowrings given the total number of h2d rings */ +static uint16 +dhd_get_max_flow_rings(dhd_pub_t *dhd) +{ + if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) + return dhd->bus->max_tx_flowrings; + else + return (dhd->bus->max_tx_flowrings - BCMPCIE_H2D_COMMON_MSGRINGS); +} /** * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t. @@ -5295,9 +6744,8 @@ dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd) dhd_prot_t *prot = dhd->prot; char ring_name[RING_NAME_MAX_LENGTH]; - if (prot->h2d_flowrings_pool != NULL) { + if (prot->h2d_flowrings_pool != NULL) return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */ - } ASSERT(prot->h2d_rings_total == 0); @@ -5311,7 +6759,7 @@ dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd) } /* Subtract number of H2D common rings, to determine number of flowrings */ - h2d_flowrings_total = DHD_FLOWRINGS_POOL_TOTAL(prot->h2d_rings_total); + h2d_flowrings_total = dhd_get_max_flow_rings(dhd); DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total)); @@ -5326,11 +6774,10 @@ dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd) } /* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */ - FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) { + FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) { snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid); - ring_name[RING_NAME_MAX_LENGTH - 1] = '\0'; if (dhd_prot_ring_attach(dhd, ring, ring_name, - H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE, + prot->h2d_max_txpost, H2DRING_TXPOST_ITEMSIZE, DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) { goto attach_fail; } @@ -5366,7 +6813,7 @@ fail: static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd) { - uint16 flowid; + uint16 flowid, h2d_flowrings_total; msgbuf_ring_t *ring; dhd_prot_t *prot = dhd->prot; @@ -5374,9 +6821,9 @@ dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd) 
ASSERT(prot->h2d_rings_total == 0); return; } - + h2d_flowrings_total = dhd_get_max_flow_rings(dhd); /* Reset each flowring in the flowring pool */ - FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) { + FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) { dhd_prot_ring_reset(dhd, ring); ring->inited = FALSE; } @@ -5396,7 +6843,7 @@ dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd) { int flowid; msgbuf_ring_t *ring; - int h2d_flowrings_total; /* exclude H2D common rings */ + uint16 h2d_flowrings_total; /* exclude H2D common rings */ dhd_prot_t *prot = dhd->prot; if (prot->h2d_flowrings_pool == NULL) { @@ -5404,12 +6851,12 @@ dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd) return; } + h2d_flowrings_total = dhd_get_max_flow_rings(dhd); /* Detach the DMA-able buffer for each flowring in the flowring pool */ - FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) { + FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) { dhd_prot_ring_detach(dhd, ring); } - h2d_flowrings_total = DHD_FLOWRINGS_POOL_TOTAL(prot->h2d_rings_total); MFREE(prot->osh, prot->h2d_flowrings_pool, (h2d_flowrings_total * sizeof(msgbuf_ring_t))); @@ -5448,7 +6895,11 @@ dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid) ring->rd = 0; ring->curr_rd = 0; ring->inited = TRUE; - + /** + * Every time a flowring starts dynamically, initialize current_phase with 0 + * then flip to BCMPCIE_CMNHDR_PHASE_BIT_INIT + */ + ring->current_phase = 0; return ring; } @@ -5510,11 +6961,11 @@ dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced, ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len); /* Update write index */ - if ((ring->wr + *alloced) == ring->max_items) { + if ((ring->wr + *alloced) == ring->max_items) ring->wr = 0; - } else if ((ring->wr + *alloced) < ring->max_items) { + else if ((ring->wr + *alloced) < ring->max_items) ring->wr += *alloced; - } else { + else { /* Should never hit this */ ASSERT(0); return NULL; @@ 
-5530,29 +6981,48 @@ dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced, * posting the new WR index. The new WR index will be updated in the DMA index * array or directly in the dongle's ring state memory. * A PCIE doorbell will be generated to wake up the dongle. + * This is a non-atomic function, make sure the callers + * always hold appropriate locks. */ static void BCMFASTPATH dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, uint16 nitems) { dhd_prot_t *prot = dhd->prot; + uint8 db_index; + uint16 max_flowrings = dhd->bus->max_tx_flowrings; /* cache flush */ OSL_CACHE_FLUSH(p, ring->item_len * nitems); - if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) { - dhd_prot_dma_indx_set(dhd, ring->wr, - H2D_DMA_INDX_WR_UPD, ring->idx); - } else { + if (IDMA_DS_ACTIVE(dhd) && IDMA_ACTIVE(dhd)) { dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr), sizeof(uint16), RING_WR_UPD, ring->idx); + } else if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) { + dhd_prot_dma_indx_set(dhd, ring->wr, + H2D_DMA_INDX_WR_UPD, ring->idx); + } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) { + dhd_prot_dma_indx_set(dhd, ring->wr, + H2D_IFRM_INDX_WR_UPD, ring->idx); + } else { + dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr), + sizeof(uint16), RING_WR_UPD, ring->idx); } /* raise h2d interrupt */ - prot->mb_ring_fn(dhd->bus, ring->wr); + if (IDMA_ACTIVE(dhd) || + (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) { + if (IDMA_DS_ACTIVE(dhd)) { + prot->mb_ring_fn(dhd->bus, ring->wr); + } else { + db_index = IDMA_IDX0; + prot->mb_2_ring_fn(dhd->bus, db_index, TRUE); + } + } else { + prot->mb_ring_fn(dhd->bus, ring->wr); + } } - /** * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages * from a D2H ring. 
The new RD index will be updated in the DMA Index array or @@ -5561,32 +7031,183 @@ dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring) { + dhd_prot_t *prot = dhd->prot; + uint8 db_index; + /* update read index */ /* If dma'ing h2d indices supported * update the r -indices in the * host memory o/w in TCM */ - if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) { + if (IDMA_ACTIVE(dhd)) { dhd_prot_dma_indx_set(dhd, ring->rd, - D2H_DMA_INDX_RD_UPD, ring->idx); + D2H_DMA_INDX_RD_UPD, ring->idx); + if (IDMA_DS_ACTIVE(dhd)) { + dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd), + sizeof(uint16), RING_RD_UPD, ring->idx); + } else { + db_index = IDMA_IDX1; + prot->mb_2_ring_fn(dhd->bus, db_index, FALSE); + } + } else if (dhd->dma_h2d_ring_upd_support) { + dhd_prot_dma_indx_set(dhd, ring->rd, + D2H_DMA_INDX_RD_UPD, ring->idx); } else { dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd), sizeof(uint16), RING_RD_UPD, ring->idx); } } +static int +dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create) +{ + unsigned long flags; + d2h_ring_create_req_t *d2h_ring; + uint16 alloced = 0; + int ret = BCME_OK; + uint16 max_h2d_rings = dhd->bus->max_submission_rings; + +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ + DHD_GENERAL_LOCK(dhd, flags); + + DHD_TRACE(("%s trying to send D2H ring create Req\n", __FUNCTION__)); + + if (ring_to_create == NULL) { + DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__)); + ret = BCME_ERROR; + goto err; + } + + /* Request for ring buffer space */ + d2h_ring = (d2h_ring_create_req_t *) dhd_prot_alloc_ring_space(dhd, + &dhd->prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, + &alloced, FALSE); + + if (d2h_ring == NULL) { + DHD_ERROR(("%s: FATAL: No space in control ring to send D2H ring create\n", + __FUNCTION__)); + ret = BCME_NOMEM; + 
goto err; + } + ring_to_create->create_req_id = DHD_D2H_DBGRING_REQ_PKTID; + ring_to_create->create_pending = TRUE; + + /* Common msg buf hdr */ + d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE; + d2h_ring->msg.if_id = 0; + d2h_ring->msg.flags = dhd->prot->h2dring_ctrl_subn.current_phase; + d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id); + d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings)); + d2h_ring->ring_type = BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL; + d2h_ring->max_items = htol16(D2HRING_DYNAMIC_INFO_MAX_ITEM); + d2h_ring->len_item = htol16(D2HRING_INFO_BUFCMPLT_ITEMSIZE); + d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr; + d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr; + + d2h_ring->flags = 0; + d2h_ring->msg.epoch = + dhd->prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO; + dhd->prot->h2dring_ctrl_subn.seqnum++; + + /* Update the flow_ring's WRITE index */ + dhd_prot_ring_write_complete(dhd, &dhd->prot->h2dring_ctrl_subn, d2h_ring, + DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); + +err: + DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return ret; +} + +static int +dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create) +{ + unsigned long flags; + h2d_ring_create_req_t *h2d_ring; + uint16 alloced = 0; + uint8 i = 0; + int ret = BCME_OK; + + +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ + DHD_GENERAL_LOCK(dhd, flags); + + DHD_TRACE(("%s trying to send H2D ring create Req\n", __FUNCTION__)); + + if (ring_to_create == NULL) { + DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__)); + ret = BCME_ERROR; + goto err; + } + + /* Request for ring buffer space */ + h2d_ring = (h2d_ring_create_req_t *)dhd_prot_alloc_ring_space(dhd, + &dhd->prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, + 
&alloced, FALSE); + + if (h2d_ring == NULL) { + DHD_ERROR(("%s: FATAL: No space in control ring to send H2D ring create\n", + __FUNCTION__)); + ret = BCME_NOMEM; + goto err; + } + ring_to_create->create_req_id = DHD_H2D_DBGRING_REQ_PKTID; + ring_to_create->create_pending = TRUE; + + /* Common msg buf hdr */ + h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE; + h2d_ring->msg.if_id = 0; + h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id); + h2d_ring->msg.flags = dhd->prot->h2dring_ctrl_subn.current_phase; + h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx)); + h2d_ring->ring_type = BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT; + h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM); + h2d_ring->n_completion_ids = ring_to_create->n_completion_ids; + h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE); + h2d_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr; + h2d_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr; + + for (i = 0; i < ring_to_create->n_completion_ids; i++) { + h2d_ring->completion_ring_ids[i] = htol16(ring_to_create->compeltion_ring_ids[i]); + } + + h2d_ring->flags = 0; + h2d_ring->msg.epoch = + dhd->prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO; + dhd->prot->h2dring_ctrl_subn.seqnum++; + + /* Update the flow_ring's WRITE index */ + dhd_prot_ring_write_complete(dhd, &dhd->prot->h2dring_ctrl_subn, h2d_ring, + DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); + +err: + DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return ret; +} /** * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array. * Dongle will DMA the entire array (if DMA_INDX feature is enabled). 
* See dhd_prot_dma_indx_init() */ -static void +void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid) { uint8 *ptr; uint16 offset; dhd_prot_t *prot = dhd->prot; + uint16 max_h2d_rings = dhd->bus->max_submission_rings; switch (type) { case H2D_DMA_INDX_WR_UPD: @@ -5596,7 +7217,12 @@ dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringi case D2H_DMA_INDX_RD_UPD: ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va); - offset = DHD_D2H_RING_OFFSET(ringid); + offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings); + break; + + case H2D_IFRM_INDX_WR_UPD: + ptr = (uint8 *)(prot->h2d_ifrm_indx_wr_buf.va); + offset = DHD_H2D_FRM_FLOW_RING_OFFSET(ringid); break; default: @@ -5631,6 +7257,7 @@ dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid) uint16 data; uint16 offset; dhd_prot_t *prot = dhd->prot; + uint16 max_h2d_rings = dhd->bus->max_submission_rings; switch (type) { case H2D_DMA_INDX_WR_UPD: @@ -5645,12 +7272,12 @@ dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid) case D2H_DMA_INDX_WR_UPD: ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va); - offset = DHD_D2H_RING_OFFSET(ringid); + offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings); break; case D2H_DMA_INDX_RD_UPD: ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va); - offset = DHD_D2H_RING_OFFSET(ringid); + offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings); break; default: @@ -5717,40 +7344,44 @@ dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 le switch (type) { case H2D_DMA_INDX_WR_BUF: dma_buf = &prot->h2d_dma_indx_wr_buf; - if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) { + if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) goto ret_no_mem; - } DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n", dma_buf->len, rw_index_sz, length)); break; case H2D_DMA_INDX_RD_BUF: dma_buf = &prot->h2d_dma_indx_rd_buf; - if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) { + if (dhd_prot_dma_indx_alloc(dhd, 
type, dma_buf, bufsz)) goto ret_no_mem; - } DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n", dma_buf->len, rw_index_sz, length)); break; case D2H_DMA_INDX_WR_BUF: dma_buf = &prot->d2h_dma_indx_wr_buf; - if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) { + if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) goto ret_no_mem; - } DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n", dma_buf->len, rw_index_sz, length)); break; case D2H_DMA_INDX_RD_BUF: dma_buf = &prot->d2h_dma_indx_rd_buf; - if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) { + if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) goto ret_no_mem; - } DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n", dma_buf->len, rw_index_sz, length)); break; + case H2D_IFRM_INDX_WR_BUF: + dma_buf = &prot->h2d_ifrm_indx_wr_buf; + if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) + goto ret_no_mem; + DHD_ERROR(("H2D IFRM WR INDX : array size %d = %d * %d\n", + dma_buf->len, rw_index_sz, length)); + break; + default: DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__)); return BCME_BADOPTION; @@ -5792,7 +7423,7 @@ dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_le ring->curr_rd = ring->rd; /* update write pointer */ - if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) { + if (dhd->dma_d2h_ring_upd_support) { /* DMAing write/read indices supported */ d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx); ring->wr = d2h_wr; @@ -5806,11 +7437,8 @@ dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_le /* check for avail space, in number of ring items */ items = READ_AVAIL_SPACE(wr, rd, depth); - if (items == 0) { + if (items == 0) return NULL; - } - - ASSERT(items < ring->max_items); /* * Note that there are builds where Assert translates to just printk @@ -5818,16 +7446,32 @@ dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_le * dhd_prot_process_msgtype can get into an big loop if 
this * happens. */ - if (items >= ring->max_items) { + if (items > ring->max_items) { DHD_ERROR(("\r\n======================= \r\n")); DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n", __FUNCTION__, ring, ring->name, ring->max_items, items)); DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n", wr, rd, depth)); - DHD_ERROR(("dhd->busstate %d bus->suspended %d bus->wait_for_d3_ack %d \r\n", - dhd->busstate, dhd->bus->suspended, dhd->bus->wait_for_d3_ack)); + DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n", + dhd->busstate, dhd->bus->wait_for_d3_ack)); DHD_ERROR(("\r\n======================= \r\n")); +#ifdef SUPPORT_LINKDOWN_RECOVERY + if (wr >= ring->max_items) { + dhd->bus->read_shm_fail = TRUE; + } +#else +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR; + dhd_bus_mem_dump(dhd); + + } +#endif /* DHD_FW_COREDUMP */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ *available_len = 0; + dhd_schedule_reset(dhd); + return NULL; } @@ -5835,11 +7479,10 @@ dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_le read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len); /* update read pointer */ - if ((ring->rd + items) >= ring->max_items) { + if ((ring->rd + items) >= ring->max_items) ring->rd = 0; - } else { + else ring->rd += items; - } ASSERT(ring->rd < ring->max_items); @@ -5853,6 +7496,82 @@ dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_le } /* dhd_prot_get_read_addr */ +/** + * dhd_prot_h2d_mbdata_send_ctrlmsg is a non-atomic function, + * make sure the callers always hold appropriate locks. 
+ */ +int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data) +{ + h2d_mailbox_data_t *h2d_mb_data; + uint16 alloced = 0; + int num_post = 1; + int i; + + DHD_INFO(("%s Sending H2D MB data Req data 0x%04x\n", + __FUNCTION__, mb_data)); + if (!dhd->prot->h2dring_ctrl_subn.inited) { + DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__)); + return BCME_ERROR; + } +#ifdef PCIE_INB_DW + if ((INBAND_DW_ENAB(dhd->bus)) && + (dhdpcie_bus_get_pcie_inband_dw_state(dhd->bus) == + DW_DEVICE_DS_DEV_SLEEP)) { + if (mb_data == H2D_HOST_CONS_INT) { + /* One additional device_wake post needed */ + num_post = 2; + } + } +#endif /* PCIE_INB_DW */ + + for (i = 0; i < num_post; i ++) { + /* Request for ring buffer space */ + h2d_mb_data = (h2d_mailbox_data_t *)dhd_prot_alloc_ring_space(dhd, + &dhd->prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, + &alloced, FALSE); + + if (h2d_mb_data == NULL) { + DHD_ERROR(("%s: FATAL: No space in control ring to send H2D Mb data\n", + __FUNCTION__)); + return BCME_NOMEM; + } + + memset(h2d_mb_data, 0, sizeof(h2d_mailbox_data_t)); + /* Common msg buf hdr */ + h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA; + h2d_mb_data->msg.flags = dhd->prot->h2dring_ctrl_subn.current_phase; + + h2d_mb_data->msg.epoch = + dhd->prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO; + dhd->prot->h2dring_ctrl_subn.seqnum++; + +#ifdef PCIE_INB_DW + /* post device_wake first */ + if ((num_post == 2) && (i == 0)) { + h2d_mb_data->mail_box_data = htol32(H2DMB_DS_DEVICE_WAKE); + } else +#endif /* PCIE_INB_DW */ + { + h2d_mb_data->mail_box_data = htol32(mb_data); + } + + DHD_INFO(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data)); + + /* upd wrt ptr and raise interrupt */ + /* caller of dhd_prot_h2d_mbdata_send_ctrlmsg already holding general lock */ + dhd_prot_ring_write_complete(dhd, &dhd->prot->h2dring_ctrl_subn, h2d_mb_data, + DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); +#ifdef PCIE_INB_DW + /* Add a delay if 
device_wake is posted */ + if ((num_post == 2) && (i == 0)) { + OSL_DELAY(1000); + } +#endif /* PCIE_INB_DW */ + } + + return 0; +} + /** Creates a flow ring and informs dongle of this event */ int dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) @@ -5863,6 +7582,7 @@ dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) unsigned long flags; uint16 alloced = 0; msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn; + uint16 max_flowrings = dhd->bus->max_tx_flowrings; /* Fetch a pre-initialized msgbuf_ring from the flowring pool */ flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid); @@ -5872,6 +7592,10 @@ dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) return BCME_NOMEM; } +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ DHD_GENERAL_LOCK(dhd, flags); /* Request for ctrl_ring buffer space */ @@ -5883,6 +7607,9 @@ dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n", __FUNCTION__, flow_ring_node->flowid)); DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif return BCME_NOMEM; } @@ -5892,6 +7619,7 @@ dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE; flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; flow_create_rqst->msg.request_id = htol32(0); /* TBD */ + flow_create_rqst->msg.flags = ctrl_ring->current_phase; flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO; ctrl_ring->seqnum++; @@ -5904,17 +7632,27 @@ dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) /* CAUTION: ring::base_addr already in Little Endian */ flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr; 
flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr; - flow_create_rqst->max_items = htol16(H2DRING_TXPOST_MAX_ITEM); + flow_create_rqst->max_items = htol16(prot->h2d_max_txpost); flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE); + + /* definition for ifrm mask : bit0:d11ac core, bit1:d11ad core + * currently it is not used for priority. so uses solely for ifrm mask + */ + if (IFRM_ACTIVE(dhd)) + flow_create_rqst->priority_ifrmmask = (1 << IFRM_DEV_0); + DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid, MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid, flow_ring_node->flow_info.ifindex)); /* Update the flow_ring's WRITE index */ - if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) { + if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) { dhd_prot_dma_indx_set(dhd, flow_ring->wr, - H2D_DMA_INDX_WR_UPD, flow_ring->idx); + H2D_DMA_INDX_WR_UPD, flow_ring->idx); + } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(flow_ring->idx, max_flowrings)) { + dhd_prot_dma_indx_set(dhd, flow_ring->wr, + H2D_IFRM_INDX_WR_UPD, flow_ring->idx); } else { dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr), sizeof(uint16), RING_WR_UPD, flow_ring->idx); @@ -5924,6 +7662,9 @@ dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1); DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif return BCME_OK; } /* dhd_prot_flow_ring_create */ @@ -5943,6 +7684,100 @@ dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg) ltoh16(flow_create_resp->cmplt.status)); } +static void +dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf) +{ + h2d_ring_create_response_t *resp = (h2d_ring_create_response_t *)buf; + DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__, + 
ltoh16(resp->cmplt.status), + ltoh16(resp->cmplt.ring_id), + ltoh32(resp->cmn_hdr.request_id))); + if (ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) { + DHD_ERROR(("invalid request ID with h2d ring create complete\n")); + return; + } + if (!dhd->prot->h2dring_info_subn->create_pending) { + DHD_ERROR(("info ring create status for not pending submit ring\n")); + } + + if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) { + DHD_ERROR(("info ring create failed with status %d\n", + ltoh16(resp->cmplt.status))); + return; + } + dhd->prot->h2dring_info_subn->create_pending = FALSE; + dhd->prot->h2dring_info_subn->inited = TRUE; + dhd_prot_infobufpost(dhd); +} + +static void +dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf) +{ + d2h_ring_create_response_t *resp = (d2h_ring_create_response_t *)buf; + DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__, + ltoh16(resp->cmplt.status), + ltoh16(resp->cmplt.ring_id), + ltoh32(resp->cmn_hdr.request_id))); + if (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) { + DHD_ERROR(("invalid request ID with d2h ring create complete\n")); + return; + } + if (!dhd->prot->d2hring_info_cpln->create_pending) { + DHD_ERROR(("info ring create status for not pending cpl ring\n")); + return; + } + + if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) { + DHD_ERROR(("info cpl ring create failed with status %d\n", + ltoh16(resp->cmplt.status))); + return; + } + dhd->prot->d2hring_info_cpln->create_pending = FALSE; + dhd->prot->d2hring_info_cpln->inited = TRUE; +} + +static void +dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf) +{ + d2h_mailbox_data_t *d2h_data; + + d2h_data = (d2h_mailbox_data_t *)buf; + DHD_INFO(("%s dhd_prot_process_d2h_mb_data, 0x%04x\n", __FUNCTION__, + d2h_data->d2h_mailbox_data)); + dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data); +} + +static void +dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf) +{ +#ifdef 
DHD_TIMESYNC + host_timestamp_msg_cpl_t *host_ts_cpl; + uint32 pktid; + dhd_prot_t *prot = dhd->prot; + + host_ts_cpl = (host_timestamp_msg_cpl_t *)buf; + DHD_INFO(("%s host TS cpl: status %d, req_ID: 0x%04x, xt_id %d \n", __FUNCTION__, + host_ts_cpl->cmplt.status, host_ts_cpl->msg.request_id, host_ts_cpl->xt_id)); + + pktid = ltoh32(host_ts_cpl->msg.request_id); + if (prot->hostts_req_buf_inuse == FALSE) { + DHD_ERROR(("No Pending Host TS req, but completion\n")); + return; + } + prot->hostts_req_buf_inuse = FALSE; + if (pktid != DHD_H2D_HOSTTS_REQ_PKTID) { + DHD_ERROR(("Host TS req CPL, but req ID different 0x%04x, exp 0x%04x\n", + pktid, DHD_H2D_HOSTTS_REQ_PKTID)); + return; + } + dhd_timesync_handle_host_ts_complete(dhd->ts, host_ts_cpl->xt_id, + host_ts_cpl->cmplt.status); +#else /* DHD_TIMESYNC */ + DHD_ERROR(("Timesunc feature not compiled in but GOT HOST_TS_COMPLETE\n")); +#endif /* DHD_TIMESYNC */ + +} + /** called on e.g. flow ring delete */ void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info) { @@ -5954,7 +7789,8 @@ void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info) void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info, struct bcmstrbuf *strbuf, const char * fmt) { - const char *default_fmt = "RD %d WR %d BASE(VA) %p BASE(PA) %x:%x SIZE %d\n"; + const char *default_fmt = "RD %d WR %d BASE(VA) %p BASE(PA) %x:%x" + " WORK ITEM SIZE %d MAX WORK ITEMS %d SIZE %d\n"; msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info; uint16 rd, wr; uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len; @@ -5966,28 +7802,63 @@ void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info, dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx); bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va, ltoh32(flow_ring->base_addr.high_addr), - ltoh32(flow_ring->base_addr.low_addr), dma_buf_len); + ltoh32(flow_ring->base_addr.low_addr), + flow_ring->item_len, flow_ring->max_items, 
dma_buf_len); } void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf) { dhd_prot_t *prot = dhd->prot; - bcm_bprintf(strbuf, "CtrlPost: "); - dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf, NULL); - bcm_bprintf(strbuf, "CtrlCpl: "); - dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf, NULL); + bcm_bprintf(strbuf, "IPCrevs: Dev %d, \t Host %d, \tactive %d\n", + dhd->prot->device_ipc_version, + dhd->prot->host_ipc_version, + dhd->prot->active_ipc_version); - bcm_bprintf(strbuf, "RxPost: "); - bcm_bprintf(strbuf, "RBP %d ", prot->rxbufpost); - dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf, NULL); - bcm_bprintf(strbuf, "RxCpl: "); - dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf, NULL); + bcm_bprintf(strbuf, "max Host TS bufs to post: %d, \t posted %d \n", + dhd->prot->max_tsbufpost, dhd->prot->cur_ts_bufs_posted); + bcm_bprintf(strbuf, "max INFO bufs to post: %d, \t posted %d \n", + dhd->prot->max_infobufpost, dhd->prot->infobufpost); + bcm_bprintf(strbuf, "max event bufs to post: %d, \t posted %d \n", + dhd->prot->max_eventbufpost, dhd->prot->cur_event_bufs_posted); + bcm_bprintf(strbuf, "max ioctlresp bufs to post: %d, \t posted %d \n", + dhd->prot->max_ioctlrespbufpost, dhd->prot->cur_ioctlresp_bufs_posted); + bcm_bprintf(strbuf, "max RX bufs to post: %d, \t posted %d \n", + dhd->prot->max_rxbufpost, dhd->prot->rxbufpost); - bcm_bprintf(strbuf, "TxCpl: "); - dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf, NULL); - bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail %d\n", + bcm_bprintf(strbuf, + "%14s %5s %5s %17s %17s %14s %14s %10s\n", + "Type", "RD", "WR", "BASE(VA)", "BASE(PA)", + "WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE"); + bcm_bprintf(strbuf, "%14s", "H2DCtrlPost"); + dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf, + " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); + bcm_bprintf(strbuf, "%14s", "D2HCtrlCpl"); + dhd_prot_print_flow_ring(dhd, 
&prot->d2hring_ctrl_cpln, strbuf, + " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); + bcm_bprintf(strbuf, "%14s", "H2DRxPost", prot->rxbufpost); + dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf, + " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); + bcm_bprintf(strbuf, "%14s", "D2HRxCpl"); + dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf, + " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); + bcm_bprintf(strbuf, "%14s", "D2HTxCpl"); + dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf, + " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); + if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) { + bcm_bprintf(strbuf, "%14s", "H2DRingInfoSub"); + dhd_prot_print_flow_ring(dhd, prot->h2dring_info_subn, strbuf, + " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); + bcm_bprintf(strbuf, "%14s", "D2HRingInfoCpl"); + dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, strbuf, + " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); + } + + bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail(ctrl/rx/tx) %d %d %d\n", dhd->prot->active_tx_count, - DHD_PKTID_AVAIL(dhd->prot->pktid_map_handle)); + DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map), + DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map), + DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map)); + } int @@ -5999,6 +7870,11 @@ dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) uint16 alloced = 0; msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ + DHD_GENERAL_LOCK(dhd, flags); /* Request for ring buffer space */ @@ -6008,6 +7884,9 @@ dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) if (flow_delete_rqst == NULL) { DHD_GENERAL_UNLOCK(dhd, flags); DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__)); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif return BCME_NOMEM; } @@ -6015,6 +7894,7 @@ 
dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE; flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; flow_delete_rqst->msg.request_id = htol32(0); /* TBD */ + flow_delete_rqst->msg.flags = ring->current_phase; flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; ring->seqnum++; @@ -6031,6 +7911,9 @@ dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) /* update ring's WR index and ring doorbell to dongle */ dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1); DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif return BCME_OK; } @@ -6047,6 +7930,43 @@ dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg) flow_delete_resp->cmplt.status); } +static void +dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg) +{ +#ifdef IDLE_TX_FLOW_MGMT + tx_idle_flowring_resume_response_t *flow_resume_resp = + (tx_idle_flowring_resume_response_t *)msg; + + DHD_ERROR(("%s Flow resume Response status = %d Flow %d\n", __FUNCTION__, + flow_resume_resp->cmplt.status, flow_resume_resp->cmplt.flow_ring_id)); + + dhd_bus_flow_ring_resume_response(dhd->bus, flow_resume_resp->cmplt.flow_ring_id, + flow_resume_resp->cmplt.status); +#endif /* IDLE_TX_FLOW_MGMT */ +} + +static void +dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg) +{ +#ifdef IDLE_TX_FLOW_MGMT + int16 status; + tx_idle_flowring_suspend_response_t *flow_suspend_resp = + (tx_idle_flowring_suspend_response_t *)msg; + status = flow_suspend_resp->cmplt.status; + + DHD_ERROR(("%s Flow id %d suspend Response status = %d\n", + __FUNCTION__, flow_suspend_resp->cmplt.flow_ring_id, + status)); + + if (status != BCME_OK) { + + DHD_ERROR(("%s Error in Suspending Flow rings!!" 
+ "Dongle will still be polling idle rings!!Status = %d \n", + __FUNCTION__, status)); + } +#endif /* IDLE_TX_FLOW_MGMT */ +} + int dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) { @@ -6056,6 +7976,11 @@ dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) uint16 alloced = 0; msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ + DHD_GENERAL_LOCK(dhd, flags); /* Request for ring buffer space */ @@ -6064,6 +7989,9 @@ dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) if (flow_flush_rqst == NULL) { DHD_GENERAL_UNLOCK(dhd, flags); DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__)); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif return BCME_NOMEM; } @@ -6071,7 +7999,7 @@ dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH; flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; flow_flush_rqst->msg.request_id = htol32(0); /* TBD */ - + flow_flush_rqst->msg.flags = ring->current_phase; flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; ring->seqnum++; @@ -6083,6 +8011,9 @@ dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) /* update ring's WR index and ring doorbell to dongle */ dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1); DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif return BCME_OK; } /* dhd_prot_flow_ring_flush */ @@ -6119,14 +8050,22 @@ dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd) msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn; const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS; +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return 
BCME_ERROR; +#endif /* PCIE_INB_DW */ /* Claim space for d2h_ring number of d2h_ring_config_req_t messages */ DHD_GENERAL_LOCK(dhd, flags); + msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE); if (msg_start == NULL) { DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n", __FUNCTION__, d2h_rings)); DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif return; } @@ -6173,17 +8112,76 @@ dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd) /* update control subn ring's WR index and ring doorbell to dongle */ dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings); DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */ } static void -dhd_prot_d2h_ring_config_cmplt_process(dhd_pub_t *dhd, void *msg) +dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg) { DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n", __FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status), ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id))); } +int +dhd_prot_debug_dma_info_print(dhd_pub_t *dhd) +{ + if (dhd->bus->is_linkdown) { + DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers " + "due to PCIe link down ------- \r\n")); + return 0; + } + + DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n")); + + //HostToDev + DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0))); + DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0))); + DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 
0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0))); + + DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0))); + DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0))); + DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0))); + + //DevToHost + DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0))); + DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0))); + DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0))); + + DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0))); + DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0))); + DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n", + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0), + si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0))); + + return 0; +} + int dhd_prot_debug_info_print(dhd_pub_t *dhd) { @@ -6196,6 +8194,28 @@ 
dhd_prot_debug_info_print(dhd_pub_t *dhd) uint32 d2h_mb_data = 0; uint32 dma_buf_len; + DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n")); + DHD_ERROR(("DHD: %s\n", dhd_version)); + DHD_ERROR(("Firmware: %s\n", fw_version)); + + DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n")); + DHD_ERROR(("ICPrevs: Dev %d, Host %d, active %d\n", + prot->device_ipc_version, + prot->host_ipc_version, + prot->active_ipc_version)); + DHD_ERROR(("max Host TS bufs to post: %d, posted %d\n", + prot->max_tsbufpost, prot->cur_ts_bufs_posted)); + DHD_ERROR(("max INFO bufs to post: %d, posted %d\n", + prot->max_infobufpost, prot->infobufpost)); + DHD_ERROR(("max event bufs to post: %d, posted %d\n", + prot->max_eventbufpost, prot->cur_event_bufs_posted)); + DHD_ERROR(("max ioctlresp bufs to post: %d, posted %d\n", + prot->max_ioctlrespbufpost, prot->cur_ioctlresp_bufs_posted)); + DHD_ERROR(("max RX bufs to post: %d, posted %d\n", + prot->max_rxbufpost, prot->rxbufpost)); + DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n", + h2d_max_txpost, prot->h2d_max_txpost)); + DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n")); ring = &prot->h2dring_ctrl_subn; @@ -6207,6 +8227,7 @@ dhd_prot_debug_info_print(dhd_pub_t *dhd) dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO)); ring = &prot->d2hring_ctrl_cpln; dma_buf_len = ring->max_items * ring->item_len; @@ -6217,18 +8238,110 @@ dhd_prot_debug_info_print(dhd_pub_t *dhd) dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); - DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum)); + DHD_ERROR(("CtrlCpl: Expected seq num: %d 
\r\n", ring->seqnum % H2D_EPOCH_MODULO)); - intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0); - intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0); - mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0); - dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0); + ring = prot->h2dring_info_subn; + if (ring) { + dma_buf_len = ring->max_items * ring->item_len; + DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), dma_buf_len)); + DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + DHD_ERROR(("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO)); + } + ring = prot->d2hring_info_cpln; + if (ring) { + dma_buf_len = ring->max_items * ring->item_len; + DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), dma_buf_len)); + DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO)); + } - DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n")); - DHD_ERROR(("intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n,", - intstatus, intmask, mbintstatus)); - DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data, dhd->bus->def_intmask)); + DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d 
cur_event_bufs_posted %d\n", + __FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted)); + if (!dhd->bus->is_linkdown && dhd->bus->intstatus != (uint32)-1) { + DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n")); + intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCIMailBoxInt, 0, 0); + intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCIMailBoxMask, 0, 0); + mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, + PCID2H_MailBox, 0, 0); + dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0); + + DHD_ERROR(("intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n", + intstatus, intmask, mbintstatus)); + DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data, + dhd->bus->def_intmask)); + + DHD_ERROR(("host pcie_irq enabled = %d\n", dhdpcie_irq_enabled(dhd->bus))); + + DHD_ERROR(("\n ------- DUMPING PCIE Registers ------- \r\n")); + /* hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/CurrentPcieGen2ProgramGuide */ + DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x\n", + PCIECFGREG_STATUS_CMD, + dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_STATUS_CMD, sizeof(uint32)), + PCIECFGREG_BASEADDR0, + dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)))); + DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x " + "L1SSControl(0x%x)=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, + dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_LINK_STATUS_CTRL, + sizeof(uint32)), PCIECFGGEN_DEV_STATUS_CTRL2, + dhd_pcie_config_read(dhd->bus->osh, PCIECFGGEN_DEV_STATUS_CTRL2, + sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL1, + dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL1, + sizeof(uint32)))); + + /* hwnbu-twiki.sj.broadcom.com/twiki/pub/Mwgroup/ + * CurrentPcieGen2ProgramGuide/pcie_ep.htm + */ + DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x " + "ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0, + dhd_pcie_corereg_read(dhd->bus->sih, 
PCIECFGREG_PHY_DBG_CLKREQ0), + PCIECFGREG_PHY_DBG_CLKREQ1, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1), + PCIECFGREG_PHY_DBG_CLKREQ2, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2), + PCIECFGREG_PHY_DBG_CLKREQ3, + dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3))); + +#if defined(PCIE_RC_VENDOR_ID) && defined(PCIE_RC_DEVICE_ID) + DHD_ERROR(("Pcie RC Error Status Val=0x%x\n", + dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, + PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0))); + + DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n", + dhd_debug_get_rc_linkcap(dhd->bus))); +#endif + + DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters ------- \r\n")); + DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n" + "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n" + "dpc_return_busdown_count=%lu\n", + dhd->bus->resume_intr_enable_count, dhd->bus->dpc_intr_enable_count, + dhd->bus->isr_intr_disable_count, dhd->bus->suspend_intr_disable_count, + dhd->bus->dpc_return_busdown_count)); + + } + dhd_prot_debug_dma_info_print(dhd); +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { +#ifdef DHD_SSSR_DUMP + if (dhd->sssr_inited) { + dhdpcie_sssr_dump(dhd); + } +#endif /* DHD_SSSR_DUMP */ + } +#endif /* DHD_FW_COREDUMP */ return 0; } @@ -6377,7 +8490,7 @@ dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx) rxchain->pkt_count++; } - if ((!ETHER_ISMULTI(rxchain->h_da)) && + if ((dhd_rx_pkt_chainable(dhd, ifidx)) && (!ETHER_ISMULTI(rxchain->h_da)) && ((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) || (((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) { PKTSETCHAINED(dhd->osh, pkt); @@ -6411,3 +8524,406 @@ dhd_rxchain_commit(dhd_pub_t *dhd) } #endif /* DHD_RX_CHAINING */ + + +#ifdef IDLE_TX_FLOW_MGMT +int +dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) +{ + tx_idle_flowring_resume_request_t *flow_resume_rqst; + msgbuf_ring_t 
*flow_ring; + dhd_prot_t *prot = dhd->prot; + unsigned long flags; + uint16 alloced = 0; + msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn; + + /* Fetch a pre-initialized msgbuf_ring from the flowring pool */ + flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid); + if (flow_ring == NULL) { + DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n", + __FUNCTION__, flow_ring_node->flowid)); + return BCME_NOMEM; + } +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ + + DHD_GENERAL_LOCK(dhd, flags); + + /* Request for ctrl_ring buffer space */ + flow_resume_rqst = (tx_idle_flowring_resume_request_t *) + dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE); + + if (flow_resume_rqst == NULL) { + dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring); + DHD_ERROR(("%s: Flow resume Req flowid %d - failure ring space\n", + __FUNCTION__, flow_ring_node->flowid)); + DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return BCME_NOMEM; + } + + flow_ring_node->prot_info = (void *)flow_ring; + + /* Common msg buf hdr */ + flow_resume_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_RESUME; + flow_resume_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; + flow_resume_rqst->msg.request_id = htol32(0); /* TBD */ + + flow_resume_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO; + ctrl_ring->seqnum++; + + flow_resume_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); + DHD_ERROR(("%s Send Flow resume Req flow ID %d\n", + __FUNCTION__, flow_ring_node->flowid)); + + /* Update the flow_ring's WRITE index */ + if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) { + dhd_prot_dma_indx_set(dhd, flow_ring->wr, + H2D_DMA_INDX_WR_UPD, flow_ring->idx); + } else if (IFRM_ACTIVE(dhd) && (flow_ring->idx >= BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)) { + 
dhd_prot_dma_indx_set(dhd, flow_ring->wr, + H2D_IFRM_INDX_WR_UPD, + (flow_ring->idx - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)); + } else { + dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr), + sizeof(uint16), RING_WR_UPD, flow_ring->idx); + } + + /* update control subn ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_resume_rqst, 1); + + DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + + return BCME_OK; +} /* dhd_prot_flow_ring_create */ + +int +dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count) +{ + tx_idle_flowring_suspend_request_t *flow_suspend_rqst; + dhd_prot_t *prot = dhd->prot; + unsigned long flags; + uint16 index; + uint16 alloced = 0; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ + + DHD_GENERAL_LOCK(dhd, flags); + + /* Request for ring buffer space */ + flow_suspend_rqst = (tx_idle_flowring_suspend_request_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (flow_suspend_rqst == NULL) { + DHD_GENERAL_UNLOCK(dhd, flags); + DHD_ERROR(("%s: Flow suspend Req - failure ring space\n", __FUNCTION__)); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + return BCME_NOMEM; + } + + /* Common msg buf hdr */ + flow_suspend_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND; + /* flow_suspend_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; */ + flow_suspend_rqst->msg.request_id = htol32(0); /* TBD */ + + flow_suspend_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + + /* Update flow id info */ + for (index = 0; index < count; index++) + { + flow_suspend_rqst->ring_id[index] = ringid[index]; + } + flow_suspend_rqst->num = count; + + DHD_ERROR(("%s sending batch suspend!! 
count is %d\n", __FUNCTION__, count)); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, flow_suspend_rqst, 1); + DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); +#endif + + return BCME_OK; +} +#endif /* IDLE_TX_FLOW_MGMT */ + + +int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw) +{ + uint32 i; + uint32 *ext_data; + hnd_ext_trap_hdr_t *hdr; + bcm_tlv_t *tlv; + trap_t *tr; + uint32 *stack; + hnd_ext_trap_bp_err_t *bpe; + uint32 raw_len; + + ext_data = dhdp->extended_trap_data; + + /* return if there is no extended trap data */ + if (!ext_data || !(dhdp->dongle_trap_data & D2H_DEV_EXT_TRAP_DATA)) + { + bcm_bprintf(b, "%d (0x%x)", dhdp->dongle_trap_data, dhdp->dongle_trap_data); + return BCME_OK; + } + + bcm_bprintf(b, "Extended trap data\n"); + + /* First word is original trap_data */ + bcm_bprintf(b, "trap_data = 0x%08x\n", *ext_data); + ext_data++; + + /* Followed by the extended trap data header */ + hdr = (hnd_ext_trap_hdr_t *)ext_data; + bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len); + + if (raw) + { + raw_len = sizeof(hnd_ext_trap_hdr_t) + (hdr->len / 4) + (hdr->len % 4 ? 
1 : 0); + for (i = 0; i < raw_len; i++) + { + bcm_bprintf(b, "0x%08x ", ext_data[i]); + if (i % 4 == 3) + bcm_bprintf(b, "\n"); + } + return BCME_OK; + } + + /* Extract the various supported TLVs from the extended trap data */ + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE); + if (tlv) + { + bcm_bprintf(b, "\nTAG_TRAP_SIGNATURE len: %d\n", tlv->len); + tr = (trap_t *)tlv->data; + + bcm_bprintf(b, "TRAP %x: pc %x, lr %x, sp %x, cpsr %x, spsr %x\n", + tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr); + bcm_bprintf(b, " r0 %x, r1 %x, r2 %x, r3 %x, r4 %x, r5 %x, r6 %x\n", + tr->r0, tr->r1, tr->r2, tr->r3, tr->r4, tr->r5, tr->r6); + bcm_bprintf(b, " r7 %x, r8 %x, r9 %x, r10 %x, r11 %x, r12 %x\n", + tr->r7, tr->r8, tr->r9, tr->r10, tr->r11, tr->r12); + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK); + if (tlv) + { + bcm_bprintf(b, "\nTAG_TRAP_STACK len: %d\n", tlv->len); + stack = (uint32 *)tlv->data; + for (i = 0; i < (uint32)(tlv->len / 4); i++) + { + bcm_bprintf(b, " 0x%08x\n", *stack); + stack++; + } + } + + tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE); + if (tlv) + { + bcm_bprintf(b, "\nTAG_TRAP_BACKPLANE len: %d\n", tlv->len); + bpe = (hnd_ext_trap_bp_err_t *)tlv->data; + bcm_bprintf(b, " error: %x\n", bpe->error); + bcm_bprintf(b, " coreid: %x\n", bpe->coreid); + bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr); + bcm_bprintf(b, " ioctrl: %x\n", bpe->ioctrl); + bcm_bprintf(b, " iostatus: %x\n", bpe->iostatus); + bcm_bprintf(b, " resetctrl: %x\n", bpe->resetctrl); + bcm_bprintf(b, " resetstatus: %x\n", bpe->resetstatus); + bcm_bprintf(b, " errlogctrl: %x\n", bpe->errlogctrl); + bcm_bprintf(b, " errlogdone: %x\n", bpe->errlogdone); + bcm_bprintf(b, " errlogstatus: %x\n", bpe->errlogstatus); + bcm_bprintf(b, " errlogaddrlo: %x\n", bpe->errlogaddrlo); + bcm_bprintf(b, " errlogaddrhi: %x\n", bpe->errlogaddrhi); + bcm_bprintf(b, " errlogid: %x\n", bpe->errlogid); + bcm_bprintf(b, " errloguser: %x\n", 
bpe->errloguser); + bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags); + } + + return BCME_OK; +} + + +#ifdef BCMPCIE +int +dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len, + uint16 seqnum, uint16 xt_id) +{ + dhd_prot_t *prot = dhdp->prot; + host_timestamp_msg_t *ts_req; + unsigned long flags; + uint16 alloced = 0; + uchar *ts_tlv_buf; + + if ((tlvs == NULL) || (tlv_len == 0)) { + DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n", + __FUNCTION__, tlvs, tlv_len)); + return -1; + } +#ifdef PCIE_INB_DW + if (dhd_prot_inc_hostactive_devwake_assert(dhdp->bus) != BCME_OK) + return BCME_ERROR; +#endif /* PCIE_INB_DW */ + + DHD_GENERAL_LOCK(dhdp, flags); + + /* if Host TS req already pending go away */ + if (prot->hostts_req_buf_inuse == TRUE) { + DHD_ERROR(("one host TS request already pending at device\n")); + DHD_GENERAL_UNLOCK(dhdp, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus); +#endif + return -1; + } + + /* Request for cbuf space */ + ts_req = (host_timestamp_msg_t*)dhd_prot_alloc_ring_space(dhdp, &prot->h2dring_ctrl_subn, + DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced, FALSE); + if (ts_req == NULL) { + DHD_ERROR(("couldn't allocate space on msgring to send host TS request\n")); + DHD_GENERAL_UNLOCK(dhdp, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus); +#endif + return -1; + } + + /* Common msg buf hdr */ + ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP; + ts_req->msg.if_id = 0; + ts_req->msg.flags = prot->h2dring_ctrl_subn.current_phase; + ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID; + + ts_req->msg.epoch = prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO; + prot->h2dring_ctrl_subn.seqnum++; + + ts_req->xt_id = xt_id; + ts_req->seqnum = seqnum; + /* populate TS req buffer info */ + ts_req->input_data_len = htol16(tlv_len); + ts_req->host_buf_addr.high = htol32(PHYSADDRHI(prot->hostts_req_buf.pa)); + ts_req->host_buf_addr.low = 
htol32(PHYSADDRLO(prot->hostts_req_buf.pa)); + /* copy ioct payload */ + ts_tlv_buf = (void *) prot->hostts_req_buf.va; + prot->hostts_req_buf_inuse = TRUE; + memcpy(ts_tlv_buf, tlvs, tlv_len); + + OSL_CACHE_FLUSH((void *) prot->hostts_req_buf.va, tlv_len); + + if (ISALIGNED(ts_tlv_buf, DMA_ALIGN_LEN) == FALSE) { + DHD_ERROR(("host TS req buffer address unaligned !!!!! \n")); + } + + DHD_CTL(("submitted Host TS request request_id %d, data_len %d, tx_id %d, seq %d\n", + ts_req->msg.request_id, ts_req->input_data_len, + ts_req->xt_id, ts_req->seqnum)); + + + /* upd wrt ptr and raise interrupt */ + dhd_prot_ring_write_complete(dhdp, &prot->h2dring_ctrl_subn, ts_req, + DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); + DHD_GENERAL_UNLOCK(dhdp, flags); +#ifdef PCIE_INB_DW + dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus); +#endif + + return 0; +} /* dhd_prot_send_host_timestamp */ + + +bool +dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set) +{ + if (set) + dhd->prot->tx_ts_log_enabled = enable; + + return dhd->prot->tx_ts_log_enabled; +} + +bool +dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set) +{ + if (set) + dhd->prot->rx_ts_log_enabled = enable; + + return dhd->prot->rx_ts_log_enabled; +} +#endif /* BCMPCIE */ + +void +dhd_prot_dma_indx_free(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + + dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf); + dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf); +} + +static void BCMFASTPATH +dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf) +{ +#ifdef DHD_TIMESYNC + fw_timestamp_event_msg_t *resp; + uint32 pktid; + uint16 buflen, seqnum; + void * pkt; + unsigned long flags; + + resp = (fw_timestamp_event_msg_t *)buf; + pktid = ltoh32(resp->msg.request_id); + buflen = ltoh16(resp->buf_len); + seqnum = ltoh16(resp->seqnum); + +#if defined(DHD_PKTID_AUDIT_RING) + DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_ctrl_map, pktid, + DHD_DUPLICATE_FREE); +#endif /* DHD_PKTID_AUDIT_RING 
*/ + + DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d\n", + pktid, buflen, resp->msg.flags, ltoh16(resp->seqnum))); + + if (!dhd->prot->cur_ts_bufs_posted) { + DHD_ERROR(("tsbuf posted are zero, but there is a completion\n")); + return; + } + + dhd->prot->cur_ts_bufs_posted--; + if (dhd->prot->max_tsbufpost > 0) + dhd_msgbuf_rxbuf_post_ts_bufs(dhd); + + DHD_GENERAL_LOCK(dhd, flags); + pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_TSBUF_RX, TRUE); + DHD_GENERAL_UNLOCK(dhd, flags); + + if (!pkt) { + DHD_ERROR(("no ts buffer associated with pktid 0x%04x\n", pktid)); + return; + } + + PKTSETLEN(dhd->osh, pkt, buflen); + dhd_timesync_handle_fw_timestamp(dhd->ts, PKTDATA(dhd->osh, pkt), buflen, seqnum); +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhd->osh, pkt, TRUE); +#else + PKTFREE(dhd->osh, pkt, TRUE); +#endif /* DHD_USE_STATIC_CTRLBUF */ +#else /* DHD_TIMESYNC */ + DHD_ERROR(("Timesunc feature not compiled in but GOT FW TS message\n")); +#endif /* DHD_TIMESYNC */ + +} diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_pcie.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_pcie.c index d6fc50de4d95..91a1203ebc65 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_pcie.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_pcie.c @@ -1,7 +1,7 @@ /* * DHD Bus Module for PCIE * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_pcie.c 609007 2015-12-30 07:44:52Z $ + * $Id: dhd_pcie.c 710862 2017-07-14 07:43:59Z $ */ @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #if defined(DHD_DEBUG) @@ -47,6 +48,7 @@ #include #include #include +#include #include #include #include @@ -57,15 +59,20 @@ #ifdef DHDTCPACK_SUPPRESS #include #endif /* DHDTCPACK_SUPPRESS */ 
+#include #include -#ifdef BCMEMBEDIMAGE -#include BCMEMBEDIMAGE -#endif /* BCMEMBEDIMAGE */ +#ifdef DHD_TIMESYNC +#include +#endif /* DHD_TIMESYNC */ -#ifdef PCIE_OOB -#include "ftdi_sio_external.h" -#endif /* PCIE_OOB */ +#if defined(BCMEMBEDIMAGE) +#ifndef DHD_EFI +#include BCMEMBEDIMAGE +#else +#include +#endif /* !DHD_EFI */ +#endif /* BCMEMBEDIMAGE */ #define MEMBLOCK 2048 /* Block size used for downloading of dongle image */ #define MAX_WKLK_IDLE_CHECK 3 /* times wake_lock checked before deciding not to suspend */ @@ -74,6 +81,10 @@ #define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32)) /* Temporary war to fix precommit till sync issue between trunk & precommit branch is resolved */ +/* CTO Prevention Recovery */ +#define CTO_TO_CLEAR_WAIT_MS 1000 +#define CTO_TO_CLEAR_WAIT_MAX_CNT 10 + #if defined(SUPPORT_MULTIPLE_BOARD_REV) extern unsigned int system_rev; #endif /* SUPPORT_MULTIPLE_BOARD_REV */ @@ -81,10 +92,9 @@ int dhd_dongle_memsize; int dhd_dongle_ramsize; static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size); -#ifdef DHD_DEBUG static int dhdpcie_bus_readconsole(dhd_bus_t *bus); -#endif /* DHD_DEBUG */ #if defined(DHD_FW_COREDUMP) +struct dhd_bus *g_dhd_bus = NULL; static int dhdpcie_mem_dump(dhd_bus_t *bus); #endif /* DHD_FW_COREDUMP */ @@ -94,7 +104,7 @@ static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 act int plen, void *arg, int len, int val_size); static int dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 intval); static int dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, - uint32 len, uint32 srcdelay, uint32 destdelay); + uint32 len, uint32 srcdelay, uint32 destdelay, uint32 d11_lpbk); static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter); static int _dhdpcie_download_firmware(struct dhd_bus *bus); static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh); @@ -115,15 +125,23 @@ static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data); static uint16 dhdpcie_bus_rtcm16(dhd_bus_t 
*bus, ulong offset); static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data); static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset); +#ifdef DHD_SUPPORT_64BIT static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data); static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset); +#endif /* DHD_SUPPORT_64BIT */ static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data); -static void dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size); +static void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size); static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b); -static void dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data); +static void dhdpcie_fw_trap(dhd_bus_t *bus); static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info); +extern void dhd_dpc_enable(dhd_pub_t *dhdp); extern void dhd_dpc_kill(dhd_pub_t *dhdp); +#ifdef IDLE_TX_FLOW_MGMT +static void dhd_bus_check_idle_scan(dhd_bus_t *bus); +static void dhd_bus_idle_scan(dhd_bus_t *bus); +#endif /* IDLE_TX_FLOW_MGMT */ + #ifdef BCMEMBEDIMAGE static int dhdpcie_download_code_array(dhd_bus_t *bus); #endif /* BCMEMBEDIMAGE */ @@ -135,30 +153,23 @@ extern void exynos_pcie_register_dump(int ch_num); #define PCI_VENDOR_ID_BROADCOM 0x14e4 -static void dhd_bus_set_device_wake(struct dhd_bus *bus, bool val); -extern void wl_nddbg_wpp_log(const char *format, ...); -#ifdef PCIE_OOB -static void dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus); - #define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */ +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) static uint dhd_doorbell_timeout = DHD_DEFAULT_DOORBELL_TIMEOUT; - -#define HOST_WAKE 4 /* GPIO_0 (HOST_WAKE) - Output from WLAN */ -#define DEVICE_WAKE 5 /* GPIO_1 (DEVICE_WAKE) - Input to WLAN */ -#define BIT_WL_REG_ON 6 -#define BIT_BT_REG_ON 7 - -int gpio_handle_val = 0; -unsigned char gpio_port = 0; -unsigned char gpio_direction = 0; -#define OOB_PORT "ttyUSB0" -#endif 
/* PCIE_OOB */ +#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */ static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version); +static void dhdpcie_cto_error_recovery(struct dhd_bus *bus); + +#ifdef BCM_ASLR_HEAP +static void dhdpcie_wrt_rnd(struct dhd_bus *bus); +#endif /* BCM_ASLR_HEAP */ + +extern uint16 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd); +extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost); /* IOVar table */ enum { IOV_INTR = 1, - IOV_MEMBYTES, IOV_MEMSIZE, IOV_SET_DOWNLOAD_STATE, IOV_DEVRESET, @@ -171,12 +182,6 @@ enum { IOV_SLEEP_ALLOWED, IOV_PCIE_DMAXFER, IOV_PCIE_SUSPEND, - IOV_PCIEREG, - IOV_PCIECFGREG, - IOV_PCIECOREREG, - IOV_PCIESERDESREG, - IOV_BAR0_SECWIN_REG, - IOV_SBREG, IOV_DONGLEISOLATION, IOV_LTRSLEEPON_UNLOOAD, IOV_METADATA_DBG, @@ -186,6 +191,7 @@ enum { IOV_BUZZZ_DUMP, IOV_DUMP_RINGUPD_BLOCK, IOV_DMA_RINGINDICES, + IOV_FORCE_FW_TRAP, IOV_DB1_FOR_MB, IOV_FLOW_PRIO_MAP, #ifdef DHD_PCIE_RUNTIMEPM @@ -194,55 +200,104 @@ enum { IOV_RXBOUND, IOV_TXBOUND, IOV_HANGREPORT, + IOV_H2D_MAILBOXDATA, + IOV_INFORINGS, + IOV_H2D_PHASE, + IOV_H2D_ENABLE_TRAP_BADPHASE, + IOV_H2D_TXPOST_MAX_ITEM, + IOV_TRAPDATA, + IOV_TRAPDATA_RAW, + IOV_CTO_PREVENTION, #ifdef PCIE_OOB IOV_OOB_BT_REG_ON, - IOV_OOB_ENABLE + IOV_OOB_ENABLE, #endif /* PCIE_OOB */ + IOV_PCIE_WD_RESET, + IOV_CTO_THRESHOLD, +#ifdef DHD_EFI + IOV_CONTROL_SIGNAL, +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) + IOV_DEEP_SLEEP, +#endif /* PCIE_OOB || PCIE_INB_DW */ +#endif /* DHD_EFI */ +#ifdef DEVICE_TX_STUCK_DETECT + IOV_DEVICE_TX_STUCK_DETECT, +#endif /* DEVICE_TX_STUCK_DETECT */ + IOV_INB_DW_ENABLE, + IOV_IDMA_ENABLE, + IOV_IFRM_ENABLE, + IOV_CLEAR_RING, +#ifdef DHD_EFI + IOV_WIFI_PROPERTIES, + IOV_OTP_DUMP +#endif }; const bcm_iovar_t dhdpcie_iovars[] = { - {"intr", IOV_INTR, 0, IOVT_BOOL, 0 }, - {"membytes", IOV_MEMBYTES, 0, IOVT_BUFFER, 2 * sizeof(int) }, - {"memsize", IOV_MEMSIZE, 0, IOVT_UINT32, 0 }, - {"dwnldstate", 
IOV_SET_DOWNLOAD_STATE, 0, IOVT_BOOL, 0 }, - {"vars", IOV_VARS, 0, IOVT_BUFFER, 0 }, - {"devreset", IOV_DEVRESET, 0, IOVT_BOOL, 0 }, - {"pcie_lpbk", IOV_PCIE_LPBK, 0, IOVT_UINT32, 0 }, - {"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, IOVT_BUFFER, 0 }, - {"ramsize", IOV_RAMSIZE, 0, IOVT_UINT32, 0 }, - {"ramstart", IOV_RAMSTART, 0, IOVT_UINT32, 0 }, - {"pciereg", IOV_PCIEREG, 0, IOVT_BUFFER, 2 * sizeof(int32) }, - {"pciecfgreg", IOV_PCIECFGREG, 0, IOVT_BUFFER, 2 * sizeof(int32) }, - {"pciecorereg", IOV_PCIECOREREG, 0, IOVT_BUFFER, 2 * sizeof(int32) }, - {"pcieserdesreg", IOV_PCIESERDESREG, 0, IOVT_BUFFER, 3 * sizeof(int32) }, - {"bar0secwinreg", IOV_BAR0_SECWIN_REG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, - {"sbreg", IOV_SBREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, - {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, IOVT_BUFFER, 3 * sizeof(int32) }, - {"pcie_suspend", IOV_PCIE_SUSPEND, 0, IOVT_UINT32, 0 }, + {"intr", IOV_INTR, 0, 0, IOVT_BOOL, 0 }, + {"memsize", IOV_MEMSIZE, 0, 0, IOVT_UINT32, 0 }, + {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, 0, IOVT_BOOL, 0 }, + {"vars", IOV_VARS, 0, 0, IOVT_BUFFER, 0 }, + {"devreset", IOV_DEVRESET, 0, 0, IOVT_BOOL, 0 }, + {"pcie_device_trap", IOV_FORCE_FW_TRAP, 0, 0, 0, 0 }, + {"pcie_lpbk", IOV_PCIE_LPBK, 0, 0, IOVT_UINT32, 0 }, + {"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, 0, IOVT_BUFFER, 0 }, + {"ramsize", IOV_RAMSIZE, 0, 0, IOVT_UINT32, 0 }, + {"ramstart", IOV_RAMSTART, 0, 0, IOVT_UINT32, 0 }, + {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, 3 * sizeof(int32) }, + {"pcie_suspend", IOV_PCIE_SUSPEND, 0, 0, IOVT_UINT32, 0 }, #ifdef PCIE_OOB - {"oob_bt_reg_on", IOV_OOB_BT_REG_ON, 0, IOVT_UINT32, 0 }, - {"oob_enable", IOV_OOB_ENABLE, 0, IOVT_UINT32, 0 }, + {"oob_bt_reg_on", IOV_OOB_BT_REG_ON, 0, 0, IOVT_UINT32, 0 }, + {"oob_enable", IOV_OOB_ENABLE, 0, 0, IOVT_UINT32, 0 }, #endif /* PCIE_OOB */ - {"sleep_allowed", IOV_SLEEP_ALLOWED, 0, IOVT_BOOL, 0 }, - {"dngl_isolation", IOV_DONGLEISOLATION, 0, IOVT_UINT32, 0 }, - {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 
0, IOVT_UINT32, 0 }, - {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, IOVT_BUFFER, 0 }, - {"dma_ring_indices", IOV_DMA_RINGINDICES, 0, IOVT_UINT32, 0}, - {"metadata_dbg", IOV_METADATA_DBG, 0, IOVT_BOOL, 0 }, - {"rx_metadata_len", IOV_RX_METADATALEN, 0, IOVT_UINT32, 0 }, - {"tx_metadata_len", IOV_TX_METADATALEN, 0, IOVT_UINT32, 0 }, - {"db1_for_mb", IOV_DB1_FOR_MB, 0, IOVT_UINT32, 0 }, - {"txp_thresh", IOV_TXP_THRESHOLD, 0, IOVT_UINT32, 0 }, - {"buzzz_dump", IOV_BUZZZ_DUMP, 0, IOVT_UINT32, 0 }, - {"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, IOVT_UINT32, 0 }, + {"sleep_allowed", IOV_SLEEP_ALLOWED, 0, 0, IOVT_BOOL, 0 }, + {"dngl_isolation", IOV_DONGLEISOLATION, 0, 0, IOVT_UINT32, 0 }, + {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, 0, IOVT_UINT32, 0 }, + {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, 0, IOVT_BUFFER, 0 }, + {"dma_ring_indices", IOV_DMA_RINGINDICES, 0, 0, IOVT_UINT32, 0}, + {"metadata_dbg", IOV_METADATA_DBG, 0, 0, IOVT_BOOL, 0 }, + {"rx_metadata_len", IOV_RX_METADATALEN, 0, 0, IOVT_UINT32, 0 }, + {"tx_metadata_len", IOV_TX_METADATALEN, 0, 0, IOVT_UINT32, 0 }, + {"db1_for_mb", IOV_DB1_FOR_MB, 0, 0, IOVT_UINT32, 0 }, + {"txp_thresh", IOV_TXP_THRESHOLD, 0, 0, IOVT_UINT32, 0 }, + {"buzzz_dump", IOV_BUZZZ_DUMP, 0, 0, IOVT_UINT32, 0 }, + {"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, 0, IOVT_UINT32, 0 }, #ifdef DHD_PCIE_RUNTIMEPM - {"idletime", IOV_IDLETIME, 0, IOVT_INT32, 0 }, + {"idletime", IOV_IDLETIME, 0, 0, IOVT_INT32, 0 }, #endif /* DHD_PCIE_RUNTIMEPM */ - {"rxbound", IOV_RXBOUND, 0, IOVT_UINT32, 0 }, - {"txbound", IOV_TXBOUND, 0, IOVT_UINT32, 0 }, - {"fw_hang_report", IOV_HANGREPORT, 0, IOVT_BOOL, 0 }, - {NULL, 0, 0, 0, 0 } + {"rxbound", IOV_RXBOUND, 0, 0, IOVT_UINT32, 0 }, + {"txbound", IOV_TXBOUND, 0, 0, IOVT_UINT32, 0 }, + {"fw_hang_report", IOV_HANGREPORT, 0, 0, IOVT_BOOL, 0 }, + {"h2d_mb_data", IOV_H2D_MAILBOXDATA, 0, 0, IOVT_UINT32, 0 }, + {"inforings", IOV_INFORINGS, 0, 0, IOVT_UINT32, 0 }, + {"h2d_phase", IOV_H2D_PHASE, 0, 0, IOVT_UINT32, 0 }, + 
{"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE, 0, 0, + IOVT_UINT32, 0 }, + {"h2d_max_txpost", IOV_H2D_TXPOST_MAX_ITEM, 0, 0, IOVT_UINT32, 0 }, + {"trap_data", IOV_TRAPDATA, 0, 0, IOVT_BUFFER, 0 }, + {"trap_data_raw", IOV_TRAPDATA_RAW, 0, 0, IOVT_BUFFER, 0 }, + {"cto_prevention", IOV_CTO_PREVENTION, 0, 0, IOVT_UINT32, 0 }, + {"pcie_wd_reset", IOV_PCIE_WD_RESET, 0, 0, IOVT_BOOL, 0 }, + {"cto_threshold", IOV_CTO_THRESHOLD, 0, 0, IOVT_UINT32, 0 }, +#ifdef DHD_EFI + {"control_signal", IOV_CONTROL_SIGNAL, 0, 0, IOVT_UINT32, 0}, +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) + {"deep_sleep", IOV_DEEP_SLEEP, 0, 0, IOVT_UINT32, 0}, +#endif /* PCIE_OOB || PCIE_INB_DW */ +#endif /* DHD_EFI */ + {"inb_dw_enable", IOV_INB_DW_ENABLE, 0, 0, IOVT_UINT32, 0 }, +#ifdef DEVICE_TX_STUCK_DETECT + {"dev_tx_stuck_monitor", IOV_DEVICE_TX_STUCK_DETECT, 0, 0, IOVT_UINT32, 0 }, +#endif /* DEVICE_TX_STUCK_DETECT */ + {"idma_enable", IOV_IDMA_ENABLE, 0, 0, IOVT_UINT32, 0 }, + {"ifrm_enable", IOV_IFRM_ENABLE, 0, 0, IOVT_UINT32, 0 }, + {"clear_ring", IOV_CLEAR_RING, 0, 0, IOVT_UINT32, 0 }, +#ifdef DHD_EFI + {"properties", IOV_WIFI_PROPERTIES, 0, 0, IOVT_BUFFER, 0}, + {"otp_dump", IOV_OTP_DUMP, 0, 0, IOVT_BUFFER, 0}, +#endif + {NULL, 0, 0, 0, 0, 0 } }; @@ -254,14 +309,16 @@ const bcm_iovar_t dhdpcie_iovars[] = { #ifndef DHD_TXBOUND #define DHD_TXBOUND 64 #endif + +#define DHD_INFORING_BOUND 32 + uint dhd_rxbound = DHD_RXBOUND; uint dhd_txbound = DHD_TXBOUND; -/* Register/Unregister functions are called by the main DHD entry - * point (e.g. module insertion) to link with the bus driver, in - * order to look for or await the device. +/** + * Register/Unregister functions are called by the main DHD entry point (eg module insertion) to + * link with the bus driver, in order to look for or await the device. 
*/ - int dhd_bus_register(void) { @@ -288,9 +345,9 @@ dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size) } void -dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size) +dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size) { - REG_UNMAP((void*)(uintptr)addr); + REG_UNMAP(addr); return; } @@ -320,7 +377,17 @@ dhd_bus_t* dhdpcie_bus_attach(osl_t *osh, /* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */ bus->dev = (struct pci_dev *)pci_dev; - dll_init(&bus->const_flowring); + + dll_init(&bus->flowring_active_list); +#ifdef IDLE_TX_FLOW_MGMT + bus->active_list_last_process_ts = OSL_SYSUPTIME(); +#endif /* IDLE_TX_FLOW_MGMT */ + +#ifdef DEVICE_TX_STUCK_DETECT + /* Enable the Device stuck detection feature by default */ + bus->dev_tx_stuck_monitor = TRUE; + bus->device_tx_stuck_check = OSL_SYSUPTIME(); +#endif /* DEVICE_TX_STUCK_DETECT */ /* Attach pcie shared structure */ if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) { @@ -344,13 +411,18 @@ dhd_bus_t* dhdpcie_bus_attach(osl_t *osh, bus->dhd->busstate = DHD_BUS_DOWN; bus->db1_for_mb = TRUE; bus->dhd->hang_report = TRUE; + bus->use_mailbox = FALSE; + bus->use_d0_inform = FALSE; +#ifdef IDLE_TX_FLOW_MGMT + bus->enable_idle_flowring_mgmt = FALSE; +#endif /* IDLE_TX_FLOW_MGMT */ bus->irq_registered = FALSE; - bus->d3_ack_war_cnt = 0; - DHD_TRACE(("%s: EXIT SUCCESS\n", __FUNCTION__)); - +#ifdef DHD_FW_COREDUMP + g_dhd_bus = bus; +#endif return bus; } while (0); @@ -387,10 +459,10 @@ dhd_bus_pub(struct dhd_bus *bus) return bus->dhd; } -void * +const void * dhd_bus_sih(struct dhd_bus *bus) { - return (void *)bus->sih; + return (const void *)bus->sih; } void * @@ -420,13 +492,26 @@ uint dhd_bus_chippkg_id(dhd_pub_t *dhdp) return bus->sih->chippkg; } -/** Read and clear intstatus. This should be called with interupts disabled or inside isr */ +/** Read and clear intstatus. 
This should be called with interrupts disabled or inside isr */ uint32 dhdpcie_bus_intstatus(dhd_bus_t *bus) { uint32 intstatus = 0; uint32 intmask = 0; + if ((bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) && + bus->wait_for_d3_ack) { +#ifdef DHD_EFI + DHD_INFO(("%s: trying to clear intstatus during suspend (%d)" + " or suspend in progress %d\n", + __FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending)); +#else + DHD_ERROR(("%s: trying to clear intstatus during suspend (%d)" + " or suspend in progress %d\n", + __FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending)); +#endif /* !DHD_EFI */ + return intstatus; + } if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) || (bus->sih->buscorerev == 2)) { intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4); @@ -439,6 +524,20 @@ dhdpcie_bus_intstatus(dhd_bus_t *bus) /* this is a PCIE core register..not a config register... */ intmask = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, 0, 0); + intstatus &= intmask; + /* Is device removed. intstatus & intmask read 0xffffffff */ + if (intstatus == (uint32)-1) { + DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__)); +#ifdef CUSTOMER_HW4_DEBUG +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN; + dhd_os_send_hang_message(bus->dhd); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */ +#endif /* CUSTOMER_HW4_DEBUG */ + return intstatus; + } + + /* * The fourth argument to si_corereg is the "mask" fields of the register to update * and the fifth field is the "value" to update. Now if we are interested in only @@ -448,20 +547,6 @@ dhdpcie_bus_intstatus(dhd_bus_t *bus) si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, bus->def_intmask, intstatus); - intstatus &= intmask; - - /* Is device removed. 
intstatus & intmask read 0xffffffff */ - if (intstatus == (uint32)-1) { - DHD_ERROR(("%s: !!!!!!Device Removed or dead chip.\n", __FUNCTION__)); - intstatus = 0; -#ifdef CUSTOMER_HW4_DEBUG -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) - bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN; - dhd_os_send_hang_message(bus->dhd); -#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */ -#endif /* CUSTOMER_HW4_DEBUG */ - } - intstatus &= bus->def_intmask; } @@ -498,11 +583,23 @@ dhdpcie_bus_isr(dhd_bus_t *bus) } if (bus->dhd->busstate == DHD_BUS_DOWN) { - DHD_ERROR(("%s: BUS is down, not processing the interrupt \r\n", - __FUNCTION__)); break; } + + if (PCIECTO_ENAB(bus->dhd)) { + /* read pci_intstatus */ + intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4); + + if (intstatus & PCI_CTO_INT_MASK) { + /* reset backplane and cto, + * then access through pcie is recovered. + */ + dhdpcie_cto_error_recovery(bus); + return TRUE; + } + } + intstatus = dhdpcie_bus_intstatus(bus); /* Check if the interrupt is ours or not */ @@ -511,8 +608,16 @@ dhdpcie_bus_isr(dhd_bus_t *bus) } /* save the intstatus */ + /* read interrupt status register!! Status bits will be cleared in DPC !! */ bus->intstatus = intstatus; + /* return error for 0xFFFFFFFF */ + if (intstatus == (uint32)-1) { + dhdpcie_disable_irq_nosync(bus); + bus->is_linkdown = TRUE; + return BCME_ERROR; + } + /* Overall operation: * - Mask further interrupts * - Read/ack intstatus @@ -523,9 +628,11 @@ dhdpcie_bus_isr(dhd_bus_t *bus) /* Count the interrupt call */ bus->intrcount++; - /* read interrupt status register!! Status bits will be cleared in DPC !! */ bus->ipend = TRUE; - dhdpcie_bus_intr_disable(bus); /* Disable interrupt!! */ + + bus->isr_intr_disable_count++; + dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! 
*/ + bus->intdis = TRUE; #if defined(PCIE_ISR_THREAD) @@ -548,6 +655,176 @@ dhdpcie_bus_isr(dhd_bus_t *bus) return FALSE; } +int +dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state) +{ + uint32 cur_state = 0; + uint32 pm_csr = 0; + osl_t *osh = bus->osh; + + pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32)); + cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK; + + if (cur_state == state) { + DHD_ERROR(("%s: Already in state %u \n", __FUNCTION__, cur_state)); + return BCME_OK; + } + + if (state > PCIECFGREG_PM_CSR_STATE_D3_HOT) + return BCME_ERROR; + + /* Validate the state transition + * if already in a lower power state, return error + */ + if (state != PCIECFGREG_PM_CSR_STATE_D0 && + cur_state <= PCIECFGREG_PM_CSR_STATE_D3_COLD && + cur_state > state) { + DHD_ERROR(("%s: Invalid power state transition !\n", __FUNCTION__)); + return BCME_ERROR; + } + + pm_csr &= ~PCIECFGREG_PM_CSR_STATE_MASK; + pm_csr |= state; + + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32), pm_csr); + + /* need to wait for the specified mandatory pcie power transition delay time */ + if (state == PCIECFGREG_PM_CSR_STATE_D3_HOT || + cur_state == PCIECFGREG_PM_CSR_STATE_D3_HOT) + OSL_DELAY(DHDPCIE_PM_D3_DELAY); + else if (state == PCIECFGREG_PM_CSR_STATE_D2 || + cur_state == PCIECFGREG_PM_CSR_STATE_D2) + OSL_DELAY(DHDPCIE_PM_D2_DELAY); + + /* read back the power state and verify */ + pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32)); + cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK; + if (cur_state != state) { + DHD_ERROR(("%s: power transition failed ! 
Current state is %u \n", + __FUNCTION__, cur_state)); + return BCME_ERROR; + } else { + DHD_ERROR(("%s: power transition to %u success \n", + __FUNCTION__, cur_state)); + } + + return BCME_OK; + +} + +int +dhdpcie_config_check(dhd_bus_t *bus) +{ + uint32 i, val; + int ret = BCME_ERROR; + + for (i = 0; i < DHDPCIE_CONFIG_CHECK_RETRY_COUNT; i++) { + val = OSL_PCI_READ_CONFIG(bus->osh, PCI_CFG_VID, sizeof(uint32)); + if ((val & 0xFFFF) == VENDOR_BROADCOM) { + ret = BCME_OK; + break; + } + OSL_DELAY(DHDPCIE_CONFIG_CHECK_DELAY_MS * 1000); + } + + return ret; +} + +int +dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr) +{ + uint32 i; + osl_t *osh = bus->osh; + + if (BCME_OK != dhdpcie_config_check(bus)) { + return BCME_ERROR; + } + + for (i = PCI_CFG_REV >> 2; i < DHDPCIE_CONFIG_HDR_SIZE; i++) { + OSL_PCI_WRITE_CONFIG(osh, i << 2, sizeof(uint32), bus->saved_config.header[i]); + } + OSL_PCI_WRITE_CONFIG(osh, PCI_CFG_CMD, sizeof(uint32), bus->saved_config.header[1]); + + if (restore_pmcsr) + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, + sizeof(uint32), bus->saved_config.pmcsr); + + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_CAP, sizeof(uint32), bus->saved_config.msi_cap); + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, sizeof(uint32), + bus->saved_config.msi_addr0); + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_H, + sizeof(uint32), bus->saved_config.msi_addr1); + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_DATA, + sizeof(uint32), bus->saved_config.msi_data); + + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_DEV_STATUS_CTRL, + sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat); + OSL_PCI_WRITE_CONFIG(osh, PCIECFGGEN_DEV_STATUS_CTRL2, + sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat2); + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL, + sizeof(uint32), bus->saved_config.exp_link_ctrl_stat); + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL2, + sizeof(uint32), bus->saved_config.exp_link_ctrl_stat2); + + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1, 
+ sizeof(uint32), bus->saved_config.l1pm0); + OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2, + sizeof(uint32), bus->saved_config.l1pm1); + + OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, + sizeof(uint32), bus->saved_config.bar0_win); + OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR1_WIN, + sizeof(uint32), bus->saved_config.bar1_win); + + return BCME_OK; +} + +int +dhdpcie_config_save(dhd_bus_t *bus) +{ + uint32 i; + osl_t *osh = bus->osh; + + if (BCME_OK != dhdpcie_config_check(bus)) { + return BCME_ERROR; + } + + for (i = 0; i < DHDPCIE_CONFIG_HDR_SIZE; i++) { + bus->saved_config.header[i] = OSL_PCI_READ_CONFIG(osh, i << 2, sizeof(uint32)); + } + + bus->saved_config.pmcsr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32)); + + bus->saved_config.msi_cap = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_CAP, + sizeof(uint32)); + bus->saved_config.msi_addr0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, + sizeof(uint32)); + bus->saved_config.msi_addr1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_H, + sizeof(uint32)); + bus->saved_config.msi_data = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_DATA, + sizeof(uint32)); + + bus->saved_config.exp_dev_ctrl_stat = OSL_PCI_READ_CONFIG(osh, + PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32)); + bus->saved_config.exp_dev_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh, + PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32)); + bus->saved_config.exp_link_ctrl_stat = OSL_PCI_READ_CONFIG(osh, + PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32)); + bus->saved_config.exp_link_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh, + PCIECFGREG_LINK_STATUS_CTRL2, sizeof(uint32)); + + bus->saved_config.l1pm0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1, + sizeof(uint32)); + bus->saved_config.l1pm1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2, + sizeof(uint32)); + + bus->saved_config.bar0_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR0_WIN, + sizeof(uint32)); + bus->saved_config.bar1_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR1_WIN, + sizeof(uint32)); + return BCME_OK; +} + #ifdef 
EXYNOS_PCIE_LINKDOWN_RECOVERY dhd_pub_t *link_recovery = NULL; #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ @@ -556,7 +833,7 @@ dhdpcie_dongle_attach(dhd_bus_t *bus) { osl_t *osh = bus->osh; - void *regsva = (void*)bus->regs; + volatile void *regsva = (volatile void*)bus->regs; uint16 devid = bus->cl_devid; uint32 val; sbpcieregs_t *sbpcieregs; @@ -580,6 +857,24 @@ dhdpcie_dongle_attach(dhd_bus_t *bus) goto fail; } + /* + * Checking PCI_SPROM_CONTROL register for preventing invalid address access + * due to switch address space from PCI_BUS to SI_BUS. + */ + val = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32)); + if (val == 0xffffffff) { + DHD_ERROR(("%s : failed to read SPROM control register\n", __FUNCTION__)); + goto fail; + } + +#ifdef DHD_EFI + /* Save good copy of PCIe config space */ + if (BCME_OK != dhdpcie_config_save(bus)) { + DHD_ERROR(("%s : failed to save PCI configuration space!\n", __FUNCTION__)); + goto fail; + } +#endif /* DHD_EFI */ + /* si_attach() will provide an SI handle and scan the backplane */ if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus, &bus->vars, &bus->varsz))) { @@ -587,6 +882,31 @@ dhdpcie_dongle_attach(dhd_bus_t *bus) goto fail; } + /* Olympic EFI requirement - stop driver load if FW is already running + * need to do this here before pcie_watchdog_reset, because + * pcie_watchdog_reset will put the ARM back into halt state + */ + if (!dhdpcie_is_arm_halted(bus)) { + DHD_ERROR(("%s: ARM is not halted,FW is already running! Abort.\n", + __FUNCTION__)); + goto fail; + } + + /* Enable CLKREQ# */ + dhdpcie_clkreq(bus->osh, 1, 1); + +#ifndef DONGLE_ENABLE_ISOLATION + /* + * Issue CC watchdog to reset all the cores on the chip - similar to rmmod dhd + * This is required to avoid spurious interrupts to the Host and bring back + * dongle to a sane state (on host soft-reboot / watchdog-reboot). 
+ */ + pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *) bus->regs); +#endif /* !DONGLE_ENABLE_ISOLATION */ + +#ifdef DHD_EFI + dhdpcie_dongle_pwr_toggle(bus); +#endif si_setcore(bus->sih, PCIE2_CORE_ID, 0); sbpcieregs = (sbpcieregs_t*)(bus->regs); @@ -609,12 +929,17 @@ dhdpcie_dongle_attach(dhd_bus_t *bus) } if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) { - if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) { - DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__)); - goto fail; + /* Only set dongle RAMSIZE to default value when ramsize is not adjusted */ + if (!bus->ramsize_adjusted) { + if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) { + DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__)); + goto fail; + } + /* also populate base address */ + bus->dongle_ram_base = CA7_4365_RAM_BASE; + /* Default reserve 1.75MB for CA7 */ + bus->orig_ramsize = 0x1c0000; } - /* also populate base address */ - bus->dongle_ram_base = CA7_4365_RAM_BASE; } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { if (!(bus->orig_ramsize = si_socram_size(bus->sih))) { DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__)); @@ -633,7 +958,6 @@ dhdpcie_dongle_attach(dhd_bus_t *bus) bus->dongle_ram_base = CR4_4335_RAM_BASE; break; case BCM4358_CHIP_ID: - case BCM4356_CHIP_ID: case BCM4354_CHIP_ID: case BCM43567_CHIP_ID: case BCM43569_CHIP_ID: @@ -644,6 +968,11 @@ dhdpcie_dongle_attach(dhd_bus_t *bus) case BCM4360_CHIP_ID: bus->dongle_ram_base = CR4_4360_RAM_BASE; break; + + case BCM4364_CHIP_ID: + bus->dongle_ram_base = CR4_4364_RAM_BASE; + break; + CASE_BCM4345_CHIP: bus->dongle_ram_base = (bus->sih->chiprev < 6) /* changed at 4345C0 */ ? 
CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE; @@ -652,9 +981,12 @@ dhdpcie_dongle_attach(dhd_bus_t *bus) bus->dongle_ram_base = CR4_43602_RAM_BASE; break; case BCM4349_CHIP_GRPID: - /* RAM base changed from 4349c0(revid=9) onwards */ + /* RAM based changed from 4349c0(revid=9) onwards */ bus->dongle_ram_base = ((bus->sih->chiprev < 9) ? - CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9); + CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9); + break; + case BCM4347_CHIP_GRPID: + bus->dongle_ram_base = CR4_4347_RAM_BASE; break; default: bus->dongle_ram_base = 0; @@ -678,34 +1010,18 @@ dhdpcie_dongle_attach(dhd_bus_t *bus) bus->intr = (bool)dhd_intr; bus->wait_for_d3_ack = 1; - bus->suspended = FALSE; - #ifdef PCIE_OOB - gpio_handle_val = get_handle(OOB_PORT); - if (gpio_handle_val < 0) - { - DHD_ERROR(("%s: Could not get GPIO handle.\n", __FUNCTION__)); - ASSERT(FALSE); - } - - gpio_direction = 0; - ftdi_set_bitmode(gpio_handle_val, 0, BITMODE_BITBANG); - - /* Note BT core is also enabled here */ - gpio_port = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE; - gpio_write_port(gpio_handle_val, gpio_port); - - gpio_direction = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE; - ftdi_set_bitmode(gpio_handle_val, gpio_direction, BITMODE_BITBANG); - - bus->oob_enabled = TRUE; - - /* drive the Device_Wake GPIO low on startup */ - bus->device_wake_state = TRUE; - dhd_bus_set_device_wake(bus, FALSE); - dhd_bus_doorbell_timeout_reset(bus); + dhdpcie_oob_init(bus); #endif /* PCIE_OOB */ - +#ifdef PCIE_INB_DW + bus->inb_enabled = TRUE; +#endif /* PCIE_INB_DW */ + bus->dongle_in_ds = FALSE; + bus->idma_enabled = TRUE; + bus->ifrm_enabled = TRUE; +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) + bus->ds_enabled = TRUE; +#endif DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__)); return 0; @@ -734,42 +1050,42 @@ dhpcie_bus_mask_interrupt(dhd_bus_t *bus) void dhdpcie_bus_intr_enable(dhd_bus_t *bus) { - DHD_TRACE(("%s: enable interrupts\n", __FUNCTION__)); + 
DHD_TRACE(("%s Enter\n", __FUNCTION__)); if (bus && bus->sih && !bus->is_linkdown) { if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4)) { dhpcie_bus_unmask_interrupt(bus); } else { - si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, - bus->def_intmask, bus->def_intmask); + /* Skip after recieving D3 ACK */ + if ((bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) && + bus->wait_for_d3_ack) { + return; + } + si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, + bus->def_intmask, bus->def_intmask); } - } else { - DHD_ERROR(("****** %s: failed ******\n", __FUNCTION__)); - DHD_ERROR(("bus: %p sih: %p bus->is_linkdown %d\n", - bus, bus ? bus->sih : NULL, bus ? bus->is_linkdown: -1)); } + DHD_TRACE(("%s Exit\n", __FUNCTION__)); } void dhdpcie_bus_intr_disable(dhd_bus_t *bus) { - DHD_TRACE(("%s Enter\n", __FUNCTION__)); - if (bus && bus->sih && !bus->is_linkdown) { if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4)) { dhpcie_bus_mask_interrupt(bus); } else { + /* Skip after recieving D3 ACK */ + if ((bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) && + bus->wait_for_d3_ack) { + return; + } si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, bus->def_intmask, 0); } - } else { - DHD_ERROR(("****** %s: failed ******\n", __FUNCTION__)); - DHD_ERROR(("bus: %p sih: %p bus->is_linkdown %d\n", - bus, bus ? bus->sih : NULL, bus ? 
bus->is_linkdown: -1)); } - DHD_TRACE(("%s Exit\n", __FUNCTION__)); } @@ -791,10 +1107,10 @@ dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp) DHD_GENERAL_UNLOCK(dhdp, flags); timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state); - if (timeleft == 0) { + if ((timeleft == 0) || (timeleft == 1)) { DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n", __FUNCTION__, dhdp->dhd_bus_busy_state)); - BUG_ON(1); + ASSERT(0); } return; @@ -810,12 +1126,27 @@ dhdpcie_bus_remove_prep(dhd_bus_t *bus) bus->dhd->busstate = DHD_BUS_DOWN; DHD_GENERAL_UNLOCK(bus->dhd, flags); +#ifdef PCIE_INB_DW + /* De-Initialize the lock to serialize Device Wake Inband activities */ + if (bus->inb_lock) { + dhd_os_spin_lock_deinit(bus->dhd->osh, bus->inb_lock); + bus->inb_lock = NULL; + } +#endif + + dhd_os_sdlock(bus->dhd); - dhdpcie_bus_intr_disable(bus); - // terence 20150406: fix for null pointer handle when doing remove driver - if (!bus->dhd->dongle_isolation && bus->sih) { - pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs)); + if (bus->sih && !bus->dhd->dongle_isolation) { + /* Has insmod fails after rmmod issue in Brix Android */ + /* if the pcie link is down, watchdog reset should not be done, as it may hang */ + if (!bus->is_linkdown) + pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *) bus->regs); + else + DHD_ERROR(("%s: skipping watchdog reset, due to pcie link down ! \n", + __FUNCTION__)); + + bus->dhd->is_pcie_watchdog_reset = TRUE; } dhd_os_sdunlock(bus->dhd); @@ -840,6 +1171,7 @@ dhdpcie_bus_release(dhd_bus_t *bus) if (bus->dhd) { dhdpcie_advertise_bus_cleanup(bus->dhd); dongle_isolation = bus->dhd->dongle_isolation; + bus->dhd->is_pcie_watchdog_reset = FALSE; dhdpcie_bus_remove_prep(bus); if (bus->intr) { @@ -854,11 +1186,11 @@ dhdpcie_bus_release(dhd_bus_t *bus) /* unmap the regs and tcm here!! 
*/ if (bus->regs) { - dhdpcie_bus_reg_unmap(osh, (ulong)bus->regs, DONGLE_REG_MAP_SIZE); + dhdpcie_bus_reg_unmap(osh, bus->regs, DONGLE_REG_MAP_SIZE); bus->regs = NULL; } if (bus->tcm) { - dhdpcie_bus_reg_unmap(osh, (ulong)bus->tcm, DONGLE_TCM_MAP_SIZE); + dhdpcie_bus_reg_unmap(osh, bus->tcm, DONGLE_TCM_MAP_SIZE); bus->tcm = NULL; } @@ -869,11 +1201,9 @@ dhdpcie_bus_release(dhd_bus_t *bus) bus->pcie_sh = NULL; } -#ifdef DHD_DEBUG - - if (bus->console.buf != NULL) + if (bus->console.buf != NULL) { MFREE(osh, bus->console.buf, bus->console.bufsize); -#endif + } /* Finally free bus info */ @@ -898,16 +1228,24 @@ dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bo if (bus->sih) { - if (!dongle_isolation) - pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs)); - + if (!dongle_isolation && + (bus->dhd && !bus->dhd->is_pcie_watchdog_reset)) + pcie_watchdog_reset(bus->osh, bus->sih, + (sbpcieregs_t *) bus->regs); +#ifdef DHD_EFI + dhdpcie_dongle_pwr_toggle(bus); +#endif if (bus->ltrsleep_on_unload) { si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0); } if (bus->sih->buscorerev == 13) - pcie_serdes_iddqdisable(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs)); + pcie_serdes_iddqdisable(bus->osh, bus->sih, + (sbpcieregs_t *) bus->regs); + + /* Disable CLKREQ# */ + dhdpcie_clkreq(bus->osh, 1, 0); if (bus->sih != NULL) { si_detach(bus->sih); @@ -1009,21 +1347,148 @@ done: return; } +#ifdef DEVICE_TX_STUCK_DETECT +void +dhd_bus_send_msg_to_daemon(int reason) +{ + bcm_to_info_t to_info; + + to_info.magic = BCM_TO_MAGIC; + to_info.reason = reason; + + dhd_send_msg_to_daemon(NULL, (void *)&to_info, sizeof(bcm_to_info_t)); + return; +} + +/** + * scan the flow rings in active list to check if stuck and notify application + * The conditions for warn/stuck detection are + * 1. Flow ring is active + * 2. There are packets to be consumed by the consumer (wr != rd) + * If 1 and 2 are true, then + * 3. 
Warn, if Tx completion is not received for a duration of DEVICE_TX_STUCK_WARN_DURATION + * 4. Trap FW, if Tx completion is not received for a duration of DEVICE_TX_STUCK_DURATION + */ +static void +dhd_bus_device_tx_stuck_scan(dhd_bus_t *bus) +{ + uint32 tx_cmpl; + unsigned long list_lock_flags; + unsigned long ring_lock_flags; + dll_t *item, *prev; + flow_ring_node_t *flow_ring_node; + bool ring_empty; + bool active; + + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, list_lock_flags); + + for (item = dll_tail_p(&bus->flowring_active_list); + !dll_end(&bus->flowring_active_list, item); item = prev) { + + prev = dll_prev_p(item); + + flow_ring_node = dhd_constlist_to_flowring(item); + DHD_FLOWRING_LOCK(flow_ring_node->lock, ring_lock_flags); + tx_cmpl = flow_ring_node->tx_cmpl; + active = flow_ring_node->active; + ring_empty = dhd_prot_is_cmpl_ring_empty(bus->dhd, flow_ring_node->prot_info); + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, ring_lock_flags); + + if (ring_empty) { + /* reset conters... etc */ + flow_ring_node->stuck_count = 0; + flow_ring_node->tx_cmpl_prev = tx_cmpl; + continue; + } + /** + * DEVICE_TX_STUCK_WARN_DURATION, DEVICE_TX_STUCK_DURATION are integer + * representation of time, to decide if a flow is in warn state or stuck. + * + * flow_ring_node->stuck_count is an integer counter representing how long + * tx_cmpl is not received though there are pending packets in the ring + * to be consumed by the dongle for that particular flow. + * + * This method of determining time elapsed is helpful in sleep/wake scenarios. + * If host sleeps and wakes up, that sleep time is not considered into + * stuck duration. 
+ */ + if ((tx_cmpl == flow_ring_node->tx_cmpl_prev) && active) { + + flow_ring_node->stuck_count++; + + DHD_ERROR(("%s: flowid: %d tx_cmpl: %u tx_cmpl_prev: %u stuck_count: %d\n", + __func__, flow_ring_node->flowid, tx_cmpl, + flow_ring_node->tx_cmpl_prev, flow_ring_node->stuck_count)); + + switch (flow_ring_node->stuck_count) { + case DEVICE_TX_STUCK_WARN_DURATION: + /** + * Notify Device Tx Stuck Notification App about the + * device Tx stuck warning for this flowid. + * App will collect the logs required. + */ + DHD_ERROR(("stuck warning for flowid: %d sent to app\n", + flow_ring_node->flowid)); + dhd_bus_send_msg_to_daemon(REASON_DEVICE_TX_STUCK_WARNING); + break; + case DEVICE_TX_STUCK_DURATION: + /** + * Notify Device Tx Stuck Notification App about the + * device Tx stuck info for this flowid. + * App will collect the logs required. + */ + DHD_ERROR(("stuck information for flowid: %d sent to app\n", + flow_ring_node->flowid)); + dhd_bus_send_msg_to_daemon(REASON_DEVICE_TX_STUCK); + break; + default: + break; + } + } else { + flow_ring_node->tx_cmpl_prev = tx_cmpl; + flow_ring_node->stuck_count = 0; + } + } + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, list_lock_flags); +} +/** + * schedules dhd_bus_device_tx_stuck_scan after DEVICE_TX_STUCK_CKECK_TIMEOUT, + * to determine if any flowid is stuck. 
+ */ +static void +dhd_bus_device_stuck_scan(dhd_bus_t *bus) +{ + uint32 time_stamp; /* in millisec */ + uint32 diff; + + /* Need not run the algorith if Dongle has trapped */ + if (bus->dhd->dongle_trap_occured) { + return; + } + time_stamp = OSL_SYSUPTIME(); + diff = time_stamp - bus->device_tx_stuck_check; + if (diff > DEVICE_TX_STUCK_CKECK_TIMEOUT) { + dhd_bus_device_tx_stuck_scan(bus); + bus->device_tx_stuck_check = OSL_SYSUPTIME(); + } + return; +} +#endif /* DEVICE_TX_STUCK_DETECT */ + /** Watchdog timer function */ bool dhd_bus_watchdog(dhd_pub_t *dhd) { unsigned long flags; -#ifdef DHD_DEBUG dhd_bus_t *bus; bus = dhd->bus; DHD_GENERAL_LOCK(dhd, flags); - if (dhd->busstate == DHD_BUS_DOWN || - dhd->busstate == DHD_BUS_DOWN_IN_PROGRESS) { + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) || + DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd)) { DHD_GENERAL_UNLOCK(dhd, flags); return FALSE; } - dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_WD; + DHD_BUS_BUSY_SET_IN_WD(dhd); DHD_GENERAL_UNLOCK(dhd, flags); #ifdef DHD_PCIE_RUNTIMEPM @@ -1033,7 +1498,8 @@ bool dhd_bus_watchdog(dhd_pub_t *dhd) /* Poll for console output periodically */ - if (dhd->busstate == DHD_BUS_DATA && dhd_console_ms != 0) { + if (dhd->busstate == DHD_BUS_DATA && + dhd_console_ms != 0 && !bus->d3_suspend_pending) { bus->console.count += dhd_watchdog_ms; if (bus->console.count >= dhd_console_ms) { bus->console.count -= dhd_console_ms; @@ -1042,142 +1508,58 @@ bool dhd_bus_watchdog(dhd_pub_t *dhd) dhd_console_ms = 0; /* On error, stop trying */ } } -#endif /* DHD_DEBUG */ -#ifdef PCIE_OOB +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) /* If haven't communicated with device for a while, deassert the Device_Wake GPIO */ - if (dhd_doorbell_timeout != 0 && !(bus->dhd->busstate == DHD_BUS_SUSPEND) && - dhd_timeout_expired(&bus->doorbell_timer)) { + if (dhd_doorbell_timeout != 0 && dhd->busstate == DHD_BUS_DATA && + dhd->up && dhd_timeout_expired(&bus->doorbell_timer)) { dhd_bus_set_device_wake(bus, 
FALSE); } -#endif /* PCIE_OOB */ +#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */ +#ifdef PCIE_INB_DW + if (INBAND_DW_ENAB(bus)) { + if (bus->ds_exit_timeout) { + bus->ds_exit_timeout --; + if (bus->ds_exit_timeout == 1) { + DHD_ERROR(("DS-EXIT TIMEOUT\n")); + bus->ds_exit_timeout = 0; + bus->inband_ds_exit_to_cnt++; + } + } + if (bus->host_sleep_exit_timeout) { + bus->host_sleep_exit_timeout --; + if (bus->host_sleep_exit_timeout == 1) { + DHD_ERROR(("HOST_SLEEP-EXIT TIMEOUT\n")); + bus->host_sleep_exit_timeout = 0; + bus->inband_host_sleep_exit_to_cnt++; + } + } + } +#endif /* PCIE_INB_DW */ + +#ifdef DEVICE_TX_STUCK_DETECT + if (dhd->bus->dev_tx_stuck_monitor == TRUE) { + dhd_bus_device_stuck_scan(dhd->bus); + } +#endif /* DEVICE_TX_STUCK_DETECT */ DHD_GENERAL_LOCK(dhd, flags); - dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_WD; + DHD_BUS_BUSY_CLEAR_IN_WD(dhd); + dhd_os_busbusy_wake(dhd); DHD_GENERAL_UNLOCK(dhd, flags); - return TRUE; } /* dhd_bus_watchdog */ -#define DEADBEEF_PATTERN 0xADDEADDE // "DeadDead" -#define MEMCHECKINFO "/data/.memcheck.info" - -static int -dhd_get_memcheck_info(void) +uint16 +dhd_get_chipid(dhd_pub_t *dhd) { - struct file *fp = NULL; - uint32 mem_val = 0; - int ret = 0; - char *filepath = MEMCHECKINFO; + dhd_bus_t *bus = dhd->bus; - fp = filp_open(filepath, O_RDONLY, 0); - if (IS_ERR(fp)) { - DHD_ERROR(("[WIFI_SEC] %s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); - goto done; - } else { - ret = kernel_read(fp, 0, (char *)&mem_val, 4); - if (ret < 0) { - DHD_ERROR(("[WIFI_SEC] %s: File read error, ret=%d\n", __FUNCTION__, ret)); - filp_close(fp, NULL); - goto done; - } - - mem_val = bcm_atoi((char *)&mem_val); - - DHD_ERROR(("[WIFI_SEC]%s: MEMCHECK ENABLED = %d\n", __FUNCTION__, mem_val)); - filp_close(fp, NULL); - } -done: - return mem_val; -} - -static int -dhdpcie_mem_check(struct dhd_bus *bus) -{ - int bcmerror = BCME_OK; - int offset = 0; - int len = 0; - uint8 *memblock = NULL, *memptr; - int size = bus->ramsize; - 
int i; - uint32 memcheck_enabled; - - /* Read memcheck info from the file */ - /* 0 : Disable */ - /* 1 : "Dead Beef" pattern write */ - /* 2 : "Dead Beef" pattern write and checking the pattern value */ - - memcheck_enabled = dhd_get_memcheck_info(); - - DHD_ERROR(("%s: memcheck_enabled: %d \n", __FUNCTION__, memcheck_enabled)); - - if (memcheck_enabled == 0) { - return bcmerror; - } - - memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); - if (memblock == NULL) { - DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); - goto err; - } - - if ((ulong)memblock % DHD_SDALIGN) { - memptr += (DHD_SDALIGN - ((ulong)memblock % DHD_SDALIGN)); - } - - for (i = 0; i < MEMBLOCK; i = i + 4) { - *(ulong*)(memptr + i) = DEADBEEF_PATTERN; - } - - if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || - si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) { - if (offset == 0) { - /* Add start of RAM address to the address given by user */ - offset += bus->dongle_ram_base; - } - } - - /* Write "DeadBeef" pattern with MEMBLOCK size */ - while (size) { - len = MIN(MEMBLOCK, size); - - bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len); - if (bcmerror) { - DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", - __FUNCTION__, bcmerror, MEMBLOCK, offset)); - goto err; - } - - if (memcheck_enabled == 2) { - bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, (uint8 *)memptr, len); - if (bcmerror) { - DHD_ERROR(("%s: error %d on read %d membytes at 0x%08x\n", - __FUNCTION__, bcmerror, MEMBLOCK, offset)); - goto err; - } else { - for (i = 0; i < len; i = i+4) { - if ((*(uint32*)(memptr + i)) != DEADBEEF_PATTERN) { - DHD_ERROR(("%s: error on reading pattern at " - "0x%08x\n", __FUNCTION__, (offset + i))); - bcmerror = BCME_ERROR; - goto err; - } - } - } - } - offset += MEMBLOCK; - size -= MEMBLOCK; - } - - DHD_ERROR(("%s: Writing the Dead Beef pattern is Done \n", __FUNCTION__)); - -err: - if (memblock) { - MFREE(bus->dhd->osh, memblock, 
MEMBLOCK + DHD_SDALIGN); - } - - return bcmerror; + if (bus && bus->sih) + return (uint16)si_chipid(bus->sih); + else + return 0; } /* Download firmware image and nvram image */ @@ -1193,11 +1575,14 @@ dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, bus->dhd->clm_path = pclm_path; bus->dhd->conf_path = pconf_path; + +#if defined(DHD_BLOB_EXISTENCE_CHECK) + dhd_set_blob_support(bus->dhd, bus->fw_path); +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n", __FUNCTION__, bus->fw_path, bus->nv_path)); - dhdpcie_mem_check(bus); - ret = dhdpcie_download_firmware(bus, osh); return ret; @@ -1287,13 +1672,16 @@ dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path) int bcmerror = BCME_ERROR; int offset = 0; int len = 0; + bool store_reset; char *imgbuf = NULL; uint8 *memblock = NULL, *memptr; uint8 *memptr_tmp = NULL; // terence: check downloaded firmware is correct int offset_end = bus->ramsize; +#ifndef DHD_EFI DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path)); +#endif /* DHD_EFI */ /* Should succeed in opening image if it is actually given through registry * entry or in module param. 
@@ -1316,11 +1704,15 @@ dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path) goto err; } } - if ((uint32)(uintptr)memblock % DHD_SDALIGN) + if ((uint32)(uintptr)memblock % DHD_SDALIGN) { memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN)); + } + + + /* check if CR4/CA7 */ + store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || + si_setcore(bus->sih, ARMCA7_CORE_ID, 0)); - DHD_INFO_HW4(("%s: dongle_ram_base: 0x%x ramsize: 0x%x tcm: %p\n", - __FUNCTION__, bus->dongle_ram_base, bus->ramsize, bus->tcm)); /* Download image with MEMBLOCK size */ while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) { if (len < 0) { @@ -1328,17 +1720,16 @@ dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path) bcmerror = BCME_ERROR; goto err; } - /* check if CR4/CA7 */ - if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || - si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) { - /* if address is 0, store the reset instruction to be written in 0 */ - if (offset == 0) { - bus->resetinstr = *(((uint32*)memptr)); - /* Add start of RAM address to the address given by user */ - offset += bus->dongle_ram_base; - offset_end += offset; - } + /* if address is 0, store the reset instruction to be written in 0 */ + if (store_reset) { + ASSERT(offset == 0); + bus->resetinstr = *(((uint32*)memptr)); + /* Add start of RAM address to the address given by user */ + offset += bus->dongle_ram_base; + offset_end += offset; + store_reset = FALSE; } + bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len); if (bcmerror) { DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", @@ -1370,15 +1761,17 @@ dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path) } err: - if (memblock) + if (memblock) { MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN); - if (dhd_msg_level & DHD_TRACE_VAL) { - if (memptr_tmp) - MFREE(bus->dhd->osh, memptr_tmp, MEMBLOCK + DHD_SDALIGN); + if (dhd_msg_level & DHD_TRACE_VAL) { + if (memptr_tmp) + 
MFREE(bus->dhd->osh, memptr_tmp, MEMBLOCK + DHD_SDALIGN); + } } - if (imgbuf) + if (imgbuf) { dhd_os_close_image(imgbuf); + } return bcmerror; } /* dhdpcie_download_code_file */ @@ -1400,18 +1793,22 @@ dhdpcie_download_nvram(struct dhd_bus *bus) bool local_alloc = FALSE; pnv_path = bus->nv_path; +#ifdef BCMEMBEDIMAGE + nvram_file_exists = TRUE; +#else nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0')); +#endif /* First try UEFI */ len = MAX_NVRAMBUF_SIZE; - dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, &len); + dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, (int *)&len); /* If UEFI empty, then read from file system */ - if ((len == 0) || (memblock[0] == '\0')) { + if ((len <= 0) || (memblock == NULL)) { if (nvram_file_exists) { len = MAX_NVRAMBUF_SIZE; - dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, &len); + dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, (int *)&len); if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) { goto err; } @@ -1426,7 +1823,7 @@ dhdpcie_download_nvram(struct dhd_bus *bus) DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len)); - if (len > 0 && len <= MAX_NVRAMBUF_SIZE) { + if (len > 0 && len <= MAX_NVRAMBUF_SIZE && memblock != NULL) { bufp = (char *) memblock; #ifdef CACHE_FW_IMAGES @@ -1503,15 +1900,22 @@ dhdpcie_download_code_array(struct dhd_bus *bus) remaining_len = 0; len = 0; +#ifdef DHD_EFI + p_dlarray = rtecdc_fw_arr; + dlarray_size = sizeof(rtecdc_fw_arr); +#else p_dlarray = dlarray; dlarray_size = sizeof(dlarray); p_dlimagename = dlimagename; p_dlimagever = dlimagever; p_dlimagedate = dlimagedate; +#endif /* DHD_EFI */ +#ifndef DHD_EFI if ((p_dlarray == 0) || (dlarray_size == 0) ||(dlarray_size > bus->ramsize) || (p_dlimagename == 0) || (p_dlimagever == 0) || (p_dlimagedate == 0)) goto err; +#endif /* DHD_EFI */ memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); if (memblock == NULL) { @@ -1579,7 +1983,13 @@ 
dhdpcie_download_code_array(struct dhd_bus *bus) uploded_len += len; offset += MEMBLOCK; } - +#ifdef DHD_EFI + if (memcmp(p_dlarray, ularray, dlarray_size)) { + DHD_ERROR(("%s: Downloaded image is corrupted ! \n", __FUNCTION__)); + goto upload_err; + } else + DHD_ERROR(("%s: Download, Upload and compare succeeded .\n", __FUNCTION__)); +#else if (memcmp(p_dlarray, ularray, dlarray_size)) { DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n", __FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate)); @@ -1588,6 +1998,8 @@ dhdpcie_download_code_array(struct dhd_bus *bus) } else DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n", __FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate)); +#endif /* DHD_EFI */ + upload_err: if (ularray) MFREE(bus->dhd->osh, ularray, dlarray_size); @@ -1603,6 +2015,158 @@ err: #endif /* BCMEMBEDIMAGE */ +static int +dhdpcie_ramsize_read_image(struct dhd_bus *bus, char *buf, int len) +{ + int bcmerror = BCME_ERROR; + char *imgbuf = NULL; + + if (buf == NULL || len == 0) + goto err; + + /* External image takes precedence if specified */ + if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) { + imgbuf = dhd_os_open_image(bus->fw_path); + if (imgbuf == NULL) { + DHD_ERROR(("%s: Failed to open firmware file\n", __FUNCTION__)); + goto err; + } + + /* Read it */ + if (len != dhd_os_get_image_block(buf, len, imgbuf)) { + DHD_ERROR(("%s: Failed to read %d bytes data\n", __FUNCTION__, len)); + goto err; + } + + bcmerror = BCME_OK; + } + +err: + if (imgbuf) + dhd_os_close_image(imgbuf); + + return bcmerror; +} + + +/* The ramsize can be changed in the dongle image, for example 4365 chip share the sysmem + * with BMC and we can adjust how many sysmem belong to CA7 during dongle compilation. + * So in DHD we need to detect this case and update the correct dongle RAMSIZE as well. 
+ */ +static void +dhdpcie_ramsize_adj(struct dhd_bus *bus) +{ + int i, search_len = 0; + uint8 *memptr = NULL; + uint8 *ramsizeptr = NULL; + uint ramsizelen; + uint32 ramsize_ptr_ptr[] = {RAMSIZE_PTR_PTR_LIST}; + hnd_ramsize_ptr_t ramsize_info; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Adjust dongle RAMSIZE already called. */ + if (bus->ramsize_adjusted) { + return; + } + + /* success or failure, we don't want to be here + * more than once. + */ + bus->ramsize_adjusted = TRUE; + + /* Not handle if user restrict dongle ram size enabled */ + if (dhd_dongle_memsize) { + DHD_ERROR(("%s: user restrict dongle ram size to %d.\n", __FUNCTION__, + dhd_dongle_memsize)); + return; + } + +#ifndef BCMEMBEDIMAGE + /* Out immediately if no image to download */ + if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) { + DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__)); + return; + } +#endif /* !BCMEMBEDIMAGE */ + + /* Get maximum RAMSIZE info search length */ + for (i = 0; ; i++) { + if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END) + break; + + if (search_len < (int)ramsize_ptr_ptr[i]) + search_len = (int)ramsize_ptr_ptr[i]; + } + + if (!search_len) + return; + + search_len += sizeof(hnd_ramsize_ptr_t); + + memptr = MALLOC(bus->dhd->osh, search_len); + if (memptr == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, search_len)); + return; + } + + /* External image takes precedence if specified */ + if (dhdpcie_ramsize_read_image(bus, (char *)memptr, search_len) != BCME_OK) { +#if defined(BCMEMBEDIMAGE) && !defined(DHD_EFI) + unsigned char *p_dlarray = NULL; + unsigned int dlarray_size = 0; + char *p_dlimagename, *p_dlimagever, *p_dlimagedate; + + p_dlarray = dlarray; + dlarray_size = sizeof(dlarray); + p_dlimagename = dlimagename; + p_dlimagever = dlimagever; + p_dlimagedate = dlimagedate; + + if ((p_dlarray == 0) || (dlarray_size == 0) || (p_dlimagename == 0) || + (p_dlimagever == 0) || (p_dlimagedate == 0)) + goto err; + + ramsizeptr 
= p_dlarray; + ramsizelen = dlarray_size; +#else + goto err; +#endif /* BCMEMBEDIMAGE && !DHD_EFI */ + } + else { + ramsizeptr = memptr; + ramsizelen = search_len; + } + + if (ramsizeptr) { + /* Check Magic */ + for (i = 0; ; i++) { + if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END) + break; + + if (ramsize_ptr_ptr[i] + sizeof(hnd_ramsize_ptr_t) > ramsizelen) + continue; + + memcpy((char *)&ramsize_info, ramsizeptr + ramsize_ptr_ptr[i], + sizeof(hnd_ramsize_ptr_t)); + + if (ramsize_info.magic == HTOL32(HND_RAMSIZE_PTR_MAGIC)) { + bus->orig_ramsize = LTOH32(ramsize_info.ram_size); + bus->ramsize = LTOH32(ramsize_info.ram_size); + DHD_ERROR(("%s: Adjust dongle RAMSIZE to 0x%x\n", __FUNCTION__, + bus->ramsize)); + break; + } + } + } + +err: + if (memptr) + MFREE(bus->dhd->osh, memptr, search_len); + + return; +} /* _dhdpcie_download_firmware */ + static int _dhdpcie_download_firmware(struct dhd_bus *bus) { @@ -1620,6 +2184,8 @@ _dhdpcie_download_firmware(struct dhd_bus *bus) return 0; #endif } + /* Adjust ram size */ + dhdpcie_ramsize_adj(bus); /* Keep arm in reset */ if (dhdpcie_bus_download_state(bus, TRUE)) { @@ -1684,7 +2250,6 @@ err: #define CONSOLE_LINE_MAX 192 -#ifdef DHD_DEBUG static int dhdpcie_bus_readconsole(dhd_bus_t *bus) { @@ -1747,15 +2312,90 @@ dhdpcie_bus_readconsole(dhd_bus_t *bus) if (line[n - 1] == '\r') n--; line[n] = 0; - printf("CONSOLE: %s\n", line); - + DHD_FWLOG(("CONSOLE: %s\n", line)); } } break2: return BCME_OK; } /* dhdpcie_bus_readconsole */ -#endif /* DHD_DEBUG */ + +void +dhd_bus_dump_console_buffer(dhd_bus_t *bus) +{ + uint32 n, i; + uint32 addr; + char *console_buffer = NULL; + uint32 console_ptr, console_size, console_index; + uint8 line[CONSOLE_LINE_MAX], ch; + int rv; + + DHD_ERROR(("%s: Dump Complete Console Buffer\n", __FUNCTION__)); + + if (bus->is_linkdown) { + DHD_ERROR(("%s: Skip dump Console Buffer due to PCIe link down\n", __FUNCTION__)); + return; + } + + addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log); + if 
((rv = dhdpcie_bus_membytes(bus, FALSE, addr, + (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) { + goto exit; + } + + addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size); + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, + (uint8 *)&console_size, sizeof(console_size))) < 0) { + goto exit; + } + + addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx); + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, + (uint8 *)&console_index, sizeof(console_index))) < 0) { + goto exit; + } + + console_ptr = ltoh32(console_ptr); + console_size = ltoh32(console_size); + console_index = ltoh32(console_index); + + if (console_size > CONSOLE_BUFFER_MAX || + !(console_buffer = MALLOC(bus->dhd->osh, console_size))) { + goto exit; + } + + if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr, + (uint8 *)console_buffer, console_size)) < 0) { + goto exit; + } + + for (i = 0, n = 0; i < console_size; i += n + 1) { + for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { + ch = console_buffer[(console_index + i + n) % console_size]; + if (ch == '\n') + break; + line[n] = ch; + } + + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + /* Don't use DHD_ERROR macro since we print + * a lot of information quickly. 
The macro + * will truncate a lot of the printfs + */ + + DHD_FWLOG(("CONSOLE: %s\n", line)); + } + } + +exit: + if (console_buffer) + MFREE(bus->dhd->osh, console_buffer, console_size); + return; +} static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) @@ -1763,16 +2403,11 @@ dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) int bcmerror = 0; uint msize = 512; char *mbuffer = NULL; - char *console_buffer = NULL; uint maxstrlen = 256; char *str = NULL; - trap_t tr; - pciedev_shared_t *pciedev_shared = bus->pcie_sh; + pciedev_shared_t *local_pciedev_shared = bus->pcie_sh; struct bcmstrbuf strbuf; - uint32 console_ptr, console_size, console_index; - uint8 line[CONSOLE_LINE_MAX], ch; - uint32 n, i, addr; - int rv; + unsigned long flags; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); @@ -1800,6 +2435,9 @@ dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) bcmerror = BCME_NOMEM; goto done; } + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); if ((bcmerror = dhdpcie_readshared(bus)) < 0) { goto done; @@ -1808,9 +2446,9 @@ dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) bcm_binit(&strbuf, data, size); bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n", - pciedev_shared->msgtrace_addr, pciedev_shared->console_addr); + local_pciedev_shared->msgtrace_addr, local_pciedev_shared->console_addr); - if ((pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) { + if ((local_pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) { /* NOTE: Misspelled assert is intentional - DO NOT FIX. * (Avoids conflict with real asserts for programmatic parsing of output.) 
*/ @@ -1856,85 +2494,43 @@ dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) } if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) { + trap_t *tr = &bus->dhd->last_trap_info; bus->dhd->dongle_trap_occured = TRUE; if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE, - bus->pcie_sh->trap_addr, (uint8*)&tr, sizeof(trap_t))) < 0) { + bus->pcie_sh->trap_addr, (uint8*)tr, sizeof(trap_t))) < 0) { goto done; } + dhd_bus_dump_trap_info(bus, &strbuf); - bcm_bprintf(&strbuf, - "\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x," - " lp 0x%x, rpc 0x%x" - "\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, " - "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n", - ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr), - ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc), - ltoh32(bus->pcie_sh->trap_addr), - ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3), - ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7)); - - addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log); - if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, - (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) { - goto printbuf; - } - - addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size); - if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, - (uint8 *)&console_size, sizeof(console_size))) < 0) { - goto printbuf; - } - - addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx); - if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, - (uint8 *)&console_index, sizeof(console_index))) < 0) { - goto printbuf; - } - - console_ptr = ltoh32(console_ptr); - console_size = ltoh32(console_size); - console_index = ltoh32(console_index); - - if (console_size > CONSOLE_BUFFER_MAX || - !(console_buffer = MALLOC(bus->dhd->osh, console_size))) { - goto printbuf; - } - - if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr, - (uint8 *)console_buffer, console_size)) < 0) { - goto printbuf; - } - - for (i = 0, n = 0; i < console_size; i += n + 1) { - for (n = 0; n < CONSOLE_LINE_MAX - 
2; n++) { - ch = console_buffer[(console_index + i + n) % console_size]; - if (ch == '\n') - break; - line[n] = ch; - } - - - if (n > 0) { - if (line[n - 1] == '\r') - n--; - line[n] = 0; - /* Don't use DHD_ERROR macro since we print - * a lot of information quickly. The macro - * will truncate a lot of the printfs - */ - - printf("CONSOLE: %s\n", line); - } - } + dhd_bus_dump_console_buffer(bus); } } -printbuf: if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) { printf("%s: %s\n", __FUNCTION__, strbuf.origbuf); +#ifdef REPORT_FATAL_TIMEOUTS + /** + * stop the timers as FW trapped + */ + if (dhd_stop_scan_timer(bus->dhd)) { + DHD_ERROR(("dhd_stop_scan_timer failed\n")); + ASSERT(0); + } + if (dhd_stop_bus_timer(bus->dhd)) { + DHD_ERROR(("dhd_stop_bus_timer failed\n")); + ASSERT(0); + } + if (dhd_stop_cmd_timer(bus->dhd)) { + DHD_ERROR(("dhd_stop_cmd_timer failed\n")); + ASSERT(0); + } + if (dhd_stop_join_timer(bus->dhd)) { + DHD_ERROR(("dhd_stop_join_timer failed\n")); + ASSERT(0); + } +#endif /* REPORT_FATAL_TIMEOUTS */ - /* wake up IOCTL wait event */ - dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP); + dhd_prot_debug_info_print(bus->dhd); #if defined(DHD_FW_COREDUMP) /* save core dump or write to a file */ @@ -1944,18 +2540,25 @@ printbuf: } #endif /* DHD_FW_COREDUMP */ + /* wake up IOCTL wait event */ + dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP); + + dhd_schedule_reset(bus->dhd); + } + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + done: if (mbuffer) MFREE(bus->dhd->osh, mbuffer, msize); if (str) MFREE(bus->dhd->osh, str, maxstrlen); - if (console_buffer) - MFREE(bus->dhd->osh, console_buffer, console_size); - return bcmerror; } /* dhdpcie_checkdied */ @@ -1974,6 +2577,17 @@ void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf) } start = bus->dongle_ram_base; + read_size = 4; + /* check for dead bus */ + { + uint 
test_word = 0; + ret = dhdpcie_bus_membytes(bus, FALSE, start, (uint8*)&test_word, read_size); + /* if read error or bus timeout */ + if (ret || (test_word == 0xFFFFFFFF)) { + return; + } + } + /* Get full mem size */ size = bus->ramsize; /* Read mem content */ @@ -2011,26 +2625,21 @@ dhdpcie_mem_dump(dhd_bus_t *bus) #ifdef SUPPORT_LINKDOWN_RECOVERY if (bus->is_linkdown) { - DHD_ERROR(("%s: PCIe link was down so skip\n", __FUNCTION__)); + DHD_ERROR(("%s: PCIe link is down so skip\n", __FUNCTION__)); return BCME_ERROR; } #endif /* SUPPORT_LINKDOWN_RECOVERY */ /* Get full mem size */ size = bus->ramsize; -#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) - buf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_MEMDUMP_BUF, size); - bzero(buf, size); -#else - buf = MALLOC(bus->dhd->osh, size); -#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + buf = dhd_get_fwdump_buf(bus->dhd, size); if (!buf) { DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size)); return BCME_ERROR; } /* Read mem content */ - DHD_TRACE_HW4(("Dump dongle memory")); + DHD_TRACE_HW4(("Dump dongle memory\n")); databuf = buf; while (size) { @@ -2038,9 +2647,7 @@ dhdpcie_mem_dump(dhd_bus_t *bus) if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) { DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret)); - if (buf) { - MFREE(bus->dhd->osh, buf, size); - } + bus->dhd->memdump_success = FALSE; return BCME_ERROR; } DHD_TRACE((".")); @@ -2050,11 +2657,10 @@ dhdpcie_mem_dump(dhd_bus_t *bus) start += read_size; databuf += read_size; } + bus->dhd->memdump_success = TRUE; - DHD_TRACE_HW4(("%s FUNC: Copy fw image to the embedded buffer \n", __FUNCTION__)); - - dhd_save_fwdump(bus->dhd, buf, bus->ramsize); dhd_schedule_memdump(bus->dhd, buf, bus->ramsize); + /* buf, actually soc_ram free handled in dhd_{free,clear} */ return ret; } @@ -2064,20 +2670,64 @@ dhd_bus_mem_dump(dhd_pub_t *dhdp) { dhd_bus_t *bus = dhdp->bus; - if (bus->suspended) { - 
DHD_ERROR(("%s: Bus is suspend so skip\n", __FUNCTION__)); - return 0; + if (dhdp->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s bus is down\n", __FUNCTION__)); + return BCME_ERROR; + } +#ifdef DHD_PCIE_RUNTIMEPM + if (dhdp->memdump_type == DUMP_TYPE_BY_SYSDUMP) { + DHD_ERROR(("%s : bus wakeup by SYSDUMP\n", __FUNCTION__)); + dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0)); + } +#endif /* DHD_PCIE_RUNTIMEPM */ + + if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) { + DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n", + __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state)); + return BCME_ERROR; } return dhdpcie_mem_dump(bus); } -#endif /* DHD_FW_COREDUMP */ + +int +dhd_dongle_mem_dump(void) +{ + if (!g_dhd_bus) { + DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__)); + return -ENODEV; + } + + dhd_bus_dump_console_buffer(g_dhd_bus); + dhd_prot_debug_info_print(g_dhd_bus->dhd); + + g_dhd_bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON; + g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS; + +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(g_dhd_bus->dhd, TRUE, __builtin_return_address(0)); +#endif /* DHD_PCIE_RUNTIMEPM */ + + DHD_OS_WAKE_LOCK(g_dhd_bus->dhd); + dhd_bus_mem_dump(g_dhd_bus->dhd); + DHD_OS_WAKE_UNLOCK(g_dhd_bus->dhd); + return 0; +} +EXPORT_SYMBOL(dhd_dongle_mem_dump); +#endif /* DHD_FW_COREDUMP */ int dhd_socram_dump(dhd_bus_t *bus) { +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0)); +#endif /* DHD_PCIE_RUNTIMEPM */ + #if defined(DHD_FW_COREDUMP) - return (dhdpcie_mem_dump(bus)); + DHD_OS_WAKE_LOCK(bus->dhd); + dhd_bus_mem_dump(bus->dhd); + DHD_OS_WAKE_UNLOCK(bus->dhd); + return 0; #else return -1; #endif @@ -2099,6 +2749,7 @@ dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uin return BCME_ERROR; } + /* Detect endianness. 
*/ little_endian = *(char *)&detect_endian_flag; @@ -2108,20 +2759,27 @@ dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uin */ /* Determine initial transfer parameters */ +#ifdef DHD_SUPPORT_64BIT dsize = sizeof(uint64); +#else /* !DHD_SUPPORT_64BIT */ + dsize = sizeof(uint32); +#endif /* DHD_SUPPORT_64BIT */ /* Do the transfer(s) */ DHD_INFO(("%s: %s %d bytes in window 0x%08lx\n", __FUNCTION__, (write ? "write" : "read"), size, address)); if (write) { while (size) { - if (size >= sizeof(uint64) && little_endian && -#ifdef CONFIG_64BIT - !(address % 8) && -#endif /* CONFIG_64BIT */ - 1) { +#ifdef DHD_SUPPORT_64BIT + if (size >= sizeof(uint64) && little_endian && !(address % 8)) { dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data)); - } else { + } +#else /* !DHD_SUPPORT_64BIT */ + if (size >= sizeof(uint32) && little_endian && !(address % 4)) { + dhdpcie_bus_wtcm32(bus, address, *((uint32*)data)); + } +#endif /* DHD_SUPPORT_64BIT */ + else { dsize = sizeof(uint8); dhdpcie_bus_wtcm8(bus, address, *data); } @@ -2134,13 +2792,18 @@ dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uin } } else { while (size) { - if (size >= sizeof(uint64) && little_endian && -#ifdef CONFIG_64BIT - !(address % 8) && -#endif /* CONFIG_64BIT */ - 1) { +#ifdef DHD_SUPPORT_64BIT + if (size >= sizeof(uint64) && little_endian && !(address % 8)) + { *(uint64 *)data = dhdpcie_bus_rtcm64(bus, address); - } else { + } +#else /* !DHD_SUPPORT_64BIT */ + if (size >= sizeof(uint32) && little_endian && !(address % 4)) + { + *(uint32 *)data = dhdpcie_bus_rtcm32(bus, address); + } +#endif /* DHD_SUPPORT_64BIT */ + else { dsize = sizeof(uint8); *data = dhdpcie_bus_rtcm8(bus, address); } @@ -2170,9 +2833,9 @@ dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs) DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id)); /* ASSERT on flow_id */ - if (flow_id >= bus->max_sub_queues) { + if (flow_id >= bus->max_submission_rings) { 
DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__, - flow_id, bus->max_sub_queues)); + flow_id, bus->max_submission_rings)); return 0; } @@ -2232,7 +2895,7 @@ dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs) /* Restore to original priority for 802.1X packet */ if (prio == PRIO_8021D_NC) { - PKTSETPRIO(txp, PRIO_8021D_BE); + PKTSETPRIO(txp, dhdp->prio_8021x); } } #endif /* DHD_LOSSLESS_ROAMING */ @@ -2264,6 +2927,9 @@ int BCMFASTPATH dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx) { uint16 flowid; +#ifdef IDLE_TX_FLOW_MGMT + uint8 node_status; +#endif /* IDLE_TX_FLOW_MGMT */ flow_queue_t *queue; flow_ring_node_t *flow_ring_node; unsigned long flags; @@ -2280,34 +2946,77 @@ dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx) flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n", - __FUNCTION__, flowid, flow_ring_node->status, - flow_ring_node->active)); + __FUNCTION__, flowid, flow_ring_node->status, flow_ring_node->active)); DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); if ((flowid >= bus->dhd->num_flow_rings) || +#ifdef IDLE_TX_FLOW_MGMT + (!flow_ring_node->active)) +#else (!flow_ring_node->active) || (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) || - (flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING)) { + (flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING)) +#endif /* IDLE_TX_FLOW_MGMT */ + { DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n", __FUNCTION__, flowid, flow_ring_node->status, flow_ring_node->active)); ret = BCME_ERROR; - goto toss; + goto toss; } +#ifdef IDLE_TX_FLOW_MGMT + node_status = flow_ring_node->status; + + /* handle diffrent status states here!! 
*/ + switch (node_status) + { + case FLOW_RING_STATUS_OPEN: + + if (bus->enable_idle_flowring_mgmt) { + /* Move the node to the head of active list */ + dhd_flow_ring_move_to_active_list_head(bus, flow_ring_node); + } + break; + + case FLOW_RING_STATUS_SUSPENDED: + DHD_INFO(("Need to Initiate TX Flow resume\n")); + /* Issue resume_ring request */ + dhd_bus_flow_ring_resume_request(bus, + flow_ring_node); + break; + + case FLOW_RING_STATUS_CREATE_PENDING: + case FLOW_RING_STATUS_RESUME_PENDING: + /* Dont do anything here!! */ + DHD_INFO(("Waiting for Flow create/resume! status is %u\n", + node_status)); + break; + + case FLOW_RING_STATUS_DELETE_PENDING: + default: + DHD_ERROR(("Dropping packet!! flowid %u status is %u\n", + flowid, node_status)); + /* error here!! */ + ret = BCME_ERROR; + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + goto toss; + } + /* Now queue the packet */ +#endif /* IDLE_TX_FLOW_MGMT */ + queue = &flow_ring_node->queue; /* queue associated with flow ring */ - if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK) { + if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK) txp_pend = txp; - } DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); if (flow_ring_node->status) { DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n", - __FUNCTION__, flowid, flow_ring_node->status, - flow_ring_node->active)); + __FUNCTION__, flowid, flow_ring_node->status, + flow_ring_node->active)); if (txp_pend) { txp = txp_pend; goto toss; @@ -2333,7 +3042,14 @@ dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx) toss: DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret)); +/* for EFI, pass the 'send' flag as false, to avoid enqueuing the failed tx pkt +* into the Tx done queue +*/ +#ifdef DHD_EFI + PKTCFREE(bus->dhd->osh, txp, FALSE); +#else PKTCFREE(bus->dhd->osh, txp, TRUE); +#endif return ret; } /* dhd_bus_txdata */ @@ -2352,7 +3068,6 @@ dhd_bus_start_queue(struct dhd_bus *bus) bus->bus_flowctrl = TRUE; } -#if defined(DHD_DEBUG) 
/* Device console input function */ int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen) { @@ -2365,7 +3080,6 @@ int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen) /* Don't allow input if dongle is in reset */ if (bus->dhd->dongle_reset) { - dhd_os_sdunlock(bus->dhd); return BCME_NOTREADY; } @@ -2391,7 +3105,6 @@ int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen) done: return rv; } /* dhd_bus_console_in */ -#endif /* defined(DHD_DEBUG) */ /** * Called on frame reception, the frame was received from the dongle on interface 'ifidx' and is @@ -2407,42 +3120,40 @@ dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count) void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data) { - *(volatile uint8 *)(bus->tcm + offset) = (uint8)data; + W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset), data); } uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset) { volatile uint8 data; - - data = *(volatile uint8 *)(bus->tcm + offset); - + data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset)); return data; } void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data) { - *(volatile uint32 *)(bus->tcm + offset) = (uint32)data; + W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset), data); } void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data) { - *(volatile uint16 *)(bus->tcm + offset) = (uint16)data; + W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset), data); } +#ifdef DHD_SUPPORT_64BIT void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) { - *(volatile uint64 *)(bus->tcm + offset) = (uint64)data; + W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data); } +#endif /* DHD_SUPPORT_64BIT */ uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset) { volatile uint16 data; - - data = *(volatile uint16 *)(bus->tcm + offset); - + data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset)); return data; } @@ -2450,28 +3161,26 @@ uint32 
dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset) { volatile uint32 data; - - data = *(volatile uint32 *)(bus->tcm + offset); - + data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset)); return data; } +#ifdef DHD_SUPPORT_64BIT uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) { volatile uint64 data; - - data = *(volatile uint64 *)(bus->tcm + offset); - + data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset)); return data; } +#endif /* DHD_SUPPORT_64BIT */ /** A snippet of dongle memory is shared between host and dongle */ void dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid) { uint64 long_data; - ulong tcm_offset; + uintptr tcm_offset; DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len)); @@ -2485,18 +3194,24 @@ dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint { pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr; long_data = HTOL64(*(uint64 *)data); - tcm_offset = (ulong)&(sh->host_dma_scratch_buffer); - dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len); - prhex(__FUNCTION__, data, len); + tcm_offset = (uintptr)&(sh->host_dma_scratch_buffer); + dhdpcie_bus_membytes(bus, TRUE, + (ulong)tcm_offset, (uint8*) &long_data, len); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } break; } - case D2H_DMA_SCRATCH_BUF_LEN: + case D2H_DMA_SCRATCH_BUF_LEN : { pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr; - tcm_offset = (ulong)&(sh->host_dma_scratch_buffer_len); - dhdpcie_bus_wtcm32(bus, tcm_offset, (uint32) HTOL32(*(uint32 *)data)); - prhex(__FUNCTION__, data, len); + tcm_offset = (uintptr)&(sh->host_dma_scratch_buffer_len); + dhdpcie_bus_wtcm32(bus, + (ulong)tcm_offset, (uint32) HTOL32(*(uint32 *)data)); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } break; } @@ -2505,10 +3220,13 @@ dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 
type, uint pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh; long_data = HTOL64(*(uint64 *)data); - tcm_offset = (ulong)shmem->rings_info_ptr; + tcm_offset = (uintptr)shmem->rings_info_ptr; tcm_offset += OFFSETOF(ring_info_t, h2d_w_idx_hostaddr); - dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len); - prhex(__FUNCTION__, data, len); + dhdpcie_bus_membytes(bus, TRUE, + (ulong)tcm_offset, (uint8*) &long_data, len); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } break; } @@ -2516,10 +3234,13 @@ dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint { pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh; long_data = HTOL64(*(uint64 *)data); - tcm_offset = (ulong)shmem->rings_info_ptr; + tcm_offset = (uintptr)shmem->rings_info_ptr; tcm_offset += OFFSETOF(ring_info_t, h2d_r_idx_hostaddr); - dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len); - prhex(__FUNCTION__, data, len); + dhdpcie_bus_membytes(bus, TRUE, + (ulong)tcm_offset, (uint8*) &long_data, len); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } break; } @@ -2527,10 +3248,13 @@ dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint { pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh; long_data = HTOL64(*(uint64 *)data); - tcm_offset = (ulong)shmem->rings_info_ptr; + tcm_offset = (uintptr)shmem->rings_info_ptr; tcm_offset += OFFSETOF(ring_info_t, d2h_w_idx_hostaddr); - dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len); - prhex(__FUNCTION__, data, len); + dhdpcie_bus_membytes(bus, TRUE, + (ulong)tcm_offset, (uint8*) &long_data, len); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } break; } @@ -2538,41 +3262,66 @@ dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint { pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh; long_data = HTOL64(*(uint64 *)data); - 
tcm_offset = (ulong)shmem->rings_info_ptr; + tcm_offset = (uintptr)shmem->rings_info_ptr; tcm_offset += OFFSETOF(ring_info_t, d2h_r_idx_hostaddr); - dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len); - prhex(__FUNCTION__, data, len); + dhdpcie_bus_membytes(bus, TRUE, + (ulong)tcm_offset, (uint8*) &long_data, len); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } break; } - case RING_ITEM_LEN: + case H2D_IFRM_INDX_WR_BUF: + { + pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh; + + long_data = HTOL64(*(uint64 *)data); + tcm_offset = (uintptr)shmem->rings_info_ptr; + tcm_offset += OFFSETOF(ring_info_t, ifrm_w_idx_hostaddr); + dhdpcie_bus_membytes(bus, TRUE, + (ulong)tcm_offset, (uint8*) &long_data, len); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } + break; + } + + case RING_ITEM_LEN : tcm_offset = bus->ring_sh[ringid].ring_mem_addr; tcm_offset += OFFSETOF(ring_mem_t, len_items); - dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data)); + dhdpcie_bus_wtcm16(bus, + (ulong)tcm_offset, (uint16) HTOL16(*(uint16 *)data)); break; - case RING_MAX_ITEMS: + case RING_MAX_ITEMS : tcm_offset = bus->ring_sh[ringid].ring_mem_addr; tcm_offset += OFFSETOF(ring_mem_t, max_item); - dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data)); + dhdpcie_bus_wtcm16(bus, + (ulong)tcm_offset, (uint16) HTOL16(*(uint16 *)data)); break; - case RING_BUF_ADDR: + case RING_BUF_ADDR : long_data = HTOL64(*(uint64 *)data); tcm_offset = bus->ring_sh[ringid].ring_mem_addr; tcm_offset += OFFSETOF(ring_mem_t, base_addr); - dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8 *) &long_data, len); - prhex(__FUNCTION__, data, len); + dhdpcie_bus_membytes(bus, TRUE, + (ulong)tcm_offset, (uint8 *) &long_data, len); + if (dhd_msg_level & DHD_INFO_VAL) { + prhex(__FUNCTION__, data, len); + } break; - case RING_WR_UPD: + case RING_WR_UPD : tcm_offset = bus->ring_sh[ringid].ring_state_w; - 
dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data)); + dhdpcie_bus_wtcm16(bus, + (ulong)tcm_offset, (uint16) HTOL16(*(uint16 *)data)); break; - case RING_RD_UPD: + case RING_RD_UPD : tcm_offset = bus->ring_sh[ringid].ring_state_r; - dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data)); + dhdpcie_bus_wtcm16(bus, + (ulong)tcm_offset, (uint16) HTOL16(*(uint16 *)data)); break; case D2H_MB_DATA: @@ -2585,6 +3334,36 @@ dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint (uint32) HTOL32(*(uint32 *)data)); break; + case HOST_API_VERSION: + { + pciedev_shared_t *sh = (pciedev_shared_t*) bus->shared_addr; + tcm_offset = (uintptr)sh + OFFSETOF(pciedev_shared_t, host_cap); + dhdpcie_bus_wtcm32(bus, + (ulong)tcm_offset, (uint32) HTOL32(*(uint32 *)data)); + break; + } + + case DNGL_TO_HOST_TRAP_ADDR: + { + pciedev_shared_t *sh = (pciedev_shared_t*) bus->shared_addr; + long_data = HTOL64(*(uint64 *)data); + tcm_offset = (uintptr)&(sh->host_trap_addr); + dhdpcie_bus_membytes(bus, TRUE, + (ulong)tcm_offset, (uint8*) &long_data, len); + break; + } + +#ifdef HOFFLOAD_MODULES + case WRT_HOST_MODULE_ADDR: + { + pciedev_shared_t *sh = (pciedev_shared_t*) bus->shared_addr; + long_data = HTOL64(*(uint64 *)data); + tcm_offset = (uintptr)&(sh->hoffload_addr); + dhdpcie_bus_membytes(bus, TRUE, + (ulong)tcm_offset, (uint8*) &long_data, len); + break; + } +#endif default: break; } @@ -2597,19 +3376,19 @@ dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid) ulong tcm_offset; switch (type) { - case RING_WR_UPD: + case RING_WR_UPD : tcm_offset = bus->ring_sh[ringid].ring_state_w; *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset)); break; - case RING_RD_UPD: + case RING_RD_UPD : tcm_offset = bus->ring_sh[ringid].ring_state_r; *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset)); break; - case TOTAL_LFRAG_PACKET_CNT: + case TOTAL_LFRAG_PACKET_CNT : { pciedev_shared_t *sh = 
(pciedev_shared_t*)bus->shared_addr; *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, - (ulong) &sh->total_lfrag_pkt_cnt)); + (ulong)(uintptr) &sh->total_lfrag_pkt_cnt)); break; } case H2D_MB_DATA: @@ -2618,11 +3397,11 @@ dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid) case D2H_MB_DATA: *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->d2h_mb_data_ptr_addr)); break; - case MAX_HOST_RXBUFS: + case MAX_HOST_RXBUFS : { pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr; *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, - (ulong) &sh->max_host_rxbufs)); + (ulong)(uintptr) &sh->max_host_rxbufs)); break; } default : @@ -2646,7 +3425,7 @@ dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, { dhd_bus_t *bus = dhdp->bus; const bcm_iovar_t *vi = NULL; - int bcmerror = 0; + int bcmerror = BCME_UNSUPPORTED; int val_size; uint32 actionid; @@ -2929,17 +3708,17 @@ int dhd_buzzz_dump_dngl(dhd_bus_t *bus) sh = bus->pcie_sh; - DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzzz)); + DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzz_dbg_ptr)); - if (sh->buzzz != 0U) { /* Fetch and display dongle BUZZZ Trace */ + if (sh->buzz_dbg_ptr != 0U) { /* Fetch and display dongle BUZZZ Trace */ - dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzzz, + dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzz_dbg_ptr, (uint8 *)buzzz_p, sizeof(bcm_buzzz_t)); printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> " "count<%u> status<%u> wrap<%u>\n" "cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n", - (int)sh->buzzz, + (int)sh->buzz_dbg_ptr, (int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end, buzzz_p->count, buzzz_p->status, buzzz_p->wrap, buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group, @@ -2991,39 +3770,6 @@ done: #define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && \ ((sih)->buscoretype == PCIE2_CORE_ID)) -static bool -pcie2_mdiosetblock(dhd_bus_t *bus, uint blk) -{ - uint mdiodata, mdioctrl, i = 0; - uint pcie_serdes_spinwait 
= 200; - - mdioctrl = MDIOCTL2_DIVISOR_VAL | (0x1F << MDIOCTL2_REGADDR_SHF); - mdiodata = (blk << MDIODATA2_DEVADDR_SHF) | MDIODATA2_DONE; - - si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdioctrl); - si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0, mdiodata); - - OSL_DELAY(10); - /* retry till the transaction is complete */ - while (i < pcie_serdes_spinwait) { - uint mdioctrl_read = si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, - 0, 0); - if (!(mdioctrl_read & MDIODATA2_DONE)) { - break; - } - OSL_DELAY(1000); - i++; - } - - if (i >= pcie_serdes_spinwait) { - DHD_ERROR(("%s: pcie_mdiosetblock: timed out\n", __FUNCTION__)); - return FALSE; - } - - return TRUE; -} - - int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) { @@ -3086,6 +3832,7 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) dhd_bus_oob_intr_set(bus->dhd, FALSE); dhd_bus_oob_intr_unregister(bus->dhd); #endif /* BCMPCIE_OOB_HOST_WAKE */ + dhd_dpc_kill(bus->dhd); dhd_prot_reset(dhdp); dhd_clear(dhdp); dhd_bus_release_dongle(bus); @@ -3133,7 +3880,9 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) } #endif /* CONFIG_ARCH_MSM */ bus->is_linkdown = 0; - bus->pci_d3hot_done = 0; +#ifdef SUPPORT_LINKDOWN_RECOVERY + bus->read_shm_fail = FALSE; +#endif /* SUPPORT_LINKDOWN_RECOVERY */ bcmerror = dhdpcie_bus_enable_device(bus); if (bcmerror) { DHD_ERROR(("%s: host configuration restore failed: %d\n", @@ -3190,51 +3939,6 @@ done: return bcmerror; } -static int -pcie2_mdioop(dhd_bus_t *bus, uint physmedia, uint regaddr, bool write, uint *val, - bool slave_bypass) -{ - uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl; - uint32 reg32; - - pcie2_mdiosetblock(bus, physmedia); - - /* enable mdio access to SERDES */ - mdio_ctrl = MDIOCTL2_DIVISOR_VAL; - mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF); - - if (slave_bypass) - mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS; - - if (!write) - mdio_ctrl |= MDIOCTL2_READ; - - si_corereg(bus->sih, bus->sih->buscoreidx, 
PCIE2_MDIO_CONTROL, ~0, mdio_ctrl); - - if (write) { - reg32 = PCIE2_MDIO_WR_DATA; - si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0, - *val | MDIODATA2_DONE); - } else - reg32 = PCIE2_MDIO_RD_DATA; - - /* retry till the transaction is complete */ - while (i < pcie_serdes_spinwait) { - uint done_val = si_corereg(bus->sih, bus->sih->buscoreidx, reg32, 0, 0); - if (!(done_val & MDIODATA2_DONE)) { - if (!write) { - *val = si_corereg(bus->sih, bus->sih->buscoreidx, - PCIE2_MDIO_RD_DATA, 0, 0); - *val = *val & MDIODATA2_MASK; - } - return 0; - } - OSL_DELAY(1000); - i++; - } - return -1; -} - static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name, void *params, int plen, void *arg, int len, int val_size) @@ -3275,245 +3979,80 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons case IOV_SVAL(IOV_VARS): bcmerror = dhdpcie_downloadvars(bus, arg, len); break; - - case IOV_SVAL(IOV_PCIEREG): - si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, - int_val); - si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0, - int_val2); - break; - - case IOV_GVAL(IOV_PCIEREG): - si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, - int_val); - int_val = si_corereg(bus->sih, bus->sih->buscoreidx, - OFFSETOF(sbpcieregs_t, configdata), 0, 0); - bcopy(&int_val, arg, sizeof(int_val)); - break; - - case IOV_SVAL(IOV_PCIECOREREG): - si_corereg(bus->sih, bus->sih->buscoreidx, int_val, ~0, int_val2); - break; - case IOV_GVAL(IOV_BAR0_SECWIN_REG): - { - sdreg_t sdreg; - uint32 addr, size; - - bcopy(params, &sdreg, sizeof(sdreg)); - - addr = sdreg.offset; - size = sdreg.func; - - if (si_backplane_access(bus->sih, addr, size, &int_val, TRUE) != BCME_OK) { - DHD_ERROR(("Invalid size/addr combination \n")); - bcmerror = BCME_ERROR; - break; - } - bcopy(&int_val, arg, sizeof(int32)); - break; - } - - case 
IOV_SVAL(IOV_BAR0_SECWIN_REG): - { - sdreg_t sdreg; - uint32 addr, size; - - bcopy(params, &sdreg, sizeof(sdreg)); - - addr = sdreg.offset; - size = sdreg.func; - if (si_backplane_access(bus->sih, addr, size, &sdreg.value, FALSE) != BCME_OK) { - DHD_ERROR(("Invalid size/addr combination \n")); - bcmerror = BCME_ERROR; - } - break; - } - - case IOV_GVAL(IOV_SBREG): - { - sdreg_t sdreg; - uint32 addr, size; - - bcopy(params, &sdreg, sizeof(sdreg)); - - addr = sdreg.offset | SI_ENUM_BASE; - size = sdreg.func; - - if (si_backplane_access(bus->sih, addr, size, &int_val, TRUE) != BCME_OK) { - DHD_ERROR(("Invalid size/addr combination \n")); - bcmerror = BCME_ERROR; - break; - } - bcopy(&int_val, arg, sizeof(int32)); - break; - } - - case IOV_SVAL(IOV_SBREG): - { - sdreg_t sdreg; - uint32 addr, size; - - bcopy(params, &sdreg, sizeof(sdreg)); - - addr = sdreg.offset | SI_ENUM_BASE; - size = sdreg.func; - if (si_backplane_access(bus->sih, addr, size, &sdreg.value, FALSE) != BCME_OK) { - DHD_ERROR(("Invalid size/addr combination \n")); - bcmerror = BCME_ERROR; - } - break; - } - - case IOV_GVAL(IOV_PCIESERDESREG): - { - uint val; - if (!PCIE_GEN2(bus->sih)) { - DHD_ERROR(("%s: supported only in pcie gen2\n", __FUNCTION__)); - bcmerror = BCME_ERROR; - break; - } - - if (!pcie2_mdioop(bus, int_val, int_val2, FALSE, &val, FALSE)) { - bcopy(&val, arg, sizeof(int32)); - } else { - DHD_ERROR(("%s: pcie2_mdioop failed.\n", __FUNCTION__)); - bcmerror = BCME_ERROR; - } - break; - } - - case IOV_SVAL(IOV_PCIESERDESREG): - if (!PCIE_GEN2(bus->sih)) { - DHD_ERROR(("%s: supported only in pcie gen2\n", __FUNCTION__)); - bcmerror = BCME_ERROR; - break; - } - if (pcie2_mdioop(bus, int_val, int_val2, TRUE, &int_val3, FALSE)) { - DHD_ERROR(("%s: pcie2_mdioop failed.\n", __FUNCTION__)); - bcmerror = BCME_ERROR; - } - break; - case IOV_GVAL(IOV_PCIECOREREG): - int_val = si_corereg(bus->sih, bus->sih->buscoreidx, int_val, 0, 0); - bcopy(&int_val, arg, sizeof(int_val)); - break; - - case 
IOV_SVAL(IOV_PCIECFGREG): - OSL_PCI_WRITE_CONFIG(bus->osh, int_val, 4, int_val2); - break; - - case IOV_GVAL(IOV_PCIECFGREG): - int_val = OSL_PCI_READ_CONFIG(bus->osh, int_val, 4); - bcopy(&int_val, arg, sizeof(int_val)); - break; - case IOV_SVAL(IOV_PCIE_LPBK): bcmerror = dhdpcie_bus_lpback_req(bus, int_val); break; - case IOV_SVAL(IOV_PCIE_DMAXFER): - bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3); + case IOV_SVAL(IOV_PCIE_DMAXFER): { + int int_val4 = 0; + if (plen >= (int)sizeof(int_val) * 4) { + bcopy((void*)((uintptr)params + 3 * sizeof(int_val)), + &int_val4, sizeof(int_val4)); + } + bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3, int_val4); break; + } +#ifdef DEVICE_TX_STUCK_DETECT + case IOV_GVAL(IOV_DEVICE_TX_STUCK_DETECT): + int_val = bus->dev_tx_stuck_monitor; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_DEVICE_TX_STUCK_DETECT): + bus->dev_tx_stuck_monitor = (bool)int_val; + break; +#endif /* DEVICE_TX_STUCK_DETECT */ case IOV_GVAL(IOV_PCIE_SUSPEND): int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 
1 : 0; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_PCIE_SUSPEND): - dhdpcie_bus_suspend(bus, bool_val); + if (bool_val) { /* Suspend */ + int ret; + unsigned long flags; + + /* + * If some other context is busy, wait until they are done, + * before starting suspend + */ + ret = dhd_os_busbusy_wait_condition(bus->dhd, + &bus->dhd->dhd_bus_busy_state, DHD_BUS_BUSY_IN_DHD_IOVAR); + if (ret == 0) { + DHD_ERROR(("%s:Wait Timedout, dhd_bus_busy_state = 0x%x\n", + __FUNCTION__, bus->dhd->dhd_bus_busy_state)); + return BCME_BUSY; + } + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + dhdpcie_bus_suspend(bus, TRUE); + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + } else { /* Resume */ + unsigned long flags; + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + dhdpcie_bus_suspend(bus, FALSE); + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + } break; case IOV_GVAL(IOV_MEMSIZE): int_val = (int32)bus->ramsize; bcopy(&int_val, arg, val_size); break; - case IOV_SVAL(IOV_MEMBYTES): - case IOV_GVAL(IOV_MEMBYTES): - { - uint32 address; /* absolute backplane address */ - uint size, dsize; - uint8 *data; - - bool set = (actionid == IOV_SVAL(IOV_MEMBYTES)); - - ASSERT(plen >= 2*sizeof(int)); - - address = (uint32)int_val; - bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val)); - size = (uint)int_val; - - /* Do some validation */ - dsize = set ? plen - (2 * sizeof(int)) : len; - if (dsize < size) { - DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n", - __FUNCTION__, (set ? 
"set" : "get"), address, size, dsize)); - bcmerror = BCME_BADARG; - break; - } - - DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n dsize %d ", __FUNCTION__, - (set ? "write" : "read"), size, address, dsize)); - - /* check if CR4 */ - if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || - si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) { - /* if address is 0, store the reset instruction to be written in 0 */ - if (set && address == bus->dongle_ram_base) { - bus->resetinstr = *(((uint32*)params) + 2); - } - } else { - /* If we know about SOCRAM, check for a fit */ - if ((bus->orig_ramsize) && - ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize))) - { - uint8 enable, protect, remap; - si_socdevram(bus->sih, FALSE, &enable, &protect, &remap); - if (!enable || protect) { - DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n", - __FUNCTION__, bus->orig_ramsize, size, address)); - DHD_ERROR(("%s: socram enable %d, protect %d\n", - __FUNCTION__, enable, protect)); - bcmerror = BCME_BADARG; - break; - } - - if (!REMAP_ENAB(bus) && (address >= SOCDEVRAM_ARM_ADDR)) { - uint32 devramsize = si_socdevram_size(bus->sih); - if ((address < SOCDEVRAM_ARM_ADDR) || - (address + size > (SOCDEVRAM_ARM_ADDR + devramsize))) { - DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n", - __FUNCTION__, address, size)); - DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n", - __FUNCTION__, SOCDEVRAM_ARM_ADDR, devramsize)); - bcmerror = BCME_BADARG; - break; - } - /* move it such that address is real now */ - address -= SOCDEVRAM_ARM_ADDR; - address += SOCDEVRAM_BP_ADDR; - DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n", - __FUNCTION__, (set ? 
"write" : "read"), size, address)); - } else if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address) && remap) { - /* Can not access remap region while devram remap bit is set - * ROM content would be returned in this case - */ - DHD_ERROR(("%s: Need to disable remap for address 0x%08x\n", - __FUNCTION__, address)); - bcmerror = BCME_ERROR; - break; - } - } - } - - /* Generate the actual data pointer */ - data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg; - - /* Call to do the transfer */ - bcmerror = dhdpcie_bus_membytes(bus, set, address, data, size); - - break; - } #ifdef BCM_BUZZZ /* Dump dongle side buzzz trace to console */ @@ -3531,6 +4070,11 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons bcopy(&int_val, arg, val_size); break; + case IOV_SVAL(IOV_RAMSIZE): + bus->ramsize = int_val; + bus->orig_ramsize = int_val; + break; + case IOV_GVAL(IOV_RAMSTART): int_val = (int32)bus->dongle_ram_base; bcopy(&int_val, arg, val_size); @@ -3582,8 +4126,8 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons case IOV_GVAL(IOV_DMA_RINGINDICES): { int h2d_support, d2h_support; - d2h_support = DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0; - h2d_support = DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0; + d2h_support = bus->dhd->dma_d2h_ring_upd_support ? 1 : 0; + h2d_support = bus->dhd->dma_h2d_ring_upd_support ? 1 : 0; int_val = d2h_support | (h2d_support << 1); bcopy(&int_val, arg, sizeof(int_val)); break; @@ -3597,6 +4141,7 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons } else { bus->dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE; bus->dhd->dma_h2d_ring_upd_support = (int_val & 2) ? 
TRUE : FALSE; + bus->dhd->dma_ring_upd_overwrite = TRUE; } } else { DHD_ERROR(("%s: Can change only when bus down (before FW download)\n", @@ -3666,7 +4211,14 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons case IOV_SVAL(IOV_DEVRESET): dhd_bus_devreset(bus->dhd, (uint8)bool_val); break; - + case IOV_SVAL(IOV_FORCE_FW_TRAP): + if (bus->dhd->busstate == DHD_BUS_DATA) + dhdpcie_fw_trap(bus); + else { + DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__)); + bcmerror = BCME_NOTUP; + } + break; case IOV_GVAL(IOV_FLOW_PRIO_MAP): int_val = bus->dhd->flow_prio_map_type; bcopy(&int_val, arg, val_size); @@ -3688,6 +4240,11 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons bcmerror = BCME_BADARG; } else { bus->idletime = int_val; + if (bus->idletime) { + DHD_ENABLE_RUNTIME_PM(bus->dhd); + } else { + DHD_DISABLE_RUNTIME_PM(bus->dhd); + } } break; #endif /* DHD_PCIE_RUNTIMEPM */ @@ -3701,6 +4258,65 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons dhd_txbound = (uint)int_val; break; + case IOV_SVAL(IOV_H2D_MAILBOXDATA): + dhdpcie_send_mb_data(bus, (uint)int_val); + break; + + case IOV_SVAL(IOV_INFORINGS): + dhd_prot_init_info_rings(bus->dhd); + break; + + case IOV_SVAL(IOV_H2D_PHASE): + if (bus->dhd->busstate != DHD_BUS_DOWN) { + DHD_ERROR(("%s: Can change only when bus down (before FW download)\n", + __FUNCTION__)); + bcmerror = BCME_NOTDOWN; + break; + } + if (int_val) + bus->dhd->h2d_phase_supported = TRUE; + else + bus->dhd->h2d_phase_supported = FALSE; + break; + + case IOV_GVAL(IOV_H2D_PHASE): + int_val = (int32) bus->dhd->h2d_phase_supported; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_H2D_ENABLE_TRAP_BADPHASE): + if (bus->dhd->busstate != DHD_BUS_DOWN) { + DHD_ERROR(("%s: Can change only when bus down (before FW download)\n", + __FUNCTION__)); + bcmerror = BCME_NOTDOWN; + break; + } + if (int_val) + bus->dhd->force_dongletrap_on_bad_h2d_phase = TRUE; 
+ else + bus->dhd->force_dongletrap_on_bad_h2d_phase = FALSE; + break; + + case IOV_GVAL(IOV_H2D_ENABLE_TRAP_BADPHASE): + int_val = (int32) bus->dhd->force_dongletrap_on_bad_h2d_phase; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_H2D_TXPOST_MAX_ITEM): + if (bus->dhd->busstate != DHD_BUS_DOWN) { + DHD_ERROR(("%s: Can change only when bus down (before FW download)\n", + __FUNCTION__)); + bcmerror = BCME_NOTDOWN; + break; + } + dhd_prot_set_h2d_max_txpost(bus->dhd, (uint16)int_val); + break; + + case IOV_GVAL(IOV_H2D_TXPOST_MAX_ITEM): + int_val = dhd_prot_get_h2d_max_txpost(bus->dhd); + bcopy(&int_val, arg, val_size); + break; + case IOV_GVAL(IOV_RXBOUND): int_val = (int32)dhd_rxbound; bcopy(&int_val, arg, val_size); @@ -3710,6 +4326,21 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons dhd_rxbound = (uint)int_val; break; + case IOV_GVAL(IOV_TRAPDATA): + { + struct bcmstrbuf dump_b; + bcm_binit(&dump_b, arg, len); + bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, FALSE); + break; + } + + case IOV_GVAL(IOV_TRAPDATA_RAW): + { + struct bcmstrbuf dump_b; + bcm_binit(&dump_b, arg, len); + bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, TRUE); + break; + } case IOV_SVAL(IOV_HANGREPORT): bus->dhd->hang_report = bool_val; DHD_ERROR(("%s: Set hang_report as %d\n", @@ -3721,6 +4352,151 @@ dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, cons bcopy(&int_val, arg, val_size); break; + case IOV_SVAL(IOV_CTO_PREVENTION): + { + uint32 pcie_lnkst; + + if (bus->sih->buscorerev < 19) { + bcmerror = BCME_UNSUPPORTED; + break; + } + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS); + + pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, configdata), 0, 0); + + /* 4347A0 in PCIEGEN1 doesn't support CTO prevention due to + * 4347A0 DAR Issue : JIRA:CRWLPCIEGEN2-443: Issue in DAR write + */ + if 
((bus->sih->buscorerev == 19) && + (((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) & + PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1)) { + bcmerror = BCME_UNSUPPORTED; + break; + } + bus->dhd->cto_enable = bool_val; + dhdpcie_cto_init(bus, bus->dhd->cto_enable); + DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n", + __FUNCTION__, bus->dhd->cto_enable)); + } + break; + + case IOV_GVAL(IOV_CTO_PREVENTION): + if (bus->sih->buscorerev < 19) { + bcmerror = BCME_UNSUPPORTED; + break; + } + int_val = (int32)bus->dhd->cto_enable; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CTO_THRESHOLD): + { + if (bus->sih->buscorerev < 19) { + bcmerror = BCME_UNSUPPORTED; + break; + } + bus->dhd->cto_threshold = (uint32)int_val; + } + break; + + case IOV_GVAL(IOV_CTO_THRESHOLD): + if (bus->sih->buscorerev < 19) { + bcmerror = BCME_UNSUPPORTED; + break; + } + if (bus->dhd->cto_threshold) + int_val = (int32)bus->dhd->cto_threshold; + else + int_val = (int32)PCIE_CTO_TO_THRESH_DEFAULT; + + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PCIE_WD_RESET): + if (bool_val) { + pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *) bus->regs); + } + break; +#ifdef DHD_EFI + case IOV_SVAL(IOV_CONTROL_SIGNAL): + { + bcmerror = dhd_control_signal(bus, arg, TRUE); + break; + } + + case IOV_GVAL(IOV_CONTROL_SIGNAL): + { + bcmerror = dhd_control_signal(bus, params, FALSE); + break; + } +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) + case IOV_GVAL(IOV_DEEP_SLEEP): + int_val = bus->ds_enabled; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DEEP_SLEEP): + if (int_val == 1) { + bus->ds_enabled = TRUE; + /* Deassert */ + if (dhd_bus_set_device_wake(bus, FALSE) == BCME_OK) { +#ifdef PCIE_INB_DW + int timeleft; + timeleft = dhd_os_ds_enter_wait(bus->dhd, NULL); + if (timeleft == 0) { + DHD_ERROR(("DS-ENTER timeout\n")); + bus->ds_enabled = FALSE; + break; + } +#endif /* PCIE_INB_DW */ + } + else { + DHD_ERROR(("%s: Enable Deep Sleep failed !\n", 
__FUNCTION__)); + bus->ds_enabled = FALSE; + } + } + else if (int_val == 0) { + /* Assert */ + if (dhd_bus_set_device_wake(bus, TRUE) == BCME_OK) + bus->ds_enabled = FALSE; + else + DHD_ERROR(("%s: Disable Deep Sleep failed !\n", __FUNCTION__)); + } + else + DHD_ERROR(("%s: Invalid number, allowed only 0|1\n", __FUNCTION__)); + + break; +#endif /* PCIE_OOB || PCIE_INB_DW */ + + case IOV_GVAL(IOV_WIFI_PROPERTIES): + bcmerror = dhd_wifi_properties(bus, params); + break; + + case IOV_GVAL(IOV_OTP_DUMP): + bcmerror = dhd_otp_dump(bus, params); + break; +#endif /* DHD_EFI */ + + case IOV_GVAL(IOV_IDMA_ENABLE): + int_val = bus->idma_enabled; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_IDMA_ENABLE): + bus->idma_enabled = (bool)int_val; + break; + case IOV_GVAL(IOV_IFRM_ENABLE): + int_val = bus->ifrm_enabled; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_IFRM_ENABLE): + bus->ifrm_enabled = (bool)int_val; + break; + case IOV_GVAL(IOV_CLEAR_RING): + bcopy(&int_val, arg, val_size); + dhd_flow_rings_flush(bus->dhd, 0); + break; default: bcmerror = BCME_UNSUPPORTED; break; @@ -3750,12 +4526,49 @@ dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len) return 0; } +/* Ring DoorBell1 to indicate Hostready i.e. 
D3 Exit */ +void +dhd_bus_hostready(struct dhd_bus *bus) +{ + if (!bus->dhd->d2h_hostrdy_supported) { + return; + } + + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + + DHD_INFO_HW4(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__, + dhd_pcie_config_read(bus->osh, PCI_CFG_CMD, sizeof(uint32)))); + si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_DB1, ~0, 0x12345678); + bus->hostready_count ++; + DHD_INFO_HW4(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count)); +} + +/* Clear INTSTATUS */ +void +dhdpcie_bus_clear_intstatus(struct dhd_bus *bus) +{ + uint32 intstatus = 0; + if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) || + (bus->sih->buscorerev == 2)) { + intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4); + dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus); + } else { + /* this is a PCIE core register..not a config register... */ + intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0); + si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, bus->def_intmask, + intstatus); + } +} + int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state) { int timeleft; - unsigned long flags; int rc = 0; + unsigned long flags; printf("%s: state=%d\n", __FUNCTION__, state); if (bus->dhd == NULL) { @@ -3766,9 +4579,14 @@ dhdpcie_bus_suspend(struct dhd_bus *bus, bool state) DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__)); return BCME_ERROR; } + + if (dhd_query_bus_erros(bus->dhd)) { + return BCME_ERROR; + } + DHD_GENERAL_LOCK(bus->dhd, flags); - if (bus->dhd->busstate != DHD_BUS_DATA && bus->dhd->busstate != DHD_BUS_SUSPEND) { - DHD_ERROR(("%s: not in a readystate to LPBK is not inited\n", __FUNCTION__)); + if (!(bus->dhd->busstate == DHD_BUS_DATA || bus->dhd->busstate == DHD_BUS_SUSPEND)) { + DHD_ERROR(("%s: not in a readystate\n", __FUNCTION__)); DHD_GENERAL_UNLOCK(bus->dhd, flags); return BCME_ERROR; } @@ -3778,11 +4596,24 @@ dhdpcie_bus_suspend(struct 
dhd_bus *bus, bool state) return -EIO; } - if (bus->suspended == state) { /* Set to same state */ + /* Check whether we are already in the requested state. + * state=TRUE means Suspend + * state=FALSE meanse Resume + */ + if (state == TRUE && bus->dhd->busstate == DHD_BUS_SUSPEND) { DHD_ERROR(("Bus is already in SUSPEND state.\n")); return BCME_OK; + } else if (state == FALSE && bus->dhd->busstate == DHD_BUS_DATA) { + DHD_ERROR(("Bus is already in RESUME state.\n")); + return BCME_OK; } + if (bus->d3_suspend_pending) { + DHD_ERROR(("Suspend pending ...\n")); + return BCME_ERROR; + } + + if (state) { int idle_retry = 0; int active; @@ -3795,128 +4626,224 @@ dhdpcie_bus_suspend(struct dhd_bus *bus, bool state) /* Suspend */ DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__)); - bus->wait_for_d3_ack = 0; - bus->suspended = TRUE; - DHD_GENERAL_LOCK(bus->dhd, flags); - /* stop all interface network queue. */ - dhd_bus_stop_queue(bus); - bus->dhd->busstate = DHD_BUS_SUSPEND; - if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_IN_TX) { + if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) { DHD_ERROR(("Tx Request is not ended\n")); bus->dhd->busstate = DHD_BUS_DATA; - /* resume all interface network queue. */ - dhd_bus_start_queue(bus); DHD_GENERAL_UNLOCK(bus->dhd, flags); - bus->suspended = FALSE; +#ifndef DHD_EFI return -EBUSY; +#else + return BCME_ERROR; +#endif } - bus->dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SUSPEND; + /* stop all interface network queue. 
*/ + dhd_bus_stop_queue(bus); DHD_GENERAL_UNLOCK(bus->dhd, flags); DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); - dhd_os_set_ioctl_resp_timeout(D3_ACK_RESP_TIMEOUT); - dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM); - timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack); - dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT); - DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); - - { - uint32 d2h_mb_data = 0; - uint32 zero = 0; - - /* If wait_for_d3_ack was not updated because D2H MB was not received */ - if (bus->wait_for_d3_ack == 0) { - /* Read the Mb data to see if the Dongle has actually sent D3 ACK */ - dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0); - - if (d2h_mb_data & D2H_DEV_D3_ACK) { - DHD_ERROR(("*** D3 WAR for missing interrupt ***\r\n")); - /* Clear the MB Data */ - dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), - D2H_MB_DATA, 0); - - /* Consider that D3 ACK is received */ - bus->wait_for_d3_ack = 1; - bus->d3_ack_war_cnt++; - - } /* d2h_mb_data & D2H_DEV_D3_ACK */ - } /* bus->wait_for_d3_ack was 0 */ +#ifdef DHD_TIMESYNC + /* disable time sync mechanism, if configed */ + dhd_timesync_control(bus->dhd, TRUE); +#endif /* DHD_TIMESYNC */ +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) + dhd_bus_set_device_wake(bus, TRUE); +#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */ +#ifdef PCIE_OOB + bus->oob_presuspend = TRUE; +#endif +#ifdef PCIE_INB_DW + /* De-assert at this point for In-band device_wake */ + if (INBAND_DW_ENAB(bus)) { + dhd_bus_set_device_wake(bus, FALSE); + dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_HOST_SLEEP_WAIT); } +#endif /* PCIE_INB_DW */ + + /* Clear wait_for_d3_ack */ + bus->wait_for_d3_ack = 0; + /* + * Send H2D_HOST_D3_INFORM to dongle and mark + * bus->d3_suspend_pending to TRUE in dhdpcie_send_mb_data + * inside atomic context, so that no more DBs will be + * rung after sending D3_INFORM + */ + dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM); + + /* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */ + 
dhd_os_set_ioctl_resp_timeout(D3_ACK_RESP_TIMEOUT); + timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack); + +#ifdef DHD_RECOVER_TIMEOUT + if (bus->wait_for_d3_ack == 0) { + /* If wait_for_d3_ack was not updated because D2H MB was not received */ + uint32 intstatus = 0; + uint32 intmask = 0; + intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0); + intmask = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, 0, 0); + if ((intstatus) && (!intmask) && (timeleft == 0) && + (!dhd_query_bus_erros(bus->dhd))) { + + DHD_ERROR(("%s: D3 ACK trying again intstatus=%x intmask=%x\n", + __FUNCTION__, intstatus, intmask)); + DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters\r\n")); + DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_en_count=%lu\n" + "isr_intr_disable_count=%lu suspend_intr_dis_count=%lu\n" + "dpc_return_busdown_count=%lu\n", + bus->resume_intr_enable_count, bus->dpc_intr_enable_count, + bus->isr_intr_disable_count, + bus->suspend_intr_disable_count, + bus->dpc_return_busdown_count)); + + dhd_prot_process_ctrlbuf(bus->dhd); + + timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack); + + /* Enable Back Interrupts using IntMask */ + dhdpcie_bus_intr_enable(bus); + } + + + } /* bus->wait_for_d3_ack was 0 */ +#endif /* DHD_RECOVER_TIMEOUT */ + + DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); /* To allow threads that got pre-empted to complete. */ while ((active = dhd_os_check_wakelock_all(bus->dhd)) && (idle_retry < MAX_WKLK_IDLE_CHECK)) { - msleep(1); + OSL_SLEEP(1); idle_retry++; } if (bus->wait_for_d3_ack) { DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__)); + /* Got D3 Ack. 
Suspend the bus */ if (active) { - DHD_ERROR(("%s():Suspend failed because of wakelock restoring " - "Dongle to D0\n", __FUNCTION__)); + DHD_ERROR(("%s():Suspend failed because of wakelock" + "restoring Dongle to D0\n", __FUNCTION__)); /* - * Dongle still thinks that it has to be in D3 state - * until gets a D0 Inform, but we are backing off from suspend. + * Dongle still thinks that it has to be in D3 state until + * it gets a D0 Inform, but we are backing off from suspend. * Ensure that Dongle is brought back to D0. * - * Bringing back Dongle from D3 Ack state to D0 state - * is a 2 step process. Dongle would want to know that D0 Inform - * would be sent as a MB interrupt - * to bring it out of D3 Ack state to D0 state. - * So we have to send both this message. + * Bringing back Dongle from D3 Ack state to D0 state is a + * 2 step process. Dongle would want to know that D0 Inform + * would be sent as a MB interrupt to bring it out of D3 Ack + * state to D0 state. So we have to send both this message. */ - DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); - dhdpcie_send_mb_data(bus, - (H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM)); - DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); - bus->suspended = FALSE; + /* Clear wait_for_d3_ack to send D0_INFORM or host_ready */ + bus->wait_for_d3_ack = 0; + + /* Enable back the intmask which was cleared in DPC + * after getting D3_ACK. + */ + bus->resume_intr_enable_count++; + dhdpcie_bus_intr_enable(bus); + + if (bus->use_d0_inform) { + DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); + dhdpcie_send_mb_data(bus, + (H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM)); + DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); + } + /* ring doorbell 1 (hostready) */ + dhd_bus_hostready(bus); + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->d3_suspend_pending = FALSE; bus->dhd->busstate = DHD_BUS_DATA; /* resume all interface network queue. 
*/ dhd_bus_start_queue(bus); DHD_GENERAL_UNLOCK(bus->dhd, flags); rc = BCME_ERROR; } else { - DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); - dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE)); - DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); - dhdpcie_bus_intr_disable(bus); - rc = dhdpcie_pci_suspend_resume(bus, state); - dhd_bus_set_device_wake(bus, FALSE); - } - bus->dhd->d3ackcnt_timeout = 0; +#ifdef PCIE_OOB + bus->oob_presuspend = FALSE; + if (OOB_DW_ENAB(bus)) { + dhd_bus_set_device_wake(bus, FALSE); + } +#endif /* PCIE_OOB */ +#if defined(PCIE_OOB) || defined(BCMPCIE_OOB_HOST_WAKE) + bus->oob_presuspend = TRUE; +#endif /* PCIE_OOB || BCMPCIE_OOB_HOST_WAKE */ +#ifdef PCIE_INB_DW + if (INBAND_DW_ENAB(bus)) { + if (dhdpcie_bus_get_pcie_inband_dw_state(bus) == + DW_DEVICE_HOST_SLEEP_WAIT) { + dhdpcie_bus_set_pcie_inband_dw_state(bus, + DW_DEVICE_HOST_SLEEP); + } + } +#endif /* PCIE_INB_DW */ + if (bus->use_d0_inform && + (bus->api.fw_rev < PCIE_SHARED_VERSION_6)) { + DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); + dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE)); + DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); + } #if defined(BCMPCIE_OOB_HOST_WAKE) - dhdpcie_oob_intr_set(bus, TRUE); + dhdpcie_oob_intr_set(bus, TRUE); #endif /* BCMPCIE_OOB_HOST_WAKE */ + + DHD_GENERAL_LOCK(bus->dhd, flags); + /* The Host cannot process interrupts now so disable the same. + * No need to disable the dongle INTR using intmask, as we are + * already calling dhdpcie_bus_intr_disable from DPC context after + * getting D3_ACK. Code may not look symmetric between Suspend and + * Resume paths but this is done to close down the timing window + * between DPC and suspend context. + */ + /* Disable interrupt from host side!! 
*/ + dhdpcie_disable_irq_nosync(bus); + + bus->dhd->d3ackcnt_timeout = 0; + bus->d3_suspend_pending = FALSE; + bus->dhd->busstate = DHD_BUS_SUSPEND; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + /* Handle Host Suspend */ + rc = dhdpcie_pci_suspend_resume(bus, state); + } } else if (timeleft == 0) { + bus->dhd->d3ack_timeout_occured = TRUE; + /* If the D3 Ack has timeout */ bus->dhd->d3ackcnt_timeout++; DHD_ERROR(("%s: resumed on timeout for D3 ACK d3_inform_cnt %d \n", - __FUNCTION__, bus->dhd->d3ackcnt_timeout)); - dhd_prot_debug_info_print(bus->dhd); -#ifdef DHD_FW_COREDUMP - if (bus->dhd->memdump_enabled) { - /* write core dump to file */ - bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT; - dhdpcie_mem_dump(bus); - } -#endif /* DHD_FW_COREDUMP */ - bus->suspended = FALSE; + __FUNCTION__, bus->dhd->d3ackcnt_timeout)); DHD_GENERAL_LOCK(bus->dhd, flags); + bus->d3_suspend_pending = FALSE; bus->dhd->busstate = DHD_BUS_DATA; /* resume all interface network queue. */ dhd_bus_start_queue(bus); DHD_GENERAL_UNLOCK(bus->dhd, flags); - if (bus->dhd->d3ackcnt_timeout >= MAX_CNTL_D3ACK_TIMEOUT) { - DHD_ERROR(("%s: Event HANG send up " - "due to PCIe linkdown\n", __FUNCTION__)); + if (!bus->dhd->dongle_trap_occured) { + uint32 intstatus = 0; + + /* Check if PCIe bus status is valid */ + intstatus = si_corereg(bus->sih, + bus->sih->buscoreidx, PCIMailBoxInt, 0, 0); + if (intstatus == (uint32)-1) { + /* Invalidate PCIe bus status */ + bus->is_linkdown = 1; + } + + dhd_bus_dump_console_buffer(bus); + dhd_prot_debug_info_print(bus->dhd); +#ifdef DHD_FW_COREDUMP + if (bus->dhd->memdump_enabled) { + /* write core dump to file */ + bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT; + dhdpcie_mem_dump(bus); + } +#endif /* DHD_FW_COREDUMP */ + DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n", + __FUNCTION__)); #ifdef SUPPORT_LINKDOWN_RECOVERY #ifdef CONFIG_ARCH_MSM bus->no_cfg_restore = 1; @@ -3925,52 +4852,145 @@ dhdpcie_bus_suspend(struct dhd_bus *bus, bool state) 
dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT); } rc = -ETIMEDOUT; - } - bus->wait_for_d3_ack = 1; - DHD_GENERAL_LOCK(bus->dhd, flags); - bus->dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SUSPEND; - dhd_os_busbusy_wake(bus->dhd); - DHD_GENERAL_UNLOCK(bus->dhd, flags); + +#ifdef PCIE_OOB + bus->oob_presuspend = FALSE; +#endif /* PCIE_OOB */ } else { /* Resume */ + /** + * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold. + * si_backplane_access(function to read/write backplane) + * updates the window(PCIE2_BAR0_CORE2_WIN) only if + * window being accessed is different form the window + * being pointed by second_bar0win. + * Since PCIE2_BAR0_CORE2_WIN is already reset because of D3 cold, + * invalidating second_bar0win after resume updates + * PCIE2_BAR0_CORE2_WIN with right window. + */ + si_invalidate_second_bar0win(bus->sih); #if defined(BCMPCIE_OOB_HOST_WAKE) DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd); #endif /* BCMPCIE_OOB_HOST_WAKE */ - DHD_GENERAL_LOCK(bus->dhd, flags); - bus->dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_RESUME; - DHD_GENERAL_UNLOCK(bus->dhd, flags); - rc = dhdpcie_pci_suspend_resume(bus, state); - if (bus->dhd->busstate == DHD_BUS_SUSPEND) { - DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); - dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM)); - DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); - dhd_bus_set_device_wake(bus, TRUE); +#ifdef PCIE_INB_DW + if (INBAND_DW_ENAB(bus)) { + if (dhdpcie_bus_get_pcie_inband_dw_state(bus) == DW_DEVICE_HOST_SLEEP) { + dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_HOST_WAKE_WAIT); + } } - bus->suspended = FALSE; +#endif /* PCIE_INB_DW */ + rc = dhdpcie_pci_suspend_resume(bus, state); + +#ifdef BCMPCIE_OOB_HOST_WAKE + bus->oob_presuspend = FALSE; +#endif /* BCMPCIE_OOB_HOST_WAKE */ + + if (bus->dhd->busstate == DHD_BUS_SUSPEND) { + if (bus->use_d0_inform) { + DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); + dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM)); + DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); + } + /* ring doorbell 1 (hostready) */ + dhd_bus_hostready(bus); + 
} + DHD_GENERAL_LOCK(bus->dhd, flags); bus->dhd->busstate = DHD_BUS_DATA; - bus->dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_RESUME; #ifdef DHD_PCIE_RUNTIMEPM - if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_DONE) { + if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) { bus->bus_wake = 1; OSL_SMP_WMB(); wake_up_interruptible(&bus->rpm_queue); } #endif /* DHD_PCIE_RUNTIMEPM */ +#ifdef PCIE_OOB + /* + * Assert & Deassert the Device Wake. The following is the explanation for doing so. + * 0) At this point, + * Host is in suspend state, Link is in L2/L3, Dongle is in D3 Cold + * Device Wake is enabled. + * 1) When the Host comes out of Suspend, it first sends PERST# in the Link. + * Looking at this the Dongle moves from D3 Cold to NO DS State + * 2) Now The Host OS calls the "resume" function of DHD. From here the DHD first + * Asserts the Device Wake. + * From the defn, when the Device Wake is asserted, The dongle FW will ensure + * that the Dongle is out of deep sleep IF the device is already in deep sleep. + * But note that now the Dongle is NOT in Deep sleep and is actually in + * NO DS state. So just driving the Device Wake high does not trigger any state + * transitions. The Host should actually "Toggle" the Device Wake to ensure + * that Dongle synchronizes with the Host and starts the State Transition to D0. + * 4) Note that the above explanation is applicable Only when the Host comes out of + * suspend and the Dongle comes out of D3 Cold + */ + /* This logic is not required when hostready is enabled */ + + if (!bus->dhd->d2h_hostrdy_supported) { + if (OOB_DW_ENAB(bus)) { + dhd_bus_set_device_wake(bus, TRUE); + OSL_DELAY(1000); + dhd_bus_set_device_wake(bus, FALSE); + } + } +#endif /* PCIE_OOB */ /* resume all interface network queue. */ dhd_bus_start_queue(bus); - dhd_os_busbusy_wake(bus->dhd); + /* The Host is ready to process interrupts now so enable the same. 
*/ + + /* TODO: for NDIS also we need to use enable_irq in future */ + bus->resume_intr_enable_count++; + dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */ + dhdpcie_enable_irq(bus); /* Enable back interrupt from Host side!! */ DHD_GENERAL_UNLOCK(bus->dhd, flags); - dhdpcie_bus_intr_enable(bus); +#ifdef DHD_TIMESYNC + DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); + /* enable time sync mechanism, if configed */ + dhd_timesync_control(bus->dhd, FALSE); + DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); +#endif /* DHD_TIMESYNC */ } return rc; } +uint32 +dhdpcie_force_alp(struct dhd_bus *bus, bool enable) +{ + ASSERT(bus && bus->sih); + if (enable) { + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, CCS_FORCEALP); + } else { + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, 0); + } + return 0; +} + +/* set pcie l1 entry time: dhd pciereg 0x1004[22:16] */ +uint32 +dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int l1_entry_time) +{ + uint reg_val; + + ASSERT(bus && bus->sih); + + si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, + 0x1004); + reg_val = si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, configdata), 0, 0); + reg_val = (reg_val & ~(0x7f << 16)) | ((l1_entry_time & 0x7f) << 16); + si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0, + reg_val); + + return 0; +} + /** Transfers bytes from host to dongle and to host again using DMA */ static int -dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, uint32 len, uint32 srcdelay, uint32 destdelay) +dhdpcie_bus_dmaxfer_req( + struct dhd_bus *bus, uint32 len, uint32 srcdelay, uint32 destdelay, uint32 d11_lpbk) { if (bus->dhd == NULL) { DHD_ERROR(("%s: bus not inited\n", __FUNCTION__)); @@ -3989,7 +5009,7 @@ dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, uint32 len, uint32 srcdelay, uint3 DHD_ERROR(("%s: len is too small or too large\n", 
__FUNCTION__)); return BCME_ERROR; } - return dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay); + return dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay, d11_lpbk); } @@ -3998,7 +5018,7 @@ static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter) { int bcmerror = 0; - uint32 *cr4_regs; + volatile uint32 *cr4_regs; if (!bus->sih) { DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__)); @@ -4008,6 +5028,8 @@ dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter) * To exit download state, simply reset ARM (default is RAM boot). */ if (enter) { + /* Make sure BAR1 maps to backplane address 0 */ + dhdpcie_bus_cfg_write_dword(bus, PCI_BAR1_WIN, 4, 0x00000000); bus->alp_only = TRUE; /* some chips (e.g. 43602) have two ARM cores, the CR4 is receives the firmware. */ @@ -4138,6 +5160,13 @@ dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter) goto fail; } +#ifdef BCM_ASLR_HEAP + /* write a random number to TCM for the purpose of + * randomizing heap address space. + */ + dhdpcie_wrt_rnd(bus); +#endif /* BCM_ASLR_HEAP */ + /* switch back to arm core again */ if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) { DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__)); @@ -4210,8 +5239,6 @@ dhdpcie_bus_write_vars(dhd_bus_t *bus) bzero(vbuffer, varsize); bcopy(bus->vars, vbuffer, bus->varsz); /* Write the vars list */ - DHD_INFO_HW4(("%s: tcm: %p varaddr: 0x%x varsize: %d\n", - __FUNCTION__, bus->tcm, varaddr, varsize)); bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize); /* Implement read back and verify later */ @@ -4273,8 +5300,6 @@ dhdpcie_bus_write_vars(dhd_bus_t *bus) DHD_INFO(("%s: New varsize is %d, length token=0x%08x\n", __FUNCTION__, varsize, varsizew)); /* Write the length token to the last word */ - DHD_INFO_HW4(("%s: tcm: %p phys_size: 0x%x varsizew: %x\n", - __FUNCTION__, bus->tcm, phys_size, varsizew)); bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4), (uint8*)&varsizew, 4); @@ -4312,12 +5337,49 @@ 
dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len) /* Copy the passed variables, which should include the terminating double-null */ bcopy(arg, bus->vars, bus->varsz); +#ifdef DHD_USE_SINGLE_NVRAM_FILE + if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) { + char *sp = NULL; + char *ep = NULL; + int i; + char tag[2][8] = {"ccode=", "regrev="}; + + /* Find ccode and regrev info */ + for (i = 0; i < 2; i++) { + sp = strnstr(bus->vars, tag[i], bus->varsz); + if (!sp) { + DHD_ERROR(("%s: Could not find ccode info from the nvram %s\n", + __FUNCTION__, bus->nv_path)); + bcmerror = BCME_ERROR; + goto err; + } + sp = strchr(sp, '='); + ep = strchr(sp, '\0'); + /* We assumed that string length of both ccode and + * regrev values should not exceed WLC_CNTRY_BUF_SZ + */ + if (sp && ep && ((ep - sp) <= WLC_CNTRY_BUF_SZ)) { + sp++; + while (*sp != '\0') { + DHD_INFO(("%s: parse '%s', current sp = '%c'\n", + __FUNCTION__, tag[i], *sp)); + *sp++ = '0'; + } + } else { + DHD_ERROR(("%s: Invalid parameter format when parsing for %s\n", + __FUNCTION__, tag[i])); + bcmerror = BCME_ERROR; + goto err; + } + } + } +#endif /* DHD_USE_SINGLE_NVRAM_FILE */ + err: return bcmerror; } -#ifndef BCMPCIE_OOB_HOST_WAKE /* loop through the capability list and see if the pcie capabilty exists */ uint8 dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id) @@ -4406,7 +5468,73 @@ dhdpcie_pme_cap(osl_t *osh) return ((pme_cap & PME_CAP_PM_STATES) != 0); } -#endif /* !BCMPCIE_OOB_HOST_WAKE */ + +uint32 +dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val) +{ + + uint8 pcie_cap; + uint8 lcreg_offset; /* PCIE capability LCreg offset in the config space */ + uint32 reg_val; + + + pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID); + + if (!pcie_cap) { + DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__)); + return 0; + } + + lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET; + + /* set operation */ + if (mask) { + /* read */ + reg_val = OSL_PCI_READ_CONFIG(osh, 
lcreg_offset, sizeof(uint32)); + + /* modify */ + reg_val &= ~mask; + reg_val |= (mask & val); + + /* write */ + OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val); + } + return OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32)); +} + + + +uint8 +dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val) +{ + uint8 pcie_cap; + uint32 reg_val; + uint8 lcreg_offset; /* PCIE capability LCreg offset in the config space */ + + pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID); + + if (!pcie_cap) { + DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__)); + return 0; + } + + lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET; + + reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32)); + /* set operation */ + if (mask) { + if (val) + reg_val |= PCIE_CLKREQ_ENAB; + else + reg_val &= ~PCIE_CLKREQ_ENAB; + OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val); + reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32)); + } + if (reg_val & PCIE_CLKREQ_ENAB) + return 1; + else + return 0; +} void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf) { @@ -4424,6 +5552,13 @@ void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf) intstatus, intmask, mbintstatus); bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n", d2h_mb_data, dhd->bus->def_intmask); + bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n"); + bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n" + "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n" + "dpc_return_busdown_count=%lu\n", + dhd->bus->resume_intr_enable_count, dhd->bus->dpc_intr_enable_count, + dhd->bus->isr_intr_disable_count, dhd->bus->suspend_intr_disable_count, + dhd->bus->dpc_return_busdown_count); } /** Add bus dump output to a buffer */ @@ -4438,38 +5573,83 @@ void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) if (dhdp->busstate != DHD_BUS_DATA) return; +#ifdef DHD_WAKE_STATUS + 
bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n", + bcmpcie_get_total_wake(dhdp->bus), dhdp->bus->wake_counts.rxwake, + dhdp->bus->wake_counts.rcwake); +#ifdef DHD_WAKE_RX_STATUS + bcm_bprintf(strbuf, " unicast %u muticast %u broadcast %u arp %u\n", + dhdp->bus->wake_counts.rx_ucast, dhdp->bus->wake_counts.rx_mcast, + dhdp->bus->wake_counts.rx_bcast, dhdp->bus->wake_counts.rx_arp); + bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n", + dhdp->bus->wake_counts.rx_multi_ipv4, dhdp->bus->wake_counts.rx_multi_ipv6, + dhdp->bus->wake_counts.rx_icmpv6, dhdp->bus->wake_counts.rx_multi_other); + bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n", + dhdp->bus->wake_counts.rx_icmpv6_ra, dhdp->bus->wake_counts.rx_icmpv6_na, + dhdp->bus->wake_counts.rx_icmpv6_ns); +#endif /* DHD_WAKE_RX_STATUS */ +#ifdef DHD_WAKE_EVENT_STATUS + for (flowid = 0; flowid < WLC_E_LAST; flowid++) + if (dhdp->bus->wake_counts.rc_event[flowid] != 0) + bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(flowid), + dhdp->bus->wake_counts.rc_event[flowid]); + bcm_bprintf(strbuf, "\n"); +#endif /* DHD_WAKE_EVENT_STATUS */ +#endif /* DHD_WAKE_STATUS */ + dhd_prot_print_info(dhdp, strbuf); dhd_dump_intr_registers(dhdp, strbuf); bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n", dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr); bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr)); bcm_bprintf(strbuf, - "%s %4s %2s %4s %17s %4s %4s %10s %4s %4s ", - "Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", + "%s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ", + "Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen", "Overflows", "RD", "WR"); bcm_bprintf(strbuf, "%5s %6s %5s \n", "Acked", "tossed", "noack"); for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) { flow_ring_node = DHD_FLOW_RING(dhdp, flowid); - if (flow_ring_node->active) { - flow_info = 
&flow_ring_node->flow_info; - bcm_bprintf(strbuf, - "%3d. %4d %2d %4d %17s %4d %4d %10u ", ix++, - flow_ring_node->flowid, flow_info->ifindex, flow_info->tid, - bcm_ether_ntoa((struct ether_addr *)&flow_info->da, eabuf), - DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue), - DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)), - DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue)); - dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf, - "%4d %4d "); - bcm_bprintf(strbuf, - "%5s %6s %5s\n", "NA", "NA", "NA"); - } + if (!flow_ring_node->active) + continue; + + flow_info = &flow_ring_node->flow_info; + bcm_bprintf(strbuf, + "%3d. %4d %2d %4d %17s %4d %4d %6d %10u ", ix++, + flow_ring_node->flowid, flow_info->ifindex, flow_info->tid, + bcm_ether_ntoa((struct ether_addr *)&flow_info->da, eabuf), + DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue), + DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)), + DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node->queue)), + DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue)); + dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf, + "%4d %4d "); + bcm_bprintf(strbuf, + "%5s %6s %5s\n", "NA", "NA", "NA"); } bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt); bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt); bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt); - bcm_bprintf(strbuf, "D3 Ack WAR cnt %d\n", dhdp->bus->d3_ack_war_cnt); + if (dhdp->d2h_hostrdy_supported) { + bcm_bprintf(strbuf, "hostready count:%d\n", dhdp->bus->hostready_count); + } +#ifdef PCIE_INB_DW + /* Inband device wake counters */ + if (INBAND_DW_ENAB(dhdp->bus)) { + bcm_bprintf(strbuf, "Inband device_wake assert count: %d\n", + dhdp->bus->inband_dw_assert_cnt); + bcm_bprintf(strbuf, "Inband device_wake deassert count: %d\n", + dhdp->bus->inband_dw_deassert_cnt); + bcm_bprintf(strbuf, "Inband DS-EXIT count: %d\n", + dhdp->bus->inband_ds_exit_host_cnt); 
+ bcm_bprintf(strbuf, "Inband DS-EXIT count: %d\n", + dhdp->bus->inband_ds_exit_device_cnt); + bcm_bprintf(strbuf, "Inband DS-EXIT Timeout count: %d\n", + dhdp->bus->inband_ds_exit_to_cnt); + bcm_bprintf(strbuf, "Inband HOST_SLEEP-EXIT Timeout count: %d\n", + dhdp->bus->inband_host_sleep_exit_to_cnt); + } +#endif /* PCIE_INB_DW */ } /** @@ -4484,9 +5664,10 @@ dhd_update_txflowrings(dhd_pub_t *dhd) flow_ring_node_t *flow_ring_node; struct dhd_bus *bus = dhd->bus; + /* Hold flowring_list_lock to ensure no race condition while accessing the List */ DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); - for (item = dll_head_p(&bus->const_flowring); - (!dhd_is_device_removed(dhd) && !dll_end(&bus->const_flowring, item)); + for (item = dll_head_p(&bus->flowring_active_list); + (!dhd_is_device_removed(dhd) && !dll_end(&bus->flowring_active_list, item)); item = next) { if (dhd->hang_was_sent) { break; @@ -4526,105 +5707,279 @@ dhd_bus_gen_devmb_intr(struct dhd_bus *bus) } } +/* Upon receiving a mailbox interrupt, + * if H2D_FW_TRAP bit is set in mailbox location + * device traps + */ static void -dhd_bus_set_device_wake(struct dhd_bus *bus, bool val) +dhdpcie_fw_trap(dhd_bus_t *bus) { - if (bus->device_wake_state != val) - { - DHD_INFO(("Set Device_Wake to %d\n", val)); -#ifdef PCIE_OOB - if (bus->oob_enabled) - { - if (val) - { - gpio_port = gpio_port | (1 << DEVICE_WAKE); - gpio_write_port_non_block(gpio_handle_val, gpio_port); - } else { - gpio_port = gpio_port & (0xff ^ (1 << DEVICE_WAKE)); - gpio_write_port_non_block(gpio_handle_val, gpio_port); - } - } -#endif /* PCIE_OOB */ - bus->device_wake_state = val; - } + /* Send the mailbox data and generate mailbox intr. 
*/ + dhdpcie_send_mb_data(bus, H2D_FW_TRAP); } -#ifdef PCIE_OOB +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) void -dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val) -{ - DHD_INFO(("Set Device_Wake to %d\n", val)); - if (val) - { - gpio_port = gpio_port | (1 << BIT_BT_REG_ON); - gpio_write_port(gpio_handle_val, gpio_port); - } else { - gpio_port = gpio_port & (0xff ^ (1 << BIT_BT_REG_ON)); - gpio_write_port(gpio_handle_val, gpio_port); - } -} - -int -dhd_oob_get_bt_reg_on(struct dhd_bus *bus) -{ - int ret; - uint8 val; - ret = gpio_read_port(gpio_handle_val, &val); - - if (ret < 0) { - DHD_ERROR(("gpio_read_port returns %d\n", ret)); - return ret; - } - - if (val & (1 << BIT_BT_REG_ON)) - { - ret = 1; - } else { - ret = 0; - } - - return ret; -} - -static void dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus) { if (dhd_doorbell_timeout) dhd_timeout_start(&bus->doorbell_timer, (dhd_doorbell_timeout * 1000) / dhd_watchdog_ms); - else if (!(bus->dhd->busstate == DHD_BUS_SUSPEND)) + else if (!(bus->dhd->busstate == DHD_BUS_SUSPEND)) { dhd_bus_set_device_wake(bus, FALSE); + } } +#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */ + +#ifdef PCIE_INB_DW + +void +dhd_bus_inb_ack_pending_ds_req(dhd_bus_t *bus) +{ + /* The DHD_BUS_INB_DW_LOCK must be held before + * calling this function !! + */ + if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) == + DW_DEVICE_DS_DEV_SLEEP_PEND) && + (bus->host_active_cnt == 0)) { + dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_DEV_SLEEP); + dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK); + } +} + +int +dhd_bus_inb_set_device_wake(struct dhd_bus *bus, bool val) +{ + int timeleft; + unsigned long flags; + int ret; + + if (!INBAND_DW_ENAB(bus)) { + return BCME_ERROR; + } + + if (val) { + DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); + + /* + * Reset the Door Bell Timeout value. So that the Watchdog + * doesn't try to Deassert Device Wake, while we are in + * the process of still Asserting the same. 
+ */ + if (dhd_doorbell_timeout) { + dhd_timeout_start(&bus->doorbell_timer, + (dhd_doorbell_timeout * 1000) / dhd_watchdog_ms); + } + + if (dhdpcie_bus_get_pcie_inband_dw_state(bus) == + DW_DEVICE_DS_DEV_SLEEP) { + /* Clear wait_for_ds_exit */ + bus->wait_for_ds_exit = 0; + ret = dhdpcie_send_mb_data(bus, H2DMB_DS_DEVICE_WAKE_ASSERT); + if (ret != BCME_OK) { + DHD_ERROR(("Failed: assert Inband device_wake\n")); + bus->wait_for_ds_exit = 1; + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + ret = BCME_ERROR; + goto exit; + } + dhdpcie_bus_set_pcie_inband_dw_state(bus, + DW_DEVICE_DS_DISABLED_WAIT); + bus->inband_dw_assert_cnt++; + } else { + DHD_INFO(("Not in DS SLEEP state \n")); + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + ret = BCME_OK; + goto exit; + } + + /* + * Since we are going to wait/sleep .. release the lock. + * The Device Wake sanity is still valid, because + * a) If there is another context that comes in and tries + * to assert DS again and if it gets the lock, since + * ds_state would be now != DW_DEVICE_DS_DEV_SLEEP the + * context would return saying Not in DS Sleep. + * b) If ther is another context that comes in and tries + * to de-assert DS and gets the lock, + * since the ds_state is != DW_DEVICE_DS_DEV_WAKE + * that context would return too. This can not happen + * since the watchdog is the only context that can + * De-Assert Device Wake and as the first step of + * Asserting the Device Wake, we have pushed out the + * Door Bell Timeout. 
+ * + */ + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + + if (!CAN_SLEEP()) { + /* Called from context that cannot sleep */ + OSL_DELAY(1000); + bus->wait_for_ds_exit = 1; + } else { + /* Wait for DS EXIT for DS_EXIT_TIMEOUT seconds */ + timeleft = dhd_os_ds_exit_wait(bus->dhd, &bus->wait_for_ds_exit); + if (!bus->wait_for_ds_exit && timeleft == 0) { + DHD_ERROR(("DS-EXIT timeout\n")); + bus->inband_ds_exit_to_cnt++; + bus->ds_exit_timeout = 0; + ret = BCME_ERROR; + goto exit; + } + } + + DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); + dhdpcie_bus_set_pcie_inband_dw_state(bus, + DW_DEVICE_DS_DEV_WAKE); + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + + ret = BCME_OK; + } else { + DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); + if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) == + DW_DEVICE_DS_DEV_WAKE)) { + ret = dhdpcie_send_mb_data(bus, H2DMB_DS_DEVICE_WAKE_DEASSERT); + if (ret != BCME_OK) { + DHD_ERROR(("Failed: deassert Inband device_wake\n")); + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + goto exit; + } + dhdpcie_bus_set_pcie_inband_dw_state(bus, + DW_DEVICE_DS_ACTIVE); + bus->inband_dw_deassert_cnt++; + } else if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) == + DW_DEVICE_DS_DEV_SLEEP_PEND) && + (bus->host_active_cnt == 0)) { + dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_DEV_SLEEP); + dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK); + } + + ret = BCME_OK; + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + } + +exit: + return ret; +} +#endif /* PCIE_INB_DW */ + + +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) +int +dhd_bus_set_device_wake(struct dhd_bus *bus, bool val) +{ + if (bus->ds_enabled) { +#ifdef PCIE_INB_DW + if (INBAND_DW_ENAB(bus)) { + return dhd_bus_inb_set_device_wake(bus, val); + } +#endif /* PCIE_INB_DW */ +#ifdef PCIE_OOB + if (OOB_DW_ENAB(bus)) { + return dhd_os_oob_set_device_wake(bus, val); + } #endif /* PCIE_OOB */ + } + return BCME_OK; +} +#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */ /** mailbox doorbell ring function */ void 
dhd_bus_ringbell(struct dhd_bus *bus, uint32 value) { + /* Skip after sending D3_INFORM */ + if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) { + DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :" + "busstate=%d, d3_suspend_pending=%d\n", + __FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending)); + return; + } if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4)) { si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, PCIE_INTB, PCIE_INTB); } else { /* this is a pcie core register, not the config regsiter */ DHD_INFO(("%s: writing a door bell to the device\n", __FUNCTION__)); - si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_MailBox, ~0, 0x12345678); + if (IDMA_ACTIVE(bus->dhd)) { + si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_MailBox_2, + ~0, value); + } else { + si_corereg(bus->sih, bus->sih->buscoreidx, + PCIH2D_MailBox, ~0, 0x12345678); + } } } +/** mailbox doorbell ring function for IDMA/IFRM using dma channel2 */ +void +dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake) +{ + /* this is a pcie core register, not the config regsiter */ + /* Skip after sending D3_INFORM */ + if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) { + DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :" + "busstate=%d, d3_suspend_pending=%d\n", + __FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending)); + return; + } + DHD_INFO(("writing a door bell 2 to the device\n")); + si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_MailBox_2, + ~0, value); +} + void dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value) { -#ifdef PCIE_OOB - dhd_bus_set_device_wake(bus, TRUE); + /* Skip after sending D3_INFORM */ + if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) { + DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :" + "busstate=%d, d3_suspend_pending=%d\n", + __FUNCTION__, bus->dhd->busstate, 
bus->d3_suspend_pending)); + return; + } +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) + if (OOB_DW_ENAB(bus)) { + dhd_bus_set_device_wake(bus, TRUE); + } dhd_bus_doorbell_timeout_reset(bus); -#endif +#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */ W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value); } +void +dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake) +{ + /* Skip after sending D3_INFORM */ + if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) { + DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :" + "busstate=%d, d3_suspend_pending=%d\n", + __FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending)); + return; + } +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) + if (devwake) { + if (OOB_DW_ENAB(bus)) { + dhd_bus_set_device_wake(bus, TRUE); + } + } + dhd_bus_doorbell_timeout_reset(bus); +#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */ + + W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_2_addr, value); +} + static void dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value) { uint32 w; + /* Skip after sending D3_INFORM */ + if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) { + DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :" + "busstate=%d, d3_suspend_pending=%d\n", + __FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending)); + return; + } w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB; W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w); } @@ -4651,6 +6006,18 @@ dhd_bus_get_mbintr_fn(struct dhd_bus *bus) return dhd_bus_ringbell; } +dhd_mb_ring_2_t +dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus) +{ + bus->pcie_mb_intr_2_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx, + PCIH2D_MailBox_2); + if (bus->pcie_mb_intr_2_addr) { + bus->pcie_mb_intr_osh = si_osh(bus->sih); + return dhdpcie_bus_ringbell_2_fast; + } + return dhd_bus_ringbell_2; +} + bool BCMFASTPATH dhd_bus_dpc(struct dhd_bus 
*bus) { @@ -4669,24 +6036,25 @@ dhd_bus_dpc(struct dhd_bus *bus) DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__)); bus->intstatus = 0; DHD_GENERAL_UNLOCK(bus->dhd, flags); + bus->dpc_return_busdown_count++; return 0; } - bus->dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DPC; +#ifdef DHD_PCIE_RUNTIMEPM + bus->idlecount = 0; +#endif /* DHD_PCIE_RUNTIMEPM */ + DHD_BUS_BUSY_SET_IN_DPC(bus->dhd); DHD_GENERAL_UNLOCK(bus->dhd, flags); + resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus); if (!resched) { bus->intstatus = 0; - if (!bus->pci_d3hot_done) { - dhdpcie_bus_intr_enable(bus); - } else { - DHD_ERROR(("%s: dhdpcie_bus_intr_enable skip in pci D3hot state \n", - __FUNCTION__)); - } + bus->dpc_intr_enable_count++; + dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */ } DHD_GENERAL_LOCK(bus->dhd, flags); - bus->dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DPC; + DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd); dhd_os_busbusy_wake(bus->dhd); DHD_GENERAL_UNLOCK(bus->dhd, flags); @@ -4695,16 +6063,36 @@ dhd_bus_dpc(struct dhd_bus *bus) } -static void +int dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data) { uint32 cur_h2d_mb_data = 0; + unsigned long flags; DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data)); if (bus->is_linkdown) { DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); - return; + return BCME_ERROR; + } + + DHD_GENERAL_LOCK(bus->dhd, flags); + + if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !bus->use_mailbox) { + DHD_INFO(("API rev is 6, sending mb data as H2D Ctrl message to dongle, 0x%04x\n", + h2d_mb_data)); + /* Prevent asserting device_wake during doorbell ring for mb data to avoid loop. 
*/ +#ifdef PCIE_OOB + bus->oob_enabled = FALSE; +#endif /* PCIE_OOB */ + if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus->dhd, h2d_mb_data)) { + DHD_ERROR(("failure sending the H2D Mailbox message to firmware\n")); + goto fail; + } +#ifdef PCIE_OOB + bus->oob_enabled = TRUE; +#endif /* PCIE_OOB */ + goto done; } dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0); @@ -4727,8 +6115,13 @@ dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data) dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0); dhd_bus_gen_devmb_intr(bus); +done: if (h2d_mb_data == H2D_HOST_D3_INFORM) { DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__)); + /* Mark D3_INFORM in the atomic context to + * skip ringing H2D DB after D3_INFORM + */ + bus->d3_suspend_pending = TRUE; bus->d3_inform_cnt++; } if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) { @@ -4739,6 +6132,131 @@ dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data) DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__)); bus->d0_inform_cnt++; } + DHD_GENERAL_UNLOCK(bus->dhd, flags); + return BCME_OK; + +fail: + DHD_GENERAL_UNLOCK(bus->dhd, flags); + return BCME_ERROR; +} + +void +dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data) +{ +#ifdef PCIE_INB_DW + unsigned long flags = 0; +#endif + DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data)); + + if (d2h_mb_data & D2H_DEV_FWHALT) { + DHD_ERROR(("FW trap has happened\n")); + dhdpcie_checkdied(bus, NULL, 0); +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM + bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO); + return; + } + if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) { + if ((bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) && + bus->wait_for_d3_ack) { + DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! 
QUITING\n")); + bus->dhd->busstate = DHD_BUS_DOWN; + return; + } + /* what should we do */ + DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n")); +#ifdef PCIE_INB_DW + if (INBAND_DW_ENAB(bus)) { + DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); + if (dhdpcie_bus_get_pcie_inband_dw_state(bus) == DW_DEVICE_DS_ACTIVE) { + dhdpcie_bus_set_pcie_inband_dw_state(bus, + DW_DEVICE_DS_DEV_SLEEP_PEND); + if (bus->host_active_cnt == 0) { + dhdpcie_bus_set_pcie_inband_dw_state(bus, + DW_DEVICE_DS_DEV_SLEEP); + dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK); + } + } + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + dhd_os_ds_enter_wake(bus->dhd); + } else +#endif /* PCIE_INB_DW */ + { + dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK); + } + if (IDMA_DS_ENAB(bus->dhd)) { + bus->dongle_in_ds = TRUE; + } + DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n")); + } + if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) { + /* what should we do */ + bus->dongle_in_ds = FALSE; + DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n")); +#ifdef PCIE_INB_DW + if (INBAND_DW_ENAB(bus)) { + bus->inband_ds_exit_device_cnt++; + DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); + if (dhdpcie_bus_get_pcie_inband_dw_state(bus) == + DW_DEVICE_DS_DISABLED_WAIT) { + /* wake up only if some one is waiting in + * DW_DEVICE_DS_DISABLED_WAIT state + * in this case the waiter will change the state + * to DW_DEVICE_DS_DEV_WAKE + */ + bus->wait_for_ds_exit = 1; + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + dhd_os_ds_exit_wake(bus->dhd); + } else { + DHD_INFO(("D2H_MB_DATA: not in DW_DEVICE_DS_DISABLED_WAIT!\n")); + /* + * If there is no one waiting, then update the state from here + */ + bus->wait_for_ds_exit = 1; + dhdpcie_bus_set_pcie_inband_dw_state(bus, + DW_DEVICE_DS_DEV_WAKE); + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + } + } +#endif /* PCIE_INB_DW */ + } + if (d2h_mb_data & D2HMB_DS_HOST_SLEEP_EXIT_ACK) { + /* what should we do */ + DHD_INFO(("D2H_MB_DATA: D0 ACK\n")); +#ifdef PCIE_INB_DW + if (INBAND_DW_ENAB(bus)) { + 
DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); + if (dhdpcie_bus_get_pcie_inband_dw_state(bus) == + DW_DEVICE_HOST_WAKE_WAIT) { + dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_ACTIVE); + } + DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); + } +#endif /* PCIE_INB_DW */ + } + if (d2h_mb_data & D2H_DEV_D3_ACK) { + /* what should we do */ + DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n")); + if (!bus->wait_for_d3_ack) { + /* Disable dongle Interrupts Immediately after D3 */ + bus->suspend_intr_disable_count++; + dhdpcie_bus_intr_disable(bus); +#if defined(DHD_HANG_SEND_UP_TEST) + if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) { + DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n")); + } else { + bus->wait_for_d3_ack = 1; + dhd_os_d3ack_wake(bus->dhd); + } +#else /* DHD_HANG_SEND_UP_TEST */ + bus->wait_for_d3_ack = 1; + dhd_os_d3ack_wake(bus->dhd); +#endif /* DHD_HANG_SEND_UP_TEST */ + } + } } static void @@ -4747,46 +6265,67 @@ dhdpcie_handle_mb_data(dhd_bus_t *bus) uint32 d2h_mb_data = 0; uint32 zero = 0; dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0); - if (!d2h_mb_data) { - DHD_INFO_HW4(("%s: Invalid D2H_MB_DATA: 0x%08x\n", + if (D2H_DEV_MB_INVALIDATED(d2h_mb_data)) { + DHD_ERROR(("%s: Invalid D2H_MB_DATA: 0x%08x\n", __FUNCTION__, d2h_mb_data)); return; } dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0); - DHD_INFO_HW4(("%s: D2H_MB_DATA: 0x%08x\n", __FUNCTION__, d2h_mb_data)); + DHD_INFO_HW4(("%s: D2H_MB_DATA: 0x%04x\n", __FUNCTION__, d2h_mb_data)); if (d2h_mb_data & D2H_DEV_FWHALT) { DHD_ERROR(("FW trap has happened\n")); dhdpcie_checkdied(bus, NULL, 0); /* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */ - bus->dhd->busstate = DHD_BUS_DOWN; return; } if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) { /* what should we do */ DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP REQ\n", __FUNCTION__)); dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK); + if (IDMA_DS_ENAB(bus->dhd)) { + bus->dongle_in_ds = TRUE; + } DHD_INFO(("%s: D2H_MB_DATA: sent DEEP SLEEP 
ACK\n", __FUNCTION__)); } if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) { /* what should we do */ DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP EXIT\n", __FUNCTION__)); + bus->dongle_in_ds = FALSE; } if (d2h_mb_data & D2H_DEV_D3_ACK) { /* what should we do */ DHD_INFO_HW4(("%s: D2H_MB_DATA: D3 ACK\n", __FUNCTION__)); if (!bus->wait_for_d3_ack) { +#if defined(DHD_HANG_SEND_UP_TEST) + if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) { + DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n")); + } else { + bus->wait_for_d3_ack = 1; + dhd_os_d3ack_wake(bus->dhd); + } +#else /* DHD_HANG_SEND_UP_TEST */ bus->wait_for_d3_ack = 1; dhd_os_d3ack_wake(bus->dhd); +#endif /* DHD_HANG_SEND_UP_TEST */ } } } -/* Inform Dongle to print HW Registers for Livelock Debug */ -void dhdpcie_bus_dongle_print_hwregs(struct dhd_bus *bus) +static void +dhdpcie_read_handle_mb_data(dhd_bus_t *bus) { - dhdpcie_send_mb_data(bus, H2D_FW_TRAP); + uint32 d2h_mb_data = 0; + uint32 zero = 0; + + dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0); + if (!d2h_mb_data) + return; + + dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0); + + dhd_bus_handle_mb_data(bus, d2h_mb_data); } static bool @@ -4804,7 +6343,7 @@ dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus) } } else { if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1)) - dhdpcie_handle_mb_data(bus); + bus->api.handle_mb_data(bus); if (bus->dhd->busstate == DHD_BUS_SUSPEND) { goto exit; @@ -4824,8 +6363,16 @@ dhdpci_bus_read_frames(dhd_bus_t *bus) { bool more = FALSE; + /* First check if there a FW trap */ + if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) && + (bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) { + dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT); + return FALSE; + } + /* There may be frames in both ctrl buf and data buf; check ctrl buf first */ DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); + dhd_prot_process_ctrlbuf(bus->dhd); /* Unlock to give chance for resp to 
be handled */ DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); @@ -4844,12 +6391,49 @@ dhdpci_bus_read_frames(dhd_bus_t *bus) */ more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound); + /* Process info ring completion messages */ + more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND); + +#ifdef IDLE_TX_FLOW_MGMT + if (bus->enable_idle_flowring_mgmt) { + /* Look for idle flow rings */ + dhd_bus_check_idle_scan(bus); + } +#endif /* IDLE_TX_FLOW_MGMT */ + /* don't talk to the dongle if fw is about to be reloaded */ if (bus->dhd->hang_was_sent) { more = FALSE; } DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); +#ifdef SUPPORT_LINKDOWN_RECOVERY + if (bus->read_shm_fail) { + /* Read interrupt state once again to confirm linkdown */ + int intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0); + if (intstatus != (uint32)-1) { + DHD_ERROR(("%s: read SHM failed but intstatus is valid\n", __FUNCTION__)); +#ifdef DHD_FW_COREDUMP + if (bus->dhd->memdump_enabled) { + DHD_OS_WAKE_LOCK(bus->dhd); + bus->dhd->memdump_type = DUMP_TYPE_READ_SHM_FAIL; + dhd_bus_mem_dump(bus->dhd); + DHD_OS_WAKE_UNLOCK(bus->dhd); + } +#endif /* DHD_FW_COREDUMP */ + bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN; + dhd_os_send_hang_message(bus->dhd); + } else { + DHD_ERROR(("%s: Link is Down.\n", __FUNCTION__)); +#ifdef CONFIG_ARCH_MSM + bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ + bus->is_linkdown = 1; + bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN; + dhd_os_send_hang_message(bus->dhd); + } + } +#endif /* SUPPORT_LINKDOWN_RECOVERY */ return more; } @@ -4889,18 +6473,33 @@ dhdpcie_tcm_valid(dhd_bus_t *bus) return TRUE; } +static void +dhdpcie_update_bus_api_revisions(uint32 firmware_api_version, uint32 host_api_version) +{ + snprintf(bus_api_revision, BUS_API_REV_STR_LEN, "\nBus API revisions:(FW rev%d)(DHD rev%d)", + firmware_api_version, host_api_version); + return; +} + static bool 
dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version) { + bool retcode = FALSE; + DHD_INFO(("firmware api revision %d, host api revision %d\n", firmware_api_version, host_api_version)); - if (firmware_api_version <= host_api_version) - return TRUE; - if ((firmware_api_version == 6) && (host_api_version == 5)) - return TRUE; - if ((firmware_api_version == 5) && (host_api_version == 6)) - return TRUE; - return FALSE; + + switch (firmware_api_version) { + case PCIE_SHARED_VERSION_7: + case PCIE_SHARED_VERSION_6: + case PCIE_SHARED_VERSION_5: + retcode = TRUE; + break; + default: + if (firmware_api_version <= host_api_version) + retcode = TRUE; + } + return retcode; } static int @@ -4913,10 +6512,6 @@ dhdpcie_readshared(dhd_bus_t *bus) dhd_timeout_t tmo; shaddr = bus->dongle_ram_base + bus->ramsize - 4; - - DHD_INFO_HW4(("%s: ram_base: 0x%x ramsize 0x%x tcm: %p shaddr: 0x%x nvram_csm: 0x%x\n", - __FUNCTION__, bus->dongle_ram_base, bus->ramsize, - bus->tcm, shaddr, bus->nvram_csm)); /* start a timer for 5 seconds */ dhd_timeout_start(&tmo, MAX_READ_TIMEOUT); @@ -4954,57 +6549,75 @@ dhdpcie_readshared(dhd_bus_t *bus) sh->msgtrace_addr = ltoh32(sh->msgtrace_addr); sh->dma_rxoffset = ltoh32(sh->dma_rxoffset); sh->rings_info_ptr = ltoh32(sh->rings_info_ptr); + sh->flags2 = ltoh32(sh->flags2); -#ifdef DHD_DEBUG /* load bus console address */ bus->console_addr = sh->console_addr; -#endif /* Read the dma rx offset */ bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset; dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset); - DHD_ERROR(("%s: DMA RX offset from shared Area %d\n", __FUNCTION__, bus->dma_rxoffset)); + DHD_INFO(("%s: DMA RX offset from shared Area %d\n", __FUNCTION__, bus->dma_rxoffset)); - if (!(dhdpcie_check_firmware_compatible(sh->flags & PCIE_SHARED_VERSION_MASK, - PCIE_SHARED_VERSION))) + bus->api.fw_rev = sh->flags & PCIE_SHARED_VERSION_MASK; + if (!(dhdpcie_check_firmware_compatible(bus->api.fw_rev, PCIE_SHARED_VERSION))) { 
DHD_ERROR(("%s: pcie_shared version %d in dhd " "is older than pciedev_shared version %d in dongle\n", __FUNCTION__, PCIE_SHARED_VERSION, - sh->flags & PCIE_SHARED_VERSION_MASK)); + bus->api.fw_rev)); return BCME_ERROR; } + dhdpcie_update_bus_api_revisions(bus->api.fw_rev, PCIE_SHARED_VERSION); bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ? sizeof(uint16) : sizeof(uint32); - DHD_ERROR(("%s: Dongle advertizes %d size indices\n", + DHD_INFO(("%s: Dongle advertizes %d size indices\n", __FUNCTION__, bus->rw_index_sz)); +#ifdef IDLE_TX_FLOW_MGMT + if (sh->flags & PCIE_SHARED_IDLE_FLOW_RING) { + DHD_ERROR(("%s: FW Supports IdleFlow ring managment!\n", + __FUNCTION__)); + bus->enable_idle_flowring_mgmt = TRUE; + } +#endif /* IDLE_TX_FLOW_MGMT */ + + bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE; + bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? TRUE : FALSE; + + bus->dhd->idma_retention_ds = (sh->flags & PCIE_SHARED_IDMA_RETENTION_DS) ? TRUE : FALSE; + + bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK; + /* Does the FW support DMA'ing r/w indices */ if (sh->flags & PCIE_SHARED_DMA_INDEX) { + if (!bus->dhd->dma_ring_upd_overwrite) { + { + if (!IFRM_ENAB(bus->dhd)) { + bus->dhd->dma_h2d_ring_upd_support = TRUE; + } + bus->dhd->dma_d2h_ring_upd_support = TRUE; + } + } + if (bus->dhd->dma_d2h_ring_upd_support) + bus->dhd->d2h_sync_mode = 0; - DHD_ERROR(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n", + DHD_INFO(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n", __FUNCTION__, - (DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0), - (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0))); - - } else if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) || - DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) { - -#ifdef BCM_INDX_DMA - DHD_ERROR(("%s: Incompatible FW. FW does not support DMAing indices\n", + (bus->dhd->dma_h2d_ring_upd_support ? 
1 : 0), + (bus->dhd->dma_d2h_ring_upd_support ? 1 : 0))); + } else if (!(sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK)) { + DHD_ERROR(("%s FW has to support either dma indices or d2h sync\n", __FUNCTION__)); - return BCME_ERROR; -#endif - DHD_ERROR(("%s: Host supports DMAing indices but FW does not\n", - __FUNCTION__)); - bus->dhd->dma_d2h_ring_upd_support = FALSE; + return BCME_UNSUPPORTED; + } else { bus->dhd->dma_h2d_ring_upd_support = FALSE; + bus->dhd->dma_d2h_ring_upd_support = FALSE; } - /* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */ { ring_info_t ring_info; @@ -5017,47 +6630,88 @@ dhdpcie_readshared(dhd_bus_t *bus) bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr); - bus->max_sub_queues = ltoh16(ring_info.max_sub_queues); + if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6) { + bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings); + bus->max_submission_rings = ltoh16(ring_info.max_submission_queues); + bus->max_completion_rings = ltoh16(ring_info.max_completion_rings); + bus->max_cmn_rings = bus->max_submission_rings - bus->max_tx_flowrings; + bus->api.handle_mb_data = dhdpcie_read_handle_mb_data; + bus->use_mailbox = sh->flags & PCIE_SHARED_USE_MAILBOX; + } + else { + bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings); + bus->max_submission_rings = bus->max_tx_flowrings; + bus->max_completion_rings = BCMPCIE_D2H_COMMON_MSGRINGS; + bus->max_cmn_rings = BCMPCIE_H2D_COMMON_MSGRINGS; + bus->api.handle_mb_data = dhdpcie_handle_mb_data; + } + if (bus->max_completion_rings == 0) { + DHD_ERROR(("dongle completion rings are invalid %d\n", + bus->max_completion_rings)); + return BCME_ERROR; + } + if (bus->max_submission_rings == 0) { + DHD_ERROR(("dongle submission rings are invalid %d\n", + bus->max_submission_rings)); + return BCME_ERROR; + } + if (bus->max_tx_flowrings == 0) { + DHD_ERROR(("dongle txflow rings are invalid %d\n", bus->max_tx_flowrings)); + return BCME_ERROR; + } /* If both FW and Host support 
DMA'ing indices, allocate memory and notify FW * The max_sub_queues is read from FW initialized ring_info */ - if (DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) { + if (bus->dhd->dma_h2d_ring_upd_support || IDMA_ENAB(bus->dhd)) { dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, - H2D_DMA_INDX_WR_BUF, bus->max_sub_queues); + H2D_DMA_INDX_WR_BUF, bus->max_submission_rings); dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, - D2H_DMA_INDX_RD_BUF, BCMPCIE_D2H_COMMON_MSGRINGS); + D2H_DMA_INDX_RD_BUF, bus->max_completion_rings); if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) { DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices" - "Host will use w/r indices in TCM\n", - __FUNCTION__)); + "Host will use w/r indices in TCM\n", + __FUNCTION__)); bus->dhd->dma_h2d_ring_upd_support = FALSE; + bus->dhd->idma_enable = FALSE; } } - if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support)) { + if (bus->dhd->dma_d2h_ring_upd_support) { dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, - D2H_DMA_INDX_WR_BUF, BCMPCIE_D2H_COMMON_MSGRINGS); + D2H_DMA_INDX_WR_BUF, bus->max_completion_rings); dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, - H2D_DMA_INDX_RD_BUF, bus->max_sub_queues); + H2D_DMA_INDX_RD_BUF, bus->max_submission_rings); if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) { DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices" - "Host will use w/r indices in TCM\n", - __FUNCTION__)); + "Host will use w/r indices in TCM\n", + __FUNCTION__)); bus->dhd->dma_d2h_ring_upd_support = FALSE; } } + if (IFRM_ENAB(bus->dhd)) { + dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + H2D_IFRM_INDX_WR_BUF, bus->max_tx_flowrings); + + if (dma_indx_wr_buf != BCME_OK) { + DHD_ERROR(("%s: Failed to alloc memory for Implicit DMA\n", + __FUNCTION__)); + bus->dhd->ifrm_enable = FALSE; + } + } + /* read ringmem and ringstate ptrs from 
shared area and store in host variables */ dhd_fillup_ring_sharedptr_info(bus, &ring_info); - - bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t)); + if (dhd_msg_level & DHD_INFO_VAL) { + bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t)); + } DHD_INFO(("%s: ring_info\n", __FUNCTION__)); DHD_ERROR(("%s: max H2D queues %d\n", - __FUNCTION__, ltoh16(ring_info.max_sub_queues))); + __FUNCTION__, ltoh16(ring_info.max_tx_flowrings))); DHD_INFO(("mail box address\n")); DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n", @@ -5066,10 +6720,29 @@ dhdpcie_readshared(dhd_bus_t *bus) __FUNCTION__, bus->d2h_mb_data_ptr_addr)); } - bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK; DHD_INFO(("%s: d2h_sync_mode 0x%08x\n", __FUNCTION__, bus->dhd->d2h_sync_mode)); + bus->dhd->d2h_hostrdy_supported = + ((sh->flags & PCIE_SHARED_HOSTRDY_SUPPORT) == PCIE_SHARED_HOSTRDY_SUPPORT); + +#ifdef PCIE_OOB + bus->dhd->d2h_no_oob_dw = (sh->flags & PCIE_SHARED_NO_OOB_DW) ? TRUE : FALSE; +#endif /* PCIE_OOB */ + +#ifdef PCIE_INB_DW + bus->dhd->d2h_inband_dw = (sh->flags & PCIE_SHARED_INBAND_DS) ? TRUE : FALSE; +#endif /* PCIE_INB_DW */ + +#if defined(PCIE_OOB) && defined(PCIE_INB_DW) + DHD_ERROR(("FW supports Inband dw ? %s oob dw ? %s\n", + bus->dhd->d2h_inband_dw ? "Y":"N", + bus->dhd->d2h_no_oob_dw ? 
"N":"Y")); +#endif /* defined(PCIE_OOB) && defined(PCIE_INB_DW) */ + + bus->dhd->ext_trap_data_supported = + ((sh->flags2 & PCIE_SHARED2_EXTENDED_TRAP_DATA) == PCIE_SHARED2_EXTENDED_TRAP_DATA); + return BCME_OK; } /* dhdpcie_readshared */ @@ -5081,6 +6754,7 @@ dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info) uint16 j = 0; uint32 tcm_memloc; uint32 d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr; + uint16 max_tx_flowrings = bus->max_tx_flowrings; /* Ring mem ptr info */ /* Alloated in the order @@ -5139,8 +6813,13 @@ dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info) } /* Store txflow ring write/read pointers */ - for (j = 0; j < (bus->max_sub_queues - BCMPCIE_H2D_COMMON_MSGRINGS); - i++, j++) + if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) { + max_tx_flowrings -= BCMPCIE_H2D_COMMON_MSGRINGS; + } else { + /* Account for Debug info h2d ring located after the last tx flow ring */ + max_tx_flowrings = max_tx_flowrings + 1; + } + for (j = 0; j < max_tx_flowrings; i++, j++) { bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr; bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr; @@ -5154,6 +6833,13 @@ dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info) bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r)); } + /* store wr/rd pointers for debug info completion ring */ + bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr; + bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr; + d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz; + d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz; + DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i, + bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r)); } } /* dhd_fillup_ring_sharedptr_info */ @@ -5189,23 +6875,56 @@ int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) /* Set bus state according to enable result */ dhdp->busstate = DHD_BUS_DATA; + bus->d3_suspend_pending = FALSE; + +#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING) + if (bus->pcie_sh->flags2 & 
PCIE_SHARED_D2H_D11_TX_STATUS) { + uint32 flags2 = bus->pcie_sh->flags2; + uint32 addr; + + addr = bus->shared_addr + OFFSETOF(pciedev_shared_t, flags2); + flags2 |= PCIE_SHARED_H2D_D11_TX_STATUS; + ret = dhdpcie_bus_membytes(bus, TRUE, addr, + (uint8 *)&flags2, sizeof(flags2)); + if (ret < 0) { + DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n", + __FUNCTION__)); + return ret; + } + bus->pcie_sh->flags2 = flags2; + bus->dhd->d11_tx_status = TRUE; + } +#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */ if (!dhd_download_fw_on_driverload) dhd_dpc_enable(bus->dhd); - /* Enable the interrupt after device is up */ dhdpcie_bus_intr_enable(bus); /* bcmsdh_intr_unmask(bus->sdh); */ - #ifdef DHD_PCIE_RUNTIMEPM bus->idlecount = 0; bus->idletime = (int32)MAX_IDLE_COUNT; init_waitqueue_head(&bus->rpm_queue); mutex_init(&bus->pm_lock); +#else + bus->idletime = 0; #endif /* DHD_PCIE_RUNTIMEPM */ - bus->d3_ack_war_cnt = 0; +#ifdef PCIE_INB_DW + /* Initialize the lock to serialize Device Wake Inband activities */ + if (!bus->inb_lock) { + bus->inb_lock = dhd_os_spin_lock_init(bus->dhd->osh); + } +#endif + + + /* Make use_d0_inform TRUE for Rev 5 for backward compatibility */ + if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) { + bus->use_d0_inform = TRUE; + } else { + bus->use_d0_inform = FALSE; + } return ret; } @@ -5219,7 +6938,6 @@ dhdpcie_init_shared_addr(dhd_bus_t *bus) #ifdef DHD_PCIE_RUNTIMEPM dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0)); #endif /* DHD_PCIE_RUNTIMEPM */ - DHD_INFO_HW4(("%s: tcm: %p, addr: 0x%x val: 0x%x\n", __FUNCTION__, bus->tcm, addr, val)); dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val)); } @@ -5228,8 +6946,10 @@ bool dhdpcie_chipmatch(uint16 vendor, uint16 device) { if (vendor != PCI_VENDOR_ID_BROADCOM) { +#ifndef DHD_EFI DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device)); +#endif /* DHD_EFI */ return (-ENODEV); } @@ -5246,10 +6966,18 @@ dhdpcie_chipmatch(uint16 vendor, uint16 
device) (device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID)) return 0; + if ((device == BCM4371_D11AC_ID) || (device == BCM4371_D11AC2G_ID) || + (device == BCM4371_D11AC5G_ID) || (device == BCM4371_CHIP_ID)) + return 0; + if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) || (device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device)) return 0; + if ((device == BCM43452_D11AC_ID) || (device == BCM43452_D11AC2G_ID) || + (device == BCM43452_D11AC5G_ID)) + return 0; + if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) || (device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID)) return 0; @@ -5282,6 +7010,21 @@ dhdpcie_chipmatch(uint16 vendor, uint16 device) (device == BCM43596_D11AC5G_ID)) return 0; + if ((device == BCM43597_D11AC_ID) || (device == BCM43597_D11AC2G_ID) || + (device == BCM43597_D11AC5G_ID)) + return 0; + + if ((device == BCM4364_D11AC_ID) || (device == BCM4364_D11AC2G_ID) || + (device == BCM4364_D11AC5G_ID) || (device == BCM4364_CHIP_ID)) + return 0; + + if ((device == BCM4347_D11AC_ID) || (device == BCM4347_D11AC2G_ID) || + (device == BCM4347_D11AC5G_ID) || (device == BCM4347_CHIP_ID)) + return 0; + + if ((device == BCM4361_D11AC_ID) || (device == BCM4361_D11AC2G_ID) || + (device == BCM4361_D11AC5G_ID) || (device == BCM4361_CHIP_ID)) + return 0; if ((device == BCM4365_D11AC_ID) || (device == BCM4365_D11AC2G_ID) || (device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID)) @@ -5290,8 +7033,9 @@ dhdpcie_chipmatch(uint16 vendor, uint16 device) if ((device == BCM4366_D11AC_ID) || (device == BCM4366_D11AC2G_ID) || (device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID)) return 0; - +#ifndef DHD_EFI DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device)); +#endif return (-ENODEV); } /* dhdpcie_chipmatch */ @@ -5334,9 +7078,11 @@ dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b) } /* Check ChipID */ - if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && 
!BCM4345_CHIP((uint16)bus->sih->chip)) { - DHD_ERROR(("%s: cc_nvmdump cmd. supported for 4350/4345 only\n", - __FUNCTION__)); + if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) && + ((uint16)bus->sih->chip != BCM4355_CHIP_ID) && + ((uint16)bus->sih->chip != BCM4364_CHIP_ID)) { + DHD_ERROR(("%s: cc_nvmdump cmd. supported for Olympic chips" + "4350/4345/4355/4364 only\n", __FUNCTION__)); return BCME_UNSUPPORTED; } @@ -5354,28 +7100,60 @@ dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b) if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT) == OTPL_WRAP_TYPE_40NM) { /* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */ - otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE) + /* Chipcommon rev51 is a variation on rev45 and does not support + * the latest OTP configuration. + */ + if (chipc_corerev != 51 && chipc_corerev >= 49) { + otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) + >> OTPL_ROW_SIZE_SHIFT) + 1) * 1024; + bcm_bprintf(b, "(Size %d bits)\n", otp_size); + } else { + otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024; - bcm_bprintf(b, "(Size %d bits)\n", otp_size); + bcm_bprintf(b, "(Size %d bits)\n", otp_size); + } } else { /* This part is untested since newer chips have 40nm OTP */ - otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE) - >> CC_CAP_OTPSIZE_SHIFT]; - bcm_bprintf(b, "(Size %d bits)\n", otp_size); - DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n", - __FUNCTION__)); + /* Chipcommon rev51 is a variation on rev45 and does not support + * the latest OTP configuration. 
+ */ + if (chipc_corerev != 51 && chipc_corerev >= 49) { + otp_size = otp_size_65nm[(chipcregs->otplayout & OTPL_ROW_SIZE_MASK) + >> OTPL_ROW_SIZE_SHIFT]; + bcm_bprintf(b, "(Size %d bits)\n", otp_size); + } else { + otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE) + >> CC_CAP_OTPSIZE_SHIFT]; + bcm_bprintf(b, "(Size %d bits)\n", otp_size); + DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n", + __FUNCTION__)); + } } } - if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) && - ((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) { - DHD_ERROR(("%s: SPROM and OTP could not be found \n", - __FUNCTION__)); - return BCME_NOTFOUND; + /* Chipcommon rev51 is a variation on rev45 and does not support + * the latest OTP configuration. + */ + if (chipc_corerev != 51 && chipc_corerev >= 49) { + if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) && + ((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) { + DHD_ERROR(("%s: SPROM and OTP could not be found " + "sromcontrol = %x, otplayout = %x \n", + __FUNCTION__, chipcregs->sromcontrol, chipcregs->otplayout)); + return BCME_NOTFOUND; + } + } else { + if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) && + ((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) { + DHD_ERROR(("%s: SPROM and OTP could not be found " + "sromcontrol = %x, capablities = %x \n", + __FUNCTION__, chipcregs->sromcontrol, chipcregs->capabilities)); + return BCME_NOTFOUND; + } } /* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */ - if ((chipcregs->sromcontrol & SRC_OTPSEL) && + if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) && (chipcregs->sromcontrol & SRC_OTPPRESENT)) { bcm_bprintf(b, "OTP Strap selected.\n" @@ -5402,23 +7180,38 @@ dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b) DHD_ERROR(("ChipCommon Regs. 
not initialized\n")); return BCME_NOTREADY; } else { - bcm_bprintf(b, "\n OffSet:"); + bcm_bprintf(b, "\n OffSet:"); - /* Point to the SPROM/OTP shadow in ChipCommon */ - nvm_shadow = chipcregs->sromotp; + /* Chipcommon rev51 is a variation on rev45 and does not support + * the latest OTP configuration. + */ + if (chipc_corerev != 51 && chipc_corerev >= 49) { + /* Chip common can read only 8kbits, + * for ccrev >= 49 otp size is around 12 kbits so use GCI core + */ + nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0); + } else { + /* Point to the SPROM/OTP shadow in ChipCommon */ + nvm_shadow = chipcregs->sromotp; + } - /* - * Read 16 bits / iteration. - * dump_size & dump_offset in 16-bit words - */ - while (dump_offset < dump_size) { - if (dump_offset % 2 == 0) - /* Print the offset in the shadow space in Bytes */ - bcm_bprintf(b, "\n 0x%04x", dump_offset * 2); + if (nvm_shadow == NULL) { + DHD_ERROR(("%s: NVM Shadow is not intialized\n", __FUNCTION__)); + return BCME_NOTFOUND; + } - bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset)); - dump_offset += 0x1; - } + /* + * Read 16 bits / iteration. 
+ * dump_size & dump_offset in 16-bit words + */ + while (dump_offset < dump_size) { + if (dump_offset % 2 == 0) + /* Print the offset in the shadow space in Bytes */ + bcm_bprintf(b, "\n 0x%04x", dump_offset * 2); + + bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset)); + dump_offset += 0x1; + } } /* Switch back to the original core */ @@ -5453,11 +7246,14 @@ void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node) } ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); + /* Reinitialise flowring's queue */ + dhd_flow_queue_reinit(bus->dhd, queue, FLOW_RING_QUEUE_THRESHOLD); flow_ring_node->status = FLOW_RING_STATUS_CLOSED; flow_ring_node->active = FALSE; DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + /* Hold flowring_list_lock to ensure no race condition while accessing the List */ DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); dll_delete(&flow_ring_node->list); DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); @@ -5468,7 +7264,7 @@ void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node) /* Free the flowid back to the flowid allocator */ dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex, - flow_ring_node->flowid); + flow_ring_node->flowid); } /** @@ -5529,9 +7325,8 @@ dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status) * active list only after its truely created, which is after * receiving the create response message from the Host. 
*/ - DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); - dll_prepend(&bus->const_flowring, &flow_ring_node->list); + dll_prepend(&bus->flowring_active_list, &flow_ring_node->list); DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */ @@ -5554,8 +7349,7 @@ dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg) DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) { DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); - DHD_ERROR(("%s :Delete Pending Flow %d\n", - __FUNCTION__, flow_ring_node->flowid)); + DHD_ERROR(("%s :Delete Pending flowid %u\n", __FUNCTION__, flow_ring_node->flowid)); return BCME_ERROR; } flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING; @@ -5604,7 +7398,6 @@ dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status) } -/** This function is not called. Obsolete ? */ int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg) { void *pkt; @@ -5612,12 +7405,16 @@ int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg) flow_ring_node_t *flow_ring_node; unsigned long flags; - DHD_INFO(("%s :Flow Delete\n", __FUNCTION__)); + DHD_INFO(("%s :Flow Flush\n", __FUNCTION__)); flow_ring_node = (flow_ring_node_t *)arg; - queue = &flow_ring_node->queue; /* queue associated with flow ring */ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + queue = &flow_ring_node->queue; /* queue associated with flow ring */ + /* Flow ring status will be set back to FLOW_RING_STATUS_OPEN + * once flow ring flush response is received for this flowring node. 
+ */ + flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING; #ifdef DHDTCPACK_SUPPRESS /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, @@ -5637,7 +7434,6 @@ int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg) /* Send Msg to device about flow ring flush */ dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node); - flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING; return BCME_OK; } @@ -5662,7 +7458,7 @@ dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status) uint32 dhd_bus_max_h2d_queues(struct dhd_bus *bus) { - return bus->max_sub_queues; + return bus->max_submission_rings; } /* To be symmetric with SDIO */ @@ -5678,6 +7474,186 @@ dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val) dhdp->bus->is_linkdown = val; } +#ifdef IDLE_TX_FLOW_MGMT +/* resume request */ +int +dhd_bus_flow_ring_resume_request(dhd_bus_t *bus, void *arg) +{ + flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg; + + DHD_ERROR(("%s :Flow Resume Request flow id %u\n", __FUNCTION__, flow_ring_node->flowid)); + + flow_ring_node->status = FLOW_RING_STATUS_RESUME_PENDING; + + /* Send Msg to device about flow ring resume */ + dhd_prot_flow_ring_resume(bus->dhd, flow_ring_node); + + return BCME_OK; +} + +/* add the node back to active flowring */ +void +dhd_bus_flow_ring_resume_response(dhd_bus_t *bus, uint16 flowid, int32 status) +{ + + flow_ring_node_t *flow_ring_node; + + DHD_TRACE(("%s :flowid %d \n", __FUNCTION__, flowid)); + + flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); + ASSERT(flow_ring_node->flowid == flowid); + + if (status != BCME_OK) { + DHD_ERROR(("%s Error Status = %d \n", + __FUNCTION__, status)); + return; + } + + DHD_TRACE(("%s :Number of pkts queued in FlowId:%d is -> %u!!\n", + __FUNCTION__, flow_ring_node->flowid, flow_ring_node->queue.len)); + + flow_ring_node->status = FLOW_RING_STATUS_OPEN; + + dhd_bus_schedule_queue(bus, flowid, FALSE); + return; +} + +/* scan the flow rings in active list for idle time out */ 
+void +dhd_bus_check_idle_scan(dhd_bus_t *bus) +{ + uint64 time_stamp; /* in millisec */ + uint64 diff; + + time_stamp = OSL_SYSUPTIME(); + diff = time_stamp - bus->active_list_last_process_ts; + + if (diff > IDLE_FLOW_LIST_TIMEOUT) { + dhd_bus_idle_scan(bus); + bus->active_list_last_process_ts = OSL_SYSUPTIME(); + } + + return; +} + + +/* scan the nodes in active list till it finds a non idle node */ +void +dhd_bus_idle_scan(dhd_bus_t *bus) +{ + dll_t *item, *prev; + flow_ring_node_t *flow_ring_node; + uint64 time_stamp, diff; + unsigned long flags; + uint16 ringid[MAX_SUSPEND_REQ]; + uint16 count = 0; + + time_stamp = OSL_SYSUPTIME(); + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + + for (item = dll_tail_p(&bus->flowring_active_list); + !dll_end(&bus->flowring_active_list, item); item = prev) { + prev = dll_prev_p(item); + + flow_ring_node = dhd_constlist_to_flowring(item); + + if (flow_ring_node->flowid == (bus->max_submission_rings - 1)) + continue; + + if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) { + /* Takes care of deleting zombie rings */ + /* delete from the active list */ + DHD_INFO(("deleting flow id %u from active list\n", + flow_ring_node->flowid)); + __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node); + continue; + } + + diff = time_stamp - flow_ring_node->last_active_ts; + + if ((diff > IDLE_FLOW_RING_TIMEOUT) && !(flow_ring_node->queue.len)) { + DHD_ERROR(("\nSuspending flowid %d\n", flow_ring_node->flowid)); + /* delete from the active list */ + __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node); + flow_ring_node->status = FLOW_RING_STATUS_SUSPENDED; + ringid[count] = flow_ring_node->flowid; + count++; + if (count == MAX_SUSPEND_REQ) { + /* create a batch message now!! */ + dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count); + count = 0; + } + + } else { + + /* No more scanning, break from here! 
*/ + break; + } + } + + if (count) { + dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count); + } + + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); + + return; +} + +void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node) +{ + unsigned long flags; + dll_t* list; + + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + /* check if the node is already at head, otherwise delete it and prepend */ + list = dll_head_p(&bus->flowring_active_list); + if (&flow_ring_node->list != list) { + dll_delete(&flow_ring_node->list); + dll_prepend(&bus->flowring_active_list, &flow_ring_node->list); + } + + /* update flow ring timestamp */ + flow_ring_node->last_active_ts = OSL_SYSUPTIME(); + + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); + + return; +} + +void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node) +{ + unsigned long flags; + + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + + dll_prepend(&bus->flowring_active_list, &flow_ring_node->list); + /* update flow ring timestamp */ + flow_ring_node->last_active_ts = OSL_SYSUPTIME(); + + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); + + return; +} +void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node) +{ + dll_delete(&flow_ring_node->list); +} + +void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node) +{ + unsigned long flags; + + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + + __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node); + + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); + + return; +} +#endif /* IDLE_TX_FLOW_MGMT */ + int dhdpcie_bus_clock_start(struct dhd_bus *bus) { @@ -5747,6 +7723,81 @@ dhd_bus_release_dongle(struct dhd_bus *bus) return 0; } +void +dhdpcie_cto_init(struct dhd_bus *bus, bool enable) +{ + if (enable) { + 
dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, + PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR); + dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, SPROM_BACKPLANE_EN); + + if (bus->dhd->cto_threshold == 0) { + bus->dhd->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT; + } + + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ctoctrl), ~0, + ((bus->dhd->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) & + PCIE_CTO_TO_THRESHHOLD_MASK) | + ((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) & + PCIE_CTO_CLKCHKCNT_MASK) | + PCIE_CTO_ENAB_MASK); + } else { + dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0); + dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, 0); + + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0); + } +} + +static void +dhdpcie_cto_error_recovery(struct dhd_bus *bus) +{ + uint32 pci_intmask, err_status; + uint8 i = 0; + + pci_intmask = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_MASK, 4); + dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, pci_intmask & ~PCI_CTO_INT_MASK); + + DHD_OS_WAKE_LOCK(bus->dhd); + + /* reset backplane */ + dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, SPROM_CFG_TO_SB_RST); + + /* clear timeout error */ + while (1) { + err_status = si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, dm_errlog), + 0, 0); + if (err_status & PCIE_CTO_ERR_MASK) { + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, dm_errlog), + ~0, PCIE_CTO_ERR_MASK); + } else { + break; + } + OSL_DELAY(CTO_TO_CLEAR_WAIT_MS * 1000); + i++; + if (i > CTO_TO_CLEAR_WAIT_MAX_CNT) { + DHD_ERROR(("cto recovery fail\n")); + + DHD_OS_WAKE_UNLOCK(bus->dhd); + return; + } + } + + /* clear interrupt status */ + dhdpcie_bus_cfg_write_dword(bus, PCI_INT_STATUS, 4, PCI_CTO_INT_MASK); + + /* Halt ARM & remove reset */ + /* TBD : we can add ARM Halt here in case */ + + DHD_ERROR(("cto recovery success\n")); + + DHD_OS_WAKE_UNLOCK(bus->dhd); +} + #ifdef BCMPCIE_OOB_HOST_WAKE int 
dhd_bus_oob_intr_register(dhd_pub_t *dhdp) @@ -5766,3 +7817,631 @@ dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable) dhdpcie_oob_intr_set(dhdp->bus, enable); } #endif /* BCMPCIE_OOB_HOST_WAKE */ + + + +bool +dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus) +{ + return bus->dhd->d2h_hostrdy_supported; +} + +void +dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr) +{ + dhd_bus_t *bus = pub->bus; + uint32 coreoffset = index << 12; + uint32 core_addr = SI_ENUM_BASE + coreoffset; + uint32 value; + + + while (first_addr <= last_addr) { + core_addr = SI_ENUM_BASE + coreoffset + first_addr; + if (si_backplane_access(bus->sih, core_addr, 4, &value, TRUE) != BCME_OK) { + DHD_ERROR(("Invalid size/addr combination \n")); + } + DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr, value)); + first_addr = first_addr + 4; + } +} + +#ifdef PCIE_OOB +bool +dhdpcie_bus_get_pcie_oob_dw_supported(dhd_bus_t *bus) +{ + if (!bus->dhd) + return FALSE; + if (bus->oob_enabled) { + return !bus->dhd->d2h_no_oob_dw; + } else { + return FALSE; + } +} +#endif /* PCIE_OOB */ + +void +dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option) +{ + DHD_ERROR(("ENABLING DW:%d\n", dw_option)); + bus->dw_option = dw_option; +} + +#ifdef PCIE_INB_DW +bool +dhdpcie_bus_get_pcie_inband_dw_supported(dhd_bus_t *bus) +{ + if (!bus->dhd) + return FALSE; + if (bus->inb_enabled) { + return bus->dhd->d2h_inband_dw; + } else { + return FALSE; + } +} + +void +dhdpcie_bus_set_pcie_inband_dw_state(dhd_bus_t *bus, enum dhd_bus_ds_state state) +{ + if (!INBAND_DW_ENAB(bus)) + return; + + DHD_INFO(("%s:%d\n", __FUNCTION__, state)); + bus->dhd->ds_state = state; + if (state == DW_DEVICE_DS_DISABLED_WAIT || state == DW_DEVICE_DS_D3_INFORM_WAIT) { + bus->ds_exit_timeout = 100; + } + if (state == DW_DEVICE_HOST_WAKE_WAIT) { + bus->host_sleep_exit_timeout = 100; + } + if (state == DW_DEVICE_DS_DEV_WAKE) { + bus->ds_exit_timeout = 0; + } + if (state == DW_DEVICE_DS_ACTIVE) { + 
bus->host_sleep_exit_timeout = 0; + } +} + +enum dhd_bus_ds_state +dhdpcie_bus_get_pcie_inband_dw_state(dhd_bus_t *bus) +{ + if (!INBAND_DW_ENAB(bus)) + return DW_DEVICE_DS_INVALID; + return bus->dhd->ds_state; +} +#endif /* PCIE_INB_DW */ + +bool +dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus) +{ + if (!bus->dhd) + return FALSE; + else if (bus->idma_enabled) { + return bus->dhd->idma_enable; + } else { + return FALSE; + } +} + +bool +dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus) +{ + if (!bus->dhd) + return FALSE; + else if (bus->ifrm_enabled) { + return bus->dhd->ifrm_enable; + } else { + return FALSE; + } +} + + +void +dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf) +{ + trap_t *tr = &bus->dhd->last_trap_info; + bcm_bprintf(strbuf, + "\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x," + " lp 0x%x, rpc 0x%x" + "\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, " + "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n", + ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr), + ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc), + ltoh32(bus->pcie_sh->trap_addr), + ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3), + ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7)); +} + +int +dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read) +{ + int bcmerror = 0; + struct dhd_bus *bus = dhdp->bus; + + if (si_backplane_access(bus->sih, addr, size, data, read) != BCME_OK) { + DHD_ERROR(("Invalid size/addr combination \n")); + bcmerror = BCME_ERROR; + } + + return bcmerror; +} + +int +dhd_get_idletime(dhd_pub_t *dhd) +{ + return dhd->bus->idletime; +} + +#ifdef DHD_SSSR_DUMP + +static INLINE void +dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read) +{ + OSL_DELAY(1); + si_backplane_access(dhd->bus->sih, addr, sizeof(uint), val, read); + DHD_ERROR(("%s: addr:0x%x val:0x%x read:%d\n", __FUNCTION__, addr, *val, read)); + return; +} + +static int 
+dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size, + uint addr_reg, uint data_reg) +{ + uint addr; + uint val = 0; + int i; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + if (!buf) { + DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (!fifo_size) { + DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* Set the base address offset to 0 */ + addr = addr_reg; + val = 0; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + addr = data_reg; + /* Read 4 bytes at once and loop for fifo_size / 4 */ + for (i = 0; i < fifo_size / 4; i++) { + si_backplane_access(dhd->bus->sih, addr, sizeof(uint), &val, TRUE); + buf[i] = val; + OSL_DELAY(1); + } + return BCME_OK; +} + +static int +dhdpcie_get_sssr_vasip_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size, + uint addr_reg) +{ + uint addr; + uint val = 0; + int i; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + if (!buf) { + DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (!fifo_size) { + DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* Check if vasip clk is disabled, if yes enable it */ + addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl; + dhd_sbreg_op(dhd, addr, &val, TRUE); + if (!val) { + val = 1; + dhd_sbreg_op(dhd, addr, &val, FALSE); + } + + addr = addr_reg; + /* Read 4 bytes at once and loop for fifo_size / 4 */ + for (i = 0; i < fifo_size / 4; i++, addr += 4) { + si_backplane_access(dhd->bus->sih, addr, sizeof(uint), &val, TRUE); + buf[i] = val; + OSL_DELAY(1); + } + return BCME_OK; +} + +static int +dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd) +{ + uint addr; + uint val; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + /* conditionally clear bits [11:8] of PowerCtrl */ + addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl; + dhd_sbreg_op(dhd, addr, &val, TRUE); + if (!(val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask)) { + addr = 
dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl; + val = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask; + dhd_sbreg_op(dhd, addr, &val, FALSE); + } + return BCME_OK; +} + +static int +dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd) +{ + uint addr; + uint val; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + /* conditionally clear bits [11:8] of PowerCtrl */ + addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl; + dhd_sbreg_op(dhd, addr, &val, TRUE); + if (val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) { + addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl; + val = 0; + dhd_sbreg_op(dhd, addr, &val, FALSE); + } + return BCME_OK; +} + +static int +dhdpcie_clear_intmask_and_timer(dhd_pub_t *dhd) +{ + uint addr; + uint val; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + /* clear chipcommon intmask */ + addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.intmask; + val = 0x0; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + /* clear PMUIntMask0 */ + addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask0; + val = 0x0; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + /* clear PMUIntMask1 */ + addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask1; + val = 0x0; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + /* clear res_req_timer */ + addr = dhd->sssr_reg_info.pmu_regs.base_regs.resreqtimer; + val = 0x0; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + /* clear macresreqtimer */ + addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer; + val = 0x0; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + /* clear macresreqtimer1 */ + addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer1; + val = 0x0; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + /* clear VasipClkEn */ + if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) { + addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl; + val = 0x0; + dhd_sbreg_op(dhd, addr, &val, FALSE); + } + + return BCME_OK; +} + +static int +dhdpcie_d11_check_outofreset(dhd_pub_t *dhd) +{ + int i; + 
uint addr; + uint val = 0; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + for (i = 0; i < MAX_NUM_D11CORES; i++) { + /* Check if bit 0 of resetctrl is cleared */ + addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl; + dhd_sbreg_op(dhd, addr, &val, TRUE); + if (!(val & 1)) { + dhd->sssr_d11_outofreset[i] = TRUE; + } else { + dhd->sssr_d11_outofreset[i] = FALSE; + } + DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n", + __FUNCTION__, i, dhd->sssr_d11_outofreset[i])); + } + return BCME_OK; +} + +static int +dhdpcie_d11_clear_clk_req(dhd_pub_t *dhd) +{ + int i; + uint addr; + uint val = 0; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + for (i = 0; i < MAX_NUM_D11CORES; i++) { + if (dhd->sssr_d11_outofreset[i]) { + /* clear request clk only if itopoobb is non zero */ + addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.itopoobb; + dhd_sbreg_op(dhd, addr, &val, TRUE); + if (val != 0) { + /* clear clockcontrolstatus */ + addr = dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus; + val = + dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus_val; + dhd_sbreg_op(dhd, addr, &val, FALSE); + } + } + } + return BCME_OK; +} + +static int +dhdpcie_arm_clear_clk_req(dhd_pub_t *dhd) +{ + uint addr; + uint val = 0; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + /* Check if bit 0 of resetctrl is cleared */ + addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl; + dhd_sbreg_op(dhd, addr, &val, TRUE); + if (!(val & 1)) { + /* clear request clk only if itopoobb is non zero */ + addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.itopoobb; + dhd_sbreg_op(dhd, addr, &val, TRUE); + if (val != 0) { + /* clear clockcontrolstatus */ + addr = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus; + val = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus_val; + dhd_sbreg_op(dhd, addr, &val, FALSE); + } + } + return BCME_OK; +} + +static int +dhdpcie_pcie_clear_clk_req(dhd_pub_t *dhd) +{ + uint addr; + uint val = 0; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + /* clear 
request clk only if itopoobb is non zero */ + addr = dhd->sssr_reg_info.pcie_regs.wrapper_regs.itopoobb; + dhd_sbreg_op(dhd, addr, &val, TRUE); + if (val) { + /* clear clockcontrolstatus */ + addr = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus; + val = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus_val; + dhd_sbreg_op(dhd, addr, &val, FALSE); + } + return BCME_OK; +} + +static int +dhdpcie_pcie_send_ltrsleep(dhd_pub_t *dhd) +{ + uint addr; + uint val = 0; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + addr = dhd->sssr_reg_info.pcie_regs.base_regs.ltrstate; + val = LTR_ACTIVE; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + val = LTR_SLEEP; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + return BCME_OK; +} + +static int +dhdpcie_clear_clk_req(dhd_pub_t *dhd) +{ + DHD_ERROR(("%s\n", __FUNCTION__)); + + dhdpcie_arm_clear_clk_req(dhd); + + dhdpcie_d11_clear_clk_req(dhd); + + dhdpcie_pcie_clear_clk_req(dhd); + + return BCME_OK; +} + +static int +dhdpcie_bring_d11_outofreset(dhd_pub_t *dhd) +{ + int i; + uint addr; + uint val = 0; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + for (i = 0; i < MAX_NUM_D11CORES; i++) { + if (dhd->sssr_d11_outofreset[i]) { + /* disable core by setting bit 0 */ + addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl; + val = 1; + dhd_sbreg_op(dhd, addr, &val, FALSE); + OSL_DELAY(6000); + + addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl; + val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[0]; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[1]; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + /* enable core by clearing bit 0 */ + addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl; + val = 0; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl; + val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[2]; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + val = 
dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[3]; + dhd_sbreg_op(dhd, addr, &val, FALSE); + + val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[4]; + dhd_sbreg_op(dhd, addr, &val, FALSE); + } + } + return BCME_OK; +} + +static int +dhdpcie_sssr_dump_get_before_sr(dhd_pub_t *dhd) +{ + int i; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + for (i = 0; i < MAX_NUM_D11CORES; i++) { + if (dhd->sssr_d11_outofreset[i]) { + dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_before[i], + dhd->sssr_reg_info.mac_regs[i].sr_size, + dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress, + dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata); + } + } + + if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) { + dhdpcie_get_sssr_vasip_dump(dhd, dhd->sssr_vasip_buf_before, + dhd->sssr_reg_info.vasip_regs.vasip_sr_size, + dhd->sssr_reg_info.vasip_regs.vasip_sr_addr); + } + + return BCME_OK; +} + +static int +dhdpcie_sssr_dump_get_after_sr(dhd_pub_t *dhd) +{ + int i; + + DHD_ERROR(("%s\n", __FUNCTION__)); + + for (i = 0; i < MAX_NUM_D11CORES; i++) { + if (dhd->sssr_d11_outofreset[i]) { + dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_after[i], + dhd->sssr_reg_info.mac_regs[i].sr_size, + dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress, + dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata); + } + } + + if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) { + dhdpcie_get_sssr_vasip_dump(dhd, dhd->sssr_vasip_buf_after, + dhd->sssr_reg_info.vasip_regs.vasip_sr_size, + dhd->sssr_reg_info.vasip_regs.vasip_sr_addr); + } + + return BCME_OK; +} + +int +dhdpcie_sssr_dump(dhd_pub_t *dhd) +{ + if (!dhd->sssr_inited) { + DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__)); + return BCME_ERROR; + } + + if (dhd->bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__)); + return BCME_ERROR; + } + + dhdpcie_d11_check_outofreset(dhd); + + DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__)); + if (dhdpcie_sssr_dump_get_before_sr(dhd) != BCME_OK) { + 
DHD_ERROR(("%s: dhdpcie_sssr_dump_get_before_sr failed\n", __FUNCTION__)); + return BCME_ERROR; + } + + dhdpcie_clear_intmask_and_timer(dhd); + dhdpcie_suspend_chipcommon_powerctrl(dhd); + dhdpcie_clear_clk_req(dhd); + dhdpcie_pcie_send_ltrsleep(dhd); + + /* Wait for some time before Restore */ + OSL_DELAY(6000); + + dhdpcie_resume_chipcommon_powerctrl(dhd); + dhdpcie_bring_d11_outofreset(dhd); + + DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__)); + if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) { + DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__)); + return BCME_ERROR; + } + + dhd_schedule_sssr_dump(dhd); + + return BCME_OK; +} +#endif /* DHD_SSSR_DUMP */ + +#ifdef DHD_WAKE_STATUS +wake_counts_t* +dhd_bus_get_wakecount(dhd_pub_t *dhd) +{ + if (!dhd->bus) { + return NULL; + } + return &dhd->bus->wake_counts; +} +int +dhd_bus_get_bus_wake(dhd_pub_t *dhd) +{ + return bcmpcie_set_get_wake(dhd->bus, 0); +} +#endif /* DHD_WAKE_STATUS */ + +#ifdef BCM_ASLR_HEAP +/* Writes random number(s) to the TCM. FW upon initialization reads the metadata + * of the random number and then based on metadata, reads the random number from the TCM. 
+ */ +static void +dhdpcie_wrt_rnd(struct dhd_bus *bus) +{ + bcm_rand_metadata_t rnd_data; + uint32 rand_no; + uint32 count = 1; /* start with 1 random number */ + + uint32 addr = bus->dongle_ram_base + (bus->ramsize - BCM_NVRAM_OFFSET_TCM) - + ((bus->nvram_csm & 0xffff)* BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data)); + rnd_data.signature = htol32(BCM_RNG_SIGNATURE); + rnd_data.count = htol32(count); + /* write the metadata about random number */ + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rnd_data, sizeof(rnd_data)); + /* scale back by number of random number counts */ + addr -= sizeof(count) * count; + /* Now write the random number(s) */ + rand_no = htol32(dhd_get_random_number()); + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rand_no, sizeof(rand_no)); +} +#endif /* BCM_ASLR_HEAP */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_pcie.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_pcie.h index 511d00e8ce2c..eb8de62956bf 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_pcie.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_pcie.h @@ -1,7 +1,7 @@ /* * Linux DHD Bus Module for PCIE * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_pcie.h 607608 2015-12-21 13:14:19Z $ + * $Id: dhd_pcie.h 707536 2017-06-28 04:23:48Z $ */ @@ -42,11 +42,11 @@ #endif /* CONFIG_PCI_MSM */ #endif /* CONFIG_ARCH_MSM */ #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY -#ifdef CONFIG_SOC_EXYNOS8890 +#if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) #include extern int exynos_pcie_register_event(struct exynos_pcie_register_event *reg); extern int exynos_pcie_deregister_event(struct exynos_pcie_register_event *reg); -#endif /* CONFIG_SOC_EXYNOS8890 */ +#endif /* 
CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 */ #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ #endif /* SUPPORT_LINKDOWN_RECOVERY */ @@ -88,17 +88,13 @@ extern int exynos_pcie_deregister_event(struct exynos_pcie_register_event *reg); #define struct_pcie_register_event struct msm_pcie_register_event #endif /* CONFIG_ARCH_MSM */ #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY -#ifdef CONFIG_SOC_EXYNOS8890 +#if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) #define struct_pcie_notify struct exynos_pcie_notify #define struct_pcie_register_event struct exynos_pcie_register_event -#endif /* CONFIG_SOC_EXYNOS8890 */ +#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 */ #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ #endif /* SUPPORT_LINKDOWN_RECOVERY */ -/* - * Router with 4366 can have 128 stations and 16 BSS, - * hence (128 stations x 4 access categories for ucast) + 16 bc/mc flowrings - */ #define MAX_DHD_TX_FLOWS 320 /* user defined data structures */ @@ -106,11 +102,47 @@ extern int exynos_pcie_deregister_event(struct exynos_pcie_register_event *reg); #define CONSOLE_LINE_MAX 192 #define CONSOLE_BUFFER_MAX (8 * 1024) -#ifndef MAX_CNTL_D3ACK_TIMEOUT -#define MAX_CNTL_D3ACK_TIMEOUT 2 -#endif /* MAX_CNTL_D3ACK_TIMEOUT */ +#ifdef IDLE_TX_FLOW_MGMT +#define IDLE_FLOW_LIST_TIMEOUT 5000 +#define IDLE_FLOW_RING_TIMEOUT 5000 +#endif /* IDLE_TX_FLOW_MGMT */ -#ifdef DHD_DEBUG +#ifdef DEVICE_TX_STUCK_DETECT +#define DEVICE_TX_STUCK_CKECK_TIMEOUT 1000 /* 1 sec */ +#define DEVICE_TX_STUCK_TIMEOUT 10000 /* 10 secs */ +#define DEVICE_TX_STUCK_WARN_DURATION (DEVICE_TX_STUCK_TIMEOUT / DEVICE_TX_STUCK_CKECK_TIMEOUT) +#define DEVICE_TX_STUCK_DURATION (DEVICE_TX_STUCK_WARN_DURATION * 2) +#endif /* DEVICE_TX_STUCK_DETECT */ + +/* implicit DMA for h2d wr and d2h rd indice from Host memory to TCM */ +#define IDMA_ENAB(dhd) ((dhd)->idma_enable) +#define IDMA_ACTIVE(dhd) (((dhd)->idma_enable) && ((dhd)->idma_inited)) + +#define IDMA_DS_ENAB(dhd) ((dhd)->idma_retention_ds) +#define 
IDMA_DS_ACTIVE(dhd) ((dhd)->bus->dongle_in_ds) + +/* IFRM (Implicit Flow Ring Manager enable and inited */ +#define IFRM_ENAB(dhd) ((dhd)->ifrm_enable) +#define IFRM_ACTIVE(dhd) (((dhd)->ifrm_enable) && ((dhd)->ifrm_inited)) + +/* PCIE CTO Prevention and Recovery */ +#define PCIECTO_ENAB(dhd) ((dhd)->cto_enable) + +/* Implicit DMA index usage : + * Index 0 for h2d write index transfer + * Index 1 for d2h read index transfer + */ +#define IDMA_IDX0 0 +#define IDMA_IDX1 1 +#define IDMA_IDX2 2 +#define IDMA_IDX3 3 + +#define DHDPCIE_CONFIG_HDR_SIZE 16 +#define DHDPCIE_CONFIG_CHECK_DELAY_MS 10 /* 10ms */ +#define DHDPCIE_CONFIG_CHECK_RETRY_COUNT 20 +#define DHDPCIE_DONGLE_PWR_TOGGLE_DELAY 1000 /* 1ms in units of us */ +#define DHDPCIE_PM_D3_DELAY 200000 /* 200ms in units of us */ +#define DHDPCIE_PM_D2_DELAY 200 /* 200us */ typedef struct dhd_console { uint count; /* Poll interval msec counter */ @@ -120,17 +152,80 @@ typedef struct dhd_console { uint8 *buf; /* Log buffer (host copy) */ uint last; /* Last buffer read index */ } dhd_console_t; -#endif /* DHD_DEBUG */ + typedef struct ring_sh_info { uint32 ring_mem_addr; uint32 ring_state_w; uint32 ring_state_r; } ring_sh_info_t; + +#define DEVICE_WAKE_NONE 0 +#define DEVICE_WAKE_OOB 1 +#define DEVICE_WAKE_INB 2 + +#define INBAND_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_INB) +#define OOB_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_OOB) +#define NO_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_NONE) + +struct dhd_bus; + +struct dhd_pcie_rev { + uint8 fw_rev; + void (*handle_mb_data)(struct dhd_bus *); +}; + +typedef struct dhdpcie_config_save +{ + uint32 header[DHDPCIE_CONFIG_HDR_SIZE]; + /* pmcsr save */ + uint32 pmcsr; + /* express save */ + uint32 exp_dev_ctrl_stat; + uint32 exp_link_ctrl_stat; + uint32 exp_dev_ctrl_stat2; + uint32 exp_link_ctrl_stat2; + /* msi save */ + uint32 msi_cap; + uint32 msi_addr0; + uint32 msi_addr1; + uint32 msi_data; + /* l1pm save */ + uint32 l1pm0; + uint32 l1pm1; + /* ltr save */ + 
uint32 ltr; + /* aer save */ + uint32 aer_caps_ctrl; /* 0x18 */ + uint32 aer_severity; /* 0x0C */ + uint32 aer_umask; /* 0x08 */ + uint32 aer_cmask; /* 0x14 */ + uint32 aer_root_cmd; /* 0x2c */ + /* BAR0 and BAR1 windows */ + uint32 bar0_win; + uint32 bar1_win; +} dhdpcie_config_save_t; + typedef struct dhd_bus { dhd_pub_t *dhd; + struct pci_dev *rc_dev; /* pci RC device handle */ struct pci_dev *dev; /* pci device handle */ - dll_t const_flowring; /* constructed list of tx flowring queues */ +#ifdef DHD_EFI + void *pcie_dev; +#endif + + dll_t flowring_active_list; /* constructed list of tx flowring queues */ +#ifdef IDLE_TX_FLOW_MGMT + uint64 active_list_last_process_ts; + /* stores the timestamp of active list processing */ +#endif /* IDLE_TX_FLOW_MGMT */ + +#ifdef DEVICE_TX_STUCK_DETECT + /* Flag to enable/disable device tx stuck monitor by DHD IOVAR dev_tx_stuck_monitor */ + uint32 dev_tx_stuck_monitor; + /* Stores the timestamp (msec) of the last device Tx stuck check */ + uint32 device_tx_stuck_check; +#endif /* DEVICE_TX_STUCK_DETECT */ si_t *sih; /* Handle for SI calls */ char *vars; /* Variables (from CIS and/or other) */ @@ -142,6 +237,10 @@ typedef struct dhd_bus { uint ramrev; /* SOCRAM core revision */ uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */ uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */ + bool ramsize_adjusted; /* flag to note adjustment, so that + * adjustment routine and file io + * are avoided on D3 cold -> D0 + */ uint32 srmemsize; /* Size of SRMEM */ uint32 bus; /* gSPI or SDIO bus */ @@ -165,10 +264,8 @@ typedef struct dhd_bus { uint intrcount; /* Count of device interrupt callbacks */ uint lastintrs; /* Count as of last watchdog timer */ -#ifdef DHD_DEBUG dhd_console_t console; /* Console output polling support */ uint console_addr; /* Console address from shared struct */ -#endif /* DHD_DEBUG */ bool alp_only; /* Don't use HT clock (ALP only) */ @@ -190,12 +287,16 @@ typedef struct dhd_bus { uint16 pollrate; uint16 
polltick; - uint32 *pcie_mb_intr_addr; + volatile uint32 *pcie_mb_intr_addr; + volatile uint32 *pcie_mb_intr_2_addr; void *pcie_mb_intr_osh; bool sleep_allowed; + wake_counts_t wake_counts; + /* version 3 shared struct related info start */ ring_sh_info_t ring_sh[BCMPCIE_COMMON_MSGRINGS + MAX_DHD_TX_FLOWS]; + uint8 h2d_ring_count; uint8 d2h_ring_count; uint32 ringmem_ptr; @@ -210,29 +311,34 @@ typedef struct dhd_bus { uint32 def_intmask; bool ltrsleep_on_unload; uint wait_for_d3_ack; - uint32 max_sub_queues; + uint16 max_tx_flowrings; + uint16 max_submission_rings; + uint16 max_completion_rings; + uint16 max_cmn_rings; uint32 rw_index_sz; bool db1_for_mb; - bool suspended; dhd_timeout_t doorbell_timer; bool device_wake_state; - bool irq_registered; #ifdef PCIE_OOB bool oob_enabled; #endif /* PCIE_OOB */ + bool irq_registered; #ifdef SUPPORT_LINKDOWN_RECOVERY #if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \ - defined(CONFIG_SOC_EXYNOS8890)) + defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895)) #ifdef CONFIG_ARCH_MSM uint8 no_cfg_restore; #endif /* CONFIG_ARCH_MSM */ struct_pcie_register_event pcie_event; -#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY && CONFIG_SOC_EXYNOS8890) */ +#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY && + * (CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895)) + */ + bool read_shm_fail; #endif /* SUPPORT_LINKDOWN_RECOVERY */ + int32 idletime; /* Control for activity timeout */ #ifdef DHD_PCIE_RUNTIMEPM int32 idlecount; /* Activity timeout counter */ - int32 idletime; /* Control for activity timeout */ int32 bus_wake; /* For wake up the bus */ bool runtime_resume_done; /* For check runtime suspend end */ struct mutex pm_lock; /* Synchronize for system PM & runtime PM */ @@ -242,9 +348,49 @@ typedef struct dhd_bus { uint32 d0_inform_cnt; uint32 d0_inform_in_use_cnt; uint8 force_suspend; - uint32 d3_ack_war_cnt; uint8 is_linkdown; - uint32 pci_d3hot_done; +#ifdef 
IDLE_TX_FLOW_MGMT + bool enable_idle_flowring_mgmt; +#endif /* IDLE_TX_FLOW_MGMT */ + struct dhd_pcie_rev api; + bool use_mailbox; + bool d3_suspend_pending; + bool use_d0_inform; + uint32 hostready_count; /* Number of hostready issued */ +#if defined(PCIE_OOB) || defined(BCMPCIE_OOB_HOST_WAKE) + bool oob_presuspend; +#endif /* PCIE_OOB || BCMPCIE_OOB_HOST_WAKE */ + bool dongle_in_ds; + uint8 dw_option; +#ifdef PCIE_INB_DW + bool inb_enabled; + uint32 ds_exit_timeout; + uint32 host_sleep_exit_timeout; + uint wait_for_ds_exit; + uint32 inband_dw_assert_cnt; /* # of inband device_wake assert */ + uint32 inband_dw_deassert_cnt; /* # of inband device_wake deassert */ + uint32 inband_ds_exit_host_cnt; /* # of DS-EXIT , host initiated */ + uint32 inband_ds_exit_device_cnt; /* # of DS-EXIT , device initiated */ + uint32 inband_ds_exit_to_cnt; /* # of DS-EXIT timeout */ + uint32 inband_host_sleep_exit_to_cnt; /* # of Host_Sleep exit timeout */ + void *inb_lock; /* Lock to serialize in band device wake activity */ + /* # of contexts in the host which currently want a FW transaction */ + uint32 host_active_cnt; +#endif /* PCIE_INB_DW */ + dhdpcie_config_save_t saved_config; + ulong resume_intr_enable_count; + ulong dpc_intr_enable_count; + ulong isr_intr_disable_count; + ulong suspend_intr_disable_count; + ulong dpc_return_busdown_count; + bool idma_enabled; + bool ifrm_enabled; +#if defined(PCIE_OOB) || defined(PCIE_INB_DW) + bool ds_enabled; +#endif +#ifdef DHD_PCIE_RUNTIMEPM + bool chk_pm; /* To avoid counting of wake up from Runtime PM */ +#endif /* DHD_PCIE_RUNTIMEPM */ } dhd_bus_t; /* function declarations */ @@ -260,25 +406,36 @@ extern uint32 dhdpcie_bus_cfg_read_dword(struct dhd_bus *bus, uint32 addr, uint3 extern void dhdpcie_bus_cfg_write_dword(struct dhd_bus *bus, uint32 addr, uint32 size, uint32 data); extern void dhdpcie_bus_intr_enable(struct dhd_bus *bus); extern void dhdpcie_bus_intr_disable(struct dhd_bus *bus); +extern int 
dhpcie_bus_mask_interrupt(dhd_bus_t *bus); extern void dhdpcie_bus_release(struct dhd_bus *bus); extern int32 dhdpcie_bus_isr(struct dhd_bus *bus); extern void dhdpcie_free_irq(dhd_bus_t *bus); extern void dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value); +extern void dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake); extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state); extern int dhdpcie_pci_suspend_resume(struct dhd_bus *bus, bool state); +extern uint32 dhdpcie_force_alp(struct dhd_bus *bus, bool enable); +extern uint32 dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int force_l1_entry_time); extern bool dhdpcie_tcm_valid(dhd_bus_t *bus); -extern void dhdpcie_bus_dongle_print_hwregs(struct dhd_bus *bus); -#ifndef BCMPCIE_OOB_HOST_WAKE extern void dhdpcie_pme_active(osl_t *osh, bool enable); -#endif /* !BCMPCIE_OOB_HOST_WAKE */ extern bool dhdpcie_pme_cap(osl_t *osh); +extern uint32 dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val); +extern void dhdpcie_set_pmu_min_res_mask(struct dhd_bus *bus, uint min_res_mask); +extern uint8 dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val); +extern int dhdpcie_disable_irq(dhd_bus_t *bus); +extern int dhdpcie_disable_irq_nosync(dhd_bus_t *bus); +extern int dhdpcie_enable_irq(dhd_bus_t *bus); +extern uint32 dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset); +extern uint32 dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, + bool is_write, uint32 writeval); +extern uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus); extern int dhdpcie_start_host_pcieclock(dhd_bus_t *bus); extern int dhdpcie_stop_host_pcieclock(dhd_bus_t *bus); extern int dhdpcie_disable_device(dhd_bus_t *bus); -extern int dhdpcie_enable_device(dhd_bus_t *bus); extern int dhdpcie_alloc_resource(dhd_bus_t *bus); extern void dhdpcie_free_resource(dhd_bus_t *bus); extern int dhdpcie_bus_request_irq(struct dhd_bus *bus); +extern int dhdpcie_enable_device(dhd_bus_t *bus); #ifdef 
BCMPCIE_OOB_HOST_WAKE extern int dhdpcie_oob_intr_register(dhd_bus_t *bus); extern void dhdpcie_oob_intr_unregister(dhd_bus_t *bus); @@ -287,9 +444,14 @@ extern void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable); #ifdef PCIE_OOB extern void dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val); extern int dhd_oob_get_bt_reg_on(struct dhd_bus *bus); +extern void dhdpcie_oob_init(dhd_bus_t *bus); +extern void dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus); +extern int dhd_os_oob_set_device_wake(struct dhd_bus *bus, bool val); +extern void dhd_os_ib_set_device_wake(struct dhd_bus *bus, bool val); #endif /* PCIE_OOB */ -#ifdef USE_EXYNOS_PCIE_RC_PMPATCH +#if defined(CONFIG_ARCH_EXYNOS) +#define SAMSUNG_PCIE_VENDOR_ID 0x144d #if defined(CONFIG_MACH_UNIVERSAL5433) #define SAMSUNG_PCIE_DEVICE_ID 0xa5e3 #define SAMSUNG_PCIE_CH_NUM @@ -299,9 +461,57 @@ extern int dhd_oob_get_bt_reg_on(struct dhd_bus *bus); #elif defined(CONFIG_SOC_EXYNOS8890) #define SAMSUNG_PCIE_DEVICE_ID 0xa544 #define SAMSUNG_PCIE_CH_NUM 0 +#elif defined(CONFIG_SOC_EXYNOS7420) +#define SAMSUNG_PCIE_DEVICE_ID 0xa575 +#define SAMSUNG_PCIE_CH_NUM 1 +#elif defined(CONFIG_SOC_EXYNOS8895) +#define SAMSUNG_PCIE_DEVICE_ID 0xecec +#define SAMSUNG_PCIE_CH_NUM 0 +#else +#error "Not supported platform" +#endif /* CONFIG_SOC_EXYNOSXXXX & CONFIG_MACH_UNIVERSALXXXX */ +#endif /* CONFIG_ARCH_EXYNOS */ + +#if defined(CONFIG_ARCH_MSM) +#define MSM_PCIE_VENDOR_ID 0x17cb +#if defined(CONFIG_ARCH_APQ8084) +#define MSM_PCIE_DEVICE_ID 0x0101 +#elif defined(CONFIG_ARCH_MSM8994) +#define MSM_PCIE_DEVICE_ID 0x0300 +#elif defined(CONFIG_ARCH_MSM8996) +#define MSM_PCIE_DEVICE_ID 0x0104 +#elif defined(CONFIG_ARCH_MSM8998) +#define MSM_PCIE_DEVICE_ID 0x0105 #else #error "Not supported platform" #endif +#endif /* CONFIG_ARCH_MSM */ + +#if defined(CONFIG_X86) +#define X86_PCIE_VENDOR_ID 0x8086 +#define X86_PCIE_DEVICE_ID 0x9c1a +#endif /* CONFIG_X86 */ + +#if defined(CONFIG_ARCH_TEGRA) +#define TEGRA_PCIE_VENDOR_ID 0x14e4 
+#define TEGRA_PCIE_DEVICE_ID 0x4347 +#endif /* CONFIG_ARCH_TEGRA */ + +#if defined(CONFIG_ARCH_EXYNOS) +#define PCIE_RC_VENDOR_ID SAMSUNG_PCIE_VENDOR_ID +#define PCIE_RC_DEVICE_ID SAMSUNG_PCIE_DEVICE_ID +#elif defined(CONFIG_ARCH_MSM) +#define PCIE_RC_VENDOR_ID MSM_PCIE_VENDOR_ID +#define PCIE_RC_DEVICE_ID MSM_PCIE_DEVICE_ID +#elif defined(CONFIG_X86) +#define PCIE_RC_VENDOR_ID X86_PCIE_VENDOR_ID +#define PCIE_RC_DEVICE_ID X86_PCIE_DEVICE_ID +#elif defined(CONFIG_ARCH_TEGRA) +#define PCIE_RC_VENDOR_ID TEGRA_PCIE_VENDOR_ID +#define PCIE_RC_DEVICE_ID TEGRA_PCIE_DEVICE_ID +#endif /* CONFIG_ARCH_EXYNOS */ + +#ifdef USE_EXYNOS_PCIE_RC_PMPATCH #ifdef CONFIG_MACH_UNIVERSAL5433 extern int exynos_pcie_pm_suspend(void); extern int exynos_pcie_pm_resume(void); @@ -311,5 +521,86 @@ extern int exynos_pcie_pm_resume(int ch_num); #endif /* CONFIG_MACH_UNIVERSAL5433 */ #endif /* USE_EXYNOS_PCIE_RC_PMPATCH */ +#ifdef CONFIG_ARCH_TEGRA +extern int tegra_pcie_pm_suspend(void); +extern int tegra_pcie_pm_resume(void); +#endif /* CONFIG_ARCH_TEGRA */ + extern int dhd_buzzz_dump_dngl(dhd_bus_t *bus); +#ifdef IDLE_TX_FLOW_MGMT +extern int dhd_bus_flow_ring_resume_request(struct dhd_bus *bus, void *arg); +extern void dhd_bus_flow_ring_resume_response(struct dhd_bus *bus, uint16 flowid, int32 status); +extern int dhd_bus_flow_ring_suspend_request(struct dhd_bus *bus, void *arg); +extern void dhd_bus_flow_ring_suspend_response(struct dhd_bus *bus, uint16 flowid, uint32 status); +extern void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus, + flow_ring_node_t *flow_ring_node); +extern void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus, + flow_ring_node_t *flow_ring_node); +extern void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, + flow_ring_node_t *flow_ring_node); +extern void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, + flow_ring_node_t *flow_ring_node); +#endif /* IDLE_TX_FLOW_MGMT */ + +extern int dhdpcie_send_mb_data(dhd_bus_t *bus, 
uint32 h2d_mb_data); + +#ifdef DHD_WAKE_STATUS +int bcmpcie_get_total_wake(struct dhd_bus *bus); +int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag); +#endif /* DHD_WAKE_STATUS */ +extern bool dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus); +extern void dhd_bus_hostready(struct dhd_bus *bus); +#ifdef PCIE_OOB +extern bool dhdpcie_bus_get_pcie_oob_dw_supported(dhd_bus_t *bus); +#endif /* PCIE_OOB */ +#ifdef PCIE_INB_DW +extern bool dhdpcie_bus_get_pcie_inband_dw_supported(dhd_bus_t *bus); +extern void dhdpcie_bus_set_pcie_inband_dw_state(dhd_bus_t *bus, + enum dhd_bus_ds_state state); +extern enum dhd_bus_ds_state dhdpcie_bus_get_pcie_inband_dw_state(dhd_bus_t *bus); +extern const char * dhd_convert_inb_state_names(enum dhd_bus_ds_state inbstate); +extern const char * dhd_convert_dsval(uint32 val, bool d2h); +extern int dhd_bus_inb_set_device_wake(struct dhd_bus *bus, bool val); +extern void dhd_bus_inb_ack_pending_ds_req(dhd_bus_t *bus); +#endif /* PCIE_INB_DW */ +extern void dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option); +extern bool dhdpcie_irq_enabled(struct dhd_bus *bus); +extern bool dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus); +extern bool dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus); + +static INLINE uint32 +dhd_pcie_config_read(osl_t *osh, uint offset, uint size) +{ + OSL_DELAY(100); + return OSL_PCI_READ_CONFIG(osh, offset, size); +} + +static INLINE uint32 +dhd_pcie_corereg_read(si_t *sih, uint val) +{ + OSL_DELAY(100); + si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, val); + return si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), 0, 0); +} + +#ifdef DHD_SSSR_DUMP +extern int dhdpcie_sssr_dump(dhd_pub_t *dhd); +#endif /* DHD_SSSR_DUMP */ + +#ifdef DHD_EFI +extern int dhd_os_wifi_platform_set_power(uint32 value); +int dhd_control_signal(dhd_bus_t *bus, char *arg, int set); +extern int dhd_wifi_properties(struct dhd_bus *bus, char *arg); +extern bool 
dhdpcie_is_arm_halted(struct dhd_bus *bus); +extern void dhdpcie_dongle_pwr_toggle(dhd_bus_t *bus); +extern int dhd_otp_dump(dhd_bus_t *bus, char *arg); +#else +static INLINE int dhd_os_wifi_platform_set_power(uint32 value) {return BCME_OK; } +static INLINE bool dhdpcie_is_arm_halted(struct dhd_bus *bus) {return TRUE;} +#endif /* DHD_EFI */ +int dhdpcie_config_check(dhd_bus_t *bus); +int dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr); +int dhdpcie_config_save(dhd_bus_t *bus); +int dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state); + #endif /* dhd_pcie_h */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_pcie_linux.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_pcie_linux.c index 0fef810a3a7c..51664a7e217b 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_pcie_linux.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_pcie_linux.c @@ -1,7 +1,7 @@ /* * Linux DHD Bus Module for PCIE * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_pcie_linux.c 610267 2016-01-06 16:03:53Z $ + * $Id: dhd_pcie_linux.c 707536 2017-06-28 04:23:48Z $ */ @@ -52,14 +52,24 @@ #include #include #ifdef CONFIG_ARCH_MSM -#ifdef CONFIG_PCI_MSM +#if defined(CONFIG_PCI_MSM) || defined(CONFIG_ARCH_MSM8996) #include #else #include #endif /* CONFIG_PCI_MSM */ #endif /* CONFIG_ARCH_MSM */ +#ifdef PCIE_OOB +#include "ftdi_sio_external.h" +#endif /* PCIE_OOB */ +#include +#ifdef USE_SMMU_ARCH_MSM +#include +#include +#include +#include +#endif /* USE_SMMU_ARCH_MSM */ -#define PCI_CFG_RETRY 10 +#define PCI_CFG_RETRY 10 #define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */ #define BCM_MEM_FILENAME_LEN 24 /* Mem. 
filename length */ @@ -73,6 +83,17 @@ do { \ *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \ } while (0) +#ifdef PCIE_OOB +#define HOST_WAKE 4 /* GPIO_0 (HOST_WAKE) - Output from WLAN */ +#define DEVICE_WAKE 5 /* GPIO_1 (DEVICE_WAKE) - Input to WLAN */ +#define BIT_WL_REG_ON 6 +#define BIT_BT_REG_ON 7 + +int gpio_handle_val = 0; +unsigned char gpio_port = 0; +unsigned char gpio_direction = 0; +#define OOB_PORT "ttyUSB0" +#endif /* PCIE_OOB */ /* user defined data structures */ @@ -102,6 +123,15 @@ typedef struct dhdpcie_info #ifdef BCMPCIE_OOB_HOST_WAKE void *os_cxt; /* Pointer to per-OS private data */ #endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_WAKE_STATUS + spinlock_t pcie_lock; + unsigned int total_wake_count; + int pkt_wake; + int wake_irq; +#endif /* DHD_WAKE_STATUS */ +#ifdef USE_SMMU_ARCH_MSM + void *smmu_cxt; +#endif /* USE_SMMU_ARCH_MSM */ } dhdpcie_info_t; @@ -125,8 +155,20 @@ typedef struct dhdpcie_os_info { spinlock_t oob_irq_spinlock; void *dev; /* handle to the underlying device */ } dhdpcie_os_info_t; +static irqreturn_t wlan_oob_irq(int irq, void *data); +#if defined(CUSTOMER_HW2) && defined(CONFIG_ARCH_APQ8084) +extern struct brcm_pcie_wake brcm_pcie_wake; +#endif /* CUSTOMER_HW2 && CONFIG_ARCH_APQ8084 */ #endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef USE_SMMU_ARCH_MSM +typedef struct dhdpcie_smmu_info { + struct dma_iommu_mapping *smmu_mapping; + dma_addr_t smmu_iova_start; + size_t smmu_iova_len; +} dhdpcie_smmu_info_t; +#endif /* USE_SMMU_ARCH_MSM */ + /* function declarations */ static int __devinit dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); @@ -140,9 +182,9 @@ static irqreturn_t dhdpcie_isr(int irq, void *arg); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) DEFINE_MUTEX(_dhd_sdio_mutex_lock_); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ -#endif +#endif -static int dhdpcie_set_suspend_resume(struct pci_dev *dev, bool state); +static int dhdpcie_set_suspend_resume(dhd_bus_t *bus, 
bool state); static int dhdpcie_resume_host_dev(dhd_bus_t *bus); static int dhdpcie_suspend_host_dev(dhd_bus_t *bus); static int dhdpcie_resume_dev(struct pci_dev *dev); @@ -156,6 +198,7 @@ static void dhdpcie_pm_complete(struct device *dev); static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state); static int dhdpcie_pci_resume(struct pci_dev *dev); #endif /* DHD_PCIE_RUNTIMEPM */ + static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = { { vendor: 0x14e4, device: PCI_ANY_ID, @@ -165,7 +208,7 @@ static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = { class_mask: 0xffff00, driver_data: 0, }, - { 0, } + { 0, 0, 0, 0, 0, 0, 0} }; MODULE_DEVICE_TABLE(pci, dhdpcie_pci_devid); @@ -180,7 +223,7 @@ static const struct dev_pm_ops dhd_pcie_pm_ops = { #endif /* DHD_PCIE_RUNTIMEPM */ static struct pci_driver dhdpcie_driver = { - node: {}, + node: {&dhdpcie_driver.node, &dhdpcie_driver.node}, name: "pcieh", id_table: dhdpcie_pci_devid, probe: dhdpcie_pci_probe, @@ -198,11 +241,142 @@ static struct pci_driver dhdpcie_driver = { int dhdpcie_init_succeeded = FALSE; +#ifdef USE_SMMU_ARCH_MSM +static int dhdpcie_smmu_init(struct pci_dev *pdev, void *smmu_cxt) +{ + struct dma_iommu_mapping *mapping; + struct device_node *root_node = NULL; + dhdpcie_smmu_info_t *smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt; + int smmu_iova_address[2]; + char *wlan_node = "android,bcmdhd_wlan"; + char *wlan_smmu_node = "wlan-smmu-iova-address"; + int atomic_ctx = 1; + int s1_bypass = 1; + int ret = 0; + + DHD_ERROR(("%s: SMMU initialize\n", __FUNCTION__)); + + root_node = of_find_compatible_node(NULL, NULL, wlan_node); + if (!root_node) { + WARN(1, "failed to get device node of BRCM WLAN\n"); + return -ENODEV; + } + + if (of_property_read_u32_array(root_node, wlan_smmu_node, + smmu_iova_address, 2) == 0) { + DHD_ERROR(("%s : get SMMU start address 0x%x, size 0x%x\n", + __FUNCTION__, smmu_iova_address[0], smmu_iova_address[1])); + smmu_info->smmu_iova_start = 
smmu_iova_address[0]; + smmu_info->smmu_iova_len = smmu_iova_address[1]; + } else { + printf("%s : can't get smmu iova address property\n", + __FUNCTION__); + return -ENODEV; + } + + if (smmu_info->smmu_iova_len <= 0) { + DHD_ERROR(("%s: Invalid smmu iova len %d\n", + __FUNCTION__, (int)smmu_info->smmu_iova_len)); + return -EINVAL; + } + + DHD_ERROR(("%s : SMMU init start\n", __FUNCTION__)); + mapping = arm_iommu_create_mapping(&platform_bus_type, + smmu_info->smmu_iova_start, smmu_info->smmu_iova_len); + if (IS_ERR(mapping)) { + DHD_ERROR(("%s: create mapping failed, err = %d\n", + __FUNCTION__, ret)); + ret = PTR_ERR(mapping); + goto map_fail; + } + + ret = iommu_domain_set_attr(mapping->domain, + DOMAIN_ATTR_ATOMIC, &atomic_ctx); + if (ret) { + DHD_ERROR(("%s: set atomic_ctx attribute failed, err = %d\n", + __FUNCTION__, ret)); + goto set_attr_fail; + } + + ret = iommu_domain_set_attr(mapping->domain, + DOMAIN_ATTR_S1_BYPASS, &s1_bypass); + if (ret < 0) { + DHD_ERROR(("%s: set s1_bypass attribute failed, err = %d\n", + __FUNCTION__, ret)); + goto set_attr_fail; + } + + ret = arm_iommu_attach_device(&pdev->dev, mapping); + if (ret) { + DHD_ERROR(("%s: attach device failed, err = %d\n", + __FUNCTION__, ret)); + goto attach_fail; + } + + smmu_info->smmu_mapping = mapping; + + return ret; + +attach_fail: +set_attr_fail: + arm_iommu_release_mapping(mapping); +map_fail: + return ret; +} + +static void dhdpcie_smmu_remove(struct pci_dev *pdev, void *smmu_cxt) +{ + dhdpcie_smmu_info_t *smmu_info; + + if (!smmu_cxt) { + return; + } + + smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt; + if (smmu_info->smmu_mapping) { + arm_iommu_detach_device(&pdev->dev); + arm_iommu_release_mapping(smmu_info->smmu_mapping); + smmu_info->smmu_mapping = NULL; + } +} +#endif /* USE_SMMU_ARCH_MSM */ + #ifdef DHD_PCIE_RUNTIMEPM static int dhdpcie_pm_suspend(struct device *dev) { + int ret = 0; struct pci_dev *pdev = to_pci_dev(dev); - return dhdpcie_set_suspend_resume(pdev, TRUE); + 
dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + unsigned long flags; + + if (pch) { + bus = pch->bus; + } + if (!bus) { + return ret; + } + + DHD_GENERAL_LOCK(bus->dhd, flags); + if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) { + DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n", + __FUNCTION__, bus->dhd->dhd_bus_busy_state)); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + return -EBUSY; + } + DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + if (!bus->dhd->dongle_reset) + ret = dhdpcie_set_suspend_resume(bus, TRUE); + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + return ret; + } static int dhdpcie_pm_prepare(struct device *dev) @@ -216,13 +390,40 @@ static int dhdpcie_pm_prepare(struct device *dev) DHD_DISABLE_RUNTIME_PM(bus->dhd); } + bus->chk_pm = TRUE; return 0; } static int dhdpcie_pm_resume(struct device *dev) { + int ret = 0; struct pci_dev *pdev = to_pci_dev(dev); - return dhdpcie_set_suspend_resume(pdev, FALSE); + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + unsigned long flags; + + if (pch) { + bus = pch->bus; + } + if (!bus) { + return ret; + } + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + if (!bus->dhd->dongle_reset) { + ret = dhdpcie_set_suspend_resume(bus, FALSE); + bus->chk_pm = FALSE; + } + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + return ret; } static void dhdpcie_pm_complete(struct device *dev) @@ -240,43 +441,90 @@ static void dhdpcie_pm_complete(struct device *dev) } #else static int dhdpcie_pci_suspend(struct pci_dev * pdev, pm_message_t state) -{ - BCM_REFERENCE(state); - return dhdpcie_set_suspend_resume(pdev, TRUE); -} - -static int 
dhdpcie_pci_resume(struct pci_dev *pdev) -{ - return dhdpcie_set_suspend_resume(pdev, FALSE); -} - -#endif /* DHD_PCIE_RUNTIMEPM */ - -static int dhdpcie_set_suspend_resume(struct pci_dev *pdev, bool state) { int ret = 0; dhdpcie_info_t *pch = pci_get_drvdata(pdev); dhd_bus_t *bus = NULL; + unsigned long flags; if (pch) { bus = pch->bus; } + if (!bus) { + return ret; + } + + BCM_REFERENCE(state); + + DHD_GENERAL_LOCK(bus->dhd, flags); + if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) { + DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n", + __FUNCTION__, bus->dhd->dhd_bus_busy_state)); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + return -EBUSY; + } + DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + if (!bus->dhd->dongle_reset) + ret = dhdpcie_set_suspend_resume(bus, TRUE); + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + return ret; +} + +static int dhdpcie_pci_resume(struct pci_dev *pdev) +{ + int ret = 0; + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + unsigned long flags; + + if (pch) { + bus = pch->bus; + } + if (!bus) { + return ret; + } + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + if (!bus->dhd->dongle_reset) + ret = dhdpcie_set_suspend_resume(bus, FALSE); + + DHD_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + return ret; +} + +#endif /* DHD_PCIE_RUNTIMEPM */ + +static int dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state) +{ + int ret = 0; + + ASSERT(bus && !bus->dhd->dongle_reset); #ifdef DHD_PCIE_RUNTIMEPM - if (bus && !bus->dhd->dongle_reset) { /* if wakelock is held during suspend, return failed */ if (state == TRUE && dhd_os_check_wakelock_all(bus->dhd)) { return -EBUSY; } - 
mutex_lock(&bus->pm_lock); - } #endif /* DHD_PCIE_RUNTIMEPM */ /* When firmware is not loaded do the PCI bus */ /* suspend/resume only */ - if (bus && (bus->dhd->busstate == DHD_BUS_DOWN) && - !bus->dhd->dongle_reset) { + if (bus->dhd->busstate == DHD_BUS_DOWN) { ret = dhdpcie_pci_suspend_resume(bus, state); #ifdef DHD_PCIE_RUNTIMEPM mutex_unlock(&bus->pm_lock); @@ -284,20 +532,19 @@ static int dhdpcie_set_suspend_resume(struct pci_dev *pdev, bool state) return ret; } - if (bus && ((bus->dhd->busstate == DHD_BUS_SUSPEND)|| - (bus->dhd->busstate == DHD_BUS_DATA)) && - (bus->suspended != state)) { ret = dhdpcie_bus_suspend(bus, state); - } #ifdef DHD_PCIE_RUNTIMEPM - if (bus && !bus->dhd->dongle_reset) { mutex_unlock(&bus->pm_lock); - } #endif /* DHD_PCIE_RUNTIMEPM */ + return ret; } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) +extern void dhd_dpc_tasklet_kill(dhd_pub_t *dhdp); +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + static int dhdpcie_suspend_dev(struct pci_dev *dev) { int ret; @@ -312,37 +559,61 @@ static int dhdpcie_suspend_dev(struct pci_dev *dev) #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__)); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) - bus->pci_d3hot_done = 1; + dhd_dpc_tasklet_kill(bus->dhd); #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ pci_save_state(dev); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) pch->state = pci_store_saved_state(dev); #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ pci_enable_wake(dev, PCI_D0, TRUE); - if (pci_is_enabled(dev)) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) + if (pci_is_enabled(dev)) +#endif pci_disable_device(dev); - } + ret = pci_set_power_state(dev, PCI_D3hot); if (ret) { DHD_ERROR(("%s: pci_set_power_state error %d\n", __FUNCTION__, ret)); } - disable_irq(dev->irq); + dev->state_saved = FALSE; return ret; } +#ifdef 
DHD_WAKE_STATUS +int bcmpcie_get_total_wake(struct dhd_bus *bus) +{ + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); + + return pch->total_wake_count; +} + +int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag) +{ + dhdpcie_info_t *pch = pci_get_drvdata(bus->dev); + unsigned long flags; + int ret; + + spin_lock_irqsave(&pch->pcie_lock, flags); + + ret = pch->pkt_wake; + pch->total_wake_count += flag; + pch->pkt_wake = flag; + + spin_unlock_irqrestore(&pch->pcie_lock, flags); + return ret; +} +#endif /* DHD_WAKE_STATUS */ + static int dhdpcie_resume_dev(struct pci_dev *dev) { int err = 0; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) dhdpcie_info_t *pch = pci_get_drvdata(dev); - dhd_bus_t *bus = pch->bus; pci_load_and_free_saved_state(dev, &pch->state); #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__)); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) - bus->pci_d3hot_done = 0; -#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + dev->state_saved = TRUE; pci_restore_state(dev); err = pci_enable_device(dev); if (err) { @@ -357,7 +628,6 @@ static int dhdpcie_resume_dev(struct pci_dev *dev) } out: - enable_irq(dev->irq); return err; } @@ -370,13 +640,18 @@ static int dhdpcie_resume_host_dev(dhd_bus_t *bus) #ifdef CONFIG_ARCH_MSM bcmerror = dhdpcie_start_host_pcieclock(bus); #endif /* CONFIG_ARCH_MSM */ +#ifdef CONFIG_ARCH_TEGRA + bcmerror = tegra_pcie_pm_resume(); +#endif /* CONFIG_ARCH_TEGRA */ if (bcmerror < 0) { DHD_ERROR(("%s: PCIe RC resume failed!!! 
(%d)\n", __FUNCTION__, bcmerror)); bus->is_linkdown = 1; +#ifdef SUPPORT_LINKDOWN_RECOVERY #ifdef CONFIG_ARCH_MSM bus->no_cfg_restore = 1; #endif /* CONFIG_ARCH_MSM */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ } return bcmerror; @@ -386,19 +661,132 @@ static int dhdpcie_suspend_host_dev(dhd_bus_t *bus) { int bcmerror = 0; #ifdef USE_EXYNOS_PCIE_RC_PMPATCH - struct pci_dev *rc_pci_dev; - rc_pci_dev = pci_get_device(0x144d, SAMSUNG_PCIE_DEVICE_ID, NULL); - if (rc_pci_dev) { - pci_save_state(rc_pci_dev); + if (bus->rc_dev) { + pci_save_state(bus->rc_dev); + } else { + DHD_ERROR(("%s: RC %x:%x handle is NULL\n", + __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID)); } exynos_pcie_pm_suspend(SAMSUNG_PCIE_CH_NUM); #endif /* USE_EXYNOS_PCIE_RC_PMPATCH */ #ifdef CONFIG_ARCH_MSM bcmerror = dhdpcie_stop_host_pcieclock(bus); #endif /* CONFIG_ARCH_MSM */ +#ifdef CONFIG_ARCH_TEGRA + bcmerror = tegra_pcie_pm_suspend(); +#endif /* CONFIG_ARCH_TEGRA */ return bcmerror; } +#if defined(PCIE_RC_VENDOR_ID) && defined(PCIE_RC_DEVICE_ID) +uint32 +dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset) +{ + uint val = -1; /* Initialise to 0xfffffff */ + if (bus->rc_dev) { + pci_read_config_dword(bus->rc_dev, offset, &val); + OSL_DELAY(100); + } else { + DHD_ERROR(("%s: RC %x:%x handle is NULL\n", + __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID)); + } + DHD_ERROR(("%s: RC %x:%x offset 0x%x val 0x%x\n", + __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, offset, val)); + return (val); +} + +/* + * Reads/ Writes the value of capability register + * from the given CAP_ID section of PCI Root Port + * + * Arguements + * @bus current dhd_bus_t pointer + * @cap Capability or Extended Capability ID to get + * @offset offset of Register to Read + * @is_ext TRUE if @cap is given for Extended Capability + * @is_write is set to TRUE to indicate write + * @val value to write + * + * Return Value + * Returns 0xffffffff on error + * on write success returns BCME_OK (0) + * on Read Success returns the 
value of register requested + * Note: caller shoud ensure valid capability ID and Ext. Capability ID. + */ + +uint32 +dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write, + uint32 writeval) +{ + int cap_ptr = 0; + uint32 ret = -1; + uint32 readval; + + if (!(bus->rc_dev)) { + DHD_ERROR(("%s: RC %x:%x handle is NULL\n", + __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID)); + return ret; + } + + /* Find Capability offset */ + if (is_ext) { + /* removing max EXT_CAP_ID check as + * linux kernel definition's max value is not upadted yet as per spec + */ + cap_ptr = pci_find_ext_capability(bus->rc_dev, cap); + + } else { + /* removing max PCI_CAP_ID_MAX check as + * pervious kernel versions dont have this definition + */ + cap_ptr = pci_find_capability(bus->rc_dev, cap); + } + + /* Return if capability with given ID not found */ + if (cap_ptr == 0) { + DHD_ERROR(("%s: RC %x:%x PCI Cap(0x%02x) not supported.\n", + __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, cap)); + return BCME_ERROR; + } + + if (is_write) { + ret = pci_write_config_dword(bus->rc_dev, (cap_ptr + offset), writeval); + if (ret) { + DHD_ERROR(("%s: pci_write_config_dword failed. cap=%d offset=%d\n", + __FUNCTION__, cap, offset)); + return BCME_ERROR; + } + ret = BCME_OK; + + } else { + + ret = pci_read_config_dword(bus->rc_dev, (cap_ptr + offset), &readval); + + if (ret) { + DHD_ERROR(("%s: pci_read_config_dword failed. 
cap=%d offset=%d\n", + __FUNCTION__, cap, offset)); + return BCME_ERROR; + } + ret = readval; + } + + return ret; +} + +/* API wrapper to read Root Port link capability + * Returns 2 = GEN2 1 = GEN1 BCME_ERR on linkcap not found + */ + +uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus) +{ + uint32 linkcap = -1; + linkcap = dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP, + PCIE_CAP_LINKCAP_OFFSET, FALSE, FALSE, 0); + linkcap &= PCIE_CAP_LINKCAP_LNKSPEED_MASK; + return linkcap; +} +#endif + int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state) { int rc; @@ -406,10 +794,6 @@ int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state) struct pci_dev *dev = bus->dev; if (state) { - if (bus->is_linkdown) { - DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); - return BCME_ERROR; - } #ifndef BCMPCIE_OOB_HOST_WAKE dhdpcie_pme_active(bus->osh, state); #endif /* !BCMPCIE_OOB_HOST_WAKE */ @@ -424,7 +808,13 @@ int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state) dhdpcie_pme_active(bus->osh, state); #endif /* !BCMPCIE_OOB_HOST_WAKE */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) - if (bus->is_linkdown) { +#if defined(DHD_HANG_SEND_UP_TEST) + if (bus->is_linkdown || + bus->dhd->req_hang_type == HANG_REASON_PCIE_RC_LINK_UP_FAIL) +#else /* DHD_HANG_SEND_UP_TEST */ + if (bus->is_linkdown) +#endif /* DHD_HANG_SEND_UP_TEST */ + { bus->dhd->hang_reason = HANG_REASON_PCIE_RC_LINK_UP_FAIL; dhd_os_send_hang_message(bus->dhd); } @@ -439,7 +829,14 @@ static int dhdpcie_device_scan(struct device *dev, void *data) struct pci_dev *pcidev; int *cnt = data; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif pcidev = container_of(dev, struct pci_dev, dev); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif if (pcidev->vendor != 0x14e4) return 0; @@ -579,17 +976,34 @@ dhdpcie_pci_remove(struct pci_dev *pdev) #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ } 
#endif /* SUPPORT_LINKDOWN_RECOVERY */ + + bus->rc_dev = NULL; + dhdpcie_bus_release(bus); - pci_disable_device(pdev); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) + if (pci_is_enabled(pdev)) +#endif + pci_disable_device(pdev); #ifdef BCMPCIE_OOB_HOST_WAKE /* pcie os info detach */ MFREE(osh, pch->os_cxt, sizeof(dhdpcie_os_info_t)); #endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef USE_SMMU_ARCH_MSM + /* smmu info detach */ + dhdpcie_smmu_remove(pdev, pch->smmu_cxt); + MFREE(osh, pch->smmu_cxt, sizeof(dhdpcie_smmu_info_t)); +#endif /* USE_SMMU_ARCH_MSM */ /* pcie info detach */ dhdpcie_detach(pch); /* osl detach */ osl_detach(osh); +#if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \ + defined(CONFIG_ARCH_APQ8084) + brcm_pcie_wake.wake_irq = NULL; + brcm_pcie_wake.data = NULL; +#endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMR_HW2 && CONFIG_ARCH_APQ8084 */ + dhdpcie_init_succeeded = FALSE; #if defined(MULTIPLE_SUPPLICANT) @@ -627,12 +1041,35 @@ dhdpcie_request_irq(dhdpcie_info_t *dhdpcie_info) DHD_ERROR(("%s: PCI IRQ is already registered\n", __FUNCTION__)); } + if (!dhdpcie_irq_enabled(bus)) { + DHD_ERROR(("%s: PCIe IRQ was disabled, so, enabled it again\n", __FUNCTION__)); + dhdpcie_enable_irq(bus); + } + DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname)); return 0; /* SUCCESS */ } +/** + * dhdpcie_get_pcieirq - return pcie irq number to linux-dhd + */ +int +dhdpcie_get_pcieirq(struct dhd_bus *bus, unsigned int *irq) +{ + struct pci_dev *pdev = bus->dev; + + if (!pdev) { + DHD_ERROR(("%s : bus->dev is NULL\n", __FUNCTION__)); + return -ENODEV; + } + + *irq = pdev->irq; + + return 0; /* SUCCESS */ +} + #ifdef CONFIG_PHYS_ADDR_T_64BIT #define PRINTF_RESOURCE "0x%016llx" #else @@ -663,6 +1100,9 @@ int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info) ulong bar1_size; struct pci_dev *pdev = NULL; pdev = dhdpcie_info->dev; +#ifdef EXYNOS_PCIE_MODULE_PATCH + pci_restore_state(pdev); +#endif /* EXYNOS_MODULE_PATCH */ do { if (pci_enable_device(pdev)) { 
printf("%s: Cannot enable PCI device\n", __FUNCTION__); @@ -683,8 +1123,9 @@ int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info) } dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE); - dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, DONGLE_TCM_MAP_SIZE); - dhdpcie_info->tcm_size = DONGLE_TCM_MAP_SIZE; + dhdpcie_info->tcm_size = + (bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE; + dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size); if (!dhdpcie_info->regs || !dhdpcie_info->tcm) { DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__)); @@ -710,6 +1151,10 @@ int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info) } #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ +#ifdef EXYNOS_PCIE_MODULE_PATCH + pci_save_state(pdev); +#endif /* EXYNOS_MODULE_PATCH */ + DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n", __FUNCTION__, dhdpcie_info->regs, bar0_addr)); DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n", @@ -747,7 +1192,7 @@ int dhdpcie_scan_resource(dhdpcie_info_t *dhdpcie_info) #ifdef SUPPORT_LINKDOWN_RECOVERY #if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \ - defined(CONFIG_SOC_EXYNOS8890)) + (defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895))) void dhdpcie_linkdown_cb(struct_pcie_notify *noti) { struct pci_dev *pdev = (struct pci_dev *)noti->user; @@ -776,8 +1221,10 @@ void dhdpcie_linkdown_cb(struct_pcie_notify *noti) } } -#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY && CONFIG_SOC_EXYNOS8890) */ -#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ +#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY && + * (CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895)) + */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ #if defined(MULTIPLE_SUPPLICANT) extern void wl_android_post_init(void); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe @@ -793,6 +1240,9 @@ int 
dhdpcie_init(struct pci_dev *pdev) #ifdef BCMPCIE_OOB_HOST_WAKE dhdpcie_os_info_t *dhdpcie_osinfo = NULL; #endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef USE_SMMU_ARCH_MSM + dhdpcie_smmu_info_t *dhdpcie_smmu_info = NULL; +#endif /* USE_SMMU_ARCH_MSM */ #if defined(MULTIPLE_SUPPLICANT) #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) @@ -804,7 +1254,7 @@ int dhdpcie_init(struct pci_dev *pdev) } mutex_lock(&_dhd_sdio_mutex_lock_); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ -#endif +#endif do { /* osl attach */ @@ -856,6 +1306,30 @@ int dhdpcie_init(struct pci_dev *pdev) } #endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef USE_SMMU_ARCH_MSM + /* allocate private structure for using SMMU */ + dhdpcie_smmu_info = MALLOC(osh, sizeof(dhdpcie_smmu_info_t)); + if (dhdpcie_smmu_info == NULL) { + DHD_ERROR(("%s: MALLOC of dhdpcie_smmu_info_t failed\n", + __FUNCTION__)); + break; + } + bzero(dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t)); + dhdpcie_info->smmu_cxt = (void *)dhdpcie_smmu_info; + + /* Initialize smmu structure */ + if (dhdpcie_smmu_init(pdev, dhdpcie_info->smmu_cxt) < 0) { + DHD_ERROR(("%s: Failed to initialize SMMU\n", + __FUNCTION__)); + break; + } +#endif /* USE_SMMU_ARCH_MSM */ + +#ifdef DHD_WAKE_STATUS + /* Initialize pcie_lock */ + spin_lock_init(&dhdpcie_info->pcie_lock); +#endif /* DHD_WAKE_STATUS */ + /* Find the PCI resources, verify the */ /* vendor and device ID, map BAR regions and irq, update in structures */ if (dhdpcie_scan_resource(dhdpcie_info)) { @@ -873,7 +1347,20 @@ int dhdpcie_init(struct pci_dev *pdev) dhdpcie_info->bus = bus; bus->is_linkdown = 0; - bus->pci_d3hot_done = 0; + + /* Get RC Device Handle */ +#if defined(PCIE_RC_VENDOR_ID) && defined(PCIE_RC_DEVICE_ID) + bus->rc_dev = pci_get_device(PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, NULL); +#else + bus->rc_dev = NULL; +#endif + +#if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \ + defined(CONFIG_ARCH_APQ8084) + brcm_pcie_wake.wake_irq = wlan_oob_irq; + 
brcm_pcie_wake.data = bus; +#endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMR_HW2 && CONFIG_ARCH_APQ8084 */ + #ifdef DONGLE_ENABLE_ISOLATION bus->dhd->dongle_isolation = TRUE; #endif /* DONGLE_ENABLE_ISOLATION */ @@ -888,14 +1375,15 @@ int dhdpcie_init(struct pci_dev *pdev) bus->no_cfg_restore = 0; #endif /* CONFIG_ARCH_MSM */ #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY -#ifdef CONFIG_SOC_EXYNOS8890 +#if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) bus->pcie_event.events = EXYNOS_PCIE_EVENT_LINKDOWN; bus->pcie_event.user = pdev; bus->pcie_event.mode = EXYNOS_PCIE_TRIGGER_CALLBACK; bus->pcie_event.callback = dhdpcie_linkdown_cb; exynos_pcie_register_event(&bus->pcie_event); -#endif /* CONFIG_SOC_EXYNOS8890 */ +#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 */ #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ + bus->read_shm_fail = FALSE; #endif /* SUPPORT_LINKDOWN_RECOVERY */ if (bus->intr) { @@ -971,6 +1459,13 @@ int dhdpcie_init(struct pci_dev *pdev) } #endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef USE_SMMU_ARCH_MSM + if (dhdpcie_smmu_info) { + MFREE(osh, dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t)); + dhdpcie_info->smmu_cxt = NULL; + } +#endif /* USE_SMMU_ARCH_MSM */ + if (dhdpcie_info) dhdpcie_detach(dhdpcie_info); pci_disable_device(pdev); @@ -997,16 +1492,14 @@ dhdpcie_free_irq(dhd_bus_t *bus) struct pci_dev *pdev = NULL; DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__)); - if (!bus) { - return; - } - - if (bus->irq_registered) { + if (bus) { pdev = bus->dev; - free_irq(pdev->irq, bus); - bus->irq_registered = FALSE; - } else { - DHD_ERROR(("%s: PCIe IRQ is not registered\n", __FUNCTION__)); + if (bus->irq_registered) { + free_irq(pdev->irq, bus); + bus->irq_registered = FALSE; + } else { + DHD_ERROR(("%s: PCIe IRQ is not registered\n", __FUNCTION__)); + } } DHD_TRACE(("%s: Exit\n", __FUNCTION__)); return; @@ -1041,6 +1534,64 @@ dhdpcie_isr(int irq, void *arg) return FALSE; } +int +dhdpcie_disable_irq_nosync(dhd_bus_t *bus) +{ + struct pci_dev 
*dev; + if ((bus == NULL) || (bus->dev == NULL)) { + DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + dev = bus->dev; + disable_irq_nosync(dev->irq); + return BCME_OK; +} + +int +dhdpcie_disable_irq(dhd_bus_t *bus) +{ + struct pci_dev *dev; + if ((bus == NULL) || (bus->dev == NULL)) { + DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + dev = bus->dev; + disable_irq(dev->irq); + return BCME_OK; +} + +int +dhdpcie_enable_irq(dhd_bus_t *bus) +{ + struct pci_dev *dev; + if ((bus == NULL) || (bus->dev == NULL)) { + DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__)); + return BCME_ERROR; + } + + dev = bus->dev; + enable_irq(dev->irq); + return BCME_OK; +} + +bool +dhdpcie_irq_enabled(dhd_bus_t *bus) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + struct irq_desc *desc = irq_to_desc(bus->dev->irq); + /* depth will be zero, if enabled */ + if (!desc->depth) { + DHD_ERROR(("%s: depth:%d\n", __FUNCTION__, desc->depth)); + } + return desc->depth ? 
FALSE : TRUE; +#else + /* return TRUE by default as there is no support for lower versions */ + return TRUE; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ +} + int dhdpcie_start_host_pcieclock(dhd_bus_t *bus) { @@ -1108,6 +1659,9 @@ dhdpcie_stop_host_pcieclock(dhd_bus_t *bus) #ifdef CONFIG_ARCH_MSM #ifdef SUPPORT_LINKDOWN_RECOVERY + /* Always reset the PCIe host when wifi off */ + bus->no_cfg_restore = 1; + if (bus->no_cfg_restore) { options = MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN; } @@ -1170,15 +1724,16 @@ dhdpcie_enable_device(dhd_bus_t *bus) return BCME_ERROR; } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && !defined(CONFIG_SOC_EXYNOS8890) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && (LINUX_VERSION_CODE < \ + KERNEL_VERSION(3, 19, 0)) && !defined(CONFIG_SOC_EXYNOS8890) /* Updated with pci_load_and_free_saved_state to compatible - * with kernel 3.14 or higher + * with Kernel version 3.14.0 to 3.18.41. */ pci_load_and_free_saved_state(bus->dev, &pch->default_state); pch->default_state = pci_store_saved_state(bus->dev); #else pci_load_saved_state(bus->dev, pch->default_state); -#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && !CONFIG_SOC_EXYNOS8890 */ +#endif /* LINUX_VERSION >= 3.14.0 && LINUX_VERSION < 3.19.0 && !CONFIG_SOC_EXYNOS8890 */ pci_restore_state(bus->dev); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) */ @@ -1237,8 +1792,9 @@ dhdpcie_alloc_resource(dhd_bus_t *bus) } bus->regs = dhdpcie_info->regs; - dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, DONGLE_TCM_MAP_SIZE); - dhdpcie_info->tcm_size = DONGLE_TCM_MAP_SIZE; + dhdpcie_info->tcm_size = + (bar1_size > DONGLE_TCM_MAP_SIZE) ? 
bar1_size : DONGLE_TCM_MAP_SIZE; + dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size); if (!dhdpcie_info->tcm) { DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__)); REG_UNMAP(dhdpcie_info->regs); @@ -1371,10 +1927,19 @@ static irqreturn_t wlan_oob_irq(int irq, void *data) DHD_TRACE(("%s: IRQ Triggered\n", __FUNCTION__)); bus = (dhd_bus_t *)data; dhdpcie_oob_intr_set(bus, FALSE); +#ifdef DHD_WAKE_STATUS +#ifdef DHD_PCIE_RUNTIMEPM + /* This condition is for avoiding counting of wake up from Runtime PM */ + if (bus->chk_pm) +#endif /* DHD_PCIE_RUNTIMPM */ + { + bcmpcie_set_get_wake(bus, 1); + } +#endif /* DHD_WAKE_STATUS */ #ifdef DHD_PCIE_RUNTIMEPM dhdpcie_runtime_bus_wake(bus->dhd, FALSE, wlan_oob_irq); #endif /* DHD_PCIE_RUNTIMPM */ - if (bus->dhd->up && bus->suspended) { + if (bus->dhd->up && bus->oob_presuspend) { DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(bus->dhd, OOB_WAKE_LOCK_TIMEOUT); } return IRQ_HANDLED; @@ -1485,6 +2050,105 @@ void dhdpcie_oob_intr_unregister(dhd_bus_t *bus) } #endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef PCIE_OOB +void dhdpcie_oob_init(dhd_bus_t *bus) +{ + gpio_handle_val = get_handle(OOB_PORT); + if (gpio_handle_val < 0) + { + DHD_ERROR(("%s: Could not get GPIO handle.\n", __FUNCTION__)); + ASSERT(FALSE); + } + + gpio_direction = 0; + ftdi_set_bitmode(gpio_handle_val, 0, BITMODE_BITBANG); + + /* Note BT core is also enabled here */ + gpio_port = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE; + gpio_write_port(gpio_handle_val, gpio_port); + + gpio_direction = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE; + ftdi_set_bitmode(gpio_handle_val, gpio_direction, BITMODE_BITBANG); + + bus->oob_enabled = TRUE; + bus->oob_presuspend = FALSE; + + /* drive the Device_Wake GPIO low on startup */ + bus->device_wake_state = TRUE; + dhd_bus_set_device_wake(bus, FALSE); + dhd_bus_doorbell_timeout_reset(bus); + +} + +void +dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val) +{ + DHD_INFO(("Set 
Device_Wake to %d\n", val)); + if (val) + { + gpio_port = gpio_port | (1 << BIT_BT_REG_ON); + gpio_write_port(gpio_handle_val, gpio_port); + } else { + gpio_port = gpio_port & (0xff ^ (1 << BIT_BT_REG_ON)); + gpio_write_port(gpio_handle_val, gpio_port); + } +} + +int +dhd_oob_get_bt_reg_on(struct dhd_bus *bus) +{ + int ret; + uint8 val; + ret = gpio_read_port(gpio_handle_val, &val); + + if (ret < 0) { + DHD_ERROR(("gpio_read_port returns %d\n", ret)); + return ret; + } + + if (val & (1 << BIT_BT_REG_ON)) + { + ret = 1; + } else { + ret = 0; + } + + return ret; +} + +int +dhd_os_oob_set_device_wake(struct dhd_bus *bus, bool val) +{ + if (bus->device_wake_state != val) + { + DHD_INFO(("Set Device_Wake to %d\n", val)); + + if (bus->oob_enabled && !bus->oob_presuspend) + { + if (val) + { + gpio_port = gpio_port | (1 << DEVICE_WAKE); + gpio_write_port_non_block(gpio_handle_val, gpio_port); + } else { + gpio_port = gpio_port & (0xff ^ (1 << DEVICE_WAKE)); + gpio_write_port_non_block(gpio_handle_val, gpio_port); + } + } + + bus->device_wake_state = val; + } + return BCME_OK; +} + +INLINE void +dhd_os_ib_set_device_wake(struct dhd_bus *bus, bool val) +{ + /* TODO: Currently Inband implementation of Device_Wake is not supported, + * so this function is left empty later this can be used to support the same. 
+ */ +} +#endif /* PCIE_OOB */ + #ifdef DHD_PCIE_RUNTIMEPM bool dhd_runtimepm_state(dhd_pub_t *dhd) { @@ -1493,21 +2157,15 @@ bool dhd_runtimepm_state(dhd_pub_t *dhd) bus = dhd->bus; DHD_GENERAL_LOCK(dhd, flags); - if (bus->suspended == TRUE) { - DHD_GENERAL_UNLOCK(dhd, flags); - DHD_INFO(("Bus is already suspended system PM: %d\n", bus->suspended)); - return FALSE; - } bus->idlecount++; DHD_TRACE(("%s : Enter \n", __FUNCTION__)); if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) { bus->idlecount = 0; - if (dhd->dhd_bus_busy_state == 0 && dhd->busstate != DHD_BUS_DOWN && - dhd->busstate != DHD_BUS_DOWN_IN_PROGRESS) { + if (DHD_BUS_BUSY_CHECK_IDLE(dhd) && !DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd)) { bus->bus_wake = 0; - dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS; + DHD_BUS_BUSY_SET_RPM_SUSPEND_IN_PROGRESS(dhd); bus->runtime_resume_done = FALSE; /* stop all interface network queue. */ dhd_bus_stop_queue(bus); @@ -1515,10 +2173,11 @@ bool dhd_runtimepm_state(dhd_pub_t *dhd) DHD_ERROR(("%s: DHD Idle state!! 
- idletime :%d, wdtick :%d \n", __FUNCTION__, bus->idletime, dhd_runtimepm_ms)); /* RPM suspend is failed, return FALSE then re-trying */ - if (dhdpcie_set_suspend_resume(bus->dev, TRUE)) { + if (dhdpcie_set_suspend_resume(bus, TRUE)) { DHD_ERROR(("%s: exit with wakelock \n", __FUNCTION__)); DHD_GENERAL_LOCK(dhd, flags); - dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS; + DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd); + dhd_os_busbusy_wake(bus->dhd); bus->runtime_resume_done = TRUE; /* It can make stuck NET TX Queue without below */ dhd_bus_start_queue(bus); @@ -1529,8 +2188,8 @@ bool dhd_runtimepm_state(dhd_pub_t *dhd) } DHD_GENERAL_LOCK(dhd, flags); - dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS; - dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_DONE; + DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd); + DHD_BUS_BUSY_SET_RPM_SUSPEND_DONE(dhd); /* For making sure NET TX Queue active */ dhd_bus_start_queue(bus); DHD_GENERAL_UNLOCK(dhd, flags); @@ -1538,14 +2197,15 @@ bool dhd_runtimepm_state(dhd_pub_t *dhd) wait_event_interruptible(bus->rpm_queue, bus->bus_wake); DHD_GENERAL_LOCK(dhd, flags); - dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_DONE; - dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS; + DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_DONE(dhd); + DHD_BUS_BUSY_SET_RPM_RESUME_IN_PROGRESS(dhd); DHD_GENERAL_UNLOCK(dhd, flags); - dhdpcie_set_suspend_resume(bus->dev, FALSE); + dhdpcie_set_suspend_resume(bus, FALSE); DHD_GENERAL_LOCK(dhd, flags); - dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS; + DHD_BUS_BUSY_CLEAR_RPM_RESUME_IN_PROGRESS(dhd); + dhd_os_busbusy_wake(bus->dhd); /* Inform the wake up context that Resume is over */ bus->runtime_resume_done = TRUE; /* For making sure NET TX Queue active */ @@ -1554,7 +2214,7 @@ bool dhd_runtimepm_state(dhd_pub_t *dhd) smp_wmb(); wake_up_interruptible(&bus->rpm_queue); - DHD_ERROR(("%s : runtime resume ended\n", __FUNCTION__)); + DHD_ERROR(("%s : 
runtime resume ended \n", __FUNCTION__)); return TRUE; } else { DHD_GENERAL_UNLOCK(dhd, flags); @@ -1587,10 +2247,10 @@ bool dhd_runtime_bus_wake(dhd_bus_t *bus, bool wait, void *func_addr) } DHD_GENERAL_LOCK(bus->dhd, flags); - if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_ALL) { + if (DHD_BUS_BUSY_CHECK_RPM_ALL(bus->dhd)) { /* Wake up RPM state thread if it is suspend in progress or suspended */ - if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS || - bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_DONE) { + if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(bus->dhd) || + DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) { bus->bus_wake = 1; DHD_GENERAL_UNLOCK(bus->dhd, flags); @@ -1599,7 +2259,7 @@ bool dhd_runtime_bus_wake(dhd_bus_t *bus, bool wait, void *func_addr) smp_wmb(); wake_up_interruptible(&bus->rpm_queue); /* No need to wake up the RPM state thread */ - } else if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS) { + } else if (DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(bus->dhd)) { DHD_GENERAL_UNLOCK(bus->dhd, flags); } @@ -1636,3 +2296,51 @@ bool dhdpcie_is_resume_done(dhd_pub_t *dhdp) return bus->runtime_resume_done; } #endif /* DHD_PCIE_RUNTIMEPM */ +struct device * dhd_bus_to_dev(dhd_bus_t *bus) +{ + struct pci_dev *pdev; + pdev = bus->dev; + + if (pdev) + return &pdev->dev; + else + return NULL; +} +#ifdef HOFFLOAD_MODULES +void +dhd_free_module_memory(struct dhd_bus *bus, struct module_metadata *hmem) +{ + struct device *dev = &bus->dev->dev; + if (hmem) { + dma_unmap_single(dev, (dma_addr_t) hmem->data_addr, hmem->size, DMA_TO_DEVICE); + kfree(hmem->data); + hmem->data = NULL; + hmem->size = 0; + } else { + DHD_ERROR(("dev:%p pci unmapping error\n", dev)); + } +} + +void * +dhd_alloc_module_memory(struct dhd_bus *bus, uint32_t size, struct module_metadata *hmem) +{ + struct device *dev = &bus->dev->dev; + if (!hmem->data) { + hmem->data = kzalloc(size, GFP_KERNEL); + if (!hmem->data) { + 
DHD_ERROR(("dev:%p mem alloc failure\n", dev)); + return NULL; + } + } + hmem->size = size; + DHD_INFO(("module size: 0x%x \n", hmem->size)); + hmem->data_addr = (u64) dma_map_single(dev, hmem->data, hmem->size, DMA_TO_DEVICE); + if (dma_mapping_error(dev, hmem->data_addr)) { + DHD_ERROR(("dev:%p dma mapping error\n", dev)); + kfree(hmem->data); + hmem->data = NULL; + return hmem->data; + } + return hmem->data; +} +#endif /* HOFFLOAD_MODULES */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_pno.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_pno.c index a0eddd2e892e..c553733f682e 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_pno.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_pno.c @@ -2,7 +2,7 @@ * Broadcom Dongle Host Driver (DHD) * Prefered Network Offload and Wi-Fi Location Service(WLS) code. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: dhd_pno.c 606280 2015-12-15 05:28:25Z $ + * $Id: dhd_pno.c 707287 2017-06-27 06:44:29Z $ */ #if defined(GSCAN_SUPPORT) && !defined(PNO_SUPPORT) @@ -48,13 +48,16 @@ #include #include -#include +#include #include #include #include #ifdef GSCAN_SUPPORT #include #endif /* GSCAN_SUPPORT */ +#ifdef WL_CFG80211 +#include +#endif /* WL_CFG80211 */ #ifdef __BIG_ENDIAN #include @@ -98,13 +101,23 @@ #define ENTRY_OVERHEAD strlen("bssid=\nssid=\nfreq=\nlevel=\nage=\ndist=\ndistSd=\n====") #define TIME_MIN_DIFF 5 -static wlc_ssid_ext_t * dhd_pno_get_legacy_pno_ssid(dhd_pub_t *dhd, - dhd_pno_status_info_t *pno_state); + +#define EVENT_DATABUF_MAXLEN (512 - sizeof(bcm_event_t)) +#define EVENT_MAX_NETCNT_V1 \ + ((EVENT_DATABUF_MAXLEN - sizeof(wl_pfn_scanresults_v1_t)) \ + / sizeof(wl_pfn_net_info_v1_t) + 1) +#define EVENT_MAX_NETCNT_V2 \ + 
((EVENT_DATABUF_MAXLEN - sizeof(wl_pfn_scanresults_v2_t)) \ + / sizeof(wl_pfn_net_info_v2_t) + 1) + #ifdef GSCAN_SUPPORT -static wl_pfn_gscan_channel_bucket_t * +static int _dhd_pno_flush_ssid(dhd_pub_t *dhd); +static wl_pfn_gscan_ch_bucket_cfg_t * dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd, dhd_pno_status_info_t *pno_state, -uint16 *chan_list, uint32 *num_buckets, uint32 *num_buckets_to_fw); + uint16 *chan_list, uint32 *num_buckets, uint32 *num_buckets_to_fw); #endif /* GSCAN_SUPPORT */ +static int dhd_pno_set_legacy_pno(dhd_pub_t *dhd, uint16 scan_fr, int pno_repeat, + int pno_freq_expo_max, uint16 *channel_list, int nchan); static inline bool is_dfs(uint16 channel) @@ -127,14 +140,14 @@ dhd_pno_clean(dhd_pub_t *dhd) _pno_state = PNO_GET_PNOSTATE(dhd); DHD_PNO(("%s enter\n", __FUNCTION__)); /* Disable PNO */ - err = dhd_iovar(dhd, 0, "pfn", (char *)&pfn, sizeof(pfn), 1); + err = dhd_iovar(dhd, 0, "pfn", (char *)&pfn, sizeof(pfn), NULL, 0, TRUE); if (err < 0) { DHD_ERROR(("%s : failed to execute pfn(error : %d)\n", __FUNCTION__, err)); goto exit; } _pno_state->pno_status = DHD_PNO_DISABLED; - err = dhd_iovar(dhd, 0, "pfnclear", NULL, 0, 1); + err = dhd_iovar(dhd, 0, "pfnclear", NULL, 0, NULL, 0, TRUE); if (err < 0) { DHD_ERROR(("%s : failed to execute pfnclear(error : %d)\n", __FUNCTION__, err)); @@ -157,37 +170,132 @@ dhd_is_pno_supported(dhd_pub_t *dhd) return WLS_SUPPORTED(_pno_state); } -int -dhd_pno_set_mac_oui(dhd_pub_t *dhd, uint8 *oui) +bool +dhd_is_legacy_pno_enabled(dhd_pub_t *dhd) { - int err = BCME_OK; dhd_pno_status_info_t *_pno_state; if (!dhd || !dhd->pno_state) { - DHD_ERROR(("NULL POINTER : %s\n", __FUNCTION__)); - return BCME_ERROR; + DHD_ERROR(("NULL POINTER : %s\n", + __FUNCTION__)); + return FALSE; } _pno_state = PNO_GET_PNOSTATE(dhd); - if (ETHER_ISMULTI(oui)) { - DHD_ERROR(("Expected unicast OUI\n")); - err = BCME_ERROR; - } else { - memcpy(_pno_state->pno_oui, oui, DOT11_OUI_LEN); - DHD_PNO(("PNO mac oui to be used - 
%02x:%02x:%02x\n", _pno_state->pno_oui[0], - _pno_state->pno_oui[1], _pno_state->pno_oui[2])); - } - - return err; + return ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) != 0); } #ifdef GSCAN_SUPPORT static uint64 -convert_fw_rel_time_to_systime(uint32 fw_ts_ms) +convert_fw_rel_time_to_systime(struct timespec *ts, uint32 fw_ts_ms) { - struct timespec ts; + return ((uint64)(TIMESPEC_TO_US(*ts)) - (uint64)(fw_ts_ms * 1000)); +} - get_monotonic_boottime(&ts); - return ((uint64)(TIMESPEC_TO_US(ts)) - (uint64)(fw_ts_ms * 1000)); +static void +dhd_pno_idx_to_ssid(struct dhd_pno_gscan_params *gscan_params, + dhd_epno_results_t *res, uint32 idx) +{ + dhd_pno_ssid_t *iter, *next; + int i; + + /* If idx doesn't make sense */ + if (idx >= gscan_params->epno_cfg.num_epno_ssid) { + DHD_ERROR(("No match, idx %d num_ssid %d\n", idx, + gscan_params->epno_cfg.num_epno_ssid)); + goto exit; + } + + if (gscan_params->epno_cfg.num_epno_ssid > 0) { + i = 0; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + list_for_each_entry_safe(iter, next, + &gscan_params->epno_cfg.epno_ssid_list, list) { + if (i++ == idx) { + memcpy(res->ssid, iter->SSID, iter->SSID_len); + res->ssid_len = iter->SSID_len; + return; + } + } + } +exit: + /* If we are here then there was no match */ + res->ssid[0] = '\0'; + res->ssid_len = 0; + return; +} + +/* Translate HAL flag bitmask to BRCM FW flag bitmask */ +void dhd_pno_translate_epno_fw_flags(uint32 *flags) +{ + uint32 in_flags, fw_flags = 0; + in_flags = *flags; + + if (in_flags & DHD_EPNO_A_BAND_TRIG) { + fw_flags |= WL_PFN_SSID_A_BAND_TRIG; + } + + if (in_flags & DHD_EPNO_BG_BAND_TRIG) { + fw_flags |= WL_PFN_SSID_BG_BAND_TRIG; + } + + if (!(in_flags & DHD_EPNO_STRICT_MATCH) && + !(in_flags & DHD_EPNO_HIDDEN_SSID)) { + fw_flags |= WL_PFN_SSID_IMPRECISE_MATCH; + } + + if (in_flags & DHD_EPNO_SAME_NETWORK) { + fw_flags |= WL_PFN_SSID_SAME_NETWORK; + } + + /* Add any hard 
coded flags needed */ + fw_flags |= WL_PFN_SUPPRESS_AGING_MASK; + *flags = fw_flags; + + return; +} + +/* Translate HAL auth bitmask to BRCM FW bitmask */ +void dhd_pno_set_epno_auth_flag(uint32 *wpa_auth) +{ + switch (*wpa_auth) { + case DHD_PNO_AUTH_CODE_OPEN: + *wpa_auth = WPA_AUTH_DISABLED; + break; + case DHD_PNO_AUTH_CODE_PSK: + *wpa_auth = (WPA_AUTH_PSK | WPA2_AUTH_PSK); + break; + case DHD_PNO_AUTH_CODE_EAPOL: + *wpa_auth = ~WPA_AUTH_NONE; + break; + default: + DHD_ERROR(("%s: Unknown auth %d", __FUNCTION__, *wpa_auth)); + *wpa_auth = WPA_AUTH_PFN_ANY; + break; + } + return; +} + +/* Cleanup all results */ +static void +dhd_gscan_clear_all_batch_results(dhd_pub_t *dhd) +{ + struct dhd_pno_gscan_params *gscan_params; + dhd_pno_status_info_t *_pno_state; + gscan_results_cache_t *iter; + + _pno_state = PNO_GET_PNOSTATE(dhd); + gscan_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan; + iter = gscan_params->gscan_batch_cache; + /* Mark everything as consumed */ + while (iter) { + iter->tot_consumed = iter->tot_count; + iter = iter->next; + } + dhd_gscan_batch_cache_cleanup(dhd); + return; } static int @@ -198,7 +306,7 @@ _dhd_pno_gscan_cfg(dhd_pub_t *dhd, wl_pfn_gscan_cfg_t *pfncfg_gscan_param, int s DHD_PNO(("%s enter\n", __FUNCTION__)); - err = dhd_iovar(dhd, 0, "pfn_gscan_cfg", (char *)pfncfg_gscan_param, size, 1); + err = dhd_iovar(dhd, 0, "pfn_gscan_cfg", (char *)pfncfg_gscan_param, size, NULL, 0, TRUE); if (err < 0) { DHD_ERROR(("%s : failed to execute pfncfg_gscan_param\n", __FUNCTION__)); goto exit; @@ -207,6 +315,22 @@ exit: return err; } +#ifdef GSCAN_SUPPORT +static int +_dhd_pno_flush_ssid(dhd_pub_t *dhd) +{ + int err; + wl_pfn_t pfn_elem; + memset(&pfn_elem, 0, sizeof(wl_pfn_t)); + pfn_elem.flags = htod32(WL_PFN_FLUSH_ALL_SSIDS); + err = dhd_iovar(dhd, 0, "pfn_add", (char *)&pfn_elem, sizeof(wl_pfn_t), NULL, 0, TRUE); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__)); + } + return err; +} +#endif 
/* GSCAN_SUPPORT */ + static bool is_batch_retrieval_complete(struct dhd_pno_gscan_params *gscan_params) { @@ -215,28 +339,6 @@ is_batch_retrieval_complete(struct dhd_pno_gscan_params *gscan_params) } #endif /* GSCAN_SUPPORT */ -static int -dhd_pno_set_mac_addr(dhd_pub_t *dhd, struct ether_addr *macaddr) -{ - int err; - wl_pfn_macaddr_cfg_t cfg; - - cfg.version = WL_PFN_MACADDR_CFG_VER; - if (ETHER_ISNULLADDR(macaddr)) { - cfg.flags = 0; - } else { - cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK); - } - memcpy(&cfg.macaddr, macaddr, ETHER_ADDR_LEN); - - err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&cfg, sizeof(cfg), 1); - if (err < 0) { - DHD_ERROR(("%s : failed to execute pfn_macaddr\n", __FUNCTION__)); - } - - return err; -} - static int _dhd_pno_suspend(dhd_pub_t *dhd) { @@ -248,7 +350,7 @@ _dhd_pno_suspend(dhd_pub_t *dhd) DHD_PNO(("%s enter\n", __FUNCTION__)); _pno_state = PNO_GET_PNOSTATE(dhd); - err = dhd_iovar(dhd, 0, "pfn_suspend", (char *)&suspend, sizeof(suspend), 1); + err = dhd_iovar(dhd, 0, "pfn_suspend", (char *)&suspend, sizeof(suspend), NULL, 0, TRUE); if (err < 0) { DHD_ERROR(("%s : failed to suspend pfn(error :%d)\n", __FUNCTION__, err)); goto exit; @@ -288,7 +390,7 @@ _dhd_pno_enable(dhd_pub_t *dhd, int enable) } } /* Enable/Disable PNO */ - err = dhd_iovar(dhd, 0, "pfn", (char *)&enable, sizeof(enable), 1); + err = dhd_iovar(dhd, 0, "pfn", (char *)&enable, sizeof(enable), NULL, 0, TRUE); if (err < 0) { DHD_ERROR(("%s : failed to execute pfn_set - %d\n", __FUNCTION__, err)); goto exit; @@ -312,7 +414,6 @@ _dhd_pno_set(dhd_pub_t *dhd, const dhd_pno_params_t *pno_params, dhd_pno_mode_t dhd_pno_params_t *_params; dhd_pno_status_info_t *_pno_state; bool combined_scan = FALSE; - struct ether_addr macaddr; DHD_PNO(("%s enter\n", __FUNCTION__)); NULL_CHECK(dhd, "dhd is NULL", err); @@ -427,20 +528,24 @@ _dhd_pno_set(dhd_pub_t *dhd, const dhd_pno_params_t *pno_params, dhd_pno_mode_t } /* RSSI margin of 30 dBm */ 
pfn_param.rssi_margin = htod16(PNO_RSSI_MARGIN_DBM); - /* ADAPTIVE turned off */ - pfn_param.flags &= ~(htod16(ENABLE << ENABLE_ADAPTSCAN_BIT)); pfn_param.repeat = 0; pfn_param.exp = 0; pfn_param.slow_freq = 0; + pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT); if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { - dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); - dhd_pno_params_t *_params; + dhd_pno_params_t *params; - _params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); + params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); - pfn_param.scan_freq = htod32(MIN(pno_params->params_gscan.scan_fr, - _params->params_legacy.scan_fr)); + pfn_param.scan_freq = gcd(pno_params->params_gscan.scan_fr, + params->params_legacy.scan_fr); + + if ((params->params_legacy.pno_repeat != 0) || + (params->params_legacy.pno_freq_expo_max != 0)) { + pfn_param.repeat = (uchar) (params->params_legacy.pno_repeat); + pfn_param.exp = (uchar) (params->params_legacy.pno_freq_expo_max); + } } lost_network_timeout = (pno_params->params_gscan.max_ch_bucket_freq * @@ -464,18 +569,13 @@ _dhd_pno_set(dhd_pub_t *dhd, const dhd_pno_params_t *pno_params, dhd_pno_mode_t } } - memset(&macaddr, 0, ETHER_ADDR_LEN); - memcpy(&macaddr, _pno_state->pno_oui, DOT11_OUI_LEN); - - DHD_PNO(("Setting mac oui to FW - %02x:%02x:%02x\n", _pno_state->pno_oui[0], - _pno_state->pno_oui[1], _pno_state->pno_oui[2])); - err = dhd_pno_set_mac_addr(dhd, &macaddr); - if (err < 0) { - DHD_ERROR(("%s : failed to set pno mac address, error - %d\n", __FUNCTION__, err)); + err = dhd_set_rand_mac_oui(dhd); + /* Ignore if chip doesnt support the feature */ + if (err < 0 && err != BCME_UNSUPPORTED) { + DHD_ERROR(("%s : failed to set random mac for PNO scan, %d\n", __FUNCTION__, err)); goto exit; } - #ifdef GSCAN_SUPPORT if (mode == DHD_PNO_BATCH_MODE || ((mode & DHD_PNO_GSCAN_MODE) && pno_params->params_gscan.mscan)) @@ -485,21 +585,22 @@ _dhd_pno_set(dhd_pub_t *dhd, const dhd_pno_params_t 
*pno_params, dhd_pno_mode_t { int _tmp = pfn_param.bestn; /* set bestn to calculate the max mscan which firmware supports */ - err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), 1); + err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), NULL, 0, TRUE); if (err < 0) { DHD_ERROR(("%s : failed to set pfnmem\n", __FUNCTION__)); goto exit; } /* get max mscan which the firmware supports */ - err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), 0); + err = dhd_iovar(dhd, 0, "pfnmem", NULL, 0, (char *)&_tmp, sizeof(_tmp), FALSE); if (err < 0) { DHD_ERROR(("%s : failed to get pfnmem\n", __FUNCTION__)); goto exit; } - DHD_PNO((" returned mscan : %d, set bestn : %d\n", _tmp, pfn_param.bestn)); pfn_param.mscan = MIN(pfn_param.mscan, _tmp); + DHD_PNO((" returned mscan : %d, set bestn : %d mscan %d\n", _tmp, pfn_param.bestn, + pfn_param.mscan)); } - err = dhd_iovar(dhd, 0, "pfn_set", (char *)&pfn_param, sizeof(pfn_param), 1); + err = dhd_iovar(dhd, 0, "pfn_set", (char *)&pfn_param, sizeof(pfn_param), NULL, 0, TRUE); if (err < 0) { DHD_ERROR(("%s : failed to execute pfn_set %d\n", __FUNCTION__, err)); goto exit; @@ -511,54 +612,58 @@ exit: } static int -_dhd_pno_add_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssids_list, int nssid) +_dhd_pno_add_ssid(dhd_pub_t *dhd, struct list_head *ssid_list, int nssid) { int err = BCME_OK; - int i = 0; - wl_pfn_t pfn_element; + int i = 0, mem_needed; + wl_pfn_t *pfn_elem_buf; + struct dhd_pno_ssid *iter, *next; + NULL_CHECK(dhd, "dhd is NULL", err); - if (nssid) { - NULL_CHECK(ssids_list, "ssid list is NULL", err); + if (!nssid) { + NULL_CHECK(ssid_list, "ssid list is NULL", err); + return BCME_ERROR; } - memset(&pfn_element, 0, sizeof(pfn_element)); - { - int j; - for (j = 0; j < nssid; j++) { - DHD_PNO(("%d: scan for %s size = %d hidden = %d\n", j, - ssids_list[j].SSID, ssids_list[j].SSID_len, ssids_list[j].hidden)); + mem_needed = (sizeof(wl_pfn_t) * nssid); + pfn_elem_buf = (wl_pfn_t *) kzalloc(mem_needed, 
GFP_KERNEL); + if (!pfn_elem_buf) { + DHD_ERROR(("%s: Can't malloc %d bytes!\n", __FUNCTION__, mem_needed)); + return BCME_NOMEM; + } + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + list_for_each_entry_safe(iter, next, ssid_list, list) { + pfn_elem_buf[i].infra = htod32(1); + pfn_elem_buf[i].auth = htod32(DOT11_OPEN_SYSTEM); + pfn_elem_buf[i].wpa_auth = htod32(iter->wpa_auth); + pfn_elem_buf[i].flags = htod32(iter->flags); + if (iter->hidden) + pfn_elem_buf[i].flags |= htod32(ENABLE << WL_PFN_HIDDEN_BIT); + /* If a single RSSI threshold is defined, use that */ +#ifdef PNO_MIN_RSSI_TRIGGER + pfn_elem_buf[i].flags |= ((PNO_MIN_RSSI_TRIGGER & 0xFF) << WL_PFN_RSSI_SHIFT); +#else + pfn_elem_buf[i].flags |= ((iter->rssi_thresh & 0xFF) << WL_PFN_RSSI_SHIFT); +#endif /* PNO_MIN_RSSI_TRIGGER */ + memcpy((char *)pfn_elem_buf[i].ssid.SSID, iter->SSID, + iter->SSID_len); + pfn_elem_buf[i].ssid.SSID_len = iter->SSID_len; + DHD_PNO(("%s size = %d hidden = %d flags = %x rssi_thresh %d\n", + iter->SSID, iter->SSID_len, iter->hidden, + iter->flags, iter->rssi_thresh)); + if (++i >= nssid) { + /* shouldn't happen */ + break; } } - /* Check for broadcast ssid */ - for (i = 0; i < nssid; i++) { - if (!ssids_list[i].SSID_len) { - DHD_ERROR(("%d: Broadcast SSID is ilegal for PNO setting\n", i)); - err = BCME_ERROR; - goto exit; - } + err = dhd_iovar(dhd, 0, "pfn_add", (char *)pfn_elem_buf, mem_needed, NULL, 0, TRUE); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__)); } - /* set all pfn ssid */ - for (i = 0; i < nssid; i++) { - pfn_element.infra = htod32(DOT11_BSSTYPE_INFRASTRUCTURE); - pfn_element.auth = (DOT11_OPEN_SYSTEM); - pfn_element.wpa_auth = htod32(WPA_AUTH_PFN_ANY); - pfn_element.wsec = htod32(0); - pfn_element.infra = htod32(1); - if (ssids_list[i].hidden) { - pfn_element.flags = htod32(ENABLE << WL_PFN_HIDDEN_BIT); - } else { - pfn_element.flags = 0; - 
} - memcpy((char *)pfn_element.ssid.SSID, ssids_list[i].SSID, - ssids_list[i].SSID_len); - pfn_element.ssid.SSID_len = ssids_list[i].SSID_len; - err = dhd_iovar(dhd, 0, "pfn_add", (char *)&pfn_element, - sizeof(pfn_element), 1); - if (err < 0) { - DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__)); - goto exit; - } - } -exit: + kfree(pfn_elem_buf); return err; } @@ -566,7 +671,7 @@ exit: static int _dhd_pno_cmpfunc(const void *a, const void *b) { - return (*(uint16*)a - *(uint16*)b); + return (*(const uint16*)a - *(const uint16*)b); } static int @@ -617,6 +722,7 @@ _dhd_pno_get_channels(dhd_pub_t *dhd, uint16 *d_chan_list, if (*nchan) { NULL_CHECK(d_chan_list, "d_chan_list is NULL", err); } + memset(&chan_buf, 0, sizeof(chan_buf)); list = (wl_uint32_list_t *) (void *)chan_buf; list->count = htod32(WL_NUMCHANNELS); err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, chan_buf, sizeof(chan_buf), FALSE, 0); @@ -634,15 +740,15 @@ _dhd_pno_get_channels(dhd_pub_t *dhd, uint16 *d_chan_list, if (skip_dfs && is_dfs(dtoh32(list->element[i]))) continue; - - } else if (band == WLC_BAND_AUTO) { - if (skip_dfs || !is_dfs(dtoh32(list->element[i]))) + } else if (band == WLC_BAND_AUTO) { + if (skip_dfs || !is_dfs(dtoh32(list->element[i]))) continue; - } else { /* All channels */ - if (skip_dfs && is_dfs(dtoh32(list->element[i]))) + + } else { /* All channels */ + if (skip_dfs && is_dfs(dtoh32(list->element[i]))) continue; } - if (dtoh32(list->element[i]) <= CHANNEL_5G_MAX) { + if (dtoh32(list->element[i]) <= CHANNEL_5G_MAX) { d_chan_list[j++] = (uint16) dtoh32(list->element[i]); } else { err = BCME_BADCHAN; @@ -680,15 +786,22 @@ _dhd_pno_convert_format(dhd_pub_t *dhd, struct dhd_pno_batch_params *params_batc DHD_PNO(("%s enter \n", __FUNCTION__)); /* # of scans */ if (!params_batch->get_batch.batch_started) { - bp += nreadsize = sprintf(bp, "scancount=%d\n", + bp += nreadsize = snprintf(bp, nleftsize, "scancount=%d\n", params_batch->get_batch.expired_tot_scan_cnt); 
nleftsize -= nreadsize; params_batch->get_batch.batch_started = TRUE; } DHD_PNO(("%s scancount %d\n", __FUNCTION__, params_batch->get_batch.expired_tot_scan_cnt)); /* preestimate scan count until which scan result this report is going to end */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif list_for_each_entry_safe(siter, snext, ¶ms_batch->get_batch.expired_scan_results_list, list) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif phead = siter->bestnetheader; while (phead != NULL) { /* if left_size is less than bestheader total size , stop this */ @@ -701,45 +814,53 @@ _dhd_pno_convert_format(dhd_pub_t *dhd, struct dhd_pno_batch_params *params_batc DHD_PNO(("\n\n", cnt - 1, phead->tot_cnt)); /* attribute of the scan */ if (phead->reason & PNO_STATUS_ABORT_MASK) { - bp += nreadsize = sprintf(bp, "trunc\n"); + bp += nreadsize = snprintf(bp, nleftsize, "trunc\n"); nleftsize -= nreadsize; } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif list_for_each_entry_safe(iter, next, &phead->entry_list, list) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif t_delta = jiffies_to_msecs(jiffies - iter->recorded_time); #ifdef PNO_DEBUG _base_bp = bp; memset(msg, 0, sizeof(msg)); #endif /* BSSID info */ - bp += nreadsize = sprintf(bp, "bssid=%s\n", + bp += nreadsize = snprintf(bp, nleftsize, "bssid=%s\n", bcm_ether_ntoa((const struct ether_addr *)&iter->BSSID, eabuf)); nleftsize -= nreadsize; /* SSID */ - bp += nreadsize = sprintf(bp, "ssid=%s\n", iter->SSID); + bp += nreadsize = snprintf(bp, nleftsize, "ssid=%s\n", iter->SSID); nleftsize -= nreadsize; /* channel */ - bp += nreadsize = sprintf(bp, "freq=%d\n", + bp += nreadsize = snprintf(bp, nleftsize, "freq=%d\n", wf_channel2mhz(iter->channel, iter->channel <= 
CH_MAX_2G_CHANNEL? WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); nleftsize -= nreadsize; /* RSSI */ - bp += nreadsize = sprintf(bp, "level=%d\n", iter->RSSI); + bp += nreadsize = snprintf(bp, nleftsize, "level=%d\n", iter->RSSI); nleftsize -= nreadsize; /* add the time consumed in Driver to the timestamp of firmware */ iter->timestamp += t_delta; - bp += nreadsize = sprintf(bp, "age=%d\n", iter->timestamp); + bp += nreadsize = snprintf(bp, nleftsize, + "age=%d\n", iter->timestamp); nleftsize -= nreadsize; /* RTT0 */ - bp += nreadsize = sprintf(bp, "dist=%d\n", + bp += nreadsize = snprintf(bp, nleftsize, "dist=%d\n", (iter->rtt0 == 0)? -1 : iter->rtt0); nleftsize -= nreadsize; /* RTT1 */ - bp += nreadsize = sprintf(bp, "distSd=%d\n", + bp += nreadsize = snprintf(bp, nleftsize, "distSd=%d\n", (iter->rtt0 == 0)? -1 : iter->rtt1); nleftsize -= nreadsize; - bp += nreadsize = sprintf(bp, "%s", AP_END_MARKER); + bp += nreadsize = snprintf(bp, nleftsize, "%s", AP_END_MARKER); nleftsize -= nreadsize; list_del(&iter->list); MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE); @@ -748,7 +869,7 @@ _dhd_pno_convert_format(dhd_pub_t *dhd, struct dhd_pno_batch_params *params_batc DHD_PNO(("Entry : \n%s", msg)); #endif } - bp += nreadsize = sprintf(bp, "%s", SCAN_END_MARKER); + bp += nreadsize = snprintf(bp, nleftsize, "%s", SCAN_END_MARKER); DHD_PNO(("%s", SCAN_END_MARKER)); nleftsize -= nreadsize; pprev = phead; @@ -772,9 +893,16 @@ exit: } params_batch->get_batch.expired_tot_scan_cnt -= cnt; /* set FALSE only if the link list is empty after returning the data */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif if (list_empty(¶ms_batch->get_batch.expired_scan_results_list)) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif params_batch->get_batch.batch_started = FALSE; - bp += sprintf(bp, "%s", RESULTS_END_MARKER); + bp += snprintf(bp, nleftsize, "%s", 
RESULTS_END_MARKER); DHD_PNO(("%s", RESULTS_END_MARKER)); DHD_PNO(("%s : Getting the batching data is complete\n", __FUNCTION__)); } @@ -794,6 +922,10 @@ _dhd_pno_clear_all_batch_results(dhd_pub_t *dhd, struct list_head *head, bool on NULL_CHECK(head, "head is NULL", err); NULL_CHECK(head->next, "head->next is NULL", err); DHD_PNO(("%s enter\n", __FUNCTION__)); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif list_for_each_entry_safe(siter, snext, head, list) { if (only_last) { @@ -822,6 +954,9 @@ _dhd_pno_clear_all_batch_results(dhd_pub_t *dhd, struct list_head *head, bool on MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE); } } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif return removed_scan_cnt; } @@ -835,17 +970,21 @@ _dhd_pno_cfg(dhd_pub_t *dhd, uint16 *channel_list, int nchan) if (nchan) { NULL_CHECK(channel_list, "nchan is NULL", err); } + if (nchan > WL_NUMCHANNELS) { + return BCME_RANGE; + } DHD_PNO(("%s enter : nchan : %d\n", __FUNCTION__, nchan)); memset(&pfncfg_param, 0, sizeof(wl_pfn_cfg_t)); /* Setup default values */ pfncfg_param.reporttype = htod32(WL_PFN_REPORT_ALLNET); pfncfg_param.channel_num = htod32(0); - for (i = 0; i < nchan && nchan < WL_NUMCHANNELS; i++) + for (i = 0; i < nchan; i++) pfncfg_param.channel_list[i] = channel_list[i]; pfncfg_param.channel_num = htod32(nchan); - err = dhd_iovar(dhd, 0, "pfn_cfg", (char *)&pfncfg_param, sizeof(pfncfg_param), 1); + err = dhd_iovar(dhd, 0, "pfn_cfg", (char *)&pfncfg_param, sizeof(pfncfg_param), NULL, 0, + TRUE); if (err < 0) { DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__)); goto exit; @@ -867,12 +1006,19 @@ _dhd_pno_reinitialize_prof(dhd_pub_t *dhd, dhd_pno_params_t *params, dhd_pno_mod case DHD_PNO_LEGACY_MODE: { struct dhd_pno_ssid *iter, *next; if (params->params_legacy.nssid > 0) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC 
diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif list_for_each_entry_safe(iter, next, ¶ms->params_legacy.ssid_list, list) { list_del(&iter->list); kfree(iter); } } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif params->params_legacy.nssid = 0; params->params_legacy.scan_fr = 0; params->params_legacy.pno_freq_expo_max = 0; @@ -910,11 +1056,18 @@ _dhd_pno_reinitialize_prof(dhd_pub_t *dhd, dhd_pno_params_t *params, dhd_pno_mod case DHD_PNO_HOTLIST_MODE: { struct dhd_pno_bssid *iter, *next; if (params->params_hotlist.nbssid > 0) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif list_for_each_entry_safe(iter, next, ¶ms->params_hotlist.bssid_list, list) { list_del(&iter->list); kfree(iter); } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif } params->params_hotlist.scan_fr = 0; params->params_hotlist.nbssid = 0; @@ -940,7 +1093,7 @@ _dhd_pno_add_bssid(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, int nbssid) NULL_CHECK(p_pfn_bssid, "bssid list is NULL", err); } err = dhd_iovar(dhd, 0, "pfn_add_bssid", (char *)p_pfn_bssid, - sizeof(wl_pfn_bssid_t) * nbssid, 1); + sizeof(wl_pfn_bssid_t) * nbssid, NULL, 0, TRUE); if (err < 0) { DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__)); goto exit; @@ -949,32 +1102,6 @@ exit: return err; } -#ifdef GSCAN_SUPPORT -static int -_dhd_pno_add_significant_bssid(dhd_pub_t *dhd, - wl_pfn_significant_bssid_t *p_pfn_significant_bssid, int nbssid) -{ - int err = BCME_OK; - NULL_CHECK(dhd, "dhd is NULL", err); - - if (!nbssid) { - err = BCME_ERROR; - goto exit; - } - - NULL_CHECK(p_pfn_significant_bssid, "bssid list is NULL", err); - - err = dhd_iovar(dhd, 0, "pfn_add_swc_bssid", (char *)p_pfn_significant_bssid, - sizeof(wl_pfn_significant_bssid_t) * nbssid, 1); - if (err < 0) { - DHD_ERROR(("%s : failed to execute pfn_significant_bssid 
%d\n", __FUNCTION__, err)); - goto exit; - } -exit: - return err; -} -#endif /* GSCAN_SUPPORT */ - int dhd_pno_stop_for_ssid(dhd_pub_t *dhd) { @@ -998,19 +1125,25 @@ dhd_pno_stop_for_ssid(dhd_pub_t *dhd) _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; gscan_params = &_params->params_gscan; - - if (gscan_params->mscan) - dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); - /* save current pno_mode before calling dhd_pno_clean */ + if (gscan_params->mscan) { + /* retrieve the batching data from firmware into host */ + err = dhd_wait_batch_results_complete(dhd); + if (err != BCME_OK) + goto exit; + } + /* save current pno_mode before calling dhd_pno_clean */ + mutex_lock(&_pno_state->pno_mutex); mode = _pno_state->pno_mode; err = dhd_pno_clean(dhd); if (err < 0) { DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", __FUNCTION__, err)); + mutex_unlock(&_pno_state->pno_mutex); goto exit; } /* restore previous pno_mode */ _pno_state->pno_mode = mode; + mutex_unlock(&_pno_state->pno_mutex); /* Restart gscan */ err = dhd_pno_initiate_gscan_request(dhd, 1, 0); goto exit; @@ -1022,7 +1155,14 @@ dhd_pno_stop_for_ssid(dhd_pub_t *dhd) dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); /* save current pno_mode before calling dhd_pno_clean */ mode = _pno_state->pno_mode; - dhd_pno_clean(dhd); + err = dhd_pno_clean(dhd); + if (err < 0) { + err = BCME_ERROR; + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + /* restore previous pno_mode */ _pno_state->pno_mode = mode; if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { @@ -1050,6 +1190,10 @@ dhd_pno_stop_for_ssid(dhd_pub_t *dhd) goto exit; } /* convert dhd_pno_bssid to wl_pfn_bssid */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif list_for_each_entry_safe(iter, next, &_params->params_hotlist.bssid_list, list) { memcpy(&p_pfn_bssid->macaddr, @@ -1057,6 +1201,9 
@@ dhd_pno_stop_for_ssid(dhd_pub_t *dhd) p_pfn_bssid->flags = iter->flags; p_pfn_bssid++; } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist); if (err < 0) { _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; @@ -1087,41 +1234,11 @@ dhd_pno_enable(dhd_pub_t *dhd, int enable) return (_dhd_pno_enable(dhd, enable)); } -static wlc_ssid_ext_t * -dhd_pno_get_legacy_pno_ssid(dhd_pub_t *dhd, dhd_pno_status_info_t *pno_state) -{ - int err = BCME_OK; - int i; - struct dhd_pno_ssid *iter, *next; - dhd_pno_params_t *_params1 = &pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; - wlc_ssid_ext_t *p_ssid_list; - - p_ssid_list = kzalloc(sizeof(wlc_ssid_ext_t) * - _params1->params_legacy.nssid, GFP_KERNEL); - if (p_ssid_list == NULL) { - DHD_ERROR(("%s : failed to allocate wlc_ssid_ext_t array (count: %d)", - __FUNCTION__, _params1->params_legacy.nssid)); - err = BCME_ERROR; - pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; - goto exit; - } - i = 0; - /* convert dhd_pno_ssid to wlc_ssid_ext_t */ - list_for_each_entry_safe(iter, next, &_params1->params_legacy.ssid_list, list) { - p_ssid_list[i].SSID_len = iter->SSID_len; - p_ssid_list[i].hidden = iter->hidden; - memcpy(p_ssid_list[i].SSID, iter->SSID, p_ssid_list[i].SSID_len); - i++; - } -exit: - return p_ssid_list; -} - static int -dhd_pno_add_to_ssid_list(dhd_pno_params_t *params, wlc_ssid_ext_t *ssid_list, - int nssid) +dhd_pno_add_to_ssid_list(struct list_head *ptr, wlc_ssid_ext_t *ssid_list, + int nssid, int *num_ssid_added) { - int ret = 0; + int ret = BCME_OK; int i; struct dhd_pno_ssid *_pno_ssid; @@ -1132,6 +1249,13 @@ dhd_pno_add_to_ssid_list(dhd_pno_params_t *params, wlc_ssid_ext_t *ssid_list, ret = BCME_ERROR; goto exit; } + /* Check for broadcast ssid */ + if (!ssid_list[i].SSID_len) { + DHD_ERROR(("%d: Broadcast SSID is illegal for PNO setting\n", i)); + ret = BCME_ERROR; + goto exit; + } + _pno_ssid = 
kzalloc(sizeof(struct dhd_pno_ssid), GFP_KERNEL); if (_pno_ssid == NULL) { DHD_ERROR(("%s : failed to allocate struct dhd_pno_ssid\n", @@ -1141,11 +1265,16 @@ dhd_pno_add_to_ssid_list(dhd_pno_params_t *params, wlc_ssid_ext_t *ssid_list, } _pno_ssid->SSID_len = ssid_list[i].SSID_len; _pno_ssid->hidden = ssid_list[i].hidden; + _pno_ssid->rssi_thresh = ssid_list[i].rssi_thresh; + _pno_ssid->flags = ssid_list[i].flags; + _pno_ssid->wpa_auth = WPA_AUTH_PFN_ANY; + memcpy(_pno_ssid->SSID, ssid_list[i].SSID, _pno_ssid->SSID_len); - list_add_tail(&_pno_ssid->list, ¶ms->params_legacy.ssid_list); + list_add_tail(&_pno_ssid->list, ptr); } exit: + *num_ssid_added = i; return ret; } @@ -1153,26 +1282,64 @@ int dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid, uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan) { + + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + struct dhd_pno_legacy_params *params_legacy; + int err = BCME_OK; + + if (!dhd || !dhd->pno_state) { + DHD_ERROR(("%s: PNO Not enabled/Not ready\n", __FUNCTION__)); + return BCME_NOTREADY; + } + + if (!dhd_support_sta_mode(dhd)) { + return BCME_BADOPTION; + } + + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); + params_legacy = &(_params->params_legacy); + err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); + + if (err < 0) { + DHD_ERROR(("%s : failed to reinitialize profile (err %d)\n", + __FUNCTION__, err)); + return err; + } + + INIT_LIST_HEAD(¶ms_legacy->ssid_list); + + if (dhd_pno_add_to_ssid_list(¶ms_legacy->ssid_list, ssid_list, + nssid, ¶ms_legacy->nssid) < 0) { + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); + return BCME_ERROR; + } + + DHD_PNO(("%s enter : nssid %d, scan_fr :%d, pno_repeat :%d," + "pno_freq_expo_max: %d, nchan :%d\n", __FUNCTION__, + params_legacy->nssid, scan_fr, pno_repeat, pno_freq_expo_max, nchan)); + + return 
dhd_pno_set_legacy_pno(dhd, scan_fr, pno_repeat, + pno_freq_expo_max, channel_list, nchan); + +} + +static int +dhd_pno_set_legacy_pno(dhd_pub_t *dhd, uint16 scan_fr, int pno_repeat, + int pno_freq_expo_max, uint16 *channel_list, int nchan) +{ dhd_pno_params_t *_params; dhd_pno_params_t *_params2; dhd_pno_status_info_t *_pno_state; uint16 _chan_list[WL_NUMCHANNELS]; int32 tot_nchan = 0; int err = BCME_OK; - int i; + int i, nssid; int mode = 0; - NULL_CHECK(dhd, "dhd is NULL", err); - NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + struct list_head *ssid_list; + _pno_state = PNO_GET_PNOSTATE(dhd); - - if (!dhd_support_sta_mode(dhd)) { - err = BCME_BADOPTION; - goto exit_no_clear; - } - DHD_PNO(("%s enter : scan_fr :%d, pno_repeat :%d," - "pno_freq_expo_max: %d, nchan :%d\n", __FUNCTION__, - scan_fr, pno_repeat, pno_freq_expo_max, nchan)); - _params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); /* If GSCAN is also ON will handle this down below */ #ifdef GSCAN_SUPPORT @@ -1188,16 +1355,10 @@ dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid, if (err < 0) { DHD_ERROR(("%s : failed to stop legacy PNO (err %d)\n", __FUNCTION__, err)); - goto exit_no_clear; + return err; } } _pno_state->pno_mode |= DHD_PNO_LEGACY_MODE; - err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); - if (err < 0) { - DHD_ERROR(("%s : failed to reinitialize profile (err %d)\n", - __FUNCTION__, err)); - goto exit_no_clear; - } memset(_chan_list, 0, sizeof(_chan_list)); tot_nchan = MIN(nchan, WL_NUMCHANNELS); if (tot_nchan > 0 && channel_list) { @@ -1208,7 +1369,7 @@ dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid, else { tot_nchan = WL_NUMCHANNELS; err = _dhd_pno_get_channels(dhd, _chan_list, &tot_nchan, - (WLC_BAND_2G | WLC_BAND_5G), TRUE); + (WLC_BAND_2G | WLC_BAND_5G), FALSE); if (err < 0) { tot_nchan = 0; DHD_PNO(("Could not get channel list for PNO SSID\n")); @@ -1228,7 +1389,7 @@ 
dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid, err = _dhd_pno_enable(dhd, PNO_OFF); if (err < 0) { DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); - goto exit_no_clear; + goto exit; } /* restore the previous mode */ _pno_state->pno_mode = mode; @@ -1244,7 +1405,7 @@ dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid, DHD_ERROR(("%s : failed to merge channel list" " between legacy and batch\n", __FUNCTION__)); - goto exit_no_clear; + goto exit; } } else { DHD_PNO(("superset channel will use" @@ -1261,7 +1422,7 @@ dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid, DHD_ERROR(("%s : failed to merge channel list" " between legacy and hotlist\n", __FUNCTION__)); - goto exit_no_clear; + goto exit; } } } @@ -1270,13 +1431,18 @@ dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid, _params->params_legacy.pno_repeat = pno_repeat; _params->params_legacy.pno_freq_expo_max = pno_freq_expo_max; _params->params_legacy.nchan = tot_nchan; - _params->params_legacy.nssid = nssid; - INIT_LIST_HEAD(&_params->params_legacy.ssid_list); + ssid_list = &_params->params_legacy.ssid_list; + nssid = _params->params_legacy.nssid; + #ifdef GSCAN_SUPPORT /* dhd_pno_initiate_gscan_request will handle simultaneous Legacy PNO and GSCAN */ if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { - if (dhd_pno_add_to_ssid_list(_params, ssid_list, nssid) < 0) { - err = BCME_ERROR; + struct dhd_pno_gscan_params *gscan_params; + gscan_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan; + /* ePNO and Legacy PNO do not co-exist */ + if (gscan_params->epno_cfg.num_epno_ssid) { + DHD_PNO(("ePNO and Legacy PNO do not co-exist\n")); + err = BCME_EPERM; goto exit; } DHD_PNO(("GSCAN mode is ON! 
Will restart GSCAN+Legacy PNO\n")); @@ -1292,10 +1458,6 @@ dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid, DHD_ERROR(("failed to add ssid list(err %d), %d in firmware\n", err, nssid)); goto exit; } - if (dhd_pno_add_to_ssid_list(_params, ssid_list, nssid) < 0) { - err = BCME_ERROR; - goto exit; - } if (tot_nchan > 0) { if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) { DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n", @@ -1311,7 +1473,6 @@ exit: if (err < 0) { _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); } -exit_no_clear: /* clear mode in case of error */ if (err < 0) { int ret = dhd_pno_clean(dhd); @@ -1336,7 +1497,6 @@ dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params) dhd_pno_params_t *_params; dhd_pno_params_t *_params2; dhd_pno_status_info_t *_pno_state; - wlc_ssid_ext_t *p_ssid_list = NULL; NULL_CHECK(dhd, "dhd is NULL", err); NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); NULL_CHECK(batch_params, "batch_params is NULL", err); @@ -1432,14 +1592,8 @@ dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params) } else { DHD_PNO(("superset channel will use all channels in firmware\n")); } - p_ssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state); - if (!p_ssid_list) { - err = BCME_NOMEM; - DHD_ERROR(("failed to get Legacy PNO SSID list\n")); - goto exit; - } - if ((err = _dhd_pno_add_ssid(dhd, p_ssid_list, - _params2->params_legacy.nssid)) < 0) { + if ((err = _dhd_pno_add_ssid(dhd, &_params2->params_legacy.ssid_list, + _params2->params_legacy.nssid)) < 0) { DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err)); goto exit; } @@ -1471,13 +1625,81 @@ exit: /* return #max scan firmware can do */ err = mscan; } - if (p_ssid_list) - kfree(p_ssid_list); return err; } #ifdef GSCAN_SUPPORT + +static int +dhd_set_epno_params(dhd_pub_t *dhd, wl_pfn_ssid_params_t *params, bool set) +{ + wl_pfn_ssid_cfg_t cfg; + int err; + 
NULL_CHECK(dhd, "dhd is NULL\n", err); + memset(&cfg, 0, sizeof(wl_pfn_ssid_cfg_t)); + cfg.version = WL_PFN_SSID_CFG_VERSION; + + /* If asked to clear params (set == FALSE) just set the CLEAR bit */ + if (!set) + cfg.flags |= WL_PFN_SSID_CFG_CLEAR; + else if (params) + memcpy(&cfg.params, params, sizeof(wl_pfn_ssid_params_t)); + err = dhd_iovar(dhd, 0, "pfn_ssid_cfg", (char *)&cfg, + sizeof(wl_pfn_ssid_cfg_t), NULL, 0, TRUE); + if (err != BCME_OK) { + DHD_ERROR(("%s : Failed to execute pfn_ssid_cfg %d\n", __FUNCTION__, err)); + } + return err; +} + +int +dhd_pno_flush_fw_epno(dhd_pub_t *dhd) +{ + int err; + + NULL_CHECK(dhd, "dhd is NULL\n", err); + + err = dhd_set_epno_params(dhd, NULL, FALSE); + if (err < 0) { + DHD_ERROR(("failed to set ePNO params %d\n", err)); + return err; + } + err = _dhd_pno_flush_ssid(dhd); + return err; +} + +int +dhd_pno_set_epno(dhd_pub_t *dhd) +{ + int err = BCME_OK; + dhd_pno_params_t *params; + dhd_pno_status_info_t *_pno_state; + + struct dhd_pno_gscan_params *gscan_params; + + NULL_CHECK(dhd, "dhd is NULL\n", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + gscan_params = ¶ms->params_gscan; + + if (gscan_params->epno_cfg.num_epno_ssid) { + DHD_PNO(("num_epno_ssid %d\n", gscan_params->epno_cfg.num_epno_ssid)); + if ((err = _dhd_pno_add_ssid(dhd, &gscan_params->epno_cfg.epno_ssid_list, + gscan_params->epno_cfg.num_epno_ssid)) < 0) { + DHD_ERROR(("failed to add ssid list (err %d) to firmware\n", err)); + return err; + } + err = dhd_set_epno_params(dhd, &gscan_params->epno_cfg.params, TRUE); + if (err < 0) { + DHD_ERROR(("failed to set ePNO params %d\n", err)); + } + } + return err; +} + + static void dhd_pno_reset_cfg_gscan(dhd_pno_params_t *_params, dhd_pno_status_info_t *_pno_state, uint8 flags) @@ -1499,39 +1721,59 @@ dhd_pno_reset_cfg_gscan(dhd_pno_params_t *_params, if (flags & GSCAN_FLUSH_HOTLIST_CFG) { struct 
dhd_pno_bssid *iter, *next; if (_params->params_gscan.nbssid_hotlist > 0) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif list_for_each_entry_safe(iter, next, &_params->params_gscan.hotlist_bssid_list, list) { list_del(&iter->list); kfree(iter); } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif } _params->params_gscan.nbssid_hotlist = 0; DHD_PNO(("Flush Hotlist Config\n")); } - if (flags & GSCAN_FLUSH_SIGNIFICANT_CFG) { - dhd_pno_significant_bssid_t *iter, *next; + if (flags & GSCAN_FLUSH_EPNO_CFG) { + dhd_pno_ssid_t *iter, *next; + dhd_epno_ssid_cfg_t *epno_cfg = &_params->params_gscan.epno_cfg; - if (_params->params_gscan.nbssid_significant_change > 0) { + if (epno_cfg->num_epno_ssid > 0) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif list_for_each_entry_safe(iter, next, - &_params->params_gscan.significant_bssid_list, list) { + &epno_cfg->epno_ssid_list, list) { list_del(&iter->list); kfree(iter); } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + epno_cfg->num_epno_ssid = 0; } - _params->params_gscan.nbssid_significant_change = 0; - DHD_PNO(("Flush Significant Change Config\n")); + memset(&epno_cfg->params, 0, sizeof(wl_pfn_ssid_params_t)); + DHD_PNO(("Flushed ePNO Config\n")); } return; } -void +int dhd_pno_lock_batch_results(dhd_pub_t *dhd) { dhd_pno_status_info_t *_pno_state; + int err = BCME_OK; + + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); _pno_state = PNO_GET_PNOSTATE(dhd); mutex_lock(&_pno_state->pno_mutex); - return; + return err; } void @@ -1543,12 +1785,14 @@ dhd_pno_unlock_batch_results(dhd_pub_t *dhd) return; } -void -dhd_wait_batch_results_complete(dhd_pub_t *dhd) +int dhd_wait_batch_results_complete(dhd_pub_t *dhd) { 
dhd_pno_status_info_t *_pno_state; dhd_pno_params_t *_params; + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); _pno_state = PNO_GET_PNOSTATE(dhd); _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; @@ -1561,7 +1805,6 @@ dhd_wait_batch_results_complete(dhd_pub_t *dhd) } else { /* GSCAN_BATCH_RETRIEVAL_COMPLETE */ gscan_results_cache_t *iter; uint16 num_results = 0; - int err; mutex_lock(&_pno_state->pno_mutex); iter = _params->params_gscan.gscan_batch_cache; @@ -1574,7 +1817,7 @@ dhd_wait_batch_results_complete(dhd_pub_t *dhd) /* All results consumed/No results cached?? * Get fresh results from FW */ - if (!num_results) { + if ((_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) && !num_results) { DHD_PNO(("%s: No results cached, getting from FW..\n", __FUNCTION__)); err = dhd_retreive_batch_scan_results(dhd); if (err == BCME_OK) { @@ -1585,8 +1828,7 @@ dhd_wait_batch_results_complete(dhd_pub_t *dhd) } } DHD_PNO(("%s: Wait complete\n", __FUNCTION__)); - - return; + return err; } static void * @@ -1611,108 +1853,9 @@ dhd_get_gscan_batch_results(dhd_pub_t *dhd, uint32 *len) return results; } -void * -dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, - void *info, uint32 *len) -{ - void *ret = NULL; - dhd_pno_gscan_capabilities_t *ptr; - - if (!len) { - DHD_ERROR(("%s: len is NULL\n", __FUNCTION__)); - return ret; - } - - switch (type) { - case DHD_PNO_GET_CAPABILITIES: - ptr = (dhd_pno_gscan_capabilities_t *) - kmalloc(sizeof(dhd_pno_gscan_capabilities_t), GFP_KERNEL); - if (!ptr) - break; - /* Hardcoding these values for now, need to get - * these values from FW, will change in a later check-in - */ - ptr->max_scan_cache_size = 12; - ptr->max_scan_buckets = GSCAN_MAX_CH_BUCKETS; - ptr->max_ap_cache_per_scan = 16; - ptr->max_rssi_sample_size = PFN_SWC_RSSI_WINDOW_MAX; - ptr->max_scan_reporting_threshold = 100; - ptr->max_hotlist_aps = PFN_HOTLIST_MAX_NUM_APS; - 
ptr->max_significant_wifi_change_aps = PFN_SWC_MAX_NUM_APS; - ret = (void *)ptr; - *len = sizeof(dhd_pno_gscan_capabilities_t); - break; - - case DHD_PNO_GET_BATCH_RESULTS: - ret = dhd_get_gscan_batch_results(dhd, len); - break; - case DHD_PNO_GET_CHANNEL_LIST: - if (info) { - uint16 ch_list[WL_NUMCHANNELS]; - uint32 *ptr, mem_needed, i; - int32 err, nchan = WL_NUMCHANNELS; - uint32 *gscan_band = (uint32 *) info; - uint8 band = 0; - - /* No band specified?, nothing to do */ - if ((*gscan_band & GSCAN_BAND_MASK) == 0) { - DHD_PNO(("No band specified\n")); - *len = 0; - break; - } - - /* HAL and DHD use different bits for 2.4G and - * 5G in bitmap. Hence translating it here... - */ - if (*gscan_band & GSCAN_BG_BAND_MASK) { - band |= WLC_BAND_2G; - } - if (*gscan_band & GSCAN_A_BAND_MASK) { - band |= WLC_BAND_5G; - } - - err = _dhd_pno_get_channels(dhd, ch_list, &nchan, - (band & GSCAN_ABG_BAND_MASK), - !(*gscan_band & GSCAN_DFS_MASK)); - - if (err < 0) { - DHD_ERROR(("%s: failed to get valid channel list\n", - __FUNCTION__)); - *len = 0; - } else { - mem_needed = sizeof(uint32) * nchan; - ptr = (uint32 *) kmalloc(mem_needed, GFP_KERNEL); - if (!ptr) { - DHD_ERROR(("%s: Unable to malloc %d bytes\n", - __FUNCTION__, mem_needed)); - break; - } - for (i = 0; i < nchan; i++) { - ptr[i] = wf_channel2mhz(ch_list[i], - (ch_list[i] <= CH_MAX_2G_CHANNEL? 
- WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); - } - ret = ptr; - *len = mem_needed; - } - } else { - *len = 0; - DHD_ERROR(("%s: info buffer is NULL\n", __FUNCTION__)); - } - break; - - default: - DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type)); - break; - } - - return ret; - -} - int dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, - void *buf, uint8 flush) + void *buf, bool flush) { int err = BCME_OK; dhd_pno_params_t *_params; @@ -1720,6 +1863,7 @@ dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, dhd_pno_status_info_t *_pno_state; NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); DHD_PNO(("%s enter\n", __FUNCTION__)); @@ -1728,14 +1872,14 @@ dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, mutex_lock(&_pno_state->pno_mutex); switch (type) { - case DHD_PNO_BATCH_SCAN_CFG_ID: + case DHD_PNO_BATCH_SCAN_CFG_ID: { gscan_batch_params_t *ptr = (gscan_batch_params_t *)buf; _params->params_gscan.bestn = ptr->bestn; _params->params_gscan.mscan = ptr->mscan; _params->params_gscan.buffer_threshold = ptr->buffer_threshold; - break; } + break; case DHD_PNO_GEOFENCE_SCAN_CFG_ID: { gscan_hotlist_scan_params_t *ptr = (gscan_hotlist_scan_params_t *)buf; @@ -1754,14 +1898,6 @@ dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, if (!_params->params_gscan.nbssid_hotlist) { INIT_LIST_HEAD(&_params->params_gscan.hotlist_bssid_list); } - if ((_params->params_gscan.nbssid_hotlist + - ptr->nbssid) > PFN_SWC_MAX_NUM_APS) { - DHD_ERROR(("Excessive number of hotlist APs programmed %d\n", - (_params->params_gscan.nbssid_hotlist + - ptr->nbssid))); - err = BCME_RANGE; - goto exit; - } for (i = 0, bssid_ptr = ptr->bssid; i < ptr->nbssid; i++, bssid_ptr++) { _pno_bssid = kzalloc(sizeof(struct dhd_pno_bssid), GFP_KERNEL); @@ -1782,66 +1918,12 @@ dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, _params->params_gscan.nbssid_hotlist += 
ptr->nbssid; _params->params_gscan.lost_ap_window = ptr->lost_ap_window; - break; } - case DHD_PNO_SIGNIFICANT_SCAN_CFG_ID: + break; + case DHD_PNO_SCAN_CFG_ID: { - gscan_swc_params_t *ptr = (gscan_swc_params_t *)buf; - dhd_pno_significant_bssid_t *_pno_significant_change_bssid; - wl_pfn_significant_bssid_t *significant_bssid_ptr; - - if (flush) { - dhd_pno_reset_cfg_gscan(_params, _pno_state, - GSCAN_FLUSH_SIGNIFICANT_CFG); - } - - if (!ptr->nbssid) { - break; - } - if (!_params->params_gscan.nbssid_significant_change) { - INIT_LIST_HEAD(&_params->params_gscan.significant_bssid_list); - } - if ((_params->params_gscan.nbssid_significant_change + - ptr->nbssid) > PFN_SWC_MAX_NUM_APS) { - DHD_ERROR(("Excessive number of SWC APs programmed %d\n", - (_params->params_gscan.nbssid_significant_change + - ptr->nbssid))); - err = BCME_RANGE; - goto exit; - } - - for (i = 0, significant_bssid_ptr = ptr->bssid_elem_list; - i < ptr->nbssid; i++, significant_bssid_ptr++) { - _pno_significant_change_bssid = - kzalloc(sizeof(dhd_pno_significant_bssid_t), - GFP_KERNEL); - - if (!_pno_significant_change_bssid) { - DHD_ERROR(("SWC bssidptr is NULL, cannot kalloc %zd bytes", - sizeof(dhd_pno_significant_bssid_t))); - err = BCME_NOMEM; - goto exit; - } - memcpy(&_pno_significant_change_bssid->BSSID, - &significant_bssid_ptr->macaddr, ETHER_ADDR_LEN); - _pno_significant_change_bssid->rssi_low_threshold = - significant_bssid_ptr->rssi_low_threshold; - _pno_significant_change_bssid->rssi_high_threshold = - significant_bssid_ptr->rssi_high_threshold; - list_add_tail(&_pno_significant_change_bssid->list, - &_params->params_gscan.significant_bssid_list); - } - - _params->params_gscan.swc_nbssid_threshold = ptr->swc_threshold; - _params->params_gscan.swc_rssi_window_size = ptr->rssi_window; - _params->params_gscan.lost_ap_window = ptr->lost_ap_window; - _params->params_gscan.nbssid_significant_change += ptr->nbssid; - break; - } - case DHD_PNO_SCAN_CFG_ID: - { - int i, k, valid = 0; - uint16 
band, min; + int k; + uint16 band; gscan_scan_params_t *ptr = (gscan_scan_params_t *)buf; struct dhd_pno_gscan_channel_bucket *ch_bucket; @@ -1851,7 +1933,6 @@ dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, memcpy(_params->params_gscan.channel_bucket, ptr->channel_bucket, _params->params_gscan.nchannel_buckets * sizeof(struct dhd_pno_gscan_channel_bucket)); - min = ptr->channel_bucket[0].bucket_freq_multiple; ch_bucket = _params->params_gscan.channel_bucket; for (i = 0; i < ptr->nchannel_buckets; i++) { @@ -1865,30 +1946,27 @@ dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, /* HAL and DHD use different bits for 2.4G and * 5G in bitmap. Hence translating it here... */ - if (band & GSCAN_BG_BAND_MASK) + if (band & GSCAN_BG_BAND_MASK) { ch_bucket[i].band |= WLC_BAND_2G; - - if (band & GSCAN_A_BAND_MASK) - ch_bucket[i].band |= WLC_BAND_5G; - - if (band & GSCAN_DFS_MASK) - ch_bucket[i].band |= GSCAN_DFS_MASK; - if (ptr->scan_fr == - ptr->channel_bucket[i].bucket_freq_multiple) { - valid = 1; } - if (ptr->channel_bucket[i].bucket_freq_multiple < min) - min = ptr->channel_bucket[i].bucket_freq_multiple; - + if (band & GSCAN_A_BAND_MASK) { + ch_bucket[i].band |= WLC_BAND_5G; + } + if (band & GSCAN_DFS_MASK) { + ch_bucket[i].band |= GSCAN_DFS_MASK; + } DHD_PNO(("band %d report_flag %d\n", ch_bucket[i].band, ch_bucket[i].report_flag)); } - if (!valid) - ptr->scan_fr = min; for (i = 0; i < ptr->nchannel_buckets; i++) { ch_bucket[i].bucket_freq_multiple = ch_bucket[i].bucket_freq_multiple/ptr->scan_fr; + ch_bucket[i].bucket_max_multiple = + ch_bucket[i].bucket_max_multiple/ptr->scan_fr; + DHD_PNO(("mult %d max_mult %d\n", + ch_bucket[i].bucket_freq_multiple, + ch_bucket[i].bucket_max_multiple)); } _params->params_gscan.scan_fr = ptr->scan_fr; @@ -1897,9 +1975,25 @@ dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, } else { err = BCME_BADARG; } - break; } - default: + break; + case DHD_PNO_EPNO_CFG_ID: + if (flush) { + 
dhd_pno_reset_cfg_gscan(_params, _pno_state, + GSCAN_FLUSH_EPNO_CFG); + } + break; + case DHD_PNO_EPNO_PARAMS_ID: + if (flush) { + memset(&_params->params_gscan.epno_cfg.params, 0, + sizeof(wl_pfn_ssid_params_t)); + } + if (buf) { + memcpy(&_params->params_gscan.epno_cfg.params, buf, + sizeof(wl_pfn_ssid_params_t)); + } + break; + default: err = BCME_BADARG; DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type)); break; @@ -1941,20 +2035,18 @@ static int dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct dhd_pno_gscan_params *gscan_params) { int err = BCME_OK; - int mode, i = 0, k; + int mode, i = 0; uint16 _chan_list[WL_NUMCHANNELS]; int tot_nchan = 0; int num_buckets_to_fw, tot_num_buckets, gscan_param_size; dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); - wl_pfn_gscan_channel_bucket_t *ch_bucket = NULL; + wl_pfn_gscan_ch_bucket_cfg_t *ch_bucket = NULL; wl_pfn_gscan_cfg_t *pfn_gscan_cfg_t = NULL; wl_pfn_significant_bssid_t *p_pfn_significant_bssid = NULL; wl_pfn_bssid_t *p_pfn_bssid = NULL; - wlc_ssid_ext_t *pssid_list = NULL; - dhd_pno_params_t *params_legacy; dhd_pno_params_t *_params; + bool fw_flushed = FALSE; - params_legacy = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); @@ -1977,37 +2069,40 @@ dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct dhd_pno_gscan_params *gscan_params) err = BCME_BADARG; goto exit; } - /* Create channel list based on channel buckets */ + if (!(ch_bucket = dhd_pno_gscan_create_channel_list(dhd, _pno_state, _chan_list, &tot_num_buckets, &num_buckets_to_fw))) { goto exit; } + mutex_lock(&_pno_state->pno_mutex); + /* Clear any pre-existing results in our cache + * not consumed by framework + */ + dhd_gscan_clear_all_batch_results(dhd); if (_pno_state->pno_mode & (DHD_PNO_GSCAN_MODE | DHD_PNO_LEGACY_MODE)) { /* store current pno_mode before disabling pno */ mode = _pno_state->pno_mode; err = 
dhd_pno_clean(dhd); if (err < 0) { DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); + mutex_unlock(&_pno_state->pno_mutex); goto exit; } + fw_flushed = TRUE; /* restore the previous mode */ _pno_state->pno_mode = mode; } - _pno_state->pno_mode |= DHD_PNO_GSCAN_MODE; + mutex_unlock(&_pno_state->pno_mutex); - if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { - pssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state); + if ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) && + !gscan_params->epno_cfg.num_epno_ssid) { + struct dhd_pno_legacy_params *params_legacy; + params_legacy = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); - if (!pssid_list) { - err = BCME_NOMEM; - DHD_ERROR(("failed to get Legacy PNO SSID list\n")); - goto exit; - } - - if ((err = _dhd_pno_add_ssid(dhd, pssid_list, - params_legacy->params_legacy.nssid)) < 0) { + if ((err = _dhd_pno_add_ssid(dhd, ¶ms_legacy->ssid_list, + params_legacy->nssid)) < 0) { DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err)); goto exit; } @@ -2019,8 +2114,8 @@ dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct dhd_pno_gscan_params *gscan_params) } gscan_param_size = sizeof(wl_pfn_gscan_cfg_t) + - (num_buckets_to_fw - 1) * sizeof(wl_pfn_gscan_channel_bucket_t); - pfn_gscan_cfg_t = (wl_pfn_gscan_cfg_t *) MALLOC(dhd->osh, gscan_param_size); + (num_buckets_to_fw - 1) * sizeof(wl_pfn_gscan_ch_bucket_cfg_t); + pfn_gscan_cfg_t = (wl_pfn_gscan_cfg_t *) MALLOCZ(dhd->osh, gscan_param_size); if (!pfn_gscan_cfg_t) { DHD_ERROR(("%s: failed to malloc memory of size %d\n", @@ -2029,37 +2124,29 @@ dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct dhd_pno_gscan_params *gscan_params) goto exit; } - - if (gscan_params->mscan) { + pfn_gscan_cfg_t->version = WL_GSCAN_CFG_VERSION; + if (gscan_params->mscan) pfn_gscan_cfg_t->buffer_threshold = gscan_params->buffer_threshold; - } else { + else pfn_gscan_cfg_t->buffer_threshold = GSCAN_BATCH_NO_THR_SET; - } - if (gscan_params->nbssid_significant_change) { - 
pfn_gscan_cfg_t->swc_nbssid_threshold = gscan_params->swc_nbssid_threshold; - pfn_gscan_cfg_t->swc_rssi_window_size = gscan_params->swc_rssi_window_size; - pfn_gscan_cfg_t->lost_ap_window = gscan_params->lost_ap_window; - } else { - pfn_gscan_cfg_t->swc_nbssid_threshold = 0; - pfn_gscan_cfg_t->swc_rssi_window_size = 0; - pfn_gscan_cfg_t->lost_ap_window = 0; - } pfn_gscan_cfg_t->flags = (gscan_params->send_all_results_flag & GSCAN_SEND_ALL_RESULTS_MASK); + pfn_gscan_cfg_t->flags |= GSCAN_ALL_BUCKETS_IN_FIRST_SCAN_MASK; pfn_gscan_cfg_t->count_of_channel_buckets = num_buckets_to_fw; + pfn_gscan_cfg_t->retry_threshold = GSCAN_RETRY_THRESHOLD; - - for (i = 0, k = 0; i < tot_num_buckets; i++) { - if (ch_bucket[i].bucket_end_index != CHANNEL_BUCKET_EMPTY_INDEX) { - pfn_gscan_cfg_t->channel_bucket[k].bucket_end_index = - ch_bucket[i].bucket_end_index; - pfn_gscan_cfg_t->channel_bucket[k].bucket_freq_multiple = - ch_bucket[i].bucket_freq_multiple; - pfn_gscan_cfg_t->channel_bucket[k].report_flag = - ch_bucket[i].report_flag; - k++; - } + for (i = 0; i < num_buckets_to_fw; i++) { + pfn_gscan_cfg_t->channel_bucket[i].bucket_end_index = + ch_bucket[i].bucket_end_index; + pfn_gscan_cfg_t->channel_bucket[i].bucket_freq_multiple = + ch_bucket[i].bucket_freq_multiple; + pfn_gscan_cfg_t->channel_bucket[i].max_freq_multiple = + ch_bucket[i].max_freq_multiple; + pfn_gscan_cfg_t->channel_bucket[i].repeat = + ch_bucket[i].repeat; + pfn_gscan_cfg_t->channel_bucket[i].flag = + ch_bucket[i].flag; } tot_nchan = pfn_gscan_cfg_t->channel_bucket[num_buckets_to_fw - 1].bucket_end_index + 1; @@ -2077,37 +2164,9 @@ dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct dhd_pno_gscan_params *gscan_params) __FUNCTION__, err)); goto exit; } - if (gscan_params->nbssid_significant_change) { - dhd_pno_significant_bssid_t *iter, *next; - - - p_pfn_significant_bssid = kzalloc(sizeof(wl_pfn_significant_bssid_t) * - gscan_params->nbssid_significant_change, GFP_KERNEL); - if (p_pfn_significant_bssid == NULL) { - 
DHD_ERROR(("%s : failed to allocate memory %zd\n", - __FUNCTION__, - sizeof(wl_pfn_significant_bssid_t) * - gscan_params->nbssid_significant_change)); - err = BCME_NOMEM; - goto exit; - } - i = 0; - /* convert dhd_pno_significant_bssid_t to wl_pfn_significant_bssid_t */ - list_for_each_entry_safe(iter, next, &gscan_params->significant_bssid_list, list) { - p_pfn_significant_bssid[i].rssi_low_threshold = iter->rssi_low_threshold; - p_pfn_significant_bssid[i].rssi_high_threshold = iter->rssi_high_threshold; - memcpy(&p_pfn_significant_bssid[i].macaddr, &iter->BSSID, ETHER_ADDR_LEN); - i++; - } - DHD_PNO(("nbssid_significant_change %d \n", - gscan_params->nbssid_significant_change)); - err = _dhd_pno_add_significant_bssid(dhd, p_pfn_significant_bssid, - gscan_params->nbssid_significant_change); - if (err < 0) { - DHD_ERROR(("%s : failed to call _dhd_pno_add_significant_bssid(err :%d)\n", - __FUNCTION__, err)); - goto exit; - } + /* Reprogram ePNO cfg from dhd cache if FW has been flushed */ + if (fw_flushed) { + dhd_pno_set_epno(dhd); } if (gscan_params->nbssid_hotlist) { @@ -2126,13 +2185,23 @@ dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct dhd_pno_gscan_params *gscan_params) ptr = p_pfn_bssid; /* convert dhd_pno_bssid to wl_pfn_bssid */ DHD_PNO(("nhotlist %d\n", gscan_params->nbssid_hotlist)); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif list_for_each_entry_safe(iter, next, &gscan_params->hotlist_bssid_list, list) { + char buffer_hotlist[64]; memcpy(&ptr->macaddr, &iter->macaddr, ETHER_ADDR_LEN); + DHD_PNO(("%s\n", bcm_ether_ntoa(&ptr->macaddr, buffer_hotlist))); + BCM_REFERENCE(buffer_hotlist); ptr->flags = iter->flags; ptr++; } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, gscan_params->nbssid_hotlist); if (err < 0) { @@ -2141,7 +2210,6 @@ dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct 
dhd_pno_gscan_params *gscan_params) goto exit; } } - if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) { DHD_ERROR(("%s : failed to enable PNO err %d\n", __FUNCTION__, err)); } @@ -2158,7 +2226,6 @@ exit: _pno_state->pno_mode &= ~DHD_PNO_GSCAN_MODE; } } - kfree(pssid_list); kfree(p_pfn_significant_bssid); kfree(p_pfn_bssid); if (pfn_gscan_cfg_t) { @@ -2166,183 +2233,59 @@ exit: } if (ch_bucket) { MFREE(dhd->osh, ch_bucket, - (tot_num_buckets * sizeof(wl_pfn_gscan_channel_bucket_t))); + (tot_num_buckets * sizeof(wl_pfn_gscan_ch_bucket_cfg_t))); } return err; } - -static void -dhd_pno_merge_gscan_pno_channels(dhd_pno_status_info_t *pno_state, - uint16 *chan_list, - uint8 *ch_scratch_pad, - wl_pfn_gscan_channel_bucket_t *ch_bucket, - uint32 *num_buckets_to_fw, - int num_channels) -{ - uint16 chan_buf[WL_NUMCHANNELS]; - int i, j = 0, ch_bucket_idx = 0; - dhd_pno_params_t *_params = &pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; - dhd_pno_params_t *_params1 = &pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; - uint16 *legacy_chan_list = _params1->params_legacy.chan_list; - bool is_legacy_scan_freq_higher; - uint8 report_flag = CH_BUCKET_REPORT_REGULAR; - - if (!_params1->params_legacy.scan_fr) - _params1->params_legacy.scan_fr = PNO_SCAN_MIN_FW_SEC; - - is_legacy_scan_freq_higher = - _params->params_gscan.scan_fr < _params1->params_legacy.scan_fr; - - /* Calculate new Legacy scan multiple of base scan_freq - * The legacy PNO channel bucket is added at the end of the - * channel bucket list. 
- */ - if (is_legacy_scan_freq_higher) { - ch_bucket[_params->params_gscan.nchannel_buckets].bucket_freq_multiple = - _params1->params_legacy.scan_fr/_params->params_gscan.scan_fr; - - } else { - uint16 max = 0; - - /* Calculate new multiple of base scan_freq for gscan buckets */ - ch_bucket[_params->params_gscan.nchannel_buckets].bucket_freq_multiple = 1; - for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) { - ch_bucket[i].bucket_freq_multiple *= _params->params_gscan.scan_fr; - ch_bucket[i].bucket_freq_multiple /= _params1->params_legacy.scan_fr; - if (max < ch_bucket[i].bucket_freq_multiple) - max = ch_bucket[i].bucket_freq_multiple; - } - _params->params_gscan.max_ch_bucket_freq = max; - } - - /* Off to remove duplicates!! - * Find channels that are already being serviced by gscan before legacy bucket - * These have to be removed from legacy bucket. - * !!Assuming chan_list channels are validated list of channels!! - * ch_scratch_pad is 1 at gscan bucket locations see dhd_pno_gscan_create_channel_list() - */ - for (i = 0; i < _params1->params_legacy.nchan; i++) - ch_scratch_pad[legacy_chan_list[i]] += 2; - - ch_bucket_idx = 0; - memcpy(chan_buf, chan_list, num_channels * sizeof(uint16)); - - /* Finally create channel list and bucket - * At this point ch_scratch_pad can have 4 values: - * 0 - Channel not present in either Gscan or Legacy PNO bucket - * 1 - Channel present only in Gscan bucket - * 2 - Channel present only in Legacy PNO bucket - * 3 - Channel present in both Gscan and Legacy PNO buckets - * Thus Gscan buckets can have values 1 or 3 and Legacy 2 or 3 - * For channel buckets with scan_freq < legacy accept all - * channels i.e. 
ch_scratch_pad = 1 and 3 - * else accept only ch_scratch_pad = 1 and mark rejects as - * ch_scratch_pad = 4 so that they go in legacy - */ - for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) { - if (ch_bucket[i].bucket_freq_multiple <= - ch_bucket[_params->params_gscan.nchannel_buckets].bucket_freq_multiple) { - for (; ch_bucket_idx <= ch_bucket[i].bucket_end_index; ch_bucket_idx++, j++) - chan_list[j] = chan_buf[ch_bucket_idx]; - - ch_bucket[i].bucket_end_index = j - 1; - } else { - num_channels = 0; - for (; ch_bucket_idx <= ch_bucket[i].bucket_end_index; ch_bucket_idx++) { - if (ch_scratch_pad[chan_buf[ch_bucket_idx]] == 1) { - chan_list[j] = chan_buf[ch_bucket_idx]; - j++; - num_channels++; - } else { - ch_scratch_pad[chan_buf[ch_bucket_idx]] = 4; - /* If Gscan channel is merged off to legacy bucket and - * if the gscan channel bucket has a report flag > 0 - * use the same for legacy - */ - if (report_flag < ch_bucket[i].report_flag) - report_flag = ch_bucket[i].report_flag; - } - } - - if (num_channels) { - ch_bucket[i].bucket_end_index = j - 1; - } else { - ch_bucket[i].bucket_end_index = CHANNEL_BUCKET_EMPTY_INDEX; - *num_buckets_to_fw = *num_buckets_to_fw - 1; - } - } - - } - - num_channels = 0; - ch_bucket[_params->params_gscan.nchannel_buckets].report_flag = report_flag; - /* Now add channels to the legacy scan bucket - * ch_scratch_pad = 0 to 4 at this point, for legacy -> 2,3,4. 2 means exclusively - * Legacy so add to bucket. 
4 means it is a reject of gscan bucket and must - * be added to Legacy bucket,reject 3 - */ - for (i = 0; i < _params1->params_legacy.nchan; i++) { - if (ch_scratch_pad[legacy_chan_list[i]] != 3) { - chan_list[j] = legacy_chan_list[i]; - j++; - num_channels++; - } - } - if (num_channels) { - ch_bucket[_params->params_gscan.nchannel_buckets].bucket_end_index = j - 1; - } - else { - ch_bucket[_params->params_gscan.nchannel_buckets].bucket_end_index = - CHANNEL_BUCKET_EMPTY_INDEX; - *num_buckets_to_fw = *num_buckets_to_fw - 1; - } - - return; -} -static wl_pfn_gscan_channel_bucket_t * +static wl_pfn_gscan_ch_bucket_cfg_t * dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd, dhd_pno_status_info_t *_pno_state, uint16 *chan_list, uint32 *num_buckets, uint32 *num_buckets_to_fw) { - int i, num_channels, err, nchan = WL_NUMCHANNELS; + int i, num_channels, err, nchan = WL_NUMCHANNELS, ch_cnt; uint16 *ptr = chan_list, max; - uint8 *ch_scratch_pad; - wl_pfn_gscan_channel_bucket_t *ch_bucket; + wl_pfn_gscan_ch_bucket_cfg_t *ch_bucket; dhd_pno_params_t *_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; - bool is_pno_legacy_running = _pno_state->pno_mode & DHD_PNO_LEGACY_MODE; + bool is_pno_legacy_running; dhd_pno_gscan_channel_bucket_t *gscan_buckets = _params->params_gscan.channel_bucket; + /* ePNO and Legacy PNO do not co-exist */ + is_pno_legacy_running = ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) && + !_params->params_gscan.epno_cfg.num_epno_ssid); + if (is_pno_legacy_running) *num_buckets = _params->params_gscan.nchannel_buckets + 1; - else + else *num_buckets = _params->params_gscan.nchannel_buckets; + *num_buckets_to_fw = 0; - *num_buckets_to_fw = *num_buckets; - - - ch_bucket = (wl_pfn_gscan_channel_bucket_t *) MALLOC(dhd->osh, - ((*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t))); + ch_bucket = (wl_pfn_gscan_ch_bucket_cfg_t *) MALLOC(dhd->osh, + ((*num_buckets) * sizeof(wl_pfn_gscan_ch_bucket_cfg_t))); if (!ch_bucket) { DHD_ERROR(("%s: failed to 
malloc memory of size %zd\n", - __FUNCTION__, (*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t))); + __FUNCTION__, (*num_buckets) * sizeof(wl_pfn_gscan_ch_bucket_cfg_t))); *num_buckets_to_fw = *num_buckets = 0; return NULL; } max = gscan_buckets[0].bucket_freq_multiple; num_channels = 0; - for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) { + /* nchan is the remaining space left in chan_list buffer + * So any overflow list of channels is ignored + */ + for (i = 0; i < _params->params_gscan.nchannel_buckets && nchan; i++) { if (!gscan_buckets[i].band) { - num_channels += gscan_buckets[i].num_channels; + ch_cnt = MIN(gscan_buckets[i].num_channels, (uint8)nchan); + num_channels += ch_cnt; memcpy(ptr, gscan_buckets[i].chan_list, - gscan_buckets[i].num_channels * sizeof(uint16)); - ptr = ptr + gscan_buckets[i].num_channels; + ch_cnt * sizeof(uint16)); + ptr = ptr + ch_cnt; } else { /* get a valid channel list based on band B or A */ err = _dhd_pno_get_channels(dhd, ptr, @@ -2353,7 +2296,7 @@ dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd, DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n", __FUNCTION__, gscan_buckets[i].band)); MFREE(dhd->osh, ch_bucket, - ((*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t))); + ((*num_buckets) * sizeof(wl_pfn_gscan_ch_bucket_cfg_t))); *num_buckets_to_fw = *num_buckets = 0; return NULL; } @@ -2364,55 +2307,60 @@ dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd, ch_bucket[i].bucket_end_index = num_channels - 1; ch_bucket[i].bucket_freq_multiple = gscan_buckets[i].bucket_freq_multiple; - ch_bucket[i].report_flag = gscan_buckets[i].report_flag; + ch_bucket[i].repeat = gscan_buckets[i].repeat; + ch_bucket[i].max_freq_multiple = gscan_buckets[i].bucket_max_multiple; + ch_bucket[i].flag = gscan_buckets[i].report_flag; + /* HAL and FW interpretations are opposite for this bit */ + ch_bucket[i].flag ^= DHD_PNO_REPORT_NO_BATCH; if (max < gscan_buckets[i].bucket_freq_multiple) max = 
gscan_buckets[i].bucket_freq_multiple; nchan = WL_NUMCHANNELS - num_channels; + *num_buckets_to_fw = *num_buckets_to_fw + 1; DHD_PNO(("end_idx %d freq_mult - %d\n", ch_bucket[i].bucket_end_index, ch_bucket[i].bucket_freq_multiple)); } - ch_scratch_pad = (uint8 *) kzalloc(CHANNEL_5G_MAX, GFP_KERNEL); - if (!ch_scratch_pad) { - DHD_ERROR(("%s: failed to malloc memory of size %d\n", - __FUNCTION__, CHANNEL_5G_MAX)); - MFREE(dhd->osh, ch_bucket, - ((*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t))); - *num_buckets_to_fw = *num_buckets = 0; - return NULL; - } - - /* Need to look for duplicates in gscan buckets if the framework programmed - * the gscan buckets badly, for now return error if there are duplicates. - * Plus as an added bonus, we get all channels in Gscan bucket - * set to 1 for dhd_pno_merge_gscan_pno_channels() - */ - for (i = 0; i < num_channels; i++) { - if (!ch_scratch_pad[chan_list[i]]) { - ch_scratch_pad[chan_list[i]] = 1; - } else { - DHD_ERROR(("%s: Duplicate channel - %d programmed in channel bucket\n", - __FUNCTION__, chan_list[i])); - MFREE(dhd->osh, ch_bucket, ((*num_buckets) * - sizeof(wl_pfn_gscan_channel_bucket_t))); - *num_buckets_to_fw = *num_buckets = 0; - kfree(ch_scratch_pad); - return NULL; - } - } _params->params_gscan.max_ch_bucket_freq = max; /* Legacy PNO maybe running, which means we need to create a legacy PNO bucket - * Plus need to remove duplicates as the legacy PNO chan_list may have common channels - * If channel is to be scanned more frequently as per gscan requirements - * remove from legacy PNO ch_bucket. Similarly, if legacy wants a channel scanned - * more often, it is removed from the Gscan channel bucket. - * In the end both are satisfied. 
+ * Get GCF of Legacy PNO and Gscan scanfreq */ - if (is_pno_legacy_running) - dhd_pno_merge_gscan_pno_channels(_pno_state, chan_list, - ch_scratch_pad, ch_bucket, num_buckets_to_fw, num_channels); - - kfree(ch_scratch_pad); + if (is_pno_legacy_running) { + dhd_pno_params_t *_params1 = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; + uint16 *legacy_chan_list = _params1->params_legacy.chan_list; + uint16 common_freq; + uint32 legacy_bucket_idx = _params->params_gscan.nchannel_buckets; + /* If no space is left then only gscan buckets will be sent to FW */ + if (nchan) { + common_freq = gcd(_params->params_gscan.scan_fr, + _params1->params_legacy.scan_fr); + max = gscan_buckets[0].bucket_freq_multiple; + /* GSCAN buckets */ + for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) { + ch_bucket[i].bucket_freq_multiple *= _params->params_gscan.scan_fr; + ch_bucket[i].bucket_freq_multiple /= common_freq; + if (max < gscan_buckets[i].bucket_freq_multiple) + max = gscan_buckets[i].bucket_freq_multiple; + } + /* Legacy PNO bucket */ + ch_bucket[legacy_bucket_idx].bucket_freq_multiple = + _params1->params_legacy.scan_fr; + ch_bucket[legacy_bucket_idx].bucket_freq_multiple /= + common_freq; + _params->params_gscan.max_ch_bucket_freq = MAX(max, + ch_bucket[legacy_bucket_idx].bucket_freq_multiple); + ch_bucket[legacy_bucket_idx].flag = CH_BUCKET_REPORT_REGULAR; + /* Now add channels to the legacy scan bucket */ + for (i = 0; i < _params1->params_legacy.nchan && nchan; i++, nchan--) { + ptr[i] = legacy_chan_list[i]; + num_channels++; + } + ch_bucket[legacy_bucket_idx].bucket_end_index = num_channels - 1; + *num_buckets_to_fw = *num_buckets_to_fw + 1; + DHD_PNO(("end_idx %d freq_mult - %d\n", + ch_bucket[legacy_bucket_idx].bucket_end_index, + ch_bucket[legacy_bucket_idx].bucket_freq_multiple)); + } + } return ch_bucket; } @@ -2421,7 +2369,6 @@ dhd_pno_stop_for_gscan(dhd_pub_t *dhd) { int err = BCME_OK; int mode; - wlc_ssid_ext_t *pssid_list = NULL; 
dhd_pno_status_info_t *_pno_state; _pno_state = PNO_GET_PNOSTATE(dhd); @@ -2442,11 +2389,16 @@ dhd_pno_stop_for_gscan(dhd_pub_t *dhd) DHD_ERROR(("%s : GSCAN is not enabled\n", __FUNCTION__)); goto exit; } + if (_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan.mscan) { + /* retrieve the batching data from firmware into host */ + err = dhd_wait_batch_results_complete(dhd); + if (err != BCME_OK) + goto exit; + } mutex_lock(&_pno_state->pno_mutex); mode = _pno_state->pno_mode & ~DHD_PNO_GSCAN_MODE; err = dhd_pno_clean(dhd); if (err < 0) { - DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", __FUNCTION__, err)); mutex_unlock(&_pno_state->pno_mutex); @@ -2462,22 +2414,13 @@ dhd_pno_stop_for_gscan(dhd_pub_t *dhd) params_legacy = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; - pssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state); - if (!pssid_list) { - err = BCME_NOMEM; - DHD_ERROR(("failed to get Legacy PNO SSID list\n")); - goto exit; - } - DHD_PNO(("Restarting Legacy PNO SSID scan...\n")); memcpy(chan_list, params_legacy->chan_list, (params_legacy->nchan * sizeof(uint16))); - err = dhd_pno_set_for_ssid(dhd, pssid_list, params_legacy->nssid, - params_legacy->scan_fr, params_legacy->pno_repeat, - params_legacy->pno_freq_expo_max, chan_list, - params_legacy->nchan); + err = dhd_pno_set_legacy_pno(dhd, params_legacy->scan_fr, + params_legacy->pno_repeat, params_legacy->pno_freq_expo_max, + chan_list, params_legacy->nchan); if (err < 0) { - _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n", __FUNCTION__, err)); goto exit; @@ -2486,7 +2429,6 @@ dhd_pno_stop_for_gscan(dhd_pub_t *dhd) } exit: - kfree(pssid_list); return err; } @@ -2503,6 +2445,7 @@ dhd_pno_initiate_gscan_request(dhd_pub_t *dhd, bool run, bool flush) _pno_state = PNO_GET_PNOSTATE(dhd); DHD_PNO(("%s enter - run %d flush %d\n", __FUNCTION__, run, flush)); + 
params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; gscan_params = ¶ms->params_gscan; @@ -2552,12 +2495,14 @@ dhd_pno_enable_full_scan_result(dhd_pub_t *dhd, bool real_time_flag) if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { if (old_flag != gscan_params->send_all_results_flag) { wl_pfn_gscan_cfg_t gscan_cfg; + + gscan_cfg.version = WL_GSCAN_CFG_VERSION; gscan_cfg.flags = (gscan_params->send_all_results_flag & - GSCAN_SEND_ALL_RESULTS_MASK); + GSCAN_SEND_ALL_RESULTS_MASK); gscan_cfg.flags |= GSCAN_CFG_FLAGS_ONLY_MASK; if ((err = _dhd_pno_gscan_cfg(dhd, &gscan_cfg, - sizeof(wl_pfn_gscan_cfg_t))) < 0) { + sizeof(wl_pfn_gscan_cfg_t))) < 0) { DHD_ERROR(("%s : pno_gscan_cfg failed (err %d) in firmware\n", __FUNCTION__, err)); goto exit_mutex_unlock; @@ -2574,6 +2519,9 @@ exit: return err; } +/* Cleanup any consumed results + * Return TRUE if all results consumed, else FALSE + */ int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd) { int ret = 0; @@ -2594,7 +2542,7 @@ int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd) iter = tmp; } else break; -} + } gscan_params->gscan_batch_cache = iter; ret = (iter == NULL); return ret; @@ -2607,14 +2555,15 @@ _dhd_pno_get_gscan_batch_from_fw(dhd_pub_t *dhd) uint32 timestamp = 0, ts = 0, i, j, timediff; dhd_pno_params_t *params; dhd_pno_status_info_t *_pno_state; - wl_pfn_lnet_info_t *plnetinfo; + wl_pfn_lnet_info_v2_t *plnetinfo; struct dhd_pno_gscan_params *gscan_params; - wl_pfn_lscanresults_t *plbestnet = NULL; + wl_pfn_lscanresults_v2_t *plbestnet = NULL; gscan_results_cache_t *iter, *tail; wifi_gscan_result_t *result; uint8 *nAPs_per_scan = NULL; uint8 num_scans_in_cur_iter; - uint16 count, scan_id = 0; + uint16 count; + struct timespec tm_spec; NULL_CHECK(dhd, "dhd is NULL\n", err); NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); @@ -2628,7 +2577,10 @@ _dhd_pno_get_gscan_batch_from_fw(dhd_pub_t *dhd) err = BCME_UNSUPPORTED; goto exit; } - + if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) { + DHD_ERROR(("%s: GSCAN 
is not enabled\n", __FUNCTION__)); + goto exit; + } gscan_params = ¶ms->params_gscan; nAPs_per_scan = (uint8 *) MALLOC(dhd->osh, gscan_params->mscan); @@ -2639,16 +2591,17 @@ _dhd_pno_get_gscan_batch_from_fw(dhd_pub_t *dhd) goto exit; } - plbestnet = (wl_pfn_lscanresults_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN); + plbestnet = (wl_pfn_lscanresults_v2_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN); + if (!plbestnet) { + DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n", __FUNCTION__, + PNO_BESTNET_LEN)); + err = BCME_NOMEM; + goto exit; + } mutex_lock(&_pno_state->pno_mutex); - iter = gscan_params->gscan_batch_cache; - /* If a cache has not been consumed , just delete it */ - while (iter) { - iter->tot_consumed = iter->tot_count; - iter = iter->next; - } - dhd_gscan_batch_cache_cleanup(dhd); + + dhd_gscan_clear_all_batch_results(dhd); if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) { DHD_ERROR(("%s : GSCAN is not enabled\n", __FUNCTION__)); @@ -2663,12 +2616,14 @@ _dhd_pno_get_gscan_batch_from_fw(dhd_pub_t *dhd) tail = gscan_params->gscan_batch_cache; while (plbestnet->status != PFN_COMPLETE) { memset(plbestnet, 0, PNO_BESTNET_LEN); - err = dhd_iovar(dhd, 0, "pfnlbest", (char *)plbestnet, PNO_BESTNET_LEN, 0); + err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, (char *)plbestnet, PNO_BESTNET_LEN, + FALSE); if (err < 0) { DHD_ERROR(("%s : Cannot get all the batch results, err :%d\n", __FUNCTION__, err)); goto exit_mutex_unlock; } + get_monotonic_boottime(&tm_spec); DHD_PNO(("ver %d, status : %d, count %d\n", plbestnet->version, plbestnet->status, plbestnet->count)); if (plbestnet->version != PFN_SCANRESULT_VERSION) { @@ -2677,7 +2632,10 @@ _dhd_pno_get_gscan_batch_from_fw(dhd_pub_t *dhd) plbestnet->version, PFN_SCANRESULT_VERSION)); goto exit_mutex_unlock; } - + if (plbestnet->count == 0) { + DHD_PNO(("No more batch results\n")); + goto exit_mutex_unlock; + } num_scans_in_cur_iter = 0; timestamp = plbestnet->netinfo[0].timestamp; /* find out how many scans' results did we get 
in this batch of FW results */ @@ -2686,7 +2644,7 @@ _dhd_pno_get_gscan_batch_from_fw(dhd_pub_t *dhd) /* Unlikely to happen, but just in case the results from * FW doesnt make sense..... Assume its part of one single scan */ - if (num_scans_in_cur_iter > gscan_params->mscan) { + if (num_scans_in_cur_iter >= gscan_params->mscan) { num_scans_in_cur_iter = 0; count = plbestnet->count; break; @@ -2698,15 +2656,17 @@ _dhd_pno_get_gscan_batch_from_fw(dhd_pub_t *dhd) } timestamp = plnetinfo->timestamp; } - nAPs_per_scan[num_scans_in_cur_iter] = count; - num_scans_in_cur_iter++; + if (num_scans_in_cur_iter < gscan_params->mscan) { + nAPs_per_scan[num_scans_in_cur_iter] = count; + num_scans_in_cur_iter++; + } DHD_PNO(("num_scans_in_cur_iter %d\n", num_scans_in_cur_iter)); plnetinfo = &plbestnet->netinfo[0]; for (i = 0; i < num_scans_in_cur_iter; i++) { iter = (gscan_results_cache_t *) - kzalloc(((nAPs_per_scan[i] - 1) * sizeof(wifi_gscan_result_t)) + + kmalloc(((nAPs_per_scan[i] - 1) * sizeof(wifi_gscan_result_t)) + sizeof(gscan_results_cache_t), GFP_KERNEL); if (!iter) { DHD_ERROR(("%s :Out of memory!! 
Cant malloc %d bytes\n", @@ -2718,14 +2678,17 @@ _dhd_pno_get_gscan_batch_from_fw(dhd_pub_t *dhd) * maybe a continuation of previous sets' scan results */ if (TIME_DIFF_MS(ts, plnetinfo->timestamp) > timediff) { - iter->scan_id = ++scan_id; + iter->scan_id = ++gscan_params->scan_id; } else { - iter->scan_id = scan_id; + iter->scan_id = gscan_params->scan_id; } - DHD_PNO(("scan_id %d tot_count %d\n", scan_id, nAPs_per_scan[i])); + DHD_PNO(("scan_id %d tot_count %d ch_bucket %x\n", + gscan_params->scan_id, nAPs_per_scan[i], + plbestnet->scan_ch_buckets[i])); iter->tot_count = nAPs_per_scan[i]; + iter->scan_ch_bucket = plbestnet->scan_ch_buckets[i]; iter->tot_consumed = 0; - + iter->flag = 0; if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) { DHD_PNO(("This scan is aborted\n")); iter->flag = (ENABLE << PNO_STATUS_ABORT); @@ -2750,17 +2713,17 @@ _dhd_pno_get_gscan_batch_from_fw(dhd_pub_t *dhd) /* Info not available & not expected */ result->beacon_period = 0; result->capability = 0; - result->ie_length = 0; result->rtt = (uint64) plnetinfo->rtt0; result->rtt_sd = (uint64) plnetinfo->rtt1; - result->ts = convert_fw_rel_time_to_systime(plnetinfo->timestamp); + result->ts = convert_fw_rel_time_to_systime(&tm_spec, + plnetinfo->timestamp); ts = plnetinfo->timestamp; if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { DHD_ERROR(("%s: Invalid SSID length %d\n", __FUNCTION__, plnetinfo->pfnsubnet.SSID_len)); plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN; } - memcpy(result->ssid, plnetinfo->pfnsubnet.SSID, + memcpy(result->ssid, plnetinfo->pfnsubnet.u.SSID, plnetinfo->pfnsubnet.SSID_len); result->ssid[plnetinfo->pfnsubnet.SSID_len] = '\0'; memcpy(&result->macaddr, &plnetinfo->pfnsubnet.BSSID, @@ -2768,7 +2731,7 @@ _dhd_pno_get_gscan_batch_from_fw(dhd_pub_t *dhd) DHD_PNO(("\tSSID : ")); DHD_PNO(("\n")); - DHD_PNO(("\tBSSID: %02x:%02x:%02x:%02x:%02x:%02x\n", + DHD_PNO(("\tBSSID: %02x:%02x:%02x:%02x:%02x:%02x\n", result->macaddr.octet[0], result->macaddr.octet[1], 
result->macaddr.octet[2], @@ -2791,7 +2754,7 @@ exit: smp_wmb(); wake_up_interruptible(&_pno_state->batch_get_wait); if (nAPs_per_scan) { - MFREE(dhd->osh, nAPs_per_scan, gscan_params->mscan); + MFREE(dhd->osh, nAPs_per_scan, gscan_params->mscan * sizeof(uint8)); } if (plbestnet) { MFREE(dhd->osh, plbestnet, PNO_BESTNET_LEN); @@ -2800,6 +2763,140 @@ exit: return err; } #endif /* GSCAN_SUPPORT */ +#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) +void * +dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, + void *info, uint32 *len) +{ + void *ret = NULL; + dhd_pno_gscan_capabilities_t *ptr; + dhd_pno_ssid_t *ssid_elem; + dhd_pno_params_t *_params; + dhd_epno_ssid_cfg_t *epno_cfg; + dhd_pno_status_info_t *_pno_state; + + + if (!dhd || !dhd->pno_state) { + DHD_ERROR(("NULL POINTER : %s\n", __FUNCTION__)); + return NULL; + } + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + if (!len) { + DHD_ERROR(("%s: len is NULL\n", __FUNCTION__)); + return NULL; + } + + switch (type) { + case DHD_PNO_GET_CAPABILITIES: + ptr = (dhd_pno_gscan_capabilities_t *) + kmalloc(sizeof(dhd_pno_gscan_capabilities_t), GFP_KERNEL); + if (!ptr) + break; + /* Hardcoding these values for now, need to get + * these values from FW, will change in a later check-in + */ + ptr->max_scan_cache_size = GSCAN_MAX_AP_CACHE; + ptr->max_scan_buckets = GSCAN_MAX_CH_BUCKETS; + ptr->max_ap_cache_per_scan = GSCAN_MAX_AP_CACHE_PER_SCAN; + ptr->max_scan_reporting_threshold = 100; + ptr->max_hotlist_aps = PFN_HOTLIST_MAX_NUM_APS; + ptr->max_epno_ssid_crc32 = MAX_EPNO_SSID_NUM; + ptr->max_epno_hidden_ssid = MAX_EPNO_HIDDEN_SSID; + ptr->max_white_list_ssid = MAX_WHITELIST_SSID; + ret = (void *)ptr; + *len = sizeof(dhd_pno_gscan_capabilities_t); + break; +#ifdef GSCAN_SUPPORT + case DHD_PNO_GET_BATCH_RESULTS: + ret = dhd_get_gscan_batch_results(dhd, len); + break; +#endif /* GSCAN_SUPPORT */ + case DHD_PNO_GET_CHANNEL_LIST: + if (info) { + 
uint16 ch_list[WL_NUMCHANNELS]; + uint32 *p, mem_needed, i; + int32 err, nchan = WL_NUMCHANNELS; + uint32 *gscan_band = (uint32 *) info; + uint8 band = 0; + + /* No band specified?, nothing to do */ + if ((*gscan_band & GSCAN_BAND_MASK) == 0) { + DHD_PNO(("No band specified\n")); + *len = 0; + break; + } + + /* HAL and DHD use different bits for 2.4G and + * 5G in bitmap. Hence translating it here... + */ + if (*gscan_band & GSCAN_BG_BAND_MASK) { + band |= WLC_BAND_2G; + } + if (*gscan_band & GSCAN_A_BAND_MASK) { + band |= WLC_BAND_5G; + } + + err = _dhd_pno_get_channels(dhd, ch_list, &nchan, + (band & GSCAN_ABG_BAND_MASK), + !(*gscan_band & GSCAN_DFS_MASK)); + + if (err < 0) { + DHD_ERROR(("%s: failed to get valid channel list\n", + __FUNCTION__)); + *len = 0; + } else { + mem_needed = sizeof(uint32) * nchan; + p = (uint32 *) kmalloc(mem_needed, GFP_KERNEL); + if (!p) { + DHD_ERROR(("%s: Unable to malloc %d bytes\n", + __FUNCTION__, mem_needed)); + break; + } + for (i = 0; i < nchan; i++) { + p[i] = wf_channel2mhz(ch_list[i], + (ch_list[i] <= CH_MAX_2G_CHANNEL? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + } + ret = p; + *len = mem_needed; + } + } else { + *len = 0; + DHD_ERROR(("%s: info buffer is NULL\n", __FUNCTION__)); + } + break; + case DHD_PNO_GET_NEW_EPNO_SSID_ELEM: + epno_cfg = &_params->params_gscan.epno_cfg; + if (epno_cfg->num_epno_ssid >= + MAX_EPNO_SSID_NUM) { + DHD_ERROR(("Excessive number of ePNO SSIDs programmed %d\n", + epno_cfg->num_epno_ssid)); + return NULL; + } + if (!epno_cfg->num_epno_ssid) { + INIT_LIST_HEAD(&epno_cfg->epno_ssid_list); + } + ssid_elem = kzalloc(sizeof(dhd_pno_ssid_t), GFP_KERNEL); + if (!ssid_elem) { + DHD_ERROR(("EPNO ssid: cannot alloc %zd bytes", + sizeof(dhd_pno_ssid_t))); + return NULL; + } + epno_cfg->num_epno_ssid++; + list_add_tail(&ssid_elem->list, &epno_cfg->epno_ssid_list); + ret = ssid_elem; + break; + + default: + DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type)); + break; + } + + return ret; + +} +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ static int _dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) @@ -2809,32 +2906,30 @@ _dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) uint32 timestamp = 0; dhd_pno_params_t *_params = NULL; dhd_pno_status_info_t *_pno_state = NULL; - wl_pfn_lscanresults_t *plbestnet = NULL; - wl_pfn_lnet_info_t *plnetinfo; + wl_pfn_lscanresults_v2_t *plbestnet = NULL; + wl_pfn_lnet_info_v2_t *plnetinfo; dhd_pno_bestnet_entry_t *pbestnet_entry; dhd_pno_best_header_t *pbestnetheader = NULL; dhd_pno_scan_results_t *pscan_results = NULL, *siter, *snext; bool allocate_header = FALSE; NULL_CHECK(dhd, "dhd is NULL", err); NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = PNO_GET_PNOSTATE(dhd); + if (!dhd_support_sta_mode(dhd)) { err = BCME_BADOPTION; goto exit_no_unlock; } - DHD_PNO(("%s enter\n", __FUNCTION__)); - _pno_state = PNO_GET_PNOSTATE(dhd); if (!WLS_SUPPORTED(_pno_state)) { DHD_ERROR(("%s : wifi location 
service is not supported\n", __FUNCTION__)); err = BCME_UNSUPPORTED; goto exit_no_unlock; } -#ifdef GSCAN_SUPPORT - if (!(_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_GSCAN_MODE))) -#else - if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) -#endif /* GSCAN_SUPPORT */ - { + + if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__)); goto exit_no_unlock; } @@ -2851,11 +2946,18 @@ _dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) /* this is a first try to get batching results */ if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) { /* move the scan_results_list to expired_scan_results_lists */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif list_for_each_entry_safe(siter, snext, &_params->params_batch.get_batch.scan_results_list, list) { list_move_tail(&siter->list, &_params->params_batch.get_batch.expired_scan_results_list); } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif _params->params_batch.get_batch.top_node_cnt = 0; _params->params_batch.get_batch.expired_tot_scan_cnt = _params->params_batch.get_batch.tot_scan_cnt; @@ -2887,13 +2989,17 @@ _dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list); } - plbestnet = (wl_pfn_lscanresults_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN); - NULL_CHECK(plbestnet, "failed to allocate buffer for bestnet", err); + plbestnet = (wl_pfn_lscanresults_v2_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN); + if (!(plbestnet)) + { + DHD_ERROR(("(%s) : plbestnet (%p) is NULL\n", __FUNCTION__, (plbestnet))); + goto exit; + } DHD_PNO(("%s enter\n", __FUNCTION__)); memset(plbestnet, 0, PNO_BESTNET_LEN); while (plbestnet->status != PFN_COMPLETE) { memset(plbestnet, 0, PNO_BESTNET_LEN); - err = dhd_iovar(dhd, 0, 
"pfnlbest", (char *)plbestnet, PNO_BESTNET_LEN, 0); + err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, (char *)plbestnet, PNO_BESTNET_LEN, 0); if (err < 0) { if (err == BCME_EPERM) { DHD_ERROR(("we cannot get the batching data " @@ -2980,7 +3086,7 @@ _dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN; } pbestnet_entry->SSID_len = plnetinfo->pfnsubnet.SSID_len; - memcpy(pbestnet_entry->SSID, plnetinfo->pfnsubnet.SSID, + memcpy(pbestnet_entry->SSID, plnetinfo->pfnsubnet.u.SSID, pbestnet_entry->SSID_len); memcpy(&pbestnet_entry->BSSID, &plnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN); /* add the element into list */ @@ -2991,7 +3097,7 @@ _dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) DHD_PNO(("Header %d\n", pscan_results->cnt_header - 1)); DHD_PNO(("\tSSID : ")); for (j = 0; j < plnetinfo->pfnsubnet.SSID_len; j++) - DHD_PNO(("%c", plnetinfo->pfnsubnet.SSID[j])); + DHD_PNO(("%c", plnetinfo->pfnsubnet.u.SSID[j])); DHD_PNO(("\n")); DHD_PNO(("\tBSSID: %02x:%02x:%02x:%02x:%02x:%02x\n", plnetinfo->pfnsubnet.BSSID.octet[0], @@ -3015,19 +3121,27 @@ _dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) list_del(&pscan_results->list); MFREE(dhd->osh, pscan_results, SCAN_RESULTS_SIZE); _params->params_batch.get_batch.top_node_cnt--; + } else { + /* increase total scan count using current scan count */ + _params->params_batch.get_batch.tot_scan_cnt += pscan_results->cnt_header; } - /* increase total scan count using current scan count */ - _params->params_batch.get_batch.tot_scan_cnt += pscan_results->cnt_header; if (buf && bufsize) { /* This is a first try to get batching results */ if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) { /* move the scan_results_list to expired_scan_results_lists */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif 
list_for_each_entry_safe(siter, snext, &_params->params_batch.get_batch.scan_results_list, list) { list_move_tail(&siter->list, &_params->params_batch.get_batch.expired_scan_results_list); } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif /* reset gloval values after moving to expired list */ _params->params_batch.get_batch.top_node_cnt = 0; _params->params_batch.get_batch.expired_tot_scan_cnt = @@ -3063,7 +3177,14 @@ _dhd_pno_get_batch_handler(struct work_struct *work) dhd_pub_t *dhd; struct dhd_pno_batch_params *params_batch; DHD_PNO(("%s enter\n", __FUNCTION__)); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif _pno_state = container_of(work, struct dhd_pno_status_info, work); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif dhd = _pno_state->dhd; if (dhd == NULL) { DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__)); @@ -3071,18 +3192,14 @@ _dhd_pno_get_batch_handler(struct work_struct *work) } #ifdef GSCAN_SUPPORT - if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { - _dhd_pno_get_gscan_batch_from_fw(dhd); - return; - } else + _dhd_pno_get_gscan_batch_from_fw(dhd); #endif /* GSCAN_SUPPORT */ - { + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; _dhd_pno_get_for_batch(dhd, params_batch->get_batch.buf, params_batch->get_batch.bufsize, params_batch->get_batch.reason); } - } int @@ -3124,8 +3241,8 @@ dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__)); memset(pbuf, 0, bufsize); - pbuf += sprintf(pbuf, "scancount=%d\n", 0); - sprintf(pbuf, "%s", RESULTS_END_MARKER); + pbuf += snprintf(pbuf, bufsize, "scancount=%d\n", 0); + snprintf(pbuf, bufsize, "%s", RESULTS_END_MARKER); err = 
strlen(buf); goto exit; } @@ -3154,7 +3271,6 @@ dhd_pno_stop_for_batch(dhd_pub_t *dhd) dhd_pno_status_info_t *_pno_state; dhd_pno_params_t *_params; wl_pfn_bssid_t *p_pfn_bssid = NULL; - wlc_ssid_ext_t *p_ssid_list = NULL; NULL_CHECK(dhd, "dhd is NULL", err); NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); _pno_state = PNO_GET_PNOSTATE(dhd); @@ -3184,25 +3300,24 @@ dhd_pno_stop_for_batch(dhd_pub_t *dhd) _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_HOTLIST_MODE)) { mode = _pno_state->pno_mode; - dhd_pno_clean(dhd); + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + _pno_state->pno_mode = mode; /* restart Legacy PNO if the Legacy PNO is on */ if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { struct dhd_pno_legacy_params *_params_legacy; _params_legacy = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); - p_ssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state); - if (!p_ssid_list) { - err = BCME_NOMEM; - DHD_ERROR(("failed to get Legacy PNO SSID list\n")); - goto exit; - } - err = dhd_pno_set_for_ssid(dhd, p_ssid_list, _params_legacy->nssid, - _params_legacy->scan_fr, _params_legacy->pno_repeat, - _params_legacy->pno_freq_expo_max, _params_legacy->chan_list, - _params_legacy->nchan); + err = dhd_pno_set_legacy_pno(dhd, _params_legacy->scan_fr, + _params_legacy->pno_repeat, + _params_legacy->pno_freq_expo_max, + _params_legacy->chan_list, _params_legacy->nchan); if (err < 0) { - _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n", __FUNCTION__, err)); goto exit; @@ -3222,12 +3337,19 @@ dhd_pno_stop_for_batch(dhd_pub_t *dhd) } i = 0; /* convert dhd_pno_bssid to wl_pfn_bssid */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif 
list_for_each_entry_safe(iter, next, &_params->params_hotlist.bssid_list, list) { memcpy(&p_pfn_bssid[i].macaddr, &iter->macaddr, ETHER_ADDR_LEN); p_pfn_bssid[i].flags = iter->flags; i++; } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist); if (err < 0) { _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; @@ -3247,7 +3369,6 @@ dhd_pno_stop_for_batch(dhd_pub_t *dhd) exit: _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE); - kfree(p_ssid_list); kfree(p_pfn_bssid); return err; } @@ -3408,7 +3529,6 @@ dhd_pno_stop_for_hotlist(dhd_pub_t *dhd) uint32 mode = 0; dhd_pno_status_info_t *_pno_state; dhd_pno_params_t *_params; - wlc_ssid_ext_t *p_ssid_list = NULL; NULL_CHECK(dhd, "dhd is NULL", err); NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); _pno_state = PNO_GET_PNOSTATE(dhd); @@ -3445,18 +3565,11 @@ dhd_pno_stop_for_hotlist(dhd_pub_t *dhd) struct dhd_pno_legacy_params *_params_legacy; _params_legacy = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); - p_ssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state); - if (!p_ssid_list) { - err = BCME_NOMEM; - DHD_ERROR(("failed to get Legacy PNO SSID list\n")); - goto exit; - } - err = dhd_pno_set_for_ssid(dhd, p_ssid_list, _params_legacy->nssid, - _params_legacy->scan_fr, _params_legacy->pno_repeat, - _params_legacy->pno_freq_expo_max, _params_legacy->chan_list, - _params_legacy->nchan); + + err = dhd_pno_set_legacy_pno(dhd, _params_legacy->scan_fr, + _params_legacy->pno_repeat, _params_legacy->pno_freq_expo_max, + _params_legacy->chan_list, _params_legacy->nchan); if (err < 0) { - _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n", __FUNCTION__, err)); goto exit; @@ -3482,7 +3595,6 @@ dhd_pno_stop_for_hotlist(dhd_pub_t *dhd) } } exit: - kfree(p_ssid_list); 
return err; } @@ -3494,6 +3606,9 @@ dhd_retreive_batch_scan_results(dhd_pub_t *dhd) dhd_pno_status_info_t *_pno_state; dhd_pno_params_t *_params; struct dhd_pno_batch_params *params_batch; + + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); _pno_state = PNO_GET_PNOSTATE(dhd); _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; @@ -3504,6 +3619,7 @@ dhd_retreive_batch_scan_results(dhd_pub_t *dhd) params_batch->get_batch.bufsize = 0; params_batch->get_batch.reason = PNO_STATUS_EVENT; _params->params_gscan.get_batch_flag = GSCAN_BATCH_RETRIEVAL_IN_PROGRESS; + smp_wmb(); schedule_work(&_pno_state->work); } else { DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING retrieval" @@ -3514,83 +3630,6 @@ dhd_retreive_batch_scan_results(dhd_pub_t *dhd) return err; } -/* Handle Significant WiFi Change (SWC) event from FW - * Send event to HAL when all results arrive from FW - */ -void * -dhd_handle_swc_evt(dhd_pub_t *dhd, const void *event_data, int *send_evt_bytes) -{ - void *ptr = NULL; - dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); - struct dhd_pno_gscan_params *gscan_params; - struct dhd_pno_swc_evt_param *params; - wl_pfn_swc_results_t *results = (wl_pfn_swc_results_t *)event_data; - wl_pfn_significant_net_t *change_array; - int i; - - - gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan); - params = &(gscan_params->param_significant); - - if (!results->total_count) { - *send_evt_bytes = 0; - return ptr; - } - - if (!params->results_rxed_so_far) { - if (!params->change_array) { - params->change_array = (wl_pfn_significant_net_t *) - kmalloc(sizeof(wl_pfn_significant_net_t) * results->total_count, - GFP_KERNEL); - - if (!params->change_array) { - DHD_ERROR(("%s Cannot Malloc %zd bytes!!\n", __FUNCTION__, - sizeof(wl_pfn_significant_net_t) * results->total_count)); - *send_evt_bytes = 0; - return ptr; - } - } else { - DHD_ERROR(("RX'ed WLC_E_PFN_SWC evt from FW, previous evt not 
complete!!")); - *send_evt_bytes = 0; - return ptr; - } - - } - - DHD_PNO(("%s: pkt_count %d total_count %d\n", __FUNCTION__, - results->pkt_count, results->total_count)); - - for (i = 0; i < results->pkt_count; i++) { - DHD_PNO(("\t %02x:%02x:%02x:%02x:%02x:%02x\n", - results->list[i].BSSID.octet[0], - results->list[i].BSSID.octet[1], - results->list[i].BSSID.octet[2], - results->list[i].BSSID.octet[3], - results->list[i].BSSID.octet[4], - results->list[i].BSSID.octet[5])); - } - - change_array = ¶ms->change_array[params->results_rxed_so_far]; - memcpy(change_array, results->list, sizeof(wl_pfn_significant_net_t) * results->pkt_count); - params->results_rxed_so_far += results->pkt_count; - - if (params->results_rxed_so_far == results->total_count) { - params->results_rxed_so_far = 0; - *send_evt_bytes = sizeof(wl_pfn_significant_net_t) * results->total_count; - /* Pack up change buffer to send up and reset - * results_rxed_so_far, after its done. - */ - ptr = (void *) params->change_array; - /* expecting the callee to free this mem chunk */ - params->change_array = NULL; - } - else { - *send_evt_bytes = 0; - } - - return ptr; -} - void dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type) { @@ -3621,71 +3660,166 @@ dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type) } void * -dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *data, int *size) +dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *data, uint32 len, int *size) { wl_bss_info_t *bi = NULL; wl_gscan_result_t *gscan_result; - wifi_gscan_result_t *result = NULL; + wifi_gscan_full_result_t *result = NULL; u32 bi_length = 0; uint8 channel; uint32 mem_needed; - struct timespec ts; + u32 bi_ie_length = 0; + u32 bi_ie_offset = 0; *size = 0; - +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif gscan_result = (wl_gscan_result_t *)data; - +#if defined(STRICT_GCC_WARNINGS) && 
defined(__GNUC__) +#pragma GCC diagnostic pop +#endif if (!gscan_result) { DHD_ERROR(("Invalid gscan result (NULL pointer)\n")); goto exit; } - if (!gscan_result->bss_info) { - DHD_ERROR(("Invalid gscan bss info (NULL pointer)\n")); + + if ((len < sizeof(*gscan_result)) || + (len < dtoh32(gscan_result->buflen)) || + (dtoh32(gscan_result->buflen) > + (sizeof(*gscan_result) + WL_SCAN_IE_LEN_MAX))) { + DHD_ERROR(("%s: invalid gscan buflen:%u\n", __FUNCTION__, + dtoh32(gscan_result->buflen))); goto exit; } + bi = &gscan_result->bss_info[0].info; bi_length = dtoh32(bi->length); if (bi_length != (dtoh32(gscan_result->buflen) - - WL_GSCAN_RESULTS_FIXED_SIZE - WL_GSCAN_INFO_FIXED_FIELD_SIZE)) { + WL_GSCAN_RESULTS_FIXED_SIZE - WL_GSCAN_INFO_FIXED_FIELD_SIZE)) { DHD_ERROR(("Invalid bss_info length %d: ignoring\n", bi_length)); goto exit; } - if (bi->SSID_len > DOT11_MAX_SSID_LEN) { - DHD_ERROR(("Invalid SSID length %d: trimming it to max\n", bi->SSID_len)); - bi->SSID_len = DOT11_MAX_SSID_LEN; - } - - mem_needed = OFFSETOF(wifi_gscan_result_t, ie_data) + bi->ie_length; - result = kmalloc(mem_needed, GFP_KERNEL); - - if (!result) { - DHD_ERROR(("%s Cannot malloc scan result buffer %d bytes\n", - __FUNCTION__, mem_needed)); + bi_ie_offset = dtoh32(bi->ie_offset); + bi_ie_length = dtoh32(bi->ie_length); + if ((bi_ie_offset + bi_ie_length) > bi_length) { + DHD_ERROR(("%s: Invalid ie_length:%u or ie_offset:%u\n", + __FUNCTION__, bi_ie_length, bi_ie_offset)); goto exit; } - - memcpy(result->ssid, bi->SSID, bi->SSID_len); - result->ssid[bi->SSID_len] = '\0'; + if (bi->SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("%s: Invalid SSID length:%u\n", __FUNCTION__, bi->SSID_len)); + goto exit; + } + mem_needed = OFFSETOF(wifi_gscan_full_result_t, ie_data) + bi_ie_length; + result = (wifi_gscan_full_result_t *) kmalloc(mem_needed, GFP_KERNEL); + if (!result) { + DHD_ERROR(("%s Cannot malloc scan result buffer %d bytes\n", + __FUNCTION__, mem_needed)); + goto exit; + } + 
result->scan_ch_bucket = gscan_result->scan_ch_bucket; + memcpy(result->fixed.ssid, bi->SSID, bi->SSID_len); + result->fixed.ssid[bi->SSID_len] = '\0'; channel = wf_chspec_ctlchan(bi->chanspec); - result->channel = wf_channel2mhz(channel, + result->fixed.channel = wf_channel2mhz(channel, (channel <= CH_MAX_2G_CHANNEL? WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); - result->rssi = (int32) bi->RSSI; - result->rtt = 0; - result->rtt_sd = 0; + result->fixed.rssi = (int32) bi->RSSI; + result->fixed.rtt = 0; + result->fixed.rtt_sd = 0; get_monotonic_boottime(&ts); - result->ts = (uint64) TIMESPEC_TO_US(ts); - result->beacon_period = dtoh16(bi->beacon_period); - result->capability = dtoh16(bi->capability); - result->ie_length = dtoh32(bi->ie_length); - memcpy(&result->macaddr, &bi->BSSID, ETHER_ADDR_LEN); - memcpy(result->ie_data, ((uint8 *)bi + bi->ie_offset), bi->ie_length); + result->fixed.ts = (uint64) TIMESPEC_TO_US(ts); + result->fixed.beacon_period = dtoh16(bi->beacon_period); + result->fixed.capability = dtoh16(bi->capability); + result->ie_length = bi_ie_length; + memcpy(&result->fixed.macaddr, &bi->BSSID, ETHER_ADDR_LEN); + memcpy(result->ie_data, ((uint8 *)bi + bi_ie_offset), bi_ie_length); *size = mem_needed; exit: return result; } +void * +dhd_pno_process_epno_result(dhd_pub_t *dhd, const void *data, uint32 event, int *size) +{ + dhd_epno_results_t *results = NULL; + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + struct dhd_pno_gscan_params *gscan_params; + uint32 count, mem_needed = 0, i; + uint8 ssid[DOT11_MAX_SSID_LEN + 1]; + struct ether_addr *bssid; + + *size = 0; + if (!_pno_state) + return NULL; + gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan); + + if (event == WLC_E_PFN_NET_FOUND || event == WLC_E_PFN_NET_LOST) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + wl_pfn_scanresults_v2_t *pfn_result = 
(wl_pfn_scanresults_v2_t *)data; + wl_pfn_net_info_v2_t *net; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + if (pfn_result->version != PFN_SCANRESULT_VERSION) { + DHD_ERROR(("%s event %d: Incorrect version %d %d\n", __FUNCTION__, event, + pfn_result->version, PFN_SCANRESULT_VERSION)); + return NULL; + } + /* Check if count of pfn results is corrupted */ + if (pfn_result->count > EVENT_MAX_NETCNT_V2) { + DHD_ERROR(("%s event %d: pfn results count %d" + "exceeds the max limit\n", __FUNCTION__, event, + pfn_result->count)); + return NULL; + } + + count = pfn_result->count; + mem_needed = sizeof(dhd_epno_results_t) * count; + results = (dhd_epno_results_t *) kmalloc(mem_needed, GFP_KERNEL); + if (!results) { + DHD_ERROR(("%s: Can't malloc %d bytes for results\n", __FUNCTION__, + mem_needed)); + return NULL; + } + for (i = 0; i < count; i++) { + net = &pfn_result->netinfo[i]; + results[i].rssi = net->RSSI; + results[i].channel = wf_channel2mhz(net->pfnsubnet.channel, + (net->pfnsubnet.channel <= CH_MAX_2G_CHANNEL ? + WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + results[i].flags = (event == WLC_E_PFN_NET_FOUND) ? 
+ WL_PFN_SSID_EXT_FOUND: WL_PFN_SSID_EXT_LOST; + results[i].ssid_len = min(net->pfnsubnet.SSID_len, + (uint8)DOT11_MAX_SSID_LEN); + bssid = &results[i].bssid; + memcpy(bssid, &net->pfnsubnet.BSSID, ETHER_ADDR_LEN); + if (!net->pfnsubnet.SSID_len) { + dhd_pno_idx_to_ssid(gscan_params, &results[i], + net->pfnsubnet.u.index); + } else { + memcpy(results[i].ssid, net->pfnsubnet.u.SSID, results[i].ssid_len); + } + memcpy(ssid, results[i].ssid, results[i].ssid_len); + ssid[results[i].ssid_len] = '\0'; + DHD_PNO(("ssid - %s bssid %02x:%02x:%02x:%02x:%02x:%02x " + "ch %d rssi %d flags %d\n", ssid, + bssid->octet[0], bssid->octet[1], + bssid->octet[2], bssid->octet[3], + bssid->octet[4], bssid->octet[5], + results[i].channel, results[i].rssi, results[i].flags)); + } + } + *size = mem_needed; + return results; +} + void * dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data, int *send_evt_bytes, hotlist_type_t type) @@ -3693,19 +3827,29 @@ dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data, void *ptr = NULL; dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); struct dhd_pno_gscan_params *gscan_params; - wl_pfn_scanresults_t *results = (wl_pfn_scanresults_t *)event_data; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + wl_pfn_scanresults_v2_t *results = (wl_pfn_scanresults_v2_t *)event_data; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif wifi_gscan_result_t *hotlist_found_array; - wl_pfn_net_info_t *plnetinfo; + wl_pfn_net_info_v2_t *plnetinfo; gscan_results_cache_t *gscan_hotlist_cache; int malloc_size = 0, i, total = 0; + struct timespec tm_spec; gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan); - if (!results->count) { + if ((results->count == 0) || (results->count > EVENT_MAX_NETCNT_V2)) { + DHD_ERROR(("%s: wrong result count:%d\n", __FUNCTION__, results->count)); 
*send_evt_bytes = 0; return ptr; } + get_monotonic_boottime(&tm_spec); malloc_size = sizeof(gscan_results_cache_t) + ((results->count - 1) * sizeof(wifi_gscan_result_t)); gscan_hotlist_cache = (gscan_results_cache_t *) kmalloc(malloc_size, GFP_KERNEL); @@ -3728,31 +3872,30 @@ dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data, gscan_hotlist_cache->tot_count = results->count; gscan_hotlist_cache->tot_consumed = 0; + gscan_hotlist_cache->scan_ch_bucket = results->scan_ch_bucket; plnetinfo = results->netinfo; for (i = 0; i < results->count; i++, plnetinfo++) { hotlist_found_array = &gscan_hotlist_cache->results[i]; + memset(hotlist_found_array, 0, sizeof(wifi_gscan_result_t)); hotlist_found_array->channel = wf_channel2mhz(plnetinfo->pfnsubnet.channel, (plnetinfo->pfnsubnet.channel <= CH_MAX_2G_CHANNEL? WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); hotlist_found_array->rssi = (int32) plnetinfo->RSSI; - /* Info not available & not expected */ - hotlist_found_array->beacon_period = 0; - hotlist_found_array->capability = 0; - hotlist_found_array->ie_length = 0; - hotlist_found_array->ts = convert_fw_rel_time_to_systime(plnetinfo->timestamp); + hotlist_found_array->ts = + convert_fw_rel_time_to_systime(&tm_spec, (plnetinfo->timestamp * 1000)); if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { DHD_ERROR(("Invalid SSID length %d: trimming it to max\n", plnetinfo->pfnsubnet.SSID_len)); plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN; } - memcpy(hotlist_found_array->ssid, plnetinfo->pfnsubnet.SSID, + memcpy(hotlist_found_array->ssid, plnetinfo->pfnsubnet.u.SSID, plnetinfo->pfnsubnet.SSID_len); hotlist_found_array->ssid[plnetinfo->pfnsubnet.SSID_len] = '\0'; memcpy(&hotlist_found_array->macaddr, &plnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN); - DHD_PNO(("\t%s %02x:%02x:%02x:%02x:%02x:%02x rssi %d\n", hotlist_found_array->ssid, + DHD_PNO(("\t%s %02x:%02x:%02x:%02x:%02x:%02x rssi %d\n", hotlist_found_array->ssid, hotlist_found_array->macaddr.octet[0], 
hotlist_found_array->macaddr.octet[1], hotlist_found_array->macaddr.octet[2], @@ -3779,7 +3922,7 @@ int dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) { int err = BCME_OK; - uint status, event_type, flags, datalen; + uint event_type; dhd_pno_status_info_t *_pno_state; NULL_CHECK(dhd, "dhd is NULL", err); NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); @@ -3790,9 +3933,6 @@ dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) goto exit; } event_type = ntoh32(event->event_type); - flags = ntoh16(event->flags); - status = ntoh32(event->status); - datalen = ntoh32(event->datalen); DHD_PNO(("%s enter : event_type :%d\n", __FUNCTION__, event_type)); switch (event_type) { case WLC_E_PFN_BSSID_NET_FOUND: @@ -3829,6 +3969,7 @@ int dhd_pno_init(dhd_pub_t *dhd) { int err = BCME_OK; dhd_pno_status_info_t *_pno_state; + char *buf = NULL; NULL_CHECK(dhd, "dhd is NULL", err); DHD_PNO(("%s enter\n", __FUNCTION__)); UNUSED_PARAMETER(_dhd_pno_suspend); @@ -3847,7 +3988,13 @@ int dhd_pno_init(dhd_pub_t *dhd) #ifdef GSCAN_SUPPORT init_waitqueue_head(&_pno_state->batch_get_wait); #endif /* GSCAN_SUPPORT */ - err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, 0); + buf = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL); + if (!buf) { + DHD_ERROR((":%s buf alloc err.\n", __FUNCTION__)); + return BCME_NOMEM; + } + err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, buf, WLC_IOCTL_SMLEN, + FALSE); if (err == BCME_UNSUPPORTED) { _pno_state->wls_supported = FALSE; DHD_INFO(("Current firmware doesn't support" @@ -3857,9 +4004,9 @@ int dhd_pno_init(dhd_pub_t *dhd) __FUNCTION__)); } exit: + kfree(buf); return err; } - int dhd_pno_deinit(dhd_pub_t *dhd) { int err = BCME_OK; diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_pno.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_pno.h index 990ec6ce4ad1..7b56c5512bb3 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_pno.h +++ 
b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_pno.h @@ -2,7 +2,7 @@ * Header file of Broadcom Dongle Host Driver (DHD) * Prefered Network Offload code and Wi-Fi Location Service(WLS) code. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: dhd_pno.h 591285 2015-10-07 11:56:29Z $ + * $Id: dhd_pno.h 707287 2017-06-27 06:44:29Z $ */ #ifndef __DHD_PNO_H__ @@ -36,9 +36,11 @@ #define PNO_TLV_VERSION '1' #define PNO_TLV_SUBTYPE_LEGACY_PNO '2' #define PNO_TLV_RESERVED '0' + #define PNO_BATCHING_SET "SET" #define PNO_BATCHING_GET "GET" #define PNO_BATCHING_STOP "STOP" + #define PNO_PARAMS_DELIMETER " " #define PNO_PARAM_CHANNEL_DELIMETER "," #define PNO_PARAM_VALUE_DELLIMETER '=' @@ -63,9 +65,32 @@ #define AP_END_MARKER "====\n" #define PNO_RSSI_MARGIN_DBM 30 -#ifdef GSCAN_SUPPORT +#define CSCAN_COMMAND "CSCAN " +#define CSCAN_TLV_PREFIX 'S' +#define CSCAN_TLV_VERSION 1 +#define CSCAN_TLV_SUBVERSION 0 +#define CSCAN_TLV_TYPE_SSID_IE 'S' +#define CSCAN_TLV_TYPE_CHANNEL_IE 'C' +#define CSCAN_TLV_TYPE_NPROBE_IE 'N' +#define CSCAN_TLV_TYPE_ACTIVE_IE 'A' +#define CSCAN_TLV_TYPE_PASSIVE_IE 'P' +#define CSCAN_TLV_TYPE_HOME_IE 'H' +#define CSCAN_TLV_TYPE_STYPE_IE 'T' -#define GSCAN_MAX_CH_BUCKETS 8 +#define WL_SCAN_PARAMS_SSID_MAX 10 +#define GET_SSID "SSID=" +#define GET_CHANNEL "CH=" +#define GET_NPROBE "NPROBE=" +#define GET_ACTIVE_ASSOC_DWELL "ACTIVE=" +#define GET_PASSIVE_ASSOC_DWELL "PASSIVE=" +#define GET_HOME_DWELL "HOME=" +#define GET_SCAN_TYPE "TYPE=" + +#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) +#define GSCAN_MAX_CH_BUCKETS 8 +#define GSCAN_MAX_CHANNELS_IN_BUCKET 32 +#define GSCAN_MAX_AP_CACHE_PER_SCAN 32 +#define GSCAN_MAX_AP_CACHE 320 #define GSCAN_BG_BAND_MASK (1 << 0) #define GSCAN_A_BAND_MASK (1 << 1) 
#define GSCAN_DFS_MASK (1 << 2) @@ -75,9 +100,18 @@ #define GSCAN_FLUSH_HOTLIST_CFG (1 << 0) #define GSCAN_FLUSH_SIGNIFICANT_CFG (1 << 1) #define GSCAN_FLUSH_SCAN_CFG (1 << 2) +#define GSCAN_FLUSH_EPNO_CFG (1 << 3) #define GSCAN_FLUSH_ALL_CFG (GSCAN_FLUSH_SCAN_CFG | \ GSCAN_FLUSH_SIGNIFICANT_CFG | \ - GSCAN_FLUSH_HOTLIST_CFG) + GSCAN_FLUSH_HOTLIST_CFG | \ + GSCAN_FLUSH_EPNO_CFG) +#define DHD_EPNO_HIDDEN_SSID (1 << 0) +#define DHD_EPNO_A_BAND_TRIG (1 << 1) +#define DHD_EPNO_BG_BAND_TRIG (1 << 2) +#define DHD_EPNO_STRICT_MATCH (1 << 3) +#define DHD_EPNO_SAME_NETWORK (1 << 4) +#define DHD_PNO_USE_SSID (DHD_EPNO_HIDDEN_SSID | DHD_EPNO_STRICT_MATCH) + /* Do not change GSCAN_BATCH_RETRIEVAL_COMPLETE */ #define GSCAN_BATCH_RETRIEVAL_COMPLETE 0 #define GSCAN_BATCH_RETRIEVAL_IN_PROGRESS 1 @@ -87,7 +121,9 @@ #define GSCAN_BATCH_GET_MAX_WAIT 500 #define CHANNEL_BUCKET_EMPTY_INDEX 0xFFFF #define GSCAN_RETRY_THRESHOLD 3 -#endif /* GSCAN_SUPPORT */ + +#define MAX_EPNO_SSID_NUM 64 +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ enum scan_status { /* SCAN ABORT by other scan */ @@ -114,9 +150,7 @@ enum index_mode { /* GSCAN includes hotlist scan and they do not run * independent of each other */ -#ifdef GSCAN_SUPPORT INDEX_OF_GSCAN_PARAMS = INDEX_OF_HOTLIST_PARAMS, -#endif /* GSCAN_SUPPORT */ INDEX_MODE_MAX }; enum dhd_pno_status { @@ -130,7 +164,7 @@ typedef struct cmd_tlv { char subtype; char reserved; } cmd_tlv_t; -#ifdef GSCAN_SUPPORT +#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) typedef enum { WIFI_BAND_UNSPECIFIED, WIFI_BAND_BG = 1, /* 2.4 GHz */ @@ -147,13 +181,17 @@ typedef enum { } hotlist_type_t; typedef enum dhd_pno_gscan_cmd_cfg { - DHD_PNO_BATCH_SCAN_CFG_ID, + DHD_PNO_BATCH_SCAN_CFG_ID = 0, DHD_PNO_GEOFENCE_SCAN_CFG_ID, DHD_PNO_SIGNIFICANT_SCAN_CFG_ID, DHD_PNO_SCAN_CFG_ID, DHD_PNO_GET_CAPABILITIES, DHD_PNO_GET_BATCH_RESULTS, - DHD_PNO_GET_CHANNEL_LIST + DHD_PNO_GET_CHANNEL_LIST, + DHD_PNO_GET_NEW_EPNO_SSID_ELEM, + DHD_PNO_EPNO_CFG_ID, + 
DHD_PNO_GET_AUTOJOIN_CAPABILITIES, + DHD_PNO_EPNO_PARAMS_ID } dhd_pno_gscan_cmd_cfg_t; typedef enum dhd_pno_mode { @@ -177,19 +215,26 @@ typedef enum dhd_pno_mode { /* Wi-Fi Android Hotlist SCAN Mode */ DHD_PNO_HOTLIST_MODE = (1 << (2)) } dhd_pno_mode_t; -#endif /* GSCAN_SUPPORT */ -struct dhd_pno_ssid { +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ + +typedef struct dhd_pno_ssid { bool hidden; - uint32 SSID_len; + int8 rssi_thresh; + uint8 dummy; + uint16 SSID_len; + uint32 flags; + int32 wpa_auth; uchar SSID[DOT11_MAX_SSID_LEN]; struct list_head list; -}; +} dhd_pno_ssid_t; + struct dhd_pno_bssid { struct ether_addr macaddr; /* Bit4: suppress_lost, Bit3: suppress_found */ uint16 flags; struct list_head list; }; + typedef struct dhd_pno_bestnet_entry { struct ether_addr BSSID; uint8 SSID_len; @@ -260,7 +305,9 @@ struct dhd_pno_hotlist_params { uint16 nbssid; struct list_head bssid_list; }; -#ifdef GSCAN_SUPPORT +#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) +#define DHD_PNO_REPORT_NO_BATCH (1 << 2) + typedef struct dhd_pno_gscan_channel_bucket { uint16 bucket_freq_multiple; /* band = 1 All bg band channels, @@ -270,13 +317,37 @@ typedef struct dhd_pno_gscan_channel_bucket { uint16 band; uint8 report_flag; uint8 num_channels; - uint16 chan_list[GSCAN_MAX_CH_BUCKETS]; + uint16 repeat; + uint16 bucket_max_multiple; + uint16 chan_list[GSCAN_MAX_CHANNELS_IN_BUCKET]; } dhd_pno_gscan_channel_bucket_t; -typedef struct dhd_pno_swc_evt_param { - uint16 results_rxed_so_far; - wl_pfn_significant_net_t *change_array; -} dhd_pno_swc_evt_param_t; + +#define DHD_PNO_AUTH_CODE_OPEN 1 /* Open */ +#define DHD_PNO_AUTH_CODE_PSK 2 /* WPA_PSK or WPA2PSK */ +#define DHD_PNO_AUTH_CODE_EAPOL 4 /* any EAPOL */ + +#define DHD_EPNO_DEFAULT_INDEX 0xFFFFFFFF + +typedef struct dhd_epno_params { + uint8 ssid[DOT11_MAX_SSID_LEN]; + uint8 ssid_len; + int8 rssi_thresh; + uint8 flags; + uint8 auth; + /* index required only for visble ssid */ + uint32 index; + struct list_head 
list; +} dhd_epno_params_t; + +typedef struct dhd_epno_results { + uint8 ssid[DOT11_MAX_SSID_LEN]; + uint8 ssid_len; + int8 rssi; + uint16 channel; + uint16 flags; + struct ether_addr bssid; +} dhd_epno_results_t; typedef struct wifi_gscan_result { uint64 ts; /* Time of discovery */ @@ -288,16 +359,23 @@ typedef struct wifi_gscan_result { uint64 rtt_sd; /* standard deviation in rtt */ uint16 beacon_period; /* units are Kusec */ uint16 capability; /* Capability information */ - uint32 ie_length; /* byte length of Information Elements */ - char ie_data[1]; /* IE data to follow */ + uint32 pad; } wifi_gscan_result_t; +typedef struct wifi_gscan_full_result { + wifi_gscan_result_t fixed; + uint32 scan_ch_bucket; + uint32 ie_length; /* byte length of Information Elements */ + char ie_data[1]; /* IE data to follow */ +} wifi_gscan_full_result_t; + typedef struct gscan_results_cache { struct gscan_results_cache *next; uint8 scan_id; uint8 flag; uint8 tot_count; uint8 tot_consumed; + uint32 scan_ch_bucket; wifi_gscan_result_t results[1]; } gscan_results_cache_t; @@ -309,15 +387,22 @@ typedef struct dhd_pno_gscan_capabilities { int max_scan_reporting_threshold; int max_hotlist_aps; int max_significant_wifi_change_aps; + int max_epno_ssid_crc32; + int max_epno_hidden_ssid; + int max_white_list_ssid; } dhd_pno_gscan_capabilities_t; +typedef struct dhd_epno_ssid_cfg { + wl_pfn_ssid_params_t params; + uint32 num_epno_ssid; + struct list_head epno_ssid_list; +} dhd_epno_ssid_cfg_t; + struct dhd_pno_gscan_params { int32 scan_fr; uint8 bestn; uint8 mscan; uint8 buffer_threshold; - uint8 swc_nbssid_threshold; - uint8 swc_rssi_window_size; uint8 lost_ap_window; uint8 nchannel_buckets; uint8 reason; @@ -327,12 +412,11 @@ struct dhd_pno_gscan_params { gscan_results_cache_t *gscan_batch_cache; gscan_results_cache_t *gscan_hotlist_found; gscan_results_cache_t *gscan_hotlist_lost; - uint16 nbssid_significant_change; uint16 nbssid_hotlist; - struct dhd_pno_swc_evt_param param_significant; 
struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS]; struct list_head hotlist_bssid_list; - struct list_head significant_bssid_list; + dhd_epno_ssid_cfg_t epno_cfg; + uint32 scan_id; }; typedef struct gscan_scan_params { @@ -358,37 +442,16 @@ typedef struct gscan_hotlist_scan_params { struct bssid_t bssid[1]; /* n bssids to follow */ } gscan_hotlist_scan_params_t; -/* SWC (Significant WiFi Change) params */ -typedef struct gscan_swc_params { - /* Rssi averaging window size */ - uint8 rssi_window; - /* Number of scans that the AP has to be absent before - * being declared LOST - */ - uint8 lost_ap_window; - /* if x Aps have a significant change generate an event. */ - uint8 swc_threshold; - uint8 nbssid; - wl_pfn_significant_bssid_t bssid_elem_list[1]; -} gscan_swc_params_t; - -typedef struct dhd_pno_significant_bssid { - struct ether_addr BSSID; - int8 rssi_low_threshold; - int8 rssi_high_threshold; - struct list_head list; -} dhd_pno_significant_bssid_t; -#endif /* GSCAN_SUPPORT */ +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ typedef union dhd_pno_params { struct dhd_pno_legacy_params params_legacy; struct dhd_pno_batch_params params_batch; struct dhd_pno_hotlist_params params_hotlist; -#ifdef GSCAN_SUPPORT +#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) struct dhd_pno_gscan_params params_gscan; -#endif /* GSCAN_SUPPORT */ +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ } dhd_pno_params_t; typedef struct dhd_pno_status_info { - uint8 pno_oui[DOT11_OUI_LEN]; dhd_pub_t *dhd; struct work_struct work; struct mutex pno_mutex; @@ -427,28 +490,32 @@ dhd_dev_pno_stop_for_batch(struct net_device *dev); extern int dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid, struct dhd_pno_hotlist_params *hotlist_params); -extern int dhd_dev_pno_set_mac_oui(struct net_device *dev, uint8 *oui); -#ifdef GSCAN_SUPPORT -extern int -dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t 
type, - void *buf, uint8 flush); +extern bool dhd_dev_is_legacy_pno_enabled(struct net_device *dev); +#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) extern void * dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, void *info, uint32 *len); -void dhd_dev_pno_lock_access_batch_results(struct net_device *dev); +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ +#ifdef GSCAN_SUPPORT +extern int +dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, + void *buf, bool flush); +int dhd_dev_pno_lock_access_batch_results(struct net_device *dev); void dhd_dev_pno_unlock_access_batch_results(struct net_device *dev); extern int dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush); extern int dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time); -extern void * dhd_dev_swc_scan_event(struct net_device *dev, const void *data, - int *send_evt_bytes); int dhd_retreive_batch_scan_results(dhd_pub_t *dhd); extern void * dhd_dev_hotlist_scan_event(struct net_device *dev, const void *data, int *send_evt_bytes, hotlist_type_t type); void * dhd_dev_process_full_gscan_result(struct net_device *dev, - const void *data, int *send_evt_bytes); + const void *data, uint32 len, int *send_evt_bytes); extern int dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev); extern void dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type); -extern void dhd_dev_wait_batch_results_complete(struct net_device *dev); +extern int dhd_dev_wait_batch_results_complete(struct net_device *dev); +extern void * dhd_dev_process_epno_result(struct net_device *dev, + const void *data, uint32 event, int *send_evt_bytes); +extern int dhd_dev_set_epno(struct net_device *dev); +extern int dhd_dev_flush_fw_epno(struct net_device *dev); #endif /* GSCAN_SUPPORT */ /* dhd pno fuctions */ extern int dhd_pno_stop_for_ssid(dhd_pub_t *dhd); @@ -472,26 +539,33 @@ extern int 
dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *ev extern int dhd_pno_init(dhd_pub_t *dhd); extern int dhd_pno_deinit(dhd_pub_t *dhd); extern bool dhd_is_pno_supported(dhd_pub_t *dhd); -extern int dhd_pno_set_mac_oui(dhd_pub_t *dhd, uint8 *oui); -#ifdef GSCAN_SUPPORT -extern int dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, - void *buf, uint8 flush); +extern bool dhd_is_legacy_pno_enabled(dhd_pub_t *dhd); +#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS) extern void * dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *info, uint32 *len); -extern void dhd_pno_lock_batch_results(dhd_pub_t *dhd); +#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */ +#ifdef GSCAN_SUPPORT +extern int dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, + void *buf, bool flush); +extern int dhd_pno_lock_batch_results(dhd_pub_t *dhd); extern void dhd_pno_unlock_batch_results(dhd_pub_t *dhd); extern int dhd_pno_initiate_gscan_request(dhd_pub_t *dhd, bool run, bool flush); extern int dhd_pno_enable_full_scan_result(dhd_pub_t *dhd, bool real_time_flag); extern int dhd_pno_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *buf); extern int dhd_dev_retrieve_batch_scan(struct net_device *dev); -extern void *dhd_handle_swc_evt(dhd_pub_t *dhd, const void *event_data, int *send_evt_bytes); extern void *dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data, int *send_evt_bytes, hotlist_type_t type); extern void *dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *event_data, - int *send_evt_bytes); + uint32 len, int *send_evt_bytes); extern int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd); extern void dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type); -extern void dhd_wait_batch_results_complete(dhd_pub_t *dhd); +extern int dhd_wait_batch_results_complete(dhd_pub_t *dhd); +extern void * dhd_pno_process_epno_result(dhd_pub_t *dhd, const void *data, + uint32 
event, int *size); +extern void dhd_pno_translate_epno_fw_flags(uint32 *flags); +extern int dhd_pno_set_epno(dhd_pub_t *dhd); +extern int dhd_pno_flush_fw_epno(dhd_pub_t *dhd); +extern void dhd_pno_set_epno_auth_flag(uint32 *wpa_auth); #endif /* GSCAN_SUPPORT */ #endif diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_proto.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_proto.h index 6dcb56328140..820e3449fa3a 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_proto.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_proto.h @@ -4,7 +4,7 @@ * Provides type definitions and function prototypes used to link the * DHD OS, bus, and protocol modules. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -27,7 +27,7 @@ * * <> * - * $Id: dhd_proto.h 604483 2015-12-07 14:47:36Z $ + * $Id: dhd_proto.h 678890 2017-01-11 11:48:36Z $ */ #ifndef _dhd_proto_h_ @@ -39,7 +39,7 @@ #include #endif -#define DEFAULT_IOCTL_RESP_TIMEOUT 2000 +#define DEFAULT_IOCTL_RESP_TIMEOUT 4000 #ifndef IOCTL_RESP_TIMEOUT /* In milli second default value for Production FW */ #define IOCTL_RESP_TIMEOUT DEFAULT_IOCTL_RESP_TIMEOUT @@ -49,7 +49,7 @@ #define MFG_IOCTL_RESP_TIMEOUT 20000 /* In milli second default value for MFG FW */ #endif /* MFG_IOCTL_RESP_TIMEOUT */ -#define DEFAULT_D3_ACK_RESP_TIMEOUT 4000 +#define DEFAULT_D3_ACK_RESP_TIMEOUT 1000 #ifndef D3_ACK_RESP_TIMEOUT #define D3_ACK_RESP_TIMEOUT DEFAULT_D3_ACK_RESP_TIMEOUT #endif /* D3_ACK_RESP_TIMEOUT */ @@ -59,7 +59,11 @@ #define DHD_BUS_BUSY_TIMEOUT DEFAULT_DHD_BUS_BUSY_TIMEOUT #endif /* DEFAULT_DHD_BUS_BUSY_TIMEOUT */ +#define DS_EXIT_TIMEOUT 1000 /* In ms */ +#define DS_ENTER_TIMEOUT 1000 /* In ms */ + #define IOCTL_DISABLE_TIMEOUT 0 + /* * Exported from the dhd protocol module (dhd_cdc, 
dhd_rndis) */ @@ -107,6 +111,9 @@ extern int dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name, /* Add prot dump output to a buffer */ extern void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); +/* Dump extended trap data */ +extern int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw); + /* Update local copy of dongle statistics */ extern void dhd_prot_dstats(dhd_pub_t *dhdp); @@ -120,13 +127,16 @@ extern int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, #ifdef BCMPCIE extern bool dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound); extern bool dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound); +extern bool dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound); extern int dhd_prot_process_ctrlbuf(dhd_pub_t * dhd); +extern int dhd_prot_process_trapbuf(dhd_pub_t * dhd); extern bool dhd_prot_dtohsplit(dhd_pub_t * dhd); extern int dhd_post_dummy_msg(dhd_pub_t *dhd); extern int dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len); extern void dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 offset); extern int dhd_prot_txdata(dhd_pub_t *dhd, void *p, uint8 ifidx); -extern int dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay); +extern int dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, + uint len, uint srcdelay, uint destdelay, uint d11_lpbk); extern void dhd_dma_buf_init(dhd_pub_t *dhd, void *dma_buf, void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma); @@ -148,13 +158,35 @@ extern void dhd_prot_update_txflowring(dhd_pub_t *dhdp, uint16 flow_id, void *ms extern void dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flow_id, bool in_lock); extern uint32 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val); extern void dhd_prot_reset(dhd_pub_t *dhd); + +#ifdef IDLE_TX_FLOW_MGMT +extern int dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count); +extern int dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node); 
+#endif /* IDLE_TX_FLOW_MGMT */ +extern int dhd_prot_init_info_rings(dhd_pub_t *dhd); + +#endif /* BCMPCIE */ + #ifdef DHD_LB extern void dhd_lb_tx_compl_handler(unsigned long data); extern void dhd_lb_rx_compl_handler(unsigned long data); extern void dhd_lb_rx_process_handler(unsigned long data); #endif /* DHD_LB */ -void dhd_prot_collect_memdump(dhd_pub_t *dhd); +extern int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data); + +#ifdef BCMPCIE +extern int dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlv, uint16 tlv_len, + uint16 seq, uint16 xt_id); +extern bool dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set); +extern bool dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set); +#else /* BCMPCIE */ +#define dhd_prot_send_host_timestamp(a, b, c, d, e) 0 +#define dhd_prot_data_path_tx_timestamp_logging(a, b, c) 0 +#define dhd_prot_data_path_rx_timestamp_logging(a, b, c) 0 #endif /* BCMPCIE */ + +extern void dhd_prot_dma_indx_free(dhd_pub_t *dhd); + /******************************** * For version-string expansion * */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_rtt.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_rtt.c index cc0ebb2ecd2d..c58ca93fc435 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_rtt.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_rtt.c @@ -1,7 +1,7 @@ /* * Broadcom Dongle Host Driver (DHD), RTT * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -21,9 +21,11 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: dhd_rtt.c 606280 2015-12-15 05:28:25Z $ + * + * <> + * + * $Id$ */ -#ifdef RTT_SUPPORT #include #include @@ -39,11 +41,14 @@ #include #include -#include +#include #include #include #include -#define GET_RTTSTATE(dhd) ((rtt_status_info_t *)dhd->rtt_state) +#include +#ifdef WL_CFG80211 +#include +#endif /* WL_CFG80211 */ static DEFINE_SPINLOCK(noti_list_lock); #define NULL_CHECK(p, s, err) \ do { \ @@ -54,39 +59,1079 @@ static DEFINE_SPINLOCK(noti_list_lock); } \ } while (0) -#define RTT_TWO_SIDED(capability) \ - do { \ - if ((capability & RTT_CAP_ONE_WAY) == (uint8) (RTT_CAP_ONE_WAY)) \ - return FALSE; \ - else \ - return TRUE; \ - } while (0) +#define RTT_IS_ENABLED(rtt_status) (rtt_status->status == RTT_ENABLED) +#define RTT_IS_STOPPED(rtt_status) (rtt_status->status == RTT_STOPPED) #define TIMESPEC_TO_US(ts) (((uint64)(ts).tv_sec * USEC_PER_SEC) + \ (ts).tv_nsec / NSEC_PER_USEC) + +#define FTM_IOC_BUFSZ 2048 /* ioc buffsize for our module (> BCM_XTLV_HDR_SIZE) */ +#define FTM_AVAIL_MAX_SLOTS 32 +#define FTM_MAX_CONFIGS 10 +#define FTM_MAX_PARAMS 10 +#define FTM_DEFAULT_SESSION 1 +#define FTM_BURST_TIMEOUT_UNIT 250 /* 250 ns */ +#define FTM_INVALID -1 +#define FTM_DEFAULT_CNT_20M 12 +#define FTM_DEFAULT_CNT_40M 10 +#define FTM_DEFAULT_CNT_80M 5 + +/* convenience macros */ +#define FTM_TU2MICRO(_tu) ((uint64)(_tu) << 10) +#define FTM_MICRO2TU(_tu) ((uint64)(_tu) >> 10) +#define FTM_TU2MILLI(_tu) ((uint32)FTM_TU2MICRO(_tu) / 1000) +#define FTM_MICRO2MILLI(_x) ((uint32)(_x) / 1000) +#define FTM_MICRO2SEC(_x) ((uint32)(_x) / 1000000) +#define FTM_INTVL2NSEC(_intvl) ((uint32)ftm_intvl2nsec(_intvl)) +#define FTM_INTVL2USEC(_intvl) ((uint32)ftm_intvl2usec(_intvl)) +#define FTM_INTVL2MSEC(_intvl) (FTM_INTVL2USEC(_intvl) / 1000) +#define FTM_INTVL2SEC(_intvl) (FTM_INTVL2USEC(_intvl) / 1000000) +#define FTM_USECIN100MILLI(_usec) ((_usec) / 100000) + +/* broadcom specific set to have more accurate data */ +#define ENABLE_VHT_ACK +#define CH_MIN_5G_CHANNEL 34 
+#define CH_MIN_2G_CHANNEL 1 + struct rtt_noti_callback { struct list_head list; void *ctx; dhd_rtt_compl_noti_fn noti_fn; }; -typedef struct rtt_status_info { - dhd_pub_t *dhd; - int8 status; /* current status for the current entry */ - int8 cur_idx; /* current entry to do RTT */ - int32 capability; /* rtt capability */ - struct mutex rtt_mutex; - rtt_config_params_t rtt_config; - struct work_struct work; - struct list_head noti_fn_list; - struct list_head rtt_results_cache; /* store results for RTT */ -} rtt_status_info_t; -static int dhd_rtt_start(dhd_pub_t *dhd); +/* bitmask indicating which command groups; */ +typedef enum { + FTM_SUBCMD_FLAG_METHOD = 0x01, /* FTM method command */ + FTM_SUBCMD_FLAG_SESSION = 0x02, /* FTM session command */ + FTM_SUBCMD_FLAG_ALL = FTM_SUBCMD_FLAG_METHOD | FTM_SUBCMD_FLAG_SESSION +} ftm_subcmd_flag_t; + +/* proxd ftm config-category definition */ +typedef enum { + FTM_CONFIG_CAT_GENERAL = 1, /* generial configuration */ + FTM_CONFIG_CAT_OPTIONS = 2, /* 'config options' */ + FTM_CONFIG_CAT_AVAIL = 3, /* 'config avail' */ +} ftm_config_category_t; + + +typedef struct ftm_subcmd_info { + int16 version; /* FTM version (optional) */ + char *name; /* cmd-name string as cmdline input */ + wl_proxd_cmd_t cmdid; /* cmd-id */ + bcm_xtlv_unpack_cbfn_t *handler; /* cmd response handler (optional) */ + ftm_subcmd_flag_t cmdflag; /* CMD flag (optional) */ +} ftm_subcmd_info_t; + + +typedef struct ftm_config_options_info { + uint32 flags; /* wl_proxd_flags_t/wl_proxd_session_flags_t */ + bool enable; +} ftm_config_options_info_t; + +typedef struct ftm_config_param_info { + uint16 tlvid; /* mapping TLV id for the item */ + union { + uint32 chanspec; + struct ether_addr mac_addr; + wl_proxd_intvl_t data_intvl; + uint32 data32; + uint16 data16; + uint8 data8; + }; +} ftm_config_param_info_t; + +/* +* definition for id-string mapping. +* This is used to map an id (can be cmd-id, tlv-id, ....) 
to a text-string +* for debug-display or cmd-log-display +*/ +typedef struct ftm_strmap_entry { + int32 id; + char *text; +} ftm_strmap_entry_t; + + +typedef struct ftm_status_map_host_entry { + wl_proxd_status_t proxd_status; + rtt_reason_t rtt_reason; +} ftm_status_map_host_entry_t; + +static int +dhd_rtt_convert_results_to_host(rtt_report_t *rtt_report, uint8 *p_data, uint16 tlvid, uint16 len); + +static wifi_rate_t +dhd_rtt_convert_rate_to_host(uint32 ratespec); + +#ifdef WL_CFG80211 +static int +dhd_rtt_start(dhd_pub_t *dhd); +#endif /* WL_CFG80211 */ +static const int burst_duration_idx[] = {0, 0, 1, 2, 4, 8, 16, 32, 64, 128, 0, 0}; + +/* ftm status mapping to host status */ +static const ftm_status_map_host_entry_t ftm_status_map_info[] = { + {WL_PROXD_E_INCOMPLETE, RTT_REASON_FAILURE}, + {WL_PROXD_E_OVERRIDDEN, RTT_REASON_FAILURE}, + {WL_PROXD_E_ASAP_FAILED, RTT_REASON_FAILURE}, + {WL_PROXD_E_NOTSTARTED, RTT_REASON_FAIL_NOT_SCHEDULED_YET}, + {WL_PROXD_E_INVALIDMEAS, RTT_REASON_FAIL_INVALID_TS}, + {WL_PROXD_E_INCAPABLE, RTT_REASON_FAIL_NO_CAPABILITY}, + {WL_PROXD_E_MISMATCH, RTT_REASON_FAILURE}, + {WL_PROXD_E_DUP_SESSION, RTT_REASON_FAILURE}, + {WL_PROXD_E_REMOTE_FAIL, RTT_REASON_FAILURE}, + {WL_PROXD_E_REMOTE_INCAPABLE, RTT_REASON_FAILURE}, + {WL_PROXD_E_SCHED_FAIL, RTT_REASON_FAIL_SCHEDULE}, + {WL_PROXD_E_PROTO, RTT_REASON_FAIL_PROTOCOL}, + {WL_PROXD_E_EXPIRED, RTT_REASON_FAILURE}, + {WL_PROXD_E_TIMEOUT, RTT_REASON_FAIL_TM_TIMEOUT}, + {WL_PROXD_E_NOACK, RTT_REASON_FAIL_NO_RSP}, + {WL_PROXD_E_DEFERRED, RTT_REASON_FAILURE}, + {WL_PROXD_E_INVALID_SID, RTT_REASON_FAILURE}, + {WL_PROXD_E_REMOTE_CANCEL, RTT_REASON_FAILURE}, + {WL_PROXD_E_CANCELED, RTT_REASON_ABORTED}, + {WL_PROXD_E_INVALID_SESSION, RTT_REASON_FAILURE}, + {WL_PROXD_E_BAD_STATE, RTT_REASON_FAILURE}, + {WL_PROXD_E_ERROR, RTT_REASON_FAILURE}, + {WL_PROXD_E_OK, RTT_REASON_SUCCESS} +}; + +/* ftm tlv-id mapping */ +static const ftm_strmap_entry_t ftm_tlvid_loginfo[] = { + /* { WL_PROXD_TLV_ID_xxx, 
"text for WL_PROXD_TLV_ID_xxx" }, */ + { WL_PROXD_TLV_ID_NONE, "none" }, + { WL_PROXD_TLV_ID_METHOD, "method" }, + { WL_PROXD_TLV_ID_FLAGS, "flags" }, + { WL_PROXD_TLV_ID_CHANSPEC, "chanspec" }, + { WL_PROXD_TLV_ID_TX_POWER, "tx power" }, + { WL_PROXD_TLV_ID_RATESPEC, "ratespec" }, + { WL_PROXD_TLV_ID_BURST_DURATION, "burst duration" }, + { WL_PROXD_TLV_ID_BURST_PERIOD, "burst period" }, + { WL_PROXD_TLV_ID_BURST_FTM_SEP, "burst ftm sep" }, + { WL_PROXD_TLV_ID_BURST_NUM_FTM, "burst num ftm" }, + { WL_PROXD_TLV_ID_NUM_BURST, "num burst" }, + { WL_PROXD_TLV_ID_FTM_RETRIES, "ftm retries" }, + { WL_PROXD_TLV_ID_BSS_INDEX, "BSS index" }, + { WL_PROXD_TLV_ID_BSSID, "bssid" }, + { WL_PROXD_TLV_ID_INIT_DELAY, "burst init delay" }, + { WL_PROXD_TLV_ID_BURST_TIMEOUT, "burst timeout" }, + { WL_PROXD_TLV_ID_EVENT_MASK, "event mask" }, + { WL_PROXD_TLV_ID_FLAGS_MASK, "flags mask" }, + { WL_PROXD_TLV_ID_PEER_MAC, "peer addr" }, + { WL_PROXD_TLV_ID_FTM_REQ, "ftm req" }, + { WL_PROXD_TLV_ID_LCI_REQ, "lci req" }, + { WL_PROXD_TLV_ID_LCI, "lci" }, + { WL_PROXD_TLV_ID_CIVIC_REQ, "civic req" }, + { WL_PROXD_TLV_ID_CIVIC, "civic" }, + { WL_PROXD_TLV_ID_AVAIL, "availability" }, + { WL_PROXD_TLV_ID_SESSION_FLAGS, "session flags" }, + { WL_PROXD_TLV_ID_SESSION_FLAGS_MASK, "session flags mask" }, + { WL_PROXD_TLV_ID_RX_MAX_BURST, "rx max bursts" }, + { WL_PROXD_TLV_ID_RANGING_INFO, "ranging info" }, + { WL_PROXD_TLV_ID_RANGING_FLAGS, "ranging flags" }, + { WL_PROXD_TLV_ID_RANGING_FLAGS_MASK, "ranging flags mask" }, + /* output - 512 + x */ + { WL_PROXD_TLV_ID_STATUS, "status" }, + { WL_PROXD_TLV_ID_COUNTERS, "counters" }, + { WL_PROXD_TLV_ID_INFO, "info" }, + { WL_PROXD_TLV_ID_RTT_RESULT, "rtt result" }, + { WL_PROXD_TLV_ID_AOA_RESULT, "aoa result" }, + { WL_PROXD_TLV_ID_SESSION_INFO, "session info" }, + { WL_PROXD_TLV_ID_SESSION_STATUS, "session status" }, + { WL_PROXD_TLV_ID_SESSION_ID_LIST, "session ids" }, + /* debug tlvs can be added starting 1024 */ + { WL_PROXD_TLV_ID_DEBUG_MASK, 
"debug mask" }, + { WL_PROXD_TLV_ID_COLLECT, "collect" }, + { WL_PROXD_TLV_ID_STRBUF, "result" }, + { WL_PROXD_TLV_ID_COLLECT_DATA, "collect-data" }, + { WL_PROXD_TLV_ID_RI_RR, "ri_rr" }, + { WL_PROXD_TLV_ID_COLLECT_CHAN_DATA, "chan est"} +}; + +static const ftm_strmap_entry_t ftm_event_type_loginfo[] = { + /* wl_proxd_event_type_t, text-string */ + { WL_PROXD_EVENT_NONE, "none" }, + { WL_PROXD_EVENT_SESSION_CREATE, "session create" }, + { WL_PROXD_EVENT_SESSION_START, "session start" }, + { WL_PROXD_EVENT_FTM_REQ, "FTM req" }, + { WL_PROXD_EVENT_BURST_START, "burst start" }, + { WL_PROXD_EVENT_BURST_END, "burst end" }, + { WL_PROXD_EVENT_SESSION_END, "session end" }, + { WL_PROXD_EVENT_SESSION_RESTART, "session restart" }, + { WL_PROXD_EVENT_BURST_RESCHED, "burst rescheduled" }, + { WL_PROXD_EVENT_SESSION_DESTROY, "session destroy" }, + { WL_PROXD_EVENT_RANGE_REQ, "range request" }, + { WL_PROXD_EVENT_FTM_FRAME, "FTM frame" }, + { WL_PROXD_EVENT_DELAY, "delay" }, + { WL_PROXD_EVENT_VS_INITIATOR_RPT, "initiator-report " }, /* rx */ + { WL_PROXD_EVENT_RANGING, "ranging " }, + { WL_PROXD_EVENT_COLLECT, "collect" }, +}; + +/* +* session-state --> text string mapping +*/ +static const ftm_strmap_entry_t ftm_session_state_value_loginfo[] = { + /* wl_proxd_session_state_t, text string */ + { WL_PROXD_SESSION_STATE_CREATED, "created" }, + { WL_PROXD_SESSION_STATE_CONFIGURED, "configured" }, + { WL_PROXD_SESSION_STATE_STARTED, "started" }, + { WL_PROXD_SESSION_STATE_DELAY, "delay" }, + { WL_PROXD_SESSION_STATE_USER_WAIT, "user-wait" }, + { WL_PROXD_SESSION_STATE_SCHED_WAIT, "sched-wait" }, + { WL_PROXD_SESSION_STATE_BURST, "burst" }, + { WL_PROXD_SESSION_STATE_STOPPING, "stopping" }, + { WL_PROXD_SESSION_STATE_ENDED, "ended" }, + { WL_PROXD_SESSION_STATE_DESTROYING, "destroying" }, + { WL_PROXD_SESSION_STATE_NONE, "none" } +}; + +/* +* ranging-state --> text string mapping +*/ +static const ftm_strmap_entry_t ftm_ranging_state_value_loginfo [] = { + /* 
wl_proxd_ranging_state_t, text string */ + { WL_PROXD_RANGING_STATE_NONE, "none" }, + { WL_PROXD_RANGING_STATE_NOTSTARTED, "nonstarted" }, + { WL_PROXD_RANGING_STATE_INPROGRESS, "inprogress" }, + { WL_PROXD_RANGING_STATE_DONE, "done" }, +}; + +/* +* status --> text string mapping +*/ +static const ftm_strmap_entry_t ftm_status_value_loginfo[] = { + /* wl_proxd_status_t, text-string */ + { WL_PROXD_E_OVERRIDDEN, "overridden" }, + { WL_PROXD_E_ASAP_FAILED, "ASAP failed" }, + { WL_PROXD_E_NOTSTARTED, "not started" }, + { WL_PROXD_E_INVALIDMEAS, "invalid measurement" }, + { WL_PROXD_E_INCAPABLE, "incapable" }, + { WL_PROXD_E_MISMATCH, "mismatch"}, + { WL_PROXD_E_DUP_SESSION, "dup session" }, + { WL_PROXD_E_REMOTE_FAIL, "remote fail" }, + { WL_PROXD_E_REMOTE_INCAPABLE, "remote incapable" }, + { WL_PROXD_E_SCHED_FAIL, "sched failure" }, + { WL_PROXD_E_PROTO, "protocol error" }, + { WL_PROXD_E_EXPIRED, "expired" }, + { WL_PROXD_E_TIMEOUT, "timeout" }, + { WL_PROXD_E_NOACK, "no ack" }, + { WL_PROXD_E_DEFERRED, "deferred" }, + { WL_PROXD_E_INVALID_SID, "invalid session id" }, + { WL_PROXD_E_REMOTE_CANCEL, "remote cancel" }, + { WL_PROXD_E_CANCELED, "canceled" }, + { WL_PROXD_E_INVALID_SESSION, "invalid session" }, + { WL_PROXD_E_BAD_STATE, "bad state" }, + { WL_PROXD_E_ERROR, "error" }, + { WL_PROXD_E_OK, "OK" } +}; + +/* +* time interval unit --> text string mapping +*/ +static const ftm_strmap_entry_t ftm_tmu_value_loginfo[] = { + /* wl_proxd_tmu_t, text-string */ + { WL_PROXD_TMU_TU, "TU" }, + { WL_PROXD_TMU_SEC, "sec" }, + { WL_PROXD_TMU_MILLI_SEC, "ms" }, + { WL_PROXD_TMU_MICRO_SEC, "us" }, + { WL_PROXD_TMU_NANO_SEC, "ns" }, + { WL_PROXD_TMU_PICO_SEC, "ps" } +}; + +#define RSPEC_BW(rspec) ((rspec) & WL_RSPEC_BW_MASK) +#define RSPEC_IS20MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_20MHZ) +#define RSPEC_IS40MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_40MHZ) +#define RSPEC_IS80MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_80MHZ) +#define RSPEC_IS160MHZ(rspec) (RSPEC_BW(rspec) 
== WL_RSPEC_BW_160MHZ) + +#define IS_MCS(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) != WL_RSPEC_ENCODE_RATE) +#define IS_STBC(rspec) (((((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HT) || \ + (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT)) && \ + (((rspec) & WL_RSPEC_STBC) == WL_RSPEC_STBC)) +#define RSPEC_ISSGI(rspec) (((rspec) & WL_RSPEC_SGI) != 0) +#define RSPEC_ISLDPC(rspec) (((rspec) & WL_RSPEC_LDPC) != 0) +#define RSPEC_ISSTBC(rspec) (((rspec) & WL_RSPEC_STBC) != 0) +#define RSPEC_ISTXBF(rspec) (((rspec) & WL_RSPEC_TXBF) != 0) +#define RSPEC_ISVHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT) +#define RSPEC_ISHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HT) +#define RSPEC_ISLEGACY(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_RATE) +#define RSPEC2RATE(rspec) (RSPEC_ISLEGACY(rspec) ? \ + ((rspec) & RSPEC_RATE_MASK) : rate_rspec2rate(rspec)) +/* return rate in unit of 500Kbps -- for internal use in wlc_rate_sel.c */ +#define RSPEC2KBPS(rspec) rate_rspec2rate(rspec) + +struct ieee_80211_mcs_rate_info { + uint8 constellation_bits; + uint8 coding_q; + uint8 coding_d; +}; + +static const struct ieee_80211_mcs_rate_info wl_mcs_info[] = { + { 1, 1, 2 }, /* MCS 0: MOD: BPSK, CR 1/2 */ + { 2, 1, 2 }, /* MCS 1: MOD: QPSK, CR 1/2 */ + { 2, 3, 4 }, /* MCS 2: MOD: QPSK, CR 3/4 */ + { 4, 1, 2 }, /* MCS 3: MOD: 16QAM, CR 1/2 */ + { 4, 3, 4 }, /* MCS 4: MOD: 16QAM, CR 3/4 */ + { 6, 2, 3 }, /* MCS 5: MOD: 64QAM, CR 2/3 */ + { 6, 3, 4 }, /* MCS 6: MOD: 64QAM, CR 3/4 */ + { 6, 5, 6 }, /* MCS 7: MOD: 64QAM, CR 5/6 */ + { 8, 3, 4 }, /* MCS 8: MOD: 256QAM, CR 3/4 */ + { 8, 5, 6 } /* MCS 9: MOD: 256QAM, CR 5/6 */ +}; + +/** + * Returns the rate in [Kbps] units for a caller supplied MCS/bandwidth/Nss/Sgi combination. 
+ * 'mcs' : a *single* spatial stream MCS (11n or 11ac) + */ +uint +rate_mcs2rate(uint mcs, uint nss, uint bw, int sgi) +{ + const int ksps = 250; /* kilo symbols per sec, 4 us sym */ + const int Nsd_20MHz = 52; + const int Nsd_40MHz = 108; + const int Nsd_80MHz = 234; + const int Nsd_160MHz = 468; + uint rate; + + if (mcs == 32) { + /* just return fixed values for mcs32 instead of trying to parametrize */ + rate = (sgi == 0) ? 6000 : 6778; + } else if (mcs <= 9) { + /* This calculation works for 11n HT and 11ac VHT if the HT mcs values + * are decomposed into a base MCS = MCS % 8, and Nss = 1 + MCS / 8. + * That is, HT MCS 23 is a base MCS = 7, Nss = 3 + */ + + /* find the number of complex numbers per symbol */ + if (RSPEC_IS20MHZ(bw)) { + rate = Nsd_20MHz; + } else if (RSPEC_IS40MHZ(bw)) { + rate = Nsd_40MHz; + } else if (bw == WL_RSPEC_BW_80MHZ) { + rate = Nsd_80MHz; + } else if (bw == WL_RSPEC_BW_160MHZ) { + rate = Nsd_160MHz; + } else { + rate = 0; + } + + /* multiply by bits per number from the constellation in use */ + rate = rate * wl_mcs_info[mcs].constellation_bits; + + /* adjust for the number of spatial streams */ + rate = rate * nss; + + /* adjust for the coding rate given as a quotient and divisor */ + rate = (rate * wl_mcs_info[mcs].coding_q) / wl_mcs_info[mcs].coding_d; + + /* multiply by Kilo symbols per sec to get Kbps */ + rate = rate * ksps; + + /* adjust the symbols per sec for SGI + * symbol duration is 4 us without SGI, and 3.6 us with SGI, + * so ratio is 10 / 9 + */ + if (sgi) { + /* add 4 for rounding of division by 9 */ + rate = ((rate * 10) + 4) / 9; + } + } else { + rate = 0; + } + + return rate; +} /* wlc_rate_mcs2rate */ + +/** take a well formed ratespec_t arg and return phy rate in [Kbps] units */ +int +rate_rspec2rate(uint32 rspec) +{ + int rate = -1; + + if (RSPEC_ISLEGACY(rspec)) { + rate = 500 * (rspec & WL_RSPEC_RATE_MASK); + } else if (RSPEC_ISHT(rspec)) { + uint mcs = (rspec & WL_RSPEC_RATE_MASK); + + if (mcs == 32) { + rate 
= rate_mcs2rate(mcs, 1, WL_RSPEC_BW_40MHZ, RSPEC_ISSGI(rspec)); + } else { + uint nss = 1 + (mcs / 8); + mcs = mcs % 8; + rate = rate_mcs2rate(mcs, nss, RSPEC_BW(rspec), RSPEC_ISSGI(rspec)); + } + } else if (RSPEC_ISVHT(rspec)) { + uint mcs = (rspec & WL_RSPEC_VHT_MCS_MASK); + uint nss = (rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT; + + ASSERT(mcs <= 9); + ASSERT(nss <= 8); + + rate = rate_mcs2rate(mcs, nss, RSPEC_BW(rspec), RSPEC_ISSGI(rspec)); + } else { + ASSERT(0); + } + + return (rate == 0) ? -1 : rate; +} + +char resp_buf[WLC_IOCTL_SMLEN]; + +static uint64 +ftm_intvl2nsec(const wl_proxd_intvl_t *intvl) +{ + uint64 ret; + ret = intvl->intvl; + switch (intvl->tmu) { + case WL_PROXD_TMU_TU: ret = FTM_TU2MICRO(ret) * 1000; break; + case WL_PROXD_TMU_SEC: ret *= 1000000000; break; + case WL_PROXD_TMU_MILLI_SEC: ret *= 1000000; break; + case WL_PROXD_TMU_MICRO_SEC: ret *= 1000; break; + case WL_PROXD_TMU_PICO_SEC: ret = intvl->intvl / 1000; break; + case WL_PROXD_TMU_NANO_SEC: /* fall through */ + default: break; + } + return ret; +} +uint64 +ftm_intvl2usec(const wl_proxd_intvl_t *intvl) +{ + uint64 ret; + ret = intvl->intvl; + switch (intvl->tmu) { + case WL_PROXD_TMU_TU: ret = FTM_TU2MICRO(ret); break; + case WL_PROXD_TMU_SEC: ret *= 1000000; break; + case WL_PROXD_TMU_NANO_SEC: ret = intvl->intvl / 1000; break; + case WL_PROXD_TMU_PICO_SEC: ret = intvl->intvl / 1000000; break; + case WL_PROXD_TMU_MILLI_SEC: ret *= 1000; break; + case WL_PROXD_TMU_MICRO_SEC: /* fall through */ + default: break; + } + return ret; +} + +/* +* lookup 'id' (as a key) from a fw status to host map table +* if found, return the corresponding reason code +*/ + +static rtt_reason_t +ftm_get_statusmap_info(wl_proxd_status_t id, const ftm_status_map_host_entry_t *p_table, + uint32 num_entries) +{ + int i; + const ftm_status_map_host_entry_t *p_entry; + /* scan thru the table till end */ + p_entry = p_table; + for (i = 0; i < (int) num_entries; i++) + { + if 
(p_entry->proxd_status == id) { + return p_entry->rtt_reason; + } + p_entry++; /* next entry */ + } + return RTT_REASON_FAILURE; /* not found */ +} +/* +* lookup 'id' (as a key) from a table +* if found, return the entry pointer, otherwise return NULL +*/ +static const ftm_strmap_entry_t* +ftm_get_strmap_info(int32 id, const ftm_strmap_entry_t *p_table, uint32 num_entries) +{ + int i; + const ftm_strmap_entry_t *p_entry; + + /* scan thru the table till end */ + p_entry = p_table; + for (i = 0; i < (int) num_entries; i++) + { + if (p_entry->id == id) + return p_entry; + p_entry++; /* next entry */ + } + return NULL; /* not found */ +} + +/* +* map enum to a text-string for display, this function is called by the following: +* For debug/trace: +* ftm_[cmdid|tlvid]_to_str() +* For TLV-output log for 'get' commands +* ftm_[method|tmu|caps|status|state]_value_to_logstr() +* Input: +* pTable -- point to a 'enum to string' table. +*/ +static const char * +ftm_map_id_to_str(int32 id, const ftm_strmap_entry_t *p_table, uint32 num_entries) +{ + const ftm_strmap_entry_t*p_entry = ftm_get_strmap_info(id, p_table, num_entries); + if (p_entry) + return (p_entry->text); + + return "invalid"; +} + + +#ifdef RTT_DEBUG + +/* define entry, e.g. 
{ WL_PROXD_CMD_xxx, "WL_PROXD_CMD_xxx" } */ +#define DEF_STRMAP_ENTRY(id) { (id), #id } + +/* ftm cmd-id mapping */ +static const ftm_strmap_entry_t ftm_cmdid_map[] = { + /* {wl_proxd_cmd_t(WL_PROXD_CMD_xxx), "WL_PROXD_CMD_xxx" }, */ + DEF_STRMAP_ENTRY(WL_PROXD_CMD_NONE), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_VERSION), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_ENABLE), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_DISABLE), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_CONFIG), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_START_SESSION), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_BURST_REQUEST), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_STOP_SESSION), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_DELETE_SESSION), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_RESULT), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_INFO), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_STATUS), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_SESSIONS), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_COUNTERS), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_CLEAR_COUNTERS), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_COLLECT), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_TUNE), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_DUMP), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_START_RANGING), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_STOP_RANGING), + DEF_STRMAP_ENTRY(WL_PROXD_CMD_GET_RANGING_INFO), +}; + +/* +* map a ftm cmd-id to a text-string for display +*/ +static const char * +ftm_cmdid_to_str(uint16 cmdid) +{ + return ftm_map_id_to_str((int32) cmdid, &ftm_cmdid_map[0], ARRAYSIZE(ftm_cmdid_map)); +} +#endif /* RTT_DEBUG */ + + +/* +* convert BCME_xxx error codes into related error strings +* note, bcmerrorstr() defined in bcmutils is for BCMDRIVER only, +* this duplicate copy is for WL access and may need to clean up later +*/ +static const char *ftm_bcmerrorstrtable[] = BCMERRSTRINGTABLE; +static const char * +ftm_status_value_to_logstr(wl_proxd_status_t status) +{ + static char ftm_msgbuf_status_undef[32]; + const ftm_strmap_entry_t *p_loginfo; + int bcmerror; + + /* check if within BCME_xxx error range */ + bcmerror = (int) status; + if (VALID_BCMERROR(bcmerror)) + return 
ftm_bcmerrorstrtable[-bcmerror]; + + /* otherwise, look for 'proxd ftm status' range */ + p_loginfo = ftm_get_strmap_info((int32) status, + &ftm_status_value_loginfo[0], ARRAYSIZE(ftm_status_value_loginfo)); + if (p_loginfo) + return p_loginfo->text; + + /* report for 'out of range' FTM-status error code */ + memset(ftm_msgbuf_status_undef, 0, sizeof(ftm_msgbuf_status_undef)); + snprintf(ftm_msgbuf_status_undef, sizeof(ftm_msgbuf_status_undef), + "Undefined status %d", status); + return &ftm_msgbuf_status_undef[0]; +} + +static const char * +ftm_tmu_value_to_logstr(wl_proxd_tmu_t tmu) +{ + return ftm_map_id_to_str((int32)tmu, + &ftm_tmu_value_loginfo[0], ARRAYSIZE(ftm_tmu_value_loginfo)); +} + +static const ftm_strmap_entry_t* +ftm_get_event_type_loginfo(wl_proxd_event_type_t event_type) +{ + /* look up 'event-type' from a predefined table */ + return ftm_get_strmap_info((int32) event_type, + ftm_event_type_loginfo, ARRAYSIZE(ftm_event_type_loginfo)); +} + +static const char * +ftm_session_state_value_to_logstr(wl_proxd_session_state_t state) +{ + return ftm_map_id_to_str((int32)state, &ftm_session_state_value_loginfo[0], + ARRAYSIZE(ftm_session_state_value_loginfo)); +} + + +#ifdef WL_CFG80211 +/* +* send 'proxd' iovar for all ftm get-related commands +*/ +static int +rtt_do_get_ioctl(dhd_pub_t *dhd, wl_proxd_iov_t *p_proxd_iov, uint16 proxd_iovsize, + ftm_subcmd_info_t *p_subcmd_info) +{ + + wl_proxd_iov_t *p_iovresp = (wl_proxd_iov_t *)resp_buf; + int status; + int tlvs_len; + /* send getbuf proxd iovar */ + status = dhd_getiovar(dhd, 0, "proxd", (char *)p_proxd_iov, + proxd_iovsize, (char **)&p_iovresp, WLC_IOCTL_SMLEN); + if (status != BCME_OK) { + DHD_ERROR(("%s: failed to send getbuf proxd iovar (CMD ID : %d), status=%d\n", + __FUNCTION__, p_subcmd_info->cmdid, status)); + return status; + } + if (p_subcmd_info->cmdid == WL_PROXD_CMD_GET_VERSION) { + p_subcmd_info->version = ltoh16(p_iovresp->version); + DHD_RTT(("ftm version: 0x%x\n", 
ltoh16(p_iovresp->version))); + goto exit; + } + + tlvs_len = ltoh16(p_iovresp->len) - WL_PROXD_IOV_HDR_SIZE; + if (tlvs_len < 0) { + DHD_ERROR(("%s: alert, p_iovresp->len(%d) should not be smaller than %d\n", + __FUNCTION__, ltoh16(p_iovresp->len), (int) WL_PROXD_IOV_HDR_SIZE)); + tlvs_len = 0; + } + + if (tlvs_len > 0 && p_subcmd_info->handler) { + /* unpack TLVs and invokes the cbfn for processing */ + status = bcm_unpack_xtlv_buf(p_proxd_iov, (uint8 *)p_iovresp->tlvs, + tlvs_len, BCM_XTLV_OPTION_ALIGN32, p_subcmd_info->handler); + } +exit: + return status; +} + + +static wl_proxd_iov_t * +rtt_alloc_getset_buf(wl_proxd_method_t method, wl_proxd_session_id_t session_id, + wl_proxd_cmd_t cmdid, uint16 tlvs_bufsize, uint16 *p_out_bufsize) +{ + uint16 proxd_iovsize; + uint16 kflags; + wl_proxd_tlv_t *p_tlv; + wl_proxd_iov_t *p_proxd_iov = (wl_proxd_iov_t *) NULL; + + *p_out_bufsize = 0; /* init */ + kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; + /* calculate the whole buffer size, including one reserve-tlv entry in the header */ + proxd_iovsize = sizeof(wl_proxd_iov_t) + tlvs_bufsize; + + p_proxd_iov = kzalloc(proxd_iovsize, kflags); + if (p_proxd_iov == NULL) { + DHD_ERROR(("error: failed to allocate %d bytes of memory\n", proxd_iovsize)); + return NULL; + } + + /* setup proxd-FTM-method iovar header */ + p_proxd_iov->version = htol16(WL_PROXD_API_VERSION); + p_proxd_iov->len = htol16(proxd_iovsize); /* caller may adjust it based on #of TLVs */ + p_proxd_iov->cmd = htol16(cmdid); + p_proxd_iov->method = htol16(method); + p_proxd_iov->sid = htol16(session_id); + + /* initialize the reserved/dummy-TLV in iovar header */ + p_tlv = p_proxd_iov->tlvs; + p_tlv->id = htol16(WL_PROXD_TLV_ID_NONE); + p_tlv->len = htol16(0); + + *p_out_bufsize = proxd_iovsize; /* for caller's reference */ + + return p_proxd_iov; +} + + +static int +dhd_rtt_common_get_handler(dhd_pub_t *dhd, ftm_subcmd_info_t *p_subcmd_info, + wl_proxd_method_t method, + wl_proxd_session_id_t session_id) 
+{ + int status = BCME_OK; + uint16 proxd_iovsize = 0; + wl_proxd_iov_t *p_proxd_iov; +#ifdef RTT_DEBUG + DHD_RTT(("enter %s: method=%d, session_id=%d, cmdid=%d(%s)\n", + __FUNCTION__, method, session_id, p_subcmd_info->cmdid, + ftm_cmdid_to_str(p_subcmd_info->cmdid))); +#endif + /* alloc mem for ioctl headr + reserved 0 bufsize for tlvs (initialize to zero) */ + p_proxd_iov = rtt_alloc_getset_buf(method, session_id, p_subcmd_info->cmdid, + 0, &proxd_iovsize); + + if (p_proxd_iov == NULL) + return BCME_NOMEM; + + status = rtt_do_get_ioctl(dhd, p_proxd_iov, proxd_iovsize, p_subcmd_info); + + if (status != BCME_OK) { + DHD_RTT(("%s failed: status=%d\n", __FUNCTION__, status)); + } + kfree(p_proxd_iov); + return status; +} + +/* +* common handler for set-related proxd method commands which require no TLV as input +* wl proxd ftm [session-id] +* e.g. +* wl proxd ftm enable -- to enable ftm +* wl proxd ftm disable -- to disable ftm +* wl proxd ftm start -- to start a specified session +* wl proxd ftm stop -- to cancel a specified session; +* state is maintained till session is delete. 
+* wl proxd ftm delete -- to delete a specified session +* wl proxd ftm [] clear-counters -- to clear counters +* wl proxd ftm burst-request -- on initiator: to send burst request; +* on target: send FTM frame +* wl proxd ftm collect +* wl proxd ftm tune (TBD) +*/ +static int +dhd_rtt_common_set_handler(dhd_pub_t *dhd, const ftm_subcmd_info_t *p_subcmd_info, + wl_proxd_method_t method, wl_proxd_session_id_t session_id) +{ + uint16 proxd_iovsize; + wl_proxd_iov_t *p_proxd_iov; + int ret; + +#ifdef RTT_DEBUG + DHD_RTT(("enter %s: method=%d, session_id=%d, cmdid=%d(%s)\n", + __FUNCTION__, method, session_id, p_subcmd_info->cmdid, + ftm_cmdid_to_str(p_subcmd_info->cmdid))); +#endif + + /* allocate and initialize a temp buffer for 'set proxd' iovar */ + proxd_iovsize = 0; + p_proxd_iov = rtt_alloc_getset_buf(method, session_id, p_subcmd_info->cmdid, + 0, &proxd_iovsize); /* no TLV */ + if (p_proxd_iov == NULL) + return BCME_NOMEM; + + /* no TLV to pack, simply issue a set-proxd iovar */ + ret = dhd_iovar(dhd, 0, "proxd", (char *)p_proxd_iov, proxd_iovsize, NULL, 0, TRUE); +#ifdef RTT_DEBUG + if (ret != BCME_OK) { + DHD_RTT(("error: IOVAR failed, status=%d\n", ret)); + } +#endif + /* clean up */ + kfree(p_proxd_iov); + + return ret; +} +#endif /* WL_CFG80211 */ + +static int +rtt_unpack_xtlv_cbfn(void *ctx, uint8 *p_data, uint16 tlvid, uint16 len) +{ + int ret = BCME_OK; + int i; + wl_proxd_ftm_session_status_t *p_data_info = NULL; + wl_proxd_collect_event_data_t *p_collect_data = NULL; + uint32 chan_data_entry = 0; + + switch (tlvid) { + case WL_PROXD_TLV_ID_RTT_RESULT: + ret = dhd_rtt_convert_results_to_host((rtt_report_t *)ctx, + p_data, tlvid, len); + break; + case WL_PROXD_TLV_ID_SESSION_STATUS: + DHD_RTT(("WL_PROXD_TLV_ID_SESSION_STATUS\n")); + memcpy(ctx, p_data, sizeof(wl_proxd_ftm_session_status_t)); + p_data_info = (wl_proxd_ftm_session_status_t *)ctx; + p_data_info->sid = ltoh16_ua(&p_data_info->sid); + p_data_info->state = ltoh16_ua(&p_data_info->state); + 
p_data_info->status = ltoh32_ua(&p_data_info->status); + p_data_info->burst_num = ltoh16_ua(&p_data_info->burst_num); + DHD_RTT(("\tsid=%u, state=%d, status=%d, burst_num=%u\n", + p_data_info->sid, p_data_info->state, + p_data_info->status, p_data_info->burst_num)); + + break; + case WL_PROXD_TLV_ID_COLLECT_DATA: + DHD_RTT(("WL_PROXD_TLV_ID_COLLECT_DATA\n")); + memcpy(ctx, p_data, sizeof(wl_proxd_collect_event_data_t)); + p_collect_data = (wl_proxd_collect_event_data_t *)ctx; + DHD_RTT(("\tH_RX\n")); + for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) { + p_collect_data->H_RX[i] = ltoh32_ua(&p_collect_data->H_RX[i]); + DHD_RTT(("\t%u\n", p_collect_data->H_RX[i])); + } + DHD_RTT(("\n")); + DHD_RTT(("\tH_LB\n")); + for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) { + p_collect_data->H_LB[i] = ltoh32_ua(&p_collect_data->H_LB[i]); + DHD_RTT(("\t%u\n", p_collect_data->H_LB[i])); + } + DHD_RTT(("\n")); + DHD_RTT(("\tri_rr\n")); + for (i = 0; i < FTM_TPK_RI_RR_LEN; i++) { + DHD_RTT(("\t%u\n", p_collect_data->ri_rr[i])); + } + p_collect_data->phy_err_mask = ltoh32_ua(&p_collect_data->phy_err_mask); + DHD_RTT(("\tphy_err_mask=0x%x\n", p_collect_data->phy_err_mask)); + break; + case WL_PROXD_TLV_ID_COLLECT_CHAN_DATA: + DHD_RTT(("WL_PROXD_TLV_ID_COLLECT_CHAN_DATA\n")); + DHD_RTT(("\tchan est %u\n", (uint32) (len / sizeof(uint32)))); + for (i = 0; i < (len/sizeof(chan_data_entry)); i++) { + uint32 *p = (uint32*)p_data; + chan_data_entry = ltoh32_ua(p + i); + DHD_RTT(("\t%u\n", chan_data_entry)); + } + break; + default: + DHD_ERROR(("> Unsupported TLV ID %d\n", tlvid)); + ret = BCME_ERROR; + break; + } + + return ret; +} + +#ifdef WL_CFG80211 +static int +rtt_handle_config_options(wl_proxd_session_id_t session_id, wl_proxd_tlv_t **p_tlv, + uint16 *p_buf_space_left, ftm_config_options_info_t *ftm_configs, int ftm_cfg_cnt) +{ + int ret = BCME_OK; + int cfg_idx = 0; + uint32 flags = WL_PROXD_FLAG_NONE; + uint32 flags_mask = WL_PROXD_FLAG_NONE; + uint32 new_mask; /* cmdline input */ 
+ ftm_config_options_info_t *p_option_info; + uint16 type = (session_id == WL_PROXD_SESSION_ID_GLOBAL) ? + WL_PROXD_TLV_ID_FLAGS_MASK : WL_PROXD_TLV_ID_SESSION_FLAGS_MASK; + for (cfg_idx = 0; cfg_idx < ftm_cfg_cnt; cfg_idx++) { + p_option_info = (ftm_configs + cfg_idx); + if (p_option_info != NULL) { + new_mask = p_option_info->flags; + /* update flags mask */ + flags_mask |= new_mask; + if (p_option_info->enable) { + flags |= new_mask; /* set the bit on */ + } else { + flags &= ~new_mask; /* set the bit off */ + } + } + } + flags = htol32(flags); + flags_mask = htol32(flags_mask); + /* setup flags_mask TLV */ + ret = bcm_pack_xtlv_entry((uint8 **)p_tlv, p_buf_space_left, + type, sizeof(uint32), &flags_mask, BCM_XTLV_OPTION_ALIGN32); + if (ret != BCME_OK) { + DHD_ERROR(("%s : bcm_pack_xltv_entry() for mask flags failed, status=%d\n", + __FUNCTION__, ret)); + goto exit; + } + + type = (session_id == WL_PROXD_SESSION_ID_GLOBAL)? + WL_PROXD_TLV_ID_FLAGS : WL_PROXD_TLV_ID_SESSION_FLAGS; + /* setup flags TLV */ + ret = bcm_pack_xtlv_entry((uint8 **)p_tlv, p_buf_space_left, + type, sizeof(uint32), &flags, BCM_XTLV_OPTION_ALIGN32); + if (ret != BCME_OK) { +#ifdef RTT_DEBUG + DHD_RTT(("%s: bcm_pack_xltv_entry() for flags failed, status=%d\n", + __FUNCTION__, ret)); +#endif + } +exit: + return ret; +} + +static int +rtt_handle_config_general(wl_proxd_session_id_t session_id, wl_proxd_tlv_t **p_tlv, + uint16 *p_buf_space_left, ftm_config_param_info_t *ftm_configs, int ftm_cfg_cnt) +{ + int ret = BCME_OK; + int cfg_idx = 0; + uint32 chanspec; + ftm_config_param_info_t *p_config_param_info; + void *p_src_data; + uint16 src_data_size; /* size of data pointed by p_src_data as 'source' */ + for (cfg_idx = 0; cfg_idx < ftm_cfg_cnt; cfg_idx++) { + p_config_param_info = (ftm_configs + cfg_idx); + if (p_config_param_info != NULL) { + switch (p_config_param_info->tlvid) { + case WL_PROXD_TLV_ID_BSS_INDEX: + case WL_PROXD_TLV_ID_FTM_RETRIES: + case WL_PROXD_TLV_ID_FTM_REQ_RETRIES: + 
p_src_data = &p_config_param_info->data8; + src_data_size = sizeof(uint8); + break; + case WL_PROXD_TLV_ID_BURST_NUM_FTM: /* uint16 */ + case WL_PROXD_TLV_ID_NUM_BURST: + case WL_PROXD_TLV_ID_RX_MAX_BURST: + p_src_data = &p_config_param_info->data16; + src_data_size = sizeof(uint16); + break; + case WL_PROXD_TLV_ID_TX_POWER: /* uint32 */ + case WL_PROXD_TLV_ID_RATESPEC: + case WL_PROXD_TLV_ID_EVENT_MASK: /* wl_proxd_event_mask_t/uint32 */ + case WL_PROXD_TLV_ID_DEBUG_MASK: + p_src_data = &p_config_param_info->data32; + src_data_size = sizeof(uint32); + break; + case WL_PROXD_TLV_ID_CHANSPEC: /* chanspec_t --> 32bit */ + chanspec = p_config_param_info->chanspec; + p_src_data = (void *) &chanspec; + src_data_size = sizeof(uint32); + break; + case WL_PROXD_TLV_ID_BSSID: /* mac address */ + case WL_PROXD_TLV_ID_PEER_MAC: + p_src_data = &p_config_param_info->mac_addr; + src_data_size = sizeof(struct ether_addr); + break; + case WL_PROXD_TLV_ID_BURST_DURATION: /* wl_proxd_intvl_t */ + case WL_PROXD_TLV_ID_BURST_PERIOD: + case WL_PROXD_TLV_ID_BURST_FTM_SEP: + case WL_PROXD_TLV_ID_BURST_TIMEOUT: + case WL_PROXD_TLV_ID_INIT_DELAY: + p_src_data = &p_config_param_info->data_intvl; + src_data_size = sizeof(wl_proxd_intvl_t); + break; + default: + ret = BCME_BADARG; + break; + } + if (ret != BCME_OK) { + DHD_ERROR(("%s bad TLV ID : %d\n", + __FUNCTION__, p_config_param_info->tlvid)); + break; + } + + ret = bcm_pack_xtlv_entry((uint8 **) p_tlv, p_buf_space_left, + p_config_param_info->tlvid, src_data_size, p_src_data, + BCM_XTLV_OPTION_ALIGN32); + if (ret != BCME_OK) { + DHD_ERROR(("%s: bcm_pack_xltv_entry() failed," + " status=%d\n", __FUNCTION__, ret)); + break; + } + + } + } + return ret; +} + +static int +dhd_rtt_ftm_enable(dhd_pub_t *dhd, bool enable) +{ + ftm_subcmd_info_t subcmd_info; + subcmd_info.name = (enable)? "enable" : "disable"; + subcmd_info.cmdid = (enable)? 
WL_PROXD_CMD_ENABLE: WL_PROXD_CMD_DISABLE; + subcmd_info.handler = NULL; + return dhd_rtt_common_set_handler(dhd, &subcmd_info, + WL_PROXD_METHOD_FTM, WL_PROXD_SESSION_ID_GLOBAL); +} + +static int +dhd_rtt_start_session(dhd_pub_t *dhd, wl_proxd_session_id_t session_id, bool start) +{ + ftm_subcmd_info_t subcmd_info; + subcmd_info.name = (start)? "start session" : "stop session"; + subcmd_info.cmdid = (start)? WL_PROXD_CMD_START_SESSION: WL_PROXD_CMD_STOP_SESSION; + subcmd_info.handler = NULL; + return dhd_rtt_common_set_handler(dhd, &subcmd_info, + WL_PROXD_METHOD_FTM, session_id); +} + +static int +dhd_rtt_delete_session(dhd_pub_t *dhd, wl_proxd_session_id_t session_id) +{ + ftm_subcmd_info_t subcmd_info; + subcmd_info.name = "delete session"; + subcmd_info.cmdid = WL_PROXD_CMD_DELETE_SESSION; + subcmd_info.handler = NULL; + return dhd_rtt_common_set_handler(dhd, &subcmd_info, + WL_PROXD_METHOD_FTM, session_id); +} + +static int +dhd_rtt_ftm_config(dhd_pub_t *dhd, wl_proxd_session_id_t session_id, + ftm_config_category_t catagory, void *ftm_configs, int ftm_cfg_cnt) +{ + ftm_subcmd_info_t subcmd_info; + wl_proxd_tlv_t *p_tlv; + /* alloc mem for ioctl headr + reserved 0 bufsize for tlvs (initialize to zero) */ + wl_proxd_iov_t *p_proxd_iov; + uint16 proxd_iovsize = 0; + uint16 bufsize; + uint16 buf_space_left; + uint16 all_tlvsize; + int ret = BCME_OK; + + subcmd_info.name = "config"; + subcmd_info.cmdid = WL_PROXD_CMD_CONFIG; + + p_proxd_iov = rtt_alloc_getset_buf(WL_PROXD_METHOD_FTM, session_id, subcmd_info.cmdid, + FTM_IOC_BUFSZ, &proxd_iovsize); + + if (p_proxd_iov == NULL) { + DHD_ERROR(("%s : failed to allocate the iovar (size :%d)\n", + __FUNCTION__, FTM_IOC_BUFSZ)); + return BCME_NOMEM; + } + /* setup TLVs */ + bufsize = proxd_iovsize - WL_PROXD_IOV_HDR_SIZE; /* adjust available size for TLVs */ + p_tlv = &p_proxd_iov->tlvs[0]; + /* TLV buffer starts with a full size, will decrement for each packed TLV */ + buf_space_left = bufsize; + if (catagory == 
FTM_CONFIG_CAT_OPTIONS) { + ret = rtt_handle_config_options(session_id, &p_tlv, &buf_space_left, + (ftm_config_options_info_t *)ftm_configs, ftm_cfg_cnt); + } else if (catagory == FTM_CONFIG_CAT_GENERAL) { + ret = rtt_handle_config_general(session_id, &p_tlv, &buf_space_left, + (ftm_config_param_info_t *)ftm_configs, ftm_cfg_cnt); + } + if (ret == BCME_OK) { + /* update the iov header, set len to include all TLVs + header */ + all_tlvsize = (bufsize - buf_space_left); + p_proxd_iov->len = htol16(all_tlvsize + WL_PROXD_IOV_HDR_SIZE); + ret = dhd_iovar(dhd, 0, "proxd", (char *)p_proxd_iov, + all_tlvsize + WL_PROXD_IOV_HDR_SIZE, NULL, 0, TRUE); + if (ret != BCME_OK) { + DHD_ERROR(("%s : failed to set config\n", __FUNCTION__)); + } + } + /* clean up */ + kfree(p_proxd_iov); + return ret; +} + +static int +dhd_rtt_get_version(dhd_pub_t *dhd, int *out_version) +{ + int ret; + ftm_subcmd_info_t subcmd_info; + subcmd_info.name = "ver"; + subcmd_info.cmdid = WL_PROXD_CMD_GET_VERSION; + subcmd_info.handler = NULL; + ret = dhd_rtt_common_get_handler(dhd, &subcmd_info, + WL_PROXD_METHOD_FTM, WL_PROXD_SESSION_ID_GLOBAL); + *out_version = (ret == BCME_OK) ? 
subcmd_info.version : 0; + return ret; +} +#endif /* WL_CFG80211 */ chanspec_t dhd_rtt_convert_to_chspec(wifi_channel_info_t channel) { int bw; + chanspec_t chanspec = 0; + uint8 center_chan; + uint8 primary_chan; /* set witdh to 20MHZ for 2.4G HZ */ if (channel.center_freq >= 2400 && channel.center_freq <= 2500) { channel.width = WIFI_CHAN_WIDTH_20; @@ -94,22 +1139,35 @@ dhd_rtt_convert_to_chspec(wifi_channel_info_t channel) switch (channel.width) { case WIFI_CHAN_WIDTH_20: bw = WL_CHANSPEC_BW_20; + primary_chan = wf_mhz2channel(channel.center_freq, 0); + chanspec = wf_channel2chspec(primary_chan, bw); break; case WIFI_CHAN_WIDTH_40: bw = WL_CHANSPEC_BW_40; + primary_chan = wf_mhz2channel(channel.center_freq, 0); + chanspec = wf_channel2chspec(primary_chan, bw); break; case WIFI_CHAN_WIDTH_80: bw = WL_CHANSPEC_BW_80; - break; - case WIFI_CHAN_WIDTH_160: - bw = WL_CHANSPEC_BW_160; + primary_chan = wf_mhz2channel(channel.center_freq, 0); + center_chan = wf_mhz2channel(channel.center_freq0, 0); + chanspec = wf_chspec_80(center_chan, primary_chan); break; default: DHD_ERROR(("doesn't support this bandwith : %d", channel.width)); bw = -1; break; } - return wf_channel2chspec(wf_mhz2channel(channel.center_freq, 0), bw); + return chanspec; +} + +int +dhd_rtt_idx_to_burst_duration(uint idx) +{ + if (idx >= ARRAY_SIZE(burst_duration_idx)) { + return -1; + } + return burst_duration_idx[idx]; } int @@ -123,18 +1181,22 @@ dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params) NULL_CHECK(dhd, "dhd is NULL", err); rtt_status = GET_RTTSTATE(dhd); NULL_CHECK(rtt_status, "rtt_status is NULL", err); - if (rtt_status->capability == RTT_CAP_NONE) { + if (!HAS_11MC_CAP(rtt_status->rtt_capa.proto)) { DHD_ERROR(("doesn't support RTT \n")); return BCME_ERROR; } - if (rtt_status->status == RTT_STARTED) { + if (rtt_status->status != RTT_STOPPED) { DHD_ERROR(("rtt is already started\n")); return BCME_BUSY; } DHD_RTT(("%s enter\n", __FUNCTION__)); - bcopy(params, 
&rtt_status->rtt_config, sizeof(rtt_config_params_t)); + + memset(rtt_status->rtt_config.target_info, 0, TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT)); + rtt_status->rtt_config.rtt_target_cnt = params->rtt_target_cnt; + memcpy(rtt_status->rtt_config.target_info, + params->target_info, TARGET_INFO_SIZE(params->rtt_target_cnt)); rtt_status->status = RTT_STARTED; - /* start to measure RTT from 1th device */ + /* start to measure RTT from first device */ /* find next target to trigger RTT */ for (idx = rtt_status->cur_idx; idx < rtt_status->rtt_config.rtt_target_cnt; idx++) { /* skip the disabled device */ @@ -157,8 +1219,12 @@ int dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt) { int err = BCME_OK; +#ifdef WL_CFG80211 int i = 0, j = 0; rtt_status_info_t *rtt_status; + rtt_results_header_t *entry, *next; + rtt_result_t *rtt_result, *next2; + struct rtt_noti_callback *iter; NULL_CHECK(dhd, "dhd is NULL", err); rtt_status = GET_RTTSTATE(dhd); @@ -177,169 +1243,271 @@ dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt) } } } + if (rtt_status->all_cancel) { + /* cancel all of request */ + rtt_status->status = RTT_STOPPED; + DHD_RTT(("current RTT process is cancelled\n")); + /* remove the rtt results in cache */ + if (!list_empty(&rtt_status->rtt_results_cache)) { + /* Iterate rtt_results_header list */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + list_for_each_entry_safe(entry, next, + &rtt_status->rtt_results_cache, list) { + list_del(&entry->list); + /* Iterate rtt_result list */ + list_for_each_entry_safe(rtt_result, next2, + &entry->result_list, list) { + list_del(&rtt_result->list); + kfree(rtt_result); + } + kfree(entry); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + } + /* send the rtt complete event to wake up the user process */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { + iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + /* reinitialize the HEAD */ + INIT_LIST_HEAD(&rtt_status->rtt_results_cache); + /* clear information for rtt_config */ + rtt_status->rtt_config.rtt_target_cnt = 0; + memset(rtt_status->rtt_config.target_info, 0, + TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT)); + rtt_status->cur_idx = 0; + dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION); + dhd_rtt_ftm_enable(dhd, FALSE); + } mutex_unlock(&rtt_status->rtt_mutex); +#endif /* WL_CFG80211 */ return err; } + +#ifdef WL_CFG80211 static int dhd_rtt_start(dhd_pub_t *dhd) { int err = BCME_OK; - int mpc = 0; - int nss, mcs, bw; + char eabuf[ETHER_ADDR_STR_LEN]; + char chanbuf[CHANSPEC_STR_LEN]; + int ftm_cfg_cnt = 0; + int ftm_param_cnt = 0; uint32 rspec = 0; - int8 eabuf[ETHER_ADDR_STR_LEN]; - int8 chanbuf[CHANSPEC_STR_LEN]; - bool set_mpc = FALSE; - wl_proxd_iovar_t proxd_iovar; - wl_proxd_params_iovar_t proxd_params; - wl_proxd_params_iovar_t proxd_tune; - wl_proxd_params_tof_method_t *tof_params = &proxd_params.u.tof_params; - rtt_status_info_t *rtt_status; + ftm_config_options_info_t ftm_configs[FTM_MAX_CONFIGS]; + ftm_config_param_info_t ftm_params[FTM_MAX_PARAMS]; rtt_target_info_t *rtt_target; + rtt_status_info_t *rtt_status; + int pm = PM_OFF; + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); NULL_CHECK(dhd, "dhd is NULL", err); rtt_status = GET_RTTSTATE(dhd); NULL_CHECK(rtt_status, "rtt_status is NULL", err); - /* turn off mpc in case of non-associted */ - if (!dhd_is_associated(dhd, 0, NULL)) { - err = dhd_iovar(dhd, 0, "mpc", (char *)&mpc, sizeof(mpc), 1); - if (err < 0) { - DHD_ERROR(("%s : failed to set proxd_tune\n", __FUNCTION__)); - goto exit; - } - set_mpc = TRUE; - } + DHD_RTT(("Enter %s\n", 
__FUNCTION__)); if (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt) { err = BCME_RANGE; + DHD_RTT(("%s : idx %d is out of range\n", __FUNCTION__, rtt_status->cur_idx)); + if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) { + DHD_ERROR(("STA is set as Target/Responder \n")); + return BCME_ERROR; + } goto exit; } - DHD_RTT(("%s enter\n", __FUNCTION__)); - bzero(&proxd_tune, sizeof(proxd_tune)); - bzero(&proxd_params, sizeof(proxd_params)); + if (RTT_IS_STOPPED(rtt_status)) { + DHD_RTT(("RTT is stopped\n")); + goto exit; + } + err = wldev_ioctl_get(dev, WLC_GET_PM, &rtt_status->pm, sizeof(rtt_status->pm)); + if (err) { + DHD_ERROR(("Failed to get the PM value\n")); + } else { + err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm)); + if (err) { + DHD_ERROR(("Failed to set the PM\n")); + rtt_status->pm_restore = FALSE; + } else { + rtt_status->pm_restore = TRUE; + } + } + mutex_lock(&rtt_status->rtt_mutex); /* Get a target information */ rtt_target = &rtt_status->rtt_config.target_info[rtt_status->cur_idx]; mutex_unlock(&rtt_status->rtt_mutex); - /* set role */ - proxd_iovar.method = PROXD_TOF_METHOD; - proxd_iovar.mode = WL_PROXD_MODE_INITIATOR; - - /* make sure that proxd is stop */ - /* dhd_iovar(dhd, 0, "proxd_stop", (char *)NULL, 0, 1); */ - - err = dhd_iovar(dhd, 0, "proxd", (char *)&proxd_iovar, sizeof(proxd_iovar), 1); - if (err < 0 && err != BCME_BUSY) { - DHD_ERROR(("%s : failed to set proxd %d\n", __FUNCTION__, err)); - goto exit; - } - if (err == BCME_BUSY) { - DHD_RTT(("BCME_BUSY occurred\n")); - } - /* mac address */ - bcopy(&rtt_target->addr, &tof_params->tgt_mac, ETHER_ADDR_LEN); - /* frame count */ - if (rtt_target->ftm_cnt > RTT_MAX_FRAME_CNT) { - rtt_target->ftm_cnt = RTT_MAX_FRAME_CNT; - } - - if (rtt_target->ftm_cnt) { - tof_params->ftm_cnt = htol16(rtt_target->ftm_cnt); - } else { - tof_params->ftm_cnt = htol16(DEFAULT_FTM_CNT); - } - - if (rtt_target->retry_cnt > RTT_MAX_RETRY_CNT) { - rtt_target->retry_cnt = 
RTT_MAX_RETRY_CNT; - } - - /* retry count */ - if (rtt_target->retry_cnt) { - tof_params->retry_cnt = htol16(rtt_target->retry_cnt); - } else { - tof_params->retry_cnt = htol16(DEFAULT_RETRY_CNT); - } - - /* chanspec */ - tof_params->chanspec = htol16(rtt_target->chanspec); - /* set parameter */ - DHD_RTT(("Target addr(Idx %d) %s, Channel : %s for RTT (ftm_cnt %d, rety_cnt : %d)\n", - rtt_status->cur_idx, - bcm_ether_ntoa((const struct ether_addr *)&rtt_target->addr, eabuf), - wf_chspec_ntoa(rtt_target->chanspec, chanbuf), rtt_target->ftm_cnt, - rtt_target->retry_cnt)); - - if (rtt_target->type == RTT_ONE_WAY) { - proxd_tune.u.tof_tune.flags = htol32(WL_PROXD_FLAG_ONEWAY); - /* report RTT results for initiator */ - proxd_tune.u.tof_tune.flags |= htol32(WL_PROXD_FLAG_INITIATOR_RPTRTT); - proxd_tune.u.tof_tune.vhtack = 0; - tof_params->tx_rate = htol16(WL_RATE_6M); - tof_params->vht_rate = htol16((WL_RATE_6M >> 16)); - } else { /* RTT TWO WAY */ - /* initiator will send the rtt result to the target */ - proxd_tune.u.tof_tune.flags = htol32(WL_PROXD_FLAG_INITIATOR_REPORT); - tof_params->timeout = 10; /* 10ms for timeout */ - rspec = WL_RSPEC_ENCODE_VHT; /* 11ac VHT */ - nss = 1; /* default Nss = 1 */ - mcs = 0; /* default MCS 0 */ - rspec |= (nss << WL_RSPEC_VHT_NSS_SHIFT) | mcs; - bw = 0; - switch (CHSPEC_BW(rtt_target->chanspec)) { - case WL_CHANSPEC_BW_20: - bw = WL_RSPEC_BW_20MHZ; - break; - case WL_CHANSPEC_BW_40: - bw = WL_RSPEC_BW_40MHZ; - break; - case WL_CHANSPEC_BW_80: - bw = WL_RSPEC_BW_80MHZ; - break; - case WL_CHANSPEC_BW_160: - bw = WL_RSPEC_BW_160MHZ; - break; - default: - DHD_ERROR(("CHSPEC_BW not supported : %d", - CHSPEC_BW(rtt_target->chanspec))); + DHD_RTT(("%s enter\n", __FUNCTION__)); + if (!RTT_IS_ENABLED(rtt_status)) { + /* enable ftm */ + err = dhd_rtt_ftm_enable(dhd, TRUE); + if (err) { + DHD_ERROR(("failed to enable FTM (%d)\n", err)); goto exit; } - rspec |= bw; - tof_params->tx_rate = htol16(rspec & 0xffff); - tof_params->vht_rate = 
htol16(rspec >> 16); } - /* Set Method to TOF */ - proxd_tune.method = PROXD_TOF_METHOD; - err = dhd_iovar(dhd, 0, "proxd_tune", (char *)&proxd_tune, sizeof(proxd_tune), 1); - if (err < 0) { - DHD_ERROR(("%s : failed to set proxd_tune %d\n", __FUNCTION__, err)); + /* delete session of index default sesession */ + err = dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION); + if (err < 0 && err != BCME_NOTFOUND) { + DHD_ERROR(("failed to delete session of FTM (%d)\n", err)); goto exit; } + rtt_status->status = RTT_ENABLED; + memset(ftm_configs, 0, sizeof(ftm_configs)); + memset(ftm_params, 0, sizeof(ftm_params)); - /* Set Method to TOF */ - proxd_params.method = PROXD_TOF_METHOD; - err = dhd_iovar(dhd, 0, "proxd_params", (char *)&proxd_params, sizeof(proxd_params), 1); - if (err < 0) { - DHD_ERROR(("%s : failed to set proxd_params %d\n", __FUNCTION__, err)); - goto exit; + /* configure the session 1 as initiator */ + ftm_configs[ftm_cfg_cnt].enable = TRUE; + ftm_configs[ftm_cfg_cnt++].flags = WL_PROXD_SESSION_FLAG_INITIATOR; + dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_OPTIONS, + ftm_configs, ftm_cfg_cnt); + /* target's mac address */ + if (!ETHER_ISNULLADDR(rtt_target->addr.octet)) { + ftm_params[ftm_param_cnt].mac_addr = rtt_target->addr; + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_PEER_MAC; + bcm_ether_ntoa(&rtt_target->addr, eabuf); + DHD_RTT((">\t target %s\n", eabuf)); } - err = dhd_iovar(dhd, 0, "proxd_find", (char *)NULL, 0, 1); - if (err < 0) { - DHD_ERROR(("%s : failed to set proxd_find %d\n", __FUNCTION__, err)); - goto exit; + /* target's chanspec */ + if (rtt_target->chanspec) { + ftm_params[ftm_param_cnt].chanspec = htol32((uint32)rtt_target->chanspec); + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_CHANSPEC; + wf_chspec_ntoa(rtt_target->chanspec, chanbuf); + DHD_RTT((">\t chanspec : %s\n", chanbuf)); + } + /* num-burst */ + if (rtt_target->num_burst) { + ftm_params[ftm_param_cnt].data16 = htol16(rtt_target->num_burst); + 
ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_NUM_BURST; + DHD_RTT((">\t num of burst : %d\n", rtt_target->num_burst)); + } + /* number of frame per burst */ + if (rtt_target->num_frames_per_burst == 0) { + rtt_target->num_frames_per_burst = + CHSPEC_IS20(rtt_target->chanspec) ? FTM_DEFAULT_CNT_20M : + CHSPEC_IS40(rtt_target->chanspec) ? FTM_DEFAULT_CNT_40M : + FTM_DEFAULT_CNT_80M; + } + ftm_params[ftm_param_cnt].data16 = htol16(rtt_target->num_frames_per_burst); + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_BURST_NUM_FTM; + DHD_RTT((">\t number of frame per burst : %d\n", rtt_target->num_frames_per_burst)); + /* FTM retry count */ + if (rtt_target->num_retries_per_ftm) { + ftm_params[ftm_param_cnt].data8 = rtt_target->num_retries_per_ftm; + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_FTM_RETRIES; + DHD_RTT((">\t retry count of FTM : %d\n", rtt_target->num_retries_per_ftm)); + } + /* FTM Request retry count */ + if (rtt_target->num_retries_per_ftmr) { + ftm_params[ftm_param_cnt].data8 = rtt_target->num_retries_per_ftmr; + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_FTM_REQ_RETRIES; + DHD_RTT((">\t retry count of FTM Req : %d\n", rtt_target->num_retries_per_ftmr)); + } + /* burst-period */ + if (rtt_target->burst_period) { + ftm_params[ftm_param_cnt].data_intvl.intvl = + htol32(rtt_target->burst_period); /* ms */ + ftm_params[ftm_param_cnt].data_intvl.tmu = WL_PROXD_TMU_MILLI_SEC; + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_BURST_PERIOD; + DHD_RTT((">\t burst period : %d ms\n", rtt_target->burst_period)); + } + /* burst-duration */ + if (rtt_target->burst_duration) { + ftm_params[ftm_param_cnt].data_intvl.intvl = + htol32(rtt_target->burst_duration); /* ms */ + ftm_params[ftm_param_cnt].data_intvl.tmu = WL_PROXD_TMU_MILLI_SEC; + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_BURST_DURATION; + DHD_RTT((">\t burst duration : %d ms\n", + rtt_target->burst_duration)); + } + if (rtt_target->bw && rtt_target->preamble) { + bool 
use_default = FALSE; + int nss; + int mcs; + switch (rtt_target->preamble) { + case RTT_PREAMBLE_LEGACY: + rspec |= WL_RSPEC_ENCODE_RATE; /* 11abg */ + rspec |= WL_RATE_6M; + break; + case RTT_PREAMBLE_HT: + rspec |= WL_RSPEC_ENCODE_HT; /* 11n HT */ + mcs = 0; /* default MCS 0 */ + rspec |= mcs; + break; + case RTT_PREAMBLE_VHT: + rspec |= WL_RSPEC_ENCODE_VHT; /* 11ac VHT */ + mcs = 0; /* default MCS 0 */ + nss = 1; /* default Nss = 1 */ + rspec |= (nss << WL_RSPEC_VHT_NSS_SHIFT) | mcs; + break; + default: + DHD_RTT(("doesn't support this preamble : %d\n", rtt_target->preamble)); + use_default = TRUE; + break; + } + switch (rtt_target->bw) { + case RTT_BW_20: + rspec |= WL_RSPEC_BW_20MHZ; + break; + case RTT_BW_40: + rspec |= WL_RSPEC_BW_40MHZ; + break; + case RTT_BW_80: + rspec |= WL_RSPEC_BW_80MHZ; + break; + default: + DHD_RTT(("doesn't support this BW : %d\n", rtt_target->bw)); + use_default = TRUE; + break; + } + if (!use_default) { + ftm_params[ftm_param_cnt].data32 = htol32(rspec); + ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_RATESPEC; + DHD_RTT((">\t ratespec : %d\n", rspec)); + } + + } + dhd_set_rand_mac_oui(dhd); + dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_GENERAL, + ftm_params, ftm_param_cnt); + + err = dhd_rtt_start_session(dhd, FTM_DEFAULT_SESSION, TRUE); + if (err) { + DHD_ERROR(("failed to start session of FTM : error %d\n", err)); } exit: - if (err < 0) { + if (err) { + DHD_ERROR(("rtt is stopped %s \n", __FUNCTION__)); rtt_status->status = RTT_STOPPED; - if (set_mpc) { - /* enable mpc again in case of error */ - mpc = 1; - err = dhd_iovar(dhd, 0, "mpc", (char *)&mpc, sizeof(mpc), 1); + /* disable FTM */ + dhd_rtt_ftm_enable(dhd, FALSE); + if (rtt_status->pm_restore) { + DHD_ERROR(("pm_restore =%d func =%s \n", + rtt_status->pm_restore, __FUNCTION__)); + pm = PM_FAST; + err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm)); + if (err) { + DHD_ERROR(("Failed to set PM \n")); + } else { + rtt_status->pm_restore = FALSE; + 
} } } return err; } +#endif /* WL_CFG80211 */ int dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn noti_fn) @@ -353,11 +1521,18 @@ dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn rtt_status = GET_RTTSTATE(dhd); NULL_CHECK(rtt_status, "rtt_status is NULL", err); spin_lock_bh(¬i_list_lock); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { if (iter->noti_fn == noti_fn) { goto exit; } } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif cb = kmalloc(sizeof(struct rtt_noti_callback), GFP_ATOMIC); if (!cb) { err = -ENOMEM; @@ -382,6 +1557,10 @@ dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn) rtt_status = GET_RTTSTATE(dhd); NULL_CHECK(rtt_status, "rtt_status is NULL", err); spin_lock_bh(¬i_list_lock); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { if (iter->noti_fn == noti_fn) { cb = iter; @@ -389,6 +1568,10 @@ dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn) break; } } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + spin_unlock_bh(¬i_list_lock); if (cb) { kfree(cb); @@ -396,88 +1579,156 @@ dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn) return err; } +static wifi_rate_t +dhd_rtt_convert_rate_to_host(uint32 rspec) +{ + wifi_rate_t host_rate; + memset(&host_rate, 0, sizeof(wifi_rate_t)); + if ((rspec & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_RATE) { + host_rate.preamble = 0; + } else if ((rspec & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HT) { + host_rate.preamble = 2; + host_rate.rateMcsIdx = rspec & WL_RSPEC_RATE_MASK; + } else if 
((rspec & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT) { + host_rate.preamble = 3; + host_rate.rateMcsIdx = rspec & WL_RSPEC_VHT_MCS_MASK; + host_rate.nss = (rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT; + } + host_rate.bw = (rspec & WL_RSPEC_BW_MASK) - 1; + host_rate.bitrate = rate_rspec2rate(rspec) / 100; /* 100kbps */ + DHD_RTT(("bit rate : %d\n", host_rate.bitrate)); + return host_rate; +} + + static int -dhd_rtt_convert_to_host(rtt_result_t *rtt_results, const wl_proxd_event_data_t* evp) +dhd_rtt_convert_results_to_host(rtt_report_t *rtt_report, uint8 *p_data, uint16 tlvid, uint16 len) { int err = BCME_OK; - int i; char eabuf[ETHER_ADDR_STR_LEN]; - char diststr[40]; + wl_proxd_rtt_result_t *p_data_info; + wl_proxd_result_flags_t flags; + wl_proxd_session_state_t session_state; + wl_proxd_status_t proxd_status; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) struct timespec ts; - NULL_CHECK(rtt_results, "rtt_results is NULL", err); - NULL_CHECK(evp, "evp is NULL", err); +#endif /* LINUX_VER >= 2.6.39 */ + uint32 ratespec; + uint32 avg_dist; + wl_proxd_rtt_sample_t *p_sample; + wl_proxd_intvl_t rtt; + wl_proxd_intvl_t p_time; + + NULL_CHECK(rtt_report, "rtt_report is NULL", err); + NULL_CHECK(p_data, "p_data is NULL", err); DHD_RTT(("%s enter\n", __FUNCTION__)); - rtt_results->distance = ntoh32(evp->distance); - rtt_results->sdrtt = ntoh32(evp->sdrtt); - rtt_results->ftm_cnt = ntoh16(evp->ftm_cnt); - rtt_results->avg_rssi = ntoh16(evp->avg_rssi); - rtt_results->validfrmcnt = ntoh16(evp->validfrmcnt); - rtt_results->meanrtt = ntoh32(evp->meanrtt); - rtt_results->modertt = ntoh32(evp->modertt); - rtt_results->medianrtt = ntoh32(evp->medianrtt); - rtt_results->err_code = evp->err_code; - rtt_results->tx_rate.preamble = (evp->OFDM_frame_type == TOF_FRAME_RATE_VHT)? 3 : 0; - rtt_results->tx_rate.nss = 0; /* 1 x 1 */ - rtt_results->tx_rate.bw = - (evp->bandwidth == TOF_BW_80MHZ)? 2 : (evp->bandwidth == TOF_BW_40MHZ)? 
1 : 0; - rtt_results->TOF_type = evp->TOF_type; - if (evp->TOF_type == TOF_TYPE_ONE_WAY) { - /* convert to 100kbps unit */ - rtt_results->tx_rate.bitrate = WL_RATE_6M * 5; - rtt_results->tx_rate.rateMcsIdx = WL_RATE_6M; - } else { - rtt_results->tx_rate.bitrate = WL_RATE_6M * 5; - rtt_results->tx_rate.rateMcsIdx = 0; /* MCS 0 */ - } - memset(diststr, 0, sizeof(diststr)); - if (rtt_results->distance == 0xffffffff || rtt_results->distance == 0) { - sprintf(diststr, "distance=-1m\n"); - } else { - sprintf(diststr, "distance=%d.%d m\n", - rtt_results->distance >> 4, ((rtt_results->distance & 0xf) * 125) >> 1); - } + p_data_info = (wl_proxd_rtt_result_t *) p_data; + /* unpack and format 'flags' for display */ + flags = ltoh16_ua(&p_data_info->flags); - if (ntoh32(evp->mode) == WL_PROXD_MODE_INITIATOR) { - DHD_RTT(("Target:(%s) %s;\n", bcm_ether_ntoa((&evp->peer_mac), eabuf), diststr)); - DHD_RTT(("RTT : mean %d mode %d median %d\n", rtt_results->meanrtt, - rtt_results->modertt, rtt_results->medianrtt)); - } else { - DHD_RTT(("Initiator:(%s) %s; ", bcm_ether_ntoa((&evp->peer_mac), eabuf), diststr)); - } - if (rtt_results->sdrtt > 0) { - DHD_RTT(("sigma:%d.%d\n", rtt_results->sdrtt/10, rtt_results->sdrtt % 10)); - } else { - DHD_RTT(("sigma:0\n")); - } + /* session state and status */ + session_state = ltoh16_ua(&p_data_info->state); + proxd_status = ltoh32_ua(&p_data_info->status); + bcm_ether_ntoa((&(p_data_info->peer)), eabuf); + ftm_session_state_value_to_logstr(session_state); + ftm_status_value_to_logstr(proxd_status); + DHD_RTT((">\tTarget(%s) session state=%d(%s), status=%d(%s)\n", + eabuf, + session_state, + ftm_session_state_value_to_logstr(session_state), + proxd_status, + ftm_status_value_to_logstr(proxd_status))); - DHD_RTT(("rssi:%d validfrmcnt %d, err_code : %d\n", rtt_results->avg_rssi, - rtt_results->validfrmcnt, evp->err_code)); - - switch (evp->err_code) { - case TOF_REASON_OK: - rtt_results->err_code = RTT_REASON_SUCCESS; - break; - case 
TOF_REASON_TIMEOUT: - rtt_results->err_code = RTT_REASON_TIMEOUT; - break; - case TOF_REASON_NOACK: - rtt_results->err_code = RTT_REASON_NO_RSP; - break; - case TOF_REASON_ABORT: - rtt_results->err_code = RTT_REASON_ABORT; - break; - default: - rtt_results->err_code = RTT_REASON_FAILURE; - break; + /* show avg_dist (1/256m units), burst_num */ + avg_dist = ltoh32_ua(&p_data_info->avg_dist); + if (avg_dist == 0xffffffff) { /* report 'failure' case */ + DHD_RTT((">\tavg_dist=-1m, burst_num=%d, valid_measure_cnt=%d\n", + ltoh16_ua(&p_data_info->burst_num), + p_data_info->num_valid_rtt)); /* in a session */ + avg_dist = FTM_INVALID; } - rtt_results->peer_mac = evp->peer_mac; + else { + DHD_RTT((">\tavg_dist=%d.%04dm, burst_num=%d, valid_measure_cnt=%d num_ftm=%d\n", + avg_dist >> 8, /* 1/256m units */ + ((avg_dist & 0xff) * 625) >> 4, + ltoh16_ua(&p_data_info->burst_num), + p_data_info->num_valid_rtt, + p_data_info->num_ftm)); /* in a session */ + } + /* show 'avg_rtt' sample */ + p_sample = &p_data_info->avg_rtt; + ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample->rtt.tmu)); + DHD_RTT((">\tavg_rtt sample: rssi=%d rtt=%d%s std_deviation =%d.%d ratespec=0x%08x\n", + (int16) ltoh16_ua(&p_sample->rssi), + ltoh32_ua(&p_sample->rtt.intvl), + ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample->rtt.tmu)), + ltoh16_ua(&p_data_info->sd_rtt)/10, ltoh16_ua(&p_data_info->sd_rtt)%10, + ltoh32_ua(&p_sample->ratespec))); + + /* set peer address */ + rtt_report->addr = p_data_info->peer; + /* burst num */ + rtt_report->burst_num = ltoh16_ua(&p_data_info->burst_num); + /* success num */ + rtt_report->success_num = p_data_info->num_valid_rtt; + /* actual number of FTM supported by peer */ + rtt_report->num_per_burst_peer = p_data_info->num_ftm; + rtt_report->negotiated_burst_num = p_data_info->num_ftm; + /* status */ + rtt_report->status = ftm_get_statusmap_info(proxd_status, + &ftm_status_map_info[0], ARRAYSIZE(ftm_status_map_info)); + + /* rssi (0.5db) */ + rtt_report->rssi = 
ABS((wl_proxd_rssi_t)ltoh16_ua(&p_data_info->avg_rtt.rssi)) * 2; + + /* rx rate */ + ratespec = ltoh32_ua(&p_data_info->avg_rtt.ratespec); + rtt_report->rx_rate = dhd_rtt_convert_rate_to_host(ratespec); + /* tx rate */ + if (flags & WL_PROXD_RESULT_FLAG_VHTACK) { + rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0x2010010); + } else { + rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0xc); + } + /* rtt_sd */ + rtt.tmu = ltoh16_ua(&p_data_info->avg_rtt.rtt.tmu); + rtt.intvl = ltoh32_ua(&p_data_info->avg_rtt.rtt.intvl); + rtt_report->rtt = (wifi_timespan)FTM_INTVL2NSEC(&rtt) * 1000; /* nano -> pico seconds */ + rtt_report->rtt_sd = ltoh16_ua(&p_data_info->sd_rtt); /* nano -> 0.1 nano */ + DHD_RTT(("rtt_report->rtt : %llu\n", rtt_report->rtt)); + DHD_RTT(("rtt_report->rssi : %d (0.5db)\n", rtt_report->rssi)); + + /* average distance */ + if (avg_dist != FTM_INVALID) { + rtt_report->distance = (avg_dist >> 8) * 1000; /* meter -> mm */ + rtt_report->distance += (avg_dist & 0xff) * 1000 / 256; + } else { + rtt_report->distance = FTM_INVALID; + } + /* time stamp */ /* get the time elapsed from boot time */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) get_monotonic_boottime(&ts); - rtt_results->ts = (uint64) TIMESPEC_TO_US(ts); + rtt_report->ts = (uint64)TIMESPEC_TO_US(ts); +#endif /* LINUX_VER >= 2.6.39 */ - for (i = 0; i < rtt_results->ftm_cnt; i++) { - rtt_results->ftm_buff[i].value = ltoh32(evp->ftm_buff[i].value); - rtt_results->ftm_buff[i].rssi = ltoh32(evp->ftm_buff[i].rssi); + if (proxd_status == WL_PROXD_E_REMOTE_FAIL) { + /* retry time after failure */ + p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl); + p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu); + rtt_report->retry_after_duration = FTM_INTVL2SEC(&p_time); /* s -> s */ + DHD_RTT((">\tretry_after: %d%s\n", + ltoh32_ua(&p_data_info->u.retry_after.intvl), + ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.retry_after.tmu)))); + } else { + /* burst duration */ + p_time.intvl 
= ltoh32_ua(&p_data_info->u.retry_after.intvl); + p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu); + rtt_report->burst_duration = FTM_INTVL2MSEC(&p_time); /* s -> ms */ + DHD_RTT((">\tburst_duration: %d%s\n", + ltoh32_ua(&p_data_info->u.burst_duration.intvl), + ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.burst_duration.tmu)))); + DHD_RTT(("rtt_report->burst_duration : %d\n", rtt_report->burst_duration)); } return err; } @@ -485,72 +1736,223 @@ dhd_rtt_convert_to_host(rtt_result_t *rtt_results, const wl_proxd_event_data_t* int dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) { - int err = BCME_OK; - int len = 0; - int idx; - uint status, event_type, flags, reason, ftm_cnt; - rtt_status_info_t *rtt_status; - wl_proxd_event_data_t* evp; - struct rtt_noti_callback *iter; - rtt_result_t *rtt_result, *entry, *next; + int ret = BCME_OK; + int tlvs_len; + uint16 version; + wl_proxd_event_t *p_event; + wl_proxd_event_type_t event_type; + wl_proxd_ftm_session_status_t session_status; + wl_proxd_collect_event_data_t *collect_event_data; + const ftm_strmap_entry_t *p_loginfo; + rtt_result_t *rtt_result; gfp_t kflags; - NULL_CHECK(dhd, "dhd is NULL", err); - rtt_status = GET_RTTSTATE(dhd); - NULL_CHECK(rtt_status, "rtt_status is NULL", err); - event_type = ntoh32_ua((void *)&event->event_type); - flags = ntoh16_ua((void *)&event->flags); - status = ntoh32_ua((void *)&event->status); - reason = ntoh32_ua((void *)&event->reason); +#ifdef WL_CFG80211 + int idx; + struct rtt_noti_callback *iter; + bool is_new = TRUE; + rtt_status_info_t *rtt_status; + rtt_result_t *next2; + rtt_results_header_t *next = NULL; + rtt_target_info_t *rtt_target_info; + rtt_results_header_t *entry, *rtt_results_header = NULL; +#endif /* WL_CFG80211 */ + DHD_RTT(("Enter %s \n", __FUNCTION__)); + NULL_CHECK(dhd, "dhd is NULL", ret); + +#ifdef WL_CFG80211 + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", ret); + + if 
(RTT_IS_STOPPED(rtt_status)) { + /* Ignore the Proxd event */ + DHD_RTT((" event handler rtt is stopped \n")); + if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) { + DHD_RTT(("Device is target/Responder. Recv the event. \n")); + } else { + return ret; + } + } +#endif /* WL_CFG80211 */ + if (ntoh32_ua((void *)&event->datalen) < OFFSETOF(wl_proxd_event_t, tlvs)) { + DHD_RTT(("%s: wrong datalen:%d\n", __FUNCTION__, + ntoh32_ua((void *)&event->datalen))); + return -EINVAL; + } + event_type = ntoh32_ua((void *)&event->event_type); if (event_type != WLC_E_PROXD) { + DHD_ERROR((" failed event \n")); + return -EINVAL; + } + + if (!event_data) { + DHD_ERROR(("%s: event_data:NULL\n", __FUNCTION__)); + return -EINVAL; + } + p_event = (wl_proxd_event_t *) event_data; + version = ltoh16(p_event->version); + if (version < WL_PROXD_API_VERSION) { + DHD_ERROR(("ignore non-ftm event version = 0x%0x < WL_PROXD_API_VERSION (0x%x)\n", + version, WL_PROXD_API_VERSION)); + return ret; + } +#ifdef WL_CFG80211 + if (!in_atomic()) { + mutex_lock(&rtt_status->rtt_mutex); + } +#endif /* WL_CFG80211 */ + event_type = (wl_proxd_event_type_t) ltoh16(p_event->type); + + kflags = in_softirq()? GFP_ATOMIC : GFP_KERNEL; + + DHD_RTT(("event_type=0x%x, ntoh16()=0x%x, ltoh16()=0x%x\n", + p_event->type, ntoh16(p_event->type), ltoh16(p_event->type))); + p_loginfo = ftm_get_event_type_loginfo(event_type); + if (p_loginfo == NULL) { + DHD_ERROR(("receive an invalid FTM event %d\n", event_type)); + ret = -EINVAL; + goto exit; /* ignore this event */ + } + /* get TLVs len, skip over event header */ + if (ltoh16(p_event->len) < OFFSETOF(wl_proxd_event_t, tlvs)) { + DHD_ERROR(("invalid FTM event length:%d\n", ltoh16(p_event->len))); + ret = -EINVAL; goto exit; } - kflags = in_softirq()? GFP_ATOMIC : GFP_KERNEL; - evp = (wl_proxd_event_data_t*)event_data; - DHD_RTT(("%s enter : mode: %s, reason :%d \n", __FUNCTION__, - (ntoh16(evp->mode) == WL_PROXD_MODE_INITIATOR)? 
- "initiator":"target", reason)); - switch (reason) { - case WLC_E_PROXD_STOP: - DHD_RTT(("WLC_E_PROXD_STOP\n")); + tlvs_len = ltoh16(p_event->len) - OFFSETOF(wl_proxd_event_t, tlvs); + DHD_RTT(("receive '%s' event: version=0x%x len=%d method=%d sid=%d tlvs_len=%d\n", + p_loginfo->text, + version, + ltoh16(p_event->len), + ltoh16(p_event->method), + ltoh16(p_event->sid), + tlvs_len)); +#ifdef WL_CFG80211 + rtt_target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx]; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + /* find a rtt_report_header for this mac address */ + list_for_each_entry(entry, &rtt_status->rtt_results_cache, list) { + if (!memcmp(&entry->peer_mac, &event->addr, ETHER_ADDR_LEN)) { + /* found a rtt_report_header for peer_mac in the list */ + is_new = FALSE; + rtt_results_header = entry; + break; + } + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif +#endif /* WL_CFG80211 */ + switch (event_type) { + case WL_PROXD_EVENT_SESSION_CREATE: + DHD_RTT(("WL_PROXD_EVENT_SESSION_CREATE\n")); break; - case WLC_E_PROXD_ERROR: - case WLC_E_PROXD_COMPLETED: - if (reason == WLC_E_PROXD_ERROR) { - DHD_RTT(("WLC_E_PROXD_ERROR\n")); - } else { - DHD_RTT(("WLC_E_PROXD_COMPLETED\n")); + case WL_PROXD_EVENT_SESSION_START: + DHD_RTT(("WL_PROXD_EVENT_SESSION_START\n")); + break; + case WL_PROXD_EVENT_BURST_START: + DHD_RTT(("WL_PROXD_EVENT_BURST_START\n")); + break; + case WL_PROXD_EVENT_BURST_END: + DHD_RTT(("WL_PROXD_EVENT_BURST_END\n")); +#ifdef WL_CFG80211 + if (is_new) { + /* allocate new header for rtt_results */ + rtt_results_header = kzalloc(sizeof(rtt_results_header_t), kflags); + if (!rtt_results_header) { + ret = -ENOMEM; + goto exit; + } + /* Initialize the head of list for rtt result */ + INIT_LIST_HEAD(&rtt_results_header->result_list); + rtt_results_header->peer_mac = event->addr; + 
list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache); } +#endif /* WL_CFG80211 */ + if (tlvs_len > 0) { + /* allocate rtt_results for new results */ + rtt_result = kzalloc(sizeof(rtt_result_t), kflags); + if (!rtt_result) { + ret = -ENOMEM; + goto exit; + } + /* unpack TLVs and invokes the cbfn to print the event content TLVs */ + ret = bcm_unpack_xtlv_buf((void *) &(rtt_result->report), + (uint8 *)&p_event->tlvs[0], tlvs_len, + BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn); + if (ret != BCME_OK) { + DHD_ERROR(("%s : Failed to unpack xtlv for an event\n", + __FUNCTION__)); + goto exit; + } +#ifdef WL_CFG80211 + /* fill out the results from the configuration param */ + rtt_result->report.ftm_num = rtt_target_info->num_frames_per_burst; + rtt_result->report.type = RTT_TWO_WAY; + DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num)); + rtt_result->report_len = RTT_REPORT_SIZE; - if (!in_atomic()) { - mutex_lock(&rtt_status->rtt_mutex); + list_add_tail(&rtt_result->list, &rtt_results_header->result_list); + rtt_results_header->result_cnt++; + rtt_results_header->result_tot_len += rtt_result->report_len; +#endif /* WL_CFG80211 */ } - ftm_cnt = ntoh16(evp->ftm_cnt); - - if (ftm_cnt > 0) { - len = OFFSETOF(rtt_result_t, ftm_buff); - } else { - len = sizeof(rtt_result_t); + break; + case WL_PROXD_EVENT_SESSION_END: + DHD_RTT(("WL_PROXD_EVENT_SESSION_END\n")); +#ifdef WL_CFG80211 + if (!RTT_IS_ENABLED(rtt_status)) { + DHD_RTT(("Ignore the session end evt\n")); + goto exit; } - /* check whether the results is already reported or not */ - list_for_each_entry(entry, &rtt_status->rtt_results_cache, list) { - if (!memcmp(&entry->peer_mac, &evp->peer_mac, ETHER_ADDR_LEN)) { - if (!in_atomic()) { - mutex_unlock(&rtt_status->rtt_mutex); - } +#endif /* WL_CFG80211 */ + if (tlvs_len > 0) { + /* unpack TLVs and invokes the cbfn to print the event content TLVs */ + ret = bcm_unpack_xtlv_buf((void *) &session_status, + (uint8 *)&p_event->tlvs[0], tlvs_len, 
+ BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn); + if (ret != BCME_OK) { + DHD_ERROR(("%s : Failed to unpack xtlv for an event\n", + __FUNCTION__)); goto exit; } } - rtt_result = kzalloc(len + sizeof(ftm_sample_t) * ftm_cnt, kflags); - if (!rtt_result) { - if (!in_atomic()) { - mutex_unlock(&rtt_status->rtt_mutex); +#ifdef WL_CFG80211 + /* In case of no result for the peer device, make fake result for error case */ + if (is_new) { + /* allocate new header for rtt_results */ + rtt_results_header = kzalloc(sizeof(rtt_results_header_t), GFP_KERNEL); + if (!rtt_results_header) { + ret = -ENOMEM; + goto exit; } - err = -ENOMEM; - goto exit; + /* Initialize the head of list for rtt result */ + INIT_LIST_HEAD(&rtt_results_header->result_list); + rtt_results_header->peer_mac = event->addr; + list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache); + + /* allocate rtt_results for new results */ + rtt_result = kzalloc(sizeof(rtt_result_t), kflags); + if (!rtt_result) { + ret = -ENOMEM; + kfree(rtt_results_header); + goto exit; + } + /* fill out the results from the configuration param */ + rtt_result->report.ftm_num = rtt_target_info->num_frames_per_burst; + rtt_result->report.type = RTT_TWO_WAY; + DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num)); + rtt_result->report_len = RTT_REPORT_SIZE; + rtt_result->report.status = RTT_REASON_FAIL_NO_RSP; + rtt_result->report.addr = rtt_target_info->addr; + rtt_result->report.distance = FTM_INVALID; + list_add_tail(&rtt_result->list, &rtt_results_header->result_list); + rtt_results_header->result_cnt++; + rtt_results_header->result_tot_len += rtt_result->report_len; } - /* point to target_info in status struct and increase pointer */ - rtt_result->target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx]; /* find next target to trigger RTT */ for (idx = (rtt_status->cur_idx + 1); idx < rtt_status->rtt_config.rtt_target_cnt; idx++) { @@ -563,11 +1965,9 @@ dhd_rtt_event_handler(dhd_pub_t *dhd, 
wl_event_msg_t *event, void *event_data) break; } } - /* convert the event results to host format */ - dhd_rtt_convert_to_host(rtt_result, evp); - list_add_tail(&rtt_result->list, &rtt_status->rtt_results_cache); if (idx < rtt_status->rtt_config.rtt_target_cnt) { /* restart to measure RTT from next device */ + DHD_ERROR(("restart to measure rtt\n")); schedule_work(&rtt_status->work); } else { DHD_RTT(("RTT_STOPPED\n")); @@ -575,60 +1975,112 @@ dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) /* to turn on mpc mode */ schedule_work(&rtt_status->work); /* notify the completed information to others */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache); } /* remove the rtt results in cache */ - list_for_each_entry_safe(rtt_result, next, - &rtt_status->rtt_results_cache, list) { - list_del(&rtt_result->list); - kfree(rtt_result); + if (!list_empty(&rtt_status->rtt_results_cache)) { + /* Iterate rtt_results_header list */ + list_for_each_entry_safe(entry, next, + &rtt_status->rtt_results_cache, list) { + list_del(&entry->list); + /* Iterate rtt_result list */ + list_for_each_entry_safe(rtt_result, next2, + &entry->result_list, list) { + list_del(&rtt_result->list); + kfree(rtt_result); + } + kfree(entry); + } } - /* reinit the HEAD */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + /* reinitialize the HEAD */ INIT_LIST_HEAD(&rtt_status->rtt_results_cache); /* clear information for rtt_config */ - bzero(&rtt_status->rtt_config, sizeof(rtt_status->rtt_config)); + rtt_status->rtt_config.rtt_target_cnt = 0; + memset(rtt_status->rtt_config.target_info, 0, + TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT)); rtt_status->cur_idx = 0; } - if (!in_atomic()) { - mutex_unlock(&rtt_status->rtt_mutex); +#endif /* 
WL_CFG80211 */ + break; + case WL_PROXD_EVENT_SESSION_RESTART: + DHD_RTT(("WL_PROXD_EVENT_SESSION_RESTART\n")); + break; + case WL_PROXD_EVENT_BURST_RESCHED: + DHD_RTT(("WL_PROXD_EVENT_BURST_RESCHED\n")); + break; + case WL_PROXD_EVENT_SESSION_DESTROY: + DHD_RTT(("WL_PROXD_EVENT_SESSION_DESTROY\n")); + break; + case WL_PROXD_EVENT_FTM_FRAME: + DHD_RTT(("WL_PROXD_EVENT_FTM_FRAME\n")); + break; + case WL_PROXD_EVENT_DELAY: + DHD_RTT(("WL_PROXD_EVENT_DELAY\n")); + break; + case WL_PROXD_EVENT_VS_INITIATOR_RPT: + DHD_RTT(("WL_PROXD_EVENT_VS_INITIATOR_RPT\n ")); + break; + case WL_PROXD_EVENT_RANGING: + DHD_RTT(("WL_PROXD_EVENT_RANGING\n")); + break; + case WL_PROXD_EVENT_COLLECT: + DHD_RTT(("WL_PROXD_EVENT_COLLECT\n")); + if (tlvs_len > 0) { + collect_event_data = kzalloc(sizeof(wl_proxd_collect_event_data_t), kflags); + if (!collect_event_data) { + ret = -ENOMEM; + goto exit; + } + /* unpack TLVs and invokes the cbfn to print the event content TLVs */ + ret = bcm_unpack_xtlv_buf((void *) collect_event_data, + (uint8 *)&p_event->tlvs[0], tlvs_len, + BCM_XTLV_OPTION_NONE, rtt_unpack_xtlv_cbfn); + kfree(collect_event_data); + if (ret != BCME_OK) { + DHD_ERROR(("%s : Failed to unpack xtlv for an event\n", + __FUNCTION__)); + goto exit; + } } + break; + - break; - case WLC_E_PROXD_GONE: - DHD_RTT(("WLC_E_PROXD_GONE\n")); - break; - case WLC_E_PROXD_START: - /* event for targets / accesspoints */ - DHD_RTT(("WLC_E_PROXD_START\n")); - break; - case WLC_E_PROXD_COLLECT_START: - DHD_RTT(("WLC_E_PROXD_COLLECT_START\n")); - break; - case WLC_E_PROXD_COLLECT_STOP: - DHD_RTT(("WLC_E_PROXD_COLLECT_STOP\n")); - break; - case WLC_E_PROXD_COLLECT_COMPLETED: - DHD_RTT(("WLC_E_PROXD_COLLECT_COMPLETED\n")); - break; - case WLC_E_PROXD_COLLECT_ERROR: - DHD_RTT(("WLC_E_PROXD_COLLECT_ERROR; ")); - break; default: - DHD_ERROR(("WLC_E_PROXD: supported EVENT reason code:%d\n", reason)); + DHD_ERROR(("WLC_E_PROXD: not supported EVENT Type:%d\n", event_type)); break; } - exit: - return err; 
+#ifdef WL_CFG80211 + if (!in_atomic()) { + mutex_unlock(&rtt_status->rtt_mutex); + } +#endif /* WL_CFG80211 */ + + return ret; } +#ifdef WL_CFG80211 static void dhd_rtt_work(struct work_struct *work) { rtt_status_info_t *rtt_status; dhd_pub_t *dhd; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif rtt_status = container_of(work, rtt_status_info_t, work); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif if (rtt_status == NULL) { DHD_ERROR(("%s : rtt_status is NULL\n", __FUNCTION__)); return; @@ -640,6 +2092,7 @@ dhd_rtt_work(struct work_struct *work) } (void) dhd_rtt_start(dhd); } +#endif /* WL_CFG80211 */ int dhd_rtt_capability(dhd_pub_t *dhd, rtt_capabilities_t *capa) @@ -652,65 +2105,291 @@ dhd_rtt_capability(dhd_pub_t *dhd, rtt_capabilities_t *capa) NULL_CHECK(capa, "capa is NULL", err); bzero(capa, sizeof(rtt_capabilities_t)); - if (rtt_status->capability & RTT_CAP_ONE_WAY) { + /* set rtt capabilities */ + if (rtt_status->rtt_capa.proto & RTT_CAP_ONE_WAY) capa->rtt_one_sided_supported = 1; - } - if (rtt_status->capability & RTT_CAP_11V_WAY) { - capa->rtt_11v_supported = 1; - } - if (rtt_status->capability & RTT_CAP_11MC_WAY) { + if (rtt_status->rtt_capa.proto & RTT_CAP_FTM_WAY) capa->rtt_ftm_supported = 1; - } - if (rtt_status->capability & RTT_CAP_VS_WAY) { - capa->rtt_vs_supported = 1; - } + + if (rtt_status->rtt_capa.feature & RTT_FEATURE_LCI) + capa->lci_support = 1; + if (rtt_status->rtt_capa.feature & RTT_FEATURE_LCR) + capa->lcr_support = 1; + if (rtt_status->rtt_capa.feature & RTT_FEATURE_PREAMBLE) + capa->preamble_support = 1; + if (rtt_status->rtt_capa.feature & RTT_FEATURE_BW) + capa->bw_support = 1; + + /* bit mask */ + capa->preamble_support = rtt_status->rtt_capa.preamble; + capa->bw_support = rtt_status->rtt_capa.bw; return err; } +#ifdef WL_CFG80211 +int +dhd_rtt_avail_channel(dhd_pub_t *dhd, wifi_channel_info 
*channel_info) +{ + u32 chanspec = 0; + int err = BCME_OK; + chanspec_t c = 0; + u32 channel; + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + + if ((err = wldev_iovar_getint(dev, "chanspec", + (s32 *)&chanspec)) == BCME_OK) { + c = (chanspec_t)dtoh32(chanspec); + c = wl_chspec_driver_to_host(c); + channel = wf_chspec_ctlchan(c); + DHD_RTT((" control channel is %d \n", channel)); + if (CHSPEC_IS20(c)) { + channel_info->width = WIFI_CHAN_WIDTH_20; + DHD_RTT((" band is 20 \n")); + } else if (CHSPEC_IS40(c)) { + channel_info->width = WIFI_CHAN_WIDTH_40; + DHD_RTT(("band is 40 \n")); + } else { + channel_info->width = WIFI_CHAN_WIDTH_80; + DHD_RTT(("band is 80 \n")); + } + if (CHSPEC_IS2G(c) && (channel >= CH_MIN_2G_CHANNEL) && + (channel <= CH_MAX_2G_CHANNEL)) { + channel_info->center_freq = + ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ); + } else if (CHSPEC_IS5G(c) && channel >= CH_MIN_5G_CHANNEL) { + channel_info->center_freq = + ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ); + } + if ((channel_info->width == WIFI_CHAN_WIDTH_80) || + (channel_info->width == WIFI_CHAN_WIDTH_40)) { + channel = CHSPEC_CHANNEL(c); + channel_info->center_freq0 = + ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ); + } + } else { + DHD_ERROR(("Failed to get the chanspec \n")); + } + return err; +} + +int +dhd_rtt_enable_responder(dhd_pub_t *dhd, wifi_channel_info *channel_info) +{ + int err = BCME_OK; + char chanbuf[CHANSPEC_STR_LEN]; + int pm = PM_OFF; + int ftm_cfg_cnt = 0; + chanspec_t chanspec; + wifi_channel_info_t channel; + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + ftm_config_options_info_t ftm_configs[FTM_MAX_CONFIGS]; + ftm_config_param_info_t ftm_params[FTM_MAX_PARAMS]; + rtt_status_info_t *rtt_status; + + memset(&channel, 0, sizeof(channel)); + BCM_REFERENCE(chanbuf); + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + if 
(RTT_IS_STOPPED(rtt_status)) { + DHD_RTT(("STA responder/Target. \n")); + } + DHD_RTT(("Enter %s \n", __FUNCTION__)); + if (!dhd_is_associated(dhd, 0, NULL)) { + if (channel_info) { + channel.width = channel_info->width; + channel.center_freq = channel_info->center_freq; + channel.center_freq0 = channel_info->center_freq; + } + else { + channel.width = WIFI_CHAN_WIDTH_80; + channel.center_freq = DEFAULT_FTM_FREQ; + channel.center_freq0 = DEFAULT_FTM_CNTR_FREQ0; + } + chanspec = dhd_rtt_convert_to_chspec(channel); + DHD_RTT(("chanspec/channel set as %s for rtt.\n", + wf_chspec_ntoa(chanspec, chanbuf))); + err = wldev_iovar_setint(dev, "chanspec", chanspec); + if (err) { + DHD_ERROR(("Failed to set the chanspec \n")); + } + } + err = wldev_ioctl_get(dev, WLC_GET_PM, &rtt_status->pm, sizeof(rtt_status->pm)); + DHD_RTT(("Current PM value read %d\n", rtt_status->pm)); + if (err) { + DHD_ERROR(("Failed to get the PM value \n")); + } else { + err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm)); + if (err) { + DHD_ERROR(("Failed to set the PM \n")); + rtt_status->pm_restore = FALSE; + } else { + rtt_status->pm_restore = TRUE; + } + } + if (!RTT_IS_ENABLED(rtt_status)) { + err = dhd_rtt_ftm_enable(dhd, TRUE); + if (err) { + DHD_ERROR(("Failed to enable FTM (%d)\n", err)); + goto exit; + } + DHD_RTT(("FTM enabled \n")); + } + rtt_status->status = RTT_ENABLED; + DHD_RTT(("Responder enabled \n")); + memset(ftm_configs, 0, sizeof(ftm_configs)); + memset(ftm_params, 0, sizeof(ftm_params)); + ftm_configs[ftm_cfg_cnt].enable = TRUE; + ftm_configs[ftm_cfg_cnt++].flags = WL_PROXD_SESSION_FLAG_TARGET; + rtt_status->flags = WL_PROXD_SESSION_FLAG_TARGET; + DHD_RTT(("Set the device as responder \n")); + err = dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_OPTIONS, + ftm_configs, ftm_cfg_cnt); +exit: + if (err) { + rtt_status->status = RTT_STOPPED; + DHD_ERROR(("rtt is stopped %s \n", __FUNCTION__)); + dhd_rtt_ftm_enable(dhd, FALSE); + DHD_RTT(("restoring the PM value 
\n")); + if (rtt_status->pm_restore) { + pm = PM_FAST; + err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm)); + if (err) { + DHD_ERROR(("Failed to restore PM \n")); + } else { + rtt_status->pm_restore = FALSE; + } + } + } + return err; +} + +int +dhd_rtt_cancel_responder(dhd_pub_t *dhd) +{ + int err = BCME_OK; + rtt_status_info_t *rtt_status; + int pm = 0; + struct net_device *dev = dhd_linux_get_primary_netdev(dhd); + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + DHD_RTT(("Enter %s \n", __FUNCTION__)); + err = dhd_rtt_ftm_enable(dhd, FALSE); + if (err) { + DHD_ERROR(("failed to disable FTM (%d)\n", err)); + } + rtt_status->status = RTT_STOPPED; + if (rtt_status->pm_restore) { + pm = PM_FAST; + DHD_RTT(("pm_restore =%d \n", rtt_status->pm_restore)); + err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm)); + if (err) { + DHD_ERROR(("Failed to restore PM \n")); + } else { + rtt_status->pm_restore = FALSE; + } + } + return err; +} +#endif /* WL_CFG80211 */ + int dhd_rtt_init(dhd_pub_t *dhd) { int err = BCME_OK; +#ifdef WL_CFG80211 + int ret; + int32 drv_up = 1; + int32 version; rtt_status_info_t *rtt_status; NULL_CHECK(dhd, "dhd is NULL", err); if (dhd->rtt_state) { - goto exit; + return err; } - dhd->rtt_state = MALLOC(dhd->osh, sizeof(rtt_status_info_t)); + dhd->rtt_state = kzalloc(sizeof(rtt_status_info_t), GFP_KERNEL); if (dhd->rtt_state == NULL) { - DHD_ERROR(("failed to create rtt_state\n")); - goto exit; + err = BCME_NOMEM; + DHD_ERROR(("%s : failed to create rtt_state\n", __FUNCTION__)); + return err; } bzero(dhd->rtt_state, sizeof(rtt_status_info_t)); rtt_status = GET_RTTSTATE(dhd); - rtt_status->dhd = dhd; - err = dhd_iovar(dhd, 0, "proxd_params", NULL, 0, 1); - if (err != BCME_UNSUPPORTED) { - rtt_status->capability |= RTT_CAP_ONE_WAY; - rtt_status->capability |= RTT_CAP_VS_WAY; - DHD_ERROR(("%s: Support RTT Service\n", __FUNCTION__)); + rtt_status->rtt_config.target_info 
= + kzalloc(TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT), GFP_KERNEL); + if (rtt_status->rtt_config.target_info == NULL) { + DHD_ERROR(("%s failed to allocate the target info for %d\n", + __FUNCTION__, RTT_MAX_TARGET_CNT)); + err = BCME_NOMEM; + goto exit; } + rtt_status->dhd = dhd; + /* need to do WLC_UP */ + dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&drv_up, sizeof(int32), TRUE, 0); + + ret = dhd_rtt_get_version(dhd, &version); + if (ret == BCME_OK && (version == WL_PROXD_API_VERSION)) { + DHD_ERROR(("%s : FTM is supported\n", __FUNCTION__)); + /* rtt_status->rtt_capa.proto |= RTT_CAP_ONE_WAY; */ + rtt_status->rtt_capa.proto |= RTT_CAP_FTM_WAY; + + /* indicate to set tx rate */ + rtt_status->rtt_capa.feature |= RTT_FEATURE_LCI; + rtt_status->rtt_capa.feature |= RTT_FEATURE_LCR; + rtt_status->rtt_capa.feature |= RTT_FEATURE_PREAMBLE; + rtt_status->rtt_capa.preamble |= RTT_PREAMBLE_VHT; + rtt_status->rtt_capa.preamble |= RTT_PREAMBLE_HT; + + /* indicate to set bandwith */ + rtt_status->rtt_capa.feature |= RTT_FEATURE_BW; + rtt_status->rtt_capa.bw |= RTT_BW_20; + rtt_status->rtt_capa.bw |= RTT_BW_40; + rtt_status->rtt_capa.bw |= RTT_BW_80; + } else { + if ((ret != BCME_OK) || (version == 0)) { + DHD_ERROR(("%s : FTM is not supported\n", __FUNCTION__)); + } else { + DHD_ERROR(("%s : FTM version mismatch between HOST (%d) and FW (%d)\n", + __FUNCTION__, WL_PROXD_API_VERSION, version)); + } + } + /* cancel all of RTT request once we got the cancel request */ + rtt_status->all_cancel = TRUE; mutex_init(&rtt_status->rtt_mutex); INIT_LIST_HEAD(&rtt_status->noti_fn_list); INIT_LIST_HEAD(&rtt_status->rtt_results_cache); INIT_WORK(&rtt_status->work, dhd_rtt_work); exit: + if (err < 0) { + kfree(rtt_status->rtt_config.target_info); + kfree(dhd->rtt_state); + } +#endif /* WL_CFG80211 */ return err; + } int dhd_rtt_deinit(dhd_pub_t *dhd) { int err = BCME_OK; +#ifdef WL_CFG80211 rtt_status_info_t *rtt_status; - rtt_result_t *rtt_result, *next; + rtt_results_header_t *rtt_header, *next; + 
rtt_result_t *rtt_result, *next2; struct rtt_noti_callback *iter, *iter2; NULL_CHECK(dhd, "dhd is NULL", err); rtt_status = GET_RTTSTATE(dhd); NULL_CHECK(rtt_status, "rtt_status is NULL", err); rtt_status->status = RTT_STOPPED; + DHD_RTT(("rtt is stopped %s \n", __FUNCTION__)); /* clear evt callback list */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + if (!list_empty(&rtt_status->noti_fn_list)) { list_for_each_entry_safe(iter, iter2, &rtt_status->noti_fn_list, list) { list_del(&iter->list); @@ -719,13 +2398,22 @@ dhd_rtt_deinit(dhd_pub_t *dhd) } /* remove the rtt results */ if (!list_empty(&rtt_status->rtt_results_cache)) { - list_for_each_entry_safe(rtt_result, next, &rtt_status->rtt_results_cache, list) { - list_del(&rtt_result->list); - kfree(rtt_result); + list_for_each_entry_safe(rtt_header, next, &rtt_status->rtt_results_cache, list) { + list_del(&rtt_header->list); + list_for_each_entry_safe(rtt_result, next2, + &rtt_header->result_list, list) { + list_del(&rtt_result->list); + kfree(rtt_result); + } + kfree(rtt_header); } } - MFREE(dhd->osh, dhd->rtt_state, sizeof(rtt_status_info_t)); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + kfree(rtt_status->rtt_config.target_info); + kfree(dhd->rtt_state); dhd->rtt_state = NULL; +#endif /* WL_CFG80211 */ return err; } -#endif /* RTT_SUPPORT */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_rtt.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_rtt.h index 2fbb9c973cd3..b3ca820b7478 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_rtt.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_rtt.h @@ -1,7 +1,7 @@ /* * Broadcom Dongle Host Driver (DHD), RTT * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * 
agreement governing use of this software, this software is licensed to you @@ -21,19 +21,31 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * - * $Id: dhd_rtt.h 558438 2015-05-22 06:05:11Z $ + * + * <> + * + * $Id$ */ #ifndef __DHD_RTT_H__ #define __DHD_RTT_H__ #include "dngl_stats.h" -#define RTT_MAX_TARGET_CNT 10 -#define RTT_MAX_FRAME_CNT 25 -#define RTT_MAX_RETRY_CNT 10 -#define DEFAULT_FTM_CNT 6 -#define DEFAULT_RETRY_CNT 6 +#define RTT_MAX_TARGET_CNT 50 +#define RTT_MAX_FRAME_CNT 25 +#define RTT_MAX_RETRY_CNT 10 +#define DEFAULT_FTM_CNT 6 +#define DEFAULT_RETRY_CNT 6 +#define DEFAULT_FTM_FREQ 5180 +#define DEFAULT_FTM_CNTR_FREQ0 5210 +#define TARGET_INFO_SIZE(count) (sizeof(rtt_target_info_t) * count) + +#define TARGET_TYPE(target) (target->type) + +#ifndef BIT +#define BIT(x) (1 << (x)) +#endif /* DSSS, CCK and 802.11n rates in [500kbps] units */ #define WL_MAXRATE 108 /* in 500kbps units */ @@ -49,7 +61,7 @@ #define WL_RATE_36M 72 /* in 500kbps units */ #define WL_RATE_48M 96 /* in 500kbps units */ #define WL_RATE_54M 108 /* in 500kbps units */ - +#define GET_RTTSTATE(dhd) ((rtt_status_info_t *)dhd->rtt_state) enum rtt_role { RTT_INITIATOR = 0, @@ -57,11 +69,12 @@ enum rtt_role { }; enum rtt_status { RTT_STOPPED = 0, - RTT_STARTED = 1 + RTT_STARTED = 1, + RTT_ENABLED = 2 }; typedef int64_t wifi_timestamp; /* In microseconds (us) */ typedef int64_t wifi_timespan; -typedef int wifi_rssi; +typedef int32 wifi_rssi_rtt; typedef enum { RTT_INVALID, @@ -79,24 +92,53 @@ typedef enum { } rtt_peer_type_t; typedef enum rtt_reason { - RTT_REASON_SUCCESS, - RTT_REASON_FAILURE, - RTT_REASON_NO_RSP, - RTT_REASON_REJECTED, - RTT_REASON_NOT_SCHEDULED_YET, - RTT_REASON_TIMEOUT, - RTT_REASON_AP_ON_DIFF_CH, - RTT_REASON_AP_NO_CAP, - RTT_REASON_ABORT + RTT_REASON_SUCCESS, + RTT_REASON_FAILURE, + RTT_REASON_FAIL_NO_RSP, + RTT_REASON_FAIL_INVALID_TS, /* Invalid timestamp */ 
+ RTT_REASON_FAIL_PROTOCOL, /* 11mc protocol failed */ + RTT_REASON_FAIL_REJECTED, + RTT_REASON_FAIL_NOT_SCHEDULED_YET, + RTT_REASON_FAIL_SCHEDULE, /* schedule failed */ + RTT_REASON_FAIL_TM_TIMEOUT, + RTT_REASON_FAIL_AP_ON_DIFF_CHANNEL, + RTT_REASON_FAIL_NO_CAPABILITY, + RTT_REASON_FAIL_BUSY_TRY_LATER, + RTT_REASON_ABORTED } rtt_reason_t; -typedef enum rtt_capability { - RTT_CAP_NONE = 0, - RTT_CAP_ONE_WAY = (1 << (0)), - RTT_CAP_11V_WAY = (1 << (1)), /* IEEE802.11v */ - RTT_CAP_11MC_WAY = (1 << (2)), /* IEEE802.11mc */ - RTT_CAP_VS_WAY = (1 << (3)) /* BRCM vendor specific */ -} rtt_capability_t; +enum { + RTT_CAP_ONE_WAY = BIT(0), + /* IEEE802.11mc */ + RTT_CAP_FTM_WAY = BIT(1) +}; + +enum { + RTT_FEATURE_LCI = BIT(0), + RTT_FEATURE_LCR = BIT(1), + RTT_FEATURE_PREAMBLE = BIT(2), + RTT_FEATURE_BW = BIT(3) +}; + +enum { + RTT_PREAMBLE_LEGACY = BIT(0), + RTT_PREAMBLE_HT = BIT(1), + RTT_PREAMBLE_VHT = BIT(2) +}; + + +enum { + RTT_BW_5 = BIT(0), + RTT_BW_10 = BIT(1), + RTT_BW_20 = BIT(2), + RTT_BW_40 = BIT(3), + RTT_BW_80 = BIT(4), + RTT_BW_160 = BIT(5) +}; +#define FTM_MAX_NUM_BURST_EXP 14 +#define HAS_11MC_CAP(cap) (cap & RTT_CAP_FTM_WAY) +#define HAS_ONEWAY_CAP(cap) (cap & RTT_CAP_ONE_WAY) +#define HAS_RTT_CAP(cap) (HAS_ONEWAY_CAP(cap) || HAS_11MC_CAP(cap)) typedef struct wifi_channel_info { wifi_channel_width_t width; @@ -107,7 +149,7 @@ typedef struct wifi_channel_info { typedef struct wifi_rate { uint32 preamble :3; /* 0: OFDM, 1: CCK, 2 : HT, 3: VHT, 4..7 reserved */ - uint32 nss :2; /* 0 : 1x1, 1: 2x2, 3: 3x3, 4: 4x4 */ + uint32 nss :2; /* 1 : 1x1, 2: 2x2, 3: 3x3, 4: 4x4 */ uint32 bw :3; /* 0: 20Mhz, 1: 40Mhz, 2: 80Mhz, 3: 160Mhz */ /* OFDM/CCK rate code would be as per IEEE std in the unit of 0.5 mb * HT/VHT it would be mcs index @@ -123,67 +165,156 @@ typedef struct rtt_target_info { rtt_peer_type_t peer; /* peer type */ wifi_channel_info_t channel; /* channel information */ chanspec_t chanspec; /* chanspec for channel */ - int8 continuous; /* 0 = single 
shot or 1 = continous raging */ bool disable; /* disable for RTT measurement */ - uint32 interval; /* interval of RTT measurement (unit ms) when continuous = true */ - uint32 measure_cnt; /* total number of RTT measurement when continuous */ - uint32 ftm_cnt; /* num of packets in each RTT measurement */ - uint32 retry_cnt; /* num of retries if sampling fails */ + /* + * Time interval between bursts (units: 100 ms). + * Applies to 1-sided and 2-sided RTT multi-burst requests. + * Range: 0-31, 0: no preference by initiator (2-sided RTT) + */ + uint32 burst_period; + /* + * Total number of RTT bursts to be executed. It will be + * specified in the same way as the parameter "Number of + * Burst Exponent" found in the FTM frame format. It + * applies to both: 1-sided RTT and 2-sided RTT. Valid + * values are 0 to 15 as defined in 802.11mc std. + * 0 means single shot + * The implication of this parameter on the maximum + * number of RTT results is the following: + * for 1-sided RTT: max num of RTT results = (2^num_burst)*(num_frames_per_burst) + * for 2-sided RTT: max num of RTT results = (2^num_burst)*(num_frames_per_burst - 1) + */ + uint16 num_burst; + /* + * num of frames per burst. + * Minimum value = 1, Maximum value = 31 + * For 2-sided this equals the number of FTM frames + * to be attempted in a single burst. This also + * equals the number of FTM frames that the + * initiator will request that the responder send + * in a single frame + */ + uint32 num_frames_per_burst; + /* num of frames in each RTT burst + * for single side, measurement result num = frame number + * for 2 side RTT, measurement result num = frame number - 1 + */ + uint32 num_retries_per_ftm; /* retry time for RTT measurment frame */ + /* following fields are only valid for 2 side RTT */ + uint32 num_retries_per_ftmr; + uint8 LCI_request; + uint8 LCR_request; + /* + * Applies to 1-sided and 2-sided RTT. 
Valid values will + * be 2-11 and 15 as specified by the 802.11mc std for + * the FTM parameter burst duration. In a multi-burst + * request, if responder overrides with larger value, + * the initiator will return failure. In a single-burst + * request if responder overrides with larger value, + * the initiator will sent TMR_STOP to terminate RTT + * at the end of the burst_duration it requested. + */ + uint32 burst_duration; + uint8 preamble; /* 1 - Legacy, 2 - HT, 4 - VHT */ + uint8 bw; /* 5, 10, 20, 40, 80, 160 */ } rtt_target_info_t; -typedef struct rtt_result { - struct list_head list; - uint16 ver; /* version */ - rtt_target_info_t *target_info; /* target info */ - uint16 mode; /* mode: target/initiator */ - uint16 method; /* method: rssi/TOF/AOA */ - uint8 err_code; /* error classification */ - uint8 TOF_type; /* one way or two way TOF */ - wifi_rate_t tx_rate; /* tx rate */ - struct ether_addr peer_mac; /* (e.g for tgt:initiator's */ - int32 distance; /* dst to tgt, units (meter * 16) */ - uint32 meanrtt; /* mean delta */ - uint32 modertt; /* Mode delta */ - uint32 medianrtt; /* median RTT */ - uint32 sdrtt; /* Standard deviation of RTT */ - int16 avg_rssi; /* avg rssi across the ftm frames */ - int16 validfrmcnt; /* Firmware's valid frame counts */ - wifi_timestamp ts; /* the time elapsed from boot time when driver get this result */ - uint16 ftm_cnt; /* num of rtd measurments/length in the ftm buffer */ - ftm_sample_t ftm_buff[1]; /* 1 ... 
ftm_cnt */ -} rtt_result_t; +typedef struct rtt_config_params { + int8 rtt_target_cnt; + rtt_target_info_t *target_info; +} rtt_config_params_t; + +typedef struct rtt_status_info { + dhd_pub_t *dhd; + int8 status; /* current status for the current entry */ + int8 txchain; /* current device tx chain */ + int8 mpc; /* indicate we change mpc mode */ + int pm; /* to save current value of pm */ + int8 pm_restore; /* flag to reset the old value of pm */ + int8 cur_idx; /* current entry to do RTT */ + bool all_cancel; /* cancel all request once we got the cancel requet */ + uint32 flags; /* indicate whether device is configured as initiator or target */ + struct capability { + int32 proto :8; + int32 feature :8; + int32 preamble :8; + int32 bw :8; + } rtt_capa; /* rtt capability */ + struct mutex rtt_mutex; + rtt_config_params_t rtt_config; + struct work_struct work; + struct list_head noti_fn_list; + struct list_head rtt_results_cache; /* store results for RTT */ +} rtt_status_info_t; typedef struct rtt_report { struct ether_addr addr; - uint num_measurement; /* measurement number in case of continous raging */ + unsigned int burst_num; /* # of burst inside a multi-burst request */ + unsigned int ftm_num; /* total RTT measurement frames attempted */ + unsigned int success_num; /* total successful RTT measurement frames */ + uint8 num_per_burst_peer; /* max number of FTM number per burst the peer support */ rtt_reason_t status; /* raging status */ + /* in s, 11mc only, only for RTT_REASON_FAIL_BUSY_TRY_LATER, 1- 31s */ + uint8 retry_after_duration; rtt_type_t type; /* rtt type */ - rtt_peer_type_t peer; /* peer type */ - wifi_channel_info_t channel; /* channel information */ - wifi_rssi rssi; /* avg rssi accroos the ftm frames */ - wifi_rssi rssi_spread; /* rssi spread in 0.5 db steps e.g. 
5 implies 2.5 spread */ - wifi_rate_t tx_rate; /* tx rate */ - wifi_timespan rtt; /* round trip time in nanoseconds */ - wifi_timespan rtt_sd; /* rtt standard deviation in nanoseconds */ + wifi_rssi_rtt rssi; /* average rssi in 0.5 dB steps e.g. 143 implies -71.5 dB */ + wifi_rssi_rtt rssi_spread; /* rssi spread in 0.5 db steps e.g. 5 implies 2.5 spread */ + /* + * 1-sided RTT: TX rate of RTT frame. + * 2-sided RTT: TX rate of initiator's Ack in response to FTM frame. + */ + wifi_rate_t tx_rate; + /* + * 1-sided RTT: TX rate of Ack from other side. + * 2-sided RTT: TX rate of FTM frame coming from responder. + */ + wifi_rate_t rx_rate; + wifi_timespan rtt; /* round trip time in 0.1 nanoseconds */ + wifi_timespan rtt_sd; /* rtt standard deviation in 0.1 nanoseconds */ wifi_timespan rtt_spread; /* difference between max and min rtt times recorded */ - int32 distance; /* distance in cm (optional) */ - int32 distance_sd; /* standard deviation in cm (optional) */ - int32 distance_spread; /* difference between max and min distance recorded (optional) */ + int distance; /* distance in cm (optional) */ + int distance_sd; /* standard deviation in cm (optional) */ + int distance_spread; /* difference between max and min distance recorded (optional) */ wifi_timestamp ts; /* time of the measurement (in microseconds since boot) */ + int burst_duration; /* in ms, how long the FW time is to fininish one burst measurement */ + int negotiated_burst_num; /* Number of bursts allowed by the responder */ + bcm_tlv_t *LCI; /* LCI Report */ + bcm_tlv_t *LCR; /* Location Civic Report */ } rtt_report_t; +#define RTT_REPORT_SIZE (sizeof(rtt_report_t)) + +/* rtt_results_header to maintain rtt result list per mac address */ +typedef struct rtt_results_header { + struct ether_addr peer_mac; + uint32 result_cnt; + uint32 result_tot_len; /* sum of report_len of rtt_result */ + struct list_head list; + struct list_head result_list; +} rtt_results_header_t; + +/* rtt_result to link all of 
rtt_report */ +typedef struct rtt_result { + struct list_head list; + struct rtt_report report; + int32 report_len; /* total length of rtt_report */ +} rtt_result_t; /* RTT Capabilities */ typedef struct rtt_capabilities { uint8 rtt_one_sided_supported; /* if 1-sided rtt data collection is supported */ - uint8 rtt_11v_supported; /* if 11v rtt data collection is supported */ uint8 rtt_ftm_supported; /* if ftm rtt data collection is supported */ - uint8 rtt_vs_supported; /* if vendor specific data collection supported */ + uint8 lci_support; /* location configuration information */ + uint8 lcr_support; /* Civic Location */ + uint8 preamble_support; /* bit mask indicate what preamble is supported */ + uint8 bw_support; /* bit mask indicate what BW is supported */ } rtt_capabilities_t; -typedef struct rtt_config_params { - int8 rtt_target_cnt; - rtt_target_info_t target_info[RTT_MAX_TARGET_CNT]; -} rtt_config_params_t; + +/* RTT responder information */ +typedef struct wifi_rtt_responder { + wifi_channel_info channel; /* channel of responder */ + uint8 preamble; /* preamble supported by responder */ +} wifi_rtt_responder_t; typedef void (*dhd_rtt_compl_noti_fn)(void *ctx, void *rtt_data); /* Linux wrapper to call common dhd_rtt_set_cfg */ @@ -203,10 +334,23 @@ dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_ int dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa); +#ifdef WL_CFG80211 +int +dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info); +#endif /* WL_CFG80211 */ + +int +dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info); + +int +dhd_dev_rtt_cancel_responder(struct net_device *dev); /* export to upper layer */ chanspec_t dhd_rtt_convert_to_chspec(wifi_channel_info_t channel); +int +dhd_rtt_idx_to_burst_duration(uint idx); + int dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params); @@ -226,6 +370,15 @@ dhd_rtt_event_handler(dhd_pub_t *dhd, 
wl_event_msg_t *event, void *event_data); int dhd_rtt_capability(dhd_pub_t *dhd, rtt_capabilities_t *capa); +int +dhd_rtt_avail_channel(dhd_pub_t *dhd, wifi_channel_info *channel_info); + +int +dhd_rtt_enable_responder(dhd_pub_t *dhd, wifi_channel_info *channel_info); + +int +dhd_rtt_cancel_responder(dhd_pub_t *dhd); + int dhd_rtt_init(dhd_pub_t *dhd); diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_sdio.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_sdio.c index 58638dea8040..d7b214443422 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_sdio.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_sdio.c @@ -1,7 +1,7 @@ /* * DHD Bus Module for SDIO * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_sdio.c 593728 2015-10-19 09:20:32Z $ + * $Id: dhd_sdio.c 705650 2017-06-19 03:00:50Z $ */ #include @@ -55,9 +55,9 @@ #include #include -#include -#include -#include +#include +#include <802.1d.h> +#include <802.11.h> #include #include @@ -75,10 +75,17 @@ #include #endif /* DHDTCPACK_SUPPRESS */ +#ifdef BT_OVER_SDIO +#include +#endif /* BT_OVER_SDIO */ + bool dhd_mp_halting(dhd_pub_t *dhdp); extern void bcmsdh_waitfor_iodrain(void *sdh); extern void bcmsdh_reject_ioreqs(void *sdh, bool reject); extern bool bcmsdh_fatal_error(void *sdh); +static int dhdsdio_suspend(void *context); +static int dhdsdio_resume(void *context); + #ifndef DHDSDIO_MEM_DUMP_FNAME #define DHDSDIO_MEM_DUMP_FNAME "mem_dump" @@ -89,6 +96,7 @@ extern bool bcmsdh_fatal_error(void *sdh); #define FCLOW (FCHI / 2) #define PRIOMASK 7 +#define F0_BLOCK_SIZE 32 #define TXRETRIES 2 /* # of retries for tx frames */ #define READ_FRM_CNT_RETRIES 3 #ifndef DHD_RXBOUND @@ -102,6 +110,8 @@ extern bool bcmsdh_fatal_error(void *sdh); 
#define DHD_TXMINMAX 1 /* Max tx frames if rx still pending */ #define MEMBLOCK 2048 /* Block size used for downloading of dongle image */ +#define MAX_MEMBLOCK (32 * 1024) /* Block size used for downloading of dongle image */ + #define MAX_DATA_BUF (64 * 1024) /* Must be large enough to hold biggest possible glom */ #ifndef DHD_FIRSTREAD @@ -135,6 +145,9 @@ extern bool bcmsdh_fatal_error(void *sdh); /* Maximum milliseconds to wait for F2 to come up */ #define DHD_WAIT_F2RDY 3000 +/* Maximum usec to wait for HTAVAIL to come up */ +#define DHD_WAIT_HTAVAIL 10000 + /* Bump up limit on waiting for HT to account for first startup; * if the image is doing a CRC calculation before programming the PMU * for HT availability, it could take a couple hundred ms more, so @@ -163,18 +176,21 @@ extern bool bcmsdh_fatal_error(void *sdh); */ #define PKTFREE2() if ((bus->bus != SPI_BUS) || bus->usebufpool) \ PKTFREE(bus->dhd->osh, pkt, FALSE); +DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep); #ifdef PKT_STATICS pkt_statics_t tx_statics = {0}; #endif -DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep); - #if defined(MULTIPLE_SUPPLICANT) #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) DEFINE_MUTEX(_dhd_sdio_mutex_lock_); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ -#endif +#endif + +#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_HW +extern unsigned int system_hw_rev; +#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_HW */ /* Device console log buffer state */ #define CONSOLE_LINE_MAX 192 @@ -193,9 +209,9 @@ typedef struct dhd_console { #define KSO_ENAB(bus) ((bus)->kso) #define SR_ENAB(bus) ((bus)->_srenab) #define SLPAUTO_ENAB(bus) ((SR_ENAB(bus)) && ((bus)->_slpauto)) -#define MIN_RSRC_ADDR (SI_ENUM_BASE + 0x618) + #define MIN_RSRC_SR 0x3 -#define CORE_CAPEXT_ADDR (SI_ENUM_BASE + 0x64c) +#define CORE_CAPEXT_ADDR_OFFSET (0x64c) #define CORE_CAPEXT_SR_SUPPORTED_MASK (1 << 1) #define RCTL_MACPHY_DISABLE_MASK (1 << 26) #define RCTL_LOGIC_DISABLE_MASK (1 << 27) @@ -210,6 +226,35 @@ 
typedef struct dhd_console { #define OVERFLOW_BLKSZ512_MES 80 #define CC_PMUCC3 (0x3) + +#ifdef DHD_UCODE_DOWNLOAD +/* Ucode host download related macros */ +#define UCODE_DOWNLOAD_REQUEST 0xCAFECAFE +#define UCODE_DOWNLOAD_COMPLETE 0xABCDABCD +#endif /* DHD_UCODE_DOWNLOAD */ + +#if defined(BT_OVER_SDIO) +#define BTMEM_OFFSET 0x19000000 +/* BIT0 => WLAN Power UP and BIT1=> WLAN Wake */ +#define BT2WLAN_PWRUP_WAKE 0x03 +#define BT2WLAN_PWRUP_ADDR 0x640894 /* This address is specific to 43012B0 */ + +#define BTFW_MAX_STR_LEN 600 +#define BTFW_DOWNLOAD_BLK_SIZE (BTFW_MAX_STR_LEN/2 + 8) + +#define BTFW_ADDR_MODE_UNKNOWN 0 +#define BTFW_ADDR_MODE_EXTENDED 1 +#define BTFW_ADDR_MODE_SEGMENT 2 +#define BTFW_ADDR_MODE_LINEAR32 3 + +#define BTFW_HEX_LINE_TYPE_DATA 0 +#define BTFW_HEX_LINE_TYPE_END_OF_DATA 1 +#define BTFW_HEX_LINE_TYPE_EXTENDED_SEGMENT_ADDRESS 2 +#define BTFW_HEX_LINE_TYPE_EXTENDED_ADDRESS 4 +#define BTFW_HEX_LINE_TYPE_ABSOLUTE_32BIT_ADDRESS 5 + +#endif /* defined (BT_OVER_SDIO) */ + /* Private data for SDIO bus interaction */ typedef struct dhd_bus { dhd_pub_t *dhd; @@ -278,10 +323,8 @@ typedef struct dhd_bus { uint polltick; /* Tick counter */ uint pollcnt; /* Count of active polls */ -#ifdef DHD_DEBUG dhd_console_t console; /* Console output polling support */ uint console_addr; /* Console address from shared struct */ -#endif /* DHD_DEBUG */ uint regfails; /* Count of R_REG/W_REG failures */ @@ -360,6 +403,7 @@ typedef struct dhd_bus { uint f2rxdata; /* Number of frame data reads */ uint f2txdata; /* Number of f2 frame writes */ uint f1regdata; /* Number of f1 register accesses */ + wake_counts_t wake_counts; /* Wake up counter */ #ifdef DHDENABLE_TAILPAD uint tx_tailpad_chain; /* Number of tail padding by chaining pad_pkt */ uint tx_tailpad_pktget; /* Number of tail padding by new PKTGET */ @@ -389,10 +433,33 @@ typedef struct dhd_bus { #ifdef DHDENABLE_TAILPAD void *pad_pkt; #endif /* DHDENABLE_TAILPAD */ + uint32 dongle_trap_addr; /* device trap addr 
location in device memory */ +#if defined(BT_OVER_SDIO) + char *btfw_path; /* module_param: path to BT firmware image */ + uint32 bt_use_count; /* Counter that tracks whether BT is using the bus */ +#endif /* defined (BT_OVER_SDIO) */ uint txglomframes; /* Number of tx glom frames (superframes) */ uint txglompkts; /* Number of packets from tx glom frames */ } dhd_bus_t; + +/* + * Whenever DHD_IDLE_IMMEDIATE condition is handled, we have to now check if + * BT is active too. Instead of adding #ifdef code in all the places, we thought + * of adding one macro check as part of the if condition that checks for DHD_IDLE_IMMEDIATE + * In case of non BT over SDIO builds, this macro will always return TRUE. In case + * of the builds where BT_OVER_SDIO is enabled, it will expand to a condition check + * that checks if bt_use_count is zero. So this macro will return equate to 1 if + * bt_use_count is 0, indicating that there are no active users and if bt_use_count + * is non zero it would return 0 there by preventing the caller from executing the + * sleep calls. 
+ */ +#ifdef BT_OVER_SDIO +#define NO_OTHER_ACTIVE_BUS_USER(bus) (bus->bt_use_count == 0) +#else +#define NO_OTHER_ACTIVE_BUS_USER(bus) (1) +#endif /* BT_OVER_SDIO */ + /* clkstate */ #define CLK_NONE 0 #define CLK_SDONLY 1 @@ -414,10 +481,16 @@ static int tx_packets[NUMPRIO]; const uint dhd_deferred_tx = 1; extern uint dhd_watchdog_ms; +extern uint sd_f1_blocksize; + +#if defined(BT_OVER_SDIO) +extern dhd_pub_t *g_dhd_pub; +#endif /* (BT_OVER_SDIO) */ extern void dhd_os_wd_timer(void *bus, uint wdtick); int dhd_enableOOB(dhd_pub_t *dhd, bool sleep); + /* Tx/Rx bounds */ uint dhd_txbound; uint dhd_rxbound; @@ -477,7 +550,7 @@ static const uint max_roundup = 512; /* Try doing readahead */ static bool dhd_readahead; -#if defined(SWTXGLOM) || defined(BCMSDIOH_TXGLOM_EXT) +#if defined(BCMSDIOH_TXGLOM_EXT) bool dhdsdio_is_dataok(dhd_bus_t *bus) { return (((uint8)(bus->tx_max - bus->tx_seq) - bus->dhd->conf->tx_max_offset > 1) && \ @@ -491,7 +564,7 @@ dhdsdio_get_databufcnt(dhd_bus_t *bus) { #endif /* To check if there's window offered */ -#if defined(SWTXGLOM) || defined(BCMSDIOH_TXGLOM_EXT) +#if defined(BCMSDIOH_TXGLOM_EXT) #define DATAOK(bus) dhdsdio_is_dataok(bus) #else #define DATAOK(bus) \ @@ -505,7 +578,7 @@ dhdsdio_get_databufcnt(dhd_bus_t *bus) { (((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0)) /* Number of pkts available in dongle for data RX */ -#if defined(SWTXGLOM) || defined(BCMSDIOH_TXGLOM_EXT) +#if defined(BCMSDIOH_TXGLOM_EXT) #define DATABUFCNT(bus) dhdsdio_get_databufcnt(bus) #else #define DATABUFCNT(bus) \ @@ -631,6 +704,9 @@ static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt); static int dhdsdio_download_firmware(dhd_bus_t *bus, osl_t *osh, void *sdh); static int _dhdsdio_download_firmware(dhd_bus_t *bus); +#ifdef DHD_UCODE_DOWNLOAD +static int dhdsdio_download_ucode_file(struct dhd_bus *bus, char *ucode_path); +#endif /* DHD_UCODE_DOWNLOAD */ static int dhdsdio_download_code_file(dhd_bus_t *bus, char *image_path); static int 
dhdsdio_download_nvram(dhd_bus_t *bus); #ifdef BCMEMBEDIMAGE @@ -639,12 +715,38 @@ static int dhdsdio_download_code_array(dhd_bus_t *bus); static int dhdsdio_bussleep(dhd_bus_t *bus, bool sleep); static int dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok); static uint8 dhdsdio_sleepcsr_get(dhd_bus_t *bus); +static bool dhdsdio_dpc(dhd_bus_t *bus); +static int dhd_bcmsdh_send_buffer(void *bus, uint8 *frame, uint16 len); +static int dhdsdio_set_sdmode(dhd_bus_t *bus, int32 sd_mode); +static int dhdsdio_sdclk(dhd_bus_t *bus, bool on); +static void dhdsdio_advertise_bus_cleanup(dhd_pub_t *dhdp); +#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_DT +int dhd_get_system_rev(void); +#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_DT */ #ifdef WLMEDIA_HTSF #include extern uint32 dhd_get_htsf(void *dhd, int ifidx); #endif /* WLMEDIA_HTSF */ +#if defined(BT_OVER_SDIO) +static int extract_hex_field(char * line, uint16 start_pos, uint16 num_chars, uint16 * value); +static int read_more_btbytes(struct dhd_bus *bus, void * file, char *line, int * addr_mode, + uint16 * hi_addr, uint32 * dest_addr, uint8 *data_bytes, uint32 * num_bytes); +static int dhdsdio_download_btfw(struct dhd_bus *bus, osl_t *osh, void *sdh); +static int _dhdsdio_download_btfw(struct dhd_bus *bus); +#endif /* defined (BT_OVER_SDIO) */ + +#ifdef DHD_ULP +#include +static int dhd_bus_ulp_reinit_fw(dhd_bus_t *bus); +#endif /* DHD_ULP */ + +#ifdef DHD_WAKE_STATUS +int bcmsdh_get_total_wake(bcmsdh_info_t *bcmsdh); +int bcmsdh_set_get_wake(bcmsdh_info_t *bcmsdh, int flag); +#endif /* DHD_WAKE_STATUS */ + static void dhdsdio_tune_fifoparam(struct dhd_bus *bus) { @@ -750,7 +852,8 @@ dhdsdio_sr_cap(dhd_bus_t *bus) bool cap = FALSE; uint32 core_capext, addr, data; - if (bus->sih->chip == BCM43430_CHIP_ID) { + if (bus->sih->chip == BCM43430_CHIP_ID || + bus->sih->chip == BCM43018_CHIP_ID) { /* check if fw initialized sr engine */ addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, sr_control1); if (bcmsdh_reg_read(bus->sdh, addr, 4) != 
0) @@ -764,15 +867,14 @@ dhdsdio_sr_cap(dhd_bus_t *bus) bcmsdh_reg_write(bus->sdh, addr, 4, 3); core_capext = bcmsdh_reg_read(bus->sdh, data, 4); } else if ((bus->sih->chip == BCM4330_CHIP_ID) || - (bus->sih->chip == BCM43362_CHIP_ID)) { + (bus->sih->chip == BCM43362_CHIP_ID) || + (BCM4347_CHIP(bus->sih->chip))) { core_capext = FALSE; } else if ((bus->sih->chip == BCM4335_CHIP_ID) || (bus->sih->chip == BCM4339_CHIP_ID) || (bus->sih->chip == BCM43349_CHIP_ID) || - (bus->sih->chip == BCM4345_CHIP_ID) || - (bus->sih->chip == BCM43454_CHIP_ID) || + BCM4345_CHIP(bus->sih->chip) || (bus->sih->chip == BCM4354_CHIP_ID) || - (bus->sih->chip == BCM4356_CHIP_ID) || (bus->sih->chip == BCM4358_CHIP_ID) || (bus->sih->chip == BCM43569_CHIP_ID) || (bus->sih->chip == BCM4371_CHIP_ID) || @@ -795,10 +897,8 @@ dhdsdio_sr_cap(dhd_bus_t *bus) } else if ((bus->sih->chip == BCM4335_CHIP_ID) || (bus->sih->chip == BCM4339_CHIP_ID) || (bus->sih->chip == BCM43349_CHIP_ID) || - (bus->sih->chip == BCM4345_CHIP_ID) || - (bus->sih->chip == BCM43454_CHIP_ID) || + BCM4345_CHIP(bus->sih->chip) || (bus->sih->chip == BCM4354_CHIP_ID) || - (bus->sih->chip == BCM4356_CHIP_ID) || (bus->sih->chip == BCM4358_CHIP_ID) || (bus->sih->chip == BCM43569_CHIP_ID) || (bus->sih->chip == BCM4371_CHIP_ID) || @@ -810,10 +910,8 @@ dhdsdio_sr_cap(dhd_bus_t *bus) enabval = bcmsdh_reg_read(bus->sdh, data, 4); if ((bus->sih->chip == BCM4350_CHIP_ID) || - (bus->sih->chip == BCM4345_CHIP_ID) || - (bus->sih->chip == BCM43454_CHIP_ID) || + BCM4345_CHIP(bus->sih->chip) || (bus->sih->chip == BCM4354_CHIP_ID) || - (bus->sih->chip == BCM4356_CHIP_ID) || (bus->sih->chip == BCM4358_CHIP_ID) || (bus->sih->chip == BCM43569_CHIP_ID) || (bus->sih->chip == BCM4371_CHIP_ID)) @@ -854,6 +952,7 @@ dhdsdio_sr_init(dhd_bus_t *bus) if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2)) dhdsdio_srwar_init(bus); + if (bus->sih->chip == BCM43012_CHIP_ID) { val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL); 
val |= 1 << SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT; @@ -874,7 +973,11 @@ dhdsdio_sr_init(dhd_bus_t *bus) (SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT | SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT)); #endif /* USE_CMD14 */ - dhdsdio_devcap_set(bus, SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC); + if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID || + CHIPID(bus->sih->chip) == BCM43018_CHIP_ID || + CHIPID(bus->sih->chip) == BCM4339_CHIP_ID || + CHIPID(bus->sih->chip) == BCM43012_CHIP_ID) + dhdsdio_devcap_set(bus, SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC); if (bus->sih->chip == BCM43012_CHIP_ID) { bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, @@ -1095,7 +1198,12 @@ dhdsdio_clk_devsleep_iovar(dhd_bus_t *bus, bool on) #ifdef USE_CMD14 err = bcmsdh_sleep(bus->sdh, TRUE); #else - + if ((SLPAUTO_ENAB(bus)) && (bus->idleclock == DHD_IDLE_STOP)) { + if (sd1idle) { + /* Change to SD1 mode */ + dhdsdio_set_sdmode(bus, 1); + } + } err = dhdsdio_clk_kso_enab(bus, FALSE); if (OOB_WAKEUP_ENAB(bus)) @@ -1103,6 +1211,12 @@ dhdsdio_clk_devsleep_iovar(dhd_bus_t *bus, bool on) err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, FALSE); /* GPIO_1 is off */ } #endif /* USE_CMD14 */ + + if ((SLPAUTO_ENAB(bus)) && (bus->idleclock != DHD_IDLE_ACTIVE)) { + DHD_TRACE(("%s: Turnoff SD clk\n", __FUNCTION__)); + /* Now remove the SD clock */ + err = dhdsdio_sdclk(bus, FALSE); + } } else { /* Exit Sleep */ /* Make sure we have SD bus access */ @@ -1160,10 +1274,14 @@ dhdsdio_clk_devsleep_iovar(dhd_bus_t *bus, bool on) if (err != 0) { DHD_ERROR(("ERROR: kso set failed retry: %d\n", retry)); +#ifndef BT_OVER_SDIO err = 0; /* continue anyway */ +#endif /* BT_OVER_SDIO */ } - + if ((SLPAUTO_ENAB(bus)) && (bus->idleclock == DHD_IDLE_STOP)) { + dhdsdio_set_sdmode(bus, bus->sd_mode); + } #endif /* !USE_CMD14 */ if (err == 0) { @@ -1186,7 +1304,7 @@ dhdsdio_clk_devsleep_iovar(dhd_bus_t *bus, bool on) SPINWAIT_SLEEP(sdioh_spinwait_sleep, (((csr = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err)) & SBSDIO_HT_AVAIL) != - 
(SBSDIO_HT_AVAIL)), (10000)); + (SBSDIO_HT_AVAIL)), (DHD_WAIT_HTAVAIL)); DHD_TRACE(("%s: SBSDIO_FUNC1_CHIPCLKCSR : 0x%x\n", __FUNCTION__, csr)); if (!err && ((csr & SBSDIO_HT_AVAIL) != SBSDIO_HT_AVAIL)) { @@ -1356,6 +1474,22 @@ dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok) return BCME_OK; } +/* Change SD1/SD4 bus mode */ +static int +dhdsdio_set_sdmode(dhd_bus_t *bus, int32 sd_mode) +{ + int err; + + err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0, + &sd_mode, sizeof(sd_mode), TRUE); + if (err) { + DHD_ERROR(("%s: error changing sd_mode: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + return BCME_OK; +} + /* Change idle/active SD state */ static int dhdsdio_sdclk(dhd_bus_t *bus, bool on) @@ -1377,14 +1511,6 @@ dhdsdio_sdclk(dhd_bus_t *bus, bool on) return BCME_ERROR; } - iovalue = bus->sd_mode; - err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0, - &iovalue, sizeof(iovalue), TRUE); - if (err) { - DHD_ERROR(("%s: error changing sd_mode: %d\n", - __FUNCTION__, err)); - return BCME_ERROR; - } } else if (bus->idleclock != DHD_IDLE_ACTIVE) { /* Restore clock speed */ iovalue = bus->sd_divisor; @@ -1405,18 +1531,6 @@ dhdsdio_sdclk(dhd_bus_t *bus, bool on) return BCME_ERROR; } if (bus->idleclock == DHD_IDLE_STOP) { - if (sd1idle) { - /* Change to SD1 mode and turn off clock */ - iovalue = 1; - err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0, - &iovalue, sizeof(iovalue), TRUE); - if (err) { - DHD_ERROR(("%s: error changing sd_clock: %d\n", - __FUNCTION__, err)); - return BCME_ERROR; - } - } - iovalue = 0; err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0, &iovalue, sizeof(iovalue), TRUE); @@ -1482,6 +1596,27 @@ dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok) break; case CLK_SDONLY: + +#ifdef BT_OVER_SDIO + /* + * If the request is to switch off Back plane clock, + * confirm that BT is inactive before doing so. 
+ * If this call had come from Non Watchdog context any way + * the Watchdog would switch off the clock again when + * nothing is to be done & Bt has finished using the bus. + */ + if (bus->bt_use_count != 0) { + DHD_INFO(("%s(): Req CLK_SDONLY, BT is active %d not switching off \r\n", + __FUNCTION__, bus->bt_use_count)); + ret = BCME_OK; + dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); + break; + } + + DHD_INFO(("%s(): Request CLK_NONE BT is NOT active switching off \r\n", + __FUNCTION__)); +#endif /* BT_OVER_SDIO */ + /* Remove HT request, or bring up SD clock */ if (bus->clkstate == CLK_NONE) ret = dhdsdio_sdclk(bus, TRUE); @@ -1496,6 +1631,26 @@ dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok) break; case CLK_NONE: + +#ifdef BT_OVER_SDIO + /* + * If the request is to switch off Back plane clock, + * confirm that BT is inactive before doing so. + * If this call had come from Non Watchdog context any way + * the Watchdog would switch off the clock again when + * nothing is to be done & Bt has finished using the bus. + */ + if (bus->bt_use_count != 0) { + DHD_INFO(("%s(): Request CLK_NONE BT is active %d not switching off \r\n", + __FUNCTION__, bus->bt_use_count)); + ret = BCME_OK; + break; + } + + DHD_INFO(("%s(): Request CLK_NONE BT is NOT active switching off \r\n", + __FUNCTION__)); +#endif /* BT_OVER_SDIO */ + /* Make sure to remove HT request */ if (bus->clkstate == CLK_AVAIL) ret = dhdsdio_htclk(bus, FALSE, FALSE); @@ -1524,7 +1679,7 @@ dhdsdio_bussleep(dhd_bus_t *bus, bool sleep) uint retries = 0; DHD_INFO(("dhdsdio_bussleep: request %s (currently %s)\n", - (sleep ? "SLEEP" : "WAKE"), + (sleep ? "SLEEP" : "WAKE"), (bus->sleeping ? "SLEEP" : "WAKE"))); if (bus->dhd->hang_was_sent) @@ -1545,6 +1700,26 @@ dhdsdio_bussleep(dhd_bus_t *bus, bool sleep) #endif /* DHD_USE_IDLECOUNT */ return BCME_BUSY; +#ifdef BT_OVER_SDIO + /* + * The following is the assumption based on which the hook is placed. 
+ * From WLAN driver, either from the active contexts OR from the Watchdog contexts + * we will be attempting to Go to Sleep. AT that moment if we see that BT is still + * actively using the bus, we will return BCME_BUSY from here, but the bus->sleeping + * state would not have changed. So the caller can then schedule the Watchdog again + * which will come and attempt to sleep at a later point. + * + * In case if BT is the only one and is the last user, we don't switch off the clock + * immediately, we allow the WLAN to decide when to sleep i.e from the watchdog. + * Now if the watchdog becomes active and attempts to switch off the clock and if + * another WLAN context is active they are any way serialized with sdlock. + */ + if (bus->bt_use_count != 0) { + DHD_INFO(("%s(): Cannot sleep BT is active \r\n", __FUNCTION__)); + return BCME_BUSY; + } +#endif /* !BT_OVER_SDIO */ + if (!SLPAUTO_ENAB(bus)) { /* Disable SDIO interrupts (no longer interested) */ @@ -1565,11 +1740,8 @@ dhdsdio_bussleep(dhd_bus_t *bus, bool sleep) SBSDIO_FORCE_HW_CLKREQ_OFF, NULL); /* Isolate the bus */ - if (bus->sih->chip != BCM4329_CHIP_ID && - bus->sih->chip != BCM4319_CHIP_ID) { - bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, SBSDIO_DEVCTL_PADS_ISO, NULL); - } } else { /* Leave interrupts enabled since device can exit sleep and * interrupt host @@ -1613,6 +1785,21 @@ dhdsdio_bussleep(dhd_bus_t *bus, bool sleep) } } else { err = dhdsdio_clk_devsleep_iovar(bus, FALSE /* wake */); +#ifdef BT_OVER_SDIO + if (err < 0) { + struct net_device *net = NULL; + dhd_pub_t *dhd = bus->dhd; + net = dhd_idx2net(dhd, 0); + if (net != NULL) { + DHD_ERROR(("<<<<<< WIFI HANG by KSO Enabled failure\n")); + dhd_os_sdunlock(dhd); + net_os_send_hang_message(net); + dhd_os_sdlock(dhd); + } else { + DHD_ERROR(("<<<<< WIFI HANG Fail because net is NULL\n")); + } + } +#endif /* BT_OVER_SDIO */ } if (err == 0) { @@ -1624,6 +1811,85 @@ 
dhdsdio_bussleep(dhd_bus_t *bus, bool sleep) return err; } +#ifdef BT_OVER_SDIO +/* + * Call this function to Get the Clock running. + * Assumes that the caller holds the sdlock. + * bus - Pointer to the dhd_bus handle + * can_wait - TRUE if the caller can wait until the clock becomes ready + * FALSE if the caller cannot wait + */ +int __dhdsdio_clk_enable(struct dhd_bus *bus, bus_owner_t owner, int can_wait) +{ + int ret = BCME_ERROR; + + BCM_REFERENCE(owner); + + bus->bt_use_count++; + + /* + * We can call BUS_WAKE, clkctl multiple times, both of the items + * have states and if its already ON, no new configuration is done + */ + + /* Wake up the Dongle FW from SR */ + BUS_WAKE(bus); + + /* + * Make sure back plane ht clk is on + * CLK_AVAIL - Turn On both SD & HT clock + */ + ret = dhdsdio_clkctl(bus, CLK_AVAIL, can_wait); + + DHD_INFO(("%s():bt_use_count %d \r\n", __FUNCTION__, + bus->bt_use_count)); + return ret; +} + +/* + * Call this function to relinquish the Clock. + * Assumes that the caller holds the sdlock. + * bus - Pointer to the dhd_bus handle + * can_wait - TRUE if the caller can wait until the clock becomes ready + * FALSE if the caller cannot wait + */ +int __dhdsdio_clk_disable(struct dhd_bus *bus, bus_owner_t owner, int can_wait) +{ + int ret = BCME_ERROR; + + BCM_REFERENCE(owner); + BCM_REFERENCE(can_wait); + + if (bus->bt_use_count == 0) { + DHD_ERROR(("%s(): Clocks are already turned off \r\n", + __FUNCTION__)); + return ret; + } + + bus->bt_use_count--; + + /* + * When the SDIO Bus is shared between BT & WLAN, we turn Off the clock + * once the last user has relinqushed the same. But there are two schemes + * in that too. We consider WLAN as the bus master (even if its not + * active). Even when the WLAN is OFF the DHD Watchdog is active. + * So this Bus Watchdog is the context whill put the Bus to sleep. 
+ * Refer dhd_bus_watchdog function + */ + + ret = BCME_OK; + DHD_INFO(("%s():bt_use_count %d \r\n", __FUNCTION__, + bus->bt_use_count)); + return ret; +} + +void dhdsdio_reset_bt_use_count(struct dhd_bus *bus) +{ + /* reset bt use count */ + bus->bt_use_count = 0; +} +#endif /* BT_OVER_SDIO */ + int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size) { int func_blk_size = function_num; @@ -1807,9 +2073,11 @@ dhd_bus_txdata(struct dhd_bus *bus, void *pkt) else bus->dhd->dstats.tx_bytes += datalen; - if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched && + NO_OTHER_ACTIVE_BUS_USER(bus)) { bus->activity = FALSE; - dhdsdio_clkctl(bus, CLK_NONE, TRUE); + dhdsdio_bussleep(bus, TRUE); + dhdsdio_clkctl(bus, CLK_NONE, FALSE); } dhd_os_sdunlock(bus->dhd); @@ -1918,7 +2186,7 @@ static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txs /* align the data pointer, allocate a new packet if there is not enough space (new * packet data pointer will be aligned thus no padding will be needed) */ - head_padding = (ulong)frame % DHD_SDALIGN; + head_padding = (uintptr)frame % DHD_SDALIGN; if (PKTHEADROOM(osh, pkt) < head_padding) { head_padding = 0; alloc_new_pkt = TRUE; @@ -1997,8 +2265,7 @@ static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txs (cur_total_len > (int)bus->blocksize || prev_chain_total_len > 0)) { modulo = cur_total_len % bus->blocksize; tail_padding = modulo > 0 ? (bus->blocksize - modulo) : 0; - } - else { + } else { modulo = pkt_len % DHD_SDALIGN; tail_padding = modulo > 0 ? 
(DHD_SDALIGN - modulo) : 0; } @@ -2159,610 +2426,6 @@ static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt) return BCME_OK; } -#if defined(SWTXGLOM) -static int -dhd_bcmsdh_send_swtxglom_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes, - void *pkt, bcmsdh_cmplt_fn_t complete, void *handle, int max_retry) -{ - int ret; - int i = 0; - int retries = 0; - bcmsdh_info_t *sdh; - - if (!KSO_ENAB(bus)) { - DHD_ERROR(("%s: Device asleep\n", __FUNCTION__)); - return BCME_NODEVICE; - } - - sdh = bus->sdh; - do { - ret = bcmsdh_send_swtxglom_buf(bus->sdh, addr, fn, flags, buf, nbytes, - pkt, complete, handle); - - bus->f2txdata++; - ASSERT(ret != BCME_PENDING); - - if (ret == BCME_NODEVICE) { - DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__)); - } else if (ret < 0) { - /* On failure, abort the command and terminate the frame */ - DHD_ERROR(("%s: sdio error %d, abort command and terminate frame.\n", - __FUNCTION__, ret)); - bus->tx_sderrs++; - bus->f1regdata++; - bus->dhd->tx_errors++; - bcmsdh_abort(sdh, SDIO_FUNC_2); - bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, - SFC_WF_TERM, NULL); - for (i = 0; i < READ_FRM_CNT_RETRIES; i++) { - uint8 hi, lo; - hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCHI, - NULL); - lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCLO, - NULL); - bus->f1regdata += 2; - if ((hi == 0) && (lo == 0)) - break; - } - } - if (ret == 0) { -#ifdef BCMSDIOH_TXGLOM - if (bus->txglom_enable) { - bus->tx_seq = (bus->tx_seq + bus->txglom_cnt) % SDPCM_SEQUENCE_WRAP; - } else -#endif - { - bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; - } - } - } while ((ret < 0) && retrydata && ++retries < max_retry); - - return ret; -} - -/* Writes a HW/SW header into the packet and sends it. 
*/ -/* Assumes: (a) header space already there, (b) caller holds lock */ -static int -dhdsdio_txpkt_swtxglom(dhd_bus_t *bus, void *pkt, uint chan, bool free_pkt, bool queue_only) -{ - int ret; - osl_t *osh; - uint8 *frame; - uint16 len, pad1 = 0, act_len = 0; - uint32 swheader; - uint32 real_pad = 0; - bcmsdh_info_t *sdh; - void *new; - int pkt_cnt; -#ifdef BCMSDIOH_TXGLOM - uint8 *frame_tmp; -#endif -#ifdef WLMEDIA_HTSF - char *p; - htsfts_t *htsf_ts; -#endif - - DHD_TRACE(("%s: Enter\n", __FUNCTION__)); - - sdh = bus->sdh; - osh = bus->dhd->osh; - -#ifdef DHDTCPACK_SUPPRESS - if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) { - DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n", - __FUNCTION__, __LINE__)); - dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF); - } -#endif /* DHDTCPACK_SUPPRESS */ - - /* Add space for the header */ - PKTPUSH(osh, pkt, SDPCM_HDRLEN_TXGLOM); - ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2)); - - if (bus->dhd->dongle_reset) { - ret = BCME_NOTREADY; - goto done; - } - - frame = (uint8*)PKTDATA(osh, pkt); - -#ifdef WLMEDIA_HTSF - if (PKTLEN(osh, pkt) >= 100) { - p = PKTDATA(osh, pkt); - htsf_ts = (htsfts_t*) (p + HTSF_HOSTOFFSET + 12); - if (htsf_ts->magic == HTSFMAGIC) { - htsf_ts->c20 = get_cycles(); - htsf_ts->t20 = dhd_get_htsf(bus->dhd->info, 0); - } - } -#endif /* WLMEDIA_HTSF */ - -#ifdef PKT_STATICS - len = (uint16)PKTLEN(osh, pkt); - switch(chan) { - case SDPCM_CONTROL_CHANNEL: - tx_statics.ctrl_count++; - tx_statics.ctrl_size += len; - break; - case SDPCM_DATA_CHANNEL: - tx_statics.data_count++; - tx_statics.data_size += len; - break; - case SDPCM_GLOM_CHANNEL: - tx_statics.glom_count++; - tx_statics.glom_size += len; - break; - case SDPCM_EVENT_CHANNEL: - tx_statics.event_count++; - tx_statics.event_size += len; - break; - case SDPCM_TEST_CHANNEL: - tx_statics.test_count++; - tx_statics.test_size += len; - break; - - default: - break; - } -#endif /* PKT_STATICS */ - - /* Add alignment padding, allocate new 
packet if needed */ - if ((pad1 = ((uintptr)frame % DHD_SDALIGN))) { - if (PKTHEADROOM(osh, pkt) < pad1) { - DHD_INFO(("%s: insufficient headroom %d for %d pad1\n", - __FUNCTION__, (int)PKTHEADROOM(osh, pkt), pad1)); - bus->dhd->tx_realloc++; - new = PKTGET(osh, (PKTLEN(osh, pkt) + DHD_SDALIGN), TRUE); - if (!new) { - DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n", - __FUNCTION__, PKTLEN(osh, pkt) + DHD_SDALIGN)); - ret = BCME_NOMEM; - goto done; - } - - PKTALIGN(osh, new, PKTLEN(osh, pkt), DHD_SDALIGN); - bcopy(PKTDATA(osh, pkt), PKTDATA(osh, new), PKTLEN(osh, pkt)); - if (free_pkt) - PKTFREE(osh, pkt, TRUE); - /* free the pkt if canned one is not used */ - free_pkt = TRUE; - pkt = new; - frame = (uint8*)PKTDATA(osh, pkt); - ASSERT(((uintptr)frame % DHD_SDALIGN) == 0); - pad1 = 0; - } else { - PKTPUSH(osh, pkt, pad1); - frame = (uint8*)PKTDATA(osh, pkt); - - ASSERT((pad1 + SDPCM_HDRLEN_TXGLOM) <= (int) PKTLEN(osh, pkt)); - bzero(frame, pad1 + SDPCM_HDRLEN_TXGLOM); - } - } - ASSERT(pad1 < DHD_SDALIGN); - - /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */ - len = (uint16)PKTLEN(osh, pkt); - *(uint16*)frame = htol16(len); - *(((uint16*)frame) + 1) = htol16(~len); - -#ifdef BCMSDIOH_TXGLOM - if (bus->txglom_enable) { - uint32 hwheader1 = 0, hwheader2 = 0; - act_len = len; - - /* Software tag: channel, sequence number, data offset */ - swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | - ((bus->tx_seq + bus->txglom_cnt) % SDPCM_SEQUENCE_WRAP) | - (((pad1 + SDPCM_HDRLEN_TXGLOM) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK); - htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN); - htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN + sizeof(swheader)); - - if (queue_only) { - if (bus->dhd->conf->txglom_ext) { - if(bus->txglom_cnt == 0) { - // first pkt, add pad to bucket size - recv offset - len = bus->dhd->conf->txglom_bucket_size - TXGLOM_RECV_OFFSET; - } else { - // add pad to bucket size - 
len = bus->dhd->conf->txglom_bucket_size; - } - } else { - uint8 alignment = ALIGNMENT; - if (forcealign && (len & (alignment - 1))) - len = ROUNDUP(len, alignment); - } - /* Hardware extention tag */ - /* 2byte frame length, 1byte-, 1byte frame flag, - * 2byte-hdrlength, 2byte padlenght - */ - hwheader1 = (act_len - SDPCM_FRAMETAG_LEN) | (0 << 24); - hwheader2 = (len - act_len) << 16; - htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN); - htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4); - real_pad = len - act_len; - if (PKTTAILROOM(osh, pkt) < real_pad) { - DHD_INFO(("%s 1: insufficient tailroom %d for %d real_pad\n", - __FUNCTION__, (int)PKTTAILROOM(osh, pkt), real_pad)); - if (PKTPADTAILROOM(osh, pkt, real_pad)) { - DHD_ERROR(("CHK1: padding error size %d\n", real_pad)); - ret = BCME_NOMEM; - goto done; - } -#ifndef BCMLXSDMMC - else - PKTSETLEN(osh, pkt, act_len); -#endif - } -#ifdef BCMLXSDMMC - PKTSETLEN(osh, pkt, len); -#endif /* BCMLXSDMMC */ - /* Post the frame pointer to sdio glom array */ - bcmsdh_glom_post(bus->sdh, frame, pkt, len); - /* Save the pkt pointer in bus glom array */ - bus->glom_pkt_arr[bus->txglom_cnt] = pkt; - bus->txglom_total_len += len; - bus->txglom_cnt++; - return BCME_OK; - } else { - /* Raise len to next SDIO block to eliminate tail command */ - if (bus->roundup && bus->blocksize && - ((bus->txglom_total_len + len) > bus->blocksize)) { - uint16 pad2 = bus->blocksize - - ((bus->txglom_total_len + len) % bus->blocksize); - if ((pad2 <= bus->roundup) && (pad2 < bus->blocksize)) { - len += pad2; - } else { - } - } else if ((bus->txglom_total_len + len) % DHD_SDALIGN) { - len += DHD_SDALIGN - - ((bus->txglom_total_len + len) % DHD_SDALIGN); - } - if (forcealign && (len & (ALIGNMENT - 1))) { - len = ROUNDUP(len, ALIGNMENT); - } - - /* Hardware extention tag */ - /* 2byte frame length, 1byte-, 1byte frame flag, - * 2byte-hdrlength, 2byte padlenght - */ - if (bus->dhd->conf->txglom_ext) { - // copy way, the last packet pad2 
is set to 0 it will be dropped by HW - hwheader1 = (act_len - SDPCM_FRAMETAG_LEN) | (1 << 24); - hwheader2 = 0; - htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN); - htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4); - } else { - hwheader1 = (act_len - SDPCM_FRAMETAG_LEN) | (1 << 24); - hwheader2 = (len - act_len) << 16; - htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN); - htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4); - } - real_pad = len - act_len; - if (PKTTAILROOM(osh, pkt) < real_pad) { - DHD_INFO(("%s 2: insufficient tailroom %d" - " for %d real_pad\n", - __FUNCTION__, (int)PKTTAILROOM(osh, pkt), real_pad)); - if (PKTPADTAILROOM(osh, pkt, real_pad)) { - DHD_ERROR(("CHK2: padding error size %d." - " %d more pkts are discarded together.\n", - real_pad, bus->txglom_cnt)); - /* Save the pkt pointer in bus glom array - * Otherwise, this last pkt will not be - * cleaned under "goto done" - */ - bus->glom_pkt_arr[bus->txglom_cnt] = pkt; - bus->txglom_cnt++; - bus->txglom_total_len += len; - ret = BCME_NOMEM; - goto done; - } -#ifndef BCMLXSDMMC - else - PKTSETLEN(osh, pkt, act_len); -#endif - } -#ifdef BCMLXSDMMC - PKTSETLEN(osh, pkt, len); -#endif /* BCMLXSDMMC */ - - /* Post the frame pointer to sdio glom array */ - bcmsdh_glom_post(bus->sdh, frame, pkt, len); - /* Save the pkt pointer in bus glom array */ - bus->glom_pkt_arr[bus->txglom_cnt] = pkt; - bus->txglom_cnt++; - if (bus->dhd->conf->txglom_ext) - //copy way, the last buffer padding is not need add to len - bus->txglom_total_len += act_len; - else - bus->txglom_total_len += len; - - /* Update the total length on the first pkt */ - frame_tmp = (uint8*)PKTDATA(osh, bus->glom_pkt_arr[0]); - *(uint16*)frame_tmp = htol16(bus->txglom_total_len); - *(((uint16*)frame_tmp) + 1) = htol16(~bus->txglom_total_len); - } - } else -#endif /* BCMSDIOH_TXGLOM */ - { - act_len = len; - /* Software tag: channel, sequence number, data offset */ - swheader = ((chan << SDPCM_CHANNEL_SHIFT) & 
SDPCM_CHANNEL_MASK) | bus->tx_seq | - (((pad1 + SDPCM_HDRLEN_TXGLOM) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK); - htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN); - htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader)); - -#ifdef DHD_DEBUG - if (PKTPRIO(pkt) < ARRAYSIZE(tx_packets)) { - tx_packets[PKTPRIO(pkt)]++; - } - if (DHD_BYTES_ON() && - (((DHD_CTL_ON() && (chan == SDPCM_CONTROL_CHANNEL)) || - (DHD_DATA_ON() && (chan != SDPCM_CONTROL_CHANNEL))))) { - prhex("Tx Frame", frame, len); - } else if (DHD_HDRS_ON()) { - prhex("TxHdr", frame, MIN(len, 16)); - } -#endif - - /* Raise len to next SDIO block to eliminate tail command */ - if (bus->roundup && bus->blocksize && (len > bus->blocksize)) { - uint16 pad2 = bus->blocksize - (len % bus->blocksize); - if ((pad2 <= bus->roundup) && (pad2 < bus->blocksize)) -#ifdef NOTUSED - if (pad2 <= PKTTAILROOM(osh, pkt)) -#endif /* NOTUSED */ - len += pad2; - } else if (len % DHD_SDALIGN) { - len += DHD_SDALIGN - (len % DHD_SDALIGN); - } - - /* Some controllers have trouble with odd bytes -- round to even */ - if (forcealign && (len & (ALIGNMENT - 1))) { -#ifdef NOTUSED - if (PKTTAILROOM(osh, pkt)) -#endif - len = ROUNDUP(len, ALIGNMENT); -#ifdef NOTUSED - else - DHD_ERROR(("%s: sending unrounded %d-byte packet\n", __FUNCTION__, len)); -#endif - } - real_pad = len - act_len; - if (PKTTAILROOM(osh, pkt) < real_pad) { - DHD_INFO(("%s 3: insufficient tailroom %d for %d real_pad\n", - __FUNCTION__, (int)PKTTAILROOM(osh, pkt), real_pad)); - if (PKTPADTAILROOM(osh, pkt, real_pad)) { - DHD_ERROR(("CHK3: padding error size %d\n", real_pad)); - ret = BCME_NOMEM; - goto done; - } -#ifndef BCMLXSDMMC - else - PKTSETLEN(osh, pkt, act_len); -#endif - } -#ifdef BCMLXSDMMC - PKTSETLEN(osh, pkt, len); -#endif /* BCMLXSDMMC */ - } -#ifdef DHD_DEBUG - if (PKTPRIO(pkt) < ARRAYSIZE(tx_packets)) { - tx_packets[PKTPRIO(pkt)]++; - } -#endif - ret = dhd_bcmsdh_send_swtxglom_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, - 
frame, len, pkt, NULL, NULL, TXRETRIES); - -done: - -#ifdef BCMSDIOH_TXGLOM - if (bus->txglom_enable && !queue_only) { - bcmsdh_glom_clear(bus->sdh); - pkt_cnt = bus->txglom_cnt; - } else -#endif - { - pkt_cnt = 1; - } - /* restore pkt buffer pointer before calling tx complete routine */ - while (pkt_cnt) { -#ifdef BCMSDIOH_TXGLOM - uint32 doff; - if (bus->txglom_enable) { -#ifdef BCMLXSDMMC - uint32 pad2 = 0; -#endif /* BCMLXSDMMC */ - if (!queue_only) - pkt = bus->glom_pkt_arr[bus->txglom_cnt - pkt_cnt]; - - frame = (uint8*)PKTDATA(osh, pkt); - doff = ltoh32_ua(frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN); - doff = (doff & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT; -#ifdef BCMLXSDMMC - pad2 = ltoh32_ua(frame + SDPCM_FRAMETAG_LEN + 4) >> 16; - PKTSETLEN(osh, pkt, PKTLEN(osh, pkt) - pad2); -#endif /* BCMLXSDMMC */ - PKTPULL(osh, pkt, doff); - } else -#endif /* BCMSDIOH_TXGLOM */ - { -#ifdef BCMLXSDMMC - if (act_len > 0) - PKTSETLEN(osh, pkt, act_len); -#endif /* BCMLXSDMMC */ - PKTPULL(osh, pkt, SDPCM_HDRLEN_TXGLOM + pad1); - } -#ifdef PROP_TXSTATUS - if (bus->dhd->wlfc_state) { - dhd_os_sdunlock(bus->dhd); - dhd_wlfc_txcomplete(bus->dhd, pkt, ret == 0); - dhd_os_sdlock(bus->dhd); - } else { -#endif /* PROP_TXSTATUS */ -#ifdef SDTEST - if (chan != SDPCM_TEST_CHANNEL) { - dhd_txcomplete(bus->dhd, pkt, ret != 0); - } -#else /* SDTEST */ - dhd_txcomplete(bus->dhd, pkt, ret != 0); -#endif /* SDTEST */ - if (free_pkt) - PKTFREE(osh, pkt, TRUE); -#ifdef PROP_TXSTATUS - } -#endif - pkt_cnt--; - } - -#ifdef BCMSDIOH_TXGLOM - /* Reset the glom array */ - if (bus->txglom_enable && !queue_only) { - bus->txglom_cnt = 0; - bus->txglom_total_len = 0; - } -#endif - return ret; -} - -static uint -dhdsdio_sendfromq_swtxglom(dhd_bus_t *bus, uint maxframes) -{ - void *pkt; - uint32 intstatus = 0; - uint retries = 0; - int ret = 0, prec_out; - uint cnt = 0; - uint datalen; - uint8 tx_prec_map; - uint16 txpktqlen = 0; -#ifdef BCMSDIOH_TXGLOM - uint i; - uint8 txglom_cnt; -#endif - - 
dhd_pub_t *dhd = bus->dhd; - sdpcmd_regs_t *regs = bus->regs; - - DHD_TRACE(("%s: Enter\n", __FUNCTION__)); - - if (!KSO_ENAB(bus)) { - DHD_ERROR(("%s: Device asleep\n", __FUNCTION__)); - return BCME_NODEVICE; - } - - tx_prec_map = ~bus->flowcontrol; - /* Send frames until the limit or some other event */ - for (cnt = 0; (cnt < maxframes) && DATAOK(bus); cnt++) { -#ifdef BCMSDIOH_TXGLOM - if (bus->txglom_enable) { - void *pkttable[SDPCM_MAXGLOM_SIZE]; - dhd_os_sdlock_txq(bus->dhd); - txglom_cnt = MIN(DATABUFCNT(bus), bus->txglomsize); - txglom_cnt = MIN(txglom_cnt, pktq_mlen(&bus->txq, tx_prec_map)); - txglom_cnt = MIN(txglom_cnt, maxframes-cnt); - - /* Limiting the size to 2pkts in case of copy */ - if (bus->dhd->conf->txglom_ext) - txglom_cnt = MIN(txglom_cnt, SDPCM_MAXGLOM_SIZE); - else - txglom_cnt = MIN(txglom_cnt, 10); - - for (i = 0; i < txglom_cnt; i++) - pkttable[i] = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out); - - txpktqlen = pktq_len(&bus->txq); - dhd_os_sdunlock_txq(bus->dhd); - - if (txglom_cnt == 0) - break; - datalen = 0; - -#ifdef PKT_STATICS - if (txglom_cnt) { - tx_statics.glom_cnt[txglom_cnt-1]++; - if (txglom_cnt > tx_statics.glom_max) - tx_statics.glom_max = txglom_cnt; - } -#endif - for (i = 0; i < txglom_cnt; i++) { - uint datalen_tmp = 0; - - if ((pkt = pkttable[i]) == NULL) { - /* This case should not happen */ - DHD_ERROR(("No pkts in the queue for glomming\n")); - break; - } - - datalen_tmp = (PKTLEN(bus->dhd->osh, pkt) - SDPCM_HDRLEN_TXGLOM); - -#ifndef SDTEST - ret = dhdsdio_txpkt_swtxglom(bus, - pkt, - SDPCM_DATA_CHANNEL, - TRUE, - (i == (txglom_cnt-1))? FALSE: TRUE); -#else - ret = dhdsdio_txpkt_swtxglom(bus, - pkt, - (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL), - TRUE, - (i == (txglom_cnt-1))? 
FALSE: TRUE); -#endif - if (ret == BCME_OK) - datalen += datalen_tmp; - } - cnt += i-1; - } else -#endif /* BCMSDIOH_TXGLOM */ - { - dhd_os_sdlock_txq(bus->dhd); - if ((pkt = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out)) == NULL) { - txpktqlen = pktq_len(&bus->txq); - dhd_os_sdunlock_txq(bus->dhd); - break; - } - txpktqlen = pktq_len(&bus->txq); - dhd_os_sdunlock_txq(bus->dhd); - datalen = PKTLEN(bus->dhd->osh, pkt) - SDPCM_HDRLEN_TXGLOM; - -#ifndef SDTEST - ret = dhdsdio_txpkt_swtxglom(bus, pkt, SDPCM_DATA_CHANNEL, TRUE, FALSE); -#else - ret = dhdsdio_txpkt_swtxglom(bus, - pkt, - (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL), - TRUE, - FALSE); -#endif - } - - if (ret) - bus->dhd->tx_errors++; - else - bus->dhd->dstats.tx_bytes += datalen; - - /* In poll mode, need to check for other events */ - if (!bus->intr && cnt) - { - /* Check device status, signal pending interrupt */ - R_SDREG(intstatus, ®s->intstatus, retries); - bus->f2txdata++; - if (bcmsdh_regfail(bus->sdh)) - break; - if (intstatus & bus->hostintmask) - bus->ipend = TRUE; - } - } - - /* Deflow-control stack if needed */ - if (dhd_doflow && dhd->up && (dhd->busstate == DHD_BUS_DATA) && - dhd->txoff && (txpktqlen < FCLOW)) - dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF); - - return cnt; -} -#endif - static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt) { int i; @@ -2782,6 +2445,9 @@ static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bo if (bus->dhd->dongle_reset) return BCME_NOTREADY; + if (num_pkt <= 0) + return BCME_BADARG; + sdh = bus->sdh; osh = bus->dhd->osh; /* init new_pkts[0] to make some compiler happy, not necessary as we check new_pkt_num */ @@ -2840,12 +2506,6 @@ static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bo * so it will take the aligned length and buffer pointer. */ pkt_chain = PKTNEXT(osh, head_pkt) ? 
head_pkt : NULL; -#if defined(SWTXGLOM) - if (bus->dhd->conf->swtxglom) - ret = dhd_bcmsdh_send_swtxglom_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, - PKTDATA(osh, head_pkt), total_len, pkt_chain, NULL, NULL, TXRETRIES); - else -#endif ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, PKTDATA(osh, head_pkt), total_len, pkt_chain, NULL, NULL, TXRETRIES); if (ret == BCME_OK) @@ -2902,6 +2562,10 @@ dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes) uint datalen = 0; dhd_pub_t *dhd = bus->dhd; sdpcmd_regs_t *regs = bus->regs; +#ifdef DHD_LOSSLESS_ROAMING + uint8 *pktdata; + struct ether_header *eh; +#endif /* DHD_LOSSLESS_ROAMING */ DHD_TRACE(("%s: Enter\n", __FUNCTION__)); @@ -2914,7 +2578,7 @@ dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes) tx_prec_map = ~bus->flowcontrol; #ifdef DHD_LOSSLESS_ROAMING tx_prec_map &= dhd->dequeue_prec_map; -#endif +#endif /* DHD_LOSSLESS_ROAMING */ for (cnt = 0; (cnt < maxframes) && DATAOK(bus);) { int i; int num_pkt = 1; @@ -2941,6 +2605,22 @@ dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes) ASSERT(0); break; } +#ifdef DHD_LOSSLESS_ROAMING + pktdata = (uint8 *)PKTDATA(osh, pkts[i]); +#ifdef BDC + /* Skip BDC header */ + pktdata += BDC_HEADER_LEN + ((struct bdc_header *)pktdata)->dataOffset; +#endif + eh = (struct ether_header *)pktdata; + if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) { + uint8 prio = (uint8)PKTPRIO(pkts[i]); + + /* Restore to original priority for 802.1X packet */ + if (prio == PRIO_8021D_NC) { + PKTSETPRIO(pkts[i], dhd->prio_8021x); + } + } +#endif /* DHD_LOSSLESS_ROAMING */ PKTORPHAN(pkts[i], bus->dhd->conf->tsq); datalen += PKTLEN(osh, pkts[i]); } @@ -3013,13 +2693,6 @@ dhdsdio_sendpendctl(dhd_bus_t *bus) *frame_seq = bus->tx_seq; } -#if defined(SWTXGLOM) - if (bus->dhd->conf->swtxglom) - ret = dhd_bcmsdh_send_swtxglom_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, - (uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len, - NULL, NULL, NULL, 1); - else -#endif ret = 
dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, (uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len, NULL, NULL, NULL, 1); @@ -3037,10 +2710,10 @@ dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen) uint8 *frame; uint16 len; uint32 swheader; - bcmsdh_info_t *sdh = bus->sdh; uint8 doff = 0; int ret = -1; uint8 sdpcm_hdrlen = bus->txglom_enable ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN; + int cnt = 0; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); @@ -3080,7 +2753,17 @@ dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen) /* Need to lock here to protect txseq and SDIO tx calls */ +retry: dhd_os_sdlock(bus->dhd); + if (cnt < bus->dhd->conf->txctl_tmo_fix && !TXCTLOK(bus)) { + cnt++; + dhd_os_sdunlock(bus->dhd); + OSL_SLEEP(1); + if (cnt >= (bus->dhd->conf->txctl_tmo_fix)) + DHD_ERROR(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d, last retry cnt %d\n", + __FUNCTION__, bus->tx_max, bus->tx_seq, cnt)); + goto retry; + } BUS_WAKE(bus); @@ -3115,7 +2798,15 @@ dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen) htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN); htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader)); } - if (!TXCTLOK(bus)) { + +#ifdef DHD_ULP + dhd_ulp_set_path(bus->dhd, DHD_ULP_TX_CTRL); + + if (!TXCTLOK(bus) || !dhd_ulp_f2_ready(bus->dhd, bus->sdh)) +#else + if (!TXCTLOK(bus)) +#endif + { DHD_INFO(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d\n", __FUNCTION__, bus->tx_max, bus->tx_seq)); bus->ctrl_frame_stat = TRUE; @@ -3148,6 +2839,16 @@ dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen) DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d\n", __FUNCTION__, bus->dhd->txcnt_timeout)); } +#ifdef DHD_FW_COREDUMP + /* Collect socram dump */ + if (bus->dhd->memdump_enabled) { + /* collect core dump */ + bus->dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT_TX; + dhd_os_sdunlock(bus->dhd); + dhd_bus_mem_dump(bus->dhd); + dhd_os_sdlock(bus->dhd); + } +#endif /* DHD_FW_COREDUMP */ ret = 
-1; bus->ctrl_frame_stat = FALSE; goto done; @@ -3169,23 +2870,19 @@ dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen) tx_statics.ctrl_count++; tx_statics.ctrl_size += len; #endif -#if defined(SWTXGLOM) - if (bus->dhd->conf->swtxglom) - ret = dhd_bcmsdh_send_swtxglom_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, - frame, len, NULL, NULL, NULL, TXRETRIES); - else -#endif - ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, - frame, len, NULL, NULL, NULL, TXRETRIES); - if (ret == BCME_OK) - bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; + ret = dhd_bcmsdh_send_buffer(bus, frame, len); } bus->ctrl_frame_stat = FALSE; +#ifdef DHD_ULP + dhd_ulp_enable_cached_sbwad(bus->dhd, bus->sdh); +#endif /* DHD_ULP */ done: - if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched && + NO_OTHER_ACTIVE_BUS_USER(bus)) { bus->activity = FALSE; - dhdsdio_clkctl(bus, CLK_NONE, TRUE); + dhdsdio_bussleep(bus, TRUE); + dhdsdio_clkctl(bus, CLK_NONE, FALSE); } dhd_os_sdunlock(bus->dhd); @@ -3195,8 +2892,9 @@ done: else bus->dhd->tx_ctlpkts++; - if (bus->dhd->txcnt_timeout >= MAX_CNTL_TX_TIMEOUT) + if (bus->dhd->txcnt_timeout >= MAX_CNTL_TX_TIMEOUT) { return -ETIMEDOUT; + } if (ret == BCME_NODEVICE) err_nodevice++; @@ -3229,40 +2927,66 @@ dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen) if (rxlen) { DHD_CTL(("%s: resumed on rxctl frame, got %d expected %d\n", __FUNCTION__, rxlen, msglen)); - } else if (timeleft == 0) { -#ifdef DHD_DEBUG - uint32 status, retry = 0; - R_SDREG(status, &bus->regs->intstatus, retry); - DHD_ERROR(("%s: resumed on timeout, INT status=0x%08X\n", - __FUNCTION__, status)); -#else - DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__)); -#endif /* DHD_DEBUG */ - dhd_os_sdlock(bus->dhd); - dhdsdio_checkdied(bus, NULL, 0); - dhd_os_sdunlock(bus->dhd); } else { - DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__)); - 
dhd_os_sdlock(bus->dhd); - dhdsdio_checkdied(bus, NULL, 0); - dhd_os_sdunlock(bus->dhd); + if (timeleft == 0) { +#ifdef DHD_DEBUG + uint32 status, retry = 0; + R_SDREG(status, &bus->regs->intstatus, retry); + DHD_ERROR(("%s: resumed on timeout, INT status=0x%08X\n", + __FUNCTION__, status)); +#else + DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__)); +#endif /* DHD_DEBUG */ + if (!bus->dhd->dongle_trap_occured) { +#ifdef DHD_FW_COREDUMP + bus->dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT; +#endif /* DHD_FW_COREDUMP */ + dhd_os_sdlock(bus->dhd); + dhdsdio_checkdied(bus, NULL, 0); + dhd_os_sdunlock(bus->dhd); + } + } else { + DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__)); + if (!bus->dhd->dongle_trap_occured) { +#ifdef DHD_FW_COREDUMP + bus->dhd->memdump_type = DUMP_TYPE_RESUMED_UNKNOWN; +#endif /* DHD_FW_COREDUMP */ + dhd_os_sdlock(bus->dhd); + dhdsdio_checkdied(bus, NULL, 0); + dhd_os_sdunlock(bus->dhd); + } + } +#ifdef DHD_FW_COREDUMP + /* Dump the ram image */ + if (bus->dhd->memdump_enabled && !bus->dhd->dongle_trap_occured) + dhdsdio_mem_dump(bus); +#endif /* DHD_FW_COREDUMP */ } if (timeleft == 0) { if (rxlen == 0) bus->dhd->rxcnt_timeout++; DHD_ERROR(("%s: rxcnt_timeout=%d, rxlen=%d\n", __FUNCTION__, bus->dhd->rxcnt_timeout, rxlen)); - } - else +#ifdef DHD_FW_COREDUMP + /* collect socram dump */ + if (bus->dhd->memdump_enabled) { + bus->dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT_RX; + dhd_bus_mem_dump(bus->dhd); + } +#endif /* DHD_FW_COREDUMP */ + } else { bus->dhd->rxcnt_timeout = 0; + } if (rxlen) bus->dhd->rx_ctlpkts++; else bus->dhd->rx_ctlerrs++; - if (bus->dhd->rxcnt_timeout >= MAX_CNTL_RX_TIMEOUT) + if (bus->dhd->rxcnt_timeout >= MAX_CNTL_RX_TIMEOUT) { return -ETIMEDOUT; + } + if (bus->dhd->dongle_trap_occured) return -EREMOTEIO; @@ -3277,7 +3001,6 @@ enum { IOV_SDREG, IOV_SBREG, IOV_SDCIS, - IOV_MEMBYTES, IOV_RAMSIZE, IOV_RAMSTART, #ifdef DHD_DEBUG @@ -3321,61 +3044,62 @@ enum { IOV_TXGLOMSIZE, IOV_TXGLOMMODE, 
IOV_HANGREPORT, - IOV_TXINRX_THRES + IOV_TXINRX_THRES, + IOV_SDIO_SUSPEND }; const bcm_iovar_t dhdsdio_iovars[] = { - {"intr", IOV_INTR, 0, IOVT_BOOL, 0 }, - {"sleep", IOV_SLEEP, 0, IOVT_BOOL, 0 }, - {"pollrate", IOV_POLLRATE, 0, IOVT_UINT32, 0 }, - {"idletime", IOV_IDLETIME, 0, IOVT_INT32, 0 }, - {"idleclock", IOV_IDLECLOCK, 0, IOVT_INT32, 0 }, - {"sd1idle", IOV_SD1IDLE, 0, IOVT_BOOL, 0 }, - {"membytes", IOV_MEMBYTES, 0, IOVT_BUFFER, 2 * sizeof(int) }, - {"ramsize", IOV_RAMSIZE, 0, IOVT_UINT32, 0 }, - {"ramstart", IOV_RAMSTART, 0, IOVT_UINT32, 0 }, - {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, IOVT_BOOL, 0 }, - {"socram_state", IOV_SOCRAM_STATE, 0, IOVT_BOOL, 0 }, - {"vars", IOV_VARS, 0, IOVT_BUFFER, 0 }, - {"sdiod_drive", IOV_SDIOD_DRIVE, 0, IOVT_UINT32, 0 }, - {"readahead", IOV_READAHEAD, 0, IOVT_BOOL, 0 }, - {"sdrxchain", IOV_SDRXCHAIN, 0, IOVT_BOOL, 0 }, - {"alignctl", IOV_ALIGNCTL, 0, IOVT_BOOL, 0 }, - {"sdalign", IOV_SDALIGN, 0, IOVT_BOOL, 0 }, - {"devreset", IOV_DEVRESET, 0, IOVT_BOOL, 0 }, + {"intr", IOV_INTR, 0, 0, IOVT_BOOL, 0 }, + {"sleep", IOV_SLEEP, 0, 0, IOVT_BOOL, 0 }, + {"pollrate", IOV_POLLRATE, 0, 0, IOVT_UINT32, 0 }, + {"idletime", IOV_IDLETIME, 0, 0, IOVT_INT32, 0 }, + {"idleclock", IOV_IDLECLOCK, 0, 0, IOVT_INT32, 0 }, + {"sd1idle", IOV_SD1IDLE, 0, 0, IOVT_BOOL, 0 }, + {"ramsize", IOV_RAMSIZE, 0, 0, IOVT_UINT32, 0 }, + {"ramstart", IOV_RAMSTART, 0, 0, IOVT_UINT32, 0 }, + {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, 0, IOVT_BOOL, 0 }, + {"socram_state", IOV_SOCRAM_STATE, 0, 0, IOVT_BOOL, 0 }, + {"vars", IOV_VARS, 0, 0, IOVT_BUFFER, 0 }, + {"sdiod_drive", IOV_SDIOD_DRIVE, 0, 0, IOVT_UINT32, 0 }, + {"readahead", IOV_READAHEAD, 0, 0, IOVT_BOOL, 0 }, + {"sdrxchain", IOV_SDRXCHAIN, 0, 0, IOVT_BOOL, 0 }, + {"alignctl", IOV_ALIGNCTL, 0, 0, IOVT_BOOL, 0 }, + {"sdalign", IOV_SDALIGN, 0, 0, IOVT_BOOL, 0 }, + {"devreset", IOV_DEVRESET, 0, 0, IOVT_BOOL, 0 }, #ifdef DHD_DEBUG - {"sdreg", IOV_SDREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, - {"sbreg", IOV_SBREG, 0, 
IOVT_BUFFER, sizeof(sdreg_t) }, - {"sd_cis", IOV_SDCIS, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN }, - {"forcealign", IOV_FORCEEVEN, 0, IOVT_BOOL, 0 }, - {"txbound", IOV_TXBOUND, 0, IOVT_UINT32, 0 }, - {"rxbound", IOV_RXBOUND, 0, IOVT_UINT32, 0 }, - {"txminmax", IOV_TXMINMAX, 0, IOVT_UINT32, 0 }, - {"cpu", IOV_CPU, 0, IOVT_BOOL, 0 }, + {"sdreg", IOV_SDREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sbreg", IOV_SBREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_cis", IOV_SDCIS, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN }, + {"forcealign", IOV_FORCEEVEN, 0, 0, IOVT_BOOL, 0 }, + {"txbound", IOV_TXBOUND, 0, 0, IOVT_UINT32, 0 }, + {"rxbound", IOV_RXBOUND, 0, 0, IOVT_UINT32, 0 }, + {"txminmax", IOV_TXMINMAX, 0, 0, IOVT_UINT32, 0 }, + {"cpu", IOV_CPU, 0, 0, IOVT_BOOL, 0 }, #ifdef DHD_DEBUG - {"checkdied", IOV_CHECKDIED, 0, IOVT_BUFFER, 0 }, - {"serial", IOV_SERIALCONS, 0, IOVT_UINT32, 0 }, + {"checkdied", IOV_CHECKDIED, 0, 0, IOVT_BUFFER, 0 }, + {"serial", IOV_SERIALCONS, 0, 0, IOVT_UINT32, 0 }, #endif /* DHD_DEBUG */ #endif /* DHD_DEBUG */ #ifdef SDTEST - {"extloop", IOV_EXTLOOP, 0, IOVT_BOOL, 0 }, - {"pktgen", IOV_PKTGEN, 0, IOVT_BUFFER, sizeof(dhd_pktgen_t) }, + {"extloop", IOV_EXTLOOP, 0, 0, IOVT_BOOL, 0 }, + {"pktgen", IOV_PKTGEN, 0, 0, IOVT_BUFFER, sizeof(dhd_pktgen_t) }, #endif /* SDTEST */ #if defined(USE_SDIOFIFO_IOVAR) - {"watermark", IOV_WATERMARK, 0, IOVT_UINT32, 0 }, - {"mesbusyctrl", IOV_MESBUSYCTRL, 0, IOVT_UINT32, 0 }, + {"watermark", IOV_WATERMARK, 0, 0, IOVT_UINT32, 0 }, + {"mesbusyctrl", IOV_MESBUSYCTRL, 0, 0, IOVT_UINT32, 0 }, #endif /* USE_SDIOFIFO_IOVAR */ - {"devcap", IOV_DEVCAP, 0, IOVT_UINT32, 0 }, - {"dngl_isolation", IOV_DONGLEISOLATION, 0, IOVT_UINT32, 0 }, - {"kso", IOV_KSO, 0, IOVT_UINT32, 0 }, - {"devsleep", IOV_DEVSLEEP, 0, IOVT_UINT32, 0 }, + {"devcap", IOV_DEVCAP, 0, 0, IOVT_UINT32, 0 }, + {"dngl_isolation", IOV_DONGLEISOLATION, 0, 0, IOVT_UINT32, 0 }, + {"kso", IOV_KSO, 0, 0, IOVT_UINT32, 0 }, + {"devsleep", IOV_DEVSLEEP, 0, 0, IOVT_UINT32, 0 }, #ifdef 
SOFTAP - {"fwpath", IOV_FWPATH, 0, IOVT_BUFFER, 0 }, + {"fwpath", IOV_FWPATH, 0, 0, IOVT_BUFFER, 0 }, #endif - {"txglomsize", IOV_TXGLOMSIZE, 0, IOVT_UINT32, 0 }, - {"fw_hang_report", IOV_HANGREPORT, 0, IOVT_BOOL, 0 }, - {"txinrx_thres", IOV_TXINRX_THRES, 0, IOVT_INT32, 0 }, - {NULL, 0, 0, 0, 0 } + {"txglomsize", IOV_TXGLOMSIZE, 0, 0, IOVT_UINT32, 0 }, + {"fw_hang_report", IOV_HANGREPORT, 0, 0, IOVT_BOOL, 0 }, + {"txinrx_thres", IOV_TXINRX_THRES, 0, 0, IOVT_INT32, 0 }, + {"sdio_suspend", IOV_SDIO_SUSPEND, 0, 0, IOVT_UINT32, 0 }, + {NULL, 0, 0, 0, 0, 0 } }; static void @@ -3396,6 +3120,9 @@ void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) { dhd_bus_t *bus = dhdp->bus; +#if defined(DHD_WAKE_STATUS) && defined(DHD_WAKE_EVENT_STATUS) + int i; +#endif bcm_bprintf(strbuf, "Bus SDIO structure:\n"); bcm_bprintf(strbuf, "hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n", @@ -3405,6 +3132,31 @@ dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) bus->rxlen, bus->rx_seq); bcm_bprintf(strbuf, "intr %d intrcount %u lastintrs %u spurious %u\n", bus->intr, bus->intrcount, bus->lastintrs, bus->spurious); + +#ifdef DHD_WAKE_STATUS + bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n", + bcmsdh_get_total_wake(bus->sdh), bus->wake_counts.rxwake, + bus->wake_counts.rcwake); +#ifdef DHD_WAKE_RX_STATUS + bcm_bprintf(strbuf, " unicast %u multicast %u broadcast %u arp %u\n", + bus->wake_counts.rx_ucast, bus->wake_counts.rx_mcast, + bus->wake_counts.rx_bcast, bus->wake_counts.rx_arp); + bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n", + bus->wake_counts.rx_multi_ipv4, bus->wake_counts.rx_multi_ipv6, + bus->wake_counts.rx_icmpv6, bus->wake_counts.rx_multi_other); + bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n", + bus->wake_counts.rx_icmpv6_ra, bus->wake_counts.rx_icmpv6_na, + bus->wake_counts.rx_icmpv6_ns); +#endif /* DHD_WAKE_RX_STATUS */ +#ifdef DHD_WAKE_EVENT_STATUS + for (i = 0; i < WLC_E_LAST; i++) + if 
(bus->wake_counts.rc_event[i] != 0) + bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(i), + bus->wake_counts.rc_event[i]); + bcm_bprintf(strbuf, "\n"); +#endif /* DHD_WAKE_EVENT_STATUS */ +#endif /* DHD_WAKE_STATUS */ + bcm_bprintf(strbuf, "pollrate %u pollcnt %u regfails %u\n", bus->pollrate, bus->pollcnt, bus->regfails); @@ -3652,7 +3404,8 @@ dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh) return BCME_ERROR; } } - if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID && !dhdsdio_sr_cap(bus)) + if ((CHIPID(bus->sih->chip) == BCM43430_CHIP_ID || + CHIPID(bus->sih->chip) == BCM43018_CHIP_ID) && !dhdsdio_sr_cap(bus)) bus->srmemsize = 0; shaddr = bus->dongle_ram_base + bus->ramsize - 4; @@ -3798,8 +3551,7 @@ dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size) char *console_buffer = NULL; uint maxstrlen = 256; char *str = NULL; - trap_t tr; - sdpcm_shared_t sdpcm_shared; + sdpcm_shared_t l_sdpcm_shared; struct bcmstrbuf strbuf; uint32 console_ptr, console_size, console_index; uint8 line[CONSOLE_LINE_MAX], ch; @@ -3831,36 +3583,36 @@ dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size) goto done; } - if ((bcmerror = dhdsdio_readshared(bus, &sdpcm_shared)) < 0) + if ((bcmerror = dhdsdio_readshared(bus, &l_sdpcm_shared)) < 0) goto done; bcm_binit(&strbuf, data, size); bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n", - sdpcm_shared.msgtrace_addr, sdpcm_shared.console_addr); + l_sdpcm_shared.msgtrace_addr, l_sdpcm_shared.console_addr); - if ((sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) { + if ((l_sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) { /* NOTE: Misspelled assert is intentional - DO NOT FIX. * (Avoids conflict with real asserts for programmatic parsing of output.) 
*/ bcm_bprintf(&strbuf, "Assrt not built in dongle\n"); } - if ((sdpcm_shared.flags & (SDPCM_SHARED_ASSERT|SDPCM_SHARED_TRAP)) == 0) { + if ((l_sdpcm_shared.flags & (SDPCM_SHARED_ASSERT|SDPCM_SHARED_TRAP)) == 0) { /* NOTE: Misspelled assert is intentional - DO NOT FIX. * (Avoids conflict with real asserts for programmatic parsing of output.) */ bcm_bprintf(&strbuf, "No trap%s in dongle", - (sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) + (l_sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) ?"/assrt" :""); } else { - if (sdpcm_shared.flags & SDPCM_SHARED_ASSERT) { + if (l_sdpcm_shared.flags & SDPCM_SHARED_ASSERT) { /* Download assert */ bcm_bprintf(&strbuf, "Dongle assert"); - if (sdpcm_shared.assert_exp_addr != 0) { + if (l_sdpcm_shared.assert_exp_addr != 0) { str[0] = '\0'; if ((bcmerror = dhdsdio_membytes(bus, FALSE, - sdpcm_shared.assert_exp_addr, + l_sdpcm_shared.assert_exp_addr, (uint8 *)str, maxstrlen)) < 0) goto done; @@ -3868,10 +3620,10 @@ dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size) bcm_bprintf(&strbuf, " expr \"%s\"", str); } - if (sdpcm_shared.assert_file_addr != 0) { + if (l_sdpcm_shared.assert_file_addr != 0) { str[0] = '\0'; if ((bcmerror = dhdsdio_membytes(bus, FALSE, - sdpcm_shared.assert_file_addr, + l_sdpcm_shared.assert_file_addr, (uint8 *)str, maxstrlen)) < 0) goto done; @@ -3879,38 +3631,32 @@ dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size) bcm_bprintf(&strbuf, " file \"%s\"", str); } - bcm_bprintf(&strbuf, " line %d ", sdpcm_shared.assert_line); + bcm_bprintf(&strbuf, " line %d ", l_sdpcm_shared.assert_line); } - if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) { + if (l_sdpcm_shared.flags & SDPCM_SHARED_TRAP) { + trap_t *tr = &bus->dhd->last_trap_info; bus->dhd->dongle_trap_occured = TRUE; if ((bcmerror = dhdsdio_membytes(bus, FALSE, - sdpcm_shared.trap_addr, - (uint8*)&tr, sizeof(trap_t))) < 0) + l_sdpcm_shared.trap_addr, + (uint8*)tr, sizeof(trap_t))) < 0) goto done; - bcm_bprintf(&strbuf, - "Dongle trap type 0x%x @ epc 
0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x," - "lp 0x%x, rpc 0x%x Trap offset 0x%x, " - "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, " - "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n", - ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr), - ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc), - ltoh32(sdpcm_shared.trap_addr), - ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3), - ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7)); + bus->dongle_trap_addr = ltoh32(l_sdpcm_shared.trap_addr); - addr = sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log); + dhd_bus_dump_trap_info(bus, &strbuf); + + addr = l_sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log); if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) goto printbuf; - addr = sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.buf_size); + addr = l_sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.buf_size); if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&console_size, sizeof(console_size))) < 0) goto printbuf; - addr = sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.idx); + addr = l_sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.idx); if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&console_index, sizeof(console_index))) < 0) goto printbuf; @@ -3953,14 +3699,17 @@ dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size) } printbuf: - if (sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) { + if (l_sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) { DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf)); } #if defined(DHD_FW_COREDUMP) - if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) { + if (bus->dhd->memdump_enabled && (l_sdpcm_shared.flags & SDPCM_SHARED_TRAP)) { /* Mem dump to a file on device */ + bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP; + dhd_os_sdunlock(bus->dhd); dhdsdio_mem_dump(bus); + dhd_os_sdlock(bus->dhd); } #endif /* #if defined(DHD_FW_COREDUMP) */ @@ -3976,54 +3725,71 @@ done: } #if 
defined(DHD_FW_COREDUMP) +int +dhd_bus_mem_dump(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + if (dhdp->busstate == DHD_BUS_SUSPEND) { + DHD_ERROR(("%s: Bus is suspend so skip\n", __FUNCTION__)); + return 0; + } + return dhdsdio_mem_dump(bus); +} + static int dhdsdio_mem_dump(dhd_bus_t *bus) { int ret = 0; - int size; /* Full mem size */ - int start = bus->dongle_ram_base; /* Start address */ - int read_size = 0; /* Read size of each iteration */ + int size; /* Full mem size */ + uint32 start = bus->dongle_ram_base; /* Start address */ + uint read_size = 0; /* Read size of each iteration */ uint8 *buf = NULL, *databuf = NULL; /* Get full mem size */ size = bus->ramsize; - buf = MALLOC(bus->dhd->osh, size); + buf = dhd_get_fwdump_buf(bus->dhd, size); if (!buf) { - printf("%s: Out of memory (%d bytes)\n", __FUNCTION__, size); + DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size)); return -1; } + dhd_os_sdlock(bus->dhd); + BUS_WAKE(bus); + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + /* Read mem content */ - printf("Dump dongle memory"); + DHD_ERROR(("Dump dongle memory\n")); databuf = buf; while (size) { read_size = MIN(MEMBLOCK, size); if ((ret = dhdsdio_membytes(bus, FALSE, start, databuf, read_size))) { - printf("%s: Error membytes %d\n", __FUNCTION__, ret); - if (buf) { - MFREE(bus->dhd->osh, buf, size); - } - return -1; + DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret)); + ret = BCME_ERROR; + break; } /* Decrement size and increment start address */ size -= read_size; start += read_size; databuf += read_size; } - printf("Done\n"); - dhd_save_fwdump(bus->dhd, buf, bus->ramsize); - /* free buf before return !!! 
*/ - if (write_to_file(bus->dhd, buf, bus->ramsize)) - { - printf("%s: Error writing to files\n", __FUNCTION__); - return -1; + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched && + NO_OTHER_ACTIVE_BUS_USER(bus)) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, TRUE); } - /* buf free handled in write_to_file, not here */ - return 0; + dhd_os_sdunlock(bus->dhd); + + /* schedule a work queue to perform actual memdump. dhd_mem_dump() performs the job */ + if (!ret) { + /* buf, actually soc_ram free handled in dhd_{free,clear} */ + dhd_schedule_memdump(bus->dhd, buf, bus->ramsize); + } + + return ret; } #endif /* DHD_FW_COREDUMP */ @@ -4044,8 +3810,11 @@ dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len) DHD_TRACE(("%s: Enter\n", __FUNCTION__)); - /* Basic sanity checks */ - if (bus->dhd->up) { + if (bus->dhd->up && +#ifdef DHD_ULP + (DHD_ULP_DISABLED == dhd_ulp_get_ulp_state(bus->dhd)) && +#endif /* DHD_ULP */ + 1) { bcmerror = BCME_NOTDOWN; goto err; } @@ -4100,10 +3869,10 @@ dhd_serialconsole(dhd_bus_t *bus, bool set, bool enable, int *bcmerror) *bcmerror = BCME_SDIO_ERROR; return -1; } + if (bus->sih->chip == BCM4330_CHIP_ID) { uart_enab = CC_PLL_CHIPCTRL_SERIAL_ENAB; - } - else if (bus->sih->chip == BCM4334_CHIP_ID || + } else if (bus->sih->chip == BCM4334_CHIP_ID || bus->sih->chip == BCM43340_CHIP_ID || bus->sih->chip == BCM43341_CHIP_ID || bus->sih->chip == BCM43342_CHIP_ID || @@ -4278,94 +4047,6 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const ch break; - case IOV_SVAL(IOV_MEMBYTES): - case IOV_GVAL(IOV_MEMBYTES): - { - uint32 address; - uint size, dsize; - uint8 *data; - - bool set = (actionid == IOV_SVAL(IOV_MEMBYTES)); - - ASSERT(plen >= 2*sizeof(int)); - - address = (uint32)int_val; - bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val)); - size = (uint)int_val; - - /* Do some validation */ - dsize = set ? 
plen - (2 * sizeof(int)) : len; - if (dsize < size) { - DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n", - __FUNCTION__, (set ? "set" : "get"), address, size, dsize)); - bcmerror = BCME_BADARG; - break; - } - - DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n", __FUNCTION__, - (set ? "write" : "read"), size, address)); - - /* check if CR4 */ - if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { - /* - * If address is start of RAM (i.e. a downloaded image), - * store the reset instruction to be written in 0 - */ - if (set && address == bus->dongle_ram_base) { - bus->resetinstr = *(((uint32*)params) + 2); - } - } else { - /* If we know about SOCRAM, check for a fit */ - if ((bus->orig_ramsize) && - ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize))) - { - uint8 enable, protect, remap; - si_socdevram(bus->sih, FALSE, &enable, &protect, &remap); - if (!enable || protect) { - DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n", - __FUNCTION__, bus->orig_ramsize, size, address)); - DHD_ERROR(("%s: socram enable %d, protect %d\n", - __FUNCTION__, enable, protect)); - bcmerror = BCME_BADARG; - break; - } - - if (!REMAP_ENAB(bus) && (address >= SOCDEVRAM_ARM_ADDR)) { - uint32 devramsize = si_socdevram_size(bus->sih); - if ((address < SOCDEVRAM_ARM_ADDR) || - (address + size > (SOCDEVRAM_ARM_ADDR + devramsize))) { - DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n", - __FUNCTION__, address, size)); - DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n", - __FUNCTION__, SOCDEVRAM_ARM_ADDR, devramsize)); - bcmerror = BCME_BADARG; - break; - } - /* move it such that address is real now */ - address -= SOCDEVRAM_ARM_ADDR; - address += SOCDEVRAM_BP_ADDR; - DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n", - __FUNCTION__, (set ? 
"write" : "read"), size, address)); - } else if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address) && remap) { - /* Can not access remap region while devram remap bit is set - * ROM content would be returned in this case - */ - DHD_ERROR(("%s: Need to disable remap for address 0x%08x\n", - __FUNCTION__, address)); - bcmerror = BCME_ERROR; - break; - } - } - } - - /* Generate the actual data pointer */ - data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg; - - /* Call to do the transfer */ - bcmerror = dhdsdio_membytes(bus, set, address, data, size); - - break; - } case IOV_GVAL(IOV_RAMSIZE): int_val = (int32)bus->ramsize; @@ -4448,11 +4129,12 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const ch case IOV_GVAL(IOV_SDREG): { sdreg_t *sd_ptr; - uint32 addr, size; + uintptr addr; + uint size; sd_ptr = (sdreg_t *)params; - addr = (uint32)((ulong)bus->regs + sd_ptr->offset); + addr = ((uintptr)bus->regs + sd_ptr->offset); size = sd_ptr->func; int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size); if (bcmsdh_regfail(bus->sdh)) @@ -4464,11 +4146,12 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const ch case IOV_SVAL(IOV_SDREG): { sdreg_t *sd_ptr; - uint32 addr, size; + uintptr addr; + uint size; sd_ptr = (sdreg_t *)params; - addr = (uint32)((ulong)bus->regs + sd_ptr->offset); + addr = ((uintptr)bus->regs + sd_ptr->offset); size = sd_ptr->func; bcmsdh_reg_write(bus->sdh, addr, size, sd_ptr->value); if (bcmsdh_regfail(bus->sdh)) @@ -4701,15 +4384,31 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const ch } break; + case IOV_GVAL(IOV_SDIO_SUSPEND): + int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 
1 : 0; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDIO_SUSPEND): + if (bool_val) { /* Suspend */ + dhdsdio_suspend(bus); + } + else { /* Resume */ + dhdsdio_resume(bus); + } + break; + default: bcmerror = BCME_UNSUPPORTED; break; } exit: - if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched && + NO_OTHER_ACTIVE_BUS_USER(bus)) { bus->activity = FALSE; - dhdsdio_clkctl(bus, CLK_NONE, TRUE); + dhdsdio_bussleep(bus, TRUE); + dhdsdio_clkctl(bus, CLK_NONE, FALSE); } dhd_os_sdunlock(bus->dhd); @@ -4760,12 +4459,20 @@ dhdsdio_write_vars(dhd_bus_t *bus) /* Write the vars list */ bcmerror = dhdsdio_membytes(bus, TRUE, varaddr, vbuffer, varsize); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, varsize, varaddr)); + return bcmerror; + } + #ifdef DHD_DEBUG /* Verify NVRAM bytes */ DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize)); nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize); - if (!nvram_ularray) + if (!nvram_ularray) { + MFREE(bus->dhd->osh, vbuffer, varsize); return BCME_NOMEM; + } /* Upload image to verify downloaded contents. 
*/ memset(nvram_ularray, 0xaa, varsize); @@ -4872,7 +4579,8 @@ dhdsdio_download_state(dhd_bus_t *bus, bool enter) if (REMAP_ENAB(bus) && si_socdevram_remap_isenb(bus->sih)) dhdsdio_devram_remap(bus, FALSE); - if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID) { + if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID || + CHIPID(bus->sih->chip) == BCM43018_CHIP_ID) { /* Disabling Remap for SRAM_3 */ si_socram_set_bankpda(bus->sih, 0x3, 0x0); } @@ -5077,9 +4785,11 @@ dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, } bus->roundup = MIN(max_roundup, bus->blocksize); - if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched && + NO_OTHER_ACTIVE_BUS_USER(bus)) { bus->activity = FALSE; - dhdsdio_clkctl(bus, CLK_NONE, TRUE); + dhdsdio_bussleep(bus, TRUE); + dhdsdio_clkctl(bus, CLK_NONE, FALSE); } dhd_os_sdunlock(bus->dhd); @@ -5121,6 +4831,7 @@ dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) uint retries; int err; bool wlfc_enabled = FALSE; + unsigned long flags; if (!bus->dhd) return; @@ -5176,7 +4887,9 @@ dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); /* Change our idea of bus state */ + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); bus->dhd->busstate = DHD_BUS_DOWN; + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); } #ifdef PROP_TXSTATUS @@ -5189,8 +4902,10 @@ dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) */ dhd_tcpack_info_tbl_clean(bus->dhd); #endif /* DHDTCPACK_SUPPRESS */ + dhd_os_sdlock_txq(bus->dhd); /* Clear the data packet queues */ - pktq_flush(osh, &bus->txq, TRUE, NULL, 0); + pktq_flush(osh, &bus->txq, TRUE); + dhd_os_sdunlock_txq(bus->dhd); } /* Clear any held glomming stuff */ @@ -5228,7 +4943,6 @@ dhd_txglom_enable(dhd_pub_t *dhdp, bool enable) */ dhd_bus_t *bus = dhdp->bus; #ifdef BCMSDIOH_TXGLOM - char buf[256]; uint32 rxglom; int32 ret; @@ -5241,9 +4955,8 @@ dhd_txglom_enable(dhd_pub_t *dhdp, bool enable) if (enable) { rxglom = 1; - 
memset(buf, 0, sizeof(buf)); - bcm_mkiovar("bus:rxglom", (void *)&rxglom, 4, buf, sizeof(buf)); - ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0); + ret = dhd_iovar(dhdp, 0, "bus:rxglom", (char *)&rxglom, sizeof(rxglom), NULL, 0, + TRUE); if (ret >= 0) bus->txglom_enable = TRUE; else { @@ -5292,9 +5005,9 @@ dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) goto exit; } - /* Force clocks on backplane to be sure F2 interrupt propagates */ saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (!err) { if (bus->sih->chip == BCM43012_CHIP_ID) { bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, @@ -5304,6 +5017,7 @@ dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) (saveclk | SBSDIO_FORCE_HT), &err); } } + if (err) { DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err)); ret = -1; @@ -5391,10 +5105,10 @@ dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries); DHD_INFO(("%s: disable I_CHIPACTIVE in hostintmask[0x%08x]\n", __FUNCTION__, bus->hostintmask)); - } - else + } else { bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err); + } /* If we didn't come up, turn off backplane clock */ if (dhdp->busstate != DHD_BUS_DATA) @@ -5732,6 +5446,7 @@ dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq) errcode = -1; } pnext = NULL; + BCM_REFERENCE(pnext); } else { DHD_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n", dlen)); errcode = -1; @@ -5943,8 +5658,7 @@ dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq) if (free_buf_count == 0) { continue; - } - else { + } else { void *temp; /* go to the end of the chain and attach the pnext there */ @@ -5961,14 +5675,12 @@ dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq) } num += (uint8)free_buf_count; - } - else { + } else { /* this packet will go up, link back into chain and count it */ if (list_tail[ifidx] == NULL) { list_head[ifidx] = list_tail[ifidx] = pfirst; - } - else { + } 
else { PKTSETNEXT(osh, list_tail[ifidx], pfirst); list_tail[ifidx] = pfirst; } @@ -6051,7 +5763,6 @@ dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished) #endif DHD_TRACE(("%s: Enter\n", __FUNCTION__)); - bus->readframes = TRUE; if (!KSO_ENAB(bus)) { @@ -6096,11 +5807,6 @@ dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished) !bus->fcstate && DATAOK(bus) && (pktq_mlen(&bus->txq, ~bus->flowcontrol) > bus->txinrx_thres) && bus->dhd->conf->tx_in_rx) { -#if defined(SWTXGLOM) - if (bus->dhd->conf->swtxglom) - dhdsdio_sendfromq_swtxglom(bus, dhd_txbound); - else -#endif dhdsdio_sendfromq(bus, dhd_txbound); #ifdef DHDTCPACK_SUPPRESS /* In TCPACK_SUP_DELAYTX mode, do txinrx only if @@ -6132,8 +5838,7 @@ dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished) if (bus->bus == SPI_BUS) { rdlen = len = nextlen; - } - else { + } else { rdlen = len = nextlen << 4; /* Pad read to blocksize for efficiency */ @@ -6623,15 +6328,16 @@ deliver: bus->dhd->rx_errors++; continue; } + if (reorder_info_len) { /* Reordering info from the firmware */ dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf, reorder_info_len, &pkt, &pkt_count); if (pkt_count == 0) continue; - } - else + } else { pkt_count = 1; + } /* Unlock during rx call */ dhd_os_sdunlock(bus->dhd); @@ -6671,7 +6377,7 @@ deliver: } static uint32 -dhdsdio_hostmail(dhd_bus_t *bus) +dhdsdio_hostmail(dhd_bus_t *bus, uint32 *hmbd) { sdpcmd_regs_t *regs = bus->regs; uint32 intstatus = 0; @@ -6765,6 +6471,10 @@ dhdsdio_hostmail(dhd_bus_t *bus) DHD_ERROR(("Unknown mailbox data content: 0x%02x\n", hmb_data)); } + if (hmbd) { + *hmbd = hmb_data; + } + return intstatus; } @@ -6780,20 +6490,25 @@ dhdsdio_dpc(dhd_bus_t *bus) uint framecnt = 0; /* Temporary counter of tx/rx frames */ bool rxdone = TRUE; /* Flag for no more read data */ bool resched = FALSE; /* Flag indicating resched wanted */ + unsigned long flags; #ifdef DEBUG_DPC_THREAD_WATCHDOG bool is_resched_by_readframe = FALSE; #endif /* 
DEBUG_DPC_THREAD_WATCHDOG */ DHD_TRACE(("%s: Enter\n", __FUNCTION__)); dhd_os_sdlock(bus->dhd); - + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); if (bus->dhd->busstate == DHD_BUS_DOWN) { DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__)); bus->intstatus = 0; + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); dhd_os_sdunlock(bus->dhd); return 0; } + DHD_BUS_BUSY_SET_IN_DPC(bus->dhd); + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + /* Start with leftover status bits */ intstatus = bus->intstatus; @@ -6857,6 +6572,10 @@ dhdsdio_dpc(dhd_bus_t *bus) /* Pending interrupt indicates new device status */ if (bus->ipend) { bus->ipend = FALSE; +#if defined(BT_OVER_SDIO) + bcmsdh_btsdio_process_f3_intr(); +#endif /* defined (BT_OVER_SDIO) */ + R_SDREG(newstatus, ®s->intstatus, retries); bus->f1regdata++; if (bcmsdh_regfail(bus->sdh)) @@ -6867,8 +6586,7 @@ dhdsdio_dpc(dhd_bus_t *bus) bus->f1regdata++; if ((bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_0) && (newstatus == I_XMTDATA_AVAIL)) { - } - else + } else W_SDREG(newstatus, ®s->intstatus, retries); } } @@ -6890,6 +6608,30 @@ dhdsdio_dpc(dhd_bus_t *bus) intstatus |= (newstatus & bus->hostintmask); } + /* Handle host mailbox indication */ + if (intstatus & I_HMB_HOST_INT) { + uint32 hmbdata = 0; + + intstatus &= ~I_HMB_HOST_INT; + intstatus |= dhdsdio_hostmail(bus, &hmbdata); + +#ifdef DHD_ULP + /* ULP prototyping. 
Redowload fw on oob interupt */ + + /* all the writes after this point CAN use cached sbwad value */ + bcmsdh_force_sbwad_calc(bus->sdh, FALSE); + + if (dhd_ulp_pre_redownload_check(bus->dhd, bus->sdh, hmbdata)) { + if (dhd_bus_ulp_reinit_fw(bus) < 0) { + DHD_ERROR(("%s:%d FW redownload failed\n", + __FUNCTION__, __LINE__)); + goto exit; + } + } +#endif + + } + /* Just being here means nothing more to do for chipactive */ if (intstatus & I_CHIPACTIVE) { /* ASSERT(bus->clkstate == CLK_AVAIL); */ @@ -6899,7 +6641,7 @@ dhdsdio_dpc(dhd_bus_t *bus) /* Handle host mailbox indication */ if (intstatus & I_HMB_HOST_INT) { intstatus &= ~I_HMB_HOST_INT; - intstatus |= dhdsdio_hostmail(bus); + intstatus |= dhdsdio_hostmail(bus, NULL); } /* Generally don't ask for these, can get CRC errors... */ @@ -6936,6 +6678,7 @@ dhdsdio_dpc(dhd_bus_t *bus) /* On frame indication, read available frames */ if (PKT_AVAILABLE(bus, intstatus)) { + framecnt = dhdsdio_readframes(bus, rxlimit, &rxdone); if (rxdone || bus->rxskip) intstatus &= ~FRAME_AVAIL_MASK(bus); @@ -6986,21 +6729,29 @@ clkwait: /* Send queued frames (limit 1 if rx may still be pending) */ else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && DATAOK(bus)) { - if (bus->dhd->conf->dhd_txminmax < 0) - framecnt = rxdone ? txlimit : MIN(txlimit, DATABUFCNT(bus)); - else - framecnt = rxdone ? txlimit : MIN(txlimit, bus->dhd->conf->dhd_txminmax); -#if defined(SWTXGLOM) - if (bus->dhd->conf->swtxglom) - framecnt = dhdsdio_sendfromq_swtxglom(bus, framecnt); - else -#endif - framecnt = dhdsdio_sendfromq(bus, framecnt); - txlimit -= framecnt; + +#ifdef DHD_ULP + if (dhd_ulp_f2_ready(bus->dhd, bus->sdh)) { +#endif /* DHD_ULP */ + if (bus->dhd->conf->dhd_txminmax < 0) + framecnt = rxdone ? txlimit : MIN(txlimit, DATABUFCNT(bus)); + else + framecnt = rxdone ? 
txlimit : MIN(txlimit, bus->dhd->conf->dhd_txminmax); + framecnt = dhdsdio_sendfromq(bus, framecnt); + txlimit -= framecnt; +#ifdef DHD_ULP + } else { + /* In other transient states like DHD_ULP_, after the states are + * DHD_ULP_F2ENAB_CLEARING and DHD_ULP_F2ENAB_SETTING, + * dpc is scheduled after steady-state and dhdsdio_sendfromq() will + * execute again + */ + } +#endif /* DHD_ULP */ } /* Resched the DPC if ctrl cmd is pending on bus credit */ if (bus->ctrl_frame_stat) { - if (bus->dhd->conf->txctl_tmo_fix) { + if (bus->dhd->conf->txctl_tmo_fix > 0) { set_current_state(TASK_INTERRUPTIBLE); if (!kthread_should_stop()) schedule_timeout(1); @@ -7034,8 +6785,10 @@ clkwait: bus->dpc_sched = resched; /* If we're done for now, turn off clock request. */ - if ((bus->idletime == DHD_IDLE_IMMEDIATE) && (bus->clkstate != CLK_PENDING)) { + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && (bus->clkstate != CLK_PENDING) && + NO_OTHER_ACTIVE_BUS_USER(bus)) { bus->activity = FALSE; + dhdsdio_bussleep(bus, TRUE); dhdsdio_clkctl(bus, CLK_NONE, FALSE); } @@ -7077,6 +6830,12 @@ exit: bus->dhd->dhd_bug_on = FALSE; } #endif /* DEBUG_DPC_THREAD_WATCHDOG */ + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + return resched; } @@ -7151,7 +6910,6 @@ dhdsdio_isr(void *arg) #else bus->dpc_sched = TRUE; dhd_sched_dpc(bus->dhd); - #endif /* defined(SDIO_ISR_THREAD) */ } @@ -7545,6 +7303,7 @@ extern bool dhd_bus_watchdog(dhd_pub_t *dhdp) { dhd_bus_t *bus; + unsigned long flags; DHD_TIMER(("%s: Enter\n", __FUNCTION__)); @@ -7562,8 +7321,14 @@ dhd_bus_watchdog(dhd_pub_t *dhdp) if (!SLPAUTO_ENAB(bus) && bus->sleeping) return FALSE; - if (dhdp->busstate == DHD_BUS_DOWN) + DHD_LINUX_GENERAL_LOCK(dhdp, flags); + if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp) || + DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) { + DHD_LINUX_GENERAL_UNLOCK(dhdp, flags); return FALSE; + } + 
DHD_BUS_BUSY_SET_IN_WD(dhdp); + DHD_LINUX_GENERAL_UNLOCK(dhdp, flags); dhd_os_sdlock(bus->dhd); @@ -7643,7 +7408,23 @@ dhd_bus_watchdog(dhd_pub_t *dhdp) else { bus->idlecount++; - if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) { + /* + * If the condition to switch off the clock is reached And if + * BT is inactive (in case of BT_OVER_SDIO build) turn off clk. + * + * Consider the following case, DHD is configured with + * 1) idletime == DHD_IDLE_IMMEDIATE + * 2) BT is the last user of the clock + * We cannot disable the clock from __dhdsdio_clk_disable + * since WLAN might be using it. If WLAN is active then + * from the respective function/context after doing the job + * the clk is turned off. + * But if WLAN is actually inactive then the watchdog should + * disable the clock. So the condition check below should be + * bus->idletime != 0 instead of idletime == 0 + */ + if ((bus->idletime != 0) && (bus->idlecount >= bus->idletime) && + NO_OTHER_ACTIVE_BUS_USER(bus)) { DHD_TIMER(("%s: DHD Idle state!!\n", __FUNCTION__)); if (!bus->poll && SLPAUTO_ENAB(bus)) { if (dhdsdio_bussleep(bus, TRUE) != BCME_BUSY) @@ -7655,7 +7436,8 @@ dhd_bus_watchdog(dhd_pub_t *dhdp) } } #else - if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) { + if ((bus->idletime != 0) && (bus->clkstate == CLK_AVAIL) && + NO_OTHER_ACTIVE_BUS_USER(bus)) { if (++bus->idlecount >= bus->idletime) { bus->idlecount = 0; if (bus->activity) { @@ -7665,9 +7447,9 @@ dhd_bus_watchdog(dhd_pub_t *dhdp) dhdsdio_bussleep(bus, TRUE); else bus->reqbussleep = TRUE; - } - else + } else { dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } } } } @@ -7675,10 +7457,14 @@ dhd_bus_watchdog(dhd_pub_t *dhdp) dhd_os_sdunlock(bus->dhd); + DHD_LINUX_GENERAL_LOCK(dhdp, flags); + DHD_BUS_BUSY_CLEAR_IN_WD(dhdp); + dhd_os_busbusy_wake(dhdp); + DHD_LINUX_GENERAL_UNLOCK(dhdp, flags); + return bus->ipend; } -#ifdef DHD_DEBUG extern int dhd_bus_console_in(dhd_pub_t *dhdp, uchar *msg, uint msglen) { @@ -7729,16 +7515,17 @@ 
dhd_bus_console_in(dhd_pub_t *dhdp, uchar *msg, uint msglen) rv = dhdsdio_txpkt(bus, SDPCM_EVENT_CHANNEL, &pkt, 1, TRUE); done: - if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched && + NO_OTHER_ACTIVE_BUS_USER(bus)) { bus->activity = FALSE; - dhdsdio_clkctl(bus, CLK_NONE, TRUE); + dhdsdio_bussleep(bus, TRUE); + dhdsdio_clkctl(bus, CLK_NONE, FALSE); } dhd_os_sdunlock(bus->dhd); return rv; } -#endif /* DHD_DEBUG */ #ifdef DHD_DEBUG static void @@ -7773,14 +7560,6 @@ dhd_dump_cis(uint fn, uint8 *cis) static bool dhdsdio_chipmatch(uint16 chipid) { - if (chipid == BCM4325_CHIP_ID) - return TRUE; - if (chipid == BCM4329_CHIP_ID) - return TRUE; - if (chipid == BCM4315_CHIP_ID) - return TRUE; - if (chipid == BCM4319_CHIP_ID) - return TRUE; if (chipid == BCM4336_CHIP_ID) return TRUE; if (chipid == BCM4330_CHIP_ID) @@ -7813,14 +7592,12 @@ dhdsdio_chipmatch(uint16 chipid) return TRUE; if (chipid == BCM43349_CHIP_ID) return TRUE; - if (chipid == BCM4345_CHIP_ID || chipid == BCM43454_CHIP_ID) + if (BCM4345_CHIP(chipid)) return TRUE; if (chipid == BCM4350_CHIP_ID) return TRUE; if (chipid == BCM4354_CHIP_ID) return TRUE; - if (chipid == BCM4356_CHIP_ID) - return TRUE; if (chipid == BCM4358_CHIP_ID) return TRUE; if (chipid == BCM43569_CHIP_ID) @@ -7829,10 +7606,18 @@ dhdsdio_chipmatch(uint16 chipid) return TRUE; if (chipid == BCM43430_CHIP_ID) return TRUE; + if (chipid == BCM43018_CHIP_ID) + return TRUE; if (BCM4349_CHIP(chipid)) return TRUE; + if (BCM4347_CHIP(chipid)) + return TRUE; + if (chipid == BCM4364_CHIP_ID) + return TRUE; + if (chipid == BCM43012_CHIP_ID) return TRUE; + return FALSE; } @@ -7906,27 +7691,6 @@ dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot, /* Check the Device ID and make sure it's one that we support */ switch (devid) { - case BCM4325_D11DUAL_ID: /* 4325 802.11a/g id */ - case BCM4325_D11G_ID: /* 4325 802.11g 2.4Ghz band id */ - case BCM4325_D11A_ID: /* 4325 
802.11a 5Ghz band id */ - DHD_INFO(("%s: found 4325 Dongle\n", __FUNCTION__)); - break; - case BCM4329_D11N_ID: /* 4329 802.11n dualband device */ - case BCM4329_D11N2G_ID: /* 4329 802.11n 2.4G device */ - case BCM4329_D11N5G_ID: /* 4329 802.11n 5G device */ - case 0x4329: - DHD_INFO(("%s: found 4329 Dongle\n", __FUNCTION__)); - break; - case BCM4315_D11DUAL_ID: /* 4315 802.11a/g id */ - case BCM4315_D11G_ID: /* 4315 802.11g id */ - case BCM4315_D11A_ID: /* 4315 802.11a id */ - DHD_INFO(("%s: found 4315 Dongle\n", __FUNCTION__)); - break; - case BCM4319_D11N_ID: /* 4319 802.11n id */ - case BCM4319_D11N2G_ID: /* 4319 802.11n2g id */ - case BCM4319_D11N5G_ID: /* 4319 802.11n5g id */ - DHD_INFO(("%s: found 4319 Dongle\n", __FUNCTION__)); - break; case 0: DHD_INFO(("%s: allow device id 0, will check chip internals\n", __FUNCTION__)); @@ -7956,6 +7720,9 @@ dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot, bus->slot_num = slot; bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1; bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */ +#ifdef BT_OVER_SDIO + bus->bt_use_count = 0; +#endif #if defined(SUPPORT_P2P_GO_PS) init_waitqueue_head(&bus->bus_sleep); @@ -7972,6 +7739,10 @@ dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot, DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__)); goto fail; } +#if defined(BT_OVER_SDIO) + g_dhd_pub = bus->dhd; + DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub)); +#endif /* defined (BT_OVER_SDIO) */ /* Allocate buffers */ if (!(dhdsdio_probe_malloc(bus, osh, sdh))) { @@ -8019,6 +7790,16 @@ dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot, bus->dhd->mac.octet[2] = 0x4C; } #endif +#if defined(BT_OVER_SDIO) + /* At this point Regulators are turned on and iconditionaly sdio bus is started + * based upon dhd_download_fw_on_driverload check, so + * increase the bus user count, this count will only be disabled inside + * dhd_register_if() function if flag 
dhd_download_fw_on_driverload is set to false, + * i.e FW download during insmod is not needed, otherwise it will not be decremented + * so that WALN will always hold the bus untill rmmod is done. + */ + dhdsdio_bus_usr_cnt_inc(bus->dhd); +#endif /* BT_OVER_SDIO */ #ifdef GET_OTP_MAC_ENABLE if (dhd_conf_get_mac(bus->dhd, sdh, ea_addr.octet)) { @@ -8070,8 +7851,11 @@ static bool dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva, uint16 devid) { - int err = 0; uint8 clkctl = 0; + uint fn, numfn; + uint8 *cis[SDIOD_MAX_IOFUNCS]; + int err = 0; + bus->alp_only = TRUE; bus->sih = NULL; @@ -8100,26 +7884,20 @@ dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva, err, DHD_INIT_CLKCTL1, clkctl)); goto fail; } + numfn = bcmsdh_query_iofnum(sdh); + ASSERT(numfn <= SDIOD_MAX_IOFUNCS); -#ifdef DHD_DEBUG + /* Make sure ALP is available before trying to read CIS */ + SPINWAIT(((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, NULL)), + !SBSDIO_ALPAV(clkctl)), PMU_MAX_TRANSITION_DLY); + + /* Now request ALP be put on the bus */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + DHD_INIT_CLKCTL2, &err); + OSL_DELAY(200); + if (DHD_INFO_ON()) { - uint fn, numfn; - uint8 *cis[SDIOD_MAX_IOFUNCS]; - int err = 0; - - numfn = bcmsdh_query_iofnum(sdh); - ASSERT(numfn <= SDIOD_MAX_IOFUNCS); - - /* Make sure ALP is available before trying to read CIS */ - SPINWAIT(((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, - SBSDIO_FUNC1_CHIPCLKCSR, NULL)), - !SBSDIO_ALPAV(clkctl)), PMU_MAX_TRANSITION_DLY); - - /* Now request ALP be put on the bus */ - bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, - DHD_INIT_CLKCTL2, &err); - OSL_DELAY(65); - for (fn = 0; fn <= numfn; fn++) { if (!(cis[fn] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) { DHD_INFO(("dhdsdio_probe: fn %d cis malloc failed\n", fn)); @@ -8127,26 +7905,52 @@ dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva, } 
bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT); - if ((err = bcmsdh_cis_read(sdh, fn, cis[fn], SBSDIO_CIS_SIZE_LIMIT))) { + if ((err = bcmsdh_cis_read(sdh, fn, cis[fn], + SBSDIO_CIS_SIZE_LIMIT))) { DHD_INFO(("dhdsdio_probe: fn %d cis read err %d\n", fn, err)); MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT); break; } - dhd_dump_cis(fn, cis[fn]); +#if 0 + /* Reading the F1, F2 and F3 max blocksize values from CIS + * and writing into the F1, F2 and F3 block size registers. + * There is no max block size register value available for F0 in CIS register. + * So, setting default value for F0 block size as 32 (which was set earlier + * in iovar). IOVAR takes only one arguement. + * So, we are passing the function number alongwith the value (fn<<16) + */ + if (!fn) + value = F0_BLOCK_SIZE; + else + value = (cis[fn][25]<<8) | cis[fn][24] | (fn<<16); + printf("%s: fn=%d, value=%d\n", __FUNCTION__, fn, value); + if (bcmsdh_iovar_op(sdh, "sd_blocksize", NULL, 0, &value, + sizeof(value), TRUE) != BCME_OK) { + bus->blocksize = 0; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, + "sd_blocksize")); + } +#endif +#ifdef DHD_DEBUG + if (DHD_INFO_ON()) { + dhd_dump_cis(fn, cis[fn]); + } +#endif /* DHD_DEBUG */ } - while (fn-- > 0) { ASSERT(cis[fn]); MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT); } - - if (err) { - DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n")); - goto fail; - } } -#endif /* DHD_DEBUG */ - +#if 0 + if (dhd_conf_set_blksize(sdh)) { + bus->blocksize = 0; + } +#endif + if (err) { + DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n")); + goto fail; + } /* si_attach() will provide an SI handle and scan the backplane */ if (!(bus->sih = si_attach((uint)devid, osh, regsva, DHD_BUS, sdh, &bus->vars, &bus->varsz))) { @@ -8210,7 +8014,6 @@ dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva, break; case BCM4350_CHIP_ID: case BCM4354_CHIP_ID: - case BCM4356_CHIP_ID: case BCM4358_CHIP_ID: case BCM43569_CHIP_ID: case BCM4371_CHIP_ID: @@ 
-8219,15 +8022,20 @@ dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva, case BCM4360_CHIP_ID: bus->dongle_ram_base = CR4_4360_RAM_BASE; break; - case BCM4345_CHIP_ID: - case BCM43454_CHIP_ID: + CASE_BCM4345_CHIP: bus->dongle_ram_base = (bus->sih->chiprev < 6) /* from 4345C0 */ ? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE; break; case BCM4349_CHIP_GRPID: - /* RAM base changed from 4349c0(revid=9) onwards */ + /* RAM based changed from 4349c0(revid=9) onwards */ bus->dongle_ram_base = ((bus->sih->chiprev < 9) ? - CR4_4349_RAM_BASE: CR4_4349_RAM_BASE_FROM_REV_9); + CR4_4349_RAM_BASE: CR4_4349_RAM_BASE_FROM_REV_9); + break; + case BCM4364_CHIP_ID: + bus->dongle_ram_base = CR4_4364_RAM_BASE; + break; + case BCM4347_CHIP_GRPID: + bus->dongle_ram_base = CR4_4347_RAM_BASE; break; default: bus->dongle_ram_base = 0; @@ -8340,6 +8148,8 @@ dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh) dhdsdio_pktgen_init(bus); #endif /* SDTEST */ + /* set PMU minimum resource mask to default */ + dhd_bus_set_default_min_res_mask(bus); /* Disable F2 to clear any intermediate frame state on the dongle */ bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL); @@ -8439,7 +8249,6 @@ dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, ret = dhdsdio_download_firmware(bus, osh, bus->sdh); - return ret; } @@ -8504,6 +8313,10 @@ dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh) int ret; +#if defined(DHD_BLOB_EXISTENCE_CHECK) + dhd_set_blob_support(bus->dhd, bus->fw_path); +#endif /* DHD_BLOB_EXISTENCE_CHECK */ + DHD_TRACE_HW4(("%s: firmware path=%s, nvram path=%s\n", __FUNCTION__, bus->fw_path, bus->nv_path)); DHD_OS_WAKE_LOCK(bus->dhd); @@ -8606,6 +8419,8 @@ dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool r return; if (bus->sih) { + /* In Win10, system will be BSOD if using "sysprep" to do OS image */ + /* Skip this will not cause the BSOD. 
*/ #if !defined(BCMLXSDMMC) if (bus->dhd) { dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); @@ -8648,6 +8463,8 @@ dhdsdio_disconnect(void *ptr) if (bus) { ASSERT(bus->dhd); + /* Advertise bus cleanup during rmmod */ + dhdsdio_advertise_bus_cleanup(bus->dhd); dhdsdio_release(bus, bus->dhd->osh); } @@ -8666,11 +8483,51 @@ static int dhdsdio_suspend(void *context) { int ret = 0; - - dhd_bus_t *bus = (dhd_bus_t*)context; #ifdef SUPPORT_P2P_GO_PS int wait_time = 0; +#endif /* SUPPORT_P2P_GO_PS */ + dhd_bus_t *bus = (dhd_bus_t*)context; + unsigned long flags; + + DHD_ERROR(("%s Enter\n", __FUNCTION__)); + if (bus->dhd == NULL) { + DHD_ERROR(("bus not inited\n")); + return BCME_ERROR; + } + if (bus->dhd->prot == NULL) { + DHD_ERROR(("prot is not inited\n")); + return BCME_ERROR; + } + + if (bus->dhd->up == FALSE) { + return BCME_OK; + } + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + if (bus->dhd->busstate != DHD_BUS_DATA && bus->dhd->busstate != DHD_BUS_SUSPEND) { + DHD_ERROR(("not in a readystate to LPBK is not inited\n")); + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + return BCME_ERROR; + } + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + if (bus->dhd->dongle_reset) { + DHD_ERROR(("Dongle is in reset state.\n")); + return -EIO; + } + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_SUSPEND; + if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) { + DHD_ERROR(("Tx Request is not ended\n")); + bus->dhd->busstate = DHD_BUS_DATA; + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + return -EBUSY; + } + DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd); + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + +#ifdef SUPPORT_P2P_GO_PS if (bus->idletime > 0) { wait_time = msecs_to_jiffies(bus->idletime * dhd_watchdog_ms); } @@ -8681,23 +8538,49 @@ dhdsdio_suspend(void *context) if (SLPAUTO_ENAB(bus) && (!ret) && (bus->dhd->up) && (bus->dhd->op_mode != DHD_FLAG_HOSTAP_MODE)) { if (wait_event_timeout(bus->bus_sleep, bus->sleeping, wait_time) == 0) { if (!bus->sleeping) { - return 1; + ret = 1; 
} } } #endif /* SUPPORT_P2P_GO_PS */ + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + if (ret) { + bus->dhd->busstate = DHD_BUS_DATA; + } + DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd); + dhd_os_busbusy_wake(bus->dhd); + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); return ret; } static int dhdsdio_resume(void *context) { -#if defined(OOB_INTR_ONLY) dhd_bus_t *bus = (dhd_bus_t*)context; + ulong flags; + DHD_ERROR(("%s Enter\n", __FUNCTION__)); + + if (bus->dhd->up == FALSE) { + return BCME_OK; + } + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd); + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + +#if defined(OOB_INTR_ONLY) if (dhd_os_check_if_up(bus->dhd)) bcmsdh_oob_intr_set(bus->sdh, TRUE); #endif + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd); + bus->dhd->busstate = DHD_BUS_DATA; + dhd_os_busbusy_wake(bus->dhd); + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + return 0; } @@ -8842,6 +8725,11 @@ dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path) void *image = NULL; uint8 *memblock = NULL, *memptr; uint8 *memptr_tmp = NULL; // terence: check downloaded firmware is correct + uint memblock_size = MEMBLOCK; +#ifdef DHD_DEBUG_DOWNLOADTIME + unsigned long initial_jiffies = 0; + uint firmware_sz = 0; +#endif DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, pfw_path)); @@ -8851,9 +8739,14 @@ dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path) goto err; } - memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); + /* Update the dongle image download block size depending on the F1 block size */ + if (sd_f1_blocksize == 512) + memblock_size = MAX_MEMBLOCK; + + memptr = memblock = MALLOC(bus->dhd->osh, memblock_size + DHD_SDALIGN); if (memblock == NULL) { - DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, + memblock_size)); goto err; } if 
(dhd_msg_level & DHD_TRACE_VAL) { @@ -8866,8 +8759,12 @@ dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path) if ((uint32)(uintptr)memblock % DHD_SDALIGN) memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN)); +#ifdef DHD_DEBUG_DOWNLOADTIME + initial_jiffies = jiffies; +#endif + /* Download image */ - while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) { + while ((len = dhd_os_get_image_block((char*)memptr, memblock_size, image))) { // terence 20150412: fix for firmware failed to download if (bus->dhd->conf->chip == BCM43340_CHIP_ID || bus->dhd->conf->chip == BCM43341_CHIP_ID) { @@ -8895,7 +8792,7 @@ dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path) bcmerror = dhdsdio_membytes(bus, TRUE, offset, memptr, len); if (bcmerror) { DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", - __FUNCTION__, bcmerror, MEMBLOCK, offset)); + __FUNCTION__, bcmerror, memblock_size, offset)); goto err; } @@ -8912,12 +8809,21 @@ dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path) } else DHD_INFO(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__)); } - offset += MEMBLOCK; + + offset += memblock_size; +#ifdef DHD_DEBUG_DOWNLOADTIME + firmware_sz += len; +#endif } +#ifdef DHD_DEBUG_DOWNLOADTIME + DHD_ERROR(("Firmware download time for %u bytes: %u ms\n", + firmware_sz, jiffies_to_msecs(jiffies - initial_jiffies))); +#endif + err: if (memblock) - MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN); + MFREE(bus->dhd->osh, memblock, memblock_size + DHD_SDALIGN); if (dhd_msg_level & DHD_TRACE_VAL) { if (memptr_tmp) MFREE(bus->dhd->osh, memptr_tmp, MEMBLOCK + DHD_SDALIGN); @@ -8929,6 +8835,131 @@ err: return bcmerror; } +#ifdef DHD_UCODE_DOWNLOAD +/* Currently supported only for the chips in which ucode RAM is AXI addressable */ +static uint32 +dhdsdio_ucode_base(struct dhd_bus *bus) +{ + uint32 ucode_base = 0; + + switch ((uint16)bus->sih->chip) { + case BCM43012_CHIP_ID: + ucode_base = 0xE8020000; + 
break; + default: + DHD_ERROR(("%s: Unsupported!\n", __func__)); + break; + } + + return ucode_base; +} + +static int +dhdsdio_download_ucode_file(struct dhd_bus *bus, char *ucode_path) +{ + int bcmerror = -1; + int offset = 0; + int len; + uint32 ucode_base; + void *image = NULL; + uint8 *memblock = NULL, *memptr; + uint memblock_size = MEMBLOCK; +#ifdef DHD_DEBUG_DOWNLOADTIME + unsigned long initial_jiffies = 0; + uint firmware_sz = 0; +#endif + + DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, ucode_path)); + + ucode_base = dhdsdio_ucode_base(bus); + + image = dhd_os_open_image(ucode_path); + if (image == NULL) + goto err; + + /* Update the dongle image download block size depending on the F1 block size */ + if (sd_f1_blocksize == 512) + memblock_size = MAX_MEMBLOCK; + + memptr = memblock = MALLOC(bus->dhd->osh, memblock_size + DHD_SDALIGN); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, + memblock_size)); + goto err; + } + if ((uint32)(uintptr)memblock % DHD_SDALIGN) + memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN)); + +#ifdef DHD_DEBUG_DOWNLOADTIME + initial_jiffies = jiffies; +#endif + + /* Download image */ + while ((len = dhd_os_get_image_block((char*)memptr, memblock_size, image))) { + if (len < 0) { + DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len)); + bcmerror = BCME_ERROR; + goto err; + } + + bcmerror = dhdsdio_membytes(bus, TRUE, (ucode_base + offset), memptr, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, memblock_size, offset)); + goto err; + } + + offset += memblock_size; +#ifdef DHD_DEBUG_DOWNLOADTIME + firmware_sz += len; +#endif + } + +#ifdef DHD_DEBUG_DOWNLOADTIME + DHD_ERROR(("ucode download time for %u bytes: %u ms\n", + firmware_sz, jiffies_to_msecs(jiffies - initial_jiffies))); +#endif + +err: + if (memblock) + MFREE(bus->dhd->osh, memblock, memblock_size + DHD_SDALIGN); + + 
if (image) + dhd_os_close_image(image); + + return bcmerror; +} + +void +dhd_bus_ucode_download(struct dhd_bus *bus) +{ + uint32 shaddr = 0, shdata = 0; + + shaddr = bus->dongle_ram_base + bus->ramsize - 4; + dhdsdio_membytes(bus, FALSE, shaddr, (uint8 *)&shdata, 4); + + DHD_TRACE(("%s: shdata:[0x%08x :0x%08x]\n", __func__, shaddr, shdata)); + + if (shdata == UCODE_DOWNLOAD_REQUEST) + { + DHD_ERROR(("%s: Received ucode download request!\n", __func__)); + + /* Download the ucode */ + if (!dhd_get_ucode_path(bus->dhd)) { + DHD_ERROR(("%s: bus->uc_path not set!\n", __func__)); + return; + } + dhdsdio_download_ucode_file(bus, dhd_get_ucode_path(bus->dhd)); + + DHD_ERROR(("%s: Ucode downloaded successfully!\n", __func__)); + + shdata = UCODE_DOWNLOAD_COMPLETE; + dhdsdio_membytes(bus, TRUE, shaddr, (uint8 *)&shdata, 4); + } +} + +#endif /* DHD_UCODE_DOWNLOAD */ + static int dhdsdio_download_nvram(struct dhd_bus *bus) { @@ -8978,8 +9009,7 @@ dhdsdio_download_nvram(struct dhd_bus *bus) DHD_ERROR(("%s: error downloading vars: %d\n", __FUNCTION__, bcmerror)); } - } - else { + } else { DHD_ERROR(("%s: error reading nvram file: %d\n", __FUNCTION__, len)); bcmerror = BCME_SDIO_ERROR; @@ -9027,8 +9057,7 @@ _dhdsdio_download_firmware(struct dhd_bus *bus) #else goto err; #endif - } - else { + } else { embed = FALSE; dlok = TRUE; } @@ -9039,8 +9068,7 @@ _dhdsdio_download_firmware(struct dhd_bus *bus) if (dhdsdio_download_code_array(bus)) { DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__)); goto err; - } - else { + } else { dlok = TRUE; } } @@ -9072,7 +9100,7 @@ err: static int dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes, - void *pkt, bcmsdh_cmplt_fn_t complete, void *handle) + void *pkt, bcmsdh_cmplt_fn_t complete_fn, void *handle) { int status; @@ -9081,14 +9109,14 @@ dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf return BCME_NODEVICE; } - status = bcmsdh_recv_buf(bus->sdh, 
addr, fn, flags, buf, nbytes, pkt, complete, handle); + status = bcmsdh_recv_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete_fn, handle); return status; } static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes, - void *pkt, bcmsdh_cmplt_fn_t complete, void *handle, int max_retry) + void *pkt, bcmsdh_cmplt_fn_t complete_fn, void *handle, int max_retry) { int ret; int i = 0; @@ -9103,7 +9131,7 @@ dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf sdh = bus->sdh; do { ret = bcmsdh_send_buf(bus->sdh, addr, fn, flags, buf, nbytes, - pkt, complete, handle); + pkt, complete_fn, handle); bus->f2txdata++; ASSERT(ret != BCME_PENDING); @@ -9169,10 +9197,10 @@ dhd_bus_pub(struct dhd_bus *bus) return bus->dhd; } -void * +const void * dhd_bus_sih(struct dhd_bus *bus) { - return (void *)bus->sih; + return (const void *)bus->sih; } void * @@ -9193,16 +9221,46 @@ dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val) bus->dotxinrx = val; } +/* + * dhdsdio_advertise_bus_cleanup advertises that clean up is under progress + * to other bus user contexts like Tx, Rx, IOVAR, WD etc and it waits for other contexts + * to gracefully exit. All the bus usage contexts before marking busstate as busy, will check for + * whether the busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS, if so + * they will exit from there itself without marking dhd_bus_busy_state as BUSY. 
+ */ +static void +dhdsdio_advertise_bus_cleanup(dhd_pub_t *dhdp) +{ + unsigned long flags; + int timeleft; + + DHD_LINUX_GENERAL_LOCK(dhdp, flags); + dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS; + DHD_LINUX_GENERAL_UNLOCK(dhdp, flags); + + timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state); + if ((timeleft == 0) || (timeleft == 1)) { + DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n", + __FUNCTION__, dhdp->dhd_bus_busy_state)); + ASSERT(0); + } + + return; +} + int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) { int bcmerror = 0; dhd_bus_t *bus; + unsigned long flags; bus = dhdp->bus; if (flag == TRUE) { if (!bus->dhd->dongle_reset) { + DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__)); + dhdsdio_advertise_bus_cleanup(bus->dhd); dhd_os_sdlock(dhdp); dhd_os_wd_timer(dhdp, 0); #if !defined(IGNORE_ETH0_DOWN) @@ -9228,6 +9286,10 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) dhd_txglom_enable(dhdp, FALSE); dhd_os_sdunlock(dhdp); + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_DOWN; + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); + printf("%s: WLAN OFF DONE\n", __FUNCTION__); /* App can now remove power from device */ } else @@ -9247,6 +9309,10 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh, (uint32 *)SI_ENUM_BASE, bus->cl_devid)) { + + DHD_LINUX_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_DOWN; + DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags); /* Attempt to download binary to the dongle */ if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) && dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh) >= 0) { @@ -9292,7 +9358,6 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) dhd_os_sdunlock(dhdp); } else { - bcmerror = BCME_SDIO_ERROR; printf("%s called when dongle is not in reset\n", __FUNCTION__); printf("Will call dhd_bus_start instead\n"); @@ -9432,7 +9497,7 @@ dhd_bus_pktq_flush(dhd_pub_t *dhdp) dhd_tcpack_info_tbl_clean(bus->dhd); #endif /* 
DHDTCPACK_SUPPRESS */ /* Clear the data packet queues */ - pktq_flush(dhdp->osh, &bus->txq, TRUE, NULL, 0); + pktq_flush(dhdp->osh, &bus->txq, TRUE); } } @@ -9493,4 +9558,455 @@ void dhd_sdio_reg_write(void *h, uint32 addr, uint32 val) dhd_os_sdunlock(bus->dhd); } + #endif /* DEBUGGER */ + + +#if defined(BT_OVER_SDIO) +uint8 dhd_bus_cfg_read(void *h, uint fun_num, uint32 addr, int *err) +{ + uint8 intrd; + dhd_pub_t *dhdp = (dhd_pub_t *)h; + dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus; + + dhd_os_sdlock(bus->dhd); + + intrd = bcmsdh_cfg_read(bus->sdh, fun_num, addr, err); + + dhd_os_sdunlock(bus->dhd); + + return intrd; +} EXPORT_SYMBOL(dhd_bus_cfg_read); + +void dhd_bus_cfg_write(void *h, uint fun_num, uint32 addr, uint8 val, int *err) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)h; + dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus; + + dhd_os_sdlock(bus->dhd); + + bcmsdh_cfg_write(bus->sdh, fun_num, addr, val, err); + + dhd_os_sdunlock(bus->dhd); + +} EXPORT_SYMBOL(dhd_bus_cfg_write); + +static int +extract_hex_field(char * line, uint16 start_pos, uint16 num_chars, uint16 * value) +{ + char field [8]; + + strncpy(field, line + start_pos, num_chars); + field [num_chars] = '\0'; + + return (sscanf (field, "%hX", value) == 1); +} + +static int +read_more_btbytes(struct dhd_bus *bus, void * file, char *line, int * addr_mode, uint16 * hi_addr, + uint32 * dest_addr, uint8 *data_bytes, uint32 * num_bytes) +{ + int str_len; + uint16 num_data_bytes, addr, data_pos, type, w, i; + uint32 abs_base_addr32 = 0; + *num_bytes = 0; + + while (!*num_bytes) + { + str_len = dhd_os_gets_image(bus->dhd, line, BTFW_MAX_STR_LEN, file); + + DHD_TRACE(("%s: Len :0x%x %s\n", __FUNCTION__, str_len, line)); + + if (str_len == 0) { + break; + } else if (str_len > 9) { + extract_hex_field(line, 1, 2, &num_data_bytes); + extract_hex_field(line, 3, 4, &addr); + extract_hex_field(line, 7, 2, &type); + + data_pos = 9; + for (i = 0; i < num_data_bytes; i++) { + extract_hex_field(line, data_pos, 2, &w); + data_bytes [i] 
= (uint8)(w & 0x00FF); + data_pos += 2; + } + + if (type == BTFW_HEX_LINE_TYPE_EXTENDED_ADDRESS) { + *hi_addr = (data_bytes [0] << 8) | data_bytes [1]; + *addr_mode = BTFW_ADDR_MODE_EXTENDED; + } else if (type == BTFW_HEX_LINE_TYPE_EXTENDED_SEGMENT_ADDRESS) { + *hi_addr = (data_bytes [0] << 8) | data_bytes [1]; + *addr_mode = BTFW_ADDR_MODE_SEGMENT; + } else if (type == BTFW_HEX_LINE_TYPE_ABSOLUTE_32BIT_ADDRESS) { + abs_base_addr32 = (data_bytes [0] << 24) | (data_bytes [1] << 16) | + (data_bytes [2] << 8) | data_bytes [3]; + *addr_mode = BTFW_ADDR_MODE_LINEAR32; + } else if (type == BTFW_HEX_LINE_TYPE_DATA) { + *dest_addr = addr; + if (*addr_mode == BTFW_ADDR_MODE_EXTENDED) + *dest_addr += (*hi_addr << 16); + else if (*addr_mode == BTFW_ADDR_MODE_SEGMENT) + *dest_addr += (*hi_addr << 4); + else if (*addr_mode == BTFW_ADDR_MODE_LINEAR32) + *dest_addr += abs_base_addr32; + *num_bytes = num_data_bytes; + } + } + } + return (*num_bytes > 0); +} + +static int +_dhdsdio_download_btfw(struct dhd_bus *bus) +{ + int bcm_error = -1; + void *image = NULL; + uint8 *mem_blk = NULL, *mem_ptr = NULL, *data_ptr = NULL; + + + uint32 offset_addr = 0, offset_len = 0, bytes_to_write = 0; + + char *line = NULL; + uint32 dest_addr = 0, num_bytes; + uint16 hiAddress = 0; + uint32 start_addr, start_data, end_addr, end_data, i, index, pad; + uint32 bt2wlan_pwrup_adr; + + int addr_mode = BTFW_ADDR_MODE_EXTENDED; + + /* Out immediately if no image to download */ + if ((bus->btfw_path == NULL) || (bus->btfw_path[0] == '\0')) { + return 0; + } + + image = dhd_os_open_image(bus->btfw_path); + if (image == NULL) + goto err; + + mem_ptr = mem_blk = MALLOC(bus->dhd->osh, BTFW_DOWNLOAD_BLK_SIZE + DHD_SDALIGN); + if (mem_blk == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, + BTFW_DOWNLOAD_BLK_SIZE + DHD_SDALIGN)); + goto err; + } + if ((uint32)(uintptr)mem_blk % DHD_SDALIGN) + mem_ptr += (DHD_SDALIGN - ((uint32)(uintptr)mem_blk % DHD_SDALIGN)); + + data_ptr = 
MALLOC(bus->dhd->osh, BTFW_DOWNLOAD_BLK_SIZE - 8); + if (data_ptr == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, + BTFW_DOWNLOAD_BLK_SIZE - 8)); + goto err; + } + /* Write to BT register to hold WLAN wake high during BT FW download */ + bt2wlan_pwrup_adr = BTMEM_OFFSET + BT2WLAN_PWRUP_ADDR; + bcmsdh_reg_write(bus->sdh, bt2wlan_pwrup_adr, 4, BT2WLAN_PWRUP_WAKE); + /* + * Wait for at least 2msec for the clock to be ready/Available. + */ + OSL_DELAY(2000); + + line = MALLOC(bus->dhd->osh, BTFW_MAX_STR_LEN); + if (line == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", + __FUNCTION__, BTFW_MAX_STR_LEN)); + goto err; + } + memset(line, 0, BTFW_MAX_STR_LEN); + + while (read_more_btbytes (bus, image, line, &addr_mode, &hiAddress, &dest_addr, + data_ptr, &num_bytes)) { + + DHD_TRACE(("read %d bytes at address %08X\n", num_bytes, dest_addr)); + + start_addr = BTMEM_OFFSET + dest_addr; + index = 0; + + /* Make sure the start address is 4 byte aligned to avoid alignment issues + * with SD host controllers + */ + if (!ISALIGNED(start_addr, 4)) { + pad = start_addr % 4; + start_addr = ROUNDDN(start_addr, 4); + start_data = bcmsdh_reg_read(bus->sdh, start_addr, 4); + for (i = 0; i < pad; i++, index++) { + mem_ptr[index] = (uint8)((uint8 *)&start_data)[i]; + } + } + bcopy(data_ptr, &(mem_ptr[index]), num_bytes); + index += num_bytes; + + /* Make sure the length is multiple of 4bytes to avoid alignment issues + * with SD host controllers + */ + end_addr = start_addr + index; + if (!ISALIGNED(end_addr, 4)) { + end_addr = ROUNDDN(end_addr, 4); + end_data = bcmsdh_reg_read(bus->sdh, end_addr, 4); + for (i = (index % 4); i < 4; i++, index++) { + mem_ptr[index] = (uint8)((uint8 *)&end_data)[i]; + } + } + + offset_addr = start_addr & 0xFFF; + offset_len = offset_addr + index; + if (offset_len <= 0x1000) { + bcm_error = dhdsdio_membytes(bus, TRUE, start_addr, mem_ptr, index); + if (bcm_error) { + DHD_ERROR(("%s: error %d on writing %d 
membytes at 0x%08x\n", + __FUNCTION__, bcm_error, num_bytes, start_addr)); + goto err; + } + } + else { + bytes_to_write = 0x1000 - offset_addr; + bcm_error = dhdsdio_membytes(bus, TRUE, start_addr, mem_ptr, + bytes_to_write); + if (bcm_error) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcm_error, num_bytes, start_addr)); + goto err; + } + + OSL_DELAY(10000); + + bcm_error = dhdsdio_membytes(bus, TRUE, (start_addr + bytes_to_write), + (mem_ptr + bytes_to_write), (index - bytes_to_write)); + if (bcm_error) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcm_error, num_bytes, start_addr)); + goto err; + } + } + memset(line, 0, BTFW_MAX_STR_LEN); + } + + bcm_error = 0; +err: + if (mem_blk) + MFREE(bus->dhd->osh, mem_blk, BTFW_DOWNLOAD_BLK_SIZE + DHD_SDALIGN); + + if (data_ptr) + MFREE(bus->dhd->osh, data_ptr, BTFW_DOWNLOAD_BLK_SIZE - 8); + + if (line) + MFREE(bus->dhd->osh, line, BTFW_MAX_STR_LEN); + + if (image) + dhd_os_close_image(image); + + return bcm_error; +} + +static int +dhdsdio_download_btfw(struct dhd_bus *bus, osl_t *osh, void *sdh) +{ + int ret; + + DHD_TRACE(("%s: btfw path=%s\n", + __FUNCTION__, bus->btfw_path)); + DHD_OS_WAKE_LOCK(bus->dhd); + dhd_os_sdlock(bus->dhd); + + /* Download the firmware */ + ret = _dhdsdio_download_btfw(bus); + + dhd_os_sdunlock(bus->dhd); + DHD_OS_WAKE_UNLOCK(bus->dhd); + + return ret; +} + +int +dhd_bus_download_btfw(struct dhd_bus *bus, osl_t *osh, + char *pbtfw_path) +{ + int ret; + + bus->btfw_path = pbtfw_path; + + ret = dhdsdio_download_btfw(bus, osh, bus->sdh); + + return ret; +} +#endif /* defined (BT_OVER_SDIO) */ + +void +dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf) +{ + trap_t *tr = &bus->dhd->last_trap_info; + + bcm_bprintf(strbuf, + "Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x," + "lp 0x%x, rpc 0x%x Trap offset 0x%x, " + "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, " + "r4 0x%x, r5 0x%x, r6 0x%x, r7 
0x%x\n\n", + ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr), + ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc), + ltoh32(bus->dongle_trap_addr), + ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3), + ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7)); + +} + +static int +dhd_bcmsdh_send_buffer(void *bus, uint8 *frame, uint16 len) +{ + int ret = -1; + + ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(((dhd_bus_t*)bus)->sdh), + SDIO_FUNC_2, F2SYNC, frame, len, NULL, NULL, NULL, TXRETRIES); + + if (ret == BCME_OK) + ((dhd_bus_t*)bus)->tx_seq = (((dhd_bus_t*)bus)->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; + + return ret; +} + +/* Function to set the min res mask depending on the chip ID used */ +bool +dhd_bus_set_default_min_res_mask(struct dhd_bus *bus) +{ + if ((bus == NULL) || (bus->sih == NULL)) { + DHD_ERROR(("%s(): Invalid Arguments \r\n", __FUNCTION__)); + return FALSE; + } + + switch (bus->sih->chip) { + case BCM4339_CHIP_ID: + bcmsdh_reg_write(bus->sdh, SI_ENUM_BASE + 0x618, 4, 0x3fcaf377); + if (bcmsdh_regfail(bus->sdh)) { + DHD_ERROR(("%s:%d Setting min_res_mask failed\n", __FUNCTION__, __LINE__)); + return FALSE; + } + break; + + case BCM43012_CHIP_ID: + bcmsdh_reg_write(bus->sdh, + si_get_pmu_reg_addr(bus->sih, OFFSETOF(pmuregs_t, min_res_mask)), + 4, DEFAULT_43012_MIN_RES_MASK); + if (bcmsdh_regfail(bus->sdh)) { + DHD_ERROR(("%s:%d Setting min_res_mask failed\n", __FUNCTION__, __LINE__)); + return FALSE; + } + break; + + default: + DHD_ERROR(("%s: Unhandled chip id\n", __FUNCTION__)); + return FALSE; + } + + return TRUE; +} + +/* Function to reset PMU registers */ +void +dhd_bus_pmu_reg_reset(dhd_pub_t *dhdp) +{ + struct dhd_bus *bus = dhdp->bus; + bcmsdh_reg_write(bus->sdh, si_get_pmu_reg_addr(bus->sih, + OFFSETOF(pmuregs_t, swscratch)), 4, 0x0); + if (bcmsdh_regfail(bus->sdh)) { + DHD_ERROR(("%s:%d Setting min_res_mask failed\n", __FUNCTION__, __LINE__)); + } +} + + +#ifdef DHD_ULP +/* Function to disable 
console messages on entering ULP mode */ +void +dhd_bus_ulp_disable_console(dhd_pub_t *dhdp) +{ +#ifdef DHD_DEBUG + DHD_ERROR(("Flushing and disabling console messages\n")); + + /* Save the console print interval */ + dhd_ulp_save_console_interval(dhdp); + + /* Flush the console buffer before disabling */ + dhdsdio_readconsole(dhdp->bus); + dhd_console_ms = 0; +#endif /* DHD_DEBUG */ +} + +/* Function for redownloading firmaware */ +static int +dhd_bus_ulp_reinit_fw(dhd_bus_t *bus) +{ + int bcmerror = 0; + + /* After firmware redownload tx/rx seq are reset accordingly these values are + reset on DHD side tx_max is initially set to 4, which later is updated by FW + */ + bus->tx_seq = bus->rx_seq = 0; + bus->tx_max = 4; + + if (dhd_bus_download_firmware(bus, bus->dhd->osh, + bus->fw_path, bus->nv_path) >= 0) { + + /* Re-init bus, enable F2 transfer */ + bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE); + if (bcmerror == BCME_OK) { + bus->dhd->up = TRUE; + dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); + + dhd_ulp_set_ulp_state(bus->dhd, DHD_ULP_READY); +#if defined(OOB_INTR_ONLY) + dhd_enable_oob_intr(bus, TRUE); + bcmsdh_oob_intr_set(bus->sdh, TRUE); +#endif +#ifdef DHD_DEBUG + /* Re-enable the console messages on FW redownload to default value */ + dhd_ulp_restore_console_interval(bus->dhd); +#endif /* DHD_DEBUG */ + } else { + DHD_ERROR(("bus init failed\n")); + dhd_bus_stop(bus, FALSE); + dhdsdio_release_dongle(bus, bus->dhd->osh, + TRUE, FALSE); + } + } else + bcmerror = BCME_SDIO_ERROR; + + return bcmerror; +} +#endif /* DHD_ULP */ + +int +dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read) +{ + int bcmerror = 0; + struct dhd_bus *bus = dhdp->bus; + + if (read) { + *data = (int32)bcmsdh_reg_read(bus->sdh, addr, size); + } else { + bcmsdh_reg_write(bus->sdh, addr, size, *data); + } + + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + + return bcmerror; +} + +int dhd_get_idletime(dhd_pub_t *dhd) +{ + return 
dhd->bus->idletime; +} + +#ifdef DHD_WAKE_STATUS +wake_counts_t* +dhd_bus_get_wakecount(dhd_pub_t *dhd) +{ + if (!dhd->bus) { + return NULL; + } + return &dhd->bus->wake_counts; +} +int +dhd_bus_get_bus_wake(dhd_pub_t *dhd) +{ + return bcmsdh_set_get_wake(dhd->bus->sdh, 0); +} +#endif /* DHD_WAKE_STATUS */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_static_buf.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_static_buf.c index 36cca5b32551..a2acf0a31d24 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_static_buf.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_static_buf.c @@ -6,68 +6,87 @@ #include #include +#define DHD_STATIC_VERSION_STR "1.579.77.41.1" + +#define BCMDHD_SDIO +#define BCMDHD_PCIE + enum dhd_prealloc_index { DHD_PREALLOC_PROT = 0, - DHD_PREALLOC_RXBUF, - DHD_PREALLOC_DATABUF, - DHD_PREALLOC_OSL_BUF, - DHD_PREALLOC_SKB_BUF, +#if defined(BCMDHD_SDIO) + DHD_PREALLOC_RXBUF = 1, + DHD_PREALLOC_DATABUF = 2, +#endif + DHD_PREALLOC_OSL_BUF = 3, + DHD_PREALLOC_SKB_BUF = 4, DHD_PREALLOC_WIPHY_ESCAN0 = 5, DHD_PREALLOC_WIPHY_ESCAN1 = 6, DHD_PREALLOC_DHD_INFO = 7, DHD_PREALLOC_DHD_WLFC_INFO = 8, +#ifdef BCMDHD_PCIE DHD_PREALLOC_IF_FLOW_LKUP = 9, +#endif DHD_PREALLOC_MEMDUMP_BUF = 10, DHD_PREALLOC_MEMDUMP_RAM = 11, DHD_PREALLOC_DHD_WLFC_HANGER = 12, + DHD_PREALLOC_PKTID_MAP = 13, + DHD_PREALLOC_PKTID_MAP_IOCTL = 14, + DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15, + DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16, + DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17, + DHD_PREALLOC_STAT_REPORT_BUF = 18, + DHD_PREALLOC_WL_ESCAN_INFO = 19, + DHD_PREALLOC_FW_VERBOSE_RING = 20, + DHD_PREALLOC_FW_EVENT_RING = 21, + DHD_PREALLOC_DHD_EVENT_RING = 22, + DHD_PREALLOC_NAN_EVENT_RING = 23, DHD_PREALLOC_MAX }; #define STATIC_BUF_MAX_NUM 20 #define STATIC_BUF_SIZE (PAGE_SIZE*2) -#define DHD_PREALLOC_PROT_SIZE (16 * 1024) -#define DHD_PREALLOC_RXBUF_SIZE (24 * 1024) -#define DHD_PREALLOC_DATABUF_SIZE (64 * 1024) -#define DHD_PREALLOC_OSL_BUF_SIZE 
(STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE) +#define DHD_PREALLOC_PROT_SIZE (16 * 1024) +#define DHD_PREALLOC_RXBUF_SIZE (24 * 1024) +#define DHD_PREALLOC_DATABUF_SIZE (64 * 1024) +#define DHD_PREALLOC_OSL_BUF_SIZE (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE) #define DHD_PREALLOC_WIPHY_ESCAN0_SIZE (64 * 1024) -#define DHD_PREALLOC_DHD_INFO_SIZE (24 * 1024) -#define DHD_PREALLOC_DHD_WLFC_HANGER_SIZE (64 * 1024) +#define DHD_PREALLOC_DHD_INFO_SIZE (30 * 1024) +#define DHD_PREALLOC_MEMDUMP_RAM_SIZE (770 * 1024) +#define DHD_PREALLOC_DHD_WLFC_HANGER_SIZE (73 * 1024) +#define DHD_PREALLOC_WL_ESCAN_INFO_SIZE (66 * 1024) #ifdef CONFIG_64BIT #define DHD_PREALLOC_IF_FLOW_LKUP_SIZE (20 * 1024 * 2) #else #define DHD_PREALLOC_IF_FLOW_LKUP_SIZE (20 * 1024) #endif +#define FW_VERBOSE_RING_SIZE (64 * 1024) +#define FW_EVENT_RING_SIZE (64 * 1024) +#define DHD_EVENT_RING_SIZE (64 * 1024) +#define NAN_EVENT_RING_SIZE (64 * 1024) #if defined(CONFIG_64BIT) -#define WLAN_DHD_INFO_BUF_SIZE (24 * 1024) -#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024) +#define WLAN_DHD_INFO_BUF_SIZE (24 * 1024) +#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024) #define WLAN_DHD_IF_FLOW_LKUP_SIZE (64 * 1024) #else -#define WLAN_DHD_INFO_BUF_SIZE (16 * 1024) -#define WLAN_DHD_WLFC_BUF_SIZE (24 * 1024) +#define WLAN_DHD_INFO_BUF_SIZE (16 * 1024) +#define WLAN_DHD_WLFC_BUF_SIZE (24 * 1024) #define WLAN_DHD_IF_FLOW_LKUP_SIZE (20 * 1024) #endif /* CONFIG_64BIT */ -#define WLAN_DHD_MEMDUMP_SIZE (800 * 1024) +#define WLAN_DHD_MEMDUMP_SIZE (800 * 1024) -#ifdef CONFIG_BCMDHD_PCIE #define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1) #define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2) #define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4) -#define DHD_SKB_1PAGE_BUF_NUM 0 -#define DHD_SKB_2PAGE_BUF_NUM 64 -#define DHD_SKB_4PAGE_BUF_NUM 0 -#else -#define DHD_SKB_HDRSIZE 336 -#define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE) -#define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE) -#define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE) - #define 
DHD_SKB_1PAGE_BUF_NUM 8 +#ifdef BCMDHD_PCIE +#define DHD_SKB_2PAGE_BUF_NUM 64 +#elif defined(BCMDHD_SDIO) #define DHD_SKB_2PAGE_BUF_NUM 8 +#endif #define DHD_SKB_4PAGE_BUF_NUM 1 -#endif /* CONFIG_BCMDHD_PCIE */ /* The number is defined in linux_osl.c * WLAN_SKB_1_2PAGE_BUF_NUM => STATIC_PKT_1_2PAGE_NUM @@ -86,21 +105,29 @@ void *wlan_static_scan_buf1 = NULL; void *wlan_static_dhd_info_buf = NULL; void *wlan_static_dhd_wlfc_info_buf = NULL; void *wlan_static_if_flow_lkup = NULL; +void *wlan_static_dhd_memdump_ram_buf = NULL; void *wlan_static_dhd_wlfc_hanger_buf = NULL; +void *wlan_static_wl_escan_info_buf = NULL; +void *wlan_static_fw_verbose_ring_buf = NULL; +void *wlan_static_fw_event_ring_buf = NULL; +void *wlan_static_dhd_event_ring_buf = NULL; +void *wlan_static_nan_event_ring_buf = NULL; static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM]; void *dhd_wlan_mem_prealloc(int section, unsigned long size) { - printk("%s: sectoin %d, %ld\n", __FUNCTION__, section, size); + pr_err("%s: sectoin %d, %ld\n", __func__, section, size); if (section == DHD_PREALLOC_PROT) return wlan_static_prot; +#if defined(BCMDHD_SDIO) if (section == DHD_PREALLOC_RXBUF) return wlan_static_rxbuf; if (section == DHD_PREALLOC_DATABUF) return wlan_static_databuf; +#endif /* BCMDHD_SDIO */ if (section == DHD_PREALLOC_SKB_BUF) return wlan_static_skb; @@ -113,7 +140,7 @@ void *dhd_wlan_mem_prealloc(int section, unsigned long size) if (section == DHD_PREALLOC_OSL_BUF) { if (size > DHD_PREALLOC_OSL_BUF_SIZE) { - pr_err("request OSL_BUF(%lu) is bigger than static size(%ld).\n", + pr_err("request OSL_BUF(%lu) > %ld\n", size, DHD_PREALLOC_OSL_BUF_SIZE); return NULL; } @@ -122,7 +149,7 @@ void *dhd_wlan_mem_prealloc(int section, unsigned long size) if (section == DHD_PREALLOC_DHD_INFO) { if (size > DHD_PREALLOC_DHD_INFO_SIZE) { - pr_err("request DHD_INFO size(%lu) is bigger than static size(%d).\n", + pr_err("request DHD_INFO size(%lu) > %d\n", size, DHD_PREALLOC_DHD_INFO_SIZE); return NULL; } @@ 
-130,34 +157,91 @@ void *dhd_wlan_mem_prealloc(int section, unsigned long size) } if (section == DHD_PREALLOC_DHD_WLFC_INFO) { if (size > WLAN_DHD_WLFC_BUF_SIZE) { - pr_err("request DHD_WLFC_INFO size(%lu) is bigger than static size(%d).\n", + pr_err("request DHD_WLFC_INFO size(%lu) > %d\n", size, WLAN_DHD_WLFC_BUF_SIZE); return NULL; } return wlan_static_dhd_wlfc_info_buf; } +#ifdef BCMDHD_PCIE if (section == DHD_PREALLOC_IF_FLOW_LKUP) { if (size > DHD_PREALLOC_IF_FLOW_LKUP_SIZE) { - pr_err("request DHD_IF_FLOW_LKUP size(%lu) is bigger than static size(%d).\n", + pr_err("request DHD_IF_FLOW_LKUP size(%lu) > %d\n", size, DHD_PREALLOC_IF_FLOW_LKUP_SIZE); return NULL; } return wlan_static_if_flow_lkup; } +#endif /* BCMDHD_PCIE */ + if (section == DHD_PREALLOC_MEMDUMP_RAM) { + if (size > DHD_PREALLOC_MEMDUMP_RAM_SIZE) { + pr_err("request DHD_PREALLOC_MEMDUMP_RAM_SIZE(%lu) > %d\n", + size, DHD_PREALLOC_MEMDUMP_RAM_SIZE); + return NULL; + } + + return wlan_static_dhd_memdump_ram_buf; + } if (section == DHD_PREALLOC_DHD_WLFC_HANGER) { if (size > DHD_PREALLOC_DHD_WLFC_HANGER_SIZE) { - pr_err("request DHD_WLFC_HANGER size(%lu) is bigger than static size(%d).\n", + pr_err("request DHD_WLFC_HANGER size(%lu) > %d\n", size, DHD_PREALLOC_DHD_WLFC_HANGER_SIZE); return NULL; } return wlan_static_dhd_wlfc_hanger_buf; } + if (section == DHD_PREALLOC_WL_ESCAN_INFO) { + if (size > DHD_PREALLOC_WL_ESCAN_INFO_SIZE) { + pr_err("request DHD_PREALLOC_WL_ESCAN_INFO_SIZE(%lu) > %d\n", + size, DHD_PREALLOC_WL_ESCAN_INFO_SIZE); + return NULL; + } + + return wlan_static_wl_escan_info_buf; + } + if (section == DHD_PREALLOC_FW_VERBOSE_RING) { + if (size > FW_VERBOSE_RING_SIZE) { + pr_err("request DHD_PREALLOC_FW_VERBOSE_RING(%lu) > %d\n", + size, FW_VERBOSE_RING_SIZE); + return NULL; + } + + return wlan_static_fw_verbose_ring_buf; + } + if (section == DHD_PREALLOC_FW_EVENT_RING) { + if (size > FW_EVENT_RING_SIZE) { + pr_err("request DHD_PREALLOC_FW_EVENT_RING(%lu) > %d\n", + size, 
FW_EVENT_RING_SIZE); + return NULL; + } + + return wlan_static_fw_event_ring_buf; + } + if (section == DHD_PREALLOC_DHD_EVENT_RING) { + if (size > DHD_EVENT_RING_SIZE) { + pr_err("request DHD_PREALLOC_DHD_EVENT_RING(%lu) > %d\n", + size, DHD_EVENT_RING_SIZE); + return NULL; + } + + return wlan_static_dhd_event_ring_buf; + } + if (section == DHD_PREALLOC_NAN_EVENT_RING) { + if (size > NAN_EVENT_RING_SIZE) { + pr_err("request DHD_PREALLOC_NAN_EVENT_RING(%lu) > %d\n", + size, NAN_EVENT_RING_SIZE); + return NULL; + } + + return wlan_static_nan_event_ring_buf; + } if ((section < 0) || (section > DHD_PREALLOC_MAX)) pr_err("request section id(%d) is out of max index %d\n", section, DHD_PREALLOC_MAX); - pr_err("%s: failed to alloc section %d, size=%ld\n", __FUNCTION__, section, size); + pr_err("%s: failed to alloc section %d, size=%ld\n", + __func__, section, size); return NULL; } @@ -173,7 +257,8 @@ static int dhd_init_wlan_mem(void) if (!wlan_static_skb[i]) { goto err_skb_alloc; } - printk("%s: sectoin %d skb[%d], size=%ld\n", __FUNCTION__, DHD_PREALLOC_SKB_BUF, i, DHD_SKB_1PAGE_BUFSIZE); + pr_err("%s: sectoin %d skb[%d], size=%ld\n", __func__, + DHD_PREALLOC_SKB_BUF, i, DHD_SKB_1PAGE_BUFSIZE); } for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) { @@ -181,81 +266,111 @@ static int dhd_init_wlan_mem(void) if (!wlan_static_skb[i]) { goto err_skb_alloc; } - printk("%s: sectoin %d skb[%d], size=%ld\n", __FUNCTION__, DHD_PREALLOC_SKB_BUF, i, DHD_SKB_2PAGE_BUFSIZE); + pr_err("%s: sectoin %d skb[%d], size=%ld\n", __func__, + DHD_PREALLOC_SKB_BUF, i, DHD_SKB_2PAGE_BUFSIZE); } -#if !defined(CONFIG_BCMDHD_PCIE) +#if defined(BCMDHD_SDIO) wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE); - if (!wlan_static_skb[i]) { + if (!wlan_static_skb[i]) goto err_skb_alloc; - } -#endif /* !CONFIG_BCMDHD_PCIE */ + pr_err("%s: sectoin %d skb[%d], size=%ld\n", __func__, + DHD_PREALLOC_SKB_BUF, i, DHD_SKB_4PAGE_BUFSIZE); +#endif /* BCMDHD_SDIO */ wlan_static_prot = 
kmalloc(DHD_PREALLOC_PROT_SIZE, GFP_KERNEL); - if (!wlan_static_prot) { - pr_err("Failed to alloc wlan_static_prot\n"); + if (!wlan_static_prot) goto err_mem_alloc; - } - printk("%s: sectoin %d, size=%d\n", __FUNCTION__, DHD_PREALLOC_PROT, DHD_PREALLOC_PROT_SIZE); + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_PROT, DHD_PREALLOC_PROT_SIZE); -#if defined(CONFIG_BCMDHD_SDIO) +#if defined(BCMDHD_SDIO) wlan_static_rxbuf = kmalloc(DHD_PREALLOC_RXBUF_SIZE, GFP_KERNEL); - if (!wlan_static_rxbuf) { - pr_err("Failed to alloc wlan_static_rxbuf\n"); + if (!wlan_static_rxbuf) goto err_mem_alloc; - } - printk("%s: sectoin %d, size=%d\n", __FUNCTION__, DHD_PREALLOC_RXBUF, DHD_PREALLOC_RXBUF_SIZE); + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_RXBUF, DHD_PREALLOC_RXBUF_SIZE); wlan_static_databuf = kmalloc(DHD_PREALLOC_DATABUF_SIZE, GFP_KERNEL); - if (!wlan_static_databuf) { - pr_err("Failed to alloc wlan_static_databuf\n"); + if (!wlan_static_databuf) goto err_mem_alloc; - } - printk("%s: sectoin %d, size=%d\n", __FUNCTION__, DHD_PREALLOC_DATABUF, DHD_PREALLOC_DATABUF_SIZE); -#endif + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_DATABUF, DHD_PREALLOC_DATABUF_SIZE); +#endif /* BCMDHD_SDIO */ wlan_static_osl_buf = kmalloc(DHD_PREALLOC_OSL_BUF_SIZE, GFP_KERNEL); - if (!wlan_static_osl_buf) { - pr_err("Failed to alloc wlan_static_osl_buf\n"); + if (!wlan_static_osl_buf) goto err_mem_alloc; - } - printk("%s: sectoin %d, size=%ld\n", __FUNCTION__, DHD_PREALLOC_OSL_BUF, DHD_PREALLOC_OSL_BUF_SIZE); + pr_err("%s: sectoin %d, size=%ld\n", __func__, + DHD_PREALLOC_OSL_BUF, DHD_PREALLOC_OSL_BUF_SIZE); wlan_static_scan_buf0 = kmalloc(DHD_PREALLOC_WIPHY_ESCAN0_SIZE, GFP_KERNEL); - if (!wlan_static_scan_buf0) { - pr_err("Failed to alloc wlan_static_scan_buf0\n"); + if (!wlan_static_scan_buf0) goto err_mem_alloc; - } - printk("%s: sectoin %d, size=%d\n", __FUNCTION__, DHD_PREALLOC_WIPHY_ESCAN0, DHD_PREALLOC_WIPHY_ESCAN0_SIZE); + pr_err("%s: 
sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_WIPHY_ESCAN0, DHD_PREALLOC_WIPHY_ESCAN0_SIZE); wlan_static_dhd_info_buf = kmalloc(DHD_PREALLOC_DHD_INFO_SIZE, GFP_KERNEL); - if (!wlan_static_dhd_info_buf) { - pr_err("Failed to alloc wlan_static_dhd_info_buf\n"); + if (!wlan_static_dhd_info_buf) goto err_mem_alloc; - } - printk("%s: sectoin %d, size=%d\n", __FUNCTION__, DHD_PREALLOC_DHD_INFO, DHD_PREALLOC_DHD_INFO_SIZE); + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_DHD_INFO, DHD_PREALLOC_DHD_INFO_SIZE); wlan_static_dhd_wlfc_info_buf = kmalloc(WLAN_DHD_WLFC_BUF_SIZE, GFP_KERNEL); - if (!wlan_static_dhd_wlfc_info_buf) { - pr_err("Failed to alloc wlan_static_dhd_wlfc_info_buf\n"); + if (!wlan_static_dhd_wlfc_info_buf) goto err_mem_alloc; - } - printk("%s: sectoin %d, size=%d\n", __FUNCTION__, DHD_PREALLOC_DHD_WLFC_INFO, WLAN_DHD_WLFC_BUF_SIZE); + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_DHD_WLFC_INFO, WLAN_DHD_WLFC_BUF_SIZE); + +#ifdef BCMDHD_PCIE + wlan_static_if_flow_lkup = kmalloc(DHD_PREALLOC_IF_FLOW_LKUP_SIZE, GFP_KERNEL); + if (!wlan_static_if_flow_lkup) + goto err_mem_alloc; +#endif /* BCMDHD_PCIE */ + + wlan_static_dhd_memdump_ram_buf = kmalloc(DHD_PREALLOC_MEMDUMP_RAM_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_memdump_ram_buf) + goto err_mem_alloc; + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_MEMDUMP_RAM, DHD_PREALLOC_MEMDUMP_RAM_SIZE); wlan_static_dhd_wlfc_hanger_buf = kmalloc(DHD_PREALLOC_DHD_WLFC_HANGER_SIZE, GFP_KERNEL); - if (!wlan_static_dhd_wlfc_hanger_buf) { - pr_err("Failed to alloc wlan_static_dhd_wlfc_hanger_buf\n"); + if (!wlan_static_dhd_wlfc_hanger_buf) goto err_mem_alloc; - } - printk("%s: sectoin %d, size=%d\n", __FUNCTION__, DHD_PREALLOC_DHD_WLFC_HANGER, DHD_PREALLOC_DHD_WLFC_HANGER_SIZE); + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_DHD_WLFC_HANGER, DHD_PREALLOC_DHD_WLFC_HANGER_SIZE); -#ifdef CONFIG_BCMDHD_PCIE - wlan_static_if_flow_lkup = 
kmalloc(DHD_PREALLOC_IF_FLOW_LKUP_SIZE, GFP_KERNEL); - if (!wlan_static_if_flow_lkup) { - pr_err("Failed to alloc wlan_static_if_flow_lkup\n"); + wlan_static_wl_escan_info_buf = kmalloc(DHD_PREALLOC_WL_ESCAN_INFO_SIZE, GFP_KERNEL); + if (!wlan_static_wl_escan_info_buf) goto err_mem_alloc; - } -#endif /* CONFIG_BCMDHD_PCIE */ + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_WL_ESCAN_INFO, DHD_PREALLOC_WL_ESCAN_INFO_SIZE); + + wlan_static_fw_verbose_ring_buf = kmalloc( + DHD_PREALLOC_WIPHY_ESCAN0_SIZE, + GFP_KERNEL); + if (!wlan_static_fw_verbose_ring_buf) + goto err_mem_alloc; + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_FW_VERBOSE_RING, DHD_PREALLOC_WL_ESCAN_INFO_SIZE); + + wlan_static_fw_event_ring_buf = kmalloc(DHD_PREALLOC_WIPHY_ESCAN0_SIZE, GFP_KERNEL); + if (!wlan_static_fw_event_ring_buf) + goto err_mem_alloc; + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_FW_EVENT_RING, DHD_PREALLOC_WL_ESCAN_INFO_SIZE); + + wlan_static_dhd_event_ring_buf = kmalloc(DHD_PREALLOC_WIPHY_ESCAN0_SIZE, GFP_KERNEL); + if (!wlan_static_dhd_event_ring_buf) + goto err_mem_alloc; + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_DHD_EVENT_RING, DHD_PREALLOC_WL_ESCAN_INFO_SIZE); + + wlan_static_nan_event_ring_buf = kmalloc(DHD_PREALLOC_WIPHY_ESCAN0_SIZE, GFP_KERNEL); + if (!wlan_static_nan_event_ring_buf) + goto err_mem_alloc; + pr_err("%s: sectoin %d, size=%d\n", __func__, + DHD_PREALLOC_NAN_EVENT_RING, DHD_PREALLOC_WL_ESCAN_INFO_SIZE); return 0; @@ -264,13 +379,22 @@ err_mem_alloc: if (wlan_static_prot) kfree(wlan_static_prot); -#if defined(CONFIG_BCMDHD_SDIO) +#if defined(BCMDHD_SDIO) if (wlan_static_rxbuf) kfree(wlan_static_rxbuf); if (wlan_static_databuf) kfree(wlan_static_databuf); -#endif +#endif /* BCMDHD_SDIO */ + + if (wlan_static_osl_buf) + kfree(wlan_static_osl_buf); + + if (wlan_static_scan_buf0) + kfree(wlan_static_scan_buf0); + + if (wlan_static_scan_buf1) + kfree(wlan_static_scan_buf1); if 
(wlan_static_dhd_info_buf) kfree(wlan_static_dhd_info_buf); @@ -278,31 +402,42 @@ err_mem_alloc: if (wlan_static_dhd_wlfc_info_buf) kfree(wlan_static_dhd_wlfc_info_buf); +#ifdef BCMDHD_PCIE + if (wlan_static_if_flow_lkup) + kfree(wlan_static_if_flow_lkup); +#endif /* BCMDHD_PCIE */ + + if (wlan_static_dhd_memdump_ram_buf) + kfree(wlan_static_dhd_memdump_ram_buf); + if (wlan_static_dhd_wlfc_hanger_buf) kfree(wlan_static_dhd_wlfc_hanger_buf); - if (wlan_static_scan_buf1) - kfree(wlan_static_scan_buf1); + if (wlan_static_wl_escan_info_buf) + kfree(wlan_static_wl_escan_info_buf); + +#ifdef BCMDHD_PCIE + if (wlan_static_fw_verbose_ring_buf) + kfree(wlan_static_fw_verbose_ring_buf); - if (wlan_static_scan_buf0) - kfree(wlan_static_scan_buf0); + if (wlan_static_fw_event_ring_buf) + kfree(wlan_static_fw_event_ring_buf); - if (wlan_static_osl_buf) - kfree(wlan_static_osl_buf); + if (wlan_static_dhd_event_ring_buf) + kfree(wlan_static_dhd_event_ring_buf); -#ifdef CONFIG_BCMDHD_PCIE - if (wlan_static_if_flow_lkup) - kfree(wlan_static_if_flow_lkup); -#endif - pr_err("Failed to mem_alloc for WLAN\n"); + if (wlan_static_nan_event_ring_buf) + kfree(wlan_static_nan_event_ring_buf); +#endif /* BCMDHD_PCIE */ + + pr_err("%s: Failed to mem_alloc for WLAN\n", __func__); i = WLAN_SKB_BUF_NUM; err_skb_alloc: - pr_err("Failed to skb_alloc for WLAN\n"); - for (j = 0; j < i; j++) { + pr_err("%s: Failed to skb_alloc for WLAN\n", __func__); + for (j = 0; j < i; j++) dev_kfree_skb(wlan_static_skb[j]); - } return -ENOMEM; } @@ -310,7 +445,7 @@ err_skb_alloc: static int __init dhd_static_buf_init(void) { - printk(KERN_ERR "%s()\n", __FUNCTION__); + printk(KERN_ERR "%s(): %s\n", __func__, DHD_STATIC_VERSION_STR); dhd_init_wlan_mem(); @@ -322,7 +457,7 @@ dhd_static_buf_exit(void) { int i; - printk(KERN_ERR "%s()\n", __FUNCTION__); + pr_err("%s()\n", __FUNCTION__); for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) { if (wlan_static_skb[i]) @@ -334,21 +469,21 @@ dhd_static_buf_exit(void) 
dev_kfree_skb(wlan_static_skb[i]); } -#if !defined(CONFIG_BCMDHD_PCIE) +#if defined(BCMDHD_SDIO) if (wlan_static_skb[i]) dev_kfree_skb(wlan_static_skb[i]); -#endif /* !CONFIG_BCMDHD_PCIE */ +#endif /* BCMDHD_SDIO */ if (wlan_static_prot) kfree(wlan_static_prot); -#if defined(CONFIG_BCMDHD_SDIO) +#if defined(BCMDHD_SDIO) if (wlan_static_rxbuf) kfree(wlan_static_rxbuf); if (wlan_static_databuf) kfree(wlan_static_databuf); -#endif +#endif /* BCMDHD_SDIO */ if (wlan_static_osl_buf) kfree(wlan_static_osl_buf); @@ -356,22 +491,43 @@ dhd_static_buf_exit(void) if (wlan_static_scan_buf0) kfree(wlan_static_scan_buf0); + if (wlan_static_scan_buf1) + kfree(wlan_static_scan_buf1); + if (wlan_static_dhd_info_buf) kfree(wlan_static_dhd_info_buf); if (wlan_static_dhd_wlfc_info_buf) kfree(wlan_static_dhd_wlfc_info_buf); +#ifdef BCMDHD_PCIE + if (wlan_static_if_flow_lkup) + kfree(wlan_static_if_flow_lkup); +#endif /* BCMDHD_PCIE */ + + if (wlan_static_dhd_memdump_ram_buf) + kfree(wlan_static_dhd_memdump_ram_buf); + if (wlan_static_dhd_wlfc_hanger_buf) kfree(wlan_static_dhd_wlfc_hanger_buf); - if (wlan_static_scan_buf1) - kfree(wlan_static_scan_buf1); + if (wlan_static_wl_escan_info_buf) + kfree(wlan_static_wl_escan_info_buf); + +#ifdef BCMDHD_PCIE + if (wlan_static_fw_verbose_ring_buf) + kfree(wlan_static_fw_verbose_ring_buf); -#ifdef CONFIG_BCMDHD_PCIE - if (wlan_static_if_flow_lkup) - kfree(wlan_static_if_flow_lkup); + if (wlan_static_fw_event_ring_buf) + kfree(wlan_static_fw_event_ring_buf); + + if (wlan_static_dhd_event_ring_buf) + kfree(wlan_static_dhd_event_ring_buf); + + if (wlan_static_nan_event_ring_buf) + kfree(wlan_static_nan_event_ring_buf); #endif + return; } diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_wlfc.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_wlfc.c index b790c8d15fde..678dbc387f6d 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_wlfc.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_wlfc.c @@ -1,7 
+1,7 @@ /* * DHD PROP_TXSTATUS Module. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: dhd_wlfc.c 579277 2015-08-14 04:49:50Z $ + * $Id: dhd_wlfc.c 679733 2017-01-17 06:40:39Z $ * */ @@ -72,8 +72,6 @@ #define DHD_WLFC_QMON_COMPLETE(entry) -#define LIMIT_BORROW - /** reordering related */ @@ -126,8 +124,7 @@ _dhd_wlfc_prec_enque(struct pktq *pq, int prec, void* p, bool qHead, return; ASSERT(prec >= 0 && prec < pq->num_prec); - /* queueing chains not allowed and no segmented SKB (Kernel-3.18.y) */ - ASSERT(!((PKTLINK(p) != NULL) && (PKTLINK(p) != p))); + ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */ ASSERT(!pktq_full(pq)); ASSERT(!pktq_pfull(pq, prec)); @@ -934,7 +931,7 @@ _dhd_wlfc_flow_control_check(athost_wl_status_info_t* ctx, struct pktq* pq, uint dhdp = (dhd_pub_t *)ctx->dhdp; ASSERT(dhdp); - if (dhdp->skip_fc && dhdp->skip_fc()) + if (dhdp->skip_fc && dhdp->skip_fc((void *)dhdp, if_id)) return; if ((ctx->hostif_flow_state[if_id] == OFF) && !_dhd_wlfc_allow_fc(ctx, if_id)) @@ -1227,10 +1224,6 @@ _dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx, h->items[hslot].pkt_txstatus = 0; h->items[hslot].state = WLFC_HANGER_ITEM_STATE_INUSE; } - } else if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) { - /* clear hanger state */ - ((wlfc_hanger_t*)(ctx->hanger))->items[hslot].pkt_state = 0; - ((wlfc_hanger_t*)(ctx->hanger))->items[hslot].pkt_txstatus = 0; } if ((rc == BCME_OK) && header_needed) { @@ -1251,12 +1244,14 @@ static int _dhd_wlfc_is_destination_open(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry, int prec) { + wlfc_mac_descriptor_t* interfaces = ctx->destination_entries.interfaces; + if (entry->interface_id >= WLFC_MAX_IFNUM) { ASSERT(&ctx->destination_entries.other == entry); return 1; } - if 
(ctx->destination_entries.interfaces[entry->interface_id].iftype == + if (interfaces[entry->interface_id].iftype == WLC_E_IF_ROLE_P2P_GO) { /* - destination interface is of type p2p GO. For a p2pGO interface, if the destination is OPEN but the interface is @@ -1271,7 +1266,9 @@ _dhd_wlfc_is_destination_open(athost_wl_status_info_t* ctx, } /* AP, p2p_go -> unicast desc entry, STA/p2p_cl -> interface desc. entry */ - if (((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) && + if ((((entry->state == WLFC_STATE_CLOSE) || + (interfaces[entry->interface_id].state == WLFC_STATE_CLOSE)) && + (entry->requested_credit == 0) && (entry->requested_packet == 0)) || (!(entry->ac_bitmap & (1 << prec)))) { return 0; @@ -1830,6 +1827,7 @@ _dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp); pktq_init(&entry->psq, WLFC_PSQ_PREC_COUNT, WLFC_PSQ_LEN); + _dhd_wlfc_flow_control_check(ctx, &entry->psq, ifid); if (WLFC_GET_AFQ(dhdp->wlfc_mode)) { pktq_init(&entry->afq, WLFC_AFQ_PREC_COUNT, WLFC_PSQ_LEN); @@ -2219,7 +2217,7 @@ _dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info, uint8 len, if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { memcpy(&seq, pkt_info + WLFC_CTL_VALUE_LEN_TXSTATUS, WLFC_CTL_VALUE_LEN_SEQ); seq = ltoh16(seq); - seq_fromfw = WL_SEQ_GET_FROMFW(seq); + seq_fromfw = GET_WL_HAS_ASSIGNED_SEQ(seq); seq_num = WL_SEQ_GET_NUM(seq); } @@ -2353,7 +2351,7 @@ _dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info, uint8 len, WL_TXSTATUS_SET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(pktbuf)), gen); if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { - WL_SEQ_SET_FROMDRV(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_fromfw); + WL_SEQ_SET_REUSE(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_fromfw); WL_SEQ_SET_NUM(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_num); } @@ -2502,15 +2500,18 @@ _dhd_wlfc_suppress_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) PKTSETLINK(pkt, NULL); entry = 
_dhd_wlfc_find_table_entry(wlfc, pkt); - if (entry) { - if (entry->onbus_pkts_count > 0) - entry->onbus_pkts_count--; - if (entry->suppressed && - (!entry->onbus_pkts_count) && - (!entry->suppr_transit_count)) - entry->suppressed = FALSE; + if (!entry) { + PKTFREE(dhd->osh, pkt, TRUE); + continue; + } + if (entry->onbus_pkts_count > 0) { + entry->onbus_pkts_count--; + } + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) { + entry->suppressed = FALSE; } - /* fake a suppression txstatus */ htod = DHD_PKTTAG_H2DTAG(PKTTAG(pkt)); WL_TXSTATUS_SET_FLAGS(htod, WLFC_CTL_PKTFLAG_WLSUPPRESS); @@ -2519,9 +2520,9 @@ _dhd_wlfc_suppress_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) memcpy(results, &htod, WLFC_CTL_VALUE_LEN_TXSTATUS); if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(pkt)); - if (WL_SEQ_GET_FROMDRV(htodseq)) { - WL_SEQ_SET_FROMFW(htodseq, 1); - WL_SEQ_SET_FROMDRV(htodseq, 0); + if (IS_WL_TO_REUSE_SEQ(htodseq)) { + SET_WL_HAS_ASSIGNED_SEQ(htodseq); + RESET_WL_TO_REUSE_SEQ(htodseq); } htodseq = htol16(htodseq); memcpy(results + WLFC_CTL_VALUE_LEN_TXSTATUS, &htodseq, @@ -3088,7 +3089,7 @@ dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, uchar dhd_os_wlfc_unblock(dhd); return BCME_OK; -} /* dhd_wlfc_parse_header_info */ +} KERNEL_THREAD_RETURN_TYPE dhd_wlfc_transfer_packets(void *data) @@ -3099,6 +3100,7 @@ dhd_wlfc_transfer_packets(void *data) athost_wl_status_info_t* ctx; int bus_retry_count = 0; int pkt_send = 0; + int pkt_send_per_ac = 0; uint8 tx_map = 0; /* packets (send + in queue), Bitmask for 4 ACs + BC/MC */ uint8 rx_map = 0; /* received packets, Bitmask for 4 ACs + BC/MC */ @@ -3166,7 +3168,9 @@ dhd_wlfc_transfer_packets(void *data) tx_map |= (1 << ac); single_ac = ac + 1; - while (FALSE == dhdp->proptxstatus_txoff) { + pkt_send_per_ac = 0; + while ((FALSE == dhdp->proptxstatus_txoff) && + (pkt_send_per_ac < WLFC_PACKET_BOUND)) { /* packets from delayQ with less 
priority are fresh and * they'd need header and have no MAC entry */ @@ -3178,7 +3182,8 @@ dhd_wlfc_transfer_packets(void *data) lender = -1; #ifdef LIMIT_BORROW - if (no_credit && (ac < AC_COUNT) && (tx_map >= rx_map)) { + if (no_credit && (ac < AC_COUNT) && (tx_map >= rx_map) && + dhdp->wlfc_borrow_allowed) { /* try borrow from lower priority */ lender = _dhd_wlfc_borrow_credit(ctx, ac - 1, ac, FALSE); if (lender != -1) { @@ -3198,7 +3203,7 @@ dhd_wlfc_transfer_packets(void *data) if (commit_info.p == NULL) { #ifdef LIMIT_BORROW - if (lender != -1) { + if (lender != -1 && dhdp->wlfc_borrow_allowed) { _dhd_wlfc_return_credit(ctx, lender, ac); } #endif @@ -3215,17 +3220,19 @@ dhd_wlfc_transfer_packets(void *data) /* Bus commits may fail (e.g. flow control); abort after retries */ if (rc == BCME_OK) { pkt_send++; + pkt_send_per_ac++; if (commit_info.ac_fifo_credit_spent && (lender == -1)) { ctx->FIFO_credit[ac]--; } #ifdef LIMIT_BORROW - else if (!commit_info.ac_fifo_credit_spent && (lender != -1)) { + else if (!commit_info.ac_fifo_credit_spent && (lender != -1) && + dhdp->wlfc_borrow_allowed) { _dhd_wlfc_return_credit(ctx, lender, ac); } #endif } else { #ifdef LIMIT_BORROW - if (lender != -1) { + if (lender != -1 && dhdp->wlfc_borrow_allowed) { _dhd_wlfc_return_credit(ctx, lender, ac); } #endif @@ -3294,9 +3301,13 @@ dhd_wlfc_transfer_packets(void *data) /* At this point, borrow all credits only for ac */ while (FALSE == dhdp->proptxstatus_txoff) { #ifdef LIMIT_BORROW - if ((lender = _dhd_wlfc_borrow_credit(ctx, AC_COUNT, ac, TRUE)) == -1) { - break; + if (dhdp->wlfc_borrow_allowed) { + if ((lender = _dhd_wlfc_borrow_credit(ctx, AC_COUNT, ac, TRUE)) == -1) { + break; + } } + else + break; #endif commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac, &(commit_info.ac_fifo_credit_spent), @@ -3604,7 +3615,9 @@ dhd_wlfc_init(dhd_pub_t *dhd) } DHD_ERROR(("dhd_wlfc_init(): wlfc_mode=0x%x, ret=%d\n", dhd->wlfc_mode, ret)); - +#ifdef LIMIT_BORROW + dhd->wlfc_borrow_allowed = 
TRUE; +#endif dhd_os_wlfc_unblock(dhd); if (dhd->plat_init) @@ -3824,6 +3837,20 @@ int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data) return rc; } +#ifdef LIMIT_BORROW +int dhd_wlfc_disable_credit_borrow_event(dhd_pub_t *dhdp, uint8* event_data) +{ + if (dhdp == NULL || event_data == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + dhd_os_wlfc_block(dhdp); + dhdp->wlfc_borrow_allowed = (bool)(*(uint32 *)event_data); + dhd_os_wlfc_unblock(dhdp); + + return BCME_OK; +} +#endif /* LIMIT_BORROW */ /** * Called eg on receiving a WLC_E_BCMC_CREDIT_SUPPORT event from the dongle (broadcast/multicast @@ -4312,10 +4339,6 @@ int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock) dhd_os_wlfc_unblock(dhdp); } -#if defined(DHD_WLFC_THREAD) - _dhd_wlfc_thread_wakeup(dhd); -#endif /* defined(DHD_WLFC_THREAD) */ - return BCME_OK; } @@ -4409,6 +4432,11 @@ int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val) __FUNCTION__, tlv)); } } + +#if defined(DHD_WLFC_THREAD) + _dhd_wlfc_thread_wakeup(dhd); +#endif /* defined(DHD_WLFC_THREAD) */ + return BCME_OK; } diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_wlfc.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_wlfc.h index a6fd465e35fd..1e8b01f97a44 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_wlfc.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_wlfc.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -22,7 +22,7 @@ * * <> * - * $Id: dhd_wlfc.h 557035 2015-05-15 18:48:57Z $ + * $Id: dhd_wlfc.h 671530 2016-11-22 08:43:33Z $ * */ #ifndef __wlfc_host_driver_definitions_h__ @@ -109,9 +109,9 @@ typedef struct wlfc_hanger { #define WLFC_PSQ_PREC_COUNT ((AC_COUNT + 1) * 2) /**< 2 for each 
AC traffic and bc/mc */ #define WLFC_AFQ_PREC_COUNT (AC_COUNT + 1) -#define WLFC_PSQ_LEN 2048 +#define WLFC_PSQ_LEN (4096 * 8) -#define WLFC_FLOWCONTROL_HIWATER (2048 - 256) +#define WLFC_FLOWCONTROL_HIWATER ((4096 * 8) - 256) #define WLFC_FLOWCONTROL_LOWATER 256 #if (WLFC_FLOWCONTROL_HIWATER >= (WLFC_PSQ_LEN - 256)) @@ -247,7 +247,7 @@ typedef struct athost_wl_stat_counters { #define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do {} while (0) #define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do {} while (0) #endif - +#define WLFC_PACKET_BOUND 10 #define WLFC_FCMODE_NONE 0 #define WLFC_FCMODE_IMPLIED_CREDIT 1 #define WLFC_FCMODE_EXPLICIT_CREDIT 2 @@ -373,7 +373,7 @@ typedef struct dhd_pkttag { /** This 32-bit goes from host to device for every packet. */ uint32 htod_tag; - /** This 16-bit is original seq number for every suppress packet. */ + /** This 16-bit is original d11seq number for every suppressed packet. */ uint16 htod_seq; /** This address is mac entry for every packet. */ @@ -529,6 +529,9 @@ int dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void* arg); int dhd_wlfc_deinit(dhd_pub_t *dhd); int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea); int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data); +#ifdef LIMIT_BORROW +int dhd_wlfc_disable_credit_borrow_event(dhd_pub_t *dhdp, uint8* event_data); +#endif /* LIMIT_BORROW */ int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp); int dhd_wlfc_enable(dhd_pub_t *dhdp); int dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dngl_stats.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dngl_stats.h index 66e4f4528f7d..b995ec5b0ed4 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dngl_stats.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dngl_stats.h @@ -2,7 +2,7 @@ * Common stats definitions for clients of dongle * ports * - * Copyright (C) 1999-2016, Broadcom 
Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,14 +25,14 @@ * * <> * - * $Id: dngl_stats.h 523030 2014-12-25 17:28:07Z $ + * $Id: dngl_stats.h 681171 2017-01-25 05:27:08Z $ */ #ifndef _dngl_stats_h_ #define _dngl_stats_h_ -#include -#include +#include +#include <802.11.h> typedef struct { unsigned long rx_packets; /* total packets received */ @@ -98,7 +98,7 @@ typedef enum { * element UTF-8 SSID bit is set */ #define WIFI_CAPABILITY_COUNTRY 0x00000020 /* set is 802.11 Country Element is present */ - +#define PACK_ATTRIBUTE __attribute__ ((packed)) typedef struct { wifi_interface_mode mode; /* interface mode */ uint8 mac_addr[6]; /* interface mac address (self) */ @@ -134,6 +134,18 @@ typedef struct { uint32 bitrate; /* units of 100 Kbps */ } wifi_rate; +typedef struct { + uint32 preamble :3; /* 0: OFDM, 1:CCK, 2:HT 3:VHT 4..7 reserved */ + uint32 nss :2; /* 0:1x1, 1:2x2, 3:3x3, 4:4x4 */ + uint32 bw :3; /* 0:20MHz, 1:40Mhz, 2:80Mhz, 3:160Mhz */ + uint32 rateMcsIdx :8; /* OFDM/CCK rate code would be as per ieee std + * in the units of 0.5mbps HT/VHT it would be + * mcs index + */ + uint32 reserved :16; /* reserved */ + uint32 bitrate; /* units of 100 Kbps */ +} wifi_rate_v1; + /* channel statistics */ typedef struct { wifi_channel_info channel; /* channel */ @@ -183,19 +195,41 @@ typedef struct { wifi_channel_stat channels[1]; /* channel statistics */ } wifi_radio_stat; +typedef struct { + wifi_radio radio; + uint32 on_time; + uint32 tx_time; + uint32 rx_time; + uint32 on_time_scan; + uint32 on_time_nbd; + uint32 on_time_gscan; + uint32 on_time_roam_scan; + uint32 on_time_pno_scan; + uint32 on_time_hs20; + uint32 num_channels; +} wifi_radio_stat_h; + /* per rate statistics */ typedef struct { - struct { - uint16 version; - uint16 length; - }; + wifi_rate_v1 rate; /* rate information */ uint32 
tx_mpdu; /* number of successfully transmitted data pkts (ACK rcvd) */ uint32 rx_mpdu; /* number of received data pkts */ uint32 mpdu_lost; /* number of data packet losses (no ACK) */ uint32 retries; /* total number of data pkt retries */ uint32 retries_short; /* number of short data pkt retries */ uint32 retries_long; /* number of long data pkt retries */ - wifi_rate rate; /* rate information */ +} wifi_rate_stat_v1; + +typedef struct { + uint16 version; + uint16 length; + uint32 tx_mpdu; /* number of successfully transmitted data pkts (ACK rcvd) */ + uint32 rx_mpdu; /* number of received data pkts */ + uint32 mpdu_lost; /* number of data packet losses (no ACK) */ + uint32 retries; /* total number of data pkt retries */ + uint32 retries_short; /* number of short data pkt retries */ + uint32 retries_long; /* number of long data pkt retries */ + wifi_rate rate; } wifi_rate_stat; /* access categories */ @@ -261,9 +295,28 @@ typedef struct { uint32 beacon_rx; /* access point beacon received count from * connected AP */ + uint64 average_tsf_offset; /* average beacon offset encountered (beacon_TSF - TBTT) + * The average_tsf_offset field is used so as to calculate + * the typical beacon contention time on the channel as well + * may be used to debug beacon synchronization and related + * power consumption issue + */ + uint32 leaky_ap_detected; /* indicate that this AP + * typically leaks packets beyond + * the driver guard time. 
+ */ + uint32 leaky_ap_avg_num_frames_leaked; /* average number of frame leaked by AP after + * frame with PM bit set was ACK'ed by AP + */ + uint32 leaky_ap_guard_time; /* guard time currently in force + * (when implementing IEEE power management + * based on frame control PM bit), How long + * driver waits before shutting down the radio and after + * receiving an ACK for a data frame with PM bit set) + */ uint32 mgmt_rx; /* access point mgmt frames received count from - * connected AP (including Beacon) - */ + * connected AP (including Beacon) + */ uint32 mgmt_action_rx; /* action frames received count */ uint32 mgmt_action_tx; /* action frames transmit count */ wifi_rssi rssi_mgmt; /* access Point Beacon and Management frames RSSI @@ -280,4 +333,51 @@ typedef struct { wifi_peer_info peer_info[1]; /* per peer statistics */ } wifi_iface_stat; +#ifdef CONFIG_COMPAT +/* interface statistics */ +typedef struct { + compat_uptr_t iface; /* wifi interface */ + wifi_interface_info info; /* current state of the interface */ + uint32 beacon_rx; /* access point beacon received count from + * connected AP + */ + uint64 average_tsf_offset; /* average beacon offset encountered (beacon_TSF - TBTT) + * The average_tsf_offset field is used so as to calculate + * the typical beacon contention time on the channel as well + * may be used to debug beacon synchronization and related + * power consumption issue + */ + uint32 leaky_ap_detected; /* indicate that this AP + * typically leaks packets beyond + * the driver guard time. 
+ */ + uint32 leaky_ap_avg_num_frames_leaked; /* average number of frame leaked by AP after + * frame with PM bit set was ACK'ed by AP + */ + uint32 leaky_ap_guard_time; /* guard time currently in force + * (when implementing IEEE power management + * based on frame control PM bit), How long + * driver waits before shutting down the radio and after + * receiving an ACK for a data frame with PM bit set) + */ + uint32 mgmt_rx; /* access point mgmt frames received count from + * connected AP (including Beacon) + */ + uint32 mgmt_action_rx; /* action frames received count */ + uint32 mgmt_action_tx; /* action frames transmit count */ + wifi_rssi rssi_mgmt; /* access Point Beacon and Management frames RSSI + * (averaged) + */ + wifi_rssi rssi_data; /* access Point Data Frames RSSI (averaged) from + * connected AP + */ + wifi_rssi rssi_ack; /* access Point ACK RSSI (averaged) from + * connected AP + */ + wifi_wmm_ac_stat ac[WIFI_AC_MAX]; /* per ac data packet statistics */ + uint32 num_peers; /* number of peers */ + wifi_peer_info peer_info[1]; /* per peer statistics */ +} compat_wifi_iface_stat; +#endif /* CONFIG_COMPAT */ + #endif /* _dngl_stats_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dngl_wlhdr.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dngl_wlhdr.h index 93e0b5a5b69d..96da42e5f570 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dngl_wlhdr.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dngl_wlhdr.h @@ -1,7 +1,7 @@ /* * Dongle WL Header definitions * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/hnd_pktpool.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/hnd_pktpool.c index f3555e40ce91..0c5c3a8a3525 100755 --- 
a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/hnd_pktpool.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/hnd_pktpool.c @@ -1,7 +1,7 @@ /* * HND generic packet pool operation primitives * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: hnd_pktpool.c 591285 2015-10-07 11:56:29Z $ + * $Id: hnd_pktpool.c 613891 2016-01-20 10:05:44Z $ */ #include @@ -33,6 +33,7 @@ #include #include + /* mutex macros for thread safe */ #ifdef HND_PKTPOOL_THREAD_SAFE #define HND_PKTPOOL_MUTEX_CREATE(name, mutex) osl_ext_mutex_create(name, mutex) @@ -84,7 +85,7 @@ static int pktpool_register(pktpool_t * poolptr); static int pktpool_deregister(pktpool_t * poolptr); /** add declaration */ -static int pktpool_avail_notify(pktpool_t *pktp); +static void pktpool_avail_notify(pktpool_t *pktp); /** accessor functions required when ROMming this file, forced into RAM */ @@ -107,10 +108,12 @@ BCMRAMFN(pktpool_registry_cmp)(int id, pktpool_t *pp) return pktpools_registry[id] == pp; } -int /* Construct a pool registry to serve a maximum of total_pools */ +/** Constructs a pool registry to serve a maximum of total_pools */ +int pktpool_attach(osl_t *osh, uint32 total_pools) { uint32 poolid; + BCM_REFERENCE(osh); if (pktpools_max != 0U) { return BCME_ERROR; @@ -130,10 +133,12 @@ pktpool_attach(osl_t *osh, uint32 total_pools) return (int)pktpools_max; } -int /* Destruct the pool registry. Ascertain all pools were first de-inited */ +/** Destructs the pool registry. 
Ascertain all pools were first de-inited */ +int pktpool_dettach(osl_t *osh) { uint32 poolid; + BCM_REFERENCE(osh); if (pktpools_max == 0U) { return BCME_OK; @@ -151,7 +156,8 @@ pktpool_dettach(osl_t *osh) return BCME_OK; } -static int /* Register a pool in a free slot; return the registry slot index */ +/** Registers a pool in a free slot; returns the registry slot index */ +static int pktpool_register(pktpool_t * poolptr) { uint32 poolid; @@ -173,7 +179,8 @@ pktpool_register(pktpool_t * poolptr) return PKTPOOL_INVALID_ID; /* error: registry is full */ } -static int /* Deregister a pktpool, given the pool pointer; tag slot as free */ +/** Deregisters a pktpool, given the pool pointer; tag slot as free */ +static int pktpool_deregister(pktpool_t * poolptr) { uint32 poolid; @@ -194,12 +201,10 @@ pktpool_deregister(pktpool_t * poolptr) return BCME_OK; } - -/* +/** * pktpool_init: - * User provides a pktpool_t sturcture and specifies the number of packets to - * be pre-filled into the pool (pplen). The size of all packets in a pool must - * be the same and is specified by plen. + * User provides a pktpool_t structure and specifies the number of packets to + * be pre-filled into the pool (pplen). * pktpool_init first attempts to register the pool and fetch a unique poolid. * If registration fails, it is considered an BCME_ERR, caused by either the * registry was not pre-created (pktpool_attach) or the registry is full. @@ -210,6 +215,10 @@ pktpool_deregister(pktpool_t * poolptr) * In dongle builds, prior to memory reclaimation, one should limit the number * of packets to be allocated during pktpool_init and fill the pool up after * reclaim stage. + * + * @param pplen Number of packets to be pre-filled into the pool + * @param plen The size of all packets in a pool must be the same, [bytes] units. E.g. PKTBUFSZ. + * @param type e.g. 
'lbuf_frag' */ int pktpool_init(osl_t *osh, pktpool_t *pktp, int *pplen, int plen, bool istx, uint8 type) @@ -274,11 +283,11 @@ pktpool_init(osl_t *osh, pktpool_t *pktp, int *pplen, int plen, bool istx, uint8 exit: pktp->len = pktp->avail; - *pplen = pktp->len; + *pplen = pktp->len; /* number of packets managed by pool */ return err; -} +} /* pktpool_init */ -/* +/** * pktpool_deinit: * Prior to freeing a pktpool, all packets must be first freed into the pktpool. * Upon pktpool_deinit, all packets in the free pool will be freed to the heap. @@ -391,6 +400,8 @@ pktpool_deq(pktpool_t *pktp) p = pktp->freelist; /* dequeue packet from head of pktpool free list */ pktp->freelist = PKTFREELIST(p); /* free list points to next packet */ + + PKTSETFREELIST(p, NULL); pktp->avail--; @@ -406,11 +417,12 @@ pktpool_enq(pktpool_t *pktp, void *p) PKTSETFREELIST(p, pktp->freelist); /* insert at head of pktpool free list */ pktp->freelist = p; /* free list points to newly inserted packet */ + pktp->avail++; ASSERT(pktp->avail <= pktp->len); } -/* utility for registering host addr fill function called from pciedev */ +/** utility for registering host addr fill function called from pciedev */ int /* BCMATTACHFN */ (pktpool_hostaddr_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg) @@ -437,8 +449,8 @@ pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg) pktp->rxcplidfn.arg = arg; return 0; } -/* Callback functions for split rx modes */ -/* when evr host posts rxbuffer, invike dma_rxfill from pciedev layer */ + +/** whenever host posts rxbuffer, invoke dma_rxfill from pciedev layer */ void pktpool_invoke_dmarxfill(pktpool_t *pktp) { @@ -448,6 +460,8 @@ pktpool_invoke_dmarxfill(pktpool_t *pktp) if (pktp->dmarxfill.cb) pktp->dmarxfill.cb(pktp, pktp->dmarxfill.arg); } + +/** Registers callback functions for split rx mode */ int pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg) { @@ -459,7 +473,11 @@ 
pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg) return 0; } -/* No BCMATTACHFN as it is used in xdc_enable_ep which is not an attach function */ + +/** + * Registers callback functions. + * No BCMATTACHFN as it is used in xdc_enable_ep which is not an attach function + */ int pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg) { @@ -491,6 +509,7 @@ done: return err; } +/** Registers callback functions */ int pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg) { @@ -522,6 +541,7 @@ done: return err; } +/** Calls registered callback functions */ static int pktpool_empty_notify(pktpool_t *pktp) { @@ -687,6 +707,7 @@ done: } int pktpool_stop_trigger(pktpool_t *pktp, void *p); + int pktpool_stop_trigger(pktpool_t *pktp, void *p) { @@ -731,6 +752,7 @@ done: int pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp) { + BCM_REFERENCE(osh); ASSERT(pktp); /* protect shared resource */ @@ -751,6 +773,7 @@ pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb) { int i; int err; + BCM_REFERENCE(osh); ASSERT(pktp); @@ -778,7 +801,7 @@ pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb) return err; } -static int +static void pktpool_avail_notify(pktpool_t *pktp) { int i, k, idx; @@ -787,7 +810,7 @@ pktpool_avail_notify(pktpool_t *pktp) ASSERT(pktp); if (pktp->availcb_excl != NULL) { pktp->availcb_excl->cb(pktp, pktp->availcb_excl->arg); - return 0; + return; } k = pktp->cbcnt - 1; @@ -809,9 +832,10 @@ pktpool_avail_notify(pktpool_t *pktp) */ pktp->cbtoggle ^= 1; - return 0; + return; } +/** Gets an empty packet from the caller provided pool */ void * pktpool_get(pktpool_t *pktp) { @@ -887,6 +911,7 @@ pktpool_free(pktpool_t *pktp, void *p) return; } +/** Adds a caller provided (empty) packet to the caller provided pool */ int pktpool_add(pktpool_t *pktp, void *p) { @@ -922,7 +947,8 @@ done: return err; } -/* Force pktpool_setmaxlen () into RAM as it uses a constant +/** + 
* Force pktpool_setmaxlen () into RAM as it uses a constant * (PKTPOOL_LEN_MAX) that may be changed post tapeout for ROM-based chips. */ int @@ -989,21 +1015,27 @@ pktpool_t *pktpool_shared_rxlfrag = NULL; static osl_t *pktpool_osh = NULL; -void +/** + * Initializes several packet pools and allocates packets within those pools. + */ +int hnd_pktpool_init(osl_t *osh) { + int err; int n; /* Construct a packet pool registry before initializing packet pools */ n = pktpool_attach(osh, PKTPOOL_MAXIMUM_ID); if (n != PKTPOOL_MAXIMUM_ID) { ASSERT(0); - return; + err = BCME_ERROR; + goto error0; } pktpool_shared = MALLOCZ(osh, sizeof(pktpool_t)); if (pktpool_shared == NULL) { ASSERT(0); + err = BCME_NOMEM; goto error1; } @@ -1011,6 +1043,7 @@ hnd_pktpool_init(osl_t *osh) pktpool_shared_lfrag = MALLOCZ(osh, sizeof(pktpool_t)); if (pktpool_shared_lfrag == NULL) { ASSERT(0); + err = BCME_NOMEM; goto error2; } #endif @@ -1019,6 +1052,7 @@ hnd_pktpool_init(osl_t *osh) pktpool_shared_rxlfrag = MALLOCZ(osh, sizeof(pktpool_t)); if (pktpool_shared_rxlfrag == NULL) { ASSERT(0); + err = BCME_NOMEM; goto error3; } #endif @@ -1038,8 +1072,8 @@ hnd_pktpool_init(osl_t *osh) * were not filled into the pool. 
*/ n = 1; - if (pktpool_init(osh, pktpool_shared, - &n, PKTBUFSZ, FALSE, lbuf_basic) == BCME_ERROR) { + if ((err = pktpool_init(osh, pktpool_shared, + &n, PKTBUFSZ, FALSE, lbuf_basic)) != BCME_OK) { ASSERT(0); goto error4; } @@ -1047,8 +1081,8 @@ hnd_pktpool_init(osl_t *osh) #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) n = 1; - if (pktpool_init(osh, pktpool_shared_lfrag, - &n, PKTFRAGSZ, TRUE, lbuf_frag) == BCME_ERROR) { + if ((err = pktpool_init(osh, pktpool_shared_lfrag, + &n, PKTFRAGSZ, TRUE, lbuf_frag)) != BCME_OK) { ASSERT(0); goto error5; } @@ -1056,8 +1090,8 @@ hnd_pktpool_init(osl_t *osh) #endif #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) n = 1; - if (pktpool_init(osh, pktpool_shared_rxlfrag, - &n, PKTRXFRAGSZ, TRUE, lbuf_rxfrag) == BCME_ERROR) { + if ((err = pktpool_init(osh, pktpool_shared_rxlfrag, + &n, PKTRXFRAGSZ, TRUE, lbuf_rxfrag)) != BCME_OK) { ASSERT(0); goto error6; } @@ -1066,7 +1100,7 @@ hnd_pktpool_init(osl_t *osh) pktpool_osh = osh; - return; + return BCME_OK; #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) error6: @@ -1100,15 +1134,17 @@ error2: error1: pktpool_dettach(osh); -} +error0: + return err; +} /* hnd_pktpool_init */ -void +int hnd_pktpool_fill(pktpool_t *pktpool, bool minimal) { - pktpool_fill(pktpool_osh, pktpool, minimal); + return (pktpool_fill(pktpool_osh, pktpool, minimal)); } -/* refill pktpools after reclaim */ +/** refills pktpools after reclaim */ void hnd_pktpool_refill(bool minimal) { @@ -1118,6 +1154,19 @@ hnd_pktpool_refill(bool minimal) /* fragpool reclaim */ #ifdef BCMFRAGPOOL if (POOL_ENAB(pktpool_shared_lfrag)) { +#if defined(SRMEM) + if (SRMEM_ENAB()) { + int maxlen = pktpool_maxlen(pktpool_shared); + int len = pktpool_len(pktpool_shared); + + for (; len < maxlen; len++) { + void *p; + if ((p = PKTSRGET(pktpool_plen(pktpool_shared))) == NULL) + break; + pktpool_add(pktpool_shared, p); + } + } +#endif /* SRMEM */ pktpool_fill(pktpool_osh, pktpool_shared_lfrag, 
minimal); } #endif /* BCMFRAGPOOL */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/hnd_pktq.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/hnd_pktq.c index 71de6af41098..132b32135de0 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/hnd_pktq.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/hnd_pktq.c @@ -1,7 +1,7 @@ /* * HND generic pktq operation primitives * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: hnd_pktq.c 605726 2015-12-11 07:08:16Z $ + * $Id: hnd_pktq.c 644628 2016-06-21 06:25:58Z $ */ #include @@ -44,7 +44,7 @@ #define HND_PKTQ_MUTEX_DELETE(mutex) OSL_EXT_SUCCESS #define HND_PKTQ_MUTEX_ACQUIRE(mutex, msec) OSL_EXT_SUCCESS #define HND_PKTQ_MUTEX_RELEASE(mutex) OSL_EXT_SUCCESS -#endif +#endif /* */ /* * osl multiple-precedence packet queue @@ -60,8 +60,7 @@ pktq_penq(struct pktq *pq, int prec, void *p) return NULL; ASSERT(prec >= 0 && prec < pq->num_prec); - /* queueing chains not allowed and no segmented SKB (Kernel-3.18.y) */ - ASSERT(!((PKTLINK(p) != NULL) && (PKTLINK(p) != p))); + ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */ ASSERT(!pktq_full(pq)); ASSERT(!pktq_pfull(pq, prec)); @@ -98,8 +97,7 @@ pktq_penq_head(struct pktq *pq, int prec, void *p) return NULL; ASSERT(prec >= 0 && prec < pq->num_prec); - /* queueing chains not allowed and no segmented SKB (Kernel-3.18.y) */ - ASSERT(!((PKTLINK(p) != NULL) && (PKTLINK(p) != p))); + ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */ ASSERT(!pktq_full(pq)); ASSERT(!pktq_pfull(pq, prec)); @@ -389,48 +387,6 @@ done: return p; } -void -pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn, int arg) -{ - struct pktq_prec *q; - void *p, *next, *prev = NULL; - - /* protect shared 
resource */ - if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) - return; - - q = &pq->q[prec]; - p = q->head; - while (p) { - next = PKTLINK(p); - if (fn == NULL || (*fn)(p, arg)) { - bool head = (p == q->head); - if (head) - q->head = next; - else - PKTSETLINK(prev, next); - PKTSETLINK(p, NULL); - PKTFREE(osh, p, dir); - q->len--; - pq->len--; - } else { - prev = p; - } - p = next; - } - - q->tail = prev; - - if (q->head == NULL) { - ASSERT(q->len == 0); - ASSERT(q->tail == NULL); - } - - /* protect shared resource */ - if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) - return; -} - bool BCMFASTPATH pktq_pdel(struct pktq *pq, void *pktbuf, int prec) { @@ -477,6 +433,110 @@ done: return ret; } +static void +_pktq_pfilter(struct pktq *pq, int prec, pktq_filter_t fltr, void* fltr_ctx, + defer_free_pkt_fn_t defer, void *defer_ctx) +{ + struct pktq_prec wq; + struct pktq_prec *q; + void *p; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + /* move the prec queue aside to a work queue */ + q = &pq->q[prec]; + + wq = *q; + + q->head = NULL; + q->tail = NULL; + q->len = 0; + + pq->len -= wq.len; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; + + /* start with the head of the work queue */ + while ((p = wq.head) != NULL) { + /* unlink the current packet from the list */ + wq.head = PKTLINK(p); + PKTSETLINK(p, NULL); + wq.len--; + + /* call the filter function on current packet */ + ASSERT(fltr != NULL); + switch ((*fltr)(fltr_ctx, p)) { + case PKT_FILTER_NOACTION: + /* put this packet back */ + pktq_penq(pq, prec, p); + break; + + case PKT_FILTER_DELETE: + /* delete this packet */ + ASSERT(defer != NULL); + (*defer)(defer_ctx, p); + break; + + case PKT_FILTER_REMOVE: + /* pkt already removed from list */ + break; + + default: + ASSERT(0); + break; + } + } + + ASSERT(wq.len == 0); +} + +void 
+pktq_pfilter(struct pktq *pq, int prec, pktq_filter_t fltr, void* fltr_ctx, + defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx) +{ + _pktq_pfilter(pq, prec, fltr, fltr_ctx, defer, defer_ctx); + + ASSERT(flush != NULL); + (*flush)(flush_ctx); +} + +void +pktq_filter(struct pktq *pq, pktq_filter_t fltr, void* fltr_ctx, + defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx) +{ + bool filter = FALSE; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + /* Optimize if pktq len = 0, just return. + * pktq len of 0 means pktq's prec q's are all empty. + */ + if (pq->len > 0) { + filter = TRUE; + } + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; + + if (filter) { + int prec; + + PKTQ_PREC_ITER(pq, prec) { + _pktq_pfilter(pq, prec, fltr, fltr_ctx, defer, defer_ctx); + } + + ASSERT(flush != NULL); + (*flush)(flush_ctx); + } +} + bool pktq_init(struct pktq *pq, int num_prec, int max_len) { @@ -503,6 +563,7 @@ pktq_init(struct pktq *pq, int num_prec, int max_len) bool pktq_deinit(struct pktq *pq) { + BCM_REFERENCE(pq); if (HND_PKTQ_MUTEX_DELETE(&pq->mutex) != OSL_EXT_SUCCESS) return FALSE; @@ -677,9 +738,24 @@ done: } void -pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg) +pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir) { - int prec; + void *p; + + /* no need for a mutex protection! 
*/ + + /* start with the head of the list */ + while ((p = pktq_pdeq(pq, prec)) != NULL) { + + /* delete this packet */ + PKTFREE(osh, p, dir); + } +} + +void +pktq_flush(osl_t *osh, struct pktq *pq, bool dir) +{ + bool flush = FALSE; /* protect shared resource */ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) @@ -688,18 +764,21 @@ pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg) /* Optimize flush, if pktq len = 0, just return. * pktq len of 0 means pktq's prec q's are all empty. */ - if (pq->len == 0) - goto done; + if (pq->len > 0) { + flush = TRUE; + } - for (prec = 0; prec < pq->num_prec; prec++) - pktq_pflush(osh, pq, prec, dir, fn, arg); - if (fn == NULL) - ASSERT(pq->len == 0); - -done: /* protect shared resource */ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) return; + + if (flush) { + int prec; + + PKTQ_PREC_ITER(pq, prec) { + pktq_pflush(osh, pq, prec, dir); + } + } } /* Return sum of lengths of a specific set of precedences */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/hndpmu.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/hndpmu.c index c0c658203dda..c76a943dab38 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/hndpmu.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/hndpmu.c @@ -2,7 +2,7 @@ * Misc utility routines for accessing PMU corerev specific features * of the SiliconBackplane-based Broadcom chips. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: hndpmu.c 530092 2015-01-29 04:44:58Z $ + * $Id: hndpmu.c 657872 2016-09-02 22:17:34Z $ */ @@ -49,6 +49,14 @@ #include #include #include +#if defined(BCMULP) +#include +#endif /* defined(BCMULP) */ +#include +#ifdef EVENT_LOG_COMPILE +#include +#endif +#include #define PMU_ERROR(args) @@ -95,6 +103,7 @@ static const sdiod_drive_str_t sdiod_drive_strength_tab2[] = { {2, 0x1}, {0, 0x0} }; + /* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */ static const sdiod_drive_str_t sdiod_drive_strength_tab3[] = { {32, 0x7}, @@ -192,7 +201,8 @@ si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength) if (!(sih->cccaps & CC_CAP_PMU)) { return; } - + BCM_REFERENCE(sdiod_drive_strength_tab1); + BCM_REFERENCE(sdiod_drive_strength_tab2); /* Remember original core before switch to chipc/pmu */ origidx = si_coreidx(sih); if (AOB_ENAB(sih)) { @@ -202,25 +212,12 @@ si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength) } ASSERT(pmu != NULL); - switch (SDIOD_DRVSTR_KEY(CHIPID(sih->chip), sih->pmurev)) { - case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1): - str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab1; - str_mask = 0x30000000; - str_shift = 28; - break; - case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2): - case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3): - case SDIOD_DRVSTR_KEY(BCM4315_CHIP_ID, 4): - str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab2; - str_mask = 0x00003800; - str_shift = 11; - break; + switch (SDIOD_DRVSTR_KEY(CHIPID(sih->chip), PMUREV(sih->pmurev))) { case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8): case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 11): - if (sih->pmurev == 8) { + if (PMUREV(sih->pmurev) == 8) { str_tab = (sdiod_drive_str_t 
*)&sdiod_drive_strength_tab3; - } - else if (sih->pmurev == 11) { + } else if (PMUREV(sih->pmurev) == 11) { str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8; } str_mask = 0x00003800; @@ -257,7 +254,7 @@ si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength) default: PMU_MSG(("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n", bcm_chipname( - CHIPID(sih->chip), chn, 8), CHIPREV(sih->chiprev), sih->pmurev)); + CHIPID(sih->chip), chn, 8), CHIPREV(sih->chiprev), PMUREV(sih->pmurev))); break; } @@ -290,3 +287,96 @@ si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength) /* Return to original core */ si_setcoreidx(sih, origidx); } /* si_sdiod_drive_strength_init */ + + +#if defined(BCMULP) +int +si_pmu_ulp_register(si_t *sih) +{ + return ulp_p1_module_register(ULP_MODULE_ID_PMU, &ulp_pmu_ctx, (void *)sih); +} + +static uint +si_pmu_ulp_get_retention_size_cb(void *handle, ulp_ext_info_t *einfo) +{ + ULP_DBG(("%s: sz: %d\n", __FUNCTION__, sizeof(si_pmu_ulp_cr_dat_t))); + return sizeof(si_pmu_ulp_cr_dat_t); +} + +static int +si_pmu_ulp_enter_cb(void *handle, ulp_ext_info_t *einfo, uint8 *cache_data) +{ + si_pmu_ulp_cr_dat_t crinfo = {0}; + crinfo.ilpcycles_per_sec = ilpcycles_per_sec; + ULP_DBG(("%s: ilpcycles_per_sec: %x\n", __FUNCTION__, ilpcycles_per_sec)); + memcpy(cache_data, (void*)&crinfo, sizeof(crinfo)); + return BCME_OK; +} + +static int +si_pmu_ulp_exit_cb(void *handle, uint8 *cache_data, + uint8 *p2_cache_data) +{ + si_pmu_ulp_cr_dat_t *crinfo = (si_pmu_ulp_cr_dat_t *)cache_data; + + ilpcycles_per_sec = crinfo->ilpcycles_per_sec; + ULP_DBG(("%s: ilpcycles_per_sec: %x, cache_data: %p\n", __FUNCTION__, + ilpcycles_per_sec, cache_data)); + return BCME_OK; +} + +void +si_pmu_ulp_ilp_config(si_t *sih, osl_t *osh, uint32 ilp_period) +{ + pmuregs_t *pmu; + pmu = si_setcoreidx(sih, si_findcoreidx(sih, PMU_CORE_ID, 0)); + W_REG(osh, &pmu->ILPPeriod, ilp_period); +} +#endif /* defined(BCMULP) */ + + + +void 
si_pmu_set_min_res_mask(si_t *sih, osl_t *osh, uint min_res_mask) +{ + pmuregs_t *pmu; + uint origidx; + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } + else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + W_REG(osh, &pmu->min_res_mask, min_res_mask); + OSL_DELAY(100); + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} + +bool +si_pmu_cap_fast_lpo(si_t *sih) +{ + return (PMU_REG(sih, core_cap_ext, 0, 0) & PCAP_EXT_USE_MUXED_ILP_CLK_MASK) ? TRUE : FALSE; +} + +int +si_pmu_fast_lpo_disable(si_t *sih) +{ + if (!si_pmu_cap_fast_lpo(sih)) { + PMU_ERROR(("%s: No Fast LPO capability\n", __FUNCTION__)); + return BCME_ERROR; + } + + PMU_REG(sih, pmucontrol_ext, + PCTL_EXT_FASTLPO_ENAB | + PCTL_EXT_FASTLPO_SWENAB | + PCTL_EXT_FASTLPO_PCIE_SWENAB, + 0); + OSL_DELAY(1000); + return BCME_OK; +} diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/802.11.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/802.11.h old mode 100755 new mode 100644 similarity index 87% rename from drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/802.11.h rename to drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/802.11.h index 7aaea5d0596d..2d7650ae9bbd --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/802.11.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/802.11.h @@ -1,7 +1,7 @@ /* * Fundamental types and constants relating to 802.11 * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: 802.11.h 556559 2015-05-14 01:48:17Z $ + * $Id: 802.11.h 700693 2017-05-20 20:29:07Z $ */ #ifndef _802_11_H_ @@ -35,10 +35,10 @@ #endif #ifndef 
_NET_ETHERNET_H_ -#include +#include #endif -#include +#include /* This marks the start of a packed structure section. */ #include @@ -96,6 +96,8 @@ /** 802.2 LLC/SNAP header used by 802.11 per 802.1H */ #define DOT11_LLC_SNAP_HDR_LEN 8 /* d11 LLC/SNAP header length */ +/* minimum LLC header length; DSAP, SSAP, 8 bit Control (unnumbered) */ +#define DOT11_LLC_HDR_LEN_MIN 3 #define DOT11_OUI_LEN 3 /* d11 OUI length */ BWL_PRE_PACKED_STRUCT struct dot11_llc_snap_header { uint8 dsap; /* always 0xAA */ @@ -262,7 +264,10 @@ BWL_PRE_PACKED_STRUCT struct dot11_auth { uint16 seq; /* sequence control */ uint16 status; /* status code */ } BWL_POST_PACKED_STRUCT; -#define DOT11_AUTH_FIXED_LEN 6 /* length of auth frame without challenge IE */ +#define DOT11_AUTH_FIXED_LEN 6 /* length of auth frame without challenge IE */ +#define DOT11_AUTH_SEQ_STATUS_LEN 4 /* length of auth frame without challenge IE and + * without algorithm + */ BWL_PRE_PACKED_STRUCT struct dot11_assoc_req { uint16 capability; /* capability information */ @@ -397,6 +402,17 @@ BWL_PRE_PACKED_STRUCT struct dot11_action_frmhdr { uint8 data[1]; } BWL_POST_PACKED_STRUCT; typedef struct dot11_action_frmhdr dot11_action_frmhdr_t; + +/* Action Field length */ +#define DOT11_ACTION_CATEGORY_LEN 1 +#define DOT11_ACTION_ACTION_LEN 1 +#define DOT11_ACTION_DIALOG_TOKEN_LEN 1 +#define DOT11_ACTION_CAPABILITY_LEN 2 +#define DOT11_ACTION_STATUS_CODE_LEN 2 +#define DOT11_ACTION_REASON_CODE_LEN 2 +#define DOT11_ACTION_TARGET_CH_LEN 1 +#define DOT11_ACTION_OPER_CLASS_LEN 1 + #define DOT11_ACTION_FRMHDR_LEN 2 /** CSA IE data structure */ @@ -430,7 +446,7 @@ BWL_PRE_PACKED_STRUCT struct dot11_csa_body { /** 11n Extended Channel Switch IE data structure */ BWL_PRE_PACKED_STRUCT struct dot11_ext_csa { - uint8 id; /* id DOT11_MNG_EXT_CHANNEL_SWITCH_ID */ + uint8 id; /* id DOT11_MNG_EXT_CSA_ID */ uint8 len; /* length of IE */ struct dot11_csa_body b; /* body of the ie */ } BWL_POST_PACKED_STRUCT; @@ -469,6 +485,25 @@ 
BWL_PRE_PACKED_STRUCT struct dot11_channel_switch_wrapper { } BWL_POST_PACKED_STRUCT; typedef struct dot11_channel_switch_wrapper dot11_chan_switch_wrapper_ie_t; +typedef enum wide_bw_chan_width { + WIDE_BW_CHAN_WIDTH_20 = 0, + WIDE_BW_CHAN_WIDTH_40 = 1, + WIDE_BW_CHAN_WIDTH_80 = 2, + WIDE_BW_CHAN_WIDTH_160 = 3, + WIDE_BW_CHAN_WIDTH_80_80 = 4 +} wide_bw_chan_width_t; + +/** Wide Bandwidth Channel IE data structure */ +BWL_PRE_PACKED_STRUCT struct dot11_wide_bw_channel { + uint8 id; /* id DOT11_MNG_WIDE_BW_CHANNEL_ID */ + uint8 len; /* length of IE */ + uint8 channel_width; /* channel width */ + uint8 center_frequency_segment_0; /* center frequency segment 0 */ + uint8 center_frequency_segment_1; /* center frequency segment 1 */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_wide_bw_channel dot11_wide_bw_chan_ie_t; + +#define DOT11_WIDE_BW_IE_LEN 3 /* length of IE data, not including 2 byte header */ /** VHT Transmit Power Envelope IE data structure */ BWL_PRE_PACKED_STRUCT struct dot11_vht_transmit_power_envelope { uint8 id; /* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */ @@ -613,6 +648,12 @@ BWL_PRE_PACKED_STRUCT struct dot11_meas_req_loc { uint16 si; /* service interval */ uint8 data[1]; } BWL_POST_PACKED_STRUCT civic; + BWL_PRE_PACKED_STRUCT struct { + uint8 subject; + uint8 siu; /* service interval units */ + uint16 si; /* service interval */ + uint8 data[1]; + } BWL_POST_PACKED_STRUCT locid; BWL_PRE_PACKED_STRUCT struct { uint16 max_init_delay; /* maximum random initial delay */ uint8 min_ap_count; @@ -633,6 +674,16 @@ BWL_PRE_PACKED_STRUCT struct dot11_lci_subelement { } BWL_POST_PACKED_STRUCT; typedef struct dot11_lci_subelement dot11_lci_subelement_t; +BWL_PRE_PACKED_STRUCT struct dot11_colocated_bssid_list_se { + uint8 sub_id; + uint8 length; + uint8 max_bssid_ind; /* MaxBSSID Indicator */ + struct ether_addr bssid[1]; /* variable */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_colocated_bssid_list_se dot11_colocated_bssid_list_se_t; +#define 
DOT11_LCI_COLOCATED_BSSID_LIST_FIXED_LEN 3 +#define DOT11_LCI_COLOCATED_BSSID_SUBELEM_ID 7 + BWL_PRE_PACKED_STRUCT struct dot11_civic_subelement { uint8 type; /* type of civic location */ uint8 subelement; @@ -666,6 +717,12 @@ BWL_PRE_PACKED_STRUCT struct dot11_meas_rep { uint8 length; uint8 data[1]; } BWL_POST_PACKED_STRUCT civic; + BWL_PRE_PACKED_STRUCT struct { + uint8 exp_tsf[8]; + uint8 subelement; + uint8 length; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT locid; BWL_PRE_PACKED_STRUCT struct { uint8 entry_count; uint8 data[1]; @@ -677,6 +734,7 @@ typedef struct dot11_meas_rep dot11_meas_rep_t; #define DOT11_MNG_IE_MREP_MIN_LEN 5 /* d11 measurement report IE length */ #define DOT11_MNG_IE_MREP_LCI_FIXED_LEN 5 /* d11 measurement report IE length */ #define DOT11_MNG_IE_MREP_CIVIC_FIXED_LEN 6 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREP_LOCID_FIXED_LEN 13 /* d11 measurement report IE length */ #define DOT11_MNG_IE_MREP_BASIC_FIXED_LEN 15 /* d11 measurement report IE length */ #define DOT11_MNG_IE_MREP_FRNG_FIXED_LEN 4 @@ -742,6 +800,13 @@ typedef uint8 ac_bitmap_t; /* AC bitmap of (1 << AC_xx) */ #define AC_BITMAP_SET(ab, ac) (((ab) |= (1 << (ac)))) #define AC_BITMAP_RESET(ab, ac) (((ab) &= ~(1 << (ac)))) +/* Management PKT Lifetime indices */ +/* Removing flag checks 'BCMINTERNAL || WLTEST' + * while merging MERGE BIS120RC4 to DINGO2 + */ +#define MGMT_ALL 0xffff +#define MGMT_AUTH_LT FC_SUBTYPE_AUTH +#define MGMT_ASSOC_LT FC_SUBTYPE_ASSOC_REQ /** WME Information Element (IE) */ BWL_PRE_PACKED_STRUCT struct wme_ie { @@ -902,6 +967,22 @@ typedef struct ti_ie ti_ie_t; #define TI_TYPE_REASSOC_DEADLINE 1 #define TI_TYPE_KEY_LIFETIME 2 +#ifndef CISCO_AIRONET_OUI +#define CISCO_AIRONET_OUI "\x00\x40\x96" /* Cisco AIRONET OUI */ +#endif +/* QoS FastLane IE. 
*/ +BWL_PRE_PACKED_STRUCT struct ccx_qfl_ie { + uint8 id; /* 221, DOT11_MNG_VS_ID */ + uint8 length; /* 5 */ + uint8 oui[3]; /* 00:40:96 */ + uint8 type; /* 11 */ + uint8 data; +} BWL_POST_PACKED_STRUCT; +typedef struct ccx_qfl_ie ccx_qfl_ie_t; +#define CCX_QFL_IE_TYPE 11 +#define CCX_QFL_ENABLE_SHIFT 5 +#define CCX_QFL_ENALBE (1 << CCX_QFL_ENABLE_SHIFT) + /* WME Action Codes */ #define WME_ADDTS_REQUEST 0 /* WME ADDTS request */ #define WME_ADDTS_RESPONSE 1 /* WME ADDTS response */ @@ -921,6 +1002,10 @@ typedef struct ti_ie ti_ie_t; #define DOT11_OPEN_SYSTEM 0 /* d11 open authentication */ #define DOT11_SHARED_KEY 1 /* d11 shared authentication */ #define DOT11_FAST_BSS 2 /* d11 fast bss authentication */ +#define DOT11_SAE 3 /* d11 simultaneous authentication of equals */ +#define DOT11_FILS_SKEY_PFS 4 /* d11 fils shared key authentication w/o pfs */ +#define DOT11_FILS_SKEY 5 /* d11 fils shared key authentication w/ pfs */ +#define DOT11_FILS_PKEY 6 /* d11 fils public key authentication */ #define DOT11_CHALLENGE_LEN 128 /* d11 challenge text length */ /* Frame control macros */ @@ -1304,6 +1389,7 @@ typedef struct ti_ie ti_ie_t; #define DOT11_MNG_COUNTRY_ID 7 /* d11 management country id */ #define DOT11_MNG_HOPPING_PARMS_ID 8 /* d11 management hopping parameter id */ #define DOT11_MNG_HOPPING_TABLE_ID 9 /* d11 management hopping table id */ +#define DOT11_MNG_FTM_SYNC_INFO_ID 9 /* 11mc D4.3 */ #define DOT11_MNG_REQUEST_ID 10 /* d11 management request id */ #define DOT11_MNG_QBSS_LOAD_ID 11 /* d11 management QBSS Load id */ #define DOT11_MNG_EDCA_PARAM_ID 12 /* 11E EDCA Parameter id */ @@ -1370,7 +1456,7 @@ typedef struct ti_ie ti_ie_t; #define DOT11_MNG_WAKEUP_SCHEDULE_ID 102 /* 11z TDLS Wakeup Schedule IE */ #define DOT11_MNG_CHANNEL_SWITCH_TIMING_ID 104 /* 11z TDLS Channel Switch Timing IE */ #define DOT11_MNG_PTI_CONTROL_ID 105 /* 11z TDLS PTI Control IE */ -#define DOT11_MNG_PU_BUFFER_STATUS_ID 106 /* 11z TDLS PU Buffer Status IE */ +#define 
DOT11_MNG_PU_BUFFER_STATUS_ID 106 /* 11z TDLS PU Buffer Status IE */ #define DOT11_MNG_INTERWORKING_ID 107 /* 11u interworking */ #define DOT11_MNG_ADVERTISEMENT_ID 108 /* 11u advertisement protocol */ #define DOT11_MNG_EXP_BW_REQ_ID 109 /* 11u expedited bandwith request */ @@ -1380,7 +1466,6 @@ typedef struct ti_ie ti_ie_t; #define DOT11_MNG_MESH_CONFIG 113 /* Mesh Configuration */ #define DOT11_MNG_MESH_ID 114 /* Mesh ID */ #define DOT11_MNG_MESH_PEER_MGMT_ID 117 /* Mesh PEER MGMT IE */ - #define DOT11_MNG_EXT_CAP_ID 127 /* d11 mgmt ext capability */ #define DOT11_MNG_EXT_PREQ_ID 130 /* Mesh PREQ IE */ #define DOT11_MNG_EXT_PREP_ID 131 /* Mesh PREP IE */ @@ -1388,17 +1473,51 @@ typedef struct ti_ie ti_ie_t; #define DOT11_MNG_VHT_CAP_ID 191 /* d11 mgmt VHT cap id */ #define DOT11_MNG_VHT_OPERATION_ID 192 /* d11 mgmt VHT op id */ #define DOT11_MNG_EXT_BSSLOAD_ID 193 /* d11 mgmt VHT extended bss load id */ -#define DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID 194 /* Wide BW Channel Switch IE */ -#define DOT11_MNG_VHT_TRANSMIT_POWER_ENVELOPE_ID 195 /* VHT transmit Power Envelope IE */ -#define DOT11_MNG_CHANNEL_SWITCH_WRAPPER_ID 196 /* Channel Switch Wrapper IE */ -#define DOT11_MNG_AID_ID 197 /* Association ID IE */ -#define DOT11_MNG_OPER_MODE_NOTIF_ID 199 /* d11 mgmt VHT oper mode notif */ +#define DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID 194 /* Wide BW Channel Switch IE */ +#define DOT11_MNG_VHT_TRANSMIT_POWER_ENVELOPE_ID 195 /* VHT transmit Power Envelope IE */ +#define DOT11_MNG_CHANNEL_SWITCH_WRAPPER_ID 196 /* Channel Switch Wrapper IE */ +#define DOT11_MNG_AID_ID 197 /* Association ID IE */ +#define DOT11_MNG_OPER_MODE_NOTIF_ID 199 /* d11 mgmt VHT oper mode notif */ +#define DOT11_MNG_RNR_ID 201 +#define DOT11_MNG_HE_CAP_ID 202 +#define DOT11_MNG_HE_OP_ID 203 #define DOT11_MNG_FTM_PARAMS_ID 206 - +#define DOT11_MNG_TWT_ID 216 /* 11ah D5.0 */ #define DOT11_MNG_WPA_ID 221 /* d11 management WPA id */ #define DOT11_MNG_PROPR_ID 221 /* should start using this one instead of 
above two */ #define DOT11_MNG_VS_ID 221 /* d11 management Vendor Specific IE */ +#define DOT11_MNG_MESH_CSP_ID 222 /* d11 Mesh Channel Switch Parameter */ +#define DOT11_MNG_FILS_IND_ID 240 /* 11ai FILS Indication element */ + +/* The follwing ID extensions should be defined >= 255 + * i.e. the values should include 255 (DOT11_MNG_ID_EXT_ID + ID Extension). + */ +#define DOT11_MNG_ID_EXT_ID 255 /* Element ID Extension 11mc D4.3 */ +#define DOT11_MNG_RAPS_ID (DOT11_MNG_ID_EXT_ID+11) /* OFDMA Random Access Parameter Set */ + +/* FILS ext ids */ +#define FILS_REQ_PARAMS_EXT_ID 2 +#define DOT11_MNG_FILS_REQ_PARAMS (DOT11_MNG_ID_EXT_ID + FILS_REQ_PARAMS_EXT_ID) +#define FILS_SESSION_EXT_ID 4 +#define DOT11_MNG_FILS_SESSION (DOT11_MNG_ID_EXT_ID + FILS_SESSION_EXT_ID) +#define FILS_HLP_CONTAINER_EXT_ID 5 +#define DOT11_MNG_FILS_HLP_CONTAINER (DOT11_MNG_ID_EXT_ID + FILS_HLP_CONTAINER_EXT_ID) +#define FILS_WRAPPED_DATA_EXT_ID 8 +#define DOT11_MNG_FILS_WRAPPED_DATA (DOT11_MNG_ID_EXT_ID + FILS_WRAPPED_DATA_EXT_ID) +#define FILS_NONCE_EXT_ID 13 +#define DOT11_MNG_FILS_NONCE (DOT11_MNG_ID_EXT_ID + FILS_NONCE_EXT_ID) + +#define DOT11_MNG_IE_ID_EXT_MATCH(_ie, _id) (\ + ((_ie)->id == DOT11_MNG_ID_EXT_ID) && \ + ((_ie)->len > 0) && \ + ((_id) == ((uint8 *)(_ie) + TLV_HDR_LEN)[0])) + +#define DOT11_MNG_IE_ID_EXT_INIT(_ie, _id, _len) do {\ + (_ie)->id = DOT11_MNG_ID_EXT_ID; \ + (_ie)->len = _len; \ + (_ie)->id_ext = _id; \ + } while (0) /* Rate Defines */ @@ -1432,13 +1551,14 @@ typedef struct ti_ie ti_ie_t; #define DOT11_RATE_MASK 0x7F /* mask for numeric part of rate */ /* BSS Membership Selector parameters - * 802.11-2012 and 802.11ac_D4.0 sec 8.4.2.3 + * 802.11-2016 (and 802.11ax-D1.1), Sec 9.4.2.3 * These selector values are advertised in Supported Rates and Extended Supported Rates IEs * in the supported rates list with the Basic rate bit set. * Constants below include the basic bit. 
*/ #define DOT11_BSS_MEMBERSHIP_HT 0xFF /* Basic 0x80 + 127, HT Required to join */ #define DOT11_BSS_MEMBERSHIP_VHT 0xFE /* Basic 0x80 + 126, VHT Required to join */ +#define DOT11_BSS_MEMBERSHIP_HE 0xFD /* Basic 0x80 + 125, HE Required to join */ /* ERP info element bit values */ #define DOT11_MNG_ERP_LEN 1 /* ERP is currently 1 byte long */ @@ -1505,6 +1625,8 @@ typedef struct ti_ie ti_ie_t; /* service Interval granularity bit position and mask */ #define DOT11_EXT_CAP_SI 41 #define DOT11_EXT_CAP_SI_MASK 0x0E +/* Location Identifier service */ +#define DOT11_EXT_CAP_IDENT_LOC 44 /* WNM notification */ #define DOT11_EXT_CAP_WNM_NOTIF 46 /* Operating mode notification - VHT (11ac D3.0 - 8.4.2.29) */ @@ -1512,25 +1634,36 @@ typedef struct ti_ie ti_ie_t; /* Fine timing measurement - D3.0 */ #define DOT11_EXT_CAP_FTM_RESPONDER 70 #define DOT11_EXT_CAP_FTM_INITIATOR 71 /* tentative 11mcd3.0 */ +/* TWT support */ +#define DOT11_EXT_CAP_TWT_REQUESTER 75 +#define DOT11_EXT_CAP_TWT_RESPONDER 76 +/* TODO: Update DOT11_EXT_CAP_MAX_IDX to reflect the highest offset. + * Note: DOT11_EXT_CAP_MAX_IDX must only be used in attach path. + * It will cause ROM invalidation otherwise. + */ +#define DOT11_EXT_CAP_MAX_IDX 76 + #ifdef WL_FTM #define DOT11_EXT_CAP_MAX_BIT_IDX 95 /* !!!update this please!!! */ #else #define DOT11_EXT_CAP_MAX_BIT_IDX 62 /* !!!update this please!!! 
*/ #endif - /* extended capability */ #ifndef DOT11_EXTCAP_LEN_MAX #define DOT11_EXTCAP_LEN_MAX ((DOT11_EXT_CAP_MAX_BIT_IDX + 8) >> 3) #endif - BWL_PRE_PACKED_STRUCT struct dot11_extcap { uint8 extcap[DOT11_EXTCAP_LEN_MAX]; } BWL_POST_PACKED_STRUCT; typedef struct dot11_extcap dot11_extcap_t; -/* VHT Operating mode bit fields - (11ac D3.0 - 8.4.1.50) */ +/* VHT Operating mode bit fields - (11ac D8.0/802.11-2016 - 9.4.1.53) */ #define DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT 0 #define DOT11_OPER_MODE_CHANNEL_WIDTH_MASK 0x3 +#define DOT11_OPER_MODE_160_8080_BW_SHIFT 2 +#define DOT11_OPER_MODE_160_8080_BW_MASK 0x04 +#define DOT11_OPER_MODE_NOLDPC_SHIFT 3 +#define DOT11_OPER_MODE_NOLDPC_MASK 0x08 #define DOT11_OPER_MODE_RXNSS_SHIFT 4 #define DOT11_OPER_MODE_RXNSS_MASK 0x70 #define DOT11_OPER_MODE_RXNSS_TYPE_SHIFT 7 @@ -1543,9 +1676,22 @@ typedef struct dot11_extcap dot11_extcap_t; ((chanw) << DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT &\ DOT11_OPER_MODE_CHANNEL_WIDTH_MASK)) +#define DOT11_D8_OPER_MODE(type, nss, ldpc, bw160_8080, chanw) (\ + ((type) << DOT11_OPER_MODE_RXNSS_TYPE_SHIFT &\ + DOT11_OPER_MODE_RXNSS_TYPE_MASK) |\ + (((nss) - 1) << DOT11_OPER_MODE_RXNSS_SHIFT & DOT11_OPER_MODE_RXNSS_MASK) |\ + ((ldpc) << DOT11_OPER_MODE_NOLDPC_SHIFT & DOT11_OPER_MODE_NOLDPC_MASK) |\ + ((bw160_8080) << DOT11_OPER_MODE_160_8080_BW_SHIFT &\ + DOT11_OPER_MODE_160_8080_BW_MASK) |\ + ((chanw) << DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT &\ + DOT11_OPER_MODE_CHANNEL_WIDTH_MASK)) + #define DOT11_OPER_MODE_CHANNEL_WIDTH(mode) \ (((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK)\ >> DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT) +#define DOT11_OPER_MODE_160_8080(mode) \ + (((mode) & DOT11_OPER_MODE_160_8080_BW_MASK)\ + >> DOT11_OPER_MODE_160_8080_BW_SHIFT) #define DOT11_OPER_MODE_RXNSS(mode) \ ((((mode) & DOT11_OPER_MODE_RXNSS_MASK) \ >> DOT11_OPER_MODE_RXNSS_SHIFT) + 1) @@ -1558,6 +1704,7 @@ typedef struct dot11_extcap dot11_extcap_t; #define DOT11_OPER_MODE_80MHZ 2 #define DOT11_OPER_MODE_160MHZ 3 #define 
DOT11_OPER_MODE_8080MHZ 3 +#define DOT11_OPER_MODE_1608080MHZ 1 #define DOT11_OPER_MODE_CHANNEL_WIDTH_20MHZ(mode) (\ ((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_20MHZ) @@ -1566,9 +1713,9 @@ typedef struct dot11_extcap dot11_extcap_t; #define DOT11_OPER_MODE_CHANNEL_WIDTH_80MHZ(mode) (\ ((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_80MHZ) #define DOT11_OPER_MODE_CHANNEL_WIDTH_160MHZ(mode) (\ - ((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_160MHZ) + ((mode) & DOT11_OPER_MODE_160_8080_BW_MASK)) #define DOT11_OPER_MODE_CHANNEL_WIDTH_8080MHZ(mode) (\ - ((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_8080MHZ) + ((mode) & DOT11_OPER_MODE_160_8080_BW_MASK)) /* Operating mode information element 802.11ac D3.0 - 8.4.2.168 */ BWL_PRE_PACKED_STRUCT struct dot11_oper_mode_notif_ie { @@ -1607,6 +1754,9 @@ typedef struct dot11_oper_mode_notif_ie dot11_oper_mode_notif_ie_t; #define DOT11_ACTION_CAT_SELFPROT 15 /* category for Mesh, self protected */ #define DOT11_ACTION_NOTIFICATION 17 #define DOT11_ACTION_CAT_VHT 21 /* VHT action */ +#define DOT11_ACTION_CAT_S1G 22 /* S1G action */ +#define DOT11_ACTION_CAT_HE 27 /* HE action frame */ +#define DOT11_ACTION_CAT_FILS 26 /* FILS action frame */ #define DOT11_ACTION_CAT_VSP 126 /* protected vendor specific */ #define DOT11_ACTION_CAT_VS 127 /* category Vendor Specific */ @@ -1632,6 +1782,7 @@ typedef struct dot11_oper_mode_notif_ie dot11_oper_mode_notif_ie_t; /* Public action ids */ #define DOT11_PUB_ACTION_BSS_COEX_MNG 0 /* 20/40 Coexistence Management action id */ #define DOT11_PUB_ACTION_CHANNEL_SWITCH 4 /* d11 action channel switch */ +#define DOT11_PUB_ACTION_VENDOR_SPEC 9 /* Vendor specific */ #define DOT11_PUB_ACTION_GAS_CB_REQ 12 /* GAS Comeback Request */ #define DOT11_PUB_ACTION_FTM_REQ 32 /* FTM request */ #define DOT11_PUB_ACTION_FTM 33 /* FTM measurement */ @@ -1707,6 +1858,9 @@ typedef struct dot11_oper_mode_notif_ie dot11_oper_mode_notif_ie_t; 
#define DOT11_VHT_ACTION_GID_MGMT 1 /* Group ID Management */ #define DOT11_VHT_ACTION_OPER_MODE_NOTIF 2 /* Operating mode notif'n */ +/* FILS category action types - 802.11ai D11.0 - 9.6.8.1 */ +#define DOT11_FILS_ACTION_DISCOVERY 34 /* FILS Discovery */ + /** DLS Request frame header */ BWL_PRE_PACKED_STRUCT struct dot11_dls_req { uint8 category; /* category of action frame (2) */ @@ -2303,6 +2457,17 @@ typedef struct dot11_dms_resp_st dot11_dms_resp_st_t; #define DOT11_DMS_RESP_LSC_UNSUPPORTED 0xFFFF +/** WNM-Notification Request frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_wnm_notif_req { + uint8 category; /* category of action frame (10) */ + uint8 action; /* WNM action: Notification request (26) */ + uint8 token; /* dialog token */ + uint8 type; /* type */ + uint8 data[1]; /* Sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_wnm_notif_req dot11_wnm_notif_req_t; +#define DOT11_WNM_NOTIF_REQ_LEN 4 /* Fixed length */ + /** FMS Management Request frame header */ BWL_PRE_PACKED_STRUCT struct dot11_fms_req { uint8 category; /* category of action frame (10) */ @@ -2526,7 +2691,10 @@ typedef struct dot11_rrm_cap_ie dot11_rrm_cap_ie_t; #define DOT11_RRM_CAP_TTSCM 15 #define DOT11_RRM_CAP_AP_CHANREP 16 #define DOT11_RRM_CAP_RMMIB 17 -/* bit18-bit26, not used for RRM_IOVAR */ +/* bit18-bit23, not used for RRM_IOVAR */ +#define DOT11_RRM_CAP_MPC0 24 +#define DOT11_RRM_CAP_MPC1 25 +#define DOT11_RRM_CAP_MPC2 26 #define DOT11_RRM_CAP_MPTI 27 #define DOT11_RRM_CAP_NBRTSFO 28 #define DOT11_RRM_CAP_RCPI 29 @@ -2536,8 +2704,58 @@ typedef struct dot11_rrm_cap_ie dot11_rrm_cap_ie_t; #define DOT11_RRM_CAP_AI 33 #define DOT11_RRM_CAP_FTM_RANGE 34 #define DOT11_RRM_CAP_CIVIC_LOC 35 -#define DOT11_RRM_CAP_LAST 35 +#define DOT11_RRM_CAP_IDENT_LOC 36 +#define DOT11_RRM_CAP_LAST 36 +#ifdef WL11K_ALL_MEAS +#define DOT11_RRM_CAP_LINK_ENAB (1 << DOT11_RRM_CAP_LINK) +#define DOT11_RRM_CAP_FM_ENAB (1 << (DOT11_RRM_CAP_FM - 8)) +#define DOT11_RRM_CAP_CLM_ENAB (1 << 
(DOT11_RRM_CAP_CLM - 8)) +#define DOT11_RRM_CAP_NHM_ENAB (1 << (DOT11_RRM_CAP_NHM - 8)) +#define DOT11_RRM_CAP_SM_ENAB (1 << (DOT11_RRM_CAP_SM - 8)) +#define DOT11_RRM_CAP_LCIM_ENAB (1 << (DOT11_RRM_CAP_LCIM - 8)) +#define DOT11_RRM_CAP_TSCM_ENAB (1 << (DOT11_RRM_CAP_TSCM - 8)) +#ifdef WL11K_AP +#define DOT11_RRM_CAP_MPC0_ENAB (1 << (DOT11_RRM_CAP_MPC0 - 24)) +#define DOT11_RRM_CAP_MPC1_ENAB (1 << (DOT11_RRM_CAP_MPC1 - 24)) +#define DOT11_RRM_CAP_MPC2_ENAB (1 << (DOT11_RRM_CAP_MPC2 - 24)) +#define DOT11_RRM_CAP_MPTI_ENAB (1 << (DOT11_RRM_CAP_MPTI - 24)) +#else +#define DOT11_RRM_CAP_MPC0_ENAB 0 +#define DOT11_RRM_CAP_MPC1_ENAB 0 +#define DOT11_RRM_CAP_MPC2_ENAB 0 +#define DOT11_RRM_CAP_MPTI_ENAB 0 +#endif /* WL11K_AP */ +#define DOT11_RRM_CAP_CIVIC_LOC_ENAB (1 << (DOT11_RRM_CAP_CIVIC_LOC - 32)) +#define DOT11_RRM_CAP_IDENT_LOC_ENAB (1 << (DOT11_RRM_CAP_IDENT_LOC - 32)) +#else +#define DOT11_RRM_CAP_LINK_ENAB 0 +#define DOT11_RRM_CAP_FM_ENAB 0 +#define DOT11_RRM_CAP_CLM_ENAB 0 +#define DOT11_RRM_CAP_NHM_ENAB 0 +#define DOT11_RRM_CAP_SM_ENAB 0 +#define DOT11_RRM_CAP_LCIM_ENAB 0 +#define DOT11_RRM_CAP_TSCM_ENAB 0 +#define DOT11_RRM_CAP_MPC0_ENAB 0 +#define DOT11_RRM_CAP_MPC1_ENAB 0 +#define DOT11_RRM_CAP_MPC2_ENAB 0 +#define DOT11_RRM_CAP_MPTI_ENAB 0 +#define DOT11_RRM_CAP_CIVIC_LOC_ENAB 0 +#define DOT11_RRM_CAP_IDENT_LOC_ENAB 0 +#endif /* WL11K_ALL_MEAS */ +#ifdef WL11K_NBR_MEAS +#define DOT11_RRM_CAP_NEIGHBOR_REPORT_ENAB (1 << DOT11_RRM_CAP_NEIGHBOR_REPORT) +#else +#define DOT11_RRM_CAP_NEIGHBOR_REPORT_ENAB 0 +#endif /* WL11K_NBR_MEAS */ +#ifdef WL11K_BCN_MEAS +#define DOT11_RRM_CAP_BCN_PASSIVE_ENAB (1 << DOT11_RRM_CAP_BCN_PASSIVE) +#define DOT11_RRM_CAP_BCN_ACTIVE_ENAB (1 << DOT11_RRM_CAP_BCN_ACTIVE) +#else +#define DOT11_RRM_CAP_BCN_PASSIVE_ENAB 0 +#define DOT11_RRM_CAP_BCN_ACTIVE_ENAB 0 +#endif /* WL11K_BCN_MEAS */ +#define DOT11_RRM_CAP_MPA_MASK 0x7 /* Operating Class (formerly "Regulatory Class") definitions */ #define DOT11_OP_CLASS_NONE 255 @@ -2556,6 +2774,7 
@@ typedef struct do11_ap_chrep dot11_ap_chrep_t; #define DOT11_RM_ACTION_LM_REP 3 /* Link measurement report */ #define DOT11_RM_ACTION_NR_REQ 4 /* Neighbor report request */ #define DOT11_RM_ACTION_NR_REP 5 /* Neighbor report response */ +#define DOT11_PUB_ACTION_MP 7 /* Measurement Pilot public action id */ /** Generic radio measurement action frame header */ BWL_PRE_PACKED_STRUCT struct dot11_rm_action { @@ -2658,12 +2877,6 @@ typedef struct dot11_rmrep_bcn dot11_rmrep_bcn_t; /* Sub-element IDs for Frame Report */ #define DOT11_RMREP_FRAME_COUNT_REPORT 1 -/* Statistics Group Report: Group IDs */ -#define DOT11_RRM_STATS_GRP_ID_0 0 - -/* Statistics Group Report: Group Data length */ -#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_0 28 - /* Channel load request */ BWL_PRE_PACKED_STRUCT struct dot11_rmreq_chanload { uint8 id; @@ -2791,7 +3004,159 @@ BWL_PRE_PACKED_STRUCT struct dot11_rmrep_stat { } BWL_POST_PACKED_STRUCT; typedef struct dot11_rmrep_stat dot11_rmrep_stat_t; -/** Transmit stream/category measurement request */ +/* Statistics Group Report: Group IDs */ +enum { + DOT11_RRM_STATS_GRP_ID_0 = 0, + DOT11_RRM_STATS_GRP_ID_1, + DOT11_RRM_STATS_GRP_ID_2, + DOT11_RRM_STATS_GRP_ID_3, + DOT11_RRM_STATS_GRP_ID_4, + DOT11_RRM_STATS_GRP_ID_5, + DOT11_RRM_STATS_GRP_ID_6, + DOT11_RRM_STATS_GRP_ID_7, + DOT11_RRM_STATS_GRP_ID_8, + DOT11_RRM_STATS_GRP_ID_9, + DOT11_RRM_STATS_GRP_ID_10, + DOT11_RRM_STATS_GRP_ID_11, + DOT11_RRM_STATS_GRP_ID_12, + DOT11_RRM_STATS_GRP_ID_13, + DOT11_RRM_STATS_GRP_ID_14, + DOT11_RRM_STATS_GRP_ID_15, + DOT11_RRM_STATS_GRP_ID_16 +}; + +/* Statistics Group Report: Group Data length */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_0 28 +typedef struct rrm_stat_group_0 { + uint32 txfrag; + uint32 txmulti; + uint32 txfail; + uint32 rxframe; + uint32 rxmulti; + uint32 rxbadfcs; + uint32 txframe; +} rrm_stat_group_0_t; + +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_1 24 +typedef struct rrm_stat_group_1 { + uint32 txretry; + uint32 txretries; + uint32 rxdup; + uint32 
txrts; + uint32 rtsfail; + uint32 ackfail; +} rrm_stat_group_1_t; + +/* group 2-9 use same qos data structure (tid 0-7), total 52 bytes */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_2_9 52 +typedef struct rrm_stat_group_qos { + uint32 txfrag; + uint32 txfail; + uint32 txretry; + uint32 txretries; + uint32 rxdup; + uint32 txrts; + uint32 rtsfail; + uint32 ackfail; + uint32 rxfrag; + uint32 txframe; + uint32 txdrop; + uint32 rxmpdu; + uint32 rxretries; +} rrm_stat_group_qos_t; + +/* dot11BSSAverageAccessDelay Group (only available at an AP): 8 byte */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_10 8 +typedef BWL_PRE_PACKED_STRUCT struct rrm_stat_group_10 { + uint8 apavgdelay; + uint8 avgdelaybe; + uint8 avgdelaybg; + uint8 avgdelayvi; + uint8 avgdelayvo; + uint16 stacount; + uint8 chanutil; +} BWL_POST_PACKED_STRUCT rrm_stat_group_10_t; + +/* AMSDU, 40 bytes */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_11 40 +typedef struct rrm_stat_group_11 { + uint32 txamsdu; + uint32 amsdufail; + uint32 amsduretry; + uint32 amsduretries; + uint32 txamsdubyte_h; + uint32 txamsdubyte_l; + uint32 amsduackfail; + uint32 rxamsdu; + uint32 rxamsdubyte_h; + uint32 rxamsdubyte_l; +} rrm_stat_group_11_t; + +/* AMPDU, 36 bytes */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_12 36 +typedef struct rrm_stat_group_12 { + uint32 txampdu; + uint32 txmpdu; + uint32 txampdubyte_h; + uint32 txampdubyte_l; + uint32 rxampdu; + uint32 rxmpdu; + uint32 rxampdubyte_h; + uint32 rxampdubyte_l; + uint32 ampducrcfail; +} rrm_stat_group_12_t; + +/* BACK etc, 36 bytes */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_13 36 +typedef struct rrm_stat_group_13 { + uint32 rximpbarfail; + uint32 rxexpbarfail; + uint32 chanwidthsw; + uint32 txframe20mhz; + uint32 txframe40mhz; + uint32 rxframe20mhz; + uint32 rxframe40mhz; + uint32 psmpgrantdur; + uint32 psmpuseddur; +} rrm_stat_group_13_t; + +/* RD Dual CTS etc, 36 bytes */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_14 36 +typedef struct rrm_stat_group_14 { + uint32 grantrdgused; + uint32 
grantrdgunused; + uint32 txframeingrantrdg; + uint32 txbyteingrantrdg_h; + uint32 txbyteingrantrdg_l; + uint32 dualcts; + uint32 dualctsfail; + uint32 rtslsi; + uint32 rtslsifail; +} rrm_stat_group_14_t; + +/* bf and STBC etc, 20 bytes */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_15 20 +typedef struct rrm_stat_group_15 { + uint32 bfframe; + uint32 stbccts; + uint32 stbcctsfail; + uint32 nonstbccts; + uint32 nonstbcctsfail; +} rrm_stat_group_15_t; + +/* RSNA, 28 bytes */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_16 28 +typedef struct rrm_stat_group_16 { + uint32 rsnacmacicverr; + uint32 rsnacmacreplay; + uint32 rsnarobustmgmtccmpreplay; + uint32 rsnatkipicverr; + uint32 rsnatkipicvreplay; + uint32 rsnaccmpdecrypterr; + uint32 rsnaccmpreplay; +} rrm_stat_group_16_t; + +/* Transmit stream/category measurement request */ BWL_PRE_PACKED_STRUCT struct dot11_rmreq_tx_stream { uint8 id; uint8 len; @@ -2805,6 +3170,7 @@ BWL_PRE_PACKED_STRUCT struct dot11_rmreq_tx_stream { uint8 bin0_range; } BWL_POST_PACKED_STRUCT; typedef struct dot11_rmreq_tx_stream dot11_rmreq_tx_stream_t; +#define DOT11_RMREQ_TXSTREAM_LEN 17 /** Transmit stream/category measurement report */ BWL_PRE_PACKED_STRUCT struct dot11_rmrep_tx_stream { @@ -2829,7 +3195,25 @@ BWL_PRE_PACKED_STRUCT struct dot11_rmrep_tx_stream { uint32 bin5; } BWL_POST_PACKED_STRUCT; typedef struct dot11_rmrep_tx_stream dot11_rmrep_tx_stream_t; +#define DOT11_RMREP_TXSTREAM_LEN 71 +typedef struct rrm_tscm { + uint32 msdu_tx; + uint32 msdu_exp; + uint32 msdu_fail; + uint32 msdu_retries; + uint32 cfpolls_lost; + uint32 queue_delay; + uint32 tx_delay_sum; + uint32 tx_delay_cnt; + uint32 bin0_range_us; + uint32 bin0; + uint32 bin1; + uint32 bin2; + uint32 bin3; + uint32 bin4; + uint32 bin5; +} rrm_tscm_t; enum { DOT11_FTM_LOCATION_SUBJ_LOCAL = 0, /* Where am I? */ DOT11_FTM_LOCATION_SUBJ_REMOTE = 1, /* Where are you? 
*/ @@ -2852,6 +3236,7 @@ BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_lci { /* optional sub-elements */ } BWL_POST_PACKED_STRUCT; typedef struct dot11_rmreq_ftm_lci dot11_rmreq_ftm_lci_t; +#define DOT11_RMREQ_LCI_LEN 9 BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_lci { uint8 id; @@ -2884,6 +3269,7 @@ BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_civic { /* optional sub-elements */ } BWL_POST_PACKED_STRUCT; typedef struct dot11_rmreq_ftm_civic dot11_rmreq_ftm_civic_t; +#define DOT11_RMREQ_CIVIC_LEN 10 BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_civic { uint8 id; @@ -2906,6 +3292,37 @@ typedef struct dot11_rmrep_ftm_civic dot11_rmrep_ftm_civic_t; #define DOT11_FTM_CIVIC_TYPE_LEN 1 #define DOT11_FTM_CIVIC_UNKNOWN_LEN 3 +/* Location Identifier measurement request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_locid { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 subj; + uint8 siu; + uint16 si; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_locid dot11_rmreq_locid_t; +#define DOT11_RMREQ_LOCID_LEN 9 + +/* Location Identifier measurement report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_locid { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 exp_tsf[8]; + uint8 locid_sub_id; + uint8 locid_sub_len; + /* optional location identifier field */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_locid dot11_rmrep_locid_t; +#define DOT11_LOCID_UNKNOWN_LEN 10 +#define DOT11_LOCID_SUBELEM_ID 0 + BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_subel { uint8 id; uint8 len; @@ -2930,19 +3347,20 @@ BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_range { typedef struct dot11_rmreq_ftm_range dot11_rmreq_ftm_range_t; #define DOT11_RMREQ_FTM_RANGE_LEN 8 +#define DOT11_FTM_RANGE_LEN 3 BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_entry { uint32 start_tsf; /* 4 lsb of tsf */ struct ether_addr bssid; - uint16 range; - uint16 max_err; + uint8 range[DOT11_FTM_RANGE_LEN]; + uint8 
max_err[DOT11_FTM_RANGE_LEN]; uint8 rsvd; } BWL_POST_PACKED_STRUCT; typedef struct dot11_ftm_range_entry dot11_ftm_range_entry_t; #define DOT11_FTM_RANGE_ENTRY_MAX_COUNT 15 enum { - DOT11_FTM_RANGE_ERROR_AP_INCAPABLE = 3, - DOT11_FTM_RANGE_ERROR_AP_FAILED = 4, + DOT11_FTM_RANGE_ERROR_AP_INCAPABLE = 2, + DOT11_FTM_RANGE_ERROR_AP_FAILED = 3, DOT11_FTM_RANGE_ERROR_TX_FAILED = 8, DOT11_FTM_RANGE_ERROR_MAX }; @@ -2985,6 +3403,7 @@ BWL_PRE_PACKED_STRUCT struct dot11_rmreq_pause_time { uint16 pause_time; } BWL_POST_PACKED_STRUCT; typedef struct dot11_rmreq_pause_time dot11_rmreq_pause_time_t; +#define DOT11_RMREQ_PAUSE_LEN 7 /* Neighbor Report subelements ID (11k & 11v) */ @@ -2993,6 +3412,7 @@ typedef struct dot11_rmreq_pause_time dot11_rmreq_pause_time_t; #define DOT11_NGBR_BSSTRANS_PREF_SE_ID 3 #define DOT11_NGBR_BSS_TERM_DUR_SE_ID 4 #define DOT11_NGBR_BEARING_SE_ID 5 +#define DOT11_NGBR_WIDE_BW_CHAN_SE_ID 6 /** Neighbor Report, BSS Transition Candidate Preference subelement */ BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bsstrans_pref_se { @@ -3001,7 +3421,9 @@ BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bsstrans_pref_se { uint8 preference; } BWL_POST_PACKED_STRUCT; typedef struct dot11_ngbr_bsstrans_pref_se dot11_ngbr_bsstrans_pref_se_t; -#define DOT11_NGBR_BSSTRANS_PREF_SE_LEN 1 +#define DOT11_NGBR_BSSTRANS_PREF_SE_LEN 1 +#define DOT11_NGBR_BSSTRANS_PREF_SE_IE_LEN 3 +#define DOT11_NGBR_BSSTRANS_PREF_SE_HIGHEST 0xff /** Neighbor Report, BSS Termination Duration subelement */ BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bss_term_dur_se { @@ -3077,6 +3499,19 @@ BWL_PRE_PACKED_STRUCT struct dot11_lmrep { typedef struct dot11_lmrep dot11_lmrep_t; #define DOT11_LMREP_LEN 11 +#define DOT11_MP_CAP_SPECTRUM 0x01 /* d11 cap. spectrum */ +#define DOT11_MP_CAP_SHORTSLOT 0x02 /* d11 cap. shortslot */ +/* Measurement Pilot */ +BWL_PRE_PACKED_STRUCT struct dot11_mprep { + uint8 cap_info; /* Condensed capability Info. */ + uint8 country[2]; /* Condensed country string */ + uint8 opclass; /* Op. 
Class */ + uint8 channel; /* Channel */ + uint8 mp_interval; /* Measurement Pilot Interval */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_mprep dot11_mprep_t; +#define DOT11_MPREP_LEN 6 + /* 802.11 BRCM "Compromise" Pre N constants */ #define PREN_PREAMBLE 24 /* green field preamble time */ #define PREN_MM_EXT 12 /* extra mixed mode preamble time */ @@ -3206,7 +3641,6 @@ typedef int vht_group_id_t; #define VHT_N_SERVICE 16 /* bits in SERVICE field */ #define VHT_N_TAIL 6 /* tail bits per BCC encoder */ - /** dot11Counters Table - 802.11 spec., Annex D */ typedef struct d11cnt { uint32 txfrag; /* dot11TransmittedFragmentCount */ @@ -3228,12 +3662,19 @@ typedef struct d11cnt { #define BRCM_PROP_OUI "\x00\x90\x4C" -/* Action frame type for FTM Initiator Report */ -#define BRCM_FTM_VS_AF_TYPE 14 -enum { - BRCM_FTM_VS_INITIATOR_RPT_SUBTYPE = 1, /* FTM Initiator Report */ - BRCM_FTM_VS_COLLECT_SUBTYPE = 2, /* FTM Collect debug protocol */ -}; +#define BRCM_FTM_IE_TYPE 14 + +/* #define HT_CAP_IE_TYPE 51 + * #define HT_ADD_IE_TYPE 52 + * #define BRCM_EXTCH_IE_TYPE 53 + * #define MEMBER_OF_BRCM_PROP_IE_TYPE 54 + * #define BRCM_RELMACST_IE_TYPE 55 + * #define BRCM_EVT_WL_BSS_INFO 64 + * #define RWL_ACTION_WIFI_FRAG_TYPE 85 + * #define BTC_INFO_BRCM_PROP_IE_TYPE 90 + * #define ULB_BRCM_PROP_IE_TYPE 91 + * #define SDB_BRCM_PROP_IE_TYPE 92 + */ /* Action frame type for RWL */ #define RWL_WIFI_DEFAULT 0 @@ -3244,6 +3685,14 @@ enum { #define PROXD_AF_TYPE 11 /* Wifi proximity action frame type */ #define BRCM_RELMACST_AF_TYPE 12 /* RMC action frame type */ +/* Action frame type for FTM Initiator Report */ +#define BRCM_FTM_VS_AF_TYPE 14 +enum { + BRCM_FTM_VS_INITIATOR_RPT_SUBTYPE = 1, /* FTM Initiator Report */ + BRCM_FTM_VS_COLLECT_SUBTYPE = 2, /* FTM Collect debug protocol */ +}; + + /* brcm syscap_ie cap */ @@ -3442,6 +3891,11 @@ typedef struct ht_prop_cap_ie ht_prop_cap_ie_t; #define HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_MASK 0x18000 #define 
HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_SHIFT 15 +#define HT_CAP_MCS_FLAGS_SUPP_BYTE 12 /* byte offset in HT Cap Supported MCS for various flags */ +#define HT_CAP_MCS_RX_8TO15_BYTE_OFFSET 1 +#define HT_CAP_MCS_FLAGS_TX_RX_UNEQUAL 0x02 +#define HT_CAP_MCS_FLAGS_MAX_SPATIAL_STREAM_MASK 0x0C + #define VHT_MAX_MPDU 11454 /* max mpdu size for now (bytes) */ #define VHT_MPDU_MSDU_DELTA 56 /* Difference in spec - vht mpdu, amsdu len */ /* Max AMSDU len - per spec */ @@ -3527,6 +3981,8 @@ typedef struct ht_prop_add_ie ht_prop_add_ie_t; #define HT_OPMODE_NONGF 0x0004 /* protection mode non-GF */ #define DOT11N_TXBURST 0x0008 /* Tx burst limit */ #define DOT11N_OBSS_NONHT 0x0010 /* OBSS Non-HT STA present */ +#define HT_OPMODE_CCFS2_MASK 0x1fe0 /* Channel Center Frequency Segment 2 mask */ +#define HT_OPMODE_CCFS2_SHIFT 5 /* Channel Center Frequency Segment 2 shift */ /* misc_bites defn's */ #define HT_BASIC_STBC_MCS 0x007f /* basic STBC MCS */ @@ -3558,6 +4014,20 @@ typedef struct ht_prop_add_ie ht_prop_add_ie_t; == DOT11N_TXBURST) /* Tx Burst present */ #define DOT11N_OBSS_NONHT_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_OBSS_NONHT) \ == DOT11N_OBSS_NONHT) /* OBSS Non-HT present */ +#define HT_OPMODE_CCFS2_GET(add_ie) ((ltoh16_ua(&(add_ie)->opmode) & HT_OPMODE_CCFS2_MASK) \ + >> HT_OPMODE_CCFS2_SHIFT) /* get CCFS2 */ +#define HT_OPMODE_CCFS2_SET(add_ie, ccfs2) do { /* set CCFS2 */ \ + (add_ie)->opmode &= htol16(~HT_OPMODE_CCFS2_MASK); \ + (add_ie)->opmode |= htol16(((ccfs2) << HT_OPMODE_CCFS2_SHIFT) & HT_OPMODE_CCFS2_MASK); \ +} while (0) + +/* Macros for HT MCS field access */ +#define HT_CAP_MCS_BITMASK(supp_mcs) \ + ((supp_mcs)[HT_CAP_MCS_RX_8TO15_BYTE_OFFSET]) +#define HT_CAP_MCS_TX_RX_UNEQUAL(supp_mcs) \ + ((supp_mcs)[HT_CAP_MCS_FLAGS_SUPP_BYTE] & HT_CAP_MCS_FLAGS_TX_RX_UNEQUAL) +#define HT_CAP_MCS_TX_STREAM_SUPPORT(supp_mcs) \ + ((supp_mcs)[HT_CAP_MCS_FLAGS_SUPP_BYTE] & HT_CAP_MCS_FLAGS_MAX_SPATIAL_STREAM_MASK) BWL_PRE_PACKED_STRUCT struct obss_params { 
uint16 passive_dwell; @@ -3644,14 +4114,32 @@ typedef struct vht_cap_ie vht_cap_ie_t; #define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT 23 #define VHT_CAP_INFO_LINK_ADAPT_CAP_MASK 0x0c000000 #define VHT_CAP_INFO_LINK_ADAPT_CAP_SHIFT 26 +#define VHT_CAP_INFO_EXT_NSS_BW_SUP_MASK 0xc0000000 +#define VHT_CAP_INFO_EXT_NSS_BW_SUP_SHIFT 30 + +/* get Extended NSS BW Support passing vht cap info */ +#define VHT_CAP_EXT_NSS_BW_SUP(cap_info) \ + (((cap_info) & VHT_CAP_INFO_EXT_NSS_BW_SUP_MASK) >> VHT_CAP_INFO_EXT_NSS_BW_SUP_SHIFT) + +/* VHT CAP INFO extended NSS BW support - refer to IEEE 802.11 REVmc D8.0 Figure 9-559 */ +#define VHT_CAP_INFO_EXT_NSS_BW_HALF_160 1 /* 160MHz at half NSS CAP */ +#define VHT_CAP_INFO_EXT_NSS_BW_HALF_160_80P80 2 /* 160 & 80p80 MHz at half NSS CAP */ /* VHT Supported MCS Set - 64-bit - in VHT Cap IE */ #define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_MASK 0x1fff #define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_SHIFT 0 +#define VHT_CAP_SUPP_CHAN_WIDTH_SHIFT 5 #define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_MASK 0x1fff #define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_SHIFT 0 +/* defines for field(s) in vht_cap_ie->rx_max_rate */ +#define VHT_CAP_MAX_NSTS_MASK 0xe000 +#define VHT_CAP_MAX_NSTS_SHIFT 13 + +/* defines for field(s) in vht_cap_ie->tx_max_rate */ +#define VHT_CAP_EXT_NSS_BW_CAP 0x2000 + #define VHT_CAP_MCS_MAP_0_7 0 #define VHT_CAP_MCS_MAP_0_8 1 #define VHT_CAP_MCS_MAP_0_9 2 @@ -3752,8 +4240,8 @@ typedef struct vht_op_ie vht_op_ie_t; typedef enum vht_op_chan_width { VHT_OP_CHAN_WIDTH_20_40 = 0, VHT_OP_CHAN_WIDTH_80 = 1, - VHT_OP_CHAN_WIDTH_160 = 2, - VHT_OP_CHAN_WIDTH_80_80 = 3 + VHT_OP_CHAN_WIDTH_160 = 2, /* deprecated - IEEE 802.11 REVmc D8.0 Table 11-25 */ + VHT_OP_CHAN_WIDTH_80_80 = 3 /* deprecated - IEEE 802.11 REVmc D8.0 Table 11-25 */ } vht_op_chan_width_t; /* AID length */ @@ -3794,6 +4282,16 @@ typedef struct vht_features_ie_hdr vht_features_ie_hdr_t; #define VHT_MCS_SS_SUPPORTED(nss, mcsMap) \ (VHT_MCS_MAP_GET_MCS_PER_SS((nss), (mcsMap)) != VHT_CAP_MCS_MAP_NONE) 
+/* Get the max ss supported from the mcs map */ +#define VHT_MAX_SS_SUPPORTED(mcsMap) \ + VHT_MCS_SS_SUPPORTED(8, mcsMap) ? 8 : \ + VHT_MCS_SS_SUPPORTED(7, mcsMap) ? 7 : \ + VHT_MCS_SS_SUPPORTED(6, mcsMap) ? 6 : \ + VHT_MCS_SS_SUPPORTED(5, mcsMap) ? 5 : \ + VHT_MCS_SS_SUPPORTED(4, mcsMap) ? 4 : \ + VHT_MCS_SS_SUPPORTED(3, mcsMap) ? 3 : \ + VHT_MCS_SS_SUPPORTED(2, mcsMap) ? 2 : \ + VHT_MCS_SS_SUPPORTED(1, mcsMap) ? 1 : 0 /* ************* WPA definitions. ************* */ #define WPA_OUI "\x00\x50\xF2" /* WPA OUI */ @@ -3811,6 +4309,10 @@ typedef struct vht_features_ie_hdr vht_features_ie_hdr_t; #define WPS_OUI_TYPE 4 /* ************* WFA definitions. ************* */ +#if defined(WL_LEGACY_P2P) +#define MAC_OUI "\x00\x17\xF2" /* MACOSX OUI */ +#define MAC_OUI_TYPE_P2P 5 +#endif #ifdef P2P_IE_OVRD #define WFA_OUI MAC_OUI @@ -3834,6 +4336,8 @@ typedef struct vht_features_ie_hdr vht_features_ie_hdr_t; #define WFA_OUI_TYPE_HS20 0x10 #define WFA_OUI_TYPE_OSEN 0x12 #define WFA_OUI_TYPE_NAN 0x13 +#define WFA_OUI_TYPE_MBO 0x16 +#define WFA_OUI_TYPE_MBO_OCE 0x16 /* RSN authenticated key managment suite */ #define RSN_AKM_NONE 0 /* None (IBSS) */ @@ -3849,6 +4353,8 @@ typedef struct vht_features_ie_hdr vht_features_ie_hdr_t; #define RSN_AKM_SHA256_1X 5 /* SHA256 key derivation, using 802.1X */ #define RSN_AKM_SHA256_PSK 6 /* SHA256 key derivation, using Pre-shared Key */ #define RSN_AKM_TPK 7 /* TPK(TDLS Peer Key) handshake */ +#define RSN_AKM_FILS_SHA256 14 /* SHA256 key derivation, using FILS */ +#define RSN_AKM_FILS_SHA384 15 /* SHA384 key derivation, using FILS */ /* OSEN authenticated key managment suite */ #define OSEN_AKM_UNSPECIFIED RSN_AKM_UNSPECIFIED /* Over 802.1x */ @@ -4080,6 +4586,42 @@ BWL_PRE_PACKED_STRUCT struct dot11_ftm { } BWL_POST_PACKED_STRUCT; typedef struct dot11_ftm dot11_ftm_t; + +#define DOT11_FTM_ERR_NOT_CONT_OFFSET 1 +#define DOT11_FTM_ERR_NOT_CONT_MASK 0x80 +#define DOT11_FTM_ERR_NOT_CONT_SHIFT 7 +#define DOT11_FTM_ERR_NOT_CONT(_err) 
(((_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] & \ + DOT11_FTM_ERR_NOT_CONT_MASK) >> DOT11_FTM_ERR_NOT_CONT_SHIFT) +#define DOT11_FTM_ERR_SET_NOT_CONT(_err, _val) do {\ + uint8 _err2 = (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET]; \ + _err2 &= ~DOT11_FTM_ERR_NOT_CONT_MASK; \ + _err2 |= ((_val) << DOT11_FTM_ERR_NOT_CONT_SHIFT) & DOT11_FTM_ERR_NOT_CONT_MASK; \ + (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] = _err2; \ +} while (0) + +#define DOT11_FTM_ERR_MAX_ERR_OFFSET 0 +#define DOT11_FTM_ERR_MAX_ERR_MASK 0x7fff +#define DOT11_FTM_ERR_MAX_ERR_SHIFT 0 +#define DOT11_FTM_ERR_MAX_ERR(_err) (((((_err)[1] & 0x7f) << 8) | (_err)[0])) +#define DOT11_FTM_ERR_SET_MAX_ERR(_err, _val) do {\ + uint16 _val2; \ + uint16 _not_cont; \ + _val2 = (((_val) & DOT11_FTM_ERR_MAX_ERR_MASK) << DOT11_FTM_ERR_MAX_ERR_SHIFT); \ + _val2 = (_val2 > 0x3fff) ? 0 : _val2; /* not expecting > 16ns error */ \ + _not_cont = DOT11_FTM_ERR_NOT_CONT(_err); \ + (_err)[0] = _val2 & 0xff; \ + (_err)[1] = (_val2 >> 8) & 0xff; \ + DOT11_FTM_ERR_SET_NOT_CONT(_err, _not_cont); \ +} while (0) + +#if defined(DOT11_FTM_ERR_ROM_COMPAT) +/* incorrect defs - here for ROM compatibility */ +#undef DOT11_FTM_ERR_NOT_CONT_OFFSET +#undef DOT11_FTM_ERR_NOT_CONT_MASK +#undef DOT11_FTM_ERR_NOT_CONT_SHIFT +#undef DOT11_FTM_ERR_NOT_CONT +#undef DOT11_FTM_ERR_SET_NOT_CONT + #define DOT11_FTM_ERR_NOT_CONT_OFFSET 0 #define DOT11_FTM_ERR_NOT_CONT_MASK 0x0001 #define DOT11_FTM_ERR_NOT_CONT_SHIFT 0 @@ -4092,6 +4634,12 @@ typedef struct dot11_ftm dot11_ftm_t; (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] = _err2; \ } while (0) +#undef DOT11_FTM_ERR_MAX_ERR_OFFSET +#undef DOT11_FTM_ERR_MAX_ERR_MASK +#undef DOT11_FTM_ERR_MAX_ERR_SHIFT +#undef DOT11_FTM_ERR_MAX_ERR +#undef DOT11_FTM_ERR_SET_MAX_ERR + #define DOT11_FTM_ERR_MAX_ERR_OFFSET 0 #define DOT11_FTM_ERR_MAX_ERR_MASK 0xfff7 #define DOT11_FTM_ERR_MAX_ERR_SHIFT 1 @@ -4103,6 +4651,7 @@ typedef struct dot11_ftm dot11_ftm_t; (_err)[0] = _val2 & 0xff; \ (_err)[1] = _val2 >> 8 & 0xff; \ } while (0) +#endif /* 
DOT11_FTM_ERR_ROM_COMPAT */ BWL_PRE_PACKED_STRUCT struct dot11_ftm_params { uint8 id; /* DOT11_MNG_FTM_PARAM_ID 8.4.2.166 11mcd2.6/2014 - revisit */ @@ -4146,6 +4695,10 @@ typedef struct dot11_ftm_params dot11_ftm_params_t; #define FTM_PARAMS_NBURST(_p) (1 << FTM_PARAMS_NBURSTEXP(_p)) +enum { + FTM_PARAMS_NBURSTEXP_NOPREF = 15 +}; + enum { FTM_PARAMS_BURSTTMO_NOPREF = 15 }; @@ -4171,6 +4724,10 @@ enum { (_p)->info[FTM_PARAMS_MINDELTA_OFFSET] = (_delta) / 100; \ } while (0) +enum { + FTM_PARAMS_MINDELTA_NOPREF = 0 +}; + #define FTM_PARAMS_PARTIAL_TSF(_p) ((_p)->info[4] << 8 | (_p)->info[3]) #define FTM_PARAMS_SET_PARTIAL_TSF(_p, _partial_tsf) do { \ (_p)->info[3] = (_partial_tsf) & 0xff; \ @@ -4182,6 +4739,22 @@ enum { #define FTM_PARAMS_PARTIAL_TSF_BIT_LEN 16 #define FTM_PARAMS_PARTIAL_TSF_MAX 0xffff +/* FTM can indicate upto 62k TUs forward and 1k TU backward */ +#define FTM_PARAMS_TSF_FW_HI (63487 << 10) /* in micro sec */ +#define FTM_PARAMS_TSF_BW_LOW (64512 << 10) /* in micro sec */ +#define FTM_PARAMS_TSF_BW_HI (65535 << 10) /* in micro sec */ +#define FTM_PARAMS_TSF_FW_MAX FTM_PARAMS_TSF_FW_HI +#define FTM_PARAMS_TSF_BW_MAX (FTM_PARAMS_TSF_BW_HI - FTM_PARAMS_TSF_BW_LOW) + +#define FTM_PARAMS_PTSFNOPREF_OFFSET 5 +#define FTM_PARAMS_PTSFNOPREF_MASK 0x1 +#define FTM_PARAMS_PTSFNOPREF_SHIFT 0 +#define FTM_PARAMS_PTSFNOPREF(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_PTSFNOPREF_OFFSET, \ + FTM_PARAMS_PTSFNOPREF_MASK, FTM_PARAMS_PTSFNOPREF_SHIFT) +#define FTM_PARAMS_SET_PTSFNOPREF(_p, _nopref) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_PTSFNOPREF_OFFSET, FTM_PARAMS_PTSFNOPREF_MASK, \ + FTM_PARAMS_PTSFNOPREF_SHIFT, _nopref) + #define FTM_PARAMS_ASAP_OFFSET 5 #define FTM_PARAMS_ASAP_MASK 0x4 #define FTM_PARAMS_ASAP_SHIFT 2 @@ -4190,6 +4763,7 @@ enum { #define FTM_PARAMS_SET_ASAP(_p, _asap) FTM_PARAMS_SET_FIELD(_p, \ FTM_PARAMS_ASAP_OFFSET, FTM_PARAMS_ASAP_MASK, FTM_PARAMS_ASAP_SHIFT, _asap) +/* FTM1 - AKA ASAP Capable */ #define FTM_PARAMS_FTM1_OFFSET 5 #define 
FTM_PARAMS_FTM1_MASK 0x02 #define FTM_PARAMS_FTM1_SHIFT 1 @@ -4207,6 +4781,10 @@ enum { FTM_PARAMS_FTMS_PER_BURST_OFFSET, FTM_PARAMS_FTMS_PER_BURST_MASK, \ FTM_PARAMS_FTMS_PER_BURST_SHIFT, _nftms) +enum { + FTM_PARAMS_FTMS_PER_BURST_NOPREF = 0 +}; + #define FTM_PARAMS_CHAN_INFO_OFFSET 6 #define FTM_PARAMS_CHAN_INFO_MASK 0xfc #define FTM_PARAMS_CHAN_INFO_SHIFT 2 @@ -4224,6 +4802,10 @@ enum { #define FTM_PARAMS_BURST_PERIOD_MS(_p) (FTM_PARAMS_BURST_PERIOD(_p) * 100) +enum { + FTM_PARAMS_BURST_PERIOD_NOPREF = 0 +}; + /* FTM status values - last updated from 11mcD4.0 */ enum { FTM_PARAMS_STATUS_RESERVED = 0, @@ -4261,6 +4843,81 @@ enum { FTM_PARAMS_CHAN_INFO_MAX = 63 }; +/* tag_ID/length/value_buffer tuple */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 id; + uint8 len; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT ftm_vs_tlv_t; + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_vs_ie { + uint8 id; /* DOT11_MNG_VS_ID */ + uint8 len; /* length following */ + uint8 oui[3]; /* BRCM_PROP_OUI (or Customer) */ + uint8 sub_type; /* BRCM_FTM_IE_TYPE (or Customer) */ + uint8 version; + ftm_vs_tlv_t tlvs[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_vs_ie dot11_ftm_vs_ie_t; + +/* ftm vs api version */ +#define BCM_FTM_VS_PARAMS_VERSION 0x01 + +/* ftm vendor specific information tlv types */ +enum { + FTM_VS_TLV_NONE = 0, + FTM_VS_TLV_REQ_PARAMS = 1, /* additional request params (in FTM_REQ) */ + FTM_VS_TLV_MEAS_INFO = 2, /* measurement information (in FTM_MEAS) */ + FTM_VS_TLV_SEC_PARAMS = 3, /* security parameters (in either) */ + FTM_VS_TLV_SEQ_PARAMS = 4, /* toast parameters (FTM_REQ, BRCM proprietary) */ + FTM_VS_TLV_MF_BUF = 5, /* multi frame buffer - may span ftm vs ie's */ + FTM_VS_TLV_TIMING_PARAMS = 6 /* timing adjustments */ + /* add additional types above */ +}; + +/* the following definitions are *DEPRECATED* and moved to implementation files.
They + * are retained here because previous (May 2016) some branches use them + */ +#define FTM_TPK_LEN 16 +#define FTM_RI_RR_BUF_LEN 32 +#define FTM_TPK_RI_RR_LEN 13 +#define FTM_TPK_RI_RR_LEN_SECURE_2_0 28 +#define FTM_TPK_DIGEST_LEN 32 +#define FTM_TPK_BUFFER_LEN 128 +#define FTM_TPK_RI_PHY_LEN 7 +#define FTM_TPK_RR_PHY_LEN 7 +#define FTM_TPK_DATA_BUFFER_LEN 88 +#define FTM_TPK_LEN_SECURE_2_0 32 +#define FTM_TPK_RI_PHY_LEN_SECURE_2_0 14 +#define FTM_TPK_RR_PHY_LEN_SECURE_2_0 14 + + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_vs_params { + uint8 id; /* DOT11_MNG_VS_ID */ + uint8 len; + uint8 oui[3]; /* Proprietary OUI, BRCM_PROP_OUI */ + uint8 bcm_vs_id; + ftm_vs_tlv_t ftm_tpk_ri_rr[1]; /* ftm_TPK_ri_rr place holder */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_vs_params dot11_ftm_vs_tpk_ri_rr_params_t; +#define DOT11_FTM_VS_LEN (sizeof(dot11_ftm_vs_tpk_ri_rr_params_t) - TLV_HDR_LEN) +/* end *DEPRECATED* ftm definitions */ + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_sync_info { + uint8 id; /* Extended - 255 11mc D4.3 */ + uint8 len; + uint8 id_ext; + uint8 tsf_sync_info[4]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_sync_info dot11_ftm_sync_info_t; + +/* ftm tsf sync info ie len - includes id ext */ +#define DOT11_FTM_SYNC_INFO_IE_LEN (sizeof(dot11_ftm_sync_info_t) - TLV_HDR_LEN) + +#define DOT11_FTM_IS_SYNC_INFO_IE(_ie) (\ + DOT11_MNG_IE_ID_EXT_MATCH(_ie, DOT11_MNG_FTM_SYNC_INFO) && \ + (_ie)->len == DOT11_FTM_SYNC_INFO_IE_LEN) + /* 802.11u interworking access network options */ #define IW_ANT_MASK 0x0f #define IW_INTERNET_MASK 0x10 @@ -4278,6 +4935,12 @@ enum { #define IW_ANT_TEST_NETWORK 14 #define IW_ANT_WILDCARD_NETWORK 15 +#define IW_ANT_LEN 1 +#define IW_VENUE_LEN 2 +#define IW_HESSID_LEN 6 +#define IW_HESSID_OFF (IW_ANT_LEN + IW_VENUE_LEN) +#define IW_MAX_LEN (IW_ANT_LEN + IW_VENUE_LEN + IW_HESSID_LEN) + /* 802.11u advertisement protocol */ #define ADVP_ANQP_PROTOCOL_ID 0 #define ADVP_MIH_PROTOCOL_ID 1 @@ -4405,6 +5068,13 @@ enum { #define 
G3PP_GUD_VERSION 0 #define G3PP_PLMN_LIST_IE 0 +/* AP Location Public ID Info encoding */ +#define PUBLIC_ID_URI_FQDN_SE_ID 0 +/* URI/FQDN Descriptor field values */ +#define LOCATION_ENCODING_HELD 1 +#define LOCATION_ENCODING_SUPL 2 +#define URI_FQDN_SIZE 255 + /** hotspot2.0 indication element (vendor specific) */ BWL_PRE_PACKED_STRUCT struct hs20_ie { uint8 oui[3]; diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/802.11e.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/802.11e.h old mode 100755 new mode 100644 similarity index 98% rename from drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/802.11e.h rename to drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/802.11e.h index ccfa9656b83b..f2021e5d4334 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/802.11e.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/802.11e.h @@ -1,7 +1,7 @@ /* * 802.11e protocol header file * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: 802.11e.h 518342 2014-12-01 23:21:41Z $ + * $Id: 802.11e.h 700076 2017-05-17 14:42:22Z $ */ #ifndef _802_11e_H_ @@ -37,7 +37,6 @@ /* This marks the start of a packed structure section. 
*/ #include - /* WME Traffic Specification (TSPEC) element */ #define WME_TSPEC_HDR_LEN 2 /* WME TSPEC header length */ #define WME_TSPEC_BODY_OFF 2 /* WME TSPEC body offset */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/802.11s.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/802.11s.h new file mode 100644 index 000000000000..2d66d0b52e1a --- /dev/null +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/802.11s.h @@ -0,0 +1,314 @@ +/* + * Fundamental types and constants relating to 802.11s Mesh + * + * Copyright (C) 1999-2017, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: 802.11s.h 700076 2017-05-17 14:42:22Z $ + */ + +#ifndef _802_11s_h_ +#define _802_11s_h_ + +/* This marks the start of a packed structure section. 
*/ +#include + +#define DOT11_MESH_FLAGS_AE_MASK 0x3 +#define DOT11_MESH_FLAGS_AE_SHIFT 0 + +#define DOT11_MESH_CONNECTED_AS_SET 7 +#define DOT11_MESH_NUMBER_PEERING_SET 1 +#define DOT11_MESH_MESH_GWSET 0 + +#define DOT11_MESH_ACTION_LINK_MET_REP 0 +#define DOT11_MESH_ACTION_PATH_SEL 1 +#define DOT11_MESH_ACTION_GATE_ANN 2 +#define DOT11_MESH_ACTION_CONG_CONT_NOTIF 3 +#define DOT11_MESH_ACTION_MCCA_SETUP_REQ 4 +#define DOT11_MESH_ACTION_MCCA_SETUP_REP 5 +#define DOT11_MESH_ACTION_MCCA_ADVT_REQ 6 +#define DOT11_MESH_ACTION_MCCA_ADVT 7 +#define DOT11_MESH_ACTION_MCCA_TEARDOWN 8 +#define DOT11_MESH_ACTION_TBTT_ADJ_REQ 9 +#define DOT11_MESH_ACTION_TBTT_ADJ_RESP 10 + +/* self-protected action field values: 7-57v24 */ +#define DOT11_SELFPROT_ACTION_MESH_PEER_OPEN 1 +#define DOT11_SELFPROT_ACTION_MESH_PEER_CONFM 2 +#define DOT11_SELFPROT_ACTION_MESH_PEER_CLOSE 3 +#define DOT11_SELFPROT_ACTION_MESH_PEER_GK_INF 4 +#define DOT11_SELFPROT_ACTION_MESH_PEER_GK_ACK 5 + +#define DOT11_MESH_AUTH_PROTO_NONE 0 +#define DOT11_MESH_AUTH_PROTO_SAE 1 +#define DOT11_MESH_AUTH_PROTO_8021X 2 +#define DOT11_MESH_AUTH_PROTO_VS 255 + +#define DOT11_MESH_PATHSEL_LEN 2 +#define DOT11_MESH_PERR_LEN1 2 /* Least PERR length fixed */ +#define DOT11_MESH_PERR_LEN2 13 /* Least PERR length variable */ +#define DOT11_MESH_PREP_LEN 31 /* Least PREP length */ +#define DOT11_MESH_PREQ_LEN 37 /* Least PREQ length */ + +#define DOT11_MESH_PATHSEL_PROTID_HWMP 1 +#define DOT11_MESH_PATHSEL_METRICID_ALM 1 /* Air link metric */ +#define DOT11_MESH_CONGESTCTRL_NONE 0 +#define DOT11_MESH_CONGESTCTRL_SP 1 +#define DOT11_MESH_SYNCMETHOD_NOFFSET 1 + +BWL_PRE_PACKED_STRUCT struct dot11_meshctrl_hdr { + uint8 flags; /* flag bits such as ae etc */ + uint8 ttl; /* time to live */ + uint32 seq; /* sequence control */ + struct ether_addr a5; /* optional address 5 */ + struct ether_addr a6; /* optional address 6 */ +} BWL_POST_PACKED_STRUCT; + +/* Mesh Path Selection Action Frame */ +BWL_PRE_PACKED_STRUCT struct 
dot11_mesh_pathsel { + uint8 category; + uint8 meshaction; + uint8 data[]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_mesh_pathsel dot11_mesh_pathsel_t; + +/* Mesh PREQ IE */ +BWL_PRE_PACKED_STRUCT struct mesh_preq_ie { + uint8 id; + uint8 len; + uint8 flags; + uint8 hop_count; + uint8 ttl; + uint32 pathdis_id; + struct ether_addr originator_addr; + uint32 originator_seq; + union { + BWL_PRE_PACKED_STRUCT struct { + struct ether_addr target_ext_add; + uint32 lifetime; + uint32 metric; + uint8 target_count; + uint8 data[]; + } BWL_POST_PACKED_STRUCT oea; + + BWL_PRE_PACKED_STRUCT struct { + uint32 lifetime; + uint32 metric; + uint8 target_count; + uint8 data[]; + } BWL_POST_PACKED_STRUCT noea; + } u; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_preq_ie mesh_preq_ie_t; + +/* Target info (part of Mesh PREQ IE) */ +BWL_PRE_PACKED_STRUCT struct mesh_targetinfo { + uint8 target_flag; + struct ether_addr target_addr; + uint32 target_seq; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_targetinfo mesh_targetinfo_t; + + +/* Mesh PREP IE */ +BWL_PRE_PACKED_STRUCT struct mesh_prep_ie { + uint8 id; + uint8 len; + uint8 flags; + uint8 hop_count; + uint8 ttl; + struct ether_addr target_addr; + uint32 target_seq; + union { + BWL_PRE_PACKED_STRUCT struct { + struct ether_addr target_ext_add; + uint32 lifetime; + uint32 metric; + uint8 target_count; + struct ether_addr originator_addr; + uint32 originator_seq; + } BWL_POST_PACKED_STRUCT oea; + + BWL_PRE_PACKED_STRUCT struct { + uint32 lifetime; + uint32 metric; + uint8 target_count; + struct ether_addr originator_addr; + uint32 originator_seq; + } BWL_POST_PACKED_STRUCT noea; + } u; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_prep_ie mesh_prep_ie_t; + + +/* Mesh PERR IE */ +struct mesh_perr_ie { + uint8 id; + uint8 len; + uint8 ttl; + uint8 num_dest; + uint8 data[]; +}; +typedef struct mesh_perr_ie mesh_perr_ie_t; + +/* Destination info is part of PERR IE */ +BWL_PRE_PACKED_STRUCT struct mesh_perr_destinfo { + uint8 
flags; + struct ether_addr destination_addr; + uint32 dest_seq; + union { + BWL_PRE_PACKED_STRUCT struct { + struct ether_addr dest_ext_addr; + } BWL_POST_PACKED_STRUCT dea; + + BWL_PRE_PACKED_STRUCT struct { + /* 1 byte reason code to be populated manually in software */ + uint16 reason_code; + } BWL_POST_PACKED_STRUCT nodea; + } u; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_perr_destinfo mesh_perr_destinfo_t; + +/* Mesh peering action frame hdr */ +BWL_PRE_PACKED_STRUCT struct mesh_peering_frmhdr { + uint8 category; + uint8 action; + union { + struct { + uint16 capability; + } open; + struct { + uint16 capability; + uint16 AID; + } confirm; + uint8 data[1]; + } u; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_peering_frmhdr mesh_peering_frmhdr_t; + +/* Mesh peering mgmt IE */ +BWL_PRE_PACKED_STRUCT struct mesh_peer_mgmt_ie_common { + uint16 mesh_peer_prot_id; + uint16 local_link_id; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_peer_mgmt_ie_common mesh_peer_mgmt_ie_common_t; +#define MESH_PEER_MGMT_IE_OPEN_LEN (4) + +BWL_PRE_PACKED_STRUCT struct mesh_peer_mgmt_ie_cfm { + mesh_peer_mgmt_ie_common_t common; + uint16 peer_link_id; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_peer_mgmt_ie_cfm mesh_peer_mgmt_ie_cfm_t; +#define MESH_PEER_MGMT_IE_CONF_LEN (6) + +BWL_PRE_PACKED_STRUCT struct mesh_peer_mgmt_ie_close { + mesh_peer_mgmt_ie_common_t common; + /* uint16 peer_link_id; + * simplicity: not supported, TODO for future + */ + uint16 reason_code; +} BWL_POST_PACKED_STRUCT; +typedef struct mesh_peer_mgmt_ie_close mesh_peer_mgmt_ie_close_t; +#define MESH_PEER_MGMT_IE_CLOSE_LEN (6) + +struct mesh_config_ie { + uint8 activ_path_sel_prot_id; + uint8 activ_path_sel_metric_id; + uint8 cong_ctl_mode_id; + uint8 sync_method_id; + uint8 auth_prot_id; + uint8 mesh_formation_info; + uint8 mesh_cap; +}; +typedef struct mesh_config_ie mesh_config_ie_t; +#define MESH_CONFIG_IE_LEN (7) + +/* Mesh peering states */ +#define MESH_PEERING_IDLE 0 +#define 
MESH_PEERING_OPEN_SNT 1 +#define MESH_PEERING_CNF_RCVD 2 +#define MESH_PEERING_OPEN_RCVD 3 +#define MESH_PEERING_ESTAB 4 +#define MESH_PEERING_HOLDING 5 +#define MESH_PEERING_LAST_STATE 6 +/* for debugging: mapping strings */ +#define MESH_PEERING_STATE_STRINGS \ + {"IDLE ", "OPNSNT", "CNFRCV", "OPNRCV", "ESTAB ", "HOLDNG"} + +typedef BWL_PRE_PACKED_STRUCT struct mesh_peer_info { + /* mesh_peer_instance as given in the spec. Note that, peer address + * is stored in scb + */ + uint16 mesh_peer_prot_id; + uint16 local_link_id; + uint16 peer_link_id; + /* AID generated by *peer* to self & received in peer_confirm */ + uint16 peer_aid; + + /* TODO: no mention in spec? possibly used in PS case. Note that aid generated + * from self to peer is stored in scb. + */ + uint8 state; + /* TODO: struct mesh_peer_info *next; this field is required + * if multiple peerings per same src is allowed, which is + * true as per spec. + */ +} BWL_POST_PACKED_STRUCT mesh_peer_info_t; + +/* once an entry is added into mesh_peer_list, if peering is lost, it will +* get retried for peering, MAX_MESH_PEER_ENTRY_RETRIES times. after wards, it +* wont get retried and will be moved to MESH_PEER_ENTRY_STATE_TIMEDOUT state, +* until user adds it again explicitely, when its entry_state is changed +* to MESH_PEER_ENTRY_STATE_ACTIVE and tried again. +*/ +#define MAX_MESH_SELF_PEER_ENTRY_RETRIES 3 +#define MESH_SELF_PEER_ENTRY_STATE_ACTIVE 1 +#define MESH_SELF_PEER_ENTRY_STATE_TIMEDOUT 2 + +/** Mesh Channel Switch Parameter IE data structure */ +BWL_PRE_PACKED_STRUCT struct dot11_mcsp_body { + uint8 ttl; /* remaining number of hops allowed for this element. 
*/ + uint8 flags; /* attributes of this channel switch attempt */ + uint8 reason; /* reason for the mesh channel switch */ + uint16 precedence; /* random value in the range 0 to 65535 */ +} BWL_POST_PACKED_STRUCT; + +#define DOT11_MCSP_TTL_DEFAULT 1 +#define DOT11_MCSP_FLAG_TRANS_RESTRICT 0x1 /* no transmit except frames with mcsp */ +#define DOT11_MCSP_FLAG_INIT 0x2 /* initiates the channel switch attempt */ +#define DOT11_MCSP_FLAG_REASON 0x4 /* validity of reason code field */ +#define DOT11_MCSP_REASON_REGULATORY 0 /* meet regulatory requirements */ +#define DOT11_MCSP_REASON_UNSPECIFIED 1 /* unspecified reason */ + +BWL_PRE_PACKED_STRUCT struct dot11_mesh_csp { + uint8 id; /* id DOT11_MNG_MESH_CSP_ID */ + uint8 len; /* length of IE */ + struct dot11_mcsp_body body; /* body of the ie */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_mesh_csp dot11_mesh_csp_ie_t; +#define DOT11_MESH_CSP_IE_LEN 5 /* length of mesh channel switch parameter IE body */ + +/* This marks the end of a packed structure section. 
*/ +#include + +#endif /* #ifndef _802_11s_H_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/802.1d.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/802.1d.h old mode 100755 new mode 100644 similarity index 95% rename from drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/802.1d.h rename to drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/802.1d.h index 9610b550467a..1b8dea5b117d --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/802.1d.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/802.1d.h @@ -1,7 +1,7 @@ /* * Fundamental types and constants relating to 802.1D * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: 802.1d.h 518342 2014-12-01 23:21:41Z $ + * $Id: 802.1d.h 700076 2017-05-17 14:42:22Z $ */ #ifndef _802_1_D_ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/802.3.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/802.3.h old mode 100755 new mode 100644 similarity index 95% rename from drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/802.3.h rename to drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/802.3.h index 9f108c888a2e..6758ac048428 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/802.3.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/802.3.h @@ -1,7 +1,7 @@ /* * Fundamental constants relating to 802.3 * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: 802.3.h 518342 2014-12-01 23:21:41Z $ + * 
$Id: 802.3.h 700076 2017-05-17 14:42:22Z $ */ #ifndef _802_3_h_ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/aidmp.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/aidmp.h index 6654364b9103..10992e7635f4 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/aidmp.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/aidmp.h @@ -1,7 +1,7 @@ /* * Broadcom AMBA Interconnect definitions. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: aidmp.h 514727 2014-11-12 03:02:48Z $ + * $Id: aidmp.h 614820 2016-01-23 17:16:17Z $ */ #ifndef _AIDMP_H @@ -373,7 +373,22 @@ typedef volatile struct _aidmp { #define AIELD_ERRDONE_MASK 0x3 /* errlogstatus */ -#define AIELS_TIMEOUT_MASK 0x3 +#define AIELS_SLAVE_ERR 0x1 +#define AIELS_TIMEOUT 0x2 +#define AIELS_DECODE 0x3 +#define AIELS_TIMEOUT_MASK 0x3 + +/* errorlog status bit map, for SW use */ +#define AXI_WRAP_STS_NONE (0) +#define AXI_WRAP_STS_TIMEOUT (1<<0) +#define AXI_WRAP_STS_SLAVE_ERR (1<<1) +#define AXI_WRAP_STS_DECODE_ERR (1<<2) +#define AXI_WRAP_STS_PCI_RD_ERR (1<<3) +#define AXI_WRAP_STS_WRAP_RD_ERR (1<<4) +#define AXI_WRAP_STS_SET_CORE_FAIL (1<<5) + +/* errlogFrags */ +#define AXI_ERRLOG_FLAGS_WRITE_REQ (1<<24) /* config */ #define AICFG_OOB 0x00000020 @@ -399,4 +414,11 @@ typedef volatile struct _aidmp { #define AI_OOBSEL_7_SHIFT 24 #define AI_IOCTRL_ENABLE_D11_PME (1 << 14) +/* mask for interrupts from each core to wrapper */ +#define AI_OOBSELINA74_CORE_MASK 0x80808080 +#define AI_OOBSELINA30_CORE_MASK 0x80808080 + +/* axi id mask in the error log id */ +#define AI_ERRLOGID_AXI_ID_MASK 0x07 + #endif /* _AIDMP_H */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcm_cfg.h 
b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcm_cfg.h index e71f5c82da6c..12e3cb2ca27a 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcm_cfg.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcm_cfg.h @@ -1,7 +1,7 @@ /* * BCM common config options * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcm_mpool_pub.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcm_mpool_pub.h index 79ae0f5d4a9c..f8ce7a78e5b4 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcm_mpool_pub.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcm_mpool_pub.h @@ -35,7 +35,7 @@ * and instrumentation on top of the heap, without modifying the heap * allocation implementation. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -58,7 +58,7 @@ * * <> * - * $Id: bcm_mpool_pub.h 514727 2014-11-12 03:02:48Z $ + * $Id: bcm_mpool_pub.h 535090 2015-02-17 04:49:01Z $ */ #ifndef _BCM_MPOOL_PUB_H @@ -343,7 +343,7 @@ int bcm_mp_free(bcm_mp_pool_h pool, void *objp); * other Error getting statistics. 
* */ -int bcm_mp_stats(bcm_mp_pool_h pool, bcm_mp_stats_t *stats); +void bcm_mp_stats(bcm_mp_pool_h pool, bcm_mp_stats_t *stats); /* diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcm_ring.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcm_ring.h index 5f1b38c65e3c..721d7eab6b08 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcm_ring.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcm_ring.h @@ -1,19 +1,39 @@ -#ifndef __bcm_ring_included__ -#define __bcm_ring_included__ - /* - * +---------------------------------------------------------------------------- - * * bcm_ring.h : Ring context abstraction - * * The ring context tracks the WRITE and READ indices where elements may be * produced and consumed respectively. All elements in the ring need to be * fixed size. * * NOTE: A ring of size N, may only hold N-1 elements. * - * +---------------------------------------------------------------------------- + * Copyright (C) 1999-2017, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. * + * + * <> + * + * $Id: bcm_ring.h 596126 2015-10-29 19:53:48Z $ + */ +#ifndef __bcm_ring_included__ +#define __bcm_ring_included__ +/* * API Notes: * * Ring manipulation API allows for: @@ -81,7 +101,7 @@ * private L1 data cache. * +---------------------------------------------------------------------------- * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -101,9 +121,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * - * <> - * - * $Id: bcm_ring.h 591283 2015-10-07 11:52:00Z $ + * $Id: bcm_ring.h 596126 2015-10-29 19:53:48Z $ * * -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*- * vim: set ts=4 noet sw=4 tw=80: diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmcdc.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmcdc.h index a95dc31c27bd..22fd8a0933bb 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmcdc.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmcdc.h @@ -4,7 +4,7 @@ * * Definitions subject to change without notice. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -27,11 +27,11 @@ * * <> * - * $Id: bcmcdc.h 514727 2014-11-12 03:02:48Z $ + * $Id: bcmcdc.h 676811 2016-12-24 20:48:46Z $ */ #ifndef _bcmcdc_h_ #define _bcmcdc_h_ -#include +#include typedef struct cdc_ioctl { uint32 cmd; /* ioctl command value */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmdefs.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmdefs.h index a02499996f61..58cbe5e7a591 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmdefs.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmdefs.h @@ -1,7 +1,7 @@ /* * Misc system wide definitions * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmdefs.h 601026 2015-11-20 06:53:19Z $ + * $Id: bcmdefs.h 657791 2016-09-02 15:14:42Z $ */ #ifndef _bcmdefs_h_ @@ -63,24 +63,88 @@ * BCMATTACHFN is also used for detach functions (it's not worth having a BCMDETACHFN, * as in most cases, the attach function calls the detach function to clean up on error). */ +#if defined(BCM_RECLAIM) -#define bcmreclaimed 0 +extern bool bcm_reclaimed; +extern bool bcm_attach_part_reclaimed; +extern bool bcm_preattach_part_reclaimed; + +#if defined(BCM_RECLAIM_ATTACH_FN_DATA) +#define _data __attribute__ ((__section__ (".dataini2." #_data))) _data +#define _fn __attribute__ ((__section__ (".textini2." #_fn), noinline)) _fn + +/* Relocate attach symbols to save-restore region to increase pre-reclaim heap size. 
*/ +#define BCM_SRM_ATTACH_DATA(_data) __attribute__ ((__section__ (".datasrm." #_data))) _data +#define BCM_SRM_ATTACH_FN(_fn) __attribute__ ((__section__ (".textsrm." #_fn), noinline)) _fn + +#ifndef PREATTACH_NORECLAIM +#define BCMPREATTACHDATA(_data) __attribute__ ((__section__ (".dataini3." #_data))) _data +#define BCMPREATTACHFN(_fn) __attribute__ ((__section__ (".textini3." #_fn), noinline)) _fn +#else +#define BCMPREATTACHDATA(_data) __attribute__ ((__section__ (".dataini2." #_data))) _data +#define BCMPREATTACHFN(_fn) __attribute__ ((__section__ (".textini2." #_fn), noinline)) _fn +#endif /* PREATTACH_NORECLAIM */ +#else /* BCM_RECLAIM_ATTACH_FN_DATA */ #define _data _data #define _fn _fn #define BCMPREATTACHDATA(_data) _data #define BCMPREATTACHFN(_fn) _fn +#endif /* BCM_RECLAIM_ATTACH_FN_DATA */ + +#if defined(BCM_RECLAIM_INIT_FN_DATA) +#define _data __attribute__ ((__section__ (".dataini1." #_data))) _data +#define _fn __attribute__ ((__section__ (".textini1." #_fn), noinline)) _fn +#define CONST +#else /* BCM_RECLAIM_INIT_FN_DATA */ #define _data _data #define _fn _fn -#define _fn _fn +#ifndef CONST +#define CONST const +#endif +#endif /* BCM_RECLAIM_INIT_FN_DATA */ + +/* Non-manufacture or internal attach function/dat */ #define BCMNMIATTACHFN(_fn) _fn #define BCMNMIATTACHDATA(_data) _data -#define CONST const +#ifdef BCMNODOWN +#define _fn _fn +#else +#define _fn _fn +#endif + +#else /* BCM_RECLAIM */ + +#define bcm_reclaimed 0 +#define _data _data +#define _fn _fn +#define BCM_SRM_ATTACH_DATA(_data) _data +#define BCM_SRM_ATTACH_FN(_fn) _fn +#define BCMPREATTACHDATA(_data) _data +#define BCMPREATTACHFN(_fn) _fn +#define _data _data +#define _fn _fn +#define _fn _fn +#define BCMNMIATTACHFN(_fn) _fn +#define BCMNMIATTACHDATA(_data) _data +#define CONST const + +#endif /* BCM_RECLAIM */ + +#if !defined STB #undef BCM47XX_CA9 +#endif /* STB */ +/* BCMFASTPATH Related Macro defines +*/ #ifndef BCMFASTPATH +#if defined(STB) +#define BCMFASTPATH 
__attribute__ ((__section__ (".text.fastpath"))) +#define BCMFASTPATH_HOST __attribute__ ((__section__ (".text.fastpath_host"))) +#else #define BCMFASTPATH #define BCMFASTPATH_HOST +#endif #endif /* BCMFASTPATH */ @@ -105,16 +169,22 @@ /* Allows size optimization for single-bus image */ #ifdef BCMBUSTYPE -#define BUSTYPE(bus) (BCMBUSTYPE) +#define BUSTYPE(bus) (BCMBUSTYPE) #else -#define BUSTYPE(bus) (bus) +#define BUSTYPE(bus) (bus) +#endif + +#ifdef BCMBUSCORETYPE +#define BUSCORETYPE(ct) (BCMBUSCORETYPE) +#else +#define BUSCORETYPE(ct) (ct) #endif /* Allows size optimization for single-backplane image */ #ifdef BCMCHIPTYPE -#define CHIPTYPE(bus) (BCMCHIPTYPE) +#define CHIPTYPE(bus) (BCMCHIPTYPE) #else -#define CHIPTYPE(bus) (bus) +#define CHIPTYPE(bus) (bus) #endif @@ -146,6 +216,24 @@ #define PCIECOREREV(rev) (rev) #endif +#ifdef BCMPMUREV +#define PMUREV(rev) (BCMPMUREV) +#else +#define PMUREV(rev) (rev) +#endif + +#ifdef BCMCCREV +#define CCREV(rev) (BCMCCREV) +#else +#define CCREV(rev) (rev) +#endif + +#ifdef BCMGCIREV +#define GCIREV(rev) (BCMGCIREV) +#else +#define GCIREV(rev) (rev) +#endif + /* Defines for DMA Address Width - Shared between OSL and HNDDMA */ #define DMADDR_MASK_32 0x0 /* Address mask for 32-bits */ #define DMADDR_MASK_30 0xc0000000 /* Address mask for 30-bits */ @@ -182,7 +270,7 @@ typedef dma64addr_t dmaaddr_t; #define PHYSADDRLOSET(_pa, _val) PHYSADDR64LOSET(_pa, _val) #define PHYSADDRTOULONG(_pa, _ulong) \ do { \ - _ulong = ((unsigned long)(_pa).hiaddr << 32) | ((_pa).loaddr); \ + _ulong = ((unsigned long long)(_pa).hiaddr << 32) | ((_pa).loaddr); \ } while (0) #else @@ -224,7 +312,15 @@ typedef struct { /* add 40 bytes to allow for extra RPC header and info */ #define BCMEXTRAHDROOM 260 #else /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */ +#if defined(STB) +#if defined(BCM_GMAC3) +#define BCMEXTRAHDROOM 32 /* For FullDongle, no D11 headroom space required. */ +#else +#define BCMEXTRAHDROOM 224 +#endif /* ! 
BCM_GMAC3 */ +#else #define BCMEXTRAHDROOM 204 +#endif #endif /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */ /* Packet alignment for most efficient SDIO (can change based on platform) */ @@ -310,28 +406,6 @@ typedef struct { #else #define BCMLFRAG_ENAB() (0) #endif /* BCMLFRAG_ENAB */ -#define RXMODE1 1 /* descriptor split */ -#define RXMODE2 2 /* descriptor split + classification */ -#define RXMODE3 3 /* fifo split + classification */ -#define RXMODE4 4 /* fifo split + classification + hdr conversion */ - -#ifdef BCMSPLITRX /* BCMLFRAG support enab macros */ - extern bool _bcmsplitrx; - extern uint8 _bcmsplitrx_mode; - #if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) - #define BCMSPLITRX_ENAB() (_bcmsplitrx) - #define BCMSPLITRX_MODE() (_bcmsplitrx_mode) - #elif defined(BCMSPLITRX_DISABLED) - #define BCMSPLITRX_ENAB() (0) - #define BCMSPLITRX_MODE() (0) - #else - #define BCMSPLITRX_ENAB() (1) - #define BCMSPLITRX_MODE() (_bcmsplitrx_mode) - #endif -#else - #define BCMSPLITRX_ENAB() (0) - #define BCMSPLITRX_MODE() (0) -#endif /* BCMSPLITRX */ #ifdef BCMPCIEDEV /* BCMPCIEDEV support enab macros */ extern bool _pciedevenab; @@ -346,28 +420,7 @@ extern bool _pciedevenab; #define BCMPCIEDEV_ENAB() 0 #endif /* BCMPCIEDEV */ -#define SPLIT_RXMODE1() ((BCMSPLITRX_MODE() == RXMODE1)) -#define SPLIT_RXMODE2() ((BCMSPLITRX_MODE() == RXMODE2)) -#define SPLIT_RXMODE3() ((BCMSPLITRX_MODE() == RXMODE3)) -#define SPLIT_RXMODE4() ((BCMSPLITRX_MODE() == RXMODE4)) - -#define PKT_CLASSIFY() (SPLIT_RXMODE2() || SPLIT_RXMODE3() || SPLIT_RXMODE4()) -#define RXFIFO_SPLIT() (SPLIT_RXMODE3() || SPLIT_RXMODE4()) -#define HDR_CONV() (SPLIT_RXMODE4()) - -#define PKT_CLASSIFY_EN(x) ((PKT_CLASSIFY()) && (PKT_CLASSIFY_FIFO == (x))) -#ifdef BCM_SPLITBUF - extern bool _bcmsplitbuf; - #if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) - #define BCM_SPLITBUF_ENAB() (_bcmsplitbuf) - #elif defined(BCM_SPLITBUF_DISABLED) - #define BCM_SPLITBUF_ENAB() (0) - #else - #define 
BCM_SPLITBUF_ENAB() (1) - #endif -#else - #define BCM_SPLITBUF_ENAB() (0) -#endif /* BCM_SPLITBUF */ + #define BCMSDIODEV_ENAB() 0 /* Max size for reclaimable NVRAM array */ #ifdef DL_NVRAM @@ -378,5 +431,32 @@ extern bool _pciedevenab; extern uint32 gFWID; +/* Chip related low power flags (lpflags) */ +#define LPFLAGS_SI_GLOBAL_DISABLE (1 << 0) +#define LPFLAGS_SI_MEM_STDBY_DISABLE (1 << 1) +#define LPFLAGS_SI_SFLASH_DISABLE (1 << 2) +#define LPFLAGS_SI_BTLDO3P3_DISABLE (1 << 3) +#define LPFLAGS_SI_GCI_FORCE_REGCLK_DISABLE (1 << 4) +#define LPFLAGS_SI_FORCE_PWM_WHEN_RADIO_ON (1 << 5) +#define LPFLAGS_PHY_GLOBAL_DISABLE (1 << 16) +#define LPFLAGS_PHY_LP_DISABLE (1 << 17) +#define LPFLAGS_PSM_PHY_CTL (1 << 18) + +/* Chip related Cbuck modes */ +#define PMU_43012_VREG8_DYNAMIC_CBUCK_MODE0 0x00001c03 +#define PMU_43012_VREG9_DYNAMIC_CBUCK_MODE0 0x00492490 +#define PMU_43012_VREG8_DYNAMIC_CBUCK_MODE1 0x00001c03 +#define PMU_43012_VREG9_DYNAMIC_CBUCK_MODE1 0x00490410 + +/* Chip related dynamic cbuck mode mask */ + +#define PMU_43012_VREG8_DYNAMIC_CBUCK_MODE_MASK 0xFFFFFC00 +#define PMU_43012_VREG9_DYNAMIC_CBUCK_MODE_MASK 0xFFFFFFFF + +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif #endif /* _bcmdefs_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmdevs.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmdevs.h index 49c1064c2409..70ef46788483 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmdevs.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmdevs.h @@ -1,7 +1,7 @@ /* * Broadcom device-specific manifest constants. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmdevs.h 582052 2015-08-26 09:30:53Z $ + * $Id: bcmdevs.h 625027 2016-03-15 08:20:18Z $ */ #ifndef _BCMDEVS_H @@ -77,6 +77,7 @@ #define BCM_DNGL_BL_PID_4354 0xbd26 #define BCM_DNGL_BL_PID_43569 0xbd27 #define BCM_DNGL_BL_PID_43909 0xbd28 +#define BCM_DNGL_BL_PID_4373 0xbd29 #define BCM_DNGL_BDC_PID 0x0bdc #define BCM_DNGL_JTAG_PID 0x4a44 @@ -85,6 +86,7 @@ #define BCM_HWUSB_PID_43239 43239 /* PCI Device IDs */ +#ifdef DEPRECATED /* These products have been deprecated */ #define BCM4210_DEVICE_ID 0x1072 /* never used */ #define BCM4230_DEVICE_ID 0x1086 /* never used */ #define BCM4401_ENET_ID 0x170c /* 4401b0 production enet cards */ @@ -99,17 +101,12 @@ #define BCM4328_D11DUAL_ID 0x4314 /* 4328/4312 802.11a/g id */ #define BCM4328_D11G_ID 0x4315 /* 4328/4312 802.11g id */ #define BCM4328_D11A_ID 0x4316 /* 4328/4312 802.11a id */ -#define BCM4318_D11G_ID 0x4318 /* 4318 802.11b/g id */ -#define BCM4318_D11DUAL_ID 0x4319 /* 4318 802.11a/b/g id */ #define BCM4318_D11A_ID 0x431a /* 4318 802.11a id */ #define BCM4325_D11DUAL_ID 0x431b /* 4325 802.11a/g id */ #define BCM4325_D11G_ID 0x431c /* 4325 802.11g id */ #define BCM4325_D11A_ID 0x431d /* 4325 802.11a id */ -#define BCM4306_D11G_ID 0x4320 /* 4306 802.11g */ -#define BCM4306_D11A_ID 0x4321 /* 4306 802.11a */ #define BCM4306_UART_ID 0x4322 /* 4306 uart */ #define BCM4306_V90_ID 0x4323 /* 4306 v90 codec */ -#define BCM4306_D11DUAL_ID 0x4324 /* 4306 dual A+B */ #define BCM4306_D11G_ID2 0x4325 /* BCM4306_D11G_ID; INF w/loose binding war */ #define BCM4321_D11N_ID 0x4328 /* 4321 802.11n dualband id */ #define BCM4321_D11N2G_ID 0x4329 /* 4321 802.11n 2.4Ghz band id */ @@ -131,9 +128,19 @@ #define BCM43222_D11N_ID 0x4350 /* 43222 802.11n 
dualband device */ #define BCM43222_D11N2G_ID 0x4351 /* 43222 802.11n 2.4GHz device */ #define BCM43222_D11N5G_ID 0x4352 /* 43222 802.11n 5GHz device */ +#define BCM43226_D11N_ID 0x4354 /* 43226 802.11n dualband device */ +#endif /* DEPRECATED */ +/* DEPRECATED but used */ +#define BCM4306_D11G_ID 0x4320 /* 4306 802.11g */ +#define BCM4306_D11A_ID 0x4321 /* 4306 802.11a */ +#define BCM4306_D11DUAL_ID 0x4324 /* 4306 dual A+B */ +#define BCM4318_D11G_ID 0x4318 /* 4318 802.11b/g id */ +#define BCM4318_D11DUAL_ID 0x4319 /* 4318 802.11a/b/g id */ +/* DEPRECATED */ + +#define BCM53572_D11N2G_ID 0x4329 /* 53572 802.11n 2.4Ghz band id (same as BCM4321) */ #define BCM43224_D11N_ID 0x4353 /* 43224 802.11n dualband device */ #define BCM43224_D11N_ID_VEN1 0x0576 /* Vendor specific 43224 802.11n db device */ -#define BCM43226_D11N_ID 0x4354 /* 43226 802.11n dualband device */ #define BCM43236_D11N_ID 0x4346 /* 43236 802.11n dualband device */ #define BCM43236_D11N2G_ID 0x4347 /* 43236 802.11n 2.4GHz device */ #define BCM43236_D11N5G_ID 0x4348 /* 43236 802.11n 5GHz device */ @@ -178,6 +185,9 @@ #define BCM4345_D11AC_ID 0x43ab /* 4345 802.11ac dualband device */ #define BCM4345_D11AC2G_ID 0x43ac /* 4345 802.11ac 2.4G device */ #define BCM4345_D11AC5G_ID 0x43ad /* 4345 802.11ac 5G device */ +#define BCM43455_D11AC_ID 0x43e3 /* 43455 802.11ac dualband device */ +#define BCM43455_D11AC2G_ID 0x43e4 /* 43455 802.11ac 2.4G device */ +#define BCM43455_D11AC5G_ID 0x43e5 /* 43455 802.11ac 5G device */ #define BCM4335_D11AC_ID 0x43ae #define BCM4335_D11AC2G_ID 0x43af #define BCM4335_D11AC5G_ID 0x43b0 @@ -205,9 +215,15 @@ #define BCM43596_D11AC_ID 0x4415 /* 43596 802.11ac dualband device */ #define BCM43596_D11AC2G_ID 0x4416 /* 43596 802.11ac 2.4G device */ #define BCM43596_D11AC5G_ID 0x4417 /* 43596 802.11ac 5G device */ +#define BCM43597_D11AC_ID 0x441c /* 43597 802.11ac dualband device */ +#define BCM43597_D11AC2G_ID 0x441d /* 43597 802.11ac 2.4G device */ +#define BCM43597_D11AC5G_ID 
0x441e /* 43597 802.11ac 5G device */ #define BCM43909_D11AC_ID 0x43d0 /* 43909 802.11ac dualband device */ #define BCM43909_D11AC2G_ID 0x43d1 /* 43909 802.11ac 2.4G device */ #define BCM43909_D11AC5G_ID 0x43d2 /* 43909 802.11ac 5G device */ +#define BCM43012_D11N_ID 0xA804 /* 43012 802.11n dualband device */ +#define BCM43012_D11N2G_ID 0xA805 /* 43012 802.11n 2.4G device */ +#define BCM43012_D11N5G_ID 0xA806 /* 43012 802.11n 5G device */ /* PCI Subsystem ID */ #define BCM943228HMB_SSID_VEN1 0x0607 @@ -253,8 +269,21 @@ #define BCM4354_D11AC2G_ID 0x43e0 /* 4354 802.11ac 2.4G device */ #define BCM4354_D11AC5G_ID 0x43e1 /* 4354 802.11ac 5G device */ #define BCM43430_D11N2G_ID 0x43e2 /* 43430 802.11n 2.4G device */ +#define BCM43018_D11N2G_ID 0x441b /* 43018 802.11n 2.4G device */ +#define BCM4347_D11AC_ID 0x440a /* 4347 802.11ac dualband device */ +#define BCM4347_D11AC2G_ID 0x440b /* 4347 802.11ac 2.4G device */ +#define BCM4347_D11AC5G_ID 0x440c /* 4347 802.11ac 5G device */ + +#define BCM4361_D11AC_ID 0x441f /* 4361 802.11ac dualband device */ +#define BCM4361_D11AC2G_ID 0x4420 /* 4361 802.11ac 2.4G device */ +#define BCM4361_D11AC5G_ID 0x4421 /* 4361 802.11ac 5G device */ + +#define BCM4364_D11AC_ID 0x4464 /* 4364 802.11ac dualband device */ +#define BCM4364_D11AC2G_ID 0x446a /* 4364 802.11ac 2.4G device */ +#define BCM4364_D11AC5G_ID 0x446b /* 4364 802.11ac 5G device */ + #define BCM4365_D11AC_ID 0x43ca #define BCM4365_D11AC2G_ID 0x43cb #define BCM4365_D11AC5G_ID 0x43cc @@ -275,6 +304,17 @@ #define BCM4356_D11AC2G_ID 0x43ed /* 4356 802.11ac 2.4G device */ #define BCM4356_D11AC5G_ID 0x43ee /* 4356 802.11ac 5G device */ +#define BCM4371_D11AC_ID 0x440d /* 4371 802.11ac dualband device */ +#define BCM4371_D11AC2G_ID 0x440e /* 4371 802.11ac 2.4G device */ +#define BCM4371_D11AC5G_ID 0x440f /* 4371 802.11ac 5G device */ +#define BCM7271_D11AC_ID 0x4410 /* 7271 802.11ac dualband device */ +#define BCM7271_D11AC2G_ID 0x4411 /* 7271 802.11ac 2.4G device */ +#define 
BCM7271_D11AC5G_ID 0x4412 /* 7271 802.11ac 5G device */ + +#define BCM4373_D11AC_ID 0x4418 /* 4373 802.11ac dualband device */ +#define BCM4373_D11AC2G_ID 0x4419 /* 4373 802.11ac 2.4G device */ +#define BCM4373_D11AC5G_ID 0x441a /* 4373 802.11ac 5G device */ + #define BCMGPRS_UART_ID 0x4333 /* Uart id used by 4306/gprs card */ #define BCMGPRS2_UART_ID 0x4344 /* Uart id used by 4306/gprs card */ #define FPGA_JTAGM_ID 0x43f0 /* FPGA jtagm device id */ @@ -309,10 +349,13 @@ #define BCM47XX_ATA100_ID 0x471d /* 47xx parallel ATA */ #define BCM47XX_SATAXOR_ID 0x471e /* 47xx serial ATA & XOR DMA */ #define BCM47XX_GIGETH_ID 0x471f /* 47xx GbE (5700) */ +#ifdef DEPRECATED /* These products have been deprecated */ #define BCM4712_MIPS_ID 0x4720 /* 4712 base devid */ #define BCM4716_DEVICE_ID 0x4722 /* 4716 base devid */ +#endif /* DEPRECATED */ #define BCM47XX_USB30H_ID 0x472a /* 47xx usb 3.0 host */ #define BCM47XX_USB30D_ID 0x472b /* 47xx usb 3.0 device */ +#define BCM47XX_USBHUB_ID 0x472c /* 47xx usb hub */ #define BCM47XX_SMBUS_EMU_ID 0x47fe /* 47xx emulated SMBus device */ #define BCM47XX_XOR_EMU_ID 0x47ff /* 47xx emulated XOR engine */ #define EPI41210_DEVICE_ID 0xa0fa /* bcm4210 */ @@ -324,29 +367,48 @@ #define R5C822_SDIOH_ID 0x0822 /* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host */ #define JMICRON_SDIOH_ID 0x2381 /* JMicron Standard SDIO Host Controller */ +#define BCM43452_D11AC_ID 0x47ab /* 43452 802.11ac dualband device */ +#define BCM43452_D11AC2G_ID 0x47ac /* 43452 802.11ac 2.4G device */ +#define BCM43452_D11AC5G_ID 0x47ad /* 43452 802.11ac 5G device */ + /* Chip IDs */ +#ifdef DEPRECATED /* These products have been deprecated */ #define BCM4306_CHIP_ID 0x4306 /* 4306 chipcommon chipid */ #define BCM4311_CHIP_ID 0x4311 /* 4311 PCIe 802.11a/b/g */ #define BCM43111_CHIP_ID 43111 /* 43111 chipcommon chipid (OTP chipid) */ #define BCM43112_CHIP_ID 43112 /* 43112 chipcommon chipid (OTP chipid) */ #define BCM4312_CHIP_ID 0x4312 /* 4312 chipcommon chipid */ 
-#define BCM4313_CHIP_ID 0x4313 /* 4313 chip id */ -#define BCM43131_CHIP_ID 43131 /* 43131 chip id (OTP chipid) */ #define BCM4315_CHIP_ID 0x4315 /* 4315 chip id */ #define BCM4318_CHIP_ID 0x4318 /* 4318 chipcommon chipid */ #define BCM4319_CHIP_ID 0x4319 /* 4319 chip id */ #define BCM4320_CHIP_ID 0x4320 /* 4320 chipcommon chipid */ #define BCM4321_CHIP_ID 0x4321 /* 4321 chipcommon chipid */ -#define BCM43217_CHIP_ID 43217 /* 43217 chip id (OTP chipid) */ #define BCM4322_CHIP_ID 0x4322 /* 4322 chipcommon chipid */ #define BCM43221_CHIP_ID 43221 /* 43221 chipcommon chipid (OTP chipid) */ #define BCM43222_CHIP_ID 43222 /* 43222 chipcommon chipid */ +#define BCM43226_CHIP_ID 43226 /* 43226 chipcommon chipid */ +#define BCM43231_CHIP_ID 43231 /* 43231 chipcommon chipid (OTP chipid) */ +#define BCM4342_CHIP_ID 4342 /* 4342 chipcommon chipid (OTP, RBBU) */ +#define BCM4325_CHIP_ID 0x4325 /* 4325 chip id */ +#define BCM4328_CHIP_ID 0x4328 /* 4328 chip id */ +#define BCM4329_CHIP_ID 0x4329 /* 4329 chipcommon chipid */ +#define BCM4712_CHIP_ID 0x4712 /* 4712 chipcommon chipid */ +#endif /* DEPRECATED */ + +/* DEPRECATED but still referenced in components - start */ +#define BCM4716_CHIP_ID 0x4716 /* 4716 chipcommon chipid */ +#define BCM4748_CHIP_ID 0x4748 /* 4716 chipcommon chipid (OTP, RBBU) */ +#define BCM47162_CHIP_ID 47162 /* 47162 chipcommon chipid */ +#define BCM5354_CHIP_ID 0x5354 /* 5354 chipcommon chipid */ +/* DEPRECATED but still referenced in components - end */ + #define BCM43224_CHIP_ID 43224 /* 43224 chipcommon chipid */ #define BCM43225_CHIP_ID 43225 /* 43225 chipcommon chipid */ #define BCM43227_CHIP_ID 43227 /* 43227 chipcommon chipid */ #define BCM43228_CHIP_ID 43228 /* 43228 chipcommon chipid */ -#define BCM43226_CHIP_ID 43226 /* 43226 chipcommon chipid */ -#define BCM43231_CHIP_ID 43231 /* 43231 chipcommon chipid (OTP chipid) */ +#define BCM43217_CHIP_ID 43217 /* 43217 chip id (OTP chipid) */ +#define BCM4313_CHIP_ID 0x4313 /* 4313 chip id */ +#define 
BCM43131_CHIP_ID 43131 /* 43131 chip id (OTP chipid) */ #define BCM43234_CHIP_ID 43234 /* 43234 chipcommon chipid */ #define BCM43235_CHIP_ID 43235 /* 43235 chipcommon chipid */ #define BCM43236_CHIP_ID 43236 /* 43236 chipcommon chipid */ @@ -358,9 +420,6 @@ #define BCM43428_CHIP_ID 43428 /* 43228 chipcommon chipid (OTP, RBBU) */ #define BCM43431_CHIP_ID 43431 /* 4331 chipcommon chipid (OTP, RBBU) */ #define BCM43460_CHIP_ID 43460 /* 4360 chipcommon chipid (OTP, RBBU) */ -#define BCM4325_CHIP_ID 0x4325 /* 4325 chip id */ -#define BCM4328_CHIP_ID 0x4328 /* 4328 chip id */ -#define BCM4329_CHIP_ID 0x4329 /* 4329 chipcommon chipid */ #define BCM4331_CHIP_ID 0x4331 /* 4331 chipcommon chipid */ #define BCM4336_CHIP_ID 0x4336 /* 4336 chipcommon chipid */ #define BCM43362_CHIP_ID 43362 /* 43362 chipcommon chipid */ @@ -386,19 +445,20 @@ #define BCM4350_CHIP_ID 0x4350 /* 4350 chipcommon chipid */ #define BCM4354_CHIP_ID 0x4354 /* 4354 chipcommon chipid */ #define BCM4356_CHIP_ID 0x4356 /* 4356 chipcommon chipid */ +#define BCM4371_CHIP_ID 0x4371 /* 4371 chipcommon chipid */ #define BCM43556_CHIP_ID 0xAA24 /* 43556 chipcommon chipid */ #define BCM43558_CHIP_ID 0xAA26 /* 43558 chipcommon chipid */ +#define BCM43562_CHIP_ID 0xAA2A /* 43562 chipcommon chipid */ #define BCM43566_CHIP_ID 0xAA2E /* 43566 chipcommon chipid */ #define BCM43567_CHIP_ID 0xAA2F /* 43567 chipcommon chipid */ #define BCM43568_CHIP_ID 0xAA30 /* 43568 chipcommon chipid */ #define BCM43569_CHIP_ID 0xAA31 /* 43569 chipcommon chipid */ #define BCM43570_CHIP_ID 0xAA32 /* 43570 chipcommon chipid */ -#define BCM4358_CHIP_ID 0x4358 /* 4358 chipcommon chipid */ +#define BCM4358_CHIP_ID 0x4358 /* 4358 chipcommon chipid */ #define BCM4371_CHIP_ID 0x4371 /* 4371 chipcommon chipid */ #define BCM43012_CHIP_ID 0xA804 /* 43012 chipcommon chipid */ #define BCM4350_CHIP(chipid) ((CHIPID(chipid) == BCM4350_CHIP_ID) || \ (CHIPID(chipid) == BCM4354_CHIP_ID) || \ - (CHIPID(chipid) == BCM4356_CHIP_ID) || \ (CHIPID(chipid) == 
BCM43556_CHIP_ID) || \ (CHIPID(chipid) == BCM43558_CHIP_ID) || \ (CHIPID(chipid) == BCM43566_CHIP_ID) || \ @@ -407,37 +467,52 @@ (CHIPID(chipid) == BCM43569_CHIP_ID) || \ (CHIPID(chipid) == BCM43570_CHIP_ID) || \ (CHIPID(chipid) == BCM4358_CHIP_ID)) /* 4350 variations */ + #define BCM4345_CHIP_ID 0x4345 /* 4345 chipcommon chipid */ #define BCM43454_CHIP_ID 43454 /* 43454 chipcommon chipid */ -#define BCM43455_CHIP_ID 43455 /* 43455 chipcommon chipid */ -#define BCM43457_CHIP_ID 43457 /* 43457 chipcommon chipid */ -#define BCM43458_CHIP_ID 43458 /* 43458 chipcommon chipid */ +#define BCM43455_CHIP_ID 43455 /* 43455 chipcommon chipid */ +#define BCM43457_CHIP_ID 43457 /* 43457 chipcommon chipid */ +#define BCM43458_CHIP_ID 43458 /* 43458 chipcommon chipid */ + +#define BCM4345_CHIP(chipid) (CHIPID(chipid) == BCM4345_CHIP_ID || \ + CHIPID(chipid) == BCM43454_CHIP_ID || \ + CHIPID(chipid) == BCM43455_CHIP_ID || \ + CHIPID(chipid) == BCM43457_CHIP_ID || \ + CHIPID(chipid) == BCM43458_CHIP_ID) + +#define CASE_BCM4345_CHIP case BCM4345_CHIP_ID: /* fallthrough */ \ + case BCM43454_CHIP_ID: /* fallthrough */ \ + case BCM43455_CHIP_ID: /* fallthrough */ \ + case BCM43457_CHIP_ID: /* fallthrough */ \ + case BCM43458_CHIP_ID + #define BCM43430_CHIP_ID 43430 /* 43430 chipcommon chipid */ +#define BCM43018_CHIP_ID 43018 /* 43018 chipcommon chipid */ #define BCM4349_CHIP_ID 0x4349 /* 4349 chipcommon chipid */ #define BCM4355_CHIP_ID 0x4355 /* 4355 chipcommon chipid */ #define BCM4359_CHIP_ID 0x4359 /* 4359 chipcommon chipid */ #define BCM4349_CHIP(chipid) ((CHIPID(chipid) == BCM4349_CHIP_ID) || \ (CHIPID(chipid) == BCM4355_CHIP_ID) || \ (CHIPID(chipid) == BCM4359_CHIP_ID)) - -#define BCM4345_CHIP(chipid) (CHIPID(chipid) == BCM4345_CHIP_ID || \ - CHIPID(chipid) == BCM43454_CHIP_ID || \ - CHIPID(chipid) == BCM43455_CHIP_ID || \ - CHIPID(chipid) == BCM43457_CHIP_ID || \ - CHIPID(chipid) == BCM43458_CHIP_ID) - -#define CASE_BCM4345_CHIP case BCM4345_CHIP_ID: /* fallthrough */ \ - 
case BCM43454_CHIP_ID: /* fallthrough */ \ - case BCM43455_CHIP_ID: /* fallthrough */ \ - case BCM43457_CHIP_ID: /* fallthrough */ \ - case BCM43458_CHIP_ID - #define BCM4349_CHIP_GRPID BCM4349_CHIP_ID: \ case BCM4355_CHIP_ID: \ case BCM4359_CHIP_ID +#define BCM43596_CHIP_ID 43596 /* 43596 chipcommon chipid */ +#define BCM4347_CHIP_ID 0x4347 /* 4347 chipcommon chipid */ +#define BCM4357_CHIP_ID 0x4357 /* 4357 chipcommon chipid */ +#define BCM4361_CHIP_ID 0x4361 /* 4361 chipcommon chipid */ +#define BCM4347_CHIP(chipid) ((CHIPID(chipid) == BCM4347_CHIP_ID) || \ + (CHIPID(chipid) == BCM4357_CHIP_ID) || \ + (CHIPID(chipid) == BCM4361_CHIP_ID)) +#define BCM4347_CHIP_GRPID BCM4347_CHIP_ID: \ + case BCM4357_CHIP_ID: \ + case BCM4361_CHIP_ID #define BCM4365_CHIP_ID 0x4365 /* 4365 chipcommon chipid */ #define BCM4366_CHIP_ID 0x4366 /* 4366 chipcommon chipid */ +#define BCM4365_CHIP(chipid) ((CHIPID(chipid) == BCM4365_CHIP_ID) || \ + (CHIPID(chipid) == BCM4366_CHIP_ID)) + #define BCM43909_CHIP_ID 0xab85 /* 43909 chipcommon chipid */ @@ -447,11 +522,11 @@ #define BCM43602_CHIP(chipid) ((CHIPID(chipid) == BCM43602_CHIP_ID) || \ (CHIPID(chipid) == BCM43462_CHIP_ID) || \ (CHIPID(chipid) == BCM43522_CHIP_ID)) /* 43602 variations */ +#define BCM43012_CHIP(chipid) (CHIPID(chipid) == BCM43012_CHIP_ID) #define CASE_BCM43602_CHIP case BCM43602_CHIP_ID: /* fallthrough */ \ case BCM43462_CHIP_ID: /* fallthrough */ \ case BCM43522_CHIP_ID -#define BCM4342_CHIP_ID 4342 /* 4342 chipcommon chipid (OTP, RBBU) */ #define BCM4402_CHIP_ID 0x4402 /* 4402 chipid */ #define BCM4704_CHIP_ID 0x4704 /* 4704 chipcommon chipid */ #define BCM4706_CHIP_ID 0x5300 /* 4706 chipcommon chipid */ @@ -462,24 +537,32 @@ ((chipid) == BCM53018_CHIP_ID) || \ ((chipid) == BCM47094_CHIP_ID)) #define BCM4710_CHIP_ID 0x4710 /* 4710 chipid */ -#define BCM4712_CHIP_ID 0x4712 /* 4712 chipcommon chipid */ -#define BCM4716_CHIP_ID 0x4716 /* 4716 chipcommon chipid */ -#define BCM47162_CHIP_ID 47162 /* 47162 chipcommon 
chipid */ -#define BCM4748_CHIP_ID 0x4748 /* 4716 chipcommon chipid (OTP, RBBU) */ #define BCM4749_CHIP_ID 0x4749 /* 5357 chipcommon chipid (OTP, RBBU) */ #define BCM4785_CHIP_ID 0x4785 /* 4785 chipcommon chipid */ #define BCM5350_CHIP_ID 0x5350 /* 5350 chipcommon chipid */ #define BCM5352_CHIP_ID 0x5352 /* 5352 chipcommon chipid */ -#define BCM5354_CHIP_ID 0x5354 /* 5354 chipcommon chipid */ #define BCM5365_CHIP_ID 0x5365 /* 5365 chipcommon chipid */ #define BCM5356_CHIP_ID 0x5356 /* 5356 chipcommon chipid */ #define BCM5357_CHIP_ID 0x5357 /* 5357 chipcommon chipid */ #define BCM53572_CHIP_ID 53572 /* 53572 chipcommon chipid */ #define BCM53573_CHIP_ID 53573 /* 53573 chipcommon chipid */ -#define BCM53573_CHIP(chipid) (CHIPID(chipid) == BCM53573_CHIP_ID) -#define BCM53573_CHIP_GRPID BCM53573_CHIP_ID +#define BCM53574_CHIP_ID 53574 /* 53574 chipcommon chipid */ +#define BCM53573_CHIP(chipid) ((CHIPID(chipid) == BCM53573_CHIP_ID) || \ + (CHIPID(chipid) == BCM53574_CHIP_ID)) +#define BCM53573_CHIP_GRPID BCM53573_CHIP_ID : \ + case BCM53574_CHIP_ID +#define BCM53573_DEVICE(devid) (((devid) == BCM53573_D11AC_ID) || \ + ((devid) == BCM53573_D11AC2G_ID) || \ + ((devid) == BCM53573_D11AC5G_ID) || \ + ((devid) == BCM47189_D11AC_ID) || \ + ((devid) == BCM47189_D11AC2G_ID) || \ + ((devid) == BCM47189_D11AC5G_ID)) + +#define BCM7271_CHIP_ID 0x05c9 /* 7271 chipcommon chipid */ +#define BCM4373_CHIP_ID 0x4373 /* 4373 chipcommon chipid */ /* Package IDs */ +#ifdef DEPRECATED /* These products have been deprecated */ #define BCM4303_PKG_ID 2 /* 4303 package id */ #define BCM4309_PKG_ID 1 /* 4309 package id */ #define BCM4712LARGE_PKG_ID 0 /* 340pin 4712 package id */ @@ -495,6 +578,7 @@ #define BCM4716_PKG_ID 8 /* 4716 package id */ #define BCM4717_PKG_ID 9 /* 4717 package id */ #define BCM4718_PKG_ID 10 /* 4718 package id */ +#endif /* DEPRECATED */ #define BCM5356_PKG_NONMODE 1 /* 5356 package without nmode suppport */ #define BCM5358U_PKG_ID 8 /* 5358U package id */ #define 
BCM5358_PKG_ID 9 /* 5358 package id */ @@ -545,7 +629,7 @@ #define BFL_BTC2WIRE 0x00000001 /* old 2wire Bluetooth coexistence, OBSOLETE */ #define BFL_BTCOEX 0x00000001 /* Board supports BTCOEX */ #define BFL_PACTRL 0x00000002 /* Board has gpio 9 controlling the PA */ -#define BFL_AIRLINEMODE 0x00000004 /* Board implements gpio 13 radio disable indication, UNUSED */ +#define BFL_AIRLINEMODE 0x00000004 /* Board implements gpio radio disable indication */ #define BFL_ADCDIV 0x00000008 /* Board has the rssi ADC divider */ #define BFL_DIS_256QAM 0x00000008 #define BFL_ENETROBO 0x00000010 /* Board has robo switch or core */ @@ -699,6 +783,34 @@ /* boardflags4 for SROM12 */ #define BFL4_SROM12_4dBPAD (1 << 0) /* To distinguigh between normal and 4dB pad board */ +#define BFL4_SROM12_2G_DETTYPE (1 << 1) /* Determine power detector type for 2G */ +#define BFL4_SROM12_5G_DETTYPE (1 << 2) /* Determine power detector type for 5G */ +#define BFL4_4364_HARPOON 0x0100 /* Harpoon module 4364 */ +#define BFL4_4364_GODZILLA 0x0200 /* Godzilla module 4364 */ + + +/* papd params */ +#define PAPD_TX_ATTN_2G 0xFF +#define PAPD_TX_ATTN_5G 0xFF00 +#define PAPD_TX_ATTN_5G_SHIFT 8 +#define PAPD_RX_ATTN_2G 0xFF +#define PAPD_RX_ATTN_5G 0xFF00 +#define PAPD_RX_ATTN_5G_SHIFT 8 +#define PAPD_CAL_IDX_2G 0xFF +#define PAPD_CAL_IDX_5G 0xFF00 +#define PAPD_CAL_IDX_5G_SHIFT 8 +#define PAPD_BBMULT_2G 0xFF +#define PAPD_BBMULT_5G 0xFF00 +#define PAPD_BBMULT_5G_SHIFT 8 +#define TIA_GAIN_MODE_2G 0xFF +#define TIA_GAIN_MODE_5G 0xFF00 +#define TIA_GAIN_MODE_5G_SHIFT 8 +#define PAPD_EPS_OFFSET_2G 0xFFFF +#define PAPD_EPS_OFFSET_5G 0xFFFF0000 +#define PAPD_EPS_OFFSET_5G_SHIFT 16 +#define PAPD_CALREF_DB_2G 0xFF +#define PAPD_CALREF_DB_5G 0xFF00 +#define PAPD_CALREF_DB_5G_SHIFT 8 /* board specific GPIO assignment, gpio 0-3 are also customer-configurable led */ @@ -747,13 +859,25 @@ /* 43342 Boards */ #define BCM943342FCAGBI_SSID 0x0641 +/* 43012 wlbga Board */ +#define BCM943012WLREF_SSID 0x07d7 + +/* 43012 
fcbga Board */ +#define BCM943012FCREF_SSID 0x07d4 + /* 43602 Boards, unclear yet what boards will be created. */ #define BCM943602RSVD1_SSID 0x06a5 #define BCM943602RSVD2_SSID 0x06a6 #define BCM943602X87 0X0133 -#define BCM943602X87P2 0X0143 +#define BCM943602X87P2 0X0152 +#define BCM943602X87P3 0X0153 #define BCM943602X238 0X0132 #define BCM943602X238D 0X014A +#define BCM943602X238DP2 0X0155 +#define BCM943602X238DP3 0X0156 +#define BCM943602X100 0x0761 +#define BCM943602X100GS 0x0157 +#define BCM943602X100P2 0x015A /* # of GPIO pins */ #define GPIO_NUMPINS 32 diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bcmdhcp.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmdhcp.h old mode 100755 new mode 100644 similarity index 68% rename from drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bcmdhcp.h rename to drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmdhcp.h index 5e51979ce393..f3e1b08ed981 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bcmdhcp.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmdhcp.h @@ -1,18 +1,30 @@ /* * Fundamental constants relating to DHCP Protocol * - * Copyright (C) 2016, Broadcom Corporation - * All Rights Reserved. + * Copyright (C) 1999-2017, Broadcom Corporation * - * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation; - * the contents of this file may not be disclosed to third parties, copied - * or duplicated in any form, in whole or in part, without the prior - * written permission of Broadcom Corporation. 
+ * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
* * - * <> + * <> * - * $Id: bcmdhcp.h 518342 2014-12-01 23:21:41Z $ + * $Id: bcmdhcp.h 700076 2017-05-17 14:42:22Z $ */ #ifndef _bcmdhcp_h_ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmendian.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmendian.h index 27f237947324..00adedf1cd97 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmendian.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmendian.h @@ -1,7 +1,7 @@ /* * Byte order utilities * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bcmeth.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmeth.h old mode 100755 new mode 100644 similarity index 94% rename from drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bcmeth.h rename to drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmeth.h index 7ad453dbad0d..e1d2d6669e91 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bcmeth.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmeth.h @@ -1,7 +1,7 @@ /* * Broadcom Ethernettype protocol definitions * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmeth.h 518342 2014-12-01 23:21:41Z $ + * $Id: bcmeth.h 701825 2017-05-26 16:45:27Z $ */ /* @@ -93,9 +93,11 @@ */ /* #define BCMILCP_BCM_SUBTYPE_EAPOL 3 */ #define BCMILCP_BCM_SUBTYPE_DPT 4 +#define BCMILCP_BCM_SUBTYPE_DNGLEVENT 5 #define BCMILCP_BCM_SUBTYPEHDR_MINLENGTH 8 #define BCMILCP_BCM_SUBTYPEHDR_VERSION 0 
+#define BCMILCP_BCM_SUBTYPE_EVENT_DATA_PAD 2 /* These fields are stored in network order */ typedef BWL_PRE_PACKED_STRUCT struct bcmeth_hdr diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bcmevent.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmevent.h old mode 100755 new mode 100644 similarity index 59% rename from drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bcmevent.h rename to drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmevent.h index 6c30d57bfbbc..27f77a001873 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bcmevent.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmevent.h @@ -1,9 +1,9 @@ /* * Broadcom Event protocol definitions * - * Dependencies: proto/bcmeth.h + * Dependencies: bcmeth.h * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -26,7 +26,7 @@ * * <> * - * $Id: bcmevent.h 555154 2015-05-07 20:46:07Z $ + * $Id: bcmevent.h 700076 2017-05-17 14:42:22Z $ * */ @@ -38,11 +38,12 @@ #ifndef _BCMEVENT_H_ #define _BCMEVENT_H_ -#ifndef _TYPEDEFS_H_ #include -#endif /* #include -- TODO: req., excluded to overwhelming coupling (break up ethernet.h) */ -#include +#include +#if defined(DNGL_EVENT_SUPPORT) +#include +#endif /* This marks the start of a packed structure section. */ #include @@ -97,6 +98,19 @@ typedef BWL_PRE_PACKED_STRUCT struct bcm_event { /* data portion follows */ } BWL_POST_PACKED_STRUCT bcm_event_t; +/* + * used by host event + * note: if additional event types are added, it should go with is_wlc_event_frame() as well. 
+ */ +typedef union bcm_event_msg_u { + wl_event_msg_t event; +#if defined(DNGL_EVENT_SUPPORT) + bcm_dngl_event_msg_t dngl_event; +#endif + + /* add new event here */ +} bcm_event_msg_u_t; + #define BCM_MSG_LEN (sizeof(bcm_event_t) - sizeof(bcmeth_hdr_t) - sizeof(struct ether_header)) /* Event messages */ @@ -119,7 +133,7 @@ typedef BWL_PRE_PACKED_STRUCT struct bcm_event { #define WLC_E_LINK 16 /* generic link indication */ #define WLC_E_MIC_ERROR 17 /* TKIP MIC error occurred */ #define WLC_E_NDIS_LINK 18 /* NDIS style link indication */ -#define WLC_E_ROAM 19 /* roam attempt occurred: indicate status & reason */ +#define WLC_E_ROAM 19 /* roam complete: indicate status & reason */ #define WLC_E_TXFAIL 20 /* change in dot11FailedCount (txfail) */ #define WLC_E_PMKID_CACHE 21 /* WPA2 pmkid cache indication */ #define WLC_E_RETROGRADE_TSF 22 /* current AP's TSF value went backward */ @@ -132,16 +146,17 @@ typedef BWL_PRE_PACKED_STRUCT struct bcm_event { #define WLC_E_BCNSENT_IND 29 /* indicate to host of beacon transmit */ #define WLC_E_BCNRX_MSG 30 /* Send the received beacon up to the host */ #define WLC_E_BCNLOST_MSG 31 /* indicate to host loss of beacon */ -#define WLC_E_ROAM_PREP 32 /* before attempting to roam */ +#define WLC_E_ROAM_PREP 32 /* before attempting to roam association */ #define WLC_E_PFN_NET_FOUND 33 /* PFN network found event */ #define WLC_E_PFN_NET_LOST 34 /* PFN network lost event */ #define WLC_E_RESET_COMPLETE 35 #define WLC_E_JOIN_START 36 -#define WLC_E_ROAM_START 37 +#define WLC_E_ROAM_START 37 /* roam attempt started: indicate reason */ #define WLC_E_ASSOC_START 38 #define WLC_E_IBSS_ASSOC 39 #define WLC_E_RADIO 40 #define WLC_E_PSM_WATCHDOG 41 /* PSM microcode watchdog fired */ + #define WLC_E_PROBREQ_MSG 44 /* probe request received */ #define WLC_E_SCAN_CONFIRM_IND 45 #define WLC_E_PSK_SUP 46 /* WPA Handshake fail */ @@ -200,7 +215,7 @@ typedef BWL_PRE_PACKED_STRUCT struct bcm_event { #define WLC_E_NATIVE 94 /* port-specific event and 
payload (e.g. NDIS) */ #define WLC_E_PKTDELAY_IND 95 /* event for tx pkt delay suddently jump */ #define WLC_E_PSTA_PRIMARY_INTF_IND 99 /* psta primary interface indication */ -#define WLC_E_NAN 100 /* NAN event */ +#define WLC_E_NAN 100 /* NAN event - Reserved for future */ #define WLC_E_BEACON_FRAME_RX 101 #define WLC_E_SERVICE_FOUND 102 /* desired service found */ #define WLC_E_GAS_FRAGMENT_RX 103 /* GAS fragment received */ @@ -215,6 +230,9 @@ typedef BWL_PRE_PACKED_STRUCT struct bcm_event { #define WLC_E_IBSS_COALESCE 110 /* IBSS Coalescing */ #define WLC_E_AIBSS_TXFAIL 110 /* TXFAIL event for AIBSS, re using event 110 */ #define WLC_E_BSS_LOAD 114 /* Inform host of beacon bss load */ +#define WLC_E_MIMO_PWR_SAVE 115 /* Inform host MIMO PWR SAVE learning events */ +#define WLC_E_LEAKY_AP_STATS 116 /* Inform host leaky Ap stats events */ +#define WLC_E_ALLOW_CREDIT_BORROW 117 /* Allow or disallow wlfc credit borrowing in DHD */ #define WLC_E_MSCH 120 /* Multiple channel scheduler event */ #define WLC_E_CSA_START_IND 121 #define WLC_E_CSA_DONE_IND 122 @@ -223,6 +241,7 @@ typedef BWL_PRE_PACKED_STRUCT struct bcm_event { #define WLC_E_BSSID 125 /* to report change in BSSID while roaming */ #define WLC_E_TX_STAT_ERROR 126 /* tx error indication */ #define WLC_E_BCMC_CREDIT_SUPPORT 127 /* credit check for BCMC supported */ +#define WLC_E_PEER_TIMEOUT 128 /* silently drop a STA because of inactivity */ #define WLC_E_BT_WIFI_HANDOVER_REQ 130 /* Handover Request Initiated */ #define WLC_E_SPW_TXINHIBIT 131 /* Southpaw TxInhibit notification */ #define WLC_E_FBT_AUTH_REQ_IND 132 /* FBT Authentication Request Indication */ @@ -233,13 +252,41 @@ typedef BWL_PRE_PACKED_STRUCT struct bcm_event { #define WLC_E_PROBREQ_MSG_RX 137 /* probe req with wl_event_rx_frame_data_t header */ #define WLC_E_PFN_SCAN_COMPLETE 138 /* PFN completed scan of network list */ #define WLC_E_RMC_EVENT 139 /* RMC Event */ -#define WLC_E_DPSTA_INTF_IND 140 /* DPSTA interface indication */ -#define 
WLC_E_RRM 141 /* RRM Event */ -#define WLC_E_PFN_SSID_EXT 142 /* SSID EXT event */ -#define WLC_E_ROAM_EXP_EVENT 143 /* Expanded roam event */ -#define WLC_E_LAST 144 /* highest val + 1 for range checking */ -#if (WLC_E_LAST > 144) -#error "WLC_E_LAST: Invalid value for last event; must be <= 141." +#define WLC_E_DPSTA_INTF_IND 140 /* DPSTA interface indication */ +#define WLC_E_RRM 141 /* RRM Event */ +#define WLC_E_PFN_SSID_EXT 142 /* SSID EXT event */ +#define WLC_E_ROAM_EXP_EVENT 143 /* Expanded roam event */ +#define WLC_E_ULP 146 /* ULP entered indication */ +#define WLC_E_MACDBG 147 /* Ucode debugging event */ +#define WLC_E_RESERVED 148 /* reserved */ +#define WLC_E_PRE_ASSOC_RSEP_IND 149 /* assoc resp received */ +#define WLC_E_PSK_AUTH 150 /* PSK AUTH WPA2-PSK 4 WAY Handshake failure */ +#define WLC_E_TKO 151 /* TCP keepalive offload */ +#define WLC_E_SDB_TRANSITION 152 /* SDB mode-switch event */ +#define WLC_E_NATOE_NFCT 153 /* natoe event */ +#define WLC_E_TEMP_THROTTLE 154 /* Temperature throttling control event */ +#define WLC_E_LINK_QUALITY 155 /* Link quality measurement complete */ +#define WLC_E_BSSTRANS_RESP 156 /* BSS Transition Response received */ +#define WLC_E_TWT_SETUP 157 /* TWT Setup Complete event */ +#define WLC_E_HE_TWT_SETUP 157 /* TODO:Remove after merging TWT changes to trunk */ +#define WLC_E_NAN_CRITICAL 158 /* NAN Critical Event */ +#define WLC_E_NAN_NON_CRITICAL 159 /* NAN Non-Critical Event */ +#define WLC_E_RADAR_DETECTED 160 /* Radar Detected event */ +#define WLC_E_RANGING_EVENT 161 /* Ranging event */ +#define WLC_E_INVALID_IE 162 /* Received invalid IE */ +#define WLC_E_MODE_SWITCH 163 /* Mode switch event */ +#define WLC_E_PKT_FILTER 164 /* Packet filter event */ +#define WLC_E_DMA_TXFLUSH_COMPLETE 165 /* TxFlush done before changing + * tx/rxchain + */ +#define WLC_E_FBT 166 /* FBT event */ +#define WLC_E_PFN_SCAN_BACKOFF 167 /* PFN SCAN Backoff event */ +#define WLC_E_PFN_BSSID_SCAN_BACKOFF 168 /* PFN BSSID SCAN 
BAckoff event */ +#define WLC_E_AGGR_EVENT 169 /* Aggregated event */ +#define WLC_E_TVPM_MITIGATION 171 /* Change in mitigation applied by TVPM */ +#define WLC_E_LAST 172 /* highest val + 1 for range checking */ +#if (WLC_E_LAST > 172) +#error "WLC_E_LAST: Invalid value for last event; must be <= 172." #endif /* WLC_E_LAST */ /* define an API for getting the string name of an event */ @@ -247,6 +294,10 @@ extern const char *bcmevent_get_name(uint event_type); extern void wl_event_to_host_order(wl_event_msg_t * evt); extern void wl_event_to_network_order(wl_event_msg_t * evt); +/* validate if the event is proper and if valid copy event header to event */ +extern int is_wlc_event_frame(void *pktdata, uint pktlen, uint16 exp_usr_subtype, + bcm_event_msg_u_t *out_event); + /* conversion between host and network order for events */ void wl_event_to_host_order(wl_event_msg_t * evt); void wl_event_to_network_order(wl_event_msg_t * evt); @@ -271,6 +322,55 @@ void wl_event_to_network_order(wl_event_msg_t * evt); #define WLC_E_STATUS_ERROR 16 /* request failed due to error */ #define WLC_E_STATUS_INVALID 0xff /* Invalid status code to init variables. 
*/ +/* 4-way handshake event type */ +#define WLC_E_PSK_AUTH_SUB_EAPOL_START 1 /* EAPOL start */ +#define WLC_E_PSK_AUTH_SUB_EAPOL_DONE 2 /* EAPOL end */ +/* GTK event type */ +#define WLC_E_PSK_AUTH_SUB_GTK_DONE 3 /* GTK end */ + +/* 4-way handshake event status code */ +#define WLC_E_STATUS_PSK_AUTH_WPA_TIMOUT 1 /* operation timed out */ +#define WLC_E_STATUS_PSK_AUTH_MIC_WPA_ERR 2 /* MIC error */ +#define WLC_E_STATUS_PSK_AUTH_IE_MISMATCH_ERR 3 /* IE Missmatch error */ +#define WLC_E_STATUS_PSK_AUTH_REPLAY_COUNT_ERR 4 +#define WLC_E_STATUS_PSK_AUTH_PEER_BLACKISTED 5 /* Blaclisted peer */ +#define WLC_E_STATUS_PSK_AUTH_GTK_REKEY_FAIL 6 /* GTK event status code */ + +/* SDB transition status code */ +#define WLC_E_STATUS_SDB_START 1 +#define WLC_E_STATUS_SDB_COMPLETE 2 +/* Slice-swap status code */ +#define WLC_E_STATUS_SLICE_SWAP_START 3 +#define WLC_E_STATUS_SLICE_SWAP_COMPLETE 4 + + +/* SDB transition reason code */ +#define WLC_E_REASON_HOST_DIRECT 0 +#define WLC_E_REASON_INFRA_ASSOC 1 +#define WLC_E_REASON_INFRA_ROAM 2 +#define WLC_E_REASON_INFRA_DISASSOC 3 +#define WLC_E_REASON_NO_MODE_CHANGE_NEEDED 4 +#define WLC_E_REASON_AWDL_ENABLE 5 +#define WLC_E_REASON_AWDL_DISABLE 6 + +/* WLC_E_SDB_TRANSITION event data */ +#define WL_MAX_BSSCFG 4 +#define WL_EVENT_SDB_TRANSITION_VER 1 +typedef struct wl_event_sdb_data { + uint8 wlunit; /* Core index */ + uint8 is_iftype; /* Interface Type(Station, SoftAP, P2P_GO, P2P_GC */ + uint16 chanspec; /* Interface Channel/Chanspec */ + char ssidbuf[(4 * 32) + 1]; /* SSID_FMT_BUF_LEN: ((4 * DOT11_MAX_SSID_LEN) + 1) */ +} wl_event_sdb_data_t; + +typedef struct wl_event_sdb_trans { + uint8 version; /* Event Data Version */ + uint8 rsdb_mode; + uint8 enable_bsscfg; + uint8 reserved; + struct wl_event_sdb_data values[WL_MAX_BSSCFG]; +} wl_event_sdb_trans_t; + /* roam reason codes */ #define WLC_E_REASON_INITIAL_ASSOC 0 /* initial assoc */ #define WLC_E_REASON_LOW_RSSI 1 /* roamed due to low RSSI */ @@ -278,7 +378,6 @@ void 
wl_event_to_network_order(wl_event_msg_t * evt); #define WLC_E_REASON_DISASSOC 3 /* roamed due to DISASSOC indication */ #define WLC_E_REASON_BCNS_LOST 4 /* roamed due to lost beacons */ -/* Roam codes used primarily by CCX */ #define WLC_E_REASON_FAST_ROAM_FAILED 5 /* roamed due to fast roam failure */ #define WLC_E_REASON_DIRECTED_ROAM 6 /* roamed due to request by AP */ #define WLC_E_REASON_TSPEC_REJECTED 7 /* roamed due to TSPEC rejection */ @@ -288,6 +387,8 @@ void wl_event_to_network_order(wl_event_msg_t * evt); /* retained for precommit auto-merging errors; remove once all branches are synced */ #define WLC_E_REASON_REQUESTED_ROAM 11 #define WLC_E_REASON_BSSTRANS_REQ 11 /* roamed due to BSS Transition request by AP */ +#define WLC_E_REASON_LOW_RSSI_CU 12 /* roamed due to low RSSI and Channel Usage */ +#define WLC_E_REASON_RADAR_DETECTED 13 /* roamed due to radar detection by STA */ /* prune reason codes */ #define WLC_E_PRUNE_ENCR_MISMATCH 1 /* encryption mismatch */ @@ -324,6 +425,14 @@ void wl_event_to_network_order(wl_event_msg_t * evt); #define WLC_E_SUP_SEND_FAIL 13 /* message send failure */ #define WLC_E_SUP_DEAUTH 14 /* received FC_DEAUTH */ #define WLC_E_SUP_WPA_PSK_TMO 15 /* WPA PSK 4-way handshake timeout */ +#define WLC_E_SUP_WPA_PSK_M1_TMO 16 /* WPA PSK 4-way handshake M1 timeout */ +#define WLC_E_SUP_WPA_PSK_M3_TMO 17 /* WPA PSK 4-way handshake M3 timeout */ + + +/* Ucode reason codes carried in the WLC_E_MACDBG event */ +#define WLC_E_MACDBG_LIST_PSM 0 /* Dump list update for PSM registers */ +#define WLC_E_MACDBG_LIST_PSMX 1 /* Dump list update for PSMx registers */ +#define WLC_E_MACDBG_REGALL 2 /* Dump all registers */ /* Event data for events that include frames received over the air */ /* WLC_E_PROBRESP_MSG @@ -349,6 +458,14 @@ typedef struct wl_event_data_if { uint8 role; /* see I/F role */ } wl_event_data_if_t; +/* WLC_E_NATOE event data */ +typedef struct wl_event_data_natoe { + uint32 natoe_active; + uint32 sta_ip; + uint16 
start_port; + uint16 end_port; +} wl_event_data_natoe_t; + /* opcode in WLC_E_IF event */ #define WLC_E_IF_ADD 1 /* bsscfg add */ #define WLC_E_IF_DEL 2 /* bsscfg delete */ @@ -361,6 +478,7 @@ typedef struct wl_event_data_if { #define WLC_E_IF_ROLE_P2P_GO 3 /* P2P Group Owner */ #define WLC_E_IF_ROLE_P2P_CLIENT 4 /* P2P Client */ #define WLC_E_IF_ROLE_IBSS 8 /* IBSS */ +#define WLC_E_IF_ROLE_NAN 9 /* NAN */ /* WLC_E_RSSI event data */ typedef struct wl_event_data_rssi { @@ -419,6 +537,9 @@ typedef BWL_PRE_PACKED_STRUCT struct ndis_link_parms { #define WLAN_TDLS_SET_WFD_DISABLED 14 #endif +/* WLC_E_RANGING_EVENT subtypes */ +#define WLC_E_RANGING_RESULTS 0 + /* GAS event data */ typedef BWL_PRE_PACKED_STRUCT struct wl_event_gas { @@ -446,6 +567,9 @@ typedef BWL_PRE_PACKED_STRUCT struct wl_event_sd { wl_sd_tlv_t tlv[1]; /* service discovery TLV */ } BWL_POST_PACKED_STRUCT wl_event_sd_t; +/* WLC_E_PKT_FILTER event sub-classification codes */ +#define WLC_E_PKT_FILTER_TIMEOUT 1 /* Matching packet not received in last timeout seconds */ + /* Note: proxd has a new API (ver 3.0) deprecates the following */ /* Reason codes for WLC_E_PROXD */ @@ -526,6 +650,16 @@ typedef struct wl_intfer_event { uint8 txfail_histo[WLINTFER_STATS_NSMPLS]; /* txfail histo */ } wl_intfer_event_t; +#define RRM_EVENT_VERSION 0 +typedef struct wl_rrm_event { + int16 version; + int16 len; + int16 cat; /* Category */ + int16 subevent; + char payload[1]; /* Measurement payload */ +} wl_rrm_event_t; + + /* WLC_E_PSTA_PRIMARY_INTF_IND event data */ typedef struct wl_psta_primary_intf_event { struct ether_addr prim_ea; /* primary intf ether addr */ @@ -543,249 +677,272 @@ typedef struct wl_dpsta_intf_event { /* ********** NAN protocol events/subevents ********** */ #define NAN_EVENT_BUFFER_SIZE 512 /* max size */ -/* nan application events to the host driver */ -typedef enum nan_app_events { - WL_NAN_EVENT_START = 1, /* NAN cluster started */ - WL_NAN_EVENT_JOIN = 2, /* Joined to a NAN cluster */ - 
WL_NAN_EVENT_ROLE = 3, /* Role or State changed */ - WL_NAN_EVENT_SCAN_COMPLETE = 4, - WL_NAN_EVENT_DISCOVERY_RESULT = 5, - WL_NAN_EVENT_REPLIED = 6, - WL_NAN_EVENT_TERMINATED = 7, /* the instance ID will be present in the ev data */ - WL_NAN_EVENT_RECEIVE = 8, - WL_NAN_EVENT_STATUS_CHG = 9, /* generated on any change in nan_mac status */ - WL_NAN_EVENT_MERGE = 10, /* Merged to a NAN cluster */ - WL_NAN_EVENT_STOP = 11, /* NAN stopped */ - WL_NAN_EVENT_P2P = 12, /* NAN P2P EVENT */ - WL_NAN_EVENT_WINDOW_BEGIN_P2P = 13, /* Event for begin of P2P further availability window */ - WL_NAN_EVENT_WINDOW_BEGIN_MESH = 14, - WL_NAN_EVENT_WINDOW_BEGIN_IBSS = 15, - WL_NAN_EVENT_WINDOW_BEGIN_RANGING = 16, - WL_NAN_EVENT_POST_DISC = 17, /* Event for post discovery data */ - WL_NAN_EVENT_INVALID /* delimiter for max value */ +/* NAN Events sent by firmware */ + +/* + * If you make changes to this enum, dont forget to update the mask (if need be). + */ +typedef enum wl_nan_events { + WL_NAN_EVENT_START = 1, /* NAN cluster started */ + WL_NAN_EVENT_JOIN = 2, /* To be deprecated */ + WL_NAN_EVENT_ROLE = 3, /* Role changed */ + WL_NAN_EVENT_SCAN_COMPLETE = 4, /* To be deprecated */ + WL_NAN_EVENT_DISCOVERY_RESULT = 5, /* Subscribe Received */ + WL_NAN_EVENT_REPLIED = 6, /* Publish Sent */ + WL_NAN_EVENT_TERMINATED = 7, /* sub / pub is terminated */ + WL_NAN_EVENT_RECEIVE = 8, /* Follow up Received */ + WL_NAN_EVENT_STATUS_CHG = 9, /* change in nan_mac status */ + WL_NAN_EVENT_MERGE = 10, /* Merged to a NAN cluster */ + WL_NAN_EVENT_STOP = 11, /* To be deprecated */ + WL_NAN_EVENT_P2P = 12, /* Unused */ + WL_NAN_EVENT_WINDOW_BEGIN_P2P = 13, /* Unused */ + WL_NAN_EVENT_WINDOW_BEGIN_MESH = 14, /* Unused */ + WL_NAN_EVENT_WINDOW_BEGIN_IBSS = 15, /* Unused */ + WL_NAN_EVENT_WINDOW_BEGIN_RANGING = 16, /* Unused */ + WL_NAN_EVENT_POST_DISC = 17, /* Event for post discovery data */ + WL_NAN_EVENT_DATA_IF_ADD = 18, /* Unused */ + WL_NAN_EVENT_DATA_PEER_ADD = 19, /* Event for peer add */ + /* 
nan 2.0 */ + /* Will be removed after source code is committed. */ + WL_NAN_EVENT_DATA_IND = 20, + WL_NAN_EVENT_PEER_DATAPATH_IND = 20, /* Incoming DP req */ + /* Will be removed after source code is committed. */ + WL_NAN_EVENT_DATA_CONF = 21, + WL_NAN_EVENT_DATAPATH_ESTB = 21, /* DP Established */ + WL_NAN_EVENT_SDF_RX = 22, /* SDF payload */ + WL_NAN_EVENT_DATAPATH_END = 23, /* DP Terminate recvd */ + /* Below event needs to be removed after source code is committed. */ + WL_NAN_EVENT_DATA_END = 23, + WL_NAN_EVENT_BCN_RX = 24, /* received beacon payload */ + WL_NAN_EVENT_PEER_DATAPATH_RESP = 25, /* Peer's DP response */ + WL_NAN_EVENT_PEER_DATAPATH_CONF = 26, /* Peer's DP confirm */ + WL_NAN_EVENT_RNG_REQ_IND = 27, /* Range Request */ + WL_NAN_EVENT_RNG_RPT_IND = 28, /* Range Report */ + WL_NAN_EVENT_RNG_TERM_IND = 29, /* Range Termination */ + WL_NAN_EVENT_PEER_DATAPATH_SEC_INST = 30, /* Peer's DP sec install */ + WL_NAN_EVENT_TXS = 31, /* for tx status of follow-up and SDFs */ + WL_NAN_EVENT_INVALID /* delimiter for max value */ } nan_app_events_e; +#define NAN_EV_MASK(ev) \ + (1 << (ev - 1)) #define IS_NAN_EVT_ON(var, evt) ((var & (1 << (evt-1))) != 0) /* ******************* end of NAN section *************** */ -#define MSCH_EVENTS_BUFFER_SIZE 2048 +/* WLC_E_ULP event data */ +#define WL_ULP_EVENT_VERSION 1 +#define WL_ULP_DISABLE_CONSOLE 1 /* Disable console message on ULP entry */ +#define WL_ULP_UCODE_DOWNLOAD 2 /* Download ULP ucode file */ -/* Reason codes for WLC_E_MSCH */ -#define WLC_E_MSCH_START 0 /* start event check */ -#define WLC_E_MSCH_EXIT 1 /* exit event check */ -#define WLC_E_MSCH_REQ 2 /* request event */ -#define WLC_E_MSCH_CALLBACK 3 /* call back event */ -#define WLC_E_MSCH_MESSAGE 4 /* message event */ -#define WLC_E_MSCH_PROFILE_START 5 -#define WLC_E_MSCH_PROFILE_END 6 -#define WLC_E_MSCH_REQ_HANDLE 7 -#define WLC_E_MSCH_REQ_ENTITY 8 -#define WLC_E_MSCH_CHAN_CTXT 9 -#define WLC_E_MSCH_TIMESLOT 10 -#define WLC_E_MSCH_REQ_TIMING 11 
+typedef struct wl_ulp_event { + uint16 version; + uint16 ulp_dongle_action; +} wl_ulp_event_t; -typedef BWL_PRE_PACKED_STRUCT struct msch_event_data { - uint32 time_lo; /* Request time */ - uint32 time_hi; -} BWL_POST_PACKED_STRUCT msch_event_data_t; +/* TCP keepalive event data */ +typedef BWL_PRE_PACKED_STRUCT struct wl_event_tko { + uint8 index; /* TCP connection index, 0 to max-1 */ + uint8 pad[3]; /* 4-byte struct alignment */ +} BWL_POST_PACKED_STRUCT wl_event_tko_t; -typedef BWL_PRE_PACKED_STRUCT struct msch_start_event_data { - uint32 time_lo; /* Request time */ - uint32 time_hi; - uint32 status; -} BWL_POST_PACKED_STRUCT msch_start_event_data_t; +typedef struct { + uint8 radar_type; /* one of RADAR_TYPE_XXX */ + uint16 min_pw; /* minimum pulse-width (usec * 20) */ + uint16 max_pw; /* maximum pulse-width (usec * 20) */ + uint16 min_pri; /* minimum pulse repetition interval (usec) */ + uint16 max_pri; /* maximum pulse repetition interval (usec) */ + uint16 subband; /* subband/frequency */ +} radar_detected_event_info_t; +typedef struct wl_event_radar_detect_data { -typedef BWL_PRE_PACKED_STRUCT struct msch_message_event_data { - uint32 time_lo; /* Request time */ - uint32 time_hi; - char message[1]; /* message */ -} BWL_POST_PACKED_STRUCT msch_message_event_data_t; + uint32 version; + uint16 current_chanspec; /* chanspec on which the radar is received */ + uint16 target_chanspec; /* Target chanspec after detection of radar on current_chanspec */ + radar_detected_event_info_t radar_info[2]; +} wl_event_radar_detect_data_t; -typedef BWL_PRE_PACKED_STRUCT struct msch_req_param_event_data { - uint16 flags; /* Describe various request properties */ - uint8 req_type; /* Describe start and end time flexiblilty */ - uint8 priority; /* Define the request priority */ - uint32 start_time_l; /* Requested start time offset in us unit */ - uint32 start_time_h; - uint32 duration; /* Requested duration in us unit */ - uint32 interval; /* Requested periodic interval in us
unit, - * 0 means non-periodic - */ - union { - uint32 dur_flex; /* MSCH_REG_DUR_FLEX, min_dur = duration - dur_flex */ - struct { - uint32 min_dur; /* min duration for traffic, maps to home_time */ - uint32 max_away_dur; /* max acceptable away dur, maps to home_away_time*/ - uint32 lo_prio_time_l; - uint32 lo_prio_time_h; - uint32 lo_prio_interval; /* repeated low priority interval */ - uint32 hi_prio_time_l; - uint32 hi_prio_time_h; - uint32 hi_prio_interval; /* repeated high priority interval */ - } bf; - } flex; -} BWL_POST_PACKED_STRUCT msch_req_param_event_data_t; -typedef BWL_PRE_PACKED_STRUCT struct msch_timeslot_event_data { - uint32 p_timeslot; - uint32 p_prev; - uint32 p_next; - uint32 timeslot_id; - uint32 pre_start_time_l; - uint32 pre_start_time_h; - uint32 end_time_l; - uint32 end_time_h; - uint32 sch_dur_l; - uint32 sch_dur_h; - uint32 p_chan_ctxt; - uint32 fire_time_l; - uint32 fire_time_h; - uint32 state; -} BWL_POST_PACKED_STRUCT msch_timeslot_event_data_t; +#define WL_EVENT_MODESW_VER_1 1 +#define WL_EVENT_MODESW_VER_CURRENT WL_EVENT_MODESW_VER_1 -typedef BWL_PRE_PACKED_STRUCT struct msch_req_timing_event_data { - uint32 p_req_timing; - uint32 p_prev; - uint32 p_next; - uint16 flags; - uint16 timeslot_ptr; - uint32 fire_time_l; - uint32 fire_time_h; - uint32 pre_start_time_l; - uint32 pre_start_time_h; - uint32 start_time_l; - uint32 start_time_h; - uint32 end_time_l; - uint32 end_time_h; - uint32 p_timeslot; -} BWL_POST_PACKED_STRUCT msch_req_timing_event_data_t; +#define WL_E_MODESW_FLAG_MASK_DEVICE 0x01u /* mask of device: belongs to local or peer */ +#define WL_E_MODESW_FLAG_MASK_FROM 0x02u /* mask of origin: firmware or user */ +#define WL_E_MODESW_FLAG_MASK_STATE 0x0Cu /* mask of state: modesw progress state */ -typedef BWL_PRE_PACKED_STRUCT struct msch_chan_ctxt_event_data { - uint32 p_chan_ctxt; - uint32 p_prev; - uint32 p_next; - uint16 chanspec; - uint16 bf_sch_pending; - uint32 bf_link_prev; - uint32 bf_link_next; - uint32 
onchan_time_l; - uint32 onchan_time_h; - uint32 actual_onchan_dur_l; - uint32 actual_onchan_dur_h; - uint32 pend_onchan_dur_l; - uint32 pend_onchan_dur_h; - uint16 req_entity_list_cnt; - uint16 req_entity_list_ptr; - uint16 bf_entity_list_cnt; - uint16 bf_entity_list_ptr; -} BWL_POST_PACKED_STRUCT msch_chan_ctxt_event_data_t; +#define WL_E_MODESW_FLAG_DEVICE_LOCAL 0x00u /* flag - device: info is about self/local */ +#define WL_E_MODESW_FLAG_DEVICE_PEER 0x01u /* flag - device: info is about peer */ -typedef BWL_PRE_PACKED_STRUCT struct msch_prio_event_data { - uint32 is_lo; - uint32 time_l; - uint32 time_h; - uint32 p_entity; -} BWL_POST_PACKED_STRUCT msch_prio_event_data_t; +#define WL_E_MODESW_FLAG_FROM_FIRMWARE 0x00u /* flag - from: request is from firmware */ +#define WL_E_MODESW_FLAG_FROM_USER 0x02u /* flag - from: request is from user/iov */ -typedef BWL_PRE_PACKED_STRUCT struct msch_req_entity_event_data { - uint32 p_req_entity; - uint32 req_hdl_link_prev; - uint32 req_hdl_link_next; - uint32 chan_ctxt_link_prev; - uint32 chan_ctxt_link_next; - uint32 rt_specific_link_prev; - uint32 rt_specific_link_next; - uint16 chanspec; - uint16 req_param_ptr; - uint16 cur_slot_ptr; - uint16 pend_slot_ptr; - msch_prio_event_data_t lo_event; - msch_prio_event_data_t hi_event; - uint32 ts_change_dur_flex; - uint16 ts_change_flags; - uint16 chan_ctxt_ptr; - uint32 p_chan_ctxt; - uint32 p_req_hdl; - uint32 hi_cnt_l; - uint32 hi_cnt_h; - uint32 bf_last_serv_time_l; - uint32 bf_last_serv_time_h; -} BWL_POST_PACKED_STRUCT msch_req_entity_event_data_t; +#define WL_E_MODESW_FLAG_STATE_REQUESTED 0x00u /* flag - state: mode switch request */ +#define WL_E_MODESW_FLAG_STATE_INITIATED 0x04u /* flag - state: switch initiated */ +#define WL_E_MODESW_FLAG_STATE_COMPLETE 0x08u /* flag - state: switch completed/success */ +#define WL_E_MODESW_FLAG_STATE_FAILURE 0x0Cu /* flag - state: failed to switch */ -typedef BWL_PRE_PACKED_STRUCT struct msch_req_handle_event_data { - uint32 
p_req_handle; - uint32 p_prev; - uint32 p_next; - uint32 cb_func; - uint32 cb_ctxt; - uint16 req_param_ptr; - uint16 req_entity_list_cnt; - uint16 req_entity_list_ptr; - uint16 chan_cnt; - uint16 schd_chan_cnt; - uint16 chanspec_list_cnt; - uint16 chanspec_list_ptr; - uint16 pad; -} BWL_POST_PACKED_STRUCT msch_req_handle_event_data_t; +/* Get sizeof *X including variable data's length where X is pointer to wl_event_mode_switch_t */ +#define WL_E_MODESW_SIZE(X) (sizeof(*(X)) + (X)->length) -typedef BWL_PRE_PACKED_STRUCT struct msch_profile_event_data { - uint32 time_lo; /* Request time */ - uint32 time_hi; - uint32 free_req_hdl_list; - uint32 free_req_entity_list; - uint32 free_chan_ctxt_list; - uint32 free_timeslot_list; - uint32 free_chanspec_list; - uint16 cur_msch_timeslot_ptr; - uint16 pad; - uint32 p_cur_msch_timeslot; - uint32 cur_armed_timeslot; - uint32 cur_armed_req_timing; - uint32 ts_id; - uint32 service_interval; - uint32 max_lo_prio_interval; - uint16 flex_list_cnt; - uint16 msch_chanspec_alloc_cnt; - uint16 msch_req_entity_alloc_cnt; - uint16 msch_req_hdl_alloc_cnt; - uint16 msch_chan_ctxt_alloc_cnt; - uint16 msch_timeslot_alloc_cnt; - uint16 msch_req_hdl_list_cnt; - uint16 msch_req_hdl_list_ptr; - uint16 msch_chan_ctxt_list_cnt; - uint16 msch_chan_ctxt_list_ptr; - uint16 msch_timeslot_list_cnt; - uint16 msch_timeslot_list_ptr; - uint16 msch_req_timing_list_cnt; - uint16 msch_req_timing_list_ptr; - uint16 msch_start_flex_list_cnt; - uint16 msch_start_flex_list_ptr; - uint16 msch_both_flex_list_cnt; - uint16 msch_both_flex_list_ptr; -} BWL_POST_PACKED_STRUCT msch_profile_event_data_t; +/* Get variable data's length where X is pointer to wl_event_mode_switch_t */ +#define WL_E_MODESW_DATA_SIZE(X) (((X)->length > sizeof(*(X))) ? 
((X)->length - sizeof(*(X))) : 0) -typedef BWL_PRE_PACKED_STRUCT struct msch_req_event_data { - uint32 time_lo; /* Request time */ - uint32 time_hi; - uint16 chanspec_cnt; - uint16 chanspec_ptr; - uint16 req_param_ptr; - uint16 pad; -} BWL_POST_PACKED_STRUCT msch_req_event_data_t; +#define WL_E_MODESW_REASON_UNKNOWN 0u /* reason: UNKNOWN */ +#define WL_E_MODESW_REASON_ACSD 1u /* reason: ACSD (based on events from FW */ +#define WL_E_MODESW_REASON_OBSS_DBS 2u /* reason: OBSS DBS (eg. on interference) */ +#define WL_E_MODESW_REASON_DFS 3u /* reason: DFS (eg. on subband radar) */ +#define WL_E_MODESW_REASON_DYN160 4u /* reason: DYN160 (160/2x2 - 80/4x4) */ -typedef BWL_PRE_PACKED_STRUCT struct msch_callback_event_data { - uint32 time_lo; /* Request time */ - uint32 time_hi; - uint16 type; /* callback type */ - uint16 chanspec; /* actual chanspec, may different with requested one */ - uint32 pre_start_time_l; /* time slot prestart time low 32bit */ - uint32 pre_start_time_h; /* time slot prestart time high 32bit */ - uint32 end_time_l; /* time slot end time low 32 bit */ - uint32 end_time_h; /* time slot end time high 32 bit */ - uint32 timeslot_id; /* unique time slot id */ -} BWL_POST_PACKED_STRUCT msch_callback_event_data_t; +/* event structure for WLC_E_MODE_SWITCH */ +typedef struct { + uint16 version; + uint16 length; /* size including 'data' field */ + uint16 opmode_from; + uint16 opmode_to; + uint32 flags; /* bit 0: peer(/local==0); + * bit 1: user(/firmware==0); + * bits 3,2: 00==requested, 01==initiated, + * 10==complete, 11==failure; + * rest: reserved + */ + uint16 reason; /* value 0: unknown, 1: ACSD, 2: OBSS_DBS, + * 3: DFS, 4: DYN160, rest: reserved + */ + uint16 data_offset; /* offset to 'data' from beginning of this struct. 
+ * fields may be added between data_offset and data + */ + /* ADD NEW FIELDS HERE */ + uint8 data[]; /* reason specific data; could be empty */ +} wl_event_mode_switch_t; + +/* when reason in WLC_E_MODE_SWITCH is DYN160, data will carry the following structure */ +typedef struct { + uint16 trigger; /* value 0: MU to SU, 1: SU to MU, 2: metric_dyn160, 3:re-/assoc, + * 4: disassoc, 5: rssi, 6: traffic, 7: interference, + * 8: chanim_stats + */ + struct ether_addr sta_addr; /* causal STA's MAC address when known */ + uint16 metric_160_80; /* latest dyn160 metric */ + uint8 nss; /* NSS of the STA */ + uint8 bw; /* BW of the STA */ + int8 rssi; /* RSSI of the STA */ + uint8 traffic; /* internal metric of traffic */ +} wl_event_mode_switch_dyn160; + +#define WL_EVENT_FBT_VER_1 1 + +#define WL_E_FBT_TYPE_FBT_OTD_AUTH 1 +#define WL_E_FBT_TYPE_FBT_OTA_AUTH 2 + +/* event structure for WLC_E_FBT */ +typedef struct { + uint16 version; + uint16 length; /* size including 'data' field */ + uint16 type; /* value 0: unknown, 1: FBT OTD Auth Req */ + uint16 data_offset; /* offset to 'data' from beginning of this struct. + * fields may be added between data_offset and data + */ + /* ADD NEW FIELDS HERE */ + uint8 data[]; /* type specific data; could be empty */ +} wl_event_fbt_t; + +/* TWT Setup Completion is designed to notify the user of TWT Setup process + * status. When 'status' field is value of BCME_OK, the user must check the + * 'setup_cmd' field value in 'wl_twt_sdesc_t' structure that at the end of + * the event data to see the response from the TWT Responding STA; when + * 'status' field is value of BCME_ERROR or non BCME_OK, user must not use + * anything from 'wl_twt_sdesc_t' structure as it is the TWT Requesting STA's + * own TWT parameter. 
+ */ + +#define WL_TWT_SETUP_CPLT_VER 0 + +/* TWT Setup Completion event data */ +typedef struct wl_twt_setup_cplt { + uint16 version; + uint16 length; /* the byte count of fields from 'dialog' onwards */ + uint8 dialog; /* the dialog token user supplied to the TWT setup API */ + uint8 pad[3]; + int32 status; + /* wl_twt_sdesc_t desc; - defined in wlioctl.h */ +} wl_twt_setup_cplt_t; + +#define WL_INVALID_IE_EVENT_VERSION 0 + +/* Invalid IE Event data */ +typedef struct wl_invalid_ie_event { + uint16 version; + uint16 len; /* Length of the invalid IE copy */ + uint16 type; /* Type/subtype of the frame which contains the invalid IE */ + uint16 error; /* error code of the wrong IE, defined in ie_error_code_t */ + uint8 ie[]; /* Variable length buffer for the invalid IE copy */ +} wl_invalid_ie_event_t; + +/* Fixed header portion of Invalid IE Event */ +typedef struct wl_invalid_ie_event_hdr { + uint16 version; + uint16 len; /* Length of the invalid IE copy */ + uint16 type; /* Type/subtype of the frame which contains the invalid IE */ + uint16 error; /* error code of the wrong IE, defined in ie_error_code_t */ + /* var length IE data follows */ +} wl_invalid_ie_event_hdr_t; + +typedef enum ie_error_code { + IE_ERROR_OUT_OF_RANGE = 0x01 +} ie_error_code_t; /* This marks the end of a packed structure section. 
*/ #include +/* reason of channel switch */ +typedef enum { + CHANSW_DFS = 10, /* channel switch due to DFS module */ + CHANSW_HOMECH_REQ = 14, /* channel switch due to HOME Channel Request */ + CHANSW_STA = 15, /* channel switch due to STA */ + CHANSW_SOFTAP = 16, /* channel switch due to SoftAP */ + CHANSW_AIBSS = 17, /* channel switch due to AIBSS */ + CHANSW_NAN = 18, /* channel switch due to NAN */ + CHANSW_NAN_DISC = 19, /* channel switch due to NAN Disc */ + CHANSW_NAN_SCHED = 20, /* channel switch due to NAN Sched */ + CHANSW_AWDL_AW = 21, /* channel switch due to AWDL aw */ + CHANSW_AWDL_SYNC = 22, /* channel switch due to AWDL sync */ + CHANSW_AWDL_CAL = 23, /* channel switch due to AWDL Cal */ + CHANSW_AWDL_PSF = 24, /* channel switch due to AWDL PSF */ + CHANSW_AWDL_OOB_AF = 25, /* channel switch due to AWDL OOB action frame */ + CHANSW_TDLS = 26, /* channel switch due to TDLS */ + CHANSW_PROXD = 27, /* channel switch due to PROXD */ + CHANSW_MAX_NUMBER = 28 /* max channel switch reason */ +} wl_chansw_reason_t; + +#define CHANSW_REASON(reason) (1 << reason) + +#define EVENT_AGGR_DATA_HDR_LEN 8 + +typedef struct event_aggr_data { + uint16 num_events; /* No of events aggregated */ + uint16 len; /* length of the aggregated events, excludes padding */ + uint8 pad[4]; /* Padding to make aggr event packet header aligned + * on 64-bit boundary, for a 64-bit host system.
+ */ + uint8 data[]; /* Aggregate buffer containing Events */ +} event_aggr_data_t; + + +/* WLC_E_TVPM_MITIGATION event structure version */ +#define WL_TVPM_MITIGATION_VERSION 1 + +/* TVPM mitigation on/off status bits */ +#define WL_TVPM_MITIGATION_TXDC 0x1 +#define WL_TVPM_MITIGATION_TXPOWER 0x2 +#define WL_TVPM_MITIGATION_TXCHAINS 0x4 + +/* Event structure for WLC_E_TVPM_MITIGATION */ +typedef struct wl_event_tvpm_mitigation { + uint16 version; /* structure version */ + uint16 length; /* length of this structure */ + uint32 timestamp_ms; /* millisecond timestamp */ + uint8 slice; /* slice number */ + uint8 pad; + uint16 on_off; /* mitigation status bits */ +} wl_event_tvpm_mitigation_t; + #endif /* _BCMEVENT_H_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bcmip.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmip.h old mode 100755 new mode 100644 similarity index 97% rename from drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bcmip.h rename to drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmip.h index eaa679c38948..e5bb0ac66c59 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bcmip.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmip.h @@ -1,7 +1,7 @@ /* * Fundamental constants relating to IP Protocol * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmip.h 518342 2014-12-01 23:21:41Z $ + * $Id: bcmip.h 700076 2017-05-17 14:42:22Z $ */ #ifndef _bcmip_h_ @@ -52,7 +52,8 @@ #define IP_PROT_IGMP 0x2 /* IGMP protocol */ #define IP_PROT_TCP 0x6 /* TCP protocol */ #define IP_PROT_UDP 0x11 /* UDP protocol type */ -#define IP_PROT_ICMP6 0x3a /* ICMPv6 protocol type */ +#define IP_PROT_GRE 0x2f /* GRE protocol type */ +#define 
IP_PROT_ICMP6 0x3a /* ICMPv6 protocol type */ /* IPV4 field offsets */ #define IPV4_VER_HL_OFFSET 0 /* version and ihl byte offset */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bcmipv6.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmipv6.h old mode 100755 new mode 100644 similarity index 97% rename from drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bcmipv6.h rename to drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmipv6.h index fbab037b2f32..84e2b693b70a --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bcmipv6.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmipv6.h @@ -1,7 +1,7 @@ /* * Fundamental constants relating to Neighbor Discovery Protocol * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmipv6.h 518342 2014-12-01 23:21:41Z $ + * $Id: bcmipv6.h 700076 2017-05-17 14:42:22Z $ */ #ifndef _bcmipv6_h_ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmmsgbuf.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmmsgbuf.h index ab1375ea854d..08d66930db74 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmmsgbuf.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmmsgbuf.h @@ -4,7 +4,7 @@ * * Definitions subject to change without notice. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -27,12 +27,12 @@ * * <> * - * $Id: bcmmsgbuf.h 541060 2015-03-13 23:28:01Z $ + * $Id: bcmmsgbuf.h 676811 2016-12-24 20:48:46Z $ */ #ifndef _bcmmsgbuf_h_ #define _bcmmsgbuf_h_ -#include +#include #include #include @@ -47,9 +47,16 @@ #define H2DRING_TXPOST_ITEMSIZE 48 #define H2DRING_RXPOST_ITEMSIZE 32 #define H2DRING_CTRL_SUB_ITEMSIZE 40 -#define D2HRING_TXCMPLT_ITEMSIZE 16 -#define D2HRING_RXCMPLT_ITEMSIZE 32 + +#define D2HRING_TXCMPLT_ITEMSIZE 24 +#define D2HRING_RXCMPLT_ITEMSIZE 40 + +#define D2HRING_TXCMPLT_ITEMSIZE_PREREV7 16 +#define D2HRING_RXCMPLT_ITEMSIZE_PREREV7 32 + #define D2HRING_CTRL_CMPLT_ITEMSIZE 24 +#define H2DRING_INFO_BUFPOST_ITEMSIZE H2DRING_CTRL_SUB_ITEMSIZE +#define D2HRING_INFO_BUFCMPLT_ITEMSIZE D2HRING_CTRL_CMPLT_ITEMSIZE #define H2DRING_TXPOST_MAX_ITEM 512 #define H2DRING_RXPOST_MAX_ITEM 512 @@ -57,6 +64,9 @@ #define D2HRING_TXCMPLT_MAX_ITEM 1024 #define D2HRING_RXCMPLT_MAX_ITEM 512 +#define H2DRING_DYNAMIC_INFO_MAX_ITEM 32 +#define D2HRING_DYNAMIC_INFO_MAX_ITEM 32 + #define D2HRING_CTRL_CMPLT_MAX_ITEM 64 enum { @@ -73,6 +83,7 @@ enum { }; #define MESSAGE_PAYLOAD(a) (a & MSG_TYPE_INTERNAL_USE_START) ? 
TRUE : FALSE +#define PCIEDEV_FIRMWARE_TSINFO 0x1 #ifdef PCIE_API_REV1 @@ -135,12 +146,16 @@ typedef struct cmn_msg_hdr { /** message type */ typedef enum bcmpcie_msgtype { - MSG_TYPE_GEN_STATUS = 0x1, + MSG_TYPE_GEN_STATUS = 0x1, MSG_TYPE_RING_STATUS = 0x2, MSG_TYPE_FLOW_RING_CREATE = 0x3, MSG_TYPE_FLOW_RING_CREATE_CMPLT = 0x4, + /* Enum value as copied from BISON 7.15: new generic message */ + MSG_TYPE_RING_CREATE_CMPLT = 0x4, MSG_TYPE_FLOW_RING_DELETE = 0x5, MSG_TYPE_FLOW_RING_DELETE_CMPLT = 0x6, + /* Enum value as copied from BISON 7.15: new generic message */ + MSG_TYPE_RING_DELETE_CMPLT = 0x6, MSG_TYPE_FLOW_RING_FLUSH = 0x7, MSG_TYPE_FLOW_RING_FLUSH_CMPLT = 0x8, MSG_TYPE_IOCTLPTR_REQ = 0x9, @@ -171,7 +186,10 @@ typedef enum bcmpcie_msgtype { MSG_TYPE_D2H_RING_CONFIG_CMPLT = 0x22, MSG_TYPE_H2D_MAILBOX_DATA = 0x23, MSG_TYPE_D2H_MAILBOX_DATA = 0x24, - + MSG_TYPE_TIMSTAMP_BUFPOST = 0x25, + MSG_TYPE_HOSTTIMSTAMP = 0x26, + MSG_TYPE_HOSTTIMSTAMP_CMPLT = 0x27, + MSG_TYPE_FIRMWARE_TIMESTAMP = 0x28, MSG_TYPE_API_MAX_RSVD = 0x3F } bcmpcie_msg_type_t; @@ -183,7 +201,9 @@ typedef enum bcmpcie_msgtype_int { MSG_TYPE_HOST_FETCH = 0x44, MSG_TYPE_LPBK_DMAXFER_PYLD = 0x45, MSG_TYPE_TXMETADATA_PYLD = 0x46, - MSG_TYPE_INDX_UPDATE = 0x47 + MSG_TYPE_INDX_UPDATE = 0x47, + MSG_TYPE_INFO_PYLD = 0x48, + MSG_TYPE_TS_EVENT_PYLD = 0x49 } bcmpcie_msgtype_int_t; typedef enum bcmpcie_msgtype_u { @@ -211,6 +231,40 @@ typedef struct bcmpcie_soft_doorbell { uint16 msecs; /* interrupt coalescing: timeout in millisecs */ } bcmpcie_soft_doorbell_t; +/** + * D2H interrupt using MSI instead of INTX + * Host configures MSI vector offset for each D2H interrupt + * + * D2H_RING_CONFIG_SUBTYPE_MSI_DOORBELL + */ +typedef enum bcmpcie_msi_intr_idx { + MSI_INTR_IDX_CTRL_CMPL_RING, + MSI_INTR_IDX_TXP_CMPL_RING, + MSI_INTR_IDX_RXP_CMPL_RING, + MSI_INTR_IDX_MAILBOX, + MSI_INTR_IDX_MAX +} bcmpcie_msi_intr_idx_t; + +typedef enum bcmpcie_msi_offset_type { + BCMPCIE_D2H_MSI_OFFSET_MB0 = 2, + 
BCMPCIE_D2H_MSI_OFFSET_MB1, + BCMPCIE_D2H_MSI_OFFSET_DB0, + BCMPCIE_D2H_MSI_OFFSET_DB1, + BCMPCIE_D2H_MSI_OFFSET_MAX +} bcmpcie_msi_offset_type_t; + +typedef struct bcmpcie_msi_offset { + uint16 intr_idx; /* interrupt index */ + uint16 msi_offset; /* msi vector offset */ +} bcmpcie_msi_offset_t; + +typedef struct bcmpcie_msi_offset_config { + uint32 len; + bcmpcie_msi_offset_t bcmpcie_msi_offset[MSI_INTR_IDX_MAX]; +} bcmpcie_msi_offset_config_t; + +#define BCMPCIE_D2H_MSI_OFFSET_DEFAULT BCMPCIE_D2H_MSI_OFFSET_DB1 + /* if_id */ #define BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT 5 @@ -226,7 +280,7 @@ typedef struct bcmpcie_soft_doorbell { #define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX 0x1 #define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX_INTR 0x2 #define BCMPCIE_CMNHDR_FLAGS_PHASE_BIT 0x80 - +#define BCMPCIE_CMNHDR_PHASE_BIT_INIT 0x80 /* IOCTL request message */ typedef struct ioctl_req_msg { @@ -261,6 +315,35 @@ typedef struct ioctl_resp_evt_buf_post_msg { uint32 rsvd[4]; } ioctl_resp_evt_buf_post_msg_t; +/* buffer post messages for device to use to return dbg buffers */ +typedef ioctl_resp_evt_buf_post_msg_t info_buf_post_msg_t; + + +/* An infobuf host buffer starts with a 32 bit (LE) version. */ +#define PCIE_INFOBUF_V1 1 +/* Infobuf v1 type MSGTRACE's data is exactly the same as the MSGTRACE data that + * is wrapped previously/also in a WLC_E_TRACE event. See structure + * msgrace_hdr_t in msgtrace.h. +*/ +#define PCIE_INFOBUF_V1_TYPE_MSGTRACE 1 + +/* Infobuf v1 type LOGTRACE data is exactly the same as the LOGTRACE data that + * is wrapped previously/also in a WLC_E_TRACE event. See structure + * msgrace_hdr_t in msgtrace.h. (The only difference between a MSGTRACE + * and a LOGTRACE is the "trace type" field.) +*/ +#define PCIE_INFOBUF_V1_TYPE_LOGTRACE 2 + +/* An infobuf version 1 host buffer has a single TLV. The information on the + * version 1 types follow this structure definition. 
(int's LE) +*/ +typedef struct info_buf_payload_hdr_s { + uint16 type; + uint16 length; +} info_buf_payload_hdr_t; + +#define PCIE_DMA_XFER_FLG_D11_LPBK_MASK 0x00000001 +#define PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT 0 typedef struct pcie_dma_xfer_params { /** common message header */ @@ -278,7 +361,8 @@ typedef struct pcie_dma_xfer_params { uint32 srcdelay; /** delay before doing the dest txfer */ uint32 destdelay; - uint32 rsvd; + uint8 rsvd[3]; + uint8 flags; } pcie_dma_xfer_params_t; /** Complete msgbuf hdr for flow ring update from host to dongle */ @@ -290,7 +374,10 @@ typedef struct tx_flowring_create_request { uint8 if_flags; uint16 flow_ring_id; uint8 tc; - uint8 priority; + /* priority_ifrmmask is to define core mask in ifrm mode. + * currently it is not used for priority. so uses solely for ifrm mask + */ + uint8 priority_ifrmmask; uint16 int_vector; uint16 max_items; uint16 len_item; @@ -327,9 +414,74 @@ typedef struct ring_config_req { uint32 data[6]; /** D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL */ bcmpcie_soft_doorbell_t soft_doorbell; + /** D2H_RING_CONFIG_SUBTYPE_MSI_DOORBELL */ + bcmpcie_msi_offset_config_t msi_offset; }; } ring_config_req_t; +/* data structure to use to create on the fly d2h rings */ +typedef struct d2h_ring_create_req { + cmn_msg_hdr_t msg; + uint16 ring_id; + uint16 ring_type; + uint32 flags; + bcm_addr64_t ring_ptr; + uint16 max_items; + uint16 len_item; + uint32 rsvd[3]; +} d2h_ring_create_req_t; + +/* data structure to use to create on the fly h2d rings */ +#define MAX_COMPLETION_RING_IDS_ASSOCIATED 4 +typedef struct h2d_ring_create_req { + cmn_msg_hdr_t msg; + uint16 ring_id; + uint8 ring_type; + uint8 n_completion_ids; + uint32 flags; + bcm_addr64_t ring_ptr; + uint16 max_items; + uint16 len_item; + uint16 completion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED]; + uint32 rsvd; +} h2d_ring_create_req_t; + +typedef struct d2h_ring_config_req { + cmn_msg_hdr_t msg; + uint16 d2h_ring_config_subtype; + uint16 d2h_ring_id; + uint32 
d2h_ring_config_data[4]; + uint32 rsvd[3]; +} d2h_ring_config_req_t; + +typedef struct h2d_ring_config_req { + cmn_msg_hdr_t msg; + uint16 h2d_ring_config_subtype; + uint16 h2d_ring_id; + uint32 h2d_ring_config_data; + uint32 rsvd[6]; +} h2d_ring_config_req_t; + +typedef struct h2d_mailbox_data { + cmn_msg_hdr_t msg; + uint32 mail_box_data; + uint32 rsvd[7]; +} h2d_mailbox_data_t; +typedef struct host_timestamp_msg { + cmn_msg_hdr_t msg; + uint16 xt_id; /* transaction ID */ + uint16 input_data_len; /* data len at the host_buf_addr, data in TLVs */ + uint16 seqnum; /* number of times host captured the timestamp */ + uint16 rsvd; + /* always align on 8 byte boundary */ + bcm_addr64_t host_buf_addr; + /* rsvd */ + uint32 rsvd1[4]; +} host_timestamp_msg_t; + +/* buffer post message for timestamp events MSG_TYPE_TIMSTAMP_BUFPOST */ +typedef ioctl_resp_evt_buf_post_msg_t ts_buf_post_msg_t; + typedef union ctrl_submit_item { ioctl_req_msg_t ioctl_req; ioctl_resp_evt_buf_post_msg_t resp_buf_post; @@ -338,20 +490,65 @@ typedef union ctrl_submit_item { tx_flowring_delete_request_t flow_delete; tx_flowring_flush_request_t flow_flush; ring_config_req_t ring_config_req; + d2h_ring_create_req_t d2h_create; + h2d_ring_create_req_t h2d_create; + d2h_ring_config_req_t d2h_config; + h2d_ring_config_req_t h2d_config; + h2d_mailbox_data_t h2d_mailbox_data; + host_timestamp_msg_t host_ts; + ts_buf_post_msg_t ts_buf_post; unsigned char check[H2DRING_CTRL_SUB_ITEMSIZE]; } ctrl_submit_item_t; +typedef struct info_ring_submit_item { + info_buf_post_msg_t info_buf_post; + unsigned char check[H2DRING_INFO_BUFPOST_ITEMSIZE]; +} info_sumbit_item_t; + /** Control Completion messages (20 bytes) */ typedef struct compl_msg_hdr { /** status for the completion */ int16 status; /** submisison flow ring id which generated this status */ - uint16 flow_ring_id; + union { + uint16 ring_id; + uint16 flow_ring_id; + }; } compl_msg_hdr_t; /** XOR checksum or a magic number to audit DMA done */ typedef 
uint32 dma_done_t; +#define MAX_CLKSRC_ID 0xF + +typedef struct ts_timestamp_srcid { + union { + uint32 ts_low; /* time stamp low 32 bits */ + uint32 reserved; /* If timestamp not used */ + }; + union { + uint32 ts_high; /* time stamp high 28 bits */ + union { + uint32 ts_high_ext :28; /* time stamp high 28 bits */ + uint32 clk_id_ext :3; /* clock ID source */ + uint32 phase :1; /* Phase bit */ + dma_done_t marker_ext; + }; + }; +} ts_timestamp_srcid_t; + +typedef ts_timestamp_srcid_t ipc_timestamp_t; + +typedef struct ts_timestamp { + uint32 low; + uint32 high; +} ts_timestamp_t; + +typedef ts_timestamp_t tick_count_64_t; +typedef ts_timestamp_t ts_timestamp_ns_64_t; +typedef ts_timestamp_t ts_correction_m_t; +typedef ts_timestamp_t ts_correction_b_t; + /* completion header status codes */ #define BCMPCIE_SUCCESS 0 #define BCMPCIE_NOTFOUND 1 @@ -365,6 +562,11 @@ typedef uint32 dma_done_t; #define BCMPCIE_NO_IOCTLRESP_BUF 9 #define BCMPCIE_MAX_IOCTLRESP_BUF 10 #define BCMPCIE_MAX_EVENT_BUF 11 +#define BCMPCIE_BAD_PHASE 12 +#define BCMPCIE_INVALID_CPL_RINGID 13 +#define BCMPCIE_RING_TYPE_INVALID 14 +#define BCMPCIE_NO_TS_EVENT_BUF 15 +#define BCMPCIE_MAX_TS_EVENT_BUF 16 /** IOCTL completion response */ typedef struct ioctl_compl_resp_msg { @@ -446,13 +648,17 @@ typedef struct pcie_ring_status { dma_done_t marker; } pcie_ring_status_t; -typedef struct tx_flowring_create_response { - cmn_msg_hdr_t msg; +typedef struct ring_create_response { + cmn_msg_hdr_t cmn_hdr; compl_msg_hdr_t cmplt; uint32 rsvd[2]; /** XOR checksum or a magic number to audit DMA done */ dma_done_t marker; -} tx_flowring_create_response_t; +} ring_create_response_t; + +typedef ring_create_response_t tx_flowring_create_response_t; +typedef ring_create_response_t h2d_ring_create_response_t; +typedef ring_create_response_t d2h_ring_create_response_t; typedef struct tx_flowring_delete_response { cmn_msg_hdr_t msg; @@ -491,6 +697,57 @@ typedef struct ring_config_resp { dma_done_t marker; } 
ring_config_resp_t; +typedef struct d2h_mailbox_data { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint32 d2h_mailbox_data; + uint32 rsvd[1]; + /* XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} d2h_mailbox_data_t; + +/* dbg buf completion msg: send from device to host */ +typedef struct info_buf_resp { + /* common message header */ + cmn_msg_hdr_t cmn_hdr; + /* completion message header */ + compl_msg_hdr_t compl_hdr; + /* event data len valid with the event buffer */ + uint16 info_data_len; + /* sequence number */ + uint16 seqnum; + /* rsvd */ + uint32 rsvd; + /* XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} info_buf_resp_t; + +typedef struct info_ring_cpl_item { + info_buf_resp_t info_buf_post; + unsigned char check[D2HRING_INFO_BUFCMPLT_ITEMSIZE]; +} info_cpl_item_t; + +typedef struct host_timestamp_msg_cpl { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint16 xt_id; /* transaction ID */ + uint16 rsvd; + uint32 rsvd1; + /* XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} host_timestamp_msg_cpl_t; + +typedef struct fw_timestamp_event_msg { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + /* fw captures time stamp info and passed that to host in TLVs */ + uint16 buf_len; /* length of the time stamp data copied in host buf */ + uint16 seqnum; /* number of times fw captured time stamp */ + uint32 rsvd; + /* XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} fw_timestamp_event_msg_t; + typedef union ctrl_completion_item { ioctl_comp_resp_msg_t ioctl_resp; wlevent_req_msg_t event; @@ -503,7 +760,13 @@ typedef union ctrl_completion_item { tx_flowring_flush_response_t txfl_flush_resp; ctrl_compl_msg_t ctrl_compl; ring_config_resp_t ring_config_resp; - unsigned char check[D2HRING_CTRL_CMPLT_ITEMSIZE]; + d2h_mailbox_data_t d2h_mailbox_data; + info_buf_resp_t dbg_resp; + h2d_ring_create_response_t h2d_ring_create_resp; + d2h_ring_create_response_t 
d2h_ring_create_resp; + host_timestamp_msg_cpl_t host_ts_cpl; + fw_timestamp_event_msg_t fw_ts_event; + unsigned char ctrl_response[D2HRING_CTRL_CMPLT_ITEMSIZE]; } ctrl_completion_item_t; /** H2D Rxpost ring work items */ @@ -527,8 +790,7 @@ typedef union rxbuf_submit_item { unsigned char check[H2DRING_RXPOST_ITEMSIZE]; } rxbuf_submit_item_t; - -/** D2H Rxcompletion ring work items */ +/* D2H Rxcompletion ring work items for IPC rev7 */ typedef struct host_rxbuf_cmpl { /** common message header */ cmn_msg_hdr_t cmn_hdr; @@ -546,7 +808,10 @@ typedef struct host_rxbuf_cmpl { uint32 rx_status_0; uint32 rx_status_1; /** XOR checksum or a magic number to audit DMA done */ + /* This is for rev6 only. For IPC rev7, this is a reserved field */ dma_done_t marker; + /* timestamp */ + ipc_timestamp_t ts; } host_rxbuf_cmpl_t; typedef union rxbuf_complete_item { @@ -586,6 +851,12 @@ typedef struct host_txbuf_post { #define BCMPCIE_PKT_FLAGS_PRIO_SHIFT 5 #define BCMPCIE_PKT_FLAGS_PRIO_MASK (7 << BCMPCIE_PKT_FLAGS_PRIO_SHIFT) +#define BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU 0x00 +#define BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT 0x01 +#define BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT 0x02 +#define BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT 0x03 +#define BCMPCIE_PKT_FLAGS_MONITOR_SHIFT 8 +#define BCMPCIE_PKT_FLAGS_MONITOR_MASK (3 << BCMPCIE_PKT_FLAGS_MONITOR_SHIFT) /* These are added to fix up compile issues */ #define BCMPCIE_TXPOST_FLAGS_FRAME_802_3 BCMPCIE_PKT_FLAGS_FRAME_802_3 @@ -593,13 +864,14 @@ typedef struct host_txbuf_post { #define BCMPCIE_TXPOST_FLAGS_PRIO_SHIFT BCMPCIE_PKT_FLAGS_PRIO_SHIFT #define BCMPCIE_TXPOST_FLAGS_PRIO_MASK BCMPCIE_PKT_FLAGS_PRIO_MASK -/** H2D Txpost ring work items */ + +/* H2D Txpost ring work items */ typedef union txbuf_submit_item { host_txbuf_post_t txpost; unsigned char check[H2DRING_TXPOST_ITEMSIZE]; } txbuf_submit_item_t; -/** D2H Txcompletion ring work items */ +/* D2H Txcompletion ring work items - extended for IOC rev7 */ typedef struct host_txbuf_cmpl { /** 
common message header */ cmn_msg_hdr_t cmn_hdr; @@ -613,8 +885,12 @@ typedef struct host_txbuf_cmpl { uint16 tx_status; }; /** XOR checksum or a magic number to audit DMA done */ + /* This is for rev6 only. For IPC rev7, this is not used */ dma_done_t marker; }; + /* timestamp */ + ipc_timestamp_t ts; + } host_txbuf_cmpl_t; typedef union txbuf_complete_item { @@ -834,30 +1110,93 @@ enum { #define MAX_SUSPEND_REQ 15 typedef struct tx_idle_flowring_suspend_request { - cmn_msg_hdr_t msg; - uint16 ring_id[MAX_SUSPEND_REQ]; /**< ring Id's */ - uint16 num; /**< number of flowid's to suspend */ + cmn_msg_hdr_t msg; + uint16 ring_id[MAX_SUSPEND_REQ]; /* ring Id's */ + uint16 num; /* number of flowid's to suspend */ } tx_idle_flowring_suspend_request_t; typedef struct tx_idle_flowring_suspend_response { - cmn_msg_hdr_t msg; - compl_msg_hdr_t cmplt; + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; uint32 rsvd[2]; dma_done_t marker; } tx_idle_flowring_suspend_response_t; typedef struct tx_idle_flowring_resume_request { - cmn_msg_hdr_t msg; + cmn_msg_hdr_t msg; uint16 flow_ring_id; uint16 reason; uint32 rsvd[7]; } tx_idle_flowring_resume_request_t; typedef struct tx_idle_flowring_resume_response { - cmn_msg_hdr_t msg; - compl_msg_hdr_t cmplt; + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; uint32 rsvd[2]; dma_done_t marker; } tx_idle_flowring_resume_response_t; +/* timesync related additions */ + +typedef struct _bcm_xtlv { + uint16 id; /* TLV idenitifier */ + uint16 len; /* TLV length in bytes */ +} _bcm_xtlv_t; + +#define BCMMSGBUF_FW_CLOCK_INFO_TAG 0 +#define BCMMSGBUF_HOST_CLOCK_INFO_TAG 1 +#define BCMMSGBUF_HOST_CLOCK_SELECT_TAG 2 +#define BCMMSGBUF_D2H_CLOCK_CORRECTION_TAG 3 +#define BCMMSGBUF_HOST_TIMESTAMPING_CONFIG_TAG 4 +#define BCMMSGBUF_MAX_TSYNC_TAG 5 + +/* Flags in fw clock info TLV */ +#define CAP_DEVICE_TS (1 << 0) +#define CAP_CORRECTED_TS (1 << 1) +#define TS_CLK_ACTIVE (1 << 2) + +typedef struct ts_fw_clock_info { + _bcm_xtlv_t xtlv; /* 
BCMMSGBUF_FW_CLOCK_INFO_TAG */ + ts_timestamp_srcid_t ts; /* tick count */ + uchar clk_src[4]; /* clock source acronym ILP/AVB/TSF */ + uint32 nominal_clock_freq; + uint32 reset_cnt; + uint8 flags; + uint8 rsvd[3]; +} ts_fw_clock_info_t; + +typedef struct ts_host_clock_info { + _bcm_xtlv_t xtlv; /* BCMMSGBUF_HOST_CLOCK_INFO_TAG */ + tick_count_64_t ticks; /* 64 bit host tick counter */ + ts_timestamp_ns_64_t ns; /* 64 bit host time in nano seconds */ +} ts_host_clock_info_t; + +typedef struct ts_host_clock_sel { + _bcm_xtlv_t xtlv; /* BCMMSGBUF_HOST_CLOCK_SELECT_TAG */ + uint32 seqnum; /* number of times GPIO time sync toggled */ + uint8 min_clk_idx; /* clock idenitifer configured for packet tiem stamping */ + uint8 max_clk_idx; /* clock idenitifer configured for packet tiem stamping */ + uint16 rsvd[1]; +} ts_host_clock_sel_t; + +typedef struct ts_d2h_clock_correction { + _bcm_xtlv_t xtlv; /* BCMMSGBUF_HOST_CLOCK_INFO_TAG */ + uint8 clk_id; /* clock source in the device */ + uint8 rsvd[3]; + ts_correction_m_t m; /* y = 'm' x + b */ + ts_correction_b_t b; /* y = 'm' x + 'c' */ +} ts_d2h_clock_correction_t; + +typedef struct ts_host_timestamping_config { + _bcm_xtlv_t xtlv; /* BCMMSGBUF_HOST_TIMESTAMPING_CONFIG_TAG */ + /* time period to capture the device time stamp and toggle WLAN_TIME_SYNC_GPIO */ + uint16 period_ms; + uint8 flags; + uint8 rsvd; + uint32 reset_cnt; +} ts_host_timestamping_config_t; + +/* Flags in host timestamping config TLV */ +#define FLAG_HOST_RESET (1 << 0) + #endif /* _bcmmsgbuf_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmnvram.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmnvram.h index e3ba9b4166fb..15b58568a1a5 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmnvram.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmnvram.h @@ -1,7 +1,7 @@ /* * NVRAM variable manipulation * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 
1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmnvram.h 514727 2014-11-12 03:02:48Z $ + * $Id: bcmnvram.h 613043 2016-01-16 00:24:13Z $ */ #ifndef _bcmnvram_h_ @@ -136,6 +136,15 @@ static INLINE int nvram_match(const char *name, const char *match) { const char *value = nvram_get(name); + + /* In nvramstubs.c builds, nvram_get() is defined as returning zero, + * so the return line below never executes the strcmp(), + * resulting in 'match' being an unused parameter. + * Make a ref to 'match' to quiet the compiler warning. + */ + + BCM_REFERENCE(match); + return (value && !strcmp(value, match)); } @@ -151,6 +160,7 @@ static INLINE int nvram_match_bitflag(const char *name, const int bit, const char *match) { const char *value = nvram_get_bitflag(name, bit); + BCM_REFERENCE(match); return (value && !strcmp(value, match)); } @@ -165,6 +175,15 @@ static INLINE int nvram_invmatch(const char *name, const char *invmatch) { const char *value = nvram_get(name); + + /* In nvramstubs.c builds, nvram_get() is defined as returning zero, + * so the return line below never executes the strcmp(), + * resulting in 'invmatch' being an unused parameter. + * Make a ref to 'invmatch' to quiet the compiler warning. 
+ */ + + BCM_REFERENCE(invmatch); + return (value && strcmp(value, invmatch)); } diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmpcie.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmpcie.h index 0c15055a0353..114924cc9fec 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmpcie.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmpcie.h @@ -3,7 +3,7 @@ * Software-specific definitions shared between device and host side * Explains the shared area between host and dongle * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -26,7 +26,7 @@ * * <> * - * $Id: bcmpcie.h 604490 2015-12-07 15:48:45Z $ + * $Id: bcmpcie.h 678914 2017-01-11 15:34:26Z $ */ @@ -50,11 +50,14 @@ typedef struct { #define BCMPCIE_MAX_TX_FLOWS 40 #endif /* ! BCMPCIE_MAX_TX_FLOWS */ +#define PCIE_SHARED_VERSION_7 0x00007 +#define PCIE_SHARED_VERSION_6 0x00006 /* rev6 is compatible with rev 5 */ +#define PCIE_SHARED_VERSION_5 0x00005 /* rev6 is compatible with rev 5 */ /** * Feature flags enabled in dongle. Advertised by dongle to DHD via the PCIe Shared structure that * is located in device memory. 
*/ -#define PCIE_SHARED_VERSION 0x00005 +#define PCIE_SHARED_VERSION PCIE_SHARED_VERSION_7 #define PCIE_SHARED_VERSION_MASK 0x000FF #define PCIE_SHARED_ASSERT_BUILT 0x00100 #define PCIE_SHARED_ASSERT 0x00200 @@ -62,10 +65,14 @@ typedef struct { #define PCIE_SHARED_IN_BRPT 0x00800 #define PCIE_SHARED_SET_BRPT 0x01000 #define PCIE_SHARED_PENDING_BRPT 0x02000 -#define PCIE_SHARED_TXPUSH_SPRT 0x04000 +/* BCMPCIE_SUPPORT_TX_PUSH_RING 0x04000 obsolete */ #define PCIE_SHARED_EVT_SEQNUM 0x08000 #define PCIE_SHARED_DMA_INDEX 0x10000 +/* WAR: D11 txstatus through unused status field of PCIe completion header */ +#define PCIE_SHARED_D2H_D11_TX_STATUS 0x40000000 /* using flags2 in shared area */ +#define PCIE_SHARED_H2D_D11_TX_STATUS 0x80000000 /* using flags2 in shared area */ + /** * There are host types where a device interrupt can 'race ahead' of data written by the device into * host memory. The dongle can avoid this condition using a variety of techniques (read barrier, @@ -81,6 +88,45 @@ typedef struct { #define PCIE_SHARED_IDLE_FLOW_RING 0x80000 #define PCIE_SHARED_2BYTE_INDICES 0x100000 +#define PCIE_SHARED2_EXTENDED_TRAP_DATA 0x00000001 /* using flags2 in shared area */ + +/* dongle supports fatal buf log collection */ +#define PCIE_SHARED_FATAL_LOGBUG_VALID 0x200000 + +/* Implicit DMA with corerev 19 and after */ +#define PCIE_SHARED_IDMA 0x400000 + +/* MSI support */ +#define PCIE_SHARED_D2H_MSI_MULTI_MSG 0x800000 + +/* IFRM with corerev 19 and after */ +#define PCIE_SHARED_IFRM 0x1000000 + +/** + * From Rev6 and above, suspend/resume can be done using two handshake methods. + * 1. Using ctrl post/ctrl cmpl messages (Default rev6) + * 2. Using Mailbox data (old method as used in rev5) + * This shared flag indicates whether to overide rev6 default method and use mailbox for + * suspend/resume. 
+ */ +#define PCIE_SHARED_USE_MAILBOX 0x2000000 + +/* Firmware compiled for mfgbuild purposes */ +#define PCIE_SHARED_MFGBUILD_FW 0x4000000 + +/* Firmware could use DB0 value as host timestamp */ +#define PCIE_SHARED_TIMESTAMP_DB0 0x8000000 +/* Firmware could use Hostready (IPC rev7) */ +#define PCIE_SHARED_HOSTRDY_SUPPORT 0x10000000 + +/* When set, Firmwar does not support OOB Device Wake based DS protocol */ +#define PCIE_SHARED_NO_OOB_DW 0x20000000 + +/* When set, Firmwar supports Inband DS protocol */ +#define PCIE_SHARED_INBAND_DS 0x40000000 + +/* Implicit DMA WAR for 4347B0 PCIe memory retention */ +#define PCIE_SHARED_IDMA_RETENTION_DS 0x80000000 #define PCIE_SHARED_D2H_MAGIC 0xFEDCBA09 #define PCIE_SHARED_H2D_MAGIC 0x12345678 @@ -105,6 +151,19 @@ typedef struct { #define BCMPCIE_H2D_MSGRINGS(max_tx_flows) \ (BCMPCIE_H2D_COMMON_MSGRINGS + (max_tx_flows)) +/* different ring types */ +#define BCMPCIE_H2D_RING_TYPE_CTRL_SUBMIT 0x1 +#define BCMPCIE_H2D_RING_TYPE_TXFLOW_RING 0x2 +#define BCMPCIE_H2D_RING_TYPE_RXBUFPOST 0x3 +#define BCMPCIE_H2D_RING_TYPE_TXSUBMIT 0x4 +#define BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT 0x5 + +#define BCMPCIE_D2H_RING_TYPE_CTRL_CPL 0x1 +#define BCMPCIE_D2H_RING_TYPE_TX_CPL 0x2 +#define BCMPCIE_D2H_RING_TYPE_RX_CPL 0x3 +#define BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL 0x4 +#define BCMPCIE_D2H_RING_TYPE_AC_RX_COMPLETE 0x5 + /** * H2D and D2H, WR and RD index, are maintained in the following arrays: * - Array of all H2D WR Indices @@ -188,7 +247,7 @@ typedef struct ring_mem { /** - * Per flow ring, information is maintained in device memory, e.g. at what address the ringmem and + * Per flow ring, information is maintained in device memory, eg at what address the ringmem and * ringstate are located. The flow ring itself can be instantiated in either host or device memory. * * Perhaps this type should be renamed to make clear that it resides in device memory only. 
@@ -216,8 +275,12 @@ typedef struct ring_info { sh_addr_t d2h_w_idx_hostaddr; /* Array of all D2H ring's WR indices */ sh_addr_t d2h_r_idx_hostaddr; /* Array of all D2H ring's RD indices */ - uint16 max_sub_queues; /* maximum number of H2D rings: common + flow */ - uint16 rsvd; + uint16 max_tx_flowrings; /* maximum number of H2D rings: common + flow */ + uint16 max_submission_queues; /* maximum number of H2D rings: common + flow */ + uint16 max_completion_rings; /* maximum number of H2D rings: common + flow */ + uint16 max_vdevs; /* max number of virtual interfaces supported */ + + sh_addr_t ifrm_w_idx_hostaddr; /* Array of all H2D ring's WR indices for IFRM */ } ring_info_t; /** @@ -260,12 +323,41 @@ typedef struct { uint32 device_rings_stsblk_len; sh_addr_t device_rings_stsblk; - uint32 buzzz; /* BUZZZ state format strings and trace buffer */ + uint32 buzz_dbg_ptr; /* BUZZZ state format strings and trace buffer */ + /* rev6 compatible changes */ + uint32 flags2; + uint32 host_cap; + + /* location in the host address space to write trap indication. + * At this point for the current rev of the spec, firmware will + * support only indications to 32 bit host addresses. 
+ */ + sh_addr_t host_trap_addr; + + /* location for host fatal error log buffer start address */ + uint32 device_fatal_logbuf_start; + + /* location in host memory for offloaded modules */ + sh_addr_t hoffload_addr; } pciedev_shared_t; extern pciedev_shared_t pciedev_shared; +/* host capabilities */ +#define HOSTCAP_PCIEAPI_VERSION_MASK 0x000000FF +#define HOSTCAP_H2D_VALID_PHASE 0x00000100 +#define HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE 0x00000200 +#define HOSTCAP_H2D_ENABLE_HOSTRDY 0x00000400 +#define HOSTCAP_DB0_TIMESTAMP 0x00000800 +#define HOSTCAP_DS_NO_OOB_DW 0x00001000 +#define HOSTCAP_DS_INBAND_DW 0x00002000 +#define HOSTCAP_H2D_IDMA 0x00004000 +#define HOSTCAP_H2D_IFRM 0x00008000 +#define HOSTCAP_H2D_DAR 0x00010000 +#define HOSTCAP_EXTENDED_TRAP_DATA 0x00020000 +#define HOSTCAP_TXSTATUS_METADATA 0x00040000 + /** * Mailboxes notify a remote party that an event took place, using interrupts. They use hardware * support. @@ -276,19 +368,41 @@ extern pciedev_shared_t pciedev_shared; #define H2D_HOST_DS_ACK 0x00000002 #define H2D_HOST_DS_NAK 0x00000004 #define H2D_HOST_CONS_INT 0x80000000 /**< h2d int for console cmds */ -#define H2D_FW_TRAP 0x20000000 /**< dump HW reg info for Livelock issue */ +#define H2D_FW_TRAP 0x20000000 /**< h2d force TRAP */ #define H2D_HOST_D0_INFORM_IN_USE 0x00000008 #define H2D_HOST_D0_INFORM 0x00000010 +#define H2D_HOST_IDMA_INITED 0x00000080 +#define H2DMB_DS_HOST_SLEEP_INFORM H2D_HOST_D3_INFORM +#define H2DMB_DS_DEVICE_SLEEP_ACK H2D_HOST_DS_ACK +#define H2DMB_DS_DEVICE_SLEEP_NAK H2D_HOST_DS_NAK +#define H2DMB_D0_INFORM_IN_USE H2D_HOST_D0_INFORM_IN_USE +#define H2DMB_D0_INFORM H2D_HOST_D0_INFORM +#define H2DMB_DS_ACTIVE 0x00000020 +#define H2DMB_DS_DEVICE_WAKE 0x00000040 +#define H2DMB_FW_TRAP H2D_FW_TRAP +#define H2DMB_HOST_CONS_INT H2D_HOST_CONS_INT +#define H2DMB_DS_DEVICE_WAKE_ASSERT H2DMB_DS_DEVICE_WAKE +#define H2DMB_DS_DEVICE_WAKE_DEASSERT H2DMB_DS_ACTIVE /* D2H mail box Data */ #define D2H_DEV_D3_ACK 0x00000001 #define 
D2H_DEV_DS_ENTER_REQ 0x00000002 #define D2H_DEV_DS_EXIT_NOTE 0x00000004 #define D2H_DEV_FWHALT 0x10000000 +#define D2H_DEV_EXT_TRAP_DATA 0x20000000 +#define D2H_DEV_IDMA_INITED 0x00000010 +#define D2H_FWTRAP_MASK 0x0000001F /* Adding maskbits for TRAP information */ +#define D2HMB_DS_HOST_SLEEP_ACK D2H_DEV_D3_ACK +#define D2HMB_DS_DEVICE_SLEEP_ENTER_REQ D2H_DEV_DS_ENTER_REQ +#define D2HMB_DS_DEVICE_SLEEP_EXIT D2H_DEV_DS_EXIT_NOTE +#define D2HMB_DS_HOST_SLEEP_EXIT_ACK 0x00000008 +#define D2HMB_FWHALT D2H_DEV_FWHALT #define D2H_DEV_MB_MASK (D2H_DEV_D3_ACK | D2H_DEV_DS_ENTER_REQ | \ - D2H_DEV_DS_EXIT_NOTE | D2H_DEV_FWHALT) + D2H_DEV_DS_EXIT_NOTE | D2H_DEV_IDMA_INITED | D2H_DEV_FWHALT | \ + D2H_FWTRAP_MASK | D2H_DEV_EXT_TRAP_DATA) #define D2H_DEV_MB_INVALIDATED(x) ((!x) || (x & ~D2H_DEV_MB_MASK)) + /** These macro's operate on type 'inuse_lclbuf_pool_t' and are used by firmware only */ #define NEXTTXP(i, d) ((((i)+1) >= (d)) ? 0 : ((i)+1)) #define NTXPACTIVE(r, w, d) (((r) <= (w)) ? ((w)-(r)) : ((d)-(r)+(w))) @@ -301,18 +415,26 @@ extern pciedev_shared_t pciedev_shared; #define WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d) ((w >= r) ? (d - w) : (r - w)) #define WRITE_SPACE_AVAIL(r, w, d) (d - (NTXPACTIVE(r, w, d)) - 1) #define CHECK_WRITE_SPACE(r, w, d) \ - MIN(WRITE_SPACE_AVAIL(r, w, d), WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d)) + ((r) > (w)) ? ((r) - (w) - 1) : ((r) == 0 || (w) == 0) ? 
((d) - (w) - 1) : ((d) - (w)) +#define CHECK_NOWRITE_SPACE(r, w, d) \ + (((r) == (w) + 1) || (((r) == 0) && ((w) == ((d) - 1)))) #define WRT_PEND(x) ((x)->wr_pending) -#define DNGL_RING_WPTR(msgbuf) (*((msgbuf)->tcm_rs_w_ptr)) +#define DNGL_RING_WPTR(msgbuf) (*((msgbuf)->tcm_rs_w_ptr)) /**< advanced by producer */ #define BCMMSGBUF_RING_SET_W_PTR(msgbuf, a) (DNGL_RING_WPTR(msgbuf) = (a)) -#define DNGL_RING_RPTR(msgbuf) (*((msgbuf)->tcm_rs_r_ptr)) +#define DNGL_RING_RPTR(msgbuf) (*((msgbuf)->tcm_rs_r_ptr)) /**< advanced by consumer */ #define BCMMSGBUF_RING_SET_R_PTR(msgbuf, a) (DNGL_RING_RPTR(msgbuf) = (a)) -#define RING_START_PTR(x) ((x)->ringmem->base_addr.low_addr) -#define RING_MAX_ITEM(x) ((x)->ringmem->max_item) -#define RING_LEN_ITEMS(x) ((x)->ringmem->len_items) +#define MODULO_RING_IDX(x, y) ((x) % (y)->bitmap_size) +#define RING_READ_PTR(x) ((x)->ringstate->r_offset) +#define RING_WRITE_PTR(x) ((x)->ringstate->w_offset) +#define RING_START_PTR(x) ((x)->ringmem->base_addr.low_addr) +#define RING_MAX_ITEM(x) ((x)->ringmem->max_item) +#define RING_LEN_ITEMS(x) ((x)->ringmem->len_items) +#define HOST_RING_BASE(x) ((x)->dma_buf.va) +#define HOST_RING_END(x) ((uint8 *)HOST_RING_BASE((x)) + \ + ((RING_MAX_ITEM((x))-1)*RING_LEN_ITEMS((x)))) #endif /* _bcmpcie_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmpcispi.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmpcispi.h index 66c783c4aeff..b3502ea7b884 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmpcispi.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmpcispi.h @@ -1,7 +1,7 @@ /* * Broadcom PCI-SPI Host Controller Register Definitions * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git 
a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmperf.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmperf.h index 823c3b62f09a..09e607fc9b74 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmperf.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmperf.h @@ -1,7 +1,7 @@ /* * Performance counters software interface. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdbus.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdbus.h index 56ea1d49b40f..da835e88afcd 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdbus.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdbus.h @@ -2,7 +2,7 @@ * Definitions for API from sdio common code (bcmsdh) to individual * host controller drivers. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,12 +25,16 @@ * * <> * - * $Id: bcmsdbus.h 514727 2014-11-12 03:02:48Z $ + * $Id: bcmsdbus.h 644725 2016-06-21 12:26:04Z $ */ #ifndef _sdio_api_h_ #define _sdio_api_h_ +#if defined(BT_OVER_SDIO) +#include +#endif /* defined (BT_OVER_SDIO) */ + #define SDIOH_API_RC_SUCCESS (0x00) #define SDIOH_API_RC_FAIL (0x01) @@ -95,6 +99,10 @@ typedef struct sdioh_info sdioh_info_t; /* callback function, taking one arg */ typedef void (*sdioh_cb_fn_t)(void *); +#if defined(BT_OVER_SDIO) +extern +void sdioh_sdmmc_card_enable_func_f3(sdioh_info_t *sd, struct sdio_func *func); +#endif /* defined (BT_OVER_SDIO) */ extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si, sdioh_cb_fn_t fn, void *argh); extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si); @@ -123,6 +131,7 @@ extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *si, uint pio_dma, uint fi /* get cis data */ extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint fuc, uint8 *cis, uint32 length); +extern SDIOH_API_RC sdioh_cisaddr_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 offset); extern SDIOH_API_RC sdioh_cfg_read(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data); extern SDIOH_API_RC sdioh_cfg_write(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data); @@ -159,15 +168,6 @@ extern SDIOH_API_RC sdioh_gpio_init(sdioh_info_t *sd); extern bool sdioh_gpioin(sdioh_info_t *sd, uint32 gpio); extern SDIOH_API_RC sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio); extern SDIOH_API_RC sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab); - extern uint sdioh_set_mode(sdioh_info_t *sd, uint mode); -#if defined(SWTXGLOM) -/* read or write any buffer using cmd53 */ -extern SDIOH_API_RC sdioh_request_swtxglom_buffer(sdioh_info_t *si, uint pio_dma, 
uint fix_inc, - uint rw, uint fnc_num, uint32 addr, uint regwidth, uint32 buflen, uint8 *buffer, - void *pkt); -extern void sdioh_glom_post(sdioh_info_t *sd, uint8 *frame, void *pkt, uint len); -extern void sdioh_glom_clear(sdioh_info_t *sd); -#endif #endif /* _sdio_api_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdh.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdh.h index 0933d227f374..7262d0f53536 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdh.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdh.h @@ -3,7 +3,7 @@ * export functions to client drivers * abstract OS and BUS specific details of SDIO * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -26,7 +26,7 @@ * * <> * - * $Id: bcmsdh.h 514727 2014-11-12 03:02:48Z $ + * $Id: bcmsdh.h 698895 2017-05-11 02:55:17Z $ */ /** @@ -52,6 +52,15 @@ extern const uint bcmsdh_msglevel; typedef struct bcmsdh_info bcmsdh_info_t; typedef void (*bcmsdh_cb_fn_t)(void *); + +#if defined(BT_OVER_SDIO) +typedef enum { + NO_HANG_STATE = 0, + HANG_START_STATE = 1, + HANG_RECOVERY_STATE = 2 +} dhd_hang_state_t; +#endif + extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva); /** * BCMSDH API context @@ -65,6 +74,11 @@ struct bcmsdh_info bool regfail; /* Save status of last reg_read/reg_write call */ uint32 sbwad; /* Save backplane window address */ void *os_cxt; /* Pointer to per-OS private data */ + bool force_sbwad_calc; /* forces calculation of sbwad instead of using cached value */ +#ifdef DHD_WAKE_STATUS + unsigned int total_wake_count; + int pkt_wake; +#endif /* DHD_WAKE_STATUS */ }; /* Detach - freeup resources allocated in attach */ @@ -114,14 +128,15 @@ extern void bcmsdh_cfg_write_word(void *sdh, uint 
fnc_num, uint32 addr, uint32 d * to form an SDIO-space address to read the data from. */ extern int bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length); +extern int bcmsdh_cisaddr_read(void *sdh, uint func, uint8 *cisd, uint offset); /* Synchronous access to device (client) core registers via CMD53 to F1. * addr: backplane address (i.e. >= regsva from attach) * size: register width in bytes (2 or 4) * data: data for register write */ -extern uint32 bcmsdh_reg_read(void *sdh, uint32 addr, uint size); -extern uint32 bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data); +extern uint32 bcmsdh_reg_read(void *sdh, uintptr addr, uint size); +extern uint32 bcmsdh_reg_write(void *sdh, uintptr addr, uint size, uint32 data); /* set sb address window */ extern int bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set); @@ -148,11 +163,6 @@ extern int bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags, extern int bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes, void *pkt, bcmsdh_cmplt_fn_t complete_fn, void *handle); -#if defined(SWTXGLOM) -extern int bcmsdh_send_swtxglom_buf(void *sdh, uint32 addr, uint fn, uint flags, - uint8 *buf, uint nbytes, void *pkt, - bcmsdh_cmplt_fn_t complete_fn, void *handle); -#endif extern void bcmsdh_glom_post(void *sdh, uint8 *frame, void *pkt, uint len); extern void bcmsdh_glom_clear(void *sdh); @@ -245,6 +255,9 @@ extern uint32 bcmsdh_get_dstatus(void *sdh); /* Function to return current window addr */ extern uint32 bcmsdh_cur_sbwad(void *sdh); +/* function to force sbwad calculation instead of using cached value */ +extern void bcmsdh_force_sbwad_calc(void *sdh, bool force); + /* Function to pass chipid and rev to lower layers for controlling pr's */ extern void bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev); diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdh_sdmmc.h 
b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdh_sdmmc.h index 1bd35b527b9d..1073d97c650a 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdh_sdmmc.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdh_sdmmc.h @@ -1,7 +1,7 @@ /* * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmsdh_sdmmc.h 591160 2015-10-07 06:01:58Z $ + * $Id: bcmsdh_sdmmc.h 687253 2017-02-28 09:33:36Z $ */ #ifndef __BCMSDH_SDMMC_H__ @@ -60,15 +60,7 @@ /* private bus modes */ #define SDIOH_MODE_SD4 2 #define CLIENT_INTR 0x100 /* Get rid of this! */ -#define SDIOH_SDMMC_MAX_SG_ENTRIES SDPCM_MAXGLOM_SIZE - -#if defined(SWTXGLOM) -typedef struct glom_buf { - void *glom_pkt_head; - void *glom_pkt_tail; - uint32 count; /* Total number of pkts queued */ -} glom_buf_t; -#endif /* SWTXGLOM */ +#define SDIOH_SDMMC_MAX_SG_ENTRIES (SDPCM_MAXGLOM_SIZE + 2) struct sdioh_info { osl_t *osh; /* osh handler */ @@ -93,11 +85,8 @@ struct sdioh_info { struct scatterlist sg_list[SDIOH_SDMMC_MAX_SG_ENTRIES]; struct sdio_func fake_func0; struct sdio_func *func[SDIOD_MAX_IOFUNCS]; - + uint sd_clk_rate; uint txglom_mode; /* Txglom mode: 0 - copy, 1 - multi-descriptor */ -#if defined(SWTXGLOM) - glom_buf_t glom_info; /* pkt information used for glomming */ -#endif }; /************************************************************ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdpcm.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdpcm.h index 5c0adff8e8ad..6230047f4f3a 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdpcm.h +++ 
b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdpcm.h @@ -2,7 +2,7 @@ * Broadcom SDIO/PCMCIA * Software-specific definitions shared between device and host side * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: bcmsdpcm.h 514727 2014-11-12 03:02:48Z $ + * $Id: bcmsdpcm.h 614070 2016-01-21 00:55:57Z $ */ #ifndef _bcmsdpcm_h_ @@ -51,6 +51,14 @@ #define SMB_MASK 0x0000000f /* To SB Mailbox Mask */ /* tosbmailboxdata */ + +#ifdef DS_PROT +/* Bit msgs for custom deep sleep protocol */ +#define SMB_DATA_D3INFORM 0x100 /* host announcing D3 entry */ +#define SMB_DATA_DSACK 0x200 /* host acking a deepsleep request */ +#define SMB_DATA_DSNACK 0x400 /* host nacking a deepsleep request */ +#endif /* DS_PROT */ + #define SMB_DATA_VERSION_MASK 0x00ff0000 /* host protocol version (sent with F2 enable) */ #define SMB_DATA_VERSION_SHIFT 16 /* host protocol version (sent with F2 enable) */ @@ -59,16 +67,16 @@ */ /* intstatus bits */ +#define I_HMB_INT_ACK I_HMB_SW0 /* To Host Mailbox Dev Interrupt ACK */ #define I_HMB_FC_STATE I_HMB_SW0 /* To Host Mailbox Flow Control State */ #define I_HMB_FC_CHANGE I_HMB_SW1 /* To Host Mailbox Flow Control State Changed */ #define I_HMB_FRAME_IND I_HMB_SW2 /* To Host Mailbox Frame Indication */ #define I_HMB_HOST_INT I_HMB_SW3 /* To Host Mailbox Miscellaneous Interrupt */ -#define I_TOHOSTMAIL (I_HMB_FC_CHANGE | I_HMB_FRAME_IND | I_HMB_HOST_INT) +#define I_TOHOSTMAIL (I_HMB_INT_ACK | I_HMB_FRAME_IND | I_HMB_HOST_INT) /* tohostmailbox bits corresponding to intstatus bits */ -#define HMB_FC_ON (1 << 0) /* To Host Mailbox Flow Control State */ -#define HMB_FC_CHANGE (1 << 1) /* To Host Mailbox Flow Control State Changed */ +#define HMB_INT_ACK (1 << 0) /* To Host Mailbox Dev Interrupt ACK */ #define 
HMB_FRAME_IND (1 << 2) /* To Host Mailbox Frame Indication */ #define HMB_HOST_INT (1 << 3) /* To Host Mailbox Miscellaneous Interrupt */ #define HMB_MASK 0x0000000f /* To Host Mailbox Mask */ @@ -80,6 +88,16 @@ #define HMB_DATA_FWREADY 0x08 /* firmware is ready for protocol activity */ #define HMB_DATA_FWHALT 0x10 /* firmware has halted operation */ +#ifdef DS_PROT +/* Bit msgs for custom deep sleep protocol */ +#define HMB_DATA_DSREQ 0x100 /* firmware requesting deepsleep entry */ +#define HMB_DATA_DSEXIT 0x200 /* firmware announcing deepsleep exit */ +#define HMB_DATA_D3ACK 0x400 /* firmware acking a D3 notice from host */ +#define HMB_DATA_D3EXIT 0x800 /* firmware announcing D3 exit */ +#define HMB_DATA_DSPROT_MASK 0xf00 +#endif /* DS_PROT */ + + #define HMB_DATA_FCDATA_MASK 0xff000000 /* per prio flowcontrol data */ #define HMB_DATA_FCDATA_SHIFT 24 /* per prio flowcontrol data */ @@ -264,6 +282,7 @@ typedef volatile struct { #define SDPCM_SHARED_IN_BRPT 0x0800 #define SDPCM_SHARED_SET_BRPT 0x1000 #define SDPCM_SHARED_PENDING_BRPT 0x2000 +#define SDPCM_SHARED_FATAL_LOGBUF_VALID 0x100000 typedef struct { uint32 flags; @@ -274,6 +293,7 @@ typedef struct { uint32 console_addr; /* Address of hnd_cons_t */ uint32 msgtrace_addr; uint32 fwid; + uint32 device_fatal_logbuf_start; } sdpcm_shared_t; extern sdpcm_shared_t sdpcm_shared; diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdspi.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdspi.h index b1831db8b19b..537876c3696d 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdspi.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdspi.h @@ -1,7 +1,7 @@ /* * SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this 
software is licensed to you diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdstd.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdstd.h index 24df8de685d7..ff3b0d1f2750 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdstd.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsdstd.h @@ -1,7 +1,7 @@ /* * 'Standard' SDIO HOST CONTROLLER driver * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmspi.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmspi.h index e9a906e79734..9b4bd2d8ac0d 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmspi.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmspi.h @@ -1,7 +1,7 @@ /* * Broadcom SPI Low-Level Hardware Driver API * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmspibrcm.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmspibrcm.h index 7c2bfc4653c1..e9735ffc621a 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmspibrcm.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmspibrcm.h @@ -1,7 +1,7 @@ /* * SD-SPI Protocol Conversion - BCMSDH->gSPI Translation Layer * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to 
you diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsrom_fmt.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsrom_fmt.h index a40bd569da34..f1e9bfe7df07 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsrom_fmt.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsrom_fmt.h @@ -1,7 +1,7 @@ /* * SROM format definition. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmsrom_fmt.h 553280 2015-04-29 07:55:29Z $ + * $Id: bcmsrom_fmt.h 646789 2016-06-30 19:43:02Z $ */ #ifndef _bcmsrom_fmt_h_ @@ -924,6 +924,9 @@ #define SROM13_ULBPDOFFS2GA3 166 #define SROM13_RPCAL5GB4 199 +#define SROM13_RPCAL2GCORE3 101 +#define SROM13_RPCAL5GB01CORE3 102 +#define SROM13_RPCAL5GB23CORE3 103 #define SROM13_EU_EDCRSTH 232 @@ -954,6 +957,11 @@ #define SROM13_RXGAINERRCORE3_1 587 +#define SROM16_SIGN 104 +#define SROM16_WORDS 512 +#define SROM16_SIGNATURE 0x4347 +#define SROM16_CRCREV 511 + typedef struct { uint8 tssipos; /* TSSI positive slope, 1: positive, 0: negative */ uint8 extpagain; /* Ext PA gain-type: full-gain: 0, pa-lite: 1, no_pa: 2 */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsrom_tbl.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsrom_tbl.h index f2775fbba1c5..e855186f1626 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsrom_tbl.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmsrom_tbl.h @@ -1,7 +1,7 @@ /* * Table that encodes the srom formats for PCI/PCIe NICs. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmsrom_tbl.h 553564 2015-04-30 06:19:30Z $ + * $Id: bcmsrom_tbl.h 616054 2016-01-29 13:22:24Z $ */ #ifndef _bcmsrom_tbl_h_ @@ -108,9 +108,9 @@ static const sromvar_t pci_sromvars[] = { {"boardnum", 0x00000700, 0, SROM8_MACLO, 0xffff}, {"cc", 0x00000002, 0, SROM_AABREV, SROM_CC_MASK}, {"regrev", 0x00000008, 0, SROM_OPO, 0xff00}, - {"regrev", 0x00000010, 0, SROM4_REGREV, 0x00ff}, - {"regrev", 0x000000e0, 0, SROM5_REGREV, 0x00ff}, - {"regrev", 0x00000700, 0, SROM8_REGREV, 0x00ff}, + {"regrev", 0x00000010, 0, SROM4_REGREV, 0xffff}, + {"regrev", 0x000000e0, 0, SROM5_REGREV, 0xffff}, + {"regrev", 0x00000700, 0, SROM8_REGREV, 0xffff}, {"ledbh0", 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0x00ff}, {"ledbh1", 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0xff00}, {"ledbh2", 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0x00ff}, @@ -473,7 +473,7 @@ static const sromvar_t pci_sromvars[] = { {"boardnum", 0xfffff800, 0, SROM11_MACLO, 0xffff}, {"macaddr", 0xfffff800, SRFL_ETHADDR, SROM11_MACHI, 0xffff}, {"ccode", 0xfffff800, SRFL_CCODE, SROM11_CCODE, 0xffff}, - {"regrev", 0xfffff800, 0, SROM11_REGREV, 0x00ff}, + {"regrev", 0xfffff800, 0, SROM11_REGREV, 0xffff}, {"ledbh0", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH10, 0x00ff}, {"ledbh1", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH10, 0xff00}, {"ledbh2", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH32, 0x00ff}, @@ -513,6 +513,7 @@ static const sromvar_t pci_sromvars[] = { {"tempcorrx", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0xfc00}, {"tempsense_option", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0x0300}, {"xtalfreq", 0xfffff800, 0, SROM11_XTAL_FREQ, 0xffff}, + {"txpwrbckof", 0x00000800, SRFL_PRHEX, SROM11_PATH0 + SROM11_2G_MAXP, 0xff00}, /* Special PA Params for 4350 5G 
Band, 40/80 MHz BW Ant #1 */ {"pa5gbw4080a1", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W0_A1, 0xffff}, {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W1_A1, 0xffff}, @@ -766,6 +767,12 @@ static const sromvar_t pci_sromvars[] = { {"gpdn", 0xfffff000, SRFL_PRHEX|SRFL_MORE, SROM12_GPDN_L, 0xffff}, {"", 0, 0, SROM12_GPDN_H, 0xffff}, + {"rpcal2gcore3", 0xffffe000, 0, SROM13_RPCAL2GCORE3, 0x00ff}, + {"rpcal5gb0core3", 0xffffe000, 0, SROM13_RPCAL5GB01CORE3, 0x00ff}, + {"rpcal5gb1core3", 0xffffe000, 0, SROM13_RPCAL5GB01CORE3, 0xff00}, + {"rpcal5gb2core3", 0xffffe000, 0, SROM13_RPCAL5GB23CORE3, 0x00ff}, + {"rpcal5gb3core3", 0xffffe000, 0, SROM13_RPCAL5GB23CORE3, 0xff00}, + {"eu_edthresh2g", 0x00002000, 0, SROM13_EU_EDCRSTH, 0x00ff}, {"eu_edthresh5g", 0x00002000, 0, SROM13_EU_EDCRSTH, 0xff00}, @@ -1284,7 +1291,7 @@ static const cis_tuple_t cis_hnbuvars[] = { {HNBU_LEDDC, 0xffffffff, 3, "2leddc"}, {HNBU_RDLRNDIS, 0xffffffff, 2, "1rdlndis"}, {HNBU_CHAINSWITCH, 0xffffffff, 5, "1txchain 1rxchain 2antswitch"}, - {HNBU_REGREV, 0xffffffff, 2, "1regrev"}, + {HNBU_REGREV, 0xffffffff, 3, "2regrev"}, {HNBU_FEM, 0x000007fe, 5, "0antswctl2g 0triso2g 0pdetrange2g 0extpagain2g " "0tssipos2g 0antswctl5g 0triso5g 0pdetrange5g 0extpagain5g 0tssipos5g"}, /* special case */ {HNBU_PAPARMS_C0, 0x000007fe, 31, "1maxp2ga0 1itt2ga0 2pa2gw0a0 2pa2gw1a0 " @@ -1394,6 +1401,7 @@ static const cis_tuple_t cis_hnbuvars[] = { {HNBU_TXBFRPCALS, 0xfffff800, 11, "2rpcal2g 2rpcal5gb0 2rpcal5gb1 2rpcal5gb2 2rpcal5gb3"}, /* txbf rpcalvars */ {HNBU_GPIO_PULL_DOWN, 0xffffffff, 5, "4gpdn"}, + {HNBU_MACADDR2, 0xffffffff, 7, "6macaddr2"}, /* special case */ {0xFF, 0xffffffff, 0, ""} }; diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bcmtcp.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmtcp.h old mode 100755 new mode 100644 similarity index 97% rename from drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bcmtcp.h rename to 
drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmtcp.h index 661e1f84d2ae..4d40948759d6 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bcmtcp.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmtcp.h @@ -1,7 +1,7 @@ /* * Fundamental constants relating to TCP Protocol * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmtcp.h 518342 2014-12-01 23:21:41Z $ + * $Id: bcmtcp.h 700076 2017-05-17 14:42:22Z $ */ #ifndef _bcmtcp_h_ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmudp.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmudp.h new file mode 100644 index 000000000000..e5581348206e --- /dev/null +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmudp.h @@ -0,0 +1,61 @@ +/* + * Fundamental constants relating to UDP Protocol + * + * Copyright (C) 1999-2017, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmudp.h 700076 2017-05-17 14:42:22Z $ + */ + +#ifndef _bcmudp_h_ +#define _bcmudp_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + +/* This marks the start of a packed structure section. */ +#include + + +/* UDP header */ +#define UDP_DEST_PORT_OFFSET 2 /* UDP dest port offset */ +#define UDP_LEN_OFFSET 4 /* UDP length offset */ +#define UDP_CHKSUM_OFFSET 6 /* UDP body checksum offset */ + +#define UDP_HDR_LEN 8 /* UDP header length */ +#define UDP_PORT_LEN 2 /* UDP port length */ + +/* These fields are stored in network order */ +BWL_PRE_PACKED_STRUCT struct bcmudp_hdr +{ + uint16 src_port; /* Source Port Address */ + uint16 dst_port; /* Destination Port Address */ + uint16 len; /* Number of bytes in datagram including header */ + uint16 chksum; /* entire datagram checksum with pseudoheader */ +} BWL_POST_PACKED_STRUCT; + +/* This marks the end of a packed structure section. */ +#include + +#endif /* #ifndef _bcmudp_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmutils.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmutils.h index 7dc741697343..3c061b8bfecf 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmutils.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/bcmutils.h @@ -1,7 +1,7 @@ /* * Misc useful os-independent macros and functions. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: bcmutils.h 563776 2015-06-15 15:51:15Z $ + * $Id: bcmutils.h 701785 2017-05-26 11:08:50Z $ */ #ifndef _bcmutils_h_ @@ -48,6 +48,8 @@ extern "C" { * */ +#define BCM_BIT(x) (1 << (x)) + /* ctype replacement */ #define _BCM_U 0x01 /* upper */ #define _BCM_L 0x02 /* lower */ @@ -126,76 +128,8 @@ struct ether_addr; extern int ether_isbcast(const void *ea); extern int ether_isnulladdr(const void *ea); -#define BCM_MAC_RXCPL_IDX_BITS 12 -#define BCM_MAX_RXCPL_IDX_INVALID 0 -#define BCM_MAC_RXCPL_IFIDX_BITS 3 -#define BCM_MAC_RXCPL_DOT11_BITS 1 -#define BCM_MAX_RXCPL_IFIDX ((1 << BCM_MAC_RXCPL_IFIDX_BITS) - 1) -#define BCM_MAC_RXCPL_FLAG_BITS 4 -#define BCM_RXCPL_FLAGS_IN_TRANSIT 0x1 -#define BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST 0x2 -#define BCM_RXCPL_FLAGS_RXCPLVALID 0x4 -#define BCM_RXCPL_FLAGS_RSVD 0x8 - -#define BCM_RXCPL_SET_IN_TRANSIT(a) ((a)->rxcpl_id.flags |= BCM_RXCPL_FLAGS_IN_TRANSIT) -#define BCM_RXCPL_CLR_IN_TRANSIT(a) ((a)->rxcpl_id.flags &= ~BCM_RXCPL_FLAGS_IN_TRANSIT) -#define BCM_RXCPL_IN_TRANSIT(a) ((a)->rxcpl_id.flags & BCM_RXCPL_FLAGS_IN_TRANSIT) - -#define BCM_RXCPL_SET_FRST_IN_FLUSH(a) ((a)->rxcpl_id.flags |= BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST) -#define BCM_RXCPL_CLR_FRST_IN_FLUSH(a) ((a)->rxcpl_id.flags &= ~BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST) -#define BCM_RXCPL_FRST_IN_FLUSH(a) ((a)->rxcpl_id.flags & BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST) - -#define BCM_RXCPL_SET_VALID_INFO(a) ((a)->rxcpl_id.flags |= BCM_RXCPL_FLAGS_RXCPLVALID) -#define BCM_RXCPL_CLR_VALID_INFO(a) ((a)->rxcpl_id.flags &= ~BCM_RXCPL_FLAGS_RXCPLVALID) -#define BCM_RXCPL_VALID_INFO(a) (((a)->rxcpl_id.flags & BCM_RXCPL_FLAGS_RXCPLVALID) ? 
TRUE : FALSE) - #define UP_TABLE_MAX ((IPV4_TOS_DSCP_MASK >> IPV4_TOS_DSCP_SHIFT) + 1) /* 64 max */ -struct reorder_rxcpl_id_list { - uint16 head; - uint16 tail; - uint32 cnt; -}; - -typedef struct rxcpl_id { - uint32 idx : BCM_MAC_RXCPL_IDX_BITS; - uint32 next_idx : BCM_MAC_RXCPL_IDX_BITS; - uint32 ifidx : BCM_MAC_RXCPL_IFIDX_BITS; - uint32 dot11 : BCM_MAC_RXCPL_DOT11_BITS; - uint32 flags : BCM_MAC_RXCPL_FLAG_BITS; -} rxcpl_idx_id_t; - -typedef struct rxcpl_data_len { - uint32 metadata_len_w : 6; - uint32 dataoffset: 10; - uint32 datalen : 16; -} rxcpl_data_len_t; - -typedef struct rxcpl_info { - rxcpl_idx_id_t rxcpl_id; - uint32 host_pktref; - union { - rxcpl_data_len_t rxcpl_len; - struct rxcpl_info *free_next; - }; -} rxcpl_info_t; - -/* rx completion list */ -typedef struct bcm_rxcplid_list { - uint32 max; - uint32 avail; - rxcpl_info_t *rxcpl_ptr; - rxcpl_info_t *free_list; -} bcm_rxcplid_list_t; - -extern bool bcm_alloc_rxcplid_list(osl_t *osh, uint32 max); -extern rxcpl_info_t * bcm_alloc_rxcplinfo(void); -extern void bcm_free_rxcplinfo(rxcpl_info_t *ptr); -extern void bcm_chain_rxcplid(uint16 first, uint16 next); -extern rxcpl_info_t *bcm_id2rxcplinfo(uint16 id); -extern uint16 bcm_rxcplinfo2id(rxcpl_info_t *ptr); -extern rxcpl_info_t *bcm_rxcpllist_end(rxcpl_info_t *ptr, uint32 *count); - /* externs */ /* packet */ extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf); @@ -210,7 +144,7 @@ extern void *pktoffset(osl_t *osh, void *p, uint offset); extern void pktset8021xprio(void *pkt, int prio); /* Get priority from a packet and pass it back in scb (or equiv) */ -#define PKTPRIO_VDSCP 0x100 /* DSCP prio found after VLAN tag */ +#define PKTPRIO_VDSCP 0x100 /* DSCP prio found af ter VLAN tag */ #define PKTPRIO_VLAN 0x200 /* VLAN prio found */ #define PKTPRIO_UPD 0x400 /* DSCP used to update VLAN prio */ #define PKTPRIO_DSCP 0x800 /* DSCP prio found */ @@ -235,19 +169,6 @@ extern uint pktsetprio(void *pkt, bool update_vtag); extern uint 
pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag); extern bool pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp); -/* string */ -extern int bcm_atoi(const char *s); -extern ulong bcm_strtoul(const char *cp, char **endp, uint base); -extern char *bcmstrstr(const char *haystack, const char *needle); -extern char *bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len); -extern char *bcmstrcat(char *dest, const char *src); -extern char *bcmstrncat(char *dest, const char *src, uint size); -extern ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen); -char* bcmstrtok(char **string, const char *delimiters, char *tokdelim); -int bcmstricmp(const char *s1, const char *s2); -int bcmstrnicmp(const char* s1, const char* s2, int cnt); - - /* ethernet address */ extern char *bcm_ether_ntoa(const struct ether_addr *ea, char *buf); extern int bcm_ether_atoe(const char *p, struct ether_addr *ea); @@ -261,7 +182,15 @@ extern int bcm_atoipv4(const char *p, struct ipv4_addr *ip); /* delay */ extern void bcm_mdelay(uint ms); /* variable access */ +#if defined(BCM_RECLAIM) +#define NVRAM_RECLAIM_CHECK(name) \ + if (bcm_attach_part_reclaimed == TRUE) { \ + *(char*) 0 = 0; /* TRAP */ \ + return NULL; \ + } +#else /* BCM_RECLAIM */ #define NVRAM_RECLAIM_CHECK(name) +#endif /* BCM_RECLAIM */ extern char *getvar(char *vars, const char *name); extern int getintvar(char *vars, const char *name); @@ -298,7 +227,8 @@ typedef struct bcm_iovar { const char *name; /* name for lookup and display */ uint16 varid; /* id for switch */ uint16 flags; /* driver-specific flag bits */ - uint16 type; /* base type of argument */ + uint8 flags2; /* driver-specific flag bits */ + uint8 type; /* base type of argument */ uint16 minlen; /* min length for buffer vars */ } bcm_iovar_t; @@ -318,12 +248,32 @@ typedef struct bcm_iovar { extern const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name); extern int bcm_iovar_lencheck(const 
bcm_iovar_t *table, void *arg, int len, bool set); + +/* ioctl structure */ +typedef struct wlc_ioctl_cmd { + uint16 cmd; /**< IOCTL command */ + uint16 flags; /**< IOCTL command flags */ + int16 min_len; /**< IOCTL command minimum argument len (in bytes) */ +} wlc_ioctl_cmd_t; + #if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \ defined(WLMSG_PRPKT) || defined(WLMSG_WSEC) extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len); #endif #endif /* BCMDRIVER */ +/* string */ +extern int bcm_atoi(const char *s); +extern ulong bcm_strtoul(const char *cp, char **endp, uint base); +extern char *bcmstrstr(const char *haystack, const char *needle); +extern char *bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len); +extern char *bcmstrcat(char *dest, const char *src); +extern char *bcmstrncat(char *dest, const char *src, uint size); +extern ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen); +char* bcmstrtok(char **string, const char *delimiters, char *tokdelim); +int bcmstricmp(const char *s1, const char *s2); +int bcmstrnicmp(const char* s1, const char* s2, int cnt); + /* Base type definitions */ #define IOVT_VOID 0 /* no value (implictly set only) */ #define IOVT_BOOL 1 /* any value ok (zero/nonzero) */ @@ -426,10 +376,27 @@ extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len); #define BCME_REPLAY -51 /* Replay */ #define BCME_IE_NOTFOUND -52 /* IE not found */ #define BCME_DATA_NOTFOUND -53 /* Complete data not found in buffer */ -#define BCME_LAST BCME_DATA_NOTFOUND +#define BCME_NOT_GC -54 /* expecting a group client */ +#define BCME_PRS_REQ_FAILED -55 /* GC presence req failed to sent */ +#define BCME_NO_P2P_SE -56 /* Could not find P2P-Subelement */ +#define BCME_NOA_PND -57 /* NoA pending, CB shuld be NULL */ +#define BCME_FRAG_Q_FAILED -58 /* queueing 80211 frag failedi */ +#define BCME_GET_AF_FAILED -59 /* Get p2p AF pkt failed */ +#define 
BCME_MSCH_NOTREADY -60 /* scheduler not ready */ +#define BCME_LAST BCME_MSCH_NOTREADY #define BCME_NOTENABLED BCME_DISABLED +/* This error code is *internal* to the driver, and is not propogated to users. It should + * only be used by IOCTL patch handlers as an indication that it did not handle the IOCTL. + * (Since the error code is internal, an entry in 'BCMERRSTRINGTABLE' is not required, + * nor does it need to be part of any OSL driver-to-OS error code mapping). + */ +#define BCME_IOCTL_PATCH_UNSUPPORTED -9999 +#if (BCME_LAST <= BCME_IOCTL_PATCH_UNSUPPORTED) + #error "BCME_LAST <= BCME_IOCTL_PATCH_UNSUPPORTED" +#endif + /* These are collection of BCME Error strings */ #define BCMERRSTRINGTABLE { \ "OK", \ @@ -486,6 +453,13 @@ extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len); "Replay", \ "IE not found", \ "Data not found", \ + "NOT GC", \ + "PRS REQ FAILED", \ + "NO P2P SubElement", \ + "NOA Pending", \ + "FRAG Q FAILED", \ + "GET ActionFrame failed", \ + "scheduler not ready", \ } #ifndef ABS @@ -550,6 +524,10 @@ extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len); #endif /* __ARMCC_VERSION */ #endif /* OFFSETOF */ +#ifndef CONTAINEROF +#define CONTAINEROF(ptr, type, member) ((type *)((char *)(ptr) - OFFSETOF(type, member))) +#endif /* CONTAINEROF */ + #ifndef ARRAYSIZE #define ARRAYSIZE(a) (sizeof(a) / sizeof(a[0])) #endif @@ -659,7 +637,13 @@ DECLARE_MAP_API(8, 2, 3, 3U, 0x00FF) /* setbit8() and getbit8() */ ((struct ether_addr *) (ea))->octet[4], \ ((struct ether_addr *) (ea))->octet[5] -#define ETHER_TO_MACF(ea) (ea).octet[0], \ +#define CONST_ETHERP_TO_MACF(ea) ((const struct ether_addr *) (ea))->octet[0], \ + ((const struct ether_addr *) (ea))->octet[1], \ + ((const struct ether_addr *) (ea))->octet[2], \ + ((const struct ether_addr *) (ea))->octet[3], \ + ((const struct ether_addr *) (ea))->octet[4], \ + ((const struct ether_addr *) (ea))->octet[5] +#define ETHER_TO_MACF(ea) (ea).octet[0], \ (ea).octet[1], \ 
(ea).octet[2], \ (ea).octet[3], \ @@ -743,7 +727,7 @@ extern const char *bcm_crypto_algo_name(uint algo); extern char *bcm_chipname(uint chipid, char *buf, uint len); extern char *bcm_brev_str(uint32 brev, char *buf); extern void printbig(char *buf); -extern void prhex(const char *msg, uchar *buf, uint len); +extern void prhex(const char *msg, volatile uchar *buf, uint len); /* IE parsing */ @@ -756,6 +740,10 @@ typedef struct bcm_tlv { uint8 data[1]; } bcm_tlv_t; +#define BCM_TLV_SIZE(_tlv) ((_tlv) ? (OFFSETOF(bcm_tlv_t, data) + (_tlv)->len) : 0) + +#define BCM_XTLV_TAG_LEN_SIZE 4 + /* bcm tlv w/ 16 bit id/len */ typedef BWL_PRE_PACKED_STRUCT struct bcm_xtlv { uint16 id; @@ -807,6 +795,7 @@ typedef struct bcm_xtlvbuf bcm_xtlvbuf_t; extern bcm_tlv_t *bcm_next_tlv(bcm_tlv_t *elt, int *buflen); extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key); extern bcm_tlv_t *bcm_parse_tlvs_min_bodylen(void *buf, int buflen, uint key, int min_bodylen); +extern bcm_tlv_t *bcm_parse_tlvs_dot11(void *buf, int buflen, uint key, bool id_ext); extern bcm_tlv_t *bcm_parse_ordered_tlvs(void *buf, int buflen, uint key); @@ -885,6 +874,8 @@ int bcm_pack_xtlv_buf(void *ctx, void *tlv_buf, uint16 buflen, /* bcmerror */ extern const char *bcmerrorstr(int bcmerror); +extern int wl_set_up_table(uint8 *up_table, bcm_tlv_t *qos_map_ie); + /* multi-bool data type: set of bools, mbool is true if any is set */ typedef uint32 mbool; #define mboolset(mb, bit) ((mb) |= (bit)) /* set one bool */ @@ -917,7 +908,7 @@ extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...); /* power conversion */ extern uint16 bcm_qdbm_to_mw(uint8 qdbm); extern uint8 bcm_mw_to_qdbm(uint16 mw); -extern uint bcm_mkiovar(const char *name, char *data, uint datalen, char *buf, uint len); +extern uint bcm_mkiovar(const char *name, const char *data, uint datalen, char *buf, uint len); unsigned int process_nvram_vars(char *varbuf, unsigned int len); @@ -1002,6 +993,18 @@ C_bcm_count_leading_zeros(uint32 
u32arg) return (32U - shifts); } +#ifdef BCM_ASLR_HEAP + +#define BCM_NVRAM_OFFSET_TCM 4 +#define BCM_NVRAM_IMG_COMPRS_FACTOR 4 +#define BCM_RNG_SIGNATURE 0xFEEDC0DE + +typedef struct bcm_rand_metadata { + uint32 signature; /* host fills it in, FW verfies before reading rand */ + uint32 count; /* number of 4byte wide random numbers */ +} bcm_rand_metadata_t; +#endif /* BCM_ASLR_HEAP */ + #ifdef BCMDRIVER /* * Assembly instructions: Count Leading Zeros @@ -1080,8 +1083,10 @@ extern void bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl); /* INTERFACE: Simple unique 16bit Id Allocator using a stack implementation. */ -#define ID16_INVALID ((uint16)(~0)) -#define ID16_UNDEFINED (ID16_INVALID) +#define ID8_INVALID 0xFFu +#define ID16_INVALID 0xFFFFu +#define ID32_INVALID 0xFFFFFFFFu +#define ID16_UNDEFINED ID16_INVALID /* * Construct a 16bit id allocator, managing 16bit ids in the range: @@ -1112,6 +1117,28 @@ extern void bcm_uint64_right_shift(uint32* r, uint32 a_high, uint32 a_low, uint3 void bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset); void bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset); +uint64 fp_mult_64(uint64 val1, uint64 val2, uint8 nf1, uint8 nf2, uint8 nf_res); +uint8 fp_div_64(uint64 num, uint32 den, uint8 nf_num, uint8 nf_den, uint32 *div_out); +uint8 fp_calc_head_room_64(uint64 num); +uint8 fp_calc_head_room_32(uint32 num); +uint32 fp_round_64(uint64 num, uint8 rnd_pos); +uint32 fp_round_32(uint32 num, uint8 rnd_pos); +uint32 fp_floor_64(uint64 num, uint8 floor_pos); +uint32 fp_floor_32(uint32 num, uint8 floor_pos); +uint32 fp_ceil_64(uint64 num, uint8 ceil_pos); +uint64 bcm_shl_64(uint64 input, uint8 shift_amt); +uint64 bcm_shr_64(uint64 input, uint8 shift_amt); + +#define MASK_32_BITS (~0) +#define MASK_8_BITS ((1 << 8) - 1) + +#define EXTRACT_LOW32(num) (uint32)(num & MASK_32BITS) +#define EXTRACT_HIGH32(num) (uint32)(((uint64)num >> 32) & MASK_32BITS) + +#define MAXIMUM(a, b) ((a > b) ? a : b) +#define MINIMUM(a, b) ((a < b) ? 
a : b) +#define LIMIT(x, min, max) ((x) < (min) ? (min) : ((x) > (max) ? (max) : (x))) + /* calculate checksum for ip header, tcp / udp header / data */ uint16 bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum); @@ -1269,6 +1296,24 @@ void dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p); typedef void (* dll_elem_dump)(void * elem_p); void dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size); +/* calculate IPv4 header checksum + * - input ip points to IP header in network order + * - output cksum is in network order + */ +uint16 ipv4_hdr_cksum(uint8 *ip, int ip_len); + +/* calculate IPv4 TCP header checksum + * - input ip and tcp points to IP and TCP header in network order + * - output cksum is in network order + */ +uint16 ipv4_tcp_hdr_cksum(uint8 *ip, uint8 *tcp, uint16 tcp_len); + +/* calculate IPv6 TCP header checksum + * - input ipv6 and tcp points to IPv6 and TCP header in network order + * - output cksum is in network order + */ +uint16 ipv6_tcp_hdr_cksum(uint8 *ipv6, uint8 *tcp, uint16 tcp_len); + #ifdef __cplusplus } #endif @@ -1289,16 +1334,22 @@ typedef struct _counter_tbl_t { void counter_printlog(counter_tbl_t *ctr_tbl); #endif /* DEBUG_COUNTER */ -/* Given a number 'n' returns 'm' that is next larger power of 2 after n */ -static INLINE uint32 next_larger_power2(uint32 num) -{ - num--; - num |= (num >> 1); - num |= (num >> 2); - num |= (num >> 4); - num |= (num >> 8); - num |= (num >> 16); - return (num + 1); -} +#if defined(__GNUC__) +#define CALL_SITE __builtin_return_address(0) +#else +#define CALL_SITE ((void*) 0) +#endif +#ifdef SHOW_LOGTRACE +#define TRACE_LOG_BUF_MAX_SIZE 1500 +#define BUF_NOT_AVAILABLE 0 +#define NEXT_BUF_NOT_AVAIL 1 +#define NEXT_BUF_AVAIL 2 + +typedef struct trace_buf_info { + int availability; + int size; + char buf[TRACE_LOG_BUF_MAX_SIZE]; +} trace_buf_info_t; +#endif /* SHOW_LOGTRACE */ #endif /* _bcmutils_h_ */ diff --git 
a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/brcm_nl80211.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/brcm_nl80211.h index 888863117105..84bfddabffdd 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/brcm_nl80211.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/brcm_nl80211.h @@ -1,7 +1,7 @@ /* * Definitions for nl80211 vendor command/event access to host driver * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: brcm_nl80211.h 556083 2015-05-12 14:03:00Z $ + * $Id: brcm_nl80211.h 601873 2015-11-24 11:04:28Z $ * */ @@ -32,6 +32,7 @@ #define _brcm_nl80211_h_ #define OUI_BRCM 0x001018 +#define OUI_GOOGLE 0x001A11 enum wl_vendor_subcmd { BRCM_VENDOR_SCMD_UNSPEC, diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/dbus.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/dbus.h index b066c67a5dad..c926ba77e673 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/dbus.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/dbus.h @@ -2,7 +2,7 @@ * Dongle BUS interface Abstraction layer * target serial buses like USB, SDIO, SPI, etc. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: dbus.h 553311 2015-04-29 10:23:08Z $ + * $Id: dbus.h 596371 2015-10-30 22:43:47Z $ */ #ifndef __DBUS_H__ @@ -150,12 +150,14 @@ typedef struct { */ enum { DBUS_CONFIG_ID_RXCTL_DEFERRES = 1, - DBUS_CONFIG_ID_AGGR_LIMIT + DBUS_CONFIG_ID_AGGR_LIMIT, + DBUS_CONFIG_ID_KEEPIF_ON_DEVRESET }; typedef struct { uint32 config_id; union { + uint32 general_param; bool rxctl_deferrespok; struct { int maxrxsf; diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/dhd_daemon.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/dhd_daemon.h new file mode 100644 index 000000000000..3a5141ab2094 --- /dev/null +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/dhd_daemon.h @@ -0,0 +1,62 @@ +/* + * Header file for DHD daemon to handle timeouts + * + * Copyright (C) 1999-2017, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_daemon.h 671464 2016-11-22 06:15:32Z $ + */ + +#ifndef __BCM_DHDD_H__ +#define __BCM_DHDD_H__ + +/** + * To maintain compatabily when dhd driver and dhd daemon is taken from different branches, + * make sure to keep this file same across dhd driver branch and dhd apps branch. + * TODO: Make this file as shared between apps and dhd.ko + */ + +#define BCM_TO_MAGIC 0x600DB055 +#define NO_TRAP 0 +#define DO_TRAP 1 + +#define BCM_NL_USER 31 + +typedef enum notify_dhd_daemon_reason { + REASON_COMMAND_TO, + REASON_OQS_TO, + REASON_SCAN_TO, + REASON_JOIN_TO, + REASON_DAEMON_STARTED, + REASON_DEVICE_TX_STUCK_WARNING, + REASON_DEVICE_TX_STUCK, + REASON_UNKOWN +} notify_dhd_daemon_reason_t; + +typedef struct bcm_to_info { + int magic; + int reason; + int trap; +} bcm_to_info_t; + +#endif /* __BCM_DHDD_H__ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/dhdioctl.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/dhdioctl.h index 342d39c076a3..8a139dcc1c1b 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/dhdioctl.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/dhdioctl.h @@ -5,7 +5,7 @@ * * Definitions subject to change without notice. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -28,7 +28,7 @@ * * <> * - * $Id: dhdioctl.h 585723 2015-09-11 06:26:37Z $ + * $Id: dhdioctl.h 675190 2016-12-14 15:27:52Z $ */ #ifndef _dhdioctl_h_ @@ -37,20 +37,15 @@ #include -/* require default structure packing */ -#define BWL_DEFAULT_PACKING -#include - - /* Linux network driver ioctl encoding */ typedef struct dhd_ioctl { - uint cmd; /* common ioctl definition */ + uint32 cmd; /* common ioctl definition */ void *buf; /* pointer to user buffer */ - uint len; /* length of user buffer */ - bool set; /* get or set request (optional) */ - uint used; /* bytes read or written (optional) */ - uint needed; /* bytes needed (optional) */ - uint driver; /* to identify target driver */ + uint32 len; /* length of user buffer */ + uint32 set; /* get or set request boolean (optional) */ + uint32 used; /* bytes read or written (optional) */ + uint32 needed; /* bytes needed (optional) */ + uint32 driver; /* to identify target driver */ } dhd_ioctl_t; /* Underlying BUS definition */ @@ -60,13 +55,23 @@ enum { BUS_TYPE_PCIE /* for PCIE dongles */ }; + /* per-driver magic numbers */ #define DHD_IOCTL_MAGIC 0x00444944 /* bump this number if you change the ioctl interface */ #define DHD_IOCTL_VERSION 1 -#define DHD_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */ +/* + * Increase the DHD_IOCTL_MAXLEN to 16K for supporting download of NVRAM files of size + * > 8K. In the existing implementation when NVRAM is to be downloaded via the "vars" + * DHD IOVAR, the NVRAM is copied to the DHD Driver memory. Later on when "dwnldstate" is + * invoked with FALSE option, the NVRAM gets copied from the DHD driver to the Dongle + * memory. 
The simple way to support this feature without modifying the DHD application, + * driver logic is to increase the DHD_IOCTL_MAXLEN size. This macro defines the "size" + * of the buffer in which data is exchanged between the DHD App and DHD driver. + */ +#define DHD_IOCTL_MAXLEN (16384) /* max length ioctl buffer required */ #define DHD_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */ /* common ioctl definitions */ @@ -94,10 +99,16 @@ enum { #define DHD_REORDER_VAL 0x8000 #define DHD_NOCHECKDIED_VAL 0x20000 /* UTF WAR */ #define DHD_PNO_VAL 0x80000 -#define DHD_MSGTRACE_VAL 0x100000 +#define DHD_RTT_VAL 0x100000 +#define DHD_MSGTRACE_VAL 0x200000 #define DHD_FWLOG_VAL 0x400000 -#define DHD_RTT_VAL 0x200000 -#define DHD_IOV_INFO_VAL 0x800000 +#define DHD_DBGIF_VAL 0x800000 +#ifdef DHD_PCIE_NATIVE_RUNTIMEPM +#define DHD_RPM_VAL 0x1000000 +#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */ +#define DHD_PKT_MON_VAL 0x2000000 +#define DHD_PKT_MON_DUMP_VAL 0x4000000 +#define DHD_ERROR_MEM_VAL 0x8000000 #define DHD_ANDROID_VAL 0x10000 #define DHD_IW_VAL 0x20000 #define DHD_CFG_VAL 0x40000 @@ -106,18 +117,18 @@ enum { #ifdef SDTEST /* For pktgen iovar */ typedef struct dhd_pktgen { - uint version; /* To allow structure change tracking */ - uint freq; /* Max ticks between tx/rx attempts */ - uint count; /* Test packets to send/rcv each attempt */ - uint print; /* Print counts every attempts */ - uint total; /* Total packets (or bursts) */ - uint minlen; /* Minimum length of packets to send */ - uint maxlen; /* Maximum length of packets to send */ - uint numsent; /* Count of test packets sent */ - uint numrcvd; /* Count of test packets received */ - uint numfail; /* Count of test send failures */ - uint mode; /* Test mode (type of test packets) */ - uint stop; /* Stop after this many tx failures */ + uint32 version; /* To allow structure change tracking */ + uint32 freq; /* Max ticks between tx/rx attempts */ + uint32 count; /* Test packets to send/rcv each attempt */ + uint32 
print; /* Print counts every attempts */ + uint32 total; /* Total packets (or bursts) */ + uint32 minlen; /* Minimum length of packets to send */ + uint32 maxlen; /* Maximum length of packets to send */ + uint32 numsent; /* Count of test packets sent */ + uint32 numrcvd; /* Count of test packets received */ + uint32 numfail; /* Count of test send failures */ + uint32 mode; /* Test mode (type of test packets) */ + uint32 stop; /* Stop after this many tx failures */ } dhd_pktgen_t; /* Version in case structure changes */ @@ -138,7 +149,5 @@ typedef struct dhd_pktgen { #define DHD_IDLE_STOP (-1) /* Request SD clock be stopped (and use SD1 mode) */ -/* require default structure packing */ -#include #endif /* _dhdioctl_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/dnglevent.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/dnglevent.h new file mode 100644 index 000000000000..40a0047a6781 --- /dev/null +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/dnglevent.h @@ -0,0 +1,120 @@ +/* + * Broadcom Event protocol definitions + * + * Copyright (C) 1999-2017, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * Dependencies: bcmeth.h + * + * $Id: dnglevent.h $ + * + * <> + * + * ----------------------------------------------------------------------------- + * + */ + +/* + * Broadcom dngl Ethernet Events protocol defines + * + */ + +#ifndef _DNGLEVENT_H_ +#define _DNGLEVENT_H_ + +#ifndef _TYPEDEFS_H_ +#include +#endif +#include +#include + +/* This marks the start of a packed structure section. */ +#include +#define BCM_DNGL_EVENT_MSG_VERSION 1 +#define DNGL_E_RSRVD_1 0x0 +#define DNGL_E_RSRVD_2 0x1 +#define DNGL_E_SOCRAM_IND 0x2 +typedef BWL_PRE_PACKED_STRUCT struct +{ + uint16 version; /* Current version is 1 */ + uint16 reserved; /* reserved for any future extension */ + uint16 event_type; /* DNGL_E_SOCRAM_IND */ + uint16 datalen; /* Length of the event payload */ +} BWL_POST_PACKED_STRUCT bcm_dngl_event_msg_t; + +typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_event { + struct ether_header eth; + bcmeth_hdr_t bcm_hdr; + bcm_dngl_event_msg_t dngl_event; + /* data portion follows */ +} BWL_POST_PACKED_STRUCT bcm_dngl_event_t; + +typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_socramind { + uint16 tag; /* data tag */ + uint16 length; /* data length */ + uint8 value[1]; /* data value with variable length specified by length */ +} BWL_POST_PACKED_STRUCT bcm_dngl_socramind_t; + +/* SOCRAM_IND type tags */ +#define SOCRAM_IND_ASSERT_TAG 0x1 +#define SOCRAM_IND_TAG_HEALTH_CHECK 0x2 +/* Health check top level module tags */ +typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_healthcheck { + uint16 top_module_tag; /* top level module tag */ + uint16 top_module_len; /* Type of PCIE issue indication */ + uint8 value[1]; /* data value with variable length specified by length */ +} BWL_POST_PACKED_STRUCT bcm_dngl_healthcheck_t; + +/* Health check top 
level module tags */ +#define HEALTH_CHECK_TOP_LEVEL_MODULE_PCIEDEV_RTE 1 +#define HEALTH_CHECK_PCIEDEV_VERSION_1 1 +#define HEALTH_CHECK_PCIEDEV_FLAG_IN_D3_SHIFT 0 +#define HEALTH_CHECK_PCIEDEV_FLAG_AER_SHIFT 1 +#define HEALTH_CHECK_PCIEDEV_FLAG_LINKDOWN_SHIFT 2 +#define HEALTH_CHECK_PCIEDEV_FLAG_MSI_INT_SHIFT 3 +#define HEALTH_CHECK_PCIEDEV_FLAG_NODS_SHIFT 4 +#define HEALTH_CHECK_PCIEDEV_FLAG_IN_D3 1 << HEALTH_CHECK_PCIEDEV_FLAG_IN_D3_SHIFT +#define HEALTH_CHECK_PCIEDEV_FLAG_AER 1 << HEALTH_CHECK_PCIEDEV_FLAG_AER_SHIFT +#define HEALTH_CHECK_PCIEDEV_FLAG_LINKDOWN 1 << HEALTH_CHECK_PCIEDEV_FLAG_LINKDOWN_SHIFT +#define HEALTH_CHECK_PCIEDEV_FLAG_MSI_INT 1 << HEALTH_CHECK_PCIEDEV_FLAG_MSI_INT_SHIFT +#define HEALTH_CHECK_PCIEDEV_FLAG_NODS 1 << HEALTH_CHECK_PCIEDEV_FLAG_NODS_SHIFT +/* PCIE Module TAGs */ +#define HEALTH_CHECK_PCIEDEV_INDUCED_IND 0x1 +#define HEALTH_CHECK_PCIEDEV_H2D_DMA_IND 0x2 +#define HEALTH_CHECK_PCIEDEV_D2H_DMA_IND 0x3 +#define HEALTH_CHECK_PCIEDEV_IOCTL_STALL_IND 0x4 +#define HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND 0x5 +#define HEALTH_CHECK_PCIEDEV_NODS_IND 0x6 +#define HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND 0x7 + +#define HC_PCIEDEV_CONFIG_REGLIST_MAX 20 +typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_pcie_hc { + uint16 version; /* HEALTH_CHECK_PCIEDEV_VERSION_1 */ + uint16 reserved; + uint16 pcie_err_ind_type; /* PCIE Module TAGs */ + uint16 pcie_flag; + uint32 pcie_control_reg; + uint32 pcie_config_regs[HC_PCIEDEV_CONFIG_REGLIST_MAX]; +} BWL_POST_PACKED_STRUCT bcm_dngl_pcie_hc_t; + +/* This marks the end of a packed structure section. 
*/ +#include + +#endif /* _DNGLEVENT_H_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/eapol.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/eapol.h old mode 100755 new mode 100644 similarity index 95% rename from drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/eapol.h rename to drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/eapol.h index be4ef5358fa5..ef917abc06ea --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/eapol.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/eapol.h @@ -5,7 +5,7 @@ * IEEE Std 802.1X-2001 * IEEE 802.1X RADIUS Usage Guidelines * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,11 +24,11 @@ * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. - * + * * * <> * - * $Id: eapol.h 518342 2014-12-01 23:21:41Z $ + * $Id: eapol.h 700076 2017-05-17 14:42:22Z $ */ #ifndef _eapol_h_ @@ -41,7 +41,9 @@ /* This marks the start of a packed structure section. 
*/ #include +#if !defined(BCMCRYPTO_COMPONENT) #include +#endif /* !BCMCRYPTO_COMPONENT */ /* EAPOL for 802.3/Ethernet */ typedef BWL_PRE_PACKED_STRUCT struct { @@ -107,13 +109,14 @@ typedef BWL_PRE_PACKED_STRUCT struct { #define EAPOL_KEY_INDEX_MASK 0x7f /* WPA/802.11i/WPA2 EAPOL-Key header field sizes */ +#define EAPOL_AKW_BLOCK_LEN 8 #define EAPOL_WPA_KEY_REPLAY_LEN 8 #define EAPOL_WPA_KEY_NONCE_LEN 32 #define EAPOL_WPA_KEY_IV_LEN 16 #define EAPOL_WPA_KEY_RSC_LEN 8 #define EAPOL_WPA_KEY_ID_LEN 8 #define EAPOL_WPA_KEY_MIC_LEN 16 -#define EAPOL_WPA_KEY_DATA_LEN (EAPOL_WPA_MAX_KEY_SIZE + AKW_BLOCK_LEN) +#define EAPOL_WPA_KEY_DATA_LEN (EAPOL_WPA_MAX_KEY_SIZE + EAPOL_AKW_BLOCK_LEN) #define EAPOL_WPA_MAX_KEY_SIZE 32 /* WPA EAPOL-Key */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/epivers.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/epivers.h index dfa3aff501c0..4cf4c70348c8 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/epivers.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/epivers.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -22,7 +22,7 @@ * * <> * - * $Id: epivers.h.in,v 13.33 2010-09-08 22:08:53 $ + * $Id: epivers.h.in 596126 2015-10-29 19:53:48Z $ * */ @@ -31,21 +31,21 @@ #define EPI_MAJOR_VERSION 1 -#define EPI_MINOR_VERSION 363 +#define EPI_MINOR_VERSION 579 -#define EPI_RC_NUMBER 59 +#define EPI_RC_NUMBER 77 -#define EPI_INCREMENTAL_NUMBER 144 +#define EPI_INCREMENTAL_NUMBER 41 #define EPI_BUILD_NUMBER 0 -#define EPI_VERSION 1, 363, 59, 144 +#define EPI_VERSION 1, 579, 77, 41 -#define EPI_VERSION_NUM 0x0116b3b9 +#define EPI_VERSION_NUM 0x012434d29 -#define EPI_VERSION_DEV 1.363.59 +#define EPI_VERSION_DEV 1.579.77.41 /* Driver Version String, ASCII, 32 
chars max */ -#define EPI_VERSION_STR "1.363.59.144.10 (r)" +#define EPI_VERSION_STR "1.579.77.41.2 (r)" #endif /* _epivers_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/ethernet.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/ethernet.h old mode 100755 new mode 100644 similarity index 98% rename from drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/ethernet.h rename to drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/ethernet.h index 022fee41a196..2e338676a9eb --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/ethernet.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/ethernet.h @@ -1,7 +1,7 @@ /* * From FreeBSD 2.2.7: Fundamental constants relating to ethernet. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: ethernet.h 518342 2014-12-01 23:21:41Z $ + * $Id: ethernet.h 700076 2017-05-17 14:42:22Z $ */ #ifndef _NET_ETHERNET_H_ /* use native BSD ethernet.h when available */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/event_log.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/event_log.h index d06d811cb925..0c2120e19be3 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/event_log.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/event_log.h @@ -1,7 +1,7 @@ /* * EVENT_LOG system definitions * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,15 +24,17 @@ * * <> * - * $Id: event_log.h 591285 2015-10-07 11:56:29Z $ + * $Id: event_log.h 711908 2017-07-20 
10:37:34Z $ */ #ifndef _EVENT_LOG_H_ #define _EVENT_LOG_H_ #include -#include -#include +#include +#include +#include +#include /* logstrs header */ #define LOGSTRS_MAGIC 0x4C4F4753 @@ -46,6 +48,8 @@ #define EVENT_LOG_PSM_BLOCK_SIZE 0x200 #define EVENT_LOG_BUS_BLOCK_SIZE 0x200 #define EVENT_LOG_ERROR_BLOCK_SIZE 0x200 +/* Maximum event log record payload size = 1024 bytes or 256 words. */ +#define EVENT_LOG_MAX_RECORD_PAYLOAD_SIZE 256 /* * There are multiple levels of objects define here: @@ -90,9 +94,18 @@ typedef struct event_log_block { /* Start of packet sent for log tracing */ uint16 pktlen; /* Size of rest of block */ uint16 count; /* Logtrace counter */ - uint32 timestamp; /* Timestamp at start of use */ + uint32 extra_hdr_info; /* LSB: 6 bits set id. MSB 24 bits reserved */ uint32 event_logs; } event_log_block_t; +#define EVENT_LOG_BLOCK_HDRLEN 8 /* pktlen 2 + count 2 + extra_hdr_info 4 */ +#define NAN_EVENT_LOG_MIN_LENGTH 2 /* Minimum length of Nan event */ + +typedef enum { + SET_DESTINATION_INVALID = -1, + SET_DESTINATION_HOST = 0, + SET_DESTINATION_NONE = 1, + SET_DESTINATION_MAX +} event_log_set_destination_t; /* There can be multiple event_sets with each logging a set of * associated events (i.e, "fast" and "slow" events). 
@@ -108,6 +121,8 @@ typedef struct event_log_set { uint16 blockfill_count; /* Fill count for logtrace */ uint32 timestamp; /* Last timestamp event */ uint32 cyclecount; /* Cycles at last timestamp event */ + event_log_set_destination_t destination; + uint16 size; /* same size for all buffers in one set */ } event_log_set_t; /* Top data structure for access to everything else */ @@ -165,7 +180,7 @@ typedef struct { * */ -#ifndef EVENT_LOG_DUMPER +#if !defined(EVENT_LOG_DUMPER) && !defined(DHD_EFI) #ifndef EVENT_LOG_COMPILE @@ -182,8 +197,11 @@ typedef struct { #define EVENT_LOG_FAST_CAST_PAREN_ARGS(tag, pargs) #define EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, pargs) +#define EVENT_LOG_IS_ON(tag) 0 #define EVENT_LOG_IS_LOG_ON(tag) 0 +#define EVENT_LOG_BUFFER(tag, buf, size) + #else /* EVENT_LOG_COMPILE */ /* The first few are special because they can be done more efficiently @@ -268,7 +286,7 @@ typedef struct { do { \ if (event_log_tag_sets != NULL) { \ uint8 tag_flag = *(event_log_tag_sets + tag); \ - if (tag_flag != 0) { \ + if ((tag_flag & ~EVENT_LOG_TAG_FLAG_SET_MASK) != 0) { \ _EVENT_LOG(_EVENT_LOG, tag, fmt , ## __VA_ARGS__); \ } \ } \ @@ -284,7 +302,7 @@ typedef struct { do { \ if (event_log_tag_sets != NULL) { \ uint8 tag_flag = *(event_log_tag_sets + tag); \ - if (tag_flag != 0) { \ + if ((tag_flag & ~EVENT_LOG_TAG_FLAG_SET_MASK) != 0) { \ _EVENT_LOG(_EVENT_LOG_CAST, tag, fmt , ## __VA_ARGS__); \ } \ } \ @@ -312,6 +330,18 @@ typedef struct { #define EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, pargs) \ EVENT_LOG_COMPACT_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs)) +/* Minimal event logging. Event log internally calls event_logx() + * log return address in caller. 
+ * Note that the if(0){..} below is to avoid compiler warnings + * due to unused variables caused by this macro + */ +#define EVENT_LOG_RA(tag, args) \ + do { \ + if (0) { \ + EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, args); \ + } \ + event_log_caller_return_address(tag); \ + } while (0) #define EVENT_LOG_IS_LOG_ON(tag) (*(event_log_tag_sets + (tag)) & EVENT_LOG_TAG_FLAG_LOG) @@ -319,16 +349,22 @@ typedef struct { extern uint8 *event_log_tag_sets; -#include +extern int event_log_init(osl_t *osh); +extern int event_log_set_init(osl_t *osh, int set_num, int size); +extern int event_log_set_expand(osl_t *osh, int set_num, int size); +extern int event_log_set_shrink(osl_t *osh, int set_num, int size); -extern int event_log_init(si_t *sih); -extern int event_log_set_init(si_t *sih, int set_num, int size); -extern int event_log_set_expand(si_t *sih, int set_num, int size); -extern int event_log_set_shrink(si_t *sih, int set_num, int size); extern int event_log_tag_start(int tag, int set_num, int flags); extern int event_log_tag_stop(int tag); + +typedef void (*event_log_logtrace_trigger_fn_t)(void *ctx); +void event_log_set_logtrace_trigger_fn(event_log_logtrace_trigger_fn_t fn, void *ctx); + +event_log_top_t *event_log_get_top(void); + extern int event_log_get(int set_num, int buflen, void *buf); -extern uint8 * event_log_next_logtrace(int set_num); + +extern uint8 *event_log_next_logtrace(int set_num); extern void event_log0(int tag, int fmtNum); extern void event_log1(int tag, int fmtNum, uint32 t1); @@ -339,6 +375,9 @@ extern void event_logn(int num_args, int tag, int fmtNum, ...); extern void event_log_time_sync(uint32 ms); extern void event_log_buffer(int tag, uint8 *buf, int size); +extern void event_log_caller_return_address(int tag); +extern int event_log_set_destination_set(int set, event_log_set_destination_t dest); +extern event_log_set_destination_t event_log_set_destination_get(int set); #endif /* EVENT_LOG_DUMPER */ @@ -346,4 +385,4 @@ extern void 
event_log_buffer(int tag, uint8 *buf, int size); #endif /* __ASSEMBLER__ */ -#endif /* _EVENT_LOG_H */ +#endif /* _EVENT_LOG_H_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/event_log_payload.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/event_log_payload.h new file mode 100644 index 000000000000..d01264c5b8f2 --- /dev/null +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/event_log_payload.h @@ -0,0 +1,673 @@ +/* + * EVENT_LOG System Definitions + * + * This file describes the payloads of event log entries that are data buffers + * rather than formatted string entries. The contents are generally XTLVs. + * + * Copyright (C) 1999-2017, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: event_log_payload.h 700076 2017-05-17 14:42:22Z $ + */ + +#ifndef _EVENT_LOG_PAYLOAD_H_ +#define _EVENT_LOG_PAYLOAD_H_ + +#include +#include +#include +#include + +#define EVENT_LOG_XTLV_ID_STR 0 /**< XTLV ID for a string */ +#define EVENT_LOG_XTLV_ID_TXQ_SUM 1 /**< XTLV ID for txq_summary_t */ +#define EVENT_LOG_XTLV_ID_SCBDATA_SUM 2 /**< XTLV ID for cb_subq_summary_t */ +#define EVENT_LOG_XTLV_ID_SCBDATA_AMPDU_TX_SUM 3 /**< XTLV ID for scb_ampdu_tx_summary_t */ +#define EVENT_LOG_XTLV_ID_BSSCFGDATA_SUM 4 /**< XTLV ID for bsscfg_q_summary_t */ +#define EVENT_LOG_XTLV_ID_UCTXSTATUS 5 /**< XTLV ID for ucode TxStatus array */ +#define EVENT_LOG_XTLV_ID_TXQ_SUM_V2 6 /**< XTLV ID for txq_summary_v2_t */ + +/** + * An XTLV holding a string + * String is not null terminated, length is the XTLV len. + */ +typedef struct xtlv_string { + uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_STR */ + uint16 len; /* XTLV Len (String length) */ + char str[1]; /* var len array characters */ +} xtlv_string_t; + +#define XTLV_STRING_FULL_LEN(str_len) (BCM_XTLV_HDR_SIZE + (str_len) * sizeof(char)) + +/** + * Summary for a single TxQ context + * Two of these will be used per TxQ context---one for the high TxQ, and one for + * the low txq that contains DMA prepared pkts. The high TxQ is a full multi-precidence + * queue and also has a BSSCFG map to identify the BSSCFGS associated with the queue context. + * The low txq counterpart does not populate the BSSCFG map. + * The excursion queue will have no bsscfgs associated and is the first queue dumped. 
+ */ +typedef struct txq_summary { + uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_TXQ_SUM */ + uint16 len; /* XTLV Len */ + uint32 bsscfg_map; /* bitmap of bsscfg indexes associated with this queue */ + uint32 stopped; /* flow control bitmap */ + uint8 prec_count; /* count of precedences/fifos and len of following array */ + uint8 pad; + uint16 plen[1]; /* var len array of lengths of each prec/fifo in the queue */ +} txq_summary_t; + +#define TXQ_SUMMARY_LEN (OFFSETOF(txq_summary_t, plen)) +#define TXQ_SUMMARY_FULL_LEN(num_q) (TXQ_SUMMARY_LEN + (num_q) * sizeof(uint16)) + +typedef struct txq_summary_v2 { + uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_TXQ_SUM_V2 */ + uint16 len; /* XTLV Len */ + uint32 bsscfg_map; /* bitmap of bsscfg indexes associated with this queue */ + uint32 stopped; /* flow control bitmap */ + uint32 hw_stopped; /* flow control bitmap */ + uint8 prec_count; /* count of precedences/fifos and len of following array */ + uint8 pad; + uint16 plen[1]; /* var len array of lengths of each prec/fifo in the queue */ +} txq_summary_v2_t; + +#define TXQ_SUMMARY_V2_LEN (OFFSETOF(txq_summary_v2_t, plen)) +#define TXQ_SUMMARY_V2_FULL_LEN(num_q) (TXQ_SUMMARY_V2_LEN + (num_q) * sizeof(uint16)) + +/** + * Summary for tx datapath of an SCB cubby + * This is a generic summary structure (one size fits all) with + * a cubby ID and sub-ID to differentiate SCB cubby types and possible sub-queues. 
+ */ +typedef struct scb_subq_summary { + uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_SCBDATA_SUM */ + uint16 len; /* XTLV Len */ + uint32 flags; /* cubby specficic flags */ + uint8 cubby_id; /* ID registered for cubby */ + uint8 sub_id; /* sub ID if a cubby has more than one queue */ + uint8 prec_count; /* count of precedences/fifos and len of following array */ + uint8 pad; + uint16 plen[1]; /* var len array of lengths of each prec/fifo in the queue */ +} scb_subq_summary_t; + +#define SCB_SUBQ_SUMMARY_LEN (OFFSETOF(scb_subq_summary_t, plen)) +#define SCB_SUBQ_SUMMARY_FULL_LEN(num_q) (SCB_SUBQ_SUMMARY_LEN + (num_q) * sizeof(uint16)) + +/* scb_subq_summary_t.flags for APPS */ +#define SCBDATA_APPS_F_PS 0x00000001 +#define SCBDATA_APPS_F_PSPEND 0x00000002 +#define SCBDATA_APPS_F_INPVB 0x00000004 +#define SCBDATA_APPS_F_APSD_USP 0x00000008 +#define SCBDATA_APPS_F_TXBLOCK 0x00000010 +#define SCBDATA_APPS_F_APSD_HPKT_TMR 0x00000020 +#define SCBDATA_APPS_F_APSD_TX_PEND 0x00000040 +#define SCBDATA_APPS_F_INTRANS 0x00000080 +#define SCBDATA_APPS_F_OFF_PEND 0x00000100 +#define SCBDATA_APPS_F_OFF_BLOCKED 0x00000200 +#define SCBDATA_APPS_F_OFF_IN_PROG 0x00000400 + + +/** + * Summary for tx datapath AMPDU SCB cubby + * This is a specific data structure to describe the AMPDU datapath state for an SCB + * used instead of scb_subq_summary_t. + * Info is for one TID, so one will be dumped per BA TID active for an SCB. 
+ */ +typedef struct scb_ampdu_tx_summary { + uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_SCBDATA_AMPDU_TX_SUM */ + uint16 len; /* XTLV Len */ + uint32 flags; /* misc flags */ + uint8 tid; /* initiator TID (priority) */ + uint8 ba_state; /* internal BA state */ + uint8 bar_cnt; /* number of bars sent with no progress */ + uint8 retry_bar; /* reason code if bar to be retried at watchdog */ + uint16 barpending_seq; /* seqnum for bar */ + uint16 bar_ackpending_seq; /* seqnum of bar for which ack is pending */ + uint16 start_seq; /* seqnum of the first unacknowledged packet */ + uint16 max_seq; /* max unacknowledged seqnum sent */ + uint32 released_bytes_inflight; /* Number of bytes pending in bytes */ + uint32 released_bytes_target; +} scb_ampdu_tx_summary_t; + +/* scb_ampdu_tx_summary.flags defs */ +#define SCBDATA_AMPDU_TX_F_BAR_ACKPEND 0x00000001 /* bar_ackpending */ + +/** XTLV stuct to summarize a BSSCFG's packet queue */ +typedef struct bsscfg_q_summary { + uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_BSSCFGDATA_SUM */ + uint16 len; /* XTLV Len */ + struct ether_addr BSSID; /* BSSID */ + uint8 bsscfg_idx; /* bsscfg index */ + uint8 type; /* bsscfg type enumeration: BSSCFG_TYPE_XXX */ + uint8 subtype; /* bsscfg subtype enumeration: BSSCFG_SUBTYPE_XXX */ + uint8 prec_count; /* count of precedences/fifos and len of following array */ + uint16 plen[1]; /* var len array of lengths of each prec/fifo in the queue */ +} bsscfg_q_summary_t; + +#define BSSCFG_Q_SUMMARY_LEN (OFFSETOF(bsscfg_q_summary_t, plen)) +#define BSSCFG_Q_SUMMARY_FULL_LEN(num_q) (BSSCFG_Q_SUMMARY_LEN + (num_q) * sizeof(uint16)) + +/** + * An XTLV holding a TxStats array + * TxStatus entries are 8 or 16 bytes, size in words (2 or 4) givent in + * entry_size field. 
+ * Array is uint32 words + */ +typedef struct xtlv_uc_txs { + uint16 id; /* XTLV ID: EVENT_LOG_XTLV_ID_UCTXSTATUS */ + uint16 len; /* XTLV Len */ + uint8 entry_size; /* num uint32 words per entry */ + uint8 pad[3]; /* reserved, zero */ + uint32 w[1]; /* var len array of words */ +} xtlv_uc_txs_t; + +#define XTLV_UCTXSTATUS_LEN (OFFSETOF(xtlv_uc_txs_t, w)) +#define XTLV_UCTXSTATUS_FULL_LEN(words) (XTLV_UCTXSTATUS_LEN + (words) * sizeof(uint32)) + +#define SCAN_SUMMARY_VERSION 1 +/* Scan flags */ +#define SCAN_SUM_CHAN_INFO 0x1 +/* Scan_sum flags */ +#define BAND5G_SIB_ENAB 0x2 +#define BAND2G_SIB_ENAB 0x4 +#define PARALLEL_SCAN 0x8 +#define SCAN_ABORT 0x10 + +/* scan_channel_info flags */ +#define ACTIVE_SCAN_SCN_SUM 0x2 +#define SCAN_SUM_WLC_CORE0 0x4 +#define SCAN_SUM_WLC_CORE1 0x8 +#define HOME_CHAN 0x10 + +typedef struct wl_scan_ssid_info +{ + uint8 ssid_len; /* the length of SSID */ + uint8 ssid[32]; /* SSID string */ +} wl_scan_ssid_info_t; + +typedef struct wl_scan_channel_info { + uint16 chanspec; /* chanspec scanned */ + uint16 reserv; + uint32 start_time; /* Scan start time in + * milliseconds for the chanspec + * or home_dwell time start + */ + uint32 end_time; /* Scan end time in + * milliseconds for the chanspec + * or home_dwell time end + */ + uint16 probe_count; /* No of probes sent out. For future use + */ + uint16 scn_res_count; /* Count of scan_results found per + * channel. For future use + */ +} wl_scan_channel_info_t; + +typedef struct wl_scan_summary_info { + uint32 total_chan_num; /* Total number of channels scanned */ + uint32 scan_start_time; /* Scan start time in milliseconds */ + uint32 scan_end_time; /* Scan end time in milliseconds */ + wl_scan_ssid_info_t ssid[1]; /* SSID being scanned in current + * channel. For future use + */ +} wl_scan_summary_info_t; + +struct wl_scan_summary { + uint8 version; /* Version */ + uint8 reserved; + uint16 len; /* Length of the data buffer including SSID + * list. 
+ */ + uint16 sync_id; /* Scan Sync ID */ + uint16 scan_flags; /* flags [0] or SCAN_SUM_CHAN_INFO = */ + /* channel_info, if not set */ + /* it is scan_summary_info */ + /* when channel_info is used, */ + /* the following flag bits are overridden: */ + /* flags[1] or ACTIVE_SCAN_SCN_SUM = active channel if set */ + /* passive if not set */ + /* flags[2] or WLC_CORE0 = if set, represents wlc_core0 */ + /* flags[3] or WLC_CORE1 = if set, represents wlc_core1 */ + /* flags[4] or HOME_CHAN = if set, represents home-channel */ + /* flags[5:15] = reserved */ + /* when scan_summary_info is used, */ + /* the following flag bits are used: */ + /* flags[1] or BAND5G_SIB_ENAB = */ + /* allowSIBParallelPassiveScan on 5G band */ + /* flags[2] or BAND2G_SIB_ENAB = */ + /* allowSIBParallelPassiveScan on 2G band */ + /* flags[3] or PARALLEL_SCAN = Parallel scan enabled or not */ + /* flags[4] or SCAN_ABORT = SCAN_ABORTED scenario */ + /* flags[5:15] = reserved */ + union { + wl_scan_channel_info_t scan_chan_info; /* scan related information + * for each channel scanned + */ + wl_scan_summary_info_t scan_sum_info; /* Cumulative scan related + * information. + */ + } u; +}; + +/* Channel switch log record structure + * Host may map the following structure on channel switch event log record + * received from dongle. Note that all payload entries in event log record are + * uint32/int32. 
+ */ +typedef struct wl_chansw_event_log_record { + uint32 time; /* Time in us */ + uint32 old_chanspec; /* Old channel spec */ + uint32 new_chanspec; /* New channel spec */ + uint32 chansw_reason; /* Reason for channel change */ + int32 dwell_time; +} wl_chansw_event_log_record_t; + +/* Sub-block type for EVENT_LOG_TAG_AMPDU_DUMP */ +#define WL_AMPDU_STATS_TYPE_RXMCSx1 0 /* RX MCS rate (Nss = 1) */ +#define WL_AMPDU_STATS_TYPE_RXMCSx2 1 +#define WL_AMPDU_STATS_TYPE_RXMCSx3 2 +#define WL_AMPDU_STATS_TYPE_RXMCSx4 3 +#define WL_AMPDU_STATS_TYPE_RXVHTx1 4 /* RX VHT rate (Nss = 1) */ +#define WL_AMPDU_STATS_TYPE_RXVHTx2 5 +#define WL_AMPDU_STATS_TYPE_RXVHTx3 6 +#define WL_AMPDU_STATS_TYPE_RXVHTx4 7 +#define WL_AMPDU_STATS_TYPE_TXMCSx1 8 /* TX MCS rate (Nss = 1) */ +#define WL_AMPDU_STATS_TYPE_TXMCSx2 9 +#define WL_AMPDU_STATS_TYPE_TXMCSx3 10 +#define WL_AMPDU_STATS_TYPE_TXMCSx4 11 +#define WL_AMPDU_STATS_TYPE_TXVHTx1 12 /* TX VHT rate (Nss = 1) */ +#define WL_AMPDU_STATS_TYPE_TXVHTx2 13 +#define WL_AMPDU_STATS_TYPE_TXVHTx3 14 +#define WL_AMPDU_STATS_TYPE_TXVHTx4 15 +#define WL_AMPDU_STATS_TYPE_RXMCSSGI 16 /* RX SGI usage (for all MCS rates) */ +#define WL_AMPDU_STATS_TYPE_TXMCSSGI 17 /* TX SGI usage (for all MCS rates) */ +#define WL_AMPDU_STATS_TYPE_RXVHTSGI 18 /* RX SGI usage (for all VHT rates) */ +#define WL_AMPDU_STATS_TYPE_TXVHTSGI 19 /* TX SGI usage (for all VHT rates) */ +#define WL_AMPDU_STATS_TYPE_RXMCSPER 20 /* RX PER (for all MCS rates) */ +#define WL_AMPDU_STATS_TYPE_TXMCSPER 21 /* TX PER (for all MCS rates) */ +#define WL_AMPDU_STATS_TYPE_RXVHTPER 22 /* RX PER (for all VHT rates) */ +#define WL_AMPDU_STATS_TYPE_TXVHTPER 23 /* TX PER (for all VHT rates) */ +#define WL_AMPDU_STATS_TYPE_RXDENS 24 /* RX AMPDU density */ +#define WL_AMPDU_STATS_TYPE_TXDENS 25 /* TX AMPDU density */ +#define WL_AMPDU_STATS_TYPE_RXMCSOK 26 /* RX all MCS rates */ +#define WL_AMPDU_STATS_TYPE_RXVHTOK 27 /* RX all VHT rates */ +#define WL_AMPDU_STATS_TYPE_TXMCSALL 28 /* TX all MCS 
rates */ +#define WL_AMPDU_STATS_TYPE_TXVHTALL 29 /* TX all VHT rates */ +#define WL_AMPDU_STATS_TYPE_TXMCSOK 30 /* TX all MCS rates */ +#define WL_AMPDU_STATS_TYPE_TXVHTOK 31 /* TX all VHT rates */ + +#define WL_AMPDU_STATS_MAX_CNTS 64 + +typedef struct { + uint16 type; /* AMPDU statistics sub-type */ + uint16 len; /* Number of 32-bit counters */ + uint32 counters[WL_AMPDU_STATS_MAX_CNTS]; +} wl_ampdu_stats_generic_t; + +typedef struct { + uint16 type; /* AMPDU statistics sub-type */ + uint16 len; /* Number of 32-bit counters + 2 */ + uint32 total_ampdu; + uint32 total_mpdu; + uint32 aggr_dist[WL_AMPDU_STATS_MAX_CNTS + 1]; +} wl_ampdu_stats_aggrsz_t; + +/* Sub-block type for EVENT_LOG_TAG_MSCHPROFILE */ +#define WL_MSCH_PROFILER_START 0 /* start event check */ +#define WL_MSCH_PROFILER_EXIT 1 /* exit event check */ +#define WL_MSCH_PROFILER_REQ 2 /* request event */ +#define WL_MSCH_PROFILER_CALLBACK 3 /* call back event */ +#define WL_MSCH_PROFILER_MESSAGE 4 /* message event */ +#define WL_MSCH_PROFILER_PROFILE_START 5 +#define WL_MSCH_PROFILER_PROFILE_END 6 +#define WL_MSCH_PROFILER_REQ_HANDLE 7 +#define WL_MSCH_PROFILER_REQ_ENTITY 8 +#define WL_MSCH_PROFILER_CHAN_CTXT 9 +#define WL_MSCH_PROFILER_EVENT_LOG 10 +#define WL_MSCH_PROFILER_REQ_TIMING 11 +#define WL_MSCH_PROFILER_TYPE_MASK 0x00ff +#define WL_MSCH_PROFILER_WLINDEX_SHIFT 8 +#define WL_MSCH_PROFILER_WLINDEX_MASK 0x0f00 +#define WL_MSCH_PROFILER_VER_SHIFT 12 +#define WL_MSCH_PROFILER_VER_MASK 0xf000 + +/* MSCH Event data current verion */ +#define WL_MSCH_PROFILER_VER 2 + +/* msch version history */ +#define WL_MSCH_PROFILER_RSDB_VER 1 +#define WL_MSCH_PROFILER_REPORT_VER 2 + +/* msch collect header size */ +#define WL_MSCH_PROFILE_HEAD_SIZE OFFSETOF(msch_collect_tlv_t, value) + +/* msch event log header size */ +#define WL_MSCH_EVENT_LOG_HEAD_SIZE OFFSETOF(msch_event_log_profiler_event_data_t, data) + +/* MSCH data buffer size */ +#define WL_MSCH_PROFILER_BUFFER_SIZE 512 + +/* request type used in 
wlc_msch_req_param_t struct */ +#define WL_MSCH_RT_BOTH_FIXED 0 /* both start and end time is fixed */ +#define WL_MSCH_RT_START_FLEX 1 /* start time is flexible and duration is fixed */ +#define WL_MSCH_RT_DUR_FLEX 2 /* start time is fixed and end time is flexible */ +#define WL_MSCH_RT_BOTH_FLEX 3 /* Both start and duration is flexible */ + +/* Flags used in wlc_msch_req_param_t struct */ +#define WL_MSCH_REQ_FLAGS_CHAN_CONTIGUOUS (1 << 0) /* Don't break up channels in chanspec_list */ +#define WL_MSCH_REQ_FLAGS_MERGE_CONT_SLOTS (1 << 1) /* No slot end if slots are continous */ +#define WL_MSCH_REQ_FLAGS_PREMTABLE (1 << 2) /* Req can be pre-empted by PREMT_CURTS req */ +#define WL_MSCH_REQ_FLAGS_PREMT_CURTS (1 << 3) /* Pre-empt request at the end of curts */ +#define WL_MSCH_REQ_FLAGS_PREMT_IMMEDIATE (1 << 4) /* Pre-empt cur_ts immediately */ + +/* Requested slot Callback states + * req->pend_slot/cur_slot->flags + */ +#define WL_MSCH_RC_FLAGS_ONCHAN_FIRE (1 << 0) +#define WL_MSCH_RC_FLAGS_START_FIRE_DONE (1 << 1) +#define WL_MSCH_RC_FLAGS_END_FIRE_DONE (1 << 2) +#define WL_MSCH_RC_FLAGS_ONFIRE_DONE (1 << 3) +#define WL_MSCH_RC_FLAGS_SPLIT_SLOT_START (1 << 4) +#define WL_MSCH_RC_FLAGS_SPLIT_SLOT_END (1 << 5) +#define WL_MSCH_RC_FLAGS_PRE_ONFIRE_DONE (1 << 6) + +/* Request entity flags */ +#define WL_MSCH_ENTITY_FLAG_MULTI_INSTANCE (1 << 0) + +/* Request Handle flags */ +#define WL_MSCH_REQ_HDL_FLAGS_NEW_REQ (1 << 0) /* req_start callback */ + +/* MSCH state flags (msch_info->flags) */ +#define WL_MSCH_STATE_IN_TIEMR_CTXT 0x1 +#define WL_MSCH_STATE_SCHD_PENDING 0x2 + +/* MSCH callback type */ +#define WL_MSCH_CT_REQ_START 0x1 +#define WL_MSCH_CT_ON_CHAN 0x2 +#define WL_MSCH_CT_SLOT_START 0x4 +#define WL_MSCH_CT_SLOT_END 0x8 +#define WL_MSCH_CT_SLOT_SKIP 0x10 +#define WL_MSCH_CT_OFF_CHAN 0x20 +#define WL_MSCH_CT_OFF_CHAN_DONE 0x40 +#define WL_MSCH_CT_REQ_END 0x80 +#define WL_MSCH_CT_PARTIAL 0x100 +#define WL_MSCH_CT_PRE_ONCHAN 0x200 +#define 
WL_MSCH_CT_PRE_REQ_START 0x400 + +/* MSCH command bits */ +#define WL_MSCH_CMD_ENABLE_BIT 0x01 +#define WL_MSCH_CMD_PROFILE_BIT 0x02 +#define WL_MSCH_CMD_CALLBACK_BIT 0x04 +#define WL_MSCH_CMD_REGISTER_BIT 0x08 +#define WL_MSCH_CMD_ERROR_BIT 0x10 +#define WL_MSCH_CMD_DEBUG_BIT 0x20 +#define WL_MSCH_CMD_INFOM_BIT 0x40 +#define WL_MSCH_CMD_TRACE_BIT 0x80 +#define WL_MSCH_CMD_ALL_BITS 0xfe +#define WL_MSCH_CMD_SIZE_MASK 0x00ff0000 +#define WL_MSCH_CMD_SIZE_SHIFT 16 +#define WL_MSCH_CMD_VER_MASK 0xff000000 +#define WL_MSCH_CMD_VER_SHIFT 24 + +/* maximum channels returned by the get valid channels iovar */ +#define WL_MSCH_NUMCHANNELS 64 + +typedef struct msch_collect_tlv { + uint16 type; + uint16 size; + char value[1]; +} msch_collect_tlv_t; + +typedef struct msch_profiler_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; +} msch_profiler_event_data_t; + +typedef struct msch_start_profiler_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint32 status; +} msch_start_profiler_event_data_t; + +typedef struct msch_message_profiler_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + char message[1]; /* message */ +} msch_message_profiler_event_data_t; + +typedef struct msch_event_log_profiler_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + event_log_hdr_t hdr; /* event log header */ + uint32 data[9]; /* event data */ +} msch_event_log_profiler_event_data_t; + +typedef struct msch_req_param_profiler_event_data { + uint16 flags; /* Describe various request properties */ + uint8 req_type; /* Describe start and end time flexiblilty */ + uint8 priority; /* Define the request priority */ + uint32 start_time_l; /* Requested start time offset in us unit */ + uint32 start_time_h; + uint32 duration; /* Requested duration in us unit */ + uint32 interval; /* Requested periodic interval in us unit, + * 0 means non-periodic + */ + union { + uint32 dur_flex; /* MSCH_REG_DUR_FLEX, min_dur = duration - dur_flex 
*/ + struct { + uint32 min_dur; /* min duration for traffic, maps to home_time */ + uint32 max_away_dur; /* max acceptable away dur, maps to home_away_time */ + uint32 hi_prio_time_l; + uint32 hi_prio_time_h; + uint32 hi_prio_interval; /* repeated high priority interval */ + } bf; + } flex; +} msch_req_param_profiler_event_data_t; + +typedef struct msch_req_timing_profiler_event_data { + uint32 p_req_timing; + uint32 p_prev; + uint32 p_next; + uint16 flags; + uint16 timeslot_ptr; + uint32 fire_time_l; + uint32 fire_time_h; + uint32 pre_start_time_l; + uint32 pre_start_time_h; + uint32 start_time_l; + uint32 start_time_h; + uint32 end_time_l; + uint32 end_time_h; + uint32 p_timeslot; +} msch_req_timing_profiler_event_data_t; + +typedef struct msch_chan_ctxt_profiler_event_data { + uint32 p_chan_ctxt; + uint32 p_prev; + uint32 p_next; + uint16 chanspec; + uint16 bf_sch_pending; + uint32 bf_link_prev; + uint32 bf_link_next; + uint32 onchan_time_l; + uint32 onchan_time_h; + uint32 actual_onchan_dur_l; + uint32 actual_onchan_dur_h; + uint32 pend_onchan_dur_l; + uint32 pend_onchan_dur_h; + uint16 req_entity_list_cnt; + uint16 req_entity_list_ptr; + uint16 bf_entity_list_cnt; + uint16 bf_entity_list_ptr; + uint32 bf_skipped_count; +} msch_chan_ctxt_profiler_event_data_t; + +typedef struct msch_req_entity_profiler_event_data { + uint32 p_req_entity; + uint32 req_hdl_link_prev; + uint32 req_hdl_link_next; + uint32 chan_ctxt_link_prev; + uint32 chan_ctxt_link_next; + uint32 rt_specific_link_prev; + uint32 rt_specific_link_next; + uint32 start_fixed_link_prev; + uint32 start_fixed_link_next; + uint32 both_flex_list_prev; + uint32 both_flex_list_next; + uint16 chanspec; + uint16 priority; + uint16 cur_slot_ptr; + uint16 pend_slot_ptr; + uint16 pad; + uint16 chan_ctxt_ptr; + uint32 p_chan_ctxt; + uint32 p_req_hdl; + uint32 bf_last_serv_time_l; + uint32 bf_last_serv_time_h; + uint16 onchan_chn_idx; + uint16 cur_chn_idx; + uint32 flags; + uint32 actual_start_time_l; + uint32 
actual_start_time_h; + uint32 curts_fire_time_l; + uint32 curts_fire_time_h; +} msch_req_entity_profiler_event_data_t; + +typedef struct msch_req_handle_profiler_event_data { + uint32 p_req_handle; + uint32 p_prev; + uint32 p_next; + uint32 cb_func; + uint32 cb_ctxt; + uint16 req_param_ptr; + uint16 req_entity_list_cnt; + uint16 req_entity_list_ptr; + uint16 chan_cnt; + uint32 flags; + uint16 chanspec_list; + uint16 chanspec_cnt; + uint16 chan_idx; + uint16 last_chan_idx; + uint32 req_time_l; + uint32 req_time_h; +} msch_req_handle_profiler_event_data_t; + +typedef struct msch_profiler_profiler_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint32 free_req_hdl_list; + uint32 free_req_entity_list; + uint32 free_chan_ctxt_list; + uint32 free_chanspec_list; + uint16 cur_msch_timeslot_ptr; + uint16 next_timeslot_ptr; + uint32 p_cur_msch_timeslot; + uint32 p_next_timeslot; + uint32 cur_armed_timeslot; + uint32 flags; + uint32 ts_id; + uint32 service_interval; + uint32 max_lo_prio_interval; + uint16 flex_list_cnt; + uint16 msch_chanspec_alloc_cnt; + uint16 msch_req_entity_alloc_cnt; + uint16 msch_req_hdl_alloc_cnt; + uint16 msch_chan_ctxt_alloc_cnt; + uint16 msch_timeslot_alloc_cnt; + uint16 msch_req_hdl_list_cnt; + uint16 msch_req_hdl_list_ptr; + uint16 msch_chan_ctxt_list_cnt; + uint16 msch_chan_ctxt_list_ptr; + uint16 msch_req_timing_list_cnt; + uint16 msch_req_timing_list_ptr; + uint16 msch_start_fixed_list_cnt; + uint16 msch_start_fixed_list_ptr; + uint16 msch_both_flex_req_entity_list_cnt; + uint16 msch_both_flex_req_entity_list_ptr; + uint16 msch_start_flex_list_cnt; + uint16 msch_start_flex_list_ptr; + uint16 msch_both_flex_list_cnt; + uint16 msch_both_flex_list_ptr; + uint32 slotskip_flag; +} msch_profiler_profiler_event_data_t; + +typedef struct msch_req_profiler_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint16 chanspec_cnt; + uint16 chanspec_ptr; + uint16 req_param_ptr; + uint16 pad; +} 
msch_req_profiler_event_data_t; + +typedef struct msch_callback_profiler_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint16 type; /* callback type */ + uint16 chanspec; /* actual chanspec, may different with requested one */ + uint32 start_time_l; /* time slot start time low 32bit */ + uint32 start_time_h; /* time slot start time high 32bit */ + uint32 end_time_l; /* time slot end time low 32 bit */ + uint32 end_time_h; /* time slot end time high 32 bit */ + uint32 timeslot_id; /* unique time slot id */ + uint32 p_req_hdl; + uint32 onchan_idx; /* Current channel index */ + uint32 cur_chan_seq_start_time_l; /* start time of current sequence */ + uint32 cur_chan_seq_start_time_h; +} msch_callback_profiler_event_data_t; + +typedef struct msch_timeslot_profiler_event_data { + uint32 p_timeslot; + uint32 timeslot_id; + uint32 pre_start_time_l; + uint32 pre_start_time_h; + uint32 end_time_l; + uint32 end_time_h; + uint32 sch_dur_l; + uint32 sch_dur_h; + uint32 p_chan_ctxt; + uint32 fire_time_l; + uint32 fire_time_h; + uint32 state; +} msch_timeslot_profiler_event_data_t; + +typedef struct msch_register_params { + uint16 wlc_index; /* Optional wlc index */ + uint16 flags; /* Describe various request properties */ + uint32 req_type; /* Describe start and end time flexiblilty */ + uint16 id; /* register id */ + uint16 priority; /* Define the request priority */ + uint32 start_time; /* Requested start time offset in ms unit */ + uint32 duration; /* Requested duration in ms unit */ + uint32 interval; /* Requested periodic interval in ms unit, + * 0 means non-periodic + */ + uint32 dur_flex; /* MSCH_REG_DUR_FLEX, min_dur = duration - dur_flex */ + uint32 min_dur; /* min duration for traffic, maps to home_time */ + uint32 max_away_dur; /* max acceptable away dur, maps to home_away_time */ + uint32 hi_prio_time; + uint32 hi_prio_interval; /* repeated high priority interval */ + uint32 chanspec_cnt; + uint16 chanspec_list[WL_MSCH_NUMCHANNELS]; +} 
msch_register_params_t; + +#endif /* _EVENT_LOG_PAYLOAD_H_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/event_log_set.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/event_log_set.h similarity index 74% rename from drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/event_log_set.h rename to drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/event_log_set.h index 910cbcf169af..db28360ea265 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/event_log_set.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/event_log_set.h @@ -1,7 +1,7 @@ /* * EVENT_LOG system definitions * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: event_log.h 241182 2011-02-17 21:50:03Z $ + * $Id: event_log_set.h 700076 2017-05-17 14:42:22Z $ */ #ifndef _EVENT_LOG_SET_H_ @@ -41,5 +41,13 @@ #define EVENT_LOG_SET_PSM 2 #define EVENT_LOG_SET_ERROR 3 #define EVENT_LOG_SET_MEM_API 4 +/* Share the set with MEM_API for now to limit ROM invalidation. + * The above set is used in dingo only + * On trunk, MSCH should move to a different set. + */ +#define EVENT_LOG_SET_MSCH_PROFILER 4 +#define EVENT_LOG_SET_ECOUNTERS 5 /* Host to instantiate this for ecounters. 
*/ +#define EVENT_LOG_SET_6 6 /* Instantiated by host for channel switch logs */ +#define EVENT_LOG_SET_7 7 /* Instantiated by host for AMPDU stats */ #endif /* _EVENT_LOG_SET_H_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/event_log_tag.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/event_log_tag.h similarity index 66% rename from drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/event_log_tag.h rename to drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/event_log_tag.h index 25acbc7420e1..41332e7aa7a3 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/event_log_tag.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/event_log_tag.h @@ -1,7 +1,7 @@ /* * EVENT_LOG system definitions * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: event_log.h 241182 2011-02-17 21:50:03Z $ + * $Id: event_log_tag.h 700681 2017-05-20 16:37:38Z $ */ #ifndef _EVENT_LOG_TAG_H_ @@ -76,7 +76,11 @@ #define EVENT_LOG_TAG_PCI_DBG 52 #define EVENT_LOG_TAG_PCI_DATA 53 #define EVENT_LOG_TAG_PCI_RING 54 +/* EVENT_LOG_TAG_AWDL_TRACE_RANGING will be removed after wlc_ranging merge from IGUANA + * keeping it here to avoid compilation error on trunk + */ #define EVENT_LOG_TAG_AWDL_TRACE_RANGING 55 +#define EVENT_LOG_TAG_RANGING_TRACE 55 #define EVENT_LOG_TAG_WL_ERROR 56 #define EVENT_LOG_TAG_PHY_ERROR 57 #define EVENT_LOG_TAG_OTP_ERROR 58 @@ -127,7 +131,68 @@ #define EVENT_LOG_TAG_LQM 103 #define EVENT_LOG_TAG_TRACE_WL_INFO 104 #define EVENT_LOG_TAG_TRACE_BTCOEX_INFO 105 -#define EVENT_LOG_TAG_MAX 105 /* Set to the same value of last tag, not last tag + 1 */ +#define EVENT_LOG_TAG_ECOUNTERS_TIME_DATA 106 +#define EVENT_LOG_TAG_NAN_ERROR 107 +#define 
EVENT_LOG_TAG_NAN_INFO 108 +#define EVENT_LOG_TAG_NAN_DBG 109 +#define EVENT_LOG_TAG_STF_ARBITRATOR_ERROR 110 +#define EVENT_LOG_TAG_STF_ARBITRATOR_TRACE 111 +#define EVENT_LOG_TAG_STF_ARBITRATOR_WARN 112 +#define EVENT_LOG_TAG_SCAN_SUMMARY 113 +#define EVENT_LOG_TAG_PROXD_SAMPLE_COLLECT 114 +#define EVENT_LOG_TAG_OCL_INFO 115 +#define EVENT_LOG_TAG_RSDB_PMGR_DEBUG 116 +#define EVENT_LOG_TAG_RSDB_PMGR_ERR 117 +#define EVENT_LOG_TAG_NAT_ERR 118 +#define EVENT_LOG_TAG_NAT_WARN 119 +#define EVENT_LOG_TAG_NAT_INFO 120 +#define EVENT_LOG_TAG_NAT_DEBUG 121 +#define EVENT_LOG_TAG_STA_INFO 122 +#define EVENT_LOG_TAG_PROXD_ERROR 123 +#define EVENT_LOG_TAG_PROXD_TRACE 124 +#define EVENT_LOG_TAG_PROXD_INFO 125 +#define EVENT_LOG_TAG_IE_ERROR 126 +#define EVENT_LOG_TAG_ASSOC_ERROR 127 +#define EVENT_LOG_TAG_SCAN_ERR 128 +#define EVENT_LOG_TAG_AMSDU_ERROR 129 +#define EVENT_LOG_TAG_AMPDU_ERROR 130 +#define EVENT_LOG_TAG_KM_ERROR 131 +#define EVENT_LOG_TAG_DFS 132 +#define EVENT_LOG_TAG_REGULATORY 133 +#define EVENT_LOG_TAG_CSA 134 +#define EVENT_LOG_TAG_WNM_BSSTRANS_ERR 135 +#define EVENT_LOG_TAG_SUP_INFO 136 +#define EVENT_LOG_TAG_SUP_ERROR 137 +#define EVENT_LOG_TAG_CHANCTXT_TRACE 138 +#define EVENT_LOG_TAG_CHANCTXT_INFO 139 +#define EVENT_LOG_TAG_CHANCTXT_ERROR 140 +#define EVENT_LOG_TAG_CHANCTXT_WARN 141 +#define EVENT_LOG_TAG_MSCHPROFILE 142 +#define EVENT_LOG_TAG_4WAYHANDSHAKE 143 +#define EVENT_LOG_TAG_MSCHPROFILE_TLV 144 +#define EVENT_LOG_TAG_ADPS 145 +#define EVENT_LOG_TAG_MBO_DBG 146 +#define EVENT_LOG_TAG_MBO_INFO 147 +#define EVENT_LOG_TAG_MBO_ERR 148 +#define EVENT_LOG_TAG_TXDELAY 149 +#define EVENT_LOG_TAG_BCNTRIM_INFO 150 +#define EVENT_LOG_TAG_BCNTRIM_TRACE 151 +#define EVENT_LOG_TAG_OPS_INFO 152 +#define EVENT_LOG_TAG_STATS 153 +#define EVENT_LOG_TAG_BAM 154 +#define EVENT_LOG_TAG_TXFAIL 155 +#define EVENT_LOG_TAG_AWDL_CONFIG_DBG 156 +#define EVENT_LOG_TAG_AWDL_SYNC_DBG 157 +#define EVENT_LOG_TAG_AWDL_PEER_DBG 158 +#define EVENT_LOG_TAG_RANDMAC_INFO 159 
+#define EVENT_LOG_TAG_RANDMAC_DBG 160 +#define EVENT_LOG_TAG_RANDMAC_ERR 161 +#define EVENT_LOG_TAG_AWDL_DFSP_DBG 162 +#define EVENT_LOG_TAG_TPA_ERR 163 +#define EVENT_LOG_TAG_TPA_INFO 164 + +/* EVENT_LOG_TAG_MAX = Set to the same value of last tag, not last tag + 1 */ +#define EVENT_LOG_TAG_MAX 164 /* Note: New event should be added/reserved in trunk before adding it to branches */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/event_trace.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/event_trace.h new file mode 100644 index 000000000000..cd24bdf28eab --- /dev/null +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/event_trace.h @@ -0,0 +1,123 @@ +/* + * Trace log blocks sent over HBUS + * + * Copyright (C) 1999-2017, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: event_trace.h 645268 2016-06-23 08:39:17Z $ + */ + +/** + * @file + * @brief + * Define the trace event ID and tag ID + */ + +#ifndef _WL_DIAG_H +#define _WL_DIAG_H + +#define DIAG_MAJOR_VERSION 1 /* 4 bits */ +#define DIAG_MINOR_VERSION 0 /* 4 bits */ +#define DIAG_MICRO_VERSION 0 /* 4 bits */ + +#define DIAG_VERSION \ + ((DIAG_MICRO_VERSION&0xF) | (DIAG_MINOR_VERSION&0xF)<<4 | \ + (DIAG_MAJOR_VERSION&0xF)<<8) + /* bit[11:8] major ver */ + /* bit[7:4] minor ver */ + /* bit[3:0] micro ver */ + +/* event ID for trace purpose only, to avoid the conflict with future new +* WLC_E_ , starting from 0x8000 +*/ +#define TRACE_FW_AUTH_STARTED 0x8000 +#define TRACE_FW_ASSOC_STARTED 0x8001 +#define TRACE_FW_RE_ASSOC_STARTED 0x8002 +#define TRACE_G_SCAN_STARTED 0x8003 +#define TRACE_ROAM_SCAN_STARTED 0x8004 +#define TRACE_ROAM_SCAN_COMPLETE 0x8005 +#define TRACE_FW_EAPOL_FRAME_TRANSMIT_START 0x8006 +#define TRACE_FW_EAPOL_FRAME_TRANSMIT_STOP 0x8007 +#define TRACE_BLOCK_ACK_NEGOTIATION_COMPLETE 0x8008 /* protocol status */ +#define TRACE_BT_COEX_BT_SCO_START 0x8009 +#define TRACE_BT_COEX_BT_SCO_STOP 0x800a +#define TRACE_BT_COEX_BT_SCAN_START 0x800b +#define TRACE_BT_COEX_BT_SCAN_STOP 0x800c +#define TRACE_BT_COEX_BT_HID_START 0x800d +#define TRACE_BT_COEX_BT_HID_STOP 0x800e +#define TRACE_ROAM_AUTH_STARTED 0x800f +/* Event ID for NAN, start from 0x9000 */ +#define TRACE_NAN_CLUSTER_STARTED 0x9000 +#define TRACE_NAN_CLUSTER_JOINED 0x9001 +#define TRACE_NAN_CLUSTER_MERGED 0x9002 +#define TRACE_NAN_ROLE_CHANGED 0x9003 +#define TRACE_NAN_SCAN_COMPLETE 0x9004 +#define TRACE_NAN_STATUS_CHNG 0x9005 + +/* Parameters of wifi logger events are TLVs */ +/* Event parameters tags are defined as: */ +#define TRACE_TAG_VENDOR_SPECIFIC 0 /* take a byte stream as parameter */ +#define TRACE_TAG_BSSID 1 /* takes a 6 bytes MAC address as parameter */ +#define TRACE_TAG_ADDR 2 /* takes a 6 bytes MAC address as parameter */ +#define TRACE_TAG_SSID 3 /* takes a 32 bytes 
SSID address as parameter */ +#define TRACE_TAG_STATUS 4 /* takes an integer as parameter */ +#define TRACE_TAG_CHANNEL_SPEC 5 /* takes one or more wifi_channel_spec as */ + /* parameter */ +#define TRACE_TAG_WAKE_LOCK_EVENT 6 /* takes a wake_lock_event struct as parameter */ +#define TRACE_TAG_ADDR1 7 /* takes a 6 bytes MAC address as parameter */ +#define TRACE_TAG_ADDR2 8 /* takes a 6 bytes MAC address as parameter */ +#define TRACE_TAG_ADDR3 9 /* takes a 6 bytes MAC address as parameter */ +#define TRACE_TAG_ADDR4 10 /* takes a 6 bytes MAC address as parameter */ +#define TRACE_TAG_TSF 11 /* take a 64 bits TSF value as parameter */ +#define TRACE_TAG_IE 12 /* take one or more specific 802.11 IEs */ + /* parameter, IEs are in turn indicated in */ + /* TLV format as per 802.11 spec */ +#define TRACE_TAG_INTERFACE 13 /* take interface name as parameter */ +#define TRACE_TAG_REASON_CODE 14 /* take a reason code as per 802.11 */ + /* as parameter */ +#define TRACE_TAG_RATE_MBPS 15 /* take a wifi rate in 0.5 mbps */ +#define TRACE_TAG_REQUEST_ID 16 /* take an integer as parameter */ +#define TRACE_TAG_BUCKET_ID 17 /* take an integer as parameter */ +#define TRACE_TAG_GSCAN_PARAMS 18 /* takes a wifi_scan_cmd_params struct as parameter */ +#define TRACE_TAG_GSCAN_CAPABILITIES 19 /* takes a wifi_gscan_capabilities struct as parameter */ +#define TRACE_TAG_SCAN_ID 20 /* take an integer as parameter */ +#define TRACE_TAG_RSSI 21 /* take an integer as parameter */ +#define TRACE_TAG_CHANNEL 22 /* take an integer as parameter */ +#define TRACE_TAG_LINK_ID 23 /* take an integer as parameter */ +#define TRACE_TAG_LINK_ROLE 24 /* take an integer as parameter */ +#define TRACE_TAG_LINK_STATE 25 /* take an integer as parameter */ +#define TRACE_TAG_LINK_TYPE 26 /* take an integer as parameter */ +#define TRACE_TAG_TSCO 27 /* take an integer as parameter */ +#define TRACE_TAG_RSCO 28 /* take an integer as parameter */ +#define TRACE_TAG_EAPOL_MESSAGE_TYPE 29 /* take an integer as 
parameter */ + /* M1-1, M2-2, M3-3, M4-4 */ + +typedef union { + struct { + uint16 event: 16; + uint16 version: 16; + }; + uint32 t; +} wl_event_log_id_ver_t; + +#endif /* _WL_DIAG_H */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hnd_armtrap.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hnd_armtrap.h index baf55724c595..e7c005c6f1b3 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hnd_armtrap.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hnd_armtrap.h @@ -1,7 +1,7 @@ /* * HND arm trap handling. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: hnd_armtrap.h 514727 2014-11-12 03:02:48Z $ + * $Id: hnd_armtrap.h 545867 2015-04-01 22:45:19Z $ */ #ifndef _hnd_armtrap_h_ @@ -40,9 +40,7 @@ #define FIRST_TRAP TR_RST #define LAST_TRAP (TR_FIQ * TRAP_STRIDE) -#if defined(__ARM_ARCH_4T__) -#define MAX_TRAP_TYPE (TR_FIQ + 1) -#elif defined(__ARM_ARCH_7M__) +#if defined(__ARM_ARCH_7M__) #define MAX_TRAP_TYPE (TR_ISR + ARMCM3_NUMINTS) #endif /* __ARM_ARCH_7M__ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hnd_cons.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hnd_cons.h index 2dee71abefeb..3470d6a91307 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hnd_cons.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hnd_cons.h @@ -1,7 +1,7 @@ /* * Console support for RTE - for host use only. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: hnd_cons.h 514727 2014-11-12 03:02:48Z $ + * $Id: hnd_cons.h 568961 2015-07-06 18:14:49Z $ */ #ifndef _hnd_cons_h_ #define _hnd_cons_h_ @@ -34,7 +34,11 @@ #define CBUF_LEN (128) +#if defined(BCM_BIG_LOG) +#define LOG_BUF_LEN (16 * 1024) +#else #define LOG_BUF_LEN 1024 +#endif #ifdef BOOTLOADER_CONSOLE_OUTPUT #undef RWL_MAX_DATA_LEN diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hnd_debug.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hnd_debug.h new file mode 100644 index 000000000000..239e596e96af --- /dev/null +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hnd_debug.h @@ -0,0 +1,206 @@ +/* + * HND Run Time Environment debug info area + * + * Copyright (C) 1999-2017, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: hnd_debug.h 678890 2017-01-11 11:48:36Z $ + */ + +#ifndef _HND_DEBUG_H +#define _HND_DEBUG_H + +/* Magic number at a magic location to find HND_DEBUG pointers */ +#define HND_DEBUG_PTR_PTR_MAGIC 0x50504244 /* DBPP */ + +/* Magic number at a magic location to find RAM size */ +#define HND_RAMSIZE_PTR_MAGIC 0x534d4152 /* RAMS */ + +#ifndef _LANGUAGE_ASSEMBLY + +/* Includes only when building dongle code */ + + +/* We use explicit sizes here since this gets included from different + * systems. The sizes must be the size of the creating system + * (currently 32 bit ARM) since this is gleaned from dump. + */ + +#ifdef FWID +extern uint32 gFWID; +#endif + +/* Define pointers for use on other systems */ +#define _HD_EVLOG_P uint32 +#define _HD_CONS_P uint32 +#define _HD_TRAP_P uint32 + +/* This struct is placed at a well-defined location, and contains a pointer to hnd_debug. */ +typedef struct hnd_debug_ptr { + uint32 magic; + + /* RAM address of 'hnd_debug'. For legacy versions of this struct, it is a 0-indexed + * offset instead. + */ + uint32 hnd_debug_addr; + + /* Base address of RAM. This field does not exist for legacy versions of this struct. */ + uint32 ram_base_addr; + +} hnd_debug_ptr_t; + +/* This struct is placed at a well-defined location. */ +typedef struct hnd_ramsize_ptr { + uint32 magic; /* 'RAMS' */ + + /* RAM size information. 
*/ + uint32 ram_size; +} hnd_ramsize_ptr_t; + +#define HND_DEBUG_EPIVERS_MAX_STR_LEN 32 +#define HND_DEBUG_BUILD_SIGNATURE_FWID_LEN 17 +#define HND_DEBUG_BUILD_SIGNATURE_VER_LEN 22 +typedef struct hnd_debug { + uint32 magic; +#define HND_DEBUG_MAGIC 0x47424544 /* 'DEBG' */ + + uint32 version; /* Debug struct version */ +#define HND_DEBUG_VERSION 1 + + uint32 fwid; /* 4 bytes of fw info */ + char epivers[HND_DEBUG_EPIVERS_MAX_STR_LEN]; + + _HD_TRAP_P trap_ptr; /* trap_t data struct */ + _HD_CONS_P console; /* Console */ + + uint32 ram_base; + uint32 ram_size; + + uint32 rom_base; + uint32 rom_size; + + _HD_EVLOG_P event_log_top; + + /* To populated fields below, + * INCLUDE_BUILD_SIGNATURE_IN_SOCRAM needs to be enabled + */ + char fwid_signature[HND_DEBUG_BUILD_SIGNATURE_FWID_LEN]; /* fwid= */ + char ver_signature[HND_DEBUG_BUILD_SIGNATURE_VER_LEN]; /* ver=abc.abc.abc.abc */ + +} hnd_debug_t; + +/* + * timeval_t and prstatus_t are copies of the Linux structures. + * Included here because we need the definitions for the target processor + * (32 bits) and not the definition on the host this is running on + * (which could be 64 bits). + */ + +typedef struct { /* Time value with microsecond resolution */ + uint32 tv_sec; /* Seconds */ + uint32 tv_usec; /* Microseconds */ +} timeval_t; + + +/* Linux/ARM 32 prstatus for notes section */ +typedef struct prstatus { + int32 si_signo; /* Signal number */ + int32 si_code; /* Extra code */ + int32 si_errno; /* Errno */ + uint16 pr_cursig; /* Current signal. */ + uint16 unused; + uint32 pr_sigpend; /* Set of pending signals. */ + uint32 pr_sighold; /* Set of held signals. */ + uint32 pr_pid; + uint32 pr_ppid; + uint32 pr_pgrp; + uint32 pr_sid; + timeval_t pr_utime; /* User time. */ + timeval_t pr_stime; /* System time. */ + timeval_t pr_cutime; /* Cumulative user time. */ + timeval_t pr_cstime; /* Cumulative system time. */ + uint32 uregs[18]; + int32 pr_fpvalid; /* True if math copro being used. 
*/ +} prstatus_t; + +/* for mkcore and other utilities use */ +#define DUMP_INFO_PTR_PTR_0 0x74 +#define DUMP_INFO_PTR_PTR_1 0x78 +#define DUMP_INFO_PTR_PTR_2 0xf0 +#define DUMP_INFO_PTR_PTR_3 0xf8 +#define DUMP_INFO_PTR_PTR_4 0x874 +#define DUMP_INFO_PTR_PTR_5 0x878 +#define DUMP_INFO_PTR_PTR_END 0xffffffff +#define DUMP_INFO_PTR_PTR_LIST DUMP_INFO_PTR_PTR_0, \ + DUMP_INFO_PTR_PTR_1, \ + DUMP_INFO_PTR_PTR_2, \ + DUMP_INFO_PTR_PTR_3, \ + DUMP_INFO_PTR_PTR_4, \ + DUMP_INFO_PTR_PTR_5, \ + DUMP_INFO_PTR_PTR_END + +/* for DHD driver to get dongle ram size info. */ +#define RAMSIZE_PTR_PTR_0 0x6c +#define RAMSIZE_PTR_PTR_END 0xffffffff +#define RAMSIZE_PTR_PTR_LIST RAMSIZE_PTR_PTR_0, \ + RAMSIZE_PTR_PTR_END + +typedef struct hnd_ext_trap_hdr { + uint8 version; /* Extended trap version info */ + uint8 reserved; /* currently unused */ + uint16 len; /* Length of data excluding this header */ + uint8 data[]; /* TLV data */ +} hnd_ext_trap_hdr_t; + +#define TAG_TRAP_SIGNATURE 1 /* Processor register dumps */ +#define TAG_TRAP_STACK 2 /* Processor stack dump (possible code locations) */ +#define TAG_TRAP_MEMORY 3 /* Memory subsystem dump */ +#define TAG_TRAP_DEEPSLEEP 4 /* Deep sleep health check failures */ +#define TAG_TRAP_PSM_WD 5 /* PSM watchdog information */ +#define TAG_TRAP_PHY 6 /* Phy related issues */ +#define TAG_TRAP_BUS 7 /* Bus level issues */ +#define TAG_TRAP_MAC 8 /* Mac level issues */ +#define TAG_TRAP_BACKPLANE 9 /* Backplane related errors */ + +typedef struct hnd_ext_trap_bp_err +{ + uint32 error; + uint32 coreid; + uint32 baseaddr; + uint32 ioctrl; + uint32 iostatus; + uint32 resetctrl; + uint32 resetstatus; + uint32 errlogctrl; + uint32 errlogdone; + uint32 errlogstatus; + uint32 errlogaddrlo; + uint32 errlogaddrhi; + uint32 errlogid; + uint32 errloguser; + uint32 errlogflags; +} hnd_ext_trap_bp_err_t; + +#endif /* !LANGUAGE_ASSEMBLY */ + +#endif /* _HND_DEBUG_H */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hnd_pktpool.h 
b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hnd_pktpool.h index 3cf46727b044..e5d0eaa650fe 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hnd_pktpool.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hnd_pktpool.h @@ -1,7 +1,7 @@ /* * HND generic packet pool operation primitives * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: hnd_pktpool.h 591285 2015-10-07 11:56:29Z $ + * $Id: hnd_pktpool.h 613891 2016-01-20 10:05:44Z $ */ #ifndef _hnd_pktpool_h_ @@ -112,7 +112,7 @@ typedef struct pktpool { uint16 avail; /**< number of packets in pool's free list */ uint16 len; /**< number of packets managed by pool */ uint16 maxlen; /**< maximum size of pool <= PKTPOOL_LEN_MAX */ - uint16 plen; /**< size of pkt buffer, excluding lbuf|lbuf_frag */ + uint16 plen; /**< size of pkt buffer in [bytes], excluding lbuf|lbuf_frag */ bool empty; uint8 cbtoggle; @@ -164,7 +164,7 @@ extern int pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void #define POOLSETID(pp, ppid) (POOLPTR(pp)->id = (ppid)) -#define pktpool_len(pp) (POOLPTR(pp)->len) +#define pktpool_len(pp) (POOLPTR(pp)->len) /**< returns packet length in [bytes] */ #define pktpool_avail(pp) (POOLPTR(pp)->avail) #define pktpool_plen(pp) (POOLPTR(pp)->plen) #define pktpool_maxlen(pp) (POOLPTR(pp)->maxlen) @@ -211,8 +211,8 @@ extern pktpool_t *pktpool_shared_lfrag; #define SHARED_RXFRAG_POOL (pktpool_shared_rxlfrag) extern pktpool_t *pktpool_shared_rxlfrag; -void hnd_pktpool_init(osl_t *osh); -void hnd_pktpool_fill(pktpool_t *pktpool, bool minimal); +int hnd_pktpool_init(osl_t *osh); +int hnd_pktpool_fill(pktpool_t *pktpool, bool minimal); void hnd_pktpool_refill(bool minimal); #else /* BCMPKTPOOL */ #define SHARED_POOL ((struct 
pktpool *)NULL) diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hnd_pktq.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hnd_pktq.h index 1586de3ca5b4..ad778da896e7 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hnd_pktq.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hnd_pktq.h @@ -1,7 +1,7 @@ /* * HND generic pktq operation primitives * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: hnd_pktq.h 591283 2015-10-07 11:52:00Z $ + * $Id: hnd_pktq.h 641285 2016-06-02 02:33:55Z $ */ #ifndef _hnd_pktq_h_ @@ -97,17 +97,25 @@ typedef struct { uint32 _logtime; /**< timestamp of last counter clear */ } pktq_counters_t; -typedef struct { +#define PKTQ_LOG_COMMON \ + uint32 pps_time; /**< time spent in ps pretend state */ \ uint32 _prec_log; + +typedef struct { + PKTQ_LOG_COMMON pktq_counters_t* _prec_cnt[PKTQ_MAX_PREC]; /**< Counters per queue */ } pktq_log_t; +#else +typedef struct pktq_log pktq_log_t; #endif /* PKTQ_LOG */ #define PKTQ_COMMON \ + HND_PKTQ_MUTEX_DECL(mutex) \ + pktq_log_t *pktqlog; \ uint16 num_prec; /**< number of precedences in use */ \ uint16 hi_prec; /**< rapid dequeue hint (>= highest non-empty prec) */ \ - uint16 max; /**< total max packets */ \ + uint16 max; /**< total max packets */ \ uint16 len; /**< total number of packets */ /* multi-priority pkt queue */ @@ -115,10 +123,6 @@ struct pktq { PKTQ_COMMON /* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */ struct pktq_prec q[PKTQ_MAX_PREC]; - HND_PKTQ_MUTEX_DECL(mutex) -#ifdef PKTQ_LOG - pktq_log_t* pktqlog; -#endif }; /* simple, non-priority pkt queue */ @@ -126,14 +130,72 @@ struct spktq { PKTQ_COMMON /* q array must be last since # of elements can be either 
PKTQ_MAX_PREC or 1 */ struct pktq_prec q[1]; - HND_PKTQ_MUTEX_DECL(mutex) }; #define PKTQ_PREC_ITER(pq, prec) for (prec = (pq)->num_prec - 1; prec >= 0; prec--) -/* fn(pkt, arg). return true if pkt belongs to if */ +/* fn(pkt, arg). return true if pkt belongs to bsscfg */ typedef bool (*ifpkt_cb_t)(void*, int); +/* + * pktq filter support + */ + +/* filter function return values */ +typedef enum { + PKT_FILTER_NOACTION = 0, /**< restore the pkt to its position in the queue */ + PKT_FILTER_DELETE = 1, /**< delete the pkt */ + PKT_FILTER_REMOVE = 2, /**< do not restore the pkt to the queue, + * filter fn has taken ownership of the pkt + */ +} pktq_filter_result_t; + +/** + * Caller supplied filter function to pktq_pfilter(), pktq_filter(). + * Function filter(ctx, pkt) is called with its ctx pointer on each pkt in the + * pktq. When the filter function is called, the supplied pkt will have been + * unlinked from the pktq. The filter function returns a pktq_filter_result_t + * result specifying the action pktq_filter()/pktq_pfilter() should take for + * the pkt. + * Here are the actions taken by pktq_filter/pfilter() based on the supplied + * filter function's return value: + * + * PKT_FILTER_NOACTION - The filter will re-link the pkt at its + * previous location. + * + * PKT_FILTER_DELETE - The filter will not relink the pkt and will + * call the user supplied defer_free_pkt fn on the packet. + * + * PKT_FILTER_REMOVE - The filter will not relink the pkt. The supplied + * filter fn took ownership (or deleted) the pkt. + * + * WARNING: pkts inserted by the user (in pkt_filter and/or flush callbacks + * and chains) in the prec queue will not be seen by the filter, and the prec + * queue will be temporarily be removed from the queue hence there're side + * effects including pktq_len() on the queue won't reflect the correct number + * of packets in the queue. 
+ */ +typedef pktq_filter_result_t (*pktq_filter_t)(void* ctx, void* pkt); + +/* The defer_free_pkt callback is invoked when the the pktq_filter callback + * returns PKT_FILTER_DELETE decision, which allows the user to deposite + * the packet appropriately based on the situation (free the packet or + * save it in a temporary queue etc.). + */ +typedef void (*defer_free_pkt_fn_t)(void *ctx, void *pkt); + +/* The flush_free_pkt callback is invoked when all packets in the pktq + * are processed. + */ +typedef void (*flush_free_pkt_fn_t)(void *ctx); + +/* filter a pktq, using the caller supplied filter/deposition/flush functions */ +extern void pktq_filter(struct pktq *pq, pktq_filter_t fn, void* arg, + defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx); +/* filter a particular precedence in pktq, using the caller supplied filter function */ +extern void pktq_pfilter(struct pktq *pq, int prec, pktq_filter_t fn, void* arg, + defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx); + /* operations on a specific precedence in packet queue */ #define pktq_psetmax(pq, prec, _max) ((pq)->q[prec].max = (_max)) @@ -159,9 +221,6 @@ extern void *pktq_pdeq(struct pktq *pq, int prec); extern void *pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p); extern void *pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg); extern void *pktq_pdeq_tail(struct pktq *pq, int prec); -/* Empty the queue at particular precedence level */ -extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, - ifpkt_cb_t fn, int arg); /* Remove a specified packet from its queue */ extern bool pktq_pdel(struct pktq *pq, void *p, int prec); @@ -189,11 +248,18 @@ extern bool pktq_full(struct pktq *pq); #define pktenq_head(pq, p) pktq_penq_head(((struct pktq *)(void *)pq), 0, (p)) #define pktdeq(pq) pktq_pdeq(((struct pktq *)(void *)pq), 0) #define pktdeq_tail(pq) pktq_pdeq_tail(((struct pktq *)(void *)pq), 0) 
-#define pktqflush(osh, pq) pktq_flush(osh, ((struct pktq *)(void *)pq), TRUE, NULL, 0) +#define pktqflush(osh, pq, dir) pktq_pflush(osh, ((struct pktq *)(void *)pq), 0, dir) #define pktqinit(pq, len) pktq_init(((struct pktq *)(void *)pq), 1, len) #define pktqdeinit(pq) pktq_deinit((struct pktq *)(void *)pq) #define pktqavail(pq) pktq_avail((struct pktq *)(void *)pq) #define pktqfull(pq) pktq_full((struct pktq *)(void *)pq) +#define pktqfilter(pq, fltr, fltr_ctx, defer, defer_ctx, flush, flush_ctx) \ + pktq_pfilter((struct pktq *)pq, 0, fltr, fltr_ctx, defer, defer_ctx, flush, flush_ctx) + +/* wrap macros for modules in components use */ +#define spktqinit(pq, max_pkts) pktqinit(pq, max_pkts) +#define spktenq(pq, p) pktenq(pq, p) +#define spktdeq(pq) pktdeq(pq) extern bool pktq_init(struct pktq *pq, int num_prec, int max_len); extern bool pktq_deinit(struct pktq *pq); @@ -205,10 +271,14 @@ extern void *pktq_deq(struct pktq *pq, int *prec_out); extern void *pktq_deq_tail(struct pktq *pq, int *prec_out); extern void *pktq_peek(struct pktq *pq, int *prec_out); extern void *pktq_peek_tail(struct pktq *pq, int *prec_out); -extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg); + +/* flush pktq */ +extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir); +/* Empty the queue at particular precedence level */ +extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir); #ifdef __cplusplus - } +} #endif #endif /* _hnd_pktq_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hndpmu.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hndpmu.h index dfc83d3d7fd1..bfc916693e1f 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hndpmu.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hndpmu.h @@ -1,7 +1,7 @@ /* * HND SiliconBackplane PMU support. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: hndpmu.h 530150 2015-01-29 08:43:40Z $ + * $Id: hndpmu.h 657872 2016-09-02 22:17:34Z $ */ #ifndef _hndpmu_h_ @@ -33,6 +33,7 @@ #include #include #include +#include extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on, uint32* min_res_mask); @@ -41,5 +42,15 @@ extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestre extern void si_pmu_minresmask_htavail_set(si_t *sih, osl_t *osh, bool set_clear); extern void si_pmu_slow_clk_reinit(si_t *sih, osl_t *osh); extern void si_pmu_avbtimer_enable(si_t *sih, osl_t *osh, bool set_flag); - +extern uint32 si_pmu_dump_pmucap_binary(si_t *sih, uchar *p); +extern uint32 si_pmu_dump_buf_size_pmucap(si_t *sih); +extern int si_pmu_wait_for_steady_state(si_t *sih, osl_t *osh, pmuregs_t *pmu); +#if defined(BCMULP) +int si_pmu_ulp_register(si_t *sih); +extern void si_pmu_ulp_ilp_config(si_t *sih, osl_t *osh, uint32 ilp_period); +#endif /* BCMULP */ +extern uint32 si_pmu_get_pmutimer(si_t *sih); +extern void si_pmu_set_min_res_mask(si_t *sih, osl_t *osh, uint min_res_mask); +extern bool si_pmu_cap_fast_lpo(si_t *sih); +extern int si_pmu_fast_lpo_disable(si_t *sih); #endif /* _hndpmu_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hndsoc.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hndsoc.h index 36884a088b6f..b35380ad57bf 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hndsoc.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/hndsoc.h @@ -1,7 +1,7 @@ /* * Broadcom HND chip & on-chip-interconnect-related definitions. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: hndsoc.h 517544 2014-11-26 00:40:42Z $ + * $Id: hndsoc.h 613129 2016-01-17 09:25:52Z $ */ #ifndef _HNDSOC_H @@ -45,10 +45,9 @@ #define SI_SDRAM_SWAPPED 0x10000000 /* Byteswapped Physical SDRAM */ #define SI_SDRAM_R2 0x80000000 /* Region 2 for sdram (512 MB) */ -#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */ - -#define SI_WRAP_BASE 0x18100000 /* Wrapper space base */ -#define SI_CORE_SIZE 0x1000 /* each core gets 4Kbytes for registers */ +#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */ +#define SI_WRAP_BASE 0x18100000 /* Wrapper space base */ +#define SI_CORE_SIZE 0x1000 /* each core gets 4Kbytes for registers */ #ifndef SI_MAXCORES #define SI_MAXCORES 32 /* NorthStar has more cores */ @@ -96,6 +95,7 @@ #define SI_BCM53573_NANDFLASH 0x30000000 /* 53573 NAND flash base */ #define SI_BCM53573_NORFLASH 0x1c000000 /* 53573 NOR flash base */ +#define SI_BCM53573_FLASH2_SZ 0x04000000 /* 53573 NOR flash2 size */ #define SI_BCM53573_NORFLASH_WINDOW 0x01000000 /* only support 16M direct access for * 3-byte address modes in spi flash @@ -103,9 +103,19 @@ #define SI_BCM53573_BOOTDEV_MASK 0x3 #define SI_BCM53573_BOOTDEV_NOR 0x0 +#define SI_BCM53573_NAND_PRE_MASK 0x100 /* 53573 NAND present mask */ + #define SI_BCM53573_DDRTYPE_MASK 0x10 #define SI_BCM53573_DDRTYPE_DDR3 0x10 +#define SI_BCM47189_RGMII_VDD_MASK 0x3 +#define SI_BCM47189_RGMII_VDD_SHIFT 21 +#define SI_BCM47189_RGMII_VDD_3_3V 0 +#define SI_BCM47189_RGMII_VDD_2_5V 1 +#define SI_BCM47189_RGMII_VDD_1_5V 1 + +#define SI_BCM53573_LOCKED_CPUPLL 0x1 + /* APB bridge code */ #define APB_BRIDGE_ID 0x135 /* APB Bridge 0, 1, etc. 
*/ @@ -275,6 +285,7 @@ #define CCS_ARMFASTCLOCKSTATUS 0x01000000 /* Fast CPU clock is running */ #define CCS_ERSRC_STS_MASK 0x07000000 /* external resource status */ #define CCS_ERSRC_STS_SHIFT 24 +#define CCS_SECI_AVAIL 0x01000000 /* RO: SECI is available */ #define CCS0_HTAVAIL 0x00010000 /* HT avail in chipc and pcmcia on 4328a0 */ #define CCS0_ALPAVAIL 0x00020000 /* ALP avail in chipc and pcmcia on 4328a0 */ @@ -312,4 +323,5 @@ int soc_boot_dev(void *sih); int soc_knl_dev(void *sih); #endif /* !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) */ +#define PMU_BASE_OFFSET 0x00012000 /* PMU offset is changed for ccrev >= 56 */ #endif /* _HNDSOC_H */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/linux_osl.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/linux_osl.h index d560feca11e8..3dd51bc372e5 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/linux_osl.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/linux_osl.h @@ -1,7 +1,7 @@ /* * Linux OS Independent Layer * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: linux_osl.h 601764 2015-11-24 03:47:41Z $ + * $Id: linux_osl.h 672413 2016-11-28 11:13:23Z $ */ #ifndef _linux_osl_h_ @@ -58,6 +58,17 @@ extern void* osl_get_bus_handle(osl_t *osh); /* Global ASSERT type */ extern uint32 g_assert_type; +#ifdef CONFIG_PHYS_ADDR_T_64BIT +#define PRI_FMT_x "llx" +#define PRI_FMT_X "llX" +#define PRI_FMT_o "llo" +#define PRI_FMT_d "lld" +#else +#define PRI_FMT_x "x" +#define PRI_FMT_X "X" +#define PRI_FMT_o "o" +#define PRI_FMT_d "d" +#endif /* CONFIG_PHYS_ADDR_T_64BIT */ /* ASSERT */ #if defined(BCMASSERT_LOG) #define ASSERT(exp) \ @@ -79,6 +90,14 @@ extern void osl_assert(const char *exp, const char *file, int line); 
/* bcm_prefetch_32B */ static inline void bcm_prefetch_32B(const uint8 *addr, const int cachelines_32B) { +#if (defined(STB) && defined(__arm__)) && (__LINUX_ARM_ARCH__ >= 5) + switch (cachelines_32B) { + case 4: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 96) : "cc"); + case 3: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 64) : "cc"); + case 2: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 32) : "cc"); + case 1: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 0) : "cc"); + } +#endif } /* microsecond delay */ @@ -115,6 +134,7 @@ extern uint osl_pcie_bus(osl_t *osh); extern struct pci_dev *osl_pci_device(osl_t *osh); #define OSL_ACP_COHERENCE (1<<1L) +#define OSL_FWDERBUF (1<<2L) /* Pkttag flag should be part of public information */ typedef struct { @@ -126,6 +146,7 @@ typedef struct { } osl_pubinfo_t; extern void osl_flag_set(osl_t *osh, uint32 mask); +extern void osl_flag_clr(osl_t *osh, uint32 mask); extern bool osl_is_flag_set(osl_t *osh, uint32 mask); #define PKTFREESETCB(osh, _tx_fn, _tx_ctx) \ @@ -140,15 +161,20 @@ extern bool osl_is_flag_set(osl_t *osh, uint32 mask); #define MALLOC(osh, size) osl_malloc((osh), (size)) #define MALLOCZ(osh, size) osl_mallocz((osh), (size)) #define MFREE(osh, addr, size) osl_mfree((osh), (addr), (size)) + #define VMALLOC(osh, size) osl_vmalloc((osh), (size)) + #define VMALLOCZ(osh, size) osl_vmallocz((osh), (size)) + #define VMFREE(osh, addr, size) osl_vmfree((osh), (addr), (size)) #define MALLOCED(osh) osl_malloced((osh)) #define MEMORY_LEFTOVER(osh) osl_check_memleak(osh) extern void *osl_malloc(osl_t *osh, uint size); extern void *osl_mallocz(osl_t *osh, uint size); extern void osl_mfree(osl_t *osh, void *addr, uint size); + extern void *osl_vmalloc(osl_t *osh, uint size); + extern void *osl_vmallocz(osl_t *osh, uint size); + extern void osl_vmfree(osl_t *osh, void *addr, uint size); extern uint osl_malloced(osl_t *osh); extern uint osl_check_memleak(osl_t *osh); - #define MALLOC_FAILED(osh) osl_malloc_failed((osh)) extern 
uint osl_malloc_failed(osl_t *osh); @@ -170,6 +196,7 @@ extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align, extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa); /* map/unmap direction */ +#define DMA_NO 0 /* Used to skip cache op */ #define DMA_TX 1 /* TX direction for DMA */ #define DMA_RX 2 /* RX direction for DMA */ @@ -189,25 +216,29 @@ extern void osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction); extern void osl_cpu_relax(void); #define OSL_CPU_RELAX() osl_cpu_relax() +extern void osl_preempt_disable(osl_t *osh); +extern void osl_preempt_enable(osl_t *osh); +#define OSL_DISABLE_PREEMPTION(osh) osl_preempt_disable(osh) +#define OSL_ENABLE_PREEMPTION(osh) osl_preempt_enable(osh) + #if (!defined(DHD_USE_COHERENT_MEM_FOR_RING) && defined(__ARM_ARCH_7A__)) || \ - (defined(STBLINUX) && defined(__ARM_ARCH_7A__)) || (defined(CONFIG_ARCH_MSM8996) || \ - defined(CONFIG_SOC_EXYNOS8890)) + (defined(STBLINUX) && defined(__ARM_ARCH_7A__)) extern void osl_cache_flush(void *va, uint size); extern void osl_cache_inv(void *va, uint size); extern void osl_prefetch(const void *ptr); - #define OSL_CACHE_FLUSH(va, len) osl_cache_flush((void *) va, len) - #define OSL_CACHE_INV(va, len) osl_cache_inv((void *) va, len) + #define OSL_CACHE_FLUSH(va, len) osl_cache_flush((void *)(va), len) + #define OSL_CACHE_INV(va, len) osl_cache_inv((void *)(va), len) #define OSL_PREFETCH(ptr) osl_prefetch(ptr) -#ifdef __ARM_ARCH_7A__ +#if defined(__ARM_ARCH_7A__) extern int osl_arch_is_coherent(void); #define OSL_ARCH_IS_COHERENT() osl_arch_is_coherent() extern int osl_acp_war_enab(void); #define OSL_ACP_WAR_ENAB() osl_acp_war_enab() -#else +#else /* !__ARM_ARCH_7A__ */ #define OSL_ARCH_IS_COHERENT() NULL #define OSL_ACP_WAR_ENAB() NULL -#endif /* __ARM_ARCH_7A__ */ -#else +#endif /* !__ARM_ARCH_7A__ */ +#else /* !__mips__ && !__ARM_ARCH_7A__ */ #define OSL_CACHE_FLUSH(va, len) BCM_REFERENCE(va) #define OSL_CACHE_INV(va, len) 
BCM_REFERENCE(va) #define OSL_PREFETCH(ptr) BCM_REFERENCE(ptr) @@ -223,8 +254,21 @@ extern void osl_cpu_relax(void); (uintptr)(r), sizeof(*(r)), (v))) #define OSL_READ_REG(osh, r) (bcmsdh_reg_read(osl_get_bus_handle(osh), \ (uintptr)(r), sizeof(*(r)))) +#elif (defined(STB) && defined(__arm__)) +extern void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size); + +#define OSL_READ_REG(osh, r) \ + ({\ + __typeof(*(r)) __osl_v; \ + osl_pcie_rreg(osh, (uintptr)(r), (void *)&__osl_v, sizeof(*(r))); \ + __osl_v; \ + }) #endif +#if (defined(STB) && defined(__arm__)) + #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;}) + #define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); bus_op;}) +#else /* !BCM47XX_CA9 */ #if defined(BCMSDIO) #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \ mmap_op else bus_op @@ -234,6 +278,7 @@ extern void osl_cpu_relax(void); #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;}) #define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;}) #endif +#endif #define OSL_ERROR(bcmerror) osl_error(bcmerror) extern int osl_error(int bcmerror); @@ -251,9 +296,12 @@ extern int osl_error(int bcmerror); #include /* for vsn/printf's */ #include /* for mem*, str* */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29) +extern uint64 osl_sysuptime_us(void); #define OSL_SYSUPTIME() ((uint32)jiffies_to_msecs(jiffies)) +#define OSL_SYSUPTIME_US() osl_sysuptime_us() #else #define OSL_SYSUPTIME() ((uint32)jiffies * (1000 / HZ)) +#error "OSL_SYSUPTIME_US() may need to be defined" #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29) */ #define printf(fmt, args...) 
printk(fmt , ## args) #include /* for vsn/printf's */ @@ -265,12 +313,12 @@ extern int osl_error(int bcmerror); /* register access macros */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 1)) && defined(CONFIG_64BIT) && \ - defined(CONFIG_X86) +#ifdef CONFIG_64BIT +/* readq is defined only for 64 bit platform */ #define R_REG(osh, r) (\ SELECT_BUS_READ(osh, \ ({ \ - __typeof(*(r)) __osl_v; \ + __typeof(*(r)) __osl_v = 0; \ switch (sizeof(*(r))) { \ case sizeof(uint8): __osl_v = \ readb((volatile uint8*)(r)); break; \ @@ -279,17 +327,17 @@ extern int osl_error(int bcmerror); case sizeof(uint32): __osl_v = \ readl((volatile uint32*)(r)); break; \ case sizeof(uint64): __osl_v = \ - readq((volatile uint64*)(r)); break; \ + readq((volatile uint64*)(r)); break; \ } \ __osl_v; \ }), \ OSL_READ_REG(osh, r)) \ ) -#else +#else /* !CONFIG_64BIT */ #define R_REG(osh, r) (\ SELECT_BUS_READ(osh, \ ({ \ - __typeof(*(r)) __osl_v; \ + __typeof(*(r)) __osl_v = 0; \ switch (sizeof(*(r))) { \ case sizeof(uint8): __osl_v = \ readb((volatile uint8*)(r)); break; \ @@ -302,9 +350,10 @@ extern int osl_error(int bcmerror); }), \ OSL_READ_REG(osh, r)) \ ) -#endif /* KERNEL_VERSION(3, 11, 1)) && defined(CONFIG_64BIT) && defined(CONFIG_X86) */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 1)) && defined(CONFIG_64BIT) && \ - defined(CONFIG_X86) +#endif /* CONFIG_64BIT */ + +#ifdef CONFIG_64BIT +/* writeq is defined only for 64 bit platform */ #define W_REG(osh, r, v) do { \ SELECT_BUS_WRITE(osh, \ switch (sizeof(*(r))) { \ @@ -315,7 +364,8 @@ extern int osl_error(int bcmerror); }, \ (OSL_WRITE_REG(osh, r, v))); \ } while (0) -#else + +#else /* !CONFIG_64BIT */ #define W_REG(osh, r, v) do { \ SELECT_BUS_WRITE(osh, \ switch (sizeof(*(r))) { \ @@ -325,7 +375,7 @@ extern int osl_error(int bcmerror); }, \ (OSL_WRITE_REG(osh, r, v))); \ } while (0) -#endif /* KERNEL_VERSION(3, 11, 1)) && defined(CONFIG_64BIT) && defined(CONFIG_X86) */ +#endif /* CONFIG_64BIT */ #define AND_REG(osh, r, v) 
W_REG(osh, (r), R_REG(osh, r) & (v)) #define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v)) @@ -497,6 +547,7 @@ extern void osl_pkt_orphan_partial(struct sk_buff *skb, int tsq); typedef struct ctfpool { void *head; spinlock_t lock; + osl_t *osh; uint max_obj; uint curr_obj; uint obj_size; @@ -719,15 +770,18 @@ extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt); * to be accompanied with a clear of the FWDERBUF tag. */ -/** Forwarded packets, have a HWRXOFF sized rx header (etc.h) */ -#define FWDER_HWRXOFF (30) +/** Forwarded packets, have a GMAC_FWDER_HWRXOFF sized rx header (etc.h) */ +#define FWDER_HWRXOFF (18) -/** Maximum amount of a pktadat that a downstream forwarder (GMAC) may have +/** Maximum amount of a pkt data that a downstream forwarder (GMAC) may have * read into the L1 cache (not dirty). This may be used in reduced cache ops. * - * Max 56: ET HWRXOFF[30] + BRCMHdr[4] + EtherHdr[14] + VlanHdr[4] + IP[4] + * Max 44: ET HWRXOFF[18] + BRCMHdr[4] + EtherHdr[14] + VlanHdr[4] + IP[4] + * Min 32: GMAC_FWDER_HWRXOFF[18] + EtherHdr[14] */ -#define FWDER_PKTMAPSZ (FWDER_HWRXOFF + 4 + 14 + 4 + 4) +#define FWDER_MINMAPSZ (FWDER_HWRXOFF + 14) +#define FWDER_MAXMAPSZ (FWDER_HWRXOFF + 4 + 14 + 4 + 4) +#define FWDER_PKTMAPSZ (FWDER_MINMAPSZ) #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) @@ -927,8 +981,10 @@ extern uint osl_pktalloced(osl_t *osh); #define OSL_RAND() osl_rand() extern uint32 osl_rand(void); +#if !defined(BCM_SECURE_DMA) #define DMA_MAP(osh, va, size, direction, p, dmah) \ osl_dma_map((osh), (va), (size), (direction), (p), (dmah)) +#endif /* !(defined(BCM_SECURE_DMA)) */ #ifdef PKTC /* Use 8 bytes of skb tstamp field to store below info */ @@ -1008,11 +1064,6 @@ extern int bcmp(const void *b1, const void *b2, size_t len); extern void bzero(void *b, size_t len); #endif /* ! 
BCMDRIVER */ -typedef struct sec_cma_info { - struct sec_mem_elem *sec_alloc_list; - struct sec_mem_elem *sec_alloc_list_tail; -} sec_cma_info_t; - /* Current STB 7445D1 doesn't use ACP and it is non-coherrent. * Adding these dummy values for build apss only * When we revisit need to change these. @@ -1039,6 +1090,14 @@ typedef struct sec_cma_info { osl_sec_dma_unmap((osh), (pa), (size), (direction), (p), (dmah), (pcma), (offset)) #define SECURE_DMA_UNMAP_ALL(osh, pcma) \ osl_sec_dma_unmap_all((osh), (pcma)) + +#define DMA_MAP(osh, va, size, direction, p, dmah) + +typedef struct sec_cma_info { + struct sec_mem_elem *sec_alloc_list; + struct sec_mem_elem *sec_alloc_list_tail; +} sec_cma_info_t; + #if defined(__ARM_ARCH_7A__) #define CMA_BUFSIZE_4K 4096 #define CMA_BUFSIZE_2K 2048 @@ -1046,13 +1105,13 @@ typedef struct sec_cma_info { #define CMA_BUFNUM 2048 #define SEC_CMA_COHERENT_BLK 0x8000 /* 32768 */ -#define SEC_CMA_COHERENT_MAX 32 +#define SEC_CMA_COHERENT_MAX 278 #define CMA_DMA_DESC_MEMBLOCK (SEC_CMA_COHERENT_BLK * SEC_CMA_COHERENT_MAX) #define CMA_DMA_DATA_MEMBLOCK (CMA_BUFSIZE_4K*CMA_BUFNUM) #define CMA_MEMBLOCK (CMA_DMA_DESC_MEMBLOCK + CMA_DMA_DATA_MEMBLOCK) -#define CONT_ARMREGION 0x02 /* Region CMA */ +#define CONT_REGION 0x02 /* Region CMA */ #else -#define CONT_MIPREGION 0x00 /* To access the MIPs mem, Not yet... */ +#define CONT_REGION 0x00 /* To access the MIPs mem, Not yet... 
*/ #endif /* !defined __ARM_ARCH_7A__ */ #define SEC_DMA_ALIGN (1<<16) @@ -1063,6 +1122,7 @@ typedef struct sec_mem_elem { void *va; /**< virtual address of driver pkt */ dma_addr_t dma_handle; /**< bus address assign by linux */ void *vac; /**< virtual address of cma buffer */ + struct page *pa_cma_page; /* phys to page address */ struct sec_mem_elem *next; } sec_mem_elem_t; @@ -1085,4 +1145,16 @@ typedef struct sk_buff_head PKT_LIST; #define PKTLIST_UNLINK(x, y) skb_unlink((struct sk_buff *)(y), (struct sk_buff_head *)(x)) #define PKTLIST_FINI(x) skb_queue_purge((struct sk_buff_head *)(x)) +typedef struct osl_timer { + struct timer_list *timer; + bool set; +} osl_timer_t; + +typedef void (*linux_timer_fn)(ulong arg); + +extern osl_timer_t * osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg); +extern void osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic); +extern void osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic); +extern bool osl_timer_del(osl_t *osh, osl_timer_t *t); + #endif /* _linux_osl_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/linuxver.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/linuxver.h index 7fa3e7b76332..36cba287ddbe 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/linuxver.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/linuxver.h @@ -2,7 +2,7 @@ * Linux-specific abstractions to gain some independence from linux kernel versions. * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: linuxver.h 604758 2015-12-08 12:01:08Z $ + * $Id: linuxver.h 646721 2016-06-30 12:36:41Z $ */ #ifndef _linuxver_h_ @@ -622,7 +622,7 @@ static inline bool binary_sema_up(tsk_ctl_t *tsk) spin_lock_init(&((tsk_ctl)->spinlock)); \ DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ - } \ + }; \ } #define PROC_STOP(tsk_ctl) \ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/miniopt.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/miniopt.h index 2eb6d18ea7ca..6722351436e3 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/miniopt.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/miniopt.h @@ -1,7 +1,7 @@ /* * Command line options parser. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/msgtrace.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/msgtrace.h index 0d67000c9df3..a2da1d3c7967 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/msgtrace.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/msgtrace.h @@ -1,7 +1,7 @@ /* * Trace messages sent over HBUS * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: msgtrace.h 514727 2014-11-12 03:02:48Z $ + * $Id: msgtrace.h 542902 2015-03-22 23:29:48Z $ */ #ifndef _MSGTRACE_H @@ -34,11 +34,9 @@ #include #endif - /* This marks the start of a packed structure section. */ #include -/* for osl_t */ -#include + #define MSGTRACE_VERSION 1 /* Message trace header */ @@ -58,23 +56,6 @@ typedef BWL_PRE_PACKED_STRUCT struct msgtrace_hdr { #define MSGTRACE_HDRLEN sizeof(msgtrace_hdr_t) -/* The hbus driver generates traces when sending a trace message. This causes endless traces. - * This flag must be set to TRUE in any hbus traces. The flag is reset in the function msgtrace_put. - * This prevents endless traces but generates hasardous lost of traces only in bus device code. - * It is recommendat to set this flag in macro SD_TRACE but not in SD_ERROR for avoiding missing - * hbus error traces. hbus error trace should not generates endless traces. 
- */ -extern bool msgtrace_hbus_trace; - -typedef void (*msgtrace_func_send_t)(void *hdl1, void *hdl2, uint8 *hdr, - uint16 hdrlen, uint8 *buf, uint16 buflen); -extern void msgtrace_start(void); -extern void msgtrace_stop(void); -extern int msgtrace_sent(void); -extern void msgtrace_put(char *buf, int count); -extern void msgtrace_init(void *hdl1, void *hdl2, msgtrace_func_send_t func_send); -extern bool msgtrace_event_enabled(void); - /* This marks the end of a packed structure section. */ #include diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/nan.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/nan.h new file mode 100644 index 000000000000..4534414fbc38 --- /dev/null +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/nan.h @@ -0,0 +1,1468 @@ +/* + * Fundamental types and constants relating to WFA NAN + * (Neighbor Awareness Networking) + * + * Copyright (C) 1999-2017, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: nan.h 700076 2017-05-17 14:42:22Z $ + */ +#ifndef _NAN_H_ +#define _NAN_H_ + +#include +#include <802.11.h> + + +/* This marks the start of a packed structure section. */ +#include + +/* WiFi NAN OUI values */ +#define NAN_OUI WFA_OUI /* WiFi OUI */ +/* For oui_type field identifying the type and version of the NAN IE. */ +#define NAN_OUI_TYPE 0x13 /* Type/Version */ +#define NAN_AF_OUI_TYPE 0x18 /* Type/Version */ +/* IEEE 802.11 vendor specific information element. (Same as P2P_IE_ID.) */ +#define NAN_IE_ID 0xdd + +/* Same as P2P_PUB_AF_CATEGORY and DOT11_ACTION_CAT_PUBLIC */ +#define NAN_PUB_AF_CATEGORY DOT11_ACTION_CAT_PUBLIC +/* Protected dual public action frame category */ +#define NAN_PROT_DUAL_PUB_AF_CATEGORY DOT11_ACTION_CAT_PDPA +/* IEEE 802.11 Public Action Frame Vendor Specific. (Same as P2P_PUB_AF_ACTION.) */ +#define NAN_PUB_AF_ACTION DOT11_PUB_ACTION_VENDOR_SPEC +/* Number of octents in hash of service name. (Same as P2P_WFDS_HASH_LEN.) */ +#define NAN_SVC_HASH_LEN 6 +/* Size of fixed length part of nan_pub_act_frame_t before attributes. */ +#define NAN_PUB_ACT_FRAME_FIXED_LEN 6 +/* Number of octents in master rank value. 
*/ +#define NAN_MASTER_RANK_LEN 8 +/* NAN public action frame header size */ +#define NAN_PUB_ACT_FRAME_HDR_SIZE (OFFSETOF(nan_pub_act_frame_t, data)) +/* NAN network ID */ +#define NAN_NETWORK_ID "\x51\x6F\x9A\x01\x00\x00" +/* Service Control Type length */ +#define NAN_SVC_CONTROL_TYPE_LEN 2 +/* Binding Bitmap length */ +#define NAN_BINDING_BITMAP_LEN 2 +/* Service Response Filter (SRF) control field masks */ +#define NAN_SRF_BLOOM_MASK 0x01 +#define NAN_SRF_INCLUDE_MASK 0x02 +#define NAN_SRF_INDEX_MASK 0x0C +/* SRF Bloom Filter index shift */ +#define NAN_SRF_BLOOM_SHIFT 2 +#define NAN_SRF_INCLUDE_SHIFT 1 +/* Mask for CRC32 output, used in hash function for NAN bloom filter */ +#define NAN_BLOOM_CRC32_MASK 0xFFFF + +/* Attribute TLV header size */ +#define NAN_ATTR_ID_OFF 0 +#define NAN_ATTR_LEN_OFF 1 +#define NAN_ATTR_DATA_OFF 3 + +#define NAN_ATTR_ID_LEN 1u /* ID field length */ +#define NAN_ATTR_LEN_LEN 2u /* Length field length */ +#define NAN_ATTR_HDR_LEN (NAN_ATTR_ID_LEN + NAN_ATTR_LEN_LEN) +#define NAN_ENTRY_CTRL_LEN 1 /* Entry control field length from FAM attribute */ +#define NAN_MAP_ID_LEN 1 /* MAP ID length to signify band */ +#define NAN_OPERATING_CLASS_LEN 1 /* operating class field length from NAN FAM */ +#define NAN_CHANNEL_NUM_LEN 1 /* channel number field length 1 byte */ + +/* NAN slot duration / period */ +#define NAN_MIN_TU 16 +#define NAN_TU_PER_DW 512 +#define NAN_MAX_DW 16 +#define NAN_MAX_TU (NAN_MAX_DW * NAN_TU_PER_DW) + +#define NAN_SLOT_DUR_0TU 0 +#define NAN_SLOT_DUR_16TU 16 +#define NAN_SLOT_DUR_32TU 32 +#define NAN_SLOT_DUR_64TU 64 +#define NAN_SLOT_DUR_128TU 128 +#define NAN_SLOT_DUR_256TU 256 +#define NAN_SLOT_DUR_512TU 512 +#define NAN_SLOT_DUR_1024TU 1024 +#define NAN_SLOT_DUR_2048TU 2048 +#define NAN_SLOT_DUR_4096TU 4096 +#define NAN_SLOT_DUR_8192TU 8192 + +#define NAN_MAP_ID_2G 2 /* NAN Further Avail Map ID for band 2.4G */ +#define NAN_MAP_ID_5G 5 /* NAN Further Avail Map ID for band 5G */ +#define NAN_MAP_NUM_IDS 2 /* Max 
number of NAN Further Avail Map IDs supported */ + +/* size of ndc id */ +#define NAN_DATA_NDC_ID_SIZE 6 + +#define NAN_AVAIL_ENTRY_LEN_RES0 7 /* Avail entry len in FAM attribute for resolution 16TU */ +#define NAN_AVAIL_ENTRY_LEN_RES1 5 /* Avail entry len in FAM attribute for resolution 32TU */ +#define NAN_AVAIL_ENTRY_LEN_RES2 4 /* Avail entry len in FAM attribute for resolution 64TU */ + +/* Vendor-specific public action frame for NAN */ +typedef BWL_PRE_PACKED_STRUCT struct nan_pub_act_frame_s { + /* NAN_PUB_AF_CATEGORY 0x04 */ + uint8 category_id; + /* NAN_PUB_AF_ACTION 0x09 */ + uint8 action_field; + /* NAN_OUI 0x50-6F-9A */ + uint8 oui[DOT11_OUI_LEN]; + /* NAN_OUI_TYPE 0x13 */ + uint8 oui_type; + /* One or more NAN Attributes follow */ + uint8 data[]; +} BWL_POST_PACKED_STRUCT nan_pub_act_frame_t; + +/* NAN attributes as defined in the nan spec */ +enum { + NAN_ATTR_MASTER_IND = 0, + NAN_ATTR_CLUSTER = 1, + NAN_ATTR_SVC_ID_LIST = 2, + NAN_ATTR_SVC_DESCRIPTOR = 3, + NAN_ATTR_CONN_CAP = 4, + NAN_ATTR_INFRA = 5, + NAN_ATTR_P2P = 6, + NAN_ATTR_IBSS = 7, + NAN_ATTR_MESH = 8, + NAN_ATTR_FURTHER_NAN_SD = 9, + NAN_ATTR_FURTHER_AVAIL = 10, + NAN_ATTR_COUNTRY_CODE = 11, + NAN_ATTR_RANGING = 12, + NAN_ATTR_CLUSTER_DISC = 13, + /* nan 2.0 */ + NAN_ATTR_SVC_DESC_EXTENSION = 14, + NAN_ATTR_NAN_DEV_CAP = 15, + NAN_ATTR_NAN_NDP = 16, + NAN_ATTR_NAN_NMSG = 17, + NAN_ATTR_NAN_AVAIL = 18, + NAN_ATTR_NAN_NDC = 19, + NAN_ATTR_NAN_NDL = 20, + NAN_ATTR_NAN_NDL_QOS = 21, + NAN_ATTR_MCAST_SCHED = 22, + NAN_ATTR_UNALIGN_SCHED = 23, + NAN_ATTR_PAGING_UCAST = 24, + NAN_ATTR_PAGING_MCAST = 25, + NAN_ATTR_RANGING_INFO = 26, + NAN_ATTR_RANGING_SETUP = 27, + NAN_ATTR_FTM_RANGE_REPORT = 28, + NAN_ATTR_ELEMENT_CONTAINER = 29, + NAN_ATTR_WLAN_INFRA_EXT = 30, + NAN_ATTR_EXT_P2P_OPER = 31, + NAN_ATTR_EXT_IBSS = 32, + NAN_ATTR_EXT_MESH = 33, + NAN_ATTR_CIPHER_SUITE_INFO = 34, + NAN_ATTR_SEC_CTX_ID_INFO = 35, + NAN_ATTR_SHARED_KEY_DESC = 36, + NAN_ATTR_MCAST_SCHED_CHANGE = 37, + 
NAN_ATTR_MCAST_SCHED_OWNER_CHANGE = 38, + NAN_ATTR_PUBLIC_AVAILABILITY = 39, + NAN_ATTR_SUB_SVC_ID_LIST = 40, + /* change NAN_ATTR_MAX_ID to max ids + 1, excluding NAN_ATTR_VENDOR_SPECIFIC. + * This is used in nan_parse.c + */ + NAN_ATTR_MAX_ID = NAN_ATTR_SUB_SVC_ID_LIST + 1, + + NAN_ATTR_VENDOR_SPECIFIC = 221 +}; + +enum wifi_nan_avail_resolution { + NAN_AVAIL_RES_16_TU = 0, + NAN_AVAIL_RES_32_TU = 1, + NAN_AVAIL_RES_64_TU = 2, + NAN_AVAIL_RES_INVALID = 255 +}; + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ie_s { + uint8 id; /* IE ID: NAN_IE_ID 0xDD */ + uint8 len; /* IE length */ + uint8 oui[DOT11_OUI_LEN]; /* NAN_OUI 50:6F:9A */ + uint8 oui_type; /* NAN_OUI_TYPE 0x13 */ + uint8 attr[]; /* var len attributes */ +} BWL_POST_PACKED_STRUCT wifi_nan_ie_t; + +#define NAN_IE_HDR_SIZE (OFFSETOF(wifi_nan_ie_t, attr)) + +/* master indication record */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_master_ind_attr_s { + uint8 id; + uint16 len; + uint8 master_preference; + uint8 random_factor; +} BWL_POST_PACKED_STRUCT wifi_nan_master_ind_attr_t; + +/* cluster attr record */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_cluster_attr_s { + uint8 id; + uint16 len; + uint8 amr[NAN_MASTER_RANK_LEN]; + uint8 hop_count; + /* Anchor Master Beacon Transmission Time */ + uint32 ambtt; +} BWL_POST_PACKED_STRUCT wifi_nan_cluster_attr_t; + +/* container for service ID records */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_svc_id_attr_s { + uint8 id; + uint16 len; + uint8 svcid[0]; /* 6*len of srvc IDs */ +} BWL_POST_PACKED_STRUCT wifi_nan_svc_id_attr_t; + +/* service_control bitmap for wifi_nan_svc_descriptor_attr_t below */ +#define NAN_SC_PUBLISH 0x0 +#define NAN_SC_SUBSCRIBE 0x1 +#define NAN_SC_FOLLOWUP 0x2 +/* Set to 1 if a Matching Filter field is included in descriptors. */ +#define NAN_SC_MATCHING_FILTER_PRESENT 0x4 +/* Set to 1 if a Service Response Filter field is included in descriptors. 
*/ +#define NAN_SC_SR_FILTER_PRESENT 0x8 +/* Set to 1 if a Service Info field is included in descriptors. */ +#define NAN_SC_SVC_INFO_PRESENT 0x10 +/* range is close proximity only */ +#define NAN_SC_RANGE_LIMITED 0x20 +/* Set to 1 if binding bitamp is present in descriptors */ +#define NAN_SC_BINDING_BITMAP_PRESENT 0x40 + +/* Service descriptor */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_svc_descriptor_attr_s { + /* Attribute ID - 0x03. */ + uint8 id; + /* Length of the following fields in the attribute */ + uint16 len; + /* Hash of the Service Name */ + uint8 svc_hash[NAN_SVC_HASH_LEN]; + /* Publish or subscribe instance id */ + uint8 instance_id; + /* Requestor Instance ID */ + uint8 requestor_id; + /* Service Control Bitmask. Also determines what data follows. */ + uint8 svc_control; + /* Optional fields follow */ +} BWL_POST_PACKED_STRUCT wifi_nan_svc_descriptor_attr_t; + +/* IBSS attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ibss_attr_s { + /* Attribute ID - 0x07. */ + uint8 id; + /* Length of the following fields in the attribute */ + uint16 len; + /* BSSID of the ibss */ + struct ether_addr bssid; + /* + map control:, bits: + [0-3]: Id for associated further avail map attribute + [4-5]: avail interval duration: 0:16ms; 1:32ms; 2:64ms; 3:reserved + [6] : repeat : 0 - applies to next DW, 1: 16 intervals max? wtf? + [7] : reserved + */ + uint8 map_ctrl; + /* avail. intervals bitmap, var len */ + uint8 avail_bmp[1]; +} BWL_POST_PACKED_STRUCT wifi_nan_ibss_attr_t; + +/* Further Availability MAP attr */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_favail_attr_s { + /* Attribute ID - 0x0A. 
*/ + uint8 id; + /* Length of the following fields in the attribute */ + uint16 len; + /* MAP id: val [0..15], values[16-255] reserved */ + uint8 map_id; + /* availibility entry, var len */ + uint8 avil_entry[1]; +} BWL_POST_PACKED_STRUCT wifi_nan_favail_attr_t; + +/* Further Availability MAP attr */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_avail_entry_s { + /* + entry control + [0-1]: avail interval duration: 0:16ms; 1:32ms; 2:64ms; + [2:7] reserved + */ + uint8 entry_ctrl; + /* operating class: freq band etc IEEE 802.11 */ + uint8 opclass; + /* channel number */ + uint8 chan; + /* avail bmp, var len */ + uint8 avail_bmp[1]; +} BWL_POST_PACKED_STRUCT wifi_nan_avail_entry_t; + +/* Map control Field */ +#define NAN_MAPCTRL_IDMASK 0x7 +#define NAN_MAPCTRL_DURSHIFT 4 +#define NAN_MAPCTRL_DURMASK 0x30 +#define NAN_MAPCTRL_REPEAT 0x40 +#define NAN_MAPCTRL_REPEATSHIFT 6 + +#define NAN_VENDOR_TYPE_RTT 0 +#define NAN_VENDOR_TYPE_P2P 1 + +/* Vendor Specific Attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_vendor_attr_s { + uint8 id; /* 0xDD */ + uint16 len; /* IE length */ + uint8 oui[DOT11_OUI_LEN]; /* 00-90-4C */ + uint8 type; /* attribute type */ + uint8 attr[1]; /* var len attributes */ +} BWL_POST_PACKED_STRUCT wifi_nan_vendor_attr_t; + +#define NAN_VENDOR_HDR_SIZE (OFFSETOF(wifi_nan_vendor_attr_t, attr)) + +/* p2p operation attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_p2p_op_attr_s { + /* Attribute ID - 0x06. */ + uint8 id; + /* Length of the following fields in the attribute */ + uint16 len; + /* P2P device role */ + uint8 dev_role; + /* BSSID of the ibss */ + struct ether_addr p2p_dev_addr; + /* + map control:, bits: + [0-3]: Id for associated further avail map attribute + [4-5]: avail interval duration: 0:16ms; 1:32ms; 2:64ms; 3:reserved + [6] : repeat : 0 - applies to next DW, 1: 16 intervals max? wtf? + [7] : reserved + */ + uint8 map_ctrl; + /* avail. 
intervals bitmap */ + uint8 avail_bmp[1]; +} BWL_POST_PACKED_STRUCT wifi_nan_p2p_op_attr_t; + +/* ranging attribute */ +#define NAN_RANGING_MAP_CTRL_ID_SHIFT 0 +#define NAN_RANGING_MAP_CTRL_ID_MASK 0x0F +#define NAN_RANGING_MAP_CTRL_DUR_SHIFT 4 +#define NAN_RANGING_MAP_CTRL_DUR_MASK 0x30 +#define NAN_RANGING_MAP_CTRL_REPEAT_SHIFT 6 +#define NAN_RANGING_MAP_CTRL_REPEAT_MASK 0x40 +#define NAN_RANGING_MAP_CTRL_REPEAT_DW(_ctrl) (((_ctrl) & \ + NAN_RANGING_MAP_CTRL_DUR_MASK) ? 16 : 1) +#define NAN_RANGING_MAP_CTRL(_id, _dur, _repeat) (\ + (((_id) << NAN_RANGING_MAP_CTRL_ID_SHIFT) & \ + NAN_RANGING_MAP_CTRL_ID_MASK) | \ + (((_dur) << NAN_RANGING_MAP_CTRL_DUR_SHIFT) & \ + NAN_RANGING_MAP_CTRL_DUR_MASK) | \ + (((_repeat) << NAN_RANGING_MAP_CTRL_REPEAT_SHIFT) & \ + NAN_RANGING_MAP_CTRL_REPEAT_MASK)) + +enum { + NAN_RANGING_PROTO_FTM = 0 +}; + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_attr_s { + uint8 id; /* 0x0C */ + uint16 len; /* length that follows */ + struct ether_addr dev_addr; /* device mac address */ + + /* + map control:, bits: + [0-3]: Id for associated further avail map attribute + [4-5]: avail interval duration: 0:16ms; 1:32ms; 2:64ms; 3:reserved + [6] : repeat : 0 - applies to next DW, 1: 16 intervals max? wtf? 
+ [7] : reserved + */ + uint8 map_ctrl; + + uint8 protocol; /* FTM = 0 */ + uint32 avail_bmp; /* avail interval bitmap */ +} BWL_POST_PACKED_STRUCT wifi_nan_ranging_attr_t; + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_info_attr_s { + uint8 id; /* 0x1A */ + uint16 len; /* length that follows */ + /* + location info availability bit map + 0: LCI Local Coordinates + 1: Geospatial LCI WGS84 + 2: Civi Location + 3: Last Movement Indication + [4-7]: reserved + */ + uint8 lc_info_avail; + /* + Last movement indication + present if bit 3 is set in lc_info_avail + cluster TSF[29:14] at the last detected platform movement + */ + uint16 last_movement; + +} BWL_POST_PACKED_STRUCT wifi_nan_ranging_info_attr_t; + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_setup_attr_hdr_s { + uint8 id; /* 0x1B */ + uint16 len; /* length that follows */ + uint8 dialog_token; /* Identify req and resp */ + uint8 type_status; /* bits 0-3 type, 4-7 status */ + /* reason code + i. when frm type = response & status = reject + ii. frm type = termination + */ + uint8 reason; +} BWL_POST_PACKED_STRUCT wifi_nan_ranging_setup_attr_hdr_t; + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_setup_attr_s { + + wifi_nan_ranging_setup_attr_hdr_t setup_attr_hdr; + /* Below fields not required when frm type = termination */ + uint8 ranging_ctrl; /* Bit 0: ranging report required or not */ + uint8 ftm_params[3]; + uint8 data[]; /* schedule entry list */ +} BWL_POST_PACKED_STRUCT wifi_nan_ranging_setup_attr_t; + +#define NAN_RANGE_SETUP_ATTR_OFFSET_TBM_INFO (OFFSETOF(wifi_nan_ranging_setup_attr_t, data)) + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ranging_report_attr_s { + uint8 id; /* 0x1C */ + uint16 len; /* length that follows */ + /* FTM report format in spec. 
+ See definition in 9.4.2.22.18 in 802.11mc D5.0 + */ + uint8 entry_count; + uint8 data[2]; /* includes pad */ + /* + dot11_ftm_range_entry_t entries[entry_count]; + uint8 error_count; + dot11_ftm_error_entry_t errors[error_count]; + */ +} BWL_POST_PACKED_STRUCT wifi_nan_ranging_report_attr_t; + +/* Ranging control flags */ +#define NAN_RNG_REPORT_REQUIRED 0x01 +#define NAN_RNG_FTM_PARAMS_PRESENT 0x02 +#define NAN_RNG_SCHED_ENTRY_PRESENT 0X04 + +/* Location info flags */ +#define NAN_RNG_LOCATION_FLAGS_LOCAL_CORD 0x1 +#define NAN_RNG_LOCATION_FLAGS_GEO_SPATIAL 0x2 +#define NAN_RNG_LOCATION_FLAGS_CIVIC 0x4 +#define NAN_RNG_LOCATION_FLAGS_LAST_MVMT 0x8 + +/* Last movement mask and shift value */ +#define NAN_RNG_LOCATION_MASK_LAST_MVT_TSF 0x3FFFC000 +#define NAN_RNG_LOCATION_SHIFT_LAST_MVT_TSF 14 + +/* FTM params shift values */ +#define NAN_FTM_MAX_BURST_DUR_SHIFT 0 +#define NAN_FTM_MIN_FTM_DELTA_SHIFT 4 +#define NAN_FTM_NUM_FTM_SHIFT 10 +#define NAN_FTM_FORMAT_BW_SHIFT 15 + +/* FTM params mask */ +#define NAN_FTM_MAX_BURST_DUR_MASK 0x00000F +#define NAN_FTM_MIN_FTM_DELTA_MASK 0x00003F +#define NAN_FTM_NUM_FTM_MASK 0x00001F +#define NAN_FTM_FORMAT_BW_MASK 0x00003F + +#define FTM_PARAMS_BURSTTMO_FACTOR 250 + +/* set to value to uint32 */ +#define NAN_FTM_SET_BURST_DUR(ftm, dur) (ftm |= (((dur + 2) & NAN_FTM_MAX_BURST_DUR_MASK) <<\ + NAN_FTM_MAX_BURST_DUR_SHIFT)) +#define NAN_FTM_SET_FTM_DELTA(ftm, delta) (ftm |= (((delta/100) & NAN_FTM_MIN_FTM_DELTA_MASK) <<\ + NAN_FTM_MIN_FTM_DELTA_SHIFT)) +#define NAN_FTM_SET_NUM_FTM(ftm, delta) (ftm |= ((delta & NAN_FTM_NUM_FTM_MASK) <<\ + NAN_FTM_NUM_FTM_SHIFT)) +#define NAN_FTM_SET_FORMAT_BW(ftm, delta) (ftm |= ((delta & NAN_FTM_FORMAT_BW_MASK) <<\ + NAN_FTM_FORMAT_BW_SHIFT)) +/* set uint32 to attribute */ +#define NAN_FTM_PARAMS_UINT32_TO_ATTR(ftm_u32, ftm_attr) {ftm_attr[0] = ftm_u32 & 0xFF; \ + ftm_attr[1] = (ftm_u32 >> 8) & 0xFF; ftm_attr[2] = (ftm_u32 >> 16) & 0xFF;} + +/* get atrribute to uint32 */ +#define 
NAN_FTM_PARAMS_ATTR_TO_UINT32(ftm_p, ftm_u32) (ftm_u32 = ftm_p[0] | ftm_p[1] << 8 | \ + ftm_p[2] << 16) +/* get param values from uint32 */ +#define NAN_FTM_GET_BURST_DUR(ftm) (((ftm >> NAN_FTM_MAX_BURST_DUR_SHIFT) &\ + NAN_FTM_MAX_BURST_DUR_MASK)) +#define NAN_FTM_GET_BURST_DUR_USEC(_val) ((1 << ((_val)-2)) * FTM_PARAMS_BURSTTMO_FACTOR) +#define NAN_FTM_GET_FTM_DELTA(ftm) (((ftm >> NAN_FTM_MIN_FTM_DELTA_SHIFT) &\ + NAN_FTM_MIN_FTM_DELTA_MASK)*100) +#define NAN_FTM_GET_NUM_FTM(ftm) ((ftm >> NAN_FTM_NUM_FTM_SHIFT) &\ + NAN_FTM_NUM_FTM_MASK) +#define NAN_FTM_GET_FORMAT_BW(ftm) ((ftm >> NAN_FTM_FORMAT_BW_SHIFT) &\ + NAN_FTM_FORMAT_BW_MASK) + +#define NAN_CONN_CAPABILITY_WFD 0x0001 +#define NAN_CONN_CAPABILITY_WFDS 0x0002 +#define NAN_CONN_CAPABILITY_TDLS 0x0004 +#define NAN_CONN_CAPABILITY_INFRA 0x0008 +#define NAN_CONN_CAPABILITY_IBSS 0x0010 +#define NAN_CONN_CAPABILITY_MESH 0x0020 + +#define NAN_DEFAULT_MAP_ID 0 /* nan default map id */ +#define NAN_DEFAULT_MAP_CTRL 0 /* nan default map control */ + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_conn_cap_attr_s { + /* Attribute ID - 0x04. 
*/ + uint8 id; + /* Length of the following fields in the attribute */ + uint16 len; + uint16 conn_cap_bmp; /* Connection capability bitmap */ +} BWL_POST_PACKED_STRUCT wifi_nan_conn_cap_attr_t; + +/* NAN Element container Attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_container_attr_s { + uint8 id; /* id - 0x20 */ + uint16 len; /* Total length of following IEs */ + uint8 map_id; /* map id */ + uint8 data[1]; /* Data pointing to one or more IEs */ +} BWL_POST_PACKED_STRUCT wifi_nan_container_attr_t; + +/* NAN 2.0 NAN avail attribute */ + +/* Availability Attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_avail_attr_s { + uint8 id; /* id - 0x12 */ + uint16 len; /* total length */ + uint8 seqid; /* sequence id */ + uint16 ctrl; /* attribute control */ + uint8 entry[1]; /* availability entry list */ +} BWL_POST_PACKED_STRUCT wifi_nan_avail_attr_t; + +/* for processing/building time bitmap info in nan_avail_entry */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_time_bitmap_s { + uint16 ctrl; /* Time bitmap control */ + uint8 len; /* Time bitmap length */ + uint8 bitmap[]; /* Time bitmap */ +} BWL_POST_PACKED_STRUCT wifi_nan_time_bitmap_t; + +/* Availability Entry format */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_avail_entry_attr_s { + uint16 len; /* Length */ + uint16 entry_cntrl; /* Entry Control */ + uint8 var[]; /* Time bitmap and channel entry list */ +} BWL_POST_PACKED_STRUCT wifi_nan_avail_entry_attr_t; + +/* FAC Channel Entry (section 10.7.19.1.5) */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_chan_entry_s { + uint8 oper_class; /* Operating Class */ + uint16 chan_bitmap; /* Channel Bitmap */ + uint8 primary_chan_bmp; /* Primary Channel Bitmap */ + uint8 aux_chan[0]; /* Auxiliary Channel bitmap */ +} BWL_POST_PACKED_STRUCT wifi_nan_chan_entry_t; + +/* Channel entry */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_channel_entry_s { + uint8 opclass; /* Operating class */ + uint16 chan_bitmap; /* Channel bitmap */ + uint8 
prim_bitmap; /* Primary channel bitmap */ + uint16 aux_bitmap; /* Time bitmap length */ +} BWL_POST_PACKED_STRUCT wifi_nan_channel_entry_t; + +/* Type of Availability: committed */ +#define NAN_ENTRY_CNTRL_TYPE_COMM_AVAIL_MASK 0x1 +/* Type of Availability: potential */ +#define NAN_ENTRY_CNTRL_TYPE_POTEN_AVAIL_MASK 0x2 +/* Type of Availability: conditional */ +#define NAN_ENTRY_CNTRL_TYPE_COND_AVAIL_MASK 0x4 + +#define NAN_AVAIL_CTRL_MAP_ID_MASK 0x000F +#define NAN_AVAIL_CTRL_MAP_ID(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_MAP_ID_MASK) +#define NAN_AVAIL_CTRL_COMM_CHANGED_MASK 0x0010 +#define NAN_AVAIL_CTRL_COMM_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_COMM_CHANGED_MASK) +#define NAN_AVAIL_CTRL_POTEN_CHANGED_MASK 0x0020 +#define NAN_AVAIL_CTRL_POTEN_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_POTEN_CHANGED_MASK) +#define NAN_AVAIL_CTRL_PUBLIC_CHANGED_MASK 0x0040 +#define NAN_AVAIL_CTRL_PUBLIC_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_PUBLIC_CHANGED_MASK) +#define NAN_AVAIL_CTRL_NDC_CHANGED_MASK 0x0080 +#define NAN_AVAIL_CTRL_NDC_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_NDC_CHANGED_MASK) +#define NAN_AVAIL_CTRL_MCAST_CHANGED_MASK 0x0100 +#define NAN_AVAIL_CTRL_MCAST_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_MCAST_CHANGED_MASK) +#define NAN_AVAIL_CTRL_MCAST_CHG_CHANGED_MASK 0x0200 +#define NAN_AVAIL_CTRL_MCAST_CHG_CHANGED(_ctrl) ((_ctrl) & NAN_AVAIL_CTRL_MCAST_CHG_CHANGED_MASK) +#define NAN_AVAIL_CTRL_CHANGED_FLAGS_MASK 0x03f0 + +#define NAN_AVAIL_ENTRY_CTRL_AVAIL_TYPE_MASK 0x07 +#define NAN_AVAIL_ENTRY_CTRL_AVAIL_TYPE(_flags) ((_flags) & NAN_AVAIL_ENTRY_CTRL_AVAIL_TYPE_MASK) +#define NAN_AVAIL_ENTRY_CTRL_USAGE_MASK 0x18 +#define NAN_AVAIL_ENTRY_CTRL_USAGE_SHIFT 3 +#define NAN_AVAIL_ENTRY_CTRL_USAGE(_flags) (((_flags) & NAN_AVAIL_ENTRY_CTRL_USAGE_MASK) \ + >> NAN_AVAIL_ENTRY_CTRL_USAGE_SHIFT) +#define NAN_AVAIL_ENTRY_CTRL_UTIL_MASK 0x1E0 +#define NAN_AVAIL_ENTRY_CTRL_UTIL_SHIFT 5 +#define NAN_AVAIL_ENTRY_CTRL_UTIL(_flags) (((_flags) & NAN_AVAIL_ENTRY_CTRL_UTIL_MASK) \ + >> 
NAN_AVAIL_ENTRY_CTRL_UTIL_SHIFT) +#define NAN_AVAIL_ENTRY_CTRL_RX_NSS_MASK 0xF00 +#define NAN_AVAIL_ENTRY_CTRL_RX_NSS_SHIFT 8 +#define NAN_AVAIL_ENTRY_CTRL_RX_NSS(_flags) (((_flags) & NAN_AVAIL_ENTRY_CTRL_RX_NSS_MASK) \ + >> NAN_AVAIL_ENTRY_CTRL_RX_NSS_SHIFT) +#define NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT_MASK 0x1000 +#define NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT_SHIFT 12 +#define NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT(_flags) (((_flags) & \ + NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT_MASK) >> NAN_AVAIL_ENTRY_CTRL_BITMAP_PRESENT_SHIFT) + +#define NAN_TIME_BMAP_CTRL_BITDUR_MASK 0x07 +#define NAN_TIME_BMAP_CTRL_BITDUR(_flags) ((_flags) & NAN_TIME_BMAP_CTRL_BITDUR_MASK) +#define NAN_TIME_BMAP_CTRL_PERIOD_MASK 0x38 +#define NAN_TIME_BMAP_CTRL_PERIOD_SHIFT 3 +#define NAN_TIME_BMAP_CTRL_PERIOD(_flags) (((_flags) & NAN_TIME_BMAP_CTRL_PERIOD_MASK) \ + >> NAN_TIME_BMAP_CTRL_PERIOD_SHIFT) +#define NAN_TIME_BMAP_CTRL_OFFSET_MASK 0x7FC0 +#define NAN_TIME_BMAP_CTRL_OFFSET_SHIFT 6 +#define NAN_TIME_BMAP_CTRL_OFFSET(_flags) (((_flags) & NAN_TIME_BMAP_CTRL_OFFSET_MASK) \ + >> NAN_TIME_BMAP_CTRL_OFFSET_SHIFT) +#define NAN_TIME_BMAP_LEN(avail_entry) \ + (*(uint8 *)(((wifi_nan_avail_entry_attr_t *)avail_entry)->var + 2)) + +#define NAN_AVAIL_CHAN_LIST_HDR_LEN 1 +#define NAN_AVAIL_CHAN_LIST_TYPE_CHANNEL 0x01 +#define NAN_AVAIL_CHAN_LIST_NON_CONTIG_BW 0x02 +#define NAN_AVAIL_CHAN_LIST_NUM_ENTRIES_MASK 0xF0 +#define NAN_AVAIL_CHAN_LIST_NUM_ENTRIES_SHIFT 4 +#define NAN_AVAIL_CHAN_LIST_NUM_ENTRIES(_ctrl) (((_ctrl) & NAN_AVAIL_CHAN_LIST_NUM_ENTRIES_MASK) \ + >> NAN_AVAIL_CHAN_LIST_NUM_ENTRIES_SHIFT) + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_channel_entry_list_s { + uint8 chan_info; + uint8 var[0]; +} BWL_POST_PACKED_STRUCT wifi_nan_channel_entry_list_t; + +/* define for chan_info */ +#define NAN_CHAN_OP_CLASS_MASK 0x01 +#define NAN_CHAN_NON_CONT_BW_MASK 0x02 +#define NAN_CHAN_RSVD_MASK 0x03 +#define NAN_CHAN_NUM_ENTRIES_MASK 0xF0 + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_band_entry_s { + 
uint8 band[0]; +} BWL_POST_PACKED_STRUCT wifi_nan_band_entry_t; + +/* Type of Availability: committed */ +#define NAN_ENTRY_CNTRL_TYPE_COMM_AVAIL 0x1 +/* Type of Availability: potential */ +#define NAN_ENTRY_CNTRL_TYPE_POTEN_AVAIL 0x2 +/* Type of Availability: conditional */ +#define NAN_ENTRY_CNTRL_TYPE_COND_AVAIL 0x4 +/* Committed + Potential */ +#define NAN_ENTRY_CNTRL_TYPE_COMM_POTEN \ + (NAN_ENTRY_CNTRL_TYPE_COMM_AVAIL | NAN_ENTRY_CNTRL_TYPE_POTEN_AVAIL) +/* Conditional + Potential */ +#define NAN_ENTRY_CNTRL_TYPE_COND_POTEN \ + (NAN_ENTRY_CNTRL_TYPE_COND_AVAIL | NAN_ENTRY_CNTRL_TYPE_POTEN_AVAIL) + +/* Type of Availability */ +#define NAN_ENTRY_CNTRL_TYPE_OF_AVAIL_MASK 0x07 +#define NAN_ENTRY_CNTRL_TYPE_OF_AVAIL_SHIFT 0 +/* Usage Preference */ +#define NAN_ENTRY_CNTRL_USAGE_PREF_MASK 0x18 +#define NAN_ENTRY_CNTRL_USAGE_PREF_SHIFT 3 +/* Utilization */ +#define NAN_ENTRY_CNTRL_UTIL_MASK 0x1E0 +#define NAN_ENTRY_CNTRL_UTIL_SHIFT 5 + +/* Time Bitmap Control field (section 5.7.18.2.3) */ + +/* Reserved */ +#define NAN_TIME_BMP_CNTRL_RSVD_MASK 0x01 +#define NAN_TIME_BMP_CNTRL_RSVD_SHIFT 0 +/* Bitmap Len */ +#define NAN_TIME_BMP_CNTRL_BMP_LEN_MASK 0x7E +#define NAN_TIME_BMP_CNTRL_BMP_LEN_SHIFT 1 +/* Bit Duration */ +#define NAN_TIME_BMP_CNTRL_BIT_DUR_MASK 0x380 +#define NAN_TIME_BMP_CNTRL_BIT_DUR_SHIFT 7 +/* Bitmap Len */ +#define NAN_TIME_BMP_CNTRL_PERIOD_MASK 0x1C00 +#define NAN_TIME_BMP_CNTRL_PERIOD_SHIFT 10 +/* Start Offset */ +#define NAN_TIME_BMP_CNTRL_START_OFFSET_MASK 0x3FE000 +#define NAN_TIME_BMP_CNTRL_START_OFFSET_SHIFT 13 +/* Reserved */ +#define NAN_TIME_BMP_CNTRL_RESERVED_MASK 0xC00000 +#define NAN_TIME_BMP_CNTRL_RESERVED_SHIFT 22 + +/* Time Bitmap Control field: Period */ +typedef enum +{ + NAN_TIME_BMP_CTRL_PERIOD_128TU = 1, + NAN_TIME_BMP_CTRL_PERIOD_256TU = 2, + NAN_TIME_BMP_CTRL_PERIOD_512TU = 3, + NAN_TIME_BMP_CTRL_PERIOD_1024TU = 4, + NAN_TIME_BMP_CTRL_PERIOD_2048U = 5, + NAN_TIME_BMP_CTRL_PERIOD_4096U = 6, + NAN_TIME_BMP_CTRL_PERIOD_8192U = 7 +} 
nan_time_bmp_ctrl_repeat_interval_t; + +enum +{ + NAN_TIME_BMP_BIT_DUR_16TU_IDX = 0, + NAN_TIME_BMP_BIT_DUR_32TU_IDX = 1, + NAN_TIME_BMP_BIT_DUR_64TU_IDX = 2, + NAN_TIME_BMP_BIT_DUR_128TU_IDX = 3 +}; + +enum +{ + NAN_TIME_BMP_BIT_DUR_IDX_0 = 16, + NAN_TIME_BMP_BIT_DUR_IDX_1 = 32, + NAN_TIME_BMP_BIT_DUR_IDX_2 = 64, + NAN_TIME_BMP_BIT_DUR_IDX_3 = 128 +}; + +enum +{ + NAN_TIME_BMP_CTRL_PERIOD_IDX_1 = 128, + NAN_TIME_BMP_CTRL_PERIOD_IDX_2 = 256, + NAN_TIME_BMP_CTRL_PERIOD_IDX_3 = 512, + NAN_TIME_BMP_CTRL_PERIOD_IDX_4 = 1024, + NAN_TIME_BMP_CTRL_PERIOD_IDX_5 = 2048, + NAN_TIME_BMP_CTRL_PERIOD_IDX_6 = 4096, + NAN_TIME_BMP_CTRL_PERIOD_IDX_7 = 8192 +}; + +/* Channel Entries List field */ + +/* Type */ +#define NAN_CHAN_ENTRY_TYPE_MASK 0x01 +#define NAN_CHAN_ENTRY_TYPE_SHIFT 0 +/* Channel Entry Length Indication */ +#define NAN_CHAN_ENTRY_LEN_IND_MASK 0x02 +#define NAN_CHAN_ENTRY_LEN_IND_SHIFT 1 +/* Reserved */ +#define NAN_CHAN_ENTRY_RESERVED_MASK 0x0C +#define NAN_CHAN_ENTRY_RESERVED_SHIFT 2 +/* Number of FAC Band or Channel Entries */ +#define NAN_CHAN_ENTRY_NO_OF_CHAN_ENTRY_MASK 0xF0 +#define NAN_CHAN_ENTRY_NO_OF_CHAN_ENTRY_SHIFT 4 + +#define NAN_CHAN_ENTRY_TYPE_BANDS 0 +#define NAN_CHAN_ENTRY_TYPE_OPCLASS_CHANS 1 + +#define NAN_CHAN_ENTRY_BW_LT_80MHZ 0 +#define NAN_CHAN_ENTRY_BW_EQ_160MHZ 1 + +/* + * NDL Attribute WFA Tech. 
Spec ver 1.0.r12 (section 10.7.19.2) + */ +#define NDL_ATTR_IM_MAP_ID_LEN 1 +#define NDL_ATTR_IM_TIME_BMP_CTRL_LEN 2 +#define NDL_ATTR_IM_TIME_BMP_LEN_LEN 1 + +/* + * NDL Control field - Table xx + */ +#define NDL_ATTR_CTRL_PEER_ID_PRESENT_MASK 0x01 +#define NDL_ATTR_CTRL_PEER_ID_PRESENT_SHIFT 0 +#define NDL_ATTR_CTRL_IM_SCHED_PRESENT_MASK 0x02 +#define NDL_ATTR_CTRL_IM_SCHED_PRESENT_SHIFT 1 +#define NDL_ATTR_CTRL_NDC_ATTR_PRESENT_MASK 0x04 +#define NDL_ATTR_CTRL_NDC_ATTR_PRESENT_SHIFT 2 +#define NDL_ATTR_CTRL_QOS_ATTR_PRESENT_MASK 0x08 +#define NDL_ATTR_CTRL_QOS_ATTR_PRESENT_SHIFT 3 +#define NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT_MASK 0x10 /* max idle period */ +#define NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT_SHIFT 4 +#define NDL_ATTR_CTRL_NDL_TYPE_MASK 0x20 /* NDL type */ +#define NDL_ATTR_CTRL_NDL_TYPE_SHIFT 5 +#define NDL_ATTR_CTRL_NDL_SETUP_REASON_MASK 0xC0 /* NDL Setup Reason */ +#define NDL_ATTR_CTRL_NDL_SETUP_REASON_SHIFT 6 + +/* NDL setup Reason */ +#define NDL_ATTR_CTRL_NDL_TYPE_S_NDL 0x0 /* S-NDL */ +#define NDL_ATTR_CTRL_NDL_TYPE_P_NDL 0x1 /* P-NDL */ + +/* NDL setup Reason */ +#define NDL_ATTR_CTRL_NDL_SETUP_REASON_NDP_RANG 0x0 /* NDP or Ranging */ +#define NDL_ATTR_CTRL_NDL_SETUP_REASON_FSD_GAS 0x1 /* FSD using GAS */ + +#define NAN_NDL_TYPE_MASK 0x0F +#define NDL_ATTR_TYPE_STATUS_REQUEST 0x00 +#define NDL_ATTR_TYPE_STATUS_RESPONSE 0x01 +#define NDL_ATTR_TYPE_STATUS_CONFIRM 0x02 +#define NDL_ATTR_TYPE_STATUS_CONTINUED 0x00 +#define NDL_ATTR_TYPE_STATUS_ACCEPTED 0x10 +#define NDL_ATTR_TYPE_STATUS_REJECTED 0x20 + +#define NAN_NDL_TYPE_CHECK(_ndl, x) (((_ndl)->type_status & NAN_NDL_TYPE_MASK) == (x)) +#define NAN_NDL_REQUEST(_ndl) (((_ndl)->type_status & NAN_NDL_TYPE_MASK) == \ + NDL_ATTR_TYPE_STATUS_REQUEST) +#define NAN_NDL_RESPONSE(_ndl) (((_ndl)->type_status & NAN_NDL_TYPE_MASK) == \ + NDL_ATTR_TYPE_STATUS_RESPONSE) +#define NAN_NDL_CONFIRM(_ndl) (((_ndl)->type_status & NAN_NDL_TYPE_MASK) == \ + NDL_ATTR_TYPE_STATUS_CONFIRM) + + +#define NAN_NDL_STATUS_SHIFT 
4 +#define NAN_NDL_STATUS_MASK 0xF0 +#define NAN_NDL_CONT(_ndl) (((_ndl)->type_status & NAN_NDL_STATUS_MASK) == \ + NDL_ATTR_TYPE_STATUS_CONTINUED) +#define NAN_NDL_ACCEPT(_ndl) (((_ndl)->type_status & NAN_NDL_STATUS_MASK) == \ + NDL_ATTR_TYPE_STATUS_ACCEPTED) +#define NAN_NDL_REJECT(_ndl) (((_ndl)->type_status & NAN_NDL_STATUS_MASK) == \ + NDL_ATTR_TYPE_STATUS_REJECTED) + +#define NDL_ATTR_CTRL_NONE 0 +#define NDL_ATTR_CTRL_PEER_ID_PRESENT (1 << NDL_ATTR_CTRL_PEER_ID_PRESENT_SHIFT) +#define NDL_ATTR_CTRL_IMSCHED_PRESENT (1 << NDL_ATTR_CTRL_IM_SCHED_PRESENT_SHIFT) +#define NDL_ATTR_CTRL_NDC_PRESENT (1 << NDL_ATTR_CTRL_NDC_ATTR_PRESENT_SHIFT) +#define NDL_ATTR_CTRL_NDL_QOS_PRESENT (1 << NDL_ATTR_CTRL_QOS_ATTR_PRESENT_SHIFT) +#define NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT (1 << NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT_SHIFT) + +#define NA_NDL_IS_IMMUT_PRESENT(ndl) (((ndl)->ndl_ctrl) & NDL_ATTR_CTRL_IMSCHED_PRESENT) +#define NA_NDL_IS_PEER_ID_PRESENT(ndl) (((ndl)->ndl_ctrl) & NDL_ATTR_CTRL_PEER_ID_PRESENT) +#define NA_NDL_IS_MAX_IDLE_PER_PRESENT(ndl) (((ndl)->ndl_ctrl) & NDL_ATTR_CTRL_MAX_IDLE_PER_PRESENT) + +#define NDL_ATTR_PEERID_LEN 1 +#define NDL_ATTR_MAX_IDLE_PERIOD_LEN 2 + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndl_attr_s { + uint8 id; /* NAN_ATTR_NAN_NDL = 0x17 */ + uint16 len; /* Length of the fields in the attribute */ + uint8 dialog_token; /* Identify req and resp */ + uint8 type_status; /* Bits[3-0] type subfield, Bits[7-4] status subfield */ + uint8 reason; /* Identifies reject reason */ + uint8 ndl_ctrl; /* NDL control field */ + uint8 var[]; /* Optional fields follow */ +} BWL_POST_PACKED_STRUCT wifi_nan_ndl_attr_t; + +/* + * NDL QoS Attribute WFA Tech. Spec ver r26 + */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndl_qos_attr_s { + uint8 id; /* NAN_ATTR_NAN_NDL_QOS = 24 */ + uint16 len; /* Length of the attribute field following */ + uint8 min_slots; /* Min. 
number of FAW slots needed per DW interval */ + uint16 max_latency; /* Max interval between non-cont FAW */ +} BWL_POST_PACKED_STRUCT wifi_nan_ndl_qos_attr_t; + +/* no preference to min time slots */ +#define NAN_NDL_QOS_MIN_SLOT_NO_PREF 0 +/* no preference to no. of slots between two non-contiguous slots */ +#define NAN_NDL_QOS_MAX_LAT_NO_PREF 0xFFFF + +/* Device Capability Attribute */ + +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_dev_cap_s { + uint8 id; /* 0x0F */ + uint16 len; /* Length */ + uint8 map_id; /* map id */ + uint16 commit_dw_info; /* Committed DW Info */ + uint8 bands_supported; /* Supported Bands */ + uint8 op_mode; /* Operation Mode */ + uint8 num_antennas; /* Bit 0-3 tx, 4-7 rx */ + uint16 chan_switch_time; /* Max channel switch time in us */ + uint8 capabilities; /* DFS Master, Extended key id etc */ +} BWL_POST_PACKED_STRUCT wifi_nan_dev_cap_t; + +/* Awake DW Info field format */ + +/* 2.4GHz DW */ +#define NAN_DEV_CAP_AWAKE_DW_2G_MASK 0x07 +/* 5GHz DW */ +#define NAN_DEV_CAP_AWAKE_DW_5G_MASK 0x38 +/* Reserved */ +#define NAN_DEV_CAP_AWAKE_DW_RSVD_MASK 0xC0 + +/* bit shift for dev cap */ +#define NAN_DEV_CAP_AWAKE_DW_2G_SHIFT 0 +#define NAN_DEV_CAP_AWAKE_DW_5G_SHIFT 3 + +/* Device Capability Attribute Format */ + +/* Committed DW Info field format */ +/* 2.4GHz DW */ +#define NAN_DEV_CAP_COMMIT_DW_2G_MASK 0x07 +#define NAN_DEV_CAP_COMMIT_DW_2G_OVERWRITE_MASK 0x3C0 +/* 5GHz DW */ +#define NAN_DEV_CAP_COMMIT_DW_5G_MASK 0x38 +#define NAN_DEV_CAP_COMMIT_DW_5G_OVERWRITE_MASK 0x3C00 +/* Reserved */ +#define NAN_DEV_CAP_COMMIT_DW_RSVD_MASK 0xC000 +/* Committed DW bit shift for dev cap */ +#define NAN_DEV_CAP_COMMIT_DW_2G_SHIFT 0 +#define NAN_DEV_CAP_COMMIT_DW_5G_SHIFT 3 +#define NAN_DEV_CAP_COMMIT_DW_2G_OVERWRITE_SHIFT 6 +#define NAN_DEV_CAP_COMMIT_DW_5G_OVERWRITE_SHIFT 10 +/* Operation Mode */ +#define NAN_DEV_CAP_OP_PHY_MODE_HT_ONLY 0x00 +#define NAN_DEV_CAP_OP_PHY_MODE_VHT 0x01 +#define NAN_DEV_CAP_OP_PHY_MODE_VHT_8080 0x02 +#define 
NAN_DEV_CAP_OP_PHY_MODE_VHT_160 0x04 +#define NAN_DEV_CAP_OP_PAGING_NDL 0x08 + +#define NAN_DEV_CAP_OP_MODE_VHT_MASK 0x01 +#define NAN_DEV_CAP_OP_MODE_VHT8080_MASK 0x03 +#define NAN_DEV_CAP_OP_MODE_VHT160_MASK 0x05 +#define NAN_DEV_CAP_OP_MODE_PAGING_NDL_MASK 0x08 + +#define NAN_DEV_CAP_RX_ANT_SHIFT 4 +#define NAN_DEV_CAP_TX_ANT_MASK 0x0F +#define NAN_DEV_CAP_RX_ANT_MASK 0xF0 + +/* Device capabilities */ + +/* DFS master capability */ +#define NAN_DEV_CAP_DFS_MASTER_MASK 0x01 +#define NAN_DEV_CAP_DFS_MASTER_SHIFT 0 +/* extended iv cap */ +#define NAN_DEV_CAP_EXT_KEYID_MASK 0x02 +#define NAN_DEV_CAP_EXT_KEYID_SHIFT 1 + +/* Band IDs */ +enum { + NAN_BAND_ID_TVWS = 0, + NAN_BAND_ID_SIG = 1, /* Sub 1 GHz */ + NAN_BAND_ID_2G = 2, /* 2.4 GHz */ + NAN_BAND_ID_3G = 3, /* 3.6 GHz */ + NAN_BAND_ID_5G = 4, /* 4.9 & 5 GHz */ + NAN_BAND_ID_60G = 5 +}; +typedef uint8 nan_band_id_t; + +/* + * Unaligned schedule attribute section 10.7.19.6 spec. ver r15 + */ +#define NAN_ULW_ATTR_CTRL_SCHED_ID_MASK 0x000F +#define NAN_ULW_ATTR_CTRL_SCHED_ID_SHIFT 0 +#define NAN_ULW_ATTR_CTRL_SEQ_ID_MASK 0xFF00 +#define NAN_ULW_ATTR_CTRL_SEQ_ID_SHIFT 8 + +#define NAN_ULW_OVWR_ALL_MASK 0x01 +#define NAN_ULW_OVWR_ALL_SHIFT 0 +#define NAN_ULW_OVWR_MAP_ID_MASK 0x1E +#define NAN_ULW_OVWR_MAP_ID_SHIFT 1 + +#define NAN_ULW_CTRL_TYPE_MASK 0x03 +#define NAN_ULW_CTRL_TYPE_SHIFT 0 +#define NAN_ULW_CTRL_TYPE(ctrl) (ctrl & NAN_ULW_CTRL_TYPE_MASK) +#define NAN_ULW_CTRL_CHAN_AVAIL_MASK 0x04 +#define NAN_ULW_CTRL_CHAN_AVAIL_SHIFT 2 +#define NAN_ULW_CTRL_CHAN_AVAIL(ctrl) ((ctrl & NAN_ULW_CTRL_CHAN_AVAIL_MASK) \ + >> NAN_ULW_CTRL_CHAN_AVAIL_SHIFT) +#define NAN_ULW_CTRL_RX_NSS_MASK 0x78 +#define NAN_ULW_CTRL_RX_NSS_SHIFT 3 + +#define NAN_ULW_CTRL_TYPE_BAND 0 +#define NAN_ULW_CTRL_TYPE_CHAN_NOAUX 1 +#define NAN_ULW_CTRL_TYPE_CHAN_AUX 2 + +#define NAN_ULW_CNT_DOWN_NO_EXPIRE 0xFF /* ULWs don't end until next sched update */ +#define NAN_ULW_CNT_DOWN_CANCEL 0x0 /* cancel remaining ulws */ + +typedef
BWL_PRE_PACKED_STRUCT struct wifi_nan_ulw_attr_s { + uint8 id; + uint16 len; + uint16 ctrl; + uint32 start; /* low 32 bits of tsf */ + uint32 dur; + uint32 period; + uint8 count_down; + uint8 overwrite; + /* + * ulw[0] == optional field ULW control when present. + * band ID or channel follows + */ + uint8 ulw_entry[]; +} BWL_POST_PACKED_STRUCT wifi_nan_ulw_attr_t; + +/* NAN2 Management Frame (section 5.6) */ + +/* Public action frame for NAN2 */ +typedef BWL_PRE_PACKED_STRUCT struct nan2_pub_act_frame_s { + /* NAN_PUB_AF_CATEGORY 0x04 */ + uint8 category_id; + /* NAN_PUB_AF_ACTION 0x09 */ + uint8 action_field; + /* NAN_OUI 0x50-6F-9A */ + uint8 oui[DOT11_OUI_LEN]; + /* NAN_OUI_TYPE TBD */ + uint8 oui_type; + /* NAN_OUI_SUB_TYPE TBD */ + uint8 oui_sub_type; + /* One or more NAN Attributes follow */ + uint8 data[]; +} BWL_POST_PACKED_STRUCT nan2_pub_act_frame_t; + +#define NAN2_PUB_ACT_FRM_SIZE (OFFSETOF(nan2_pub_act_frame_t, data)) + +/* NAN Action Frame Subtypes */ +/* Subtype-0 is Reserved */ +#define NAN_MGMT_FRM_SUBTYPE_RESERVED 0 +/* NAN Ranging Request */ +#define NAN_MGMT_FRM_SUBTYPE_RANGING_REQ 1 +/* NAN Ranging Response */ +#define NAN_MGMT_FRM_SUBTYPE_RANGING_RESP 2 +/* NAN Ranging Termination */ +#define NAN_MGMT_FRM_SUBTYPE_RANGING_TERM 3 +/* NAN Ranging Report */ +#define NAN_MGMT_FRM_SUBTYPE_RANGING_RPT 4 +/* NDP Request */ +#define NAN_MGMT_FRM_SUBTYPE_NDP_REQ 5 +/* NDP Response */ +#define NAN_MGMT_FRM_SUBTYPE_NDP_RESP 6 +/* NDP Confirm */ +#define NAN_MGMT_FRM_SUBTYPE_NDP_CONFIRM 7 +/* NDP Key Installment */ +#define NAN_MGMT_FRM_SUBTYPE_NDP_KEY_INST 8 +/* NDP Termination */ +#define NAN_MGMT_FRM_SUBTYPE_NDP_END 9 +/* Schedule Request */ +#define NAN_MGMT_FRM_SUBTYPE_SCHED_REQ 10 +/* Schedule Response */ +#define NAN_MGMT_FRM_SUBTYPE_SCHED_RESP 11 +/* Schedule Confirm */ +#define NAN_MGMT_FRM_SUBTYPE_SCHED_CONF 12 +/* Schedule Update */ +#define NAN_MGMT_FRM_SUBTYPE_SCHED_UPD 13 + +/* Reason code defines */ +#define NAN_REASON_RESERVED 0x0 +#define 
NAN_REASON_UNSPECIFIED 0x1 +#define NAN_REASON_RESOURCE_LIMIT 0x2 +#define NAN_REASON_INVALID_PARAMS 0x3 +#define NAN_REASON_FTM_PARAM_INCAP 0x4 +#define NAN_REASON_NO_MOVEMENT 0x5 +#define NAN_REASON_INVALID_AVAIL 0x6 +#define NAN_REASON_IMMUT_UNACCEPT 0x7 +#define NAN_REASON_SEC_POLICY 0x8 +#define NAN_REASON_QOS_UNACCEPT 0x9 +#define NAN_REASON_NDP_REJECT 0xa +#define NAN_REASON_NDL_UNACCEPTABLE 0xb + +/* nan 2.0 qos (not attribute) */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndp_qos_s { + uint8 tid; /* traffic identifier */ + uint16 pkt_size; /* service data pkt size */ + uint8 data_rate; /* mean data rate */ + uint8 svc_interval; /* max service interval */ +} BWL_POST_PACKED_STRUCT wifi_nan_ndp_qos_t; + +/* NDP control bitmap defines */ +#define NAN_NDP_CTRL_CONFIRM_REQUIRED 0x01 +#define NAN_NDP_CTRL_SECURTIY_PRESENT 0x04 +#define NAN_NDP_CTRL_PUB_ID_PRESENT 0x08 +#define NAN_NDP_CTRL_RESP_NDI_PRESENT 0x10 +#define NAN_NDP_CTRL_SPEC_INFO_PRESENT 0x20 +#define NAN_NDP_CTRL_RESERVED 0xA0 + +/* NDP Attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndp_attr_s { + uint8 id; /* 0x10 */ + uint16 len; /* length */ + uint8 dialog_token; /* dialog token */ + uint8 type_status; /* bits 0-3 type, 4-7 status */ + uint8 reason; /* reason code */ + struct ether_addr init_ndi; /* ndp initiator's data interface address */ + uint8 ndp_id; /* ndp identifier (created by initiator) */ + uint8 control; /* ndp control field */ + uint8 var[]; /* Optional fields follow */ +} BWL_POST_PACKED_STRUCT wifi_nan_ndp_attr_t; +/* NDP attribute type and status macros */ +#define NAN_NDP_TYPE_MASK 0x0F +#define NAN_NDP_TYPE_REQUEST 0x0 +#define NAN_NDP_TYPE_RESPONSE 0x1 +#define NAN_NDP_TYPE_CONFIRM 0x2 +#define NAN_NDP_TYPE_SECURITY 0x3 +#define NAN_NDP_TYPE_TERMINATE 0x4 +#define NAN_NDP_REQUEST(_ndp) (((_ndp)->type_status & NAN_NDP_TYPE_MASK) == NAN_NDP_TYPE_REQUEST) +#define NAN_NDP_RESPONSE(_ndp) (((_ndp)->type_status & NAN_NDP_TYPE_MASK) == NAN_NDP_TYPE_RESPONSE) +#define
NAN_NDP_CONFIRM(_ndp) (((_ndp)->type_status & NAN_NDP_TYPE_MASK) == NAN_NDP_TYPE_CONFIRM) +#define NAN_NDP_SECURITY_INST(_ndp) (((_ndp)->type_status & NAN_NDP_TYPE_MASK) == \ + NAN_NDP_TYPE_SECURITY) +#define NAN_NDP_TERMINATE(_ndp) (((_ndp)->type_status & NAN_NDP_TYPE_MASK) == \ + NAN_NDP_TYPE_TERMINATE) +#define NAN_NDP_STATUS_SHIFT 4 +#define NAN_NDP_STATUS_MASK 0xF0 +#define NAN_NDP_STATUS_CONT (0 << NAN_NDP_STATUS_SHIFT) +#define NAN_NDP_STATUS_ACCEPT (1 << NAN_NDP_STATUS_SHIFT) +#define NAN_NDP_STATUS_REJECT (2 << NAN_NDP_STATUS_SHIFT) +#define NAN_NDP_CONT(_ndp) (((_ndp)->type_status & NAN_NDP_STATUS_MASK) == NAN_NDP_STATUS_CONT) +#define NAN_NDP_ACCEPT(_ndp) (((_ndp)->type_status & NAN_NDP_STATUS_MASK) == \ + NAN_NDP_STATUS_ACCEPT) +#define NAN_NDP_REJECT(_ndp) (((_ndp)->type_status & NAN_NDP_STATUS_MASK) == \ + NAN_NDP_STATUS_REJECT) +/* NDP Setup Status */ +#define NAN_NDP_SETUP_STATUS_OK 1 +#define NAN_NDP_SETUP_STATUS_FAIL 0 +#define NAN_NDP_SETUP_STATUS_REJECT 2 + +/* Rng setup attribute type and status macros */ +#define NAN_RNG_TYPE_MASK 0x0F +#define NAN_RNG_TYPE_REQUEST 0x0 +#define NAN_RNG_TYPE_RESPONSE 0x1 +#define NAN_RNG_TYPE_TERMINATE 0x2 + +#define NAN_RNG_STATUS_SHIFT 4 +#define NAN_RNG_STATUS_MASK 0xF0 +#define NAN_RNG_STATUS_ACCEPT (0 << NAN_RNG_STATUS_SHIFT) +#define NAN_RNG_STATUS_REJECT (1 << NAN_RNG_STATUS_SHIFT) + +#define NAN_RNG_ACCEPT(_rsua) (((_rsua)->type_status & NAN_RNG_STATUS_MASK) == \ + NAN_RNG_STATUS_ACCEPT) +#define NAN_RNG_REJECT(_rsua) (((_rsua)->type_status & NAN_RNG_STATUS_MASK) == \ + NAN_RNG_STATUS_REJECT) + +/* schedule entry */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sched_entry_s { + uint8 map_id; /* map id */ + uint16 tbmp_ctrl; /* time bitmap control */ + uint8 tbmp_len; /* time bitmap len */ + uint8 tbmp[]; /* time bitmap - Optional */ +} BWL_POST_PACKED_STRUCT wifi_nan_sched_entry_t; + +#define NAN_SCHED_ENTRY_MIN_SIZE OFFSETOF(wifi_nan_sched_entry_t, tbmp) +#define NAN_SCHED_ENTRY_SIZE(_entry) 
(NAN_SCHED_ENTRY_MIN_SIZE + (_entry)->tbmp_len) + +/* for dev cap, element container etc. */ +#define NAN_DEV_ELE_MAPID_CTRL_MASK 0x1 +#define NAN_DEV_ELE_MAPID_CTRL_SHIFT 0 +#define NAN_DEV_ELE_MAPID_MASK 0x1E +#define NAN_DEV_ELE_MAPID_SHIFT 1 + +#define NAN_DEV_ELE_MAPID_CTRL_SET(_mapid_field, value) \ + do {(_mapid_field) &= ~NAN_DEV_ELE_MAPID_CTRL_MASK; \ + (_mapid_field) |= ((value << NAN_DEV_ELE_MAPID_CTRL_SHIFT) & \ + NAN_DEV_ELE_MAPID_CTRL_MASK); \ + } while (0); + +#define NAN_DEV_ELE_MAPID_CTRL_GET(_mapid_field) \ + (((_mapid_field) & NAN_DEV_ELE_MAPID_CTRL_MASK) >> \ + NAN_DEV_ELE_MAPID_CTRL_SHIFT) + +#define NAN_DEV_ELE_MAPID_SET(_mapid_field, value) \ + do {(_mapid_field) &= ~NAN_DEV_ELE_MAPID_MASK; \ + (_mapid_field) |= ((value << NAN_DEV_ELE_MAPID_SHIFT) & \ + NAN_DEV_ELE_MAPID_MASK); \ + } while (0); + +#define NAN_DEV_ELE_MAPID_GET(_mapid_field) \ + (((_mapid_field) & NAN_DEV_ELE_MAPID_MASK) >> \ + NAN_DEV_ELE_MAPID_SHIFT) + +/* schedule entry map id handling */ +#define NAN_SCHED_ENTRY_MAPID_MASK 0x0F +#define NAN_SCHED_ENTRY_MAPID_SHIFT 0 + +#define NAN_SCHED_ENTRY_MAPID_SET(_mapid_field, value) \ + do {(_mapid_field) &= ~NAN_SCHED_ENTRY_MAPID_MASK; \ + (_mapid_field) |= ((value << NAN_SCHED_ENTRY_MAPID_SHIFT) & \ + NAN_SCHED_ENTRY_MAPID_MASK); \ + } while (0); + +#define NAN_SCHED_ENTRY_MAPID_GET(_mapid_field) \ + (((_mapid_field) & NAN_SCHED_ENTRY_MAPID_MASK) >> \ + NAN_SCHED_ENTRY_MAPID_SHIFT) + +/* NDC attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndc_attr_s { + uint8 id; + uint16 len; + uint8 ndc_id[NAN_DATA_NDC_ID_SIZE]; + uint8 attr_cntrl; + uint8 var[]; +} BWL_POST_PACKED_STRUCT wifi_nan_ndc_attr_t; + +/* Attribute control subfield of NDC attr */ +/* Proposed NDC */ +#define NAN_NDC_ATTR_PROPOSED_NDC_MASK 0x1 +#define NAN_NDC_ATTR_PROPOSED_NDC_SHIFT 0 + +/* get & set */ +#define NAN_NDC_GET_PROPOSED_FLAG(_attr) \ + (((_attr)->attr_cntrl & NAN_NDC_ATTR_PROPOSED_NDC_MASK) >> \ + NAN_NDC_ATTR_PROPOSED_NDC_SHIFT) +#define 
NAN_NDC_SET_PROPOSED_FLAG(_attr, value) \ + do {((_attr)->attr_cntrl &= ~NAN_NDC_ATTR_PROPOSED_NDC_MASK); \ + ((_attr)->attr_cntrl |= \ + (((value) << NAN_NDC_ATTR_PROPOSED_NDC_SHIFT) & NAN_NDC_ATTR_PROPOSED_NDC_MASK)); \ + } while (0) + +/* Service descriptor extension attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_svc_desc_ext_attr_s { + /* Attribute ID - 0x11 */ + uint8 id; + /* Length of the following fields in the attribute */ + uint16 len; + /* Instance id of associated service descriptor attribute */ + uint8 instance_id; + /* SDE control field */ + uint16 control; + /* range limit, svc upd indicator etc. */ + uint8 var[]; +} BWL_POST_PACKED_STRUCT wifi_nan_svc_desc_ext_attr_t; + +#define NAN_SDE_ATTR_MIN_LEN OFFSETOF(wifi_nan_svc_desc_ext_attr_t, var) + +/* SDEA control field bit definitions and access macros */ +#define NAN_SDE_CF_FSD_REQUIRED (1 << 0) +#define NAN_SDE_CF_FSD_GAS (1 << 1) +#define NAN_SDE_CF_DP_REQUIRED (1 << 2) +#define NAN_SDE_CF_DP_TYPE (1 << 3) +#define NAN_SDE_CF_MULTICAST_TYPE (1 << 4) +#define NAN_SDE_CF_QOS_REQUIRED (1 << 5) +#define NAN_SDE_CF_SECURITY_REQUIRED (1 << 6) +#define NAN_SDE_CF_RANGING_REQUIRED (1 << 7) +#define NAN_SDE_CF_RANGE_PRESENT (1 << 8) +#define NAN_SDE_CF_SVC_UPD_IND_PRESENT (1 << 9) +#define NAN_SDE_FSD_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_FSD_REQUIRED) +#define NAN_SDE_FSD_GAS(_sde) ((_sde)->control & NAN_SDE_CF_FSD_GAS) +#define NAN_SDE_DP_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_DP_REQUIRED) +#define NAN_SDE_DP_MULTICAST(_sde) ((_sde)->control & NAN_SDE_CF_DP_TYPE) +#define NAN_SDE_MULTICAST_M_TO_M(_sde) ((_sde)->control & NAN_SDE_CF_MULTICAST_TYPE) +#define NAN_SDE_QOS_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_QOS_REQUIRED) +#define NAN_SDE_SECURITY_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_SECURITY_REQUIRED) +#define NAN_SDE_RANGING_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_RANGING_REQUIRED) +#define NAN_SDE_RANGE_PRESENT(_sde) ((_sde)->control & 
NAN_SDE_CF_RANGE_PRESENT) +#define NAN_SDE_SVC_UPD_IND_PRESENT(_sde) ((_sde)->control & NAN_SDE_CF_SVC_UPD_IND_PRESENT) + +/* nan2 security */ + +/* + * Cipher suite information Attribute. + * WFA Tech. Spec ver 1.0.r21 (section 10.7.24.2) + */ +#define NAN_SEC_CIPHER_SUITE_CAP_REPLAY_4 0 +#define NAN_SEC_CIPHER_SUITE_CAP_REPLAY_16 (1 << 0) + +/* enum security algo. +*/ +enum nan_sec_csid { + NAN_SEC_ALGO_NONE = 0, + NAN_SEC_ALGO_NCS_SK_CCM_128 = 1, /* CCMP 128 */ + NAN_SEC_ALGO_NCS_SK_GCM_256 = 2, /* GCMP 256 */ + NAN_SEC_ALGO_LAST = 3 +}; +typedef int8 nan_sec_csid_e; + +/* nan2 cipher suite attribute field */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_cipher_suite_field_s { + uint8 cipher_suite_id; + uint8 inst_id; /* Instance Id */ +} BWL_POST_PACKED_STRUCT wifi_nan_sec_cipher_suite_field_t; + +/* nan2 cipher suite information attribute field */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_cipher_suite_info_attr_s { + uint8 attr_id; /* 0x22 - NAN_ATTR_CIPHER_SUITE_INFO */ + uint16 len; + uint8 capabilities; + uint8 var[]; /* cipher suite list */ +} BWL_POST_PACKED_STRUCT wifi_nan_sec_cipher_suite_info_attr_t; + +/* + * Security context identifier attribute + * WFA Tech. 
Spec ver 1.0.r21 (section 10.7.24.4) + */ + +#define NAN_SEC_CTX_ID_TYPE_PMKID (1 << 0) + +/* nan2 security context identifier attribute field */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_ctx_id_field_s { + uint16 sec_ctx_id_type_len; /* length of security ctx identifier */ + uint8 sec_ctx_id_type; + uint8 inst_id; /* Instance Id */ + uint8 var[]; /* security ctx identifier */ +} BWL_POST_PACKED_STRUCT wifi_nan_sec_ctx_id_field_t; + +/* nan2 security context identifier info attribute field */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_ctx_id_info_attr_s { + uint8 attr_id; /* 0x23 - NAN_ATTR_SEC_CTX_ID_INFO */ + uint16 len; + uint8 var[]; /* security context identifier list */ +} BWL_POST_PACKED_STRUCT wifi_nan_sec_ctx_id_info_attr_t; + +/* + * Nan shared key descriptor attribute + * WFA Tech. Spec ver 23 + */ + +#define NAN_SEC_NCSSK_DESC_REPLAY_CNT_LEN 8 +#define NAN_SEC_NCSSK_DESC_KEY_NONCE_LEN 32 + +/* nan shared key descriptor attr field */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_sec_ncssk_key_desc_attr_s { + uint8 attr_id; /* 0x24 - NAN_ATTR_SHARED_KEY_DESC */ + uint16 len; + uint8 inst_id; /* Publish service instance ID */ + uint8 desc_type; + uint16 key_info; + uint16 key_len; + uint8 key_replay_cntr[NAN_SEC_NCSSK_DESC_REPLAY_CNT_LEN]; + uint8 key_nonce[NAN_SEC_NCSSK_DESC_KEY_NONCE_LEN]; + uint8 reserved[32]; /* EAPOL IV + Key RSC + Rsvd fields in EAPOL Key */ + uint8 mic[]; /* mic + key data len + key data */ +} BWL_POST_PACKED_STRUCT wifi_nan_sec_ncssk_key_desc_attr_t; + +/* Key Info fields */ +#define NAN_SEC_NCSSK_DESC_MASK 0x7 +#define NAN_SEC_NCSSK_DESC_SHIFT 0 +#define NAN_SEC_NCSSK_DESC_KEY_TYPE_MASK 0x8 +#define NAN_SEC_NCSSK_DESC_KEY_TYPE_SHIFT 3 +#define NAN_SEC_NCSSK_DESC_KEY_INSTALL_MASK 0x40 +#define NAN_SEC_NCSSK_DESC_KEY_INSTALL_SHIFT 6 +#define NAN_SEC_NCSSK_DESC_KEY_ACK_MASK 0x80 +#define NAN_SEC_NCSSK_DESC_KEY_ACK_SHIFT 7 +#define NAN_SEC_NCSSK_DESC_KEY_MIC_MASK 0x100 +#define NAN_SEC_NCSSK_DESC_KEY_MIC_SHIFT 8 
+#define NAN_SEC_NCSSK_DESC_KEY_SEC_MASK 0x200 +#define NAN_SEC_NCSSK_DESC_KEY_SEC_SHIFT 9 +#define NAN_SEC_NCSSK_DESC_KEY_ERR_MASK 0x400 +#define NAN_SEC_NCSSK_DESC_KEY_ERR_SHIFT 10 +#define NAN_SEC_NCSSK_DESC_KEY_REQ_MASK 0x800 +#define NAN_SEC_NCSSK_DESC_KEY_REQ_SHIFT 11 +#define NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_MASK 0x1000 +#define NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_SHIFT 12 +#define NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_MASK 0x2000 +#define NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_SHIFT 13 + +/* Key Info get & set macros */ +#define NAN_SEC_NCSSK_KEY_DESC_VER_GET(_key_info) \ + (((_key_info) & NAN_SEC_NCSSK_DESC_MASK) >> NAN_SEC_NCSSK_DESC_SHIFT) +#define NAN_SEC_NCSSK_KEY_DESC_VER_SET(_val, _key_info) \ + do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_MASK; \ + (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_SHIFT) & \ + NAN_SEC_NCSSK_DESC_MASK);} while (0) +#define NAN_SEC_NCSSK_DESC_KEY_TYPE_GET(_key_info) \ + (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_TYPE_MASK) >> NAN_SEC_NCSSK_DESC_KEY_TYPE_SHIFT) +#define NAN_SEC_NCSSK_DESC_KEY_TYPE_SET(_val, _key_info) \ + do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_TYPE_MASK; \ + (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_TYPE_SHIFT) & \ + NAN_SEC_NCSSK_DESC_KEY_TYPE_MASK);} while (0) +#define NAN_SEC_NCSSK_DESC_KEY_INSTALL_GET(_key_info) \ + (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_INSTALL_MASK) >> \ + NAN_SEC_NCSSK_DESC_KEY_INSTALL_SHIFT) +#define NAN_SEC_NCSSK_DESC_KEY_INSTALL_SET(_val, _key_info) \ + do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_INSTALL_MASK; \ + (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_INSTALL_SHIFT) & \ + NAN_SEC_NCSSK_DESC_KEY_INSTALL_MASK);} while (0) +#define NAN_SEC_NCSSK_DESC_KEY_ACK_GET(_key_info) \ + (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_ACK_MASK) >> NAN_SEC_NCSSK_DESC_KEY_ACK_SHIFT) +#define NAN_SEC_NCSSK_DESC_KEY_ACK_SET(_val, _key_info) \ + do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_ACK_MASK; \ + (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_ACK_SHIFT) & \ + NAN_SEC_NCSSK_DESC_KEY_ACK_MASK);} while (0) 
+#define NAN_SEC_NCSSK_DESC_KEY_MIC_GET(_key_info) \ + (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_MIC_MASK) >> NAN_SEC_NCSSK_DESC_KEY_MIC_SHIFT) +#define NAN_SEC_NCSSK_DESC_KEY_MIC_SET(_val, _key_info) \ + do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_MIC_MASK; \ + (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_MIC_SHIFT) & \ + NAN_SEC_NCSSK_DESC_KEY_MIC_MASK);} while (0) +#define NAN_SEC_NCSSK_DESC_KEY_SEC_GET(_key_info) \ + (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_SEC_MASK) >> NAN_SEC_NCSSK_DESC_KEY_SEC_SHIFT) +#define NAN_SEC_NCSSK_DESC_KEY_SEC_SET(_val, _key_info) \ + do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_SEC_MASK; \ + (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_SEC_SHIFT) & \ + NAN_SEC_NCSSK_DESC_KEY_SEC_MASK);} while (0) +#define NAN_SEC_NCSSK_DESC_KEY_ERR_GET(_key_info) \ + (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_ERR_MASK) >> NAN_SEC_NCSSK_DESC_KEY_ERR_SHIFT) +#define NAN_SEC_NCSSK_DESC_KEY_ERR_SET(_val, _key_info) \ + do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_ERR_MASK; \ + (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_ERR_SHIFT) & \ + NAN_SEC_NCSSK_DESC_KEY_ERR_MASK);} while (0) +#define NAN_SEC_NCSSK_DESC_KEY_REQ_GET(_key_info) \ + (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_REQ_MASK) >> NAN_SEC_NCSSK_DESC_KEY_REQ_SHIFT) +#define NAN_SEC_NCSSK_DESC_KEY_REQ_SET(_val, _key_info) \ + do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_REQ_MASK; \ + (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_REQ_SHIFT) & \ + NAN_SEC_NCSSK_DESC_KEY_REQ_MASK);} while (0) +#define NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_GET(_key_info) \ + (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_MASK) >> \ + NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_SHIFT) +#define NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_SET(_val, _key_info) \ + do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_MASK; \ + (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_SHIFT) & \ + NAN_SEC_NCSSK_DESC_KEY_ENC_KEY_MASK);} while (0) +#define NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_GET(_key_info) \ + (((_key_info) & NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_MASK) 
>> \ + NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_SHIFT) +#define NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_SET(_val, _key_info) \ + do {(_key_info) &= ~NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_MASK; \ + (_key_info) |= (((_val) << NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_SHIFT) & \ + NAN_SEC_NCSSK_DESC_KEY_SMK_MSG_MASK);} while (0) + +#define NAN_SEC_NCSSK_IEEE80211_KDESC_TYPE 2 /* IEEE 802.11 Key Descriptor Type */ +#define NAN_SEC_NCSSK_KEY_DESC_VER 0 /* NCSSK-128/256 */ +#define NAN_SEC_NCSSK_KEY_TYPE_PAIRWISE 1 /* Pairwise */ +#define NAN_SEC_NCSSK_LIFETIME_KDE 7 /* Lifetime KDE type */ + +/* TODO include MTK related attributes */ + +/* NAN Multicast service group(NMSG) definitions */ +/* Length of NMSG_ID -- (NDI * 2^16 + pub_id * 2^8 + Random_factor) */ +#define NAN_NMSG_ID_LEN 8 + +#define NAN_NMSG_TYPE_MASK 0x0F +#define NMSG_ATTR_TYPE_STATUS_REQUEST 0x00 +#define NMSG_ATTR_TYPE_STATUS_RESPONSE 0x01 +#define NMSG_ATTR_TYPE_STATUS_CONFIRM 0x02 +#define NMSG_ATTR_TYPE_STATUS_SEC_INSTALL 0x03 +#define NMSG_ATTR_TYPE_STATUS_TERMINATE 0x04 +#define NMSG_ATTR_TYPE_STATUS_IMPLICIT_ENROL 0x05 + +#define NMSG_ATTR_TYPE_STATUS_CONTINUED 0x00 +#define NMSG_ATTR_TYPE_STATUS_ACCEPTED 0x10 +#define NMSG_ATTR_TYPE_STATUS_REJECTED 0x20 + +#define NMSG_CTRL_PUB_ID_PRESENT 0x0001 +#define NMSG_CTRL_NMSG_ID_PRESENT 0x0002 +#define NMSG_CTRL_SECURITY_PRESENT 0x0004 +#define NMSG_CTRL_MANY_TO_MANY_PRESENT 0x0008 +#define NMSG_CTRL_SVC_INFO_PRESENT 0x0010 + +/* NMSG attribute */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_nmsg_attr_s { + uint8 id; /* Attribute ID - 0x11 */ + uint16 len; /* Length including pubid, NMSGID and svc info */ + uint8 dialog_token; + uint8 type_status; /* Type and Status field byte */ + uint8 reason_code; + uint8 mc_id; /* Multicast id similar to NDPID */ + uint8 nmsg_ctrl; /* NMSG control field */ + /* Optional publish id, NMSGID and svc info are included in var[] */ + uint8 var[0]; +} BWL_POST_PACKED_STRUCT wifi_nan_nmsg_attr_t; + +#define NMSG_ATTR_MCAST_SCHED_MAP_ID_MASK 0x1E +#define 
NMSG_ATTR_MCAST_SCHED_MAP_ID_SHIFT 1 +#define NMSG_ATTR_MCAST_SCHED_TIME_MAP_MASK 0x20 +#define NMSG_ATTR_MCAST_SCHED_TIME_MAP_SHIFT 5 + +/* NAN Multicast Schedule attribute structure */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_mcast_sched_attr_s { + uint8 id; /* 0x16 */ + uint16 len; + uint8 nmsg_id[NAN_NMSG_ID_LEN]; + uint8 attr_cntrl; + uint8 sched_own[ETHER_ADDR_LEN]; + uint8 var[]; /* multicast sched entry list (schedule_entry_list) */ +} BWL_POST_PACKED_STRUCT wifi_nan_mcast_sched_attr_t; + + +/* FAC Channel Entry (section 10.7.19.1.5) */ +typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_fac_chan_entry_s { + uint8 oper_class; /* Operating Class */ + uint16 chan_bitmap; /* Channel Bitmap */ + uint8 primary_chan_bmp; /* Primary Channel Bitmap */ + uint16 aux_chan; /* Auxiliary Channel bitmap */ +} BWL_POST_PACKED_STRUCT wifi_nan_fac_chan_entry_t; + +/* TODO move this from nan.h */ +#define NAN_ALL_NAN_MGMT_FRAMES (NAN_FRM_SCHED_AF | \ + NAN_FRM_NDP_AF | NAN_FRM_NDL_AF | \ + NAN_FRM_DISC_BCN | NAN_FRM_SYNC_BCN | \ + NAN_FRM_SVC_DISC | NAN_FRM_RNG_REQ_AF | \ + NAN_FRM_RNG_RESP_AF | NAN_FRM_RNG_REPORT_AF | \ + NAN_FRM_RNG_TERM_AF) + +/* This marks the end of a packed structure section. 
*/ +#include + +#endif /* _NAN_H_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/osl.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/osl.h index 8a00f9d55a83..082b30152266 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/osl.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/osl.h @@ -1,7 +1,7 @@ /* * OS Abstraction Layer * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: osl.h 526460 2015-01-14 08:25:24Z $ + * $Id: osl.h 642189 2016-06-07 21:12:50Z $ */ #ifndef _osl_h_ @@ -42,7 +42,6 @@ typedef unsigned int (*osl_rreg_fn_t)(void *ctx, volatile void *reg, unsigned in typedef void (*osl_wreg_fn_t)(void *ctx, volatile void *reg, unsigned int val, unsigned int size); - #if defined(WL_UNITTEST) #include #else @@ -76,6 +75,10 @@ typedef void (*osl_wreg_fn_t)(void *ctx, volatile void *reg, unsigned int val, #define OSL_SYSUPTIME_SUPPORT TRUE #endif /* OSL_SYSUPTIME */ +#if !defined(OSL_SYSUPTIME_US) +#define OSL_SYSUPTIME_US() (0) +#endif /* OSL_SYSUPTIME */ + #ifndef OSL_SYS_HALT #define OSL_SYS_HALT() do {} while (0) #endif @@ -84,7 +87,17 @@ typedef void (*osl_wreg_fn_t)(void *ctx, volatile void *reg, unsigned int val, #define OSL_MEM_AVAIL() (0xffffffff) #endif -#if !defined(PKTC) && !defined(PKTC_DONGLE) +#if !(defined(PKTC) || defined(PKTC_DONGLE)) + +#ifndef OSL_OBFUSCATE_BUF +/* For security reasons printing pointers is not allowed. + * Some OSLs implement OSL_OBFUSCATE_BUF to OS specific obfuscate API. 
+ * If OSL_OBFUSCATE_BUF() is not implemented in OSL, then default to + * printing the input pointer + */ +#define OSL_OBFUSCATE_BUF(x) (x) +#endif /* OSL_OBFUSCATE_BUF */ + #define PKTCGETATTR(skb) (0) #define PKTCSETATTR(skb, f, p, b) BCM_REFERENCE(skb) #define PKTCCLRATTR(skb) BCM_REFERENCE(skb) @@ -114,65 +127,134 @@ do { \ } while (0) #endif /* !linux || !PKTC */ -#if !defined(HNDCTF) && !defined(PKTC_TX_DONGLE) +#if !(defined(HNDCTF) || defined(PKTC_TX_DONGLE) || defined(PKTC)) #define PKTSETCHAINED(osh, skb) BCM_REFERENCE(osh) #define PKTCLRCHAINED(osh, skb) BCM_REFERENCE(osh) #define PKTISCHAINED(skb) FALSE #endif /* Lbuf with fraglist */ +#ifndef PKTFRAGPKTID #define PKTFRAGPKTID(osh, lb) (0) +#endif +#ifndef PKTSETFRAGPKTID #define PKTSETFRAGPKTID(osh, lb, id) BCM_REFERENCE(osh) +#endif +#ifndef PKTFRAGTOTNUM #define PKTFRAGTOTNUM(osh, lb) (0) +#endif +#ifndef PKTSETFRAGTOTNUM #define PKTSETFRAGTOTNUM(osh, lb, tot) BCM_REFERENCE(osh) +#endif +#ifndef PKTFRAGTOTLEN #define PKTFRAGTOTLEN(osh, lb) (0) +#endif +#ifndef PKTSETFRAGTOTLEN #define PKTSETFRAGTOTLEN(osh, lb, len) BCM_REFERENCE(osh) +#endif +#ifndef PKTIFINDEX #define PKTIFINDEX(osh, lb) (0) +#endif +#ifndef PKTSETIFINDEX #define PKTSETIFINDEX(osh, lb, idx) BCM_REFERENCE(osh) +#endif +#ifndef PKTGETLF #define PKTGETLF(osh, len, send, lbuf_type) (0) +#endif /* in rx path, reuse totlen as used len */ +#ifndef PKTFRAGUSEDLEN #define PKTFRAGUSEDLEN(osh, lb) (0) +#endif +#ifndef PKTSETFRAGUSEDLEN #define PKTSETFRAGUSEDLEN(osh, lb, len) BCM_REFERENCE(osh) - +#endif +#ifndef PKTFRAGLEN #define PKTFRAGLEN(osh, lb, ix) (0) +#endif +#ifndef PKTSETFRAGLEN #define PKTSETFRAGLEN(osh, lb, ix, len) BCM_REFERENCE(osh) +#endif +#ifndef PKTFRAGDATA_LO #define PKTFRAGDATA_LO(osh, lb, ix) (0) +#endif +#ifndef PKTSETFRAGDATA_LO #define PKTSETFRAGDATA_LO(osh, lb, ix, addr) BCM_REFERENCE(osh) +#endif +#ifndef PKTFRAGDATA_HI #define PKTFRAGDATA_HI(osh, lb, ix) (0) +#endif +#ifndef PKTSETFRAGDATA_HI #define 
PKTSETFRAGDATA_HI(osh, lb, ix, addr) BCM_REFERENCE(osh) +#endif /* RX FRAG */ +#ifndef PKTISRXFRAG #define PKTISRXFRAG(osh, lb) (0) +#endif +#ifndef PKTSETRXFRAG #define PKTSETRXFRAG(osh, lb) BCM_REFERENCE(osh) +#endif +#ifndef PKTRESETRXFRAG #define PKTRESETRXFRAG(osh, lb) BCM_REFERENCE(osh) +#endif /* TX FRAG */ +#ifndef PKTISTXFRAG #define PKTISTXFRAG(osh, lb) (0) +#endif +#ifndef PKTSETTXFRAG #define PKTSETTXFRAG(osh, lb) BCM_REFERENCE(osh) +#endif /* Need Rx completion used for AMPDU reordering */ +#ifndef PKTNEEDRXCPL #define PKTNEEDRXCPL(osh, lb) (TRUE) +#endif +#ifndef PKTSETNORXCPL #define PKTSETNORXCPL(osh, lb) BCM_REFERENCE(osh) +#endif +#ifndef PKTRESETNORXCPL #define PKTRESETNORXCPL(osh, lb) BCM_REFERENCE(osh) - +#endif +#ifndef PKTISFRAG #define PKTISFRAG(osh, lb) (0) +#endif +#ifndef PKTFRAGISCHAINED #define PKTFRAGISCHAINED(osh, i) (0) +#endif /* TRIM Tail bytes from lfrag */ +#ifndef PKTFRAG_TRIM_TAILBYTES #define PKTFRAG_TRIM_TAILBYTES(osh, p, len, type) PKTSETLEN(osh, p, PKTLEN(osh, p) - len) +#endif +#ifndef PKTISHDRCONVTD +#define PKTISHDRCONVTD(osh, lb) (0) +#endif #ifdef BCM_SECURE_DMA #define SECURE_DMA_ENAB(osh) (1) #else #define SECURE_DMA_ENAB(osh) (0) +#ifndef BCMDMA64OSL +#define SECURE_DMA_MAP(osh, va, size, direction, p, dmah, pcma, offset) ((dmaaddr_t) ((0))) +#else #define SECURE_DMA_MAP(osh, va, size, direction, p, dmah, pcma, offset) ((dmaaddr_t) {(0)}) +#endif #define SECURE_DMA_DD_MAP(osh, va, size, direction, p, dmah) 0 +#ifndef BCMDMA64OSL +#define SECURE_DMA_MAP_TXMETA(osh, va, size, direction, p, dmah, pcma) ((dmaaddr_t) ((0))) +#else #define SECURE_DMA_MAP_TXMETA(osh, va, size, direction, p, dmah, pcma) ((dmaaddr_t) {(0)}) +#endif #define SECURE_DMA_UNMAP(osh, pa, size, direction, p, dmah, pcma, offset) #define SECURE_DMA_UNMAP_ALL(osh, pcma) -#endif +#endif /* BCMDMA64OSL */ +#ifndef ROMMABLE_ASSERT +#define ROMMABLE_ASSERT(exp) ASSERT(exp) +#endif /* ROMMABLE_ASSERT */ + #endif /* _osl_h_ */ diff --git 
a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/osl_decl.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/osl_decl.h index 6c8d86eeabf1..977a1ca46073 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/osl_decl.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/osl_decl.h @@ -1,7 +1,7 @@ /* * osl forward declarations * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: osl_decl.h 591283 2015-10-07 11:52:00Z $ + * $Id: osl_decl.h 596126 2015-10-29 19:53:48Z $ */ #ifndef _osl_decl_h_ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/osl_ext.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/osl_ext.h index 61984e68c4d0..2503f6a37a02 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/osl_ext.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/osl_ext.h @@ -2,7 +2,7 @@ * OS Abstraction Layer Extension - the APIs defined by the "extension" API * are only supported by a subset of all operating systems. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: osl_ext.h 514727 2014-11-12 03:02:48Z $ + * $Id: osl_ext.h 611959 2016-01-12 15:23:56Z $ */ #ifndef _osl_ext_h_ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/p2p.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/p2p.h old mode 100755 new mode 100644 similarity index 99% rename from drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/p2p.h rename to drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/p2p.h index 91f5147f54d0..cc7aa686e861 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/p2p.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/p2p.h @@ -1,7 +1,7 @@ /* * Fundamental types and constants relating to WFA P2P (aka WiFi Direct) * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: p2p.h 536785 2015-02-24 08:35:00Z $ + * $Id: p2p.h 700076 2017-05-17 14:42:22Z $ */ #ifndef _P2P_H_ @@ -34,7 +34,7 @@ #include #endif #include -#include +#include <802.11.h> /* This marks the start of a packed structure section. 
*/ #include diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/packed_section_end.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/packed_section_end.h index e3a35c7e9270..4827c709af26 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/packed_section_end.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/packed_section_end.h @@ -15,7 +15,7 @@ * #include * * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/packed_section_start.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/packed_section_start.h index 617176461f75..9beb45d5e082 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/packed_section_start.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/packed_section_start.h @@ -15,7 +15,7 @@ * #include * * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/pcicfg.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/pcicfg.h index be0a92a17847..98f22c3df918 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/pcicfg.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/pcicfg.h @@ -1,7 +1,7 @@ /* * pcicfg.h: PCI configuration constants and structures. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: pcicfg.h 514727 2014-11-12 03:02:48Z $ + * $Id: pcicfg.h 621340 2016-02-25 12:26:40Z $ */ #ifndef _h_pcicfg_ @@ -52,7 +52,16 @@ #define PCI_CFG_HDR 0xe #define PCI_CFG_BIST 0xf #define PCI_CFG_BAR0 0x10 +/* +* TODO: PCI_CFG_BAR1 is wrongly defined to be 0x14 whereas it should be +* 0x18 as per the PCIe full dongle spec. Need to modify the values below +* correctly at a later point of time +*/ +#ifdef DHD_EFI +#define PCI_CFG_BAR1 0x18 +#else #define PCI_CFG_BAR1 0x14 +#endif /* DHD_EFI */ #define PCI_CFG_BAR2 0x18 #define PCI_CFG_BAR3 0x1c #define PCI_CFG_BAR4 0x20 @@ -67,6 +76,7 @@ #define PCI_CFG_MINGNT 0x3e #define PCI_CFG_MAXLAT 0x3f #define PCI_CFG_DEVCTRL 0xd8 +#define PCI_CFG_TLCNTRL_5 0x814 /* PCI CAPABILITY DEFINES */ @@ -170,6 +180,15 @@ typedef struct _pcie_enhanced_caphdr { #define PCI_BAR0_WIN2 0xac /* backplane addres space accessed by second 4KB of BAR0 */ #define PCI_GPIO_IN 0xb0 /* pci config space gpio input (>=rev3) */ #define PCI_GPIO_OUT 0xb4 /* pci config space gpio output (>=rev3) */ +#define PCIE_CFG_DEVICE_CONTROL 0xb4 /* 0xb4 is used as device control in PCIE devices */ +#define PCIE_DC_AER_CORR_EN (1u << 0u) +#define PCIE_DC_AER_NON_FATAL_EN (1u << 1u) +#define PCIE_DC_AER_FATAL_EN (1u << 2u) +#define PCIE_DC_AER_UNSUP_EN (1u << 3u) + +#define PCI_BAR0_WIN2_OFFSET 0x1000u +#define PCIE2_BAR0_CORE2_WIN2_OFFSET 0x5000u + #define PCI_GPIO_OUTEN 0xb8 /* pci config space gpio output enable (>=rev3) */ #define PCI_L1SS_CTRL2 0x24c /* The L1 PM Substates Control register */ @@ -186,6 +205,17 @@ typedef struct _pcie_enhanced_caphdr { #define PCI_L2_EVENTCNT 0xaa4 #define PCI_L2_STATETMR 0xaa8 +#define PCI_LINK_STATUS 0x4dc +#define PCI_LINK_SPEED_MASK (15u << 0u) 
+#define PCI_LINK_SPEED_SHIFT (0) +#define PCIE_LNK_SPEED_GEN1 0x1 +#define PCIE_LNK_SPEED_GEN2 0x2 +#define PCIE_LNK_SPEED_GEN3 0x3 + +#define PCI_PL_SPARE 0x1808 /* Config to Increase external clkreq deasserted minimum time */ +#define PCI_CONFIG_EXT_CLK_MIN_TIME_MASK (1u << 31u) +#define PCI_CONFIG_EXT_CLK_MIN_TIME_SHIFT (31) + #define PCI_PMCR_REFUP 0x1814 /* Trefup time */ #define PCI_PMCR_REFUP_EXT 0x1818 /* Trefup extend Max */ #define PCI_TPOWER_SCALE_MASK 0x3 @@ -208,6 +238,15 @@ typedef struct _pcie_enhanced_caphdr { #define PCIE2_BAR0_CORE2_WIN 0x74 /* backplane addres space accessed by second 4KB of BAR0 */ #define PCIE2_BAR0_CORE2_WIN2 0x78 /* backplane addres space accessed by second 4KB of BAR0 */ +#define PCI_BAR0_WIN2_OFFSET 0x1000u +#define PCI_CORE_ENUM_OFFSET 0x2000u +#define PCI_CC_CORE_ENUM_OFFSET 0x3000u +#define PCI_SEC_BAR0_WIN_OFFSET 0x4000u +#define PCI_SEC_BAR0_WRAP_OFFSET 0x5000u +#define PCI_CORE_ENUM2_OFFSET 0x6000u +#define PCI_CC_CORE_ENUM2_OFFSET 0x7000u +#define PCI_LAST_OFFSET 0x8000u + #define PCI_BAR0_WINSZ (16 * 1024) /* bar0 window size Match with corerev 13 */ /* On pci corerev >= 13 and all pcie, the bar0 is now 16KB and it maps: */ #define PCI_16KB0_PCIREGS_OFFSET (8 * 1024) /* bar0 + 8K accesses pci/pcie core registers */ @@ -215,6 +254,37 @@ typedef struct _pcie_enhanced_caphdr { #define PCI_16KBB0_WINSZ (16 * 1024) /* bar0 window size */ #define PCI_SECOND_BAR0_OFFSET (16 * 1024) /* secondary bar 0 window */ +/* On AI chips we have a second window to map DMP regs are mapped: */ +#define PCI_16KB0_WIN2_OFFSET (4 * 1024) /* bar0 + 4K is "Window 2" */ + +/* PCI_INT_STATUS */ +#define PCI_SBIM_STATUS_SERR 0x4 /* backplane SBErr interrupt status */ + +/* PCI_INT_MASK */ +#define PCI_SBIM_SHIFT 8 /* backplane core interrupt mask bits offset */ +#define PCI_SBIM_MASK 0xff00 /* backplane core interrupt mask */ +#define PCI_SBIM_MASK_SERR 0x4 /* backplane SBErr interrupt mask */ +#define PCI_CTO_INT_SHIFT 16 /* backplane SBErr 
interrupt mask */ +#define PCI_CTO_INT_MASK (1 << PCI_CTO_INT_SHIFT) /* backplane SBErr interrupt mask */ + +/* PCI_SPROM_CONTROL */ +#define SPROM_SZ_MSK 0x02 /* SPROM Size Mask */ +#define SPROM_LOCKED 0x08 /* SPROM Locked */ +#define SPROM_BLANK 0x04 /* indicating a blank SPROM */ +#define SPROM_WRITEEN 0x10 /* SPROM write enable */ +#define SPROM_BOOTROM_WE 0x20 /* external bootrom write enable */ +#define SPROM_BACKPLANE_EN 0x40 /* Enable indirect backplane access */ +#define SPROM_OTPIN_USE 0x80 /* device OTP In use */ +#define SPROM_CFG_TO_SB_RST 0x400 /* backplane reset */ + +/* Bits in PCI command and status regs */ +#define PCI_CMD_IO 0x00000001 /* I/O enable */ +#define PCI_CMD_MEMORY 0x00000002 /* Memory enable */ +#define PCI_CMD_MASTER 0x00000004 /* Master enable */ +#define PCI_CMD_SPECIAL 0x00000008 /* Special cycles enable */ +#define PCI_CMD_INVALIDATE 0x00000010 /* Invalidate? */ +#define PCI_CMD_VGA_PAL 0x00000040 /* VGA Palate */ +#define PCI_STAT_TA 0x08000000 /* target abort status */ /* Header types */ #define PCI_HEADER_MULTI 0x80 diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/pcie_core.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/pcie_core.h index 25a156adcb4f..463561d08440 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/pcie_core.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/pcie_core.h @@ -1,7 +1,7 @@ /* * BCM43XX PCIE core hardware definitions. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: pcie_core.h 514727 2014-11-12 03:02:48Z $ + * $Id: pcie_core.h 673814 2016-12-05 06:10:24Z $ */ #ifndef _PCIE_CORE_H #define _PCIE_CORE_H @@ -59,11 +59,14 @@ #define PCIE_BAR0_CCCOREREG_OFFSET 0x3000 /* different register spaces to access thr'u pcie indirect access */ -#define PCIE_CONFIGREGS 1 /* Access to config space */ -#define PCIE_PCIEREGS 2 /* Access to pcie registers */ +#define PCIE_CONFIGREGS 1 /* Access to config space */ +#define PCIE_PCIEREGS 2 /* Access to pcie registers */ + +#define PCIEDEV_HOSTADDR_MAP_BASE 0x8000000 +#define PCIEDEV_HOSTADDR_MAP_WIN_MASK 0xFC000000 /* dma regs to control the flow between host2dev and dev2host */ -typedef struct pcie_devdmaregs { +typedef volatile struct pcie_devdmaregs { dma64regs_t tx; uint32 PAD[2]; dma64regs_t rx; @@ -83,8 +86,78 @@ typedef struct pcie_doorbell { uint32 dev2host_1; } pcie_doorbell_t; +/* Flow Ring Manager */ +#define IFRM_FR_IDX_MAX 256 +#define IFRM_FR_GID_MAX 4 +#define IFRM_FR_DEV_MAX 8 +#define IFRM_FR_TID_MAX 8 +#define IFRM_FR_DEV_VALID 2 + +#define IFRM_VEC_REG_BITS 32 + +#define IFRM_FR_PER_VECREG 4 +#define IFRM_FR_PER_VECREG_SHIFT 2 +#define IFRM_FR_PER_VECREG_MASK ((0x1 << IFRM_FR_PER_VECREG_SHIFT) - 1) + +#define IFRM_VEC_BITS_PER_FR (IFRM_VEC_REG_BITS/IFRM_FR_PER_VECREG) + +/* IFRM_DEV_0 : d11AC, IFRM_DEV_1 : d11AD */ +#define IFRM_DEV_0 0 +#define IFRM_DEV_1 1 + +#define IFRM_FR_GID_0 0 +#define IFRM_FR_GID_1 1 +#define IFRM_FR_GID_2 2 +#define IFRM_FR_GID_3 3 + +#define IFRM_TIDMASK 0xffffffff + +/* ifrm_ctrlst register */ +#define IFRM_EN (1<<0) +#define IFRM_BUFF_INIT_DONE (1<<1) +#define IFRM_COMPARE_EN0 (1<<4) +#define IFRM_COMPARE_EN1 (1<<5) +#define IFRM_COMPARE_EN2 (1<<6) +#define 
IFRM_COMPARE_EN3 (1<<7) +#define IFRM_INIT_DV0 (1<<8) +#define IFRM_INIT_DV1 (1<<9) +#define IFRM_INIT_DV2 (1<<10) +#define IFRM_INIT_DV3 (1<<11) + +/* ifrm_msk_arr.addr, ifrm_tid_arr.addr register */ +#define IFRM_ADDR_SHIFT 0 +#define IFRM_FRG_ID_SHIFT 8 + +/* ifrm_vec.diff_lat register */ +#define IFRM_DV_LAT (1<<0) +#define IFRM_DV_LAT_DONE (1<<1) +#define IFRM_SDV_OFFSET_SHIFT 4 +#define IFRM_SDV_FRGID_SHIFT 8 +#define IFRM_VECSTAT_MASK 0x3 +#define IFRM_VEC_MASK 0xff + +/* idma frm array */ +typedef struct pcie_ifrm_array { + uint32 addr; + uint32 data; +} pcie_ifrm_array_t; + +/* idma frm vector */ +typedef struct pcie_ifrm_vector { + uint32 diff_lat; + uint32 sav_tid; + uint32 sav_diff; + uint32 PAD[1]; +} pcie_ifrm_vector_t; + +/* idma frm interrupt */ +typedef struct pcie_ifrm_intr { + uint32 intstat; + uint32 intmask; +} pcie_ifrm_intr_t; + /* SB side: PCIE core and host control registers */ -typedef struct sbpcieregs { +typedef volatile struct sbpcieregs { uint32 control; /* host mode only */ uint32 iocstatus; /* PCIE2: iostatus */ uint32 PAD[1]; @@ -98,15 +171,17 @@ typedef struct sbpcieregs { uint32 obffcontrol; /* PCIE2: 0x2C */ uint32 obffintstatus; /* PCIE2: 0x30 */ uint32 obffdatastatus; /* PCIE2: 0x34 */ - uint32 PAD[2]; + uint32 PAD[1]; + uint32 ctoctrl; /* PCIE2: 0x3C */ uint32 errlog; /* PCIE2: 0x40 */ uint32 errlogaddr; /* PCIE2: 0x44 */ uint32 mailboxint; /* PCIE2: 0x48 */ uint32 mailboxintmsk; /* PCIE2: 0x4c */ uint32 ltrspacing; /* PCIE2: 0x50 */ uint32 ltrhysteresiscnt; /* PCIE2: 0x54 */ - uint32 PAD[42]; - + uint32 msivectorassign; /* PCIE2: 0x58 */ + uint32 intmask2; /* PCIE2: 0x5C */ + uint32 PAD[40]; uint32 sbtopcie0; /* sb to pcie translation 0: 0x100 */ uint32 sbtopcie1; /* sb to pcie translation 1: 0x104 */ uint32 sbtopcie2; /* sb to pcie translation 2: 0x108 */ @@ -125,6 +200,8 @@ typedef struct sbpcieregs { uint32 pcieinddata; /* Data to/from the internal regsiter: 0x134 */ uint32 clkreqenctrl; /* >= rev 6, Clkreq rdma control : 
0x138 */ uint32 PAD[177]; + /* 0x400 - 0x7FF, PCIE Cfg Space, note: not used anymore in PcieGen2 */ + uint32 pciecfg[4][64]; } pcie1; struct { /* mdio access to serdes */ @@ -145,9 +222,17 @@ typedef struct sbpcieregs { uint32 ltr_state; /* 0x1A0 */ uint32 pwr_int_status; /* 0x1A4 */ uint32 pwr_int_mask; /* 0x1A8 */ - uint32 PAD[13]; /* 0x1AC - 0x1DF */ + uint32 pme_source; /* 0x1AC */ + uint32 err_hdr_logreg1; /* 0x1B0 */ + uint32 err_hdr_logreg2; /* 0x1B4 */ + uint32 err_hdr_logreg3; /* 0x1B8 */ + uint32 err_hdr_logreg4; /* 0x1BC */ + uint32 err_code_logreg; /* 0x1C0 */ + uint32 PAD[7]; /* 0x1C4 - 0x1DF */ uint32 clk_ctl_st; /* 0x1E0 */ - uint32 PAD[7]; /* 0x1E4 - 0x1FF */ + uint32 PAD[1]; /* 0x1E4 */ + uint32 powerctl; /* 0x1E8 */ + uint32 PAD[5]; /* 0x1EC - 0x1FF */ pcie_devdmaregs_t h2d0_dmaregs; /* 0x200 - 0x23c */ pcie_devdmaregs_t d2h0_dmaregs; /* 0x240 - 0x27c */ pcie_devdmaregs_t h2d1_dmaregs; /* 0x280 - 0x2bc */ @@ -156,25 +241,67 @@ typedef struct sbpcieregs { pcie_devdmaregs_t d2h2_dmaregs; /* 0x340 - 0x37c */ pcie_devdmaregs_t h2d3_dmaregs; /* 0x380 - 0x3bc */ pcie_devdmaregs_t d2h3_dmaregs; /* 0x3c0 - 0x3fc */ + uint32 d2h_intrlazy_1; /* 0x400 */ + uint32 h2d_intrlazy_1; /* 0x404 */ + uint32 h2d_intstat_1; /* 0x408 */ + uint32 h2d_intmask_1; /* 0x40c */ + uint32 d2h_intstat_1; /* 0x410 */ + uint32 d2h_intmask_1; /* 0x414 */ + uint32 PAD[2]; /* 0x418 - 0x41C */ + uint32 d2h_intrlazy_2; /* 0x420 */ + uint32 h2d_intrlazy_2; /* 0x424 */ + uint32 h2d_intstat_2; /* 0x428 */ + uint32 h2d_intmask_2; /* 0x42c */ + uint32 d2h_intstat_2; /* 0x430 */ + uint32 d2h_intmask_2; /* 0x434 */ + uint32 PAD[10]; /* 0x438 - 0x45F */ + uint32 ifrm_ctrlst; /* 0x460 */ + uint32 PAD[1]; /* 0x464 */ + pcie_ifrm_array_t ifrm_msk_arr; /* 0x468 - 0x46F */ + pcie_ifrm_array_t ifrm_tid_arr[IFRM_FR_DEV_VALID]; + /* 0x470 - 0x47F */ + pcie_ifrm_vector_t ifrm_vec[IFRM_FR_DEV_MAX]; + /* 0x480 - 0x4FF */ + pcie_ifrm_intr_t ifrm_intr[IFRM_FR_DEV_MAX]; + /* 0x500 - 0x53F */ + uint32 
PAD[48]; /* 0x540 - 0x5FF */ + uint32 PAD[2][64]; /* 0x600 - 0x7FF */ } pcie2; } u; - uint32 pciecfg[4][64]; /* 0x400 - 0x7FF, PCIE Cfg Space */ - uint16 sprom[64]; /* SPROM shadow Area */ + uint16 sprom[64]; /* SPROM shadow Area : 0x800 - 0x880 */ + uint32 PAD[96]; /* 0x880 - 0x9FF */ + /* direct memory access (pcie2 rev19 and after) : 0xA00 - 0xAFF */ + uint32 PAD[16]; /* 0xA00 - 0xA3F */ + uint32 dm_errlog; /* 0xA40 */ + uint32 dm_erraddr; /* 0xA44 */ + uint32 PAD[37]; /* 0xA48 - 0xADC */ + uint32 dm_clk_ctl_st; /* 0xAE0 */ + uint32 PAD[1]; /* 0xAE4 */ + uint32 dm_powerctl; /* 0xAE8 */ } sbpcieregs_t; +#define PCIE_CFG_DA_OFFSET 0x400 /* direct access register offset for configuration space */ + /* PCI control */ #define PCIE_RST_OE 0x01 /* When set, drives PCI_RESET out to pin */ #define PCIE_RST 0x02 /* Value driven out to pin */ #define PCIE_SPERST 0x04 /* SurvivePeRst */ +#define PCIE_FORCECFGCLKON_ALP 0x08 #define PCIE_DISABLE_L1CLK_GATING 0x10 #define PCIE_DLYPERST 0x100 /* Delay PeRst to CoE Core */ #define PCIE_DISSPROMLD 0x200 /* DisableSpromLoadOnPerst */ #define PCIE_WakeModeL2 0x1000 /* Wake on L2 */ +#define PCIE_MULTIMSI_EN 0x2000 /* enable multi-vector MSI messages */ #define PCIE_PipeIddqDisable0 0x8000 /* Disable assertion of pcie_pipe_iddq during L1.2 and L2 */ #define PCIE_PipeIddqDisable1 0x10000 /* Disable assertion of pcie_pipe_iddq during L2 */ +#define PCIE_MSI_B2B_EN 0x100000 /* enable back-to-back MSI messages */ +#define PCIE_MSI_FIFO_CLEAR 0x200000 /* reset MSI FIFO */ +#define PCIE_IDMA_MODE_EN 0x800000 /* implicit M2M DMA mode */ #define PCIE_CFGADDR 0x120 /* offsetof(configaddr) */ #define PCIE_CFGDATA 0x124 /* offsetof(configdata) */ +#define PCIE_SWPME_FN0 0x10000 +#define PCIE_SWPME_FN0_SHF 16 /* Interrupt status/mask */ #define PCIE_INTA 0x01 /* PCIE INTA message is received */ @@ -194,6 +321,12 @@ typedef struct sbpcieregs { #define PCIE_INT_MB_FN3_0 0x4000 /* PCIE to SB Mailbox int Fn3.0 is received */ #define 
PCIE_INT_MB_FN3_1 0x8000 /* PCIE to SB Mailbox int Fn3.1 is received */ +/* PCIE MSI Vector Assignment register */ +#define MSIVEC_MB_0 (0x1 << 1) /* MSI Vector offset for mailbox0 is 2 */ +#define MSIVEC_MB_1 (0x1 << 2) /* MSI Vector offset for mailbox1 is 3 */ +#define MSIVEC_D2H0_DB0 (0x1 << 3) /* MSI Vector offset for interface0 door bell 0 is 4 */ +#define MSIVEC_D2H0_DB1 (0x1 << 4) /* MSI Vector offset for interface0 door bell 1 is 5 */ + /* PCIE MailboxInt/MailboxIntMask register */ #define PCIE_MB_TOSB_FN0_0 0x0001 /* write to assert PCIEtoSB Mailbox interrupt */ #define PCIE_MB_TOSB_FN0_1 0x0002 @@ -226,6 +359,13 @@ typedef struct sbpcieregs { PCIE_MB_TOPCIE_D2H2_DB0 | PCIE_MB_TOPCIE_D2H2_DB1 | \ PCIE_MB_TOPCIE_D2H3_DB0 | PCIE_MB_TOPCIE_D2H3_DB1) +#define SBTOPCIE0_BASE 0x08000000 +#define SBTOPCIE1_BASE 0x0c000000 + +/* On chips with CCI-400, the small pcie 128 MB region base has shifted */ +#define CCI400_SBTOPCIE0_BASE 0x20000000 +#define CCI400_SBTOPCIE1_BASE 0x24000000 + /* SB to PCIE translation masks */ #define SBTOPCIE0_MASK 0xfc000000 #define SBTOPCIE1_MASK 0xfc000000 @@ -259,82 +399,118 @@ typedef struct sbpcieregs { #define PCIEADDR_PL_DLLP 1 #define PCIEADDR_PL_PLP 2 +#define PCIE_CORE_REG_CONTROL 0x00u /* Control */ +#define PCIE_CORE_REG_IOSTATUS 0x04u /* IO status */ +#define PCIE_CORE_REG_BITSTATUS 0x0Cu /* bitstatus */ +#define PCIE_CORE_REG_GPIO_SEL 0x10u /* gpio sel */ +#define PCIE_CORE_REG_GPIO_OUT_EN 0x14u /* gpio out en */ +#define PCIE_CORE_REG_INT_STATUS 0x20u /* int status */ +#define PCIE_CORE_REG_INT_MASK 0x24u /* int mask */ +#define PCIE_CORE_REG_SB_PCIE_MB 0x28u /* sbpcie mb */ +#define PCIE_CORE_REG_ERRLOG 0x40u /* errlog */ +#define PCIE_CORE_REG_ERR_ADDR 0x44u /* errlog addr */ +#define PCIE_CORE_REG_MB_INTR 0x48u /* MB intr */ +#define PCIE_CORE_REG_SB_PCIE_0 0x100u /* sbpcie0 map */ +#define PCIE_CORE_REG_SB_PCIE_1 0x104u /* sbpcie1 map */ +#define PCIE_CORE_REG_SB_PCIE_2 0x108u /* sbpcie2 map */ + +/* PCIE Config 
registers */ +#define PCIE_CFG_DEV_STS_CTRL_2 0x0d4u /* "dev_sts_control_2 */ +#define PCIE_CFG_ADV_ERR_CAP 0x100u /* adv_err_cap */ +#define PCIE_CFG_UC_ERR_STS 0x104u /* uc_err_status */ +#define PCIE_CFG_UC_ERR_MASK 0x108u /* ucorr_err_mask */ +#define PCIE_CFG_UNCOR_ERR_SERV 0x10cu /* ucorr_err_sevr */ +#define PCIE_CFG_CORR_ERR_STS 0x110u /* corr_err_status */ +#define PCIE_CFG_CORR_ERR_MASK 0x114u /* corr_err_mask */ +#define PCIE_CFG_ADV_ERR_CTRL 0x118u /* adv_err_cap_control */ +#define PCIE_CFG_HDR_LOG1 0x11Cu /* header_log1 */ +#define PCIE_CFG_HDR_LOG2 0x120u /* header_log2 */ +#define PCIE_CFG_HDR_LOG3 0x124u /* header_log3 */ +#define PCIE_CFG_HDR_LOG4 0x128u /* header_log4 */ +#define PCIE_CFG_PML1_SUB_CAP_ID 0x240u /* PML1sub_capID */ +#define PCIE_CFG_PML1_SUB_CAP_REG 0x244u /* PML1_sub_Cap_reg */ +#define PCIE_CFG_PML1_SUB_CTRL1 0x248u /* PML1_sub_control1 */ +#define PCIE_CFG_PML1_SUB_CTRL3 0x24Cu /* PML1_sub_control2 */ +#define PCIE_CFG_TL_CTRL_5 0x814u /* tl_control_5 */ +#define PCIE_CFG_PHY_ERR_ATT_VEC 0x1820u /* phy_err_attn_vec */ +#define PCIE_CFG_PHY_ERR_ATT_MASK 0x1824u /* phy_err_attn_mask */ + /* PCIE protocol PHY diagnostic registers */ -#define PCIE_PLP_MODEREG 0x200 /* Mode */ -#define PCIE_PLP_STATUSREG 0x204 /* Status */ -#define PCIE_PLP_LTSSMCTRLREG 0x208 /* LTSSM control */ -#define PCIE_PLP_LTLINKNUMREG 0x20c /* Link Training Link number */ -#define PCIE_PLP_LTLANENUMREG 0x210 /* Link Training Lane number */ -#define PCIE_PLP_LTNFTSREG 0x214 /* Link Training N_FTS */ -#define PCIE_PLP_ATTNREG 0x218 /* Attention */ -#define PCIE_PLP_ATTNMASKREG 0x21C /* Attention Mask */ -#define PCIE_PLP_RXERRCTR 0x220 /* Rx Error */ -#define PCIE_PLP_RXFRMERRCTR 0x224 /* Rx Framing Error */ -#define PCIE_PLP_RXERRTHRESHREG 0x228 /* Rx Error threshold */ -#define PCIE_PLP_TESTCTRLREG 0x22C /* Test Control reg */ -#define PCIE_PLP_SERDESCTRLOVRDREG 0x230 /* SERDES Control Override */ -#define PCIE_PLP_TIMINGOVRDREG 0x234 /* Timing param 
override */ -#define PCIE_PLP_RXTXSMDIAGREG 0x238 /* RXTX State Machine Diag */ -#define PCIE_PLP_LTSSMDIAGREG 0x23C /* LTSSM State Machine Diag */ +#define PCIE_PLP_MODEREG 0x200u /* Mode */ +#define PCIE_PLP_STATUSREG 0x204u /* Status */ +#define PCIE_PLP_LTSSMCTRLREG 0x208u /* LTSSM control */ +#define PCIE_PLP_LTLINKNUMREG 0x20cu /* Link Training Link number */ +#define PCIE_PLP_LTLANENUMREG 0x210u /* Link Training Lane number */ +#define PCIE_PLP_LTNFTSREG 0x214u /* Link Training N_FTS */ +#define PCIE_PLP_ATTNREG 0x218u /* Attention */ +#define PCIE_PLP_ATTNMASKREG 0x21Cu /* Attention Mask */ +#define PCIE_PLP_RXERRCTR 0x220u /* Rx Error */ +#define PCIE_PLP_RXFRMERRCTR 0x224u /* Rx Framing Error */ +#define PCIE_PLP_RXERRTHRESHREG 0x228u /* Rx Error threshold */ +#define PCIE_PLP_TESTCTRLREG 0x22Cu /* Test Control reg */ +#define PCIE_PLP_SERDESCTRLOVRDREG 0x230u /* SERDES Control Override */ +#define PCIE_PLP_TIMINGOVRDREG 0x234u /* Timing param override */ +#define PCIE_PLP_RXTXSMDIAGREG 0x238u /* RXTX State Machine Diag */ +#define PCIE_PLP_LTSSMDIAGREG 0x23Cu /* LTSSM State Machine Diag */ /* PCIE protocol DLLP diagnostic registers */ -#define PCIE_DLLP_LCREG 0x100 /* Link Control */ -#define PCIE_DLLP_LSREG 0x104 /* Link Status */ -#define PCIE_DLLP_LAREG 0x108 /* Link Attention */ -#define PCIE_DLLP_LAMASKREG 0x10C /* Link Attention Mask */ -#define PCIE_DLLP_NEXTTXSEQNUMREG 0x110 /* Next Tx Seq Num */ -#define PCIE_DLLP_ACKEDTXSEQNUMREG 0x114 /* Acked Tx Seq Num */ -#define PCIE_DLLP_PURGEDTXSEQNUMREG 0x118 /* Purged Tx Seq Num */ -#define PCIE_DLLP_RXSEQNUMREG 0x11C /* Rx Sequence Number */ -#define PCIE_DLLP_LRREG 0x120 /* Link Replay */ -#define PCIE_DLLP_LACKTOREG 0x124 /* Link Ack Timeout */ -#define PCIE_DLLP_PMTHRESHREG 0x128 /* Power Management Threshold */ -#define PCIE_DLLP_RTRYWPREG 0x12C /* Retry buffer write ptr */ -#define PCIE_DLLP_RTRYRPREG 0x130 /* Retry buffer Read ptr */ -#define PCIE_DLLP_RTRYPPREG 0x134 /* Retry buffer Purged ptr 
*/ -#define PCIE_DLLP_RTRRWREG 0x138 /* Retry buffer Read/Write */ -#define PCIE_DLLP_ECTHRESHREG 0x13C /* Error Count Threshold */ -#define PCIE_DLLP_TLPERRCTRREG 0x140 /* TLP Error Counter */ -#define PCIE_DLLP_ERRCTRREG 0x144 /* Error Counter */ -#define PCIE_DLLP_NAKRXCTRREG 0x148 /* NAK Received Counter */ -#define PCIE_DLLP_TESTREG 0x14C /* Test */ -#define PCIE_DLLP_PKTBIST 0x150 /* Packet BIST */ -#define PCIE_DLLP_PCIE11 0x154 /* DLLP PCIE 1.1 reg */ +#define PCIE_DLLP_LCREG 0x100u /* Link Control */ +#define PCIE_DLLP_LSREG 0x104u /* Link Status */ +#define PCIE_DLLP_LAREG 0x108u /* Link Attention */ +#define PCIE_DLLP_LAMASKREG 0x10Cu /* Link Attention Mask */ +#define PCIE_DLLP_NEXTTXSEQNUMREG 0x110u /* Next Tx Seq Num */ +#define PCIE_DLLP_ACKEDTXSEQNUMREG 0x114u /* Acked Tx Seq Num */ +#define PCIE_DLLP_PURGEDTXSEQNUMREG 0x118u /* Purged Tx Seq Num */ +#define PCIE_DLLP_RXSEQNUMREG 0x11Cu /* Rx Sequence Number */ +#define PCIE_DLLP_LRREG 0x120u /* Link Replay */ +#define PCIE_DLLP_LACKTOREG 0x124u /* Link Ack Timeout */ +#define PCIE_DLLP_PMTHRESHREG 0x128u /* Power Management Threshold */ +#define PCIE_DLLP_RTRYWPREG 0x12Cu /* Retry buffer write ptr */ +#define PCIE_DLLP_RTRYRPREG 0x130u /* Retry buffer Read ptr */ +#define PCIE_DLLP_RTRYPPREG 0x134u /* Retry buffer Purged ptr */ +#define PCIE_DLLP_RTRRWREG 0x138u /* Retry buffer Read/Write */ +#define PCIE_DLLP_ECTHRESHREG 0x13Cu /* Error Count Threshold */ +#define PCIE_DLLP_TLPERRCTRREG 0x140u /* TLP Error Counter */ +#define PCIE_DLLP_ERRCTRREG 0x144u /* Error Counter */ +#define PCIE_DLLP_NAKRXCTRREG 0x148u /* NAK Received Counter */ +#define PCIE_DLLP_TESTREG 0x14Cu /* Test */ +#define PCIE_DLLP_PKTBIST 0x150u /* Packet BIST */ +#define PCIE_DLLP_PCIE11 0x154u /* DLLP PCIE 1.1 reg */ -#define PCIE_DLLP_LSREG_LINKUP (1 << 16) +#define PCIE_DLLP_LSREG_LINKUP (1u << 16u) /* PCIE protocol TLP diagnostic registers */ -#define PCIE_TLP_CONFIGREG 0x000 /* Configuration */ -#define 
PCIE_TLP_WORKAROUNDSREG 0x004 /* TLP Workarounds */ -#define PCIE_TLP_WRDMAUPPER 0x010 /* Write DMA Upper Address */ -#define PCIE_TLP_WRDMALOWER 0x014 /* Write DMA Lower Address */ -#define PCIE_TLP_WRDMAREQ_LBEREG 0x018 /* Write DMA Len/ByteEn Req */ -#define PCIE_TLP_RDDMAUPPER 0x01C /* Read DMA Upper Address */ -#define PCIE_TLP_RDDMALOWER 0x020 /* Read DMA Lower Address */ -#define PCIE_TLP_RDDMALENREG 0x024 /* Read DMA Len Req */ -#define PCIE_TLP_MSIDMAUPPER 0x028 /* MSI DMA Upper Address */ -#define PCIE_TLP_MSIDMALOWER 0x02C /* MSI DMA Lower Address */ -#define PCIE_TLP_MSIDMALENREG 0x030 /* MSI DMA Len Req */ -#define PCIE_TLP_SLVREQLENREG 0x034 /* Slave Request Len */ -#define PCIE_TLP_FCINPUTSREQ 0x038 /* Flow Control Inputs */ -#define PCIE_TLP_TXSMGRSREQ 0x03C /* Tx StateMachine and Gated Req */ -#define PCIE_TLP_ADRACKCNTARBLEN 0x040 /* Address Ack XferCnt and ARB Len */ -#define PCIE_TLP_DMACPLHDR0 0x044 /* DMA Completion Hdr 0 */ -#define PCIE_TLP_DMACPLHDR1 0x048 /* DMA Completion Hdr 1 */ -#define PCIE_TLP_DMACPLHDR2 0x04C /* DMA Completion Hdr 2 */ -#define PCIE_TLP_DMACPLMISC0 0x050 /* DMA Completion Misc0 */ -#define PCIE_TLP_DMACPLMISC1 0x054 /* DMA Completion Misc1 */ -#define PCIE_TLP_DMACPLMISC2 0x058 /* DMA Completion Misc2 */ -#define PCIE_TLP_SPTCTRLLEN 0x05C /* Split Controller Req len */ -#define PCIE_TLP_SPTCTRLMSIC0 0x060 /* Split Controller Misc 0 */ -#define PCIE_TLP_SPTCTRLMSIC1 0x064 /* Split Controller Misc 1 */ -#define PCIE_TLP_BUSDEVFUNC 0x068 /* Bus/Device/Func */ -#define PCIE_TLP_RESETCTR 0x06C /* Reset Counter */ -#define PCIE_TLP_RTRYBUF 0x070 /* Retry Buffer value */ -#define PCIE_TLP_TGTDEBUG1 0x074 /* Target Debug Reg1 */ -#define PCIE_TLP_TGTDEBUG2 0x078 /* Target Debug Reg2 */ -#define PCIE_TLP_TGTDEBUG3 0x07C /* Target Debug Reg3 */ -#define PCIE_TLP_TGTDEBUG4 0x080 /* Target Debug Reg4 */ +#define PCIE_TLP_CONFIGREG 0x000u /* Configuration */ +#define PCIE_TLP_WORKAROUNDSREG 0x004u /* TLP Workarounds */ +#define 
PCIE_TLP_WRDMAUPPER 0x010u /* Write DMA Upper Address */ +#define PCIE_TLP_WRDMALOWER 0x014u /* Write DMA Lower Address */ +#define PCIE_TLP_WRDMAREQ_LBEREG 0x018u /* Write DMA Len/ByteEn Req */ +#define PCIE_TLP_RDDMAUPPER 0x01Cu /* Read DMA Upper Address */ +#define PCIE_TLP_RDDMALOWER 0x020u /* Read DMA Lower Address */ +#define PCIE_TLP_RDDMALENREG 0x024u /* Read DMA Len Req */ +#define PCIE_TLP_MSIDMAUPPER 0x028u /* MSI DMA Upper Address */ +#define PCIE_TLP_MSIDMALOWER 0x02Cu /* MSI DMA Lower Address */ +#define PCIE_TLP_MSIDMALENREG 0x030u /* MSI DMA Len Req */ +#define PCIE_TLP_SLVREQLENREG 0x034u /* Slave Request Len */ +#define PCIE_TLP_FCINPUTSREQ 0x038u /* Flow Control Inputs */ +#define PCIE_TLP_TXSMGRSREQ 0x03Cu /* Tx StateMachine and Gated Req */ +#define PCIE_TLP_ADRACKCNTARBLEN 0x040u /* Address Ack XferCnt and ARB Len */ +#define PCIE_TLP_DMACPLHDR0 0x044u /* DMA Completion Hdr 0 */ +#define PCIE_TLP_DMACPLHDR1 0x048u /* DMA Completion Hdr 1 */ +#define PCIE_TLP_DMACPLHDR2 0x04Cu /* DMA Completion Hdr 2 */ +#define PCIE_TLP_DMACPLMISC0 0x050u /* DMA Completion Misc0 */ +#define PCIE_TLP_DMACPLMISC1 0x054u /* DMA Completion Misc1 */ +#define PCIE_TLP_DMACPLMISC2 0x058u /* DMA Completion Misc2 */ +#define PCIE_TLP_SPTCTRLLEN 0x05Cu /* Split Controller Req len */ +#define PCIE_TLP_SPTCTRLMSIC0 0x060u /* Split Controller Misc 0 */ +#define PCIE_TLP_SPTCTRLMSIC1 0x064u /* Split Controller Misc 1 */ +#define PCIE_TLP_BUSDEVFUNC 0x068u /* Bus/Device/Func */ +#define PCIE_TLP_RESETCTR 0x06Cu /* Reset Counter */ +#define PCIE_TLP_RTRYBUF 0x070u /* Retry Buffer value */ +#define PCIE_TLP_TGTDEBUG1 0x074u /* Target Debug Reg1 */ +#define PCIE_TLP_TGTDEBUG2 0x078u /* Target Debug Reg2 */ +#define PCIE_TLP_TGTDEBUG3 0x07Cu /* Target Debug Reg3 */ +#define PCIE_TLP_TGTDEBUG4 0x080u /* Target Debug Reg4 */ /* PCIE2 MDIO register offsets */ #define PCIE2_MDIO_CONTROL 0x128 @@ -343,10 +519,10 @@ typedef struct sbpcieregs { /* MDIO control */ -#define 
MDIOCTL_DIVISOR_MASK 0x7f /* clock to be used on MDIO */ -#define MDIOCTL_DIVISOR_VAL 0x2 -#define MDIOCTL_PREAM_EN 0x80 /* Enable preamble sequnce */ -#define MDIOCTL_ACCESS_DONE 0x100 /* Tranaction complete */ +#define MDIOCTL_DIVISOR_MASK 0x7fu /* clock to be used on MDIO */ +#define MDIOCTL_DIVISOR_VAL 0x2u +#define MDIOCTL_PREAM_EN 0x80u /* Enable preamble sequnce */ +#define MDIOCTL_ACCESS_DONE 0x100u /* Tranaction complete */ /* MDIO Data */ #define MDIODATA_MASK 0x0000ffff /* data 2 bytes */ @@ -376,7 +552,7 @@ typedef struct sbpcieregs { #define MDIOCTL2_SLAVE_BYPASS 0x10000000 /* IP slave bypass */ #define MDIOCTL2_READ 0x20000000 /* IP slave bypass */ -#define MDIODATA2_DONE 0x80000000 /* rd/wr transaction done */ +#define MDIODATA2_DONE 0x80000000u /* rd/wr transaction done */ #define MDIODATA2_MASK 0x7FFFFFFF /* rd/wr transaction data */ #define MDIODATA2_DEVADDR_SHF 4 /* Physmedia devaddr shift */ @@ -424,8 +600,8 @@ typedef struct sbpcieregs { #define PLL_CTRL_FREQDET_EN 0x4000 /* bit 14 is FREQDET on */ /* Power management threshold */ -#define PCIE_L0THRESHOLDTIME_MASK 0xFF00 /* bits 0 - 7 */ -#define PCIE_L1THRESHOLDTIME_MASK 0xFF00 /* bits 8 - 15 */ +#define PCIE_L0THRESHOLDTIME_MASK 0xFF00u /* bits 0 - 7 */ +#define PCIE_L1THRESHOLDTIME_MASK 0xFF00u /* bits 8 - 15 */ #define PCIE_L1THRESHOLDTIME_SHIFT 8 /* PCIE_L1THRESHOLDTIME_SHIFT */ #define PCIE_L1THRESHOLD_WARVAL 0x72 /* WAR value */ #define PCIE_ASPMTIMER_EXTEND 0x01000000 /* > rev7: enable extend ASPM timer */ @@ -436,20 +612,75 @@ typedef struct sbpcieregs { #define SRSH_ASPM_L1_ENB 0x10 /* bit 4 */ #define SRSH_ASPM_L0s_ENB 0x8 /* bit 3 */ #define SRSH_PCIE_MISC_CONFIG 5 /* word 5 */ -#define SRSH_L23READY_EXIT_NOPERST 0x8000 /* bit 15 */ +#define SRSH_L23READY_EXIT_NOPERST 0x8000u /* bit 15 */ #define SRSH_CLKREQ_OFFSET_REV5 20 /* word 20 for srom rev <= 5 */ #define SRSH_CLKREQ_OFFSET_REV8 52 /* word 52 for srom rev 8 */ #define SRSH_CLKREQ_ENB 0x0800 /* bit 11 */ #define 
SRSH_BD_OFFSET 6 /* word 6 */ #define SRSH_AUTOINIT_OFFSET 18 /* auto initialization enable */ +/* PCI Capability ID's + * Reference include/linux/pci_regs.h + * #define PCI_CAP_LIST_ID 0 // Capability ID + * #define PCI_CAP_ID_PM 0x01 // Power Management + * #define PCI_CAP_ID_AGP 0x02 // Accelerated Graphics Port + * #define PCI_CAP_ID_VPD 0x03 // Vital Product Data + * #define PCI_CAP_ID_SLOTID 0x04 // Slot Identification + * #define PCI_CAP_ID_MSI 0x05 // Message Signalled Interrupts + * #define PCI_CAP_ID_CHSWP 0x06 // CompactPCI HotSwap + * #define PCI_CAP_ID_PCIX 0x07 // PCI-X + * #define PCI_CAP_ID_HT 0x08 // HyperTransport + * #define PCI_CAP_ID_VNDR 0x09 // Vendor-Specific + * #define PCI_CAP_ID_DBG 0x0A // Debug port + * #define PCI_CAP_ID_CCRC 0x0B // CompactPCI Central Resource Control + * #define PCI_CAP_ID_SHPC 0x0C // PCI Standard Hot-Plug Controller + * #define PCI_CAP_ID_SSVID 0x0D // Bridge subsystem vendor/device ID + * #define PCI_CAP_ID_AGP3 0x0E // AGP Target PCI-PCI bridge + * #define PCI_CAP_ID_SECDEV 0x0F // Secure Device + * #define PCI_CAP_ID_MSIX 0x11 // MSI-X + * #define PCI_CAP_ID_SATA 0x12 // SATA Data/Index Conf. 
+ * #define PCI_CAP_ID_AF 0x13 // PCI Advanced Features + * #define PCI_CAP_ID_EA 0x14 // PCI Enhanced Allocation + * #define PCI_CAP_ID_MAX PCI_CAP_ID_EA + */ + +#define PCIE_CAP_ID_EXP 0x10 // PCI Express + +/* PCIe Capabilities Offsets + * Reference include/linux/pci_regs.h + * #define PCIE_CAP_FLAGS 2 // Capabilities register + * #define PCIE_CAP_DEVCAP 4 // Device capabilities + * #define PCIE_CAP_DEVCTL 8 // Device Control + * #define PCIE_CAP_DEVSTA 10 // Device Status + * #define PCIE_CAP_LNKCAP 12 // Link Capabilities + * #define PCIE_CAP_LNKCTL 16 // Link Control + * #define PCIE_CAP_LNKSTA 18 // Link Status + * #define PCI_CAP_EXP_ENDPOINT_SIZEOF_V1 20 // v1 endpoints end here + * #define PCIE_CAP_SLTCAP 20 // Slot Capabilities + * #define PCIE_CAP_SLTCTL 24 // Slot Control + * #define PCIE_CAP_SLTSTA 26 // Slot Status + * #define PCIE_CAP_RTCTL 28 // Root Control + * #define PCIE_CAP_RTCAP 30 // Root Capabilities + * #define PCIE_CAP_RTSTA 32 // Root Status + */ + + +/* Linkcapability reg offset in PCIE Cap */ +#define PCIE_CAP_LINKCAP_OFFSET 12 /* linkcap offset in pcie cap */ +#define PCIE_CAP_LINKCAP_LNKSPEED_MASK 0xf /* Supported Link Speeds */ +#define PCIE_CAP_LINKCAP_GEN2 0x2 /* Value for GEN2 */ + +/* Uc_Err reg offset in AER Cap */ +#define PCIE_EXTCAP_ID_ERR 0x01 /* Advanced Error Reporting */ +#define PCIE_EXTCAP_AER_UCERR_OFFSET 4 /* Uc_Err reg offset in AER Cap */ + /* Linkcontrol reg offset in PCIE Cap */ #define PCIE_CAP_LINKCTRL_OFFSET 16 /* linkctrl offset in pcie cap */ #define PCIE_CAP_LCREG_ASPML0s 0x01 /* ASPM L0s in linkctrl */ #define PCIE_CAP_LCREG_ASPML1 0x02 /* ASPM L1 in linkctrl */ #define PCIE_CLKREQ_ENAB 0x100 /* CLKREQ Enab in linkctrl */ -#define PCIE_LINKSPEED_MASK 0xF0000 /* bits 0 - 3 of high word */ -#define PCIE_LINKSPEED_SHIFT 16 /* PCIE_LINKSPEED_SHIFT */ +#define PCIE_LINKSPEED_MASK 0xF0000u /* bits 0 - 3 of high word */ +#define PCIE_LINKSPEED_SHIFT 16 /* PCIE_LINKSPEED_SHIFT */ /* Devcontrol reg offset in PCIE 
Cap */ #define PCIE_CAP_DEVCTRL_OFFSET 8 /* devctrl offset in pcie cap */ @@ -474,6 +705,12 @@ typedef struct sbpcieregs { #define PCIE_ASPM_L11_ENAB 8 /* ASPM L1.1 in PML1_sub_control2 */ #define PCIE_ASPM_L12_ENAB 4 /* ASPM L1.2 in PML1_sub_control2 */ +/* NumMsg and NumMsgEn in PCIE MSI Cap */ +#define MSICAP_NUM_MSG_SHF 17 +#define MSICAP_NUM_MSG_MASK (0x7 << MSICAP_NUM_MSG_SHF) +#define MSICAP_NUM_MSG_EN_SHF 20 +#define MSICAP_NUM_MSG_EN_MASK (0x7 << MSICAP_NUM_MSG_EN_SHF) + /* Devcontrol2 reg offset in PCIE Cap */ #define PCIE_CAP_DEVCTRL2_OFFSET 0x28 /* devctrl2 offset in pcie cap */ #define PCIE_CAP_DEVCTRL2_LTR_ENAB_MASK 0x400 /* Latency Tolerance Reporting Enable */ @@ -481,22 +718,22 @@ typedef struct sbpcieregs { #define PCIE_CAP_DEVCTRL2_OBFF_ENAB_MASK 0x6000 /* Enable OBFF mechanism, select signaling method */ /* LTR registers in PCIE Cap */ -#define PCIE_LTR0_REG_OFFSET 0x844 /* ltr0_reg offset in pcie cap */ -#define PCIE_LTR1_REG_OFFSET 0x848 /* ltr1_reg offset in pcie cap */ -#define PCIE_LTR2_REG_OFFSET 0x84c /* ltr2_reg offset in pcie cap */ -#define PCIE_LTR0_REG_DEFAULT_60 0x883c883c /* active latency default to 60usec */ -#define PCIE_LTR0_REG_DEFAULT_150 0x88968896 /* active latency default to 150usec */ -#define PCIE_LTR1_REG_DEFAULT 0x88648864 /* idle latency default to 100usec */ -#define PCIE_LTR2_REG_DEFAULT 0x90039003 /* sleep latency default to 3msec */ +#define PCIE_LTR0_REG_OFFSET 0x844u /* ltr0_reg offset in pcie cap */ +#define PCIE_LTR1_REG_OFFSET 0x848u /* ltr1_reg offset in pcie cap */ +#define PCIE_LTR2_REG_OFFSET 0x84cu /* ltr2_reg offset in pcie cap */ +#define PCIE_LTR0_REG_DEFAULT_60 0x883c883cu /* active latency default to 60usec */ +#define PCIE_LTR0_REG_DEFAULT_150 0x88968896u /* active latency default to 150usec */ +#define PCIE_LTR1_REG_DEFAULT 0x88648864u /* idle latency default to 100usec */ +#define PCIE_LTR2_REG_DEFAULT 0x90039003u /* sleep latency default to 3msec */ /* Status reg PCIE_PLP_STATUSREG */ #define 
PCIE_PLP_POLARITYINV_STAT 0x10 /* PCIE BRCM Vendor CAP REVID reg bits */ -#define BRCMCAP_PCIEREV_CT_MASK 0xF00 -#define BRCMCAP_PCIEREV_CT_SHIFT 8 -#define BRCMCAP_PCIEREV_REVID_MASK 0xFF +#define BRCMCAP_PCIEREV_CT_MASK 0xF00u +#define BRCMCAP_PCIEREV_CT_SHIFT 8u +#define BRCMCAP_PCIEREV_REVID_MASK 0xFFu #define BRCMCAP_PCIEREV_REVID_SHIFT 0 #define PCIE_REVREG_CT_PCIE1 0 @@ -528,15 +765,22 @@ typedef struct sbpcieregs { #define PCIECFGREG_MSI_ADDR_L 0x5C #define PCIECFGREG_MSI_ADDR_H 0x60 #define PCIECFGREG_MSI_DATA 0x64 -#define PCIECFGREG_LINK_STATUS_CTRL 0xBC -#define PCIECFGREG_LINK_STATUS_CTRL2 0xDC +#define PCIECFGREG_LINK_STATUS_CTRL 0xBCu +#define PCIECFGREG_DEV_STATUS_CTRL 0xB4u +#define PCIECFGGEN_DEV_STATUS_CTRL2 0xD4 +#define PCIECFGREG_LINK_STATUS_CTRL2 0xDCu #define PCIECFGREG_RBAR_CTRL 0x228 #define PCIECFGREG_PML1_SUB_CTRL1 0x248 +#define PCIECFGREG_PML1_SUB_CTRL2 0x24C #define PCIECFGREG_REG_BAR2_CONFIG 0x4E0 #define PCIECFGREG_REG_BAR3_CONFIG 0x4F4 #define PCIECFGREG_PDL_CTRL1 0x1004 #define PCIECFGREG_PDL_IDDQ 0x1814 #define PCIECFGREG_REG_PHY_CTL7 0x181c +#define PCIECFGREG_PHY_DBG_CLKREQ0 0x1E10 +#define PCIECFGREG_PHY_DBG_CLKREQ1 0x1E14 +#define PCIECFGREG_PHY_DBG_CLKREQ2 0x1E18 +#define PCIECFGREG_PHY_DBG_CLKREQ3 0x1E1C /* PCIECFGREG_PML1_SUB_CTRL1 Bit Definition */ #define PCI_PM_L1_2_ENA_MASK 0x00000001 /* PCI-PM L1.2 Enabled */ @@ -556,17 +800,35 @@ typedef struct sbpcieregs { /* enumeration Core regs */ #define PCIH2D_MailBox 0x140 -#define PCIH2D_DB1 0x144 +#define PCIH2D_DB1 0x144 #define PCID2H_MailBox 0x148 +#define PCIH2D_MailBox_1 0x150 /* for dma channel1 */ +#define PCIH2D_DB1_1 0x154 +#define PCID2H_MailBox_1 0x158 +#define PCIH2D_MailBox_2 0x160 /* for dma channel2 which will be used for Implicit DMA */ +#define PCIH2D_DB1_2 0x164 +#define PCID2H_MailBox_2 0x168 + #define PCIMailBoxInt 0x48 #define PCIMailBoxMask 0x4C +#define PCIMSIVecAssign 0x58 #define I_F0_B0 (0x1 << 8) /* Mail box interrupt Function 0 interrupt, bit 0 */ 
#define I_F0_B1 (0x1 << 9) /* Mail box interrupt Function 0 interrupt, bit 1 */ #define PCIECFGREG_DEVCONTROL 0xB4 +#define PCIECFGREG_BASEADDR0 0x10 #define PCIECFGREG_DEVCONTROL_MRRS_SHFT 12 #define PCIECFGREG_DEVCONTROL_MRRS_MASK (0x7 << PCIECFGREG_DEVCONTROL_MRRS_SHFT) +#define PCIECFGREG_DEVCTRL_MPS_SHFT 5 +#define PCIECFGREG_DEVCTRL_MPS_MASK (0x7 << PCIECFGREG_DEVCTRL_MPS_SHFT) +#define PCIECFGREG_PM_CSR_STATE_MASK 0x00000003 +#define PCIECFGREG_PM_CSR_STATE_D0 0 +#define PCIECFGREG_PM_CSR_STATE_D1 1 +#define PCIECFGREG_PM_CSR_STATE_D2 2 +#define PCIECFGREG_PM_CSR_STATE_D3_HOT 3 +#define PCIECFGREG_PM_CSR_STATE_D3_COLD 4 + /* SROM hardware region */ #define SROM_OFFSET_BAR1_CTRL 52 @@ -619,6 +881,11 @@ typedef struct sbpcieregs { #define SBTOPCIE_MB_FUNC2_SHIFT 12 #define SBTOPCIE_MB_FUNC3_SHIFT 14 +#define SBTOPCIE_MB1_FUNC0_SHIFT 9 +#define SBTOPCIE_MB1_FUNC1_SHIFT 11 +#define SBTOPCIE_MB1_FUNC2_SHIFT 13 +#define SBTOPCIE_MB1_FUNC3_SHIFT 15 + /* pcieiocstatus */ #define PCIEGEN2_IOC_D0_STATE_SHIFT 8 #define PCIEGEN2_IOC_D1_STATE_SHIFT 9 @@ -644,9 +911,49 @@ typedef struct sbpcieregs { #define PCIE_STAT_CTRL_INTENABLE 0x4 #define PCIE_STAT_CTRL_INTSTATUS 0x8 +/* cpl_timeout_ctrl_reg */ +#define PCIE_CTO_TO_THRESHOLD_SHIFT 0 +#define PCIE_CTO_TO_THRESHHOLD_MASK (0xfffff << PCIE_CTO_TO_THRESHOLD_SHIFT) + +#define PCIE_CTO_CLKCHKCNT_SHIFT 24 +#define PCIE_CTO_CLKCHKCNT_MASK (0xf << PCIE_CTO_CLKCHKCNT_SHIFT) + +#define PCIE_CTO_ENAB_SHIFT 31 +#define PCIE_CTO_ENAB_MASK (0x1 << PCIE_CTO_ENAB_SHIFT) + +#define PCIE_CTO_TO_THRESH_DEFAULT 0x58000 +#define PCIE_CTO_CLKCHKCNT_VAL 0xA + +/* ErrLog */ +#define PCIE_SROMRD_ERR_SHIFT 5 +#define PCIE_SROMRD_ERR_MASK (0x1 << PCIE_SROMRD_ERR_SHIFT) + +#define PCIE_CTO_ERR_SHIFT 8 +#define PCIE_CTO_ERR_MASK (0x1 << PCIE_CTO_ERR_SHIFT) + +#define PCIE_CTO_ERR_CODE_SHIFT 9 +#define PCIE_CTO_ERR_CODE_MASK (0x3 << PCIE_CTO_ERR_CODE_SHIFT) + +#define PCIE_BP_CLK_OFF_ERR_SHIFT 12 +#define PCIE_BP_CLK_OFF_ERR_MASK (0x1 << 
PCIE_BP_CLK_OFF_ERR_SHIFT) + +#define PCIE_BP_IN_RESET_ERR_SHIFT 13 +#define PCIE_BP_IN_RESET_ERR_MASK (0x1 << PCIE_BP_IN_RESET_ERR_SHIFT) + #ifdef BCMDRIVER void pcie_watchdog_reset(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs); void pcie_serdes_iddqdisable(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs); +void pcie_set_trefup_time_100us(si_t *sih); #endif /* BCMDRIVER */ +/* DMA intstatus and intmask */ +#define I_PC (1 << 10) /* pci descriptor error */ +#define I_PD (1 << 11) /* pci data error */ +#define I_DE (1 << 12) /* descriptor protocol error */ +#define I_RU (1 << 13) /* receive descriptor underflow */ +#define I_RO (1 << 14) /* receive fifo overflow */ +#define I_XU (1 << 15) /* transmit fifo underflow */ +#define I_RI (1 << 16) /* receive interrupt */ +#define I_XI (1 << 24) /* transmit interrupt */ + #endif /* _PCIE_CORE_H */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bcmudp.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bcmudp.h deleted file mode 100755 index 97cf815db76c..000000000000 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bcmudp.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Fundamental constants relating to UDP Protocol - * - * Copyright (C) 2016, Broadcom Corporation - * All Rights Reserved. - * - * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation; - * the contents of this file may not be disclosed to third parties, copied - * or duplicated in any form, in whole or in part, without the prior - * written permission of Broadcom Corporation. - * - * - * <> - * - * $Id: bcmudp.h 518342 2014-12-01 23:21:41Z $ - */ - -#ifndef _bcmudp_h_ -#define _bcmudp_h_ - -#ifndef _TYPEDEFS_H_ -#include -#endif - -/* This marks the start of a packed structure section. 
*/ -#include - - -/* UDP header */ -#define UDP_DEST_PORT_OFFSET 2 /* UDP dest port offset */ -#define UDP_LEN_OFFSET 4 /* UDP length offset */ -#define UDP_CHKSUM_OFFSET 6 /* UDP body checksum offset */ - -#define UDP_HDR_LEN 8 /* UDP header length */ -#define UDP_PORT_LEN 2 /* UDP port length */ - -/* These fields are stored in network order */ -BWL_PRE_PACKED_STRUCT struct bcmudp_hdr -{ - uint16 src_port; /* Source Port Address */ - uint16 dst_port; /* Destination Port Address */ - uint16 len; /* Number of bytes in datagram including header */ - uint16 chksum; /* entire datagram checksum with pseudoheader */ -} BWL_POST_PACKED_STRUCT; - -/* This marks the end of a packed structure section. */ -#include - -#endif /* #ifndef _bcmudp_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bt_amp_hci.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bt_amp_hci.h deleted file mode 100755 index 4e948d24dfc1..000000000000 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/bt_amp_hci.h +++ /dev/null @@ -1,444 +0,0 @@ -/* - * BT-AMP (BlueTooth Alternate Mac and Phy) HCI (Host/Controller Interface) - * - * Copyright (C) 1999-2016, Broadcom Corporation - * - * Unless you and Broadcom execute a separate written software license - * agreement governing use of this software, this software is licensed to you - * under the terms of the GNU General Public License version 2 (the "GPL"), - * available at http://www.broadcom.com/licenses/GPLv2.php, with the - * following added to such license: - * - * As a special exception, the copyright holders of this software give you - * permission to link this software with independent modules, and to copy and - * distribute the resulting executable under terms of your choice, provided that - * you also meet, for each linked independent module, the terms and conditions of - * the license of that module. An independent module is a module which is not - * derived from this software. 
The special exception does not apply to any - * modifications of the software. - * - * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a license - * other than the GPL, without Broadcom's express prior written consent. - * - * - * <> - * - * $Id: bt_amp_hci.h 518342 2014-12-01 23:21:41Z $ -*/ - -#ifndef _bt_amp_hci_h -#define _bt_amp_hci_h - -/* This marks the start of a packed structure section. */ -#include - - -/* AMP HCI CMD packet format */ -typedef BWL_PRE_PACKED_STRUCT struct amp_hci_cmd { - uint16 opcode; - uint8 plen; - uint8 parms[1]; -} BWL_POST_PACKED_STRUCT amp_hci_cmd_t; - -#define HCI_CMD_PREAMBLE_SIZE OFFSETOF(amp_hci_cmd_t, parms) -#define HCI_CMD_DATA_SIZE 255 - -/* AMP HCI CMD opcode layout */ -#define HCI_CMD_OPCODE(ogf, ocf) ((((ogf) & 0x3F) << 10) | ((ocf) & 0x03FF)) -#define HCI_CMD_OGF(opcode) ((uint8)(((opcode) >> 10) & 0x3F)) -#define HCI_CMD_OCF(opcode) ((opcode) & 0x03FF) - -/* AMP HCI command opcodes */ -#define HCI_Read_Failed_Contact_Counter HCI_CMD_OPCODE(0x05, 0x0001) -#define HCI_Reset_Failed_Contact_Counter HCI_CMD_OPCODE(0x05, 0x0002) -#define HCI_Read_Link_Quality HCI_CMD_OPCODE(0x05, 0x0003) -#define HCI_Read_Local_AMP_Info HCI_CMD_OPCODE(0x05, 0x0009) -#define HCI_Read_Local_AMP_ASSOC HCI_CMD_OPCODE(0x05, 0x000A) -#define HCI_Write_Remote_AMP_ASSOC HCI_CMD_OPCODE(0x05, 0x000B) -#define HCI_Create_Physical_Link HCI_CMD_OPCODE(0x01, 0x0035) -#define HCI_Accept_Physical_Link_Request HCI_CMD_OPCODE(0x01, 0x0036) -#define HCI_Disconnect_Physical_Link HCI_CMD_OPCODE(0x01, 0x0037) -#define HCI_Create_Logical_Link HCI_CMD_OPCODE(0x01, 0x0038) -#define HCI_Accept_Logical_Link HCI_CMD_OPCODE(0x01, 0x0039) -#define HCI_Disconnect_Logical_Link HCI_CMD_OPCODE(0x01, 0x003A) -#define HCI_Logical_Link_Cancel HCI_CMD_OPCODE(0x01, 0x003B) -#define HCI_Flow_Spec_Modify HCI_CMD_OPCODE(0x01, 0x003C) -#define HCI_Write_Flow_Control_Mode 
HCI_CMD_OPCODE(0x01, 0x0067) -#define HCI_Read_Best_Effort_Flush_Timeout HCI_CMD_OPCODE(0x01, 0x0069) -#define HCI_Write_Best_Effort_Flush_Timeout HCI_CMD_OPCODE(0x01, 0x006A) -#define HCI_Short_Range_Mode HCI_CMD_OPCODE(0x01, 0x006B) -#define HCI_Reset HCI_CMD_OPCODE(0x03, 0x0003) -#define HCI_Read_Connection_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0015) -#define HCI_Write_Connection_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0016) -#define HCI_Read_Link_Supervision_Timeout HCI_CMD_OPCODE(0x03, 0x0036) -#define HCI_Write_Link_Supervision_Timeout HCI_CMD_OPCODE(0x03, 0x0037) -#define HCI_Enhanced_Flush HCI_CMD_OPCODE(0x03, 0x005F) -#define HCI_Read_Logical_Link_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0061) -#define HCI_Write_Logical_Link_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0062) -#define HCI_Set_Event_Mask_Page_2 HCI_CMD_OPCODE(0x03, 0x0063) -#define HCI_Read_Location_Data_Command HCI_CMD_OPCODE(0x03, 0x0064) -#define HCI_Write_Location_Data_Command HCI_CMD_OPCODE(0x03, 0x0065) -#define HCI_Read_Local_Version_Info HCI_CMD_OPCODE(0x04, 0x0001) -#define HCI_Read_Local_Supported_Commands HCI_CMD_OPCODE(0x04, 0x0002) -#define HCI_Read_Buffer_Size HCI_CMD_OPCODE(0x04, 0x0005) -#define HCI_Read_Data_Block_Size HCI_CMD_OPCODE(0x04, 0x000A) - -/* AMP HCI command parameters */ -typedef BWL_PRE_PACKED_STRUCT struct read_local_cmd_parms { - uint8 plh; - uint8 offset[2]; /* length so far */ - uint8 max_remote[2]; -} BWL_POST_PACKED_STRUCT read_local_cmd_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct write_remote_cmd_parms { - uint8 plh; - uint8 offset[2]; - uint8 len[2]; - uint8 frag[1]; -} BWL_POST_PACKED_STRUCT write_remote_cmd_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct phy_link_cmd_parms { - uint8 plh; - uint8 key_length; - uint8 key_type; - uint8 key[1]; -} BWL_POST_PACKED_STRUCT phy_link_cmd_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_cmd_parms { - uint8 plh; - uint8 reason; -} BWL_POST_PACKED_STRUCT dis_phy_link_cmd_parms_t; - -typedef 
BWL_PRE_PACKED_STRUCT struct log_link_cmd_parms { - uint8 plh; - uint8 txflow[16]; - uint8 rxflow[16]; -} BWL_POST_PACKED_STRUCT log_link_cmd_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct ext_flow_spec { - uint8 id; - uint8 service_type; - uint8 max_sdu[2]; - uint8 sdu_ia_time[4]; - uint8 access_latency[4]; - uint8 flush_timeout[4]; -} BWL_POST_PACKED_STRUCT ext_flow_spec_t; - -typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_cmd_parms { - uint8 plh; - uint8 tx_fs_ID; -} BWL_POST_PACKED_STRUCT log_link_cancel_cmd_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_cmd_parms { - uint8 llh[2]; - uint8 txflow[16]; - uint8 rxflow[16]; -} BWL_POST_PACKED_STRUCT flow_spec_mod_cmd_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct plh_pad { - uint8 plh; - uint8 pad; -} BWL_POST_PACKED_STRUCT plh_pad_t; - -typedef BWL_PRE_PACKED_STRUCT union hci_handle { - uint16 bredr; - plh_pad_t amp; -} BWL_POST_PACKED_STRUCT hci_handle_t; - -typedef BWL_PRE_PACKED_STRUCT struct ls_to_cmd_parms { - hci_handle_t handle; - uint8 timeout[2]; -} BWL_POST_PACKED_STRUCT ls_to_cmd_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct befto_cmd_parms { - uint8 llh[2]; - uint8 befto[4]; -} BWL_POST_PACKED_STRUCT befto_cmd_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct srm_cmd_parms { - uint8 plh; - uint8 srm; -} BWL_POST_PACKED_STRUCT srm_cmd_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct ld_cmd_parms { - uint8 ld_aware; - uint8 ld[2]; - uint8 ld_opts; - uint8 l_opts; -} BWL_POST_PACKED_STRUCT ld_cmd_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct eflush_cmd_parms { - uint8 llh[2]; - uint8 packet_type; -} BWL_POST_PACKED_STRUCT eflush_cmd_parms_t; - -/* Generic AMP extended flow spec service types */ -#define EFS_SVCTYPE_NO_TRAFFIC 0 -#define EFS_SVCTYPE_BEST_EFFORT 1 -#define EFS_SVCTYPE_GUARANTEED 2 - -/* AMP HCI event packet format */ -typedef BWL_PRE_PACKED_STRUCT struct amp_hci_event { - uint8 ecode; - uint8 plen; - uint8 parms[1]; -} BWL_POST_PACKED_STRUCT amp_hci_event_t; 
- -#define HCI_EVT_PREAMBLE_SIZE OFFSETOF(amp_hci_event_t, parms) - -/* AMP HCI event codes */ -#define HCI_Command_Complete 0x0E -#define HCI_Command_Status 0x0F -#define HCI_Flush_Occurred 0x11 -#define HCI_Enhanced_Flush_Complete 0x39 -#define HCI_Physical_Link_Complete 0x40 -#define HCI_Channel_Select 0x41 -#define HCI_Disconnect_Physical_Link_Complete 0x42 -#define HCI_Logical_Link_Complete 0x45 -#define HCI_Disconnect_Logical_Link_Complete 0x46 -#define HCI_Flow_Spec_Modify_Complete 0x47 -#define HCI_Number_of_Completed_Data_Blocks 0x48 -#define HCI_Short_Range_Mode_Change_Complete 0x4C -#define HCI_Status_Change_Event 0x4D -#define HCI_Vendor_Specific 0xFF - -/* AMP HCI event mask bit positions */ -#define HCI_Physical_Link_Complete_Event_Mask 0x0001 -#define HCI_Channel_Select_Event_Mask 0x0002 -#define HCI_Disconnect_Physical_Link_Complete_Event_Mask 0x0004 -#define HCI_Logical_Link_Complete_Event_Mask 0x0020 -#define HCI_Disconnect_Logical_Link_Complete_Event_Mask 0x0040 -#define HCI_Flow_Spec_Modify_Complete_Event_Mask 0x0080 -#define HCI_Number_of_Completed_Data_Blocks_Event_Mask 0x0100 -#define HCI_Short_Range_Mode_Change_Complete_Event_Mask 0x1000 -#define HCI_Status_Change_Event_Mask 0x2000 -#define HCI_All_Event_Mask 0x31e7 -/* AMP HCI event parameters */ -typedef BWL_PRE_PACKED_STRUCT struct cmd_status_parms { - uint8 status; - uint8 cmdpkts; - uint16 opcode; -} BWL_POST_PACKED_STRUCT cmd_status_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct cmd_complete_parms { - uint8 cmdpkts; - uint16 opcode; - uint8 parms[1]; -} BWL_POST_PACKED_STRUCT cmd_complete_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct flush_occurred_evt_parms { - uint16 handle; -} BWL_POST_PACKED_STRUCT flush_occurred_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct write_remote_evt_parms { - uint8 status; - uint8 plh; -} BWL_POST_PACKED_STRUCT write_remote_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct read_local_evt_parms { - uint8 status; - uint8 plh; - uint16 len; - 
uint8 frag[1]; -} BWL_POST_PACKED_STRUCT read_local_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct read_local_info_evt_parms { - uint8 status; - uint8 AMP_status; - uint32 bandwidth; - uint32 gbandwidth; - uint32 latency; - uint32 PDU_size; - uint8 ctrl_type; - uint16 PAL_cap; - uint16 AMP_ASSOC_len; - uint32 max_flush_timeout; - uint32 be_flush_timeout; -} BWL_POST_PACKED_STRUCT read_local_info_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct log_link_evt_parms { - uint8 status; - uint16 llh; - uint8 plh; - uint8 tx_fs_ID; -} BWL_POST_PACKED_STRUCT log_link_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct disc_log_link_evt_parms { - uint8 status; - uint16 llh; - uint8 reason; -} BWL_POST_PACKED_STRUCT disc_log_link_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_evt_parms { - uint8 status; - uint8 plh; - uint8 tx_fs_ID; -} BWL_POST_PACKED_STRUCT log_link_cancel_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_evt_parms { - uint8 status; - uint16 llh; -} BWL_POST_PACKED_STRUCT flow_spec_mod_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct phy_link_evt_parms { - uint8 status; - uint8 plh; -} BWL_POST_PACKED_STRUCT phy_link_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_evt_parms { - uint8 status; - uint8 plh; - uint8 reason; -} BWL_POST_PACKED_STRUCT dis_phy_link_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct read_ls_to_evt_parms { - uint8 status; - hci_handle_t handle; - uint16 timeout; -} BWL_POST_PACKED_STRUCT read_ls_to_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct read_lla_ca_to_evt_parms { - uint8 status; - uint16 timeout; -} BWL_POST_PACKED_STRUCT read_lla_ca_to_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct read_data_block_size_evt_parms { - uint8 status; - uint16 ACL_pkt_len; - uint16 data_block_len; - uint16 data_block_num; -} BWL_POST_PACKED_STRUCT read_data_block_size_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct data_blocks { - uint16 handle; - uint16 
pkts; - uint16 blocks; -} BWL_POST_PACKED_STRUCT data_blocks_t; - -typedef BWL_PRE_PACKED_STRUCT struct num_completed_data_blocks_evt_parms { - uint16 num_blocks; - uint8 num_handles; - data_blocks_t completed[1]; -} BWL_POST_PACKED_STRUCT num_completed_data_blocks_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct befto_evt_parms { - uint8 status; - uint32 befto; -} BWL_POST_PACKED_STRUCT befto_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct srm_evt_parms { - uint8 status; - uint8 plh; - uint8 srm; -} BWL_POST_PACKED_STRUCT srm_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct contact_counter_evt_parms { - uint8 status; - uint8 llh[2]; - uint16 counter; -} BWL_POST_PACKED_STRUCT contact_counter_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct contact_counter_reset_evt_parms { - uint8 status; - uint8 llh[2]; -} BWL_POST_PACKED_STRUCT contact_counter_reset_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct read_linkq_evt_parms { - uint8 status; - hci_handle_t handle; - uint8 link_quality; -} BWL_POST_PACKED_STRUCT read_linkq_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct ld_evt_parms { - uint8 status; - uint8 ld_aware; - uint8 ld[2]; - uint8 ld_opts; - uint8 l_opts; -} BWL_POST_PACKED_STRUCT ld_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct eflush_complete_evt_parms { - uint16 handle; -} BWL_POST_PACKED_STRUCT eflush_complete_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct vendor_specific_evt_parms { - uint8 len; - uint8 parms[1]; -} BWL_POST_PACKED_STRUCT vendor_specific_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct local_version_info_evt_parms { - uint8 status; - uint8 hci_version; - uint16 hci_revision; - uint8 pal_version; - uint16 mfg_name; - uint16 pal_subversion; -} BWL_POST_PACKED_STRUCT local_version_info_evt_parms_t; - -#define MAX_SUPPORTED_CMD_BYTE 64 -typedef BWL_PRE_PACKED_STRUCT struct local_supported_cmd_evt_parms { - uint8 status; - uint8 cmd[MAX_SUPPORTED_CMD_BYTE]; -} BWL_POST_PACKED_STRUCT 
local_supported_cmd_evt_parms_t; - -typedef BWL_PRE_PACKED_STRUCT struct status_change_evt_parms { - uint8 status; - uint8 amp_status; -} BWL_POST_PACKED_STRUCT status_change_evt_parms_t; - -/* AMP HCI error codes */ -#define HCI_SUCCESS 0x00 -#define HCI_ERR_ILLEGAL_COMMAND 0x01 -#define HCI_ERR_NO_CONNECTION 0x02 -#define HCI_ERR_MEMORY_FULL 0x07 -#define HCI_ERR_CONNECTION_TIMEOUT 0x08 -#define HCI_ERR_MAX_NUM_OF_CONNECTIONS 0x09 -#define HCI_ERR_CONNECTION_EXISTS 0x0B -#define HCI_ERR_CONNECTION_DISALLOWED 0x0C -#define HCI_ERR_CONNECTION_ACCEPT_TIMEOUT 0x10 -#define HCI_ERR_UNSUPPORTED_VALUE 0x11 -#define HCI_ERR_ILLEGAL_PARAMETER_FMT 0x12 -#define HCI_ERR_CONN_TERM_BY_LOCAL_HOST 0x16 -#define HCI_ERR_UNSPECIFIED 0x1F -#define HCI_ERR_UNIT_KEY_USED 0x26 -#define HCI_ERR_QOS_REJECTED 0x2D -#define HCI_ERR_PARAM_OUT_OF_RANGE 0x30 -#define HCI_ERR_NO_SUITABLE_CHANNEL 0x39 -#define HCI_ERR_CHANNEL_MOVE 0xFF - -/* AMP HCI ACL Data packet format */ -typedef BWL_PRE_PACKED_STRUCT struct amp_hci_ACL_data { - uint16 handle; /* 12-bit connection handle + 2-bit PB and 2-bit BC flags */ - uint16 dlen; /* data total length */ - uint8 data[1]; -} BWL_POST_PACKED_STRUCT amp_hci_ACL_data_t; - -#define HCI_ACL_DATA_PREAMBLE_SIZE OFFSETOF(amp_hci_ACL_data_t, data) - -#define HCI_ACL_DATA_BC_FLAGS (0x0 << 14) -#define HCI_ACL_DATA_PB_FLAGS (0x3 << 12) - -#define HCI_ACL_DATA_HANDLE(handle) ((handle) & 0x0fff) -#define HCI_ACL_DATA_FLAGS(handle) ((handle) >> 12) - -/* AMP Activity Report packet formats */ -typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report { - uint8 ScheduleKnown; - uint8 NumReports; - uint8 data[1]; -} BWL_POST_PACKED_STRUCT amp_hci_activity_report_t; - -typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report_triple { - uint32 StartTime; - uint32 Duration; - uint32 Periodicity; -} BWL_POST_PACKED_STRUCT amp_hci_activity_report_triple_t; - -#define HCI_AR_SCHEDULE_KNOWN 0x01 - - -/* This marks the end of a packed structure section. 
*/ -#include - -#endif /* _bt_amp_hci_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/rte_ioctl.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/rte_ioctl.h index 9c214ae704ac..f4c8c803c167 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/rte_ioctl.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/rte_ioctl.h @@ -1,7 +1,7 @@ /* * HND Run Time Environment ioctl. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: rte_ioctl.h 514727 2014-11-12 03:02:48Z $ + * $Id: rte_ioctl.h 615249 2016-01-27 02:04:07Z $ */ #ifndef _rte_ioctl_h_ @@ -61,7 +61,9 @@ enum hnd_ioctl_cmd { BUS_FLUSH_RXREORDER_Q = 4, BUS_SET_LTR_STATE = 5, BUS_FLUSH_CHAINED_PKTS = 6, - BUS_SET_COPY_COUNT = 7 + BUS_SET_COPY_COUNT = 7, + BUS_UPDATE_FLOW_PKTS_MAX = 8, + BUS_UPDATE_EXTRA_TXLFRAGS = 9 }; #define SDPCMDEV_SET_MAXTXPKTGLOM 1 diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbchipc.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbchipc.h index 6d2389d17d67..ffec624c53dc 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbchipc.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbchipc.h @@ -5,9 +5,9 @@ * JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer, * GPIO interface, extbus, and support for serial and parallel flashes. 
* - * $Id: sbchipc.h 574579 2015-07-27 15:36:37Z $ + * $Id: sbchipc.h 657872 2016-09-02 22:17:34Z $ * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -53,14 +53,14 @@ typedef volatile struct { uint32 PAD[384]; uint32 pmucontrol; /* 0x600 */ - uint32 pmucapabilities; - uint32 pmustatus; - uint32 res_state; - uint32 res_pending; - uint32 pmutimer; - uint32 min_res_mask; - uint32 max_res_mask; - uint32 res_table_sel; + uint32 pmucapabilities; /* 0x604 */ + uint32 pmustatus; /* 0x608 */ + uint32 res_state; /* 0x60C */ + uint32 res_pending; /* 0x610 */ + uint32 pmutimer; /* 0x614 */ + uint32 min_res_mask; /* 0x618 */ + uint32 max_res_mask; /* 0x61C */ + uint32 res_table_sel; /* 0x620 */ uint32 res_dep_mask; uint32 res_updn_timer; uint32 res_timer; @@ -68,10 +68,10 @@ typedef volatile struct { uint32 pmuwatchdog; uint32 gpiosel; /* 0x638, rev >= 1 */ uint32 gpioenable; /* 0x63c, rev >= 1 */ - uint32 res_req_timer_sel; - uint32 res_req_timer; - uint32 res_req_mask; - uint32 PAD; + uint32 res_req_timer_sel; /* 0x640 */ + uint32 res_req_timer; /* 0x644 */ + uint32 res_req_mask; /* 0x648 */ + uint32 core_cap_ext; /* 0x64C */ uint32 chipcontrol_addr; /* 0x650 */ uint32 chipcontrol_data; /* 0x654 */ uint32 regcontrol_addr; @@ -81,10 +81,13 @@ typedef volatile struct { uint32 pmustrapopt; /* 0x668, corerev >= 28 */ uint32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */ uint32 retention_ctl; /* 0x670 */ - uint32 PAD[3]; + uint32 ILPPeriod; /* 0x674 */ + uint32 PAD[2]; uint32 retention_grpidx; /* 0x680 */ uint32 retention_grpctl; /* 0x684 */ - uint32 PAD[20]; + uint32 mac_res_req_timer; /* 0x688 */ + uint32 mac_res_req_mask; /* 0x68c */ + uint32 PAD[18]; uint32 pmucontrol_ext; /* 0x6d8 */ uint32 slowclkperiod; /* 0x6dc */ uint32 PAD[8]; @@ -303,7 +306,8 @@ typedef volatile struct { /* 
Clock control and hardware workarounds (corerev >= 20) */ uint32 clk_ctl_st; /* 0x1e0 */ uint32 hw_war; - uint32 PAD[70]; + uint32 powerctl; /* 0x1e8 */ + uint32 PAD[69]; /* UARTs */ uint8 uart0data; /* 0x300 */ @@ -331,8 +335,11 @@ typedef volatile struct { uint32 sr_control0; /* 0x504 */ uint32 sr_control1; /* 0x508 */ uint32 gpio_control; /* 0x50C */ - uint32 PAD[60]; - + uint32 PAD[29]; + /* 2 SR engines case */ + uint32 sr1_control0; /* 0x584 */ + uint32 sr1_control1; /* 0x588 */ + uint32 PAD[29]; /* PMU registers (corerev >= 20) */ /* Note: all timers driven by ILP clock are updated asynchronously to HT/ALP. * The CPU must read them twice, compare, and retry if different. @@ -488,7 +495,7 @@ typedef volatile struct { uint32 gci_rxfifo_common_ctrl; /* 0xDC4 */ uint32 gci_rxfifoctrl; /* 0xDC8 */ uint32 gci_uartreadid; /* DCC */ - uint32 gci_uartescval; /* DD0 */ + uint32 gci_seciuartescval; /* DD0 */ uint32 PAD; uint32 gci_secififolevel; /* DD8 */ uint32 gci_seciuartdata; /* DDC */ @@ -566,10 +573,15 @@ typedef volatile struct { #define CC_CHIPCTL_DATA 0x654 #define PMU_REG_CONTROL_ADDR 0x658 #define PMU_REG_CONTROL_DATA 0x65C -#define PMU_PLL_CONTROL_ADDR 0x660 -#define PMU_PLL_CONTROL_DATA 0x664 +#define PMU_PLL_CONTROL_ADDR 0x660 +#define PMU_PLL_CONTROL_DATA 0x664 + #define CC_SROM_CTRL 0x190 -#define CC_SROM_OTP 0x800 /* SROM/OTP address space */ +#ifdef SROM16K_4364_ADDRSPACE +#define CC_SROM_OTP 0xa000 /* SROM/OTP address space */ +#else +#define CC_SROM_OTP 0x0800 +#endif #define CC_GCI_INDIRECT_ADDR_REG 0xC40 #define CC_GCI_CHIP_CTRL_REG 0xE00 #define CC_GCI_CC_OFFSET_2 2 @@ -578,7 +590,6 @@ typedef volatile struct { #define CC_SWD_REQACK 0x384 #define CC_SWD_DATA 0x388 - #define CHIPCTRLREG0 0x0 #define CHIPCTRLREG1 0x1 #define CHIPCTRLREG2 0x2 @@ -685,14 +696,19 @@ typedef volatile struct { #define CC_CAP2_GSIO 0x00000002 /**< GSIO (spi/i2c) present, rev >= 37 */ /* capabilities extension */ -#define CC_CAP_EXT_SECI_PRESENT 0x00000001 /**< SECI 
present */ -#define CC_CAP_EXT_GSIO_PRESENT 0x00000002 /**< GSIO present */ -#define CC_CAP_EXT_GCI_PRESENT 0x00000004 /**< GCI present */ -#define CC_CAP_EXT_AOB_PRESENT 0x00000040 /**< AOB present */ -#define CC_CAP_EXT_SWD_PRESENT 0x00000400 /**< SWD present */ +#define CC_CAP_EXT_SECI_PRESENT 0x00000001 /**< SECI present */ +#define CC_CAP_EXT_GSIO_PRESENT 0x00000002 /**< GSIO present */ +#define CC_CAP_EXT_GCI_PRESENT 0x00000004 /**< GCI present */ +#define CC_CAP_EXT_SECI_PUART_PRESENT 0x00000008 /**< UART present */ +#define CC_CAP_EXT_AOB_PRESENT 0x00000040 /**< AOB present */ +#define CC_CAP_EXT_SWD_PRESENT 0x00000400 /**< SWD present */ /* WL Channel Info to BT via GCI - bits 40 - 47 */ #define GCI_WL_CHN_INFO_MASK (0xFF00) +/* WL indication of MCHAN enabled/disabled to BT in awdl mode- bit 36 */ +#define GCI_WL_MCHAN_BIT_MASK (0x0010) +/* WL Strobe to BT */ +#define GCI_WL_STROBE_BIT_MASK (0x0020) /* bits [51:48] - reserved for wlan TX pwr index */ /* bits [55:52] btc mode indication */ #define GCI_WL_BTC_MODE_SHIFT (20) @@ -737,6 +753,8 @@ typedef volatile struct { #define NS_SLOW_MEM_CLOCK 400000000 #endif /* CFG_SIM */ +#define ALP_CLOCK_53573 40000000 + /* HT clock */ #define HT_CLOCK 80000000 @@ -832,6 +850,7 @@ typedef volatile struct { #define OTPL_WRAP_TYPE_SHIFT 16 #define OTPL_WRAP_TYPE_65NM 0 #define OTPL_WRAP_TYPE_40NM 1 +#define OTPL_WRAP_TYPE_28NM 2 #define OTPL_ROW_SIZE_MASK 0x0000F000 #define OTPL_ROW_SIZE_SHIFT 12 @@ -867,6 +886,45 @@ typedef volatile struct { #define OTPPOC_OVST_READ_40NM 14 #define OTPPOC_OVST_PROG_40NM 15 +/* Opcodes for OTPP_OC field (28NM) */ +#define OTPPOC_READ_28NM 0 +#define OTPPOC_READBURST_28NM 1 +#define OTPPOC_PROG_ENABLE_28NM 2 +#define OTPPOC_PROG_DISABLE_28NM 3 +#define OTPPOC_PRESCREEN_28NM 4 +#define OTPPOC_PRESCREEN_RP_28NM 5 +#define OTPPOC_FLUSH_28NM 6 +#define OTPPOC_NOP_28NM 7 +#define OTPPOC_PROG_ECC_28NM 8 +#define OTPPOC_PROG_ECC_READ_28NM 9 +#define OTPPOC_PROG_28NM 10 +#define 
OTPPOC_PROGRAM_RP_28NM 11 +#define OTPPOC_PROGRAM_OVST_28NM 12 +#define OTPPOC_RELOAD_28NM 13 +#define OTPPOC_ERASE_28NM 14 +#define OTPPOC_LOAD_RF_28NM 15 +#define OTPPOC_CTRL_WR_28NM 16 +#define OTPPOC_CTRL_RD_28NM 17 +#define OTPPOC_READ_HP_28NM 18 +#define OTPPOC_READ_OVST_28NM 19 +#define OTPPOC_READ_VERIFY0_28NM 20 +#define OTPPOC_READ_VERIFY1_28NM 21 +#define OTPPOC_READ_FORCE0_28NM 22 +#define OTPPOC_READ_FORCE1_28NM 23 +#define OTPPOC_BURNIN_28NM 24 +#define OTPPOC_PROGRAM_LOCK_28NM 25 +#define OTPPOC_PROGRAM_TESTCOL_28NM 26 +#define OTPPOC_READ_TESTCOL_28NM 27 +#define OTPPOC_READ_FOUT_28NM 28 +#define OTPPOC_SFT_RESET_28NM 29 + +#define OTPP_OC_MASK_28NM 0x0f800000 +#define OTPP_OC_SHIFT_28NM 23 +#define OTPC_PROGEN_28NM 0x8 +#define OTPC_DBLERRCLR 0x20 +#define OTPC_CLK_EN_MASK 0x00000040 +#define OTPC_CLK_DIV_MASK 0x00000F80 + /* Fields in otplayoutextension */ #define OTPLAYOUTEXT_FUSE_MASK 0x3FF @@ -1080,7 +1138,11 @@ typedef volatile struct { #define PCTL_XTALFREQ_SHIFT 2 #define PCTL_ILP_DIV_EN 0x00000002 #define PCTL_LPO_SEL 0x00000001 + +/* Fields in pmucontrol_ext */ +#define PCTL_EXT_FASTLPO_ENAB 0x00000080 #define PCTL_EXT_FASTLPO_SWENAB 0x00000200 +#define PCTL_EXT_FASTLPO_PCIE_SWENAB 0x00004000 /**< rev33 for FLL1M */ #define DEFAULT_43012_MIN_RES_MASK 0x0f8bfe77 @@ -1236,8 +1298,13 @@ typedef volatile struct { #define SFLASH_ST_CSA 0x1000 /**< Keep chip select asserted */ #define SFLASH_ST_SSE 0x0220 /**< Sub-sector Erase */ -#define SFLASH_MXIC_RDID 0x0390 /**< Read Manufacture ID */ -#define SFLASH_MXIC_MFID 0xc2 /**< MXIC Manufacture ID */ +#define SFLASH_ST_READ4B 0x6313 /* Read Data Bytes in 4Byte address */ +#define SFLASH_ST_PP4B 0x6312 /* Page Program in 4Byte address */ +#define SFLASH_ST_SE4B 0x62dc /* Sector Erase in 4Byte address */ +#define SFLASH_ST_SSE4B 0x6221 /* Sub-sector Erase */ + +#define SFLASH_MXIC_RDID 0x0390 /* Read Manufacture ID */ +#define SFLASH_MXIC_MFID 0xc2 /* MXIC Manufacture ID */ /* Status register bits 
for ST flashes */ #define SFLASH_ST_WIP 0x01 /**< Write In Progress */ @@ -1361,6 +1428,9 @@ typedef volatile struct { #define PCAP5_CC_MASK 0xf8000000 #define PCAP5_CC_SHIFT 27 +/* CoreCapabilitiesExtension */ +#define PCAP_EXT_USE_MUXED_ILP_CLK_MASK 0x04000000 + /* PMU Resource Request Timer registers */ /* This is based on PmuRev0 */ #define PRRT_TIME_MASK 0x03ff @@ -1415,14 +1485,18 @@ typedef volatile struct { #define PMU_CC1_ENABLE_CLOSED_LOOP 0x00000000 /* PMU chip control2 register */ +#define PMU_CC2_RFLDO3P3_PU_FORCE_ON (1 << 15) +#define PMU_CC2_RFLDO3P3_PU_CLEAR 0x00000000 + +#define PMU_CC2_WL2CDIG_I_PMU_SLEEP (1 << 16) #define PMU_CHIPCTL2 2 -#define PMU_CC2_FORCE_SUBCORE_PWR_SWITCH_ON (1 << 18) -#define PMU_CC2_FORCE_PHY_PWR_SWITCH_ON (1 << 19) -#define PMU_CC2_FORCE_VDDM_PWR_SWITCH_ON (1 << 20) -#define PMU_CC2_FORCE_MEMLPLDO_PWR_SWITCH_ON (1 << 21) +#define PMU_CC2_FORCE_SUBCORE_PWR_SWITCH_ON (1 << 18) +#define PMU_CC2_FORCE_PHY_PWR_SWITCH_ON (1 << 19) +#define PMU_CC2_FORCE_VDDM_PWR_SWITCH_ON (1 << 20) +#define PMU_CC2_FORCE_MEMLPLDO_PWR_SWITCH_ON (1 << 21) #define PMU_CC2_MASK_WL_DEV_WAKE (1 << 22) #define PMU_CC2_INV_GPIO_POLARITY_PMU_WAKE (1 << 25) - +#define PMU_CC2_GCI2_WAKE (1 << 31) /* PMU chip control3 register */ #define PMU_CHIPCTL3 3 @@ -1444,6 +1518,7 @@ typedef volatile struct { #define PMU_CC4_SW_TYPE_EPHYMII 0x00004000 #define PMU_CC4_SW_TYPE_EPHYRMII 0x00008000 #define PMU_CC4_SW_TYPE_RGMII 0x0000c000 +#define PMU_CC4_DISABLE_LQ_AVAIL (1<<27) /* PMU chip control5 register */ #define PMU_CHIPCTL5 5 @@ -1463,6 +1538,8 @@ typedef volatile struct { #define PMU_CC7_IF_TYPE_MII 0x00000040 #define PMU_CC7_IF_TYPE_RGMII 0x00000080 +#define PMU_CHIPCTL8 8 +#define PMU_CHIPCTL9 9 /* PMU corerev and chip specific PLL controls. 
* PMU_PLL_XX where is PMU corerev and is an arbitrary number @@ -1533,7 +1610,10 @@ typedef volatile struct { #define PMU1_PLL0_PC2_M5DIV_SHIFT 0 #define PMU1_PLL0_PC2_M5DIV_BY_12 0xc #define PMU1_PLL0_PC2_M5DIV_BY_18 0x12 +#define PMU1_PLL0_PC2_M5DIV_BY_31 0x1f #define PMU1_PLL0_PC2_M5DIV_BY_36 0x24 +#define PMU1_PLL0_PC2_M5DIV_BY_42 0x2a +#define PMU1_PLL0_PC2_M5DIV_BY_60 0x3c #define PMU1_PLL0_PC2_M6DIV_MASK 0x0000ff00 #define PMU1_PLL0_PC2_M6DIV_SHIFT 8 #define PMU1_PLL0_PC2_M6DIV_BY_18 0x12 @@ -1555,8 +1635,11 @@ typedef volatile struct { /* pll_ctrl, vco_rng, clkdrive_ch */ #define PMU1_PLL0_PLLCTL5 5 -#define PMU1_PLL0_PC5_CLK_DRV_MASK 0xffffff00 -#define PMU1_PLL0_PC5_CLK_DRV_SHIFT 8 +#define PMU1_PLL0_PC5_CLK_DRV_MASK 0xffffff00 +#define PMU1_PLL0_PC5_CLK_DRV_SHIFT 8 +#define PMU1_PLL0_PC5_ASSERT_CH_MASK 0x3f000000 +#define PMU1_PLL0_PC5_ASSERT_CH_SHIFT 24 +#define PMU1_PLL0_PC5_DEASSERT_CH_MASK 0xff000000 #define PMU1_PLL0_PLLCTL6 6 #define PMU1_PLL0_PLLCTL7 7 @@ -1565,6 +1648,10 @@ typedef volatile struct { #define PMU1_PLLCTL8_OPENLOOP_MASK (1 << 1) #define PMU_PLL4350_OPENLOOP_MASK (1 << 7) +#define PMU1_PLL0_PLLCTL9 9 + +#define PMU1_PLL0_PLLCTL10 10 + /* PMU rev 2 control words */ #define PMU2_PHY_PLL_PLLCTL 4 #define PMU2_SI_PLL_PLLCTL 10 @@ -1820,6 +1907,21 @@ typedef volatile struct { #define PMU4335_PLL0_PC1_MDIV2_MASK 0x0000ff00 #define PMU4335_PLL0_PC1_MDIV2_SHIFT 8 +/* PLL usage in 4347 */ +#define PMU4347_PLL0_PC2_P1DIV_MASK 0x000f0000 +#define PMU4347_PLL0_PC2_P1DIV_SHIFT 16 +#define PMU4347_PLL0_PC2_NDIV_INT_MASK 0x3ff00000 +#define PMU4347_PLL0_PC2_NDIV_INT_SHIFT 20 +#define PMU4347_PLL0_PC3_NDIV_FRAC_MASK 0x000fffff +#define PMU4347_PLL0_PC3_NDIV_FRAC_SHIFT 0 +#define PMU4347_PLL1_PC5_P1DIV_MASK 0xc0000000 +#define PMU4347_PLL1_PC5_P1DIV_SHIFT 30 +#define PMU4347_PLL1_PC6_P1DIV_MASK 0x00000003 +#define PMU4347_PLL1_PC6_P1DIV_SHIFT 0 +#define PMU4347_PLL1_PC6_NDIV_INT_MASK 0x00000ffc +#define PMU4347_PLL1_PC6_NDIV_INT_SHIFT 2 +#define 
PMU4347_PLL1_PC6_NDIV_FRAC_MASK 0xfffff000 +#define PMU4347_PLL1_PC6_NDIV_FRAC_SHIFT 12 /* PLL usage in 5356/5357 */ #define PMU5356_MAINPLL_PLL0 0 @@ -2392,8 +2494,15 @@ typedef volatile struct { #define CCTRL_4330_JTAG_DISABLE 0x00000008 /* 1=disable JTAG interface on mux'd pins */ #define PMU_VREG0_ADDR 0 +#define PMU_VREG0_I_SR_CNTL_EN_SHIFT 0 #define PMU_VREG0_DISABLE_PULLD_BT_SHIFT 2 #define PMU_VREG0_DISABLE_PULLD_WL_SHIFT 3 +#define PMU_VREG0_CBUCKFSW_ADJ_SHIFT 7 +#define PMU_VREG0_CBUCKFSW_ADJ_MASK 0x1F +#define PMU_VREG0_RAMP_SEL_SHIFT 13 +#define PMU_VREG0_RAMP_SEL_MASK 0x7 +#define PMU_VREG0_VFB_RSEL_SHIFT 17 +#define PMU_VREG0_VFB_RSEL_MASK 3 #define PMU_VREG4_ADDR 4 @@ -2721,6 +2830,8 @@ typedef volatile struct { #define CST4360_RSRC_INIT_MODE(cs) ((cs & CST4360_RSRC_INIT_MODE_MASK) >> \ CST4360_RSRC_INIT_MODE_SHIFT) +#define PMU4360_CC1_GPIO7_OVRD (1<<23) /* GPIO7 override */ + /* 43602 PMU resources based on pmu_params.xls version v0.95 */ #define RES43602_LPLDO_PU 0 @@ -2824,8 +2935,32 @@ typedef volatile struct { #define RES4349_HT_AVAIL 29 #define RES4349_MACPHY_CLKAVAIL 30 -#define CR4_4349_RAM_BASE (0x180000) -#define CR4_4349_RAM_BASE_FROM_REV_9 (0x160000) +/* SR Control0 bits */ +#define CC_SR0_4349_SR_ENG_EN_MASK 0x1 +#define CC_SR0_4349_SR_ENG_EN_SHIFT 0 +#define CC_SR0_4349_SR_ENG_CLK_EN (1 << 1) +#define CC_SR0_4349_SR_RSRC_TRIGGER (0xC << 2) +#define CC_SR0_4349_SR_WD_MEM_MIN_DIV (0x3 << 6) +#define CC_SR0_4349_SR_MEM_STBY_ALLOW_MSK (1 << 16) +#define CC_SR0_4349_SR_MEM_STBY_ALLOW_SHIFT 16 +#define CC_SR0_4349_SR_ENABLE_ILP (1 << 17) +#define CC_SR0_4349_SR_ENABLE_ALP (1 << 18) +#define CC_SR0_4349_SR_ENABLE_HT (1 << 19) +#define CC_SR0_4349_SR_ALLOW_PIC (3 << 20) +#define CC_SR0_4349_SR_PMU_MEM_DISABLE (1 << 30) + +/* SR Control0 bits */ +#define CC_SR0_4349_SR_ENG_EN_MASK 0x1 +#define CC_SR0_4349_SR_ENG_EN_SHIFT 0 +#define CC_SR0_4349_SR_ENG_CLK_EN (1 << 1) +#define CC_SR0_4349_SR_RSRC_TRIGGER (0xC << 2) +#define 
CC_SR0_4349_SR_WD_MEM_MIN_DIV (0x3 << 6) +#define CC_SR0_4349_SR_MEM_STBY_ALLOW (1 << 16) +#define CC_SR0_4349_SR_ENABLE_ILP (1 << 17) +#define CC_SR0_4349_SR_ENABLE_ALP (1 << 18) +#define CC_SR0_4349_SR_ENABLE_HT (1 << 19) +#define CC_SR0_4349_SR_ALLOW_PIC (3 << 20) +#define CC_SR0_4349_SR_PMU_MEM_DISABLE (1 << 30) /* SR binary offset is at 8K */ #define CC_SR1_4349_SR_ASM_ADDR (0x10) @@ -2835,22 +2970,236 @@ typedef volatile struct { #define CST4349_SPROM_PRESENT 0x00000010 +#define VREG4_4349_MEMLPLDO_PWRUP_MASK (1 << 31) +#define VREG4_4349_MEMLPLDO_PWRUP_SHIFT (31) +#define VREG4_4349_LPLDO1_OUTPUT_VOLT_ADJ_MASK (0x7 << 15) +#define VREG4_4349_LPLDO1_OUTPUT_VOLT_ADJ_SHIFT (15) +#define CC2_4349_PHY_PWRSE_RST_CNT_MASK (0xF << 0) +#define CC2_4349_PHY_PWRSE_RST_CNT_SHIFT (0) #define CC2_4349_VDDM_PWRSW_EN_MASK (1 << 20) #define CC2_4349_VDDM_PWRSW_EN_SHIFT (20) +#define CC2_4349_MEMLPLDO_PWRSW_EN_MASK (1 << 21) +#define CC2_4349_MEMLPLDO_PWRSW_EN_SHIFT (21) #define CC2_4349_SDIO_AOS_WAKEUP_MASK (1 << 24) #define CC2_4349_SDIO_AOS_WAKEUP_SHIFT (24) +#define CC2_4349_PMUWAKE_EN_MASK (1 << 31) +#define CC2_4349_PMUWAKE_EN_SHIFT (31) +#define CC5_4349_MAC_PHY_CLK_8_DIV (1 << 27) #define CC6_4349_PCIE_CLKREQ_WAKEUP_MASK (1 << 4) #define CC6_4349_PCIE_CLKREQ_WAKEUP_SHIFT (4) #define CC6_4349_PMU_WAKEUP_ALPAVAIL_MASK (1 << 6) #define CC6_4349_PMU_WAKEUP_ALPAVAIL_SHIFT (6) #define CC6_4349_PMU_EN_EXT_PERST_MASK (1 << 13) +#define CC6_4349_PMU_EN_L2_DEASSERT_MASK (1 << 14) +#define CC6_4349_PMU_EN_L2_DEASSERT_SHIF (14) #define CC6_4349_PMU_ENABLE_L2REFCLKPAD_PWRDWN (1 << 15) #define CC6_4349_PMU_EN_MDIO_MASK (1 << 16) #define CC6_4349_PMU_EN_ASSERT_L2_MASK (1 << 25) +/* 4349 GCI function sel values */ +/* + * Reference + * http://hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/ToplevelArchitecture4349B0#Function_Sel + */ +#define CC4349_FNSEL_HWDEF (0) +#define CC4349_FNSEL_SAMEASPIN (1) +#define CC4349_FNSEL_GPIO (2) +#define CC4349_FNSEL_FAST_UART (3) +#define 
CC4349_FNSEL_GCI0 (4) +#define CC4349_FNSEL_GCI1 (5) +#define CC4349_FNSEL_DGB_UART (6) +#define CC4349_FNSEL_I2C (7) +#define CC4349_FNSEL_SPROM (8) +#define CC4349_FNSEL_MISC0 (9) +#define CC4349_FNSEL_MISC1 (10) +#define CC4349_FNSEL_MISC2 (11) +#define CC4349_FNSEL_IND (12) +#define CC4349_FNSEL_PDN (13) +#define CC4349_FNSEL_PUP (14) +#define CC4349_FNSEL_TRISTATE (15) + +/* 4364 related */ +#define RES4364_LPLDO_PU 0 +#define RES4364_BG_PU 1 +#define RES4364_MEMLPLDO_PU 2 +#define RES4364_PALDO3P3_PU 3 +#define RES4364_CBUCK_1P2 4 +#define RES4364_CBUCK_1V8 5 +#define RES4364_COLD_START_WAIT 6 +#define RES4364_SR_3x3_VDDM_PWRSW 7 +#define RES4364_3x3_MACPHY_CLKAVAIL 8 +#define RES4364_XTALLDO_PU 9 +#define RES4364_LDO3P3_PU 10 +#define RES4364_OTP_PU 11 +#define RES4364_XTAL_PU 12 +#define RES4364_SR_CLK_START 13 +#define RES4364_3x3_RADIO_PU 14 +#define RES4364_RF_LDO 15 +#define RES4364_PERST_OVR 16 +#define RES4364_WL_CORE_RDY 17 +#define RES4364_ILP_REQ 18 +#define RES4364_ALP_AVAIL 19 +#define RES4364_1x1_MINI_PMU 20 +#define RES4364_1x1_RADIO_PU 21 +#define RES4364_SR_CLK_STABLE 22 +#define RES4364_SR_SAVE_RESTORE 23 +#define RES4364_SR_PHY_PWRSW 24 +#define RES4364_SR_VDDM_PWRSW 25 +#define RES4364_SR_SUBCORE_PWRSW 26 +#define RES4364_SR_SLEEP 27 +#define RES4364_HT_START 28 +#define RES4364_HT_AVAIL 29 +#define RES4364_MACPHY_CLKAVAIL 30 + +/* 4349 GPIO */ +#define CC4349_PIN_GPIO_00 (0) +#define CC4349_PIN_GPIO_01 (1) +#define CC4349_PIN_GPIO_02 (2) +#define CC4349_PIN_GPIO_03 (3) +#define CC4349_PIN_GPIO_04 (4) +#define CC4349_PIN_GPIO_05 (5) +#define CC4349_PIN_GPIO_06 (6) +#define CC4349_PIN_GPIO_07 (7) +#define CC4349_PIN_GPIO_08 (8) +#define CC4349_PIN_GPIO_09 (9) +#define CC4349_PIN_GPIO_10 (10) +#define CC4349_PIN_GPIO_11 (11) +#define CC4349_PIN_GPIO_12 (12) +#define CC4349_PIN_GPIO_13 (13) +#define CC4349_PIN_GPIO_14 (14) +#define CC4349_PIN_GPIO_15 (15) +#define CC4349_PIN_GPIO_16 (16) +#define CC4349_PIN_GPIO_17 (17) +#define 
CC4349_PIN_GPIO_18 (18) +#define CC4349_PIN_GPIO_19 (19) + +/* Mask used to decide whether HOSTWAKE MUX to be performed or not */ +#define MUXENAB4349_HOSTWAKE_MASK (0x000000f0) /* configure GPIO for SDIO host_wake */ +#define MUXENAB4349_HOSTWAKE_SHIFT 4 +#define MUXENAB4349_GETIX(val, name) \ + ((((val) & MUXENAB4349_ ## name ## _MASK) >> MUXENAB4349_ ## name ## _SHIFT) - 1) + +#define CR4_4364_RAM_BASE (0x160000) + +/* SR binary offset is at 8K */ +#define CC_SR1_4364_SR_CORE0_ASM_ADDR (0x10) +#define CC_SR1_4364_SR_CORE1_ASM_ADDR (0x10) + +#define CC_SR0_4364_SR_ENG_EN_MASK 0x1 +#define CC_SR0_4364_SR_ENG_EN_SHIFT 0 +#define CC_SR0_4364_SR_ENG_CLK_EN (1 << 1) +#define CC_SR0_4364_SR_RSRC_TRIGGER (0xC << 2) +#define CC_SR0_4364_SR_WD_MEM_MIN_DIV (0x3 << 6) +#define CC_SR0_4364_SR_MEM_STBY_ALLOW_MSK (1 << 16) +#define CC_SR0_4364_SR_MEM_STBY_ALLOW_SHIFT 16 +#define CC_SR0_4364_SR_ENABLE_ILP (1 << 17) +#define CC_SR0_4364_SR_ENABLE_ALP (1 << 18) +#define CC_SR0_4364_SR_ENABLE_HT (1 << 19) +#define CC_SR0_4364_SR_ALLOW_PIC (3 << 20) +#define CC_SR0_4364_SR_PMU_MEM_DISABLE (1 << 30) + +#define PMU_4364_CC1_ENABLE_BBPLL_PWR_DWN (0x1 << 4) +#define PMU_4364_CC1_BBPLL_ARESET_LQ_TIME (0x1 << 8) +#define PMU_4364_CC1_BBPLL_ARESET_HT_UPTIME (0x1 << 10) +#define PMU_4364_CC1_BBPLL_DRESET_LQ_UPTIME (0x1 << 12) +#define PMU_4364_CC1_BBPLL_DRESET_HT_UPTIME (0x4 << 16) +#define PMU_4364_CC1_SUBCORE_PWRSW_UP_DELAY (0x8 << 20) +#define PMU_4364_CC1_SUBCORE_PWRSW_RESET_CNT (0x4 << 24) + +#define PMU_4364_CC2_PHY_PWRSW_RESET_CNT (0x2 << 0) +#define PMU_4364_CC2_PHY_PWRSW_RESET_MASK (0x7) +#define PMU_4364_CC2_SEL_CHIPC_IF_FOR_SR (1 << 21) + +#define PMU_4364_CC3_MEMLPLDO3x3_PWRSW_FORCE_MASK (1 << 23) +#define PMU_4364_CC3_MEMLPLDO1x1_PWRSW_FORCE_MASK (1 << 24) +#define PMU_4364_CC3_CBUCK1P2_PU_SR_VDDM_REQ_ON (1 << 25) +#define PMU_4364_CC3_MEMLPLDO3x3_PWRSW_FORCE_OFF (0) +#define PMU_4364_CC3_MEMLPLDO1x1_PWRSW_FORCE_OFF (0) + + +#define PMU_4364_CC5_DISABLE_BBPLL_CLKOUT6_DIV2_MASK 
(1 << 26) +#define PMU_4364_CC5_ENABLE_ARMCR4_DEBUG_CLK_MASK (1 << 4) +#define PMU_4364_CC5_DISABLE_BBPLL_CLKOUT6_DIV2 (1 << 26) +#define PMU_4364_CC5_ENABLE_ARMCR4_DEBUG_CLK_OFF (0) + +#define PMU_4364_CC6_MDI_RESET_MASK (1 << 16) +#define PMU_4364_CC6_USE_CLK_REQ_MASK (1 << 18) +#define PMU_4364_CC6_HIGHER_CLK_REQ_ALP_MASK (1 << 20) +#define PMU_4364_CC6_HT_AVAIL_REQ_ALP_AVAIL_MASK (1 << 21) +#define PMU_4364_CC6_PHY_CLK_REQUESTS_ALP_AVAIL_MASK (1 << 22) +#define PMU_4364_CC6_MDI_RESET (1 << 16) +#define PMU_4364_CC6_USE_CLK_REQ (1 << 18) + +#define PMU_4364_CC6_HIGHER_CLK_REQ_ALP (1 << 20) +#define PMU_4364_CC6_HT_AVAIL_REQ_ALP_AVAIL (1 << 21) +#define PMU_4364_CC6_PHY_CLK_REQUESTS_ALP_AVAIL (1 << 22) + +#define PMU_4364_VREG0_DISABLE_BT_PULL_DOWN (1 << 2) +#define PMU_4364_VREG1_DISABLE_WL_PULL_DOWN (1 << 2) + +#define PMU_VREG_0 (0x0) +#define PMU_VREG_1 (0x1) +#define PMU_VREG_3 (0x3) +#define PMU_VREG_4 (0x4) +#define PMU_VREG_5 (0x5) +#define PMU_VREG_6 (0x6) + +#define PMU_4364_VREG3_DISABLE_WPT_REG_ON_PULL_DOWN (1 << 11) + +#define PMU_4364_VREG4_MEMLPLDO_PU_ON (1 << 31) +#define PMU_4364_VREG4_LPLPDO_ADJ (3 << 16) +#define PMU_4364_VREG4_LPLPDO_ADJ_MASK (3 << 16) +#define PMU_4364_VREG5_MAC_CLK_1x1_AUTO (0x1 << 18) +#define PMU_4364_VREG5_SR_AUTO (0x1 << 20) +#define PMU_4364_VREG5_BT_PWM_MASK (0x1 << 21) +#define PMU_4364_VREG5_BT_AUTO (0x1 << 22) +#define PMU_4364_VREG5_WL2CLB_DVFS_EN_MASK (0x1 << 23) +#define PMU_4364_VREG5_BT_PWMK (0) +#define PMU_4364_VREG5_WL2CLB_DVFS_EN (0) + +#define PMU_4364_VREG6_BBPLL_AUTO (0x1 << 17) +#define PMU_4364_VREG6_MINI_PMU_PWM (0x1 << 18) +#define PMU_4364_VREG6_LNLDO_AUTO (0x1 << 21) +#define PMU_4364_VREG6_PCIE_PWRDN_0_AUTO (0x1 << 23) +#define PMU_4364_VREG6_PCIE_PWRDN_1_AUTO (0x1 << 25) +#define PMU_4364_VREG6_MAC_CLK_3x3_PWM (0x1 << 27) +#define PMU_4364_VREG6_ENABLE_FINE_CTRL (0x1 << 30) + +#define PMU_4364_PLL0_DISABLE_CHANNEL6 (0x1 << 18) + +#define CC_GCI1_REG (0x1) +#define 
CC_GCI1_4364_IND_STATE_FOR_GPIO9_11 (0x0ccccccc) +#define CC2_4364_SDIO_AOS_WAKEUP_MASK (1 << 24) +#define CC2_4364_SDIO_AOS_WAKEUP_SHIFT (24) + +#define CC6_4364_PCIE_CLKREQ_WAKEUP_MASK (1 << 4) +#define CC6_4364_PCIE_CLKREQ_WAKEUP_SHIFT (4) +#define CC6_4364_PMU_WAKEUP_ALPAVAIL_MASK (1 << 6) +#define CC6_4364_PMU_WAKEUP_ALPAVAIL_SHIFT (6) + +#define CST4364_CHIPMODE_SDIOD(cs) (((cs) & (1 << 6)) != 0) /* SDIO */ +#define CST4364_CHIPMODE_PCIE(cs) (((cs) & (1 << 7)) != 0) /* PCIE */ +#define CST4364_SPROM_PRESENT 0x00000010 + +#define PMU_4364_MACCORE_0_RES_REQ_MASK 0x3FCBF7FF +#define PMU_4364_MACCORE_1_RES_REQ_MASK 0x7FFB3647 + + +#define PMU1_PLL0_SWITCH_MACCLOCK_120MHZ (0) +#define PMU1_PLL0_SWITCH_MACCLOCK_160MHZ (1) +#define TSF_CLK_FRAC_L_4364_120MHZ 0x8889 +#define TSF_CLK_FRAC_H_4364_120MHZ 0x8 +#define TSF_CLK_FRAC_L_4364_160MHZ 0x6666 +#define TSF_CLK_FRAC_H_4364_160MHZ 0x6 +#define PMU1_PLL0_PC1_M2DIV_VALUE_120MHZ 8 +#define PMU1_PLL0_PC1_M2DIV_VALUE_160MHZ 6 + +#define CST4347_CHIPMODE_SDIOD(cs) (((cs) & (1 << 6)) != 0) /* SDIO */ +#define CST4347_CHIPMODE_PCIE(cs) (((cs) & (1 << 7)) != 0) /* PCIE */ +#define CST4347_SPROM_PRESENT 0x00000010 /* 43430 PMU resources based on pmu_params.xls */ #define RES43430_LPLDO_PU 0 @@ -2901,10 +3250,26 @@ typedef volatile struct { #define CST43430_TRIM_EN 0x00800000 #define CST43430_DIN_PACKAGE_OPTION 0x10000000 -#define PMU_MACCORE_0_RES_REQ_TIMER 0x19000000 +#define PMU43430_PLL0_PC2_P1DIV_MASK 0x0000000f +#define PMU43430_PLL0_PC2_P1DIV_SHIFT 0 +#define PMU43430_PLL0_PC2_NDIV_INT_MASK 0x0000ff80 +#define PMU43430_PLL0_PC2_NDIV_INT_SHIFT 7 +#define PMU43430_PLL0_PC4_MDIV2_MASK 0x0000ff00 +#define PMU43430_PLL0_PC4_MDIV2_SHIFT 8 + +/* 43430 chip SR definitions */ +#define SRAM_43430_SR_ASM_ADDR 0x7f800 +#define CC_SR1_43430_SR_ASM_ADDR ((SRAM_43430_SR_ASM_ADDR - 0x60000) >> 8) + +/* 43430 PMU Chip Control bits */ +#define CC2_43430_SDIO_AOS_WAKEUP_MASK (1 << 24) +#define CC2_43430_SDIO_AOS_WAKEUP_SHIFT (24) + + 
+#define PMU_MACCORE_0_RES_REQ_TIMER 0x1d000000 #define PMU_MACCORE_0_RES_REQ_MASK 0x5FF2364F -#define PMU_MACCORE_1_RES_REQ_TIMER 0x19000000 +#define PMU_MACCORE_1_RES_REQ_TIMER 0x1d000000 #define PMU_MACCORE_1_RES_REQ_MASK 0x5FF2364F /* defines to detect active host interface in use */ @@ -2971,11 +3336,13 @@ typedef volatile struct { #define CR4_4345_LT_C0_RAM_BASE (0x1b0000) #define CR4_4345_GE_C0_RAM_BASE (0x198000) #define CR4_4349_RAM_BASE (0x180000) +#define CR4_4349_RAM_BASE_FROM_REV_9 (0x160000) #define CR4_4350_RAM_BASE (0x180000) #define CR4_4360_RAM_BASE (0x0) #define CR4_43602_RAM_BASE (0x180000) #define CA7_4365_RAM_BASE (0x200000) +#define CR4_4347_RAM_BASE (0x170000) /* 4335 chip OTP present & OTP select bits. */ #define SPROM4335_OTP_SELECT 0x00000010 @@ -3020,6 +3387,98 @@ typedef volatile struct { /* 4335 resources--END */ +/* 43012 PMU resources based on pmu_params.xls - Start */ +#define RES43012_MEMLPLDO_PU 0 +#define RES43012_PMU_SLEEP 1 +#define RES43012_FAST_LPO 2 +#define RES43012_BTLPO_3P3 3 +#define RES43012_SR_POK 4 +#define RES43012_DUMMY_PWRSW 5 +#define RES43012_DUMMY_LDO3P3 6 +#define RES43012_DUMMY_BT_LDO3P3 7 +#define RES43012_DUMMY_RADIO 8 +#define RES43012_VDDB_VDDRET 9 +#define RES43012_HV_LDO3P3 10 +#define RES43012_OTP_PU 11 +#define RES43012_XTAL_PU 12 +#define RES43012_SR_CLK_START 13 +#define RES43012_XTAL_STABLE 14 +#define RES43012_FCBS 15 +#define RES43012_CBUCK_MODE 16 +#define RES43012_WL_CORE_RDY 17 +#define RES43012_ILP_REQ 18 +#define RES43012_ALP_AVAIL 19 +#define RES43012_RADIO_LDO 20 +#define RES43012_MINI_PMU 21 +#define RES43012_DUMMY 22 +#define RES43012_SR_SAVE_RESTORE 23 +#define RES43012_SR_PHY_PWRSW 24 +#define RES43012_SR_VDDB_CLDO 25 +#define RES43012_SR_SUBCORE_PWRSW 26 +#define RES43012_SR_SLEEP 27 +#define RES43012_HT_START 28 +#define RES43012_HT_AVAIL 29 +#define RES43012_MACPHY_CLK_AVAIL 30 +#define CST43012_SPROM_PRESENT 0x00000010 + +/* PLL usage in 43012 */ +#define 
PMU43012_PLL0_PC0_NDIV_INT_MASK 0x0000003f +#define PMU43012_PLL0_PC0_NDIV_INT_SHIFT 0 +#define PMU43012_PLL0_PC0_NDIV_FRAC_MASK 0xfffffc00 +#define PMU43012_PLL0_PC0_NDIV_FRAC_SHIFT 10 +#define PMU43012_PLL0_PC3_PDIV_MASK 0x00003c00 +#define PMU43012_PLL0_PC3_PDIV_SHIFT 10 +#define PMU43012_PLL_NDIV_FRAC_BITS 20 +#define PMU43012_PLL_P_DIV_SCALE_BITS 10 + +#define CCTL_43012_ARM_OFFCOUNT_MASK 0x00000003 +#define CCTL_43012_ARM_OFFCOUNT_SHIFT 0 +#define CCTL_43012_ARM_ONCOUNT_MASK 0x0000000c +#define CCTL_43012_ARM_ONCOUNT_SHIFT 2 + +/* PMU Rev >= 30 */ +#define PMU30_ALPCLK_ONEMHZ_ENAB 0x80000000 + +/* 43012 PMU Chip Control Registers */ +#define PMUCCTL02_43012_SUBCORE_PWRSW_FORCE_ON 0x00000010 +#define PMUCCTL02_43012_PHY_PWRSW_FORCE_ON 0x00000040 +#define PMUCCTL02_43012_LHL_TIMER_SELECT 0x00000800 +#define PMUCCTL02_43012_RFLDO3P3_PU_FORCE_ON 0x00008000 +#define PMUCCTL02_43012_WL2CDIG_I_PMU_SLEEP_ENAB 0x00010000 + +#define PMUCCTL04_43012_BBPLL_ENABLE_PWRDN 0x00100000 +#define PMUCCTL04_43012_BBPLL_ENABLE_PWROFF 0x00200000 +#define PMUCCTL04_43012_FORCE_BBPLL_ARESET 0x00400000 +#define PMUCCTL04_43012_FORCE_BBPLL_DRESET 0x00800000 +#define PMUCCTL04_43012_FORCE_BBPLL_PWRDN 0x01000000 +#define PMUCCTL04_43012_FORCE_BBPLL_ISOONHIGH 0x02000000 +#define PMUCCTL04_43012_FORCE_BBPLL_PWROFF 0x04000000 +#define PMUCCTL04_43012_DISABLE_LQ_AVAIL 0x08000000 +#define PMUCCTL04_43012_DISABLE_HT_AVAIL 0x10000000 +#define PMUCCTL04_43012_USE_LOCK 0x20000000 +#define PMUCCTL04_43012_OPEN_LOOP_ENABLE 0x40000000 +#define PMUCCTL04_43012_FORCE_OPEN_LOOP 0x80000000 +#define PMUCCTL08_43012_XTAL_CORE_SIZE_PMOS_NORMAL_MASK 0x00000FC0 +#define PMUCCTL08_43012_XTAL_CORE_SIZE_PMOS_NORMAL_SHIFT 6 +#define PMUCCTL08_43012_XTAL_CORE_SIZE_NMOS_NORMAL_MASK 0x00FC0000 +#define PMUCCTL08_43012_XTAL_CORE_SIZE_NMOS_NORMAL_SHIFT 18 +#define PMUCCTL08_43012_XTAL_SEL_BIAS_RES_NORMAL_MASK 0x07000000 +#define PMUCCTL08_43012_XTAL_SEL_BIAS_RES_NORMAL_SHIFT 24 +#define 
PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_NORMAL_MASK 0x0003F000 +#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_NORMAL_SHIFT 12 +#define PMUCCTL09_43012_XTAL_CORESIZE_RES_BYPASS_NORMAL_MASK 0x00000038 +#define PMUCCTL09_43012_XTAL_CORESIZE_RES_BYPASS_NORMAL_SHIFT 3 +#define PMUCCTL13_43012_FCBS_UP_TRIG_EN 0x00000400 + +#define PMUCCTL14_43012_ARMCM3_RESET_INITVAL 0x00000001 +#define PMUCCTL14_43012_DOT11MAC_CLKEN_INITVAL 0x00000020 +#define PMUCCTL14_43012_SDIOD_RESET_INIVAL 0x00000400 +#define PMUCCTL14_43012_SDIO_CLK_DMN_RESET_INITVAL 0x00001000 +#define PMUCCTL14_43012_SOCRAM_CLKEN_INITVAL 0x00004000 +#define PMUCCTL14_43012_M2MDMA_RESET_INITVAL 0x00008000 +#define PMUCCTL14_43012_DISABLE_LQ_AVAIL 0x08000000 + + /* 4345 Chip specific ChipStatus register bits */ #define CST4345_SPROM_MASK 0x00000020 #define CST4345_SFLASH_MASK 0x00000040 @@ -3141,10 +3600,13 @@ typedef volatile struct { #define MUXENAB4350_UART_MASK (0x0000000f) #define MUXENAB4350_UART_SHIFT 0 -#define MUXENAB4350_HOSTWAKE_MASK (0x000000f0) /**< configure GPIO for SDIO host_wake */ +#define MUXENAB4350_HOSTWAKE_MASK (0x000000f0) /**< configure GPIO for host_wake */ #define MUXENAB4350_HOSTWAKE_SHIFT 4 +#define MUXENAB4349_UART_MASK (0xf) +#define CC4350_GPIO_COUNT 16 + /* 4350 GCI function sel values */ #define CC4350_FNSEL_HWDEF (0) #define CC4350_FNSEL_SAMEASPIN (1) @@ -3306,6 +3768,9 @@ typedef volatile struct { #define CC_GCI_CHIPCTRL_06 (6) #define CC_GCI_CHIPCTRL_07 (7) #define CC_GCI_CHIPCTRL_08 (8) +#define CC_GCI_CHIPCTRL_09 (9) +#define CC_GCI_CHIPCTRL_10 (10) +#define CC_GCI_CHIPCTRL_10 (10) #define CC_GCI_CHIPCTRL_11 (11) #define CC_GCI_XTAL_BUFSTRG_NFC (0xff << 12) @@ -3314,10 +3779,24 @@ typedef volatile struct { #define CC_GCI_NUMCHIPCTRLREGS(cap1) ((cap1 & 0xF00) >> 8) +/* GCI chipstatus register indices */ +#define GCI_CHIPSTATUS_00 (0) +#define GCI_CHIPSTATUS_01 (1) +#define GCI_CHIPSTATUS_02 (2) +#define GCI_CHIPSTATUS_03 (3) +#define GCI_CHIPSTATUS_04 (4) +#define 
GCI_CHIPSTATUS_05 (5) +#define GCI_CHIPSTATUS_06 (6) +#define GCI_CHIPSTATUS_07 (7) +#define GCI_CHIPSTATUS_08 (8) + +/* 43021 GCI chipstatus registers */ +#define GCI43012_CHIPSTATUS_07_BBPLL_LOCK_MASK (1 << 3) + /* 4345 PMU resources */ #define RES4345_LPLDO_PU 0 #define RES4345_PMU_BG_PU 1 -#define RES4345_PMU_SLEEP 2 +#define RES4345_PMU_SLEEP 2 #define RES4345_HSICLDO_PU 3 #define RES4345_CBUCK_LPOM_PU 4 #define RES4345_CBUCK_PFM_PU 5 @@ -3347,6 +3826,44 @@ typedef volatile struct { #define RES4345_HT_AVAIL 29 #define RES4345_MACPHY_CLK_AVAIL 30 +/* 43012 pins + * note: only the values set as default/used are added here. + */ +#define CC43012_PIN_GPIO_00 (0) +#define CC43012_PIN_GPIO_01 (1) +#define CC43012_PIN_GPIO_02 (2) +#define CC43012_PIN_GPIO_03 (3) +#define CC43012_PIN_GPIO_04 (4) +#define CC43012_PIN_GPIO_05 (5) +#define CC43012_PIN_GPIO_06 (6) +#define CC43012_PIN_GPIO_07 (7) +#define CC43012_PIN_GPIO_08 (8) +#define CC43012_PIN_GPIO_09 (9) +#define CC43012_PIN_GPIO_10 (10) +#define CC43012_PIN_GPIO_11 (11) +#define CC43012_PIN_GPIO_12 (12) +#define CC43012_PIN_GPIO_13 (13) +#define CC43012_PIN_GPIO_14 (14) +#define CC43012_PIN_GPIO_15 (15) + +/* 43012 GCI function sel values */ +#define CC43012_FNSEL_HWDEF (0) +#define CC43012_FNSEL_SAMEASPIN (1) +#define CC43012_FNSEL_GPIO0 (2) +#define CC43012_FNSEL_GPIO1 (3) +#define CC43012_FNSEL_GCI0 (4) +#define CC43012_FNSEL_GCI1 (5) +#define CC43012_FNSEL_DBG_UART (6) +#define CC43012_FNSEL_I2C (7) +#define CC43012_FNSEL_BT_SFLASH (8) +#define CC43012_FNSEL_MISC0 (9) +#define CC43012_FNSEL_MISC1 (10) +#define CC43012_FNSEL_MISC2 (11) +#define CC43012_FNSEL_IND (12) +#define CC43012_FNSEL_PDN (13) +#define CC43012_FNSEL_PUP (14) +#define CC43012_FNSEL_TRI (15) + /* 4335 pins * note: only the values set as default/used are added here. 
*/ @@ -3478,6 +3995,28 @@ typedef volatile struct { #define CC4345_GCI_AVS_CTRL_SHIFT (2) #define CC4345_GCI_AVS_CTRL_ENAB (1 << 5) +/* 43430 Pin */ +#define CC43430_PIN_GPIO_00 (0) +#define CC43430_PIN_GPIO_01 (1) +#define CC43430_PIN_GPIO_02 (2) +#define CC43430_PIN_GPIO_07 (7) +#define CC43430_PIN_GPIO_08 (8) +#define CC43430_PIN_GPIO_09 (9) +#define CC43430_PIN_GPIO_10 (10) + +#define CC43430_FNSEL_SDIO_INT (2) +#define CC43430_FNSEL_6_FAST_UART (6) +#define CC43430_FNSEL_10_FAST_UART (10) + +#define MUXENAB43430_UART_MASK (0x0000000f) +#define MUXENAB43430_UART_SHIFT 0 +#define MUXENAB43430_HOSTWAKE_MASK (0x000000f0) /* configure GPIO for SDIO host_wake */ +#define MUXENAB43430_HOSTWAKE_SHIFT 4 + +#define CC43430_FNSEL_SAMEASPIN (1) +#define CC43430_RFSWCTRL_EN_MASK (0x7f8) +#define CC43430_RFSWCTRL_EN_SHIFT (3) + /* GCI GPIO for function sel GCI-0/GCI-1 */ #define CC_GCI_GPIO_0 (0) #define CC_GCI_GPIO_1 (1) @@ -3575,6 +4114,13 @@ typedef volatile struct { #define GCI_WAKE_ON_GCI_GPIO8 8 #define GCI_WAKE_ON_GCI_SECI_IN 9 +/* 43012 ULB dividers */ +#define PMU43012_CC0_ULB_DIVMASK 0xfffffc00 +#define PMU43012_10MHZ_ULB_DIV ((1 << 0) | (1 << 5)) +#define PMU43012_5MHZ_ULB_DIV ((3 << 0) | (3 << 5)) +#define PMU43012_2P5MHZ_ULB_DIV ((7 << 0) | (7 << 5)) +#define PMU43012_ULB_NO_DIV 0 + /* 4335 MUX options. each nibble belongs to a setting. Non-zero value specifies a logic * for now only UART for bootloader. */ @@ -3586,6 +4132,10 @@ typedef volatile struct { #define MUXENAB4335_GETIX(val, name) \ ((((val) & MUXENAB4335_ ## name ## _MASK) >> MUXENAB4335_ ## name ## _SHIFT) - 1) +/* 43012 MUX options */ +#define MUXENAB43012_HOSTWAKE_MASK (0x00000001) +#define MUXENAB43012_GETIX(val, name) (val - 1) + /* * Maximum delay for the PMU state transition in us. * This is an upper bound intended for spinwaits etc. 
@@ -3595,6 +4145,73 @@ typedef volatile struct { /* PMU resource up transition time in ILP cycles */ #define PMURES_UP_TRANSITION 2 +/* 53573 PMU Resource */ +#define RES53573_REGULATOR_PU 0 +#define RES53573_XTALLDO_PU 1 +#define RES53573_XTAL_PU 2 +#define RES53573_MINI_PMU 3 +#define RES53573_RADIO_PU 4 +#define RES53573_ILP_REQ 5 +#define RES53573_ALP_AVAIL 6 +#define RES53573_CPUPLL_LDO_PU 7 +#define RES53573_CPU_PLL_PU 8 +#define RES53573_WLAN_BB_PLL_PU 9 +#define RES53573_MISCPLL_LDO_PU 10 +#define RES53573_MISCPLL_PU 11 +#define RES53573_AUDIOPLL_PU 12 +#define RES53573_PCIEPLL_LDO_PU 13 +#define RES53573_PCIEPLL_PU 14 +#define RES53573_DDRPLL_LDO_PU 15 +#define RES53573_DDRPLL_PU 16 +#define RES53573_HT_AVAIL 17 +#define RES53573_MACPHY_CLK_AVAIL 18 +#define RES53573_OTP_PU 19 +#define RES53573_RSVD20 20 + +/* 53573 Chip status registers */ +#define CST53573_LOCK_CPUPLL 0x00000001 +#define CST53573_LOCK_MISCPLL 0x00000002 +#define CST53573_LOCK_DDRPLL 0x00000004 +#define CST53573_LOCK_PCIEPLL 0x00000008 +#define CST53573_EPHY_ENERGY_DET 0x00001f00 +#define CST53573_RAW_ENERGY 0x0003e000 +#define CST53573_BBPLL_LOCKED_O 0x00040000 +#define CST53573_SERDES_PIPE_PLLLOCK 0x00080000 +#define CST53573_STRAP_PCIE_EP_MODE 0x00100000 +#define CST53573_EPHY_PLL_LOCK 0x00200000 +#define CST53573_AUDIO_PLL_LOCKED_O 0x00400000 +#define CST53573_PCIE_LINK_IN_L11 0x01000000 +#define CST53573_PCIE_LINK_IN_L12 0x02000000 +#define CST53573_DIN_PACKAGEOPTION 0xf0000000 + +/* 53573 Chip control registers macro definitions */ +#define PMU_53573_CHIPCTL1 1 +#define PMU_53573_CC1_HT_CLK_REQ_CTRL_MASK 0x00000010 +#define PMU_53573_CC1_HT_CLK_REQ_CTRL 0x00000010 + +#define PMU_53573_CHIPCTL3 3 +#define PMU_53573_CC3_ENABLE_CLOSED_LOOP_MASK 0x00000010 +#define PMU_53573_CC3_ENABLE_CLOSED_LOOP 0x00000000 +#define PMU_53573_CC3_ENABLE_BBPLL_PWRDOWN_MASK 0x00000002 +#define PMU_53573_CC3_ENABLE_BBPLL_PWRDOWN 0x00000002 + +#define CST53573_CHIPMODE_PCIE(cs) FALSE + + +/* SECI Status 
(0x134) & Mask (0x138) bits - Rev 35 */ +#define SECI_STAT_BI (1 << 0) /* Break Interrupt */ +#define SECI_STAT_SPE (1 << 1) /* Parity Error */ +#define SECI_STAT_SFE (1 << 2) /* Parity Error */ +#define SECI_STAT_SDU (1 << 3) /* Data Updated */ +#define SECI_STAT_SADU (1 << 4) /* Auxiliary Data Updated */ +#define SECI_STAT_SAS (1 << 6) /* AUX State */ +#define SECI_STAT_SAS2 (1 << 7) /* AUX2 State */ +#define SECI_STAT_SRITI (1 << 8) /* Idle Timer Interrupt */ +#define SECI_STAT_STFF (1 << 9) /* Tx FIFO Full */ +#define SECI_STAT_STFAE (1 << 10) /* Tx FIFO Almost Empty */ +#define SECI_STAT_SRFE (1 << 11) /* Rx FIFO Empty */ +#define SECI_STAT_SRFAF (1 << 12) /* Rx FIFO Almost Full */ +#define SECI_STAT_SFCE (1 << 13) /* Flow Control Event */ /* SECI configuration */ #define SECI_MODE_UART 0x0 @@ -3617,6 +4234,7 @@ typedef volatile struct { #define SECI_REFRESH_REQ 0xDA /* seci clk_ctl_st bits */ +#define CLKCTL_STS_HT_AVAIL_REQ (1 << 4) #define CLKCTL_STS_SECI_CLK_REQ (1 << 8) #define CLKCTL_STS_SECI_CLK_AVAIL (1 << 24) @@ -3773,4 +4391,26 @@ typedef volatile struct { #define GCI_GPIO_STS_VALUE (1 << GCI_GPIO_STS_VALUE_BIT) +/* SR Power Control */ +#define SRPWR_DMN0_PCIE (0) /* PCIE */ +#define SRPWR_DMN0_PCIE_SHIFT (SRPWR_DMN0_PCIE) /* PCIE */ +#define SRPWR_DMN0_PCIE_MASK (1 << SRPWR_DMN0_PCIE_SHIFT) /* PCIE */ +#define SRPWR_DMN1_ARMBPSD (1) /* ARM/BP/SDIO */ +#define SRPWR_DMN1_ARMBPSD_SHIFT (SRPWR_DMN1_ARMBPSD) /* ARM/BP/SDIO */ +#define SRPWR_DMN1_ARMBPSD_MASK (1 << SRPWR_DMN1_ARMBPSD_SHIFT) /* ARM/BP/SDIO */ +#define SRPWR_DMN2_MACAUX (2) /* MAC/Phy Aux */ +#define SRPWR_DMN2_MACAUX_SHIFT (SRPWR_DMN2_MACAUX) /* MAC/Phy Aux */ +#define SRPWR_DMN2_MACAUX_MASK (1 << SRPWR_DMN2_MACAUX_SHIFT) /* MAC/Phy Aux */ +#define SRPWR_DMN3_MACMAIN (3) /* MAC/Phy Main */ +#define SRPWR_DMN3_MACMAIN_SHIFT (SRPWR_DMN3_MACMAIN) /* MAC/Phy Main */ +#define SRPWR_DMN3_MACMAIN_MASK (1 << SRPWR_DMN3_MACMAIN_SHIFT) /* MAC/Phy Main */ +#define SRPWR_DMN_ALL_MASK (0xF) + +#define 
SRPWR_REQON_SHIFT (8) /* PowerOnRequest[11:8] */ +#define SRPWR_REQON_MASK (SRPWR_DMN_ALL_MASK << SRPWR_REQON_SHIFT) +#define SRPWR_STATUS_SHIFT (16) /* ExtPwrStatus[19:16], RO */ +#define SRPWR_STATUS_MASK (SRPWR_DMN_ALL_MASK << SRPWR_STATUS_SHIFT) +#define SRPWR_DMN_SHIFT (28) /* PowerDomain[31:28], RO */ +#define SRPWR_DMN_MASK (SRPWR_DMN_ALL_MASK << SRPWR_DMN_SHIFT) + #endif /* _SBCHIPC_H */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbconfig.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbconfig.h index 53e26ae4e320..ad9c408cd344 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbconfig.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbconfig.h @@ -1,7 +1,7 @@ /* * Broadcom SiliconBackplane hardware register definitions. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbgci.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbgci.h new file mode 100644 index 000000000000..f04232daca1a --- /dev/null +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbgci.h @@ -0,0 +1,248 @@ +/* + * SiliconBackplane GCI core hardware definitions + * + * Copyright (C) 1999-2017, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the 
resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: sbgci.h 612498 2016-01-14 05:09:09Z $ + */ + +#ifndef _SBGCI_H +#define _SBGCI_H + +#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + +#define GCI_OFFSETOF(sih, reg) \ + (AOB_ENAB(sih) ? OFFSETOF(gciregs_t, reg) : OFFSETOF(chipcregs_t, reg)) +#define GCI_CORE_IDX(sih) (AOB_ENAB(sih) ? 
si_findcoreidx(sih, GCI_CORE_ID, 0) : SI_CC_IDX) + +typedef volatile struct { + uint32 gci_corecaps0; /* 0x000 */ + uint32 gci_corecaps1; /* 0x004 */ + uint32 gci_corecaps2; /* 0x008 */ + uint32 gci_corectrl; /* 0x00c */ + uint32 gci_corestat; /* 0x010 */ + uint32 gci_intstat; /* 0x014 */ + uint32 gci_intmask; /* 0x018 */ + uint32 gci_wakemask; /* 0x01c */ + uint32 gci_levelintstat; /* 0x020 */ + uint32 gci_eventintstat; /* 0x024 */ + uint32 gci_wakelevelintstat; /* 0x028 */ + uint32 gci_wakeeventintstat; /* 0x02c */ + uint32 semaphoreintstatus; /* 0x030 */ + uint32 semaphoreintmask; /* 0x034 */ + uint32 semaphorerequest; /* 0x038 */ + uint32 semaphorereserve; /* 0x03c */ + uint32 gci_indirect_addr; /* 0x040 */ + uint32 gci_gpioctl; /* 0x044 */ + uint32 gci_gpiostatus; /* 0x048 */ + uint32 gci_gpiomask; /* 0x04c */ + uint32 eventsummary; /* 0x050 */ + uint32 gci_miscctl; /* 0x054 */ + uint32 gci_gpiointmask; /* 0x058 */ + uint32 gci_gpiowakemask; /* 0x05c */ + uint32 gci_input[32]; /* 0x060 */ + uint32 gci_event[32]; /* 0x0e0 */ + uint32 gci_output[4]; /* 0x160 */ + uint32 gci_control_0; /* 0x170 */ + uint32 gci_control_1; /* 0x174 */ + uint32 gci_intpolreg; /* 0x178 */ + uint32 gci_levelintmask; /* 0x17c */ + uint32 gci_eventintmask; /* 0x180 */ + uint32 wakelevelintmask; /* 0x184 */ + uint32 wakeeventintmask; /* 0x188 */ + uint32 hwmask; /* 0x18c */ + uint32 PAD; + uint32 gci_inbandeventintmask; /* 0x194 */ + uint32 PAD; + uint32 gci_inbandeventstatus; /* 0x19c */ + uint32 gci_seciauxtx; /* 0x1a0 */ + uint32 gci_seciauxrx; /* 0x1a4 */ + uint32 gci_secitx_datatag; /* 0x1a8 */ + uint32 gci_secirx_datatag; /* 0x1ac */ + uint32 gci_secitx_datamask; /* 0x1b0 */ + uint32 gci_seciusef0tx_reg; /* 0x1b4 */ + uint32 gci_secif0tx_offset; /* 0x1b8 */ + uint32 gci_secif0rx_offset; /* 0x1bc */ + uint32 gci_secif1tx_offset; /* 0x1c0 */ + uint32 gci_rxfifo_common_ctrl; /* 0x1c4 */ + uint32 gci_rxfifoctrl; /* 0x1c8 */ + uint32 gci_hw_sema_status; /* 0x1cc */ + uint32 
gci_seciuartescval; /* 0x1d0 */ + uint32 gic_seciuartautobaudctr; /* 0x1d4 */ + uint32 gci_secififolevel; /* 0x1d8 */ + uint32 gci_seciuartdata; /* 0x1dc */ + uint32 gci_secibauddiv; /* 0x1e0 */ + uint32 gci_secifcr; /* 0x1e4 */ + uint32 gci_secilcr; /* 0x1e8 */ + uint32 gci_secimcr; /* 0x1ec */ + uint32 gci_secilsr; /* 0x1f0 */ + uint32 gci_secimsr; /* 0x1f4 */ + uint32 gci_baudadj; /* 0x1f8 */ + uint32 gci_inbandintmask; /* 0x1fc */ + uint32 gci_chipctrl; /* 0x200 */ + uint32 gci_chipsts; /* 0x204 */ + uint32 gci_gpioout; /* 0x208 */ + uint32 gci_gpioout_read; /* 0x20C */ + uint32 gci_mpwaketx; /* 0x210 */ + uint32 gci_mpwakedetect; /* 0x214 */ + uint32 gci_seciin_ctrl; /* 0x218 */ + uint32 gci_seciout_ctrl; /* 0x21C */ + uint32 gci_seciin_auxfifo_en; /* 0x220 */ + uint32 gci_seciout_txen_txbr; /* 0x224 */ + uint32 gci_seciin_rxbrstatus; /* 0x228 */ + uint32 gci_seciin_rxerrstatus; /* 0x22C */ + uint32 gci_seciin_fcstatus; /* 0x230 */ + uint32 gci_seciout_txstatus; /* 0x234 */ + uint32 gci_seciout_txbrstatus; /* 0x238 */ + uint32 wlan_mem_info; /* 0x23C */ + uint32 wlan_bankxinfo; /* 0x240 */ + uint32 bt_smem_select; /* 0x244 */ + uint32 bt_smem_stby; /* 0x248 */ + uint32 bt_smem_status; /* 0x24C */ + uint32 wlan_bankxactivepda; /* 0x250 */ + uint32 wlan_bankxsleeppda; /* 0x254 */ + uint32 wlan_bankxkill; /* 0x258 */ + uint32 PAD[41]; + uint32 gci_chipid; /* 0x300 */ + uint32 PAD[3]; + uint32 otpstatus; /* 0x310 */ + uint32 otpcontrol; /* 0x314 */ + uint32 otpprog; /* 0x318 */ + uint32 otplayout; /* 0x31c */ + uint32 otplayoutextension; /* 0x320 */ + uint32 otpcontrol1; /* 0x324 */ + uint32 otpprogdata; /* 0x328 */ + uint32 PAD[52]; + uint32 otpECCstatus; /* 0x3FC */ + uint32 PAD[512]; + uint32 lhl_core_capab_adr; /* 0xC00 */ + uint32 lhl_main_ctl_adr; /* 0xC04 */ + uint32 lhl_pmu_ctl_adr; /* 0xC08 */ + uint32 lhl_extlpo_ctl_adr; /* 0xC0C */ + uint32 lpo_ctl_adr; /* 0xC10 */ + uint32 lhl_lpo2_ctl_adr; /* 0xC14 */ + uint32 lhl_osc32k_ctl_adr; /* 0xC18 */ + uint32 
lhl_clk_status_adr; /* 0xC1C */ + uint32 lhl_clk_det_ctl_adr; /* 0xC20 */ + uint32 lhl_clk_sel_adr; /* 0xC24 */ + uint32 hidoff_cnt_adr[2]; /* 0xC28-0xC2C */ + uint32 lhl_autoclk_ctl_adr; /* 0xC30 */ + uint32 PAD; /* reserved */ + uint32 lhl_hibtim_adr; /* 0xC38 */ + uint32 lhl_wl_ilp_val_adr; /* 0xC3C */ + uint32 lhl_wl_armtim0_intrp_adr; /* 0xC40 */ + uint32 lhl_wl_armtim0_st_adr; /* 0xC44 */ + uint32 lhl_wl_armtim0_adr; /* 0xC48 */ + uint32 PAD[9]; /* 0xC4C-0xC6C */ + uint32 lhl_wl_mactim0_intrp_adr; /* 0xC70 */ + uint32 lhl_wl_mactim0_st_adr; /* 0xC74 */ + uint32 lhl_wl_mactim_int0_adr; /* 0xC78 */ + uint32 lhl_wl_mactim_frac0_adr; /* 0xC7C */ + uint32 lhl_wl_mactim1_intrp_adr; /* 0xC80 */ + uint32 lhl_wl_mactim1_st_adr; /* 0xC84 */ + uint32 lhl_wl_mactim_int1_adr; /* 0xC88 */ + uint32 lhl_wl_mactim_frac1_adr; /* 0xC8C */ + uint32 PAD[8]; /* 0xC90-0xCAC */ + uint32 gpio_int_en_port_adr[4]; /* 0xCB0-0xCBC */ + uint32 gpio_int_st_port_adr[4]; /* 0xCC0-0xCCC */ + uint32 gpio_ctrl_iocfg_p_adr[64]; /* 0xCD0-0xDCC */ + uint32 gpio_gctrl_iocfg_p0_p39_adr; /* 0xDD0 */ + uint32 gpio_gdsctrl_iocfg_p0_p25_p30_p39_adr; /* 0xDD4 */ + uint32 gpio_gdsctrl_iocfg_p26_p29_adr; /* 0xDD8 */ + uint32 PAD[8]; /* 0xDDC-0xDF8 */ + uint32 lhl_gpio_din0_adr; /* 0xDFC */ + uint32 lhl_gpio_din1_adr; /* 0xE00 */ + uint32 lhl_wkup_status_adr; /* 0xE04 */ + uint32 lhl_ctl_adr; /* 0xE08 */ + uint32 lhl_adc_ctl_adr; /* 0xE0C */ + uint32 lhl_qdxyz_in_dly_adr; /* 0xE10 */ + uint32 lhl_optctl_adr; /* 0xE14 */ + uint32 lhl_optct2_adr; /* 0xE18 */ + uint32 lhl_scanp_cntr_init_val_adr; /* 0xE1C */ + uint32 lhl_opt_togg_val_adr[6]; /* 0xE20-0xE34 */ + uint32 lhl_optx_smp_val_adr; /* 0xE38 */ + uint32 lhl_opty_smp_val_adr; /* 0xE3C */ + uint32 lhl_optz_smp_val_adr; /* 0xE40 */ + uint32 lhl_hidoff_keepstate_adr[3]; /* 0xE44-0xE4C */ + uint32 lhl_bt_slmboot_ctl0_adr[4]; /* 0xE50-0xE5C */ + uint32 lhl_wl_fw_ctl; /* 0xE60 */ + uint32 lhl_wl_hw_ctl_adr[2]; /* 0xE64-0xE68 */ + uint32 lhl_bt_hw_ctl_adr; /* 
0xE6C */ + uint32 lhl_top_pwrseq_en_adr; /* 0xE70 */ + uint32 lhl_top_pwrdn_ctl_adr; /* 0xE74 */ + uint32 lhl_top_pwrup_ctl_adr; /* 0xE78 */ + uint32 lhl_top_pwrseq_ctl_adr; /* 0xE7C */ + uint32 lhl_top_pwrdn2_ctl_adr; /* 0xE80 */ + uint32 lhl_top_pwrup2_ctl_adr; /* 0xE84 */ + uint32 wpt_regon_intrp_cfg_adr; /* 0xE88 */ + uint32 bt_regon_intrp_cfg_adr; /* 0xE8C */ + uint32 wl_regon_intrp_cfg_adr; /* 0xE90 */ + uint32 regon_intrp_st_adr; /* 0xE94 */ + uint32 regon_intrp_en_adr; /* 0xE98 */ +} gciregs_t; + +#define GCI_CAP0_REV_MASK 0x000000ff + +/* GCI Capabilities registers */ +#define GCI_CORE_CAP_0_COREREV_MASK 0xFF +#define GCI_CORE_CAP_0_COREREV_SHIFT 0 + +#define GCI_INDIRECT_ADDRESS_REG_REGINDEX_MASK 0x3F +#define GCI_INDIRECT_ADDRESS_REG_REGINDEX_SHIFT 0 +#define GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_MASK 0xF +#define GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_SHIFT 16 + +#define WLAN_BANKX_SLEEPPDA_REG_SLEEPPDA_MASK 0xFFFF + +#define WLAN_BANKX_PKILL_REG_SLEEPPDA_MASK 0x1 + +/* WLAN BankXInfo Register */ +#define WLAN_BANKXINFO_BANK_SIZE_MASK 0x00FFF000 +#define WLAN_BANKXINFO_BANK_SIZE_SHIFT 12 + +/* WLAN Mem Info Register */ +#define WLAN_MEM_INFO_REG_NUMSOCRAMBANKS_MASK 0x000000FF +#define WLAN_MEM_INFO_REG_NUMSOCRAMBANKS_SHIFT 0 + +#define WLAN_MEM_INFO_REG_NUMD11MACBM_MASK 0x0000FF00 +#define WLAN_MEM_INFO_REG_NUMD11MACBM_SHIFT 8 + +#define WLAN_MEM_INFO_REG_NUMD11MACUCM_MASK 0x00FF0000 +#define WLAN_MEM_INFO_REG_NUMD11MACUCM_SHIFT 16 + +#define WLAN_MEM_INFO_REG_NUMD11MACSHM_MASK 0xFF000000 +#define WLAN_MEM_INFO_REG_NUMD11MACSHM_SHIFT 24 + + +#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */ + + +#endif /* _SBGCI_H */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbhnddma.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbhnddma.h index 5692ea954b35..6d10d6ad3940 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbhnddma.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbhnddma.h @@ -2,7 
+2,7 @@ * Generic Broadcom Home Networking Division (HND) DMA engine HW interface * This supports the following chips: BCM42xx, 44xx, 47xx . * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: sbhnddma.h 530150 2015-01-29 08:43:40Z $ + * $Id: sbhnddma.h 615537 2016-01-28 00:46:34Z $ */ #ifndef _sbhnddma_h_ @@ -125,6 +125,10 @@ typedef volatile struct { #define DMA_PT_4 2 #define DMA_PT_8 3 +/** Channel Switch */ +#define DMA_CS_OFF 0 +#define DMA_CS_ON 1 + /* transmit descriptor table pointer */ #define XP_LD_MASK 0xfff /**< last valid descriptor */ @@ -163,7 +167,8 @@ typedef volatile struct { #define RC_PC_SHIFT 21 #define RC_PT_MASK 0x03000000 /**< Prefetch threshold */ #define RC_PT_SHIFT 24 - +#define RC_WAITCMP_MASK 0x00001000 +#define RC_WAITCMP_SHIFT 12 /* receive descriptor table pointer */ #define RP_LD_MASK 0xfff /**< last valid descriptor */ @@ -288,6 +293,8 @@ typedef volatile struct { #define D64_XC_FL 0x00000010 /**< flush request */ #define D64_XC_MR_MASK 0x000001C0 /**< Multiple outstanding reads */ #define D64_XC_MR_SHIFT 6 +#define D64_XC_CS_SHIFT 9 /**< channel switch enable */ +#define D64_XC_CS_MASK 0x00000200 /**< channel switch enable */ #define D64_XC_PD 0x00000800 /**< parity check disable */ #define D64_XC_AE 0x00030000 /**< address extension bits */ #define D64_XC_AE_SHIFT 16 @@ -340,6 +347,8 @@ typedef volatile struct { #define D64_RC_PC_SHIFT 21 #define D64_RC_PT_MASK 0x03000000 /**< Prefetch threshold */ #define D64_RC_PT_SHIFT 24 +#define D64_RC_WAITCMP_MASK 0x00001000 +#define D64_RC_WAITCMP_SHIFT 12 /* flags for dma controller */ #define DMA_CTRL_PEN (1 << 0) /**< partity enable */ @@ -364,8 +373,8 @@ typedef volatile struct { #define D64_RS0_RS_STOPPED 0x30000000 /**< stopped */ #define 
D64_RS0_RS_SUSP 0x40000000 /**< suspend pending */ -#define D64_RS1_AD_MASK 0x0001ffff /**< active descriptor */ -#define D64_RS1_RE_MASK 0xf0000000 /**< receive errors */ +#define D64_RS1_AD_MASK (di->d64_rs1_ad_mask) /* active descriptor pointer */ +#define D64_RS1_RE_MASK 0xf0000000 /* receive errors */ #define D64_RS1_RE_SHIFT 28 #define D64_RS1_RE_NOERR 0x00000000 /**< no error */ #define D64_RS1_RE_DPO 0x10000000 /**< descriptor protocol error */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbpcmcia.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbpcmcia.h index d2e42ffffdbe..0ffc97be1a48 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbpcmcia.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbpcmcia.h @@ -1,7 +1,7 @@ /* * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: sbpcmcia.h 521344 2014-12-17 10:03:55Z $ + * $Id: sbpcmcia.h 616054 2016-01-29 13:22:24Z $ */ #ifndef _SBPCMCIA_H @@ -106,11 +106,34 @@ #define SRI_OTP 0x80 +#define SROM16K_BANK_SEL_MASK (3 << 11) +#define SROM16K_BANK_SHFT_MASK 11 +#define SROM16K_ADDR_SEL_MASK ((1 << SROM16K_BANK_SHFT_MASK) - 1) + + + +/* Standard tuples we know about */ + +#define CISTPL_NULL 0x00 +#define CISTPL_END 0xff /* End of the CIS tuple chain */ + + +#define CISTPL_BRCM_HNBU 0x80 + + +#define HNBU_BOARDREV 0x02 /* One byte board revision */ + + +#define HNBU_BOARDTYPE 0x1b /* 2 bytes; boardtype */ + + +#define HNBU_HNBUCIS 0x1d /* what follows is proprietary HNBU CIS format */ + + /* sbtmstatelow */ #define SBTML_INT_ACK 0x40000 /* ack the sb interrupt */ #define SBTML_INT_EN 0x20000 /* enable sb interrupt */ /* sbtmstatehigh */ #define 
SBTMH_INT_STATUS 0x40000 /* sb interrupt status */ - #endif /* _SBPCMCIA_H */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbsdio.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbsdio.h index f4760a22c077..68707c4d489e 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbsdio.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbsdio.h @@ -4,7 +4,7 @@ * * SDIO core support 1bit, 4 bit SDIO mode as well as SPI mode. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbsdpcmdev.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbsdpcmdev.h index c0c889e5316f..8607d6357910 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbsdpcmdev.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbsdpcmdev.h @@ -2,7 +2,7 @@ * Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific * device core support * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: sbsdpcmdev.h 514727 2014-11-12 03:02:48Z $ + * $Id: sbsdpcmdev.h 610395 2016-01-06 22:52:57Z $ */ #ifndef _sbsdpcmdev_h_ @@ -126,7 +126,9 @@ typedef volatile struct { uint32 writeterm; /* WriteTermCount, 0x13c, rev8, SDIO: wr frm terminates */ uint32 PAD[40]; uint32 clockctlstatus; /* ClockCtlStatus, 0x1e0, rev8 */ - uint32 PAD[7]; + uint32 PAD[1]; + uint32 powerctl; /* 0x1e8 */ + uint32 PAD[5]; /* DMA engines */ volatile union { diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbsocram.h 
b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbsocram.h index ad7b1fcf2fe8..b2c018839788 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbsocram.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbsocram.h @@ -1,7 +1,7 @@ /* * BCM47XX Sonics SiliconBackplane embedded ram core * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: sbsocram.h 514727 2014-11-12 03:02:48Z $ + * $Id: sbsocram.h 604712 2015-12-08 08:05:42Z $ */ #ifndef _SBSOCRAM_H diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbsysmem.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbsysmem.h index 99a810c434e8..df26399b2d61 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbsysmem.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sbsysmem.h @@ -1,7 +1,7 @@ /* * SiliconBackplane System Memory core * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: sbsysmem.h 514727 2014-11-12 03:02:48Z $ + * $Id: sbsysmem.h 563229 2015-06-12 04:50:06Z $ */ #ifndef _SBSYSMEM_H @@ -113,16 +113,10 @@ typedef volatile struct sysmemregs { * the memory size is number of banks times bank size. * The same applies to rom size. 
*/ -#define SRCI_ROMNB_MASK 0xf000 -#define SRCI_ROMNB_SHIFT 12 -#define SRCI_ROMBSZ_MASK 0xf00 -#define SRCI_ROMBSZ_SHIFT 8 -#define SRCI_SRNB_MASK 0xf0 -#define SRCI_SRNB_SHIFT 4 -#define SRCI_SRBSZ_MASK 0xf -#define SRCI_SRBSZ_SHIFT 0 - -#define SR_BSZ_BASE 14 +#define SYSMEM_SRCI_ROMNB_MASK 0x3e0 +#define SYSMEM_SRCI_ROMNB_SHIFT 5 +#define SYSMEM_SRCI_SRNB_MASK 0x1f +#define SYSMEM_SRCI_SRNB_SHIFT 0 /* Standby control register */ #define SRSC_SBYOVR_MASK 0x80000000 @@ -163,31 +157,17 @@ typedef volatile struct sysmemregs { /* bankidx and bankinfo reg defines */ #define SYSMEM_BANKINFO_SZMASK 0x7f -#define SYSMEM_BANKIDX_ROM_MASK 0x100 - -#define SYSMEM_BANKIDX_MEMTYPE_SHIFT 8 -/* sysmem bankinfo memtype */ -#define SYSMEM_MEMTYPE_RAM 0 -#define SYSMEM_MEMTYPE_R0M 1 -#define SYSMEM_MEMTYPE_DEVRAM 2 +#define SYSMEM_BANKIDX_ROM_MASK 0x80 #define SYSMEM_BANKINFO_REG 0x40 #define SYSMEM_BANKIDX_REG 0x10 -#define SYSMEM_BANKINFO_STDBY_MASK 0x400 -#define SYSMEM_BANKINFO_STDBY_TIMER 0x800 +#define SYSMEM_BANKINFO_STDBY_MASK 0x200 +#define SYSMEM_BANKINFO_STDBY_TIMER 0x400 -#define SYSMEM_BANKINFO_DEVRAMSEL_SHIFT 13 -#define SYSMEM_BANKINFO_DEVRAMSEL_MASK 0x2000 -#define SYSMEM_BANKINFO_DEVRAMPRO_SHIFT 14 -#define SYSMEM_BANKINFO_DEVRAMPRO_MASK 0x4000 -#define SYSMEM_BANKINFO_SLPSUPP_SHIFT 15 -#define SYSMEM_BANKINFO_SLPSUPP_MASK 0x8000 -#define SYSMEM_BANKINFO_RETNTRAM_SHIFT 16 -#define SYSMEM_BANKINFO_RETNTRAM_MASK 0x00010000 -#define SYSMEM_BANKINFO_PDASZ_SHIFT 17 -#define SYSMEM_BANKINFO_PDASZ_MASK 0x003E0000 -#define SYSMEM_BANKINFO_DEVRAMREMAP_SHIFT 24 -#define SYSMEM_BANKINFO_DEVRAMREMAP_MASK 0x01000000 +#define SYSMEM_BANKINFO_SLPSUPP_SHIFT 14 +#define SYSMEM_BANKINFO_SLPSUPP_MASK 0x4000 +#define SYSMEM_BANKINFO_PDASZ_SHIFT 16 +#define SYSMEM_BANKINFO_PDASZ_MASK 0x001F0000 /* extracoreinfo register */ #define SYSMEM_DEVRAMBANK_MASK 0xF000 diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sdio.h 
b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sdio.h index ca53afbcf3e9..8f77e4150e76 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sdio.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sdio.h @@ -2,7 +2,7 @@ * SDIO spec header file * Protocol and standard (common) device definitions * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: sdio.h 514727 2014-11-12 03:02:48Z $ + * $Id: sdio.h 644725 2016-06-21 12:26:04Z $ */ #ifndef _SDIO_H @@ -106,6 +106,10 @@ typedef volatile struct { /* io_en */ #define SDIO_FUNC_ENABLE_1 0x02 /* function 1 I/O enable */ #define SDIO_FUNC_ENABLE_2 0x04 /* function 2 I/O enable */ +#if defined(BT_OVER_SDIO) +#define SDIO_FUNC_ENABLE_3 0x08 /* function 3 I/O enable */ +#define SDIO_FUNC_DISABLE_3 0xF0 /* function 3 I/O disable */ +#endif /* defined (BT_OVER_SDIO) */ /* io_rdys */ #define SDIO_FUNC_READY_1 0x02 /* function 1 I/O ready */ @@ -115,7 +119,9 @@ typedef volatile struct { #define INTR_CTL_MASTER_EN 0x1 /* interrupt enable master */ #define INTR_CTL_FUNC1_EN 0x2 /* interrupt enable for function 1 */ #define INTR_CTL_FUNC2_EN 0x4 /* interrupt enable for function 2 */ - +#if defined(BT_OVER_SDIO) +#define INTR_CTL_FUNC3_EN 0x8 /* interrupt enable for function 3 */ +#endif /* defined (BT_OVER_SDIO) */ /* intr_status */ #define INTR_STATUS_FUNC1 0x2 /* interrupt pending for function 1 */ #define INTR_STATUS_FUNC2 0x4 /* interrupt pending for function 2 */ @@ -254,7 +260,6 @@ typedef volatile struct { #define SDIO_FUNC_0 0 #define SDIO_FUNC_1 1 #define SDIO_FUNC_2 2 -#define SDIO_FUNC_3 3 #define SDIO_FUNC_4 4 #define SDIO_FUNC_5 5 #define SDIO_FUNC_6 6 diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sdioh.h 
b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sdioh.h index bc1fcbc0a04b..f37c5f62aef7 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sdioh.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sdioh.h @@ -2,7 +2,7 @@ * SDIO Host Controller Spec header file * Register map and definitions for the Standard Host Controller * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sdiovar.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sdiovar.h index 335e53a2f65b..719eeb1064b1 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sdiovar.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sdiovar.h @@ -2,7 +2,7 @@ * Structure used by apps whose drivers access SDIO drivers. * Pulled out separately so dhdu and wlu can both use it. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: sdiovar.h 514727 2014-11-12 03:02:48Z $ + * $Id: sdiovar.h 610006 2016-01-06 01:38:47Z $ */ #ifndef _sdiovar_h_ @@ -57,6 +57,51 @@ typedef struct sdreg { #define NUM_PREV_TRANSACTIONS 16 +typedef struct sdio_bus_metrics { + uint32 active_dur; /* msecs */ + + /* Generic */ + uint32 data_intr_cnt; /* data interrupt counter */ + uint32 mb_intr_cnt; /* mailbox interrupt counter */ + uint32 error_intr_cnt; /* error interrupt counter */ + uint32 wakehost_cnt; /* counter for OOB wakehost */ + + /* DS forcewake */ + uint32 ds_wake_on_cnt; /* counter for (clock) ON */ + uint32 ds_wake_on_dur; /* duration for (clock) ON) */ + uint32 ds_wake_off_cnt; /* counter for (clock) OFF */ + uint32 ds_wake_off_dur; /* duration for (clock) OFF */ + + /* DS_D0 state */ + uint32 ds_d0_cnt; /* counter for DS_D0 state */ + uint32 ds_d0_dur; /* duration for DS_D0 state */ + + /* DS_D3 state */ + uint32 ds_d3_cnt; /* counter for DS_D3 state */ + uint32 ds_d3_dur; /* duration for DS_D3 state */ + + /* DS DEV_WAKE */ + uint32 ds_dw_assrt_cnt; /* counter for DW_ASSERT */ + uint32 ds_dw_dassrt_cnt; /* counter for DW_DASSERT */ + + /* DS mailbox signals */ + uint32 ds_tx_dsreq_cnt; /* counter for tx HMB_DATA_DSREQ */ + uint32 ds_tx_dsexit_cnt; /* counter for tx HMB_DATA_DSEXIT */ + uint32 ds_tx_d3ack_cnt; /* counter for tx HMB_DATA_D3ACK */ + uint32 ds_tx_d3exit_cnt; /* counter for tx HMB_DATA_D3EXIT */ + uint32 ds_rx_dsack_cnt; /* counter for rx SMB_DATA_DSACK */ + uint32 ds_rx_dsnack_cnt; /* counter for rx SMB_DATA_DSNACK */ + uint32 ds_rx_d3inform_cnt; /* counter for rx SMB_DATA_D3INFORM */ +} sdio_bus_metrics_t; + +/* Bus interface info for SDIO */ +typedef struct wl_pwr_sdio_stats { + uint16 type; /* 
WL_PWRSTATS_TYPE_SDIO */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + + sdio_bus_metrics_t sdio; /* stats from SDIO bus driver */ +} wl_pwr_sdio_stats_t; + #include #endif /* _sdiovar_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/sdspi.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sdspi.h old mode 100755 new mode 100644 similarity index 97% rename from drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/sdspi.h rename to drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sdspi.h index a1d7ac937cf3..c5b8aff22862 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/sdspi.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/sdspi.h @@ -1,7 +1,7 @@ /* * SD-SPI Protocol Standard * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: sdspi.h 518342 2014-12-01 23:21:41Z $ + * $Id: sdspi.h 700076 2017-05-17 14:42:22Z $ */ #ifndef _SD_SPI_H #define _SD_SPI_H diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/siutils.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/siutils.h index 4393a7426d34..2afca2549250 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/siutils.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/siutils.h @@ -2,7 +2,7 @@ * Misc utility routines for accessing the SOC Interconnects * of Broadcom HNBU chips. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: siutils.h 530150 2015-01-29 08:43:40Z $ + * $Id: siutils.h 668442 2016-11-03 08:42:43Z $ */ #ifndef _siutils_h_ @@ -36,6 +36,29 @@ #endif /* SR_DEBUG */ +#define WARM_BOOT 0xA0B0C0D0 + +#ifdef BCM_BACKPLANE_TIMEOUT + +#define SI_MAX_ERRLOG_SIZE 4 +typedef struct si_axi_error +{ + uint32 error; + uint32 coreid; + uint32 errlog_lo; + uint32 errlog_hi; + uint32 errlog_id; + uint32 errlog_flags; + uint32 errlog_status; +} si_axi_error_t; + +typedef struct si_axi_error_info +{ + uint32 count; + si_axi_error_t axi_error[SI_MAX_ERRLOG_SIZE]; +} si_axi_error_info_t; +#endif /* BCM_BACKPLANE_TIMEOUT */ + /** * Data structure to export all chip specific common variables * public (read-only) portion of siutils handle returned by si_attach()/si_kattach() @@ -64,7 +87,10 @@ struct si_pub { bool issim; /**< chip is in simulation or emulation */ uint socirev; /**< SOC interconnect rev */ bool pci_pr32414; - + int gcirev; /**< gci core rev */ +#ifdef BCM_BACKPLANE_TIMEOUT + si_axi_error_info_t * err_info; +#endif /* BCM_BACKPLANE_TIMEOUT */ }; /* for HIGH_ONLY driver, the si_t must be writable to allow states sync from BMAC to HIGH driver @@ -117,6 +143,17 @@ typedef const struct si_pub si_t; #define PMU_RES 31 #endif /* SR_DEBUG */ +/* "access" param defines for si_seci_access() below */ +#define SECI_ACCESS_STATUSMASK_SET 0 +#define SECI_ACCESS_INTRS 1 +#define SECI_ACCESS_UART_CTS 2 +#define SECI_ACCESS_UART_RTS 3 +#define SECI_ACCESS_UART_RXEMPTY 4 +#define SECI_ACCESS_UART_GETC 5 +#define SECI_ACCESS_UART_TXFULL 6 +#define SECI_ACCESS_UART_PUTC 7 +#define SECI_ACCESS_STATUSMASK_GET 8 + #define ISSIM_ENAB(sih) FALSE #define INVALID_ADDR (~0) @@ -128,8 +165,12 @@ typedef const struct si_pub si_t; 
#define PMUCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PMU) #endif +#if defined(BCMAOBENAB) +#define AOB_ENAB(sih) (BCMAOBENAB) +#else #define AOB_ENAB(sih) ((sih)->ccrev >= 35 ? \ ((sih)->cccaps_ext & CC_CAP_EXT_AOB_PRESENT) : 0) +#endif /* BCMAOBENAB */ /* chipcommon clock/power control (exclusive with PMU's) */ #if defined(BCMPMUCTL) && BCMPMUCTL @@ -172,12 +213,12 @@ typedef void (*gci_gpio_handler_t)(uint32 stat, void *arg); #define SI_BPIND_4BYTE 0xF #include /* === exported functions === */ -extern si_t *si_attach(uint pcidev, osl_t *osh, void *regs, uint bustype, +extern si_t *si_attach(uint pcidev, osl_t *osh, volatile void *regs, uint bustype, void *sdh, char **vars, uint *varsz); extern si_t *si_kattach(osl_t *osh); extern void si_detach(si_t *sih); extern bool si_pci_war16165(si_t *sih); -extern void * +extern volatile void * si_d11_switch_addrbase(si_t *sih, uint coreunit); extern uint si_corelist(si_t *sih, uint coreid[]); extern uint si_coreid(si_t *sih); @@ -194,23 +235,22 @@ extern uint si_backplane_access(si_t *sih, uint addr, uint size, uint *val, bool read); extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); extern uint si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val); -extern uint32 *si_corereg_addr(si_t *sih, uint coreidx, uint regoff); -extern void *si_coreregs(si_t *sih); +extern volatile uint32 *si_corereg_addr(si_t *sih, uint coreidx, uint regoff); +extern volatile void *si_coreregs(si_t *sih); extern uint si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val); extern uint si_core_wrapperreg(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val); extern void *si_wrapperregs(si_t *sih); extern uint32 si_core_cflags(si_t *sih, uint32 mask, uint32 val); extern void si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val); extern uint32 si_core_sflags(si_t *sih, uint32 mask, uint32 val); -extern void si_d11rsdb_core1_alt_reg_clk_dis(si_t *sih); -extern void 
si_d11rsdb_core1_alt_reg_clk_en(si_t *sih); +extern void si_commit(si_t *sih); extern bool si_iscoreup(si_t *sih); extern uint si_numcoreunits(si_t *sih, uint coreid); extern uint si_numd11coreunits(si_t *sih); extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit); -extern void *si_setcoreidx(si_t *sih, uint coreidx); -extern void *si_setcore(si_t *sih, uint coreid, uint coreunit); -extern void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val); +extern volatile void *si_setcoreidx(si_t *sih, uint coreidx); +extern volatile void *si_setcore(si_t *sih, uint coreid, uint coreunit); +extern volatile void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val); extern void si_restore_core(si_t *sih, uint coreid, uint intr_val); extern int si_numaddrspaces(si_t *sih); extern uint32 si_addrspace(si_t *sih, uint asidx); @@ -239,6 +279,7 @@ extern int si_clkctl_xtal(si_t *sih, uint what, bool on); extern uint32 si_gpiotimerval(si_t *sih, uint32 mask, uint32 val); extern void si_btcgpiowar(si_t *sih); extern bool si_deviceremoved(si_t *sih); +extern void si_set_device_removed(si_t *sih, bool status); extern uint32 si_sysmem_size(si_t *sih); extern uint32 si_socram_size(si_t *sih); extern uint32 si_socdevram_size(si_t *sih); @@ -252,13 +293,14 @@ extern uint32 si_socdevram_remap_size(si_t *sih); extern void si_watchdog(si_t *sih, uint ticks); extern void si_watchdog_ms(si_t *sih, uint32 ms); extern uint32 si_watchdog_msticks(void); -extern void *si_gpiosetcore(si_t *sih); +extern volatile void *si_gpiosetcore(si_t *sih); extern uint32 si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority); extern uint32 si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority); extern uint32 si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority); extern uint32 si_gpioin(si_t *sih); extern uint32 si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority); extern uint32 si_gpiointmask(si_t *sih, uint32 mask, 
uint32 val, uint8 priority); +extern uint32 si_gpioeventintmask(si_t *sih, uint32 mask, uint32 val, uint8 priority); extern uint32 si_gpioled(si_t *sih, uint32 mask, uint32 val); extern uint32 si_gpioreserve(si_t *sih, uint32 gpio_num, uint8 priority); extern uint32 si_gpiorelease(si_t *sih, uint32 gpio_num, uint8 priority); @@ -270,6 +312,8 @@ extern void si_gci_enable_gpio(si_t *sih, uint8 gpio, uint32 mask, uint32 value) extern uint8 si_gci_host_wake_gpio_init(si_t *sih); extern void si_gci_host_wake_gpio_enable(si_t *sih, uint8 gpio, bool state); +extern void si_invalidate_second_bar0win(si_t *sih); + /* GCI interrupt handlers */ extern void si_gci_handler_process(si_t *sih); @@ -299,12 +343,16 @@ extern uint16 si_d11_devid(si_t *sih); extern int si_corepciid(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice, uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif, uint8 *pciheader); +extern uint32 si_seci_access(si_t *sih, uint32 val, int access); +extern volatile void* si_seci_init(si_t *sih, uint8 seci_mode); +extern void si_seci_clk_force(si_t *sih, bool val); +extern bool si_seci_clk_force_status(si_t *sih); + #define si_eci(sih) 0 static INLINE void * si_eci_init(si_t *sih) {return NULL;} #define si_eci_notify_bt(sih, type, val) (0) #define si_seci(sih) 0 #define si_seci_upd(sih, a) do {} while (0) -static INLINE void * si_seci_init(si_t *sih, uint8 use_seci) {return NULL;} static INLINE void * si_gci_init(si_t *sih) {return NULL;} #define si_seci_down(sih) do {} while (0) #define si_gci(sih) 0 @@ -316,8 +364,6 @@ extern void si_otp_power(si_t *sih, bool on, uint32* min_res_mask); /* SPROM availability */ extern bool si_is_sprom_available(si_t *sih); -extern bool si_is_sprom_enabled(si_t *sih); -extern void si_sprom_enable(si_t *sih, bool enable); /* OTP/SROM CIS stuff */ extern int si_cis_source(si_t *sih); @@ -367,6 +413,7 @@ extern void si_pcie_power_save_enable(si_t *sih, bool enable); extern void si_pcie_extendL1timer(si_t *sih, bool extend); 
extern int si_pci_fixcfg(si_t *sih); extern void si_chippkg_set(si_t *sih, uint); +extern bool si_is_warmboot(void); extern void si_chipcontrl_btshd0_4331(si_t *sih, bool on); extern void si_chipcontrl_restore(si_t *sih, uint32 val); @@ -392,6 +439,8 @@ extern bool si_taclear(si_t *sih, bool details); #if defined(BCMDBG_PHYDUMP) struct bcmstrbuf; extern int si_dump_pcieinfo(si_t *sih, struct bcmstrbuf *b); +extern void si_dump_pmuregs(si_t *sih, struct bcmstrbuf *b); +extern int si_dump_pcieregs(si_t *sih, struct bcmstrbuf *b); #endif #if defined(BCMDBG_PHYDUMP) @@ -420,8 +469,11 @@ extern int si_pcie_configspace_cache(si_t *sih); extern int si_pcie_configspace_restore(si_t *sih); extern int si_pcie_configspace_get(si_t *sih, uint8 *buf, uint size); -char *si_getnvramflvar(si_t *sih, const char *name); +#ifdef BCM_BACKPLANE_TIMEOUT +extern const si_axi_error_info_t * si_get_axi_errlog_info(si_t *sih); +extern void si_reset_axi_errlog_info(si_t * sih); +#endif /* BCM_BACKPLANE_TIMEOUT */ extern uint32 si_tcm_size(si_t *sih); extern bool si_has_flops(si_t *sih); @@ -439,21 +491,21 @@ extern void si_gci_reset(si_t *sih); extern void si_gci_seci_init(si_t *sih); extern void si_ercx_init(si_t *sih, uint32 ltecx_mux, uint32 ltecx_padnum, uint32 ltecx_fnsel, uint32 ltecx_gcigpio); +#endif /* BCMLTECOEX */ extern void si_wci2_init(si_t *sih, uint8 baudrate, uint32 ltecx_mux, uint32 ltecx_padnum, uint32 ltecx_fnsel, uint32 ltecx_gcigpio); -#endif /* BCMLTECOEX */ + +extern bool si_btcx_wci2_init(si_t *sih); + extern void si_gci_set_functionsel(si_t *sih, uint32 pin, uint8 fnsel); extern uint32 si_gci_get_functionsel(si_t *sih, uint32 pin); extern void si_gci_clear_functionsel(si_t *sih, uint8 fnsel); extern uint8 si_gci_get_chipctrlreg_idx(uint32 pin, uint32 *regidx, uint32 *pos); extern uint32 si_gci_chipcontrol(si_t *sih, uint reg, uint32 mask, uint32 val); extern uint32 si_gci_chipstatus(si_t *sih, uint reg); -extern uint16 si_cc_get_reg16(uint32 reg_offs); -extern 
uint32 si_cc_get_reg32(uint32 reg_offs); -extern uint32 si_cc_set_reg32(uint32 reg_offs, uint32 val); -extern uint32 si_gci_preinit_upd_indirect(uint32 regidx, uint32 setval, uint32 mask); extern uint8 si_enable_device_wake(si_t *sih, uint8 *wake_status, uint8 *cur_status); extern void si_swdenable(si_t *sih, uint32 swdflag); +extern uint8 si_enable_perst_wake(si_t *sih, uint8 *perst_wake_mask, uint8 *perst_cur_status); extern uint32 si_get_pmu_reg_addr(si_t *sih, uint32 offset); #define CHIPCTRLREG1 0x1 @@ -481,10 +533,15 @@ extern void si_pcie_hw_L1SS_war(si_t *sih); extern void si_pciedev_crwlpciegen2(si_t *sih); extern void si_pcie_prep_D3(si_t *sih, bool enter_D3); extern void si_pciedev_reg_pm_clk_period(si_t *sih); +extern void si_d11rsdb_core1_alt_reg_clk_dis(si_t *sih); +extern void si_d11rsdb_core1_alt_reg_clk_en(si_t *sih); +extern void si_pcie_disable_oobselltr(si_t *sih); +extern uint32 si_raw_reg(si_t *sih, uint32 reg, uint32 val, uint32 wrire_req); #ifdef WLRSDB extern void si_d11rsdb_core_disable(si_t *sih, uint32 bits); extern void si_d11rsdb_core_reset(si_t *sih, uint32 bits, uint32 resetbits); +extern void set_secondary_d11_core(si_t *sih, void **secmap, void **secwrap); #endif @@ -519,6 +576,13 @@ extern void si_d11rsdb_core_reset(si_t *sih, uint32 bits, uint32 resetbits); OFFSETOF(pmuregs_t, member), mask, val): \ si_corereg(si, SI_CC_IDX, OFFSETOF(chipcregs_t, member), mask, val)) +#define LHL_REG(si, member, mask, val) \ + si_corereg(si, si_findcoreidx(si, GCI_CORE_ID, 0), \ + OFFSETOF(gciregs_t, member), mask, val) + +#define CHIPC_REG(si, member, mask, val) \ + si_corereg(si, SI_CC_IDX, OFFSETOF(chipcregs_t, member), mask, val) + /* GCI Macros */ #define ALLONES_32 0xFFFFFFFF #define GCI_CCTL_SECIRST_OFFSET 0 /**< SeciReset */ @@ -560,6 +624,7 @@ extern void si_d11rsdb_core_reset(si_t *sih, uint32 bits, uint32 resetbits); #define GCI_SECIOUT_MODE_OFFSET 0 #define GCI_SECIOUT_GCIGPIO_OFFSET 4 +#define GCI_SECIOUT_LOOPBACK_OFFSET 8 #define 
GCI_SECIOUT_SECIINRELATED_OFFSET 16 #define GCI_SECIAUX_RXENABLE_OFFSET 0 @@ -583,6 +648,7 @@ extern void si_d11rsdb_core_reset(si_t *sih, uint32 bits, uint32 resetbits); #define GCI_WLAN_IP_ID 0 #define GCI_WLAN_BEGIN 0 #define GCI_WLAN_PRIO_POS (GCI_WLAN_BEGIN + 4) +#define GCI_WLAN_PERST_POS (GCI_WLAN_BEGIN + 15) /* GCI [639:512] = LTE [127:0] */ #define GCI_LTE_IP_ID 4 @@ -590,6 +656,8 @@ extern void si_d11rsdb_core_reset(si_t *sih, uint32 bits, uint32 resetbits); #define GCI_LTE_FRAMESYNC_POS (GCI_LTE_BEGIN + 0) #define GCI_LTE_RX_POS (GCI_LTE_BEGIN + 1) #define GCI_LTE_TX_POS (GCI_LTE_BEGIN + 2) +#define GCI_LTE_WCI2TYPE_POS (GCI_LTE_BEGIN + 48) +#define GCI_LTE_WCI2TYPE_MASK 7 #define GCI_LTE_AUXRXDVALID_POS (GCI_LTE_BEGIN + 56) /* Reg Index corresponding to ECI bit no x of ECI space */ @@ -609,7 +677,53 @@ extern void si_d11rsdb_core_reset(si_t *sih, uint32 bits, uint32 resetbits); #define PMU_OOB_BIT 0x12 #endif /* REROUTE_OOBINT */ +#define GCI_REG(si, offset, mask, val) \ + (AOB_ENAB(si) ? 
\ + si_corereg(si, si_findcoreidx(si, GCI_CORE_ID, 0), \ + offset, mask, val): \ + si_corereg(si, SI_CC_IDX, offset, mask, val)) + extern void si_pll_sr_reinit(si_t *sih); extern void si_pll_closeloop(si_t *sih); +void si_config_4364_d11_oob(si_t *sih, uint coreid); +extern void si_update_macclk_mul_fact(si_t *sih, uint mul_fact); +extern uint32 si_get_macclk_mul_fact(si_t *sih); +extern void si_gci_set_femctrl(si_t *sih, osl_t *osh, bool set); +extern void si_gci_set_femctrl_mask_ant01(si_t *sih, osl_t *osh, bool set); +extern uint si_num_slaveports(si_t *sih, uint coreid); +extern uint32 si_get_slaveport_addr(si_t *sih, uint asidx, uint core_id, uint coreunit); +extern uint32 si_get_d11_slaveport_addr(si_t *sih, uint asidx, uint coreunit); +uint si_introff(si_t *sih); +void si_intrrestore(si_t *sih, uint intr_val); +void si_nvram_res_masks(si_t *sih, uint32 *min_mask, uint32 *max_mask); +uint32 si_xtalfreq(si_t *sih); +extern uint32 si_wrapper_dump_buf_size(si_t *sih); +extern uint32 si_wrapper_dump_binary(si_t *sih, uchar *p); + +/* SR Power Control */ +extern uint32 si_srpwr_request(si_t *sih, uint32 mask, uint32 val); +extern uint32 si_srpwr_stat_spinwait(si_t *sih, uint32 mask, uint32 val); +extern uint32 si_srpwr_stat(si_t *sih); +extern uint32 si_srpwr_domain(si_t *sih); + +/* SR Power Control */ +#ifdef BCMSRPWR + /* No capabilities bit so using chipid for now */ + #define SRPWR_CAP(sih) (\ + (CHIPID(sih->chip) == BCM4347_CHIP_ID) || \ + (0)) + + extern bool _bcmsrpwr; + #if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD) + #define SRPWR_ENAB() (_bcmsrpwr) + #elif defined(BCMSRPWR_DISABLED) + #define SRPWR_ENAB() (0) + #else + #define SRPWR_ENAB() (1) + #endif +#else + #define SRPWR_CAP(sih) (0) + #define SRPWR_ENAB() (0) +#endif /* BCMSRPWR */ #endif /* _siutils_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/spid.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/spid.h index 9a39aaf0dd3f..8fdefa43778b 100644 
--- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/spid.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/spid.h @@ -1,7 +1,7 @@ /* * SPI device spec header file * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/trxhdr.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/trxhdr.h index f7404be99b0e..50cd3c1ac952 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/trxhdr.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/trxhdr.h @@ -1,7 +1,7 @@ /* * TRX image file header format. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/typedefs.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/typedefs.h index 0e110a1908ed..aed3ace91553 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/typedefs.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/typedefs.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -22,12 +22,15 @@ * * <> * - * $Id: typedefs.h 514727 2014-11-12 03:02:48Z $ + * $Id: typedefs.h 639587 2016-05-24 06:44:44Z $ */ #ifndef _TYPEDEFS_H_ #define _TYPEDEFS_H_ +#if (!defined(EDK_RELEASE_VERSION) || (EDK_RELEASE_VERSION < 0x00020000)) || \ + !defined(BWL_NO_INTERNAL_STDLIB_SUPPORT) + #ifdef SITE_TYPEDEFS 
/* @@ -77,6 +80,9 @@ typedef unsigned long long int uintptr; +/* float_t types conflict with the same typedefs from the standard ANSI-C +** math.h header file. Don't re-typedef them here. +*/ #if defined(_NEED_SIZE_T_) typedef long unsigned int size_t; @@ -331,6 +337,47 @@ typedef float64 float_t; /* Avoid warning for discarded const or volatile qualifier in special cases (-Wcast-qual) */ #define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr)) +#else + +#include +#include +#include + +#ifdef stderr +#undef stderr +#define stderr stdout +#endif + +typedef UINT32 uint; +typedef UINT64 ulong; +typedef UINT16 ushort; +typedef UINT8 uint8; +typedef UINT16 uint16; +typedef UINT32 uint32; +typedef UINT64 uint64; +typedef INT8 int8; +typedef INT16 int16; +typedef INT32 int32; +typedef INT64 int64; + +typedef BOOLEAN bool; +typedef unsigned char uchar; +typedef UINTN uintptr; + +typedef UINT8 u_char; +typedef UINT16 u_short; +typedef UINTN u_int; +typedef ULONGN u_long; + +#define UNUSED_PARAMETER(x) (void)(x) +#define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr)) +#define INLINE +#define AUTO (-1) /* Auto = -1 */ +#define ON 1 /* ON = 1 */ +#define OFF 0 + +#endif /* !EDK_RELEASE_VERSION || (EDK_RELEASE_VERSION < 0x00020000) */ + /* * Including the bcmdefs.h here, to make sure everyone including typedefs.h * gets this automatically diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/vlan.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/vlan.h old mode 100755 new mode 100644 similarity index 97% rename from drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/vlan.h rename to drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/vlan.h index 77b1458b3683..4879eae58528 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/vlan.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/vlan.h @@ -1,7 +1,7 @@ /* * 802.1Q VLAN protocol definitions * - * Copyright (C) 1999-2016, Broadcom Corporation + * 
Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: vlan.h 518342 2014-12-01 23:21:41Z $ + * $Id: vlan.h 700076 2017-05-17 14:42:22Z $ */ #ifndef _vlan_h_ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/wlfc_proto.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/wlfc_proto.h index 0d5b434198ee..121af90b664d 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/wlfc_proto.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/wlfc_proto.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -22,7 +22,7 @@ * * <> * - * $Id: wlfc_proto.h 542895 2015-03-22 14:13:12Z $ + * $Id: wlfc_proto.h 675983 2016-12-19 23:18:49Z $ * */ @@ -205,44 +205,59 @@ ((ctr) & WL_TXSTATUS_FREERUNCTR_MASK)) #define WL_TXSTATUS_GET_FREERUNCTR(x) ((x)& WL_TXSTATUS_FREERUNCTR_MASK) -/* Seq number part of AMSDU */ +/* AMSDU part of d11 seq number */ #define WL_SEQ_AMSDU_MASK 0x1 /* allow 1 bit */ #define WL_SEQ_AMSDU_SHIFT 14 #define WL_SEQ_SET_AMSDU(x, val) ((x) = \ ((x) & ~(WL_SEQ_AMSDU_MASK << WL_SEQ_AMSDU_SHIFT)) | \ - (((val) & WL_SEQ_AMSDU_MASK) << WL_SEQ_AMSDU_SHIFT)) -#define WL_SEQ_GET_AMSDU(x) (((x) >> WL_SEQ_AMSDU_SHIFT) & \ - WL_SEQ_AMSDU_MASK) + (((val) & WL_SEQ_AMSDU_MASK) << WL_SEQ_AMSDU_SHIFT)) /**< sets a single AMSDU bit */ +/** returns TRUE if ring item is AMSDU (seq = d11 seq nr) */ +#define WL_SEQ_IS_AMSDU(x) (((x) >> WL_SEQ_AMSDU_SHIFT) & \ + WL_SEQ_AMSDU_MASK) -/* Seq number is valid coming from FW */ #define WL_SEQ_FROMFW_MASK 0x1 /* allow 1 bit */ #define WL_SEQ_FROMFW_SHIFT 13 #define WL_SEQ_SET_FROMFW(x, val) ((x) = \ 
((x) & ~(WL_SEQ_FROMFW_MASK << WL_SEQ_FROMFW_SHIFT)) | \ (((val) & WL_SEQ_FROMFW_MASK) << WL_SEQ_FROMFW_SHIFT)) -#define WL_SEQ_GET_FROMFW(x) (((x) >> WL_SEQ_FROMFW_SHIFT) & \ - WL_SEQ_FROMFW_MASK) +/** Set when firmware assigns D11 sequence number to packet */ +#define SET_WL_HAS_ASSIGNED_SEQ(x) WL_SEQ_SET_FROMFW((x), 1) + +/** returns TRUE if packet has been assigned a d11 seq number by the WL firmware layer */ +#define GET_WL_HAS_ASSIGNED_SEQ(x) (((x) >> WL_SEQ_FROMFW_SHIFT) & WL_SEQ_FROMFW_MASK) /** * Proptxstatus related. * - * Pkt from bus layer (DHD for SDIO and pciedev for PCIE) - * is re-using seq number previously suppressed - * so FW should not assign new one + * When a packet is suppressed by WL or the D11 core, the packet has to be retried. Assigning + * a new d11 sequence number for the packet when retrying would cause the peer to be unable to + * reorder the packets within an AMPDU. So, suppressed packet from bus layer (DHD for SDIO and + * pciedev for PCIE) is re-using d11 seq number, so FW should not assign a new one. */ #define WL_SEQ_FROMDRV_MASK 0x1 /* allow 1 bit */ #define WL_SEQ_FROMDRV_SHIFT 12 -#define WL_SEQ_SET_FROMDRV(x, val) ((x) = \ + +/** + * Proptxstatus, host or fw PCIe layer requests WL layer to reuse d11 seq no. Bit is reset by WL + * subsystem when it reuses the seq number. 
+ */ +#define WL_SEQ_SET_REUSE(x, val) ((x) = \ ((x) & ~(WL_SEQ_FROMDRV_MASK << WL_SEQ_FROMDRV_SHIFT)) | \ (((val) & WL_SEQ_FROMDRV_MASK) << WL_SEQ_FROMDRV_SHIFT)) -#define WL_SEQ_GET_FROMDRV(x) (((x) >> WL_SEQ_FROMDRV_SHIFT) & \ +#define SET_WL_TO_REUSE_SEQ(x) WL_SEQ_SET_REUSE((x), 1) +#define RESET_WL_TO_REUSE_SEQ(x) WL_SEQ_SET_REUSE((x), 0) + +/** Proptxstatus, related to reuse of d11 seq numbers when retransmitting */ +#define IS_WL_TO_REUSE_SEQ(x) (((x) >> WL_SEQ_FROMDRV_SHIFT) & \ WL_SEQ_FROMDRV_MASK) #define WL_SEQ_NUM_MASK 0xfff /* allow 12 bit */ #define WL_SEQ_NUM_SHIFT 0 +/** Proptxstatus, sets d11seq no in pkt tag, related to reuse of d11seq no when retransmitting */ #define WL_SEQ_SET_NUM(x, val) ((x) = \ ((x) & ~(WL_SEQ_NUM_MASK << WL_SEQ_NUM_SHIFT)) | \ (((val) & WL_SEQ_NUM_MASK) << WL_SEQ_NUM_SHIFT)) +/** Proptxstatus, gets d11seq no from pkt tag, related to reuse of d11seq no when retransmitting */ #define WL_SEQ_GET_NUM(x) (((x) >> WL_SEQ_NUM_SHIFT) & \ WL_SEQ_NUM_MASK) @@ -281,19 +296,14 @@ #define WLFC_CTL_PKTFLAG_DISCARD_NOACK 4 /* Firmware wrongly reported suppressed previously,now fixing to acked */ #define WLFC_CTL_PKTFLAG_SUPPRESS_ACKED 5 - -#define WLFC_D11_STATUS_INTERPRET(txs) \ - ((txs)->status.was_acked ? WLFC_CTL_PKTFLAG_DISCARD : \ - (TXS_SUPR_MAGG_DONE((txs)->status.suppr_ind) ? 
\ - WLFC_CTL_PKTFLAG_DISCARD_NOACK : WLFC_CTL_PKTFLAG_D11SUPPRESS)) - +#define WLFC_CTL_PKTFLAG_MASK (0x0f) /* For 4-bit mask with one extra bit */ #ifdef PROP_TXSTATUS_DEBUG #define WLFC_DBGMESG(x) printf x /* wlfc-breadcrumb */ #define WLFC_BREADCRUMB(x) do {if ((x) == NULL) \ {printf("WLFC: %s():%d:caller:%p\n", \ - __FUNCTION__, __LINE__, __builtin_return_address(0));}} while (0) + __FUNCTION__, __LINE__, CALL_SITE);}} while (0) #define WLFC_PRINTMAC(banner, ea) do {printf("%s MAC: [%02x:%02x:%02x:%02x:%02x:%02x]\n", \ banner, ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]); } while (0) #define WLFC_WHEREIS(s) printf("WLFC: at %s():%d, %s\n", __FUNCTION__, __LINE__, (s)) @@ -333,18 +343,34 @@ #define WLFC_SET_AFQ(x, val) ((x) = \ ((x) & ~(1 << WLFC_MODE_AFQ_SHIFT)) | \ (((val) & 1) << WLFC_MODE_AFQ_SHIFT)) +/** returns TRUE if firmware supports 'at firmware queue' feature */ #define WLFC_GET_AFQ(x) (((x) >> WLFC_MODE_AFQ_SHIFT) & 1) #define WLFC_MODE_REUSESEQ_SHIFT 3 /* seq reuse bit */ #define WLFC_SET_REUSESEQ(x, val) ((x) = \ ((x) & ~(1 << WLFC_MODE_REUSESEQ_SHIFT)) | \ (((val) & 1) << WLFC_MODE_REUSESEQ_SHIFT)) +/** returns TRUE if 'd11 sequence reuse' has been agreed upon between host and dongle */ #define WLFC_GET_REUSESEQ(x) (((x) >> WLFC_MODE_REUSESEQ_SHIFT) & 1) #define WLFC_MODE_REORDERSUPP_SHIFT 4 /* host reorder suppress pkt bit */ #define WLFC_SET_REORDERSUPP(x, val) ((x) = \ ((x) & ~(1 << WLFC_MODE_REORDERSUPP_SHIFT)) | \ (((val) & 1) << WLFC_MODE_REORDERSUPP_SHIFT)) +/** returns TRUE if 'reorder suppress' has been agreed upon between host and dongle */ #define WLFC_GET_REORDERSUPP(x) (((x) >> WLFC_MODE_REORDERSUPP_SHIFT) & 1) +#define FLOW_RING_CREATE 1 +#define FLOW_RING_DELETE 2 +#define FLOW_RING_FLUSH 3 +#define FLOW_RING_OPEN 4 +#define FLOW_RING_CLOSED 5 +#define FLOW_RING_FLUSHED 6 +#define FLOW_RING_TIM_SET 7 +#define FLOW_RING_TIM_RESET 8 +#define FLOW_RING_FLUSH_TXFIFO 9 + +/* bit 7, indicating if is TID(1) or AC(0) mapped info in tid field) 
*/ +#define PCIEDEV_IS_AC_TID_MAP_MASK 0x80 + #endif /* __wlfc_proto_definitions_h__ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/wlioctl.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/wlioctl.h index fb58ec2ec058..1e6a3a280e08 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/wlioctl.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/wlioctl.h @@ -6,7 +6,7 @@ * * Definitions subject to change without notice. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -27,32 +27,31 @@ * other than the GPL, without Broadcom's express prior written consent. * <> * - * $Id: wlioctl.h 609280 2016-01-01 06:31:38Z $ + * $Id: wlioctl.h 677952 2017-01-05 23:25:28Z $ */ #ifndef _wlioctl_h_ #define _wlioctl_h_ #include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include <802.11.h> +#include <802.11s.h> +#include <802.1d.h> #include #include -#include -#include +#include +#include #include #include - - - typedef struct { uint32 num; chanspec_t list[1]; @@ -60,13 +59,12 @@ typedef struct { #define RSN_KCK_LENGTH 16 #define RSN_KEK_LENGTH 16 - - +#define TPK_FTM_LEN 16 #ifndef INTF_NAME_SIZ #define INTF_NAME_SIZ 16 #endif -/* Used to send ioctls over the transport pipe */ +/**Used to send ioctls over the transport pipe */ typedef struct remote_ioctl { cdc_ioctl_t msg; uint32 data_len; @@ -74,8 +72,11 @@ typedef struct remote_ioctl { } rem_ioctl_t; #define REMOTE_SIZE sizeof(rem_ioctl_t) +#define BCM_IOV_XTLV_VERSION 0 -/* DFS Forced param */ +#define MAX_NUM_D11CORES 2 + +/**DFS Forced param */ typedef struct wl_dfs_forced_params { chanspec_t chspec; uint16 version; @@ -84,16 +85,21 @@ typedef struct wl_dfs_forced_params { 
#define DFS_PREFCHANLIST_VER 0x01 #define WL_CHSPEC_LIST_FIXED_SIZE OFFSETOF(chanspec_list_t, list) +/* size of dfs forced param size given n channels are in the list */ +#define WL_DFS_FORCED_PARAMS_SIZE(n) \ + (sizeof(wl_dfs_forced_t) + (((n) < 1) ? (0) : (((n) - 1)* sizeof(chanspec_t)))) #define WL_DFS_FORCED_PARAMS_FIXED_SIZE \ (WL_CHSPEC_LIST_FIXED_SIZE + OFFSETOF(wl_dfs_forced_t, chspec_list)) #define WL_DFS_FORCED_PARAMS_MAX_SIZE \ WL_DFS_FORCED_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(chanspec_t)) -/* association decision information */ +/**association decision information */ typedef struct { - bool assoc_approved; /**< (re)association approved */ + uint8 assoc_approved; /**< (re)association approved */ + uint8 pad; uint16 reject_reason; /**< reason code for rejecting association */ struct ether_addr da; + uint8 pad1[6]; int64 sys_time; /**< current system time */ } assoc_decision_t; @@ -124,9 +130,10 @@ typedef struct ssid_info } ssid_info_t; typedef struct wl_af_params { - uint32 channel; - int32 dwell_time; - struct ether_addr BSSID; + uint32 channel; + int32 dwell_time; + struct ether_addr BSSID; + uint8 PAD[2]; wl_action_frame_t action_frame; } wl_af_params_t; @@ -135,16 +142,31 @@ typedef struct wl_af_params { #define MFP_TEST_FLAG_NORMAL 0 #define MFP_TEST_FLAG_ANY_KEY 1 typedef struct wl_sa_query { - uint32 flag; - uint8 action; - uint16 id; - struct ether_addr da; + uint32 flag; + uint8 action; + uint8 PAD; + uint16 id; + struct ether_addr da; + uint16 PAD; } wl_sa_query_t; -/* require default structure packing */ -#define BWL_DEFAULT_PACKING -#include - +/* EXT_STA */ +/**association information */ +typedef struct { + uint32 assoc_req; /**< offset to association request frame */ + uint32 assoc_req_len; /**< association request frame length */ + uint32 assoc_rsp; /**< offset to association response frame */ + uint32 assoc_rsp_len; /**< association response frame length */ + uint32 bcn; /**< offset to AP beacon */ + uint32 bcn_len; /**< AP 
beacon length */ + uint32 wsec; /**< ucast security algo */ + uint32 wpaie; /**< offset to WPA ie */ + uint8 auth_alg; /**< 802.11 authentication mode */ + uint8 WPA_auth; /**< WPA: authenticated key management */ + uint8 ewc_cap; /**< EWC (MIMO) capable */ + uint8 ofdm; /**< OFDM */ +} assoc_info_t; +/* defined(EXT_STA) */ /* Flags for OBSS IOVAR Parameters */ #define WL_OBSS_DYN_BWSW_FLAG_ACTIVITY_PERIOD (0x01) @@ -157,12 +179,14 @@ typedef struct wl_sa_query { /* OBSS IOVAR Version information */ #define WL_PROT_OBSS_CONFIG_PARAMS_VERSION 1 + +#include typedef BWL_PRE_PACKED_STRUCT struct { - uint8 obss_bwsw_activity_cfm_count_cfg; /* configurable count in + uint8 obss_bwsw_activity_cfm_count_cfg; /**< configurable count in * seconds before we confirm that OBSS is present and * dynamically activate dynamic bwswitch. */ - uint8 obss_bwsw_no_activity_cfm_count_cfg; /* configurable count in + uint8 obss_bwsw_no_activity_cfm_count_cfg; /**< configurable count in * seconds before we confirm that OBSS is GONE and * dynamically start pseudo upgrade. If in pseudo sense time, we * will see OBSS, [means that, we false detected that OBSS-is-gone @@ -173,48 +197,65 @@ typedef BWL_PRE_PACKED_STRUCT struct { */ uint8 obss_bwsw_no_activity_cfm_count_incr_cfg; /* see above */ - uint16 obss_bwsw_pseudo_sense_count_cfg; /* number of msecs/cnt to be in + uint16 obss_bwsw_pseudo_sense_count_cfg; /**< number of msecs/cnt to be in * pseudo state. This is used to sense/measure the stats from lq. 
*/ - uint8 obss_bwsw_rx_crs_threshold_cfg; /* RX CRS default threshold */ - uint8 obss_bwsw_dur_thres; /* OBSS dyn bwsw trigger/RX CRS Sec */ - uint8 obss_bwsw_txop_threshold_cfg; /* TXOP default threshold */ -} BWL_POST_PACKED_STRUCT wlc_prot_dynbwsw_config_t; + uint8 obss_bwsw_rx_crs_threshold_cfg; /**< RX CRS default threshold */ + uint8 obss_bwsw_dur_thres; /**< OBSS dyn bwsw trigger/RX CRS Sec */ + uint8 obss_bwsw_txop_threshold_cfg; /**< TXOP default threshold */ +} BWL_POST_PACKED_STRUCT wlc_obss_dynbwsw_config_t; +#include +#include typedef BWL_PRE_PACKED_STRUCT struct { uint32 version; /**< version field */ uint32 config_mask; uint32 reset_mask; - wlc_prot_dynbwsw_config_t config_params; + wlc_obss_dynbwsw_config_t config_params; } BWL_POST_PACKED_STRUCT obss_config_params_t; +#include - -/* bsscfg type */ -typedef enum bsscfg_type_t { - BSSCFG_TYPE_GENERIC = 0, /**< default */ - BSSCFG_TYPE_P2P = 1, /**< The BSS is for p2p link */ - BSSCFG_TYPE_BTA = 2, +/**bsscfg type */ +typedef enum bsscfg_type { + BSSCFG_TYPE_GENERIC = 0, /**< Generic AP/STA/IBSS BSS */ + BSSCFG_TYPE_P2P = 1, /**< P2P BSS */ + /* index 2 earlier used for BTAMP */ + BSSCFG_TYPE_PSTA = 3, BSSCFG_TYPE_TDLS = 4, - BSSCFG_TYPE_AWDL = 5, + BSSCFG_TYPE_SLOTTED_BSS = 5, BSSCFG_TYPE_PROXD = 6, BSSCFG_TYPE_NAN = 7, - BSSCFG_TYPE_MAX + BSSCFG_TYPE_MESH = 8, + BSSCFG_TYPE_AIBSS = 9 } bsscfg_type_t; /* bsscfg subtype */ -enum { - BSSCFG_GENERIC_STA = 1, /* GENERIC */ - BSSCFG_GENERIC_AP = 2, /* GENERIC */ - BSSCFG_P2P_GC = 3, /* P2P */ - BSSCFG_P2P_GO = 4, /* P2P */ - BSSCFG_P2P_DISC = 5, /* P2P */ -}; +typedef enum bsscfg_subtype { + BSSCFG_SUBTYPE_NONE = 0, + BSSCFG_GENERIC_STA = 1, /* GENERIC */ + BSSCFG_GENERIC_AP = 2, + BSSCFG_GENERIC_IBSS = 6, + BSSCFG_P2P_GC = 3, /* P2P */ + BSSCFG_P2P_GO = 4, + BSSCFG_P2P_DISC = 5, + /* Index 7 & 8 earlier used for BTAMP */ + BSSCFG_SUBTYPE_AWDL = 9, /* SLOTTED_BSS_TYPE */ + BSSCFG_SUBTYPE_NAN_MGMT = 10, + BSSCFG_SUBTYPE_NAN_DATA = 11, + 
BSSCFG_SUBTYPE_NAN_MGMT_DATA = 12 +} bsscfg_subtype_t; typedef struct wlc_bsscfg_info { uint32 type; uint32 subtype; } wlc_bsscfg_info_t; +/* ULP SHM Offsets info */ +typedef struct ulp_shm_info { + uint32 m_ulp_ctrl_sdio; + uint32 m_ulp_wakeevt_ind; + uint32 m_ulp_wakeind; +} ulp_shm_info_t; /* Legacy structure to help keep backward compatible wl tool and tray app */ @@ -231,15 +272,19 @@ typedef struct wl_bss_info_107 { uint16 capability; /**< Capability information */ uint8 SSID_len; uint8 SSID[32]; + uint8 PAD; struct { - uint count; /**< # rates in this set */ + uint32 count; /**< # rates in this set */ uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */ } rateset; /**< supported rates */ uint8 channel; /**< Channel no. */ + uint8 PAD; uint16 atim_window; /**< units are Kusec */ uint8 dtim_period; /**< DTIM period */ + uint8 PAD; int16 RSSI; /**< receive signal strength (in dBm) */ int8 phy_noise; /**< noise (in dBm) */ + uint8 PAD[3]; uint32 ie_length; /**< byte length of Information Elements */ /* variable length Information Elements */ } wl_bss_info_107_t; @@ -250,7 +295,8 @@ typedef struct wl_bss_info_107 { #define LEGACY2_WL_BSS_INFO_VERSION 108 /**< old version of wl_bss_info struct */ -/* BSS info structure +/** + * BSS info structure * Applications MUST CHECK ie_offset field and length field to access IEs and * next bss_info structure in a vector (in wl_scan_results_t) */ @@ -264,33 +310,40 @@ typedef struct wl_bss_info_108 { uint16 capability; /**< Capability information */ uint8 SSID_len; uint8 SSID[32]; + uint8 PAD[1]; struct { - uint count; /**< # rates in this set */ + uint32 count; /**< # rates in this set */ uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */ } rateset; /**< supported rates */ chanspec_t chanspec; /**< chanspec for bss */ uint16 atim_window; /**< units are Kusec */ uint8 dtim_period; /**< DTIM period */ + uint8 PAD; int16 RSSI; /**< receive signal strength (in dBm) */ int8 phy_noise; /**< 
noise (in dBm) */ uint8 n_cap; /**< BSS is 802.11N Capable */ + uint8 PAD[2]; uint32 nbss_cap; /**< 802.11N BSS Capabilities (based on HT_CAP_*) */ uint8 ctl_ch; /**< 802.11N BSS control channel number */ + uint8 PAD[3]; uint32 reserved32[1]; /**< Reserved for expansion of BSS properties */ uint8 flags; /**< flags */ uint8 reserved[3]; /**< Reserved for expansion of BSS properties */ uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */ uint16 ie_offset; /**< offset at which IEs start, from beginning */ + uint8 PAD[2]; uint32 ie_length; /**< byte length of Information Elements */ /* Add new fields here */ /* variable length Information Elements */ } wl_bss_info_108_t; + #define WL_BSS_INFO_VERSION 109 /**< current version of wl_bss_info struct */ -/* BSS info structure +/** + * BSS info structure * Applications MUST CHECK ie_offset field and length field to access IEs and * next bss_info structure in a vector (in wl_scan_results_t) */ @@ -304,17 +357,19 @@ typedef struct wl_bss_info { uint16 capability; /**< Capability information */ uint8 SSID_len; uint8 SSID[32]; + uint8 bcnflags; /* additional flags w.r.t. 
beacon */ struct { - uint count; /**< # rates in this set */ + uint32 count; /**< # rates in this set */ uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */ } rateset; /**< supported rates */ chanspec_t chanspec; /**< chanspec for bss */ uint16 atim_window; /**< units are Kusec */ uint8 dtim_period; /**< DTIM period */ + uint8 accessnet; /* from beacon interwork IE (if bcnflags) */ int16 RSSI; /**< receive signal strength (in dBm) */ int8 phy_noise; /**< noise (in dBm) */ - uint8 n_cap; /**< BSS is 802.11N Capable */ + uint16 freespace1; /* make implicit padding explicit */ uint32 nbss_cap; /**< 802.11N+AC BSS Capabilities */ uint8 ctl_ch; /**< 802.11N BSS control channel number */ uint8 padding1[3]; /**< explicit struct alignment padding */ @@ -326,6 +381,7 @@ typedef struct wl_bss_info { uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */ uint16 ie_offset; /**< offset at which IEs start, from beginning */ + uint16 freespace2; /* making implicit padding explicit */ uint32 ie_length; /**< byte length of Information Elements */ int16 SNR; /**< average SNR of during frame reception */ uint16 vht_mcsmap; /**< STA's Associated vhtmcsmap */ @@ -335,7 +391,7 @@ typedef struct wl_bss_info { /* variable length Information Elements */ } wl_bss_info_t; -#define WL_GSCAN_BSS_INFO_VERSION 1 /* current version of wl_gscan_bss_info struct */ +#define WL_GSCAN_FULL_RESULT_VERSION 2 /* current version of wl_gscan_result_t struct */ #define WL_GSCAN_INFO_FIXED_FIELD_SIZE (sizeof(wl_gscan_bss_info_t) - sizeof(wl_bss_info_t)) typedef struct wl_gscan_bss_info { @@ -366,6 +422,7 @@ typedef struct wl_if_add { uint32 if_flags; uint32 ap; struct ether_addr mac_addr; + uint16 PAD; uint32 wlc_index; } wl_if_add_t; @@ -379,21 +436,24 @@ typedef struct wl_bss_config { * radar channel. 
*/ -#define DLOAD_HANDLER_VER 1 /**< Downloader version */ +#define DLOAD_HANDLER_VER 1 /**< Downloader version */ #define DLOAD_FLAG_VER_MASK 0xf000 /**< Downloader version mask */ -#define DLOAD_FLAG_VER_SHIFT 12 /**< Downloader version shift */ +#define DLOAD_FLAG_VER_SHIFT 12 /**< Downloader version shift */ #define DL_CRC_NOT_INUSE 0x0001 #define DL_BEGIN 0x0002 #define DL_END 0x0004 -/* generic download types & flags */ +/* Flags for Major/Minor/Date number shift and mask */ +#define EPI_VER_SHIFT 16 +#define EPI_VER_MASK 0xFFFF +/** generic download types & flags */ enum { DL_TYPE_UCODE = 1, DL_TYPE_CLM = 2 }; -/* ucode type values */ +/** ucode type values */ enum { UCODE_FW, INIT_VALS, @@ -428,142 +488,36 @@ struct wl_clm_dload_info { }; typedef struct wl_clm_dload_info wl_clm_dload_info_t; + typedef struct wlc_ssid { uint32 SSID_len; - uchar SSID[DOT11_MAX_SSID_LEN]; + uint8 SSID[DOT11_MAX_SSID_LEN]; } wlc_ssid_t; typedef struct wlc_ssid_ext { - bool hidden; - uint32 SSID_len; - uchar SSID[DOT11_MAX_SSID_LEN]; + uint8 hidden; + uint8 PAD; + uint16 flags; + uint8 SSID_len; + int8 rssi_thresh; + uint8 SSID[DOT11_MAX_SSID_LEN]; } wlc_ssid_ext_t; - #define MAX_PREFERRED_AP_NUM 5 typedef struct wlc_fastssidinfo { - uint32 SSID_channel[MAX_PREFERRED_AP_NUM]; + uint32 SSID_channel[MAX_PREFERRED_AP_NUM]; wlc_ssid_t SSID_info[MAX_PREFERRED_AP_NUM]; } wlc_fastssidinfo_t; -#ifdef CUSTOMER_HW_31_1 - -#define AP_NORM 0 -#define AP_STEALTH 1 -#define STREET_PASS_AP 2 - -#define NSC_MAX_TGT_SSID 20 -typedef struct nsc_ssid_entry_list { - wlc_ssid_t ssid_info; - int ssid_type; -} nsc_ssid_entry_list_t; - -typedef struct nsc_ssid_list { - uint32 num_entries; /* N wants 150 */ - nsc_ssid_entry_list_t ssid_entry[1]; -} nsc_ssid_list_t; - -#define NSC_TGT_SSID_BUFSZ (sizeof(nsc_ssid_entry_list_t) * \ - (NSC_MAX_TGT_SSID - 1) + sizeof(nsc_ssid_list_t)) - -/* Default values from N */ -#define NSC_SCPATT_ARRSZ 32 - -/* scan types */ -#define UNI_SCAN 0 -#define SP_SCAN_ACTIVE 1 
-#define SP_SCAN_PASSIVE 2 -#define DOZE 3 - -/* what we found */ -typedef struct nsc_scan_results { - wlc_ssid_t ssid; - struct ether_addr mac; - int scantype; - uint16 channel; -} nsc_scan_results_t; - -typedef BWL_PRE_PACKED_STRUCT struct nsc_af_body { - uint8 type; /* should be 0x7f */ - uint8 oui[DOT11_OUI_LEN]; /* just like it says */ - uint8 subtype; - uint8 ielen; /* */ - uint8 data[1]; /* variable */ -} BWL_POST_PACKED_STRUCT nsc_af_body_t; - -typedef BWL_PRE_PACKED_STRUCT struct nsc_sdlist { - uint8 scantype; - uint16 duration; - uint16 channel; /* SP only */ - uint8 ssid_index; /* SP only */ - uint16 rate; /* SP only */ -} BWL_POST_PACKED_STRUCT nsc_sdlist_t; - -typedef struct nsc_scandes { - uint32 num_entries; /* number of list entries */ - nsc_sdlist_t sdlist[1]; /* variable */ -} nsc_scandes_t; - -#define NSC_MAX_SDLIST_ENTRIES 8 -#define NSC_SDDESC_BUFSZ (sizeof(nsc_sdlist_t) * \ - (NSC_MAX_SDLIST_ENTRIES - 1) + sizeof(nsc_scandes_t)) - -#define SCAN_ARR_END (NSC_MAX_SDLIST_ENTRIES) -#endif /* CUSTOMER_HW_31_1 */ - -typedef BWL_PRE_PACKED_STRUCT struct wnm_url { +typedef struct wnm_url { uint8 len; uint8 data[1]; -} BWL_POST_PACKED_STRUCT wnm_url_t; - -#define WNM_BSS_SELECT_TYPE_RSSI 0 -#define WNM_BSS_SELECT_TYPE_CU 1 - -#define WNM_BSSLOAD_MONITOR_VERSION 1 -typedef struct wnm_bssload_monitor_cfg { - uint8 version; - uint8 band; - uint8 duration; /* duration between 1 to 20sec */ -} wnm_bssload_monitor_cfg_t; - -#define BSS_MAXTABLE_SIZE 10 -#define WNM_BSS_SELECT_FACTOR_VERSION 1 -typedef struct wnm_bss_select_factor_params { - uint8 low; - uint8 high; - uint8 factor; - uint8 pad; -} wnm_bss_select_factor_params_t; - -typedef struct wnm_bss_select_factor_cfg { - uint8 version; - uint8 band; - uint16 type; - uint16 pad; - uint16 count; - wnm_bss_select_factor_params_t params[1]; -} wnm_bss_select_factor_cfg_t; - -#define WNM_BSS_SELECT_WEIGHT_VERSION 1 -typedef struct wnm_bss_select_weight_cfg { - uint8 version; - uint8 band; - uint16 type; - 
uint16 weight; /* weightage for each type between 0 to 100 */ -} wnm_bss_select_weight_cfg_t; - -#define WNM_ROAM_TRIGGER_VERSION 1 -typedef struct wnm_roam_trigger_cfg { - uint8 version; - uint8 band; - uint16 type; - int16 trigger; /* trigger for each type in new roam algorithm */ -} wnm_roam_trigger_cfg_t; +} wnm_url_t; typedef struct chan_scandata { uint8 txpower; uint8 pad; - chanspec_t channel; /**< Channel num, bw, ctrl_sb and band */ + chanspec_t channel; /**< Channel num, bw, ctrl_sb and band */ uint32 channel_mintime; uint32 channel_maxtime; } chan_scandata_t; @@ -581,7 +535,7 @@ typedef struct wl_extdscan_params { int8 split_scan; /**< split scan */ int8 band; /**< band */ int8 pad; - wlc_ssid_t ssid[WLC_EXTDSCAN_MAX_SSID]; /* ssid list */ + wlc_ssid_t ssid[WLC_EXTDSCAN_MAX_SSID]; /**< ssid list */ uint32 tx_rate; /**< in 500ksec units */ wl_scan_type_t scan_type; /**< enum */ int32 channel_num; @@ -626,13 +580,13 @@ typedef struct wl_scan_params { uint16 channel_list[1]; /**< list of chanspecs */ } wl_scan_params_t; -/* size of wl_scan_params not including variable length array */ +/** size of wl_scan_params not including variable length array */ #define WL_SCAN_PARAMS_FIXED_SIZE 64 #define WL_MAX_ROAMSCAN_DATSZ (WL_SCAN_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(uint16))) #define ISCAN_REQ_VERSION 1 -/* incremental scan struct */ +/** incremental scan struct */ typedef struct wl_iscan_params { uint32 version; uint16 action; @@ -640,7 +594,7 @@ typedef struct wl_iscan_params { wl_scan_params_t params; } wl_iscan_params_t; -/* 3 fields + size of wl_scan_params, not including variable length array */ +/** 3 fields + size of wl_scan_params, not including variable length array */ #define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t)) typedef struct wl_scan_results { @@ -650,10 +604,8 @@ typedef struct wl_scan_results { wl_bss_info_t bss_info[1]; } wl_scan_results_t; -/* size of wl_scan_results not including variable 
length array */ +/** size of wl_scan_results not including variable length array */ #define WL_SCAN_RESULTS_FIXED_SIZE (sizeof(wl_scan_results_t) - sizeof(wl_bss_info_t)) - - #define ESCAN_REQ_VERSION 1 /** event scan reduces amount of SOC memory needed to store scan results */ @@ -676,49 +628,24 @@ typedef struct wl_escan_result { } wl_escan_result_t; #define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(wl_escan_result_t) - sizeof(wl_bss_info_t)) - typedef struct wl_gscan_result { uint32 buflen; uint32 version; + uint32 scan_ch_bucket; wl_gscan_bss_info_t bss_info[1]; } wl_gscan_result_t; #define WL_GSCAN_RESULTS_FIXED_SIZE (sizeof(wl_gscan_result_t) - sizeof(wl_gscan_bss_info_t)) - -/* incremental scan results struct */ +/** incremental scan results struct */ typedef struct wl_iscan_results { uint32 status; wl_scan_results_t results; } wl_iscan_results_t; -/* size of wl_iscan_results not including variable length array */ +/** size of wl_iscan_results not including variable length array */ #define WL_ISCAN_RESULTS_FIXED_SIZE \ (WL_SCAN_RESULTS_FIXED_SIZE + OFFSETOF(wl_iscan_results_t, results)) -#define SCANOL_PARAMS_VERSION 1 - -typedef struct scanol_params { - uint32 version; - uint32 flags; /**< offload scanning flags */ - int32 active_time; /**< -1 use default, dwell time per channel for active scanning */ - int32 passive_time; /**< -1 use default, dwell time per channel for passive scanning */ - int32 idle_rest_time; /**< -1 use default, time idle between scan cycle */ - int32 idle_rest_time_multiplier; - int32 active_rest_time; - int32 active_rest_time_multiplier; - int32 scan_cycle_idle_rest_time; - int32 scan_cycle_idle_rest_multiplier; - int32 scan_cycle_active_rest_time; - int32 scan_cycle_active_rest_multiplier; - int32 max_rest_time; - int32 max_scan_cycles; - int32 nprobes; /**< -1 use default, number of probes per channel */ - int32 scan_start_delay; - uint32 nchannels; - uint32 ssid_count; - wlc_ssid_t ssidlist[1]; -} scanol_params_t; - typedef struct 
wl_probe_params { wlc_ssid_t ssid; struct ether_addr bssid; @@ -727,15 +654,15 @@ typedef struct wl_probe_params { #define WL_MAXRATES_IN_SET 16 /**< max # of rates in a rateset */ typedef struct wl_rateset { - uint32 count; /**< # rates in this set */ + uint32 count; /**< # rates in this set */ uint8 rates[WL_MAXRATES_IN_SET]; /**< rates in 500kbps units w/hi bit set if basic */ } wl_rateset_t; typedef struct wl_rateset_args { - uint32 count; /**< # rates in this set */ + uint32 count; /**< # rates in this set */ uint8 rates[WL_MAXRATES_IN_SET]; /**< rates in 500kbps units w/hi bit set if basic */ - uint8 mcs[MCSSET_LEN]; /* supported mcs index bit map */ - uint16 vht_mcs[VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */ + uint8 mcs[MCSSET_LEN]; /**< supported mcs index bit map */ + uint16 vht_mcs[VHT_CAP_MCS_MAP_NSS_MAX]; /**< supported mcs index bit map per nss */ } wl_rateset_args_t; #define TXBF_RATE_MCS_ALL 4 @@ -753,6 +680,26 @@ typedef struct wl_txbf_rateset { uint8 txbf_rate_ofdm_cnt_bcm; } wl_txbf_rateset_t; +#define NUM_BFGAIN_ARRAY_1RX 2 +#define NUM_BFGAIN_ARRAY_2RX 3 +#define NUM_BFGAIN_ARRAY_3RX 4 +#define NUM_BFGAIN_ARRAY_4RX 5 + +typedef struct wl_txbf_expgainset { + /* bitmap for each element: B[4:0]=>c0, B[9:5]=>c1, B[14:10]=>c2, B[19:15]=>c[3-7] + * B[24:20]=>c[8-9], B[29:25]=>c[10-11] + */ + uint32 bfgain_2x1[NUM_BFGAIN_ARRAY_1RX]; /* exp 1ss, imp 1ss */ + uint32 bfgain_2x2[NUM_BFGAIN_ARRAY_2RX]; /* exp [1-2]ss, imp 1ss */ + uint32 bfgain_3x1[NUM_BFGAIN_ARRAY_1RX]; + uint32 bfgain_3x2[NUM_BFGAIN_ARRAY_2RX]; + uint32 bfgain_3x3[NUM_BFGAIN_ARRAY_3RX]; /* exp [1-3]ss, imp 1ss */ + uint32 bfgain_4x1[NUM_BFGAIN_ARRAY_1RX]; + uint32 bfgain_4x2[NUM_BFGAIN_ARRAY_2RX]; + uint32 bfgain_4x3[NUM_BFGAIN_ARRAY_3RX]; + uint32 bfgain_4x4[NUM_BFGAIN_ARRAY_4RX]; /* exp [1-4]ss, imp 1ss */ +} wl_txbf_expgainset_t; + #define OFDM_RATE_MASK 0x0000007f typedef uint8 ofdm_rates_t; @@ -765,17 +712,23 @@ typedef struct wl_rates_info { uint8 mcsallow; 
uint8 bw; uint8 txstreams; + uint8 PAD[3]; } wl_rates_info_t; -/* uint32 list */ +/**uint32 list */ typedef struct wl_uint32_list { - /* in - # of elements, out - # of entries */ + /** in - # of elements, out - # of entries */ uint32 count; - /* variable length uint32 list */ + /** variable length uint32 list */ uint32 element[1]; } wl_uint32_list_t; -/* used for association with a specific BSSID and chanspec list */ +/* WLC_SET_ALLOW_MODE values */ +#define ALLOW_MODE_ANY_BSSID 0 +#define ALLOW_MODE_ONLY_DESIRED_BSSID 1 +#define ALLOW_MODE_NO_BSSID 2 + +/** used for association with a specific BSSID and chanspec list */ typedef struct wl_assoc_params { struct ether_addr bssid; /**< 00:00:00:00:00:00: broadcast scan */ uint16 bssid_cnt; /**< 0: use chanspec_num, and the single bssid, @@ -792,15 +745,15 @@ typedef struct wl_assoc_params { #define WL_ASSOC_PARAMS_FIXED_SIZE OFFSETOF(wl_assoc_params_t, chanspec_list) -/* used for reassociation/roam to a specific BSSID and channel */ +/** used for reassociation/roam to a specific BSSID and channel */ typedef wl_assoc_params_t wl_reassoc_params_t; #define WL_REASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE -/* used for association to a specific BSSID and channel */ +/** used for association to a specific BSSID and channel */ typedef wl_assoc_params_t wl_join_assoc_params_t; #define WL_JOIN_ASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE -/* used for join with or without a specific bssid and channel list */ +/** used for join with or without a specific bssid and channel list */ typedef struct wl_join_params { wlc_ssid_t ssid; wl_assoc_params_t params; /**< optional field, but it must include the fixed portion @@ -810,9 +763,77 @@ typedef struct wl_join_params { #define WL_JOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_join_params_t, params) + \ WL_ASSOC_PARAMS_FIXED_SIZE) -/* scan params for extended join */ + +typedef struct wlc_roam_exp_params { + int8 a_band_boost_threshold; + int8 a_band_penalty_threshold; + int8 
a_band_boost_factor; + int8 a_band_penalty_factor; + int8 cur_bssid_boost; + int8 alert_roam_trigger_threshold; + int16 a_band_max_boost; +} wlc_roam_exp_params_t; + +#define ROAM_EXP_CFG_VERSION 1 + +#define ROAM_EXP_ENABLE_FLAG (1 << 0) + +#define ROAM_EXP_CFG_PRESENT (1 << 1) + +typedef struct wl_roam_exp_cfg { + uint16 version; + uint16 flags; + wlc_roam_exp_params_t params; +} wl_roam_exp_cfg_t; + +typedef struct wl_bssid_pref_list { + struct ether_addr bssid; + /* Add this to modify rssi */ + int8 rssi_factor; + int8 flags; +} wl_bssid_pref_list_t; + +#define BSSID_PREF_LIST_VERSION 1 +#define ROAM_EXP_CLEAR_BSSID_PREF (1 << 0) + +typedef struct wl_bssid_pref_cfg { + uint16 version; + uint16 flags; + uint16 count; + uint16 reserved; + wl_bssid_pref_list_t bssids[]; +} wl_bssid_pref_cfg_t; + +#define SSID_WHITELIST_VERSION 1 + +#define ROAM_EXP_CLEAR_SSID_WHITELIST (1 << 0) + +/* Roam SSID whitelist, ssids in this list are ok to */ +/* be considered as targets to join when considering a roam */ + +typedef struct wl_ssid_whitelist { + + uint16 version; + uint16 flags; + + uint8 ssid_count; + uint8 reserved[3]; + wlc_ssid_t ssids[]; +} wl_ssid_whitelist_t; + +#define ROAM_EXP_EVENT_VERSION 1 + +typedef struct wl_roam_exp_event { + + uint16 version; + uint16 flags; + wlc_ssid_t cur_ssid; +} wl_roam_exp_event_t; + +/** scan params for extended join */ typedef struct wl_join_scan_params { uint8 scan_type; /**< 0 use default, active or passive scan */ + uint8 PAD[3]; int32 nprobes; /**< -1 use default, number of probes per channel */ int32 active_time; /**< -1 use default, dwell time per channel for * active scanning @@ -825,7 +846,7 @@ typedef struct wl_join_scan_params { */ } wl_join_scan_params_t; -/* extended join params */ +/** extended join params */ typedef struct wl_extjoin_params { wlc_ssid_t ssid; /**< {0, ""}: wildcard scan */ wl_join_scan_params_t scan; @@ -841,7 +862,7 @@ typedef struct wl_extjoin_params { #define MAX_STREAMS_SUPPORTED 4 /**< max number 
of streams supported */ typedef struct { uint8 ant_config[ANT_SELCFG_MAX]; /**< antenna configuration */ - uint8 num_antcfg; /**< number of available antenna configurations */ + uint8 num_antcfg; /**< number of available antenna configurations */ } wlc_antselcfg_t; typedef struct { @@ -858,7 +879,6 @@ typedef struct { uint16 num_secs; /**< How many secs worth of data */ cca_congest_t secs[1]; /**< Data */ } cca_congest_channel_req_t; - typedef struct { uint32 duration; /**< millisecs spent sampling this channel */ uint32 congest; /**< millisecs detecting busy CCA */ @@ -868,39 +888,50 @@ typedef struct { typedef struct { uint16 status; uint16 id; - chanspec_t chanspec; /**< Which channel? */ + chanspec_t chanspec; /**< Which channel? */ uint16 len; union { cca_congest_simple_t cca_busy; /**< CCA busy */ - int noise; /**< noise floor */ + int32 noise; /**< noise floor */ }; } cca_chan_qual_event_t; +typedef struct { + uint32 msrmnt_time; /**< Time for Measurement (msec) */ + uint32 msrmnt_done; /**< flag set when measurement complete */ + char buf[]; +} cca_stats_n_flags; + +typedef struct { + uint32 msrmnt_query; /* host to driver query for measurement done */ + uint32 time_req; /* time required for measurement */ + uint8 report_opt; /* option to print different stats in report */ + uint8 PAD[3]; +} cca_msrmnt_query; /* interference sources */ enum interference_source { - ITFR_NONE = 0, /**< interference */ - ITFR_PHONE, /**< wireless phone */ - ITFR_VIDEO_CAMERA, /**< wireless video camera */ - ITFR_MICROWAVE_OVEN, /**< microwave oven */ - ITFR_BABY_MONITOR, /**< wireless baby monitor */ - ITFR_BLUETOOTH, /**< bluetooth */ + ITFR_NONE = 0, /**< interference */ + ITFR_PHONE, /**< wireless phone */ + ITFR_VIDEO_CAMERA, /**< wireless video camera */ + ITFR_MICROWAVE_OVEN, /**< microwave oven */ + ITFR_BABY_MONITOR, /**< wireless baby monitor */ + ITFR_BLUETOOTH, /**< bluetooth */ ITFR_VIDEO_CAMERA_OR_BABY_MONITOR, /**< wireless camera or baby monitor */ 
ITFR_BLUETOOTH_OR_BABY_MONITOR, /**< bluetooth or baby monitor */ ITFR_VIDEO_CAMERA_OR_PHONE, /**< video camera or phone */ - ITFR_UNIDENTIFIED /**< interference from unidentified source */ + ITFR_UNIDENTIFIED /**< interference from unidentified source */ }; -/* structure for interference source report */ +/** structure for interference source report */ typedef struct { - uint32 flags; /**< flags. bit definitions below */ - uint32 source; /**< last detected interference source */ + uint32 flags; /**< flags. bit definitions below */ + uint32 source; /**< last detected interference source */ uint32 timestamp; /**< second timestamp on interferenced flag change */ } interference_source_rep_t; #define WLC_CNTRY_BUF_SZ 4 /**< Country string is 3 bytes + NUL */ - typedef struct wl_country { char country_abbrev[WLC_CNTRY_BUF_SZ]; /**< nul-terminated country code used in * the Country IE @@ -916,6 +947,7 @@ typedef struct wl_country { */ } wl_country_t; + #define CCODE_INFO_VERSION 1 typedef enum wl_ccode_role { @@ -937,11 +969,10 @@ typedef struct wl_ccode_entry { typedef struct wl_ccode_info { uint16 version; - uint16 count; /* Number of ccodes entries in the set */ + uint16 count; /**< Number of ccodes entries in the set */ wl_ccode_entry_t ccodelist[1]; } wl_ccode_info_t; #define WL_CCODE_INFO_FIXED_LEN OFFSETOF(wl_ccode_info_t, ccodelist) - typedef struct wl_channels_in_country { uint32 buflen; uint32 band; @@ -1002,8 +1033,6 @@ typedef struct wl_rm_rep { wl_rm_rep_elt_t rep[1]; /**< variable length block of reports */ } wl_rm_rep_t; #define WL_RM_REP_FIXED_LEN 8 - - typedef enum sup_auth_status { /* Basic supplicant authentication states */ WLC_SUP_DISCONNECTED = 0, @@ -1017,9 +1046,9 @@ typedef enum sup_auth_status { WLC_SUP_LAST_BASIC_STATE, /* Extended supplicant authentication states */ - /* Waiting to receive handshake msg M1 */ + /** Waiting to receive handshake msg M1 */ WLC_SUP_KEYXCHANGE_WAIT_M1 = WLC_SUP_AUTHENTICATED, - /* Preparing to send handshake msg M2 
*/ + /** Preparing to send handshake msg M2 */ WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE, /* Waiting to receive handshake msg M3 */ WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE, @@ -1036,26 +1065,28 @@ typedef struct wl_wsec_key { uint32 algo; /**< CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */ uint32 flags; /**< misc flags */ uint32 pad_2[2]; - int pad_3; - int iv_initialized; /**< has IV been initialized already? */ - int pad_4; + int32 pad_3; + int32 iv_initialized; /**< has IV been initialized already? */ + int32 pad_4; /* Rx IV */ struct { uint32 hi; /**< upper 32 bits of IV */ uint16 lo; /**< lower 16 bits of IV */ + uint16 PAD; } rxiv; uint32 pad_5[2]; struct ether_addr ea; /**< per station */ + uint16 PAD; } wl_wsec_key_t; #define WSEC_MIN_PSK_LEN 8 #define WSEC_MAX_PSK_LEN 64 -/* Flag for key material needing passhash'ing */ +/** Flag for key material needing passhash'ing */ #define WSEC_PASSPHRASE (1<<0) -/* receptacle for WLC_SET_WSEC_PMK parameter */ -typedef struct { +/**receptacle for WLC_SET_WSEC_PMK parameter */ +typedef struct wsec_pmk { ushort key_len; /**< octets in key material */ ushort flags; /**< key handling qualification */ uint8 key[WSEC_MAX_PSK_LEN]; /**< PMK material */ @@ -1088,26 +1119,28 @@ typedef struct wl_assoc_info { uint32 resp_len; uint32 flags; struct dot11_assoc_req req; - struct ether_addr reassoc_bssid; /* used in reassoc's */ + struct ether_addr reassoc_bssid; /**< used in reassoc's */ struct dot11_assoc_resp resp; } wl_assoc_info_t; typedef struct wl_led_info { - uint32 index; /* led index */ + uint32 index; /**< led index */ uint32 behavior; uint8 activehi; + uint8 PAD[3]; } wl_led_info_t; -/* srom read/write struct passed through ioctl */ +/** srom read/write struct passed through ioctl */ typedef struct { - uint byteoff; /**< byte offset */ - uint nbytes; /**< number of bytes */ - uint16 buf[1]; + uint32 byteoff; /**< byte offset */ + uint32 nbytes; /**< number of bytes */ + uint16 buf[]; } srom_rw_t; 
-#define CISH_FLAG_PCIECIS (1 << 15) /* write CIS format bit for PCIe CIS */ -/* similar cis (srom or otp) struct [iovar: may not be aligned] */ +#define CISH_FLAG_PCIECIS (1 << 15) /**< write CIS format bit for PCIe CIS */ + +/** similar cis (srom or otp) struct [iovar: may not be aligned] */ typedef struct { uint16 source; /**< cis source */ uint16 flags; /**< flags */ @@ -1116,16 +1149,18 @@ typedef struct { /* data follows here */ } cis_rw_t; -/* R_REG and W_REG struct passed through ioctl */ +/** R_REG and W_REG struct passed through ioctl */ typedef struct { uint32 byteoff; /**< byte offset of the field in d11regs_t */ uint32 val; /**< read/write value of the field */ uint32 size; /**< sizeof the field */ - uint band; /**< band (optional) */ + uint32 band; /**< band (optional) */ } rw_reg_t; -/* Structure used by GET/SET_ATTEN ioctls - it controls power in b/g-band */ -/* PCL - Power Control Loop */ +/** + * Structure used by GET/SET_ATTEN ioctls - it controls power in b/g-band + * PCL - Power Control Loop + */ typedef struct { uint16 auto_ctrl; /**< WL_ATTEN_XX */ uint16 bb; /**< Baseband attenuation */ @@ -1133,24 +1168,25 @@ typedef struct { uint16 txctl1; /**< Radio TX_CTL1 value */ } atten_t; -/* Per-AC retry parameters */ +/** Per-AC retry parameters */ struct wme_tx_params_s { uint8 short_retry; uint8 short_fallback; uint8 long_retry; uint8 long_fallback; - uint16 max_rate; /* In units of 512 Kbps */ + uint16 max_rate; /**< In units of 512 Kbps */ }; typedef struct wme_tx_params_s wme_tx_params_t; #define WL_WME_TX_PARAMS_IO_BYTES (sizeof(wme_tx_params_t) * AC_COUNT) -/* Used to get specific link/ac parameters */ +/**Used to get specific link/ac parameters */ typedef struct { int32 ac; uint8 val; struct ether_addr ea; + uint8 PAD; } link_val_t; @@ -1161,19 +1197,23 @@ typedef struct wl_pm_mute_tx { uint16 len; /**< length */ uint16 deadline; /**< deadline timer (in milliseconds) */ uint8 enable; /**< set to 1 to enable mode; set to 0 to disable it */ + 
uint8 PAD; } wl_pm_mute_tx_t; +/* sta_info_t version 4 */ typedef struct { uint16 ver; /**< version of this struct */ uint16 len; /**< length in bytes of this structure */ uint16 cap; /**< sta's advertised capabilities */ + uint16 PAD; uint32 flags; /**< flags defined below */ uint32 idle; /**< time since data pkt rx'd from sta */ struct ether_addr ea; /**< Station address */ - wl_rateset_t rateset; /**< rateset in use */ + uint16 PAD; + wl_rateset_t rateset; /**< rateset in use */ uint32 in; /**< seconds elapsed since associated */ - uint32 listen_interval_inms; /* Min Listen interval in ms for this STA */ + uint32 listen_interval_inms; /**< Min Listen interval in ms for this STA */ uint32 tx_pkts; /**< # of user packets transmitted (unicast) */ uint32 tx_failures; /**< # of user packets failed */ uint32 rx_ucast_pkts; /**< # of unicast packets received */ @@ -1191,20 +1231,21 @@ typedef struct { uint64 tx_mcast_bytes; /**< # data bytes txed (mcast) */ uint64 rx_ucast_bytes; /**< data bytes recvd (ucast) */ uint64 rx_mcast_bytes; /**< data bytes recvd (mcast) */ - int8 rssi[WL_STA_ANT_MAX]; /* average rssi per antenna - * of data frames - */ + int8 rssi[WL_STA_ANT_MAX]; /**< average rssi per antenna + * of data frames + */ int8 nf[WL_STA_ANT_MAX]; /**< per antenna noise floor */ - uint16 aid; /**< association ID */ + uint16 aid; /**< association ID */ uint16 ht_capabilities; /**< advertised ht caps */ uint16 vht_flags; /**< converted vht flags */ + uint16 PAD; uint32 tx_pkts_retried; /**< # of frames where a retry was * necessary */ - uint32 tx_pkts_retry_exhausted; /* # of user frames where a retry + uint32 tx_pkts_retry_exhausted; /**< # of user frames where a retry * was exhausted */ - int8 rx_lastpkt_rssi[WL_STA_ANT_MAX]; /* Per antenna RSSI of last + int8 rx_lastpkt_rssi[WL_STA_ANT_MAX]; /**< Per antenna RSSI of last * received data frame. 
*/ /* TX WLAN retry/failure statistics: @@ -1220,13 +1261,96 @@ typedef struct { */ uint32 rx_pkts_retried; /**< # rx with retry bit set */ uint32 tx_rate_fallback; /**< lowest fallback TX rate */ -} sta_info_t; + /* Fields above this line are common to sta_info_t versions 4 and 5 */ + + uint32 rx_dur_total; /* total user RX duration (estimated) */ + + chanspec_t chanspec; /** chanspec this sta is on */ + uint16 PAD; + wl_rateset_args_t rateset_adv; /* rateset along with mcs index bitmap */ + uint32 PAD; +} sta_info_v4_t; + +/* Note: Version 4 is the latest version of sta_info_t. Version 5 is abandoned. + * Please add new fields to version 4, not version 5. + */ +/* sta_info_t version 5 */ +typedef struct { + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + uint16 cap; /**< sta's advertised capabilities */ + uint16 PAD; + uint32 flags; /**< flags defined below */ + uint32 idle; /**< time since data pkt rx'd from sta */ + struct ether_addr ea; /**< Station address */ + uint16 PAD; + wl_rateset_t rateset; /**< rateset in use */ + uint32 in; /**< seconds elapsed since associated */ + uint32 listen_interval_inms; /**< Min Listen interval in ms for this STA */ + uint32 tx_pkts; /**< # of user packets transmitted (unicast) */ + uint32 tx_failures; /**< # of user packets failed */ + uint32 rx_ucast_pkts; /**< # of unicast packets received */ + uint32 rx_mcast_pkts; /**< # of multicast packets received */ + uint32 tx_rate; /**< Rate used by last tx frame */ + uint32 rx_rate; /**< Rate of last successful rx frame */ + uint32 rx_decrypt_succeeds; /**< # of packet decrypted successfully */ + uint32 rx_decrypt_failures; /**< # of packet decrypted unsuccessfully */ + uint32 tx_tot_pkts; /**< # of user tx pkts (ucast + mcast) */ + uint32 rx_tot_pkts; /**< # of data packets recvd (uni + mcast) */ + uint32 tx_mcast_pkts; /**< # of mcast pkts txed */ + uint64 tx_tot_bytes; /**< data bytes txed (ucast + mcast) */ + uint64 
rx_tot_bytes; /**< data bytes recvd (ucast + mcast) */ + uint64 tx_ucast_bytes; /**< data bytes txed (ucast) */ + uint64 tx_mcast_bytes; /**< # data bytes txed (mcast) */ + uint64 rx_ucast_bytes; /**< data bytes recvd (ucast) */ + uint64 rx_mcast_bytes; /**< data bytes recvd (mcast) */ + int8 rssi[WL_STA_ANT_MAX]; /**< average rssi per antenna + * of data frames + */ + int8 nf[WL_STA_ANT_MAX]; /**< per antenna noise floor */ + uint16 aid; /**< association ID */ + uint16 ht_capabilities; /**< advertised ht caps */ + uint16 vht_flags; /**< converted vht flags */ + uint16 PAD; + uint32 tx_pkts_retried; /**< # of frames where a retry was + * necessary + */ + uint32 tx_pkts_retry_exhausted; /**< # of user frames where a retry + * was exhausted + */ + int8 rx_lastpkt_rssi[WL_STA_ANT_MAX]; /**< Per antenna RSSI of last + * received data frame. + */ + /* TX WLAN retry/failure statistics: + * Separated for host requested frames and WLAN locally generated frames. + * Include unicast frame only where the retries/failures can be counted. 
+ */ + uint32 tx_pkts_total; /**< # user frames sent successfully */ + uint32 tx_pkts_retries; /**< # user frames retries */ + uint32 tx_pkts_fw_total; /**< # FW generated sent successfully */ + uint32 tx_pkts_fw_retries; /**< # retries for FW generated frames */ + uint32 tx_pkts_fw_retry_exhausted; /**< # FW generated where a retry + * was exhausted + */ + uint32 rx_pkts_retried; /**< # rx with retry bit set */ + uint32 tx_rate_fallback; /**< lowest fallback TX rate */ + /* Fields above this line are common to sta_info_t versions 4 and 5 */ + + chanspec_t chanspec; /** chanspec this sta is on */ + uint16 PAD; + wl_rateset_args_t rateset_adv; /* rateset along with mcs index bitmap */ +} sta_info_v5_t; #define WL_OLD_STAINFO_SIZE OFFSETOF(sta_info_t, tx_tot_pkts) -#define WL_STA_VER 4 +#define WL_STA_VER_4 4 +#define WL_STA_VER_5 5 +#define WL_STA_VER WL_STA_VER_4 -typedef struct { +#define SWDIV_STATS_VERSION_2 2 +#define SWDIV_STATS_CURRENT_VERSION SWDIV_STATS_VERSION_2 + +struct wlc_swdiv_stats_v1 { uint32 auto_en; uint32 active_ant; uint32 rxcount; @@ -1253,51 +1377,86 @@ typedef struct { uint32 rx_policy; uint32 tx_policy; uint32 cell_policy; -} wlc_swdiv_stats_t; + uint32 swap_snrdrop0; + uint32 swap_snrdrop1; + uint32 mws_antsel_ovr_tx; + uint32 mws_antsel_ovr_rx; + uint8 swap_trig_event_id; +}; + +struct wlc_swdiv_stats_v2 { + uint16 version; /* version of the structure + * as defined by SWDIV_STATS_CURRENT_VERSION + */ + uint16 length; /* length of the entire structure */ + uint32 auto_en; + uint32 active_ant; + uint32 rxcount; + int32 avg_snr_per_ant0; + int32 avg_snr_per_ant1; + int32 avg_snr_per_ant2; + uint32 swap_ge_rxcount0; + uint32 swap_ge_rxcount1; + uint32 swap_ge_snrthresh0; + uint32 swap_ge_snrthresh1; + uint32 swap_txfail0; + uint32 swap_txfail1; + uint32 swap_timer0; + uint32 swap_timer1; + uint32 swap_alivecheck0; + uint32 swap_alivecheck1; + uint32 rxcount_per_ant0; + uint32 rxcount_per_ant1; + uint32 acc_rxcount; + uint32 
acc_rxcount_per_ant0; + uint32 acc_rxcount_per_ant1; + uint32 tx_auto_en; + uint32 tx_active_ant; + uint32 rx_policy; + uint32 tx_policy; + uint32 cell_policy; + uint32 swap_snrdrop0; + uint32 swap_snrdrop1; + uint32 mws_antsel_ovr_tx; + uint32 mws_antsel_ovr_rx; + uint32 swap_trig_event_id; +}; #define WLC_NUMRATES 16 /**< max # of rates in a rateset */ -typedef struct wlc_rateset { - uint32 count; /**< number of rates in rates[] */ - uint8 rates[WLC_NUMRATES]; /**< rates in 500kbps units w/hi bit set if basic */ - uint8 htphy_membership; /**< HT PHY Membership */ - uint8 mcs[MCSSET_LEN]; /**< supported mcs index bit map */ - uint16 vht_mcsmap; /**< supported vht mcs nss bit map */ - uint16 vht_mcsmap_prop; /**< supported prop vht mcs nss bit map */ -} wlc_rateset_t; - -/* Used to get specific STA parameters */ +/**Used to get specific STA parameters */ typedef struct { uint32 val; struct ether_addr ea; + uint16 PAD; } scb_val_t; -/* Used by iovar versions of some ioctls, i.e. WLC_SCB_AUTHORIZE et al */ +/**Used by iovar versions of some ioctls, i.e. 
WLC_SCB_AUTHORIZE et al */ typedef struct { uint32 code; scb_val_t ioctl_args; } authops_t; -/* channel encoding */ +/** channel encoding */ typedef struct channel_info { - int hw_channel; - int target_channel; - int scan_channel; + int32 hw_channel; + int32 target_channel; + int32 scan_channel; } channel_info_t; -/* For ioctls that take a list of MAC addresses */ +/** For ioctls that take a list of MAC addresses */ typedef struct maclist { - uint count; /**< number of MAC addresses */ + uint32 count; /**< number of MAC addresses */ struct ether_addr ea[1]; /**< variable length array of MAC addresses */ } maclist_t; -/* get pkt count struct passed through ioctl */ +/**get pkt count struct passed through ioctl */ typedef struct get_pktcnt { - uint rx_good_pkt; - uint rx_bad_pkt; - uint tx_good_pkt; - uint tx_bad_pkt; - uint rx_ocast_good_pkt; /* unicast packets destined for others */ + uint32 rx_good_pkt; + uint32 rx_bad_pkt; + uint32 tx_good_pkt; + uint32 tx_bad_pkt; + uint32 rx_ocast_good_pkt; /**< unicast packets destined for others */ } get_pktcnt_t; /* NINTENDO2 */ @@ -1309,12 +1468,13 @@ typedef struct get_pktcnt { #define LQ_STOP_MONITOR 0 #define LQ_START_MONITOR 1 -/* Get averages RSSI, Rx PHY rate and SNR values */ +/** Get averages RSSI, Rx PHY rate and SNR values */ +/* Link Quality */ typedef struct { - int rssi[LQ_IDX_LAST]; /* Array to keep min, max, avg rssi */ - int snr[LQ_IDX_LAST]; /* Array to keep min, max, avg snr */ - int isvalid; /* Flag indicating whether above data is valid */ -} wl_lq_t; /* Link Quality */ + int32 rssi[LQ_IDX_LAST]; /**< Array to keep min, max, avg rssi */ + int32 snr[LQ_IDX_LAST]; /**< Array to keep min, max, avg snr */ + int32 isvalid; /**< Flag indicating whether above data is valid */ +} wl_lq_t; typedef enum wl_wakeup_reason_type { LCD_ON = 1, @@ -1325,56 +1485,68 @@ typedef enum wl_wakeup_reason_type { } wl_wr_type_t; typedef struct { -/* Unique filter id */ + /** Unique filter id */ uint32 id; - -/* stores the reason 
for the last wake up */ + /** stores the reason for the last wake up */ uint8 reason; + uint8 PAD[3]; } wl_wr_t; -/* Get MAC specific rate histogram command */ +/** Get MAC specific rate histogram command */ typedef struct { struct ether_addr ea; /**< MAC Address */ uint8 ac_cat; /**< Access Category */ uint8 num_pkts; /**< Number of packet entries to be averaged */ -} wl_mac_ratehisto_cmd_t; /**< MAC Specific Rate Histogram command */ - -/* Get MAC rate histogram response */ +} wl_mac_ratehisto_cmd_t; +/** Get MAC rate histogram response */ typedef struct { uint32 rate[DOT11_RATE_MAX + 1]; /**< Rates */ uint32 mcs[WL_RATESET_SZ_HT_IOCTL * WL_TX_CHAINS_MAX]; /**< MCS counts */ uint32 vht[WL_RATESET_SZ_VHT_MCS][WL_TX_CHAINS_MAX]; /**< VHT counts */ uint32 tsf_timer[2][2]; /**< Start and End time for 8bytes value */ - uint32 prop11n_mcs[WLC_11N_LAST_PROP_MCS - WLC_11N_FIRST_PROP_MCS + 1]; /* MCS counts */ -} wl_mac_ratehisto_res_t; /**< MAC Specific Rate Histogram Response */ + uint32 prop11n_mcs[WLC_11N_LAST_PROP_MCS - WLC_11N_FIRST_PROP_MCS + 1]; /** MCS counts */ +} wl_mac_ratehisto_res_t; -/* Linux network driver ioctl encoding */ +/* sta_info ecounters */ +typedef struct { + struct ether_addr ea; /* Station MAC addr */ + struct ether_addr BSSID; /* BSSID of the BSS */ + uint32 tx_pkts_fw_total; /* # FW generated sent successfully */ + uint32 tx_pkts_fw_retries; /* # retries for FW generated frames */ + uint32 tx_pkts_fw_retry_exhausted; /* # FW generated which + * failed after retry + */ +} sta_info_ecounters_t; + +#define STAMON_MODULE_VER 1 + +/**Linux network driver ioctl encoding */ typedef struct wl_ioctl { - uint cmd; /**< common ioctl definition */ + uint32 cmd; /**< common ioctl definition */ void *buf; /**< pointer to user buffer */ - uint len; /**< length of user buffer */ + uint32 len; /**< length of user buffer */ uint8 set; /**< 1=set IOCTL; 0=query IOCTL */ - uint used; /**< bytes read or written (optional) */ - uint needed; /**< bytes needed 
(optional) */ + uint32 used; /**< bytes read or written (optional) */ + uint32 needed; /**< bytes needed (optional) */ } wl_ioctl_t; #ifdef CONFIG_COMPAT typedef struct compat_wl_ioctl { - uint cmd; /**< common ioctl definition */ + uint32 cmd; /**< common ioctl definition */ uint32 buf; /**< pointer to user buffer */ - uint len; /**< length of user buffer */ + uint32 len; /**< length of user buffer */ uint8 set; /**< 1=set IOCTL; 0=query IOCTL */ - uint used; /**< bytes read or written (optional) */ - uint needed; /**< bytes needed (optional) */ + uint32 used; /**< bytes read or written (optional) */ + uint32 needed; /**< bytes needed (optional) */ } compat_wl_ioctl_t; #endif /* CONFIG_COMPAT */ -#define WL_NUM_RATES_CCK 4 /* 1, 2, 5.5, 11 Mbps */ -#define WL_NUM_RATES_OFDM 8 /* 6, 9, 12, 18, 24, 36, 48, 54 Mbps SISO/CDD */ -#define WL_NUM_RATES_MCS_1STREAM 8 /* MCS 0-7 1-stream rates - SISO/CDD/STBC/MCS */ -#define WL_NUM_RATES_EXTRA_VHT 2 /* Additional VHT 11AC rates */ -#define WL_NUM_RATES_VHT 10 -#define WL_NUM_RATES_MCS32 1 +#define WL_NUM_RATES_CCK 4 /**< 1, 2, 5.5, 11 Mbps */ +#define WL_NUM_RATES_OFDM 8 /**< 6, 9, 12, 18, 24, 36, 48, 54 Mbps SISO/CDD */ +#define WL_NUM_RATES_MCS_1STREAM 8 /**< MCS 0-7 1-stream rates - SISO/CDD/STBC/MCS */ +#define WL_NUM_RATES_EXTRA_VHT 2 /**< Additional VHT 11AC rates */ +#define WL_NUM_RATES_VHT 10 +#define WL_NUM_RATES_MCS32 1 /* @@ -1382,34 +1554,41 @@ typedef struct compat_wl_ioctl { * revision info up from the driver. */ typedef struct wlc_rev_info { - uint vendorid; /**< PCI vendor id */ - uint deviceid; /**< device id of chip */ - uint radiorev; /**< radio revision */ - uint chiprev; /**< chip revision */ - uint corerev; /**< core revision */ - uint boardid; /**< board identifier (usu. PCI sub-device id) */ - uint boardvendor; /**< board vendor (usu. 
PCI sub-vendor id) */ - uint boardrev; /**< board revision */ - uint driverrev; /**< driver version */ - uint ucoderev; /**< microcode version */ - uint bus; /**< bus type */ - uint chipnum; /**< chip number */ - uint phytype; /**< phy type */ - uint phyrev; /**< phy revision */ - uint anarev; /**< anacore rev */ - uint chippkg; /**< chip package info */ - uint nvramrev; /**< nvram revision number */ + uint32 vendorid; /**< PCI vendor id */ + uint32 deviceid; /**< device id of chip */ + uint32 radiorev; /**< radio revision */ + uint32 chiprev; /**< chip revision */ + uint32 corerev; /**< core revision */ + uint32 boardid; /**< board identifier (usu. PCI sub-device id) */ + uint32 boardvendor; /**< board vendor (usu. PCI sub-vendor id) */ + uint32 boardrev; /**< board revision */ + uint32 driverrev; /**< driver version */ + uint32 ucoderev; /**< microcode version */ + uint32 bus; /**< bus type */ + uint32 chipnum; /**< chip number */ + uint32 phytype; /**< phy type */ + uint32 phyrev; /**< phy revision */ + uint32 anarev; /**< anacore rev */ + uint32 chippkg; /**< chip package info */ + uint32 nvramrev; /**< nvram revision number */ + uint32 phyminorrev; /**< phy minor rev */ + uint32 coreminorrev; /**< core minor rev */ + uint32 drvrev_major; /**< driver version: major */ + uint32 drvrev_minor; /**< driver version: minor */ + uint32 drvrev_rc; /**< driver version: rc */ + uint32 drvrev_rc_inc; /**< driver version: rc incremental */ } wlc_rev_info_t; #define WL_REV_INFO_LEGACY_LENGTH 48 #define WL_BRAND_MAX 10 typedef struct wl_instance_info { - uint instance; - char brand[WL_BRAND_MAX]; + uint32 instance; + int8 brand[WL_BRAND_MAX]; + int8 PAD[4-(WL_BRAND_MAX%4)]; } wl_instance_info_t; -/* structure to change size of tx fifo */ +/** structure to change size of tx fifo */ typedef struct wl_txfifo_sz { uint16 magic; uint16 fifo; @@ -1417,27 +1596,25 @@ typedef struct wl_txfifo_sz { } wl_txfifo_sz_t; /* Transfer info about an IOVar from the driver */ -/* Max supported 
IOV name size in bytes, + 1 for nul termination */ -#define WLC_IOV_NAME_LEN 30 +/**Max supported IOV name size in bytes, + 1 for nul termination */ +#define WLC_IOV_NAME_LEN (32 + 1) + typedef struct wlc_iov_trx_s { uint8 module; uint8 type; char name[WLC_IOV_NAME_LEN]; } wlc_iov_trx_t; -/* bump this number if you change the ioctl interface */ +/** bump this number if you change the ioctl interface */ #define WLC_IOCTL_VERSION 2 #define WLC_IOCTL_VERSION_LEGACY_IOTYPES 1 - -#ifdef CONFIG_USBRNDIS_RETAIL -/* struct passed in for WLC_NDCONFIG_ITEM */ -typedef struct { - char *name; - void *param; -} ndconfig_item_t; -#endif - - +/* ifdef EXT_STA */ +typedef struct _wl_assoc_result { + ulong associated; + ulong NDIS_auth; + ulong NDIS_infra; +} wl_assoc_result_t; +/* EXT_STA */ #define WL_PHY_PAVARS_LEN 32 /**< Phytype, Bandrange, chain, a[0], b[0], c[0], d[0] .. */ @@ -1458,6 +1635,7 @@ typedef struct wl_po { uint16 phy_type; /**< Phy type */ uint16 band; uint16 cckpo; + uint16 PAD; uint32 ofdmpo; uint16 mcspo[8]; } wl_po_t; @@ -1469,11 +1647,18 @@ typedef struct wl_rpcal { uint16 update; } wl_rpcal_t; +#define WL_NUM_RPCALPHASEVARS 5 /* number of rpcal phase vars */ + +typedef struct wl_rpcal_phase { + uint16 value; + uint16 update; +} wl_rpcal_phase_t; + typedef struct wl_aci_args { - int enter_aci_thresh; /* Trigger level to start detecting ACI */ - int exit_aci_thresh; /* Trigger level to exit ACI mode */ - int usec_spin; /* microsecs to delay between rssi samples */ - int glitch_delay; /* interval between ACI scans when glitch count is consistently high */ + int32 enter_aci_thresh; /* Trigger level to start detecting ACI */ + int32 exit_aci_thresh; /* Trigger level to exit ACI mode */ + int32 usec_spin; /* microsecs to delay between rssi samples */ + int32 glitch_delay; /* interval between ACI scans when glitch count is consistently high */ uint16 nphy_adcpwr_enter_thresh; /**< ADC power to enter ACI mitigation mode */ uint16 nphy_adcpwr_exit_thresh; /**< ADC 
power to exit ACI mitigation mode */ uint16 nphy_repeat_ctr; /**< Number of tries per channel to compute power */ @@ -1482,7 +1667,7 @@ typedef struct wl_aci_args { uint16 nphy_b_energy_lo_aci; /**< low ACI power energy threshold for bphy */ uint16 nphy_b_energy_md_aci; /**< mid ACI power energy threshold for bphy */ uint16 nphy_b_energy_hi_aci; /**< high ACI power energy threshold for bphy */ - uint16 nphy_noise_noassoc_glitch_th_up; /* wl interference 4 */ + uint16 nphy_noise_noassoc_glitch_th_up; /**< wl interference 4 */ uint16 nphy_noise_noassoc_glitch_th_dn; uint16 nphy_noise_assoc_glitch_th_up; uint16 nphy_noise_assoc_glitch_th_dn; @@ -1501,13 +1686,16 @@ typedef struct wl_aci_args { typedef struct wl_samplecollect_args { /* version 0 fields */ uint8 coll_us; - int cores; + uint8 PAD[3]; + int32 cores; /* add'l version 1 fields */ - uint16 version; /* see definition of WL_SAMPLECOLLECT_T_VERSION */ - uint16 length; /* length of entire structure */ + uint16 version; /**< see definition of WL_SAMPLECOLLECT_T_VERSION */ + uint16 length; /**< length of entire structure */ int8 trigger; + uint8 PAD; uint16 timeout; uint16 mode; + uint16 PAD; uint32 pre_dur; uint32 post_dur; uint8 gpio_sel; @@ -1520,8 +1708,11 @@ typedef struct wl_samplecollect_args { uint8 module_sel1; uint8 module_sel2; uint16 nsamps; - int bitStart; + uint16 PAD; + int32 bitStart; uint32 gpioCapMask; + uint8 gpio_collection; + uint8 PAD[3]; } wl_samplecollect_args_t; #define WL_SAMPLEDATA_T_VERSION 1 /**< version of wl_samplecollect_args_t struct */ @@ -1540,11 +1731,12 @@ typedef struct wl_sampledata { /* WL_OTA START */ /* OTA Test Status */ enum { - WL_OTA_TEST_IDLE = 0, /**< Default Idle state */ - WL_OTA_TEST_ACTIVE = 1, /**< Test Running */ + WL_OTA_TEST_IDLE = 0, /**< Default Idle state */ + WL_OTA_TEST_ACTIVE = 1, /**< Test Running */ WL_OTA_TEST_SUCCESS = 2, /**< Successfully Finished Test */ - WL_OTA_TEST_FAIL = 3 /**< Test Failed in the Middle */ + WL_OTA_TEST_FAIL = 3 /**< Test 
Failed in the Middle */ }; + /* OTA SYNC Status */ enum { WL_OTA_SYNC_IDLE = 0, /**< Idle state */ @@ -1555,7 +1747,7 @@ enum { /* Various error states dut can get stuck during test */ enum { WL_OTA_SKIP_TEST_CAL_FAIL = 1, /**< Phy calibration failed */ - WL_OTA_SKIP_TEST_SYNCH_FAIL = 2, /**< Sync Packet not recieved */ + WL_OTA_SKIP_TEST_SYNCH_FAIL = 2, /**< Sync Packet not recieved */ WL_OTA_SKIP_TEST_FILE_DWNLD_FAIL = 3, /**< Cmd flow file download failed */ WL_OTA_SKIP_TEST_NO_TEST_FOUND = 4, /**< No test found in Flow file */ WL_OTA_SKIP_TEST_WL_NOT_UP = 5, /**< WL UP failed */ @@ -1575,7 +1767,6 @@ enum { WL_OTA_TEST_BW_40MHZ = 2, /**< full 40Mhz operation */ WL_OTA_TEST_BW_80MHZ = 3 /* full 80Mhz operation */ }; - #define HT_MCS_INUSE 0x00000080 /* HT MCS in use,indicates b0-6 holds an mcs */ #define VHT_MCS_INUSE 0x00000100 /* VHT MCS in use,indicates b0-6 holds an mcs */ #define OTA_RATE_MASK 0x0000007f /* rate/mcs value */ @@ -1586,6 +1777,7 @@ enum { typedef struct ota_rate_info { uint8 rate_cnt; /**< Total number of rates */ + uint8 PAD; uint16 rate_val_mbps[WL_OTA_TEST_MAX_NUM_RATE]; /**< array of rates from 1mbps to 130mbps */ /**< for legacy rates : ratein mbps * 2 */ /**< for HT rates : mcs index */ @@ -1599,20 +1791,40 @@ typedef struct ota_power_info { } ota_power_info_t; typedef struct ota_packetengine { - uint16 delay; /* Inter-packet delay */ + uint16 delay; /**< Inter-packet delay */ /**< for ota_tx, delay is tx ifs in micro seconds */ /* for ota_rx, delay is wait time in milliseconds */ - uint16 nframes; /* Number of frames */ - uint16 length; /* Packet length */ + uint16 nframes; /**< Number of frames */ + uint16 length; /**< Packet length */ } ota_packetengine_t; -/* Test info vector */ +/* + * OTA txant/rxant parameter + * bit7-4: 4 bits swdiv_tx/rx_policy bitmask, specify antenna-policy for SW diversity + * bit3-0: 4 bits TxCore bitmask, specify cores used for transmit frames + * (maximum spatial expansion) + */ +#define 
WL_OTA_TEST_ANT_MASK 0xF0 +#define WL_OTA_TEST_CORE_MASK 0x0F + +/* OTA txant/rxant 'ant_mask' field; map to Tx/Rx antenna policy for SW diversity */ +enum { + WL_OTA_TEST_FORCE_ANT0 = 0x10, /* force antenna to Ant 0 */ + WL_OTA_TEST_FORCE_ANT1 = 0x20, /* force antenna to Ant 1 */ +}; + +/* antenna/core fields access */ +#define WL_OTA_TEST_GET_ANT(_txant) ((_txant) & WL_OTA_TEST_ANT_MASK) +#define WL_OTA_TEST_GET_CORE(_txant) ((_txant) & WL_OTA_TEST_CORE_MASK) + +/** Test info vector */ typedef struct wl_ota_test_args { uint8 cur_test; /**< test phase */ uint8 chan; /**< channel */ uint8 bw; /**< bandwidth */ uint8 control_band; /**< control band */ uint8 stf_mode; /**< stf mode */ + uint8 PAD; ota_rate_info_t rt_info; /**< Rate info */ ota_packetengine_t pkteng; /**< packeteng info */ uint8 txant; /**< tx antenna */ @@ -1621,6 +1833,7 @@ typedef struct wl_ota_test_args { uint8 wait_for_sync; /**< wait for sync or not */ uint8 ldpc; uint8 sgi; + uint8 PAD; /* Update WL_OTA_TESTVEC_T_VERSION for adding new members to this structure */ } wl_ota_test_args_t; @@ -1641,10 +1854,11 @@ typedef struct wl_ota_test_vector { } wl_ota_test_vector_t; -/* struct copied back form dongle to host to query the status */ +/** struct copied back form dongle to host to query the status */ typedef struct wl_ota_test_status { int16 cur_test_cnt; /**< test phase */ int8 skip_test_reason; /**< skip test reasoin */ + uint8 PAD; wl_ota_test_args_t test_arg; /**< cur test arg details */ uint16 test_cnt; /**< total no of test downloaded */ uint8 file_dwnld_valid; /**< file successfully downloaded ? 
*/ @@ -1654,9 +1868,23 @@ typedef struct wl_ota_test_status { struct ether_addr tx_mac; /**< tx mac address */ struct ether_addr rx_mac; /**< rx mac address */ uint8 test_stage; /**< check the test status */ - int8 loop_test; /**< Debug feature to puts test enfine in a loop */ + int8 loop_test; /**< Debug feature to puts test enfine in a loop */ uint8 sync_status; /**< sync status */ } wl_ota_test_status_t; + +/* FOR ioctl that take the sta monitor information */ +typedef struct stamon_data { + struct ether_addr ea; + uint8 PAD[2]; + int32 rssi; +} stamon_data_t; + +typedef struct stamon_info { + int32 version; + uint32 count; + stamon_data_t sta_data[1]; +} stamon_info_t; + typedef struct wl_ota_rx_rssi { uint16 pktcnt; /* Pkt count used for this rx test */ chanspec_t chanspec; /* Channel info on which the packets are received */ @@ -1674,88 +1902,135 @@ typedef struct wl_ota_test_rssi { /* WL_OTA END */ -/* wl_radar_args_t */ +/**wl_radar_args_t */ typedef struct { - int npulses; /**< required number of pulses at n * t_int */ - int ncontig; /**< required number of pulses at t_int */ - int min_pw; /**< minimum pulse width (20 MHz clocks) */ - int max_pw; /**< maximum pulse width (20 MHz clocks) */ + int32 npulses; /**< required number of pulses at n * t_int */ + int32 ncontig; /**< required number of pulses at t_int */ + int32 min_pw; /**< minimum pulse width (20 MHz clocks) */ + int32 max_pw; /**< maximum pulse width (20 MHz clocks) */ uint16 thresh0; /**< Radar detection, thresh 0 */ uint16 thresh1; /**< Radar detection, thresh 1 */ uint16 blank; /**< Radar detection, blank control */ uint16 fmdemodcfg; /**< Radar detection, fmdemod config */ - int npulses_lp; /* Radar detection, minimum long pulses */ - int min_pw_lp; /* Minimum pulsewidth for long pulses */ - int max_pw_lp; /* Maximum pulsewidth for long pulses */ - int min_fm_lp; /* Minimum fm for long pulses */ - int max_span_lp; /* Maximum deltat for long pulses */ - int min_deltat; /* Minimum spacing 
between pulses */ - int max_deltat; /* Maximum spacing between pulses */ + int32 npulses_lp; /**< Radar detection, minimum long pulses */ + int32 min_pw_lp; /**< Minimum pulsewidth for long pulses */ + int32 max_pw_lp; /**< Maximum pulsewidth for long pulses */ + int32 min_fm_lp; /**< Minimum fm for long pulses */ + int32 max_span_lp; /**< Maximum deltat for long pulses */ + int32 min_deltat; /**< Minimum spacing between pulses */ + int32 max_deltat; /**< Maximum spacing between pulses */ uint16 autocorr; /**< Radar detection, autocorr on or off */ uint16 st_level_time; /**< Radar detection, start_timing level */ - uint16 t2_min; /* minimum clocks needed to remain in state 2 */ - uint32 version; /* version */ + uint16 t2_min; /**< minimum clocks needed to remain in state 2 */ + uint8 PAD[2]; + uint32 version; /**< version */ uint32 fra_pulse_err; /**< sample error margin for detecting French radar pulsed */ - int npulses_fra; /* Radar detection, minimum French pulses set */ - int npulses_stg2; /* Radar detection, minimum staggered-2 pulses set */ - int npulses_stg3; /* Radar detection, minimum staggered-3 pulses set */ + int32 npulses_fra; /**< Radar detection, minimum French pulses set */ + int32 npulses_stg2; /**< Radar detection, minimum staggered-2 pulses set */ + int32 npulses_stg3; /**< Radar detection, minimum staggered-3 pulses set */ uint16 percal_mask; /**< defines which period cal is masked from radar detection */ - int quant; /**< quantization resolution to pulse positions */ + uint8 PAD[2]; + int32 quant; /**< quantization resolution to pulse positions */ uint32 min_burst_intv_lp; /**< minimum burst to burst interval for bin3 radar */ uint32 max_burst_intv_lp; /**< maximum burst to burst interval for bin3 radar */ - int nskip_rst_lp; /**< number of skipped pulses before resetting lp buffer */ - int max_pw_tol; /**< maximum tolerance allowd in detected pulse width for radar detection */ - uint16 feature_mask; /* 16-bit mask to specify enabled features 
*/ + int32 nskip_rst_lp; /**< number of skipped pulses before resetting lp buffer */ + int32 max_pw_tol; /* maximum tolerance allowd in detected pulse width for radar detection */ + uint16 feature_mask; /**< 16-bit mask to specify enabled features */ + uint16 thresh0_sc; /**< Radar detection, thresh 0 */ + uint16 thresh1_sc; /**< Radar detection, thresh 1 */ + uint8 PAD[2]; } wl_radar_args_t; #define WL_RADAR_ARGS_VERSION 2 typedef struct { - uint32 version; /* version */ - uint16 thresh0_20_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 20MHz */ - uint16 thresh1_20_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 20MHz */ - uint16 thresh0_40_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 40MHz */ - uint16 thresh1_40_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 40MHz */ - uint16 thresh0_80_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 80MHz */ - uint16 thresh1_80_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 80MHz */ - uint16 thresh0_20_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 20MHz */ - uint16 thresh1_20_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 20MHz */ - uint16 thresh0_40_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 40MHz */ - uint16 thresh1_40_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 40MHz */ - uint16 thresh0_80_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 80MHz */ - uint16 thresh1_80_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 80MHz */ -#ifdef WL11AC160 - uint16 thresh0_160_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 160MHz */ - uint16 thresh1_160_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 160MHz */ - uint16 thresh0_160_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 160MHz */ - uint16 thresh1_160_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 160MHz */ -#endif /* WL11AC160 */ + uint32 version; /**< version 
*/ + uint16 thresh0_20_lo; /**< Radar detection, thresh 0 (range 5250-5350MHz) for BW 20MHz */ + uint16 thresh1_20_lo; /**< Radar detection, thresh 1 (range 5250-5350MHz) for BW 20MHz */ + uint16 thresh0_40_lo; /**< Radar detection, thresh 0 (range 5250-5350MHz) for BW 40MHz */ + uint16 thresh1_40_lo; /**< Radar detection, thresh 1 (range 5250-5350MHz) for BW 40MHz */ + uint16 thresh0_80_lo; /**< Radar detection, thresh 0 (range 5250-5350MHz) for BW 80MHz */ + uint16 thresh1_80_lo; /**< Radar detection, thresh 1 (range 5250-5350MHz) for BW 80MHz */ + uint16 thresh0_20_hi; /**< Radar detection, thresh 0 (range 5470-5725MHz) for BW 20MHz */ + uint16 thresh1_20_hi; /**< Radar detection, thresh 1 (range 5470-5725MHz) for BW 20MHz */ + uint16 thresh0_40_hi; /**< Radar detection, thresh 0 (range 5470-5725MHz) for BW 40MHz */ + uint16 thresh1_40_hi; /**< Radar detection, thresh 1 (range 5470-5725MHz) for BW 40MHz */ + uint16 thresh0_80_hi; /**< Radar detection, thresh 0 (range 5470-5725MHz) for BW 80MHz */ + uint16 thresh1_80_hi; /**< Radar detection, thresh 1 (range 5470-5725MHz) for BW 80MHz */ + uint16 thresh0_160_lo; /**< Radar detection, thresh 0 (range 5250-5350MHz) for BW 160MHz */ + uint16 thresh1_160_lo; /**< Radar detection, thresh 1 (range 5250-5350MHz) for BW 160MHz */ + uint16 thresh0_160_hi; /**< Radar detection, thresh 0 (range 5470-5725MHz) for BW 160MHz */ + uint16 thresh1_160_hi; /**< Radar detection, thresh 1 (range 5470-5725MHz) for BW 160MHz */ } wl_radar_thr_t; +typedef struct { + uint32 version; /* version */ + uint16 thresh0_sc_20_lo; + uint16 thresh1_sc_20_lo; + uint16 thresh0_sc_40_lo; + uint16 thresh1_sc_40_lo; + uint16 thresh0_sc_80_lo; + uint16 thresh1_sc_80_lo; + uint16 thresh0_sc_20_hi; + uint16 thresh1_sc_20_hi; + uint16 thresh0_sc_40_hi; + uint16 thresh1_sc_40_hi; + uint16 thresh0_sc_80_hi; + uint16 thresh1_sc_80_hi; + uint16 fc_varth_sb; + uint16 fc_varth_bin5_sb; + uint16 notradar_enb; + uint16 max_notradar_lp; + uint16 max_notradar; + 
uint16 max_notradar_lp_sc; + uint16 max_notradar_sc; + uint16 highpow_war_enb; + uint16 highpow_sp_ratio; //unit is 0.5 +} wl_radar_thr2_t; + #define WL_RADAR_THR_VERSION 2 -/* RSSI per antenna */ +typedef struct { + uint32 ver; + uint32 len; + int32 rssi_th[3]; + uint8 rssi_gain_80[4]; + uint8 rssi_gain_160[4]; +} wl_dyn_switch_th_t; + +#define WL_PHY_DYN_SWITCH_TH_VERSION 1 + +/** RSSI per antenna */ typedef struct { uint32 version; /**< version field */ uint32 count; /**< number of valid antenna rssi */ int8 rssi_ant[WL_RSSI_ANT_MAX]; /**< rssi per antenna */ } wl_rssi_ant_t; -/* data structure used in 'dfs_status' wl interface, which is used to query dfs status */ +/* SNR per antenna */ typedef struct { - uint state; /**< noted by WL_DFS_CACSTATE_XX. */ - uint duration; /**< time spent in ms in state. */ - /* as dfs enters ISM state, it removes the operational channel from quiet channel + uint32 version; /* version field */ + uint32 count; /* number of valid antenna snr */ + int8 snr_ant[WL_RSSI_ANT_MAX]; /* snr per antenna */ +} wl_snr_ant_t; + + +/** data structure used in 'dfs_status' wl interface, which is used to query dfs status */ +typedef struct { + uint32 state; /**< noted by WL_DFS_CACSTATE_XX. */ + uint32 duration; /**< time spent in ms in state. */ + /** + * as dfs enters ISM state, it removes the operational channel from quiet channel * list and notes the channel in channel_cleared. 
set to 0 if no channel is cleared */ chanspec_t chanspec_cleared; - /* chanspec cleared used to be a uint, add another to uint16 to maintain size */ + /** chanspec cleared used to be a uint32, add another to uint16 to maintain size */ uint16 pad; } wl_dfs_status_t; typedef struct { - uint state; /* noted by WL_DFS_CACSTATE_XX */ - uint duration; /* time spent in ms in state */ + uint32 state; /* noted by WL_DFS_CACSTATE_XX */ + uint32 duration; /* time spent in ms in state */ chanspec_t chanspec; /* chanspec of this core */ chanspec_t chanspec_last_cleared; /* chanspec last cleared for operation by scanning */ uint16 sub_type; /* currently just the index of the core or the respective PLL */ @@ -1770,29 +2045,42 @@ typedef struct { } wl_dfs_status_all_t; #define WL_DFS_AP_MOVE_VERSION (1) -typedef struct wl_dfs_ap_move_status { + +struct wl_dfs_ap_move_status_v1 { + int16 dfs_status; /* DFS scan status */ + chanspec_t chanspec; /* New AP Chanspec */ + wl_dfs_status_t cac_status; /* CAC status */ +}; + +typedef struct wl_dfs_ap_move_status_v2 { int8 version; /* version field; current max version 1 */ int8 move_status; /* DFS move status */ chanspec_t chanspec; /* New AP Chanspec */ wl_dfs_status_all_t scan_status; /* status; see dfs_status_all for wl_dfs_status_all_t */ -} wl_dfs_ap_move_status_t; +} wl_dfs_ap_move_status_v2_t; + +#define WL_DFS_AP_MOVE_ABORT -1 /* Abort any dfs_ap_move in progress immediately */ +#define WL_DFS_AP_MOVE_STUNT -2 /* Stunt move but continue background CSA if in progress */ -/* data structure used in 'radar_status' wl interface, which is use to query radar det status */ +/** data structure used in 'radar_status' wl interface, which is use to query radar det status */ typedef struct { - bool detected; - int count; - bool pretended; + uint8 detected; + uint8 PAD[3]; + int32 count; + uint8 pretended; + uint8 PAD[3]; uint32 radartype; uint32 timenow; uint32 timefromL; - int lp_csect_single; - int detected_pulse_index; - int nconsecq_pulses; 
+ int32 lp_csect_single; + int32 detected_pulse_index; + int32 nconsecq_pulses; chanspec_t ch; - int pw[10]; - int intv[10]; - int fm[10]; + uint8 PAD[2]; + int32 pw[10]; + int32 intv[10]; + int32 fm[10]; } wl_radar_status_t; #define NUM_PWRCTRL_RATES 12 @@ -1821,20 +2109,21 @@ typedef struct { typedef struct { uint32 flags; - chanspec_t chanspec; /* txpwr report for this channel */ - chanspec_t local_chanspec; /* channel on which we are associated */ - uint8 local_max; /* local max according to the AP */ - uint8 local_constraint; /* local constraint according to the AP */ - int8 antgain[2]; /* Ant gain for each band - from SROM */ - uint8 rf_cores; /* count of RF Cores being reported */ - uint8 est_Pout[4]; /* Latest tx power out estimate per RF + chanspec_t chanspec; /**< txpwr report for this channel */ + chanspec_t local_chanspec; /**< channel on which we are associated */ + uint8 local_max; /**< local max according to the AP */ + uint8 local_constraint; /**< local constraint according to the AP */ + int8 antgain[2]; /**< Ant gain for each band - from SROM */ + uint8 rf_cores; /**< count of RF Cores being reported */ + uint8 est_Pout[4]; /**< Latest tx power out estimate per RF * chain without adjustment */ - uint8 est_Pout_cck; /* Latest CCK tx power out estimate */ - uint8 user_limit[WL_TX_POWER_RATES_LEGACY]; /* User limit */ - uint8 reg_limit[WL_TX_POWER_RATES_LEGACY]; /* Regulatory power limit */ - uint8 board_limit[WL_TX_POWER_RATES_LEGACY]; /* Max power board can support (SROM) */ - uint8 target[WL_TX_POWER_RATES_LEGACY]; /* Latest target power */ + uint8 est_Pout_cck; /**< Latest CCK tx power out estimate */ + uint8 user_limit[WL_TX_POWER_RATES_LEGACY]; /**< User limit */ + uint8 reg_limit[WL_TX_POWER_RATES_LEGACY]; /**< Regulatory power limit */ + uint8 board_limit[WL_TX_POWER_RATES_LEGACY]; /**< Max power board can support (SROM) */ + uint8 target[WL_TX_POWER_RATES_LEGACY]; /**< Latest target power */ + uint8 PAD[2]; } tx_power_legacy2_t; #define 
WL_NUM_2x2_ELEMENTS 4 @@ -1854,12 +2143,12 @@ typedef struct { #define WL_TXPPR_VERSION 1 #define WL_TXPPR_LENGTH (sizeof(wl_txppr_t)) #define TX_POWER_T_VERSION 45 -/* number of ppr serialization buffers, it should be reg, board and target */ +/** number of ppr serialization buffers, it should be reg, board and target */ #define WL_TXPPR_SER_BUF_NUM (3) typedef struct chanspec_txpwr_max { - chanspec_t chanspec; /* chanspec */ - uint8 txpwr_max; /* max txpwr in all the rates */ + chanspec_t chanspec; /**< chanspec */ + uint8 txpwr_max; /**< max txpwr in all the rates */ uint8 padding; } chanspec_txpwr_max_t; @@ -1882,8 +2171,227 @@ typedef struct tx_inst_power { typedef struct wl_txchain_pwr_offsets { int8 offset[WL_NUM_TXCHAIN_MAX]; /**< quarter dBm signed offset for each chain */ } wl_txchain_pwr_offsets_t; -/* maximum channels returned by the get valid channels iovar */ + +/** maximum channels returned by the get valid channels iovar */ #define WL_NUMCHANNELS 64 +#define WL_NUMCHANNELS_MANY_CHAN 10 +#define WL_ITER_LIMIT_MANY_CHAN 5 + +#define WL_MIMO_PS_CFG_VERSION_1 1 + +typedef struct wl_mimops_cfg { + uint8 version; + /* active_chains: 0 for all, 1 for 1 chain. */ + uint8 active_chains; + /* static (0) or dynamic (1).or disabled (3) Mode applies only when active_chains = 0. */ + uint8 mode; + /* bandwidth = Full (0), 20M (1), 40M (2), 80M (3). */ + uint8 bandwidth; + uint8 applychangesafterlearning; + uint8 pad[3]; +} wl_mimops_cfg_t; + +/* This event is for tracing MIMO PS metrics snapshot calls. + * It is helpful to debug out-of-sync issue between + * ucode SHM values and FW snapshot calculation. + * It is part of the EVENT_LOG_TAG_MIMO_PS_TRACE. 
+ */ +#define WL_MIMO_PS_METRICS_SNAPSHOT_TRACE_TYPE 0 +typedef struct wl_mimo_ps_metrics_snapshot_trace { + /* type field for this TLV: */ + uint16 type; + /* length field for this TLV */ + uint16 len; + uint32 idle_slotcnt_mimo; /* MIMO idle slotcnt raw SHM value */ + uint32 last_idle_slotcnt_mimo; /* stored value snapshot */ + uint32 idle_slotcnt_siso; /* SISO idle slotcnt raw SHM value */ + uint32 last_idle_slotcnt_siso; /* stored value snapshot */ + uint32 rx_time_mimo; /* Rx MIMO raw SHM value */ + uint32 last_rx_time_mimo; /* stored value snapshot */ + uint32 rx_time_siso; /* RX SISO raw SHM value */ + uint32 last_rx_time_siso; /* stored value snapshot */ + uint32 tx_time_1chain; /* Tx 1-chain raw SHM value */ + uint32 last_tx_time_1chain; /* stored value snapshot */ + uint32 tx_time_2chain; /* Tx 2-chain raw SHM value */ + uint32 last_tx_time_2chain; /* stored value snapshot */ + uint32 tx_time_3chain; /* Tx 3-chain raw SHM value */ + uint32 last_tx_time_3chain; /* stored value snapshot */ + uint16 reason; /* reason for snapshot call, see below */ + /* Does the call reset last values after delta calculation */ + uint16 reset_last; +} wl_mimo_ps_metrics_snapshot_trace_t; +/* reason codes for mimo ps metrics snapshot function calls */ +#define WL_MIMOPS_METRICS_SNAPSHOT_REPORT 1 +#define WL_MIMOPS_METRICS_SNAPSHOT_RXCHAIN_SET 2 +#define WL_MIMOPS_METRICS_SNAPSHOT_ARBI 3 +#define WL_MIMOPS_METRICS_SNAPSHOT_SLOTUPD 4 +#define WL_MIMOPS_METRICS_SNAPSHOT_PMBCNRX 5 +#define WL_MIMOPS_METRICS_SNAPSHOT_BMACINIT 6 +#define WL_MIMOPS_METRICS_SNAPSHOT_HT_COMPLETE 7 +#define WL_MIMOPS_METRICS_SNAPSHOT_OCL 8 + +#define WL_MIMO_PS_STATUS_VERSION_2 2 +typedef struct wl_mimo_ps_status { + uint8 version; + uint8 ap_cap; /* The associated AP's capability (BW, MIMO/SISO). */ + uint8 association_status; /* How we are associated to the AP (MIMO/SISO). */ + uint8 mimo_ps_state; /* mimo_ps_cfg states: [0-5]. 
See below for values */ + uint8 mrc_state; /* MRC state: NONE (0), ACTIVE(1) */ + uint8 bss_rxchain; /* bss rxchain bitmask */ + uint8 bss_txchain; /* bss txchain bitmask */ + uint8 bss_bw; /* bandwidth: Full (0), 20M (1), 40M (2), 80M (3), etc */ + uint16 hw_state; /* bitmask of hw state. See below for values */ + uint8 hw_rxchain; /* actual HW rxchain bitmask */ + uint8 hw_txchain; /* actual HW txchain bitmask */ + uint8 hw_bw; /* bandwidth: Full (0), 20M (1), 40M (2), 80M (3), etc */ + uint8 pm_bcnrx_state; /* actual state of ucode flag */ + uint8 basic_rates_present; /* internal flag to trigger siso bcmc rx */ + uint8 siso_bcmc_rx_state; /* actual state of ucode flag */ +} wl_mimo_ps_status_t; + +#define WL_MIMO_PS_STATUS_VERSION_1 1 +typedef struct wl_mimo_ps_status_v1 { + uint8 version; + uint8 ap_cap; /* The associated AP's capability (BW, MIMO/SISO). */ + uint8 association_status; /* How we are associated to the AP (MIMO/SISO). */ + uint8 mimo_ps_state; /* mimo_ps_cfg states: [0-5]. See below for values */ + uint8 mrc_state; /* MRC state: NONE (0), ACTIVE(1) */ + uint8 bss_rxchain; /* bss rxchain bitmask */ + uint8 bss_txchain; /* bss txchain bitmask */ + uint8 bss_bw; /* bandwidth: Full (0), 20M (1), 40M (2), 80M (3), etc */ + uint16 hw_state; /* bitmask of hw state. 
See below for values */ + uint8 hw_rxchain; /* actual HW rxchain bitmask */ + uint8 hw_txchain; /* actual HW txchain bitmask */ + uint8 hw_bw; /* bandwidth: Full (0), 20M (1), 40M (2), 80M (3), etc */ + uint8 pad[3]; +} wl_mimo_ps_status_v1_t; + +#define WL_MIMO_PS_STATUS_AP_CAP(ap_cap) (ap_cap & 0x0F) +#define WL_MIMO_PS_STATUS_AP_CAP_BW(ap_cap) (ap_cap >> 4) +#define WL_MIMO_PS_STATUS_ASSOC_BW_SHIFT 4 + +/* version 3: assoc status: low nibble is status enum, high other flags */ +#define WL_MIMO_PS_STATUS_VERSION_3 3 +#define WL_MIMO_PS_STATUS_ASSOC_STATUS_MASK 0x0F +#define WL_MIMO_PS_STATUS_ASSOC_STATUS_VHT_WITHOUT_OMN 0x80 + +/* mimo_ps_status: ap_cap/association status */ +enum { + WL_MIMO_PS_STATUS_ASSOC_NONE = 0, + WL_MIMO_PS_STATUS_ASSOC_SISO = 1, + WL_MIMO_PS_STATUS_ASSOC_MIMO = 2, + WL_MIMO_PS_STATUS_ASSOC_LEGACY = 3 +}; + +/* mimo_ps_status: mimo_ps_cfg states */ +enum { + WL_MIMO_PS_CFG_STATE_NONE = 0, + WL_MIMO_PS_CFG_STATE_INFORM_AP_INPROGRESS = 1, + WL_MIMO_PS_CFG_STATE_INFORM_AP_DONE = 2, + WL_MIMO_PS_CFG_STATE_LEARNING = 3, + WL_MIMO_PS_CFG_STATE_HW_CONFIGURE = 4, + WL_MIMO_PS_CFG_STATE_INFORM_AP_PENDING = 5 +}; + +/* mimo_ps_status: hw_state values */ +#define WL_MIMO_PS_STATUS_HW_STATE_NONE 0 +#define WL_MIMO_PS_STATUS_HW_STATE_LTECOEX (0x1 << 0) +#define WL_MIMO_PS_STATUS_HW_STATE_MIMOPS_BSS (0x1 << 1) +#define WL_MIMO_PS_STATUS_HW_STATE_AWDL_BSS (0x1 << 2) +#define WL_MIMO_PS_STATUS_HW_STATE_SCAN (0x1 << 3) +#define WL_MIMO_PS_STATUS_HW_STATE_TXPPR (0x1 << 4) +#define WL_MIMO_PS_STATUS_HW_STATE_PWRTHOTTLE (0x1 << 5) +#define WL_MIMO_PS_STATUS_HW_STATE_TMPSENSE (0x1 << 6) +#define WL_MIMO_PS_STATUS_HW_STATE_IOVAR (0x1 << 7) +#define WL_MIMO_PS_STATUS_HW_STATE_AP_BSS (0x1 << 8) + +/* mimo_ps_status: mrc states */ +#define WL_MIMO_PS_STATUS_MRC_NONE 0 +#define WL_MIMO_PS_STATUS_MRC_ACTIVE 1 + +/* mimo_ps_status: core flag states for single-core beacon and siso-bcmc rx */ +#define WL_MIMO_PS_STATUS_MHF_FLAG_NONE 0 +#define 
WL_MIMO_PS_STATUS_MHF_FLAG_ACTIVE 1 +#define WL_MIMO_PS_STATUS_MHF_FLAG_COREDOWN 2 +#define WL_MIMO_PS_STATUS_MHF_FLAG_INVALID 3 + +/* Type values for the REASON */ +#define WL_MIMO_PS_PS_LEARNING_ABORTED (1 << 0) +#define WL_MIMO_PS_PS_LEARNING_COMPLETED (1 << 1) +#define WL_MIMO_PS_PS_LEARNING_ONGOING (1 << 2) + +typedef struct wl_mimo_ps_learning_event_data { + uint32 startTimeStamp; + uint32 endTimeStamp; + uint16 reason; + struct ether_addr BSSID; + uint32 totalSISO_below_rssi_threshold; + uint32 totalMIMO_below_rssi_threshold; + uint32 totalSISO_above_rssi_threshold; + uint32 totalMIMO_above_rssi_threshold; +} wl_mimo_ps_learning_event_data_t; + +#define WL_MIMO_PS_PS_LEARNING_CFG_ABORT (1 << 0) +#define WL_MIMO_PS_PS_LEARNING_CFG_STATUS (1 << 1) +#define WL_MIMO_PS_PS_LEARNING_CFG_CONFIG (1 << 2) + +#define WL_MIMO_PS_PS_LEARNING_CFG_V1 1 + +typedef struct wl_mimops_learning_cfg { + /* flag: bit 0 for abort */ + /* flag: bit 1 for status */ + /* flag: bit 2 for configuring no of packets and rssi */ + uint8 flag; + /* mimo ps learning version, compatible version is 0 */ + uint8 version; + /* if version is 0 or rssi is 0, ignored */ + int8 learning_rssi_threshold; + uint8 reserved; + uint32 no_of_packets_for_learning; + wl_mimo_ps_learning_event_data_t mimops_learning_data; +} wl_mimops_learning_cfg_t; + + +#define WL_OCL_STATUS_VERSION 1 +typedef struct ocl_status_info { + uint8 version; + uint8 len; + uint16 fw_status; /* Bits representing FW disable reasons */ + uint8 hw_status; /* Bits for actual HW config and SISO/MIMO coremask */ + uint8 coremask; /* The ocl core mask (indicating listening core) */ +} ocl_status_info_t; + +/* MWS OCL map */ +#define WL_MWS_OCL_OVERRIDE_VERSION 1 +typedef struct wl_mws_ocl_override { + uint16 version; /* Structure version */ + uint16 bitmap_2g; /* bitmap for 2.4G channels bits 1-13 */ + uint16 bitmap_5g_lo; /* bitmap for 5G low channels by 2: + *34-48, 52-56, 60-64, 100-102 + */ + uint16 bitmap_5g_mid; /* bitmap for 5G 
mid channels by 2: + * 104, 108-112, 116-120, 124-128, + * 132-136, 140, 149-151 + */ + uint16 bitmap_5g_high; /* bitmap for 5G high channels by 2 + * 153, 157-161, 165 + */ +} wl_mws_ocl_override_t; + +/* Bits for fw_status */ +#define OCL_DISABLED_HOST 0x01 /* Host has disabled through ocl_enable */ +#define OCL_DISABLED_RSSI 0x02 /* Disabled because of ocl_rssi_threshold */ +#define OCL_DISABLED_LTEC 0x04 /* Disabled due to LTE Coex activity */ +#define OCL_DISABLED_SISO 0x08 /* Disabled while in SISO mode */ +#define OCL_DISABLED_CAL 0x10 /* Disabled during active calibration */ +#define OCL_DISABLED_CHANSWITCH 0x20 /* Disabled during active channel switch */ +#define OCL_DISABLED_ASPEND 0x40 /* Disabled due to assoc pending */ + +/* Bits for hw_status */ +#define OCL_HWCFG 0x01 /* State of OCL config bit in phy HW */ +#define OCL_HWMIMO 0x02 /* Set if current coremask is > 1 bit */ +#define OCL_COREDOWN 0x80 /* Set if core is currently down */ + /* * Join preference iovar value is an array of tuples. 
Each tuple has a one-byte type, @@ -1953,19 +2461,19 @@ typedef struct wl_bsstrans_rssi { #define RSSI_RATE_MAP_MAX_STREAMS 4 /**< max streams supported */ -/* RSSI to rate mapping, all 20Mhz, no SGI */ +/** RSSI to rate mapping, all 20Mhz, no SGI */ typedef struct wl_bsstrans_rssi_rate_map { uint16 ver; - uint16 len; /* length of entire structure */ - wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /* 2.4G only */ - wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /* 6 to 54mbps */ + uint16 len; /**< length of entire structure */ + wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /**< 2.4G only */ + wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /**< 6 to 54mbps */ wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */ - wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT]; /* MCS0-9 */ + wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT]; /**< MCS0-9 */ } wl_bsstrans_rssi_rate_map_t; #define WL_BSSTRANS_ROAMTHROTTLE_VERSION 1 -/* Configure number of scans allowed per throttle period */ +/** Configure number of scans allowed per throttle period */ typedef struct wl_bsstrans_roamthrottle { uint16 ver; uint16 period; @@ -1973,56 +2481,164 @@ typedef struct wl_bsstrans_roamthrottle { } wl_bsstrans_roamthrottle_t; #define NFIFO 6 /**< # tx/rx fifopairs */ + +#if defined(BCM_DMA_CT) && !defined(BCM_DMA_CT_DISABLED) +#define NFIFO_EXT 32 /* 6 traditional FIFOs + 2 rsvd + 24 MU FIFOs */ +#elif defined(WL11AX) && defined(WL11AX_TRIGGERQ_ENABLED) +#define NFIFO_EXT 10 +#else +#define NFIFO_EXT NFIFO +#endif + +/* Reinit reason codes */ +enum { + WL_REINIT_RC_NONE = 0, + WL_REINIT_RC_PS_SYNC = 1, + WL_REINIT_RC_PSM_WD = 2, + WL_REINIT_RC_MAC_WAKE = 3, + WL_REINIT_RC_MAC_SUSPEND = 4, + WL_REINIT_RC_MAC_SPIN_WAIT = 5, + WL_REINIT_RC_AXI_BUS_ERROR = 6, + WL_REINIT_RC_DEVICE_REMOVED = 7, + WL_REINIT_RC_PCIE_FATAL_ERROR = 8, + WL_REINIT_RC_OL_FW_TRAP = 9, + WL_REINIT_RC_FIFO_ERR = 10, + WL_REINIT_RC_INV_TX_STATUS = 11, + 
WL_REINIT_RC_MQ_ERROR = 12, + WL_REINIT_RC_PHYTXERR_THRESH = 13, + WL_REINIT_RC_USER_FORCED = 14, + WL_REINIT_RC_FULL_RESET = 15, + WL_REINIT_RC_AP_BEACON = 16, + WL_REINIT_RC_PM_EXCESSED = 17, + WL_REINIT_RC_NO_CLK = 18, + WL_REINIT_RC_SW_ASSERT = 19, + WL_REINIT_RC_PSM_JMP0 = 20, + WL_REINIT_RC_PSM_RUN = 21, + WL_REINIT_RC_ENABLE_MAC = 22, + WL_REINIT_RC_SCAN_TIMEOUT = 23, + WL_REINIT_RC_JOIN_TIMEOUT = 24, + /* Below error codes are generated during D3 exit validation */ + WL_REINIT_RC_LINK_NOT_ACTIVE = 25, + WL_REINIT_RC_PCI_CFG_RD_FAIL = 26, + WL_REINIT_RC_INV_VEN_ID = 27, + WL_REINIT_RC_INV_DEV_ID = 28, + WL_REINIT_RC_INV_BAR0 = 29, + WL_REINIT_RC_INV_BAR2 = 30, + WL_REINIT_RC_AER_UC_FATAL = 31, + WL_REINIT_RC_AER_UC_NON_FATAL = 32, + WL_REINIT_RC_AER_CORR = 33, + WL_REINIT_RC_AER_DEV_STS = 34, + WL_REINIT_RC_PCIe_STS = 35, + WL_REINIT_RC_MMIO_RD_FAIL = 36, + WL_REINIT_RC_MMIO_RD_INVAL = 37, + WL_REINIT_RC_MMIO_ARM_MEM_RD_FAIL = 38, + WL_REINIT_RC_MMIO_ARM_MEM_INVAL = 39, + WL_REINIT_RC_SROM_LOAD_FAILED = 40, + WL_REINIT_RC_PHY_CRASH = 41, + WL_REINIT_TX_STALL = 42, + WL_REINIT_RC_TX_FLOW_CONTROL_BLOCKED = 43, + WL_REINIT_RC_RX_HC_FAIL = 44, + WL_REINIT_RC_RX_DMA_STALL = 45, + WL_REINIT_UTRACE_BUF_OVERLAP_SR = 46, + WL_REINIT_UTRACE_TPL_OUT_BOUNDS = 47, + WL_REINIT_UTRACE_TPL_OSET_STRT0 = 48, + WL_REINIT_RC_PHYTXERR = 49, + WL_REINIT_RC_PSM_FATAL_SUSP = 50, + WL_REINIT_RC_TX_FIFO_SUSP = 51, + WL_REINIT_RC_MAC_ENABLE = 52, + WL_REINIT_RC_SCAN_STALLED = 53, + WL_REINIT_RC_LAST /* This must be the last entry */ +}; + #define NREINITREASONCOUNT 8 -#define REINITREASONIDX(_x) (((_x) < NREINITREASONCOUNT) ? (_x) : 0) + +#define REINITRSNIDX(_x) (((_x) < WL_REINIT_RC_LAST) ? 
(_x) : 0) #define WL_CNT_T_VERSION 30 /**< current version of wl_cnt_t struct */ #define WL_CNT_VERSION_6 6 #define WL_CNT_VERSION_11 11 +#define WL_CNT_VERSION_XTLV 30 + +#define WL_COUNTERS_IOV_VERSION_1 1 +#define WL_SUBCNTR_IOV_VER WL_COUNTERS_IOV_VERSION_1 +/* First two uint16 are version and length fields. So offset of the first counter will be 4 */ +#define FIRST_COUNTER_OFFSET 0x04 #define WLC_WITH_XTLV_CNT -/* +/** * tlv IDs uniquely identifies counter component * packed into wl_cmd_t container */ enum wl_cnt_xtlv_id { + WL_CNT_XTLV_SLICE_IDX = 0x1, /**< Slice index */ WL_CNT_XTLV_WLC = 0x100, /**< WLC layer counters */ + WL_CNT_XTLV_WLC_RINIT_RSN = 0x101, /**< WLC layer reinitreason extension */ WL_CNT_XTLV_CNTV_LE10_UCODE = 0x200, /**< wl counter ver < 11 UCODE MACSTAT */ WL_CNT_XTLV_LT40_UCODE_V1 = 0x300, /**< corerev < 40 UCODE MACSTAT */ WL_CNT_XTLV_GE40_UCODE_V1 = 0x400, /**< corerev >= 40 UCODE MACSTAT */ WL_CNT_XTLV_GE64_UCODEX_V1 = 0x800 /* corerev >= 64 UCODEX MACSTAT */ }; -/* The number of variables in wl macstat cnt struct. +/** + * The number of variables in wl macstat cnt struct.
* (wl_cnt_ge40mcst_v1_t, wl_cnt_lt40mcst_v1_t, wl_cnt_v_le10_mcst_t) */ #define WL_CNT_MCST_VAR_NUM 64 /* sizeof(wl_cnt_ge40mcst_v1_t), sizeof(wl_cnt_lt40mcst_v1_t), and sizeof(wl_cnt_v_le10_mcst_t) */ -#define WL_CNT_MCST_STRUCT_SZ ((uint)sizeof(uint32) * WL_CNT_MCST_VAR_NUM) +#define WL_CNT_MCST_STRUCT_SZ ((uint32)sizeof(uint32) * WL_CNT_MCST_VAR_NUM) +#define WL_CNT_MCXST_STRUCT_SZ ((uint32)sizeof(wl_cnt_ge64mcxst_v1_t)) #define INVALID_CNT_VAL (uint32)(-1) -#define WL_CNT_MCXST_STRUCT_SZ ((uint)sizeof(wl_cnt_ge64mcxst_v1_t)) -#define WL_XTLV_CNTBUF_MAX_SIZE ((uint)(OFFSETOF(wl_cnt_info_t, data)) + \ - (uint)BCM_XTLV_HDR_SIZE + (uint)sizeof(wl_cnt_wlc_t) + \ - (uint)BCM_XTLV_HDR_SIZE + WL_CNT_MCST_STRUCT_SZ + \ - (uint)BCM_XTLV_HDR_SIZE + WL_CNT_MCXST_STRUCT_SZ) +#define WL_XTLV_CNTBUF_MAX_SIZE ((uint32)(OFFSETOF(wl_cnt_info_t, data)) + \ + (uint32)BCM_XTLV_HDR_SIZE + (uint32)sizeof(wl_cnt_wlc_t) + \ + (uint32)BCM_XTLV_HDR_SIZE + WL_CNT_MCST_STRUCT_SZ + \ + (uint32)BCM_XTLV_HDR_SIZE + WL_CNT_MCXST_STRUCT_SZ) -#define WL_CNTBUF_MAX_SIZE MAX(WL_XTLV_CNTBUF_MAX_SIZE, (uint)sizeof(wl_cnt_ver_11_t)) +#define WL_CNTBUF_MAX_SIZE MAX(WL_XTLV_CNTBUF_MAX_SIZE, (uint32)sizeof(wl_cnt_ver_11_t)) -/* Top structure of counters IOVar buffer */ + +/** Top structure of counters IOVar buffer */ typedef struct { uint16 version; /**< see definition of WL_CNT_T_VERSION */ uint16 datalen; /**< length of data including all paddings. */ - uint8 data [1]; /**< variable length payload: + uint8 data []; /**< variable length payload: * 1 or more bcm_xtlv_t type of tuples. * each tuple is padded to multiple of 4 bytes. * 'datalen' field of this structure includes all paddings. */ } wl_cnt_info_t; -/* wlc layer counters */ +/* Top structure of subcounters IOVar buffer + * Whenever we make any change in this structure + * WL_SUBCNTR_IOV_VER should be updated accordingly + * The structure definition should remain consistent b/w + * FW and wl/WLM app.
+ */ +typedef struct { + uint16 version; /* Version of IOVAR structure. Used for backward + * compatibility in future. Whenever we make any + * changes to this structure then value of WL_SUBCNTR_IOV_VER + * needs to be updated properly. + */ + uint16 length; /* length in bytes of this structure */ + uint16 counters_version; /* see definition of WL_CNT_T_VERSION + * wl app will send the version of counters + * which is used to calculate the offset of counters. + * It must match the version of counters FW is using + * else FW will return error with its version of counters + * set in this field. + */ + uint16 num_subcounters; /* Number of counter offset passed by wl app to FW. */ + uint32 data[1]; /* variable length payload: + * Offsets to the counters will be passed to FW + * through this data field. FW will return the value of counters + * at the offsets passed by wl app in this field itself. + */ +} wl_subcnt_info_t; + +/** wlc layer counters */ typedef struct { /* transmit stat counters */ uint32 txframe; /**< tx data frames */ @@ -2118,31 +2734,31 @@ typedef struct { uint32 prq_bad_entries; /**< which could not be translated to info */ uint32 atim_suppress_count; /**< TX suppressions on ATIM fifo */ uint32 bcn_template_not_ready; /**< Template marked in use on send bcn ...
*/ - uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */ + uint32 bcn_template_not_ready_done; /**< ...but "DMA done" interrupt rcvd */ uint32 late_tbtt_dpc; /**< TBTT DPC did not happen in time */ /* per-rate receive stat counters */ - uint32 rx1mbps; /* packets rx at 1Mbps */ - uint32 rx2mbps; /* packets rx at 2Mbps */ - uint32 rx5mbps5; /* packets rx at 5.5Mbps */ - uint32 rx6mbps; /* packets rx at 6Mbps */ - uint32 rx9mbps; /* packets rx at 9Mbps */ - uint32 rx11mbps; /* packets rx at 11Mbps */ - uint32 rx12mbps; /* packets rx at 12Mbps */ - uint32 rx18mbps; /* packets rx at 18Mbps */ - uint32 rx24mbps; /* packets rx at 24Mbps */ - uint32 rx36mbps; /* packets rx at 36Mbps */ - uint32 rx48mbps; /* packets rx at 48Mbps */ - uint32 rx54mbps; /* packets rx at 54Mbps */ - uint32 rx108mbps; /* packets rx at 108mbps */ - uint32 rx162mbps; /* packets rx at 162mbps */ - uint32 rx216mbps; /* packets rx at 216 mbps */ - uint32 rx270mbps; /* packets rx at 270 mbps */ - uint32 rx324mbps; /* packets rx at 324 mbps */ - uint32 rx378mbps; /* packets rx at 378 mbps */ - uint32 rx432mbps; /* packets rx at 432 mbps */ - uint32 rx486mbps; /* packets rx at 486 mbps */ - uint32 rx540mbps; /* packets rx at 540 mbps */ + uint32 rx1mbps; /**< packets rx at 1Mbps */ + uint32 rx2mbps; /**< packets rx at 2Mbps */ + uint32 rx5mbps5; /**< packets rx at 5.5Mbps */ + uint32 rx6mbps; /**< packets rx at 6Mbps */ + uint32 rx9mbps; /**< packets rx at 9Mbps */ + uint32 rx11mbps; /**< packets rx at 11Mbps */ + uint32 rx12mbps; /**< packets rx at 12Mbps */ + uint32 rx18mbps; /**< packets rx at 18Mbps */ + uint32 rx24mbps; /**< packets rx at 24Mbps */ + uint32 rx36mbps; /**< packets rx at 36Mbps */ + uint32 rx48mbps; /**< packets rx at 48Mbps */ + uint32 rx54mbps; /**< packets rx at 54Mbps */ + uint32 rx108mbps; /**< packets rx at 108mbps */ + uint32 rx162mbps; /**< packets rx at 162mbps */ + uint32 rx216mbps; /**< packets rx at 216 mbps */ + uint32 rx270mbps; /**< packets rx 
at 270 mbps */ + uint32 rx324mbps; /**< packets rx at 324 mbps */ + uint32 rx378mbps; /**< packets rx at 378 mbps */ + uint32 rx432mbps; /**< packets rx at 432 mbps */ + uint32 rx486mbps; /**< packets rx at 486 mbps */ + uint32 rx540mbps; /**< packets rx at 540 mbps */ uint32 rfdisable; /**< count of radio disables */ @@ -2178,7 +2794,7 @@ typedef struct { uint32 pstarxbcmc; /**< count of bcmc frames received on all psta */ uint32 pstatxbcmc; /**< count of bcmc frames transmitted on all psta */ - uint32 cso_passthrough; /* hw cso required but passthrough */ + uint32 cso_passthrough; /**< hw cso required but passthrough */ uint32 cso_normal; /**< hw cso hdr for normal process */ uint32 chained; /**< number of frames chained */ uint32 chainedsz1; /**< number of chain size 1 frames */ @@ -2187,42 +2803,61 @@ typedef struct { uint32 currchainsz; /**< current chain size */ uint32 pciereset; /**< Secondary Bus Reset issued by driver */ uint32 cfgrestore; /**< configspace restore by driver */ - uint32 reinitreason[NREINITREASONCOUNT]; /* reinitreason counters; 0: Unknown reason */ + uint32 reinitreason[NREINITREASONCOUNT]; /**< reinitreason counters; 0: Unknown reason */ uint32 rxrtry; - - uint32 rxmpdu_mu; /* Number of MU MPDUs received */ + uint32 rxmpdu_mu; /**< Number of MU MPDUs received */ /* detailed control/management frames */ - uint32 txbar; /**< Number of TX BAR */ - uint32 rxbar; /**< Number of RX BAR */ - uint32 txpspoll; /**< Number of TX PS-poll */ - uint32 rxpspoll; /**< Number of RX PS-poll */ - uint32 txnull; /**< Number of TX NULL_DATA */ - uint32 rxnull; /**< Number of RX NULL_DATA */ - uint32 txqosnull; /**< Number of TX NULL_QoSDATA */ - uint32 rxqosnull; /**< Number of RX NULL_QoSDATA */ - uint32 txassocreq; /**< Number of TX ASSOC request */ - uint32 rxassocreq; /**< Number of RX ASSOC request */ - uint32 txreassocreq; /**< Number of TX REASSOC request */ - uint32 rxreassocreq; /**< Number of RX REASSOC request */ - uint32 txdisassoc; /**< Number 
of TX DISASSOC */ - uint32 rxdisassoc; /**< Number of RX DISASSOC */ - uint32 txassocrsp; /**< Number of TX ASSOC response */ - uint32 rxassocrsp; /**< Number of RX ASSOC response */ - uint32 txreassocrsp; /**< Number of TX REASSOC response */ - uint32 rxreassocrsp; /**< Number of RX REASSOC response */ - uint32 txauth; /**< Number of TX AUTH */ - uint32 rxauth; /**< Number of RX AUTH */ - uint32 txdeauth; /**< Number of TX DEAUTH */ - uint32 rxdeauth; /**< Number of RX DEAUTH */ - uint32 txprobereq; /**< Number of TX probe request */ - uint32 rxprobereq; /**< Number of RX probe request */ - uint32 txprobersp; /**< Number of TX probe response */ - uint32 rxprobersp; /**< Number of RX probe response */ - uint32 txaction; /**< Number of TX action frame */ - uint32 rxaction; /**< Number of RX action frame */ + uint32 txbar; /**< Number of TX BAR */ + uint32 rxbar; /**< Number of RX BAR */ + uint32 txpspoll; /**< Number of TX PS-poll */ + uint32 rxpspoll; /**< Number of RX PS-poll */ + uint32 txnull; /**< Number of TX NULL_DATA */ + uint32 rxnull; /**< Number of RX NULL_DATA */ + uint32 txqosnull; /**< Number of TX NULL_QoSDATA */ + uint32 rxqosnull; /**< Number of RX NULL_QoSDATA */ + uint32 txassocreq; /**< Number of TX ASSOC request */ + uint32 rxassocreq; /**< Number of RX ASSOC request */ + uint32 txreassocreq; /**< Number of TX REASSOC request */ + uint32 rxreassocreq; /**< Number of RX REASSOC request */ + uint32 txdisassoc; /**< Number of TX DISASSOC */ + uint32 rxdisassoc; /**< Number of RX DISASSOC */ + uint32 txassocrsp; /**< Number of TX ASSOC response */ + uint32 rxassocrsp; /**< Number of RX ASSOC response */ + uint32 txreassocrsp; /**< Number of TX REASSOC response */ + uint32 rxreassocrsp; /**< Number of RX REASSOC response */ + uint32 txauth; /**< Number of TX AUTH */ + uint32 rxauth; /**< Number of RX AUTH */ + uint32 txdeauth; /**< Number of TX DEAUTH */ + uint32 rxdeauth; /**< Number of RX DEAUTH */ + uint32 txprobereq; /**< Number of TX probe 
request */ + uint32 rxprobereq; /**< Number of RX probe request */ + uint32 txprobersp; /**< Number of TX probe response */ + uint32 rxprobersp; /**< Number of RX probe response */ + uint32 txaction; /**< Number of TX action frame */ + uint32 rxaction; /**< Number of RX action frame */ + uint32 ampdu_wds; /**< Number of AMPDU watchdogs */ + uint32 txlost; /**< Number of lost packets reported in txs */ + uint32 txdatamcast; /**< Number of TX multicast data packets */ + uint32 txdatabcast; /**< Number of TX broadcast data packets */ + uint32 psmxwds; /**< Number of PSMx watchdogs */ + uint32 rxback; + uint32 txback; + uint32 p2p_tbtt; /**< Number of P2P TBTT Events */ + uint32 p2p_tbtt_miss; /**< Number of P2P TBTT Events Miss */ + uint32 txqueue_start; + uint32 txqueue_end; + uint32 txbcast; /* Broadcast TransmittedFrameCount */ + uint32 txdropped; /* tx dropped pkts */ + uint32 rxbcast; /* BroadcastReceivedFrameCount */ + uint32 rxdropped; /* rx dropped pkts (derived: sum of others) */ } wl_cnt_wlc_t; +/* Reinit reasons - do not put anything else other than reinit reasons here */ +typedef struct { + uint32 rsn[WL_REINIT_RC_LAST]; +} reinit_rsns_t; + /* MACXSTAT counters for ucodex (corerev >= 64) */ typedef struct { uint32 macxsusp; @@ -2233,7 +2868,7 @@ typedef struct { uint32 sfb2v; } wl_cnt_ge64mcxst_v1_t; -/* MACSTAT counters for ucode (corerev >= 40) */ +/** MACSTAT counters for ucode (corerev >= 40) */ typedef struct { /* MAC counters: 32-bit version of d11.h's macstat_t */ uint32 txallfrm; /**< total number of frames sent, incl. 
Data, ACK, RTS, CTS, @@ -2253,8 +2888,8 @@ typedef struct { uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for * driver enqueued frames */ - uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */ - uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */ + uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */ uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ uint32 rxanyerr; /**< Any RX error that is not counted by other counters. */ @@ -2281,7 +2916,7 @@ typedef struct { * (unlikely to see these) */ uint32 rxbeaconmbss; /**< beacons received from member of BSS */ - uint32 rxdtucastobss; /* number of unicast frames addressed to the MAC from + uint32 rxdtucastobss; /**< number of unicast frames addressed to the MAC from * other BSS (WDS FRAME) */ uint32 rxbeaconobss; /**< beacons received from other BSS */ @@ -2318,7 +2953,7 @@ typedef struct { uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ } wl_cnt_ge40mcst_v1_t; -/* MACSTAT counters for ucode (corerev < 40) */ +/** MACSTAT counters for ucode (corerev < 40) */ typedef struct { /* MAC counters: 32-bit version of d11.h's macstat_t */ uint32 txallfrm; /**< total number of frames sent, incl. 
Data, ACK, RTS, CTS, @@ -2366,7 +3001,7 @@ typedef struct { * (unlikely to see these) */ uint32 rxbeaconmbss; /**< beacons received from member of BSS */ - uint32 rxdtucastobss; /* number of unicast frames addressed to the MAC from + uint32 rxdtucastobss; /**< number of unicast frames addressed to the MAC from * other BSS (WDS FRAME) */ uint32 rxbeaconobss; /**< beacons received from other BSS */ @@ -2403,7 +3038,7 @@ typedef struct { uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ } wl_cnt_lt40mcst_v1_t; -/* MACSTAT counters for "wl counter" version <= 10 */ +/** MACSTAT counters for "wl counter" version <= 10 */ typedef struct { /* MAC counters: 32-bit version of d11.h's macstat_t */ uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS, @@ -2423,8 +3058,8 @@ typedef struct { uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for * driver enqueued frames */ - uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */ - uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */ + uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */ uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ uint32 rxinvmachdr; /**< Either the protocol version != 0 or frame type not @@ -2490,6 +3125,16 @@ typedef struct { uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ } wl_cnt_v_le10_mcst_t; +#define MAX_RX_FIFO 3 +#define WL_RXFIFO_CNT_VERSION 1 /* current version of wl_rxfifo_cnt_t */ +typedef struct { + /* Counters for frames received from rx fifos */ + uint16 version; + uint16 length; /* length of entire structure */ + uint32 rxf_data[MAX_RX_FIFO]; /* data frames from rx fifo */ + uint32 rxf_mgmtctl[MAX_RX_FIFO]; /* mgmt/ctl frames from rx fifo */ +} 
wl_rxfifo_cnt_t; + typedef struct { uint16 version; /**< see definition of WL_CNT_T_VERSION */ uint16 length; /**< length of entire structure */ @@ -2594,7 +3239,7 @@ typedef struct { * (unlikely to see these) */ uint32 rxbeaconmbss; /**< beacons received from member of BSS */ - uint32 rxdfrmucastobss; /* number of unicast frames addressed to the MAC from + uint32 rxdfrmucastobss; /**< number of unicast frames addressed to the MAC from * other BSS (WDS FRAME) */ uint32 rxbeaconobss; /**< beacons received from other BSS */ @@ -2665,35 +3310,35 @@ typedef struct { uint32 prq_bad_entries; /**< which could not be translated to info */ uint32 atim_suppress_count; /**< TX suppressions on ATIM fifo */ uint32 bcn_template_not_ready; /**< Template marked in use on send bcn ... */ - uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */ + uint32 bcn_template_not_ready_done; /**< ...but "DMA done" interrupt rcvd */ uint32 late_tbtt_dpc; /**< TBTT DPC did not happen in time */ /* per-rate receive stat counters */ - uint32 rx1mbps; /* packets rx at 1Mbps */ - uint32 rx2mbps; /* packets rx at 2Mbps */ - uint32 rx5mbps5; /* packets rx at 5.5Mbps */ - uint32 rx6mbps; /* packets rx at 6Mbps */ - uint32 rx9mbps; /* packets rx at 9Mbps */ - uint32 rx11mbps; /* packets rx at 11Mbps */ - uint32 rx12mbps; /* packets rx at 12Mbps */ - uint32 rx18mbps; /* packets rx at 18Mbps */ - uint32 rx24mbps; /* packets rx at 24Mbps */ - uint32 rx36mbps; /* packets rx at 36Mbps */ - uint32 rx48mbps; /* packets rx at 48Mbps */ - uint32 rx54mbps; /* packets rx at 54Mbps */ - uint32 rx108mbps; /* packets rx at 108mbps */ - uint32 rx162mbps; /* packets rx at 162mbps */ - uint32 rx216mbps; /* packets rx at 216 mbps */ - uint32 rx270mbps; /* packets rx at 270 mbps */ - uint32 rx324mbps; /* packets rx at 324 mbps */ - uint32 rx378mbps; /* packets rx at 378 mbps */ - uint32 rx432mbps; /* packets rx at 432 mbps */ - uint32 rx486mbps; /* packets rx at 486 mbps */ - uint32 rx540mbps; /* 
packets rx at 540 mbps */ + uint32 rx1mbps; /**< packets rx at 1Mbps */ + uint32 rx2mbps; /**< packets rx at 2Mbps */ + uint32 rx5mbps5; /**< packets rx at 5.5Mbps */ + uint32 rx6mbps; /**< packets rx at 6Mbps */ + uint32 rx9mbps; /**< packets rx at 9Mbps */ + uint32 rx11mbps; /**< packets rx at 11Mbps */ + uint32 rx12mbps; /**< packets rx at 12Mbps */ + uint32 rx18mbps; /**< packets rx at 18Mbps */ + uint32 rx24mbps; /**< packets rx at 24Mbps */ + uint32 rx36mbps; /**< packets rx at 36Mbps */ + uint32 rx48mbps; /**< packets rx at 48Mbps */ + uint32 rx54mbps; /**< packets rx at 54Mbps */ + uint32 rx108mbps; /**< packets rx at 108mbps */ + uint32 rx162mbps; /**< packets rx at 162mbps */ + uint32 rx216mbps; /**< packets rx at 216 mbps */ + uint32 rx270mbps; /**< packets rx at 270 mbps */ + uint32 rx324mbps; /**< packets rx at 324 mbps */ + uint32 rx378mbps; /**< packets rx at 378 mbps */ + uint32 rx432mbps; /**< packets rx at 432 mbps */ + uint32 rx486mbps; /**< packets rx at 486 mbps */ + uint32 rx540mbps; /**< packets rx at 540 mbps */ /* pkteng rx frame stats */ - uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */ - uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */ + uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */ uint32 rfdisable; /**< count of radio disables */ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ @@ -2731,7 +3376,7 @@ typedef struct { uint32 pstarxbcmc; /**< count of bcmc frames received on all psta */ uint32 pstatxbcmc; /**< count of bcmc frames transmitted on all psta */ - uint32 cso_passthrough; /* hw cso required but passthrough */ + uint32 cso_passthrough; /**< hw cso required but passthrough */ uint32 cso_normal; /**< hw cso hdr for normal process */ uint32 chained; /**< number of frames chained */ uint32 chainedsz1; /**< number of chain size 1 frames */ @@ -2741,319 +3386,327 @@ typedef struct { 
uint32 rxdrop20s; /**< drop secondary cnt */ uint32 pciereset; /**< Secondary Bus Reset issued by driver */ uint32 cfgrestore; /**< configspace restore by driver */ - uint32 reinitreason[NREINITREASONCOUNT]; /* reinitreason counters; 0: Unknown reason */ + uint32 reinitreason[NREINITREASONCOUNT]; /**< reinitreason counters; 0: Unknown reason */ uint32 rxrtry; /**< num of received packets with retry bit on */ uint32 txmpdu; /**< macstat cnt only valid in ver 11. number of MPDUs txed. */ uint32 rxnodelim; /**< macstat cnt only valid in ver 11. * number of occasions that no valid delimiter is detected * by ampdu parser. */ - uint32 rxmpdu_mu; /* Number of MU MPDUs received */ + uint32 rxmpdu_mu; /**< Number of MU MPDUs received */ /* detailed control/management frames */ - uint32 txbar; /**< Number of TX BAR */ - uint32 rxbar; /**< Number of RX BAR */ - uint32 txpspoll; /**< Number of TX PS-poll */ - uint32 rxpspoll; /**< Number of RX PS-poll */ - uint32 txnull; /**< Number of TX NULL_DATA */ - uint32 rxnull; /**< Number of RX NULL_DATA */ - uint32 txqosnull; /**< Number of TX NULL_QoSDATA */ - uint32 rxqosnull; /**< Number of RX NULL_QoSDATA */ - uint32 txassocreq; /**< Number of TX ASSOC request */ - uint32 rxassocreq; /**< Number of RX ASSOC request */ - uint32 txreassocreq; /**< Number of TX REASSOC request */ - uint32 rxreassocreq; /**< Number of RX REASSOC request */ - uint32 txdisassoc; /**< Number of TX DISASSOC */ - uint32 rxdisassoc; /**< Number of RX DISASSOC */ - uint32 txassocrsp; /**< Number of TX ASSOC response */ - uint32 rxassocrsp; /**< Number of RX ASSOC response */ - uint32 txreassocrsp; /**< Number of TX REASSOC response */ - uint32 rxreassocrsp; /**< Number of RX REASSOC response */ - uint32 txauth; /**< Number of TX AUTH */ - uint32 rxauth; /**< Number of RX AUTH */ - uint32 txdeauth; /**< Number of TX DEAUTH */ - uint32 rxdeauth; /**< Number of RX DEAUTH */ - uint32 txprobereq; /**< Number of TX probe request */ - uint32 rxprobereq; /**< Number 
of RX probe request */ - uint32 txprobersp; /**< Number of TX probe response */ - uint32 rxprobersp; /**< Number of RX probe response */ - uint32 txaction; /**< Number of TX action frame */ - uint32 rxaction; /**< Number of RX action frame */ + uint32 txbar; /**< Number of TX BAR */ + uint32 rxbar; /**< Number of RX BAR */ + uint32 txpspoll; /**< Number of TX PS-poll */ + uint32 rxpspoll; /**< Number of RX PS-poll */ + uint32 txnull; /**< Number of TX NULL_DATA */ + uint32 rxnull; /**< Number of RX NULL_DATA */ + uint32 txqosnull; /**< Number of TX NULL_QoSDATA */ + uint32 rxqosnull; /**< Number of RX NULL_QoSDATA */ + uint32 txassocreq; /**< Number of TX ASSOC request */ + uint32 rxassocreq; /**< Number of RX ASSOC request */ + uint32 txreassocreq; /**< Number of TX REASSOC request */ + uint32 rxreassocreq; /**< Number of RX REASSOC request */ + uint32 txdisassoc; /**< Number of TX DISASSOC */ + uint32 rxdisassoc; /**< Number of RX DISASSOC */ + uint32 txassocrsp; /**< Number of TX ASSOC response */ + uint32 rxassocrsp; /**< Number of RX ASSOC response */ + uint32 txreassocrsp; /**< Number of TX REASSOC response */ + uint32 rxreassocrsp; /**< Number of RX REASSOC response */ + uint32 txauth; /**< Number of TX AUTH */ + uint32 rxauth; /**< Number of RX AUTH */ + uint32 txdeauth; /**< Number of TX DEAUTH */ + uint32 rxdeauth; /**< Number of RX DEAUTH */ + uint32 txprobereq; /**< Number of TX probe request */ + uint32 rxprobereq; /**< Number of RX probe request */ + uint32 txprobersp; /**< Number of TX probe response */ + uint32 rxprobersp; /**< Number of RX probe response */ + uint32 txaction; /**< Number of TX action frame */ + uint32 rxaction; /**< Number of RX action frame */ + uint32 ampdu_wds; /**< Number of AMPDU watchdogs */ + uint32 txlost; /**< Number of lost packets reported in txs */ + uint32 txdatamcast; /**< Number of TX multicast data packets */ + uint32 txdatabcast; /**< Number of TX broadcast data packets */ + uint32 txbcast; /* Broadcast 
TransmittedFrameCount */ + uint32 txdropped; /* tx dropped pkts */ + uint32 rxbcast; /* BroadcastReceivedFrameCount */ + uint32 rxdropped; /* rx dropped pkts (derived: sum of others) */ } wl_cnt_ver_11_t; typedef struct { - uint16 version; /* see definition of WL_CNT_T_VERSION */ - uint16 length; /* length of entire structure */ + uint16 version; /**< see definition of WL_CNT_T_VERSION */ + uint16 length; /**< length of entire structure */ /* transmit stat counters */ - uint32 txframe; /* tx data frames */ - uint32 txbyte; /* tx data bytes */ - uint32 txretrans; /* tx mac retransmits */ - uint32 txerror; /* tx data errors (derived: sum of others) */ - uint32 txctl; /* tx management frames */ - uint32 txprshort; /* tx short preamble frames */ - uint32 txserr; /* tx status errors */ - uint32 txnobuf; /* tx out of buffers errors */ - uint32 txnoassoc; /* tx discard because we're not associated */ - uint32 txrunt; /* tx runt frames */ - uint32 txchit; /* tx header cache hit (fastpath) */ - uint32 txcmiss; /* tx header cache miss (slowpath) */ + uint32 txframe; /**< tx data frames */ + uint32 txbyte; /**< tx data bytes */ + uint32 txretrans; /**< tx mac retransmits */ + uint32 txerror; /**< tx data errors (derived: sum of others) */ + uint32 txctl; /**< tx management frames */ + uint32 txprshort; /**< tx short preamble frames */ + uint32 txserr; /**< tx status errors */ + uint32 txnobuf; /**< tx out of buffers errors */ + uint32 txnoassoc; /**< tx discard because we're not associated */ + uint32 txrunt; /**< tx runt frames */ + uint32 txchit; /**< tx header cache hit (fastpath) */ + uint32 txcmiss; /**< tx header cache miss (slowpath) */ /* transmit chip error counters */ - uint32 txuflo; /* tx fifo underflows */ - uint32 txphyerr; /* tx phy errors (indicated in tx status) */ + uint32 txuflo; /**< tx fifo underflows */ + uint32 txphyerr; /**< tx phy errors (indicated in tx status) */ uint32 txphycrs; /* receive stat counters */ - uint32 rxframe; /* rx data frames */ - 
uint32 rxbyte; /* rx data bytes */ - uint32 rxerror; /* rx data errors (derived: sum of others) */ - uint32 rxctl; /* rx management frames */ - uint32 rxnobuf; /* rx out of buffers errors */ - uint32 rxnondata; /* rx non data frames in the data channel errors */ - uint32 rxbadds; /* rx bad DS errors */ - uint32 rxbadcm; /* rx bad control or management frames */ - uint32 rxfragerr; /* rx fragmentation errors */ - uint32 rxrunt; /* rx runt frames */ - uint32 rxgiant; /* rx giant frames */ - uint32 rxnoscb; /* rx no scb error */ - uint32 rxbadproto; /* rx invalid frames */ - uint32 rxbadsrcmac; /* rx frames with Invalid Src Mac */ - uint32 rxbadda; /* rx frames tossed for invalid da */ - uint32 rxfilter; /* rx frames filtered out */ + uint32 rxframe; /**< rx data frames */ + uint32 rxbyte; /**< rx data bytes */ + uint32 rxerror; /**< rx data errors (derived: sum of others) */ + uint32 rxctl; /**< rx management frames */ + uint32 rxnobuf; /**< rx out of buffers errors */ + uint32 rxnondata; /**< rx non data frames in the data channel errors */ + uint32 rxbadds; /**< rx bad DS errors */ + uint32 rxbadcm; /**< rx bad control or management frames */ + uint32 rxfragerr; /**< rx fragmentation errors */ + uint32 rxrunt; /**< rx runt frames */ + uint32 rxgiant; /**< rx giant frames */ + uint32 rxnoscb; /**< rx no scb error */ + uint32 rxbadproto; /**< rx invalid frames */ + uint32 rxbadsrcmac; /**< rx frames with Invalid Src Mac */ + uint32 rxbadda; /**< rx frames tossed for invalid da */ + uint32 rxfilter; /**< rx frames filtered out */ /* receive chip error counters */ - uint32 rxoflo; /* rx fifo overflow errors */ - uint32 rxuflo[NFIFO]; /* rx dma descriptor underflow errors */ + uint32 rxoflo; /**< rx fifo overflow errors */ + uint32 rxuflo[NFIFO]; /**< rx dma descriptor underflow errors */ - uint32 d11cnt_txrts_off; /* d11cnt txrts value when reset d11cnt */ - uint32 d11cnt_rxcrc_off; /* d11cnt rxcrc value when reset d11cnt */ - uint32 d11cnt_txnocts_off; /* d11cnt 
txnocts value when reset d11cnt */ + uint32 d11cnt_txrts_off; /**< d11cnt txrts value when reset d11cnt */ + uint32 d11cnt_rxcrc_off; /**< d11cnt rxcrc value when reset d11cnt */ + uint32 d11cnt_txnocts_off; /**< d11cnt txnocts value when reset d11cnt */ /* misc counters */ - uint32 dmade; /* tx/rx dma descriptor errors */ - uint32 dmada; /* tx/rx dma data errors */ - uint32 dmape; /* tx/rx dma descriptor protocol errors */ - uint32 reset; /* reset count */ - uint32 tbtt; /* cnts the TBTT int's */ + uint32 dmade; /**< tx/rx dma descriptor errors */ + uint32 dmada; /**< tx/rx dma data errors */ + uint32 dmape; /**< tx/rx dma descriptor protocol errors */ + uint32 reset; /**< reset count */ + uint32 tbtt; /**< cnts the TBTT int's */ uint32 txdmawar; - uint32 pkt_callback_reg_fail; /* callbacks register failure */ + uint32 pkt_callback_reg_fail; /**< callbacks register failure */ /* MAC counters: 32-bit version of d11.h's macstat_t */ - uint32 txallfrm; /* total number of frames sent, incl. Data, ACK, RTS, CTS, + uint32 txallfrm; /**< total number of frames sent, incl. 
Data, ACK, RTS, CTS, * Control Management (includes retransmissions) */ - uint32 txrtsfrm; /* number of RTS sent out by the MAC */ - uint32 txctsfrm; /* number of CTS sent out by the MAC */ - uint32 txackfrm; /* number of ACK frames sent out */ - uint32 txdnlfrm; /* Not used */ - uint32 txbcnfrm; /* beacons transmitted */ - uint32 txfunfl[6]; /* per-fifo tx underflows */ - uint32 rxtoolate; /* receive too late */ - uint32 txfbw; /* transmit at fallback bw (dynamic bw) */ - uint32 txtplunfl; /* Template underflows (mac was too slow to transmit ACK/CTS + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< Not used */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 rxtoolate; /**< receive too late */ + uint32 txfbw; /**< transmit at fallback bw (dynamic bw) */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS * or BCN) */ - uint32 txphyerror; /* Transmit phy error, type of error is reported in tx-status for + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for * driver enqueued frames */ - uint32 rxfrmtoolong; /* Received frame longer than legal limit (2346 bytes) */ - uint32 rxfrmtooshrt; /* Received frame did not contain enough bytes for its frame type */ - uint32 rxinvmachdr; /* Either the protocol version != 0 or frame type not + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxinvmachdr; /**< Either the protocol version != 0 or frame type not * data/control/management */ - uint32 rxbadfcs; /* number of frames for which the CRC check failed in the MAC */ - uint32 rxbadplcp; /* parity check of the PLCP header failed */ - uint32 rxcrsglitch; /* PHY was able to 
correlate the preamble but not the header */ - uint32 rxstrt; /* Number of received frames with a good PLCP + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP * (i.e. passing parity check) */ - uint32 rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */ - uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */ - uint32 rxcfrmucast; /* number of received CNTRL frames with good FCS and matching RA */ - uint32 rxrtsucast; /* number of unicast RTS addressed to the MAC (good FCS) */ - uint32 rxctsucast; /* number of unicast CTS addressed to the MAC (good FCS) */ - uint32 rxackucast; /* number of ucast ACKS received (good FCS) */ - uint32 rxdfrmocast; /* number of received DATA frames (good FCS and not matching RA) */ - uint32 rxmfrmocast; /* number of received MGMT frames (good FCS and not matching RA) */ - uint32 rxcfrmocast; /* number of received CNTRL frame (good FCS and not matching RA) */ - uint32 rxrtsocast; /* number of received RTS not addressed to the MAC */ - uint32 rxctsocast; /* number of received CTS not addressed to the MAC */ - uint32 rxdfrmmcast; /* number of RX Data multicast frames received by the MAC */ - uint32 rxmfrmmcast; /* number of RX Management multicast frames received by the MAC */ - uint32 rxcfrmmcast; /* number of RX Control multicast frames received by the MAC + uint32 rxdfrmucastmbss; /**< # of received DATA frames with good FCS and matching RA */ + uint32 rxmfrmucastmbss; /**< # of received mgmt frames with good FCS and matching RA */ + uint32 rxcfrmucast; /**< # of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; 
/**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdfrmocast; /**< # of received DATA frames (good FCS and not matching RA) */ + uint32 rxmfrmocast; /**< # of received MGMT frames (good FCS and not matching RA) */ + uint32 rxcfrmocast; /**< # of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdfrmmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmfrmmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxcfrmmcast; /**< number of RX Control multicast frames received by the MAC * (unlikely to see these) */ - uint32 rxbeaconmbss; /* beacons received from member of BSS */ - uint32 rxdfrmucastobss; /* number of unicast frames addressed to the MAC from + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdfrmucastobss; /**< number of unicast frames addressed to the MAC from * other BSS (WDS FRAME) */ - uint32 rxbeaconobss; /* beacons received from other BSS */ - uint32 rxrsptmout; /* Number of response timeouts for transmitted frames + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< Number of response timeouts for transmitted frames * expecting a response */ - uint32 bcntxcancl; /* transmit beacons canceled due to receipt of beacon (IBSS) */ - uint32 rxf0ovfl; /* Number of receive fifo 0 overflows */ - uint32 rxf1ovfl; /* Number of receive fifo 1 overflows (obsolete) */ - uint32 rxf2ovfl; /* Number of receive fifo 2 overflows (obsolete) */ - uint32 txsfovfl; /* Number of transmit status fifo overflows (obsolete) */ - uint32 pmqovfl; /* Number of PMQ overflows */ - uint32 rxcgprqfrm; /* Number of received Probe requests that made it into + uint32 bcntxcancl; /**< transmit beacons canceled 
due to receipt of beacon (IBSS) */ + uint32 rxf0ovfl; /**< Number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< Number of receive fifo 1 overflows (obsolete) */ + uint32 rxf2ovfl; /**< Number of receive fifo 2 overflows (obsolete) */ + uint32 txsfovfl; /**< Number of transmit status fifo overflows (obsolete) */ + uint32 pmqovfl; /**< Number of PMQ overflows */ + uint32 rxcgprqfrm; /**< Number of received Probe requests that made it into * the PRQ fifo */ - uint32 rxcgprsqovfl; /* Rx Probe Request Que overflow in the AP */ - uint32 txcgprsfail; /* Tx Probe Response Fail. AP sent probe response but did + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did * not get ACK */ - uint32 txcgprssuc; /* Tx Probe Response Success (ACK was received) */ - uint32 prs_timeout; /* Number of probe requests that were dropped from the PRQ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< Number of probe requests that were dropped from the PRQ * fifo because a probe response could not be sent out within * the time limit defined in M_PRS_MAXTIME */ uint32 rxnack; uint32 frmscons; - uint32 txnack; /* obsolete */ - uint32 rxback; /* blockack rxcnt */ - uint32 txback; /* blockack txcnt */ + uint32 txnack; /**< obsolete */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. 
*/ - uint32 txfrag; /* dot11TransmittedFragmentCount */ - uint32 txmulti; /* dot11MulticastTransmittedFrameCount */ - uint32 txfail; /* dot11FailedCount */ - uint32 txretry; /* dot11RetryCount */ - uint32 txretrie; /* dot11MultipleRetryCount */ - uint32 rxdup; /* dot11FrameduplicateCount */ - uint32 txrts; /* dot11RTSSuccessCount */ - uint32 txnocts; /* dot11RTSFailureCount */ - uint32 txnoack; /* dot11ACKFailureCount */ - uint32 rxfrag; /* dot11ReceivedFragmentCount */ - uint32 rxmulti; /* dot11MulticastReceivedFrameCount */ - uint32 rxcrc; /* dot11FCSErrorCount */ - uint32 txfrmsnt; /* dot11TransmittedFrameCount (bogus MIB?) */ - uint32 rxundec; /* dot11WEPUndecryptableCount */ + uint32 txfrag; /**< dot11TransmittedFragmentCount */ + uint32 txmulti; /**< dot11MulticastTransmittedFrameCount */ + uint32 txfail; /**< dot11FailedCount */ + uint32 txretry; /**< dot11RetryCount */ + uint32 txretrie; /**< dot11MultipleRetryCount */ + uint32 rxdup; /**< dot11FrameduplicateCount */ + uint32 txrts; /**< dot11RTSSuccessCount */ + uint32 txnocts; /**< dot11RTSFailureCount */ + uint32 txnoack; /**< dot11ACKFailureCount */ + uint32 rxfrag; /**< dot11ReceivedFragmentCount */ + uint32 rxmulti; /**< dot11MulticastReceivedFrameCount */ + uint32 rxcrc; /**< dot11FCSErrorCount */ + uint32 txfrmsnt; /**< dot11TransmittedFrameCount (bogus MIB?) 
*/ + uint32 rxundec; /**< dot11WEPUndecryptableCount */ /* WPA2 counters (see rxundec for DecryptFailureCount) */ - uint32 tkipmicfaill; /* TKIPLocalMICFailures */ - uint32 tkipcntrmsr; /* TKIPCounterMeasuresInvoked */ - uint32 tkipreplay; /* TKIPReplays */ - uint32 ccmpfmterr; /* CCMPFormatErrors */ - uint32 ccmpreplay; /* CCMPReplays */ - uint32 ccmpundec; /* CCMPDecryptErrors */ - uint32 fourwayfail; /* FourWayHandshakeFailures */ - uint32 wepundec; /* dot11WEPUndecryptableCount */ - uint32 wepicverr; /* dot11WEPICVErrorCount */ - uint32 decsuccess; /* DecryptSuccessCount */ - uint32 tkipicverr; /* TKIPICVErrorCount */ - uint32 wepexcluded; /* dot11WEPExcludedCount */ + uint32 tkipmicfaill; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay; /**< TKIPReplays */ + uint32 ccmpfmterr; /**< CCMPFormatErrors */ + uint32 ccmpreplay; /**< CCMPReplays */ + uint32 ccmpundec; /**< CCMPDecryptErrors */ + uint32 fourwayfail; /**< FourWayHandshakeFailures */ + uint32 wepundec; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr; /**< dot11WEPICVErrorCount */ + uint32 decsuccess; /**< DecryptSuccessCount */ + uint32 tkipicverr; /**< TKIPICVErrorCount */ + uint32 wepexcluded; /**< dot11WEPExcludedCount */ - uint32 rxundec_mcst; /* dot11WEPUndecryptableCount */ + uint32 rxundec_mcst; /**< dot11WEPUndecryptableCount */ /* WPA2 counters (see rxundec for DecryptFailureCount) */ - uint32 tkipmicfaill_mcst; /* TKIPLocalMICFailures */ - uint32 tkipcntrmsr_mcst; /* TKIPCounterMeasuresInvoked */ - uint32 tkipreplay_mcst; /* TKIPReplays */ - uint32 ccmpfmterr_mcst; /* CCMPFormatErrors */ - uint32 ccmpreplay_mcst; /* CCMPReplays */ - uint32 ccmpundec_mcst; /* CCMPDecryptErrors */ - uint32 fourwayfail_mcst; /* FourWayHandshakeFailures */ - uint32 wepundec_mcst; /* dot11WEPUndecryptableCount */ - uint32 wepicverr_mcst; /* dot11WEPICVErrorCount */ - uint32 decsuccess_mcst; /* DecryptSuccessCount */ - uint32 tkipicverr_mcst; /* 
TKIPICVErrorCount */ - uint32 wepexcluded_mcst; /* dot11WEPExcludedCount */ + uint32 tkipmicfaill_mcst; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr_mcst; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay_mcst; /**< TKIPReplays */ + uint32 ccmpfmterr_mcst; /**< CCMPFormatErrors */ + uint32 ccmpreplay_mcst; /**< CCMPReplays */ + uint32 ccmpundec_mcst; /**< CCMPDecryptErrors */ + uint32 fourwayfail_mcst; /**< FourWayHandshakeFailures */ + uint32 wepundec_mcst; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr_mcst; /**< dot11WEPICVErrorCount */ + uint32 decsuccess_mcst; /**< DecryptSuccessCount */ + uint32 tkipicverr_mcst; /**< TKIPICVErrorCount */ + uint32 wepexcluded_mcst; /**< dot11WEPExcludedCount */ - uint32 txchanrej; /* Tx frames suppressed due to channel rejection */ - uint32 txexptime; /* Tx frames suppressed due to timer expiration */ - uint32 psmwds; /* Count PSM watchdogs */ - uint32 phywatchdog; /* Count Phy watchdogs (triggered by ucode) */ + uint32 txchanrej; /**< Tx frames suppressed due to channel rejection */ + uint32 txexptime; /**< Tx frames suppressed due to timer expiration */ + uint32 psmwds; /**< Count PSM watchdogs */ + uint32 phywatchdog; /**< Count Phy watchdogs (triggered by ucode) */ /* MBSS counters, AP only */ - uint32 prq_entries_handled; /* PRQ entries read in */ - uint32 prq_undirected_entries; /* which were bcast bss & ssid */ - uint32 prq_bad_entries; /* which could not be translated to info */ - uint32 atim_suppress_count; /* TX suppressions on ATIM fifo */ - uint32 bcn_template_not_ready; /* Template marked in use on send bcn ... 
*/ - uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */ - uint32 late_tbtt_dpc; /* TBTT DPC did not happen in time */ + uint32 prq_entries_handled; /**< PRQ entries read in */ + uint32 prq_undirected_entries; /**< which were bcast bss & ssid */ + uint32 prq_bad_entries; /**< which could not be translated to info */ + uint32 atim_suppress_count; /**< TX suppressions on ATIM fifo */ + uint32 bcn_template_not_ready; /**< Template marked in use on send bcn ... */ + uint32 bcn_template_not_ready_done; /**< ...but "DMA done" interrupt rcvd */ + uint32 late_tbtt_dpc; /**< TBTT DPC did not happen in time */ /* per-rate receive stat counters */ - uint32 rx1mbps; /* packets rx at 1Mbps */ - uint32 rx2mbps; /* packets rx at 2Mbps */ - uint32 rx5mbps5; /* packets rx at 5.5Mbps */ - uint32 rx6mbps; /* packets rx at 6Mbps */ - uint32 rx9mbps; /* packets rx at 9Mbps */ - uint32 rx11mbps; /* packets rx at 11Mbps */ - uint32 rx12mbps; /* packets rx at 12Mbps */ - uint32 rx18mbps; /* packets rx at 18Mbps */ - uint32 rx24mbps; /* packets rx at 24Mbps */ - uint32 rx36mbps; /* packets rx at 36Mbps */ - uint32 rx48mbps; /* packets rx at 48Mbps */ - uint32 rx54mbps; /* packets rx at 54Mbps */ - uint32 rx108mbps; /* packets rx at 108mbps */ - uint32 rx162mbps; /* packets rx at 162mbps */ - uint32 rx216mbps; /* packets rx at 216 mbps */ - uint32 rx270mbps; /* packets rx at 270 mbps */ - uint32 rx324mbps; /* packets rx at 324 mbps */ - uint32 rx378mbps; /* packets rx at 378 mbps */ - uint32 rx432mbps; /* packets rx at 432 mbps */ - uint32 rx486mbps; /* packets rx at 486 mbps */ - uint32 rx540mbps; /* packets rx at 540 mbps */ + uint32 rx1mbps; /**< packets rx at 1Mbps */ + uint32 rx2mbps; /**< packets rx at 2Mbps */ + uint32 rx5mbps5; /**< packets rx at 5.5Mbps */ + uint32 rx6mbps; /**< packets rx at 6Mbps */ + uint32 rx9mbps; /**< packets rx at 9Mbps */ + uint32 rx11mbps; /**< packets rx at 11Mbps */ + uint32 rx12mbps; /**< packets rx at 12Mbps */ + uint32 rx18mbps; 
/**< packets rx at 18Mbps */ + uint32 rx24mbps; /**< packets rx at 24Mbps */ + uint32 rx36mbps; /**< packets rx at 36Mbps */ + uint32 rx48mbps; /**< packets rx at 48Mbps */ + uint32 rx54mbps; /**< packets rx at 54Mbps */ + uint32 rx108mbps; /**< packets rx at 108mbps */ + uint32 rx162mbps; /**< packets rx at 162mbps */ + uint32 rx216mbps; /**< packets rx at 216 mbps */ + uint32 rx270mbps; /**< packets rx at 270 mbps */ + uint32 rx324mbps; /**< packets rx at 324 mbps */ + uint32 rx378mbps; /**< packets rx at 378 mbps */ + uint32 rx432mbps; /**< packets rx at 432 mbps */ + uint32 rx486mbps; /**< packets rx at 486 mbps */ + uint32 rx540mbps; /**< packets rx at 540 mbps */ /* pkteng rx frame stats */ - uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */ - uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */ + uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */ - uint32 rfdisable; /* count of radio disables */ - uint32 bphy_rxcrsglitch; /* PHY count of bphy glitches */ + uint32 rfdisable; /**< count of radio disables */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ uint32 bphy_badplcp; - uint32 txmpdu_sgi; /* count for sgi transmit */ - uint32 rxmpdu_sgi; /* count for sgi received */ - uint32 txmpdu_stbc; /* count for stbc transmit */ - uint32 rxmpdu_stbc; /* count for stbc received */ + uint32 txmpdu_sgi; /**< count for sgi transmit */ + uint32 rxmpdu_sgi; /**< count for sgi received */ + uint32 txmpdu_stbc; /**< count for stbc transmit */ + uint32 rxmpdu_stbc; /**< count for stbc received */ - uint32 rxdrop20s; /* drop secondary cnt */ + uint32 rxdrop20s; /**< drop secondary cnt */ } wl_cnt_ver_6_t; -#define WL_DELTA_STATS_T_VERSION 2 /* current version of wl_delta_stats_t struct */ +#define WL_DELTA_STATS_T_VERSION 2 /**< current version of wl_delta_stats_t struct */ typedef struct { - uint16 version; /* see definition of 
WL_DELTA_STATS_T_VERSION */ - uint16 length; /* length of entire structure */ + uint16 version; /**< see definition of WL_DELTA_STATS_T_VERSION */ + uint16 length; /**< length of entire structure */ /* transmit stat counters */ - uint32 txframe; /* tx data frames */ - uint32 txbyte; /* tx data bytes */ - uint32 txretrans; /* tx mac retransmits */ - uint32 txfail; /* tx failures */ + uint32 txframe; /**< tx data frames */ + uint32 txbyte; /**< tx data bytes */ + uint32 txretrans; /**< tx mac retransmits */ + uint32 txfail; /**< tx failures */ /* receive stat counters */ - uint32 rxframe; /* rx data frames */ - uint32 rxbyte; /* rx data bytes */ + uint32 rxframe; /**< rx data frames */ + uint32 rxbyte; /**< rx data bytes */ /* per-rate receive stat counters */ - uint32 rx1mbps; /* packets rx at 1Mbps */ - uint32 rx2mbps; /* packets rx at 2Mbps */ - uint32 rx5mbps5; /* packets rx at 5.5Mbps */ - uint32 rx6mbps; /* packets rx at 6Mbps */ - uint32 rx9mbps; /* packets rx at 9Mbps */ - uint32 rx11mbps; /* packets rx at 11Mbps */ - uint32 rx12mbps; /* packets rx at 12Mbps */ - uint32 rx18mbps; /* packets rx at 18Mbps */ - uint32 rx24mbps; /* packets rx at 24Mbps */ - uint32 rx36mbps; /* packets rx at 36Mbps */ - uint32 rx48mbps; /* packets rx at 48Mbps */ - uint32 rx54mbps; /* packets rx at 54Mbps */ - uint32 rx108mbps; /* packets rx at 108mbps */ - uint32 rx162mbps; /* packets rx at 162mbps */ - uint32 rx216mbps; /* packets rx at 216 mbps */ - uint32 rx270mbps; /* packets rx at 270 mbps */ - uint32 rx324mbps; /* packets rx at 324 mbps */ - uint32 rx378mbps; /* packets rx at 378 mbps */ - uint32 rx432mbps; /* packets rx at 432 mbps */ - uint32 rx486mbps; /* packets rx at 486 mbps */ - uint32 rx540mbps; /* packets rx at 540 mbps */ + uint32 rx1mbps; /**< packets rx at 1Mbps */ + uint32 rx2mbps; /**< packets rx at 2Mbps */ + uint32 rx5mbps5; /**< packets rx at 5.5Mbps */ + uint32 rx6mbps; /**< packets rx at 6Mbps */ + uint32 rx9mbps; /**< packets rx at 9Mbps */ + uint32 
rx11mbps; /**< packets rx at 11Mbps */ + uint32 rx12mbps; /**< packets rx at 12Mbps */ + uint32 rx18mbps; /**< packets rx at 18Mbps */ + uint32 rx24mbps; /**< packets rx at 24Mbps */ + uint32 rx36mbps; /**< packets rx at 36Mbps */ + uint32 rx48mbps; /**< packets rx at 48Mbps */ + uint32 rx54mbps; /**< packets rx at 54Mbps */ + uint32 rx108mbps; /**< packets rx at 108mbps */ + uint32 rx162mbps; /**< packets rx at 162mbps */ + uint32 rx216mbps; /**< packets rx at 216 mbps */ + uint32 rx270mbps; /**< packets rx at 270 mbps */ + uint32 rx324mbps; /**< packets rx at 324 mbps */ + uint32 rx378mbps; /**< packets rx at 378 mbps */ + uint32 rx432mbps; /**< packets rx at 432 mbps */ + uint32 rx486mbps; /**< packets rx at 486 mbps */ + uint32 rx540mbps; /**< packets rx at 540 mbps */ /* phy stats */ uint32 rxbadplcp; @@ -3063,23 +3716,69 @@ typedef struct { } wl_delta_stats_t; +/* Partial statistics counter report */ +#define WL_CNT_CTL_MGT_FRAMES 0 + +typedef struct { + uint16 type; + uint16 len; + + /* detailed control/management frames */ + uint32 txnull; + uint32 rxnull; + uint32 txqosnull; + uint32 rxqosnull; + uint32 txassocreq; + uint32 rxassocreq; + uint32 txreassocreq; + uint32 rxreassocreq; + uint32 txdisassoc; + uint32 rxdisassoc; + uint32 txassocrsp; + uint32 rxassocrsp; + uint32 txreassocrsp; + uint32 rxreassocrsp; + uint32 txauth; + uint32 rxauth; + uint32 txdeauth; + uint32 rxdeauth; + uint32 txprobereq; + uint32 rxprobereq; + uint32 txprobersp; + uint32 rxprobersp; + uint32 txaction; + uint32 rxaction; + uint32 txrts; + uint32 rxrts; + uint32 txcts; + uint32 rxcts; + uint32 txack; + uint32 rxack; + uint32 txbar; + uint32 rxbar; + uint32 txback; + uint32 rxback; + uint32 txpspoll; + uint32 rxpspoll; +} wl_ctl_mgt_cnt_t; + typedef struct { uint32 packets; uint32 bytes; } wl_traffic_stats_t; typedef struct { - uint16 version; /* see definition of WL_WME_CNT_VERSION */ - uint16 length; /* length of entire structure */ + uint16 version; /**< see definition of 
WL_WME_CNT_VERSION */ + uint16 length; /**< length of entire structure */ - wl_traffic_stats_t tx[AC_COUNT]; /* Packets transmitted */ - wl_traffic_stats_t tx_failed[AC_COUNT]; /* Packets dropped or failed to transmit */ - wl_traffic_stats_t rx[AC_COUNT]; /* Packets received */ - wl_traffic_stats_t rx_failed[AC_COUNT]; /* Packets failed to receive */ + wl_traffic_stats_t tx[AC_COUNT]; /**< Packets transmitted */ + wl_traffic_stats_t tx_failed[AC_COUNT]; /**< Packets dropped or failed to transmit */ + wl_traffic_stats_t rx[AC_COUNT]; /**< Packets received */ + wl_traffic_stats_t rx_failed[AC_COUNT]; /**< Packets failed to receive */ - wl_traffic_stats_t forward[AC_COUNT]; /* Packets forwarded by AP */ + wl_traffic_stats_t forward[AC_COUNT]; /**< Packets forwarded by AP */ - wl_traffic_stats_t tx_expired[AC_COUNT]; /* packets dropped due to lifetime expiry */ + wl_traffic_stats_t tx_expired[AC_COUNT]; /**< packets dropped due to lifetime expiry */ } wl_wme_cnt_t; @@ -3088,10 +3787,27 @@ struct wl_msglevel2 { uint32 high; }; +#define WL_ICMP_IPV6_CFG_VERSION 1 +#define WL_ICMP_IPV6_CLEAR_ALL (1 << 0) + +typedef struct wl_icmp_ipv6_cfg { + uint16 version; + uint16 length; + uint16 fixed_length; + uint16 flags; + uint32 num_ipv6; + /* num_ipv6 to follow */ + struct ipv6_addr host_ipv6[]; +} wl_icmp_ipv6_cfg_t; + +#define WL_ICMP_CFG_IPV6_FIXED_LEN OFFSETOF(wl_icmp_ipv6_cfg_t, host_ipv6) +#define WL_ICMP_CFG_IPV6_LEN(count) (WL_ICMP_CFG_IPV6_FIXED_LEN + \ + ((count) * sizeof(struct ipv6_addr))) + typedef struct wl_mkeep_alive_pkt { uint16 version; /* Version for mkeep_alive */ uint16 length; /* length of fixed parameters in the structure */ - uint32 period_msec; + uint32 period_msec; /* high bit on means immediate send */ uint16 len_bytes; uint8 keep_alive_id; /* 0 - 3 for N = 4 */ uint8 data[1]; @@ -3100,201 +3816,254 @@ typedef struct wl_mkeep_alive_pkt { #define WL_MKEEP_ALIVE_VERSION 1 #define WL_MKEEP_ALIVE_FIXED_LEN OFFSETOF(wl_mkeep_alive_pkt_t, data) #define 
WL_MKEEP_ALIVE_PRECISION 500 +#define WL_MKEEP_ALIVE_PERIOD_MASK 0x7FFFFFFF +#define WL_MKEEP_ALIVE_IMMEDIATE 0x80000000 -/* TCP Keep-Alive conn struct */ +/** TCP Keep-Alive conn struct */ typedef struct wl_mtcpkeep_alive_conn_pkt { - struct ether_addr saddr; /* src mac address */ - struct ether_addr daddr; /* dst mac address */ - struct ipv4_addr sipaddr; /* source IP addr */ - struct ipv4_addr dipaddr; /* dest IP addr */ - uint16 sport; /* src port */ - uint16 dport; /* dest port */ - uint32 seq; /* seq number */ - uint32 ack; /* ACK number */ - uint16 tcpwin; /* TCP window */ + struct ether_addr saddr; /**< src mac address */ + struct ether_addr daddr; /**< dst mac address */ + struct ipv4_addr sipaddr; /**< source IP addr */ + struct ipv4_addr dipaddr; /**< dest IP addr */ + uint16 sport; /**< src port */ + uint16 dport; /**< dest port */ + uint32 seq; /**< seq number */ + uint32 ack; /**< ACK number */ + uint16 tcpwin; /**< TCP window */ + uint16 PAD; } wl_mtcpkeep_alive_conn_pkt_t; -/* TCP Keep-Alive interval struct */ +/** TCP Keep-Alive interval struct */ typedef struct wl_mtcpkeep_alive_timers_pkt { - uint16 interval; /* interval timer */ - uint16 retry_interval; /* retry_interval timer */ - uint16 retry_count; /* retry_count */ + uint16 interval; /**< interval timer */ + uint16 retry_interval; /**< retry_interval timer */ + uint16 retry_count; /**< retry_count */ } wl_mtcpkeep_alive_timers_pkt_t; typedef struct wake_info { uint32 wake_reason; - uint32 wake_info_len; /* size of packet */ - uchar packet[1]; + uint32 wake_info_len; /**< size of packet */ + uint8 packet[]; } wake_info_t; typedef struct wake_pkt { - uint32 wake_pkt_len; /* size of packet */ - uchar packet[1]; + uint32 wake_pkt_len; /**< size of packet */ + uint8 packet[]; } wake_pkt_t; #define WL_MTCPKEEP_ALIVE_VERSION 1 -#ifdef WLBA +/* #ifdef WLBA */ -#define WLC_BA_CNT_VERSION 1 /* current version of wlc_ba_cnt_t */ +#define WLC_BA_CNT_VERSION 1 /**< current version of wlc_ba_cnt_t */ -/* 
block ack related stats */ +/** block ack related stats */ typedef struct wlc_ba_cnt { - uint16 version; /* WLC_BA_CNT_VERSION */ - uint16 length; /* length of entire structure */ + uint16 version; /**< WLC_BA_CNT_VERSION */ + uint16 length; /**< length of entire structure */ /* transmit stat counters */ - uint32 txpdu; /* pdus sent */ - uint32 txsdu; /* sdus sent */ - uint32 txfc; /* tx side flow controlled packets */ - uint32 txfci; /* tx side flow control initiated */ - uint32 txretrans; /* retransmitted pdus */ - uint32 txbatimer; /* ba resend due to timer */ - uint32 txdrop; /* dropped packets */ - uint32 txaddbareq; /* addba req sent */ - uint32 txaddbaresp; /* addba resp sent */ - uint32 txdelba; /* delba sent */ - uint32 txba; /* ba sent */ - uint32 txbar; /* bar sent */ - uint32 txpad[4]; /* future */ + uint32 txpdu; /**< pdus sent */ + uint32 txsdu; /**< sdus sent */ + uint32 txfc; /**< tx side flow controlled packets */ + uint32 txfci; /**< tx side flow control initiated */ + uint32 txretrans; /**< retransmitted pdus */ + uint32 txbatimer; /**< ba resend due to timer */ + uint32 txdrop; /**< dropped packets */ + uint32 txaddbareq; /**< addba req sent */ + uint32 txaddbaresp; /**< addba resp sent */ + uint32 txdelba; /**< delba sent */ + uint32 txba; /**< ba sent */ + uint32 txbar; /**< bar sent */ + uint32 txpad[4]; /**< future */ /* receive side counters */ - uint32 rxpdu; /* pdus recd */ - uint32 rxqed; /* pdus buffered before sending up */ - uint32 rxdup; /* duplicate pdus */ - uint32 rxnobuf; /* pdus discarded due to no buf */ - uint32 rxaddbareq; /* addba req recd */ - uint32 rxaddbaresp; /* addba resp recd */ - uint32 rxdelba; /* delba recd */ - uint32 rxba; /* ba recd */ - uint32 rxbar; /* bar recd */ - uint32 rxinvba; /* invalid ba recd */ - uint32 rxbaholes; /* ba recd with holes */ - uint32 rxunexp; /* unexpected packets */ - uint32 rxpad[4]; /* future */ + uint32 rxpdu; /**< pdus recd */ + uint32 rxqed; /**< pdus buffered before sending up */ 
+ uint32 rxdup; /**< duplicate pdus */ + uint32 rxnobuf; /**< pdus discarded due to no buf */ + uint32 rxaddbareq; /**< addba req recd */ + uint32 rxaddbaresp; /**< addba resp recd */ + uint32 rxdelba; /**< delba recd */ + uint32 rxba; /**< ba recd */ + uint32 rxbar; /**< bar recd */ + uint32 rxinvba; /**< invalid ba recd */ + uint32 rxbaholes; /**< ba recd with holes */ + uint32 rxunexp; /**< unexpected packets */ + uint32 rxpad[4]; /**< future */ } wlc_ba_cnt_t; -#endif /* WLBA */ +/* #endif WLBA */ -/* structure for per-tid ampdu control */ +/** structure for per-tid ampdu control */ struct ampdu_tid_control { uint8 tid; /* tid */ uint8 enable; /* enable/disable */ }; -/* struct for ampdu tx/rx aggregation control */ +/** struct for ampdu tx/rx aggregation control */ struct ampdu_aggr { - int8 aggr_override; /* aggr overrided by dongle. Not to be set by host. */ - uint16 conf_TID_bmap; /* bitmap of TIDs to configure */ - uint16 enab_TID_bmap; /* enable/disable per TID */ + int8 aggr_override; /**< aggr overridden by dongle. Not to be set by host. 
*/ + uint16 conf_TID_bmap; /**< bitmap of TIDs to configure */ + uint16 enab_TID_bmap; /**< enable/disable per TID */ }; -/* structure for identifying ea/tid for sending addba/delba */ +/** structure for identifying ea/tid for sending addba/delba */ struct ampdu_ea_tid { - struct ether_addr ea; /* Station address */ - uint8 tid; /* tid */ - uint8 initiator; /* 0 is recipient, 1 is originator */ -}; -/* structure for identifying retry/tid for retry_limit_tid/rr_retry_limit_tid */ -struct ampdu_retry_tid { - uint8 tid; /* tid */ - uint8 retry; /* retry value */ + struct ether_addr ea; /**< Station address */ + uint8 tid; /**< tid */ + uint8 initiator; /**< 0 is recipient, 1 is originator */ }; -#define BDD_FNAME_LEN 32 /* Max length of friendly name */ +/** structure for identifying retry/tid for retry_limit_tid/rr_retry_limit_tid */ +struct ampdu_retry_tid { + uint8 tid; /**< tid */ + uint8 retry; /**< retry value */ +}; + +#define BDD_FNAME_LEN 32 /**< Max length of friendly name */ typedef struct bdd_fname { - uint8 len; /* length of friendly name */ - uchar name[BDD_FNAME_LEN]; /* friendly name */ + uint8 len; /**< length of friendly name */ + uchar name[BDD_FNAME_LEN]; /**< friendly name */ } bdd_fname_t; /* structure for addts arguments */ -/* For ioctls that take a list of TSPEC */ +/** For ioctls that take a list of TSPEC */ struct tslist { - int count; /* number of tspecs */ - struct tsinfo_arg tsinfo[1]; /* variable length array of tsinfo */ + int32 count; /**< number of tspecs */ + struct tsinfo_arg tsinfo[]; /**< variable length array of tsinfo */ }; -#ifdef WLTDLS -/* structure for tdls iovars */ +/* WLTDLS */ +/**structure for tdls iovars */ typedef struct tdls_iovar { - struct ether_addr ea; /* Station address */ - uint8 mode; /* mode: depends on iovar */ + struct ether_addr ea; /**< Station address */ + uint8 mode; /**< mode: depends on iovar */ + uint8 PAD; chanspec_t chanspec; - uint32 pad; /* future */ + uint16 PAD; + uint32 pad; /**< future */ } 
tdls_iovar_t; #define TDLS_WFD_IE_SIZE 512 -/* structure for tdls wfd ie */ +/**structure for tdls wfd ie */ typedef struct tdls_wfd_ie_iovar { - struct ether_addr ea; /* Station address */ + struct ether_addr ea; /**< Station address */ uint8 mode; + uint8 PAD; uint16 length; uint8 data[TDLS_WFD_IE_SIZE]; } tdls_wfd_ie_iovar_t; -#endif /* WLTDLS */ +/* #endif WLTDLS */ -/* structure for addts/delts arguments */ +/** structure for addts/delts arguments */ typedef struct tspec_arg { - uint16 version; /* see definition of TSPEC_ARG_VERSION */ - uint16 length; /* length of entire structure */ - uint flag; /* bit field */ + uint16 version; /**< see definition of TSPEC_ARG_VERSION */ + uint16 length; /**< length of entire structure */ + uint32 flag; /**< bit field */ /* TSPEC Arguments */ - struct tsinfo_arg tsinfo; /* TS Info bit field */ - uint16 nom_msdu_size; /* (Nominal or fixed) MSDU Size (bytes) */ - uint16 max_msdu_size; /* Maximum MSDU Size (bytes) */ - uint min_srv_interval; /* Minimum Service Interval (us) */ - uint max_srv_interval; /* Maximum Service Interval (us) */ - uint inactivity_interval; /* Inactivity Interval (us) */ - uint suspension_interval; /* Suspension Interval (us) */ - uint srv_start_time; /* Service Start Time (us) */ - uint min_data_rate; /* Minimum Data Rate (bps) */ - uint mean_data_rate; /* Mean Data Rate (bps) */ - uint peak_data_rate; /* Peak Data Rate (bps) */ - uint max_burst_size; /* Maximum Burst Size (bytes) */ - uint delay_bound; /* Delay Bound (us) */ - uint min_phy_rate; /* Minimum PHY Rate (bps) */ - uint16 surplus_bw; /* Surplus Bandwidth Allowance (range 1.0 to 8.0) */ - uint16 medium_time; /* Medium Time (32 us/s periods) */ - uint8 dialog_token; /* dialog token */ + struct tsinfo_arg tsinfo; /**< TS Info bit field */ + uint8 PAD; + uint16 nom_msdu_size; /**< (Nominal or fixed) MSDU Size (bytes) */ + uint16 max_msdu_size; /**< Maximum MSDU Size (bytes) */ + uint32 min_srv_interval; /**< Minimum Service Interval (us) */ + 
uint32 max_srv_interval; /**< Maximum Service Interval (us) */ + uint32 inactivity_interval; /**< Inactivity Interval (us) */ + uint32 suspension_interval; /**< Suspension Interval (us) */ + uint32 srv_start_time; /**< Service Start Time (us) */ + uint32 min_data_rate; /**< Minimum Data Rate (bps) */ + uint32 mean_data_rate; /**< Mean Data Rate (bps) */ + uint32 peak_data_rate; /**< Peak Data Rate (bps) */ + uint32 max_burst_size; /**< Maximum Burst Size (bytes) */ + uint32 delay_bound; /**< Delay Bound (us) */ + uint32 min_phy_rate; /**< Minimum PHY Rate (bps) */ + uint16 surplus_bw; /**< Surplus Bandwidth Allowance (range 1.0 to 8.0) */ + uint16 medium_time; /**< Medium Time (32 us/s periods) */ + uint8 dialog_token; /**< dialog token */ + uint8 PAD[3]; } tspec_arg_t; -/* tspec arg for desired station */ +/** tspec arg for desired station */ typedef struct tspec_per_sta_arg { struct ether_addr ea; + uint8 PAD[2]; struct tspec_arg ts; } tspec_per_sta_arg_t; -/* structure for max bandwidth for each access category */ +/** structure for max bandwidth for each access category */ typedef struct wme_max_bandwidth { - uint32 ac[AC_COUNT]; /* max bandwidth for each access category */ + uint32 ac[AC_COUNT]; /**< max bandwidth for each access category */ } wme_max_bandwidth_t; #define WL_WME_MBW_PARAMS_IO_BYTES (sizeof(wme_max_bandwidth_t)) /* current version of wl_tspec_arg_t struct */ -#define TSPEC_ARG_VERSION 2 /* current version of wl_tspec_arg_t struct */ -#define TSPEC_ARG_LENGTH 55 /* argument length from tsinfo to medium_time */ -#define TSPEC_DEFAULT_DIALOG_TOKEN 42 /* default dialog token */ -#define TSPEC_DEFAULT_SBW_FACTOR 0x3000 /* default surplus bw */ +#define TSPEC_ARG_VERSION 2 /**< current version of wl_tspec_arg_t struct */ +#define TSPEC_ARG_LENGTH 55 /**< argument length from tsinfo to medium_time */ +#define TSPEC_DEFAULT_DIALOG_TOKEN 42 /**< default dialog token */ +#define TSPEC_DEFAULT_SBW_FACTOR 0x3000 /**< default surplus bw */ #define 
WL_WOWL_KEEPALIVE_MAX_PACKET_SIZE 80 #define WLC_WOWL_MAX_KEEPALIVE 2 -/* Packet lifetime configuration per ac */ +/** Packet lifetime configuration per ac */ typedef struct wl_lifetime { - uint32 ac; /* access class */ - uint32 lifetime; /* Packet lifetime value in ms */ + uint32 ac; /**< access class */ + uint32 lifetime; /**< Packet lifetime value in ms */ } wl_lifetime_t; +/** Management time configuration */ +typedef struct wl_lifetime_mg { + uint32 mgmt_bitmap; /**< Mgmt subtype */ + uint32 lifetime; /**< Packet lifetime value in us */ +} wl_lifetime_mg_t; -/* Channel Switch Announcement param */ +/* MAC Sample Capture related */ +#define WL_MACCAPTR_DEFSTART_PTR 0xA00 +#define WL_MACCAPTR_DEFSTOP_PTR 0xA3F +#define WL_MACCAPTR_DEFSZ 0x3F + +#define WL_MACCAPTR_DEF_MASK 0xFFFFFFFF + +typedef enum { + WL_MACCAPT_TRIG = 0, + WL_MACCAPT_STORE = 1, + WL_MACCAPT_TRANS = 2, + WL_MACCAPT_MATCH = 3 +} maccaptr_optn; + +typedef enum { + WL_MACCAPT_STRT = 1, + WL_MACCAPT_STOP = 2, + WL_MACCAPT_RST = 3 +} maccaptr_cmd_t; + +/* MAC Sample Capture Set-up Paramters */ +typedef struct wl_maccapture_params { + uint8 gpio_sel; + uint8 la_mode; /* TRUE: GPIO Out Enabled */ + uint8 PAD[2]; + uint32 start_ptr; /* Start address to store */ + uint32 stop_ptr; /* Stop address to store */ + uint8 optn_bmp; /* Options */ + uint8 PAD[3]; + uint32 tr_mask; /* Trigger Mask */ + uint32 tr_val; /* Trigger Value */ + uint32 s_mask; /* Store Mode Mask */ + uint32 x_mask; /* Trans. 
Mode Mask */ + uint32 m_mask; /* Match Mode Mask */ + uint32 m_val; /* Match Value */ + maccaptr_cmd_t cmd; /* Start / Stop */ +} wl_maccapture_params_t; + +/** Channel Switch Announcement param */ typedef struct wl_chan_switch { - uint8 mode; /* value 0 or 1 */ - uint8 count; /* count # of beacons before switching */ - chanspec_t chspec; /* chanspec */ - uint8 reg; /* regulatory class */ - uint8 frame_type; /* csa frame type, unicast or broadcast */ + uint8 mode; /**< value 0 or 1 */ + uint8 count; /**< count # of beacons before switching */ + chanspec_t chspec; /**< chanspec */ + uint8 reg; /**< regulatory class */ + uint8 frame_type; /**< csa frame type, unicast or broadcast */ } wl_chan_switch_t; enum { @@ -3324,7 +4093,7 @@ enum { #define IMMEDIATE_EVENT_BIT 8 #define SUPPRESS_SSID_BIT 9 #define ENABLE_NET_OFFLOAD_BIT 10 -/* report found/lost events for SSID and BSSID networks seperately */ +/** report found/lost events for SSID and BSSID networks seperately */ #define REPORT_SEPERATELY_BIT 11 #define SORT_CRITERIA_MASK 0x0001 @@ -3338,12 +4107,10 @@ enum { #define IMMEDIATE_EVENT_MASK 0x0100 #define SUPPRESS_SSID_MASK 0x0200 #define ENABLE_NET_OFFLOAD_MASK 0x0400 -/* report found/lost events for SSID and BSSID networks seperately */ +/** report found/lost events for SSID and BSSID networks seperately */ #define REPORT_SEPERATELY_MASK 0x0800 #define PFN_VERSION 2 -#define PFN_SCANRESULT_VERSION 1 -#define MAX_PFN_LIST_COUNT 16 #define PFN_COMPLETE 1 #define PFN_INCOMPLETE 0 @@ -3355,47 +4122,102 @@ enum { #define PFN_PARTIAL_SCAN_BIT 0 #define PFN_PARTIAL_SCAN_MASK 1 + #define PFN_SWC_RSSI_WINDOW_MAX 8 #define PFN_SWC_MAX_NUM_APS 16 #define PFN_HOTLIST_MAX_NUM_APS 64 -/* PFN network info structure */ -typedef struct wl_pfn_subnet_info { +#define MAX_EPNO_HIDDEN_SSID 8 +#define MAX_WHITELIST_SSID 2 + +/* Version 1 and 2 for various scan results structures defined below */ +#define PFN_SCANRESULTS_VERSION_V1 1 +#define PFN_SCANRESULTS_VERSION_V2 2 + +/** PFN 
network info structure */ +typedef struct wl_pfn_subnet_info_v1 { struct ether_addr BSSID; - uint8 channel; /* channel number only */ + uint8 channel; /**< channel number only */ uint8 SSID_len; uint8 SSID[32]; -} wl_pfn_subnet_info_t; +} wl_pfn_subnet_info_v1_t; -typedef struct wl_pfn_net_info { - wl_pfn_subnet_info_t pfnsubnet; - int16 RSSI; /* receive signal strength (in dBm) */ - uint16 timestamp; /* age in seconds */ -} wl_pfn_net_info_t; +typedef struct wl_pfn_subnet_info_v2 { + struct ether_addr BSSID; + uint8 channel; /**< channel number only */ + uint8 SSID_len; + union { + uint8 SSID[32]; + uint16 index; + } u; +} wl_pfn_subnet_info_v2_t; -typedef struct wl_pfn_lnet_info { - wl_pfn_subnet_info_t pfnsubnet; /* BSSID + channel + SSID len + SSID */ - uint16 flags; /* partial scan, etc */ - int16 RSSI; /* receive signal strength (in dBm) */ - uint32 timestamp; /* age in miliseconds */ - uint16 rtt0; /* estimated distance to this AP in centimeters */ - uint16 rtt1; /* standard deviation of the distance to this AP in centimeters */ -} wl_pfn_lnet_info_t; +typedef struct wl_pfn_net_info_v1 { + wl_pfn_subnet_info_v1_t pfnsubnet; + int16 RSSI; /**< receive signal strength (in dBm) */ + uint16 timestamp; /**< age in seconds */ +} wl_pfn_net_info_v1_t; -typedef struct wl_pfn_lscanresults { +typedef struct wl_pfn_net_info_v2 { + wl_pfn_subnet_info_v2_t pfnsubnet; + int16 RSSI; /**< receive signal strength (in dBm) */ + uint16 timestamp; /**< age in seconds */ +} wl_pfn_net_info_v2_t; + +/* Version 1 and 2 for various lbest scan results structures below */ +#define PFN_LBEST_SCAN_RESULT_VERSION_V1 1 +#define PFN_LBEST_SCAN_RESULT_VERSION_V2 2 + +#define MAX_CHBKT_PER_RESULT 4 + +typedef struct wl_pfn_lnet_info_v1 { + wl_pfn_subnet_info_v1_t pfnsubnet; /**< BSSID + channel + SSID len + SSID */ + uint16 flags; /**< partial scan, etc */ + int16 RSSI; /**< receive signal strength (in dBm) */ + uint32 timestamp; /**< age in miliseconds */ + uint16 rtt0; /**< estimated 
distance to this AP in centimeters */ + uint16 rtt1; /**< standard deviation of the distance to this AP in centimeters */ +} wl_pfn_lnet_info_v1_t; + +typedef struct wl_pfn_lnet_info_v2 { + wl_pfn_subnet_info_v2_t pfnsubnet; /**< BSSID + channel + SSID len + SSID */ + uint16 flags; /**< partial scan, etc */ + int16 RSSI; /**< receive signal strength (in dBm) */ + uint32 timestamp; /**< age in miliseconds */ + uint16 rtt0; /**< estimated distance to this AP in centimeters */ + uint16 rtt1; /**< standard deviation of the distance to this AP in centimeters */ +} wl_pfn_lnet_info_v2_t; + +typedef struct wl_pfn_lscanresults_v1 { uint32 version; uint32 status; uint32 count; - wl_pfn_lnet_info_t netinfo[1]; -} wl_pfn_lscanresults_t; + wl_pfn_lnet_info_v1_t netinfo[1]; +} wl_pfn_lscanresults_v1_t; -/* this is used to report on 1-* pfn scan results */ -typedef struct wl_pfn_scanresults { +typedef struct wl_pfn_lscanresults_v2 { + uint32 version; + uint16 status; + uint16 count; + uint32 scan_ch_buckets[MAX_CHBKT_PER_RESULT]; + wl_pfn_lnet_info_v2_t netinfo[1]; +} wl_pfn_lscanresults_v2_t; + +/**this is used to report on 1-* pfn scan results */ +typedef struct wl_pfn_scanresults_v1 { uint32 version; uint32 status; uint32 count; - wl_pfn_net_info_t netinfo[1]; -} wl_pfn_scanresults_t; + wl_pfn_net_info_v1_t netinfo[1]; +} wl_pfn_scanresults_v1_t; + +typedef struct wl_pfn_scanresults_v2 { + uint32 version; + uint32 status; + uint32 count; + uint32 scan_ch_bucket; + wl_pfn_net_info_v2_t netinfo[1]; +} wl_pfn_scanresults_v2_t; typedef struct wl_pfn_significant_net { uint16 flags; @@ -3404,45 +4226,72 @@ typedef struct wl_pfn_significant_net { int8 rssi[PFN_SWC_RSSI_WINDOW_MAX]; } wl_pfn_significant_net_t; +#define PFN_SWC_SCANRESULT_VERSION 1 typedef struct wl_pfn_swc_results { uint32 version; - uint32 pkt_count; - uint32 total_count; - wl_pfn_significant_net_t list[1]; + uint32 pkt_count; /**< No. 
of results in current frame */ + uint32 total_count; /**< Total expected results */ + wl_pfn_significant_net_t list[]; } wl_pfn_swc_results_t; +typedef struct wl_pfn_net_info_bssid { + struct ether_addr BSSID; + uint8 channel; /**< channel number only */ + int8 RSSI; /**< receive signal strength (in dBm) */ + uint16 flags; /**< (e.g. partial scan, off channel) */ + uint16 timestamp; /**< age in seconds */ +} wl_pfn_net_info_bssid_t; -/* used to report exactly one scan result */ -/* plus reports detailed scan info in bss_info */ -typedef struct wl_pfn_scanresult { +typedef struct wl_pfn_scanhist_bssid { uint32 version; uint32 status; uint32 count; - wl_pfn_net_info_t netinfo; - wl_bss_info_t bss_info; -} wl_pfn_scanresult_t; + wl_pfn_net_info_bssid_t netinfo[1]; +} wl_pfn_scanhist_bssid_t; -/* PFN data structure */ +/* Version 1 and 2 for various single scan result */ +#define PFN_SCANRESULT_VERSION_V1 1 +#define PFN_SCANRESULT_VERSION_V2 2 + +/* used to report exactly one scan result */ +/* plus reports detailed scan info in bss_info */ +typedef struct wl_pfn_scanresult_v1 { + uint32 version; + uint32 status; + uint32 count; + wl_pfn_net_info_v1_t netinfo; + wl_bss_info_t bss_info; +} wl_pfn_scanresult_v1_t; + +typedef struct wl_pfn_scanresult_v2 { + uint32 version; + uint32 status; + uint32 count; + wl_pfn_net_info_v2_t netinfo; + wl_bss_info_t bss_info; +} wl_pfn_scanresult_v2_t; + +/**PFN data structure */ typedef struct wl_pfn_param { - int32 version; /* PNO parameters version */ - int32 scan_freq; /* Scan frequency */ - int32 lost_network_timeout; /* Timeout in sec. to declare + int32 version; /**< PNO parameters version */ + int32 scan_freq; /**< Scan frequency */ + int32 lost_network_timeout; /**< Timeout in sec. 
to declare * discovered network as lost */ - int16 flags; /* Bit field to control features + int16 flags; /**< Bit field to control features * of PFN such as sort criteria auto * enable switch and background scan */ - int16 rssi_margin; /* Margin to avoid jitter for choosing a + int16 rssi_margin; /**< Margin to avoid jitter for choosing a * PFN based on RSSI sort criteria */ - uint8 bestn; /* number of best networks in each scan */ - uint8 mscan; /* number of scans recorded */ - uint8 repeat; /* Minimum number of scan intervals + uint8 bestn; /**< number of best networks in each scan */ + uint8 mscan; /**< number of scans recorded */ + uint8 repeat; /**< Minimum number of scan intervals *before scan frequency changes in adaptive scan */ - uint8 exp; /* Exponent of 2 for maximum scan interval */ - int32 slow_freq; /* slow scan period */ + uint8 exp; /**< Exponent of 2 for maximum scan interval */ + int32 slow_freq; /**< slow scan period */ } wl_pfn_param_t; typedef struct wl_pfn_bssid { @@ -3450,7 +4299,6 @@ typedef struct wl_pfn_bssid { /* Bit4: suppress_lost, Bit3: suppress_found */ uint16 flags; } wl_pfn_bssid_t; - typedef struct wl_pfn_significant_bssid { struct ether_addr macaddr; int8 rssi_low_threshold; @@ -3458,6 +4306,12 @@ typedef struct wl_pfn_significant_bssid { } wl_pfn_significant_bssid_t; #define WL_PFN_SUPPRESSFOUND_MASK 0x08 #define WL_PFN_SUPPRESSLOST_MASK 0x10 +#define WL_PFN_SSID_IMPRECISE_MATCH 0x80 +#define WL_PFN_SSID_SAME_NETWORK 0x10000 +#define WL_PFN_SUPPRESS_AGING_MASK 0x20000 +#define WL_PFN_FLUSH_ALL_SSIDS 0x40000 + +#define WL_PFN_IOVAR_FLAG_MASK 0xFFFF00FF #define WL_PFN_RSSI_MASK 0xff00 #define WL_PFN_RSSI_SHIFT 8 @@ -3468,54 +4322,103 @@ typedef struct wl_pfn_cfg { uint32 flags; } wl_pfn_cfg_t; -#define CH_BUCKET_REPORT_REGULAR 0 +#define WL_PFN_SSID_CFG_VERSION 1 +#define WL_PFN_SSID_CFG_CLEAR 0x1 + +typedef struct wl_pfn_ssid_params { + int8 min5G_rssi; /* minimum 5GHz RSSI for a BSSID to be considered */ + int8 min2G_rssi; /* 
minimum 2.4GHz RSSI for a BSSID to be considered */ + int16 init_score_max; /* The maximum score that a network can have before bonuses */ + + int16 cur_bssid_bonus; /* Add to current bssid */ + int16 same_ssid_bonus; /* score bonus for all networks with the same network flag */ + int16 secure_bonus; /* score bonus for networks that are not open */ + int16 band_5g_bonus; +} wl_pfn_ssid_params_t; + +typedef struct wl_ssid_ext_params { + int8 min5G_rssi; /* minimum 5GHz RSSI for a BSSID to be considered */ + int8 min2G_rssi; /* minimum 2.4GHz RSSI for a BSSID to be considered */ + int16 init_score_max; /* The maximum score that a network can have before bonuses */ + int16 cur_bssid_bonus; /* Add to current bssid */ + int16 same_ssid_bonus; /* score bonus for all networks with the same network flag */ + int16 secure_bonus; /* score bonus for networks that are not open */ + int16 band_5g_bonus; +} wl_ssid_ext_params_t; + +typedef struct wl_pfn_ssid_cfg { + uint16 version; + uint16 flags; + wl_ssid_ext_params_t params; +} wl_pfn_ssid_cfg_t; + +#define CH_BUCKET_REPORT_NONE 0 +#define CH_BUCKET_REPORT_SCAN_COMPLETE_ONLY 1 #define CH_BUCKET_REPORT_FULL_RESULT 2 -#define CH_BUCKET_GSCAN 4 +#define CH_BUCKET_REPORT_SCAN_COMPLETE (CH_BUCKET_REPORT_SCAN_COMPLETE_ONLY | \ + CH_BUCKET_REPORT_FULL_RESULT) +#define CH_BUCKET_REPORT_REGULAR 0 +#define CH_BUCKET_GSCAN 4 - -typedef struct wl_pfn_gscan_channel_bucket { - uint16 bucket_end_index; +typedef struct wl_pfn_gscan_ch_bucket_cfg { + uint8 bucket_end_index; uint8 bucket_freq_multiple; - uint8 report_flag; -} wl_pfn_gscan_channel_bucket_t; + uint8 flag; + uint8 reserved; + uint16 repeat; + uint16 max_freq_multiple; +} wl_pfn_gscan_ch_bucket_cfg_t; -#define GSCAN_SEND_ALL_RESULTS_MASK (1 << 0) -#define GSCAN_CFG_FLAGS_ONLY_MASK (1 << 7) +typedef struct wl_pfn_capabilities { + uint16 max_mscan; + uint16 max_bestn; + uint16 max_swc_bssid; + uint16 max_hotlist_bssid; +} wl_pfn_capabilities_t; +#define GSCAN_SEND_ALL_RESULTS_MASK 
(1 << 0) +#define GSCAN_ALL_BUCKETS_IN_FIRST_SCAN_MASK (1 << 3) +#define GSCAN_CFG_FLAGS_ONLY_MASK (1 << 7) +#define WL_GSCAN_CFG_VERSION 1 typedef struct wl_pfn_gscan_cfg { - /* BIT0 1 = send probes/beacons to HOST - * BIT2 Reserved + uint16 version; + /** + * BIT0 1 = send probes/beacons to HOST + * BIT1 Reserved + * BIT2 Reserved * Add any future flags here * BIT7 1 = no other useful cfg sent */ - uint8 flags; - /* Buffer filled threshold in % to generate an event */ + uint8 flags; + /** Buffer filled threshold in % to generate an event */ uint8 buffer_threshold; - /* No. of BSSIDs with "change" to generate an evt + /** + * No. of BSSIDs with "change" to generate an evt * change - crosses rssi threshold/lost */ uint8 swc_nbssid_threshold; /* Max=8 (for now) Size of rssi cache buffer */ uint8 swc_rssi_window_size; - uint16 count_of_channel_buckets; + uint8 count_of_channel_buckets; + uint8 retry_threshold; uint16 lost_ap_window; - wl_pfn_gscan_channel_bucket_t channel_bucket[1]; + wl_pfn_gscan_ch_bucket_cfg_t channel_bucket[1]; } wl_pfn_gscan_cfg_t; - #define WL_PFN_REPORT_ALLNET 0 #define WL_PFN_REPORT_SSIDNET 1 #define WL_PFN_REPORT_BSSIDNET 2 + #define WL_PFN_CFG_FLAGS_PROHIBITED 0x00000001 /* Accept and use prohibited channels */ -#define WL_PFN_CFG_FLAGS_RESERVED 0xfffffffe /* Remaining reserved for future use */ +#define WL_PFN_CFG_FLAGS_RESERVED 0xfffffffe /**< Remaining reserved for future use */ typedef struct wl_pfn { - wlc_ssid_t ssid; /* ssid name and its length */ - int32 flags; /* bit2: hidden */ - int32 infra; /* BSS Vs IBSS */ - int32 auth; /* Open Vs Closed */ - int32 wpa_auth; /* WPA type */ - int32 wsec; /* wsec value */ + wlc_ssid_t ssid; /**< ssid name and its length */ + int32 flags; /**< bit2: hidden */ + int32 infra; /**< BSS Vs IBSS */ + int32 auth; /**< Open Vs Closed */ + int32 wpa_auth; /**< WPA type */ + int32 wsec; /**< wsec value */ } wl_pfn_t; typedef struct wl_pfn_list { @@ -3525,33 +4428,134 @@ typedef struct wl_pfn_list { 
wl_pfn_t pfn[1]; } wl_pfn_list_t; -#define WL_PFN_MAC_OUI_ONLY_MASK 1 -#define WL_PFN_SET_MAC_UNASSOC_MASK 2 -/* To configure pfn_macaddr */ -typedef struct wl_pfn_macaddr_cfg { - uint8 version; +#define PFN_SSID_EXT_VERSION 1 + +typedef struct wl_pfn_ext { uint8 flags; - struct ether_addr macaddr; -} wl_pfn_macaddr_cfg_t; -#define WL_PFN_MACADDR_CFG_VER 1 -typedef BWL_PRE_PACKED_STRUCT struct pfn_olmsg_params_t { - wlc_ssid_t ssid; - uint32 cipher_type; - uint32 auth_type; - uint8 channels[4]; -} BWL_POST_PACKED_STRUCT pfn_olmsg_params; + int8 rssi_thresh; /* RSSI threshold, track only if RSSI > threshold */ + uint16 wpa_auth; /* Match the wpa auth type defined in wlioctl_defs.h */ + uint8 ssid[DOT11_MAX_SSID_LEN]; + uint8 ssid_len; + uint8 pad; +} wl_pfn_ext_t; +typedef struct wl_pfn_ext_list { + uint16 version; + uint16 count; + wl_pfn_ext_t pfn_ext[1]; +} wl_pfn_ext_list_t; + +#define WL_PFN_SSID_EXT_FOUND 0x1 +#define WL_PFN_SSID_EXT_LOST 0x2 +typedef struct wl_pfn_result_ssid { + uint8 flags; + int8 rssi; + /* channel number */ + uint16 channel; + /* Assume idx in order of cfg */ + uint32 index; +} wl_pfn_result_ssid_crc32_t; + +typedef struct wl_pfn_ssid_ext_result { + uint16 version; + uint16 count; + wl_pfn_result_ssid_crc32_t net[1]; +} wl_pfn_ssid_ext_result_t; + +#define PFN_EXT_AUTH_CODE_OPEN 1 /* open */ +#define PFN_EXT_AUTH_CODE_PSK 2 /* WPA_PSK or WPA2PSK */ +#define PFN_EXT_AUTH_CODE_EAPOL 4 /* any EAPOL */ #define WL_PFN_HIDDEN_BIT 2 #define WL_PFN_HIDDEN_MASK 0x4 #ifndef BESTN_MAX -#define BESTN_MAX 3 +#define BESTN_MAX 10 #endif #ifndef MSCAN_MAX #define MSCAN_MAX 90 #endif +/* Dynamic scan configuration for motion profiles */ + +#define WL_PFN_MPF_VERSION 1 + +/* Valid group IDs, may be expanded in the future */ +#define WL_PFN_MPF_GROUP_SSID 0 +#define WL_PFN_MPF_GROUP_BSSID 1 +#define WL_PFN_MPF_MAX_GROUPS 2 + +/* Max number of MPF states supported in this time */ +#define WL_PFN_MPF_STATES_MAX 4 + +/* Flags for the mpf-specific stuff */ 
+#define WL_PFN_MPF_ADAPT_ON_BIT 0 +#define WL_PFN_MPF_ADAPTSCAN_BIT 1 + +#define WL_PFN_MPF_ADAPT_ON_MASK 0x0001 +#define WL_PFN_MPF_ADAPTSCAN_MASK 0x0006 + +/* Per-state timing values */ +typedef struct wl_pfn_mpf_state_params { + int32 scan_freq; /* Scan frequency (secs) */ + int32 lost_network_timeout; /* Timeout to declare net lost (secs) */ + int16 flags; /* Space for flags: ADAPT etc */ + uint8 exp; /* Exponent of 2 for max interval for SMART/STRICT_ADAPT */ + uint8 repeat; /* Number of scans before changing adaptation level */ + int32 slow_freq; /* Slow scan period for SLOW_ADAPT */ +} wl_pfn_mpf_state_params_t; + +typedef struct wl_pfn_mpf_param { + uint16 version; /* Structure version */ + uint16 groupid; /* Group ID: 0 (SSID), 1 (BSSID), other: reserved */ + wl_pfn_mpf_state_params_t state[WL_PFN_MPF_STATES_MAX]; +} wl_pfn_mpf_param_t; + +/* Structure for setting pfn_override iovar */ +typedef struct wl_pfn_override_param { + uint16 version; /* Structure version */ + uint16 start_offset; /* Seconds from now to apply new params */ + uint16 duration; /* Seconds to keep new params applied */ + uint16 reserved; + wl_pfn_mpf_state_params_t override; +} wl_pfn_override_param_t; +#define WL_PFN_OVERRIDE_VERSION 1 + +/* + * Definitions for base MPF configuration + */ + +#define WL_MPF_VERSION 1 +#define WL_MPF_MAX_BITS 3 +#define WL_MPF_MAX_STATES (1 << WL_MPF_MAX_BITS) + +#define WL_MPF_STATE_NAME_MAX 12 + +typedef struct wl_mpf_val { + uint16 val; /* Value of GPIO bits */ + uint16 state; /* State identifier */ + char name[WL_MPF_STATE_NAME_MAX]; /* Optional name */ +} wl_mpf_val_t; + +typedef struct wl_mpf_map { + uint16 version; + uint16 type; + uint16 mask; /* Which GPIO bits to use */ + uint8 count; /* Count of state/value mappings */ + uint8 PAD; + wl_mpf_val_t vals[WL_MPF_MAX_STATES]; +} wl_mpf_map_t; + +#define WL_MPF_STATE_AUTO (0xFFFF) /* (uint16)-1) */ + +typedef struct wl_mpf_state { + uint16 version; + uint16 type; + uint16 state; /* Get/Set */ + 
uint8 force; /* 0 - auto (HW) state, 1 - forced state */ + char name[WL_MPF_STATE_NAME_MAX]; /* Get/Set: Optional/actual name */ + uint8 PAD; +} wl_mpf_state_t; /* * WLFCTS definition */ @@ -3565,108 +4569,110 @@ typedef struct wl_txstatus_additional_info { uint8 tx_cnt; } wl_txstatus_additional_info_t; -/* Service discovery */ +/** Service discovery */ typedef struct { - uint8 transaction_id; /* Transaction id */ - uint8 protocol; /* Service protocol type */ - uint16 query_len; /* Length of query */ - uint16 response_len; /* Length of response */ - uint8 qrbuf[1]; + uint8 transaction_id; /**< Transaction id */ + uint8 protocol; /**< Service protocol type */ + uint16 query_len; /**< Length of query */ + uint16 response_len; /**< Length of response */ + uint8 qrbuf[]; } wl_p2po_qr_t; typedef struct { - uint16 period; /* extended listen period */ - uint16 interval; /* extended listen interval */ - uint16 count; /* count to repeat */ + uint16 period; /**< extended listen period */ + uint16 interval; /**< extended listen interval */ + uint16 count; /* count to repeat */ uint16 pad; /* pad for 32bit align */ } wl_p2po_listen_t; -/* GAS state machine tunable parameters. Structure field values of 0 means use the default. */ +/** GAS state machine tunable parameters. Structure field values of 0 means use the default. */ typedef struct wl_gas_config { - uint16 max_retransmit; /* Max # of firmware/driver retransmits on no Ack + uint16 max_retransmit; /**< Max # of firmware/driver retransmits on no Ack * from peer (on top of the ucode retries). */ - uint16 response_timeout; /* Max time to wait for a GAS-level response + uint16 response_timeout; /**< Max time to wait for a GAS-level response * after sending a packet. */ - uint16 max_comeback_delay; /* Max GAS response comeback delay. + uint16 max_comeback_delay; /**< Max GAS response comeback delay. * Exceeding this fails the GAS exchange. 
*/ - uint16 max_retries; /* Max # of GAS state machine retries on failure + uint16 max_retries; /**< Max # of GAS state machine retries on failure * of a GAS frame exchange. */ } wl_gas_config_t; -/* P2P Find Offload parameters */ -typedef BWL_PRE_PACKED_STRUCT struct wl_p2po_find_config { - uint16 version; /* Version of this struct */ - uint16 length; /* sizeof(wl_p2po_find_config_t) */ - int32 search_home_time; /* P2P search state home time when concurrent +/** P2P Find Offload parameters */ +typedef struct wl_p2po_find_config { + uint16 version; /**< Version of this struct */ + uint16 length; /**< sizeof(wl_p2po_find_config_t) */ + int32 search_home_time; /**< P2P search state home time when concurrent * connection exists. -1 for default. */ uint8 num_social_channels; - /* Number of social channels up to WL_P2P_SOCIAL_CHANNELS_MAX. + /**< Number of social channels up to WL_P2P_SOCIAL_CHANNELS_MAX. * 0 means use default social channels. */ uint8 flags; - uint16 social_channels[1]; /* Variable length array of social channels */ -} BWL_POST_PACKED_STRUCT wl_p2po_find_config_t; -#define WL_P2PO_FIND_CONFIG_VERSION 2 /* value for version field */ + uint16 social_channels[1]; /**< Variable length array of social channels */ +} wl_p2po_find_config_t; +#define WL_P2PO_FIND_CONFIG_VERSION 2 /**< value for version field */ -/* wl_p2po_find_config_t flags */ -#define P2PO_FIND_FLAG_SCAN_ALL_APS 0x01 /* Whether to scan for all APs in the p2po_find +/** wl_p2po_find_config_t flags */ +#define P2PO_FIND_FLAG_SCAN_ALL_APS 0x01 /**< Whether to scan for all APs in the p2po_find * periodic scans of all channels. * 0 means scan for only P2P devices. * 1 means scan for P2P devices plus non-P2P APs. 
*/ -/* For adding a WFDS service to seek */ -typedef BWL_PRE_PACKED_STRUCT struct { - uint32 seek_hdl; /* unique id chosen by host */ - uint8 addr[6]; /* Seek service from a specific device with this +/** For adding a WFDS service to seek */ +typedef struct { + uint32 seek_hdl; /**< unique id chosen by host */ + uint8 addr[6]; /**< Seek service from a specific device with this * MAC address, all 1's for any device. */ uint8 service_hash[P2P_WFDS_HASH_LEN]; uint8 service_name_len; uint8 service_name[MAX_WFDS_SEEK_SVC_NAME_LEN]; - /* Service name to seek, not null terminated */ + /**< Service name to seek, not null terminated */ uint8 service_info_req_len; - uint8 service_info_req[1]; /* Service info request, not null terminated. + uint8 service_info_req[1]; /**< Service info request, not null terminated. * Variable length specified by service_info_req_len. * Maximum length is MAX_WFDS_SEEK_SVC_INFO_LEN. */ -} BWL_POST_PACKED_STRUCT wl_p2po_wfds_seek_add_t; +} wl_p2po_wfds_seek_add_t; -/* For deleting a WFDS service to seek */ +/** For deleting a WFDS service to seek */ +typedef struct { + uint32 seek_hdl; /**< delete service specified by id */ +} wl_p2po_wfds_seek_del_t; + + +/** For adding a WFDS service to advertise */ +#include typedef BWL_PRE_PACKED_STRUCT struct { - uint32 seek_hdl; /* delete service specified by id */ -} BWL_POST_PACKED_STRUCT wl_p2po_wfds_seek_del_t; - - -/* For adding a WFDS service to advertise */ -typedef BWL_PRE_PACKED_STRUCT struct { - uint32 advertise_hdl; /* unique id chosen by host */ + uint32 advertise_hdl; /**< unique id chosen by host */ uint8 service_hash[P2P_WFDS_HASH_LEN]; uint32 advertisement_id; uint16 service_config_method; uint8 service_name_len; uint8 service_name[MAX_WFDS_SVC_NAME_LEN]; - /* Service name , not null terminated */ + /**< Service name , not null terminated */ uint8 service_status; uint16 service_info_len; - uint8 service_info[1]; /* Service info, not null terminated. 
+ uint8 service_info[1]; /**< Service info, not null terminated. * Variable length specified by service_info_len. * Maximum length is MAX_WFDS_ADV_SVC_INFO_LEN. */ } BWL_POST_PACKED_STRUCT wl_p2po_wfds_advertise_add_t; +#include -/* For deleting a WFDS service to advertise */ -typedef BWL_PRE_PACKED_STRUCT struct { - uint32 advertise_hdl; /* delete service specified by hdl */ -} BWL_POST_PACKED_STRUCT wl_p2po_wfds_advertise_del_t; +/** For deleting a WFDS service to advertise */ +typedef struct { + uint32 advertise_hdl; /**< delete service specified by hdl */ +} wl_p2po_wfds_advertise_del_t; -/* P2P Offload discovery mode for the p2po_state iovar */ +/** P2P Offload discovery mode for the p2po_state iovar */ typedef enum { WL_P2PO_DISC_STOP, WL_P2PO_DISC_LISTEN, @@ -3677,42 +4683,44 @@ typedef enum { #define ANQPO_MAX_QUERY_SIZE 256 typedef struct { - uint16 max_retransmit; /* ~0 use default, max retransmit on no ACK from peer */ - uint16 response_timeout; /* ~0 use default, msec to wait for resp after tx packet */ - uint16 max_comeback_delay; /* ~0 use default, max comeback delay in resp else fail */ - uint16 max_retries; /* ~0 use default, max retries on failure */ - uint16 query_len; /* length of ANQP query */ - uint8 query_data[1]; /* ANQP encoded query (max ANQPO_MAX_QUERY_SIZE) */ + uint16 max_retransmit; /**< ~0 use default, max retransmit on no ACK from peer */ + uint16 response_timeout; /**< ~0 use default, msec to wait for resp after tx packet */ + uint16 max_comeback_delay; /**< ~0 use default, max comeback delay in resp else fail */ + uint16 max_retries; /**< ~0 use default, max retries on failure */ + uint16 query_len; /**< length of ANQP query */ + uint8 query_data[1]; /**< ANQP encoded query (max ANQPO_MAX_QUERY_SIZE) */ } wl_anqpo_set_t; typedef struct { - uint16 channel; /* channel of the peer */ - struct ether_addr addr; /* addr of the peer */ + uint16 channel; /**< channel of the peer */ + struct ether_addr addr; /**< addr of the peer */ } 
wl_anqpo_peer_t; #define ANQPO_MAX_PEER_LIST 64 typedef struct { - uint16 count; /* number of peers in list */ - wl_anqpo_peer_t peer[1]; /* max ANQPO_MAX_PEER_LIST */ + uint16 count; /**< number of peers in list */ + wl_anqpo_peer_t peer[1]; /**< max ANQPO_MAX_PEER_LIST */ } wl_anqpo_peer_list_t; #define ANQPO_MAX_IGNORE_SSID 64 typedef struct { - bool is_clear; /* set to clear list (not used on GET) */ - uint16 count; /* number of SSID in list */ - wlc_ssid_t ssid[1]; /* max ANQPO_MAX_IGNORE_SSID */ + uint8 is_clear; /**< set to clear list (not used on GET) */ + uint8 PAD; + uint16 count; /**< number of SSID in list */ + wlc_ssid_t ssid[1]; /**< max ANQPO_MAX_IGNORE_SSID */ } wl_anqpo_ignore_ssid_list_t; #define ANQPO_MAX_IGNORE_BSSID 64 typedef struct { - bool is_clear; /* set to clear list (not used on GET) */ - uint16 count; /* number of addr in list */ - struct ether_addr bssid[1]; /* max ANQPO_MAX_IGNORE_BSSID */ + uint8 is_clear; /**< set to clear list (not used on GET) */ + uint8 PAD; + uint16 count; /**< number of addr in list */ + struct ether_addr bssid[]; /**< max ANQPO_MAX_IGNORE_BSSID */ } wl_anqpo_ignore_bssid_list_t; struct toe_ol_stats_t { - /* Num of tx packets that don't need to be checksummed */ + /** Num of tx packets that don't need to be checksummed */ uint32 tx_summed; /* Num of tx packets where checksum is filled by offload engine */ @@ -3742,46 +4750,92 @@ struct toe_ol_stats_t { uint32 rx_icmp_errinj; }; -/* Arp offload statistic counts */ +/** Arp offload statistic counts */ struct arp_ol_stats_t { - uint32 host_ip_entries; /* Host IP table addresses (more than one if multihomed) */ - uint32 host_ip_overflow; /* Host IP table additions skipped due to overflow */ + uint32 host_ip_entries; /**< Host IP table addresses (more than one if multihomed) */ + uint32 host_ip_overflow; /**< Host IP table additions skipped due to overflow */ - uint32 arp_table_entries; /* ARP table entries */ - uint32 arp_table_overflow; /* ARP table additions 
skipped due to overflow */ + uint32 arp_table_entries; /**< ARP table entries */ + uint32 arp_table_overflow; /**< ARP table additions skipped due to overflow */ - uint32 host_request; /* ARP requests from host */ - uint32 host_reply; /* ARP replies from host */ - uint32 host_service; /* ARP requests from host serviced by ARP Agent */ + uint32 host_request; /**< ARP requests from host */ + uint32 host_reply; /**< ARP replies from host */ + uint32 host_service; /**< ARP requests from host serviced by ARP Agent */ - uint32 peer_request; /* ARP requests received from network */ - uint32 peer_request_drop; /* ARP requests from network that were dropped */ - uint32 peer_reply; /* ARP replies received from network */ - uint32 peer_reply_drop; /* ARP replies from network that were dropped */ - uint32 peer_service; /* ARP request from host serviced by ARP Agent */ + uint32 peer_request; /**< ARP requests received from network */ + uint32 peer_request_drop; /**< ARP requests from network that were dropped */ + uint32 peer_reply; /**< ARP replies received from network */ + uint32 peer_reply_drop; /**< ARP replies from network that were dropped */ + uint32 peer_service; /**< ARP request from host serviced by ARP Agent */ }; -/* NS offload statistic counts */ +/** NS offload statistic counts */ struct nd_ol_stats_t { - uint32 host_ip_entries; /* Host IP table addresses (more than one if multihomed) */ - uint32 host_ip_overflow; /* Host IP table additions skipped due to overflow */ - uint32 peer_request; /* NS requests received from network */ - uint32 peer_request_drop; /* NS requests from network that were dropped */ - uint32 peer_reply_drop; /* NA replies from network that were dropped */ - uint32 peer_service; /* NS request from host serviced by firmware */ + uint32 host_ip_entries; /**< Host IP table addresses (more than one if multihomed) */ + uint32 host_ip_overflow; /**< Host IP table additions skipped due to overflow */ + uint32 peer_request; /**< NS requests received 
from network */ + uint32 peer_request_drop; /**< NS requests from network that were dropped */ + uint32 peer_reply_drop; /**< NA replies from network that were dropped */ + uint32 peer_service; /**< NS request from host serviced by firmware */ }; +/* + * Neighbor Discovery Offloading + */ +enum { + WL_ND_IPV6_ADDR_TYPE_UNICAST = 0, + WL_ND_IPV6_ADDR_TYPE_ANYCAST +}; + +typedef struct wl_nd_host_ip_addr { + struct ipv6_addr ip_addr; /* host ip address */ + uint8 type; /* type of address */ + uint8 pad[3]; +} wl_nd_host_ip_addr_t; + +typedef struct wl_nd_host_ip_list { + uint32 count; + wl_nd_host_ip_addr_t host_ip[1]; +} wl_nd_host_ip_list_t; + +#define WL_ND_HOSTIP_IOV_VER 1 + +enum { + WL_ND_HOSTIP_OP_VER = 0, /* get version */ + WL_ND_HOSTIP_OP_ADD, /* add address */ + WL_ND_HOSTIP_OP_DEL, /* delete specified address */ + WL_ND_HOSTIP_OP_DEL_UC, /* delete all unicast address */ + WL_ND_HOSTIP_OP_DEL_AC, /* delete all anycast address */ + WL_ND_HOSTIP_OP_DEL_ALL, /* delete all addresses */ + WL_ND_HOSTIP_OP_LIST, /* get list of host ip address */ + WL_ND_HOSTIP_OP_MAX +}; + +typedef struct wl_nd_hostip { + uint16 version; /* version of iovar buf */ + uint16 op_type; /* operation type */ + uint32 length; /* length of entire structure */ + union { + wl_nd_host_ip_addr_t host_ip; /* set param for add */ + uint16 version; /* get return for ver */ + } u; +} wl_nd_hostip_t; + +#define WL_ND_HOSTIP_FIXED_LEN OFFSETOF(wl_nd_hostip_t, u) +#define WL_ND_HOSTIP_WITH_ADDR_LEN (WL_ND_HOSTIP_FIXED_LEN + sizeof(wl_nd_host_ip_addr_t)) + /* * Keep-alive packet offloading. */ -/* NAT keep-alive packets format: specifies the re-transmission period, the packet +/** + * NAT keep-alive packets format: specifies the re-transmission period, the packet * length, and packet contents. 
*/ typedef struct wl_keep_alive_pkt { - uint32 period_msec; /* Retransmission period (0 to disable packet re-transmits) */ + uint32 period_msec; /** Retransmission period (0 to disable packet re-transmits) */ uint16 len_bytes; /* Size of packet to transmit (0 to disable packet re-transmits) */ - uint8 data[1]; /* Variable length packet to transmit. Contents should include + uint8 data[1]; /** Variable length packet to transmit. Contents should include * entire ethernet packet (enet header, IP header, UDP header, * and UDP payload) in network byte order. */ @@ -3789,12 +4843,69 @@ typedef struct wl_keep_alive_pkt { #define WL_KEEP_ALIVE_FIXED_LEN OFFSETOF(wl_keep_alive_pkt_t, data) +#define MAX_RSSI_COUNT 8 +typedef struct rssi_struct { + int8 val[MAX_RSSI_COUNT]; /**< rssi values in AFs */ + int16 sum; /**< total rssi sum */ + uint8 cnt; /**< number rssi samples */ + uint8 idx; /**< next rssi location */ +} rssi_struct_t; + + +/* + * ptk_start: iovar to start 4-way handshake for secured ranging +*/ + +/* ptk negotiation security type - determines negotiation parameters */ +typedef enum { + WL_PTK_START_SEC_TYPE_PMK = 1 +} wl_ptk_start_sec_type_t; + +/* ptk negotiation role */ +typedef enum { + ROLE_NONE = 0x0, + ROLE_AUTH = 0x1, + ROLE_SUP = 0x2, + ROLE_STATIC = 0x3, + ROLE_INVALID = 0xff, + WL_PTK_START_ROLE_NONE = ROLE_NONE, + WL_PTK_START_ROLE_AUTH = ROLE_AUTH, + WL_PTK_START_ROLE_SUP = ROLE_SUP, + WL_PTK_START_ROLE_STATIC = ROLE_STATIC, + WL_PTK_START_ROLE_INVALID = ROLE_INVALID +} wl_ptk_start_role_t; + +typedef struct wl_ptk_start_tlv { + uint16 id; + uint16 len; + uint8 data[1]; +} wl_ptk_start_tlv_t; + +typedef enum { + WL_PTK_START_TLV_PMK = 1 /* uint8[] */ +} wl_ptk_start_tlv_type; + +typedef enum { + WL_PTK_START_FLAG_NO_DATA_PROT = 1, /* data frame protection disabled */ + WL_PTK_START_FLAG_GEN_FTM_TPK = 2 /* Generate FTM Toast/Seq Protection Key */ +} wl_ptk_start_flags_t; + +typedef struct wl_ptk_start_iov { + uint16 version; + uint16 len; /* length 
of entire iov from version */ + wl_ptk_start_flags_t flags; + wl_ptk_start_sec_type_t sec_type; + wl_ptk_start_role_t role; + struct ether_addr peer_addr; + uint16 pad; /* reserved/32 bit alignment */ + wl_ptk_start_tlv_t tlvs[1]; +} wl_ptk_start_iov_t; /* * Dongle pattern matching filter. */ -#define MAX_WAKE_PACKET_CACHE_BYTES 128 /* Maximum cached wake packet */ +#define MAX_WAKE_PACKET_CACHE_BYTES 128 /**< Maximum cached wake packet */ #define MAX_WAKE_PACKET_BYTES (DOT11_A3_HDR_LEN + \ DOT11_QOS_LEN + \ @@ -3802,79 +4913,109 @@ typedef struct wl_keep_alive_pkt { ETHER_MAX_DATA) typedef struct pm_wake_packet { - uint32 status; /* Is the wake reason a packet (if all the other field's valid) */ - uint32 pattern_id; /* Pattern ID that matched */ + uint32 status; /**< Is the wake reason a packet (if all the other field's valid) */ + uint32 pattern_id; /**< Pattern ID that matched */ uint32 original_packet_size; uint32 saved_packet_size; - uchar packet[MAX_WAKE_PACKET_CACHE_BYTES]; + uint8 packet[MAX_WAKE_PACKET_CACHE_BYTES]; } pm_wake_packet_t; /* Packet filter types. Currently, only pattern matching is supported. 
*/ typedef enum wl_pkt_filter_type { - WL_PKT_FILTER_TYPE_PATTERN_MATCH=0, /* Pattern matching filter */ - WL_PKT_FILTER_TYPE_MAGIC_PATTERN_MATCH=1, /* Magic packet match */ - WL_PKT_FILTER_TYPE_PATTERN_LIST_MATCH=2, /* A pattern list (match all to match filter) */ - WL_PKT_FILTER_TYPE_ENCRYPTED_PATTERN_MATCH=3, /* SECURE WOWL magic / net pattern match */ + WL_PKT_FILTER_TYPE_PATTERN_MATCH=0, /**< Pattern matching filter */ + WL_PKT_FILTER_TYPE_MAGIC_PATTERN_MATCH=1, /**< Magic packet match */ + WL_PKT_FILTER_TYPE_PATTERN_LIST_MATCH=2, /**< A pattern list (match all to match filter) */ + WL_PKT_FILTER_TYPE_ENCRYPTED_PATTERN_MATCH=3, /**< SECURE WOWL magic / net pattern match */ + WL_PKT_FILTER_TYPE_APF_MATCH=4, /* Android packet filter match */ + WL_PKT_FILTER_TYPE_PATTERN_MATCH_TIMEOUT=5, /* Pattern matching filter with timeout event */ + WL_PKT_FILTER_TYPE_IMMEDIATE_PATTERN_MATCH=6, /* Immediately pattern matching filter */ + WL_PKT_FILTYER_TYPE_MAX = 7, /* Pkt filter type MAX */ } wl_pkt_filter_type_t; #define WL_PKT_FILTER_TYPE wl_pkt_filter_type_t /* String mapping for types that may be used by applications or debug */ #define WL_PKT_FILTER_TYPE_NAMES \ - { "PATTERN", WL_PKT_FILTER_TYPE_PATTERN_MATCH }, \ - { "MAGIC", WL_PKT_FILTER_TYPE_MAGIC_PATTERN_MATCH }, \ - { "PATLIST", WL_PKT_FILTER_TYPE_PATTERN_LIST_MATCH } + { "PATTERN", WL_PKT_FILTER_TYPE_PATTERN_MATCH }, \ + { "MAGIC", WL_PKT_FILTER_TYPE_MAGIC_PATTERN_MATCH }, \ + { "PATLIST", WL_PKT_FILTER_TYPE_PATTERN_LIST_MATCH }, \ + { "SECURE WOWL", WL_PKT_FILTER_TYPE_ENCRYPTED_PATTERN_MATCH }, \ + { "APF", WL_PKT_FILTER_TYPE_APF_MATCH }, \ + { "PATTERN TIMEOUT", WL_PKT_FILTER_TYPE_PATTERN_MATCH_TIMEOUT }, \ + { "IMMEDIATE", WL_PKT_FILTER_TYPE_IMMEDIATE_PATTERN_MATCH } -/* Secured WOWL packet was encrypted, need decrypted before check filter match */ +/** Secured WOWL packet was encrypted, need decrypted before check filter match */ typedef struct wl_pkt_decrypter { uint8* (*dec_cb)(void* dec_ctx, const void 
*sdu, int sending); void* dec_ctx; } wl_pkt_decrypter_t; -/* Pattern matching filter. Specifies an offset within received packets to +/** + * Pattern matching filter. Specifies an offset within received packets to * start matching, the pattern to match, the size of the pattern, and a bitmask * that indicates which bits within the pattern should be matched. */ typedef struct wl_pkt_filter_pattern { - uint32 offset; /* Offset within received packet to start pattern matching. + uint32 offset; /**< Offset within received packet to start pattern matching. * Offset '0' is the first byte of the ethernet header. */ - uint32 size_bytes; /* Size of the pattern. Bitmask must be the same size. */ - uint8 mask_and_pattern[1]; /* Variable length mask and pattern data. mask starts + uint32 size_bytes; /**< Size of the pattern. Bitmask must be the same size. */ + uint8 mask_and_pattern[]; /**< Variable length mask and pattern data. mask starts * at offset 0. Pattern immediately follows mask. for * secured pattern, put the descrypter pointer to the * beginning, mask and pattern postponed correspondingly */ } wl_pkt_filter_pattern_t; -/* A pattern list is a numerically specified list of modified pattern structures. */ +/** A pattern list is a numerically specified list of modified pattern structures. 
*/ typedef struct wl_pkt_filter_pattern_listel { - uint16 rel_offs; /* Offset to begin match (relative to 'base' below) */ - uint16 base_offs; /* Base for offset (defined below) */ - uint16 size_bytes; /* Size of mask/pattern */ - uint16 match_flags; /* Addition flags controlling the match */ - uint8 mask_and_data[1]; /* Variable length mask followed by data, each size_bytes */ + uint16 rel_offs; /**< Offset to begin match (relative to 'base' below) */ + uint16 base_offs; /**< Base for offset (defined below) */ + uint16 size_bytes; /**< Size of mask/pattern */ + uint16 match_flags; /**< Addition flags controlling the match */ + uint8 mask_and_data[1]; /**< Variable length mask followed by data, each size_bytes */ } wl_pkt_filter_pattern_listel_t; typedef struct wl_pkt_filter_pattern_list { - uint8 list_cnt; /* Number of elements in the list */ - uint8 PAD1[1]; /* Reserved (possible version: reserved) */ - uint16 totsize; /* Total size of this pattern list (includes this struct) */ - wl_pkt_filter_pattern_listel_t patterns[1]; /* Variable number of list elements */ + uint8 list_cnt; /**< Number of elements in the list */ + uint8 PAD1[1]; /**< Reserved (possible version: reserved) */ + uint16 totsize; /**< Total size of this pattern list (includes this struct) */ + wl_pkt_filter_pattern_listel_t patterns[]; /**< Variable number of list elements */ } wl_pkt_filter_pattern_list_t; -/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */ +typedef struct wl_apf_program { + uint16 version; + uint16 instr_len; /* number of instruction blocks */ + uint32 inst_ts; /* program installation timestamp */ + uint8 instrs[]; /* variable length instructions */ +} wl_apf_program_t; + +typedef struct wl_pkt_filter_pattern_timeout { + uint32 offset; /* Offset within received packet to start pattern matching. + * Offset '0' is the first byte of the ethernet header. + */ + uint32 size_bytes; /* Size of the pattern. Bitmask must be the same size. 
*/ + uint32 timeout; /* Timeout(seconds) */ + uint8 mask_and_pattern[1]; /* Variable length mask and pattern data. + * mask starts at offset 0. Pattern + * immediately follows mask. + */ +} wl_pkt_filter_pattern_timeout_t; + +/** IOVAR "pkt_filter_add" parameter. Used to install packet filters. */ typedef struct wl_pkt_filter { - uint32 id; /* Unique filter id, specified by app. */ - uint32 type; /* Filter type (WL_PKT_FILTER_TYPE_xxx). */ - uint32 negate_match; /* Negate the result of filter matches */ + uint32 id; /**< Unique filter id, specified by app. */ + uint32 type; /**< Filter type (WL_PKT_FILTER_TYPE_xxx). */ + uint32 negate_match; /**< Negate the result of filter matches */ union { /* Filter definitions */ - wl_pkt_filter_pattern_t pattern; /* Pattern matching filter */ - wl_pkt_filter_pattern_list_t patlist; /* List of patterns to match */ + wl_pkt_filter_pattern_t pattern; /**< Pattern matching filter */ + wl_pkt_filter_pattern_list_t patlist; /**< List of patterns to match */ + wl_apf_program_t apf_program; /* apf program */ + wl_pkt_filter_pattern_timeout_t pattern_timeout; /* Pattern timeout event filter */ } u; } wl_pkt_filter_t; -/* IOVAR "tcp_keep_set" parameter. Used to install tcp keep_alive stuff. */ +/** IOVAR "tcp_keep_set" parameter. Used to install tcp keep_alive stuff. */ typedef struct wl_tcp_keep_set { uint32 val1; uint32 val2; @@ -3885,35 +5026,45 @@ typedef struct wl_tcp_keep_set { #define WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_pattern_list_t, patterns) #define WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN \ OFFSETOF(wl_pkt_filter_pattern_listel_t, mask_and_data) +#define WL_PKT_FILTER_PATTERN_TIMEOUT_FIXED_LEN \ + OFFSETOF(wl_pkt_filter_pattern_timeout_t, mask_and_pattern) -/* IOVAR "pkt_filter_enable" parameter. 
*/ +#define WL_APF_INTERNAL_VERSION 1 +#define WL_APF_PROGRAM_MAX_SIZE (2 * 1024) +#define WL_APF_PROGRAM_FIXED_LEN OFFSETOF(wl_apf_program_t, instrs) +#define WL_APF_PROGRAM_LEN(apf_program) \ + ((apf_program)->instr_len * sizeof((apf_program)->instrs[0])) +#define WL_APF_PROGRAM_TOTAL_LEN(apf_program) \ + (WL_APF_PROGRAM_FIXED_LEN + WL_APF_PROGRAM_LEN(apf_program)) + +/** IOVAR "pkt_filter_enable" parameter. */ typedef struct wl_pkt_filter_enable { - uint32 id; /* Unique filter id */ - uint32 enable; /* Enable/disable bool */ + uint32 id; /**< Unique filter id */ + uint32 enable; /**< Enable/disable bool */ } wl_pkt_filter_enable_t; -/* IOVAR "pkt_filter_list" parameter. Used to retrieve a list of installed filters. */ +/** IOVAR "pkt_filter_list" parameter. Used to retrieve a list of installed filters. */ typedef struct wl_pkt_filter_list { - uint32 num; /* Number of installed packet filters */ - wl_pkt_filter_t filter[1]; /* Variable array of packet filters. */ + uint32 num; /**< Number of installed packet filters */ + wl_pkt_filter_t filter[1]; /**< Variable array of packet filters. */ } wl_pkt_filter_list_t; #define WL_PKT_FILTER_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_list_t, filter) -/* IOVAR "pkt_filter_stats" parameter. Used to retrieve debug statistics. */ +/** IOVAR "pkt_filter_stats" parameter. Used to retrieve debug statistics. */ typedef struct wl_pkt_filter_stats { - uint32 num_pkts_matched; /* # filter matches for specified filter id */ - uint32 num_pkts_forwarded; /* # packets fwded from dongle to host for all filters */ - uint32 num_pkts_discarded; /* # packets discarded by dongle for all filters */ + uint32 num_pkts_matched; /**< # filter matches for specified filter id */ + uint32 num_pkts_forwarded; /**< # packets fwded from dongle to host for all filters */ + uint32 num_pkts_discarded; /**< # packets discarded by dongle for all filters */ } wl_pkt_filter_stats_t; -/* IOVAR "pkt_filter_ports" parameter. Configure TCP/UDP port filters. 
*/ +/** IOVAR "pkt_filter_ports" parameter. Configure TCP/UDP port filters. */ typedef struct wl_pkt_filter_ports { - uint8 version; /* Be proper */ - uint8 reserved; /* Be really proper */ - uint16 count; /* Number of ports following */ + uint8 version; /**< Be proper */ + uint8 reserved; /**< Be really proper */ + uint16 count; /**< Number of ports following */ /* End of fixed data */ - uint16 ports[1]; /* Placeholder for ports[] */ + uint16 ports[1]; /**< Placeholder for ports[] */ } wl_pkt_filter_ports_t; #define WL_PKT_FILTER_PORTS_FIXED_LEN OFFSETOF(wl_pkt_filter_ports_t, ports) @@ -3923,20 +5074,21 @@ typedef struct wl_pkt_filter_ports { #define RSN_REPLAY_LEN 8 typedef struct _gtkrefresh { - uchar KCK[RSN_KCK_LENGTH]; - uchar KEK[RSN_KEK_LENGTH]; - uchar ReplayCounter[RSN_REPLAY_LEN]; + uint8 KCK[RSN_KCK_LENGTH]; + uint8 KEK[RSN_KEK_LENGTH]; + uint8 ReplayCounter[RSN_REPLAY_LEN]; } gtk_keyinfo_t, *pgtk_keyinfo_t; -/* Sequential Commands ioctl */ +/** Sequential Commands ioctl */ typedef struct wl_seq_cmd_ioctl { - uint32 cmd; /* common ioctl definition */ - uint32 len; /* length of user buffer */ + uint32 cmd; /**< common ioctl definition */ + uint32 len; /**< length of user buffer */ } wl_seq_cmd_ioctl_t; #define WL_SEQ_CMD_ALIGN_BYTES 4 -/* These are the set of get IOCTLs that should be allowed when using +/** + * These are the set of get IOCTLs that should be allowed when using * IOCTL sequence commands. These are issued implicitly by wl.exe each time * it is invoked. We never want to buffer these, or else wl.exe will stop working. */ @@ -3948,20 +5100,42 @@ typedef struct wl_seq_cmd_ioctl { typedef struct wl_pkteng { uint32 flags; - uint32 delay; /* Inter-packet delay */ - uint32 nframes; /* Number of frames */ - uint32 length; /* Packet length */ - uint8 seqno; /* Enable/disable sequence no. 
*/ - struct ether_addr dest; /* Destination address */ - struct ether_addr src; /* Source address */ + uint32 delay; /**< Inter-packet delay */ + uint32 nframes; /**< Number of frames */ + uint32 length; /**< Packet length */ + uint8 seqno; /**< Enable/disable sequence no. */ + struct ether_addr dest; /**< Destination address */ + struct ether_addr src; /**< Source address */ + uint8 PAD[3]; } wl_pkteng_t; +#define WL_PKTENG_RU_FILL_VER_1 1 +// struct for ru packet engine +typedef struct wl_pkteng_ru { + uint16 version; /* ver is 1 */ + uint16 length; /* size of complete structure */ + uint8 bw; /* bandwidth info */ + uint8 ru_alloc_val; /* ru allocation index number */ + uint8 mcs_val; /* mcs allocated value */ + uint8 nss_val; /* num of spatial streams */ + uint32 num_bytes; /* approx num of bytes to calculate other required params */ + uint8 cp_ltf_val ; /* GI and LTF symbol size */ + uint8 he_ltf_symb ; /* num of HE-LTF symbols */ + uint8 stbc; /* STBC support */ + uint8 coding_val; /* BCC/LDPC coding support */ + uint8 pe_category; /* PE duration 0/8/16usecs */ + uint8 dcm; /* dual carrier modulation */ + uint8 mumimo_ltfmode; /* ltf mode */ + uint8 PAD[1]; /* pad bytes to make structure occupy 4 byte aligned */ +} wl_pkteng_ru_fill_t; + typedef struct wl_pkteng_stats { - uint32 lostfrmcnt; /* RX PER test: no of frames lost (skip seqno) */ - int32 rssi; /* RSSI */ - int32 snr; /* signal to noise ratio */ + uint32 lostfrmcnt; /**< RX PER test: no of frames lost (skip seqno) */ + int32 rssi; /**< RSSI */ + int32 snr; /**< signal to noise ratio */ uint16 rxpktcnt[NUM_80211_RATES+1]; - uint8 rssi_qdb; /* qdB portion of the computed rssi */ + uint8 rssi_qdb; /**< qdB portion of the computed rssi */ + uint8 PAD; } wl_pkteng_stats_t; typedef struct wl_txcal_params { @@ -3969,9 +5143,46 @@ typedef struct wl_txcal_params { uint8 gidx_start; int8 gidx_step; uint8 gidx_stop; + uint8 PAD; } wl_txcal_params_t; +typedef struct wl_sslpnphy_papd_debug_data { + uint8 psat_pwr; 
+ uint8 psat_indx; + uint8 final_idx; + uint8 start_idx; + int32 min_phase; + int32 voltage; + int8 temperature; + uint8 PAD[3]; +} wl_sslpnphy_papd_debug_data_t; +typedef struct wl_sslpnphy_debug_data { + int16 papdcompRe [64]; + int16 papdcompIm [64]; +} wl_sslpnphy_debug_data_t; +typedef struct wl_sslpnphy_spbdump_data { + uint16 tbl_length; + int16 spbreal[256]; + int16 spbimg[256]; +} wl_sslpnphy_spbdump_data_t; +typedef struct wl_sslpnphy_percal_debug_data { + uint32 cur_idx; + uint32 tx_drift; + uint8 prev_cal_idx; + uint8 PAD[3]; + uint32 percal_ctr; + int32 nxt_cal_idx; + uint32 force_1idxcal; + uint32 onedxacl_req; + int32 last_cal_volt; + int8 last_cal_temp; + uint8 PAD[3]; + uint32 vbat_ripple; + uint32 exit_route; + int32 volt_winner; +} wl_sslpnphy_percal_debug_data_t; + typedef enum { wowl_pattern_type_bitmap = 0, wowl_pattern_type_arp, @@ -3979,40 +5190,35 @@ typedef enum { } wowl_pattern_type_t; typedef struct wl_wowl_pattern { - uint32 masksize; /* Size of the mask in #of bytes */ - uint32 offset; /* Pattern byte offset in packet */ - uint32 patternoffset; /* Offset of start of pattern in the structure */ - uint32 patternsize; /* Size of the pattern itself in #of bytes */ - uint32 id; /* id */ - uint32 reasonsize; /* Size of the wakeup reason code */ - wowl_pattern_type_t type; /* Type of pattern */ + uint32 masksize; /**< Size of the mask in #of bytes */ + uint32 offset; /**< Pattern byte offset in packet */ + uint32 patternoffset; /**< Offset of start of pattern in the structure */ + uint32 patternsize; /**< Size of the pattern itself in #of bytes */ + uint32 id; /**< id */ + uint32 reasonsize; /**< Size of the wakeup reason code */ + wowl_pattern_type_t type; /**< Type of pattern */ /* Mask follows the structure above */ /* Pattern follows the mask is at 'patternoffset' from the start */ } wl_wowl_pattern_t; typedef struct wl_wowl_pattern_list { - uint count; + uint32 count; wl_wowl_pattern_t pattern[1]; } wl_wowl_pattern_list_t; typedef struct 
wl_wowl_wakeind { - uint8 pci_wakeind; /* Whether PCI PMECSR PMEStatus bit was set */ - uint32 ucode_wakeind; /* What wakeup-event indication was set by ucode */ + uint8 pci_wakeind; /**< Whether PCI PMECSR PMEStatus bit was set */ + uint32 ucode_wakeind; /**< What wakeup-event indication was set by ucode */ } wl_wowl_wakeind_t; -typedef struct { - uint32 pktlen; /* size of packet */ - void *sdu; -} tcp_keepalive_wake_pkt_infop_t; - -/* per AC rate control related data structure */ +/** per AC rate control related data structure */ typedef struct wl_txrate_class { uint8 init_rate; uint8 min_rate; uint8 max_rate; } wl_txrate_class_t; -/* structure for Overlap BSS scan arguments */ +/** structure for Overlap BSS scan arguments */ typedef struct wl_obss_scan_arg { int16 passive_dwell; int16 active_dwell; @@ -4025,18 +5231,34 @@ typedef struct wl_obss_scan_arg { #define WL_OBSS_SCAN_PARAM_LEN sizeof(wl_obss_scan_arg_t) -/* RSSI event notification configuration. */ +/** RSSI event notification configuration. */ typedef struct wl_rssi_event { - uint32 rate_limit_msec; /* # of events posted to application will be limited to + uint32 rate_limit_msec; /**< # of events posted to application will be limited to * one per specified period (0 to disable rate limit). */ - uint8 num_rssi_levels; /* Number of entries in rssi_levels[] below */ - int8 rssi_levels[MAX_RSSI_LEVELS]; /* Variable number of RSSI levels. An event + uint8 num_rssi_levels; /**< Number of entries in rssi_levels[] below */ + int8 rssi_levels[MAX_RSSI_LEVELS]; /**< Variable number of RSSI levels. An event * will be posted each time the RSSI of received * beacons/packets crosses a level. 
*/ + int8 pad[3]; } wl_rssi_event_t; +#define RSSI_MONITOR_VERSION 1 +#define RSSI_MONITOR_STOP (1 << 0) +typedef struct wl_rssi_monitor_cfg { + uint8 version; + uint8 flags; + int8 max_rssi; + int8 min_rssi; +} wl_rssi_monitor_cfg_t; + +typedef struct wl_rssi_monitor_evt { + uint8 version; + int8 cur_rssi; + uint16 pad; +} wl_rssi_monitor_evt_t; + /* CCA based channel quality event configuration */ #define WL_CHAN_QUAL_CCA 0 #define WL_CHAN_QUAL_NF 1 @@ -4046,22 +5268,21 @@ typedef struct wl_rssi_event { #define MAX_CHAN_QUAL_LEVELS 8 typedef struct wl_chan_qual_metric { - uint8 id; /* metric ID */ - uint8 num_levels; /* Number of entries in rssi_levels[] below */ + uint8 id; /**< metric ID */ + uint8 num_levels; /**< Number of entries in rssi_levels[] below */ uint16 flags; - int16 htol[MAX_CHAN_QUAL_LEVELS]; /* threshold level array: hi-to-lo */ - int16 ltoh[MAX_CHAN_QUAL_LEVELS]; /* threshold level array: lo-to-hi */ + int16 htol[MAX_CHAN_QUAL_LEVELS]; /**< threshold level array: hi-to-lo */ + int16 ltoh[MAX_CHAN_QUAL_LEVELS]; /**< threshold level array: lo-to-hi */ } wl_chan_qual_metric_t; typedef struct wl_chan_qual_event { - uint32 rate_limit_msec; /* # of events posted to application will be limited to + uint32 rate_limit_msec; /**< # of events posted to application will be limited to * one per specified period (0 to disable rate limit). 
*/ uint16 flags; uint16 num_metrics; - wl_chan_qual_metric_t metric[WL_CHAN_QUAL_TOTAL]; /* metric array */ + wl_chan_qual_metric_t metric[WL_CHAN_QUAL_TOTAL]; /**< metric array */ } wl_chan_qual_event_t; - typedef struct wl_action_obss_coex_req { uint8 info; uint8 num; @@ -4069,29 +5290,31 @@ typedef struct wl_action_obss_coex_req { } wl_action_obss_coex_req_t; -/* IOVar parameter block for small MAC address array with type indicator */ +/** IOVar parameter block for small MAC address array with type indicator */ #define WL_IOV_MAC_PARAM_LEN 4 #define WL_IOV_PKTQ_LOG_PRECS 16 +#include typedef BWL_PRE_PACKED_STRUCT struct { uint32 num_addrs; - char addr_type[WL_IOV_MAC_PARAM_LEN]; + uint8 addr_type[WL_IOV_MAC_PARAM_LEN]; struct ether_addr ea[WL_IOV_MAC_PARAM_LEN]; } BWL_POST_PACKED_STRUCT wl_iov_mac_params_t; +#include -/* This is extra info that follows wl_iov_mac_params_t */ -typedef BWL_PRE_PACKED_STRUCT struct { +/** This is extra info that follows wl_iov_mac_params_t */ +typedef struct { uint32 addr_info[WL_IOV_MAC_PARAM_LEN]; -} BWL_POST_PACKED_STRUCT wl_iov_mac_extra_params_t; +} wl_iov_mac_extra_params_t; -/* Combined structure */ +/** Combined structure */ typedef struct { wl_iov_mac_params_t params; wl_iov_mac_extra_params_t extra_params; } wl_iov_mac_full_params_t; -/* Parameter block for PKTQ_LOG statistics */ +/** Parameter block for PKTQ_LOG statistics */ #define PKTQ_LOG_COUNTERS_V4 \ /* packets requested to be stored */ \ uint32 requested; \ @@ -4140,10 +5363,10 @@ typedef struct { PKTQ_LOG_COUNTERS_V4 } pktq_log_counters_v04_t; -/* v5 is the same as V4 with extra parameter */ +/** v5 is the same as V4 with extra parameter */ typedef struct { PKTQ_LOG_COUNTERS_V4 - /* cumulative time to transmit */ + /** cumulative time to transmit */ uint32 airtime; } pktq_log_counters_v05_t; @@ -4152,7 +5375,7 @@ typedef struct { pktq_log_counters_v04_t counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS]; uint32 counter_info[WL_IOV_MAC_PARAM_LEN]; uint32 
pspretend_time_delta[WL_IOV_MAC_PARAM_LEN]; - char headings[1]; + char headings[]; } pktq_log_format_v04_t; typedef struct { @@ -4160,7 +5383,7 @@ typedef struct { pktq_log_counters_v05_t counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS]; uint32 counter_info[WL_IOV_MAC_PARAM_LEN]; uint32 pspretend_time_delta[WL_IOV_MAC_PARAM_LEN]; - char headings[1]; + char headings[]; } pktq_log_format_v05_t; @@ -4177,61 +5400,75 @@ typedef struct { #define PKTQ_LOG_AUTO (1 << 31) #define PKTQ_LOG_DEF_PREC (1 << 30) - +typedef struct wl_pfn_macaddr_cfg_0 { + uint8 version; + uint8 reserved; + struct ether_addr macaddr; +} wl_pfn_macaddr_cfg_0_t; #define LEGACY1_WL_PFN_MACADDR_CFG_VER 0 - #define WL_PFN_MAC_OUI_ONLY_MASK 1 #define WL_PFN_SET_MAC_UNASSOC_MASK 2 #define WL_PFN_RESTRICT_LA_MAC_MASK 4 #define WL_PFN_MACADDR_FLAG_MASK 0x7 - +/** To configure pfn_macaddr */ +typedef struct wl_pfn_macaddr_cfg { + uint8 version; + uint8 flags; + struct ether_addr macaddr; +} wl_pfn_macaddr_cfg_t; +#define WL_PFN_MACADDR_CFG_VER 1 /* * SCB_BS_DATA iovar definitions start. */ #define SCB_BS_DATA_STRUCT_VERSION 1 -/* The actual counters maintained for each station */ -typedef BWL_PRE_PACKED_STRUCT struct { +/** The actual counters maintained for each station */ +typedef struct { /* The following counters are a subset of what pktq_stats provides per precedence. 
*/ - uint32 retry; /* packets re-sent because they were not received */ - uint32 retry_drop; /* packets finally dropped after retry limit */ - uint32 rtsfail; /* count of rts attempts that failed to receive cts */ - uint32 acked; /* count of packets sent (acked) successfully */ - uint32 txrate_succ; /* running total of phy rate of packets sent successfully */ - uint32 txrate_main; /* running total of phy 'main' rate */ - uint32 throughput; /* actual data transferred successfully */ - uint32 time_delta; /* time difference since last pktq_stats */ - uint32 airtime; /* cumulative total medium access delay in useconds */ -} BWL_POST_PACKED_STRUCT iov_bs_data_counters_t; + uint32 retry; /**< packets re-sent because they were not received */ + uint32 retry_drop; /**< packets finally dropped after retry limit */ + uint32 rtsfail; /**< count of rts attempts that failed to receive cts */ + uint32 acked; /**< count of packets sent (acked) successfully */ + uint32 txrate_succ; /**< running total of phy rate of packets sent successfully */ + uint32 txrate_main; /**< running total of phy 'main' rate */ + uint32 throughput; /**< actual data transferred successfully */ + uint32 time_delta; /**< time difference since last pktq_stats */ + uint32 airtime; /**< cumulative total medium access delay in useconds */ +} iov_bs_data_counters_t; -/* The structure for individual station information. */ +/** The structure for individual station information. */ +#include typedef BWL_PRE_PACKED_STRUCT struct { - struct ether_addr station_address; /* The station MAC address */ - uint16 station_flags; /* Bit mask of flags, for future use. */ - iov_bs_data_counters_t station_counters; /* The actual counter values */ + struct ether_addr station_address; /**< The station MAC address */ + uint16 station_flags; /**< Bit mask of flags, for future use. 
*/ + iov_bs_data_counters_t station_counters; /**< The actual counter values */ } BWL_POST_PACKED_STRUCT iov_bs_data_record_t; +#include +#include typedef BWL_PRE_PACKED_STRUCT struct { - uint16 structure_version; /* Structure version number (for wl/wlu matching) */ - uint16 structure_count; /* Number of iov_bs_data_record_t records following */ - iov_bs_data_record_t structure_record[1]; /* 0 - structure_count records */ + uint16 structure_version; /**< Structure version number (for wl/wlu matching) */ + uint16 structure_count; /**< Number of iov_bs_data_record_t records following */ + iov_bs_data_record_t structure_record[1]; /**< 0 - structure_count records */ } BWL_POST_PACKED_STRUCT iov_bs_data_struct_t; +#include /* Bitmask of options that can be passed in to the iovar. */ enum { - SCB_BS_DATA_FLAG_NO_RESET = (1<<0) /* Do not clear the counters after reading */ + SCB_BS_DATA_FLAG_NO_RESET = (1<<0) /**< Do not clear the counters after reading */ }; /* * SCB_BS_DATA iovar definitions end. 
*/ typedef struct wlc_extlog_cfg { - int max_number; - uint16 module; /* bitmap */ + int32 max_number; + uint16 module; /**< bitmap */ uint8 level; uint8 flag; uint16 version; + uint16 PAD; } wlc_extlog_cfg_t; typedef struct log_record { @@ -4241,8 +5478,10 @@ typedef struct log_record { uint8 level; uint8 sub_unit; uint8 seq_num; + uint8 pad; int32 arg; - char str[MAX_ARGSTR_LEN]; + char str[MAX_ARGSTR_LEN]; + char PAD[4-MAX_ARGSTR_LEN%4]; } log_record_t; typedef struct wlc_extlog_req { @@ -4304,16 +5543,7 @@ typedef enum { FMTSTR_MAX_ID } log_fmtstr_id_t; -#ifdef DONGLEOVERLAYS -typedef struct { - uint32 flags_idx; /* lower 8 bits: overlay index; upper 24 bits: flags */ - uint32 offset; /* offset into overlay region to write code */ - uint32 len; /* overlay code len */ - /* overlay code follows this struct */ -} wl_ioctl_overlay_t; -#endif /* DONGLEOVERLAYS */ - -/* 11k Neighbor Report element (unversioned, deprecated) */ +/** 11k Neighbor Report element (unversioned, deprecated) */ typedef struct nbr_element { uint8 id; uint8 len; @@ -4326,7 +5556,7 @@ typedef struct nbr_element { } nbr_element_t; #define WL_RRM_NBR_RPT_VER 1 -/* 11k Neighbor Report element */ +/** 11k Neighbor Report element */ typedef struct nbr_rpt_elem { uint8 version; uint8 id; @@ -4340,8 +5570,9 @@ typedef struct nbr_rpt_elem { uint8 phytype; uint8 pad_2; wlc_ssid_t ssid; + chanspec_t chanspec; uint8 bss_trans_preference; - uint8 pad_3[3]; + uint8 flags; } nbr_rpt_elem_t; typedef enum event_msgs_ext_command { @@ -4367,61 +5598,52 @@ typedef struct eventmsgs_ext uint8 mask[1]; } eventmsgs_ext_t; +#include typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_tput_params { - /* no of host dma descriptors programmed by the firmware before a commit */ + /** no of host dma descriptors programmed by the firmware before a commit */ uint16 max_dma_descriptors; - uint16 host_buf_len; /* length of host buffer */ - dmaaddr_t host_buf_addr; /* physical address for bus_throughput_buf */ + uint16 host_buf_len; 
/**< length of host buffer */ + dmaaddr_t host_buf_addr; /**< physical address for bus_throughput_buf */ } BWL_POST_PACKED_STRUCT pcie_bus_tput_params_t; -typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_tput_stats { - uint16 time_taken; /* no of secs the test is run */ - uint16 nbytes_per_descriptor; /* no of bytes of data dma ed per descriptor */ - - /* no of desciptors fo which dma is sucessfully completed within the test time */ - uint32 count; -} BWL_POST_PACKED_STRUCT pcie_bus_tput_stats_t; - -#define MAX_ROAMOFFL_BSSID_NUM 100 - -typedef BWL_PRE_PACKED_STRUCT struct roamoffl_bssid_list { - int32 cnt; - struct ether_addr bssid[1]; -} BWL_POST_PACKED_STRUCT roamoffl_bssid_list_t; - -/* no default structure packing */ #include +typedef struct pcie_bus_tput_stats { + uint16 time_taken; /**< no of secs the test is run */ + uint16 nbytes_per_descriptor; /**< no of bytes of data dma ed per descriptor */ + + /** no of desciptors for which dma is sucessfully completed within the test time */ + uint32 count; +} pcie_bus_tput_stats_t; + typedef struct keepalives_max_idle { - uint16 keepalive_count; /* nmbr of keepalives per bss_max_idle period */ - uint8 mkeepalive_index; /* mkeepalive_index for keepalive frame to be used */ - uint8 PAD; /* to align next field */ - uint16 max_interval; /* seconds */ + uint16 keepalive_count; /**< nmbr of keepalives per bss_max_idle period */ + uint8 mkeepalive_index; /**< mkeepalive_index for keepalive frame to be used */ + uint8 PAD; /**< to align next field */ + uint16 max_interval; /**< seconds */ } keepalives_max_idle_t; #define PM_IGNORE_BCMC_PROXY_ARP (1 << 0) #define PM_IGNORE_BCMC_ALL_DMS_ACCEPTED (1 << 1) -/* require strict packing */ -#include - /* ##### Power Stats section ##### */ #define WL_PWRSTATS_VERSION 2 -/* Input structure for pwrstats IOVAR */ -typedef BWL_PRE_PACKED_STRUCT struct wl_pwrstats_query { - uint16 length; /* Number of entries in type array. */ - uint16 type[1]; /* Types (tags) to retrieve. 
+/** Input structure for pwrstats IOVAR */ +typedef struct wl_pwrstats_query { + uint16 length; /**< Number of entries in type array. */ + uint16 type[1]; /**< Types (tags) to retrieve. * Length 0 (no types) means get all. */ -} BWL_POST_PACKED_STRUCT wl_pwrstats_query_t; +} wl_pwrstats_query_t; -/* This structure is for version 2; version 1 will be deprecated in by FW */ +/** This structure is for version 2; version 1 will be deprecated in by FW */ +#include typedef BWL_PRE_PACKED_STRUCT struct wl_pwrstats { - uint16 version; /* Version = 2 is TLV format */ - uint16 length; /* Length of entire structure */ - uint8 data[1]; /* TLV data, a series of structures, + uint16 version; /**< Version = 2 is TLV format */ + uint16 length; /**< Length of entire structure */ + uint8 data[1]; /**< TLV data, a series of structures, * each starting with type and length. * * Padded as necessary so each section @@ -4432,21 +5654,13 @@ typedef BWL_PRE_PACKED_STRUCT struct wl_pwrstats { * valid len values are 0-4095. 
*/ } BWL_POST_PACKED_STRUCT wl_pwrstats_t; +#include #define WL_PWR_STATS_HDRLEN OFFSETOF(wl_pwrstats_t, data) -/* Type values for the data section */ -#define WL_PWRSTATS_TYPE_PHY 0 /* struct wl_pwr_phy_stats */ -#define WL_PWRSTATS_TYPE_SCAN 1 /* struct wl_pwr_scan_stats */ -#define WL_PWRSTATS_TYPE_USB_HSIC 2 /* struct wl_pwr_usb_hsic_stats */ -#define WL_PWRSTATS_TYPE_PM_AWAKE1 3 /* struct wl_pwr_pm_awake_stats_v1 */ -#define WL_PWRSTATS_TYPE_CONNECTION 4 /* struct wl_pwr_connect_stats; assoc and key-exch time */ -#define WL_PWRSTATS_TYPE_PCIE 6 /* struct wl_pwr_pcie_stats */ -#define WL_PWRSTATS_TYPE_PM_AWAKE2 7 /* struct wl_pwr_pm_awake_stats_v2 */ - /* Bits for wake reasons */ #define WLC_PMD_WAKE_SET 0x1 #define WLC_PMD_PM_AWAKE_BCN 0x2 -#define WLC_PMD_BTA_ACTIVE 0x4 +/* BIT:3 is no longer being used */ #define WLC_PMD_SCAN_IN_PROGRESS 0x8 #define WLC_PMD_RM_IN_PROGRESS 0x10 #define WLC_PMD_AS_IN_PROGRESS 0x20 @@ -4454,22 +5668,123 @@ typedef BWL_PRE_PACKED_STRUCT struct wl_pwrstats { #define WLC_PMD_PS_POLL 0x80 #define WLC_PMD_CHK_UNALIGN_TBTT 0x100 #define WLC_PMD_APSD_STA_UP 0x200 -#define WLC_PMD_TX_PEND_WAR 0x400 +#define WLC_PMD_TX_PEND_WAR 0x400 /* obsolete, can be reused */ #define WLC_PMD_GPTIMER_STAY_AWAKE 0x800 #define WLC_PMD_PM2_RADIO_SOFF_PEND 0x2000 #define WLC_PMD_NON_PRIM_STA_UP 0x4000 #define WLC_PMD_AP_UP 0x8000 -typedef BWL_PRE_PACKED_STRUCT struct wlc_pm_debug { - uint32 timestamp; /* timestamp in millisecond */ - uint32 reason; /* reason(s) for staying awake */ -} BWL_POST_PACKED_STRUCT wlc_pm_debug_t; +typedef struct wlc_pm_debug { + uint32 timestamp; /**< timestamp in millisecond */ + uint32 reason; /**< reason(s) for staying awake */ +} wlc_pm_debug_t; -/* WL_PWRSTATS_TYPE_PM_AWAKE1 structures (for 6.25 firmware) */ +/** WL_PWRSTATS_TYPE_PM_AWAKE1 structures (for 6.25 firmware) */ #define WLC_STA_AWAKE_STATES_MAX_V1 30 #define WLC_PMD_EVENT_MAX_V1 32 -/* Data sent as part of pwrstats IOVAR (and EXCESS_PM_WAKE event) */ +/** Data 
sent as part of pwrstats IOVAR (and EXCESS_PM_WAKE event) */ +#include typedef BWL_PRE_PACKED_STRUCT struct pm_awake_data_v1 { + uint32 curr_time; /**< ms */ + uint32 hw_macc; /**< HW maccontrol */ + uint32 sw_macc; /**< SW maccontrol */ + uint32 pm_dur; /**< Total sleep time in PM, msecs */ + uint32 mpc_dur; /**< Total sleep time in MPC, msecs */ + + /* int32 drifts = remote - local; +ve drift => local-clk slow */ + int32 last_drift; /**< Most recent TSF drift from beacon */ + int32 min_drift; /**< Min TSF drift from beacon in magnitude */ + int32 max_drift; /**< Max TSF drift from beacon in magnitude */ + + uint32 avg_drift; /**< Avg TSF drift from beacon */ + + /* Wake history tracking */ + uint8 pmwake_idx; /**< for stepping through pm_state */ + wlc_pm_debug_t pm_state[WLC_STA_AWAKE_STATES_MAX_V1]; /**< timestamped wake bits */ + uint32 pmd_event_wake_dur[WLC_PMD_EVENT_MAX_V1]; /**< cumulative usecs per wake reason */ + uint32 drift_cnt; /**< Count of drift readings over which avg_drift was computed */ +} BWL_POST_PACKED_STRUCT pm_awake_data_v1_t; +#include + +#include +typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_stats_v1 { + uint16 type; /**< WL_PWRSTATS_TYPE_PM_AWAKE */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ + + pm_awake_data_v1_t awake_data; + uint32 frts_time; /**< Cumulative ms spent in frts since driver load */ + uint32 frts_end_cnt; /**< No of times frts ended since driver load */ +} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_stats_v1_t; +#include + +/** WL_PWRSTATS_TYPE_PM_AWAKE2 structures. 
Data sent as part of pwrstats IOVAR */ +typedef struct pm_awake_data_v2 { + uint32 curr_time; /**< ms */ + uint32 hw_macc; /**< HW maccontrol */ + uint32 sw_macc; /**< SW maccontrol */ + uint32 pm_dur; /**< Total sleep time in PM, msecs */ + uint32 mpc_dur; /**< Total sleep time in MPC, msecs */ + + /* int32 drifts = remote - local; +ve drift => local-clk slow */ + int32 last_drift; /**< Most recent TSF drift from beacon */ + int32 min_drift; /**< Min TSF drift from beacon in magnitude */ + int32 max_drift; /**< Max TSF drift from beacon in magnitude */ + + uint32 avg_drift; /**< Avg TSF drift from beacon */ + + /* Wake history tracking */ + + /* pmstate array (type wlc_pm_debug_t) start offset */ + uint16 pm_state_offset; + /** pmstate number of array entries */ + uint16 pm_state_len; + + /** array (type uint32) start offset */ + uint16 pmd_event_wake_dur_offset; + /** pmd_event_wake_dur number of array entries */ + uint16 pmd_event_wake_dur_len; + + uint32 drift_cnt; /**< Count of drift readings over which avg_drift was computed */ + uint8 pmwake_idx; /**< for stepping through pm_state */ + uint8 flags; /**< bit0: 1-sleep, 0- wake. bit1: 0-bit0 invlid, 1-bit0 valid */ + uint8 pad[2]; + uint32 frts_time; /**< Cumulative ms spent in frts since driver load */ + uint32 frts_end_cnt; /**< No of times frts ended since driver load */ +} pm_awake_data_v2_t; + +typedef struct wl_pwr_pm_awake_stats_v2 { + uint16 type; /**< WL_PWRSTATS_TYPE_PM_AWAKE */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ + + pm_awake_data_v2_t awake_data; +} wl_pwr_pm_awake_stats_v2_t; + +/* bit0: 1-sleep, 0- wake. 
bit1: 0-bit0 invlid, 1-bit0 valid */ +#define WL_PWR_PM_AWAKE_STATS_WAKE 0x02 +#define WL_PWR_PM_AWAKE_STATS_ASLEEP 0x03 +#define WL_PWR_PM_AWAKE_STATS_WAKE_MASK 0x03 + +/* WL_PWRSTATS_TYPE_PM_AWAKE Version 2 structures taken from 4324/43342 */ +/* These structures are only to be used with 4324/43342 devices */ + +#define WL_STA_AWAKE_STATES_MAX_V2 30 +#define WL_PMD_EVENT_MAX_V2 32 +#define MAX_P2P_BSS_DTIM_PRD 4 + +#include +typedef BWL_PRE_PACKED_STRUCT struct ucode_dbg_v2 { + uint32 macctrl; + uint16 m_p2p_hps; + uint16 m_p2p_bss_dtim_prd[MAX_P2P_BSS_DTIM_PRD]; + uint32 psmdebug[20]; + uint32 phydebug[20]; + uint32 psm_brc; + uint32 ifsstat; +} BWL_POST_PACKED_STRUCT ucode_dbg_v2_t; +#include + +#include +typedef BWL_PRE_PACKED_STRUCT struct pmalert_awake_data_v2 { uint32 curr_time; /* ms */ uint32 hw_macc; /* HW maccontrol */ uint32 sw_macc; /* SW maccontrol */ @@ -4485,172 +5800,296 @@ typedef BWL_PRE_PACKED_STRUCT struct pm_awake_data_v1 { /* Wake history tracking */ uint8 pmwake_idx; /* for stepping through pm_state */ - wlc_pm_debug_t pm_state[WLC_STA_AWAKE_STATES_MAX_V1]; /* timestamped wake bits */ - uint32 pmd_event_wake_dur[WLC_PMD_EVENT_MAX_V1]; /* cumulative usecs per wake reason */ + wlc_pm_debug_t pm_state[WL_STA_AWAKE_STATES_MAX_V2]; /* timestamped wake bits */ + uint32 pmd_event_wake_dur[WL_PMD_EVENT_MAX_V2]; /* cumulative usecs per wake reason */ uint32 drift_cnt; /* Count of drift readings over which avg_drift was computed */ -} BWL_POST_PACKED_STRUCT pm_awake_data_v1_t; + uint32 start_event_dur[WL_PMD_EVENT_MAX_V2]; /* start event-duration */ + ucode_dbg_v2_t ud; + uint32 frts_time; /* Cumulative ms spent in frts since driver load */ + uint32 frts_end_cnt; /* No of times frts ended since driver load */ +} BWL_POST_PACKED_STRUCT pmalert_awake_data_v2_t; +#include -typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_stats_v1 { +#include +typedef BWL_PRE_PACKED_STRUCT struct pm_alert_data_v2 { + uint32 version; + uint32 length; /* Length of 
entire structure */ + uint32 reasons; /* reason(s) for pm_alert */ + /* Following fields are present only for reasons + * PM_DUR_EXCEEDED, MPC_DUR_EXCEEDED & CONST_AWAKE_DUR_EXCEEDED + */ + uint32 prev_stats_time; /* msecs */ + uint32 prev_pm_dur; /* msecs */ + uint32 prev_mpc_dur; /* msecs */ + pmalert_awake_data_v2_t awake_data; +} BWL_POST_PACKED_STRUCT pm_alert_data_v2_t; +#include + +#include +typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_status_v2 { uint16 type; /* WL_PWRSTATS_TYPE_PM_AWAKE */ uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + pmalert_awake_data_v2_t awake_data; + uint32 frts_time; /* Cumulative ms spent in frts since driver load */ + uint32 frts_end_cnt; /* No of times frts ended since driver load */ +} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_status_v2_t; +#include + +/* Below are latest definitions from PHO25178RC100_BRANCH_6_50 */ +/* wl_pwr_pm_awake_stats_v1_t is used for WL_PWRSTATS_TYPE_PM_AWAKE */ +/* (at least) the chip independent registers */ +typedef struct ucode_dbg_ext { + uint32 x120; + uint32 x124; + uint32 x154; + uint32 x158; + uint32 x15c; + uint32 x180; + uint32 x184; + uint32 x188; + uint32 x18c; + uint32 x1a0; + uint32 x1a8; + uint32 x1e0; + uint32 scr_x14; + uint32 scr_x2b; + uint32 scr_x2c; + uint32 scr_x2d; + uint32 scr_x2e; + + uint16 x40a; + uint16 x480; + uint16 x490; + uint16 x492; + uint16 x4d8; + uint16 x4b8; + uint16 x4ba; + uint16 x4bc; + uint16 x4be; + uint16 x500; + uint16 x50e; + uint16 x522; + uint16 x546; + uint16 x578; + uint16 x602; + uint16 x646; + uint16 x648; + uint16 x666; + uint16 x670; + uint16 x690; + uint16 x692; + uint16 x6a0; + uint16 x6a2; + uint16 x6a4; + uint16 x6b2; + uint16 x7c0; + + uint16 shm_x20; + uint16 shm_x4a; + uint16 shm_x5e; + uint16 shm_x5f; + uint16 shm_xaab; + uint16 shm_x74a; + uint16 shm_x74b; + uint16 shm_x74c; + uint16 shm_x74e; + uint16 shm_x756; + uint16 shm_x75b; + uint16 shm_x7b9; + uint16 shm_x7d4; + + uint16 shm_P2P_HPS; + uint16 shm_P2P_intr[16]; + uint16 
shm_P2P_perbss[48]; +} ucode_dbg_ext_t; + +#include +typedef BWL_PRE_PACKED_STRUCT struct pm_alert_data_v1 { + uint32 version; + uint32 length; /**< Length of entire structure */ + uint32 reasons; /**< reason(s) for pm_alert */ + /* Following fields are present only for reasons + * PM_DUR_EXCEEDED, MPC_DUR_EXCEEDED & CONST_AWAKE_DUR_EXCEEDED + */ + uint32 prev_stats_time; /**< msecs */ + uint32 prev_pm_dur; /**< msecs */ + uint32 prev_mpc_dur; /**< msecs */ pm_awake_data_v1_t awake_data; - uint32 frts_time; /* Cumulative ms spent in frts since driver load */ - uint32 frts_end_cnt; /* No of times frts ended since driver load */ -} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_stats_v1_t; + uint32 start_event_dur[WLC_PMD_EVENT_MAX_V1]; /**< start event-duration */ + ucode_dbg_v2_t ud; + uint32 frts_time; /**< Cumulative ms spent in frts since driver load */ + uint32 frts_end_cnt; /**< No of times frts ended since driver load */ + ucode_dbg_ext_t ud_ext; + uint32 prev_frts_dur; /**< ms */ +} BWL_POST_PACKED_STRUCT pm_alert_data_v1_t; +#include -/* WL_PWRSTATS_TYPE_PM_AWAKE2 structures */ -/* Data sent as part of pwrstats IOVAR */ -typedef BWL_PRE_PACKED_STRUCT struct pm_awake_data_v2 { - uint32 curr_time; /* ms */ - uint32 hw_macc; /* HW maccontrol */ - uint32 sw_macc; /* SW maccontrol */ - uint32 pm_dur; /* Total sleep time in PM, msecs */ - uint32 mpc_dur; /* Total sleep time in MPC, msecs */ - - /* int32 drifts = remote - local; +ve drift => local-clk slow */ - int32 last_drift; /* Most recent TSF drift from beacon */ - int32 min_drift; /* Min TSF drift from beacon in magnitude */ - int32 max_drift; /* Max TSF drift from beacon in magnitude */ - - uint32 avg_drift; /* Avg TSF drift from beacon */ - - /* Wake history tracking */ - - /* pmstate array (type wlc_pm_debug_t) start offset */ - uint16 pm_state_offset; - /* pmstate number of array entries */ - uint16 pm_state_len; - - /* array (type uint32) start offset */ - uint16 pmd_event_wake_dur_offset; - /* 
pmd_event_wake_dur number of array entries */ - uint16 pmd_event_wake_dur_len; - - uint32 drift_cnt; /* Count of drift readings over which avg_drift was computed */ - uint8 pmwake_idx; /* for stepping through pm_state */ - uint8 pad[3]; - uint32 frts_time; /* Cumulative ms spent in frts since driver load */ - uint32 frts_end_cnt; /* No of times frts ended since driver load */ -} BWL_POST_PACKED_STRUCT pm_awake_data_v2_t; - -typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_stats_v2 { - uint16 type; /* WL_PWRSTATS_TYPE_PM_AWAKE */ - uint16 len; /* Up to 4K-1, top 4 bits are reserved */ - - pm_awake_data_v2_t awake_data; -} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_stats_v2_t; +/* End of 43342/4324 v2 structure definitions */ /* Original bus structure is for HSIC */ -typedef BWL_PRE_PACKED_STRUCT struct bus_metrics { - uint32 suspend_ct; /* suspend count */ - uint32 resume_ct; /* resume count */ - uint32 disconnect_ct; /* disconnect count */ - uint32 reconnect_ct; /* reconnect count */ - uint32 active_dur; /* msecs in bus, usecs for user */ - uint32 suspend_dur; /* msecs in bus, usecs for user */ - uint32 disconnect_dur; /* msecs in bus, usecs for user */ -} BWL_POST_PACKED_STRUCT bus_metrics_t; -/* Bus interface info for USB/HSIC */ +typedef struct bus_metrics { + uint32 suspend_ct; /**< suspend count */ + uint32 resume_ct; /**< resume count */ + uint32 disconnect_ct; /**< disconnect count */ + uint32 reconnect_ct; /**< reconnect count */ + uint32 active_dur; /**< msecs in bus, usecs for user */ + uint32 suspend_dur; /**< msecs in bus, usecs for user */ + uint32 disconnect_dur; /**< msecs in bus, usecs for user */ +} bus_metrics_t; + +/** Bus interface info for USB/HSIC */ +#include typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_usb_hsic_stats { - uint16 type; /* WL_PWRSTATS_TYPE_USB_HSIC */ - uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + uint16 type; /**< WL_PWRSTATS_TYPE_USB_HSIC */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ - bus_metrics_t 
hsic; /* stats from hsic bus driver */ + bus_metrics_t hsic; /**< stats from hsic bus driver */ } BWL_POST_PACKED_STRUCT wl_pwr_usb_hsic_stats_t; +#include -typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_metrics { - uint32 d3_suspend_ct; /* suspend count */ - uint32 d0_resume_ct; /* resume count */ - uint32 perst_assrt_ct; /* PERST# assert count */ - uint32 perst_deassrt_ct; /* PERST# de-assert count */ - uint32 active_dur; /* msecs */ - uint32 d3_suspend_dur; /* msecs */ - uint32 perst_dur; /* msecs */ - uint32 l0_cnt; /* L0 entry count */ - uint32 l0_usecs; /* L0 duration in usecs */ - uint32 l1_cnt; /* L1 entry count */ - uint32 l1_usecs; /* L1 duration in usecs */ - uint32 l1_1_cnt; /* L1_1ss entry count */ - uint32 l1_1_usecs; /* L1_1ss duration in usecs */ - uint32 l1_2_cnt; /* L1_2ss entry count */ - uint32 l1_2_usecs; /* L1_2ss duration in usecs */ - uint32 l2_cnt; /* L2 entry count */ - uint32 l2_usecs; /* L2 duration in usecs */ - uint32 timestamp; /* Timestamp on when stats are collected */ - uint32 num_h2d_doorbell; /* # of doorbell interrupts - h2d */ - uint32 num_d2h_doorbell; /* # of doorbell interrupts - d2h */ - uint32 num_submissions; /* # of submissions */ - uint32 num_completions; /* # of completions */ - uint32 num_rxcmplt; /* # of rx completions */ - uint32 num_rxcmplt_drbl; /* of drbl interrupts for rx complt. */ - uint32 num_txstatus; /* # of tx completions */ - uint32 num_txstatus_drbl; /* of drbl interrupts for tx complt. 
*/ - uint32 ltr_active_ct; /* # of times chip went to LTR ACTIVE */ - uint32 ltr_active_dur; /* # of msecs chip was in LTR ACTIVE */ - uint32 ltr_sleep_ct; /* # of times chip went to LTR SLEEP */ - uint32 ltr_sleep_dur; /* # of msecs chip was in LTR SLEEP */ - uint32 deepsleep_count; /* # of times chip went to deepsleep */ - uint32 deepsleep_dur; /* # of msecs chip was in deepsleep */ -} BWL_POST_PACKED_STRUCT pcie_bus_metrics_t; +typedef struct pcie_bus_metrics { + uint32 d3_suspend_ct; /**< suspend count */ + uint32 d0_resume_ct; /**< resume count */ + uint32 perst_assrt_ct; /**< PERST# assert count */ + uint32 perst_deassrt_ct; /**< PERST# de-assert count */ + uint32 active_dur; /**< msecs */ + uint32 d3_suspend_dur; /**< msecs */ + uint32 perst_dur; /**< msecs */ + uint32 l0_cnt; /**< L0 entry count */ + uint32 l0_usecs; /**< L0 duration in usecs */ + uint32 l1_cnt; /**< L1 entry count */ + uint32 l1_usecs; /**< L1 duration in usecs */ + uint32 l1_1_cnt; /**< L1_1ss entry count */ + uint32 l1_1_usecs; /**< L1_1ss duration in usecs */ + uint32 l1_2_cnt; /**< L1_2ss entry count */ + uint32 l1_2_usecs; /**< L1_2ss duration in usecs */ + uint32 l2_cnt; /**< L2 entry count */ + uint32 l2_usecs; /**< L2 duration in usecs */ + uint32 timestamp; /**< Timestamp on when stats are collected */ + uint32 num_h2d_doorbell; /**< # of doorbell interrupts - h2d */ + uint32 num_d2h_doorbell; /**< # of doorbell interrupts - d2h */ + uint32 num_submissions; /**< # of submissions */ + uint32 num_completions; /**< # of completions */ + uint32 num_rxcmplt; /**< # of rx completions */ + uint32 num_rxcmplt_drbl; /**< of drbl interrupts for rx complt. */ + uint32 num_txstatus; /**< # of tx completions */ + uint32 num_txstatus_drbl; /**< of drbl interrupts for tx complt. 
*/ + uint32 deepsleep_count; /**< # of times chip went to deepsleep */ + uint32 deepsleep_dur; /**< # of msecs chip was in deepsleep */ + uint32 ltr_active_ct; /**< # of times chip went to LTR ACTIVE */ + uint32 ltr_active_dur; /**< # of msecs chip was in LTR ACTIVE */ + uint32 ltr_sleep_ct; /**< # of times chip went to LTR SLEEP */ + uint32 ltr_sleep_dur; /**< # of msecs chip was in LTR SLEEP */ +} pcie_bus_metrics_t; -/* Bus interface info for PCIE */ -typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pcie_stats { - uint16 type; /* WL_PWRSTATS_TYPE_PCIE */ - uint16 len; /* Up to 4K-1, top 4 bits are reserved */ - pcie_bus_metrics_t pcie; /* stats from pcie bus driver */ -} BWL_POST_PACKED_STRUCT wl_pwr_pcie_stats_t; +/** Bus interface info for PCIE */ +typedef struct wl_pwr_pcie_stats { + uint16 type; /**< WL_PWRSTATS_TYPE_PCIE */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ + pcie_bus_metrics_t pcie; /**< stats from pcie bus driver */ +} wl_pwr_pcie_stats_t; -/* Scan information history per category */ -typedef BWL_PRE_PACKED_STRUCT struct scan_data { - uint32 count; /* Number of scans performed */ - uint32 dur; /* Total time (in us) used */ -} BWL_POST_PACKED_STRUCT scan_data_t; +/** Scan information history per category */ +typedef struct scan_data { + uint32 count; /**< Number of scans performed */ + uint32 dur; /**< Total time (in us) used */ +} scan_data_t; -typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_scan_stats { - uint16 type; /* WL_PWRSTATS_TYPE_SCAN */ - uint16 len; /* Up to 4K-1, top 4 bits are reserved */ +typedef struct wl_pwr_scan_stats { + uint16 type; /**< WL_PWRSTATS_TYPE_SCAN */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ /* Scan history */ - scan_data_t user_scans; /* User-requested scans: (i/e/p)scan */ - scan_data_t assoc_scans; /* Scans initiated by association requests */ - scan_data_t roam_scans; /* Scans initiated by the roam engine */ - scan_data_t pno_scans[8]; /* For future PNO bucketing (BSSID, SSID, etc) */ - 
scan_data_t other_scans; /* Scan engine usage not assigned to the above */ -} BWL_POST_PACKED_STRUCT wl_pwr_scan_stats_t; + scan_data_t user_scans; /**< User-requested scans: (i/e/p)scan */ + scan_data_t assoc_scans; /**< Scans initiated by association requests */ + scan_data_t roam_scans; /**< Scans initiated by the roam engine */ + scan_data_t pno_scans[8]; /**< For future PNO bucketing (BSSID, SSID, etc) */ + scan_data_t other_scans; /**< Scan engine usage not assigned to the above */ +} wl_pwr_scan_stats_t; -typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_connect_stats { - uint16 type; /* WL_PWRSTATS_TYPE_SCAN */ - uint16 len; /* Up to 4K-1, top 4 bits are reserved */ +typedef struct wl_pwr_connect_stats { + uint16 type; /**< WL_PWRSTATS_TYPE_SCAN */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ /* Connection (Association + Key exchange) data */ - uint32 count; /* Number of connections performed */ - uint32 dur; /* Total time (in ms) used */ -} BWL_POST_PACKED_STRUCT wl_pwr_connect_stats_t; + uint32 count; /**< Number of connections performed */ + uint32 dur; /**< Total time (in ms) used */ +} wl_pwr_connect_stats_t; -typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_phy_stats { - uint16 type; /* WL_PWRSTATS_TYPE_PHY */ - uint16 len; /* Up to 4K-1, top 4 bits are reserved */ - uint32 tx_dur; /* TX Active duration in us */ - uint32 rx_dur; /* RX Active duration in us */ -} BWL_POST_PACKED_STRUCT wl_pwr_phy_stats_t; +typedef struct wl_pwr_phy_stats { + uint16 type; /**< WL_PWRSTATS_TYPE_PHY */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ + uint32 tx_dur; /**< TX Active duration in us */ + uint32 rx_dur; /**< RX Active duration in us */ +} wl_pwr_phy_stats_t; +typedef struct wl_mimo_meas_metrics_v1 { + uint16 type; + uint16 len; + /* Total time(us) idle in MIMO RX chain configuration */ + uint32 total_idle_time_mimo; + /* Total time(us) idle in SISO RX chain configuration */ + uint32 total_idle_time_siso; + /* Total receive time (us) in SISO RX 
chain configuration */ + uint32 total_rx_time_siso; + /* Total receive time (us) in MIMO RX chain configuration */ + uint32 total_rx_time_mimo; + /* Total 1-chain transmit time(us) */ + uint32 total_tx_time_1chain; + /* Total 2-chain transmit time(us) */ + uint32 total_tx_time_2chain; + /* Total 3-chain transmit time(us) */ + uint32 total_tx_time_3chain; +} wl_mimo_meas_metrics_v1_t; + +typedef struct wl_mimo_meas_metrics { + uint16 type; + uint16 len; + /* Total time(us) idle in MIMO RX chain configuration */ + uint32 total_idle_time_mimo; + /* Total time(us) idle in SISO RX chain configuration */ + uint32 total_idle_time_siso; + /* Total receive time (us) in SISO RX chain configuration */ + uint32 total_rx_time_siso; + /* Total receive time (us) in MIMO RX chain configuration */ + uint32 total_rx_time_mimo; + /* Total 1-chain transmit time(us) */ + uint32 total_tx_time_1chain; + /* Total 2-chain transmit time(us) */ + uint32 total_tx_time_2chain; + /* Total 3-chain transmit time(us) */ + uint32 total_tx_time_3chain; + /* End of original, OCL fields start here */ + /* Total time(us) idle in ocl mode */ + uint32 total_idle_time_ocl; + /* Total receive time (us) in ocl mode */ + uint32 total_rx_time_ocl; + /* End of OCL fields, internal adjustment fields here */ + /* Total SIFS idle time in MIMO mode */ + uint32 total_sifs_time_mimo; + /* Total SIFS idle time in SISO mode */ + uint32 total_sifs_time_siso; +} wl_mimo_meas_metrics_t; /* ##### End of Power Stats section ##### */ -/* IPV4 Arp offloads for ndis context */ +/** IPV4 Arp offloads for ndis context */ +#include BWL_PRE_PACKED_STRUCT struct hostip_id { struct ipv4_addr ipa; uint8 id; } BWL_POST_PACKED_STRUCT; +#include /* Return values */ -#define ND_REPLY_PEER 0x1 /* Reply was sent to service NS request from peer */ -#define ND_REQ_SINK 0x2 /* Input packet should be discarded */ -#define ND_FORCE_FORWARD 0X3 /* For the dongle to forward req to HOST */ +#define ND_REPLY_PEER 0x1 /**< Reply was sent to service 
NS request from peer */ +#define ND_REQ_SINK 0x2 /**< Input packet should be discarded */ +#define ND_FORCE_FORWARD 0X3 /**< For the dongle to forward req to HOST */ -/* Neighbor Solicitation Response Offload IOVAR param */ +/** Neighbor Solicitation Response Offload IOVAR param */ +#include typedef BWL_PRE_PACKED_STRUCT struct nd_param { struct ipv6_addr host_ip[2]; struct ipv6_addr solicit_ip; @@ -4658,11 +6097,12 @@ typedef BWL_PRE_PACKED_STRUCT struct nd_param { uint8 host_mac[ETHER_ADDR_LEN]; uint32 offload_id; } BWL_POST_PACKED_STRUCT nd_param_t; +#include -typedef BWL_PRE_PACKED_STRUCT struct wl_pfn_roam_thresh { - uint32 pfn_alert_thresh; /* time in ms */ - uint32 roam_alert_thresh; /* time in ms */ -} BWL_POST_PACKED_STRUCT wl_pfn_roam_thresh_t; +typedef struct wl_pfn_roam_thresh { + uint32 pfn_alert_thresh; /**< time in ms */ + uint32 roam_alert_thresh; /**< time in ms */ +} wl_pfn_roam_thresh_t; /* Reasons for wl_pmalert_t */ @@ -4675,17 +6115,16 @@ typedef BWL_PRE_PACKED_STRUCT struct wl_pfn_roam_thresh { #define MIN_PM_ALERT_LEN 9 -/* Data sent in EXCESS_PM_WAKE event */ +/** Data sent in EXCESS_PM_WAKE event */ #define WL_PM_ALERT_VERSION 3 -#define MAX_P2P_BSS_DTIM_PRD 4 - -/* This structure is for version 3; version 2 will be deprecated in by FW */ +/** This structure is for version 3; version 2 will be deprecated in by FW */ +#include typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert { - uint16 version; /* Version = 3 is TLV format */ - uint16 length; /* Length of entire structure */ - uint32 reasons; /* reason(s) for pm_alert */ - uint8 data[1]; /* TLV data, a series of structures, + uint16 version; /**< Version = 3 is TLV format */ + uint16 length; /**< Length of entire structure */ + uint32 reasons; /**< reason(s) for pm_alert */ + uint8 data[1]; /**< TLV data, a series of structures, * each starting with type and length. 
* * Padded as necessary so each section @@ -4696,63 +6135,66 @@ typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert { * valid len values are 0-4095. */ } BWL_POST_PACKED_STRUCT wl_pmalert_t; +#include /* Type values for the data section */ -#define WL_PMALERT_FIXED 0 /* struct wl_pmalert_fixed_t, fixed fields */ -#define WL_PMALERT_PMSTATE 1 /* struct wl_pmalert_pmstate_t, variable */ -#define WL_PMALERT_EVENT_DUR 2 /* struct wl_pmalert_event_dur_t, variable */ -#define WL_PMALERT_UCODE_DBG 3 /* struct wl_pmalert_ucode_dbg_t, variable */ -#define WL_PMALERT_PS_ALLOWED_HIST 4 /* struct wl_pmalert_ps_allowed_history, variable */ -#define WL_PMALERT_EXT_UCODE_DBG 5 /* struct wl_pmalert_ext_ucode_dbg_t, variable */ -#define WL_PMALERT_EPM_START_EVENT_DUR 6 /* struct wl_pmalert_event_dur_t, variable */ +#define WL_PMALERT_FIXED 0 /**< struct wl_pmalert_fixed_t, fixed fields */ +#define WL_PMALERT_PMSTATE 1 /**< struct wl_pmalert_pmstate_t, variable */ +#define WL_PMALERT_EVENT_DUR 2 /**< struct wl_pmalert_event_dur_t, variable */ +#define WL_PMALERT_UCODE_DBG 3 /**< struct wl_pmalert_ucode_dbg_v1, variable */ +#define WL_PMALERT_PS_ALLOWED_HIST 4 /**< struct wl_pmalert_ps_allowed_history, variable */ +#define WL_PMALERT_EXT_UCODE_DBG 5 /**< struct wl_pmalert_ext_ucode_dbg_t, variable */ +#define WL_PMALERT_EPM_START_EVENT_DUR 6 /**< struct wl_pmalert_event_dur_t, variable */ +#define WL_PMALERT_UCODE_DBG_V2 7 /**< struct wl_pmalert_ucode_dbg_v2, variable */ -typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_fixed { - uint16 type; /* WL_PMALERT_FIXED */ - uint16 len; /* Up to 4K-1, top 4 bits are reserved */ - uint32 prev_stats_time; /* msecs */ - uint32 curr_time; /* ms */ - uint32 prev_pm_dur; /* msecs */ - uint32 pm_dur; /* Total sleep time in PM, msecs */ - uint32 prev_mpc_dur; /* msecs */ - uint32 mpc_dur; /* Total sleep time in MPC, msecs */ - uint32 hw_macc; /* HW maccontrol */ - uint32 sw_macc; /* SW maccontrol */ +typedef struct wl_pmalert_fixed { + uint16 type; /**< 
WL_PMALERT_FIXED */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ + uint32 prev_stats_time; /**< msecs */ + uint32 curr_time; /**< ms */ + uint32 prev_pm_dur; /**< msecs */ + uint32 pm_dur; /**< Total sleep time in PM, msecs */ + uint32 prev_mpc_dur; /**< msecs */ + uint32 mpc_dur; /**< Total sleep time in MPC, msecs */ + uint32 hw_macc; /**< HW maccontrol */ + uint32 sw_macc; /**< SW maccontrol */ /* int32 drifts = remote - local; +ve drift -> local-clk slow */ - int32 last_drift; /* Most recent TSF drift from beacon */ - int32 min_drift; /* Min TSF drift from beacon in magnitude */ - int32 max_drift; /* Max TSF drift from beacon in magnitude */ + int32 last_drift; /**< Most recent TSF drift from beacon */ + int32 min_drift; /**< Min TSF drift from beacon in magnitude */ + int32 max_drift; /**< Max TSF drift from beacon in magnitude */ - uint32 avg_drift; /* Avg TSF drift from beacon */ - uint32 drift_cnt; /* Count of drift readings over which avg_drift was computed */ - uint32 frts_time; /* Cumulative ms spent in data frts since driver load */ - uint32 frts_end_cnt; /* No of times frts ended since driver load */ - uint32 prev_frts_dur; /* Data frts duration at start of pm-period */ - uint32 cal_dur; /* Cumulative ms spent in calibration */ - uint32 prev_cal_dur; /* cal duration at start of pm-period */ -} BWL_POST_PACKED_STRUCT wl_pmalert_fixed_t; + uint32 avg_drift; /**< Avg TSF drift from beacon */ + uint32 drift_cnt; /**< Count of drift readings over which avg_drift was computed */ + uint32 frts_time; /**< Cumulative ms spent in data frts since driver load */ + uint32 frts_end_cnt; /**< No of times frts ended since driver load */ + uint32 prev_frts_dur; /**< Data frts duration at start of pm-period */ + uint32 cal_dur; /**< Cumulative ms spent in calibration */ + uint32 prev_cal_dur; /**< cal duration at start of pm-period */ +} wl_pmalert_fixed_t; -typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_pmstate { - uint16 type; /* WL_PMALERT_PMSTATE */ - 
uint16 len; /* Up to 4K-1, top 4 bits are reserved */ +typedef struct wl_pmalert_pmstate { + uint16 type; /**< WL_PMALERT_PMSTATE */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ - uint8 pmwake_idx; /* for stepping through pm_state */ + uint8 pmwake_idx; /**< for stepping through pm_state */ uint8 pad[3]; /* Array of pmstate; len of array is based on tlv len */ wlc_pm_debug_t pmstate[1]; -} BWL_POST_PACKED_STRUCT wl_pmalert_pmstate_t; +} wl_pmalert_pmstate_t; -typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_event_dur { - uint16 type; /* WL_PMALERT_EVENT_DUR */ - uint16 len; /* Up to 4K-1, top 4 bits are reserved */ +typedef struct wl_pmalert_event_dur { + uint16 type; /**< WL_PMALERT_EVENT_DUR */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ /* Array of event_dur, len of array is based on tlv len */ uint32 event_dur[1]; -} BWL_POST_PACKED_STRUCT wl_pmalert_event_dur_t; +} wl_pmalert_event_dur_t; -typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_ucode_dbg { - uint16 type; /* WL_PMALERT_UCODE_DBG */ - uint16 len; /* Up to 4K-1, top 4 bits are reserved */ +#include +BWL_PRE_PACKED_STRUCT struct wl_pmalert_ucode_dbg_v1 { + uint16 type; /* WL_PMALERT_UCODE_DBG */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ uint32 macctrl; uint16 m_p2p_hps; uint32 psm_brc; @@ -4760,186 +6202,260 @@ typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_ucode_dbg { uint16 m_p2p_bss_dtim_prd[MAX_P2P_BSS_DTIM_PRD]; uint32 psmdebug[20]; uint32 phydebug[20]; -} BWL_POST_PACKED_STRUCT wl_pmalert_ucode_dbg_t; + uint16 M_P2P_BSS[3][12]; + uint16 M_P2P_PRE_TBTT[3]; + /* Following is valid only for corerevs<40 */ + uint16 xmtfifordy; + + /* Following 3 are valid only for 11ac corerevs (>=40) */ + uint16 psm_maccommand; + uint16 txe_status1; + uint16 AQMFifoReady; +} BWL_POST_PACKED_STRUCT; +#include + +#include +BWL_PRE_PACKED_STRUCT struct wl_pmalert_ucode_dbg_v2 { + uint16 type; /**< WL_PMALERT_UCODE_DBG_V2 */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ 
+ uint32 macctrl; + uint16 m_p2p_hps; + uint32 psm_brc; + uint32 ifsstat; + uint16 m_p2p_bss_dtim_prd[MAX_P2P_BSS_DTIM_PRD]; + uint32 psmdebug[20]; + uint32 phydebug[20]; + uint16 M_P2P_BSS[3][12]; + uint16 M_P2P_PRE_TBTT[3]; + + /* Following is valid only for corerevs<40 */ + uint16 xmtfifordy; + + /* Following 3 are valid only for 11ac corerevs (>=40) */ + uint16 psm_maccommand; + uint16 txe_status1; + uint32 AQMFifoReady; +} BWL_POST_PACKED_STRUCT; +#include + +typedef struct wlc_ps_debug { + uint32 timestamp; /**< timestamp in millisecond */ + uint32 ps_mask; /**< reason(s) for disallowing ps */ +} wlc_ps_debug_t; + +typedef struct wl_pmalert_ps_allowed_hist { + uint16 type; /**< WL_PMALERT_PS_ALLOWED_HIST */ + uint16 len; /**< Up to 4K-1, top 4 bits are reserved */ + uint32 ps_allowed_start_idx; + /* Array of ps_debug, len of array is based on tlv len */ + wlc_ps_debug_t ps_debug[1]; +} wl_pmalert_ps_allowed_hist_t; /* Structures and constants used for "vndr_ie" IOVar interface */ -#define VNDR_IE_CMD_LEN 4 /* length of the set command string: +#define VNDR_IE_CMD_LEN 4 /**< length of the set command string: * "add", "del" (+ NUL) */ #define VNDR_IE_INFO_HDR_LEN (sizeof(uint32)) +#include typedef BWL_PRE_PACKED_STRUCT struct { - uint32 pktflag; /* bitmask indicating which packet(s) contain this IE */ - vndr_ie_t vndr_ie_data; /* vendor IE data */ + uint32 pktflag; /**< bitmask indicating which packet(s) contain this IE */ + vndr_ie_t vndr_ie_data; /**< vendor IE data */ } BWL_POST_PACKED_STRUCT vndr_ie_info_t; +#include +#include typedef BWL_PRE_PACKED_STRUCT struct { - int iecount; /* number of entries in the vndr_ie_list[] array */ - vndr_ie_info_t vndr_ie_list[1]; /* variable size list of vndr_ie_info_t structs */ + int32 iecount; /**< number of entries in the vndr_ie_list[] array */ + vndr_ie_info_t vndr_ie_list[1]; /**< variable size list of vndr_ie_info_t structs */ } BWL_POST_PACKED_STRUCT vndr_ie_buf_t; +#include +#include typedef BWL_PRE_PACKED_STRUCT 
struct { - char cmd[VNDR_IE_CMD_LEN]; /* vndr_ie IOVar set command : "add", "del" + NUL */ - vndr_ie_buf_t vndr_ie_buffer; /* buffer containing Vendor IE list information */ + char cmd[VNDR_IE_CMD_LEN]; /**< vndr_ie IOVar set command : "add", "del" + NUL */ + vndr_ie_buf_t vndr_ie_buffer; /**< buffer containing Vendor IE list information */ } BWL_POST_PACKED_STRUCT vndr_ie_setbuf_t; +#include -/* tag_ID/length/value_buffer tuple */ +/** tag_ID/length/value_buffer tuple */ +#include typedef BWL_PRE_PACKED_STRUCT struct { uint8 id; uint8 len; uint8 data[1]; } BWL_POST_PACKED_STRUCT tlv_t; +#include +#include typedef BWL_PRE_PACKED_STRUCT struct { - uint32 pktflag; /* bitmask indicating which packet(s) contain this IE */ - tlv_t ie_data; /* IE data */ + uint32 pktflag; /**< bitmask indicating which packet(s) contain this IE */ + tlv_t ie_data; /**< IE data */ } BWL_POST_PACKED_STRUCT ie_info_t; +#include +#include typedef BWL_PRE_PACKED_STRUCT struct { - int iecount; /* number of entries in the ie_list[] array */ - ie_info_t ie_list[1]; /* variable size list of ie_info_t structs */ + int32 iecount; /**< number of entries in the ie_list[] array */ + ie_info_t ie_list[1]; /**< variable size list of ie_info_t structs */ } BWL_POST_PACKED_STRUCT ie_buf_t; +#include +#include typedef BWL_PRE_PACKED_STRUCT struct { - char cmd[VNDR_IE_CMD_LEN]; /* ie IOVar set command : "add" + NUL */ - ie_buf_t ie_buffer; /* buffer containing IE list information */ + char cmd[VNDR_IE_CMD_LEN]; /**< ie IOVar set command : "add" + NUL */ + ie_buf_t ie_buffer; /**< buffer containing IE list information */ } BWL_POST_PACKED_STRUCT ie_setbuf_t; +#include +#include typedef BWL_PRE_PACKED_STRUCT struct { - uint32 pktflag; /* bitmask indicating which packet(s) contain this IE */ - uint8 id; /* IE type */ + uint32 pktflag; /**< bitmask indicating which packet(s) contain this IE */ + uint8 id; /**< IE type */ } BWL_POST_PACKED_STRUCT ie_getbuf_t; +#include /* structures used to define format of wps 
ie data from probe requests */ /* passed up to applications via iovar "prbreq_wpsie" */ -typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_hdr { +typedef struct sta_prbreq_wps_ie_hdr { struct ether_addr staAddr; uint16 ieLen; -} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_hdr_t; +} sta_prbreq_wps_ie_hdr_t; +#include typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_data { sta_prbreq_wps_ie_hdr_t hdr; uint8 ieData[1]; } BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_data_t; +#include +#include typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_list { uint32 totLen; uint8 ieDataList[1]; } BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_list_t; +#include - -#ifdef WLMEDIA_TXFAILEVENT -typedef BWL_PRE_PACKED_STRUCT struct { - char dest[ETHER_ADDR_LEN]; /* destination MAC */ - uint8 prio; /* Packet Priority */ - uint8 flags; /* Flags */ - uint32 tsf_l; /* TSF timer low */ - uint32 tsf_h; /* TSF timer high */ - uint16 rates; /* Main Rates */ - uint16 txstatus; /* TX Status */ -} BWL_POST_PACKED_STRUCT txfailinfo_t; -#endif /* WLMEDIA_TXFAILEVENT */ - +#include typedef BWL_PRE_PACKED_STRUCT struct { uint32 flags; - chanspec_t chanspec; /* txpwr report for this channel */ - chanspec_t local_chanspec; /* channel on which we are associated */ - uint8 local_max; /* local max according to the AP */ - uint8 local_constraint; /* local constraint according to the AP */ - int8 antgain[2]; /* Ant gain for each band - from SROM */ - uint8 rf_cores; /* count of RF Cores being reported */ - uint8 est_Pout[4]; /* Latest tx power out estimate per RF chain */ - uint8 est_Pout_act[4]; /* Latest tx power out estimate per RF chain w/o adjustment */ - uint8 est_Pout_cck; /* Latest CCK tx power out estimate */ - uint8 tx_power_max[4]; /* Maximum target power among all rates */ - uint tx_power_max_rate_ind[4]; /* Index of the rate with the max target power */ - int8 sar; /* SAR limit for display by wl executable */ - int8 channel_bandwidth; /* 20, 40 or 80 MHz bandwidth? 
*/ - uint8 version; /* Version of the data format wlu <--> driver */ - uint8 display_core; /* Displayed curpower core */ - int8 target_offsets[4]; /* Target power offsets for current rate per core */ - uint32 last_tx_ratespec; /* Ratespec for last transmition */ - uint user_target; /* user limit */ - uint32 ppr_len; /* length of each ppr serialization buffer */ + chanspec_t chanspec; /**< txpwr report for this channel */ + chanspec_t local_chanspec; /**< channel on which we are associated */ + uint8 local_max; /**< local max according to the AP */ + uint8 local_constraint; /**< local constraint according to the AP */ + int8 antgain[2]; /**< Ant gain for each band - from SROM */ + uint8 rf_cores; /**< count of RF Cores being reported */ + uint8 est_Pout[4]; /**< Latest tx power out estimate per RF chain */ + uint8 est_Pout_act[4]; /**< Latest tx power out estimate per RF chain w/o adjustment */ + uint8 est_Pout_cck; /**< Latest CCK tx power out estimate */ + uint8 tx_power_max[4]; /**< Maximum target power among all rates */ + uint32 tx_power_max_rate_ind[4]; /**< Index of the rate with the max target power */ + int8 sar; /**< SAR limit for display by wl executable */ + int8 channel_bandwidth; /**< 20, 40 or 80 MHz bandwidth? 
*/ + uint8 version; /**< Version of the data format wlu <--> driver */ + uint8 display_core; /**< Displayed curpower core */ + int8 target_offsets[4]; /**< Target power offsets for current rate per core */ + uint32 last_tx_ratespec; /**< Ratespec for last transmition */ + uint32 user_target; /**< user limit */ + uint32 ppr_len; /**< length of each ppr serialization buffer */ int8 SARLIMIT[MAX_STREAMS_SUPPORTED]; - uint8 pprdata[1]; /* ppr serialization buffer */ + uint8 pprdata[1]; /**< ppr serialization buffer */ } BWL_POST_PACKED_STRUCT tx_pwr_rpt_t; +#include +#include typedef BWL_PRE_PACKED_STRUCT struct { struct ipv4_addr ipv4_addr; struct ether_addr nexthop; } BWL_POST_PACKED_STRUCT ibss_route_entry_t; +#include + +#include typedef BWL_PRE_PACKED_STRUCT struct { uint32 num_entry; ibss_route_entry_t route_entry[1]; } BWL_POST_PACKED_STRUCT ibss_route_tbl_t; +#include #define MAX_IBSS_ROUTE_TBL_ENTRY 64 #define TXPWR_TARGET_VERSION 0 +#include typedef BWL_PRE_PACKED_STRUCT struct { - int32 version; /* version number */ - chanspec_t chanspec; /* txpwr report for this channel */ - int8 txpwr[WL_STA_ANT_MAX]; /* Max tx target power, in qdb */ - uint8 rf_cores; /* count of RF Cores being reported */ + int32 version; /**< version number */ + chanspec_t chanspec; /**< txpwr report for this channel */ + int8 txpwr[WL_STA_ANT_MAX]; /**< Max tx target power, in qdb */ + uint8 rf_cores; /**< count of RF Cores being reported */ } BWL_POST_PACKED_STRUCT txpwr_target_max_t; +#include #define BSS_PEER_INFO_PARAM_CUR_VER 0 -/* Input structure for IOV_BSS_PEER_INFO */ +/** Input structure for IOV_BSS_PEER_INFO */ +#include typedef BWL_PRE_PACKED_STRUCT struct { uint16 version; - struct ether_addr ea; /* peer MAC address */ + struct ether_addr ea; /**< peer MAC address */ } BWL_POST_PACKED_STRUCT bss_peer_info_param_t; +#include #define BSS_PEER_INFO_CUR_VER 0 +#include typedef BWL_PRE_PACKED_STRUCT struct { uint16 version; struct ether_addr ea; int32 rssi; - uint32 tx_rate; /* 
current tx rate */ - uint32 rx_rate; /* current rx rate */ - wl_rateset_t rateset; /* rateset in use */ - uint32 age; /* age in seconds */ + uint32 tx_rate; /**< current tx rate */ + uint32 rx_rate; /**< current rx rate */ + wl_rateset_t rateset; /**< rateset in use */ + uint32 age; /**< age in seconds */ } BWL_POST_PACKED_STRUCT bss_peer_info_t; +#include #define BSS_PEER_LIST_INFO_CUR_VER 0 +#include typedef BWL_PRE_PACKED_STRUCT struct { uint16 version; - uint16 bss_peer_info_len; /* length of bss_peer_info_t */ - uint32 count; /* number of peer info */ - bss_peer_info_t peer_info[1]; /* peer info */ + uint16 bss_peer_info_len; /**< length of bss_peer_info_t */ + uint32 count; /**< number of peer info */ + bss_peer_info_t peer_info[1]; /**< peer info */ } BWL_POST_PACKED_STRUCT bss_peer_list_info_t; +#include #define BSS_PEER_LIST_INFO_FIXED_LEN OFFSETOF(bss_peer_list_info_t, peer_info) #define AIBSS_BCN_FORCE_CONFIG_VER_0 0 -/* structure used to configure AIBSS beacon force xmit */ +/** structure used to configure AIBSS beacon force xmit */ +#include typedef BWL_PRE_PACKED_STRUCT struct { uint16 version; uint16 len; - uint32 initial_min_bcn_dur; /* dur in ms to check a bcn in bcn_flood period */ - uint32 min_bcn_dur; /* dur in ms to check a bcn after bcn_flood period */ - uint32 bcn_flood_dur; /* Initial bcn xmit period in ms */ + uint32 initial_min_bcn_dur; /**< dur in ms to check a bcn in bcn_flood period */ + uint32 min_bcn_dur; /**< dur in ms to check a bcn after bcn_flood period */ + uint32 bcn_flood_dur; /**< Initial bcn xmit period in ms */ } BWL_POST_PACKED_STRUCT aibss_bcn_force_config_t; +#include #define AIBSS_TXFAIL_CONFIG_VER_0 0 #define AIBSS_TXFAIL_CONFIG_VER_1 1 #define AIBSS_TXFAIL_CONFIG_CUR_VER AIBSS_TXFAIL_CONFIG_VER_1 -/* structure used to configure aibss tx fail event */ +/** structure used to configure aibss tx fail event */ +#include typedef BWL_PRE_PACKED_STRUCT struct { uint16 version; uint16 len; - uint32 bcn_timeout; /* dur in 
seconds to receive 1 bcn */ - uint32 max_tx_retry; /* no of consecutive no acks to send txfail event */ - uint32 max_atim_failure; /* no of consecutive atim failure */ + uint32 bcn_timeout; /**< dur in seconds to receive 1 bcn */ + uint32 max_tx_retry; /**< no of consecutive no acks to send txfail event */ + uint32 max_atim_failure; /**< no of consecutive atim failure */ } BWL_POST_PACKED_STRUCT aibss_txfail_config_t; +#include +#include typedef BWL_PRE_PACKED_STRUCT struct wl_aibss_if { uint16 version; uint16 len; @@ -4947,23 +6463,45 @@ typedef BWL_PRE_PACKED_STRUCT struct wl_aibss_if { struct ether_addr addr; chanspec_t chspec; } BWL_POST_PACKED_STRUCT wl_aibss_if_t; +#include +#include typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_entry { struct ipv4_addr ip_addr; struct ether_addr nexthop; } BWL_POST_PACKED_STRUCT wlc_ipfo_route_entry_t; +#include +#include typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_tbl { uint32 num_entry; wlc_ipfo_route_entry_t route_entry[1]; } BWL_POST_PACKED_STRUCT wlc_ipfo_route_tbl_t; +#include + +/* Version of wlc_btc_stats_t structure. + * Increment whenever a change is made to wlc_btc_stats_t + */ +#define BTCX_STATS_VER 2 + +typedef struct wlc_btc_stats { + uint16 version; /* version number of struct */ + uint16 valid; /* Size of this struct */ + uint32 stats_update_timestamp; /* tStamp when data is updated. 
*/ + uint32 btc_status; /* Hybrid/TDM indicator: Bit2:Hybrid, Bit1:TDM,Bit0:CoexEnabled */ + uint32 bt_req_type_map; /* BT Antenna Req types since last stats sample */ + uint32 bt_req_cnt; /* #BT antenna requests since last stats sampl */ + uint32 bt_gnt_cnt; /* #BT antenna grants since last stats sample */ + uint32 bt_gnt_dur; /* usec BT owns antenna since last stats sample */ + uint16 bt_abort_cnt; /* #Times WL was preempted due to BT since WL up */ + uint16 bt_rxf1ovfl_cnt; /* #Time PSNULL retry count exceeded since WL up */ + uint16 bt_latency_cnt; /* #Time ucode high latency detected since WL up */ + uint16 rsvd; /* pad to align struct to 32bit bndry */ +} wlc_btc_stats_t; #define WL_IPFO_ROUTE_TBL_FIXED_LEN 4 #define WL_MAX_IPFO_ROUTE_TBL_ENTRY 64 -/* no strict structure packing */ -#include - /* Global ASSERT Logging */ #define ASSERTLOG_CUR_VER 0x0100 #define MAX_ASSRTSTR_LEN 64 @@ -4971,7 +6509,7 @@ typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_tbl { typedef struct assert_record { uint32 time; uint8 seq_num; - char str[MAX_ASSRTSTR_LEN]; + int8 str[MAX_ASSRTSTR_LEN]; } assert_record_t; typedef struct assertlog_results { @@ -4983,40 +6521,69 @@ typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_tbl { #define LOGRRC_FIX_LEN 8 #define IOBUF_ALLOWED_NUM_OF_LOGREC(type, len) ((len - LOGRRC_FIX_LEN)/sizeof(type)) - - +/* BCMWAPI_WAI */ +#define IV_LEN 16 + struct wapi_sta_msg_t + { + uint16 msg_type; + uint16 datalen; + uint8 vap_mac[6]; + uint8 reserve_data1[2]; + uint8 sta_mac[6]; + uint8 reserve_data2[2]; + uint8 gsn[IV_LEN]; + uint8 wie[256]; + }; +/* #endif BCMWAPI_WAI */ /* chanim acs record */ typedef struct { - bool valid; + uint8 valid; uint8 trigger; chanspec_t selected_chspc; int8 bgnoise; uint32 glitch_cnt; uint8 ccastats; uint8 chan_idle; - uint timestamp; + uint32 timestamp; } chanim_acs_record_t; typedef struct { chanim_acs_record_t acs_record[CHANIM_ACS_RECORD]; uint8 count; - uint timestamp; + uint32 timestamp; } wl_acs_record_t; - 
typedef struct chanim_stats { - uint32 glitchcnt; /* normalized as per second count */ - uint32 badplcp; /* normalized as per second count */ - uint8 ccastats[CCASTATS_MAX]; /* normalized as 0-255 */ - int8 bgnoise; /* background noise level (in dBm) */ - chanspec_t chanspec; /* ctrl chanspec of the interface */ - uint32 timestamp; /* time stamp at which the stats are collected */ - uint32 bphy_glitchcnt; /* normalized as per second count */ - uint32 bphy_badplcp; /* normalized as per second count */ - uint8 chan_idle; /* normalized as 0~255 */ - } chanim_stats_t; +#define WL_CHANIM_STATS_V2 2 +#define CCASTATS_V2_MAX 9 +typedef struct chanim_stats_v2 { + uint32 glitchcnt; /**< normalized as per second count */ + uint32 badplcp; /**< normalized as per second count */ + uint8 ccastats[CCASTATS_V2_MAX]; /**< normalized as 0-255 */ + int8 bgnoise; /**< background noise level (in dBm) */ + chanspec_t chanspec; /**< ctrl chanspec of the interface */ + uint32 timestamp; /**< time stamp at which the stats are collected */ + uint32 bphy_glitchcnt; /**< normalized as per second count */ + uint32 bphy_badplcp; /**< normalized as per second count */ + uint8 chan_idle; /**< normalized as 0~255 */ + uint8 PAD[3]; +} chanim_stats_v2_t; -#define WL_CHANIM_STATS_VERSION 2 +typedef struct chanim_stats { + uint32 glitchcnt; /**< normalized as per second count */ + uint32 badplcp; /**< normalized as per second count */ + uint8 ccastats[CCASTATS_MAX]; /**< normalized as 0-255 */ + int8 bgnoise; /**< background noise level (in dBm) */ + uint8 pad_1[11 - CCASTATS_MAX]; + chanspec_t chanspec; /**< ctrl chanspec of the interface */ + uint8 pad_2[2]; + uint32 timestamp; /**< time stamp at which the stats are collected */ + uint32 bphy_glitchcnt; /**< normalized as per second count */ + uint32 bphy_badplcp; /**< normalized as per second count */ + uint8 chan_idle; /**< normalized as 0~255 */ + uint8 PAD[3]; +} chanim_stats_t; +#define WL_CHANIM_STATS_VERSION 3 typedef struct { uint32 
buflen; uint32 version; @@ -5026,10 +6593,10 @@ typedef struct { #define WL_CHANIM_STATS_FIXED_LEN OFFSETOF(wl_chanim_stats_t, stats) -/* Noise measurement metrics. */ +/** Noise measurement metrics. */ #define NOISE_MEASURE_KNOISE 0x1 -/* scb probe parameter */ +/** scb probe parameter */ typedef struct { uint32 scb_timeout; uint32 scb_activity_time; @@ -5039,20 +6606,21 @@ typedef struct { /* structure/defines for selective mgmt frame (smf) stats support */ #define SMFS_VERSION 1 -/* selected mgmt frame (smf) stats element */ +/** selected mgmt frame (smf) stats element */ typedef struct wl_smfs_elem { uint32 count; - uint16 code; /* SC or RC code */ + uint16 code; /**< SC or RC code */ + uint8 PAD[2]; } wl_smfs_elem_t; typedef struct wl_smf_stats { uint32 version; - uint16 length; /* reserved for future usage */ + uint16 length; /**< reserved for future usage */ uint8 type; uint8 codetype; uint32 ignored_cnt; uint32 malformed_cnt; - uint32 count_total; /* count included the interested group */ + uint32 count_total; /**< count included the interested group */ wl_smfs_elem_t elem[1]; } wl_smf_stats_t; @@ -5074,7 +6642,7 @@ typedef enum smfs_type { SMFS_TYPE_MAX } smfs_type_t; -#ifdef PHYMON +/* #ifdef PHYMON */ #define PHYMON_VERSION 1 @@ -5091,84 +6659,88 @@ typedef struct wl_phycal_core_state { int8 tx_iqlocal_fi; int8 tx_iqlocal_fq; - /* Rx IQ calibration coeffs */ + /** Rx IQ calibration coeffs */ int16 rx_iqcal_a; int16 rx_iqcal_b; - uint8 tx_iqlocal_pwridx; /* Tx Power Index for Tx IQ/LO calibration */ - uint32 papd_epsilon_table[64]; /* PAPD epsilon table */ - int16 papd_epsilon_offset; /* PAPD epsilon offset */ - uint8 curr_tx_pwrindex; /* Tx power index */ - int8 idle_tssi; /* Idle TSSI */ - int8 est_tx_pwr; /* Estimated Tx Power (dB) */ - int8 est_rx_pwr; /* Estimated Rx Power (dB) from RSSI */ - uint16 rx_gaininfo; /* Rx gain applied on last Rx pkt */ - uint16 init_gaincode; /* initgain required for ACI */ + uint8 tx_iqlocal_pwridx; /**< Tx Power Index 
for Tx IQ/LO calibration */ + uint8 PAD[3]; + uint32 papd_epsilon_table[64]; /**< PAPD epsilon table */ + int16 papd_epsilon_offset; /**< PAPD epsilon offset */ + uint8 curr_tx_pwrindex; /**< Tx power index */ + int8 idle_tssi; /**< Idle TSSI */ + int8 est_tx_pwr; /**< Estimated Tx Power (dB) */ + int8 est_rx_pwr; /**< Estimated Rx Power (dB) from RSSI */ + uint16 rx_gaininfo; /**< Rx gain applied on last Rx pkt */ + uint16 init_gaincode; /**< initgain required for ACI */ int8 estirr_tx; int8 estirr_rx; - } wl_phycal_core_state_t; typedef struct wl_phycal_state { - int version; - int8 num_phy_cores; /* number of cores */ - int8 curr_temperature; /* on-chip temperature sensor reading */ - chanspec_t chspec; /* channspec for this state */ - bool aci_state; /* ACI state: ON/OFF */ - uint16 crsminpower; /* crsminpower required for ACI */ - uint16 crsminpowerl; /* crsminpowerl required for ACI */ - uint16 crsminpoweru; /* crsminpoweru required for ACI */ + int32 version; + int8 num_phy_cores; /**< number of cores */ + int8 curr_temperature; /**< on-chip temperature sensor reading */ + chanspec_t chspec; /**< channspec for this state */ + uint8 aci_state; /**< ACI state: ON/OFF */ + uint8 PAD; + uint16 crsminpower; /**< crsminpower required for ACI */ + uint16 crsminpowerl; /**< crsminpowerl required for ACI */ + uint16 crsminpoweru; /**< crsminpoweru required for ACI */ wl_phycal_core_state_t phycal_core[1]; } wl_phycal_state_t; #define WL_PHYCAL_STAT_FIXED_LEN OFFSETOF(wl_phycal_state_t, phycal_core) -#endif /* PHYMON */ +/* endif PHYMON */ -/* discovery state */ +/** discovery state */ typedef struct wl_p2p_disc_st { - uint8 state; /* see state */ - chanspec_t chspec; /* valid in listen state */ - uint16 dwell; /* valid in listen state, in ms */ + uint8 state; /**< see state */ + uint8 PAD; + chanspec_t chspec; /**< valid in listen state */ + uint16 dwell; /**< valid in listen state, in ms */ } wl_p2p_disc_st_t; -/* scan request */ +/** scan request */ typedef struct 
wl_p2p_scan { - uint8 type; /* 'S' for WLC_SCAN, 'E' for "escan" */ + uint8 type; /**< 'S' for WLC_SCAN, 'E' for "escan" */ uint8 reserved[3]; /* scan or escan parms... */ } wl_p2p_scan_t; -/* i/f request */ +/** i/f request */ typedef struct wl_p2p_if { struct ether_addr addr; - uint8 type; /* see i/f type */ - chanspec_t chspec; /* for p2p_ifadd GO */ + uint8 type; /**< see i/f type */ + uint8 PAD; + chanspec_t chspec; /**< for p2p_ifadd GO */ } wl_p2p_if_t; -/* i/f query */ +/** i/f query */ typedef struct wl_p2p_ifq { - uint bsscfgidx; + uint32 bsscfgidx; char ifname[BCM_MSG_IFNAME_MAX]; } wl_p2p_ifq_t; -/* OppPS & CTWindow */ +/** OppPS & CTWindow */ typedef struct wl_p2p_ops { - uint8 ops; /* 0: disable 1: enable */ - uint8 ctw; /* >= 10 */ + uint8 ops; /**< 0: disable 1: enable */ + uint8 ctw; /**< >= 10 */ } wl_p2p_ops_t; -/* absence and presence request */ +/** absence and presence request */ typedef struct wl_p2p_sched_desc { uint32 start; uint32 interval; uint32 duration; - uint32 count; /* see count */ + uint32 count; /**< see count */ } wl_p2p_sched_desc_t; typedef struct wl_p2p_sched { - uint8 type; /* see schedule type */ - uint8 action; /* see schedule action */ - uint8 option; /* see schedule option */ + uint8 type; /**< see schedule type */ + uint8 action; /**< see schedule action */ + uint8 option; /**< see schedule option */ + uint8 PAD; wl_p2p_sched_desc_t desc[1]; } wl_p2p_sched_t; @@ -5178,59 +6750,53 @@ typedef struct wl_p2p_wfds_hash { uint8 wfds_hash[6]; uint8 name_len; uint8 service_name[MAX_WFDS_SVC_NAME_LEN]; + uint8 PAD[3]; } wl_p2p_wfds_hash_t; typedef struct wl_bcmdcs_data { - uint reason; + uint32 reason; chanspec_t chspec; + uint8 PAD[2]; } wl_bcmdcs_data_t; - - -/* NAT configuration */ +/* ifdef EXT_STA */ +/** + * Format of IHV data passed to OID_DOT11_NIC_SPECIFIC_EXTENSION. 
+ */ +typedef struct _IHV_NIC_SPECIFIC_EXTENSION { + uint8 oui[4]; /**< vendor specific OUI value */ + uint32 event; /**< event code */ + uint8 ihvData[1]; /**< ihv data */ +} IHV_NIC_SPECIFIC_EXTENSION, *PIHV_NIC_SPECIFIC_EXTENSION; +#define IHV_NIC_SPECIFIC_EXTENTION_HEADER OFFSETOF(IHV_NIC_SPECIFIC_EXTENSION, ihvData[0]) +/* EXT_STA */ +/** NAT configuration */ typedef struct { - uint32 ipaddr; /* interface ip address */ - uint32 ipaddr_mask; /* interface ip address mask */ - uint32 ipaddr_gateway; /* gateway ip address */ - uint8 mac_gateway[6]; /* gateway mac address */ - uint32 ipaddr_dns; /* DNS server ip address, valid only for public if */ - uint8 mac_dns[6]; /* DNS server mac address, valid only for public if */ - uint8 GUID[38]; /* interface GUID */ + uint32 ipaddr; /**< interface ip address */ + uint32 ipaddr_mask; /**< interface ip address mask */ + uint32 ipaddr_gateway; /**< gateway ip address */ + uint8 mac_gateway[6]; /**< gateway mac address */ + uint8 PAD[2]; + uint32 ipaddr_dns; /**< DNS server ip address, valid only for public if */ + uint8 mac_dns[6]; /**< DNS server mac address, valid only for public if */ + uint8 GUID[38]; /**< interface GUID */ } nat_if_info_t; typedef struct { - uint op; /* operation code */ - bool pub_if; /* set for public if, clear for private if */ - nat_if_info_t if_info; /* interface info */ + uint32 op; /**< operation code */ + uint8 pub_if; /**< set for public if, clear for private if */ + uint8 PAD[3]; + nat_if_info_t if_info; /**< interface info */ } nat_cfg_t; typedef struct { - int state; /* NAT state returned */ + int32 state; /**< NAT state returned */ } nat_state_t; - -#define BTA_STATE_LOG_SZ 64 - -/* BTAMP Statemachine states */ -enum { - HCIReset = 1, - HCIReadLocalAMPInfo, - HCIReadLocalAMPASSOC, - HCIWriteRemoteAMPASSOC, - HCICreatePhysicalLink, - HCIAcceptPhysicalLinkRequest, - HCIDisconnectPhysicalLink, - HCICreateLogicalLink, - HCIAcceptLogicalLink, - HCIDisconnectLogicalLink, - HCILogicalLinkCancel, 
- HCIAmpStateChange, - HCIWriteLogicalLinkAcceptTimeout -}; - typedef struct flush_txfifo { uint32 txfifobmp; uint32 hwtxfifoflush; struct ether_addr ea; + uint8 PAD[2]; } flush_txfifo_t; enum { @@ -5242,42 +6808,99 @@ enum { SPATIAL_MODE_MAX_IDX }; -#define WLC_TXCORE_MAX 4 /* max number of txcore supports */ -#define WLC_SUBBAND_MAX 4 /* max number of sub-band supports */ +#define WLC_TXCORE_MAX 4 /**< max number of txcore supports */ +#define WLC_TXCORE_MAX_OLD 2 /**< backward compatibilty for TXCAL */ +#define WLC_SUBBAND_MAX 4 /**< max number of sub-band supports */ typedef struct { uint8 band2g[WLC_TXCORE_MAX]; uint8 band5g[WLC_SUBBAND_MAX][WLC_TXCORE_MAX]; } sar_limit_t; -#define WLC_TXCAL_CORE_MAX 2 /* max number of txcore supports for txcal */ #define MAX_NUM_TXCAL_MEAS 128 #define MAX_NUM_PWR_STEP 40 -#define TXCAL_ROUNDING_FIX 1 -typedef struct wl_txcal_meas { -#ifdef TXCAL_ROUNDING_FIX - uint16 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS]; -#else - uint8 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS]; -#endif /* TXCAL_ROUNDING_FIX */ - int16 pwr[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS]; +#define TXCAL_IOVAR_VERSION 0x1 +typedef struct wl_txcal_meas_percore { + uint16 tssi[MAX_NUM_TXCAL_MEAS]; + int16 pwr[MAX_NUM_TXCAL_MEAS]; +} wl_txcal_meas_percore_t; + +typedef struct wl_txcal_meas_ncore { + uint16 version; uint8 valid_cnt; + uint8 num_core; + wl_txcal_meas_percore_t txcal_percore[1]; +} wl_txcal_meas_ncore_t; + +typedef struct wl_txcal_power_tssi_percore { + int16 tempsense; + int16 pwr_start; + uint8 pwr_start_idx; + uint8 num_entries; + uint16 pad; + uint8 tssi[MAX_NUM_PWR_STEP]; +} wl_txcal_power_tssi_percore_t; + +typedef struct wl_txcal_power_tssi_ncore { + uint16 version; + uint8 set_core; + uint8 channel; + uint8 num_core; + uint8 gen_tbl; + uint16 pad; + wl_txcal_power_tssi_percore_t tssi_percore[1]; +} wl_txcal_power_tssi_ncore_t; + +typedef struct wl_txcal_meas { + uint16 tssi[WLC_TXCORE_MAX][MAX_NUM_TXCAL_MEAS]; + int16 
pwr[WLC_TXCORE_MAX][MAX_NUM_TXCAL_MEAS]; + uint8 valid_cnt; + uint8 PAD; } wl_txcal_meas_t; +typedef struct wl_txcal_meas_old { + uint16 tssi[WLC_TXCORE_MAX_OLD][MAX_NUM_TXCAL_MEAS]; + int16 pwr[WLC_TXCORE_MAX_OLD][MAX_NUM_TXCAL_MEAS]; + uint8 valid_cnt; + uint8 PAD; +} wl_txcal_meas_old_t; + typedef struct wl_txcal_power_tssi { uint8 set_core; uint8 channel; - int16 tempsense[WLC_TXCAL_CORE_MAX]; - int16 pwr_start[WLC_TXCAL_CORE_MAX]; - uint8 pwr_start_idx[WLC_TXCAL_CORE_MAX]; - uint8 num_entries[WLC_TXCAL_CORE_MAX]; - uint8 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_PWR_STEP]; - bool gen_tbl; + int16 tempsense[WLC_TXCORE_MAX]; + int16 pwr_start[WLC_TXCORE_MAX]; + uint8 pwr_start_idx[WLC_TXCORE_MAX]; + uint8 num_entries[WLC_TXCORE_MAX]; + uint8 tssi[WLC_TXCORE_MAX][MAX_NUM_PWR_STEP]; + uint8 gen_tbl; + uint8 PAD; } wl_txcal_power_tssi_t; -/* IOVAR "mempool" parameter. Used to retrieve a list of memory pool statistics. */ +typedef struct wl_txcal_power_tssi_old { + uint8 set_core; + uint8 channel; + int16 tempsense[WLC_TXCORE_MAX_OLD]; + int16 pwr_start[WLC_TXCORE_MAX_OLD]; + uint8 pwr_start_idx[WLC_TXCORE_MAX_OLD]; + uint8 num_entries[WLC_TXCORE_MAX_OLD]; + uint8 tssi[WLC_TXCORE_MAX_OLD][MAX_NUM_PWR_STEP]; + uint8 gen_tbl; + uint8 PAD; +} wl_txcal_power_tssi_old_t; + +typedef struct wl_olpc_pwr { + uint16 version; + uint8 core; + uint8 channel; + int16 tempsense; + uint8 olpc_idx; + uint8 pad; +} wl_olpc_pwr_t; + +/** IOVAR "mempool" parameter. Used to retrieve a list of memory pool statistics. */ typedef struct wl_mempool_stats { - int num; /* Number of memory pools */ - bcm_mp_stats_t s[1]; /* Variable array of memory pool stats. */ + int32 num; /**< Number of memory pools */ + bcm_mp_stats_t s[1]; /**< Variable array of memory pool stats. 
*/ } wl_mempool_stats_t; typedef struct { @@ -5286,46 +6909,47 @@ typedef struct { uint32 ipaddr_gateway; } nwoe_ifconfig_t; -/* Traffic management priority classes */ +/** Traffic management priority classes */ typedef enum trf_mgmt_priority_class { - trf_mgmt_priority_low = 0, /* Maps to 802.1p BK */ - trf_mgmt_priority_medium = 1, /* Maps to 802.1p BE */ - trf_mgmt_priority_high = 2, /* Maps to 802.1p VI */ - trf_mgmt_priority_nochange = 3, /* do not update the priority */ + trf_mgmt_priority_low = 0, /**< Maps to 802.1p BK */ + trf_mgmt_priority_medium = 1, /**< Maps to 802.1p BE */ + trf_mgmt_priority_high = 2, /**< Maps to 802.1p VI */ + trf_mgmt_priority_nochange = 3, /**< do not update the priority */ trf_mgmt_priority_invalid = (trf_mgmt_priority_nochange + 1) } trf_mgmt_priority_class_t; -/* Traffic management configuration parameters */ +/** Traffic management configuration parameters */ typedef struct trf_mgmt_config { - uint32 trf_mgmt_enabled; /* 0 - disabled, 1 - enabled */ - uint32 flags; /* See TRF_MGMT_FLAG_xxx defines */ - uint32 host_ip_addr; /* My IP address to determine subnet */ - uint32 host_subnet_mask; /* My subnet mask */ - uint32 downlink_bandwidth; /* In units of kbps */ - uint32 uplink_bandwidth; /* In units of kbps */ - uint32 min_tx_bandwidth[TRF_MGMT_MAX_PRIORITIES]; /* Minimum guaranteed tx bandwidth */ - uint32 min_rx_bandwidth[TRF_MGMT_MAX_PRIORITIES]; /* Minimum guaranteed rx bandwidth */ + uint32 trf_mgmt_enabled; /**< 0 - disabled, 1 - enabled */ + uint32 flags; /**< See TRF_MGMT_FLAG_xxx defines */ + uint32 host_ip_addr; /**< My IP address to determine subnet */ + uint32 host_subnet_mask; /**< My subnet mask */ + uint32 downlink_bandwidth; /**< In units of kbps */ + uint32 uplink_bandwidth; /**< In units of kbps */ + uint32 min_tx_bandwidth[TRF_MGMT_MAX_PRIORITIES]; /**< Minimum guaranteed tx bandwidth */ + uint32 min_rx_bandwidth[TRF_MGMT_MAX_PRIORITIES]; /**< Minimum guaranteed rx bandwidth */ } trf_mgmt_config_t; -/* 
Traffic management filter */ +/** Traffic management filter */ typedef struct trf_mgmt_filter { - struct ether_addr dst_ether_addr; /* His L2 address */ - uint32 dst_ip_addr; /* His IP address */ - uint16 dst_port; /* His L4 port */ - uint16 src_port; /* My L4 port */ - uint16 prot; /* L4 protocol (only TCP or UDP) */ - uint16 flags; /* TBD. For now, this must be zero. */ - trf_mgmt_priority_class_t priority; /* Priority for filtered packets */ - uint32 dscp; /* DSCP */ + struct ether_addr dst_ether_addr; /**< His L2 address */ + uint8 PAD[2]; + uint32 dst_ip_addr; /**< His IP address */ + uint16 dst_port; /**< His L4 port */ + uint16 src_port; /**< My L4 port */ + uint16 prot; /**< L4 protocol (only TCP or UDP) */ + uint16 flags; /**< TBD. For now, this must be zero. */ + trf_mgmt_priority_class_t priority; /**< Priority for filtered packets */ + uint32 dscp; /**< DSCP */ } trf_mgmt_filter_t; -/* Traffic management filter list (variable length) */ +/** Traffic management filter list (variable length) */ typedef struct trf_mgmt_filter_list { uint32 num_filters; trf_mgmt_filter_t filter[1]; } trf_mgmt_filter_list_t; -/* Traffic management global info used for all queues */ +/** Traffic management global info used for all queues */ typedef struct trf_mgmt_global_info { uint32 maximum_bytes_per_second; uint32 maximum_bytes_per_sampling_period; @@ -5334,18 +6958,18 @@ typedef struct trf_mgmt_global_info { uint32 total_unused_bytes_per_sampling_period; } trf_mgmt_global_info_t; -/* Traffic management shaping info per priority queue */ +/** Traffic management shaping info per priority queue */ typedef struct trf_mgmt_shaping_info { uint32 gauranteed_bandwidth_percentage; uint32 guaranteed_bytes_per_second; uint32 guaranteed_bytes_per_sampling_period; uint32 num_bytes_produced_per_second; uint32 num_bytes_consumed_per_second; - uint32 num_queued_packets; /* Number of packets in queue */ - uint32 num_queued_bytes; /* Number of bytes in queue */ + uint32 num_queued_packets; 
/**< Number of packets in queue */ + uint32 num_queued_bytes; /**< Number of bytes in queue */ } trf_mgmt_shaping_info_t; -/* Traffic management shaping info array */ +/** Traffic management shaping info array */ typedef struct trf_mgmt_shaping_info_array { trf_mgmt_global_info_t tx_global_shaping_info; trf_mgmt_shaping_info_t tx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES]; @@ -5354,75 +6978,113 @@ typedef struct trf_mgmt_shaping_info_array { } trf_mgmt_shaping_info_array_t; -/* Traffic management statistical counters */ +/** Traffic management statistical counters */ typedef struct trf_mgmt_stats { - uint32 num_processed_packets; /* Number of packets processed */ - uint32 num_processed_bytes; /* Number of bytes processed */ - uint32 num_discarded_packets; /* Number of packets discarded from queue */ + uint32 num_processed_packets; /**< Number of packets processed */ + uint32 num_processed_bytes; /**< Number of bytes processed */ + uint32 num_discarded_packets; /**< Number of packets discarded from queue */ } trf_mgmt_stats_t; -/* Traffic management statisics array */ +/** Traffic management statistics array */ typedef struct trf_mgmt_stats_array { trf_mgmt_stats_t tx_queue_stats[TRF_MGMT_MAX_PRIORITIES]; trf_mgmt_stats_t rx_queue_stats[TRF_MGMT_MAX_PRIORITIES]; } trf_mgmt_stats_array_t; +/* Both powersel_params and lpc_params are used by IOVAR lpc_params. + * The powersel_params is replaced by lpc_params in later WLC versions. 
+ */ typedef struct powersel_params { /* LPC Params exposed via IOVAR */ - int32 tp_ratio_thresh; /* Throughput ratio threshold */ - uint8 rate_stab_thresh; /* Thresh for rate stability based on nupd */ - uint8 pwr_stab_thresh; /* Number of successes before power step down */ - uint8 pwr_sel_exp_time; /* Time lapse for expiry of database */ + int32 tp_ratio_thresh; /**< Throughput ratio threshold */ + uint8 rate_stab_thresh; /**< Thresh for rate stability based on nupd */ + uint8 pwr_stab_thresh; /**< Number of successes before power step down */ + uint8 pwr_sel_exp_time; /**< Time lapse for expiry of database */ + uint8 PAD; } powersel_params_t; +#define WL_LPC_PARAMS_VER_2 2 +#define WL_LPC_PARAMS_CURRENT_VERSION WL_LPC_PARAMS_VER_2 + typedef struct lpc_params { + uint16 version; + uint16 length; /* LPC Params exposed via IOVAR */ - uint8 rate_stab_thresh; /* Thresh for rate stability based on nupd */ - uint8 pwr_stab_thresh; /* Number of successes before power step down */ - uint8 lpc_exp_time; /* Time lapse for expiry of database */ - uint8 pwrup_slow_step; /* Step size for slow step up */ - uint8 pwrup_fast_step; /* Step size for fast step up */ - uint8 pwrdn_slow_step; /* Step size for slow step down */ + uint8 rate_stab_thresh; /**< Thresh for rate stability based on nupd */ + uint8 pwr_stab_thresh; /**< Number of successes before power step down */ + uint8 lpc_exp_time; /**< Time lapse for expiry of database */ + uint8 pwrup_slow_step; /**< Step size for slow step up */ + uint8 pwrup_fast_step; /**< Step size for fast step up */ + uint8 pwrdn_slow_step; /**< Step size for slow step down */ } lpc_params_t; /* tx pkt delay statistics */ -#define SCB_RETRY_SHORT_DEF 7 /* Default Short retry Limit */ -#define WLPKTDLY_HIST_NBINS 16 /* number of bins used in the Delay histogram */ +#define SCB_RETRY_SHORT_DEF 7 /**< Default Short retry Limit */ +#define WLPKTDLY_HIST_NBINS 16 /**< number of bins used in the Delay histogram */ -/* structure to store per-AC delay 
statistics */ +/** structure to store per-AC delay statistics */ typedef struct scb_delay_stats { - uint32 txmpdu_lost; /* number of MPDUs lost */ - uint32 txmpdu_cnt[SCB_RETRY_SHORT_DEF]; /* retry times histogram */ - uint32 delay_sum[SCB_RETRY_SHORT_DEF]; /* cumulative packet latency */ - uint32 delay_min; /* minimum packet latency observed */ - uint32 delay_max; /* maximum packet latency observed */ - uint32 delay_avg; /* packet latency average */ - uint32 delay_hist[WLPKTDLY_HIST_NBINS]; /* delay histogram */ + uint32 txmpdu_lost; /**< number of MPDUs lost */ + uint32 txmpdu_cnt[SCB_RETRY_SHORT_DEF]; /**< retry times histogram */ + uint32 delay_sum[SCB_RETRY_SHORT_DEF]; /**< cumulative packet latency */ + uint32 delay_min; /**< minimum packet latency observed */ + uint32 delay_max; /**< maximum packet latency observed */ + uint32 delay_avg; /**< packet latency average */ + uint32 delay_hist[WLPKTDLY_HIST_NBINS]; /**< delay histogram */ + uint32 delay_count; /**< minimum number of time period units before + consequent packet delay events can be generated + */ + uint32 prev_txmpdu_cnt; /**< Previous value of txmpdu_cnt[] during last iteration */ + uint32 prev_delay_sum; /**< Previous value of delay_sum[] during last iteration */ } scb_delay_stats_t; -/* structure for txdelay event */ +/** structure for txdelay event */ typedef struct txdelay_event { - uint8 status; - int rssi; + uint8 status; + uint8 PAD[3]; + int32 rssi; chanim_stats_t chanim_stats; scb_delay_stats_t delay_stats[AC_COUNT]; } txdelay_event_t; -/* structure for txdelay parameters */ +/** structure for txdelay parameters */ typedef struct txdelay_params { - uint16 ratio; /* Avg Txdelay Delta */ - uint8 cnt; /* Sample cnt */ - uint8 period; /* Sample period */ - uint8 tune; /* Debug */ + uint16 ratio; /**< Avg Txdelay Delta */ + uint8 cnt; /**< Sample cnt */ + uint8 period; /**< Sample period */ + uint8 tune; /**< Debug */ + uint8 PAD; } txdelay_params_t; +#define MAX_TXDELAY_STATS_SCBS 6 +#define 
TXDELAY_STATS_VERSION 1 +enum { + TXDELAY_STATS_PARTIAL_RESULT = 0, + TXDELAY_STATS_FULL_RESULT = 1 +}; + +typedef struct scb_total_delay_stats { + struct ether_addr ea; + uint8 pad[2]; + scb_delay_stats_t dlystats[AC_COUNT]; +} scb_total_delay_stats_t; + +typedef struct txdelay_stats { + uint32 version; + uint32 full_result; /* 0:Partial, 1:full */ + uint32 scb_cnt; /* in:requested, out:returned */ + scb_total_delay_stats_t scb_delay_stats[1]; +} txdelay_stats_t; + +#define WL_TXDELAY_STATS_FIXED_SIZE \ + (sizeof(txdelay_stats_t)+(MAX_TXDELAY_STATS_SCBS-1)*sizeof(scb_total_delay_stats_t)) enum { WNM_SERVICE_DMS = 1, WNM_SERVICE_FMS = 2, WNM_SERVICE_TFS = 3 }; -/* Definitions for WNM/NPS TCLAS */ +/** Definitions for WNM/NPS TCLAS */ typedef struct wl_tclas { uint8 user_priority; uint8 fc_len; @@ -5433,10 +7095,10 @@ typedef struct wl_tclas { typedef struct wl_tclas_list { uint32 num; - wl_tclas_t tclas[1]; + wl_tclas_t tclas[]; } wl_tclas_list_t; -/* Definitions for WNM/NPS Traffic Filter Service */ +/** Definitions for WNM/NPS Traffic Filter Service */ typedef struct wl_tfs_req { uint8 tfs_id; uint8 tfs_actcode; @@ -5445,50 +7107,50 @@ typedef struct wl_tfs_req { } wl_tfs_req_t; typedef struct wl_tfs_filter { - uint8 status; /* Status returned by the AP */ - uint8 tclas_proc; /* TCLAS processing value (0:and, 1:or) */ - uint8 tclas_cnt; /* count of all wl_tclas_t in tclas array */ - uint8 tclas[1]; /* VLA of wl_tclas_t */ + uint8 status; /**< Status returned by the AP */ + uint8 tclas_proc; /**< TCLAS processing value (0:and, 1:or) */ + uint8 tclas_cnt; /**< count of all wl_tclas_t in tclas array */ + uint8 tclas[1]; /**< VLA of wl_tclas_t */ } wl_tfs_filter_t; #define WL_TFS_FILTER_FIXED_SIZE OFFSETOF(wl_tfs_filter_t, tclas) typedef struct wl_tfs_fset { - struct ether_addr ea; /* Address of AP/STA involved with this filter set */ - uint8 tfs_id; /* TFS ID field chosen by STA host */ - uint8 status; /* Internal status TFS_STATUS_xxx */ - uint8 actcode; /* Action 
code DOT11_TFS_ACTCODE_xxx */ - uint8 token; /* Token used in last request frame */ - uint8 notify; /* Notify frame sent/received because of this set */ - uint8 filter_cnt; /* count of all wl_tfs_filter_t in filter array */ - uint8 filter[1]; /* VLA of wl_tfs_filter_t */ + struct ether_addr ea; /**< Address of AP/STA involved with this filter set */ + uint8 tfs_id; /**< TFS ID field chosen by STA host */ + uint8 status; /**< Internal status TFS_STATUS_xxx */ + uint8 actcode; /**< Action code DOT11_TFS_ACTCODE_xxx */ + uint8 token; /**< Token used in last request frame */ + uint8 notify; /**< Notify frame sent/received because of this set */ + uint8 filter_cnt; /**< count of all wl_tfs_filter_t in filter array */ + uint8 filter[1]; /**< VLA of wl_tfs_filter_t */ } wl_tfs_fset_t; #define WL_TFS_FSET_FIXED_SIZE OFFSETOF(wl_tfs_fset_t, filter) enum { - TFS_STATUS_DISABLED = 0, /* TFS filter set disabled by user */ - TFS_STATUS_DISABLING = 1, /* Empty request just sent to AP */ - TFS_STATUS_VALIDATED = 2, /* Filter set validated by AP (but maybe not enabled!) */ - TFS_STATUS_VALIDATING = 3, /* Filter set just sent to AP */ - TFS_STATUS_NOT_ASSOC = 4, /* STA not associated */ - TFS_STATUS_NOT_SUPPORT = 5, /* TFS not supported by AP */ - TFS_STATUS_DENIED = 6, /* Filter set refused by AP (=> all sets are disabled!) */ + TFS_STATUS_DISABLED = 0, /**< TFS filter set disabled by user */ + TFS_STATUS_DISABLING = 1, /**< Empty request just sent to AP */ + TFS_STATUS_VALIDATED = 2, /**< Filter set validated by AP (but maybe not enabled!) */ + TFS_STATUS_VALIDATING = 3, /**< Filter set just sent to AP */ + TFS_STATUS_NOT_ASSOC = 4, /**< STA not associated */ + TFS_STATUS_NOT_SUPPORT = 5, /**< TFS not supported by AP */ + TFS_STATUS_DENIED = 6, /**< Filter set refused by AP (=> all sets are disabled!) 
*/ }; typedef struct wl_tfs_status { - uint8 fset_cnt; /* count of all wl_tfs_fset_t in fset array */ - wl_tfs_fset_t fset[1]; /* VLA of wl_tfs_fset_t */ + uint8 fset_cnt; /**< count of all wl_tfs_fset_t in fset array */ + wl_tfs_fset_t fset[1]; /**< VLA of wl_tfs_fset_t */ } wl_tfs_status_t; typedef struct wl_tfs_set { - uint8 send; /* Immediatly register registered sets on AP side */ - uint8 tfs_id; /* ID of a specific set (existing or new), or nul for all */ - uint8 actcode; /* Action code for this filter set */ - uint8 tclas_proc; /* TCLAS processing operator for this filter set */ + uint8 send; /**< Immediatly register registered sets on AP side */ + uint8 tfs_id; /**< ID of a specific set (existing or new), or nul for all */ + uint8 actcode; /**< Action code for this filter set */ + uint8 tclas_proc; /**< TCLAS processing operator for this filter set */ } wl_tfs_set_t; typedef struct wl_tfs_term { - uint8 del; /* Delete internal set once confirmation received */ - uint8 tfs_id; /* ID of a specific set (existing), or nul for all */ + uint8 del; /**< Delete internal set once confirmation received */ + uint8 tfs_id; /**< ID of a specific set (existing), or nul for all */ } wl_tfs_term_t; @@ -5496,17 +7158,17 @@ typedef struct wl_tfs_term { /* Definitions for WNM/NPS Directed Multicast Service */ enum { - DMS_STATUS_DISABLED = 0, /* DMS desc disabled by user */ - DMS_STATUS_ACCEPTED = 1, /* Request accepted by AP */ - DMS_STATUS_NOT_ASSOC = 2, /* STA not associated */ - DMS_STATUS_NOT_SUPPORT = 3, /* DMS not supported by AP */ - DMS_STATUS_DENIED = 4, /* Request denied by AP */ - DMS_STATUS_TERM = 5, /* Request terminated by AP */ - DMS_STATUS_REMOVING = 6, /* Remove request just sent */ - DMS_STATUS_ADDING = 7, /* Add request just sent */ - DMS_STATUS_ERROR = 8, /* Non compliant AP behvior */ - DMS_STATUS_IN_PROGRESS = 9, /* Request just sent */ - DMS_STATUS_REQ_MISMATCH = 10 /* Conditions for sending DMS req not met */ + DMS_STATUS_DISABLED = 0, /**< DMS desc 
disabled by user */ + DMS_STATUS_ACCEPTED = 1, /**< Request accepted by AP */ + DMS_STATUS_NOT_ASSOC = 2, /**< STA not associated */ + DMS_STATUS_NOT_SUPPORT = 3, /**< DMS not supported by AP */ + DMS_STATUS_DENIED = 4, /**< Request denied by AP */ + DMS_STATUS_TERM = 5, /**< Request terminated by AP */ + DMS_STATUS_REMOVING = 6, /**< Remove request just sent */ + DMS_STATUS_ADDING = 7, /**< Add request just sent */ + DMS_STATUS_ERROR = 8, /**< Non compliant AP behvior */ + DMS_STATUS_IN_PROGRESS = 9, /**< Request just sent */ + DMS_STATUS_REQ_MISMATCH = 10 /**< Conditions for sending DMS req not met */ }; typedef struct wl_dms_desc { @@ -5515,9 +7177,9 @@ typedef struct wl_dms_desc { uint8 token; uint8 dms_id; uint8 tclas_proc; - uint8 mac_len; /* length of all ether_addr in data array, 0 if STA */ - uint8 tclas_len; /* length of all wl_tclas_t in data array */ - uint8 data[1]; /* VLA of 'ether_addr' and 'wl_tclas_t' (in this order ) */ + uint8 mac_len; /**< length of all ether_addr in data array, 0 if STA */ + uint8 tclas_len; /**< length of all wl_tclas_t in data array */ + uint8 data[1]; /**< VLA of 'ether_addr' and 'wl_tclas_t' (in this order ) */ } wl_dms_desc_t; #define WL_DMS_DESC_FIXED_SIZE OFFSETOF(wl_dms_desc_t, data) @@ -5545,20 +7207,20 @@ typedef struct wl_service_term { } u; } wl_service_term_t; -/* Definitions for WNM/NPS BSS Transistion */ +/** Definitions for WNM/NPS BSS Transistion */ typedef struct wl_bsstrans_req { - uint16 tbtt; /* time of BSS to end of life, in unit of TBTT */ - uint16 dur; /* time of BSS to keep off, in unit of minute */ - uint8 reqmode; /* request mode of BSS transition request */ - uint8 unicast; /* request by unicast or by broadcast */ + uint16 tbtt; /**< time of BSS to end of life, in unit of TBTT */ + uint16 dur; /**< time of BSS to keep off, in unit of minute */ + uint8 reqmode; /**< request mode of BSS transition request */ + uint8 unicast; /**< request by unicast or by broadcast */ } wl_bsstrans_req_t; enum { - 
BSSTRANS_RESP_AUTO = 0, /* Currently equivalent to ENABLE */ - BSSTRANS_RESP_DISABLE = 1, /* Never answer BSS Trans Req frames */ - BSSTRANS_RESP_ENABLE = 2, /* Always answer Req frames with preset data */ - BSSTRANS_RESP_WAIT = 3, /* Send ind, wait and/or send preset data (NOT IMPL) */ - BSSTRANS_RESP_IMMEDIATE = 4 /* After an ind, set data and send resp (NOT IMPL) */ + BSSTRANS_RESP_AUTO = 0, /**< Currently equivalent to ENABLE */ + BSSTRANS_RESP_DISABLE = 1, /**< Never answer BSS Trans Req frames */ + BSSTRANS_RESP_ENABLE = 2, /**< Always answer Req frames with preset data */ + BSSTRANS_RESP_WAIT = 3, /**< Send ind, wait and/or send preset data (NOT IMPL) */ + BSSTRANS_RESP_IMMEDIATE = 4 /**< After an ind, set data and send resp (NOT IMPL) */ }; typedef struct wl_bsstrans_resp { @@ -5574,39 +7236,42 @@ typedef struct wl_bsstrans_resp { * mandates different behavior on receiving BSS-transition request. To accomodate * such divergent behaviors these policies have been created. */ -enum { - WL_BSSTRANS_POLICY_ROAM_ALWAYS = 0, /* Roam (or disassociate) in all cases */ - WL_BSSTRANS_POLICY_ROAM_IF_MODE = 1, /* Roam only if requested by Request Mode field */ - WL_BSSTRANS_POLICY_ROAM_IF_PREF = 2, /* Roam only if Preferred BSS provided */ - WL_BSSTRANS_POLICY_WAIT = 3, /* Wait for deauth and send Accepted status */ - WL_BSSTRANS_POLICY_PRODUCT = 4, /* Policy for real product use cases (non-pf) */ -}; +typedef enum { + WL_BSSTRANS_POLICY_ROAM_ALWAYS = 0, /**< Roam (or disassociate) in all cases */ + WL_BSSTRANS_POLICY_ROAM_IF_MODE = 1, /**< Roam only if requested by Request Mode field */ + WL_BSSTRANS_POLICY_ROAM_IF_PREF = 2, /**< Roam only if Preferred BSS provided */ + WL_BSSTRANS_POLICY_WAIT = 3, /**< Wait for deauth and send Accepted status */ + WL_BSSTRANS_POLICY_PRODUCT = 4, /**< Policy for real product use cases (Olympic) */ + WL_BSSTRANS_POLICY_PRODUCT_WBTEXT = 5, /**< Policy for real product use cases (SS) */ + WL_BSSTRANS_POLICY_MAX = 6 +} 
wnm_bsstrans_policy_type_t; -/* Definitions for WNM/NPS TIM Broadcast */ +/** Definitions for WNM/NPS TIM Broadcast */ typedef struct wl_timbc_offset { - int16 offset; /* offset in us */ - uint16 fix_intv; /* override interval sent from STA */ - uint16 rate_override; /* use rate override to send high rate TIM broadcast frame */ - uint8 tsf_present; /* show timestamp in TIM broadcast frame */ + int16 offset; /**< offset in us */ + uint16 fix_intv; /**< override interval sent from STA */ + uint16 rate_override; /**< use rate override to send high rate TIM broadcast frame */ + uint8 tsf_present; /**< show timestamp in TIM broadcast frame */ + uint8 PAD; } wl_timbc_offset_t; typedef struct wl_timbc_set { - uint8 interval; /* Interval in DTIM wished or required. */ - uint8 flags; /* Bitfield described below */ - uint16 rate_min; /* Minimum rate required for High/Low TIM frames. Optionnal */ - uint16 rate_max; /* Maximum rate required for High/Low TIM frames. Optionnal */ + uint8 interval; /**< Interval in DTIM wished or required. */ + uint8 flags; /**< Bitfield described below */ + uint16 rate_min; /**< Minimum rate required for High/Low TIM frames. Optionnal */ + uint16 rate_max; /**< Maximum rate required for High/Low TIM frames. Optionnal */ } wl_timbc_set_t; enum { - WL_TIMBC_SET_TSF_REQUIRED = 1, /* Enable TIMBC only if TSF in TIM frames */ - WL_TIMBC_SET_NO_OVERRIDE = 2, /* ... if AP does not override interval */ - WL_TIMBC_SET_PROXY_ARP = 4, /* ... if AP support Proxy ARP */ - WL_TIMBC_SET_DMS_ACCEPTED = 8 /* ... if all DMS desc have been accepted */ + WL_TIMBC_SET_TSF_REQUIRED = 1, /**< Enable TIMBC only if TSF in TIM frames */ + WL_TIMBC_SET_NO_OVERRIDE = 2, /**< ... if AP does not override interval */ + WL_TIMBC_SET_PROXY_ARP = 4, /**< ... if AP support Proxy ARP */ + WL_TIMBC_SET_DMS_ACCEPTED = 8 /**< ... 
if all DMS desc have been accepted */ }; typedef struct wl_timbc_status { - uint8 status_sta; /* Status from internal state machine (check below) */ - uint8 status_ap; /* From AP response frame (check 8.4.2.86 from 802.11) */ + uint8 status_sta; /**< Status from internal state machine (check below) */ + uint8 status_ap; /**< From AP response frame (check 8.4.2.86 from 802.11) */ uint8 interval; uint8 pad; int32 offset; @@ -5615,39 +7280,40 @@ typedef struct wl_timbc_status { } wl_timbc_status_t; enum { - WL_TIMBC_STATUS_DISABLE = 0, /* TIMBC disabled by user */ - WL_TIMBC_STATUS_REQ_MISMATCH = 1, /* AP settings do no match user requirements */ - WL_TIMBC_STATUS_NOT_ASSOC = 2, /* STA not associated */ - WL_TIMBC_STATUS_NOT_SUPPORT = 3, /* TIMBC not supported by AP */ - WL_TIMBC_STATUS_DENIED = 4, /* Req to disable TIMBC sent to AP */ - WL_TIMBC_STATUS_ENABLE = 5 /* TIMBC enabled */ + WL_TIMBC_STATUS_DISABLE = 0, /**< TIMBC disabled by user */ + WL_TIMBC_STATUS_REQ_MISMATCH = 1, /**< AP settings do no match user requirements */ + WL_TIMBC_STATUS_NOT_ASSOC = 2, /**< STA not associated */ + WL_TIMBC_STATUS_NOT_SUPPORT = 3, /**< TIMBC not supported by AP */ + WL_TIMBC_STATUS_DENIED = 4, /**< Req to disable TIMBC sent to AP */ + WL_TIMBC_STATUS_ENABLE = 5 /**< TIMBC enabled */ }; -/* Definitions for PM2 Dynamic Fast Return To Sleep */ +/** Definitions for PM2 Dynamic Fast Return To Sleep */ typedef struct wl_pm2_sleep_ret_ext { - uint8 logic; /* DFRTS logic: see WL_DFRTS_LOGIC_* below */ - uint16 low_ms; /* Low FRTS timeout */ - uint16 high_ms; /* High FRTS timeout */ - uint16 rx_pkts_threshold; /* switching threshold: # rx pkts */ - uint16 tx_pkts_threshold; /* switching threshold: # tx pkts */ - uint16 txrx_pkts_threshold; /* switching threshold: # (tx+rx) pkts */ - uint32 rx_bytes_threshold; /* switching threshold: # rx bytes */ - uint32 tx_bytes_threshold; /* switching threshold: # tx bytes */ - uint32 txrx_bytes_threshold; /* switching threshold: # (tx+rx) bytes */ 
+ uint8 logic; /**< DFRTS logic: see WL_DFRTS_LOGIC_* below */ + uint8 PAD; + uint16 low_ms; /**< Low FRTS timeout */ + uint16 high_ms; /**< High FRTS timeout */ + uint16 rx_pkts_threshold; /**< switching threshold: # rx pkts */ + uint16 tx_pkts_threshold; /**< switching threshold: # tx pkts */ + uint16 txrx_pkts_threshold; /**< switching threshold: # (tx+rx) pkts */ + uint32 rx_bytes_threshold; /**< switching threshold: # rx bytes */ + uint32 tx_bytes_threshold; /**< switching threshold: # tx bytes */ + uint32 txrx_bytes_threshold; /**< switching threshold: # (tx+rx) bytes */ } wl_pm2_sleep_ret_ext_t; -#define WL_DFRTS_LOGIC_OFF 0 /* Feature is disabled */ -#define WL_DFRTS_LOGIC_OR 1 /* OR all non-zero threshold conditions */ -#define WL_DFRTS_LOGIC_AND 2 /* AND all non-zero threshold conditions */ +#define WL_DFRTS_LOGIC_OFF 0 /**< Feature is disabled */ +#define WL_DFRTS_LOGIC_OR 1 /**< OR all non-zero threshold conditions */ +#define WL_DFRTS_LOGIC_AND 2 /**< AND all non-zero threshold conditions */ /* Values for the passive_on_restricted_mode iovar. When set to non-zero, this iovar * disables automatic conversions of a channel from passively scanned to * actively scanned. These values only have an effect for country codes such * as XZ where some 5 GHz channels are defined to be passively scanned. 
*/ -#define WL_PASSACTCONV_DISABLE_NONE 0 /* Enable permanent and temporary conversions */ -#define WL_PASSACTCONV_DISABLE_ALL 1 /* Disable permanent and temporary conversions */ -#define WL_PASSACTCONV_DISABLE_PERM 2 /* Disable only permanent conversions */ +#define WL_PASSACTCONV_DISABLE_NONE 0 /**< Enable permanent and temporary conversions */ +#define WL_PASSACTCONV_DISABLE_ALL 1 /**< Disable permanent and temporary conversions */ +#define WL_PASSACTCONV_DISABLE_PERM 2 /**< Disable only permanent conversions */ /* Definitions for Reliable Multicast */ #define WL_RMC_CNT_VERSION 1 @@ -5665,108 +7331,111 @@ typedef struct wl_pm2_sleep_ret_ext { #define WL_RMC_MAX_TRS_IN_ACKALL 1 #define WL_RMC_ACK_MCAST0 0x02 #define WL_RMC_ACK_MCAST_ALL 0x01 -#define WL_RMC_ACTF_TIME_MIN 300 /* time in ms */ -#define WL_RMC_ACTF_TIME_MAX 20000 /* time in ms */ -#define WL_RMC_MAX_NUM_TRS 32 /* maximun transmitters allowed */ -#define WL_RMC_ARTMO_MIN 350 /* time in ms */ -#define WL_RMC_ARTMO_MAX 40000 /* time in ms */ +#define WL_RMC_ACTF_TIME_MIN 300 /**< time in ms */ +#define WL_RMC_ACTF_TIME_MAX 20000 /**< time in ms */ +#define WL_RMC_MAX_NUM_TRS 32 /**< maximun transmitters allowed */ +#define WL_RMC_ARTMO_MIN 350 /**< time in ms */ +#define WL_RMC_ARTMO_MAX 40000 /**< time in ms */ /* RMC events in action frames */ enum rmc_opcodes { - RELMCAST_ENTRY_OP_DISABLE = 0, /* Disable multi-cast group */ - RELMCAST_ENTRY_OP_DELETE = 1, /* Delete multi-cast group */ - RELMCAST_ENTRY_OP_ENABLE = 2, /* Enable multi-cast group */ - RELMCAST_ENTRY_OP_ACK_ALL = 3 /* Enable ACK ALL bit in AMT */ + RELMCAST_ENTRY_OP_DISABLE = 0, /**< Disable multi-cast group */ + RELMCAST_ENTRY_OP_DELETE = 1, /**< Delete multi-cast group */ + RELMCAST_ENTRY_OP_ENABLE = 2, /**< Enable multi-cast group */ + RELMCAST_ENTRY_OP_ACK_ALL = 3 /**< Enable ACK ALL bit in AMT */ }; /* RMC operational modes */ enum rmc_modes { - WL_RMC_MODE_RECEIVER = 0, /* Receiver mode by default */ - WL_RMC_MODE_TRANSMITTER = 1, 
/* Transmitter mode using wl ackreq */ - WL_RMC_MODE_INITIATOR = 2 /* Initiator mode using wl ackreq */ + WL_RMC_MODE_RECEIVER = 0, /**< Receiver mode by default */ + WL_RMC_MODE_TRANSMITTER = 1, /**< Transmitter mode using wl ackreq */ + WL_RMC_MODE_INITIATOR = 2 /**< Initiator mode using wl ackreq */ }; -/* Each RMC mcast client info */ +/** Each RMC mcast client info */ typedef struct wl_relmcast_client { - uint8 flag; /* status of client such as AR, R, or blacklisted */ - int16 rssi; /* rssi value of RMC client */ - struct ether_addr addr; /* mac address of RMC client */ + uint8 flag; /**< status of client such as AR, R, or blacklisted */ + uint8 PAD; + int16 rssi; /**< rssi value of RMC client */ + struct ether_addr addr; /**< mac address of RMC client */ } wl_relmcast_client_t; -/* RMC Counters */ +/** RMC Counters */ typedef struct wl_rmc_cnts { - uint16 version; /* see definition of WL_CNT_T_VERSION */ - uint16 length; /* length of entire structure */ - uint16 dupcnt; /* counter for duplicate rmc MPDU */ - uint16 ackreq_err; /* counter for wl ackreq error */ - uint16 af_tx_err; /* error count for action frame transmit */ - uint16 null_tx_err; /* error count for rmc null frame transmit */ - uint16 af_unicast_tx_err; /* error count for rmc unicast frame transmit */ - uint16 mc_no_amt_slot; /* No mcast AMT entry available */ + uint16 version; /**< see definition of WL_CNT_T_VERSION */ + uint16 length; /**< length of entire structure */ + uint16 dupcnt; /**< counter for duplicate rmc MPDU */ + uint16 ackreq_err; /**< counter for wl ackreq error */ + uint16 af_tx_err; /**< error count for action frame transmit */ + uint16 null_tx_err; /**< error count for rmc null frame transmit */ + uint16 af_unicast_tx_err; /**< error count for rmc unicast frame transmit */ + uint16 mc_no_amt_slot; /**< No mcast AMT entry available */ /* Unused. 
Keep for rom compatibility */ - uint16 mc_no_glb_slot; /* No mcast entry available in global table */ - uint16 mc_not_mirrored; /* mcast group is not mirrored */ - uint16 mc_existing_tr; /* mcast group is already taken by transmitter */ - uint16 mc_exist_in_amt; /* mcast group is already programmed in amt */ + uint16 mc_no_glb_slot; /**< No mcast entry available in global table */ + uint16 mc_not_mirrored; /**< mcast group is not mirrored */ + uint16 mc_existing_tr; /**< mcast group is already taken by transmitter */ + uint16 mc_exist_in_amt; /**< mcast group is already programmed in amt */ /* Unused. Keep for rom compatibility */ - uint16 mc_not_exist_in_gbl; /* mcast group is not in global table */ - uint16 mc_not_exist_in_amt; /* mcast group is not in AMT table */ - uint16 mc_utilized; /* mcast addressed is already taken */ - uint16 mc_taken_other_tr; /* multi-cast addressed is already taken */ - uint32 rmc_rx_frames_mac; /* no of mc frames received from mac */ - uint32 rmc_tx_frames_mac; /* no of mc frames transmitted to mac */ - uint32 mc_null_ar_cnt; /* no. of times NULL AR is received */ - uint32 mc_ar_role_selected; /* no. of times took AR role */ - uint32 mc_ar_role_deleted; /* no. of times AR role cancelled */ - uint32 mc_noacktimer_expired; /* no. of times noack timer expired */ - uint16 mc_no_wl_clk; /* no wl clk detected when trying to access amt */ - uint16 mc_tr_cnt_exceeded; /* No of transmitters in the network exceeded */ + uint16 mc_not_exist_in_gbl; /**< mcast group is not in global table */ + uint16 mc_not_exist_in_amt; /**< mcast group is not in AMT table */ + uint16 mc_utilized; /**< mcast addressed is already taken */ + uint16 mc_taken_other_tr; /**< multi-cast addressed is already taken */ + uint32 rmc_rx_frames_mac; /**< no of mc frames received from mac */ + uint32 rmc_tx_frames_mac; /**< no of mc frames transmitted to mac */ + uint32 mc_null_ar_cnt; /**< no. of times NULL AR is received */ + uint32 mc_ar_role_selected; /**< no. 
of times took AR role */ + uint32 mc_ar_role_deleted; /**< no. of times AR role cancelled */ + uint32 mc_noacktimer_expired; /**< no. of times noack timer expired */ + uint16 mc_no_wl_clk; /**< no wl clk detected when trying to access amt */ + uint16 mc_tr_cnt_exceeded; /**< No of transmitters in the network exceeded */ } wl_rmc_cnts_t; -/* RMC Status */ +/** RMC Status */ typedef struct wl_relmcast_st { - uint8 ver; /* version of RMC */ - uint8 num; /* number of clients detected by transmitter */ + uint8 ver; /**< version of RMC */ + uint8 num; /**< number of clients detected by transmitter */ wl_relmcast_client_t clients[WL_RMC_MAX_CLIENT]; - uint16 err; /* error status (used in infra) */ - uint16 actf_time; /* action frame time period */ + uint16 err; /**< error status (used in infra) */ + uint16 actf_time; /**< action frame time period */ } wl_relmcast_status_t; -/* Entry for each STA/node */ +/** Entry for each STA/node */ typedef struct wl_rmc_entry { /* operation on multi-cast entry such add, * delete, ack-all */ int8 flag; - struct ether_addr addr; /* multi-cast group mac address */ + struct ether_addr addr; /**< multi-cast group mac address */ } wl_rmc_entry_t; -/* RMC table */ +/** RMC table */ typedef struct wl_rmc_entry_table { - uint8 index; /* index to a particular mac entry in table */ - uint8 opcode; /* opcodes or operation on entry */ + uint8 index; /**< index to a particular mac entry in table */ + uint8 opcode; /**< opcodes or operation on entry */ wl_rmc_entry_t entry[WL_RMC_MAX_TABLE_ENTRY]; } wl_rmc_entry_table_t; typedef struct wl_rmc_trans_elem { - struct ether_addr tr_mac; /* transmitter mac */ - struct ether_addr ar_mac; /* ar mac */ - uint16 artmo; /* AR timeout */ - uint8 amt_idx; /* amt table entry */ - uint16 flag; /* entry will be acked, not acked, programmed, full etc */ + struct ether_addr tr_mac; /**< transmitter mac */ + struct ether_addr ar_mac; /**< ar mac */ + uint16 artmo; /**< AR timeout */ + uint8 amt_idx; /**< amt table 
entry */ + uint8 PAD; + uint16 flag; /**< entry will be acked, not acked, programmed, full etc */ } wl_rmc_trans_elem_t; -/* RMC transmitters */ +/** RMC transmitters */ typedef struct wl_rmc_trans_in_network { - uint8 ver; /* version of RMC */ - uint8 num_tr; /* number of transmitters in the network */ + uint8 ver; /**< version of RMC */ + uint8 num_tr; /**< number of transmitters in the network */ wl_rmc_trans_elem_t trs[WL_RMC_MAX_NUM_TRS]; } wl_rmc_trans_in_network_t; -/* To update vendor specific ie for RMC */ +/** To update vendor specific ie for RMC */ typedef struct wl_rmc_vsie { uint8 oui[DOT11_OUI_LEN]; - uint16 payload; /* IE Data Payload */ + uint8 PAD; + uint16 payload; /**< IE Data Payload */ } wl_rmc_vsie_t; @@ -5795,12 +7464,16 @@ enum proxd_method { #define WL_PROXD_FLAG_ONEWAY 0x40 #define WL_PROXD_FLAG_SEQ_EN 0x80 +#define WL_PROXD_SETFLAG_K 0x1 +#define WL_PROXD_SETFLAG_N 0x2 +#define WL_PROXD_SETFLAG_S 0x4 + #define WL_PROXD_RANDOM_WAKEUP 0x8000 #define WL_PROXD_MAXREPORT 8 typedef struct wl_proxd_iovar { - uint16 method; /* Proxmity Detection method */ - uint16 mode; /* Mode (neutral, initiator, target) */ + uint16 method; /**< Proximity Detection method */ + uint16 mode; /**< Mode (neutral, initiator, target) */ } wl_proxd_iovar_t; /* @@ -5809,33 +7482,30 @@ typedef struct wl_proxd_iovar { * common params should be placed at the beginning */ -/* require strict packing */ -#include +typedef struct wl_proxd_params_common { + chanspec_t chanspec; /**< channel spec */ + int16 tx_power; /**< tx power of Proximity Detection(PD) frames (in dBm) */ + uint16 tx_rate; /**< tx rate of PD rames (in 500kbps units) */ + uint16 timeout; /**< timeout value */ + uint16 interval; /**< interval between neighbor finding attempts (in TU) */ + uint16 duration; /**< duration of neighbor finding attempts (in ms) */ +} wl_proxd_params_common_t; -typedef
BWL_PRE_PACKED_STRUCT struct wl_proxd_params_common { - chanspec_t chanspec; /* channel spec */ - int16 tx_power; /* tx power of Proximity Detection(PD) frames (in dBm) */ - uint16 tx_rate; /* tx rate of PD rames (in 500kbps units) */ - uint16 timeout; /* timeout value */ - uint16 interval; /* interval between neighbor finding attempts (in TU) */ - uint16 duration; /* duration of neighbor finding attempts (in ms) */ -} BWL_POST_PACKED_STRUCT wl_proxd_params_common_t; - -typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_rssi_method { - chanspec_t chanspec; /* chanspec for home channel */ - int16 tx_power; /* tx power of Proximity Detection frames (in dBm) */ - uint16 tx_rate; /* tx rate of PD frames, 500kbps units */ - uint16 timeout; /* state machine wait timeout of the frames (in ms) */ - uint16 interval; /* interval between neighbor finding attempts (in TU) */ - uint16 duration; /* duration of neighbor finding attempts (in ms) */ +typedef struct wl_proxd_params_rssi_method { + chanspec_t chanspec; /**< chanspec for home channel */ + int16 tx_power; /**< tx power of Proximity Detection frames (in dBm) */ + uint16 tx_rate; /**< tx rate of PD frames, 500kbps units */ + uint16 timeout; /**< state machine wait timeout of the frames (in ms) */ + uint16 interval; /**< interval between neighbor finding attempts (in TU) */ + uint16 duration; /**< duration of neighbor finding attempts (in ms) */ /* method specific ones go after this line */ - int16 rssi_thresh; /* RSSI threshold (in dBm) */ - uint16 maxconvergtmo; /* max wait converge timeout (in ms) */ + int16 rssi_thresh; /**< RSSI threshold (in dBm) */ + uint16 maxconvergtmo; /**< max wait converge timeout (in ms) */ } wl_proxd_params_rssi_method_t; -#define Q1_NS 25 /* Q1 time units */ +#define Q1_NS 25 /**< Q1 time units */ -#define TOF_BW_NUM 3 /* number of bandwidth that the TOF can support */ +#define TOF_BW_NUM 3 /**< number of bandwidth that the TOF can support */ #define TOF_BW_SEQ_NUM (TOF_BW_NUM+2) /* 
number of total index */ enum tof_bw_index { TOF_BW_20MHZ_INDEX = 0, @@ -5845,31 +7519,31 @@ enum tof_bw_index { TOF_BW_SEQRX_INDEX = 4 }; -#define BANDWIDTH_BASE 20 /* base value of bandwidth */ +#define BANDWIDTH_BASE 20 /**< base value of bandwidth */ #define TOF_BW_20MHZ (BANDWIDTH_BASE << TOF_BW_20MHZ_INDEX) #define TOF_BW_40MHZ (BANDWIDTH_BASE << TOF_BW_40MHZ_INDEX) #define TOF_BW_80MHZ (BANDWIDTH_BASE << TOF_BW_80MHZ_INDEX) #define TOF_BW_10MHZ 10 -#define NFFT_BASE 64 /* base size of fft */ +#define NFFT_BASE 64 /**< base size of fft */ #define TOF_NFFT_20MHZ (NFFT_BASE << TOF_BW_20MHZ_INDEX) #define TOF_NFFT_40MHZ (NFFT_BASE << TOF_BW_40MHZ_INDEX) #define TOF_NFFT_80MHZ (NFFT_BASE << TOF_BW_80MHZ_INDEX) -typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_method { - chanspec_t chanspec; /* chanspec for home channel */ - int16 tx_power; /* tx power of Proximity Detection(PD) frames (in dBm) */ - uint16 tx_rate; /* tx rate of PD rames (in 500kbps units) */ - uint16 timeout; /* state machine wait timeout of the frames (in ms) */ - uint16 interval; /* interval between neighbor finding attempts (in TU) */ - uint16 duration; /* duration of neighbor finding attempts (in ms) */ +typedef struct wl_proxd_params_tof_method { + chanspec_t chanspec; /**< chanspec for home channel */ + int16 tx_power; /**< tx power of Proximity Detection(PD) frames (in dBm) */ + uint16 tx_rate; /**< tx rate of PD rames (in 500kbps units) */ + uint16 timeout; /**< state machine wait timeout of the frames (in ms) */ + uint16 interval; /**< interval between neighbor finding attempts (in TU) */ + uint16 duration; /**< duration of neighbor finding attempts (in ms) */ /* specific for the method go after this line */ - struct ether_addr tgt_mac; /* target mac addr for TOF method */ - uint16 ftm_cnt; /* number of the frames txed by initiator */ - uint16 retry_cnt; /* number of retransmit attampts for ftm frames */ - int16 vht_rate; /* ht or vht rate */ + struct ether_addr tgt_mac; /**< 
target mac addr for TOF method */ + uint16 ftm_cnt; /**< number of the frames txed by initiator */ + uint16 retry_cnt; /**< number of retransmit attampts for ftm frames */ + int16 vht_rate; /**< ht or vht rate */ /* add more params required for other methods can be added here */ -} BWL_POST_PACKED_STRUCT wl_proxd_params_tof_method_t; +} wl_proxd_params_tof_method_t; typedef struct wl_proxd_seq_config { @@ -5881,42 +7555,56 @@ typedef struct wl_proxd_seq_config int16 w_offset; } wl_proxd_seq_config_t; - +#define WL_PROXD_TUNE_VERSION_1 1 +#include typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_tune { - uint32 Ki; /* h/w delay K factor for initiator */ - uint32 Kt; /* h/w delay K factor for target */ - int16 vhtack; /* enable/disable VHT ACK */ - int16 N_log2[TOF_BW_SEQ_NUM]; /* simple threshold crossing */ - int16 w_offset[TOF_BW_NUM]; /* offset of threshold crossing window(per BW) */ - int16 w_len[TOF_BW_NUM]; /* length of threshold crossing window(per BW) */ - int32 maxDT; /* max time difference of T4/T1 or T3/T2 */ - int32 minDT; /* min time difference of T4/T1 or T3/T2 */ - uint8 totalfrmcnt; /* total count of transfered measurement frames */ - uint16 rsv_media; /* reserve media value for TOF */ - uint32 flags; /* flags */ - uint8 core; /* core to use for tx */ - uint8 force_K; /* set to force value of K */ - int16 N_scale[TOF_BW_SEQ_NUM]; /* simple threshold crossing */ - uint8 sw_adj; /* enable sw assisted timestamp adjustment */ - uint8 hw_adj; /* enable hw assisted timestamp adjustment */ - uint8 seq_en; /* enable ranging sequence */ - uint8 ftm_cnt[TOF_BW_SEQ_NUM]; /* number of ftm frames based on bandwidth */ - int16 N_log2_2g; /* simple threshold crossing for 2g channel */ - int16 N_scale_2g; /* simple threshold crossing for 2g channel */ + uint32 version; + uint32 Ki; /**< h/w delay K factor for initiator */ + uint32 Kt; /**< h/w delay K factor for target */ + int16 vhtack; /**< enable/disable VHT ACK */ + int16 N_log2[TOF_BW_SEQ_NUM]; /**< 
simple threshold crossing */ + int16 w_offset[TOF_BW_NUM]; /**< offset of threshold crossing window(per BW) */ + int16 w_len[TOF_BW_NUM]; /**< length of threshold crossing window(per BW) */ + int32 maxDT; /**< max time difference of T4/T1 or T3/T2 */ + int32 minDT; /**< min time difference of T4/T1 or T3/T2 */ + uint8 totalfrmcnt; /**< total count of transfered measurement frames */ + uint16 rsv_media; /**< reserve media value for TOF */ + uint32 flags; /**< flags */ + uint8 core; /**< core to use for tx */ + uint8 setflags; /* set flags of K, N. S values */ + int16 N_scale[TOF_BW_SEQ_NUM]; /**< simple threshold crossing */ + uint8 sw_adj; /**< enable sw assisted timestamp adjustment */ + uint8 hw_adj; /**< enable hw assisted timestamp adjustment */ + uint8 seq_en; /**< enable ranging sequence */ + uint8 ftm_cnt[TOF_BW_SEQ_NUM]; /**< number of ftm frames based on bandwidth */ + int16 N_log2_2g; /**< simple threshold crossing for 2g channel */ + int16 N_scale_2g; /**< simple threshold crossing for 2g channel */ wl_proxd_seq_config_t seq_5g20; + wl_proxd_seq_config_t seq_2g20; /* Thresh crossing params for 2G Sequence */ + uint16 bitflip_thresh; /* bitflip threshold */ + uint16 snr_thresh; /* SNR threshold */ + int8 recv_2g_thresh; /* 2g recieve sensitivity threshold */ + uint32 acs_gdv_thresh; + int8 acs_rssi_thresh; + uint8 smooth_win_en; + int32 acs_gdmm_thresh; + int8 acs_delta_rssi_thresh; + int32 emu_delay; } BWL_POST_PACKED_STRUCT wl_proxd_params_tof_tune_t; +#include typedef struct wl_proxd_params_iovar { - uint16 method; /* Proxmity Detection method */ + uint16 method; /**< Proximity Detection method */ union { /* common params for pdsvc */ - wl_proxd_params_common_t cmn_params; /* common parameters */ + wl_proxd_params_common_t cmn_params; /**< common parameters */ /* method specific */ - wl_proxd_params_rssi_method_t rssi_params; /* RSSI method parameters */ - wl_proxd_params_tof_method_t tof_params; /* TOF meothod parameters */ + 
wl_proxd_params_rssi_method_t rssi_params; /**< RSSI method parameters */ + wl_proxd_params_tof_method_t tof_params; /**< TOF method parameters */ /* tune parameters */ - wl_proxd_params_tof_tune_t tof_tune; /* TOF tune parameters */ - } u; /* Method specific optional parameters */ + wl_proxd_params_tof_tune_t tof_tune; /**< TOF tune parameters */ + uint8 PAD[sizeof(wl_proxd_params_tof_tune_t)+1]; + } u; /**< Method specific optional parameters */ } wl_proxd_params_iovar_t; #define PROXD_COLLECT_GET_STATUS 0 @@ -5925,121 +7613,491 @@ typedef struct wl_proxd_params_iovar { #define PROXD_COLLECT_QUERY_DATA 3 #define PROXD_COLLECT_QUERY_DEBUG 4 #define PROXD_COLLECT_REMOTE_REQUEST 5 -#define PROXD_COLLECT_DONE 6 +#define PROXD_COLLECT_DONE 6 +typedef enum { + WL_PROXD_COLLECT_METHOD_TYPE_DISABLE = 0x0, + WL_PROXD_COLLECT_METHOD_TYPE_IOVAR = 0x1, + WL_PROXD_COLLECT_METHOD_TYPE_EVENT = 0x2, + WL_PROXD_COLLECT_METHOD_TYPE_EVENT_LOG = 0x4 +} wl_proxd_collect_method_type_t; + +typedef uint16 wl_proxd_collect_method_t; /* query status: method to send proxd collect */ + +#include typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_query { - uint32 method; /* method */ - uint8 request; /* Query request. */ - uint8 status; /* 0 -- disable, 1 -- enable collection, */ - /* 2 -- enable collection & debug */ - uint16 index; /* The current frame index [0 to total_frames - 1]. */ - uint16 mode; /* Initiator or Target */ - bool busy; /* tof sm is busy */ - bool remote; /* Remote collect data */ + uint32 method; /**< method */ + uint8 request; /**< Query request. */ + uint8 status; /**< bitmask 0 -- disable, 0x1 -- enable collection, */ + /* 0x2 -- Use generic event, 0x4 -- use event log */ + uint16 index; /**< The current frame index [0 to total_frames - 1]. 
*/ + uint16 mode; /**< Initiator or Target */ + uint8 busy; /**< tof sm is busy */ + uint8 remote; /**< Remote collect data */ } BWL_POST_PACKED_STRUCT wl_proxd_collect_query_t; +#include +#include typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_header { - uint16 total_frames; /* The totral frames for this collect. */ - uint16 nfft; /* nfft value */ - uint16 bandwidth; /* bandwidth */ - uint16 channel; /* channel number */ - uint32 chanspec; /* channel spec */ - uint32 fpfactor; /* avb timer value factor */ - uint16 fpfactor_shift; /* avb timer value shift bits */ - int32 distance; /* distance calculated by fw */ - uint32 meanrtt; /* mean of RTTs */ - uint32 modertt; /* mode of RTTs */ - uint32 medianrtt; /* median of RTTs */ - uint32 sdrtt; /* standard deviation of RTTs */ - uint32 clkdivisor; /* clock divisor */ - uint16 chipnum; /* chip type */ - uint8 chiprev; /* chip revision */ - uint8 phyver; /* phy version */ - struct ether_addr loaclMacAddr; /* local mac address */ - struct ether_addr remoteMacAddr; /* remote mac address */ + uint16 total_frames; /**< The total frames for this collect. 
*/ + uint16 nfft; /**< nfft value */ + uint16 bandwidth; /**< bandwidth */ + uint16 channel; /**< channel number */ + uint32 chanspec; /**< channel spec */ + uint32 fpfactor; /**< avb timer value factor */ + uint16 fpfactor_shift; /**< avb timer value shift bits */ + int32 distance; /**< distance calculated by fw */ + uint32 meanrtt; /**< mean of RTTs */ + uint32 modertt; /**< mode of RTTs */ + uint32 medianrtt; /**< median of RTTs */ + uint32 sdrtt; /**< standard deviation of RTTs */ + uint32 clkdivisor; /**< clock divisor */ + uint16 chipnum; /**< chip type */ + uint8 chiprev; /**< chip revision */ + uint8 phyver; /**< phy version */ + struct ether_addr localMacAddr; /**< local mac address */ + struct ether_addr remoteMacAddr; /**< remote mac address */ wl_proxd_params_tof_tune_t params; } BWL_POST_PACKED_STRUCT wl_proxd_collect_header_t; +#include -#ifdef WL_NAN +/* ifdef WL_NAN */ /* ********************** NAN wl interface struct types and defs ******************** */ +/* + * Uses new common IOVAR batch processing mechanism + */ -#define WL_NAN_IOCTL_VERSION 0x1 -#define NAN_IOC_BUFSZ 256 /**< some sufficient ioc buff size for our module */ -#define NAN_IOC_BUFSZ_EXT 1024 /* some sufficient ioc buff size for dump commands */ +/* + * NAN config control + * Bits 0 - 23 can be set by host + * Bits 24 - 31 - Internal use for firmware, host cannot set it + */ -/* wl_nan_sub_cmd may also be used in dhd */ -typedef struct wl_nan_sub_cmd wl_nan_sub_cmd_t; -typedef int (cmd_handler_t)(void *wl, const wl_nan_sub_cmd_t *cmd, char **argv); -/* nan cmd list entry */ -struct wl_nan_sub_cmd { - char *name; - uint8 version; /* cmd version */ - uint16 id; /* id for the dongle f/w switch/case */ - uint16 type; /* base type of argument */ - cmd_handler_t *handler; /* cmd handler */ +/* + * Bit 0 : If set to 1, means event uses nan bsscfg, + * otherwise uses infra bsscfg. 
Default is using infra bsscfg + */ +#define WL_NAN_CTRL_ROUTE_EVENT_VIA_NAN_BSSCFG 0x1 +/* If set, discovery beacons are transmitted on 2G band */ +#define WL_NAN_CTRL_DISC_BEACON_TX_2G 0x2 +/* If set, sync beacons are transmitted on 2G band */ +#define WL_NAN_CTRL_SYNC_BEACON_TX_2G 0x4 +/* If set, discovery beacons are transmitted on 5G band */ +#define WL_NAN_CTRL_DISC_BEACON_TX_5G 0x8 +/* If set, sync beacons are transmitted on 5G band */ +#define WL_NAN_CTRL_SYNC_BEACON_TX_5G 0x10 +/* If set, auto datapath responses will be sent by FW */ +#define WL_NAN_CTRL_AUTO_DPRESP 0x20 +/* If set, auto datapath confirms will be sent by FW */ +#define WL_NAN_CTRL_AUTO_DPCONF 0x40 + +/* Value when all host-configurable bits set */ +#define WL_NAN_CTRL_MAX_MASK 0xFFFFFF +#define WL_NAN_CFG_CTRL_FW_BITS 8 + +/* Bit 31: + * If set - indicates that NAN initialization is successful + * NOTE: This is a ready-only bit. All sets to this are masked off + */ +#define WL_NAN_PROTO_INIT_DONE 0x80000000 +#define WL_NAN_GET_PROTO_INIT_STATUS(x) \ + (((x) >> 31) & 1) +#define WL_NAN_CLEAR_PROTO_INIT_STATUS(x) \ + ((x) &= ~WL_NAN_PROTO_INIT_DONE) +#define WL_NAN_SET_PROTO_INIT_STATUS(x) \ + ((x) |= (1 << 31)) + +#define WL_NAN_IOCTL_VERSION 0x2 +/* < some sufficient ioc buff size for our module */ +#define WL_NAN_IOC_BUFSZ 256 +/* some sufficient ioc buff size for dump commands */ +#define WL_NAN_IOC_BUFSZ_EXT 1024 +#define WL_NAN_MAX_SIDS_IN_BEACONS 127 /* Max allowed SIDs */ +#define WL_NAN_MASTER_RANK_LEN 8 +#define WL_NAN_RANGE_LIMITED 0x0040 /* Publish/Subscribe flags */ + +/** The service hash (service id) is exactly this many bytes. */ +#define WL_NAN_SVC_HASH_LEN 6 +#define WL_NAN_HASHES_PER_BLOOM 4 /** Number of hash functions per bloom filter */ + +/* no. 
of max last disc results */ +#define WL_NAN_MAX_DISC_RESULTS 3 + +/* Max len of Rx and Tx filters */ +#define WL_NAN_MAX_SVC_MATCH_FILTER_LEN 255 + +/* Max service name len */ +#define WL_NAN_MAX_SVC_NAME_LEN 32 + +/* Type of Data path connection */ +#define WL_NAN_DP_TYPE_UNICAST 0 +#define WL_NAN_DP_TYPE_MULTICAST 1 + +/* MAX security params length PMK field */ +#define WL_NAN_NCS_SK_PMK_LEN 32 + +/* Post disc attr ID type */ +typedef uint8 wl_nan_post_disc_attr_id_t; + +/* + * Component IDs + */ +typedef enum { + WL_NAN_COMPID_CONFIG = 1, + WL_NAN_COMPID_ELECTION = 2, + WL_NAN_COMPID_SD = 3, + WL_NAN_COMPID_TIMESYNC = 4, + WL_NAN_COMPID_DATA_PATH = 5, + WL_NAN_COMPID_DEBUG = 15 /* Keep this at the end */ +} wl_nan_comp_id_t; + +#define WL_NAN_COMP_SHIFT 8 +#define WL_NAN_COMP_MASK(_c) (0x0F & ((uint8)(_c))) +#define WL_NAN_COMP_ID(_c) (WL_NAN_COMP_MASK(_c) << WL_NAN_COMP_SHIFT) + +/* NAN Events */ + +/** Instance ID type (unique identifier) */ +typedef uint8 wl_nan_instance_id_t; + +/* Publish sent for a subscribe */ +/* WL_NAN_EVENT_REPLIED */ + +typedef struct wl_nan_ev_replied { + struct ether_addr sub_mac; /* Subscriber MAC */ + wl_nan_instance_id_t pub_id; /* Publisher Instance ID */ + uint8 sub_id; /* Subscriber ID */ + int8 sub_rssi; /* Subscriber RSSI */ + uint8 pad[3]; +} wl_nan_ev_replied_t; + +typedef struct wl_nan_event_replied { + struct ether_addr sub_mac; /* Subscriber MAC */ + wl_nan_instance_id_t pub_id; /* Publisher Instance ID */ + uint8 sub_id; /* Subscriber ID */ + int8 sub_rssi; /* Subscriber RSSI */ + uint8 attr_num; + uint16 attr_list_len; /* sizeof attributes attached to payload */ + uint8 attr_list[0]; /* attributes payload */ +} wl_nan_event_replied_t; + +/* Subscribe or Publish instance Terminated */ + +/* WL_NAN_EVENT_TERMINATED */ + +#define NAN_SD_TERM_REASON_TIMEOUT 1 +#define NAN_SD_TERM_REASON_HOSTREQ 2 +#define NAN_SD_TERM_REASON_FWTERM 3 +#define NAN_SD_TERM_REASON_FAIL 4 + +typedef struct wl_nan_ev_terminated { + uint8 
instance_id; /* publish / subscribe instance id */ + uint8 reason; /* 1=timeout, 2=Host/IOVAR, 3=FW Terminated 4=Failure */ + uint8 svctype; /* 0 - Publish, 0x1 - Subscribe */ + uint8 pad; /* Align */ +} wl_nan_ev_terminated_t; + +/* Follow up received against a pub / subscr */ +/* WL_NAN_EVENT_RECEIVE */ + +typedef struct wl_nan_ev_receive { + struct ether_addr remote_addr; /* Peer NAN device MAC */ + uint8 local_id; /* Local subscribe or publish ID */ + uint8 remote_id; /* Remote subscribe or publish ID */ + int8 fup_rssi; + uint8 attr_num; + uint16 attr_list_len; /* sizeof attributes attached to payload */ + uint8 attr_list[0]; /* attributes payload */ +} wl_nan_ev_receive_t; + +/* + * TLVs - Below XTLV definitions will be deprecated + * in due course (soon as all other branches update + * to the comp ID based XTLVs listed below). + */ +enum wl_nan_cmd_xtlv_id { + WL_NAN_XTLV_MAC_ADDR = 0x120, + WL_NAN_XTLV_MATCH_RX = 0x121, + WL_NAN_XTLV_MATCH_TX = 0x122, + WL_NAN_XTLV_SVC_INFO = 0x123, + WL_NAN_XTLV_SVC_NAME = 0x124, + WL_NAN_XTLV_SR_FILTER = 0x125, + WL_NAN_XTLV_FOLLOWUP = 0x126, + WL_NAN_XTLV_SVC_LIFE_COUNT = 0x127, + WL_NAN_XTLV_AVAIL = 0x128, + WL_NAN_XTLV_SDF_RX = 0x129, + WL_NAN_XTLV_SDE_CONTROL = 0x12a, + WL_NAN_XTLV_SDE_RANGE_LIMIT = 0x12b, + WL_NAN_XTLV_NAN_AF = 0x12c, + WL_NAN_XTLV_SD_TERMINATE = 0x12d, + WL_NAN_XTLV_CLUSTER_ID = 0x12e, + WL_NAN_XTLV_PEER_RSSI = 0x12f, + WL_NAN_XTLV_BCN_RX = 0x130, + WL_NAN_XTLV_REPLIED = 0x131, /* Publish sent for a subscribe */ + WL_NAN_XTLV_RECEIVED = 0x132, /* FUP Received */ + WL_NAN_XTLV_DISC_RESULTS = 0x133 /* Discovery results */ }; -/* container for nan iovtls & events */ -typedef BWL_PRE_PACKED_STRUCT struct wl_nan_ioc { - uint16 version; /* interface command or event version */ - uint16 id; /* nan ioctl cmd ID */ - uint16 len; /* total length of all tlv records in data[] */ - uint16 pad; /* pad to be 32 bit aligment */ - uint8 data [1]; /* var len payload of bcm_xtlv_t type */ -} BWL_POST_PACKED_STRUCT 
wl_nan_ioc_t; +#define WL_NAN_CMD_GLOBAL 0x00 +#define WL_NAN_CMD_CFG_COMP_ID 0x01 +#define WL_NAN_CMD_ELECTION_COMP_ID 0x02 +#define WL_NAN_CMD_SD_COMP_ID 0x03 +#define WL_NAN_CMD_SYNC_COMP_ID 0x04 +#define WL_NAN_CMD_DATA_COMP_ID 0x05 +#define WL_NAN_CMD_DAM_COMP_ID 0x06 +#define WL_NAN_CMD_RANGE_COMP_ID 0x07 +#define WL_NAN_CMD_DBG_COMP_ID 0x0f -typedef struct wl_nan_status { - uint8 inited; - uint8 joined; - uint8 role; - uint8 hop_count; - uint32 chspec; - uint8 amr[8]; /* Anchor Master Rank */ - uint32 cnt_pend_txfrm; /* pending TX frames */ - uint32 cnt_bcn_tx; /* TX disc/sync beacon count */ - uint32 cnt_bcn_rx; /* RX disc/sync beacon count */ - uint32 cnt_svc_disc_tx; /* TX svc disc frame count */ - uint32 cnt_svc_disc_rx; /* RX svc disc frame count */ - struct ether_addr cid; - uint32 chspec_5g; -} wl_nan_status_t; +#define WL_NAN_CMD_COMP_SHIFT 8 +#define NAN_CMD(x, y) (((x) << WL_NAN_CMD_COMP_SHIFT) | (y)) -typedef struct wl_nan_count { - uint32 cnt_bcn_tx; /* TX disc/sync beacon count */ - uint32 cnt_bcn_rx; /* RX disc/sync beacon count */ - uint32 cnt_svc_disc_tx; /* TX svc disc frame count */ - uint32 cnt_svc_disc_rx; /* RX svc disc frame count */ -} wl_nan_count_t; +/* + * Module based NAN TLV IDs + */ +typedef enum wl_nan_tlv { -/* various params and ctl swithce for nan_debug instance */ -typedef struct nan_debug_params { - uint8 enabled; /* runtime debuging enabled */ - uint8 collect; /* enables debug svc sdf monitor mode */ - uint16 cmd; /* debug cmd to perform a debug action */ - uint32 msglevel; /* msg level if enabled */ - uint16 status; -} nan_debug_params_t; + WL_NAN_XTLV_CFG_MATCH_RX = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x01), + WL_NAN_XTLV_CFG_MATCH_TX = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x02), + WL_NAN_XTLV_CFG_SR_FILTER = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x03), + WL_NAN_XTLV_CFG_SVC_NAME = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x04), + WL_NAN_XTLV_CFG_NAN_STATUS = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x05), + WL_NAN_XTLV_CFG_SVC_LIFE_COUNT = 
NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x06), + WL_NAN_XTLV_CFG_SVC_HASH = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x07), + WL_NAN_XTLV_CFG_SEC_CSID = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x08), /* Security CSID */ + WL_NAN_XTLV_CFG_SEC_PMK = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x09), /* Security PMK */ + WL_NAN_XTLV_CFG_SEC_PMKID = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0A), -/* time slot */ -#define NAN_MAX_TIMESLOT 32 -typedef struct nan_timeslot { - uint32 abitmap; /* available bitmap */ - uint32 chanlist[NAN_MAX_TIMESLOT]; -} nan_timeslot_t; + WL_NAN_XTLV_SD_SVC_INFO = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x01), + WL_NAN_XTLV_SD_FOLLOWUP = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x02), + WL_NAN_XTLV_SD_SDF_RX = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x03), + WL_NAN_XTLV_SD_SDE_CONTROL = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x04), + WL_NAN_XTLV_SD_SDE_RANGE_LIMIT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x05), + WL_NAN_XTLV_SD_NAN_AF = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x06), + WL_NAN_XTLV_SD_TERM = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x07), + WL_NAN_XTLV_SD_REPLIED = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x08), /* Pub sent */ + WL_NAN_XTLV_SD_FUP_RECEIVED = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x09), /* FUP Received */ + WL_NAN_XTLV_SD_DISC_RESULTS = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0A), /* Pub RX */ -/* nan passive scan params */ -#define NAN_SCAN_MAX_CHCNT 8 -typedef struct nan_scan_params { - uint16 scan_time; - uint16 home_time; - uint16 ms_intvl; /* interval between merge scan */ - uint16 ms_dur; /* duration of merge scan */ - uint16 chspec_num; - uint8 pad[2]; - chanspec_t chspec_list[NAN_SCAN_MAX_CHCNT]; /* act. 
used 3, 5 rfu */ -} nan_scan_params_t; + WL_NAN_XTLV_SYNC_BCN_RX = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x01), + WL_NAN_XTLV_DATA_DP_END = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x01), + WL_NAN_XTLV_DATA_DP_INFO = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x02), + WL_NAN_XTLV_DATA_DP_SEC_INST = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x03), + + WL_NAN_XTLV_RANGE_INFO = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x01) +} wl_nan_tlv_t; + +enum wl_nan_sub_cmd_xtlv_id { + + /* Special command - Tag zero */ + WL_NAN_CMD_GLB_NAN_VER = NAN_CMD(WL_NAN_CMD_GLOBAL, 0x00), + + /* nan cfg sub-commands */ + + WL_NAN_CMD_CFG_NAN_INIT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x01), + WL_NAN_CMD_CFG_ROLE = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x02), + WL_NAN_CMD_CFG_HOP_CNT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x03), + WL_NAN_CMD_CFG_HOP_LIMIT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x04), + WL_NAN_CMD_CFG_WARMUP_TIME = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x05), + WL_NAN_CMD_CFG_STATUS = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x06), + WL_NAN_CMD_CFG_OUI = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x07), + WL_NAN_CMD_CFG_COUNT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x08), + WL_NAN_CMD_CFG_CLEARCOUNT = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x09), + WL_NAN_CMD_CFG_CHANNEL = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0A), + WL_NAN_CMD_CFG_BAND = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0B), + WL_NAN_CMD_CFG_CID = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0C), + WL_NAN_CMD_CFG_IF_ADDR = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0D), + WL_NAN_CMD_CFG_BCN_INTERVAL = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0E), + WL_NAN_CMD_CFG_SDF_TXTIME = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0F), + WL_NAN_CMD_CFG_SID_BEACON = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x10), + WL_NAN_CMD_CFG_DW_LEN = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x11), + WL_NAN_CMD_CFG_AVAIL = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x12), + WL_NAN_CMD_CFG_WFA_TM = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x13), + WL_NAN_CMD_CFG_EVENT_MASK = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x14), + WL_NAN_CMD_CFG_NAN_CONFIG = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x15), + WL_NAN_CMD_CFG_NAN_ENAB = 
NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x16), + WL_NAN_CMD_CFG_ULW = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x17), + WL_NAN_CMD_CFG_MAX = WL_NAN_CMD_CFG_NAN_ENAB, + /* Add new commands before and update */ + + /* nan election sub-commands */ + WL_NAN_CMD_ELECTION_JOIN = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x04), /* Deprecated */ + WL_NAN_CMD_ELECTION_STOP = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x07), /* Deprecate */ + + WL_NAN_CMD_ELECTION_HOST_ENABLE = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x01), + WL_NAN_CMD_ELECTION_METRICS_CONFIG = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x02), + WL_NAN_CMD_ELECTION_METRICS_STATE = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x03), + WL_NAN_CMD_ELECTION_LEAVE = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x03), + WL_NAN_CMD_ELECTION_MERGE = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x04), + WL_NAN_CMD_ELECTION_ADVERTISERS = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x05), + WL_NAN_CMD_ELECTION_RSSI_THRESHOLD = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x06), + WL_NAN_CMD_ELECTION_MAX = WL_NAN_CMD_ELECTION_RSSI_THRESHOLD, + /* New commands go before and update */ + + /* nan SD sub-commands */ + WL_NAN_CMD_SD_PARAMS = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x01), + WL_NAN_CMD_SD_PUBLISH = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x02), + WL_NAN_CMD_SD_PUBLISH_LIST = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x03), + WL_NAN_CMD_SD_CANCEL_PUBLISH = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x04), + WL_NAN_CMD_SD_SUBSCRIBE = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x05), + WL_NAN_CMD_SD_SUBSCRIBE_LIST = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x06), + WL_NAN_CMD_SD_CANCEL_SUBSCRIBE = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x07), + WL_NAN_CMD_SD_VND_INFO = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x08), + WL_NAN_CMD_SD_STATS = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x09), + WL_NAN_CMD_SD_TRANSMIT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0A), + WL_NAN_CMD_SD_FUP_TRANSMIT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0B), + WL_NAN_CMD_SD_CONNECTION = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0C), + WL_NAN_CMD_SD_SHOW = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0D), + WL_NAN_CMD_SD_MAX = 
WL_NAN_CMD_SD_SHOW, + + /* nan time sync sub-commands */ + + WL_NAN_CMD_SYNC_SOCIAL_CHAN = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x01), + WL_NAN_CMD_SYNC_AWAKE_DWS = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x02), + WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x03), + WL_NAN_CMD_SYNC_MAX = WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD, + + /* nan2 commands */ + WL_NAN_CMD_DATA_CONFIG = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x01), + WL_NAN_CMD_DATA_RSVD02 = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x02), + WL_NAN_CMD_DATA_RSVD03 = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x03), + WL_NAN_CMD_DATA_DATAREQ = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x04), + WL_NAN_CMD_DATA_DATARESP = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x05), + WL_NAN_CMD_DATA_DATAEND = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x06), + WL_NAN_CMD_DATA_SCHEDUPD = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x07), + WL_NAN_CMD_DATA_RSVD08 = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x08), + WL_NAN_CMD_DATA_CAP = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x9), + WL_NAN_CMD_DATA_STATUS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0A), + WL_NAN_CMD_DATA_STATS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0B), + WL_NAN_CMD_DATA_RSVD0C = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0C), + WL_NAN_CMD_DATA_NDP_SHOW = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0D), + WL_NAN_CMD_DATA_DATACONF = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0E), + WL_NAN_CMD_DATA_MIN_TX_RATE = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0F), + WL_NAN_CMD_DATA_MAX_PEERS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x10), + WL_NAN_CMD_DATA_PATH_MAX = WL_NAN_CMD_DATA_MAX_PEERS, /* New ones before and update */ + + /* nan dam sub-commands */ + WL_NAN_CMD_DAM_CFG = NAN_CMD(WL_NAN_CMD_DAM_COMP_ID, 0x01), + WL_NAN_CMD_DAM_MAX = WL_NAN_CMD_DAM_CFG, /* New ones before and update */ + + /* nan2.0 ranging commands */ + WL_NAN_CMD_RANGE_REQUEST = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x01), + WL_NAN_CMD_RANGE_AUTO = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x02), + WL_NAN_CMD_RANGE_RESPONSE = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x03), + WL_NAN_CMD_RANGE_CANCEL = 
NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x04), + + /* nan debug sub-commands */ + WL_NAN_CMD_DBG_SCAN_PARAMS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x01), + WL_NAN_CMD_DBG_SCAN = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x02), + WL_NAN_CMD_DBG_SCAN_RESULTS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x03), + /* This is now moved under CFG */ + WL_NAN_CMD_DBG_EVENT_MASK = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x04), + WL_NAN_CMD_DBG_EVENT_CHECK = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x05), + WL_NAN_CMD_DBG_DUMP = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x06), + WL_NAN_CMD_DBG_CLEAR = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x07), + WL_NAN_CMD_DBG_RSSI = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x08), + WL_NAN_CMD_DBG_DEBUG = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x09), + WL_NAN_CMD_DBG_TEST1 = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0A), + WL_NAN_CMD_DBG_TEST2 = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0B), + WL_NAN_CMD_DBG_TEST3 = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0C), + WL_NAN_CMD_DBG_DISC_RESULTS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0D), + WL_NAN_CMD_DBG_STATS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0E), + WL_NAN_CMD_DBG_LEVEL = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0F), + WL_NAN_CMD_DBG_MAX = WL_NAN_CMD_DBG_LEVEL /* New ones before and update */ +}; + +/** status - TBD BCME_ vs NAN status - range reserved for BCME_ */ +enum { + /* add new status here... 
*/ + WL_NAN_E_SEC_SA_NOTFOUND = -2076, + WL_NAN_E_BSSCFG_NOTFOUND = -2075, + WL_NAN_E_SCB_NOTFOUND = -2074, + WL_NAN_E_NCS_SK_KDESC_TYPE = -2073, + WL_NAN_E_NCS_SK_KEY_DESC_VER = -2072, /* key descr ver */ + WL_NAN_E_NCS_SK_KEY_TYPE = -2071, /* key descr type */ + WL_NAN_E_NCS_SK_KEYINFO_FAIL = -2070, /* key info (generic) */ + WL_NAN_E_NCS_SK_KEY_LEN = -2069, /* key len */ + WL_NAN_E_NCS_SK_KDESC_NOT_FOUND = -2068, /* key desc not found */ + WL_NAN_E_NCS_SK_INVALID_PARAMS = -2067, /* invalid args */ + WL_NAN_E_NCS_SK_KDESC_INVALID = -2066, /* key descr is not valid */ + WL_NAN_E_NCS_SK_NONCE_MISMATCH = -2065, + WL_NAN_E_NCS_SK_KDATA_SAVE_FAIL = -2064, /* not able to save key data */ + WL_NAN_E_NCS_SK_AUTH_TOKEN_CALC_FAIL = -2063, + WL_NAN_E_NCS_SK_PTK_CALC_FAIL = -2062, + WL_NAN_E_INVALID_STARTOFFSET = -2061, + WL_NAN_E_BAD_NA_ENTRY_TYPE = -2060, + WL_NAN_E_INVALID_CHANBMP = -2059, + WL_NAN_E_INVALID_OP_CLASS = -2058, + WL_NAN_E_NO_IES = -2057, + WL_NAN_E_NO_PEER_ENTRY_AVAIL = -2056, + WL_NAN_E_INVALID_PEER = -2055, + WL_NAN_E_PEER_EXISTS = -2054, + WL_NAN_E_PEER_NOTFOUND = -2053, + WL_NAN_E_NO_MEM = -2052, + WL_NAN_E_INVALID_OPTION = -2051, + WL_NAN_E_INVALID_BAND = -2050, + WL_NAN_E_INVALID_MAC = -2049, + WL_NAN_E_BAD_INSTANCE = -2048, + WL_NAN_E_NDC_EXISTS = -2047, + WL_NAN_E_NO_NDC_ENTRY_AVAIL = -2046, + WL_NAN_E_INVALID_NDC_ENTRY = -2045, + WL_NAN_E_ERROR = -1, + WL_NAN_E_OK = 0 +}; + +typedef int32 wl_nan_status_t; + +/** nan cmd list entry */ +enum wl_nan_sub_cmd_input_flags { + WL_NAN_SUB_CMD_FLAG_NONE = 0, + WL_NAN_SUB_CMD_FLAG_SKIP = 1, /* Skip to next sub-command on error */ + WL_NAN_SUB_CMD_FLAG_TERMINATE = 2, /* Terminate processing and return */ + WL_NAN_SUB_CMD_FLAG_LAST /* Keep this at the end */ +}; + +/** container for nan events */ +typedef struct wl_nan_ioc { + uint16 version; /**< interface command or event version */ + uint16 id; /**< nan ioctl cmd ID */ + uint16 len; /**< total length of all tlv records in data[] */ + uint16 pad; /**< pad to 
be 32 bit aligment */ + uint8 data []; /**< var len payload of bcm_xtlv_t type */ +} wl_nan_ioc_t; + +/* + * NAN sub-command data structures + */ + +/* + * Config component WL_NAN_CMD_CFG_XXXX sub-commands + * WL_NAN_CMD_CFG_ENABLE + */ +enum wl_nan_config_state { + WL_NAN_CONFIG_STATE_DISABLE = 0, + WL_NAN_CONFIG_STATE_ENABLE = 1 +}; + +typedef int8 wl_nan_config_state_t; + +/* WL_NAN_CMD_CFG_NAN_INIT */ + +typedef uint8 wl_nan_init_t; + +/* WL_NAN_CMD_CFG_NAN_VERSION */ +typedef uint16 wl_nan_ver_t; + +/* WL_NAN_CMD_CFG_NAN_CONFIG */ +typedef uint32 wl_nan_cfg_ctrl_t; + +/* + * WL_NAN_CMD_CFG_BAND, WL_NAN_CMD_CFG_RSSI_THRESHOLD(Get only) + */ +typedef uint8 wl_nan_band_t; + +/* + * WL_NAN_CMD_CFG_ROLE + */ enum wl_nan_role { WL_NAN_ROLE_AUTO = 0, WL_NAN_ROLE_NON_MASTER_NON_SYNC = 1, @@ -6047,7 +8105,549 @@ enum wl_nan_role { WL_NAN_ROLE_MASTER = 3, WL_NAN_ROLE_ANCHOR_MASTER = 4 }; -#define NAN_MASTER_RANK_LEN 8 + +typedef uint8 wl_nan_role_t; + +typedef struct wl_nan_device_state +{ + wl_nan_role_t role; /* Sync Master, Non-Sync Master */ + uint8 state; /* TBD */ + uint8 hopcount; /* Hops to the Anchor Master */ + struct ether_addr immediate_master; /* Master MAC */ + struct ether_addr anchor_master; /* Anchor Master MAC */ + struct ether_addr cluster_id; /* Cluster ID to which this device belongs to */ + uint8 PAD[3]; + uint32 tsf_high; /* NAN Cluster TSFs */ + uint32 tsf_low; +} wl_nan_device_state_t; + +/* + * WL_NAN_CMD_CFG_HOP_CNT, WL_NAN_CMD_CFG_HOP_LIMIT + */ +typedef uint8 wl_nan_hop_count_t; + +/* + * WL_NAN_CMD_CFG_WARMUP_TIME + */ +typedef uint32 wl_nan_warmup_time_ticks_t; + +/* + * WL_NAN_CMD_CFG_RSSI_THRESHOLD + * rssi_close and rssi_mid are used to transition master to non-master + * role by NAN state machine. rssi thresholds corresponding to the band + * will be updated. 
+ */ +/* To be deprecated */ +typedef struct wl_nan_rssi_threshold { + wl_nan_band_t band; + int8 rssi_close; + int8 rssi_mid; + uint8 pad; +} wl_nan_rssi_threshold_t; + +/* WL_NAN_CMD_ELECTION_RSSI_THRESHOLD */ + +typedef struct wl_nan_rssi_thld { + int8 rssi_close_2g; + int8 rssi_mid_2g; + int8 rssi_close_5g; + int8 rssi_mid_5g; +} wl_nan_rssi_thld_t; + +/* WL_NAN_CMD_DATA_MAX_PEERS */ + +typedef uint8 wl_nan_max_peers_t; + +#define NAN_MAX_BANDS 2 +/* + * WL_NAN_CMD_CFG_STATUS + */ +/* Deprecated - Begin */ +typedef struct wl_nan_cfg_status { + uint8 enabled; + uint8 inited; + uint8 joined; + uint8 merged; + uint8 role; + uint8 PAD[3]; + uint32 chspec[2]; + uint8 mr[8]; /**< Master Rank */ + uint8 amr[8]; /**< Anchor Master Rank */ + uint32 cnt_pend_txfrm; /**< pending TX frames */ + uint32 cnt_bcn_tx; /**< TX disc/sync beacon count */ + uint32 cnt_bcn_rx; /**< RX disc/sync beacon count */ + uint32 cnt_svc_disc_tx; /**< TX svc disc frame count */ + uint32 cnt_svc_disc_rx; /**< RX svc disc frame count */ + uint32 ambtt; /**< Anchor master beacon target time */ + struct ether_addr cid; /**< Cluster id */ + uint8 hop_count; /**< Hop count */ + uint8 PAD; +} wl_nan_cfg_status_t; + +typedef struct wl_nan_config_status { + struct ether_addr def_cid; /* Default Cluster id */ + uint8 inited; /* NAN Initialized successfully */ + uint8 enabled; /* NAN Enabled */ + struct ether_addr cur_cid; /* Default Cluster id */ + uint8 joined; /* Joined or started own cluster */ + uint8 role; /* Master, Non Master, NM Sync & Non-Sync */ + chanspec_t chspec[NAN_MAX_BANDS]; /* Channel Spec 2.4G followed by 5G */ + uint8 mr[WL_NAN_MASTER_RANK_LEN]; /* Master Rank */ + uint8 amr[WL_NAN_MASTER_RANK_LEN]; /* Anchor Master Rank */ + uint32 cnt_pend_txfrm; /* Pending Tx Frames */ + uint32 cnt_bcn_tx; /* TX disc/sync beacon count */ + uint32 cnt_bcn_rx; /* RX disc/sync beacon count */ + uint32 cnt_svc_disc_tx; /* TX svc disc frame count */ + uint32 cnt_svc_disc_rx; /* RX svc disc frame count 
*/ + uint32 ambtt; /* Anchor master beacon target time */ + uint8 hop_count; /* Hop count */ + uint8 pad[3]; /* Align */ +} wl_nan_config_status_t; +/* Deprecated - End */ + +typedef enum wl_nan_election_mode { + WL_NAN_ELECTION_RUN_BY_HOST = 1, + WL_NAN_ELECTION_RUN_BY_FW = 2 +} wl_nan_election_mode_t; + +typedef struct wl_nan_conf_status { + struct ether_addr nmi; /* NAN mgmt interface address */ + uint8 enabled; /* NAN is enabled */ + uint8 role; /* Current nan sync role */ + struct ether_addr cid; /* Current Cluster id */ + uint8 social_chans[2]; /* Social channels */ + uint8 mr[8]; /* Master Rank */ + uint8 amr[8]; /* Anchor Master Rank */ + uint32 ambtt; /* Anchor master beacon target time */ + uint32 cluster_tsf_h; /* Current Cluster TSF High */ + uint32 cluster_tsf_l; /* Current Cluster TSF Low */ + uint8 election_mode; /* Election mode, host or firmware */ + uint8 hop_count; /* Current Hop count */ + uint8 pad[2]; +} wl_nan_conf_status_t; + +/* + * WL_NAN_CMD_CFG_OUI + */ +typedef struct wl_nan_oui_type { + uint8 nan_oui[DOT11_OUI_LEN]; + uint8 type; +} wl_nan_oui_type_t; + +/* + * WL_NAN_CMD_CFG_COUNT + */ +typedef struct wl_nan_count { + uint32 cnt_bcn_tx; /**< TX disc/sync beacon count */ + uint32 cnt_bcn_rx; /**< RX disc/sync beacon count */ + uint32 cnt_svc_disc_tx; /**< TX svc disc frame count */ + uint32 cnt_svc_disc_rx; /**< RX svc disc frame count */ +} wl_nan_count_t; +/* + * Election component WL_NAN_CMD_ELECTION_XXXX sub-commands + * WL_NAN_CMD_ELECTION_HOST_ENABLE + */ +enum wl_nan_enable_flags { + WL_NAN_DISABLE_FLAG_HOST_ELECTION = 0, + WL_NAN_ENABLE_FLAG_HOST_ELECTION = 1 +}; + +/* + * 0 - disable host based election + * 1 - enable host based election + */ +typedef uint8 wl_nan_host_enable_t; + +/* + * WL_NAN_CMD_ELECTION_METRICS_CONFIG + */ +/* Set only */ +typedef struct wl_nan_election_metric_config { + uint8 random_factor; /* Configured random factor */ + uint8 master_pref; /* configured master preference */ + uint8 pad[2]; +} 
wl_nan_election_metric_config_t; + +/* + * WL_NAN_CMD_ELECTION_METRICS_STATE + */ +/* Get only */ +typedef struct wl_nan_election_metric_state { + uint8 random_factor; /* random factor used in MIs */ + uint8 master_pref; /* Master advertised in MIs */ + uint8 pad[2]; +} wl_nan_election_metric_state_t; + +/* + * WL_NAN_CMD_ELECTION_LEAVE + * WL_NAN_CMD_ELECTION_STOP + */ +typedef struct ether_addr wl_nan_cluster_id_t; + +/* + * WL_NAN_CMD_ELECTION_JOIN + */ +typedef struct wl_nan_join { + uint8 start_cluster; /* Start a cluster */ + uint8 pad[3]; + wl_nan_cluster_id_t cluster_id; /* Cluster ID to join */ +} wl_nan_join_t; + +/* + * WL_NAN_CMD_ELECTION_MERGE + * 0 - disable cluster merge + * 1 - enable cluster merge + */ +typedef uint8 wl_nan_merge_enable_t; + +/* + * WL_NAN_CMD_CFG_ROLE + * role = 0 means configuration by firmware; otherwise by host + * when host configures role, also need target master address to sync to + */ +#define NAN_SYNC_MASTER_SELF 0 +#define NAN_SYNC_MASTER_AM 1 +#define NAN_SYNC_MASTER_INTERMEDIATE 2 +/* ltsf_h, ltsf_l: + The local TSF timestamp filled in by FW in the WL_NAN_EVENT_BCN_RX event; + rtsf_h, rtsf_l: + The timestamp in the Rx beacon frame, filled in by host + uint32 ambtt: + the amtt in the cluster ID attribute in the Rx beacon frame +*/ +typedef struct nan_sync_master { + uint8 flag; /* 0: self, 1: anchor-master, 2: intermediate master */ + uint8 hop_count; + struct ether_addr addr; + struct ether_addr cluster_id; + chanspec_t channel; /* bcn reception channel */ + uint32 ltsf_h; + uint32 ltsf_l; + uint32 rtsf_h; + uint32 rtsf_l; + uint8 amr[WL_NAN_MASTER_RANK_LEN]; + uint32 ambtt; +} nan_sync_master_t; + +/* NAN advertiser structure */ +/* TODO RSDB: add chspec to indicates core corresponds correct core */ +typedef struct nan_adv_entry { + uint8 age; /* used to remove stale entries */ + uint8 hop_count; + struct ether_addr addr; + struct ether_addr cluster_id; + chanspec_t channel; /* bcn reception channel */ + uint32 ltsf_h; 
+ uint32 ltsf_l; + uint32 rtsf_h; + uint32 rtsf_l; + uint8 amr[WL_NAN_MASTER_RANK_LEN]; + uint32 ambtt; + int8 rssi[NAN_MAX_BANDS]; /* rssi last af was received at */ + int8 last_rssi[NAN_MAX_BANDS]; /* rssi in the last AF */ +} nan_adv_entry_t; + +typedef struct nan_adv_table { + uint8 num_adv; + uint8 adv_size; + uint8 pad[2]; + nan_adv_entry_t adv_nodes[0]; +} nan_adv_table_t; + +typedef struct wl_nan_role_cfg { + wl_nan_role_t cfg_role; + wl_nan_role_t cur_role; + uint8 pad[2]; + nan_sync_master_t target_master; +} wl_nan_role_cfg_t; + +typedef struct wl_nan_role_config { + wl_nan_role_t role; + struct ether_addr target_master; + uint8 pad; +} wl_nan_role_config_t; + +typedef int8 wl_nan_sd_optional_field_types_t; + +/* Flag bits for Publish and Subscribe (wl_nan_sd_params_t flags) */ + +#define WL_NAN_RANGE_LIMITED 0x0040 + +/* Event generation indicator (default is continuous) */ + +#define WL_NAN_MATCH_ONCE 0x100000 +#define WL_NAN_MATCH_NEVER 0x200000 + +/* Bits specific to Publish */ + +#define WL_NAN_PUB_UNSOLICIT 0x1000 /* Unsolicited Tx */ +#define WL_NAN_PUB_SOLICIT 0x2000 /* Solicited Tx */ +#define WL_NAN_PUB_BOTH 0x3000 /* Both the above */ + +#define WL_NAN_PUB_BCAST 0x4000 /* bcast solicited Tx only */ +#define WL_NAN_PUB_EVENT 0x8000 /* Event on each solicited Tx */ +#define WL_NAN_PUB_SOLICIT_PENDING 0x10000 /* Used for one-time solicited Publish */ + +#define WL_NAN_FOLLOWUP 0x20000 /* Follow-up frames */ + +/* Bits specific to Subscribe */ + +#define WL_NAN_SUB_ACTIVE 0x1000 /* Active subscribe mode */ +#define WL_NAN_SUB_MATCH_IF_SVC_INFO 0x2000 /* Service info in publish */ + +#define WL_NAN_TTL_UNTIL_CANCEL 0xFFFFFFFF /* Special values for time to live (ttl) parameter */ + +/* + * Publish - runs until first transmission + * Subscribe - runs until first DiscoveryResult event + */ +#define WL_NAN_TTL_FIRST 0 + +/* + * WL_NAN_CMD_SD_PARAMS + */ +typedef struct wl_nan_sd_params +{ + uint16 length; /* length including options */ + uint8 period; 
/* period of the unsolicited SDF xmission in DWs */ + uint8 pad; + uint8 svc_hash[WL_NAN_SVC_HASH_LEN]; /* Hash for the service name */ + uint8 instance_id; /* Instance of the current service */ + int8 proximity_rssi; /* RSSI limit to Rx subscribe or pub SDF 0 no effect */ + uint32 flags; /* bitmap representing aforesaid optional flags */ + int32 ttl; /* TTL for this instance id, -1 will run till cancelled */ + tlv_t optional[1]; /* optional fields in the SDF as appropriate */ +} wl_nan_sd_params_t; + +/* + * WL_NAN_CMD_SD_PUBLISH_LIST + * WL_NAN_CMD_SD_SUBSCRIBE_LIST + */ +typedef struct wl_nan_service_info +{ + uint8 instance_id; /* Publish instance ID */ + uint8 service_hash[WL_NAN_SVC_HASH_LEN]; /* Hash for service name */ +} wl_nan_service_info_t; + +typedef struct wl_nan_service_list +{ + uint16 id_count; /* Number of registered publish/subscribe services */ + wl_nan_service_info_t list[1]; /* service info defined by nan_service instance */ +} wl_nan_service_list_t; + +/* + * WL_NAN_CMD_CFG_BCN_INTERVAL + */ +typedef uint16 wl_nan_disc_bcn_interval_t; + +/* + * WL_NAN_CMD_CFG_SDF_TXTIME + */ +typedef uint16 wl_nan_svc_disc_txtime_t; + +/* + * WL_NAN_CMD_CFG_STOP_BCN_TX + */ +typedef uint16 wl_nan_stop_bcn_tx_t; + +/* + * WL_NAN_CMD_CFG_SID_BEACON + */ +typedef struct wl_nan_sid_beacon_control { + uint8 sid_enable; /* Flag to indicate the inclusion of Service IDs in Beacons */ + uint8 sid_count; /* Limit for number of SIDs to be included in Beacons */ + uint8 pad[2]; +} wl_nan_sid_beacon_control_t; + +/* + * WL_NAN_CMD_CFG_DW_LEN + */ +typedef uint16 wl_nan_dw_len_t; + +/* + * WL_NAN_CMD_CFG_AWAKE_DW Will be deprecated. 
+ */ +typedef struct wl_nan_awake_dw { + wl_nan_band_t band; /* 0 - b mode 1- a mode */ + uint8 interval; /* 1 or 2 or 4 or 8 or 16 */ + uint16 pad; +} wl_nan_awake_dw_t; + +/* + * WL_NAN_CMD_CFG_AWAKE_DWS + */ +typedef struct wl_nan_awake_dws { + uint8 dw_interval_2g; /* 2G DW interval */ + uint8 dw_interval_5g; /* 5G DW interval */ + uint16 pad; +} wl_nan_awake_dws_t; + +/* WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD */ + +typedef struct wl_nan_rssi_notif_thld { + int8 bcn_rssi_2g; + int8 bcn_rssi_5g; + int16 pad; +} wl_nan_rssi_notif_thld_t; + +/* + * WL_NAN_CMD_CFG_SOCIAL_CHAN + */ +typedef struct wl_nan_social_channels { + uint8 soc_chan_2g; /* 2G social channel */ + uint8 soc_chan_5g; /* 5G social channel */ + uint16 pad; +} wl_nan_social_channels_t; + +/* + * WL_NAN_CMD_SD_CANCEL_PUBLISH + * WL_NAN_CMD_SD_CANCEL_SUBSCRIBE + */ +typedef uint8 wl_nan_instance_id; /* Instance ID of an active publish instance */ + +/* + * WL_NAN_CMD_SD_VND_INFO + */ +typedef struct wl_nan_sd_vendor_info +{ + uint16 length; /* Size in bytes of the payload following this field */ + uint8 data[]; /* Vendor Information */ +} wl_nan_sd_vendor_info_t; + +/* + * WL_NAN_CMD_SD_STATS + */ +typedef struct wl_nan_sd_stats { + uint32 sdftx; + uint32 sdfrx; + uint32 sdsrffail; + uint32 sdrejrssi; + uint32 sdfollowuprx; + uint32 sdsubmatch; + uint32 sdpubreplied; + uint32 sdmftfail1; + uint32 sdmftfail2; + uint32 sdmftfail3; + uint32 sdmftfail4; +} wl_nan_sd_stats_t; + +/* + * WL_NAN_CMD_SD_TRANSMIT + * WL_NAN_CMD_SD_FUP_TRANSMIT + */ +typedef struct wl_nan_sd_transmit { + uint8 local_service_id; /* Sender Service ID */ + uint8 requestor_service_id; /* Destination Service ID */ + struct ether_addr destination_addr; /* Destination MAC */ + uint16 token; /* follow_up_token when a follow-up msg is queued successfully */ + uint8 priority; /* requested relative prio */ + uint8 service_info_len; /* size in bytes of the service info payload */ + uint8 service_info[]; /* Service Info payload */ +} 
wl_nan_sd_transmit_t; + +/* + * WL_NAN_CMD_SYNC_TSRESERVE + */ +/** time slot */ +#define NAN_MAX_TIMESLOT 32 +typedef struct wl_nan_timeslot { + uint32 abitmap; /**< available bitmap */ + uint32 chanlist[NAN_MAX_TIMESLOT]; +} wl_nan_timeslot_t; + +/* + * Deprecated + * + * WL_NAN_CMD_SYNC_TSRELEASE + */ +typedef uint32 wl_nan_ts_bitmap_t; + +/* nan passive scan params */ +#define NAN_SCAN_MAX_CHCNT 8 +typedef struct wl_nan_scan_params { + uint16 scan_time; + uint16 home_time; + uint16 ms_intvl; /**< interval between merge scan */ + uint16 ms_dur; /**< duration of merge scan */ + uint16 chspec_num; + uint8 pad[2]; + chanspec_t chspec_list[NAN_SCAN_MAX_CHCNT]; /**< act. used 3, 5 rfu */ +} wl_nan_scan_params_t; + +/* + * WL_NAN_CMD_DBG_SCAN + */ +typedef struct wl_nan_dbg_scan { + struct ether_addr cid; + uint8 pad[2]; +} wl_nan_dbg_scan_t; + +/* NAN_DBG_LEVEL */ +typedef struct wl_nan_dbg_level { + uint32 nan_err_level; /* for Error levels */ + uint32 nan_dbg_level; /* for bebug logs and trace */ + uint32 nan_info_level; /* for dumps like prhex */ +} wl_nan_dbg_level_t; + +/* + * WL_NAN_CMD_DBG_EVENT_MASK + */ +typedef uint32 wl_nan_event_mask_t; + +/* + * WL_NAN_CMD_DBG_EVENT_CHECK + */ +typedef uint8 wl_nan_dbg_ifname[BCM_MSG_IFNAME_MAX]; + +/* + * WL_NAN_CMD_DBG_DUMP + * WL_NAN_CMD_DBG_CLEAR + */ +enum wl_nan_dbg_dump_type { + WL_NAN_DBG_DT_RSSI_DATA = 1, + WL_NAN_DBG_DT_STATS_DATA = 2, + /* + * Additional enums before this line + */ + WL_NAN_DBG_DT_INVALID +}; +typedef int8 wl_nan_dbg_dump_type_t; + +/** various params and ctl swithce for nan_debug instance */ +/* + * WL_NAN_CMD_DBG_DEBUG + */ +typedef struct wl_nan_debug_params { + uint16 cmd; /**< debug cmd to perform a debug action */ + uint16 status; + uint32 msglevel; /**< msg level if enabled */ + uint8 enabled; /**< runtime debuging enabled */ + uint8 collect; + uint8 PAD[2]; +} wl_nan_debug_params_t; + + +typedef struct wl_nan_sched_svc_timeslot_s { + uint32 abitmap; /* availability bitmap */ + uint32 
chanlist[NAN_MAX_TIMESLOT]; + uint8 res; /* resolution: 0 = 16ms, 1 = 32ms, 2 = 64ms 3 = reserved. REfer NAN spec */ + uint8 mapid; /* mapid from NAN spec. Used to differentiate 2G Vs 5G band */ + uint8 PAD[2]; +} wl_nan_sched_svc_timeslot_t; + + +/* nan passive scan params */ +#define NAN_SCAN_MAX_CHCNT 8 +typedef struct nan_scan_params { + uint16 scan_time; + uint16 home_time; + uint16 ms_intvl; /**< interval between merge scan */ + uint16 ms_dur; /**< duration of merge scan */ + uint16 chspec_num; + uint8 pad[2]; + chanspec_t chspec_list[NAN_SCAN_MAX_CHCNT]; /**< act. used 3, 5 rfu */ +} nan_scan_params_t; + /* nan cmd IDs */ enum wl_nan_cmds { /* nan cfg /disc & dbg ioctls */ @@ -6061,6 +8661,7 @@ enum wl_nan_cmds { WL_NAN_CMD_TSSCHEDULE = 8, WL_NAN_CMD_TSRELEASE = 9, WL_NAN_CMD_OUI = 10, + WL_NAN_CMD_OOB_AF = 11, WL_NAN_CMD_COUNT = 15, WL_NAN_CMD_CLEARCOUNT = 16, @@ -6088,144 +8689,58 @@ enum wl_nan_cmds { WL_NAN_CMD_TEST1 = 61, WL_NAN_CMD_TEST2 = 62, WL_NAN_CMD_TEST3 = 63, - WL_NAN_CMD_DISC_RESULTS = 64 + WL_NAN_CMD_DISC_RESULTS = 64, + /* nan 2.0 data path commands */ + WL_NAN_CMD_DATAPATH = 65 }; -/* - * tlv IDs uniquely identifies cmd parameters - * packed into wl_nan_ioc_t container - */ -enum wl_nan_cmd_xtlv_id { - /* 0x00 ~ 0xFF: standard TLV ID whose data format is the same as NAN attribute TLV */ - WL_NAN_XTLV_ZERO = 0, /* used as tlv buf end marker */ -#ifdef NAN_STD_TLV /* rfu, don't use yet */ - WL_NAN_XTLV_MASTER_IND = 1, /* == NAN_ATTR_MASTER_IND, */ - WL_NAN_XTLV_CLUSTER = 2, /* == NAN_ATTR_CLUSTER, */ - WL_NAN_XTLV_VENDOR = 221, /* == NAN_ATTR_VENDOR, */ -#endif - /* 0x02 ~ 0xFF: reserved. 
In case to use with the same data format as NAN attribute TLV */ - /* 0x100 ~ : private TLV ID defined just for NAN command */ - /* common types */ - WL_NAN_XTLV_MAC_ADDR = 0x102, /* used in various cmds */ - WL_NAN_XTLV_REASON = 0x103, - WL_NAN_XTLV_ENABLED = 0x104, - /* explicit types, primarily for discovery engine iovars */ - WL_NAN_XTLV_SVC_PARAMS = 0x120, /* Contains required params: wl_nan_disc_params_t */ - WL_NAN_XTLV_MATCH_RX = 0x121, /* Matching filter to evaluate on receive */ - WL_NAN_XTLV_MATCH_TX = 0x122, /* Matching filter to send */ - WL_NAN_XTLV_SVC_INFO = 0x123, /* Service specific info */ - WL_NAN_XTLV_SVC_NAME = 0x124, /* Optional UTF-8 service name, for debugging. */ - WL_NAN_XTLV_INSTANCE_ID = 0x125, /* Identifies unique publish or subscribe instance */ - WL_NAN_XTLV_PRIORITY = 0x126, /* used in transmit cmd context */ - WL_NAN_XTLV_REQUESTOR_ID = 0x127, /* Requestor instance ID */ - WL_NAN_XTLV_VNDR = 0x128, /* Vendor specific attribute */ - WL_NAN_XTLV_SR_FILTER = 0x129, /* Service Response Filter */ - WL_NAN_XTLV_FOLLOWUP = 0x130, /* Service Info for Follow-Up SDF */ - WL_NAN_XTLV_PEER_INSTANCE_ID = 0x131, /* Used to parse remote instance Id */ - /* explicit types, primarily for NAN MAC iovars */ - WL_NAN_XTLV_DW_LEN = 0x140, /* discovery win length */ - WL_NAN_XTLV_BCN_INTERVAL = 0x141, /* beacon interval, both sync and descovery bcns? 
*/ - WL_NAN_XTLV_CLUSTER_ID = 0x142, - WL_NAN_XTLV_IF_ADDR = 0x143, - WL_NAN_XTLV_MC_ADDR = 0x144, - WL_NAN_XTLV_ROLE = 0x145, - WL_NAN_XTLV_START = 0x146, - - WL_NAN_XTLV_MASTER_PREF = 0x147, - WL_NAN_XTLV_DW_INTERVAL = 0x148, - WL_NAN_XTLV_PTBTT_OVERRIDE = 0x149, - /* nan status command xtlvs */ - WL_NAN_XTLV_MAC_INITED = 0x14a, - WL_NAN_XTLV_MAC_ENABLED = 0x14b, - WL_NAN_XTLV_MAC_CHANSPEC = 0x14c, - WL_NAN_XTLV_MAC_AMR = 0x14d, /* anchormaster rank u8 amr[8] */ - WL_NAN_XTLV_MAC_HOPCNT = 0x14e, - WL_NAN_XTLV_MAC_AMBTT = 0x14f, - WL_NAN_XTLV_MAC_TXRATE = 0x150, - WL_NAN_XTLV_MAC_STATUS = 0x151, /* xtlv payload is nan_status_t */ - WL_NAN_XTLV_NAN_SCANPARAMS = 0x152, /* payload is nan_scan_params_t */ - WL_NAN_XTLV_DEBUGPARAMS = 0x153, /* payload is nan_scan_params_t */ - WL_NAN_XTLV_SUBSCR_ID = 0x154, /* subscriber id */ - WL_NAN_XTLV_PUBLR_ID = 0x155, /* publisher id */ - WL_NAN_XTLV_EVENT_MASK = 0x156, - WL_NAN_XTLV_MASTER_RANK = 0x158, - WL_NAN_XTLV_WARM_UP_TIME = 0x159, - WL_NAN_XTLV_PM_OPTION = 0x15a, - WL_NAN_XTLV_OUI = 0x15b, /* NAN OUI */ - WL_NAN_XTLV_MAC_COUNT = 0x15c, /* xtlv payload is nan_count_t */ - /* nan timeslot management */ - WL_NAN_XTLV_TSRESERVE = 0x160, - WL_NAN_XTLV_TSRELEASE = 0x161, - WL_NAN_XTLV_IDLE_DW_TIMEOUT = 0x162, - WL_NAN_XTLV_IDLE_DW_LEN = 0x163, - WL_NAN_XTLV_RND_FACTOR = 0x164, - WL_NAN_XTLV_SVC_DISC_TXTIME = 0x165, /* svc disc frame tx time in DW */ - WL_NAN_XTLV_OPERATING_BAND = 0x166, - WL_NAN_XTLV_STOP_BCN_TX = 0x167, - WL_NAN_XTLV_CONCUR_SCAN = 0x168, - WL_NAN_XTLV_DUMP_CLR_TYPE = 0x175, /* wl nan dump/clear subtype */ - WL_NAN_XTLV_PEER_RSSI = 0x176, /* xtlv payload for wl nan dump rssi */ - WL_NAN_XTLV_MAC_CHANSPEC_1 = 0x17A, /* to get chanspec[1] */ - WL_NAN_XTLV_DISC_RESULTS = 0x17B, /* get disc results */ - WL_NAN_XTLV_MAC_STATS = 0x17C /* xtlv payload for wl nan dump stats */ +/* NAN DP interface commands */ +enum wl_nan_dp_cmds { + /* nan 2.0 ioctls */ + WL_NAN_CMD_DP_CAP = 1000, + WL_NAN_CMD_DP_CONFIG = 1001, + 
WL_NAN_CMD_DP_CREATE = 1002, + WL_NAN_CMD_DP_AUTO_CONNECT = 1003, + WL_NAN_CMD_DP_DATA_REQ = 1004, + WL_NAN_CMD_DP_DATA_RESP = 1005, + WL_NAN_CMD_DP_SCHED_UPD = 1006, + WL_NAN_CMD_DP_END = 1007, + WL_NAN_CMD_DP_CONNECT = 1008, + WL_NAN_CMD_DP_STATUS = 1009 }; -/* Flag bits for Publish and Subscribe (wl_nan_disc_params_t flags) */ -#define WL_NAN_RANGE_LIMITED 0x0040 -/* Bits specific to Publish */ -/* Unsolicited transmissions */ -#define WL_NAN_PUB_UNSOLICIT 0x1000 -/* Solicited transmissions */ -#define WL_NAN_PUB_SOLICIT 0x2000 -#define WL_NAN_PUB_BOTH 0x3000 -/* Set for broadcast solicited transmission - * Do not set for unicast solicited transmission - */ -#define WL_NAN_PUB_BCAST 0x4000 -/* Generate event on each solicited transmission */ -#define WL_NAN_PUB_EVENT 0x8000 -/* Used for one-time solicited Publish functions to indicate transmision occurred */ -#define WL_NAN_PUB_SOLICIT_PENDING 0x10000 -/* Follow-up frames */ -#define WL_NAN_FOLLOWUP 0x20000 -/* Bits specific to Subscribe */ -/* Active subscribe mode (Leave unset for passive) */ -#define WL_NAN_SUB_ACTIVE 0x1000 +/* TODO Should remove this fixed length */ +#define WL_NAN_DATA_SVC_SPEC_INFO_LEN 32 /* arbitrary */ +#define WL_NAN_DP_MAX_SVC_INFO 0xFF +#define WL_NAN_DATA_NDP_INST_SUPPORT 16 -/* Special values for time to live (ttl) parameter */ -#define WL_NAN_TTL_UNTIL_CANCEL 0xFFFFFFFF -/* Publish - runs until first transmission - * Subscribe - runs until first DiscoveryResult event - */ -#define WL_NAN_TTL_FIRST 0 +/* Nan flags */ +#define WL_NAN_DP_FLAG_SVC_INFO (1 << 0) +#define WL_NAN_DP_FLAG_CONFIRM (1 << 1) +#define WL_NAN_DP_FLAG_EXPLICIT_CFM (1 << 2) +#define WL_NAN_DP_FLAG_SECURITY (1 << 3) -/* The service hash (service id) is exactly this many bytes. 
*/ +/* NAN Datapath host status */ +#define WL_NAN_DP_STATUS_ACCEPTED 1 +#define WL_NAN_DP_STATUS_REJECTED 0 + +/* to be done */ +typedef struct wl_nan_dp_cap { + uint8 tbd; +} wl_nan_dp_cap_t; + + +/** The service hash (service id) is exactly this many bytes. */ #define WL_NAN_SVC_HASH_LEN 6 - -/* Number of hash functions per bloom filter */ +/** Number of hash functions per bloom filter */ #define WL_NAN_HASHES_PER_BLOOM 4 - -/* Instance ID type (unique identifier) */ -typedef uint8 wl_nan_instance_id_t; - /* no. of max last disc results */ #define WL_NAN_MAX_DISC_RESULTS 3 -/** Mandatory parameters for publish/subscribe iovars - NAN_TLV_SVC_PARAMS */ -typedef struct wl_nan_disc_params_s { - /* Periodicity of unsolicited/query transmissions, in DWs */ - uint32 period; - /* Time to live in DWs */ - uint32 ttl; - /* Flag bits */ - uint32 flags; - /* Publish or subscribe service id, i.e. hash of the service name */ - uint8 svc_hash[WL_NAN_SVC_HASH_LEN]; - /* pad to make 4 byte alignment, can be used for something else in the future */ - uint8 pad; - /* Publish or subscribe id */ - wl_nan_instance_id_t instance_id; -} wl_nan_disc_params_t; +/* NAN security related defines */ +/* NCS-SK related */ +#define WL_NAN_NCS_SK_PMK_LEN 32 +#define WL_NAN_NCS_SK_PMKID_LEN 16 /* recent discovery results */ typedef struct wl_nan_disc_result_s @@ -6242,20 +8757,65 @@ typedef struct wl_nan_disc_results_s wl_nan_disc_result_t disc_result[WL_NAN_MAX_DISC_RESULTS]; } wl_nan_disc_results_list_t; +/* nan 1.0 events */ +/* To be deprecated - will be replaced by event_disc_result */ +typedef struct wl_nan_ev_disc_result { + wl_nan_instance_id_t pub_id; + wl_nan_instance_id_t sub_id; + struct ether_addr pub_mac; + uint8 opt_tlvs[0]; +} wl_nan_ev_disc_result_t; + +typedef struct wl_nan_event_disc_result { + wl_nan_instance_id_t pub_id; + wl_nan_instance_id_t sub_id; + struct ether_addr pub_mac; + int8 publish_rssi; /* publisher RSSI */ + uint8 attr_num; + uint16 attr_list_len; /* length of 
the all the attributes in the SDF */ + uint8 attr_list[0]; /* list of NAN attributes */ +} wl_nan_event_disc_result_t; + +typedef struct wl_nan_ev_p2p_avail { + struct ether_addr sender; + struct ether_addr p2p_dev_addr; + uint8 dev_role; + uint8 resolution; + uint8 repeat; + uint8 pad[3]; + chanspec_t chanspec; + uint32 avail_bmap; +} wl_nan_ev_p2p_avail_t; + /* -* desovery interface event structures * +* discovery interface event structures * */ +/* mandatory parameters for OOB action frame */ +/* single-shot when bitmap and offset are set to 0; periodic otherwise */ +typedef struct wl_nan_oob_af_params_s +{ + /* bitmap for the 32 timeslots in 512TU dw interval */ + uint32 ts_map; + /* offset from start of dw, in us */ + uint32 tx_offset; + struct ether_addr bssid; + struct ether_addr dest; + uint32 pkt_lifetime; + uint16 payload_len; + uint8 payload[1]; +} wl_nan_oob_af_params_t; + /* NAN Ranging */ /* Bit defines for global flags */ -#define WL_NAN_RANGING_ENABLE 1 /* enable RTT */ -#define WL_NAN_RANGING_RANGED 2 /* Report to host if ranged as target */ +#define WL_NAN_RANGING_ENABLE 1 /**< enable RTT */ +#define WL_NAN_RANGING_RANGED 2 /**< Report to host if ranged as target */ typedef struct nan_ranging_config { - uint32 chanspec; /* Ranging chanspec */ - uint16 timeslot; /* NAN RTT start time slot 1-511 */ - uint16 duration; /* NAN RTT duration in ms */ - struct ether_addr allow_mac; /* peer initiated ranging: the allowed peer mac + uint32 chanspec; /**< Ranging chanspec */ + uint16 timeslot; /**< NAN RTT start time slot 1-511 */ + uint16 duration; /**< NAN RTT duration in ms */ + struct ether_addr allow_mac; /**< peer initiated ranging: the allowed peer mac * address, a unicast (for one peer) or * a broadcast for all. 
Setting it to all zeros * means responding to none,same as not setting @@ -6264,67 +8824,74 @@ typedef struct nan_ranging_config { uint16 flags; } wl_nan_ranging_config_t; -/* list of peers for self initiated ranging */ -/* Bit defines for per peer flags */ -#define WL_NAN_RANGING_REPORT (1<<0) /* Enable reporting range to target */ +/** list of peers for self initiated ranging */ +/** Bit defines for per peer flags */ +#define WL_NAN_RANGING_REPORT (1<<0) /**< Enable reporting range to target */ typedef struct nan_ranging_peer { - uint32 chanspec; /* desired chanspec for this peer */ - uint32 abitmap; /* available bitmap */ - struct ether_addr ea; /* peer MAC address */ - uint8 frmcnt; /* frame count */ - uint8 retrycnt; /* retry count */ - uint16 flags; /* per peer flags, report or not */ + uint32 chanspec; /**< desired chanspec for this peer */ + uint32 abitmap; /**< available bitmap */ + struct ether_addr ea; /**< peer MAC address */ + uint8 frmcnt; /**< frame count */ + uint8 retrycnt; /**< retry count */ + uint16 flags; /**< per peer flags, report or not */ + uint16 PAD; } wl_nan_ranging_peer_t; typedef struct nan_ranging_list { - uint8 count; /* number of MAC addresses */ - uint8 num_peers_done; /* host set to 0, when read, shows number of peers + uint8 count; /**< number of MAC addresses */ + uint8 num_peers_done; /**< host set to 0, when read, shows number of peers * completed, success or fail */ - uint8 num_dws; /* time period to do the ranging, specified in dws */ - uint8 reserve; /* reserved field */ - wl_nan_ranging_peer_t rp[1]; /* variable length array of peers */ + uint8 num_dws; /**< time period to do the ranging, specified in dws */ + uint8 reserve; /**< reserved field */ + wl_nan_ranging_peer_t rp[1]; /**< variable length array of peers */ } wl_nan_ranging_list_t; /* ranging results, a list for self initiated ranging and one for peer initiated ranging */ /* There will be one structure for each peer */ #define WL_NAN_RANGING_STATUS_SUCCESS 1 
-#define WL_NAN_RANGING_STATUS_FAIL 2 +#define WL_NAN_RANGING_STATUS_FAIL 2 #define WL_NAN_RANGING_STATUS_TIMEOUT 3 -#define WL_NAN_RANGING_STATUS_ABORT 4 /* with partial results if sounding count > 0 */ +#define WL_NAN_RANGING_STATUS_ABORT 4 /**< with partial results if sounding count > 0 */ typedef struct nan_ranging_result { - uint8 status; /* 1: Success, 2: Fail 3: Timeout 4: Aborted */ - uint8 sounding_count; /* number of measurements completed (0 = failure) */ - struct ether_addr ea; /* initiator MAC address */ - uint32 chanspec; /* Chanspec where the ranging was done */ - uint32 timestamp; /* 32bits of the TSF timestamp ranging was completed at */ - uint32 distance; /* mean distance in meters expressed as Q4 number. + uint8 status; /**< 1: Success, 2: Fail 3: Timeout 4: Aborted */ + uint8 sounding_count; /**< number of measurements completed (0 = failure) */ + struct ether_addr ea; /**< initiator MAC address */ + uint32 chanspec; /**< Chanspec where the ranging was done */ + uint32 timestamp; /**< 32bits of the TSF timestamp ranging was completed at */ + uint32 distance; /**< mean distance in meters expressed as Q4 number. * Only valid when sounding_count > 0. Examples: * 0x08 = 0.5m * 0x10 = 1m * 0x18 = 1.5m * set to 0xffffffff to indicate invalid number */ - int32 rtt_var; /* standard deviation in 10th of ns of RTTs measured. + int32 rtt_var; /**< standard deviation in 10th of ns of RTTs measured. 
* Only valid when sounding_count > 0 */ - struct ether_addr tgtea; /* target MAC address */ + struct ether_addr tgtea; /**< target MAC address */ + uint8 PAD[2]; } wl_nan_ranging_result_t; typedef struct nan_ranging_event_data { - uint8 mode; /* 1: Result of host initiated ranging */ + uint8 mode; /**< 1: Result of host initiated ranging */ /* 2: Result of peer initiated ranging */ uint8 reserved; - uint8 success_count; /* number of peers completed successfully */ - uint8 count; /* number of peers in the list */ - wl_nan_ranging_result_t rr[1]; /* variable array of ranging peers */ + uint8 success_count; /**< number of peers completed successfully */ + uint8 count; /**< number of peers in the list */ + wl_nan_ranging_result_t rr[1]; /**< variable array of ranging peers */ } wl_nan_ranging_event_data_t; + enum { - WL_NAN_RSSI_DATA = 1, + WL_NAN_STATS_RSSI = 1, WL_NAN_STATS_DATA = 2, + WL_NAN_STATS_DP = 3, /* * ***** ADD before this line **** */ - WL_NAN_INVALID + WL_NAN_STATS_INVALID }; +typedef struct wl_nan_dp_stats { + uint32 tbd; /* TBD */ +} wl_nan_dp_stats_t; typedef struct wl_nan_stats { /* general */ @@ -6346,12 +8913,19 @@ typedef struct wl_nan_stats { uint32 cnt_nms; /* non master sync */ uint32 cnt_nmns; /* non master non sync */ /* TX */ - uint32 cnt_err_txtime; /* error in txtime */ + uint32 cnt_err_txtime; /* txtime in sync bcn frame not a multiple of dw intv */ uint32 cnt_err_unsch_tx; /* tx while not in DW/ disc bcn slot */ uint32 cnt_err_bcn_tx; /* beacon tx error */ uint32 cnt_sync_bcn_tx_miss; /* no. 
of times time delta between 2 cosequetive - * sync beacons is more than dw interval + * sync beacons is more than expected */ + /* MSCH */ + uint32 cnt_err_msch_reg; /* error is Dw/disc reg with msch */ + uint32 cnt_err_wrong_ch_cb; /* count of msch calbacks in wrong channel */ + uint32 cnt_dw_skip; /* count of DW rejected */ + uint32 cnt_disc_skip; /* count of disc bcn rejected */ + uint32 cnt_dw_start_early; /* msch cb not at registered time */ + uint32 cnt_dw_start_late; /* no. of delays in slot start */ /* SCANS */ uint32 cnt_mrg_scan; /* count of merge scans completed */ uint32 cnt_err_ms_rej; /* number of merge scan failed */ @@ -6361,6 +8935,7 @@ typedef struct wl_nan_stats { /* enable/disable */ uint32 cnt_nan_enab; /* no. of times nan feature got enabled */ uint32 cnt_nan_disab; /* no. of times nan feature got disabled */ + uint32 cnt_sync_bcn_rx; /* count of sync bcn rx within DW */ } wl_nan_stats_t; #define WL_NAN_MAC_MAX_NAN_PEERS 6 @@ -6368,8 +8943,9 @@ typedef struct wl_nan_stats { typedef struct wl_nan_nbr_rssi { uint8 rx_chan; /* channel number on which bcn rcvd */ - int rssi_raw; /* received rssi value */ - int rssi_avg; /* normalized rssi value */ + uint8 PAD[3]; + int32 rssi_raw; /* received rssi value */ + int32 rssi_avg; /* normalized rssi value */ } wl_nan_peer_rssi_t; typedef struct wl_nan_peer_rssi_entry { @@ -6389,105 +8965,715 @@ typedef struct wl_nan_nbr_rssi_data { wl_nan_peer_rssi_entry_t peers[1]; /* peers data list */ } wl_nan_peer_rssi_data_t; -/* ********************* end of NAN section ******************************** */ -#endif /* WL_NAN */ +/* WL_NAN_CMD_DBG_DUMP, GET Resp */ +typedef struct wl_nan_dbg_dump_rsp { + wl_nan_dbg_dump_type_t dump_type; /* dump data type */ + uint8 pad[3]; + union { + wl_nan_peer_rssi_data_t peer_rssi; + wl_nan_stats_t nan_stats; + } u; +} wl_nan_dbg_dump_rsp_t; +enum nan_termination_status { + NAN_TERM_REASON_INVALID = 1, + NAN_TERM_REASON_TIMEOUT = 2, + NAN_TERM_REASON_USER_REQ = 3, + 
NAN_TERM_REASON_FAILURE = 4, + NAN_TERM_REASON_COUNT_REACHED = 5, + NAN_TERM_REASON_DE_SHUTDOWN = 6, + NAN_TERM_REASON_DISABLE_IN_PROGRESS = 7 +}; + +/* nan2 data iovar */ +/* nan2 qos */ +typedef struct wl_nan_dp_qos +{ + uint8 tid; + uint8 pad; + uint16 pkt_size; + uint16 mean_rate; + uint16 svc_interval; +} wl_nan_dp_qos_t; +/* ndp config */ +typedef struct wl_nan_ndp_config +{ + uint8 ndp_id; + uint8 pub_id; + struct ether_addr pub_addr; + struct ether_addr data_addr; /* configure local data addr */ + struct ether_addr init_data_addr; /* initiator data addr */ + uint8 svc_spec_info[WL_NAN_DATA_SVC_SPEC_INFO_LEN]; + wl_nan_dp_qos_t qos; + uint16 avail_len; + uint8 pad[3]; + uint8 data[1]; +} wl_nan_ndp_config_t; + +/* nan2 device capabilities */ +typedef struct wl_nan_ndp_oper_cfg { + uint8 awake_dw_2g; + uint8 awake_dw_5g; + uint8 bands_supported; + uint8 op_mode; +} wl_nan_ndp_oper_cfg_t; + +typedef uint8 wl_nan_ndp_ndpid_t; +typedef uint8 wl_nan_ndp_conn_t; + +typedef struct wl_nan_dp_req { + uint8 type; /* 0- unicast 1 - multicast */ + uint8 pub_id; /* Publisher ID */ + uint16 flags; + struct ether_addr peer_mac; /* Peer's NMI addr */ + struct ether_addr mcast_mac; /* Multicast addr */ + wl_nan_dp_qos_t qos; + uint8 tlv_params[]; /* xtlv parameters for command */ +} wl_nan_dp_req_t; + +/* TODO Need to replace ndp_id with lndp_id */ +/* Return structure to data req IOVAR */ +typedef struct wl_nan_dp_req_ret { + struct ether_addr indi; /* Initiators data mac addr */ + uint8 ndp_id; /* Initiators ndpid */ + uint8 pad; +} wl_nan_dp_req_ret_t; + +typedef struct wl_nan_dp_resp { + uint8 type; /* 0- unicast 1 - multicast */ + uint8 status; /* Accepted or Rejected */ + uint8 reason_code; + /* Local NDP ID for unicast, mc_id for multicast, 0 for implicit NMSG */ + uint8 ndp_id; + wl_nan_dp_qos_t qos; + /* Initiator data address for unicast or multicast address for multicast */ + struct ether_addr mac_addr; + uint16 flags; + uint8 tlv_params[]; /* xtlv parameters for 
command */ +} wl_nan_dp_resp_t; + +/* Return structure to data resp IOVAR */ +typedef struct wl_nan_dp_resp_ret { + uint8 nmsgid; /* NMSG ID or for multicast else 0 */ + uint8 pad[3]; +} wl_nan_dp_resp_ret_t; + +typedef struct wl_nan_dp_conf { + uint8 lndp_id; + uint8 status; /* Accepted or Rejected */ + uint8 pad[2]; +} wl_nan_dp_conf_t; + +typedef struct wl_nan_dp_end +{ + uint8 lndp_id; + uint8 status; + uint8 pad[2]; +} wl_nan_dp_end_t; + +typedef struct wl_nan_dp_schedupd { + uint8 type; /* 0: unicast, 1: multicast */ + uint8 flags; + struct ether_addr addr; /* peer NMI or multicast addr */ + wl_nan_dp_qos_t qos; + uint8 map_id; + uint8 pad[3]; +} wl_nan_dp_schedupd_t; + +/* set: update with notification, unset: NDL setup handshake */ +#define WL_NAN_DP_SCHEDUPD_NOTIF (1 << 0) + +/* list ndp ids */ +typedef struct wl_nan_ndp_id_list { + uint16 ndp_count; + uint8 lndp_id[]; +} wl_nan_ndp_id_list_t; + +/* nan2 status */ +typedef struct ndp_session { + uint8 lndp_id; + uint8 state; + uint8 pub_id; + uint8 pad; +} ndp_session_t; + +typedef struct wl_nan_ndp_status { + struct ether_addr peer_nmi; + struct ether_addr peer_ndi; + ndp_session_t session; + uint8 pad; +} wl_nan_ndp_status_t; + +/* events */ +#define NAN_DP_SESSION_UNICAST 0 +#define NAN_DP_SESSION_MULTICAST 1 +#define NAN_DP_SECURITY_NONE 0 +#define NAN_DP_SECURITY_CSID 1 +#define NAN_DP_SECURITY_MK 2 +#define WL_NAN_DATA_NMSGID_LEN 8 /* 8 bytes as per nan spec */ + +/* Common event structure for Nan Datapath + * Used for sending NDP Indication, Response, Confirmation, Securty Install and Establish events + */ +typedef struct wl_nan_ev_datapath_cmn { + uint8 type; + /* ndp_id is valid only if type is unicast */ + uint8 ndp_id; + uint8 pub_id; + uint8 security; + /* Following two fields are valid only if type is unicast */ + struct ether_addr initiator_ndi; + struct ether_addr responder_ndi; + struct ether_addr peer_nmi; + uint8 status; + uint8 role; + /* Following two fields are valid only if type is 
multicast */ + uint8 nmsg_id[WL_NAN_DATA_NMSGID_LEN]; + uint8 mc_id; + uint8 pad[1]; + uint16 opt_tlv_len; + uint8 opt_tlvs[]; +} wl_nan_ev_datapath_cmn_t; + +typedef struct wl_nan_ev_datapath_end { + uint8 ndp_id; + uint8 status; + uint8 pad[2]; + struct ether_addr peer_nmi; + struct ether_addr peer_ndi; +} wl_nan_ev_datapath_end_t; + +/* NAN2.0 Ranging definitions */ + +/* result indication bit map */ +#define NAN_RANGE_INDICATION_CONT (1<<0) +#define NAN_RANGE_INDICATION_INGRESS (1<<1) +#define NAN_RANGE_INIDICATION_EGRESS (1<<2) + +/* responder flags */ +#define NAN_RANGE_FLAG_AUTO_ACCEPT (1 << 0) +#define NAN_RANGE_FLAG_RESULT_REQUIRED (1 << 1) + +typedef struct wl_nan_range_req { + struct ether_addr peer; + uint8 publisher_id; + uint8 indication; /* bit map for result event */ + uint32 resolution; /* default millimeters */ + uint32 ingress; /* ingress limit in mm */ + uint32 egress; /* egress limit in mm */ + uint32 interval; /* max interval(in TU) b/w two ranging measurements */ +} wl_nan_range_req_t; + +#define NAN_RNG_REQ_IOV_LEN 24 + +typedef uint8 wl_nan_range_id; + +typedef struct wl_nan_range_resp { + wl_nan_range_id range_id; + uint8 flags; /* auto response, range result required */ + uint8 status; /* accept, reject */ + uint8 indication; /* bit map for result event */ + uint32 resolution; /* default millimeters */ + uint32 ingress; /* ingress limit in mm */ + uint32 egress; /* egress limit in mm */ + uint32 interval; /* max interval(in TU) b/w two ranging measurements */ +} wl_nan_range_resp_t; + +#define NAN_RNG_RESP_IOV_LEN 20 + +#define NAN_RNG_MAX_IOV_LEN 255 + +typedef struct wl_nan_ev_rng_req_ind { + struct ether_addr peer_m_addr; + uint8 rng_id; + /* ftm parameters */ + uint8 max_burst_dur; + uint8 min_ftm_delta; + uint8 max_num_ftm; + uint8 ftm_format_bw; + /* location info availability bit map */ + uint8 lc_info_avail; + /* Last movement indication */ + uint16 last_movement; + uint8 pad[2]; +} wl_nan_ev_rng_req_ind_t; + +#define 
NAN_RNG_REQ_IND_SIZE 14 + +typedef struct wl_nan_ev_rng_rpt_ind { + uint32 dist_mm; /* in millimeter */ + struct ether_addr peer_m_addr; + uint8 indication; /* indication definitions mentioned above */ + uint8 pad; +} wl_nan_ev_rng_rpt_ind_t; + +#define NAN_RNG_RPT_IND_SIZE 11 + +typedef struct wl_nan_ev_rng_term_ind { + struct ether_addr peer_m_addr; + uint8 reason_code; + uint8 pad; +} wl_nan_ev_rng_term_ind_t; + +#define NAN_RNG_TERM_IND_SIZE 7 + + +/* ********************* end of NAN section ******************************** */ +/* endif WL_NAN */ + +#define P2P_NAN_IOC_BUFSZ 512 /* some sufficient ioc buff size */ +#define WL_P2P_NAN_IOCTL_VERSION 0x1 + +/* container for p2p nan iovtls & events */ +typedef struct wl_p2p_nan_ioc { + uint16 version; /* interface command or event version */ + uint16 id; /* p2p nan ioctl cmd ID */ + uint16 len; /* total length of data[] */ + uint16 pad; /* padding */ + uint8 data []; /* var len payload of bcm_xtlv_t type */ +} wl_p2p_nan_ioc_t; + +/* p2p nan cmd IDs */ +enum wl_p2p_nan_cmds { + /* p2p nan cfg ioctls */ + WL_P2P_NAN_CMD_ENABLE = 1, + WL_P2P_NAN_CMD_CONFIG = 2, + WL_P2P_NAN_CMD_DEL_CONFIG = 3, + WL_P2P_NAN_CMD_GET_INSTS = 4 +}; + +#define WL_P2P_NAN_CONFIG_VERSION 1 + +#define WL_P2P_NAN_DEVICE_P2P 0x0 +#define WL_P2P_NAN_DEVICE_GO 0x1 +#define WL_P2P_NAN_DEVICE_GC 0x2 +#define WL_P2P_NAN_DEVICE_INVAL 0xFF + +/* NAN P2P operation */ +typedef struct p2p_nan_config { + uint16 version; /* wl_p2p_nan_config_t structure version */ + uint16 len; /* total length including version and variable IE */ + uint32 flags; /* 0x1 to NEW, 0x2 to ADD, 0x4 to DEL */ + uint8 inst_id; /* publisher/subscriber id */ + uint8 inst_type; /* publisher/subscriber */ + uint8 dev_role; /* P2P device role: 'P2P','GO' or 'GC' */ + uint8 pad1; /* padding */ + uint8 resolution; /* Availability bitmap resolution */ + uint8 repeat; /* Whether Availabilty repeat across DW */ + uint16 ie_len; /* variable ie len */ + struct ether_addr dev_mac; /* P2P 
device addres */ + uint16 pad2; /* Padding */ + uint32 avail_bmap; /* availability interval bitmap */ + uint32 chanspec; /* Chanspec */ + uint8 ie[]; /* hex ie data */ +} wl_p2p_nan_config_t; + +#define WL_P2P_NAN_SERVICE_LIST_VERSION 1 +typedef enum wl_nan_service_type { + WL_NAN_SVC_INST_PUBLISHER = 1, + WL_NAN_SVC_INST_SUBSCRIBER = 2 +} wl_nan_service_type_t; + +#define WL_P2P_NAN_CONFIG_NEW 0x1 +#define WL_P2P_NAN_CONFIG_ADD 0x2 +#define WL_P2P_NAN_CONFIG_DEL 0x4 + +typedef struct wl_nan_svc_inst { + uint8 inst_id; /* publisher/subscriber id */ + uint8 inst_type; /* publisher/subscriber */ +} wl_nan_svc_inst_t; + +typedef struct wl_nan_svc_inst_list { + uint16 version; /* this structure version */ + uint16 len; /* total length including version and variable svc list */ + uint16 count; /* service instance count */ + uint16 pad; /* padding */ + wl_nan_svc_inst_t svc[1]; /* service instance list */ +} wl_nan_svc_inst_list_t; + +#define NAN_POST_DISC_P2P_DATA_VER 1 +/* This structure will be used send peer p2p data with + * NAN discovery result + */ +typedef struct nan_post_disc_p2p_data { + uint8 ver; /* this structure version */ + uint8 dev_role; /* P2P Device role */ + uint8 resolution; /* Availability bitmap resolution */ + uint8 repeat; /* Whether Availabilty repeat across DW */ + struct ether_addr dev_mac; /* P2P device addres */ + uint16 pad1; /* Padding */ + uint32 chanspec; /* Chanspec */ + uint32 avl_bmp; /* availability interval bitmap */ +} nan_post_disc_p2p_data_t; + +/* timeslot etc for NAN */ +enum { + WL_TMU_TU = 0, + WL_TMU_SEC = 1, + WL_TMU_MILLI_SEC = 2, + WL_TMU_MICRO_SEC = 3, + WL_TMU_NANO_SEC = 4, + WL_TMU_PICO_SEC = 5 +}; +typedef int16 wl_tmu_t; + +typedef struct { + uint32 intvl; /* time interval */ + wl_tmu_t tmu; /* time unit */ + uint8 pad[2]; /* padding */ +} wl_time_interval_t; + +/* availabiloty slot flags */ +enum { + WL_AVAIL_SLOT_NONE = 0x0000, + WL_AVAIL_SLOT_COM = 0x0001, /* committed */ + WL_AVAIL_SLOT_POT = 0x0002, /* potential 
*/ + WL_AVAIL_SLOT_PROP = 0x0004, /* proposed - note: not configurable */ + WL_AVAIL_SLOT_PAGED = 0x0008 /* P-NDL */ + /* 0x0030 - resrved for NDC index */ + /* 0x00c0 - resrved for usage preference */ +}; +typedef int16 wl_avail_slot_flags_t; + +#define WL_AVAIL_SLOT_NDC_MASK 0x0030 /* up to 4 NDCs */ +#define WL_AVAIL_SLOT_NDC_SHIFT 4 +#define WL_AVAIL_SLOT_NDC(_flags) (((_flags) & WL_AVAIL_SLOT_NDC_MASK) \ + >> WL_AVAIL_SLOT_NDC_SHIFT) +#define WL_AVAIL_SLOT_SET_NDC(_flags, _ndc_idx) (((_flags) & ~WL_AVAIL_SLOT_NDC_MASK) |\ + ((_ndc_idx) << WL_AVAIL_SLOT_NDC_SHIFT)) + +#define WL_AVAIL_SLOT_UPREF_MASK 0x00c0 /* up to 4 usage preferences */ +#define WL_AVAIL_SLOT_UPREF_SHIFT 6 +#define WL_AVAIL_SLOT_UPREF(_flags) (((_flags) & WL_AVAIL_SLOT_UPREF_MASK) \ + >> WL_AVAIL_SLOT_UPREF_SHIFT) +#define WL_AVAIL_SLOT_SET_UPREF(_flags, _pref) (((_flags) & ~WL_AVAIL_SLOT_UPREF_MASK) |\ + ((_pref) << WL_AVAIL_SLOT_UPREF_SHIFT)) + +typedef struct wl_avail_slot { + wl_avail_slot_flags_t flags; + uint16 PAD; + wl_time_interval_t start; /* from time ref */ + wl_time_interval_t duration; /* from start */ + uint32 chanspec; /* channel spec */ +} wl_avail_slot_t; + +/* time reference */ +enum { + WL_TIME_REF_NONE = 0, + WL_TIME_REF_DEV_TSF = 1, + WL_TIME_REF_NAN_DW = 2, + WL_TIME_REF_TBTT = 3, + WL_TIME_REF_NAN_DW0 = 4 +}; +typedef int16 wl_time_ref_t; + +enum { + WL_AVAIL_NONE = 0x0000, + WL_AVAIL_LOCAL = 0x0001, + WL_AVAIL_PEER = 0x0002, + WL_AVAIL_NDC = 0x0003, + WL_AVAIL_IMMUTABLE = 0x0004, + WL_AVAIL_RESPONSE = 0x0005, + WL_AVAIL_COUNTER = 0x0006, + WL_AVAIL_RANGING = 0x0007, + WL_AVAIL_TYPE_MAX = WL_AVAIL_RANGING /* New ones before and update */ +}; +#define WL_AVAIL_TYPE_MASK 0x000F +#define WL_AVAIL_FLAG_RAW_MODE 0x8000 +typedef int16 wl_avail_flags_t; + +/* availability entry flags */ +enum { + WL_AVAIL_ENTRY_NONE = 0x0000, + WL_AVAIL_ENTRY_COM = 0x0001, /* committed */ + WL_AVAIL_ENTRY_POT = 0x0002, /* potential */ + WL_AVAIL_ENTRY_COND = 0x0004, /* conditional */ + 
WL_AVAIL_ENTRY_PAGED = 0x0008, /* P-NDL */ + WL_AVAIL_ENTRY_USAGE = 0x0030, /* usage preference */ + WL_AVAIL_ENTRY_BIT_DUR = 0x00c0, /* bit duration */ + WL_AVAIL_ENTRY_BAND_PRESENT = 0x0100, /* band present */ + WL_AVAIL_ENTRY_CHAN_PRESENT = 0x0200, /* channel information present */ + WL_AVAIL_ENTRY_CHAN_ENTRY_PRESENT = 0x0400, /* channel entry (opclass+bitmap) */ +}; + +/* bit duration */ +enum { + WL_AVAIL_BIT_DUR_16 = 0, /* 16TU */ + WL_AVAIL_BIT_DUR_32 = 1, /* 32TU */ + WL_AVAIL_BIT_DUR_64 = 2, /* 64TU */ + WL_AVAIL_BIT_DUR_128 = 3, /* 128TU */ +}; + +/* period */ +enum { + WL_AVAIL_PERIOD_0 = 0, /* 0TU */ + WL_AVAIL_PERIOD_128 = 1, /* 128TU */ + WL_AVAIL_PERIOD_256 = 2, /* 256TU */ + WL_AVAIL_PERIOD_512 = 3, /* 512TU */ + WL_AVAIL_PERIOD_1024 = 4, /* 1024TU */ + WL_AVAIL_PERIOD_2048 = 5, /* 2048TU */ + WL_AVAIL_PERIOD_4096 = 6, /* 4096TU */ + WL_AVAIL_PERIOD_8192 = 7, /* 8192TU */ +}; + +/* band */ +enum { + WL_AVAIL_BAND_NONE = 0, /* reserved */ + WL_AVAIL_BAND_SUB1G = 1, /* sub-1 GHz */ + WL_AVAIL_BAND_2G = 2, /* 2.4 GHz */ + WL_AVAIL_BAND_3G = 3, /* reserved (for 3.6 GHz) */ + WL_AVAIL_BAND_5G = 4, /* 4.9 and 5 GHz */ + WL_AVAIL_BAND_60G = 5, /* reserved (for 60 GHz) */ +}; + +#define WL_AVAIL_ENTRY_TYPE_MASK 0x0F +#define WL_AVAIL_ENTRY_USAGE_MASK 0x0030 /* up to 4 usage preferences */ +#define WL_AVAIL_ENTRY_USAGE_SHIFT 4 +#define WL_AVAIL_ENTRY_USAGE_VAL(_flags) (((_flags) & WL_AVAIL_ENTRY_USAGE_MASK) \ + >> WL_AVAIL_ENTRY_USAGE_SHIFT) + +#define WL_AVAIL_ENTRY_BIT_DUR_MASK 0x00c0 /* 0:16TU, 1:32TU, 2:64TU, 3:128TU */ +#define WL_AVAIL_ENTRY_BIT_DUR_SHIFT 6 +#define WL_AVAIL_ENTRY_BIT_DUR_VAL(_flags) (((_flags) & WL_AVAIL_ENTRY_BIT_DUR_MASK) \ + >> WL_AVAIL_ENTRY_BIT_DUR_SHIFT) + +#define WL_AVAIL_ENTRY_BAND_MASK 0x0100 /* 0=band not present, 1=present */ +#define WL_AVAIL_ENTRY_BAND_SHIFT 8 + +#define WL_AVAIL_ENTRY_CHAN_MASK 0x0200 /* 0=channel info not present, 1=present */ +#define WL_AVAIL_ENTRY_CHAN_SHIFT 9 + +#define 
WL_AVAIL_ENTRY_CHAN_ENTRY_MASK 0x0400 /* 0=chanspec, 1=hex channel entry */ +#define WL_AVAIL_ENTRY_CHAN_ENTRY_SHIFT 10 + +#define WL_AVAIL_ENTRY_OPCLASS_MASK 0xFF +#define WL_AVAIL_ENTRY_CHAN_BITMAP_MASK 0xFF00 +#define WL_AVAIL_ENTRY_CHAN_BITMAP_SHIFT 8 +#define WL_AVAIL_ENTRY_CHAN_BITMAP_VAL(_info) (((_info) & WL_AVAIL_ENTRY_CHAN_BITMAP_MASK) \ + >> WL_AVAIL_ENTRY_CHAN_BITMAP_SHIFT) + +/* Used for raw channel entry field input */ +#define MAX_CHAN_ENTRY_LEN 6 + +typedef struct wl_avail_entry { + uint16 length; /* total length */ + uint16 start_offset; /* in TUs, multiply by 16 for total offset */ + union { + uint32 channel_info; /* either chanspec or hex channel entry (opclass + + * bitmap per NAN spec), as indicated by setting + * WL_AVAIL_ENTRY_HEX_CHAN_ENTRY flag + */ + uint32 band; /* defined by WL_BAND enum, 2=2.4GHz, 4=5GHz */ + uint8 channel_entry[MAX_CHAN_ENTRY_LEN]; + } u; /* band or channel value, 0=all band/channels */ + uint8 pad[2]; + uint8 period; /* in TUs, defined by WL_AVAIL_PERIOD enum + * 1:128, 2:256, 3:512, 4:1024, 5:2048, 6:4096, + * 7:8192 + */ + uint8 bitmap_len; + uint16 flags; /* defined by avail entry flags enum: + * type, usage pref, bit duration, band, channel + */ + uint8 bitmap[]; /* time bitmap */ +} wl_avail_entry_t; + +typedef struct wl_avail { + uint16 length; /* total length */ + uint16 flags; /* defined by WL_AVAIL enum + * 1=local, 2=peer, 3=ndc, 4=immutable, + * 5=response, 6=counter + */ + uint8 id; /* id used for multiple maps/avail */ + uint8 pad[3]; + struct ether_addr addr; /* peer mac address or ndc id */ + uint8 num_entries; + uint8 entry_offset; + /* add additional fields above this line */ + uint8 entry[]; +} wl_avail_t; + +#define WL_AVAIL_MIN_LEN(n) ((n) ? 
OFFSETOF(wl_avail_t, entry) + \ + ((n) * OFFSETOF(wl_avail_entry_t, bitmap)) : 0) + +/* unaligned schedule (window) */ +typedef struct wl_avail_ulw { + uint8 id; /* schedule ID */ + uint8 overwrite; /* bit 0: overwrite all + * 1-4: map ID if overwrite all is 0 + */ + uint16 flags; + uint32 start; /* start time of first ULW, in us */ + uint32 dur; /* duration of ULW, in us */ + uint32 period; /* time between consecutive ULWs, in us */ + union { + uint32 chanspec; + uint32 band; + uint8 chan_entry[MAX_CHAN_ENTRY_LEN]; + uint8 pad[8]; + } u; + uint8 cntdwn; /* remaining ULWs before schedule ends */ + uint8 pad[3]; +} wl_avail_ulw_t; + +/* unset: NAN is not available during ULW, set: NAN is avail depending on ctrl flags */ +#define WL_NAN_ULW_CTRL_PRESENT (1 << 0) +/* unset: band, set: channel */ +#define WL_NAN_ULW_CTRL_TYPE (1 << 1) +/* set: NAN is availabile on specified band/channel */ +#define WL_NAN_ULW_CTRL_AVAIL (1 << 2) +/* channel is provided in raw attribute format */ +#define WL_NAN_ULW_CTRL_RAW_CHAN (1 << 3) + +/* nan wfa testmode operations */ +enum { + WL_NAN_WFA_TM_IGNORE_TERMINATE_NAF = 0x00000001, + WL_NAN_WFA_TM_IGNORE_RX_DATA_OUTSIDE_CRB = 0x00000002, + WL_NAN_WFA_TM_ALLOW_TX_DATA_OUTSIDE_CRB = 0x00000004, + WL_NAN_WFA_TM_ENFORCE_NDL_COUNTER = 0x00000008, + WL_NAN_WFA_TM_BYPASS_NDL_PROPOSAL_VALIDATION = 0x00000010, + /* allow data(pings) tx while ndp sec negotiation */ + WL_NAN_WFA_TM_SEC_SEND_PINGS_BYPASS_NDP_SM = 0x00000020, + /* generate and insert incorrect mic */ + WL_NAN_WFA_TM_SEC_INCORRECT_MIC = 0x00000040, + /* send m4 reject deliberately */ + WL_NAN_WFA_TM_SEC_REJECT_STATUS4M4 = 0x00000080, + /* send mgmt frame (for eg. 
ndp terminate) in clear txt (bypass security) */ + WL_NAN_WFA_TM_SEC_SEND_MGMT_CLEAR = 0x00000100, + WL_NAN_WFA_TM_FLAG_MASK = 0x000001ff /* add above & update mask */ +}; +typedef uint32 wl_nan_wfa_testmode_t; #define RSSI_THRESHOLD_SIZE 16 #define MAX_IMP_RESP_SIZE 256 -typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_rssi_bias { - int32 version; /* version */ - int32 threshold[RSSI_THRESHOLD_SIZE]; /* threshold */ - int32 peak_offset; /* peak offset */ - int32 bias; /* rssi bias */ - int32 gd_delta; /* GD - GD_ADJ */ - int32 imp_resp[MAX_IMP_RESP_SIZE]; /* (Hi*Hi)+(Hr*Hr) */ -} BWL_POST_PACKED_STRUCT wl_proxd_rssi_bias_t; +typedef struct wl_proxd_rssi_bias { + int32 version; /**< version */ + int32 threshold[RSSI_THRESHOLD_SIZE]; /**< threshold */ + int32 peak_offset; /**< peak offset */ + int32 bias; /**< rssi bias */ + int32 gd_delta; /**< GD - GD_ADJ */ + int32 imp_resp[MAX_IMP_RESP_SIZE]; /**< (Hi*Hi)+(Hr*Hr) */ +} wl_proxd_rssi_bias_t; -typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_rssi_bias_avg { - int32 avg_threshold[RSSI_THRESHOLD_SIZE]; /* avg threshold */ - int32 avg_peak_offset; /* avg peak offset */ - int32 avg_rssi; /* avg rssi */ - int32 avg_bias; /* avg bias */ -} BWL_POST_PACKED_STRUCT wl_proxd_rssi_bias_avg_t; +typedef struct wl_proxd_rssi_bias_avg { + int32 avg_threshold[RSSI_THRESHOLD_SIZE]; /**< avg threshold */ + int32 avg_peak_offset; /**< avg peak offset */ + int32 avg_rssi; /**< avg rssi */ + int32 avg_bias; /**< avg bias */ +} wl_proxd_rssi_bias_avg_t; +#include typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_info { - uint16 type; /* type: 0 channel table, 1 channel smoothing table, 2 and 3 seq */ - uint16 index; /* The current frame index, from 1 to total_frames. 
*/ - uint16 tof_cmd; /* M_TOF_CMD */ - uint16 tof_rsp; /* M_TOF_RSP */ - uint16 tof_avb_rxl; /* M_TOF_AVB_RX_L */ - uint16 tof_avb_rxh; /* M_TOF_AVB_RX_H */ - uint16 tof_avb_txl; /* M_TOF_AVB_TX_L */ - uint16 tof_avb_txh; /* M_TOF_AVB_TX_H */ - uint16 tof_id; /* M_TOF_ID */ + uint16 type; /**< type: 0 channel table, 1 channel smoothing table, 2 and 3 seq */ + uint16 index; /**< The current frame index, from 1 to total_frames. */ + uint16 tof_cmd; /**< M_TOF_CMD */ + uint16 tof_rsp; /**< M_TOF_RSP */ + uint16 tof_avb_rxl; /**< M_TOF_AVB_RX_L */ + uint16 tof_avb_rxh; /**< M_TOF_AVB_RX_H */ + uint16 tof_avb_txl; /**< M_TOF_AVB_TX_L */ + uint16 tof_avb_txh; /**< M_TOF_AVB_TX_H */ + uint16 tof_id; /**< M_TOF_ID */ uint8 tof_frame_type; uint8 tof_frame_bw; int8 tof_rssi; int32 tof_cfo; - int32 gd_adj_ns; /* gound delay */ - int32 gd_h_adj_ns; /* group delay + threshold crossing */ -#ifdef RSSI_REFINE - wl_proxd_rssi_bias_t rssi_bias; /* RSSI refinement info */ -#endif - int16 nfft; /* number of samples stored in H */ + int32 gd_adj_ns; /**< gound delay */ + int32 gd_h_adj_ns; /**< group delay + threshold crossing */ + int16 nfft; /**< number of samples stored in H */ + uint8 num_max_cores; } BWL_POST_PACKED_STRUCT wl_proxd_collect_info_t; +#include -#define k_tof_collect_H_pad 1 -#define k_tof_collect_H_size (256+16+k_tof_collect_H_pad) -#define k_tof_collect_Hraw_size (2*k_tof_collect_H_size) -typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_data { +#define K_TOF_COLLECT_H_PAD 1 +#define K_TOF_COLLECT_SC_20MHZ (64) +/* Maximum possible size of sample capture */ +#define K_TOF_COLLECT_SC_80MHZ (2*K_TOF_COLLECT_SC_20MHZ) +/* Maximum possible size of channel dump */ +#define K_TOF_COLLECT_CHAN_SIZE (2*K_TOF_COLLECT_SC_80MHZ) + +/* +A few extra samples are required to estimate frequency offset +Right now 16 samples are being used. Can be changed in future. 
+*/ +#define K_TOF_COLLECT_SAMP_SIZE_20MHZ (2*(K_TOF_COLLECT_SC_20MHZ)+16+K_TOF_COLLECT_H_PAD) +#define K_TOF_COLLECT_RAW_SAMP_SIZE_20MHZ (2*K_TOF_COLLECT_SAMP_SIZE_20MHZ) +#define K_TOF_COLLECT_H_SIZE_20MHZ (K_TOF_COLLECT_SAMP_SIZE_20MHZ) +#define K_TOF_COLLECT_HRAW_SIZE_20MHZ (K_TOF_COLLECT_RAW_SAMP_SIZE_20MHZ) + +#define K_TOF_COLLECT_SAMP_SIZE_80MHZ (2*(K_TOF_COLLECT_SC_80MHZ)+16+K_TOF_COLLECT_H_PAD) +#define K_TOF_COLLECT_RAW_SAMP_SIZE_80MHZ (2*K_TOF_COLLECT_SAMP_SIZE_80MHZ) +#define K_TOF_COLLECT_H_SIZE_80MHZ (K_TOF_COLLECT_SAMP_SIZE_80MHZ) +#define K_TOF_COLLECT_HRAW_SIZE_80MHZ (K_TOF_COLLECT_RAW_SAMP_SIZE_80MHZ) + +#define WL_PROXD_COLLECT_DATA_VERSION_1 1 +#include +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_data_v1 { wl_proxd_collect_info_t info; - uint32 H[k_tof_collect_H_size]; /* raw data read from phy used to adjust timestamps */ + uint8 ri_rr[FTM_TPK_RI_RR_LEN]; + /**< raw data read from phy used to adjust timestamps */ + uint32 H[K_TOF_COLLECT_H_SIZE_20MHZ]; +} BWL_POST_PACKED_STRUCT wl_proxd_collect_data_t_v1; +#include -} BWL_POST_PACKED_STRUCT wl_proxd_collect_data_t; +#define WL_PROXD_COLLECT_DATA_VERSION_2 2 +typedef struct wl_proxd_collect_data_v2 { + uint16 version; + uint16 len; + wl_proxd_collect_info_t info; + uint8 ri_rr[FTM_TPK_RI_RR_LEN]; + uint8 pad[3]; /* should be based on FTM_TPK_RI_RR_LEN */ + /**< raw data read from phy used to adjust timestamps */ + uint32 H[K_TOF_COLLECT_H_SIZE_20MHZ]; + uint32 chan[4 * K_TOF_COLLECT_CHAN_SIZE]; +} wl_proxd_collect_data_t_v2; +#define WL_PROXD_COLLECT_DATA_VERSION_MAX WL_PROXD_COLLECT_DATA_VERSION_2 -typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_debug_data { - uint8 count; /* number of packets */ - uint8 stage; /* state machone stage */ - uint8 received; /* received or txed */ - uint8 paket_type; /* packet type */ - uint8 category; /* category field */ - uint8 action; /* action field */ - uint8 token; /* token number */ - uint8 follow_token; /* following token number */ - uint16 
index; /* index of the packet */ - uint16 tof_cmd; /* M_TOF_CMD */ - uint16 tof_rsp; /* M_TOF_RSP */ - uint16 tof_avb_rxl; /* M_TOF_AVB_RX_L */ - uint16 tof_avb_rxh; /* M_TOF_AVB_RX_H */ - uint16 tof_avb_txl; /* M_TOF_AVB_TX_L */ - uint16 tof_avb_txh; /* M_TOF_AVB_TX_H */ - uint16 tof_id; /* M_TOF_ID */ - uint16 tof_status0; /* M_TOF_STATUS_0 */ - uint16 tof_status2; /* M_TOF_STATUS_2 */ - uint16 tof_chsm0; /* M_TOF_CHNSM_0 */ - uint16 tof_phyctl0; /* M_TOF_PHYCTL0 */ - uint16 tof_phyctl1; /* M_TOF_PHYCTL1 */ - uint16 tof_phyctl2; /* M_TOF_PHYCTL2 */ - uint16 tof_lsig; /* M_TOF_LSIG */ - uint16 tof_vhta0; /* M_TOF_VHTA0 */ - uint16 tof_vhta1; /* M_TOF_VHTA1 */ - uint16 tof_vhta2; /* M_TOF_VHTA2 */ - uint16 tof_vhtb0; /* M_TOF_VHTB0 */ - uint16 tof_vhtb1; /* M_TOF_VHTB1 */ - uint16 tof_apmductl; /* M_TOF_AMPDU_CTL */ - uint16 tof_apmdudlim; /* M_TOF_AMPDU_DLIM */ - uint16 tof_apmdulen; /* M_TOF_AMPDU_LEN */ -} BWL_POST_PACKED_STRUCT wl_proxd_debug_data_t; +typedef struct wl_proxd_debug_data { + uint8 count; /**< number of packets */ + uint8 stage; /**< state machone stage */ + uint8 received; /**< received or txed */ + uint8 paket_type; /**< packet type */ + uint8 category; /**< category field */ + uint8 action; /**< action field */ + uint8 token; /**< token number */ + uint8 follow_token; /**< following token number */ + uint16 index; /**< index of the packet */ + uint16 tof_cmd; /**< M_TOF_CMD */ + uint16 tof_rsp; /**< M_TOF_RSP */ + uint16 tof_avb_rxl; /**< M_TOF_AVB_RX_L */ + uint16 tof_avb_rxh; /**< M_TOF_AVB_RX_H */ + uint16 tof_avb_txl; /**< M_TOF_AVB_TX_L */ + uint16 tof_avb_txh; /**< M_TOF_AVB_TX_H */ + uint16 tof_id; /**< M_TOF_ID */ + uint16 tof_status0; /**< M_TOF_STATUS_0 */ + uint16 tof_status2; /**< M_TOF_STATUS_2 */ + uint16 tof_chsm0; /**< M_TOF_CHNSM_0 */ + uint16 tof_phyctl0; /**< M_TOF_PHYCTL0 */ + uint16 tof_phyctl1; /**< M_TOF_PHYCTL1 */ + uint16 tof_phyctl2; /**< M_TOF_PHYCTL2 */ + uint16 tof_lsig; /**< M_TOF_LSIG */ + uint16 tof_vhta0; /**< 
M_TOF_VHTA0 */ + uint16 tof_vhta1; /**< M_TOF_VHTA1 */ + uint16 tof_vhta2; /**< M_TOF_VHTA2 */ + uint16 tof_vhtb0; /**< M_TOF_VHTB0 */ + uint16 tof_vhtb1; /**< M_TOF_VHTB1 */ + uint16 tof_apmductl; /**< M_TOF_AMPDU_CTL */ + uint16 tof_apmdudlim; /**< M_TOF_AMPDU_DLIM */ + uint16 tof_apmdulen; /**< M_TOF_AMPDU_LEN */ +} wl_proxd_debug_data_t; -/* version of the wl_wsec_info structure */ +/** version of the wl_wsec_info structure */ #define WL_WSEC_INFO_VERSION 0x01 -/* start enum value for BSS properties */ +/** start enum value for BSS properties */ #define WL_WSEC_INFO_BSS_BASE 0x0100 -/* size of len and type fields of wl_wsec_info_tlv_t struct */ +/** size of len and type fields of wl_wsec_info_tlv_t struct */ #define WL_WSEC_INFO_TLV_HDR_LEN OFFSETOF(wl_wsec_info_tlv_t, data) -/* Allowed wl_wsec_info properties; not all of them may be supported. */ +/** Allowed wl_wsec_info properties; not all of them may be supported. */ typedef enum { WL_WSEC_INFO_NONE = 0, WL_WSEC_INFO_MAX_KEYS = 1, @@ -6504,68 +9690,336 @@ typedef enum { WL_WSEC_INFO_BSS_TX_KEY_ID = (WL_WSEC_INFO_BSS_BASE + 3), WL_WSEC_INFO_BSS_ALGO = (WL_WSEC_INFO_BSS_BASE + 4), WL_WSEC_INFO_BSS_KEY_LEN = (WL_WSEC_INFO_BSS_BASE + 5), + WL_WSEC_INFO_BSS_ALGOS = (WL_WSEC_INFO_BSS_BASE + 6), /* add per-BSS properties above */ WL_WSEC_INFO_MAX = 0xffff } wl_wsec_info_type_t; -/* tlv used to return wl_wsec_info properties */ +typedef struct { + uint32 algos; /* set algos to be enabled/disabled */ + uint32 mask; /* algos outside mask unaltered */ +} wl_wsec_info_algos_t; + +/** tlv used to return wl_wsec_info properties */ typedef struct { uint16 type; - uint16 len; /* data length */ - uint8 data[1]; /* data follows */ + uint16 len; /**< data length */ + uint8 data[1]; /**< data follows */ } wl_wsec_info_tlv_t; -/* input/output data type for wsec_info iovar */ +/** input/output data type for wsec_info iovar */ typedef struct wl_wsec_info { - uint8 version; /* structure version */ + uint8 version; /**< structure 
version */ uint8 pad[2]; uint8 num_tlvs; - wl_wsec_info_tlv_t tlvs[1]; /* tlv data follows */ + wl_wsec_info_tlv_t tlvs[1]; /**< tlv data follows */ } wl_wsec_info_t; +/* + * randmac definitions + */ +#define WL_RANDMAC_MODULE "randmac" +#define WL_RANDMAC_API_VERSION 0x0100 /**< version 1.0 */ +#define WL_RANDMAC_API_MIN_VERSION 0x0100 /**< version 1.0 */ + +/** subcommands that can apply to randmac */ +enum { + WL_RANDMAC_SUBCMD_NONE = 0, + WL_RANDMAC_SUBCMD_GET_VERSION = 1, + WL_RANDMAC_SUBCMD_ENABLE = 2, + WL_RANDMAC_SUBCMD_DISABLE = 3, + WL_RANDMAC_SUBCMD_CONFIG = 4, + WL_RANDMAC_SUBCMD_STATS = 5, + WL_RANDMAC_SUBCMD_CLEAR_STATS = 6, + + WL_RANDMAC_SUBCMD_MAX +}; +typedef int16 wl_randmac_subcmd_t; + +/* Common IOVAR struct */ +typedef struct wl_randmac { + uint16 version; + uint16 len; /* total length */ + wl_randmac_subcmd_t subcmd_id; /* subcommand id */ + uint8 data[0]; /* subcommand data */ +} wl_randmac_t; + +#define WL_RANDMAC_IOV_HDR_SIZE OFFSETOF(wl_randmac_t, data) + +/* randmac version subcommand */ +typedef struct wl_randmac_version { + uint16 version; /* Randmac method version info */ + uint8 pad[2]; /* Align on 4 byte boundary */ +} wl_randmac_version_t; + +/* + * Bitmask for methods supporting MAC randomization feature + */ +#define WL_RANDMAC_USER_NONE 0x0000 +#define WL_RANDMAC_USER_FTM 0x0001 +#define WL_RANDMAC_USER_NAN 0x0002 +#define WL_RANDMAC_USER_SCAN 0x0004 +#define WL_RANDMAC_USER_ALL 0xFFFF +typedef uint16 wl_randmac_method_t; + +enum { + WL_RANDMAC_FLAGS_NONE = 0x00, + WL_RANDMAC_FLAGS_ADDR = 0x01, + WL_RANDMAC_FLAGS_MASK = 0x02, + WL_RANDMAC_FLAGS_METHOD = 0x04, + WL_RANDMAC_FLAGS_ALL = 0xFF +}; +typedef uint8 wl_randmac_flags_t; + +/* randmac statistics subcommand */ +typedef struct wl_randmac_stats { + uint32 set_ok; /* Set random addr success count */ + uint32 set_fail; /* Set random addr failed count */ + uint32 set_reqs; /* Set random addr count */ + uint32 reset_reqs; /* Restore random addr count */ + uint32 restore_ok; /* 
Restore random addr succes count */ + uint32 restore_fail; /* Restore random addr failed count */ + uint32 events_sent; /* randmac module events count */ + uint32 events_rcvd; /* randmac events received count */ +} wl_randmac_stats_t; + +/* randmac config subcommand */ +typedef struct wl_randmac_config { + struct ether_addr addr; /* Randomized MAC address */ + struct ether_addr addr_mask; /* bitmask for randomization */ + wl_randmac_method_t method; /* Enabled methods */ + wl_randmac_flags_t flags; /* What config info changed */ + uint8 PAD; +} wl_randmac_config_t; + +enum { + WL_RANDMAC_EVENT_NONE = 0, /**< not an event, reserved */ + WL_RANDMAC_EVENT_BSSCFG_ADDR_SET = 1, /* bsscfg addr randomized */ + WL_RANDMAC_EVENT_BSSCFG_ADDR_RESTORE = 2, /* bsscfg addr restored */ + WL_RANDMAC_EVENT_ENABLED = 3, /* randmac module enabled */ + WL_RANDMAC_EVENT_DISABLE = 4, /* randmac module disabled */ + WL_RANDMAC_EVENT_BSSCFG_STATUS = 5, /* bsscfg enable/disable */ + + WL_RANDMAC_EVENT_MAX +}; +typedef int16 wl_randmac_event_type_t; +typedef int32 wl_randmac_status_t; +typedef uint32 wl_randmac_event_mask_t; + +#define WL_RANDMAC_EVENT_MASK_ALL 0xfffffffe +#define WL_RANDMAC_EVENT_MASK_EVENT(_event_type) (1 << (_event_type)) +#define WL_RANDMAC_EVENT_ENABLED(_mask, _event_type) (\ + ((_mask) & WL_RANDMAC_EVENT_MASK_EVENT(_event_type)) != 0) + +/** tlv IDs - data length 4 bytes unless overridden by type, alignment 32 bits */ +enum { + WL_RANDMAC_TLV_NONE = 0, + WL_RANDMAC_TLV_METHOD = 1, + WL_RANDMAC_TLV_ADDR = 2, + WL_RANDMAC_TLV_MASK = 3 +}; +typedef uint16 wl_randmac_tlv_id_t; + +typedef struct wl_randmac_tlv { + wl_randmac_tlv_id_t id; + uint16 len; /* Length of variable */ + uint8 data[1]; +} wl_randmac_tlv_t; + +/** randmac event */ +typedef struct wl_randmac_event { + uint16 version; + uint16 len; /* Length of all variables */ + wl_randmac_event_type_t type; + wl_randmac_method_t method; + uint8 pad[2]; + wl_randmac_tlv_t tlvs[1]; /**< variable */ +} 
wl_randmac_event_t; + /* * scan MAC definitions */ -/* common iovar struct */ +/** common iovar struct */ typedef struct wl_scanmac { - uint16 subcmd_id; /* subcommand id */ - uint16 len; /* total length of data[] */ - uint8 data[1]; /* subcommand data */ + uint16 subcmd_id; /**< subcommand id */ + uint16 len; /**< total length of data[] */ + uint8 data[]; /**< subcommand data */ } wl_scanmac_t; /* subcommand ids */ #define WL_SCANMAC_SUBCMD_ENABLE 0 -#define WL_SCANMAC_SUBCMD_BSSCFG 1 /* only GET supported */ +#define WL_SCANMAC_SUBCMD_BSSCFG 1 /**< only GET supported */ #define WL_SCANMAC_SUBCMD_CONFIG 2 -/* scanmac enable data struct */ +/** scanmac enable data struct */ typedef struct wl_scanmac_enable { - uint8 enable; /* 1 - enable, 0 - disable */ - uint8 pad[3]; /* 4-byte struct alignment */ + uint8 enable; /**< 1 - enable, 0 - disable */ + uint8 pad[3]; /**< 4-byte struct alignment */ } wl_scanmac_enable_t; -/* scanmac bsscfg data struct */ +/** scanmac bsscfg data struct */ typedef struct wl_scanmac_bsscfg { - uint32 bsscfg; /* bsscfg index */ + uint32 bsscfg; /**< bsscfg index */ } wl_scanmac_bsscfg_t; -/* scanmac config data struct */ +/** scanmac config data struct */ typedef struct wl_scanmac_config { - struct ether_addr mac; /* 6 bytes of MAC address or MAC prefix (i.e. OUI) */ - struct ether_addr random_mask; /* randomized bits on each scan */ - uint16 scan_bitmap; /* scans to use this MAC address */ - uint8 pad[2]; /* 4-byte struct alignment */ + struct ether_addr mac; /**< 6 bytes of MAC address or MAC prefix (i.e. 
OUI) */ + struct ether_addr random_mask; /**< randomized bits on each scan */ + uint16 scan_bitmap; /**< scans to use this MAC address */ + uint8 pad[2]; /**< 4-byte struct alignment */ } wl_scanmac_config_t; /* scan bitmap */ -#define WL_SCANMAC_SCAN_UNASSOC (0x01 << 0) /* unassociated scans */ -#define WL_SCANMAC_SCAN_ASSOC_ROAM (0x01 << 1) /* associated roam scans */ -#define WL_SCANMAC_SCAN_ASSOC_PNO (0x01 << 2) /* associated PNO scans */ -#define WL_SCANMAC_SCAN_ASSOC_HOST (0x01 << 3) /* associated host scans */ +#define WL_SCANMAC_SCAN_UNASSOC (0x01 << 0) /**< unassociated scans */ +#define WL_SCANMAC_SCAN_ASSOC_ROAM (0x01 << 1) /**< associated roam scans */ +#define WL_SCANMAC_SCAN_ASSOC_PNO (0x01 << 2) /**< associated PNO scans */ +#define WL_SCANMAC_SCAN_ASSOC_HOST (0x01 << 3) /**< associated host scans */ +/* + * bonjour dongle offload definitions + */ -/* no default structure packing */ -#include +/* common iovar struct */ +typedef struct wl_bdo { + uint16 subcmd_id; /* subcommand id */ + uint16 len; /* total length of data[] */ + uint8 data[]; /* subcommand data */ +} wl_bdo_t; + +/* subcommand ids */ +#define WL_BDO_SUBCMD_DOWNLOAD 0 /* Download flattened database */ +#define WL_BDO_SUBCMD_ENABLE 1 /* Start bonjour after download */ +#define WL_BDO_SUBCMD_MAX_DOWNLOAD 2 /* Get the max download size */ + +/* maximum fragment size */ +#define BDO_MAX_FRAGMENT_SIZE 1024 + +/* download flattened database + * + * BDO must be disabled before database download else fail. + * + * If database size is within BDO_MAX_FRAGMENT_SIZE then only a single fragment + * is required (i.e. frag_num = 0, total_size = frag_size). + * If database size exceeds BDO_MAX_FRAGMENT_SIZE then multiple fragments are required. 
+ */ +typedef struct wl_bdo_download { + uint16 total_size; /* total database size */ + uint16 frag_num; /* fragment number, 0 for first fragment, N-1 for last fragment */ + uint16 frag_size; /* size of fragment (max BDO_MAX_FRAGMENT_SIZE) */ + uint8 pad[2]; /* 4-byte struct alignment */ + uint8 fragment[BDO_MAX_FRAGMENT_SIZE]; /* fragment data */ +} wl_bdo_download_t; + +/* enable + * + * Enable requires a downloaded database else fail. + */ +typedef struct wl_bdo_enable { + uint8 enable; /* 1 - enable, 0 - disable */ + uint8 pad[3]; /* 4-byte struct alignment */ +} wl_bdo_enable_t; + +/* + * Get the max download size for Bonjour Offload. + */ +typedef struct wl_bdo_max_download { + uint16 size; /* Max download size in bytes */ + uint8 pad[2]; /* 4-byte struct alignment */ +} wl_bdo_max_download_t; + +/* + * TCP keepalive offload definitions + */ + +/* common iovar struct */ +typedef struct wl_tko { + uint16 subcmd_id; /* subcommand id */ + uint16 len; /* total length of data[] */ + uint8 data[]; /* subcommand data */ +} wl_tko_t; + +/* subcommand ids */ +#define WL_TKO_SUBCMD_MAX_TCP 0 /* max TCP connections supported */ +#define WL_TKO_SUBCMD_PARAM 1 /* configure offload common parameters */ +#define WL_TKO_SUBCMD_CONNECT 2 /* TCP connection info */ +#define WL_TKO_SUBCMD_ENABLE 3 /* enable/disable */ +#define WL_TKO_SUBCMD_STATUS 4 /* TCP connection status */ + +/* WL_TKO_SUBCMD_MAX_CONNECT subcommand data */ +typedef struct wl_tko_max_tcp { + uint8 max; /* max TCP connections supported */ + uint8 pad[3]; /* 4-byte struct alignment */ +} wl_tko_max_tcp_t; + +/* WL_TKO_SUBCMD_PARAM subcommand data */ +typedef struct wl_tko_param { + uint16 interval; /* keepalive tx interval (secs) */ + uint16 retry_interval; /* keepalive retry interval (secs) */ + uint16 retry_count; /* retry_count */ + uint8 pad[2]; /* 4-byte struct alignment */ +} wl_tko_param_t; + +/* WL_TKO_SUBCMD_CONNECT subcommand data + * invoke with unique 'index' for each TCP connection + */ +typedef 
struct wl_tko_connect { + uint8 index; /* TCP connection index, 0 to max-1 */ + uint8 ip_addr_type; /* 0 - IPv4, 1 - IPv6 */ + uint16 local_port; /* local port */ + uint16 remote_port; /* remote port */ + uint16 PAD; + uint32 local_seq; /* local sequence number */ + uint32 remote_seq; /* remote sequence number */ + uint16 request_len; /* TCP keepalive request packet length */ + uint16 response_len; /* TCP keepalive response packet length */ + uint8 data[]; /* variable length field containing local/remote IPv4/IPv6, + * TCP keepalive request packet, TCP keepalive response packet + * For IPv4, length is 4 * 2 + request_length + response_length + * offset 0 - local IPv4 + * offset 4 - remote IPv4 + * offset 8 - TCP keepalive request packet + * offset 8+request_length - TCP keepalive response packet + * For IPv6, length is 16 * 2 + request_length + response_length + * offset 0 - local IPv6 + * offset 16 - remote IPv6 + * offset 32 - TCP keepalive request packet + * offset 32+request_length - TCP keepalive response packet + */ +} wl_tko_connect_t; + +/* WL_TKO_SUBCMD_CONNECT subcommand data to GET configured info for specific index */ +typedef struct wl_tko_get_connect { + uint8 index; /* TCP connection index, 0 to max-1 */ + uint8 pad[3]; /* 4-byte struct alignment */ +} wl_tko_get_connect_t; + +typedef struct wl_tko_enable { + uint8 enable; /* 1 - enable, 0 - disable */ + uint8 pad[3]; /* 4-byte struct alignment */ +} wl_tko_enable_t; + +/* WL_TKO_SUBCMD_STATUS subcommand data */ +/* must be invoked before tko is disabled else status is unavailable */ +typedef struct wl_tko_status { + uint8 count; /* number of status entries (i.e. 
equals + * max TCP connections supported) + */ + uint8 status[1]; /* variable length field contain status for + * each TCP connection index + */ +} wl_tko_status_t; + +typedef enum { + TKO_STATUS_NORMAL = 0, /* TCP connection normal, no error */ + TKO_STATUS_NO_RESPONSE = 1, /* no response to TCP keepalive */ + TKO_STATUS_NO_TCP_ACK_FLAG = 2, /* TCP ACK flag not set */ + TKO_STATUS_UNEXPECT_TCP_FLAG = 3, /* unexpect TCP flags set other than ACK */ + TKO_STATUS_SEQ_NUM_INVALID = 4, /* ACK != sequence number */ + TKO_STATUS_REMOTE_SEQ_NUM_INVALID = 5, /* SEQ > remote sequence number */ + TKO_STATUS_TCP_DATA = 6, /* TCP data available */ + TKO_STATUS_UNAVAILABLE = 255, /* not used/configured */ +} tko_status_t; enum rssi_reason { RSSI_REASON_UNKNOW = 0, @@ -6625,7 +10079,7 @@ enum tof_rate_type { TOF_FRAME_RATE_LEGACY = 1 }; -#define TOF_ADJ_TYPE_NUM 4 /* number of assisted timestamp adjustment */ +#define TOF_ADJ_TYPE_NUM 4 /**< number of assisted timestamp adjustment */ enum tof_adj_mode { TOF_ADJ_SOFTWARE = 0, TOF_ADJ_HARDWARE = 1, @@ -6633,7 +10087,7 @@ enum tof_adj_mode { TOF_ADJ_NONE = 3 }; -#define FRAME_TYPE_NUM 4 /* number of frame type */ +#define FRAME_TYPE_NUM 4 /**< number of frame type */ enum frame_type { FRAME_TYPE_CCK = 0, FRAME_TYPE_OFDM = 1, @@ -6642,28 +10096,30 @@ enum frame_type { }; typedef struct wl_proxd_status_iovar { - uint16 method; /* method */ - uint8 mode; /* mode */ - uint8 peermode; /* peer mode */ - uint8 state; /* state */ - uint8 reason; /* reason code */ - uint32 distance; /* distance */ - uint32 txcnt; /* tx pkt counter */ - uint32 rxcnt; /* rx pkt counter */ - struct ether_addr peer; /* peer mac address */ - int8 avg_rssi; /* average rssi */ - int8 hi_rssi; /* highest rssi */ - int8 low_rssi; /* lowest rssi */ - uint32 dbgstatus; /* debug status */ - uint16 frame_type_cnt[FRAME_TYPE_NUM]; /* frame types */ - uint8 adj_type_cnt[TOF_ADJ_TYPE_NUM]; /* adj types HW/SW */ + uint16 method; /**< method */ + uint8 mode; /**< mode */ + 
uint8 peermode; /**< peer mode */ + uint8 state; /**< state */ + uint8 reason; /**< reason code */ + uint8 PAD[2]; + uint32 distance; /**< distance */ + uint32 txcnt; /**< tx pkt counter */ + uint32 rxcnt; /**< rx pkt counter */ + struct ether_addr peer; /**< peer mac address */ + int8 avg_rssi; /**< average rssi */ + int8 hi_rssi; /**< highest rssi */ + int8 low_rssi; /**< lowest rssi */ + uint8 PAD[3]; + uint32 dbgstatus; /**< debug status */ + uint16 frame_type_cnt[FRAME_TYPE_NUM]; /**< frame types */ + uint8 adj_type_cnt[TOF_ADJ_TYPE_NUM]; /**< adj types HW/SW */ } wl_proxd_status_iovar_t; -#ifdef NET_DETECT +/* ifdef NET_DETECT */ typedef struct net_detect_adapter_features { - bool wowl_enabled; - bool net_detect_enabled; - bool nlo_enabled; + uint8 wowl_enabled; + uint8 net_detect_enabled; + uint8 nlo_enabled; } net_detect_adapter_features_t; typedef enum net_detect_bss_type { @@ -6674,22 +10130,24 @@ typedef enum net_detect_bss_type { typedef struct net_detect_profile { wlc_ssid_t ssid; - net_detect_bss_type_t bss_type; /* Ignore for now since Phase 1 is only for ESS */ - uint32 cipher_type; /* DOT11_CIPHER_ALGORITHM enumeration values */ - uint32 auth_type; /* DOT11_AUTH_ALGORITHM enumeration values */ + net_detect_bss_type_t bss_type; /**< Ignore for now since Phase 1 is only for ESS */ + uint32 cipher_type; /**< DOT11_CIPHER_ALGORITHM enumeration values */ + uint32 auth_type; /**< DOT11_AUTH_ALGORITHM enumeration values */ } net_detect_profile_t; typedef struct net_detect_profile_list { uint32 num_nd_profiles; - net_detect_profile_t nd_profile[0]; + net_detect_profile_t nd_profile[]; } net_detect_profile_list_t; typedef struct net_detect_config { - bool nd_enabled; + uint8 nd_enabled; + uint8 PAD[3]; uint32 scan_interval; uint32 wait_period; - bool wake_if_connected; - bool wake_if_disconnected; + uint8 wake_if_connected; + uint8 wake_if_disconnected; + uint8 PAD[2]; net_detect_profile_list_t nd_profile_list; } net_detect_config_t; @@ -6703,20 +10161,22 
@@ typedef enum net_detect_wake_reason { typedef struct net_detect_wake_data { net_detect_wake_reason_t nd_wake_reason; uint32 nd_wake_date_length; - uint8 nd_wake_data[0]; /* Wake data (currently unused) */ + uint8 nd_wake_data[0]; /**< Wake data (currently unused) */ } net_detect_wake_data_t; -#endif /* NET_DETECT */ +/* endif NET_DETECT */ /* (unversioned, deprecated) */ typedef struct bcnreq { uint8 bcn_mode; - int dur; - int channel; + uint8 PAD[3]; + int32 dur; + int32 channel; struct ether_addr da; uint16 random_int; wlc_ssid_t ssid; uint16 reps; + uint8 PAD[2]; } bcnreq_t; #define WL_RRM_BCN_REQ_VER 1 @@ -6760,9 +10220,105 @@ typedef struct statreq { uint16 random_int; uint16 dur; uint8 group_id; + uint8 PAD; uint16 reps; } statreq_t; +typedef struct wl_rrm_config_ioc { + uint16 version; /* command version */ + uint16 id; /* subiovar cmd ID */ + uint16 len; /* total length of all bytes in data[] */ + uint16 pad; /* 4-byte boundary padding */ + uint8 data[1]; /* payload */ +} wl_rrm_config_ioc_t; + +enum { + WL_RRM_CONFIG_NONE = 0, /* reserved */ + WL_RRM_CONFIG_GET_LCI = 1, /* get LCI */ + WL_RRM_CONFIG_SET_LCI = 2, /* set LCI */ + WL_RRM_CONFIG_GET_CIVIC = 3, /* get civic location */ + WL_RRM_CONFIG_SET_CIVIC = 4, /* set civic location */ + WL_RRM_CONFIG_MAX = 5 +}; + +#define WL_RRM_CONFIG_NAME "rrm_config" +#define WL_RRM_CONFIG_MIN_LENGTH OFFSETOF(wl_rrm_config_ioc_t, data) + +enum { + WL_RRM_EVENT_NONE = 0, /* not an event, reserved */ + WL_RRM_EVENT_FRNG_REQ = 1, /* Receipt of FRNG request frame */ + WL_RRM_EVENT_FRNG_REP = 2, /* Receipt of FRNG report frame */ + + WL_RRM_EVENT_MAX +}; +typedef int16 wl_rrm_event_type_t; + +typedef struct frngreq_target { + uint32 bssid_info; + uint8 channel; + uint8 phytype; + uint8 reg; + uint8 pad; + struct ether_addr bssid; + chanspec_t chanspec; + uint32 sid; +} frngreq_target_t; + +typedef struct frngreq { + wl_rrm_event_type_t event; /* RRM event type */ + struct ether_addr da; + uint16 max_init_delay; /* Upper 
bound of random delay, in TUs */ + uint8 min_ap_count; /* Min FTM ranges requested (1-15) */ + uint8 num_aps; /* Number of APs to range, at least min_ap_count */ + uint16 max_age; /* Max elapsed time before FTM request, 0xFFFF = any */ + uint16 reps; /* Number of repetitions of this measurement type */ + frngreq_target_t targets[1]; /* Target BSSIDs to range */ +} frngreq_t; + +typedef struct frngrep_range { + uint32 start_tsf; /* 4 lsb of tsf */ + struct ether_addr bssid; + uint8 pad[2]; + uint32 range; + uint32 max_err; + uint8 rsvd; + uint8 pad2[3]; +} frngrep_range_t; + +typedef struct frngrep_error { + uint32 start_tsf; /* 4 lsb of tsf */ + struct ether_addr bssid; + uint8 code; + uint8 pad[1]; +} frngrep_error_t; + +typedef struct frngrep { + wl_rrm_event_type_t event; /* RRM event type */ + struct ether_addr da; + uint8 range_entry_count; + uint8 error_entry_count; + uint16 dialog_token; /* dialog token */ + frngrep_range_t range_entries[DOT11_FTM_RANGE_ENTRY_MAX_COUNT]; + frngrep_error_t error_entries[DOT11_FTM_RANGE_ERROR_ENTRY_MAX_COUNT]; +} frngrep_t; + +typedef struct wl_rrm_frng_ioc { + uint16 version; /* command version */ + uint16 id; /* subiovar cmd ID */ + uint16 len; /* total length of all bytes in data[] */ + uint16 pad; /* 4-byte boundary padding */ + uint8 data[]; /* payload */ +} wl_rrm_frng_ioc_t; + +enum { + WL_RRM_FRNG_NONE = 0, /* reserved */ + WL_RRM_FRNG_SET_REQ = 1, /* send ftm ranging request */ + WL_RRM_FRNG_MAX = 2 +}; + +#define WL_RRM_FRNG_NAME "rrm_frng" +#define WL_RRM_FRNG_MIN_LENGTH OFFSETOF(wl_rrm_frng_ioc_t, data) + #define WL_RRM_RPT_VER 0 #define WL_RRM_RPT_MAX_PAYLOAD 256 #define WL_RRM_RPT_MIN_PAYLOAD 7 @@ -6770,29 +10326,25 @@ typedef struct statreq { #define WL_RRM_RPT_FALG_GRP_ID_PROPR (1 << 0) #define WL_RRM_RPT_FALG_GRP_ID_0 (1 << 1) typedef struct { - uint16 ver; /* version */ - struct ether_addr addr; /* STA MAC addr */ - uint32 timestamp; /* timestamp of the report */ - uint16 flag; /* flag */ - uint16 len; /* 
length of payload data */ - unsigned char data[WL_RRM_RPT_MAX_PAYLOAD]; + uint16 ver; /**< version */ + struct ether_addr addr; /**< STA MAC addr */ + uint32 timestamp; /**< timestamp of the report */ + uint16 flag; /**< flag */ + uint16 len; /**< length of payload data */ + uint8 data[WL_RRM_RPT_MAX_PAYLOAD]; } statrpt_t; -typedef struct wlc_l2keepalive_ol_params { - uint8 flags; - uint8 prio; - uint16 period_ms; -} wlc_l2keepalive_ol_params_t; - typedef struct wlc_dwds_config { uint32 enable; - uint32 mode; /* STA/AP interface */ + uint32 mode; /**< STA/AP interface */ struct ether_addr ea; + uint8 PAD[2]; } wlc_dwds_config_t; typedef struct wl_el_set_params_s { - uint8 set; /* Set number */ - uint32 size; /* Size to make/expand */ + uint8 set; /**< Set number */ + uint8 PAD[3]; + uint32 size; /**< Size to make/expand */ } wl_el_set_params_t; typedef struct wl_el_tag_params_s { @@ -6801,32 +10353,38 @@ typedef struct wl_el_tag_params_s { uint8 flags; } wl_el_tag_params_t; -/* Video Traffic Interference Monitor config */ +/** Video Traffic Interference Monitor config */ #define INTFER_VERSION 1 typedef struct wl_intfer_params { - uint16 version; /* version */ - uint8 period; /* sample period */ - uint8 cnt; /* sample cnt */ - uint8 txfail_thresh; /* non-TCP txfail threshold */ - uint8 tcptxfail_thresh; /* tcptxfail threshold */ + uint16 version; /**< version */ + uint8 period; /**< sample period */ + uint8 cnt; /**< sample cnt */ + uint8 txfail_thresh; /**< non-TCP txfail threshold */ + uint8 tcptxfail_thresh; /**< tcptxfail threshold */ } wl_intfer_params_t; typedef struct wl_staprio_cfg { - struct ether_addr ea; /* mac addr */ - uint8 prio; /* scb priority */ + struct ether_addr ea; /**< mac addr */ + uint8 prio; /**< scb priority */ } wl_staprio_cfg_t; typedef enum wl_stamon_cfg_cmd_type { STAMON_CFG_CMD_DEL = 0, - STAMON_CFG_CMD_ADD = 1 + STAMON_CFG_CMD_ADD = 1, + STAMON_CFG_CMD_ENB = 2, + STAMON_CFG_CMD_DSB = 3, + STAMON_CFG_CMD_CNT = 4, + 
STAMON_CFG_CMD_RSTCNT = 5, + STAMON_CFG_CMD_GET_STATS = 6 } wl_stamon_cfg_cmd_type_t; typedef struct wlc_stamon_sta_config { - wl_stamon_cfg_cmd_type_t cmd; /* 0 - delete, 1 - add */ + wl_stamon_cfg_cmd_type_t cmd; /**< 0 - delete, 1 - add */ struct ether_addr ea; + uint8 PAD[2]; } wlc_stamon_sta_config_t; -#ifdef SR_DEBUG +/* ifdef SR_DEBUG */ typedef struct /* pmu_reg */{ uint32 pmu_control; uint32 pmu_capabilities; @@ -6842,234 +10400,301 @@ typedef struct /* pmu_reg */{ uint32 pmu_rsrc_up_down_timer[31]; uint32 rsrc_dep_mask[31]; } pmu_reg_t; -#endif /* pmu_reg */ +/* endif SR_DEBUG */ typedef struct wl_taf_define { - struct ether_addr ea; /* STA MAC or 0xFF... */ - uint16 version; /* version */ - uint32 sch; /* method index */ - uint32 prio; /* priority */ - uint32 misc; /* used for return value */ - char text[1]; /* used to pass and return ascii text */ + struct ether_addr ea; /**< STA MAC or 0xFF... */ + uint16 version; /**< version */ + uint32 sch; /**< method index */ + uint32 prio; /**< priority */ + uint32 misc; /**< used for return value */ + uint8 text[]; /**< used to pass and return ascii text */ } wl_taf_define_t; -/* Received Beacons lengths information */ +/** Received Beacons lengths information */ #define WL_LAST_BCNS_INFO_FIXED_LEN OFFSETOF(wlc_bcn_len_hist_t, bcnlen_ring) typedef struct wlc_bcn_len_hist { - uint16 ver; /* version field */ - uint16 cur_index; /* current pointed index in ring buffer */ - uint32 max_bcnlen; /* Max beacon length received */ - uint32 min_bcnlen; /* Min beacon length received */ - uint32 ringbuff_len; /* Length of the ring buffer 'bcnlen_ring' */ - uint32 bcnlen_ring[1]; /* ring buffer storing received beacon lengths */ + uint16 ver; /**< version field */ + uint16 cur_index; /**< current pointed index in ring buffer */ + uint32 max_bcnlen; /**< Max beacon length received */ + uint32 min_bcnlen; /**< Min beacon length received */ + uint32 ringbuff_len; /**< Length of the ring buffer 'bcnlen_ring' */ + uint32 
bcnlen_ring[1]; /**< ring buffer storing received beacon lengths */ } wlc_bcn_len_hist_t; /* WDS net interface types */ -#define WL_WDSIFTYPE_NONE 0x0 /* The interface type is neither WDS nor DWDS. */ -#define WL_WDSIFTYPE_WDS 0x1 /* The interface is WDS type. */ -#define WL_WDSIFTYPE_DWDS 0x2 /* The interface is DWDS type. */ +#define WL_WDSIFTYPE_NONE 0x0 /**< The interface type is neither WDS nor DWDS. */ +#define WL_WDSIFTYPE_WDS 0x1 /**< The interface is WDS type. */ +#define WL_WDSIFTYPE_DWDS 0x2 /**< The interface is DWDS type. */ typedef struct wl_bssload_static { - bool is_static; + uint8 is_static; + uint8 PAD; uint16 sta_count; uint8 chan_util; + uint8 PAD; uint16 aac; } wl_bssload_static_t; +/* Buffer of size WLC_SAMPLECOLLECT_MAXLEN (=10240 for 4345a0 ACPHY) + * gets copied to this, multiple times + */ +typedef enum wl_gpaio_option { + GPAIO_PMU_AFELDO, + GPAIO_PMU_TXLDO, + GPAIO_PMU_VCOLDO, + GPAIO_PMU_LNALDO, + GPAIO_PMU_ADCLDO, + GPAIO_ICTAT_CAL, + GPAIO_PMU_CLEAR, + GPAIO_OFF, + GPAIO_PMU_LOGENLDO, + GPAIO_PMU_RXLDO2G, + GPAIO_PMU_RXLDO5G +} wl_gpaio_option_t; -/* IO Var Operations - the Value of iov_op In wlc_ap_doiovar */ -typedef enum wlc_ap_iov_operation { +/** IO Var Operations - the Value of iov_op In wlc_ap_doiovar */ +typedef enum wlc_ap_iov_bss_operation { WLC_AP_IOV_OP_DELETE = -1, WLC_AP_IOV_OP_DISABLE = 0, WLC_AP_IOV_OP_ENABLE = 1, WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE = 2, WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE = 3, WLC_AP_IOV_OP_MOVE = 4 -} wlc_ap_iov_oper_t; +} wlc_ap_iov_bss_oper_t; /* LTE coex info */ /* Analogue of HCI Set MWS Signaling cmd */ typedef struct { - uint16 mws_rx_assert_offset; - uint16 mws_rx_assert_jitter; - uint16 mws_rx_deassert_offset; - uint16 mws_rx_deassert_jitter; - uint16 mws_tx_assert_offset; - uint16 mws_tx_assert_jitter; - uint16 mws_tx_deassert_offset; - uint16 mws_tx_deassert_jitter; - uint16 mws_pattern_assert_offset; - uint16 mws_pattern_assert_jitter; - uint16 mws_inact_dur_assert_offset; - uint16 
mws_inact_dur_assert_jitter; - uint16 mws_scan_freq_assert_offset; - uint16 mws_scan_freq_assert_jitter; - uint16 mws_prio_assert_offset_req; + int16 mws_rx_assert_offset; + int16 mws_rx_assert_jitter; + int16 mws_rx_deassert_offset; + int16 mws_rx_deassert_jitter; + int16 mws_tx_assert_offset; + int16 mws_tx_assert_jitter; + int16 mws_tx_deassert_offset; + int16 mws_tx_deassert_jitter; + int16 mws_pattern_assert_offset; + int16 mws_pattern_assert_jitter; + int16 mws_inact_dur_assert_offset; + int16 mws_inact_dur_assert_jitter; + int16 mws_scan_freq_assert_offset; + int16 mws_scan_freq_assert_jitter; + int16 mws_prio_assert_offset_req; } wci2_config_t; -/* Analogue of HCI MWS Channel Params */ +/** Analogue of HCI MWS Channel Params */ typedef struct { - uint16 mws_rx_center_freq; /* MHz */ + uint16 mws_rx_center_freq; /**< MHz */ uint16 mws_tx_center_freq; - uint16 mws_rx_channel_bw; /* KHz */ + uint16 mws_rx_channel_bw; /**< KHz */ uint16 mws_tx_channel_bw; uint8 mws_channel_en; - uint8 mws_channel_type; /* Don't care for WLAN? */ + uint8 mws_channel_type; /**< Don't care for WLAN? 
*/ } mws_params_t; -/* MWS wci2 message */ +#define LTECX_MAX_NUM_PERIOD_TYPES 7 + +/* LTE Frame params */ typedef struct { - uint8 mws_wci2_data; /* BT-SIG msg */ - uint16 mws_wci2_interval; /* Interval in us */ - uint16 mws_wci2_repeat; /* No of msgs to send */ + uint16 mws_frame_dur; + int16 mws_framesync_assert_offset; + uint16 mws_framesync_assert_jitter; + uint16 mws_period_dur[LTECX_MAX_NUM_PERIOD_TYPES]; + uint8 mws_period_type[LTECX_MAX_NUM_PERIOD_TYPES]; + uint8 mws_num_periods; +} mws_frame_config_t; + +/** MWS wci2 message */ +typedef struct { + uint8 mws_wci2_data; /**< BT-SIG msg */ + uint8 PAD; + uint16 mws_wci2_interval; /**< Interval in us */ + uint16 mws_wci2_repeat; /**< No of msgs to send */ } mws_wci2_msg_t; +/* MWS ANT map */ +typedef struct { + uint16 combo1; /* mws ant selection 1 */ + uint16 combo2; /* mws ant selection 2 */ + uint16 combo3; /* mws ant selection 3 */ + uint16 combo4; /* mws ant selection 4 */ +} mws_ant_map_t; + +/* MWS SCAN_REQ Bitmap */ +typedef struct mws_scanreq_params { + uint16 idx; + uint16 bm_2g; + uint16 bm_5g_lo; + uint16 bm_5g_mid; + uint16 bm_5g_hi; +} mws_scanreq_params_t; typedef struct { - uint32 config; /* MODE: AUTO (-1), Disable (0), Enable (1) */ - uint32 status; /* Current state: Disabled (0), Enabled (1) */ + uint32 config; /**< MODE: AUTO (-1), Disable (0), Enable (1) */ + uint32 status; /**< Current state: Disabled (0), Enabled (1) */ } wl_config_t; #define WLC_RSDB_MODE_AUTO_MASK 0x80 #define WLC_RSDB_EXTRACT_MODE(val) ((int8)((val) & (~(WLC_RSDB_MODE_AUTO_MASK)))) -#define WL_IF_STATS_T_VERSION 1 /* current version of wl_if_stats structure */ +typedef struct { + uint16 request; /* type of sensor hub request */ + uint16 enable; /* enable/disable response for specified request */ + uint16 interval; /* interval between responses to the request */ +} shub_req_t; -/* per interface counters */ +#define WL_IF_STATS_T_VERSION 1 /**< current version of wl_if_stats structure */ + +/** per interface counters 
*/ typedef struct wl_if_stats { - uint16 version; /* version of the structure */ - uint16 length; /* length of the entire structure */ - uint32 PAD; /* padding */ + uint16 version; /**< version of the structure */ + uint16 length; /**< length of the entire structure */ + uint32 PAD; /**< padding */ /* transmit stat counters */ - uint64 txframe; /* tx data frames */ - uint64 txbyte; /* tx data bytes */ - uint64 txerror; /* tx data errors (derived: sum of others) */ - uint64 txnobuf; /* tx out of buffer errors */ - uint64 txrunt; /* tx runt frames */ - uint64 txfail; /* tx failed frames */ - uint64 txretry; /* tx retry frames */ - uint64 txretrie; /* tx multiple retry frames */ - uint64 txfrmsnt; /* tx sent frames */ - uint64 txmulti; /* tx mulitcast sent frames */ - uint64 txfrag; /* tx fragments sent */ + uint64 txframe; /**< tx data frames */ + uint64 txbyte; /**< tx data bytes */ + uint64 txerror; /**< tx data errors (derived: sum of others) */ + uint64 txnobuf; /**< tx out of buffer errors */ + uint64 txrunt; /**< tx runt frames */ + uint64 txfail; /**< tx failed frames */ + uint64 txretry; /**< tx retry frames */ + uint64 txretrie; /**< tx multiple retry frames */ + uint64 txfrmsnt; /**< tx sent frames */ + uint64 txmulti; /**< tx mulitcast sent frames */ + uint64 txfrag; /**< tx fragments sent */ /* receive stat counters */ - uint64 rxframe; /* rx data frames */ - uint64 rxbyte; /* rx data bytes */ - uint64 rxerror; /* rx data errors (derived: sum of others) */ - uint64 rxnobuf; /* rx out of buffer errors */ - uint64 rxrunt; /* rx runt frames */ - uint64 rxfragerr; /* rx fragment errors */ - uint64 rxmulti; /* rx multicast frames */ + uint64 rxframe; /**< rx data frames */ + uint64 rxbyte; /**< rx data bytes */ + uint64 rxerror; /**< rx data errors (derived: sum of others) */ + uint64 rxnobuf; /**< rx out of buffer errors */ + uint64 rxrunt; /**< rx runt frames */ + uint64 rxfragerr; /**< rx fragment errors */ + uint64 rxmulti; /**< rx multicast frames */ + + 
uint64 txexptime; /* DATA Tx frames suppressed due to timer expiration */ + uint64 txrts; /* RTS/CTS succeeded count */ + uint64 txnocts; /* RTS/CTS failed count */ + + uint64 txretrans; /* Number of frame retransmissions */ +} wl_if_stats_t; typedef struct wl_band { - uint16 bandtype; /* WL_BAND_2G, WL_BAND_5G */ - uint16 bandunit; /* bandstate[] index */ - uint16 phytype; /* phytype */ + uint16 bandtype; /**< WL_BAND_2G, WL_BAND_5G */ + uint16 bandunit; /**< bandstate[] index */ + uint16 phytype; /**< phytype */ uint16 phyrev; } wl_band_t; -#define WL_WLC_VERSION_T_VERSION 1 /* current version of wlc_version structure */ +#define WL_WLC_VERSION_T_VERSION 1 /**< current version of wlc_version structure */ -/* wlc interface version */ +/** wlc interface version */ typedef struct wl_wlc_version { - uint16 version; /* version of the structure */ - uint16 length; /* length of the entire structure */ + uint16 version; /**< version of the structure */ + uint16 length; /**< length of the entire structure */ /* epi version numbers */ - uint16 epi_ver_major; /* epi major version number */ - uint16 epi_ver_minor; /* epi minor version number */ - uint16 epi_rc_num; /* epi RC number */ - uint16 epi_incr_num; /* epi increment number */ + uint16 epi_ver_major; /**< epi major version number */ + uint16 epi_ver_minor; /**< epi minor version number */ + uint16 epi_rc_num; /**< epi RC number */ + uint16 epi_incr_num; /**< epi increment number */ /* wlc interface version numbers */ - uint16 wlc_ver_major; /* wlc interface major version number */ - uint16 wlc_ver_minor; /* wlc interface minor version number */ + uint16 wlc_ver_major; /**< wlc interface major version number */ + uint16 wlc_ver_minor; /**< wlc interface minor version number */ } wl_wlc_version_t; -/* Version of WLC interface to be returned as a part of wl_wlc_version structure.
- * For the discussion related to versions update policy refer to - * http://hwnbu-twiki.broadcom.com/bin/view/Mwgroup/WlShimAbstractionLayer - * For now the policy is to increment WLC_VERSION_MAJOR each time - * there is a change that involves both WLC layer and per-port layer. - * WLC_VERSION_MINOR is currently not in use. - */ -#define WLC_VERSION_MAJOR 3 -#define WLC_VERSION_MINOR 0 +/* Highest version of WLC_API_VERSION supported */ +#define WLC_API_VERSION_MAJOR_MAX 8 +#define WLC_API_VERSION_MINOR_MAX 0 /* begin proxd definitions */ #include -#define WL_PROXD_API_VERSION 0x0300 /* version 3.0 */ +#define WL_PROXD_API_VERSION 0x0300 /**< version 3.0 */ -/* Minimum supported API version */ +/** Minimum supported API version */ #define WL_PROXD_API_MIN_VERSION 0x0300 -/* proximity detection methods */ +/** proximity detection methods */ enum { WL_PROXD_METHOD_NONE = 0, - WL_PROXD_METHOD_RSVD1 = 1, /* backward compatibility - RSSI, not supported */ - WL_PROXD_METHOD_TOF = 2, - WL_PROXD_METHOD_RSVD2 = 3, /* 11v only - if needed */ - WL_PROXD_METHOD_FTM = 4, /* IEEE rev mc/2014 */ + WL_PROXD_METHOD_RSVD1 = 1, /**< backward compatibility - RSSI, not supported */ + WL_PROXD_METHOD_TOF = 2, + WL_PROXD_METHOD_RSVD2 = 3, /**< 11v only - if needed */ + WL_PROXD_METHOD_FTM = 4, /**< IEEE rev mc/2014 */ WL_PROXD_METHOD_MAX }; typedef int16 wl_proxd_method_t; -/* global and method configuration flags */ +/** global and method configuration flags */ enum { - WL_PROXD_FLAG_NONE = 0x00000000, - WL_PROXD_FLAG_RX_ENABLED = 0x00000001, /* respond to requests */ - WL_PROXD_FLAG_RX_RANGE_REQ = 0x00000002, /* 11mc range requests enabled */ - WL_PROXD_FLAG_TX_LCI = 0x00000004, /* transmit location, if available */ - WL_PROXD_FLAG_TX_CIVIC = 0x00000008, /* tx civic loc, if available */ - WL_PROXD_FLAG_RX_AUTO_BURST = 0x00000010, /* respond to requests w/o host action */ - WL_PROXD_FLAG_TX_AUTO_BURST = 0x00000020, /* continue requests w/o host action */ - WL_PROXD_FLAG_AVAIL_PUBLISH = 
0x00000040, /* publish availability */ - WL_PROXD_FLAG_AVAIL_SCHEDULE = 0x00000080, /* schedule using availability */ - WL_PROXD_FLAG_ALL = 0xffffffff + WL_PROXD_FLAG_NONE = 0x00000000, + WL_PROXD_FLAG_RX_ENABLED = 0x00000001, /**< respond to requests, per bss */ + WL_PROXD_FLAG_RX_RANGE_REQ = 0x00000002, /**< 11mc range requests enabled */ + WL_PROXD_FLAG_TX_LCI = 0x00000004, /**< tx lci, if known */ + WL_PROXD_FLAG_TX_CIVIC = 0x00000008, /**< tx civic, if known */ + WL_PROXD_FLAG_RX_AUTO_BURST = 0x00000010, /**< auto respond w/o host action */ + WL_PROXD_FLAG_TX_AUTO_BURST = 0x00000020, /**< continue tx w/o host action */ + WL_PROXD_FLAG_AVAIL_PUBLISH = 0x00000040, /**< publish availability */ + WL_PROXD_FLAG_AVAIL_SCHEDULE = 0x00000080, /**< schedule using availability */ + WL_PROXD_FLAG_ASAP_CAPABLE = 0x00000100, /* ASAP capable */ + WL_PROXD_FLAG_MBURST_FOLLOWUP = 0x00000200, /* new multi-burst algorithm */ + WL_PROXD_FLAG_SECURE = 0x00000400, /* per bsscfg option */ + WL_PROXD_FLAG_NO_TSF_SYNC = 0x00000800, /* disable tsf sync */ + WL_PROXD_FLAG_ALL = 0xffffffff }; typedef uint32 wl_proxd_flags_t; #define WL_PROXD_FLAGS_AVAIL (WL_PROXD_FLAG_AVAIL_PUBLISH | \ WL_PROXD_FLAG_AVAIL_SCHEDULE) -/* session flags */ +/** session flags */ enum { - WL_PROXD_SESSION_FLAG_NONE = 0x00000000, /* no flags */ - WL_PROXD_SESSION_FLAG_INITIATOR = 0x00000001, /* local device is initiator */ - WL_PROXD_SESSION_FLAG_TARGET = 0x00000002, /* local device is target */ - WL_PROXD_SESSION_FLAG_ONE_WAY = 0x00000004, /* (initiated) 1-way rtt */ - WL_PROXD_SESSION_FLAG_AUTO_BURST = 0x00000008, /* created w/ rx_auto_burst */ - WL_PROXD_SESSION_FLAG_PERSIST = 0x00000010, /* good until cancelled */ - WL_PROXD_SESSION_FLAG_RTT_DETAIL = 0x00000020, /* rtt detail in results */ - WL_PROXD_SESSION_FLAG_TOF_COMPAT = 0x00000040, /* TOF compatibility - TBD */ - WL_PROXD_SESSION_FLAG_AOA = 0x00000080, /* AOA along w/ RTT */ - WL_PROXD_SESSION_FLAG_RX_AUTO_BURST = 0x00000100, /* Same as proxd flags 
above */ - WL_PROXD_SESSION_FLAG_TX_AUTO_BURST = 0x00000200, /* Same as proxd flags above */ - WL_PROXD_SESSION_FLAG_NAN_BSS = 0x00000400, /* Use NAN BSS, if applicable */ - WL_PROXD_SESSION_FLAG_TS1 = 0x00000800, /* e.g. FTM1 - cap or rx */ - WL_PROXD_SESSION_FLAG_REPORT_FAILURE= 0x00002000, /* report failure to target */ - WL_PROXD_SESSION_FLAG_INITIATOR_RPT = 0x00004000, /* report distance to target */ - WL_PROXD_SESSION_FLAG_NOCHANSWT = 0x00008000, /* No channel switching */ - WL_PROXD_SESSION_FLAG_NETRUAL = 0x00010000, /* netrual mode */ - WL_PROXD_SESSION_FLAG_SEQ_EN = 0x00020000, /* Toast */ - WL_PROXD_SESSION_FLAG_NO_PARAM_OVRD = 0x00040000, /* no param override from target */ - WL_PROXD_SESSION_FLAG_ASAP = 0x00080000, /* ASAP session */ - WL_PROXD_SESSION_FLAG_REQ_LCI = 0x00100000, /* transmit LCI req */ - WL_PROXD_SESSION_FLAG_REQ_CIV = 0x00200000, /* transmit civic loc req */ - WL_PROXD_SESSION_FLAG_COLLECT = 0x80000000, /* debug - collect */ - WL_PROXD_SESSION_FLAG_ALL = 0xffffffff + WL_PROXD_SESSION_FLAG_NONE = 0x00000000, /**< no flags */ + WL_PROXD_SESSION_FLAG_INITIATOR = 0x00000001, /**< local device is initiator */ + WL_PROXD_SESSION_FLAG_TARGET = 0x00000002, /**< local device is target */ + WL_PROXD_SESSION_FLAG_ONE_WAY = 0x00000004, /**< (initiated) 1-way rtt */ + WL_PROXD_SESSION_FLAG_AUTO_BURST = 0x00000008, /**< created w/ rx_auto_burst */ + WL_PROXD_SESSION_FLAG_PERSIST = 0x00000010, /**< good until cancelled */ + WL_PROXD_SESSION_FLAG_RTT_DETAIL = 0x00000020, /**< rtt detail in results */ + WL_PROXD_SESSION_FLAG_SECURE = 0x00000040, /**< sessionis secure */ + WL_PROXD_SESSION_FLAG_AOA = 0x00000080, /**< AOA along w/ RTT */ + WL_PROXD_SESSION_FLAG_RX_AUTO_BURST = 0x00000100, /**< Same as proxd flags above */ + WL_PROXD_SESSION_FLAG_TX_AUTO_BURST = 0x00000200, /**< Same as proxd flags above */ + WL_PROXD_SESSION_FLAG_NAN_BSS = 0x00000400, /**< Use NAN BSS, if applicable */ + WL_PROXD_SESSION_FLAG_TS1 = 0x00000800, /**< e.g. 
FTM1 - ASAP-capable */ + WL_PROXD_SESSION_FLAG_REPORT_FAILURE = 0x00002000, /**< report failure to target */ + WL_PROXD_SESSION_FLAG_INITIATOR_RPT = 0x00004000, /**< report distance to target */ + WL_PROXD_SESSION_FLAG_NOCHANSWT = 0x00008000, + WL_PROXD_SESSION_FLAG_NETRUAL = 0x00010000, /**< netrual mode */ + WL_PROXD_SESSION_FLAG_SEQ_EN = 0x00020000, /**< Toast */ + WL_PROXD_SESSION_FLAG_NO_PARAM_OVRD = 0x00040000, /**< no param override from target */ + WL_PROXD_SESSION_FLAG_ASAP = 0x00080000, /**< ASAP session */ + WL_PROXD_SESSION_FLAG_REQ_LCI = 0x00100000, /**< transmit LCI req */ + WL_PROXD_SESSION_FLAG_REQ_CIV = 0x00200000, /**< transmit civic loc req */ + WL_PROXD_SESSION_FLAG_PRE_SCAN = 0x00400000, /* enable pre-scan for asap=1 */ + WL_PROXD_SESSION_FLAG_AUTO_VHTACK = 0x00800000, /* use vhtack based on brcm ie */ + WL_PROXD_SESSION_FLAG_VHTACK = 0x01000000, /* vht ack is in use - output only */ + WL_PROXD_SESSION_FLAG_BDUR_NOPREF = 0x02000000, /* burst-duration: no preference */ + WL_PROXD_SESSION_FLAG_NUM_FTM_NOPREF = 0x04000000, /* num of FTM frames: no preference */ + WL_PROXD_SESSION_FLAG_FTM_SEP_NOPREF = 0x08000000, /* time btw FTM frams: no pref */ + WL_PROXD_SESSION_FLAG_NUM_BURST_NOPREF = 0x10000000, /* num of bursts: no pref */ + WL_PROXD_SESSION_FLAG_BURST_PERIOD_NOPREF = 0x20000000, /* burst period: no pref */ + WL_PROXD_SESSION_FLAG_MBURST_FOLLOWUP = 0x40000000, /* new mburst algo - reserved */ + WL_PROXD_SESSION_FLAG_MBURST_NODELAY = 0x80000000, /**< good until cancelled */ + WL_PROXD_SESSION_FLAG_ALL = 0xffffffff + }; typedef uint32 wl_proxd_session_flags_t; -/* time units - mc supports up to 0.1ns resolution */ +/** time units - mc supports up to 0.1ns resolution */ enum { - WL_PROXD_TMU_TU = 0, /* 1024us */ + WL_PROXD_TMU_TU = 0, /**< 1024us */ WL_PROXD_TMU_SEC = 1, WL_PROXD_TMU_MILLI_SEC = 2, WL_PROXD_TMU_MICRO_SEC = 3, @@ -7078,14 +10703,14 @@ enum { }; typedef int16 wl_proxd_tmu_t; -/* time interval e.g. 10ns */ +/** time interval e.g. 
10ns */ typedef struct wl_proxd_intvl { uint32 intvl; wl_proxd_tmu_t tmu; uint8 pad[2]; } wl_proxd_intvl_t; -/* commands that can apply to proxd, method or a session */ +/** commands that can apply to proxd, method or a session */ enum { WL_PROXD_CMD_NONE = 0, WL_PROXD_CMD_GET_VERSION = 1, @@ -7102,8 +10727,8 @@ enum { WL_PROXD_CMD_GET_SESSIONS = 12, WL_PROXD_CMD_GET_COUNTERS = 13, WL_PROXD_CMD_CLEAR_COUNTERS = 14, - WL_PROXD_CMD_COLLECT = 15, - WL_PROXD_CMD_TUNE = 16, + WL_PROXD_CMD_COLLECT = 15, /* not supported, see 'wl proxd_collect' */ + WL_PROXD_CMD_TUNE = 16, /* not supported, see 'wl proxd_tune' */ WL_PROXD_CMD_DUMP = 17, WL_PROXD_CMD_START_RANGING = 18, WL_PROXD_CMD_STOP_RANGING = 19, @@ -7123,22 +10748,47 @@ enum { WL_PROXD_SESSION_ID_GLOBAL = 0 }; -#define WL_PROXD_SID_HOST_MAX 0x7fff -#define WL_PROXD_SID_HOST_ALLOC(_sid) ((_sid) > 0 && (_sid) <= WL_PROXD_SID_HOST_MAX) +/* Externally allocated sids */ +#define WL_PROXD_SID_EXT_MAX 0x7fff +#define WL_PROXD_SID_EXT_ALLOC(_sid) ((_sid) > 0 && (_sid) <= WL_PROXD_SID_EXT_MAX) -/* maximum number sessions that can be allocated, may be less if tunable */ +/* block size for reserved sid blocks */ +#define WL_PROXD_SID_EXT_BLKSZ 256 +#define WL_PROXD_SID_EXT_BLK_START(_i) (WL_PROXD_SID_EXT_MAX - (_i) * WL_PROXD_SID_EXT_BLKSZ + 1) +#define WL_PROXD_SID_EXT_BLK_END(_start) ((_start) + WL_PROXD_SID_EXT_BLKSZ - 1) + +/* rrm block */ +#define WL_PROXD_SID_RRM_START WL_PROXD_SID_EXT_BLK_START(1) +#define WL_PROXD_SID_RRM_END WL_PROXD_SID_EXT_BLK_END(WL_PROXD_SID_RRM_START) + +/* nan block */ +#define WL_PROXD_SID_NAN_START WL_PROXD_SID_EXT_BLK_START(2) +#define WL_PROXD_SID_NAN_END WL_PROXD_SID_EXT_BLK_END(WL_PROXD_SID_NAN_START) + +/** maximum number sessions that can be allocated, may be less if tunable */ #define WL_PROXD_MAX_SESSIONS 16 typedef uint16 wl_proxd_session_id_t; -/* status - TBD BCME_ vs proxd status - range reserved for BCME_ */ +/** status - TBD BCME_ vs proxd status - range reserved for BCME_ */ enum 
{ + WL_PROXD_E_NOAVAIL = -1056, + WL_PROXD_E_EXT_SCHED = -1055, + WL_PROXD_E_NOT_BCM = -1054, + WL_PROXD_E_FRAME_TYPE = -1053, + WL_PROXD_E_VERNOSUPPORT = -1052, + WL_PROXD_E_SEC_NOKEY = -1051, + WL_PROXD_E_SEC_POLICY = -1050, + WL_PROXD_E_SCAN_INPROCESS = -1049, + WL_PROXD_E_BAD_PARTIAL_TSF = -1048, + WL_PROXD_E_SCANFAIL = -1047, + WL_PROXD_E_NOTSF = -1046, WL_PROXD_E_POLICY = -1045, WL_PROXD_E_INCOMPLETE = -1044, WL_PROXD_E_OVERRIDDEN = -1043, WL_PROXD_E_ASAP_FAILED = -1042, WL_PROXD_E_NOTSTARTED = -1041, - WL_PROXD_E_INVALIDAVB = -1040, + WL_PROXD_E_INVALIDMEAS = -1040, WL_PROXD_E_INCAPABLE = -1039, WL_PROXD_E_MISMATCH = -1038, WL_PROXD_E_DUP_SESSION = -1037, @@ -7152,7 +10802,7 @@ enum { WL_PROXD_E_DEFERRED = -1029, WL_PROXD_E_INVALID_SID = -1028, WL_PROXD_E_REMOTE_CANCEL = -1027, - WL_PROXD_E_CANCELED = -1026, /* local */ + WL_PROXD_E_CANCELED = -1026, /**< local */ WL_PROXD_E_INVALID_SESSION = -1025, WL_PROXD_E_BAD_STATE = -1024, WL_PROXD_E_ERROR = -1, @@ -7160,7 +10810,23 @@ enum { }; typedef int32 wl_proxd_status_t; -/* session states */ +/* proxd errors from phy */ +#define PROXD_TOF_INIT_ERR_BITS 16 + +enum { + WL_PROXD_PHY_ERR_LB_CORR_THRESH = (1 << 0), /* Loopback Correlation threshold */ + WL_PROXD_PHY_ERR_RX_CORR_THRESH = (1 << 1), /* Received Correlation threshold */ + WL_PROXD_PHY_ERR_LB_PEAK_POWER = (1 << 2), /* Loopback Peak power */ + WL_PROXD_PHY_ERR_RX_PEAK_POWER = (1 << 3), /* Received Peak power */ + WL_PROXD_PHY_ERR_BITFLIP = (1 << 4), /* Bitflips */ + WL_PROXD_PHY_ERR_SNR = (1 << 5), /* SNR */ + WL_PROXD_PHY_RX_STRT_WIN_OFF = (1 << 6), /* Receive start window is off */ + WL_PROXD_PHY_RX_END_WIN_OFF = (1 << 7), /* Receive End window is off */ + WL_PROXD_PHY_ERR_LOW_CONFIDENCE = (1 << 15), /* Low confidence on meas distance */ +}; +typedef uint32 wl_proxd_phy_error_t; + +/** session states */ enum { WL_PROXD_SESSION_STATE_NONE = 0, WL_PROXD_SESSION_STATE_CREATED = 1, @@ -7172,93 +10838,117 @@ enum { WL_PROXD_SESSION_STATE_BURST = 7, 
WL_PROXD_SESSION_STATE_STOPPING = 8, WL_PROXD_SESSION_STATE_ENDED = 9, + WL_PROXD_SESSION_STATE_START_WAIT = 10, WL_PROXD_SESSION_STATE_DESTROYING = -1 }; typedef int16 wl_proxd_session_state_t; -/* RTT sample flags */ +/** RTT sample flags */ enum { - WL_PROXD_RTT_SAMPLE_NONE = 0x00, - WL_PROXD_RTT_SAMPLE_DISCARD = 0x01 + WL_PROXD_RTT_SAMPLE_NONE = 0x00, + WL_PROXD_RTT_SAMPLE_DISCARD = 0x01 }; typedef uint8 wl_proxd_rtt_sample_flags_t; +typedef int16 wl_proxd_rssi_t; +typedef uint16 wl_proxd_snr_t; +typedef uint16 wl_proxd_bitflips_t; typedef struct wl_proxd_rtt_sample { - uint8 id; /* id for the sample - non-zero */ + uint8 id; /**< id for the sample - non-zero */ wl_proxd_rtt_sample_flags_t flags; - int16 rssi; - wl_proxd_intvl_t rtt; /* round trip time */ - uint32 ratespec; + wl_proxd_rssi_t rssi; + wl_proxd_intvl_t rtt; /**< round trip time */ + uint32 ratespec; + wl_proxd_snr_t snr; + wl_proxd_bitflips_t bitflips; + wl_proxd_status_t status; + int32 distance; + wl_proxd_phy_error_t tof_phy_error; + wl_proxd_phy_error_t tof_tgt_phy_error; /* target phy error bit map */ + wl_proxd_snr_t tof_tgt_snr; + wl_proxd_bitflips_t tof_tgt_bitflips; + uint8 coreid; + uint8 pad[3]; } wl_proxd_rtt_sample_t; -/* result flags */ +/** result flags */ enum { WL_PRXOD_RESULT_FLAG_NONE = 0x0000, - WL_PROXD_RESULT_FLAG_NLOS = 0x0001, /* LOS - if available */ - WL_PROXD_RESULT_FLAG_LOS = 0x0002, /* NLOS - if available */ - WL_PROXD_RESULT_FLAG_FATAL = 0x0004, /* Fatal error during burst */ + WL_PROXD_RESULT_FLAG_NLOS = 0x0001, /**< LOS - if available */ + WL_PROXD_RESULT_FLAG_LOS = 0x0002, /**< NLOS - if available */ + WL_PROXD_RESULT_FLAG_FATAL = 0x0004, /**< Fatal error during burst */ + WL_PROXD_RESULT_FLAG_VHTACK = 0x0008, /* VHTACK or Legacy ACK used */ + WL_PROXD_REQUEST_SENT = 0x0010, /* FTM request was sent */ + WL_PROXD_REQUEST_ACKED = 0x0020, /* FTM request was acked */ + WL_PROXD_LTFSEQ_STARTED = 0x0040, /* LTF sequence started */ WL_PROXD_RESULT_FLAG_ALL = 0xffff }; 
typedef int16 wl_proxd_result_flags_t; -/* rtt measurement result */ +/** rtt measurement result */ typedef struct wl_proxd_rtt_result { - wl_proxd_session_id_t sid; - wl_proxd_result_flags_t flags; - wl_proxd_status_t status; - struct ether_addr peer; - wl_proxd_session_state_t state; /* current state */ + wl_proxd_session_id_t sid; + wl_proxd_result_flags_t flags; + wl_proxd_status_t status; + struct ether_addr peer; + wl_proxd_session_state_t state; /**< current state */ union { - wl_proxd_intvl_t retry_after; /* hint for errors */ - wl_proxd_intvl_t burst_duration; /* burst duration */ + wl_proxd_intvl_t retry_after; /* hint for errors */ + wl_proxd_intvl_t burst_duration; /* burst duration */ } u; - wl_proxd_rtt_sample_t avg_rtt; - uint32 avg_dist; /* 1/256m units */ - uint16 sd_rtt; /* RTT standard deviation */ - uint8 num_valid_rtt; /* valid rtt cnt */ - uint8 num_ftm; /* actual num of ftm cnt */ - uint16 burst_num; /* in a session */ - uint16 num_rtt; /* 0 if no detail */ - wl_proxd_rtt_sample_t rtt[1]; /* variable */ + wl_proxd_rtt_sample_t avg_rtt; + uint32 avg_dist; /* 1/256m units */ + uint16 sd_rtt; /* RTT standard deviation */ + uint8 num_valid_rtt; /* valid rtt cnt */ + uint8 num_ftm; /* actual num of ftm cnt (Configured) */ + uint16 burst_num; /* in a session */ + uint16 num_rtt; /* 0 if no detail */ + uint16 num_meas; /* number of ftm frames seen OTA */ + uint8 pad[2]; + wl_proxd_rtt_sample_t rtt[1]; /* variable */ } wl_proxd_rtt_result_t; -/* aoa measurement result */ +/** aoa measurement result */ typedef struct wl_proxd_aoa_result { wl_proxd_session_id_t sid; wl_proxd_result_flags_t flags; wl_proxd_status_t status; struct ether_addr peer; - wl_proxd_session_state_t state; + wl_proxd_session_state_t state; uint16 burst_num; uint8 pad[2]; /* wl_proxd_aoa_sample_t sample_avg; TBD */ } BWL_POST_PACKED_STRUCT wl_proxd_aoa_result_t; +#include -/* global stats */ +/** global stats */ typedef struct wl_proxd_counters { - uint32 tx; /* tx frame count */ 
- uint32 rx; /* rx frame count */ - uint32 burst; /* total number of burst */ - uint32 sessions; /* total number of sessions */ - uint32 max_sessions; /* max concurrency */ - uint32 sched_fail; /* scheduling failures */ - uint32 timeouts; /* timeouts */ - uint32 protoerr; /* protocol errors */ - uint32 noack; /* tx w/o ack */ - uint32 txfail; /* any tx falure */ - uint32 lci_req_tx; /* tx LCI requests */ - uint32 lci_req_rx; /* rx LCI requests */ - uint32 lci_rep_tx; /* tx LCI reports */ - uint32 lci_rep_rx; /* rx LCI reports */ - uint32 civic_req_tx; /* tx civic requests */ - uint32 civic_req_rx; /* rx civic requests */ - uint32 civic_rep_tx; /* tx civic reports */ - uint32 civic_rep_rx; /* rx civic reports */ - uint32 rctx; /* ranging contexts created */ - uint32 rctx_done; /* count of ranging done */ - uint32 publish_err; /* availability publishing errors */ - uint32 on_chan; /* count of scheduler onchan */ - uint32 off_chan; /* count of scheduler offchan */ + uint32 tx; /**< tx frame count */ + uint32 rx; /**< rx frame count */ + uint32 burst; /**< total number of burst */ + uint32 sessions; /**< total number of sessions */ + uint32 max_sessions; /**< max concurrency */ + uint32 sched_fail; /**< scheduling failures */ + uint32 timeouts; /**< timeouts */ + uint32 protoerr; /**< protocol errors */ + uint32 noack; /**< tx w/o ack */ + uint32 txfail; /**< any tx falure */ + uint32 lci_req_tx; /**< tx LCI requests */ + uint32 lci_req_rx; /**< rx LCI requests */ + uint32 lci_rep_tx; /**< tx LCI reports */ + uint32 lci_rep_rx; /**< rx LCI reports */ + uint32 civic_req_tx; /**< tx civic requests */ + uint32 civic_req_rx; /**< rx civic requests */ + uint32 civic_rep_tx; /**< tx civic reports */ + uint32 civic_rep_rx; /**< rx civic reports */ + uint32 rctx; /**< ranging contexts created */ + uint32 rctx_done; /**< count of ranging done */ + uint32 publish_err; /**< availability publishing errors */ + uint32 on_chan; /**< count of scheduler onchan */ + uint32 off_chan; 
/**< count of scheduler offchan */ + uint32 tsf_lo; /* local tsf or session tsf */ + uint32 tsf_hi; + uint32 num_meas; } wl_proxd_counters_t; typedef struct wl_proxd_counters wl_proxd_session_counters_t; @@ -7269,29 +10959,34 @@ enum { }; typedef int16 wl_proxd_caps_t; -/* method capabilities */ +/** method capabilities */ enum { WL_PROXD_FTM_CAP_NONE = 0x0000, WL_PROXD_FTM_CAP_FTM1 = 0x0001 }; typedef uint16 wl_proxd_ftm_caps_t; -typedef struct BWL_PRE_PACKED_STRUCT wl_proxd_tlv_id_list { +typedef struct wl_proxd_tlv_id_list { uint16 num_ids; uint16 ids[1]; -} BWL_POST_PACKED_STRUCT wl_proxd_tlv_id_list_t; +} wl_proxd_tlv_id_list_t; typedef struct wl_proxd_session_id_list { uint16 num_ids; wl_proxd_session_id_t ids[1]; } wl_proxd_session_id_list_t; +typedef struct wl_proxd_tpk { + struct ether_addr peer; + uint8 tpk[TPK_FTM_LEN]; +} wl_proxd_tpk_t; + /* tlvs returned for get_info on ftm method - * configuration: - * proxd flags - * event mask - * debug mask - * session defaults (session tlvs) + * configuration: + * proxd flags + * event mask + * debug mask + * session defaults (session tlvs) * status tlv - not supported for ftm method * info tlv */ @@ -7302,6 +10997,14 @@ typedef struct wl_proxd_ftm_info { uint16 rx_max_burst; } wl_proxd_ftm_info_t; +enum { + WL_PROXD_WAIT_NONE = 0x0000, + WL_PROXD_WAIT_KEY = 0x0001, + WL_PROXD_WAIT_SCHED = 0x0002, + WL_PROXD_WAIT_TSF = 0x0004 +}; +typedef int16 wl_proxd_wait_reason_t; + /* tlvs returned for get_info on session * session config (tlvs) * session info tlv @@ -7314,6 +11017,9 @@ typedef struct wl_proxd_ftm_session_info { wl_proxd_session_state_t state; wl_proxd_status_t status; uint16 burst_num; + wl_proxd_wait_reason_t wait_reason; + uint32 meas_start_lo; /* sn tsf of 1st meas for cur/prev burst */ + uint32 meas_start_hi; } wl_proxd_ftm_session_info_t; typedef struct wl_proxd_ftm_session_status { @@ -7321,15 +11027,16 @@ typedef struct wl_proxd_ftm_session_status { wl_proxd_session_state_t state; wl_proxd_status_t 
status; uint16 burst_num; + uint16 pad; } wl_proxd_ftm_session_status_t; -/* rrm range request */ +/** rrm range request */ typedef struct wl_proxd_range_req { uint16 num_repeat; - uint16 init_delay_range; /* in TUs */ + uint16 init_delay_range; /**< in TUs */ uint8 pad; - uint8 num_nbr; /* number of (possible) neighbors */ - nbr_element_t nbr[1]; + uint8 num_nbr; /**< number of (possible) neighbors */ + nbr_element_t nbr[1]; } wl_proxd_range_req_t; #define WL_PROXD_LCI_LAT_OFF 0 @@ -7373,15 +11080,15 @@ typedef struct wl_proxd_range_req { #define WL_PROXD_LCI_VERSION(_lci) ((_lci)->data[15] >> 6) /* availability. advertising mechanism bss specific */ -/* availablity flags */ +/** availablity flags */ enum { WL_PROXD_AVAIL_NONE = 0, WL_PROXD_AVAIL_NAN_PUBLISHED = 0x0001, - WL_PROXD_AVAIL_SCHEDULED = 0x0002 /* scheduled by proxd */ + WL_PROXD_AVAIL_SCHEDULED = 0x0002 /**< scheduled by proxd */ }; typedef int16 wl_proxd_avail_flags_t; -/* time reference */ +/** time reference */ enum { WL_PROXD_TREF_NONE = 0, WL_PROXD_TREF_DEV_TSF = 1, @@ -7391,19 +11098,19 @@ enum { }; typedef int16 wl_proxd_time_ref_t; -/* proxd channel-time slot */ +/** proxd channel-time slot */ typedef struct { - wl_proxd_intvl_t start; /* from ref */ - wl_proxd_intvl_t duration; /* from start */ + wl_proxd_intvl_t start; /**< from ref */ + wl_proxd_intvl_t duration; /**< from start */ uint32 chanspec; } wl_proxd_time_slot_t; typedef struct wl_proxd_avail24 { - wl_proxd_avail_flags_t flags; /* for query only */ + wl_proxd_avail_flags_t flags; /**< for query only */ wl_proxd_time_ref_t time_ref; - uint16 max_slots; /* for query only */ + uint16 max_slots; /**< for query only */ uint16 num_slots; - wl_proxd_time_slot_t slots[1]; /* ROM compat - not used */ + wl_proxd_time_slot_t slots[1]; /**< ROM compat - not used */ wl_proxd_intvl_t repeat; wl_proxd_time_slot_t ts0[1]; } wl_proxd_avail24_t; @@ -7415,9 +11122,9 @@ typedef struct wl_proxd_avail24 { (_num_slots) * 
sizeof(*WL_PROXD_AVAIL24_TIMESLOT(_avail24, 0))) typedef struct wl_proxd_avail { - wl_proxd_avail_flags_t flags; /* for query only */ + wl_proxd_avail_flags_t flags; /**< for query only */ wl_proxd_time_ref_t time_ref; - uint16 max_slots; /* for query only */ + uint16 max_slots; /**< for query only */ uint16 num_slots; wl_proxd_intvl_t repeat; wl_proxd_time_slot_t slots[1]; @@ -7432,73 +11139,85 @@ typedef struct wl_proxd_avail { /* collect support TBD */ -/* debugging */ +/** debugging */ enum { WL_PROXD_DEBUG_NONE = 0x00000000, WL_PROXD_DEBUG_LOG = 0x00000001, WL_PROXD_DEBUG_IOV = 0x00000002, - WL_PROXD_DEBUG_EVENT = 0x00000004, - WL_PROXD_DEBUG_SESSION = 0x00000008, - WL_PROXD_DEBUG_PROTO = 0x00000010, - WL_PROXD_DEBUG_SCHED = 0x00000020, - WL_PROXD_DEBUG_RANGING = 0x00000040, + WL_PROXD_DEBUG_EVENT = 0x00000004, + WL_PROXD_DEBUG_SESSION = 0x00000008, + WL_PROXD_DEBUG_PROTO = 0x00000010, + WL_PROXD_DEBUG_SCHED = 0x00000020, + WL_PROXD_DEBUG_RANGING = 0x00000040, + WL_PROXD_DEBUG_NAN = 0x00000080, + WL_PROXD_DEBUG_PKT = 0x00000100, + WL_PROXD_DEBUG_SEC = 0x00000200, + WL_PROXD_DEBUG_EVENTLOG = 0x80000000, /* map/enable EVNET_LOG_TAG_PROXD_INFO */ WL_PROXD_DEBUG_ALL = 0xffffffff }; typedef uint32 wl_proxd_debug_mask_t; -/* tlv IDs - data length 4 bytes unless overridden by type, alignment 32 bits */ +/** tlv IDs - data length 4 bytes unless overridden by type, alignment 32 bits */ enum { - WL_PROXD_TLV_ID_NONE = 0, - WL_PROXD_TLV_ID_METHOD = 1, - WL_PROXD_TLV_ID_FLAGS = 2, - WL_PROXD_TLV_ID_CHANSPEC = 3, /* note: uint32 */ - WL_PROXD_TLV_ID_TX_POWER = 4, - WL_PROXD_TLV_ID_RATESPEC = 5, - WL_PROXD_TLV_ID_BURST_DURATION = 6, /* intvl - length of burst */ - WL_PROXD_TLV_ID_BURST_PERIOD = 7, /* intvl - between bursts */ - WL_PROXD_TLV_ID_BURST_FTM_SEP = 8, /* intvl - between FTMs */ - WL_PROXD_TLV_ID_BURST_NUM_FTM = 9, /* uint16 - per burst */ - WL_PROXD_TLV_ID_NUM_BURST = 10, /* uint16 */ - WL_PROXD_TLV_ID_FTM_RETRIES = 11, /* uint16 at FTM level */ - 
WL_PROXD_TLV_ID_BSS_INDEX = 12, /* uint8 */ - WL_PROXD_TLV_ID_BSSID = 13, - WL_PROXD_TLV_ID_INIT_DELAY = 14, /* intvl - optional, non-standalone only */ - WL_PROXD_TLV_ID_BURST_TIMEOUT = 15, /* expect response within - intvl */ - WL_PROXD_TLV_ID_EVENT_MASK = 16, /* interested events - in/out */ - WL_PROXD_TLV_ID_FLAGS_MASK = 17, /* interested flags - in only */ - WL_PROXD_TLV_ID_PEER_MAC = 18, /* mac address of peer */ - WL_PROXD_TLV_ID_FTM_REQ = 19, /* dot11_ftm_req */ - WL_PROXD_TLV_ID_LCI_REQ = 20, - WL_PROXD_TLV_ID_LCI = 21, + WL_PROXD_TLV_ID_NONE = 0, + WL_PROXD_TLV_ID_METHOD = 1, + WL_PROXD_TLV_ID_FLAGS = 2, + WL_PROXD_TLV_ID_CHANSPEC = 3, /**< note: uint32 */ + WL_PROXD_TLV_ID_TX_POWER = 4, + WL_PROXD_TLV_ID_RATESPEC = 5, + WL_PROXD_TLV_ID_BURST_DURATION = 6, /**< intvl - length of burst */ + WL_PROXD_TLV_ID_BURST_PERIOD = 7, /**< intvl - between bursts */ + WL_PROXD_TLV_ID_BURST_FTM_SEP = 8, /**< intvl - between FTMs */ + WL_PROXD_TLV_ID_BURST_NUM_FTM = 9, /**< uint16 - per burst */ + WL_PROXD_TLV_ID_NUM_BURST = 10, /**< uint16 */ + WL_PROXD_TLV_ID_FTM_RETRIES = 11, /**< uint16 at FTM level */ + WL_PROXD_TLV_ID_BSS_INDEX = 12, /**< uint8 */ + WL_PROXD_TLV_ID_BSSID = 13, + WL_PROXD_TLV_ID_INIT_DELAY = 14, /**< intvl - optional,non-standalone only */ + WL_PROXD_TLV_ID_BURST_TIMEOUT = 15, /**< expect response within - intvl */ + WL_PROXD_TLV_ID_EVENT_MASK = 16, /**< interested events - in/out */ + WL_PROXD_TLV_ID_FLAGS_MASK = 17, /**< interested flags - in only */ + WL_PROXD_TLV_ID_PEER_MAC = 18, /**< mac address of peer */ + WL_PROXD_TLV_ID_FTM_REQ = 19, /**< dot11_ftm_req */ + WL_PROXD_TLV_ID_LCI_REQ = 20, + WL_PROXD_TLV_ID_LCI = 21, WL_PROXD_TLV_ID_CIVIC_REQ = 22, WL_PROXD_TLV_ID_CIVIC = 23, - WL_PROXD_TLV_ID_AVAIL24 = 24, /* ROM compatibility */ - WL_PROXD_TLV_ID_SESSION_FLAGS = 25, - WL_PROXD_TLV_ID_SESSION_FLAGS_MASK = 26, /* in only */ - WL_PROXD_TLV_ID_RX_MAX_BURST = 27, /* uint16 - limit bursts per session */ - WL_PROXD_TLV_ID_RANGING_INFO = 28, /* 
ranging info */ - WL_PROXD_TLV_ID_RANGING_FLAGS = 29, /* uint16 */ - WL_PROXD_TLV_ID_RANGING_FLAGS_MASK = 30, /* uint16, in only */ - WL_PROXD_TLV_ID_NAN_MAP_ID = 31, - WL_PROXD_TLV_ID_DEV_ADDR = 32, - WL_PROXD_TLV_ID_AVAIL = 33, /* wl_proxd_avail_t */ - WL_PROXD_TLV_ID_TLV_ID = 34, /* uint16 tlv-id */ - WL_PROXD_TLV_ID_FTM_REQ_RETRIES = 35, /* uint16 FTM request retries */ + WL_PROXD_TLV_ID_AVAIL24 = 24, /**< ROM compatibility */ + WL_PROXD_TLV_ID_SESSION_FLAGS = 25, + WL_PROXD_TLV_ID_SESSION_FLAGS_MASK = 26, /**< in only */ + WL_PROXD_TLV_ID_RX_MAX_BURST = 27, /**< uint16 - limit bursts per session */ + WL_PROXD_TLV_ID_RANGING_INFO = 28, /**< ranging info */ + WL_PROXD_TLV_ID_RANGING_FLAGS = 29, /**< uint16 */ + WL_PROXD_TLV_ID_RANGING_FLAGS_MASK = 30, /**< uint16, in only */ + WL_PROXD_TLV_ID_NAN_MAP_ID = 31, + WL_PROXD_TLV_ID_DEV_ADDR = 32, + WL_PROXD_TLV_ID_AVAIL = 33, /**< wl_proxd_avail_t */ + WL_PROXD_TLV_ID_TLV_ID = 34, /* uint16 tlv-id */ + WL_PROXD_TLV_ID_FTM_REQ_RETRIES = 35, /* uint16 FTM request retries */ + WL_PROXD_TLV_ID_TPK = 36, /* 32byte TPK */ + WL_PROXD_TLV_ID_RI_RR = 36, /* RI_RR */ + WL_PROXD_TLV_ID_TUNE = 37, /* wl_proxd_pararms_tof_tune_t */ /* output - 512 + x */ - WL_PROXD_TLV_ID_STATUS = 512, - WL_PROXD_TLV_ID_COUNTERS = 513, - WL_PROXD_TLV_ID_INFO = 514, - WL_PROXD_TLV_ID_RTT_RESULT = 515, + WL_PROXD_TLV_ID_STATUS = 512, + WL_PROXD_TLV_ID_COUNTERS = 513, + WL_PROXD_TLV_ID_INFO = 514, + WL_PROXD_TLV_ID_RTT_RESULT = 515, WL_PROXD_TLV_ID_AOA_RESULT = 516, - WL_PROXD_TLV_ID_SESSION_INFO = 517, - WL_PROXD_TLV_ID_SESSION_STATUS = 518, - WL_PROXD_TLV_ID_SESSION_ID_LIST = 519, + WL_PROXD_TLV_ID_SESSION_INFO = 517, + WL_PROXD_TLV_ID_SESSION_STATUS = 518, + WL_PROXD_TLV_ID_SESSION_ID_LIST = 519, /* debug tlvs can be added starting 1024 */ - WL_PROXD_TLV_ID_DEBUG_MASK = 1024, - WL_PROXD_TLV_ID_COLLECT = 1025, /* output only */ - WL_PROXD_TLV_ID_STRBUF = 1026, + WL_PROXD_TLV_ID_DEBUG_MASK = 1024, + WL_PROXD_TLV_ID_COLLECT = 1025, /**< output only 
*/ + WL_PROXD_TLV_ID_STRBUF = 1026, + + WL_PROXD_TLV_ID_COLLECT_HEADER = 1025, /* wl_proxd_collect_header_t */ + WL_PROXD_TLV_ID_COLLECT_INFO = 1028, /* wl_proxd_collect_info_t */ + WL_PROXD_TLV_ID_COLLECT_DATA = 1029, /* wl_proxd_collect_data_t */ + WL_PROXD_TLV_ID_COLLECT_CHAN_DATA = 1030, /* wl_proxd_collect_data_t */ WL_PROXD_TLV_ID_MAX }; @@ -7509,15 +11228,15 @@ typedef struct wl_proxd_tlv { uint8 data[1]; } wl_proxd_tlv_t; -/* proxd iovar - applies to proxd, method or session */ +/** proxd iovar - applies to proxd, method or session */ typedef struct wl_proxd_iov { - uint16 version; - uint16 len; - wl_proxd_cmd_t cmd; - wl_proxd_method_t method; - wl_proxd_session_id_t sid; - uint8 pad[2]; - wl_proxd_tlv_t tlvs[1]; /* variable */ + uint16 version; + uint16 len; + wl_proxd_cmd_t cmd; + wl_proxd_method_t method; + wl_proxd_session_id_t sid; + uint8 PAD[2]; + wl_proxd_tlv_t tlvs[1]; /**< variable */ } wl_proxd_iov_t; #define WL_PROXD_IOV_HDR_SIZE OFFSETOF(wl_proxd_iov_t, tlvs) @@ -7526,29 +11245,31 @@ typedef struct wl_proxd_iov { * across needs more invasive changes unrelated to proxd */ enum { - WL_PROXD_EVENT_NONE = 0, /* not an event, reserved */ - WL_PROXD_EVENT_SESSION_CREATE = 1, - WL_PROXD_EVENT_SESSION_START = 2, + WL_PROXD_EVENT_NONE = 0, /**< not an event, reserved */ + WL_PROXD_EVENT_SESSION_CREATE = 1, + WL_PROXD_EVENT_SESSION_START = 2, WL_PROXD_EVENT_FTM_REQ = 3, WL_PROXD_EVENT_BURST_START = 4, WL_PROXD_EVENT_BURST_END = 5, WL_PROXD_EVENT_SESSION_END = 6, - WL_PROXD_EVENT_SESSION_RESTART = 7, - WL_PROXD_EVENT_BURST_RESCHED = 8, /* burst rescheduled - e.g. partial TSF */ - WL_PROXD_EVENT_SESSION_DESTROY = 9, - WL_PROXD_EVENT_RANGE_REQ = 10, + WL_PROXD_EVENT_SESSION_RESTART = 7, + WL_PROXD_EVENT_BURST_RESCHED = 8, /**< burst rescheduled-e.g. 
partial TSF */ + WL_PROXD_EVENT_SESSION_DESTROY = 9, + WL_PROXD_EVENT_RANGE_REQ = 10, WL_PROXD_EVENT_FTM_FRAME = 11, WL_PROXD_EVENT_DELAY = 12, - WL_PROXD_EVENT_VS_INITIATOR_RPT = 13, /* (target) rx initiator-report */ + WL_PROXD_EVENT_VS_INITIATOR_RPT = 13, /**< (target) rx initiator-report */ WL_PROXD_EVENT_RANGING = 14, - WL_PROXD_EVENT_LCI_MEAS_REP = 15, /* LCI measurement report */ - WL_PROXD_EVENT_CIVIC_MEAS_REP = 16, /* civic measurement report */ + WL_PROXD_EVENT_LCI_MEAS_REP = 15, /* LCI measurement report */ + WL_PROXD_EVENT_CIVIC_MEAS_REP = 16, /* civic measurement report */ + WL_PROXD_EVENT_COLLECT = 17, + WL_PROXD_EVENT_START_WAIT = 18, /* waiting to start */ WL_PROXD_EVENT_MAX }; typedef int16 wl_proxd_event_type_t; -/* proxd event mask - upto 32 events for now */ +/** proxd event mask - upto 32 events for now */ typedef uint32 wl_proxd_event_mask_t; #define WL_PROXD_EVENT_MASK_ALL 0xfffffffe @@ -7556,7 +11277,7 @@ typedef uint32 wl_proxd_event_mask_t; #define WL_PROXD_EVENT_ENABLED(_mask, _event_type) (\ ((_mask) & WL_PROXD_EVENT_MASK_EVENT(_event_type)) != 0) -/* proxd event - applies to proxd, method or session */ +/** proxd event - applies to proxd, method or session */ typedef struct wl_proxd_event { uint16 version; uint16 len; @@ -7564,7 +11285,7 @@ typedef struct wl_proxd_event { wl_proxd_method_t method; wl_proxd_session_id_t sid; uint8 pad[2]; - wl_proxd_tlv_t tlvs[1]; /* variable */ + wl_proxd_tlv_t tlvs[1]; /**< variable */ } wl_proxd_event_t; enum { @@ -7575,9 +11296,9 @@ enum { }; typedef int16 wl_proxd_ranging_state_t; -/* proxd ranging flags */ +/** proxd ranging flags */ enum { - WL_PROXD_RANGING_FLAG_NONE = 0x0000, /* no flags */ + WL_PROXD_RANGING_FLAG_NONE = 0x0000, /**< no flags */ WL_PROXD_RANGING_FLAG_DEL_SESSIONS_ON_STOP = 0x0001, WL_PROXD_RANGING_FLAG_ALL = 0xffff }; @@ -7591,21 +11312,27 @@ struct wl_proxd_ranging_info { uint16 num_done; }; typedef struct wl_proxd_ranging_info wl_proxd_ranging_info_t; -#include -/* end proxd 
definitions */ -/* require strict packing */ #include -/* Data returned by the bssload_report iovar. - * This is also the WLC_E_BSS_LOAD event data. - */ -typedef BWL_PRE_PACKED_STRUCT struct wl_bssload { - uint16 sta_count; /* station count */ - uint16 aac; /* available admission capacity */ - uint8 chan_util; /* channel utilization */ -} BWL_POST_PACKED_STRUCT wl_bssload_t; +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_event_data { + uint32 H_LB[K_TOF_COLLECT_H_SIZE_20MHZ]; + uint32 H_RX[K_TOF_COLLECT_H_SIZE_20MHZ]; + uint8 ri_rr[FTM_TPK_LEN]; + wl_proxd_phy_error_t phy_err_mask; +} BWL_POST_PACKED_STRUCT wl_proxd_collect_event_data_t; +#include -/* Maximum number of configurable BSS Load levels. The number of BSS Load +/** Data returned by the bssload_report iovar. This is also the WLC_E_BSS_LOAD event data */ +#include +typedef BWL_PRE_PACKED_STRUCT struct wl_bssload { + uint16 sta_count; /**< station count */ + uint16 aac; /**< available admission capacity */ + uint8 chan_util; /**< channel utilization */ +} BWL_POST_PACKED_STRUCT wl_bssload_t; +#include + +/** + * Maximum number of configurable BSS Load levels. The number of BSS Load * ranges is always 1 more than the number of configured levels. eg. if * 3 levels of 10, 20, 30 are configured then this defines 4 load ranges: * 0-10, 11-20, 21-30, 31-255. A WLC_E_BSS_LOAD event is generated each time @@ -7614,60 +11341,151 @@ typedef BWL_PRE_PACKED_STRUCT struct wl_bssload { #define MAX_BSSLOAD_LEVELS 8 #define MAX_BSSLOAD_RANGES (MAX_BSSLOAD_LEVELS + 1) -/* BSS Load event notification configuration. */ +/** BSS Load event notification configuration. */ typedef struct wl_bssload_cfg { - uint32 rate_limit_msec; /* # of events posted to application will be limited to + uint32 rate_limit_msec; /**< # of events posted to application will be limited to * one per specified period (0 to disable rate limit). 
*/ - uint8 num_util_levels; /* Number of entries in util_levels[] below */ + uint8 num_util_levels; /**< Number of entries in util_levels[] below */ uint8 util_levels[MAX_BSSLOAD_LEVELS]; - /* Variable number of BSS Load utilization levels in + /**< Variable number of BSS Load utilization levels in * low to high order. An event will be posted each time * a received beacon's BSS Load IE channel utilization * value crosses a level. */ + uint8 PAD[3]; } wl_bssload_cfg_t; -/* Multiple roaming profile suport */ +/** Multiple roaming profile suport */ #define WL_MAX_ROAM_PROF_BRACKETS 4 -#define WL_MAX_ROAM_PROF_VER 1 +#define WL_ROAM_PROF_VER_0 0 +#define WL_ROAM_PROF_VER_1 1 +#define WL_MAX_ROAM_PROF_VER WL_ROAM_PROF_VER_1 #define WL_ROAM_PROF_NONE (0 << 0) #define WL_ROAM_PROF_LAZY (1 << 0) #define WL_ROAM_PROF_NO_CI (1 << 1) #define WL_ROAM_PROF_SUSPEND (1 << 2) #define WL_ROAM_PROF_SYNC_DTIM (1 << 6) -#define WL_ROAM_PROF_DEFAULT (1 << 7) /* backward compatible single default profile */ +#define WL_ROAM_PROF_DEFAULT (1 << 7) /**< backward compatible single default profile */ #define WL_FACTOR_TABLE_MAX_LIMIT 5 -typedef struct wl_roam_prof { - int8 roam_flags; /* bit flags */ - int8 roam_trigger; /* RSSI trigger level per profile/RSSI bracket */ +#define WL_CU_2G_ROAM_TRIGGER (-60) +#define WL_CU_5G_ROAM_TRIGGER (-70) + +#define WL_CU_SCORE_DELTA_DEFAULT 20 + +#define WL_MAX_CHANNEL_USAGE 0x0FF +#define WL_CU_PERCENTAGE_DISABLE 0 +#define WL_CU_PERCENTAGE_DEFAULT 70 +#define WL_CU_PERCENTAGE_MAX 100 +#define WL_CU_CALC_DURATION_DEFAULT 10 /* seconds */ +#define WL_CU_CALC_DURATION_MAX 60 /* seconds */ + +typedef struct wl_roam_prof_v2 { + int8 roam_flags; /**< bit flags */ + int8 roam_trigger; /**< RSSI trigger level per profile/RSSI bracket */ int8 rssi_lower; int8 roam_delta; - int8 rssi_boost_thresh; /* Min RSSI to qualify for RSSI boost */ - int8 rssi_boost_delta; /* RSSI boost for AP in the other band */ - uint16 nfscan; /* nuber of full scan to start with */ + 
+ /* if channel_usage if zero, roam_delta is rssi delta required for new AP */ + /* if channel_usage if non-zero, roam_delta is score delta(%) required for new AP */ + int8 rssi_boost_thresh; /**< Min RSSI to qualify for RSSI boost */ + int8 rssi_boost_delta; /**< RSSI boost for AP in the other band */ + uint16 nfscan; /**< number of full scan to start with */ uint16 fullscan_period; uint16 init_scan_period; uint16 backoff_multiplier; uint16 max_scan_period; - uint8 channel_usage; - uint8 cu_avg_calc_dur; -} wl_roam_prof_t; + uint8 channel_usage; + uint8 cu_avg_calc_dur; + uint8 pad[2]; +} wl_roam_prof_v2_t; -typedef struct wl_roam_prof_band { - uint32 band; /* Must be just one band */ - uint16 ver; /* version of this struct */ - uint16 len; /* length in bytes of this structure */ - wl_roam_prof_t roam_prof[WL_MAX_ROAM_PROF_BRACKETS]; -} wl_roam_prof_band_t; +typedef struct wl_roam_prof_v1 { + int8 roam_flags; /**< bit flags */ + int8 roam_trigger; /**< RSSI trigger level per profile/RSSI bracket */ + int8 rssi_lower; + int8 roam_delta; + + /* if channel_usage if zero, roam_delta is rssi delta required for new AP */ + /* if channel_usage if non-zero, roam_delta is score delta(%) required for new AP */ + int8 rssi_boost_thresh; /**< Min RSSI to qualify for RSSI boost */ + int8 rssi_boost_delta; /**< RSSI boost for AP in the other band */ + uint16 nfscan; /**< number of full scan to start with */ + uint16 fullscan_period; + uint16 init_scan_period; + uint16 backoff_multiplier; + uint16 max_scan_period; +} wl_roam_prof_v1_t; + +typedef struct wl_roam_prof_band_v2 { + uint32 band; /**< Must be just one band */ + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + wl_roam_prof_v2_t roam_prof[WL_MAX_ROAM_PROF_BRACKETS]; +} wl_roam_prof_band_v2_t; + +typedef struct wl_roam_prof_band_v1 { + uint32 band; /**< Must be just one band */ + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this 
structure */ + wl_roam_prof_v1_t roam_prof[WL_MAX_ROAM_PROF_BRACKETS]; +} wl_roam_prof_band_v1_t; + +#define BSS_MAXTABLE_SIZE 10 +#define WNM_BSS_SELECT_FACTOR_VERSION 1 +typedef struct wnm_bss_select_factor_params { + uint8 low; + uint8 high; + uint8 factor; + uint8 pad; +} wnm_bss_select_factor_params_t; + +#define WNM_BSS_SELECT_FIXED_SIZE OFFSETOF(wnm_bss_select_factor_cfg_t, params) +typedef struct wnm_bss_select_factor_cfg { + uint8 version; + uint8 band; + uint16 type; + uint16 pad; + uint16 count; + wnm_bss_select_factor_params_t params[1]; +} wnm_bss_select_factor_cfg_t; + +#define WNM_BSS_SELECT_WEIGHT_VERSION 1 +typedef struct wnm_bss_select_weight_cfg { + uint8 version; + uint8 band; + uint16 type; + uint16 weight; /* weightage for each type between 0 to 100 */ +} wnm_bss_select_weight_cfg_t; + +#define WNM_BSS_SELECT_TYPE_RSSI 0 +#define WNM_BSS_SELECT_TYPE_CU 1 + +#define WNM_BSSLOAD_MONITOR_VERSION 1 +typedef struct wnm_bssload_monitor_cfg { + uint8 version; + uint8 band; + uint8 duration; /* duration between 1 to 20sec */ +} wnm_bssload_monitor_cfg_t; + +#define WNM_ROAM_TRIGGER_VERSION 1 +typedef struct wnm_roam_trigger_cfg { + uint8 version; + uint8 band; + uint16 type; + int16 trigger; /* trigger for each type in new roam algorithm */ +} wnm_roam_trigger_cfg_t; /* Data structures for Interface Create/Remove */ #define WL_INTERFACE_CREATE_VER (0) +#define WL_INTERFACE_CREATE_VER_1 1 +#define WL_INTERFACE_CREATE_VER_2 2 +#define WL_INTERFACE_CREATE_VER_3 3 /* * The flags filed of the wl_interface_create is designed to be @@ -7681,10 +11499,23 @@ typedef struct wl_roam_prof_band { * be created is STA or AP. * 0 - Create a STA interface * 1 - Create an AP interface + * NOTE: This Bit 0 is applicable for the WL_INTERFACE_CREATE_VER < 2 */ #define WL_INTERFACE_CREATE_STA (0 << 0) #define WL_INTERFACE_CREATE_AP (1 << 0) +/* + * From revision >= 2 Bit 0 of flags field will not used be for STA or AP interface creation. 
+ * "iftype" field shall be used for identifying the interface type. + */ +typedef enum wl_interface_type { + WL_INTERFACE_TYPE_STA = 0, + WL_INTERFACE_TYPE_AP = 1, + WL_INTERFACE_TYPE_AWDL = 2, + WL_INTERFACE_TYPE_NAN = 3, + WL_INTERFACE_TYPE_MAX +} wl_interface_type_t; + /* * Bit 1 of flags field is used to inform whether MAC is present in the * data structure or not. @@ -7694,21 +11525,200 @@ typedef struct wl_roam_prof_band { #define WL_INTERFACE_MAC_DONT_USE (0 << 1) #define WL_INTERFACE_MAC_USE (1 << 1) +/* + * Bit 2 of flags field is used to inform whether core or wlc index + * is present in the data structure or not. + * 0 - Ignore wlc_index field + * 1 - Use the wlc_index field + */ +#define WL_INTERFACE_WLC_INDEX_DONT_USE (0 << 2) +#define WL_INTERFACE_WLC_INDEX_USE (1 << 2) + +/* + * Bit 3 of flags field is used to create interface on the host requested interface index + * 0 - Ignore if_index field + * 1 - Use the if_index field + */ +#define WL_INTERFACE_IF_INDEX_USE (1 << 3) + +/* + * Bit 4 of flags field is used to assign BSSID + * 0 - Ignore bssid field + * 1 - Use the bssid field + */ +#define WL_INTERFACE_BSSID_INDEX_USE (1 << 4) + typedef struct wl_interface_create { uint16 ver; /* version of this struct */ uint32 flags; /* flags that defines the operation */ struct ether_addr mac_addr; /* Optional Mac address */ } wl_interface_create_t; -typedef struct wl_interface_info { - uint16 ver; /* version of this struct */ - struct ether_addr mac_addr; /* MAC address of the interface */ - char ifname[BCM_MSG_IFNAME_MAX]; /* name of interface */ - uint8 bsscfgidx; /* source bsscfg index */ -} wl_interface_info_t; +typedef struct wl_interface_create_v1 { + uint16 ver; /**< version of this struct */ + uint8 pad1[2]; /**< Padding bytes */ + uint32 flags; /**< flags that defines the operation */ + struct ether_addr mac_addr; /**< Optional Mac address */ + uint8 pad2[2]; /**< Padding bytes */ + uint32 wlc_index; /**< Optional wlc index */ +} 
wl_interface_create_v1_t; -/* no default structure packing */ -#include +typedef struct wl_interface_create_v2 { + uint16 ver; /**< version of this struct */ + uint8 pad1[2]; /**< Padding bytes */ + uint32 flags; /**< flags that defines the operation */ + struct ether_addr mac_addr; /**< Optional Mac address */ + uint8 iftype; /**< Type of interface created */ + uint8 pad2; /**< Padding bytes */ + uint32 wlc_index; /**< Optional wlc index */ +} wl_interface_create_v2_t; + +typedef struct wl_interface_create_v3 { + uint16 ver; /**< version of this struct */ + uint16 len; /**< length of whole structure including variable length */ + uint16 fixed_len; /**< Fixed length of this structure excluding data[] */ + uint8 iftype; /**< Type of interface created */ + uint8 wlc_index; /**< Optional wlc index */ + uint32 flags; /**< flags that defines the operation */ + struct ether_addr mac_addr; /**< Optional Mac address */ + struct ether_addr bssid; /**< Optional BSSID */ + uint8 if_index; /**< interface index requested by Host */ + uint8 pad[3]; /**< Padding bytes to ensure data[] is at 32 bit aligned */ + uint8 data[]; /**< Optional application/Module specific data */ +} wl_interface_create_v3_t; + +#define WL_INTERFACE_INFO_VER_1 1 +#define WL_INTERFACE_INFO_VER_2 2 + +typedef struct wl_interface_info_v1 { + uint16 ver; /**< version of this struct */ + struct ether_addr mac_addr; /**< MAC address of the interface */ + char ifname[BCM_MSG_IFNAME_MAX]; /**< name of interface */ + uint8 bsscfgidx; /**< source bsscfg index */ + uint8 PAD; +} wl_interface_info_v1_t; + +typedef struct wl_interface_info_v2 { + uint16 ver; /**< version of this struct */ + uint16 length; /**< length of the whole structure */ + struct ether_addr mac_addr; /**< MAC address of the interface */ + uint8 bsscfgidx; /**< source bsscfg index */ + uint8 if_index; /**< Interface index allocated by FW */ + char ifname[BCM_MSG_IFNAME_MAX]; /**< name of interface */ +} wl_interface_info_v2_t; + +#define 
PHY_RXIQEST_AVERAGING_DELAY 10 + +typedef struct wl_iqest_params { + uint32 rxiq; + uint8 niter; + uint8 delay; + uint8 PAD[2]; +} wl_iqest_params_t; + +typedef struct wl_iqest_sweep_params { + wl_iqest_params_t params; + uint8 nchannels; + uint8 channel[3]; /** variable */ +} wl_iqest_sweep_params_t; + +typedef struct wl_iqest_value { + uint8 channel; + uint8 PAD[3]; + uint32 rxiq; +} wl_iqest_value_t; + +typedef struct wl_iqest_result { + uint8 nvalues; + uint8 PAD[3]; + wl_iqest_value_t value[1]; +} wl_iqest_result_t; + +/* BTCX AIBSS (Oxygen) Status */ +typedef struct wlc_btc_aibss_info { + uint32 prev_tsf_l; // Lower 32 bits of last read of TSF + uint32 prev_tsf_h; // Higher 32 bits of last read of TSF + uint32 last_btinfo; // Last read of BT info + uint32 local_btinfo; // Local BT INFO BitMap + uint8 bt_out_of_sync_cnt; // BT not in sync with strobe + uint8 esco_off_cnt; // Count incremented when ESCO is off + uint8 strobe_enabled; // Set only in AIBSS mode + uint8 strobe_on; // strobe to BT is on for Oxygen + uint8 local_bt_in_sync; // Sync status of local BT when strobe is on + uint8 other_bt_in_sync; // Sync state of BT in other devices in AIBSS + uint8 local_bt_is_master; // Local BT is master + uint8 sco_prot_on; // eSCO Protection on in local device + uint8 other_esco_present; // eSCO status in other devices in AIBSS + uint8 rx_agg_change; // Indicates Rx Agg size needs to change + uint8 rx_agg_modified; // Rx Agg size modified + uint8 acl_grant_set; // ACL grants on for speeding up sync + uint8 write_ie_err_cnt; // BTCX Ie write error cnt + uint8 parse_ie_err_cnt; // BTCX IE parse error cnt + uint8 wci2_fail_cnt; // WCI2 init failure cnt + uint8 strobe_enable_err_cnt; // Strobe enable err cnt + uint8 strobe_init_err_cnt; // Strobe init err cnt + uint8 tsf_jump_cnt; // TSF jump cnt + uint8 acl_grant_cnt; // ALC grant cnt + uint8 pad1; + uint16 ibss_tsf_shm; // SHM address of strobe TSF + uint16 pad2; +} wlc_btc_aibss_info_t; + +#define 
WLC_BTC_AIBSS_STATUS_VER 1 +#define WLC_BTC_AIBSS_STATUS_LEN (sizeof(wlc_btc_aibss_status_t) - 2 * (sizeof(uint16))) + +typedef struct wlc_btc_aibss_status { + uint16 version; // Version # + uint16 len; // Length of the structure(excluding len & version) + int32 mode; // Current value of btc_mode + uint16 bth_period; // bt coex period. read from shm. + uint16 agg_off_bm; // AGG OFF BM read from SHM + uint8 bth_active; // bt active session + uint8 pad[3]; + wlc_btc_aibss_info_t aibss_info; // Structure definition above +} wlc_btc_aibss_status_t; + +typedef enum { + STATE_NONE = 0, + + /* WLAN -> BT */ + W2B_DATA_SET = 21, + B2W_ACK_SET = 22, + W2B_DATA_CLEAR = 23, + B2W_ACK_CLEAR = 24, + + /* BT -> WLAN */ + B2W_DATA_SET = 31, + W2B_ACK_SET = 32, + B2W_DATA_CLEAR = 33, + W2B_ACK_CLEAR = 34 +} bwte_gci_intstate_t; + +#define WL_BWTE_STATS_VERSION 1 /* version of bwte_stats_t */ +typedef struct { + uint32 version; + + bwte_gci_intstate_t inttobt; + bwte_gci_intstate_t intfrombt; + + uint32 bt2wl_intrcnt; /* bt->wlan interrrupt count */ + uint32 wl2bt_intrcnt; /* wlan->bt interrupt count */ + + uint32 wl2bt_dset_cnt; + uint32 wl2bt_dclear_cnt; + uint32 wl2bt_aset_cnt; + uint32 wl2bt_aclear_cnt; + + uint32 bt2wl_dset_cnt; + uint32 bt2wl_dclear_cnt; + uint32 bt2wl_aset_cnt; + uint32 bt2wl_aclear_cnt; + + uint32 state_error_1; + uint32 state_error_2; + uint32 state_error_3; + uint32 state_error_4; +} bwte_stats_t; #define TBOW_MAX_SSID_LEN 32 #define TBOW_MAX_PASSPHRASE_LEN 63 @@ -7724,6 +11734,8 @@ typedef struct tbow_setup_netinfo { uint8 passphrase_len; uint8 passphrase[TBOW_MAX_PASSPHRASE_LEN]; chanspec_t chanspec; + uint8 PAD[2]; + uint32 channel; } tbow_setup_netinfo_t; typedef enum tbow_ho_opmode { @@ -7738,36 +11750,167 @@ typedef enum tbow_ho_opmode { } tbow_ho_opmode_t; /* Beacon trim feature statistics */ -/* Configuration params */ -#define M_BCNTRIM_N (0) /* Enable/Disable Beacon Trim */ -#define M_BCNTRIM_TIMEND (1) /* Waiting time for TIM IE to end */ 
-#define M_BCNTRIM_TSFTLRN (2) /* TSF tolerance value (usecs) */ -/* PSM internal use */ -#define M_BCNTRIM_PREVBCNLEN (3) /* Beacon length excluding the TIM IE */ -#define M_BCNTRIM_N_COUNTER (4) /* PSM's local beacon trim counter */ -#define M_BCNTRIM_STATE (5) /* PSM's Beacon trim status register */ -#define M_BCNTRIM_TIMLEN (6) /* TIM IE Length */ -#define M_BCNTRIM_BMPCTL (7) /* Bitmap control word */ -#define M_BCNTRIM_TSF_L (8) /* Lower TSF word */ -#define M_BCNTRIM_TSF_ML (9) /* Lower middle TSF word */ -#define M_BCNTRIM_RSSI (10) /* Partial beacon RSSI */ -#define M_BCNTRIM_CHANNEL (11) /* Partial beacon channel */ -/* Trimming Counters */ -#define M_BCNTRIM_SBCNRXED (12) /* Self-BSSID beacon received */ -#define M_BCNTRIM_CANTRIM (13) /* Num of beacons which can be trimmed */ -#define M_BCNTRIM_TRIMMED (14) /* # beacons which were trimmed */ -#define M_BCNTRIM_BCNLENCNG (15) /* # beacons trimmed due to length change */ -#define M_BCNTRIM_TSFADJ (16) /* # beacons not trimmed due to large TSF delta */ -#define M_BCNTRIM_TIMNOTFOUND (17) /* # beacons not trimmed due to TIM missing */ -#define M_RXTSFTMRVAL_WD0 (18) -#define M_RXTSFTMRVAL_WD1 (19) -#define M_RXTSFTMRVAL_WD2 (20) -#define M_RXTSFTMRVAL_WD3 (21) -#define BCNTRIM_STATS_NUMPARAMS (22) /* 16 bit words */ +/* configuration */ +#define BCNTRIMST_PER 0 /* Number of beacons to trim (0: disable) */ +#define BCNTRIMST_TIMEND 1 /* Number of bytes till TIM IE */ +#define BCNTRIMST_TSFLMT 2 /* TSF tolerance value (usecs) */ +/* internal use */ +#define BCNTRIMST_CUR 3 /* PSM's local beacon trim counter */ +#define BCNTRIMST_PREVLEN 4 /* Beacon length excluding the TIM IE */ +#define BCNTRIMST_TIMLEN 5 /* TIM IE Length */ +#define BCNTRIMST_RSSI 6 /* Partial beacon RSSI */ +#define BCNTRIMST_CHAN 7 /* Partial beacon channel */ +/* debug stat (off by default) */ +#define BCNTRIMST_DUR 8 /* RX duration until beacon trimmed */ +#define BCNTRIMST_RXMBSS 9 /* MYBSSID beacon received */ +#define 
BCNTRIMST_CANTRIM 10 /* # beacons which were trimmed */ +#define BCNTRIMST_LENCHG 11 /* # beacons not trimmed due to length change */ +#define BCNTRIMST_TSFDRF 12 /* # beacons not trimmed due to large TSF delta */ +#define BCNTRIMST_NOTIM 13 /* # beacons not trimmed due to TIM missing */ +#define BCNTRIMST_NUM 14 + +#define WL_BCNTRIM_STATUS_VERSION_1 1 +typedef struct wl_bcntrim_status_query_v1 { + uint16 version; + uint16 len; /* Total length includes fixed fields */ + uint8 reset; /* reset after reading the stats */ + uint8 pad[3]; /* 4-byte alignment */ +} wl_bcntrim_status_query_v1_t; + +typedef struct wl_bcntrim_status_v1 { + uint16 version; + uint16 len; /* Total length includes fixed fields and variable data[] */ + uint8 curr_slice_id; /* slice index of the interface */ + uint8 applied_cfg; /* applied bcntrim N threshold */ + uint8 pad[2]; /* 4-byte alignment */ + uint32 fw_status; /* Bits representing bcntrim disable reason in FW */ + uint32 total_disable_dur; /* total duration (msec) bcntrim remains + disabled due to FW disable reasons + */ + uint32 data[]; /* variable length data containing stats */ +} wl_bcntrim_status_v1_t; + +#define BCNTRIM_STATS_MAX 10 /* Total stats part of the status data[] */ + +/* Bits for FW status */ +#define WL_BCNTRIM_DISABLE_HOST 0x1 /* Host disabled bcntrim through bcntrim IOVar */ +#define WL_BCNTRIM_DISABLE_PHY_RATE 0x2 /* bcntrim disabled because beacon rx rate is + higher than phy_rate_thresh + */ +#define WL_BCNTRIM_DISABLE_QUIET_IE 0x4 /* bcntrim disable when Quiet IE present */ + +#define WL_BCNTRIM_CFG_VERSION_1 1 +/* Common IOVAR struct */ +typedef struct wl_bcntrim_cfg_v1 { + uint16 version; + uint16 len; /* Total length includes fixed fields and variable data[] */ + uint16 subcmd_id; /* subcommand id */ + uint16 pad; /* pad/reserved */ + uint8 data[]; /* subcommand data; could be empty */ +} wl_bcntrim_cfg_v1_t; + +/* subcommands ids */ +enum { + WL_BCNTRIM_CFG_SUBCMD_PHY_RATE_THRESH = 0, /* PHY rate threshold 
above + which bcntrim is not applied + */ + WL_BCNTRIM_CFG_SUBCMD_OVERRIDE_DISABLE_MASK = 1, /* Override bcntrim disable reasons */ + WL_BCNTRIM_CFG_SUBCMD_TSF_DRIFT_LIMIT = 2 /* TSF drift limit to consider bcntrim */ +}; + +#define BCNTRIM_MAX_PHY_RATE 48 /* in 500Kbps */ +#define BCNTRIM_MAX_TSF_DRIFT 65535 /* in usec */ +#define WL_BCNTRIM_OVERRIDE_DISABLE_MASK (WL_BCNTRIM_DISABLE_QUIET_IE) + +/* WL_BCNTRIM_CFG_SUBCMD_PHY_RATE_TRESH */ +typedef struct wl_bcntrim_cfg_phy_rate_thresh { + uint32 rate; /* beacon rate (in 500kbps units) */ +} wl_bcntrim_cfg_phy_rate_thresh_t; + +/* WL_BCNTRIM_CFG_SUBCMD_OVERRIDE_DISABLE_MASK */ +typedef struct wl_bcntrim_cfg_override_disable_mask { + uint32 mask; /* bits representing individual disable reason to override */ +} wl_bcntrim_cfg_override_disable_mask_t; + +/* WL_BCNTRIM_CFG_SUBCMD_TSF_DRIFT_LIMIT */ +typedef struct wl_bcntrim_cfg_tsf_drift_limit { + uint16 drift; /* tsf drift limit specified in usec */ + uint8 pad[2]; /* 4-byte alignment */ +} wl_bcntrim_cfg_tsf_drift_limit_t; + + +/* -------------- TX Power Cap --------------- */ #define TXPWRCAP_MAX_NUM_CORES 8 #define TXPWRCAP_MAX_NUM_ANTENNAS (TXPWRCAP_MAX_NUM_CORES * 2) +#define TXPWRCAP_NUM_SUBBANDS 5 + +/* IOVAR txcapconfig enum's */ +#define TXPWRCAPCONFIG_WCI2 0 +#define TXPWRCAPCONFIG_HOST 1 +#define TXPWRCAPCONFIG_WCI2_AND_HOST 2 + +/* IOVAR txcapstate enum's */ +#define TXPWRCAPSTATE_LOW_CAP 0 +#define TXPWRCAPSTATE_HIGH_CAP 1 +#define TXPWRCAPSTATE_HOST_LOW_WCI2_LOW_CAP 0 +#define TXPWRCAPSTATE_HOST_LOW_WCI2_HIGH_CAP 1 +#define TXPWRCAPSTATE_HOST_HIGH_WCI2_LOW_CAP 2 +#define TXPWRCAPSTATE_HOST_HIGH_WCI2_HIGH_CAP 3 + +/* IOVAR txcapconfig and txcapstate structure is shared: SET and GET */ +#define TXPWRCAPCTL_VERSION 2 +typedef struct wl_txpwrcap_ctl { + uint8 version; + uint8 ctl[TXPWRCAP_NUM_SUBBANDS]; +} wl_txpwrcap_ctl_t; + +/* IOVAR txcapdump structure: GET only */ +#define TXPWRCAP_DUMP_VERSION 2 +typedef struct wl_txpwrcap_dump { + uint8 version; + 
uint8 pad0; + uint8 current_country[2]; + uint32 current_channel; + uint8 config[TXPWRCAP_NUM_SUBBANDS]; + uint8 state[TXPWRCAP_NUM_SUBBANDS]; + uint8 high_cap_state_enabled; + uint8 wci2_cell_status_last; + uint8 download_present; + uint8 num_subbands; + uint8 num_antennas; + uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES]; + uint8 num_cc_groups; + uint8 current_country_cc_group_info_index; + int8 low_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS]; + int8 high_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS]; + uint8 PAD[3]; +} wl_txpwrcap_dump_t; + +typedef struct wl_txpwrcap_dump_v3 { + uint8 version; + uint8 pad0; + uint8 current_country[2]; + uint32 current_channel; + uint8 config[TXPWRCAP_NUM_SUBBANDS]; + uint8 state[TXPWRCAP_NUM_SUBBANDS]; + uint8 high_cap_state_enabled; + uint8 wci2_cell_status_last; + uint8 download_present; + uint8 num_subbands; + uint8 num_antennas; + uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES]; + uint8 num_cc_groups; + uint8 current_country_cc_group_info_index; + uint8 cap_states_per_cc_group; + int8 host_low_wci2_low_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS]; + int8 host_low_wci2_high_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS]; + int8 host_high_wci2_low_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS]; + int8 host_high_wci2_high_cap[TXPWRCAP_MAX_NUM_ANTENNAS*TXPWRCAP_NUM_SUBBANDS]; + uint8 PAD[2]; +} wl_txpwrcap_dump_v3_t; + typedef struct wl_txpwrcap_tbl { uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES]; /* Stores values for valid antennas */ @@ -7775,37 +11918,153 @@ typedef struct wl_txpwrcap_tbl { int8 pwrcap_cell_off[TXPWRCAP_MAX_NUM_ANTENNAS]; /* qdBm units */ } wl_txpwrcap_tbl_t; -/* -------------- dynamic BTCOEX --------------- */ -/* require strict packing */ -#include +/* ##### Ecounters section ##### */ +#define ECOUNTERS_VERSION_1 1 -#define DCTL_TROWS 2 /* currently practical number of rows */ -#define DCTL_TROWS_MAX 4 /* 2 extra rows RFU */ +/* Input structure for 
ecounters IOVAR */ +typedef struct ecounters_config_request { + uint16 version; /* config version */ + uint16 set; /* Set where data will go. */ + uint16 size; /* Size of the set. */ + uint16 timeout; /* timeout in seconds. */ + uint16 num_events; /* Number of events to report. */ + uint16 ntypes; /* Number of entries in type array. */ + uint16 type[1]; /* Statistics Types (tags) to retrieve. */ +} ecounters_config_request_t; + +#define ECOUNTERS_EVENTMSGS_VERSION_1 1 +#define ECOUNTERS_TRIGGER_CONFIG_VERSION_1 1 + +#define ECOUNTERS_EVENTMSGS_EXT_MASK_OFFSET \ + OFFSETOF(ecounters_eventmsgs_ext_t, mask[0]) + +#define ECOUNTERS_TRIG_CONFIG_TYPE_OFFSET \ + OFFSETOF(ecounters_trigger_config_t, type[0]) + +typedef struct ecounters_eventmsgs_ext { + uint8 version; + uint8 len; + uint8 mask[1]; +} ecounters_eventmsgs_ext_t; + +typedef struct ecounters_trigger_config { + uint16 version; /* version */ + uint16 set; /* set where data should go */ + uint16 rsvd; /* reserved */ + uint16 pad; /* pad/reserved */ + uint16 ntypes; /* number of types/tags */ + uint16 type[1]; /* list of types */ +} ecounters_trigger_config_t; + +#define ECOUNTERS_TRIGGER_REASON_VERSION_1 1 +/* Triggered due to timer based ecounters */ +#define ECOUNTERS_TRIGGER_REASON_TIMER 0 +/* Triggered due to event based configuration */ +#define ECOUNTERS_TRIGGER_REASON_EVENTS 1 +#define ECOUNTERS_TRIGGER_REASON_MAX 1 + +typedef struct ecounters_trigger_reason { + uint16 version; /* version */ + uint16 trigger_reason; /* trigger reason */ + uint32 sub_reason_code; /* sub reason code */ + uint32 trigger_time_now; /* time in ms at trigger */ + uint32 host_ref_time; /* host ref time */ +} ecounters_trigger_reason_t; + +#define WL_LQM_VERSION_1 1 + +/* For wl_lqm_t flags field */ +#define WL_LQM_CURRENT_BSS_VALID 0x1 +#define WL_LQM_TARGET_BSS_VALID 0x2 + +typedef struct { + struct ether_addr BSSID; + chanspec_t chanspec; + int32 rssi; + int32 snr; +} wl_rx_signal_metric_t; + +typedef struct { + uint8 version; + 
uint8 flags; + uint16 pad; + int32 noise_level; /* current noise level */ + wl_rx_signal_metric_t current_bss; + wl_rx_signal_metric_t target_bss; +} wl_lqm_t; + +/* ##### Ecounters v2 section ##### */ + +#define ECOUNTERS_VERSION_2 2 + +/* Enumeration of various ecounters request types. This namespace is different from + * global reportable stats namespace. +*/ +enum { + WL_ECOUNTERS_XTLV_REPORT_REQ = 1 +}; + +/* Input structure for ecounters IOVAR */ +typedef struct ecounters_config_request_v2 { + uint16 version; /* config version */ + uint16 len; /* Length of this struct including variable len */ + uint16 logset; /* Set where data will go. */ + uint16 reporting_period; /* reporting_period */ + uint16 num_reports; /* Number of timer expirations to report on */ + uint8 pad[2]; /* Reserved for future use */ + uint8 ecounters_xtlvs[]; /* Statistics Types (tags) to retrieve. */ +} ecounters_config_request_v2_t; + +#define ECOUNTERS_STATS_TYPES_FLAG_SLICE 0x1 +#define ECOUNTERS_STATS_TYPES_FLAG_IFACE 0x2 +#define ECOUNTERS_STATS_TYPES_FLAG_GLOBAL 0x4 + +/* Slice mask bits */ +#define ECOUNTERS_STATS_TYPES_SLICE_MASK_SLICE0 0x1 +#define ECOUNTERS_STATS_TYPES_SLICE_MASK_SLICE1 0x2 + +typedef struct ecounters_stats_types_report_req { + /* flags: bit0 = slice, bit1 = iface, bit2 = global, + * rest reserved + */ + uint16 flags; + uint16 if_index; /* host interface index */ + uint16 slice_mask; /* bit0 = slice0, bit1=slice1, rest reserved */ + uint8 pad[2]; /* padding */ + uint8 stats_types_req[]; /* XTLVs of requested types */ +} ecounters_stats_types_report_req_t; + +/* -------------- dynamic BTCOEX --------------- */ +#define DCTL_TROWS 2 /**< currently practical number of rows */ +#define DCTL_TROWS_MAX 4 /**< 2 extra rows RFU */ /* DYNCTL profile flags */ -#define DCTL_FLAGS_DYNCTL (1 << 0) /* 1 - enabled, 0 - legacy only */ -#define DCTL_FLAGS_DESENSE (1 << 1) /* auto desense is enabled */ -#define DCTL_FLAGS_MSWITCH (1 << 2) /* mode switching is enabled */ +#define 
DCTL_FLAGS_DISABLED 0 /**< default value: all features disabled */ +#define DCTL_FLAGS_DYNCTL (1 << 0) /**< 1 - enabled, 0 - legacy only */ +#define DCTL_FLAGS_DESENSE (1 << 1) /**< auto desense is enabled */ +#define DCTL_FLAGS_MSWITCH (1 << 2) /**< mode switching is enabled */ +#define DCTL_FLAGS_PWRCTRL (1 << 3) /**< Tx power control is enabled */ /* for now AGG on/off is handled separately */ -#define DCTL_FLAGS_TX_AGG_OFF (1 << 3) /* TBD: allow TX agg Off */ -#define DCTL_FLAGS_RX_AGG_OFF (1 << 4) /* TBD: allow RX agg Off */ +#define DCTL_FLAGS_TX_AGG_OFF (1 << 4) /**< TBD: allow TX agg Off */ +#define DCTL_FLAGS_RX_AGG_OFF (1 << 5) /**< TBD: allow RX agg Off */ /* used for dry run testing only */ -#define DCTL_FLAGS_DRYRUN (1 << 7) /* Eenables dynctl dry run mode */ +#define DCTL_FLAGS_DRYRUN (1 << 7) /**< Enables dynctl dry run mode */ #define IS_DYNCTL_ON(prof) ((prof->flags & DCTL_FLAGS_DYNCTL) != 0) #define IS_DESENSE_ON(prof) ((prof->flags & DCTL_FLAGS_DESENSE) != 0) #define IS_MSWITCH_ON(prof) ((prof->flags & DCTL_FLAGS_MSWITCH) != 0) +#define IS_PWRCTRL_ON(prof) ((prof->flags & DCTL_FLAGS_PWRCTRL) != 0) /* desense level currently in use */ #define DESENSE_OFF 0 #define DFLT_DESENSE_MID 12 #define DFLT_DESENSE_HIGH 2 -/* +/** * dynctl data points(a set of btpwr & wlrssi thresholds) * for mode & desense switching */ typedef struct btc_thr_data { - int8 mode; /* used by desense sw */ - int8 bt_pwr; /* BT tx power threshold */ - int8 bt_rssi; /* BT rssi threshold */ + int8 mode; /**< used by desense sw */ + int8 bt_pwr; /**< BT tx power threshold */ + int8 bt_rssi; /**< BT rssi threshold */ /* wl rssi range when mode or desense change may be needed */ int8 wl_rssi_high; int8 wl_rssi_low; @@ -7813,90 +12072,95 @@ typedef struct btc_thr_data { /* dynctl. 
profile data structure */ #define DCTL_PROFILE_VER 0x01 +#include typedef BWL_PRE_PACKED_STRUCT struct dctl_prof { - uint8 version; /* dynctl profile version */ + uint8 version; /**< dynctl profile version */ /* dynctl profile flags bit:0 - dynctl On, bit:1 dsns On, bit:2 mode sw On, */ - uint8 flags; /* bit[6:3] reserved, bit7 - Dryrun (sim) - On */ - /* wl desense levels to apply */ + uint8 flags; /**< bit[6:3] reserved, bit7 - Dryrun (sim) - On */ + /** wl desense levels to apply */ uint8 dflt_dsns_level; uint8 low_dsns_level; uint8 mid_dsns_level; uint8 high_dsns_level; - /* mode switching hysteresis in dBm */ + /** mode switching hysteresis in dBm */ int8 msw_btrssi_hyster; - /* default btcoex mode */ + /** default btcoex mode */ uint8 default_btc_mode; - /* num of active rows in mode switching table */ + /** num of active rows in mode switching table */ uint8 msw_rows; - /* num of rows in desense table */ + /** num of rows in desense table */ uint8 dsns_rows; - /* dynctl mode switching data table */ + /** dynctl mode switching data table */ btc_thr_data_t msw_data[DCTL_TROWS_MAX]; - /* dynctl desense switching data table */ + /** dynctl desense switching data table */ btc_thr_data_t dsns_data[DCTL_TROWS_MAX]; } BWL_POST_PACKED_STRUCT dctl_prof_t; +#include -/* dynctl status info */ +/** dynctl status info */ +#include typedef BWL_PRE_PACKED_STRUCT struct dynctl_status { - bool sim_on; /* true if simulation is On */ - uint16 bt_pwr_shm; /* BT per/task power as read from ucode */ - int8 bt_pwr; /* BT pwr extracted & converted to dBm */ - int8 bt_rssi; /* BT rssi in dBm */ - int8 wl_rssi; /* last wl rssi reading used by btcoex */ - uint8 dsns_level; /* current desense level */ - uint8 btc_mode; /* current btcoex mode */ + uint8 sim_on; /**< true if simulation is On */ + uint16 bt_pwr_shm; /**< BT per/task power as read from ucode */ + int8 bt_pwr; /**< BT pwr extracted & converted to dBm */ + int8 bt_rssi; /**< BT rssi in dBm */ + int8 wl_rssi; /**< last wl rssi 
reading used by btcoex */ + uint8 dsns_level; /**< current desense level */ + uint8 btc_mode; /**< current btcoex mode */ /* add more status items if needed, pad to 4 BB if needed */ } BWL_POST_PACKED_STRUCT dynctl_status_t; +#include -/* dynctl simulation (dryrun data) */ +/** dynctl simulation (dryrun data) */ +#include typedef BWL_PRE_PACKED_STRUCT struct dynctl_sim { - bool sim_on; /* simulation mode on/off */ - int8 btpwr; /* simulated BT power in dBm */ - int8 btrssi; /* simulated BT rssi in dBm */ - int8 wlrssi; /* simulated WL rssi in dBm */ + uint8 sim_on; /**< simulation mode on/off */ + int8 btpwr; /**< simulated BT power in dBm */ + int8 btrssi; /**< simulated BT rssi in dBm */ + int8 wlrssi; /**< simulated WL rssi in dBm */ } BWL_POST_PACKED_STRUCT dynctl_sim_t; /* no default structure packing */ #include -/* PTK key maintained per SCB */ +/** PTK key maintained per SCB */ #define RSN_TEMP_ENCR_KEY_LEN 16 typedef struct wpa_ptk { - uint8 kck[RSN_KCK_LENGTH]; /* EAPOL-Key Key Confirmation Key (KCK) */ - uint8 kek[RSN_KEK_LENGTH]; /* EAPOL-Key Key Encryption Key (KEK) */ - uint8 tk1[RSN_TEMP_ENCR_KEY_LEN]; /* Temporal Key 1 (TK1) */ - uint8 tk2[RSN_TEMP_ENCR_KEY_LEN]; /* Temporal Key 2 (TK2) */ + uint8 kck[RSN_KCK_LENGTH]; /**< EAPOL-Key Key Confirmation Key (KCK) */ + uint8 kek[RSN_KEK_LENGTH]; /**< EAPOL-Key Key Encryption Key (KEK) */ + uint8 tk1[RSN_TEMP_ENCR_KEY_LEN]; /**< Temporal Key 1 (TK1) */ + uint8 tk2[RSN_TEMP_ENCR_KEY_LEN]; /**< Temporal Key 2 (TK2) */ } wpa_ptk_t; -/* GTK key maintained per SCB */ +/** GTK key maintained per SCB */ typedef struct wpa_gtk { uint32 idx; uint32 key_len; uint8 key[DOT11_MAX_KEY_SIZE]; } wpa_gtk_t; -/* FBT Auth Response Data structure */ +/** FBT Auth Response Data structure */ typedef struct wlc_fbt_auth_resp { - uint8 macaddr[ETHER_ADDR_LEN]; /* station mac address */ + uint8 macaddr[ETHER_ADDR_LEN]; /**< station mac address */ uint8 pad[2]; uint8 pmk_r1_name[WPA2_PMKID_LEN]; - wpa_ptk_t ptk; /* pairwise key 
*/ - wpa_gtk_t gtk; /* group key */ + wpa_ptk_t ptk; /**< pairwise key */ + wpa_gtk_t gtk; /**< group key */ uint32 ie_len; - uint8 status; /* Status of parsing FBT authentication + uint8 status; /**< Status of parsing FBT authentication Request in application */ - uint8 ies[1]; /* IEs contains MDIE, RSNIE, + uint8 ies[1]; /**< IEs contains MDIE, RSNIE, FBTIE (ANonce, SNonce,R0KH-ID, R1KH-ID) */ } wlc_fbt_auth_resp_t; -/* FBT Action Response frame */ +/** FBT Action Response frame */ typedef struct wlc_fbt_action_resp { - uint16 version; /* structure version */ - uint16 length; /* length of structure */ - uint8 macaddr[ETHER_ADDR_LEN]; /* station mac address */ - uint8 data_len; /* len of ie from Category */ - uint8 data[1]; /* data contains category, action, sta address, target ap, + uint16 version; /**< structure version */ + uint16 length; /**< length of structure */ + uint8 macaddr[ETHER_ADDR_LEN]; /**< station mac address */ + uint8 data_len; /**< len of ie from Category */ + uint8 data[1]; /**< data contains category, action, sta address, target ap, status code,fbt response frame body */ } wlc_fbt_action_resp_t; @@ -7907,73 +12171,215 @@ typedef struct wlc_fbt_action_resp { typedef struct _wl_macdbg_pmac_param_t { char type[MACDBG_PMAC_OBJ_TYPE_LEN]; uint8 step; - uint8 num; + uint8 w_en; + uint16 num; uint32 bitmap; - bool addr_raw; + uint8 addr_raw; uint8 addr_num; uint16 addr[MACDBG_PMAC_ADDR_INPUT_MAXNUM]; + uint8 pad0[2]; + uint32 w_val; } wl_macdbg_pmac_param_t; -/* IOVAR 'svmp_mem' parameter. Used to read/clear svmp memory */ +/** IOVAR 'svmp_sampcol' parameter. 
Used to set and read SVMP_SAMPLE_COLLECT's setting */ +typedef struct wl_svmp_sampcol_param { + uint32 version; /* version */ + uint8 enable; + uint8 trigger_mode; /* SVMP_SAMPCOL_TRIGGER */ + uint8 trigger_mode_s[2]; /* SVMP_SAMPCOL_PKTPROC */ + uint8 data_samplerate; /* SVMP_SAMPCOL_SAMPLERATE */ + uint8 data_sel_phy1; /* SVMP_SAMPCOL_PHY1MUX */ + uint8 data_sel_rx1; /* SVMP_SAMPCOL_RX1MUX without iqCompOut */ + uint8 data_sel_dualcap; /* SVMP_SAMPCOL_RX1MUX */ + uint8 pack_mode; /* SVMP_SAMPCOL_PACK */ + uint8 pack_order; + uint8 pack_cfix_fmt; + uint8 pack_1core_sel; + uint16 waitcnt; + uint16 caplen; + uint32 buff_addr_start; /* in word-size (2-bytes) */ + uint32 buff_addr_end; /* note: Tcl in byte-size, HW in vector-size (8-bytes) */ + uint8 int2vasip; + uint8 PAD; + uint16 status; +} wl_svmp_sampcol_t; + +#define WL_SVMP_SAMPCOL_PARAMS_VERSION 1 + +enum { + SVMP_SAMPCOL_TRIGGER_PKTPROC_TRANSITION = 0, + SVMP_SAMPCOL_TRIGGER_FORCE_IMMEDIATE, + SVMP_SAMPCOL_TRIGGER_RADAR_DET +}; + +enum { + SVMP_SAMPCOL_PHY1MUX_GPIOOUT = 0, + SVMP_SAMPCOL_PHY1MUX_FFT, + SVMP_SAMPCOL_PHY1MUX_DBGHX, + SVMP_SAMPCOL_PHY1MUX_RX1MUX +}; + +enum { + SVMP_SAMPCOL_RX1MUX_FARROWOUT = 4, + SVMP_SAMPCOL_RX1MUX_IQCOMPOUT, + SVMP_SAMPCOL_RX1MUX_DCFILTEROUT, + SVMP_SAMPCOL_RX1MUX_RXFILTEROUT, + SVMP_SAMPCOL_RX1MUX_ACIFILTEROUT +}; + +enum { + SVMP_SAMPCOL_SAMPLERATE_1XBW = 0, + SVMP_SAMPCOL_SAMPLERATE_2XBW +}; + +enum { + SVMP_SAMPCOL_PACK_DUALCAP = 0, + SVMP_SAMPCOL_PACK_4CORE, + SVMP_SAMPCOL_PACK_2CORE, + SVMP_SAMPCOL_PACK_1CORE +}; + +enum { + SVMP_SAMPCOL_PKTPROC_RESET = 0, + SVMP_SAMPCOL_PKTPROC_CARRIER_SEARCH, + SVMP_SAMPCOL_PKTPROC_WAIT_FOR_NB_PWR, + SVMP_SAMPCOL_PKTPROC_WAIT_FOR_W1_PWR, + SVMP_SAMPCOL_PKTPROC_WAIT_FOR_W2_PWR, + SVMP_SAMPCOL_PKTPROC_OFDM_PHY, + SVMP_SAMPCOL_PKTPROC_TIMING_SEARCH, + SVMP_SAMPCOL_PKTPROC_CHAN_EST_1, + SVMP_SAMPCOL_PKTPROC_LEG_SIG_DEC, + SVMP_SAMPCOL_PKTPROC_SIG_DECODE_1, + SVMP_SAMPCOL_PKTPROC_SIG_DECODE_2, + SVMP_SAMPCOL_PKTPROC_HT_AGC, + 
SVMP_SAMPCOL_PKTPROC_CHAN_EST_2, + SVMP_SAMPCOL_PKTPROC_PAY_DECODE, + SVMP_SAMPCOL_PKTPROC_DSSS_CCK_PHY, + SVMP_SAMPCOL_PKTPROC_WAIT_ENERGY_DROP, + SVMP_SAMPCOL_PKTPROC_WAIT_NCLKS, + SVMP_SAMPCOL_PKTPROC_PAY_DEC_EXT, + SVMP_SAMPCOL_PKTPROC_SIG_FAIL_DELAY, + SVMP_SAMPCOL_PKTPROC_RIFS_SEARCH, + SVMP_SAMPCOL_PKTPROC_BOARD_SWITCH_DIV_SEARCH, + SVMP_SAMPCOL_PKTPROC_DSSS_CCK_BOARD_SWITCH_DIV_SEARCH, + SVMP_SAMPCOL_PKTPROC_CHAN_EST_3, + SVMP_SAMPCOL_PKTPROC_CHAN_EST_4, + SVMP_SAMPCOL_PKTPROC_FINE_TIMING_SEARCH, + SVMP_SAMPCOL_PKTPROC_SET_CLIP_GAIN, + SVMP_SAMPCOL_PKTPROC_NAP, + SVMP_SAMPCOL_PKTPROC_VHT_SIGA_DEC, + SVMP_SAMPCOL_PKTPROC_VHT_SIGB_DEC, + SVMP_SAMPCOL_PKTPROC_PKT_ABORT, + SVMP_SAMPCOL_PKTPROC_DCCAL +}; + +/** IOVAR 'svmp_mem' parameter. Used to read/clear svmp memory */ typedef struct svmp_mem { - uint32 addr; /* offset to read svmp memory from vasip base address */ - uint16 len; /* length in count of uint16's */ - uint16 val; /* set the range of addr/len with a value */ + uint32 addr; /**< offset to read svmp memory from vasip base address */ + uint16 len; /**< length in count of uint16's */ + uint16 val; /**< set the range of addr/len with a value */ } svmp_mem_t; +/** IOVAR 'mu_rate' parameter. read/set mu rate for upto four users */ +#define MU_RATE_CFG_VERSION 1 +typedef struct mu_rate { + uint16 version; /**< version of the structure as defined by MU_RATE_CFG_VERSION */ + uint16 length; /**< length of entire structure */ + uint8 auto_rate; /**< enable/disable auto rate */ + uint8 PAD; + uint16 rate_user[4]; /**< rate per each of four users, set to -1 for no change */ +} mu_rate_t; + +/** IOVAR 'mu_group' parameter. 
Used to set and read MU group recommendation setting */ +#define WL_MU_GROUP_AUTO_COMMAND -1 +#define WL_MU_GROUP_PARAMS_VERSION 3 +#define WL_MU_GROUP_METHOD_NAMELEN 64 +#define WL_MU_GROUP_NGROUP_MAX 15 +#define WL_MU_GROUP_NUSER_MAX 4 +#define WL_MU_GROUP_METHOD_MIN 0 +#define WL_MU_GROUP_NUMBER_AUTO_MIN 1 +#define WL_MU_GROUP_NUMBER_AUTO_MAX 15 +#define WL_MU_GROUP_NUMBER_FORCED_MAX 8 +#define WL_MU_GROUP_METHOD_OLD 0 +#define WL_MU_GROUP_MODE_AUTO 0 +#define WL_MU_GROUP_MODE_FORCED 1 +#define WL_MU_GROUP_FORCED_1GROUP 1 +#define WL_MU_GROUP_ENTRY_EMPTY -1 +typedef struct mu_group { + uint32 version; /* version */ + int16 forced; /* forced group recommendation */ + int16 forced_group_mcs; /* forced group with mcs */ + int16 forced_group_num; /* forced group number */ + int16 group_option[WL_MU_GROUP_NGROUP_MAX][WL_MU_GROUP_NUSER_MAX]; + /* set mode for forced grouping and read mode for auto grouping */ + int16 group_GID[WL_MU_GROUP_NGROUP_MAX]; + int16 group_method; /* methof for VASIP group recommendation */ + int16 group_number; /* requested number for VASIP group recommendation */ + int16 auto_group_num; /* exact number from VASIP group recommendation */ + int8 group_method_name[WL_MU_GROUP_METHOD_NAMELEN]; + uint8 PAD[2]; +} mu_group_t; + +typedef struct mupkteng_sta { + struct ether_addr ea; + uint8 PAD[2]; + int32 nrxchain; + int32 idx; +} mupkteng_sta_t; + +typedef struct mupkteng_client { + int32 rspec; + int32 idx; + int32 flen; + int32 nframes; +} mupkteng_client_t; + +typedef struct mupkteng_tx { + mupkteng_client_t client[8]; + int32 nclients; + int32 ntx; +} mupkteng_tx_t; + +/* + * MU Packet engine interface. + * The following two definitions will go into + * wlioctl_defs.h + * when wl utility changes are merged to EAGLE TOB & Trunk + */ + +#define WL_MUPKTENG_PER_TX_START 0x10 +#define WL_MUPKTENG_PER_TX_STOP 0x20 + +/** IOVAR 'mu_policy' parameter. 
Used to configure MU admission control policies */ +#define WL_MU_POLICY_PARAMS_VERSION 1 +#define WL_MU_POLICY_SCHED_DEFAULT 60 +#define WL_MU_POLICY_DISABLED 0 +#define WL_MU_POLICY_ENABLED 1 +#define WL_MU_POLICY_NRX_MIN 1 +#define WL_MU_POLICY_NRX_MAX 2 +typedef struct mu_policy { + uint16 version; + uint16 length; + uint32 sched_timer; + uint32 pfmon; + uint32 pfmon_gpos; + uint32 samebw; + uint32 nrx; + uint32 max_muclients; +} mu_policy_t; + #define WL_NAN_BAND_STR_SIZE 5 /* sizeof ("auto") */ -/* Definitions of different NAN Bands */ -enum { /* mode selection for reading/writing tx iqlo cal coefficients */ - NAN_BAND_AUTO, - NAN_BAND_B, - NAN_BAND_A, - NAN_BAND_INVALID = 0xFF +/** Definitions of different NAN Bands */ +/* do not change the order */ +enum { + NAN_BAND_B = 0, + NAN_BAND_A, + NAN_BAND_AUTO, + NAN_BAND_INVALID = 0xFF }; -#if defined(WL_LINKSTAT) -typedef struct { - uint32 preamble; - uint32 nss; - uint32 bw; - uint32 rateMcsIdx; - uint32 reserved; - uint32 bitrate; -} wifi_rate; - -typedef struct { - uint16 version; - uint16 length; - uint32 tx_mpdu; - uint32 rx_mpdu; - uint32 mpdu_lost; - uint32 retries; - uint32 retries_short; - uint32 retries_long; - wifi_rate rate; -} wifi_rate_stat_t; - -typedef int32 wifi_radio; - -typedef struct { - uint16 version; - uint16 length; - wifi_radio radio; - uint32 on_time; - uint32 tx_time; - uint32 rx_time; - uint32 on_time_scan; - uint32 on_time_nbd; - uint32 on_time_gscan; - uint32 on_time_roam_scan; - uint32 on_time_pno_scan; - uint32 on_time_hs20; - uint32 num_channels; - uint8 channels[1]; -} wifi_radio_stat; -#endif /* WL_LINKSTAT */ - -#ifdef WL11ULB +/* ifdef WL11ULB */ /* ULB Mode configured via "ulb_mode" IOVAR */ enum { ULB_MODE_DISABLED = 0, @@ -7996,32 +12402,192 @@ typedef enum { /* Add all other enums before this */ MAX_SUPP_ULB_BW } ulb_bw_type_t; -#endif /* WL11ULB */ +/* endif WL11ULB */ + + +#define WL_MESH_IOCTL_VERSION 1 +#define MESH_IOC_BUFSZ 512 /* sufficient ioc buff size for mesh 
*/ + +#ifdef WLMESH +typedef struct mesh_peer_info_ext { + mesh_peer_info_t peer_info; + uint8 pad1; + uint16 local_aid; /* AID generated by *local* to peer */ + uint32 entry_state; /* see MESH_PEER_ENTRY_STATE_ACTIVE etc; valid + * ONLY for internal peering requests + */ + int8 rssi; + uint8 pad2; + struct ether_addr ea; /* peer ea */ +} mesh_peer_info_ext_t; + +/* #ifdef WLMESH */ +typedef struct mesh_peer_info_dump { + uint32 buflen; + uint32 version; + uint16 count; /* number of results */ + uint16 remaining; /* remaining rsults */ + mesh_peer_info_ext_t mpi_ext[1]; +} mesh_peer_info_dump_t; +#define WL_MESH_PEER_RES_FIXED_SIZE (sizeof(mesh_peer_info_dump_t) - sizeof(mesh_peer_info_ext_t)) + +#endif /* WLMESH */ +/* container for mesh iovtls & events */ +typedef struct wl_mesh_ioc { + uint16 version; /* interface command or event version */ + uint16 id; /* mesh ioctl cmd ID */ + uint16 len; /* total length of all tlv records in data[] */ + uint16 pad; /* pad to be 32 bit aligment */ + uint8 data[]; /* var len payload of bcm_xtlv_t type */ +} wl_mesh_ioc_t; + +enum wl_mesh_cmds { + WL_MESH_CMD_ENABLE = 1, + WL_MESH_CMD_JOIN = 2, + WL_MESH_CMD_PEER_STATUS = 3, + WL_MESH_CMD_ADD_ROUTE = 4, + WL_MESH_CMD_DEL_ROUTE = 5, + WL_MESH_CMD_ADD_FILTER = 6, + WL_MESH_CMD_ENAB_AL_METRIC = 7 +}; + +enum wl_mesh_cmd_xtlv_id { + WL_MESH_XTLV_ENABLE = 1, + WL_MESH_XTLV_JOIN = 2, + WL_MESH_XTLV_STATUS = 3, + WL_MESH_XTLV_ADD_ROUTE = 4, + WL_MESH_XTLV_DEL_ROUTE = 5, + WL_MESH_XTLV_ADD_FILTER = 6, + WL_MESH_XTLV_ENAB_AIRLINK = 7 +}; +/* endif WLMESH */ + +/* Fast BSS Transition parameter configuration */ +#define FBT_PARAM_CURRENT_VERSION 0 + +typedef struct _wl_fbt_params { + uint16 version; /* version of the structure + * as defined by FBT_PARAM_CURRENT_VERSION + */ + uint16 length; /* length of the entire structure */ + + uint16 param_type; /* type of parameter defined below */ + uint16 param_len; /* length of the param_value */ + uint8 param_value[1]; /* variable length */ +} 
wl_fbt_params_t; + +#define WL_FBT_PARAM_TYPE_RSNIE 0 +#define WL_FBT_PARAM_TYPE_FTIE 0x1 +#define WL_FBT_PARAM_TYPE_SNONCE 0x2 +#define WL_FBT_PARAM_TYPE_MDE 0x3 +#define WL_FBT_PARAM_TYPE_PMK_R0_NAME 0x4 +#define WL_FBT_PARAM_TYPE_R0_KHID 0x5 +#define WL_FBT_PARAM_TYPE_R1_KHID 0x6 +#define WL_FBT_PARAM_TYPE_FIRST_INVALID 0x7 + +/* Assoc Mgr commands for fine control of assoc */ +#define WL_ASSOC_MGR_CURRENT_VERSION 0x0 + +typedef struct { + uint16 version; /* version of the structure as + * defined by WL_ASSOC_MGR_CURRENT_VERSION + */ + uint16 length; /* length of the entire structure */ + + uint16 cmd; + uint16 params; +} wl_assoc_mgr_cmd_t; + +#define WL_ASSOC_MGR_CMD_PAUSE_ON_EVT 0 /* have assoc pause on certain events */ +#define WL_ASSOC_MGR_CMD_ABORT_ASSOC 1 + +#define WL_ASSOC_MGR_PARAMS_EVENT_NONE 0 /* use this to resume as well as clear */ +#define WL_ASSOC_MGR_PARAMS_PAUSE_EVENT_AUTH_RESP 1 + +#define WL_WINVER_STRUCT_VER_1 (1) + +typedef struct wl_winver { + + /* Version and length of this structure. 
Length includes all fields in wl_winver_t */ + uint16 struct_version; + uint16 struct_length; + + /* Windows operating system version info (Microsoft provided) */ + struct { + uint32 major_ver; + uint32 minor_ver; + uint32 build; + } os_runtime; + + /* NDIS runtime version (Microsoft provided) */ + struct { + uint16 major_ver; + uint16 minor_ver; + } ndis_runtime; + + /* NDIS Driver version (Broadcom provided) */ + struct { + uint16 major_ver; + uint16 minor_ver; + } ndis_driver; + + /* WDI Upper Edge (UE) Driver version (Microsoft provided) */ + struct { + uint8 major_ver; + uint8 minor_ver; + uint8 suffix; + } wdi_ue; + + /* WDI Lower Edge (LE) Driver version (Broadcom provided) */ + struct { + uint8 major_ver; + uint8 minor_ver; + uint8 suffix; + } wdi_le; + uint8 PAD[2]; +} wl_winver_t; + +/* defined(WLRCC) || defined(ROAM_CHANNEL_CACHE) */ +#define MAX_ROAM_CHANNEL 20 +typedef struct { + int32 n; + chanspec_t channels[MAX_ROAM_CHANNEL]; +} wl_roam_channel_list_t; +/* endif RCC || ROAM_CHANNEL_CACHE */ -#ifdef MFP /* values for IOV_MFP arg */ enum { WL_MFP_NONE = 0, WL_MFP_CAPABLE, WL_MFP_REQUIRED }; -#endif /* MFP */ - -#if defined(WLRCC) -#define MAX_ROAM_CHANNEL 20 - -typedef struct { - int n; - chanspec_t channels[MAX_ROAM_CHANNEL]; -} wl_roam_channel_list_t; -#endif +typedef enum { + CHANSW_UNKNOWN = 0, /* channel switch due to unknown reason */ + CHANSW_SCAN = 1, /* channel switch due to scan */ + CHANSW_PHYCAL = 2, /* channel switch due to phy calibration */ + CHANSW_INIT = 3, /* channel set at WLC up time */ + CHANSW_ASSOC = 4, /* channel switch due to association */ + CHANSW_ROAM = 5, /* channel switch due to roam */ + CHANSW_MCHAN = 6, /* channel switch triggered by mchan module */ + CHANSW_IOVAR = 7, /* channel switch due to IOVAR */ + CHANSW_CSA_DFS = 8, /* channel switch due to chan switch announcement from AP */ + CHANSW_APCS = 9, /* Channel switch from AP channel select module */ + CHANSW_AWDL = 10, /* channel switch due to AWDL */ + CHANSW_FBT = 
11, /* Channel switch from FBT module for action frame response */ + CHANSW_UPDBW = 12, /* channel switch at update bandwidth */ + CHANSW_ULB = 13, /* channel switch at ULB */ + CHANSW_LAST = 14 /* last channel switch reason */ +} chansw_reason_t; /* - * Neighbor Discover Offload: enable NDO feature - * Called by ipv6 event handler when interface comes up - * Set RA rate limit interval value(%) + * WOWL unassociated mode power svae pattern. */ +typedef struct wowl_radio_duty_cycle { + uint16 wake_interval; + uint16 sleep_interval; +} wowl_radio_duty_cycle_t; + typedef struct nd_ra_ol_limits { uint16 version; /* version of the iovar buffer */ uint16 type; /* type of data provided */ @@ -8050,25 +12616,1203 @@ typedef struct nd_ra_ol_limits { #define ND_RA_OL_LIMITS_REL_TYPE_LEN 12 #define ND_RA_OL_LIMITS_FIXED_TYPE_LEN 10 -#define ND_RA_OL_SET "SET" -#define ND_RA_OL_GET "GET" -#define ND_PARAM_SIZE 50 -#define ND_VALUE_SIZE 5 -#define ND_PARAMS_DELIMETER " " -#define ND_PARAM_VALUE_DELLIMETER '=' -#define ND_LIMIT_STR_FMT ("%50s %50s") - -#define ND_RA_TYPE "TYPE" -#define ND_RA_MIN_TIME "MIN" -#define ND_RA_PER "PER" -#define ND_RA_HOLD "HOLD" - /* * Temperature Throttling control mode */ typedef struct wl_temp_control { - bool enable; + uint8 enable; + uint8 PAD; uint16 control_bit; } wl_temp_control_t; +/* SensorHub Interworking mode */ + +#define SHUB_CONTROL_VERSION 1 +#define SHUB_CONTROL_LEN 12 + +typedef struct { + uint16 verison; + uint16 length; + uint16 cmd; + uint16 op_mode; + uint16 interval; + uint16 enable; +} shub_control_t; + +/* WLC_MAJOR_VER <= 5 */ +/* Data structures for non-TLV format */ + +/* Data structures for rsdb caps */ +/* + * The flags field of the rsdb_caps_response is designed to be + * a Bit Mask. As of now only Bit 0 is used as mentioned below. + */ + +/* Bit-0 in flags is used to indicate if the cores can operate synchronously +* i.e either as 2x2 MIMO or 2(1x1 SISO). 
This is true only for 4349 variants +* 0 - device can operate only in rsdb mode (eg: 4364) +* 1 - device can operate in both rsdb and mimo (eg : 4359 variants) +*/ + +#define WL_RSDB_CAPS_VER 2 +#define SYNCHRONOUS_OPERATION_TRUE (1 << 0) +#define WL_RSDB_CAPS_FIXED_LEN OFFSETOF(rsdb_caps_response_t, num_chains) + +typedef struct rsdb_caps_response { + uint8 ver; /* Version */ + uint8 len; /* length of this structure excluding ver and len */ + uint8 rsdb; /* TRUE for rsdb chip */ + uint8 num_of_cores; /* no of d11 cores */ + uint16 flags; /* Flags to indicate various capabilities */ + uint8 num_chains[1]; /* Tx/Rx chains for each core */ +} rsdb_caps_response_t; + +/* Data structures for rsdb bands */ + +#define WL_RSDB_BANDS_VER 2 +#define WL_RSDB_BANDS_FIXED_LEN OFFSETOF(rsdb_bands_t, band) + +typedef struct rsdb_bands +{ + uint8 ver; + uint8 len; + uint16 num_cores; /* num of D11 cores */ + int16 band[1]; /* The band operating on each of the d11 cores */ +} rsdb_bands_t; + +/* rsdb config */ + +#define WL_RSDB_CONFIG_VER 3 +#define ALLOW_SIB_PARALLEL_SCAN (1 << 0) +#define MAX_BANDS 2 + +#define WL_RSDB_CONFIG_LEN sizeof(rsdb_config_t) + + +typedef uint8 rsdb_opmode_t; +typedef uint32 rsdb_flags_t; + +typedef enum rsdb_modes { + WLC_SDB_MODE_NOSDB_MAIN = 1, /* 2X2 or MIMO mode (applicable only for 4355) */ + WLC_SDB_MODE_NOSDB_AUX = 2, + WLC_SDB_MODE_SDB_MAIN = 3, /* This is RSDB mode(default) applicable only for 4364 */ + WLC_SDB_MODE_SDB_AUX = 4, + WLC_SDB_MODE_SDB_AUTO = 5, /* Same as WLC_RSDB_MODE_RSDB(1+1) mode above */ +} rsdb_modes_t; + +typedef struct rsdb_config { + uint8 ver; + uint8 len; + uint16 reserved; + rsdb_opmode_t non_infra_mode; + rsdb_opmode_t infra_mode[MAX_BANDS]; + rsdb_flags_t flags[MAX_BANDS]; + rsdb_opmode_t current_mode; /* Valid only in GET, returns the current mode */ + uint8 pad[3]; +} rsdb_config_t; + +/* WLC_MAJOR_VER > =5 */ +/* TLV definitions and data structures for rsdb subcmds */ + +enum wl_rsdb_cmd_ids { + /* RSDB ioctls */ 
+ WL_RSDB_CMD_VER = 0, + WL_RSDB_CMD_CAPS = 1, + WL_RSDB_CMD_BANDS = 2, + WL_RSDB_CMD_CONFIG = 3, + /* Add before this !! */ + WL_RSDB_CMD_LAST +}; +#define WL_RSDB_IOV_VERSION 0x1 + +typedef struct rsdb_caps_response_v1 { + uint8 rsdb; /* TRUE for rsdb chip */ + uint8 num_of_cores; /* no of d11 cores */ + uint16 flags; /* Flags to indicate various capabilities */ + uint8 num_chains[MAX_NUM_D11CORES]; /* Tx/Rx chains for each core */ + uint8 band_cap[MAX_NUM_D11CORES]; /* band cap bitmask per slice */ +} rsdb_caps_response_v1_t; + +typedef struct rsdb_bands_v1 +{ + uint8 num_cores; /* num of D11 cores */ + uint8 pad; /* padding bytes for 4 byte alignment */ + int8 band[MAX_NUM_D11CORES]; /* The band operating on each of the d11 cores */ +} rsdb_bands_v1_t; + +typedef struct rsdb_config_xtlv { + rsdb_opmode_t reserved1; /* Non_infra mode is no more applicable */ + rsdb_opmode_t infra_mode[MAX_BANDS]; /* Target mode for Infra association */ + uint8 pad; /* pad bytes for 4 byte alignment */ + rsdb_flags_t flags[MAX_BANDS]; + rsdb_opmode_t current_mode; /* GET only; has current mode of operation */ + uint8 pad1[3]; +} rsdb_config_xtlv_t; + +/* Definitions for slot_bss chanseq iovar */ +#define WL_SLOT_BSS_VERSION 1 + +enum wl_slotted_bss_cmd_id { + WL_SLOTTED_BSS_CMD_VER = 0, + WL_SLOTTED_BSS_CMD_CHANSEQ = 1 +}; +typedef uint16 chan_seq_type_t; +enum chan_seq_type { + CHAN_SEQ_TYPE_AWDL = 1, + CHAN_SEQ_TYPE_SLICE = 2, + CHAN_SEQ_TYPE_NAN = 3 +}; +typedef uint8 sched_flag_t; +enum sched_flag { + NO_SDB_SCHED = 0x1, + SDB_TDM_SCHED = 0x2, + SDB_SPLIT_BAND_SCHED = 0x4, /* default mode for 4357 */ + MAIN_ONLY = 0x8, + AUX_ONLY = 0x10, + SDB_DUAL_TIME = (MAIN_ONLY | AUX_ONLY), + NO_SDB_MAIN_ONLY = (NO_SDB_SCHED | MAIN_ONLY), /* default mode for 4364 */ + SDB_TDM_SCHED_MAIN = (SDB_TDM_SCHED | MAIN_ONLY), + SDB_TDM_SCHED_AUX = (SDB_TDM_SCHED | AUX_ONLY), + SDB_TDM_SCHED_DUAL_TIME = (SDB_TDM_SCHED | SDB_DUAL_TIME), + SDB_SPLIT_BAND_SCHED_DUAL_TIME = (SDB_SPLIT_BAND_SCHED | 
SDB_DUAL_TIME) +}; + +typedef struct chan_seq_tlv_data { + uint32 flags; + uint8 data[1]; +} chan_seq_tlv_data_t; + +typedef struct chan_seq_tlv { + chan_seq_type_t type; + uint16 len; + chan_seq_tlv_data_t chanseq_data[1]; +} chan_seq_tlv_t; + +typedef struct sb_channel_sequence { + sched_flag_t sched_flags; /* (sdb-tdm or sdb-sb or Dual-Time) */ + uint8 num_seq; /* number of chan_seq_tlv following */ + uint16 pad; + chan_seq_tlv_t seq[1]; +} sb_channel_sequence_t; + +typedef struct slice_chan_seq { + uint8 slice_index; /* 0(Main) or 1 (Aux) */ + uint8 num_chanspecs; + uint16 pad; + chanspec_t chanspecs[1]; +} slice_chan_seq_t; + +#define WL_SLICE_CHAN_SEQ_FIXED_LEN OFFSETOF(slice_chan_seq_t, chanspecs) + +typedef struct sim_pm_params { + uint32 enabled; + uint16 cycle; + uint16 up; +} sim_pm_params_t; + +/* Bits for fw_status */ +#define NAP_DISABLED_HOST 0x01 /* Host has disabled through nap_enable */ +#define NAP_DISABLED_RSSI 0x02 /* Disabled because of nap_rssi_threshold */ + +/* Bits for hw_status */ +#define NAP_HWCFG 0x01 /* State of NAP config bit in phy HW */ + +/* ifdef WL_NATOE */ +#define WL_NATOE_IOCTL_VERSION 1 +#define WL_NATOE_IOC_BUFSZ 512 /* sufficient ioc buff size for natoe */ +#define WL_NATOE_DBG_STATS_BUFSZ 2048 + +/* config natoe STA and AP IP's structure */ +typedef struct { + uint32 sta_ip; + uint32 sta_netmask; + uint32 sta_router_ip; + uint32 sta_dnsip; + uint32 ap_ip; + uint32 ap_netmask; +} wl_natoe_config_ips_t; + +/* natoe ports config structure */ +typedef struct { + uint16 start_port_num; + uint16 no_of_ports; +} wl_natoe_ports_config_t; + +/* natoe ports exception info */ +typedef struct { + uint16 sta_port_num; + uint16 dst_port_num; /* for SIP type protocol, dst_port_num info can be ignored by FW */ + uint32 ip; /* for SIP ip is APcli_ip and for port clash it is dst_ip */ + uint8 entry_type; /* Create/Destroy */ + uint8 pad[3]; +} wl_natoe_exception_port_t; + +/* container for natoe ioctls & events */ +typedef struct 
wl_natoe_ioc { + uint16 version; /* interface command or event version */ + uint16 id; /* natoe ioctl cmd ID */ + uint16 len; /* total length of all tlv records in data[] */ + uint16 pad; /* pad to be 32 bit aligment */ + uint8 data[]; /* var len payload of bcm_xtlv_t type */ +} wl_natoe_ioc_t; + +enum wl_natoe_cmds { + WL_NATOE_CMD_ENABLE = 1, + WL_NATOE_CMD_CONFIG_IPS = 2, + WL_NATOE_CMD_CONFIG_PORTS = 3, + WL_NATOE_CMD_DBG_STATS = 4, + WL_NATOE_CMD_EXCEPTION_PORT = 5, + WL_NATOE_CMD_SKIP_PORT = 6, + WL_NATOE_CMD_TBL_CNT = 7 +}; + +enum wl_natoe_cmd_xtlv_id { + WL_NATOE_XTLV_ENABLE = 1, + WL_NATOE_XTLV_CONFIG_IPS = 2, + WL_NATOE_XTLV_CONFIG_PORTS = 3, + WL_NATOE_XTLV_DBG_STATS = 4, + WL_NATOE_XTLV_EXCEPTION_PORT = 5, + WL_NATOE_XTLV_SKIP_PORT = 6, + WL_NATOE_XTLV_TBL_CNT = 7 +}; + +/* endif WL_NATOE */ + +enum wl_idauth_cmd_ids { + WL_IDAUTH_CMD_CONFIG = 1, + WL_IDAUTH_CMD_PEER_INFO = 2, + WL_IDAUTH_CMD_COUNTERS = 3, + WL_IDAUTH_CMD_LAST +}; +enum wl_idauth_xtlv_id { + WL_IDAUTH_XTLV_AUTH_ENAB = 0x1, + WL_IDAUTH_XTLV_GTK_ROTATION = 0x2, + WL_IDAUTH_XTLV_EAPOL_COUNT = 0x3, + WL_IDAUTH_XTLV_EAPOL_INTRVL = 0x4, + WL_IDAUTH_XTLV_BLKLIST_COUNT = 0x5, + WL_IDAUTH_XTLV_BLKLIST_AGE = 0x6, + WL_IDAUTH_XTLV_PEERS_INFO = 0x7, + WL_IDAUTH_XTLV_COUNTERS = 0x8 +}; +enum wl_idauth_stats { + WL_AUTH_PEER_STATE_AUTHORISED = 0x01, + WL_AUTH_PEER_STATE_BLACKLISTED = 0x02, + WL_AUTH_PEER_STATE_4WAY_HS_ONGOING = 0x03, + WL_AUTH_PEER_STATE_LAST +}; +typedef struct { + uint16 state; /* Peer State: Authorised or Blacklisted */ + struct ether_addr peer_addr; /* peer Address */ + uint32 blklist_end_time; /* Time of blacklist end */ +} auth_peer_t; +typedef struct wl_idauth_counters { + uint32 auth_reqs; /* No of auth req recvd */ + uint32 mic_fail; /* No of mic fails */ + uint32 four_way_hs_fail; /* No of 4-way handshake fails */ +} wl_idauth_counters_t; + +#define WLC_UTRACE_LEN 512 +#define WLC_UTRACE_READ_END 0 +#define WLC_UTRACE_MORE_DATA 1 +typedef struct wl_utrace_capture_args_v1 { 
+ uint32 length; + uint32 flag; +} wl_utrace_capture_args_v1_t; + +#define UTRACE_CAPTURE_VER_2 2 +typedef struct wl_utrace_capture_args_v2 { + /* structure control */ + uint16 version; /**< structure version */ + uint16 length; /**< length of the response */ + uint32 flag; /* Indicates if there is more data or not */ +} wl_utrace_capture_args_v2_t; + +/* XTLV IDs for the Health Check "hc" iovar top level container */ +enum { + WL_HC_XTLV_ID_CAT_HC = 1, /* category for HC as a whole */ + WL_HC_XTLV_ID_CAT_DATAPATH_TX = 2, /* Datapath Tx */ + WL_HC_XTLV_ID_CAT_DATAPATH_RX = 3, /* Datapath Rx */ + WL_HC_XTLV_ID_CAT_SCAN = 4, /* Scan */ +}; + +/* Health Check: Common XTLV IDs for sub-elements in the top level container + * Number starts at 0x8000 to be out of the way for category specific IDs. + */ +enum { + WL_HC_XTLV_ID_ERR = 0x8000, /* for sub-command err return */ + WL_HC_XTLV_ID_IDLIST = 0x8001, /* container for uint16 IDs */ +}; + +/* Health Check: Datapath TX IDs */ +enum { + WL_HC_TX_XTLV_ID_VAL_STALL_THRESHOLD = 1, /* stall_threshold */ + WL_HC_TX_XTLV_ID_VAL_STALL_SAMPLE_SIZE = 2, /* stall_sample_size */ + WL_HC_TX_XTLV_ID_VAL_STALL_TIMEOUT = 3, /* stall_timeout */ + WL_HC_TX_XTLV_ID_VAL_STALL_FORCE = 4, /* stall_force */ + WL_HC_TX_XTLV_ID_VAL_STALL_EXCLUDE = 5, /* stall_exclude */ + WL_HC_TX_XTLV_ID_VAL_FC_TIMEOUT = 6, /* flow ctl timeout */ + WL_HC_TX_XTLV_ID_VAL_FC_FORCE = 7, /* flow ctl force failure */ + WL_HC_TX_XTLV_ID_VAL_DELAY_TO_TRAP = 8, /* delay threshold for forced trap */ + WL_HC_TX_XTLV_ID_VAL_DELAY_TO_RPT = 9, /* delay threshold for event log report */ +}; + +/* Health Check: Datapath RX IDs */ +enum { + WL_HC_RX_XTLV_ID_VAL_DMA_STALL_TIMEOUT = 1, /* dma_stall_timeout */ + WL_HC_RX_XTLV_ID_VAL_DMA_STALL_FORCE = 2, /* dma_stall test trigger */ + WL_HC_RX_XTLV_ID_VAL_STALL_THRESHOLD = 3, /* stall_threshold */ + WL_HC_RX_XTLV_ID_VAL_STALL_SAMPLE_SIZE = 4, /* stall_sample_size */ + WL_HC_RX_XTLV_ID_VAL_STALL_FORCE = 5, /* stall test trigger */ 
+}; + +/* Health Check: Datapath SCAN IDs */ +enum { + WL_HC_XTLV_ID_VAL_SCAN_STALL_THRESHOLD = 1, /* scan stall threshold */ +}; + +/* IDs of Health Check report structures for sub types of health checks within WL */ +enum { + WL_HC_DD_UNDEFINED = 0, /* Undefined */ + WL_HC_DD_RX_DMA_STALL = 1, /* RX DMA stall check */ + WL_HC_DD_RX_STALL = 2, /* RX stall check */ + WL_HC_DD_TX_STALL = 3, /* TX stall check */ + WL_HC_DD_SCAN_STALL = 4, /* SCAN stall check */ + WL_HC_DD_MAX +}; + +/* + * Health Check report structures for sub types of health checks within WL + */ + +/* Health Check report structure for Rx DMA Stall check */ +typedef struct { + uint16 type; + uint16 length; + uint16 timeout; + uint16 stalled_dma_bitmap; +} wl_rx_dma_hc_info_t; + +/* Health Check report structure for Tx packet failure check */ +typedef struct { + uint16 type; + uint16 length; + uint32 stall_bitmap; + uint32 stall_bitmap1; + uint32 failure_ac; + uint32 threshold; + uint32 tx_all; + uint32 tx_failure_all; +} wl_tx_hc_info_t; + +/* Health Check report structure for Rx dropped packet failure check */ +typedef struct { + uint16 type; + uint16 length; + uint32 bsscfg_idx; + uint32 rx_hc_pkts; + uint32 rx_hc_dropped_all; + uint32 rx_hc_alert_th; +} wl_rx_hc_info_t; + +/* HE top level command IDs */ +enum { + WL_HE_CMD_ENAB = 0, + WL_HE_CMD_FEATURES = 1, + WL_HE_CMD_TWT_SETUP = 2, + WL_HE_CMD_TWT_TEARDOWN = 3, + WL_HE_CMD_TWT_INFO = 4, + WL_HE_CMD_BSSCOLOR = 5, + WL_HE_CMD_PARTIAL_BSSCOLOR = 6, + WL_HE_CMD_LAST +}; + +#define WL_HEB_VERSION 0 + +/* HEB top level command IDs */ +enum { + WL_HEB_CMD_ENAB = 0, + WL_HEB_CMD_NUM_HEB = 1, + WL_HEB_CMD_COUNTERS = 1, + WL_HEB_CMD_CLEAR_COUNTERS = 2, + WL_HEB_CMD_LAST +}; + +/* HEB counters structures */ +typedef struct { + uint16 pre_event; + uint16 start_event; + uint16 end_event; + uint16 missed; +} wl_heb_int_cnt_t; + +typedef struct { + /* structure control */ + uint16 version; /* structure version */ + uint16 length; /* data length (starting 
after this field) */ + wl_heb_int_cnt_t heb_int_cnt[1]; +} wl_heb_cnt_t; + + +/* TWT Setup descriptor */ +typedef struct { + /* Setup Command. */ + uint8 setup_cmd; /* See TWT_SETUP_CMD_XXXX in 802.11ah.h, + * valid when bcast_twt is FALSE. + */ + /* Flow attributes */ + uint8 flow_flags; /* See WL_TWT_FLOW_FLAG_XXXX below */ + uint8 flow_id; /* must be between 0 and 7 */ + /* Target Wake Time */ + uint8 wake_type; /* See WL_TWT_TIME_TYPE_XXXX below */ + uint32 wake_time_h; /* target wake time - BSS TSF (us) */ + uint32 wake_time_l; + uint32 wake_dur; /* target wake duration in us units */ + uint32 wake_int; /* target wake interval */ +} wl_twt_sdesc_t; + +/* Flow flags */ +#define WL_TWT_FLOW_FLAG_BROADCAST (1<<0) +#define WL_TWT_FLOW_FLAG_IMPLICIT (1<<1) +#define WL_TWT_FLOW_FLAG_UNANNOUNCED (1<<2) +#define WL_TWT_FLOW_FLAG_TRIGGER (1<<3) + +/* Flow id */ +#define WL_TWT_FLOW_ID_FID 0x07 /* flow id */ +#define WL_TWT_FLOW_ID_GID_MASK 0x70 /* group id - broadcast TWT only */ +#define WL_TWT_FLOW_ID_GID_SHIFT 4 + +/* Wake type */ +/* TODO: not yet finalized */ +#define WL_TWT_TIME_TYPE_BSS 0 /* The time specified in wake_time_h/l is + * the BSS TSF time. + */ +#define WL_TWT_TIME_TYPE_OFFSET 1 /* The time specified in wake_time_h/l is an offset + * of the TSF time when the iovar is processed. 
+ */ + +#define WL_TWT_SETUP_VER 0 + +/* HE TWT Setup command */ +typedef struct { + /* structure control */ + uint16 version; /* structure version */ + uint16 length; /* data length (starting after this field) */ + /* peer address */ + struct ether_addr peer; /* leave it all 0s' for AP */ + /* session id */ + uint8 dialog; /* an arbitrary number to identify the seesion */ + uint8 pad; + /* setup descriptor */ + wl_twt_sdesc_t desc; +} wl_twt_setup_t; + +#define WL_TWT_TEARDOWN_VER 0 + +/* HE TWT Teardown command */ +typedef struct { + /* structure control */ + uint16 version; /* structure version */ + uint16 length; /* data length (starting after this field) */ + /* peer address */ + struct ether_addr peer; /* leave it all 0s' for AP */ + /* flow attributes */ + uint8 flow_flags; /* See WL_TWT_FLOW_FLAG_XXXX above. + * (only BORADCAST) is applicable) + */ + uint8 flow_id; /* must be between 0 and 7 */ +} wl_twt_teardown_t; + +/* twt information descriptor */ +typedef struct { + uint8 flow_flags; /* See WL_TWT_INFO_FLAG_XXX below */ + uint8 flow_id; + uint8 pad[2]; + uint32 next_twt_h; + uint32 next_twt_l; +} wl_twt_idesc_t; + +/* Flow flags */ +#define WL_TWT_INFO_FLAG_RESP_REQ (1<<0) /* Request response */ + +#define WL_TWT_INFO_VER 0 + +/* HE TWT Information command */ +typedef struct { + /* structure control */ + uint16 version; /* structure version */ + uint16 length; /* data length (starting after this field) */ + /* peer address */ + struct ether_addr peer; /* leave it all 0s' for AP */ + uint8 pad[2]; + /* information descriptor */ + wl_twt_idesc_t desc; +} wl_twt_info_t; + +/* Current version for wlc_clm_power_limits_req_t structure and flags */ +#define WLC_CLM_POWER_LIMITS_REQ_VERSION 1 +/* "clm_power_limits" iovar request structure */ +typedef struct wlc_clm_power_limits_req { + /* Input. Structure and flags version */ + uint32 version; + /* Full length of buffer (includes this structure and space for TLV-encoded PPR) */ + uint32 buflen; + /* Input. 
Flags (see WLC_CLM_POWER_LIMITS_INPUT_FLAG_... below) */ + uint32 input_flags; + /* Input. CC of region whose data is being requested */ + char cc[WLC_CNTRY_BUF_SZ]; + /* Input. Channel/subchannel in chanspec_t format */ + uint32 chanspec; + /* Subchannel encoded as clm_limits_type_t */ + uint32 clm_subchannel; + /* Input. 0-based antenna index */ + uint32 antenna_idx; + /* Output. General flags (see WLC_CLM_POWER_LIMITS_OUTPUT_FLAG_... below) */ + uint32 output_flags; + /* Output. 2.4G country flags, encoded as clm_flags_t enum */ + uint32 clm_country_flags_2g; + /* Output. 5G country flags, encoded as clm_flags_t enum */ + uint32 clm_country_flags_5g; + /* Output. Length of TLV-encoded PPR data that follows this structure */ + uint32 ppr_tlv_size; + /* Output. Beginning of buffer for TLV-encoded PPR data */ + uint8 ppr_tlv[1]; +} wlc_clm_power_limits_req_t; + +/* Input. Do not apply SAR limits */ +#define WLC_CLM_POWER_LIMITS_INPUT_FLAG_NO_SAR 0x00000001 +/* Input. Do not apply board limits */ +#define WLC_CLM_POWER_LIMITS_INPUT_FLAG_NO_BOARD 0x00000002 +/* Output. Limits taken from product-specific country data */ +#define WLC_CLM_POWER_LIMITS_OUTPUT_FLAG_PRODUCT_LIMITS 0x00000001 +/* Output. Limits taken from product-specific worldwide data */ +#define WLC_CLM_POWER_LIMITS_OUTPUT_FLAG_WORLDWIDE_LIMITS 0x00000002 +/* Output. 
Limits taken from country-default (all-product) data */ +#define WLC_CLM_POWER_LIMITS_OUTPUT_FLAG_DEFAULT_COUNTRY_LIMITS 0x00000004 + +/* + * WOG (Wake On Googlecast) + */ + +#define MAX_GCAST_APPID_CNT_LIMIT 50 +#define MAX_DNS_LABEL 63 + +typedef struct wog_appid { + uint8 appID[MAX_DNS_LABEL+1]; +} wog_appid_t; + +enum { + WOG_APPID_ADD, + WOG_APPID_DEL, + WOG_APPID_CLEAR, + WOG_APPID_LIST, + WOG_MAX_APPID_CNT +}; + +#define WOG_APPID_IOV_VER 1 +typedef struct wog_appid_iov { + /* version for iovar */ + uint32 ver; + /* add/del/clear/list operation */ + uint32 operation; + /* for adding or deleting multiple items */ + /* for WOG_MAX_APPID_CNT, this value is used for max count for AppID */ + uint32 cnt; + /* Application IDs */ + /* If FW found an AppID from this list, FW will respond to discovery */ + /* without wake up the host */ + wog_appid_t appids[1]; +} wog_appid_iov_t; + +/* dns service record */ +/* service name : _googlecast */ +typedef struct wog_srv_record { + uint32 ttl; + uint16 port; /* tcp 8008 or 8009 */ + uint8 PAD[2]; +} wog_srv_record_t; + +#define GCAST_MAX_MODEL_NAME_LEN 16 +#define GCAST_MAX_FNAME_LEN 64 +#define GCAST_MAX_RS_LEN 60 + +#define GCAST_UUID_LEN 32 +#define GCAST_PUBLICKEY_ID_LEN 64 +#define GCAST_VER_LEN 2 +typedef struct wog_txt_record { + uint32 ttl; + /* id : UUID for the receiver */ + char id[GCAST_UUID_LEN+1]; + + /* Cast protocol version supported. Begins at 2 */ + /* and is incremented by 1 with each version */ + char ver[GCAST_VER_LEN+1]; + + /* 256bit receiver Subject Public Key Identifier from the SSL cert */ + char public_key[GCAST_PUBLICKEY_ID_LEN+1]; + + /* A bitfield of device capabilities. 
*/ + /* bit 0 : video_out (1:has video out, 0:no video) */ + /* bit 1 : video_in */ + /* bit 2 : audio_out */ + /* bit 3 : audio_in */ + /* bit 4 : dev_mode */ + /* (1:dev mode enabled, 0: not enabled) */ + char capability; + + /* Receiver status flag 0:IDLE, 1(BUSY/JOIN) */ + /* IDLE : The receiver is idle */ + /* and doesn't need to be connected now. */ + /* BUSY/JOIN : The receiver is hosting an activity */ + /* and invites the sender to join */ + char receiver_status_flag; + + uint8 PAD0[1]; + + char friendly_name[GCAST_MAX_FNAME_LEN+1]; + uint8 PAD1[3]; + + char model_name[GCAST_MAX_MODEL_NAME_LEN+1]; + uint8 PAD2[3]; + + /* Receiver Status text for Cast Protocol v2 */ + /* Spec says that if the status text exceeds 60 characters in length, */ + /* it is truncated at 60 caracters and */ + /* a UTF-8 ellipsis character is appended to indicate trucation. */ + /* But our dongle won't use UTF-8 ellipsis. It's not a big deal. */ + char receiver_status[GCAST_MAX_RS_LEN+1]; + uint8 PAD3[3]; +} wog_txt_record_t; + +/* ip will be taken from the ip of wog_info_t */ +typedef struct wog_a_record { + uint32 ttl; +} wog_a_record_t; + +/* Google Cast protocl uses mDNS SD for its discovery */ +#define WOG_SD_RESP_VER 1 +typedef struct wog_sd_resp { + /* version for iovar */ + int32 ver; + /* device name of Google Cast receiver */ + char device_name[MAX_DNS_LABEL+1]; + /* IP address of Google Cast receiver */ + uint8 ip[4]; + /* ttl of PTR response */ + uint32 ptr_ttl; + /* DNS TXT record */ + wog_txt_record_t txt; + /* DNS SRV record */ + wog_srv_record_t srv; + /* DNS A record */ + wog_a_record_t a; +} wog_sd_resp_t; + +enum wl_mbo_cmd_ids { + WL_MBO_CMD_ADD_CHAN_PREF = 1, + WL_MBO_CMD_DEL_CHAN_PREF = 2, + WL_MBO_CMD_LIST_CHAN_PREF = 3, + WL_MBO_CMD_CELLULAR_DATA_CAP = 4, + WL_MBO_CMD_DUMP_COUNTERS = 5, + WL_MBO_CMD_CLEAR_COUNTERS = 6, + WL_MBO_CMD_FORCE_ASSOC = 7, + WL_MBO_CMD_BSSTRANS_REJECT = 8, + WL_MBO_CMD_SEND_NOTIF = 9, + /* Add before this !! 
*/ + WL_MBO_CMD_LAST +}; + +enum wl_mbo_xtlv_id { + WL_MBO_XTLV_OPCLASS = 0x1, + WL_MBO_XTLV_CHAN = 0x2, + WL_MBO_XTLV_PREFERENCE = 0x3, + WL_MBO_XTLV_REASON_CODE = 0x4, + WL_MBO_XTLV_CELL_DATA_CAP = 0x5, + WL_MBO_XTLV_COUNTERS = 0x6, + WL_MBO_XTLV_ENABLE = 0x7, + WL_MBO_XTLV_SUB_ELEM_TYPE = 0x8 +}; + +typedef struct wl_mbo_counters { + /* No of transition req recvd */ + uint16 trans_req_rcvd; + /* No of transition req with disassoc imminent */ + uint16 trans_req_disassoc; + /* No of transition req with BSS Termination */ + uint16 trans_req_bss_term; + /* No of trans req w/ unspecified reason */ + uint16 trans_resn_unspec; + /* No of trans req w/ reason frame loss */ + uint16 trans_resn_frm_loss; + /* No of trans req w/ reason traffic delay */ + uint16 trans_resn_traffic_delay; + /* No of trans req w/ reason insufficient buffer */ + uint16 trans_resn_insuff_bw; + /* No of trans req w/ reason load balance */ + uint16 trans_resn_load_bal; + /* No of trans req w/ reason low rssi */ + uint16 trans_resn_low_rssi; + /* No of trans req w/ reason excessive retransmission */ + uint16 trans_resn_xcess_retransmn; + /* No of trans req w/ reason gray zone */ + uint16 trans_resn_gray_zone; + /* No of trans req w/ reason switch to premium AP */ + uint16 trans_resn_prem_ap_sw; + /* No of transition rejection sent */ + uint16 trans_rejn_sent; + /* No of trans rejn reason excessive frame loss */ + uint16 trans_rejn_xcess_frm_loss; + /* No of trans rejn reason excessive traffic delay */ + uint16 trans_rejn_xcess_traffic_delay; + /* No of trans rejn reason insufficient QoS capability */ + uint16 trans_rejn_insuffic_qos_cap; + /* No of trans rejn reason low RSSI */ + uint16 trans_rejn_low_rssi; + /* No of trans rejn reason high interference */ + uint16 trans_rejn_high_interference; + /* No of trans rejn reason service unavilable */ + uint16 trans_rejn_service_unavail; + /* No of beacon request rcvd */ + uint16 bcn_req_rcvd; + /* No of beacon report sent */ + uint16 bcn_rep_sent; + /* 
No of null beacon report sent */ + uint16 null_bcn_rep_sent; + /* No of wifi to cell switch */ + uint16 wifi_to_cell; +} wl_mbo_counters_t; + +/* otpread command */ +#define WL_OTPREAD_VER 1 + +typedef struct { + uint16 version; /* cmd structure version */ + uint16 cmd_len; /* cmd struct len */ + uint32 rdmode; /* otp read mode */ + uint32 rdoffset; /* byte offset into otp to start read */ + uint32 rdsize; /* number of bytes to read */ +} wl_otpread_cmd_t; + +/* "otpecc_rows" command */ +typedef struct { + uint16 version; /* version of this structure */ + uint16 len; /* len in bytes of this structure */ + uint32 cmdtype; /* command type : 0 : read row data, 1 : ECC lock */ + uint32 rowoffset; /* start row offset */ + uint32 numrows; /* number of rows */ + uint8 rowdata[]; /* read rows data */ +} wl_otpecc_rows_t; + +#define WL_OTPECC_ROWS_VER 1 + +#define WL_OTPECC_ROWS_CMD_READ 0 +#define WL_OTPECC_ROWS_CMD_LOCK 1 + +#define WL_OTPECC_ARGIDX_CMDTYPE 0 /* command type */ +#define WL_OTPECC_ARGIDX_ROWOFFSET 1 /* start row offset */ +#define WL_OTPECC_ARGIDX_NUMROWS 2 /* number of rows */ + +/* "otpeccrows" raw data size per row */ +#define WL_ECCDUMP_ROW_SIZE_BYTE 6 /* 4 bytes row data + 2 bytes ECC status */ +#define WL_ECCDUMP_ROW_SIZE_WORD 3 + +/* otpECCstatus */ +#define OTP_ECC_ENAB_SHIFT 13 +#define OTP_ECC_ENAB_MASK 0x7 +#define OTP_ECC_CORR_ST_SHIFT 12 +#define OTP_ECC_CORR_ST_MASK 0x1 +#define OTP_ECC_DBL_ERR_SHIFT 11 +#define OTP_ECC_DBL_ERR_MASK 0x1 +#define OTP_ECC_DED_ST_SHIFT 10 +#define OTP_ECC_DED_ST_MASK 0x1 +#define OTP_ECC_SEC_ST_SHIFT 9 +#define OTP_ECC_SEC_ST_MASK 0x1 +#define OTP_ECC_DATA_SHIFT 0 +#define OTP_ECC_DATA_MASK 0x7f + +/* OTP_ECC_CORR_ST field */ +#define OTP_ECC_MODE 1 +#define OTP_NO_ECC_MODE 0 + +/* OTP_ECC_ENAB field (bit15:13) : + * When 2 or 3 bits are set, + * it indicates that OTP ECC is enabled on the last row read. 
+ * Otherwise, ECC is disabled + */ +#define OTP_ECC_ENAB(val) \ + (bcm_bitcount((uint8 *)&(val), sizeof(uint8)) > 1) + +#define WL_LEAKY_AP_STATS_GT_TYPE 0 +#define WL_LEAKY_AP_STATS_PKT_TYPE 1 +typedef struct wlc_leaked_infra_guard_marker { + /* type field for this TLV: WL_LEAKY_AP_STATS_GT_TYPE */ + uint16 type; + /* length field for this TLV */ + uint16 len; + /* guard sample sequence number; Updated by 1 on every guard sample */ + uint32 seq_number; + /* Guard time start time (tsf; PS indicated and acked) */ + uint32 start_time; + /* tsf timestamp for the GT end event */ + uint32 gt_tsf_l; + /* Guard time period in ms */ + uint16 guard_duration; + /* Number PPDUs in the notification */ + uint16 num_pkts; + /* Flags to indicate some states see below */ + uint8 flag; + /* pad for 32-bit alignment */ + uint8 reserved[3]; +} wlc_leaked_infra_guard_marker_t; + +/* Flag information */ +#define WL_LEAKED_GUARD_TIME_NONE 0 /* Not in any guard time */ +#define WL_LEAKED_GUARD_TIME_FRTS (0x01 << 0) /* Normal FRTS power save */ +#define WL_LEAKED_GUARD_TIME_SCAN (0x01 << 1) /* Channel switch due to scanning */ +#define WL_LEAKED_GUARD_TIME_AWDL_PSF (0x01 << 2) /* Channel switch due to AWDL PSF */ +#define WL_LEAKED_GUARD_TIME_AWDL_AW (0x01 << 3) /* Channel switch due to AWDL AW */ +#define WL_LEAKED_GUARD_TIME_INFRA_STA (0x01 << 4) /* generic type infra sta channel switch */ +#define WL_LEAKED_GUARD_TIME_TERMINATED (0x01 << 7) /* indicate a GT is terminated early */ + +typedef struct wlc_leaked_infra_packet_stat { + uint16 type; /* type field for this TLV: WL_LEAKY_AP_STATS_PKT_TYPE */ + uint16 len; /* length field for this TLV */ + uint16 ppdu_len_bytes; /* PPDU packet length in bytes */ + uint16 num_mpdus; /* number of the MPDUs in the PPDU */ + uint32 ppdu_time; /* PPDU arrival time at the begining of the guard time */ + uint32 rate; /* PPDU packet rate; Received packet's data rate */ + uint16 seq_number; /* sequence number */ + int8 rssi; /* RSSI */ + uint8 tid; /* 
tid */ +} wlc_leaked_infra_packet_stat_t; + +/* Wake timer structure definition */ +#define WAKE_TIMER_VERSION 1 +#define WAKE_TIMER_NOLIMIT 0xFFFF + +typedef struct wake_timer { + uint16 ver; + uint16 len; + uint16 limit; /* number of events to deliver + * 0-disable, 0xffff-indefinite, num_events otherwise + */ + uint16 count; /* number of events delivered since enable (get only) */ + uint16 period; /* timeout/period in milliseconds */ +} wake_timer_t; + +typedef struct wl_desense_restage_gain { + uint16 version; + uint16 length; + uint32 band; + uint8 num_cores; + uint8 desense_array[WL_TX_CHAINS_MAX]; + uint8 PAD[3]; +} wl_desense_restage_gain_t; + +#define MAX_UCM_CHAINS 5 +#define MAX_UCM_PROFILES 4 +#define UCM_PROFILE_VERSION_1 1 + +/* UCM per chain attribute struct */ +typedef struct wlc_btcx_chain_attr { + uint16 length; /* chain attr length, version is same as profile version */ + int8 desense_level; /* per chain desense level */ + int8 ack_pwr_strong_rssi; /* per chain ack power at strong rssi */ + int8 ack_pwr_weak_rssi; /* per chain ack power at weak rssi */ + int8 tx_pwr_strong_rssi; /* per chain tx power at strong rssi */ + int8 tx_pwr_weak_rssi; /* per chain tx power at weak rssi */ + uint8 PAD[1]; /* additional bytes for alignment */ +} wlc_btcx_chain_attr_t; + +typedef struct wlc_btcx_profile_v1 { + uint16 version; /* UCM profile version */ + uint16 length; /* profile size */ + uint16 fixed_length; /* size of the fixed portion of the profile */ + uint8 init; /* profile initialized or not */ + uint8 chain_attr_count; /* Number of elements in chain_attr array */ + uint8 profile_index; /* profile index */ + uint8 mode_strong_wl_bt; /* Mode under strong WLAN and BT RSSI */ + uint8 mode_weak_wl; /* Mode under weak WLAN RSSI */ + uint8 mode_weak_bt; /* Mode under weak BT RSSI */ + uint8 mode_weak_wl_bt; /* Mode under weak BT and WLAN RSSI */ + int8 mode_wl_hi_lo_rssi_thresh; /* Strong to weak WLAN RSSI threshold for mode selection */ + int8 
mode_wl_lo_hi_rssi_thresh; /* Weak to strong WLAN RSSI threshold for mode selection */ + int8 mode_bt_hi_lo_rssi_thresh; /* Strong to weak BT RSSI threshold for mode selection */ + int8 mode_bt_lo_hi_rssi_thresh; /* Weak to strong BT RSSI threshold for mode selection */ + int8 desense_wl_hi_lo_rssi_thresh; /* Strong to weak RSSI threshold for desense */ + int8 desense_wl_lo_hi_rssi_thresh; /* Weak to strong RSSI threshold for desense */ + int8 ack_pwr_wl_hi_lo_rssi_thresh; /* Strong to weak RSSI threshold for ACK power */ + int8 ack_pwr_wl_lo_hi_rssi_thresh; /* Weak to strong RSSI threshold for ACK power */ + int8 tx_pwr_wl_hi_lo_rssi_thresh; /* Strong to weak RSSI threshold for Tx power */ + int8 tx_pwr_wl_lo_hi_rssi_thresh; /* Weak to strong RSSI threshold for Tx power */ + uint8 PAD[1]; /* additional bytes for 4 byte alignment */ + wlc_btcx_chain_attr_t chain_attr[]; /* variable length array with chain attributes */ +} wlc_btcx_profile_v1_t; + +#define SSSR_D11_RESET_SEQ_STEPS 5 +#define SSSR_REG_INFO_VER 0 + +typedef struct sssr_reg_info { + uint16 version; + uint16 length; /* length of the structure validated at host */ + struct { + struct { + uint32 pmuintmask0; + uint32 pmuintmask1; + uint32 resreqtimer; + uint32 macresreqtimer; + uint32 macresreqtimer1; + } base_regs; + } pmu_regs; + struct { + struct { + uint32 intmask; + uint32 powerctrl; + uint32 clockcontrolstatus; + uint32 powerctrl_mask; + } base_regs; + } chipcommon_regs; + struct { + struct { + uint32 clockcontrolstatus; + uint32 clockcontrolstatus_val; + } base_regs; + struct { + uint32 resetctrl; + uint32 itopoobb; + } wrapper_regs; + } arm_regs; + struct { + struct { + uint32 ltrstate; + uint32 clockcontrolstatus; + uint32 clockcontrolstatus_val; + } base_regs; + struct { + uint32 itopoobb; + } wrapper_regs; + } pcie_regs; + struct { + struct { + uint32 ioctrl; + } wrapper_regs; + uint32 vasip_sr_addr; + uint32 vasip_sr_size; + } vasip_regs; + struct { + struct { + uint32 xmtaddress; + uint32 
xmtdata; + uint32 clockcontrolstatus; + uint32 clockcontrolstatus_val; + } base_regs; + struct { + uint32 resetctrl; + uint32 itopoobb; + uint32 ioctrl; + uint32 ioctrl_resetseq_val[SSSR_D11_RESET_SEQ_STEPS]; + } wrapper_regs; + uint32 sr_size; + } mac_regs[MAX_NUM_D11CORES]; +} sssr_reg_info_t; + +/* ADaptive Power Save(ADPS) structure definition */ +#define WL_ADPS_IOV_MAJOR_VER 1 +#define WL_ADPS_IOV_MINOR_VER 0 +#define WL_ADPS_IOV_MAJOR_VER_SHIFT 8 +#define WL_ADPS_IOV_VER \ + ((WL_ADPS_IOV_MAJOR_VER << WL_ADPS_IOV_MAJOR_VER_SHIFT) | WL_ADPS_IOV_MINOR_VER) + +#define ADPS_NUM_DIR 2 +#define ADPS_RX 0 +#define ADPS_TX 1 + +#define WL_ADPS_IOV_MODE 0x0001 +#define WL_ADPS_IOV_RSSI 0x0002 +#define WL_ADPS_IOV_DUMP 0x0003 +#define WL_ADPS_IOV_DUMP_CLEAR 0x0004 + +#define ADPS_SUMMARY_STEP_NUM 2 +#define ADPS_SUMMARY_STEP_LOW 0 +#define ADPS_SUMMARY_STEP_HIGH 1 + +#define ADPS_SUB_IOV_VERSION_1 1 +#define ADPS_SUB_IOV_VERSION_2 2 + +typedef struct wl_adps_params_v1 { + uint16 version; + uint16 length; + uint8 band; /* band - 2G or 5G */ + uint8 mode; /* operation mode, default = 0 (ADPS disable) */ + uint16 padding; +} wl_adps_params_v1_t; + +typedef struct wl_adps_rssi { + int32 thresh_hi; /* rssi threshold to resume ADPS operation */ + int32 thresh_lo; /* rssi threshold to suspend ADPS operation */ +} wl_adps_rssi_t; + +typedef struct wl_adps_rssi_params_v1 { + uint16 version; + uint16 length; + uint8 band; + uint8 padding[3]; + wl_adps_rssi_t rssi; +} wl_adps_rssi_params_v1_t; + +typedef struct adps_stat_elem { + uint32 duration; /* each step duration time (mSec) */ + uint32 counts; /* each step hit count number */ +} adps_stat_elem_t; + +typedef struct wl_adps_dump_summary_v1 { + uint16 version; + uint16 length; + uint8 mode; /* operation mode: On/Off */ + uint8 flags; /* restrict flags */ + uint8 current_step; /* current step */ + uint8 padding; + adps_stat_elem_t stat[ADPS_SUMMARY_STEP_NUM]; /* statistics */ +} wl_adps_dump_summary_v1_t; + +typedef struct 
wlc_btc_2gchain_dis { + uint16 ver; + uint16 len; + uint8 chain_dis; + uint8 flag; +} wlc_btc_2gchain_dis_t; + +#define WLC_BTC_2GCHAIN_DIS_REASSOC 0x1 +#define WLC_BTC_2GCHAIN_DIS_VER1 0x1 +#define WLC_BTC_2GCHAIN_DIS_VER1_LEN 6 + +enum wl_rpsnoa_cmd_ids { + WL_RPSNOA_CMD_ENABLE = 1, + WL_RPSNOA_CMD_STATUS, + WL_RPSNOA_CMD_PARAMS, + WL_RPSNOA_CMD_LAST +}; + +typedef struct rpsnoa_cmnhdr { + uint16 ver; /* cmd structure version */ + uint16 len; /* cmd structure len */ + uint32 subcmd; + uint32 cnt; +} rpsnoa_cmnhdr_t; + +typedef struct rpsnoa_data { + int16 band; + int16 value; +} rpsnoa_data_t; + +typedef struct rpsnoa_param { + uint16 band; + uint8 level; + uint8 stas_assoc_check; + uint32 pps; + uint32 quiet_time; +} rpsnoa_param_t; + +typedef struct rpsnoa_iovar { + rpsnoa_cmnhdr_t hdr; + rpsnoa_data_t data[1]; +} rpsnoa_iovar_t; + +typedef struct rpsnoa_iovar_params { + rpsnoa_cmnhdr_t hdr; + rpsnoa_param_t param[1]; +} rpsnoa_iovar_params_t; + +/* Per-interface reportable stats types */ +enum wl_ifstats_xtlv_id { + /* global */ + WL_IFSTATS_XTLV_SLICE_INDEX = 1, + WL_IFSTATS_XTLV_IF_INDEX = 2, + WL_IFSTATS_XTLV_MAC_ADDR = 3, + WL_IFSTATS_XTLV_REPORT_CMD = 4, /* Comes in an iovar */ + WL_IFSTATS_XTLV_BUS_PCIE = 5, + + /* Report data across all SCBs using ecounters */ + WL_IFSTATS_XTLV_WL_STA_INFO_ECOUNTERS = 0x100, + + /* Per-slice information + * Per-interface reporting could also include slice specific data + */ + /* xtlv container for reporting */ + WL_IFSTATS_XTLV_WL_SLICE = 0x301, + /* Per-slice AMPDU stats */ + WL_IFSTATS_XTLV_WL_SLICE_AMPDU_DUMP = 0x302, + /* Per-slice BTCOEX stats */ + WL_IFSTATS_XTLV_WL_SLICE_BTCOEX = 0x303, + /* V11_WLCNTRS used in ecounters */ + WL_IFSTATS_XTLV_WL_SLICE_V11_WLCNTRS = 0x304, + /* V30_WLCNTRS Used in ecounters */ + WL_IFSTATS_XTLV_WL_SLICE_V30_WLCNTRS = 0x305, + + /* Per-interface */ + /* XTLV container for reporting */ + WL_IFSTATS_XTLV_IF = 0x501, + /* Generic stats applicable to all IFs */ + WL_IFSTATS_XTLV_GENERIC 
= 0x502, + /* Infra specific */ + WL_IFSTATS_XTLV_INFRA_SPECIFIC = 0x503, + /* MGT counters infra and softAP */ + WL_IFSTATS_XTLV_MGT_CNT = 0x504, + /* AMPDU stats on per-IF */ + WL_IFSTATS_XTLV_AMPDU_DUMP = 0x505, + WL_IFSTATS_XTLV_IF_SPECIFIC = 0x506 +}; + +/* interface specific mgt count */ +#define WL_MGT_STATS_VERSION_V1 1 +/* Associated stats type: WL_IFSTATS_MGT_CNT */ +typedef struct { + uint16 version; + uint8 pad[2]; + + /* detailed control/management frames */ + uint32 txnull; + uint32 rxnull; + uint32 txqosnull; + uint32 rxqosnull; + uint32 txassocreq; + uint32 rxassocreq; + uint32 txreassocreq; + uint32 rxreassocreq; + uint32 txdisassoc; + uint32 rxdisassoc; + uint32 txassocrsp; + uint32 rxassocrsp; + uint32 txreassocrsp; + uint32 rxreassocrsp; + uint32 txauth; + uint32 rxauth; + uint32 txdeauth; + uint32 rxdeauth; + uint32 txprobereq; + uint32 rxprobereq; + uint32 txprobersp; + uint32 rxprobersp; + uint32 txaction; + uint32 rxaction; + uint32 txpspoll; + uint32 rxpspoll; +} wl_if_mgt_stats_t; + +#define WL_INFRA_STATS_VERSION_V1 1 +/* Associated stats type: WL_IFSTATS_INFRA_SPECIFIC */ +typedef struct wl_infra_stats { + uint16 version; /**< version of the structure */ + uint8 pad[2]; + uint32 rxbeaconmbss; + uint32 tbtt; +} wl_if_infra_stats_t; + +typedef struct csa_event_data { + chanspec_t chan_old; + dot11_ext_csa_ie_t ecsa; + dot11_mesh_csp_ie_t mcsp; + dot11_wide_bw_chan_switch_ie_t wbcs; + uint8 PAD; +} csa_event_data_t; + #endif /* _wlioctl_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/devctrl_if/wlioctl_defs.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/wlioctl_defs.h similarity index 92% rename from drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/devctrl_if/wlioctl_defs.h rename to drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/wlioctl_defs.h index 3cae18d8a9d8..8038588d58e4 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/devctrl_if/wlioctl_defs.h +++ 
b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/wlioctl_defs.h @@ -4,7 +4,7 @@ * * Definitions subject to change without notice. * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -27,7 +27,7 @@ * * <> * - * $Id: wlioctl_defs.h 403826 2013-05-22 16:40:55Z $ + * $Id: wlioctl_defs.h 677667 2017-01-04 07:43:05Z $ */ @@ -37,32 +37,36 @@ - /* All builds use the new 11ac ratespec/chanspec */ #undef D11AC_IOTYPES #define D11AC_IOTYPES +#ifndef USE_NEW_RSPEC_DEFS /* WL_RSPEC defines for rate information */ -#define WL_RSPEC_RATE_MASK 0x000000FF /* rate or HT MCS value */ -#define WL_RSPEC_VHT_MCS_MASK 0x0000000F /* VHT MCS value */ -#define WL_RSPEC_VHT_NSS_MASK 0x000000F0 /* VHT Nss value */ -#define WL_RSPEC_VHT_NSS_SHIFT 4 /* VHT Nss value shift */ -#define WL_RSPEC_TXEXP_MASK 0x00000300 -#define WL_RSPEC_TXEXP_SHIFT 8 -#define WL_RSPEC_BW_MASK 0x00070000 /* bandwidth mask */ -#define WL_RSPEC_BW_SHIFT 16 /* bandwidth shift */ -#define WL_RSPEC_STBC 0x00100000 /* STBC encoding, Nsts = 2 x Nss */ -#define WL_RSPEC_TXBF 0x00200000 /* bit indicates TXBF mode */ -#define WL_RSPEC_LDPC 0x00400000 /* bit indicates adv coding in use */ -#define WL_RSPEC_SGI 0x00800000 /* Short GI mode */ -#define WL_RSPEC_ENCODING_MASK 0x03000000 /* Encoding of Rate/MCS field */ -#define WL_RSPEC_OVERRIDE_RATE 0x40000000 /* bit indicate to override mcs only */ -#define WL_RSPEC_OVERRIDE_MODE 0x80000000 /* bit indicates override both rate & mode */ +#define WL_RSPEC_RATE_MASK 0x000000FF /* rate or HT MCS value */ +#define WL_RSPEC_HE_MCS_MASK 0x0000000F /* HE MCS value */ +#define WL_RSPEC_HE_NSS_MASK 0x000000F0 /* HE Nss value */ +#define WL_RSPEC_HE_NSS_SHIFT 4 /* HE Nss value shift */ +#define WL_RSPEC_VHT_MCS_MASK 0x0000000F /* VHT MCS value */ +#define WL_RSPEC_VHT_NSS_MASK 
0x000000F0 /* VHT Nss value */ +#define WL_RSPEC_VHT_NSS_SHIFT 4 /* VHT Nss value shift */ +#define WL_RSPEC_TXEXP_MASK 0x00000300 +#define WL_RSPEC_TXEXP_SHIFT 8 +#define WL_RSPEC_BW_MASK 0x00070000 /* bandwidth mask */ +#define WL_RSPEC_BW_SHIFT 16 /* bandwidth shift */ +#define WL_RSPEC_STBC 0x00100000 /* STBC encoding, Nsts = 2 x Nss */ +#define WL_RSPEC_TXBF 0x00200000 /* bit indicates TXBF mode */ +#define WL_RSPEC_LDPC 0x00400000 /* bit indicates adv coding in use */ +#define WL_RSPEC_SGI 0x00800000 /* Short GI mode */ +#define WL_RSPEC_ENCODING_MASK 0x03000000 /* Encoding of Rate/MCS field */ +#define WL_RSPEC_OVERRIDE_RATE 0x40000000 /* bit indicate to override mcs only */ +#define WL_RSPEC_OVERRIDE_MODE 0x80000000 /* bit indicates override rate & mode */ /* WL_RSPEC_ENCODING field defs */ -#define WL_RSPEC_ENCODE_RATE 0x00000000 /* Legacy rate is stored in RSPEC_RATE_MASK */ -#define WL_RSPEC_ENCODE_HT 0x01000000 /* HT MCS is stored in RSPEC_RATE_MASK */ -#define WL_RSPEC_ENCODE_VHT 0x02000000 /* VHT MCS and Nss is stored in RSPEC_RATE_MASK */ +#define WL_RSPEC_ENCODE_RATE 0x00000000 /* Legacy rate is stored in RSPEC_RATE_MASK */ +#define WL_RSPEC_ENCODE_HT 0x01000000 /* HT MCS is stored in RSPEC_RATE_MASK */ +#define WL_RSPEC_ENCODE_VHT 0x02000000 /* VHT MCS and Nss is stored in RSPEC_RATE_MASK */ +#define WL_RSPEC_ENCODE_HE 0x03000000 /* HE MCS and Nss is stored in RSPEC_RATE_MASK */ /* WL_RSPEC_BW field defs */ #define WL_RSPEC_BW_UNSPECIFIED 0 @@ -74,6 +78,10 @@ #define WL_RSPEC_BW_5MHZ 0x00060000 #define WL_RSPEC_BW_2P5MHZ 0x00070000 +#define HIGHEST_SINGLE_STREAM_MCS 7 /* MCS values greater than this enable multiple streams */ + +#endif /* !USE_NEW_RSPEC_DEFS */ + /* Legacy defines for the nrate iovar */ #define OLD_NRATE_MCS_INUSE 0x00000080 /* MSC in use,indicates b0-6 holds an mcs */ #define OLD_NRATE_RATE_MASK 0x0000007f /* rate/mcs value */ @@ -89,13 +97,10 @@ #define OLD_NRATE_STF_STBC 2 /* stf mode STBC */ #define OLD_NRATE_STF_SDM 3 /* stf 
mode SDM */ -#define HIGHEST_SINGLE_STREAM_MCS 7 /* MCS values greater than this enable multiple streams */ - #define WLC_11N_N_PROP_MCS 6 #define WLC_11N_FIRST_PROP_MCS 87 #define WLC_11N_LAST_PROP_MCS 102 - #define MAX_CCA_CHANNELS 38 /* Max number of 20 Mhz wide channels */ #define MAX_CCA_SECS 60 /* CCA keeps this many seconds history */ @@ -143,7 +148,8 @@ #define WL_STA_RIFS_CAP 0x00080000 /* rifs enabled */ #define WL_STA_VHT_CAP 0x00100000 /* STA VHT(11ac) capable */ #define WL_STA_WPS 0x00200000 /* WPS state */ - +#define WL_STA_DWDS_CAP 0x01000000 /* DWDS CAP */ +#define WL_STA_DWDS 0x02000000 /* DWDS active */ #define WL_WDS_LINKUP WL_STA_WDS_LINKUP /* deprecated */ /* STA HT cap fields */ @@ -202,7 +208,6 @@ #define WL_BSSTYPE_INFRA 1 #define WL_BSSTYPE_ANY 2 /* deprecated */ #define WL_BSSTYPE_MESH 3 - /* Bitmask for scan_type */ #define WL_SCANFLAGS_PASSIVE 0x01 /* force passive scan */ #define WL_SCANFLAGS_RESERVED 0x02 /* Reserved */ @@ -214,6 +219,8 @@ * by default parallel scan will be disabled if actcb_fn_t * is provided. 
*/ +#define WL_SCANFLAGS_SISO 0x40 /* Use 1 RX chain for scanning */ +#define WL_SCANFLAGS_MIMO 0x80 /* Force MIMO scanning */ /* wl_iscan_results status values */ #define WL_SCAN_RESULTS_SUCCESS 0 @@ -262,7 +269,6 @@ #define WL_SCAN_ACTION_CONTINUE 2 #define WL_SCAN_ACTION_ABORT 3 - #define ANTENNA_NUM_1 1 /* total number of antennas to be used */ #define ANTENNA_NUM_2 2 #define ANTENNA_NUM_3 3 @@ -334,6 +340,7 @@ /* check this magic number */ #define WLC_IOCTL_MAGIC 0x14e46c77 + /* bss_info_cap_t flags */ #define WL_BSS_FLAGS_FROM_BEACON 0x01 /* bss_info derived from beacon */ #define WL_BSS_FLAGS_FROM_CACHE 0x02 /* bss_info collected from cache */ @@ -344,6 +351,10 @@ #define WL_BSS_FLAGS_SNR_INVALID 0x40 /* BSS contains invalid SNR */ #define WL_BSS_FLAGS_NF_INVALID 0x80 /* BSS contains invalid noise floor */ +/* bit definitions for bcnflags in wl_bss_info */ +#define WL_BSS_BCNFLAGS_INTERWORK_PRESENT 0x01 /* beacon had IE, accessnet valid */ +#define WL_BSS_BCNFLAGS_INTERWORK_PRESENT_VALID 0x02 /* on indicates support for this API */ + /* bssinfo flag for nbss_cap */ #define VHT_BI_SGI_80MHZ 0x00000100 #define VHT_BI_80MHZ 0x00000200 @@ -386,6 +397,26 @@ #define CRYPTO_ALGO_NONE CRYPTO_ALGO_OFF +/* algo bit vector */ +#define KEY_ALGO_MASK(_algo) (1 << _algo) + +#if defined(BCMEXTCCX) +#define KEY_ALGO_MASK_CCX (KEY_ALGO_MASK(CRYPTO_ALGO_CKIP) | \ + KEY_ALGO_MASK(CRYPTO_ALGO_CKIP_MMH) | \ + KEY_ALGO_MASK(CRYPTO_ALGO_WEP_MMH)) +#endif + +#define KEY_ALGO_MASK_WEP (KEY_ALGO_MASK(CRYPTO_ALGO_WEP1) | \ + KEY_ALGO_MASK(CRYPTO_ALGO_WEP128) | \ + KEY_ALGO_MASK(CRYPTO_ALGO_NALG)) + +#define KEY_ALGO_MASK_AES (KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM) | \ + KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM256) | \ + KEY_ALGO_MASK(CRYPTO_ALGO_AES_GCM) | \ + KEY_ALGO_MASK(CRYPTO_ALGO_AES_GCM256)) +#define KEY_ALGO_MASK_TKIP (KEY_ALGO_MASK(CRYPTO_ALGO_TKIP)) +#define KEY_ALGO_MASK_WAPI (KEY_ALGO_MASK(CRYPTO_ALGO_SMS4)) + #define WSEC_GEN_MIC_ERROR 0x0001 #define WSEC_GEN_REPLAY 0x0002 #define 
WSEC_GEN_ICV_ERROR 0x0004 @@ -411,14 +442,15 @@ #define WSEC_SWFLAG 0x0008 #define SES_OW_ENABLED 0x0040 /* to go into transition mode without setting wep */ -/* wsec macros for operating on the above definitions */ #define WSEC_WEP_ENABLED(wsec) ((wsec) & WEP_ENABLED) #define WSEC_TKIP_ENABLED(wsec) ((wsec) & TKIP_ENABLED) #define WSEC_AES_ENABLED(wsec) ((wsec) & AES_ENABLED) #define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED)) + #define WSEC_SES_OW_ENABLED(wsec) ((wsec) & SES_OW_ENABLED) + /* Following macros are not used any more. Just kept here to * avoid build issue in BISON/CARIBOU branch */ @@ -784,9 +816,9 @@ /* #define WLC_LAST 310 */ /* Never used - can be reused */ #define WLC_SET_INTERFERENCE_OVERRIDE_MODE 311 /* set inter mode override */ #define WLC_GET_INTERFERENCE_OVERRIDE_MODE 312 /* get inter mode override */ -/* #define WLC_GET_WAI_RESTRICT 313 */ /* for WAPI, deprecated use iovar instead */ -/* #define WLC_SET_WAI_RESTRICT 314 */ /* for WAPI, deprecated use iovar instead */ -/* #define WLC_SET_WAI_REKEY 315 */ /* for WAPI, deprecated use iovar instead */ +/* #define WLC_GET_WAI_RESTRICT 313 */ +/* #define WLC_SET_WAI_RESTRICT 314 */ +/* #define WLC_SET_WAI_REKEY 315 */ #define WLC_SET_NAT_CONFIG 316 /* for configuring NAT filter driver */ #define WLC_GET_NAT_STATE 317 #define WLC_GET_TXBF_RATESET 318 @@ -894,6 +926,7 @@ #define WLC_BAND_5G 1 /* 5 Ghz */ #define WLC_BAND_2G 2 /* 2.4 Ghz */ #define WLC_BAND_ALL 3 /* all bands */ +#define WLC_BAND_INVALID -1 /* Invalid band */ /* band range returned by band_range iovar */ #define WL_CHAN_FREQ_RANGE_2G 0 @@ -913,7 +946,6 @@ #define WL_CHAN_FREQ_RANGE_5G_BAND3 4 #define WL_CHAN_FREQ_RANGE_5G_4BAND 5 - /* SROM12 */ #define WL_CHAN_FREQ_RANGE_5G_BAND4 5 #define WL_CHAN_FREQ_RANGE_2G_40 6 @@ -1010,7 +1042,6 @@ #define WLC_BW_10MHZ_BIT (1<<4) #define WLC_BW_5MHZ_BIT (1<<5) #define WLC_BW_2P5MHZ_BIT (1<<6) - /* Bandwidth capabilities */ #define WLC_BW_CAP_20MHZ 
(WLC_BW_20MHZ_BIT) #define WLC_BW_CAP_40MHZ (WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT) @@ -1029,7 +1060,6 @@ #define WL_BW_CAP_2P5MHZ(bw_cap)(((bw_cap) & WLC_BW_2P5MHZ_BIT) ? TRUE : FALSE) #define WL_BW_CAP_5MHZ(bw_cap) (((bw_cap) & WLC_BW_5MHZ_BIT) ? TRUE : FALSE) #define WL_BW_CAP_10MHZ(bw_cap) (((bw_cap) & WLC_BW_10MHZ_BIT) ? TRUE : FALSE) - /* values to force tx/rx chain */ #define WLC_N_TXRX_CHAIN0 0 #define WLC_N_TXRX_CHAIN1 1 @@ -1038,6 +1068,7 @@ #define WLC_N_SGI_20 0x01 #define WLC_N_SGI_40 0x02 #define WLC_VHT_SGI_80 0x04 +#define WLC_VHT_SGI_160 0x08 /* when sgi_tx==WLC_SGI_ALL, bypass rate selection, enable sgi for all mcs */ #define WLC_SGI_ALL 0x02 @@ -1057,8 +1088,10 @@ #define ACPHY_ACI_HWACI_PKTGAINLMT 2 /* bit 1 */ #define ACPHY_ACI_W2NB_PKTGAINLMT 4 /* bit 2 */ #define ACPHY_ACI_PREEMPTION 8 /* bit 3 */ -#define ACPHY_HWACI_MITIGATION 16 /* bit 4 */ -#define ACPHY_ACI_MAX_MODE 31 +#define ACPHY_HWACI_MITIGATION 16 /* bit 4 */ +#define ACPHY_LPD_PREEMPTION 32 /* bit 5 */ +#define ACPHY_HWOBSS_MITIGATION 64 /* bit 6 */ +#define ACPHY_ACI_MAX_MODE 127 /* AP environment */ #define AP_ENV_DETECT_NOT_USED 0 /* We aren't using AP environment detection */ @@ -1085,7 +1118,6 @@ #define WL_OTA_TEST_MAX_NUM_RATE 30 #define WL_OTA_TEST_MAX_NUM_SEQ 100 #define WL_OTA_TEST_MAX_NUM_RSSI 85 - #define WL_THRESHOLD_LO_BAND 70 /* range from 5250MHz - 5350MHz */ /* radar iovar SET defines */ @@ -1146,7 +1178,7 @@ #define WL_TX_POWER_F_VHT 0x20 #define WL_TX_POWER_F_OPENLOOP 0x40 #define WL_TX_POWER_F_PROP11NRATES 0x80 - +#define WL_TX_POWER_F_UNIT_QDBM 0x100 /* Message levels */ #define WL_ERROR_VAL 0x00000001 #define WL_TRACE_VAL 0x00000002 @@ -1171,6 +1203,7 @@ #define WL_LOFT_VAL 0x00000000 /* retired in TOT on 6/10/2009 */ #define WL_PFN_VAL 0x00040000 /* Using retired LOFT_VAL */ #define WL_REGULATORY_VAL 0x00080000 +#define WL_CSA_VAL 0x00080000 /* Reusing REGULATORY_VAL due to lackof bits */ #define WL_TAF_VAL 0x00100000 #define WL_RADAR_VAL 0x00000000 /* retired 
in TOT on 6/10/2009 */ #define WL_WDI_VAL 0x00200000 /* Using retired WL_RADAR_VAL VAL */ @@ -1186,19 +1219,22 @@ #define WL_AMSDU_VAL 0x10000000 #define WL_AMPDU_VAL 0x20000000 #define WL_FFPLD_VAL 0x40000000 +#define WL_ROAM_EXP_VAL 0x80000000 /* wl_msg_level is full. For new bits take the next one and AND with * wl_msg_level2 in wl_dbg.h */ #define WL_DPT_VAL 0x00000001 /* re-using WL_DPT_VAL */ +/* re-using WL_MESH_VAL */ +#define WL_NATOE_VAL 0x00000001 #define WL_MESH_VAL 0x00000001 #define WL_SCAN_VAL 0x00000002 #define WL_WOWL_VAL 0x00000004 #define WL_COEX_VAL 0x00000008 #define WL_RTDC_VAL 0x00000010 #define WL_PROTO_VAL 0x00000020 -#define WL_BTA_VAL 0x00000040 +#define WL_SWDIV_VAL 0x00000040 #define WL_CHANINT_VAL 0x00000080 #define WL_WMF_VAL 0x00000100 #define WL_P2P_VAL 0x00000200 @@ -1216,14 +1252,18 @@ #define WL_P2PO_VAL 0x00200000 #define WL_TBTT_VAL 0x00400000 #define WL_FBT_VAL 0x00800000 +#define WL_RRM_VAL 0x00800000 /* reuse */ #define WL_MQ_VAL 0x01000000 /* This level is currently used in Phoenix2 only */ #define WL_SRSCAN_VAL 0x02000000 #define WL_WNM_VAL 0x04000000 +/* re-using WL_WNM_VAL for MBO */ +#define WL_MBO_VAL 0x04000000 #define WL_PWRSEL_VAL 0x10000000 #define WL_NET_DETECT_VAL 0x20000000 +#define WL_OCE_VAL 0x20000000 /* reuse */ #define WL_PCIE_VAL 0x40000000 #define WL_PMDUR_VAL 0x80000000 @@ -1273,8 +1313,10 @@ /* number of bytes needed to define a proper bit mask for MAC event reporting */ #define BCMIO_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) #define BCMIO_NBBY 8 -#define WL_EVENTING_MASK_LEN 16 +#define WL_EVENTING_MASK_LEN (16+4) +#define WL_EVENTING_MASK_EXT_LEN \ + MAX(WL_EVENTING_MASK_LEN, (ROUNDUP(WLC_E_LAST, NBBY)/NBBY)) /* join preference types */ #define WL_JOIN_PREF_RSSI 1 /* by RSSI */ @@ -1305,13 +1347,23 @@ */ #define SPECT_MNGMT_LOOSE_11H_D 4 /* operation defined above */ -#define WL_CHAN_VALID_HW (1 << 0) /* valid with current HW */ -#define WL_CHAN_VALID_SW (1 << 1) /* valid with current country 
setting */ -#define WL_CHAN_BAND_5G (1 << 2) /* 5GHz-band channel */ -#define WL_CHAN_RADAR (1 << 3) /* radar sensitive channel */ -#define WL_CHAN_INACTIVE (1 << 4) /* temporarily inactive due to radar */ -#define WL_CHAN_PASSIVE (1 << 5) /* channel is in passive mode */ -#define WL_CHAN_RESTRICTED (1 << 6) /* restricted use channel */ +/* bit position in per_chan_info; these depend on current country/regulatory domain */ +#define WL_CHAN_VALID_HW (1 << 0) /* valid with current HW */ +#define WL_CHAN_VALID_SW (1 << 1) /* valid with current country setting */ +#define WL_CHAN_BAND_5G (1 << 2) /* 5GHz-band channel */ +#define WL_CHAN_RADAR (1 << 3) /* radar sensitive channel */ +#define WL_CHAN_INACTIVE (1 << 4) /* temporarily inactive due to radar */ +#define WL_CHAN_PASSIVE (1 << 5) /* channel is in passive mode */ +#define WL_CHAN_RESTRICTED (1 << 6) /* restricted use channel */ +#define WL_CHAN_RADAR_EU_WEATHER (1 << 7) /* EU Radar weather channel. Implies an + * EU Radar channel. + */ +#define WL_CHAN_CLM_RESTRICTED (1 << 8) /* channel restricted in CLM + * (i.e. 
by default) + */ + +/* following definition is for precommit; will be removed once wl, acsd switch to the new def */ +#define WL_CHAN_WEATHER_RADAR WL_CHAN_RADAR_EU_WEATHER /* BTC mode used by "btc_mode" iovar */ #define WL_BTC_DISABLE 0 /* disable BT coexistence */ @@ -1410,12 +1462,17 @@ #define WL_PKTENG_PER_RX_WITH_ACK_START 0x05 #define WL_PKTENG_PER_TX_WITH_ACK_START 0x06 #define WL_PKTENG_PER_RX_STOP 0x08 +#define WL_PKTENG_PER_RU_TX_START 0x09 #define WL_PKTENG_PER_MASK 0xff #define WL_PKTENG_SYNCHRONOUS 0x100 /* synchronous flag */ #define WL_PKTENG_SYNCHRONOUS_UNBLK 0x200 /* synchronous unblock flag */ - -#define WL_PKTENG_MAXPKTSZ 16384 /* max pktsz limit for pkteng */ +#ifdef PKTENG_LONGPKTSZ +/* max pktsz limit for pkteng */ +#define WL_PKTENG_MAXPKTSZ PKTENG_LONGPKTSZ +#else +#define WL_PKTENG_MAXPKTSZ 16384 +#endif #define NUM_80211b_RATES 4 #define NUM_80211ag_RATES 8 @@ -1435,8 +1492,7 @@ #define WL_WOWL_EAPID (1 << 7) /* Wakeup after receipt of EAP-Identity Req */ #define WL_WOWL_PME_GPIO (1 << 8) /* Wakeind via PME(0) or GPIO(1) */ #define WL_WOWL_ULP_BAILOUT (1 << 8) /* wakeind via unknown pkt by basic ULP-offloads - - * WL_WOWL_ULP_BAILOUT - same as WL_WOWL_PME_GPIO used only for DONGLE BUILDS and - * not WLC_HIGH_ONLY case + * WL_WOWL_ULP_BAILOUT - same as WL_WOWL_PME_GPIO used only for DONGLE BUILDS */ #define WL_WOWL_NEEDTKIP1 (1 << 9) /* need tkip phase 1 key to be updated by the driver */ #define WL_WOWL_GTK_FAILURE (1 << 10) /* enable wakeup if GTK fails */ @@ -1455,6 +1511,7 @@ #define WL_WOWL_MIC_FAIL (1 << 23) /* Offloads detected MIC failure(s) */ #define WL_WOWL_UNASSOC (1 << 24) /* Wakeup in Unassociated state (Net/Magic Pattern) */ #define WL_WOWL_SECURE (1 << 25) /* Wakeup if received matched secured pattern */ +#define WL_WOWL_EXCESS_WAKE (1 << 26) /* Excess wake */ #define WL_WOWL_LINKDOWN (1 << 31) /* Link Down indication in WoWL mode */ #define WL_WOWL_TCPKEEP (1 << 20) /* temp copy to satisfy automerger */ @@ -1466,7 +1523,6 
@@ #define MAGIC_PKT_MINLEN 102 /* Magic pkt min length is 6 * 0xFF + 16 * ETHER_ADDR_LEN */ #define MAGIC_PKT_NUM_MAC_ADDRS 16 - /* Overlap BSS Scan parameters default, minimum, maximum */ #define WLC_OBSS_SCAN_PASSIVE_DWELL_DEFAULT 20 /* unit TU */ #define WLC_OBSS_SCAN_PASSIVE_DWELL_MIN 5 /* unit TU */ @@ -1537,7 +1593,9 @@ #define VNDR_IE_PRBREQ_FLAG 0x10 #define VNDR_IE_ASSOCREQ_FLAG 0x20 #define VNDR_IE_IWAPID_FLAG 0x40 /* vendor IE in IW advertisement protocol ID field */ +#define VNDR_IE_AUTHREQ_FLAG 0x80 #define VNDR_IE_CUSTOM_FLAG 0x100 /* allow custom IE id */ +#define VNDR_IE_DISASSOC_FLAG 0x200 #if defined(WLP2P) /* P2P Action Frames flags (spec ordered) */ @@ -1568,7 +1626,6 @@ #define APCS_IOCTL 1 #define APCS_CHANIM 2 #define APCS_CSTIMER 3 -#define APCS_BTA 4 #define APCS_TXDLY 5 #define APCS_NONACSD 6 #define APCS_DFS_REENTRY 7 @@ -1588,7 +1645,13 @@ #define CCASTATS_TXOP 6 #define CCASTATS_GDTXDUR 7 #define CCASTATS_BDTXDUR 8 + +#ifndef WLCHANIM_V2 #define CCASTATS_MAX 9 +#else /* WLCHANIM_V2 */ +#define CCASTATS_MYRX 9 +#define CCASTATS_MAX 10 +#endif /* WLCHANIM_V2 */ #define WL_CHANIM_COUNT_ALL 0xff #define WL_CHANIM_COUNT_ONE 0x1 @@ -1732,8 +1795,8 @@ #define WL_WNM_DMS 0x00000040 #define WL_WNM_FMS 0x00000080 #define WL_WNM_NOTIF 0x00000100 -#define WL_WNM_MAX 0x00000200 - +#define WL_WNM_WBTEXT 0x00000200 +#define WL_WNM_MAX 0x00000400 #ifdef WLWNM_BRCM #define BRCM_WNM_FEATURE_SET\ (WL_WNM_PROXYARP | \ @@ -1746,7 +1809,6 @@ WL_WNM_NOTIF | \ 0) #endif /* WLWNM_BRCM */ - #ifndef ETHER_MAX_DATA #define ETHER_MAX_DATA 1500 #endif /* ETHER_MAX_DATA */ @@ -1865,8 +1927,14 @@ #define BESTN_BSSID_ONLY_MASK 0x1000 #define PFN_VERSION 2 +#ifdef PFN_SCANRESULT_2 +#define PFN_SCANRESULT_VERSION 2 +#else #define PFN_SCANRESULT_VERSION 1 +#endif /* PFN_SCANRESULT_2 */ +#ifndef MAX_PFN_LIST_COUNT #define MAX_PFN_LIST_COUNT 16 +#endif /* MAX_PFN_LIST_COUNT */ #define PFN_COMPLETE 1 #define PFN_INCOMPLETE 0 @@ -1881,6 +1949,12 @@ #define 
WL_PFN_SUPPRESSFOUND_MASK 0x08 #define WL_PFN_SUPPRESSLOST_MASK 0x10 +#define WL_PFN_SSID_A_BAND_TRIG 0x20 +#define WL_PFN_SSID_BG_BAND_TRIG 0x40 +#define WL_PFN_SSID_IMPRECISE_MATCH 0x80 +#define WL_PFN_SSID_SAME_NETWORK 0x10000 +#define WL_PFN_SUPPRESS_AGING_MASK 0x20000 +#define WL_PFN_FLUSH_ALL_SSIDS 0x40000 #define WL_PFN_RSSI_MASK 0xff00 #define WL_PFN_RSSI_SHIFT 8 @@ -1896,9 +1970,12 @@ #define PNO_SCAN_MAX_FW_SEC PNO_SCAN_MAX_FW/1000 /* max time scan time in SEC */ #define PNO_SCAN_MIN_FW_SEC 10 /* min time scan time in SEC */ #define WL_PFN_HIDDEN_MASK 0x4 +#define MAX_SSID_WHITELIST_NUM 4 +#define MAX_BSSID_PREF_LIST_NUM 32 +#define MAX_BSSID_BLACKLIST_NUM 32 #ifndef BESTN_MAX -#define BESTN_MAX 8 +#define BESTN_MAX 10 #endif #ifndef MSCAN_MAX @@ -1923,8 +2000,6 @@ #define ARP_MULTIHOMING_MAX 8 /* Maximum local host IP addresses */ #define ND_MULTIHOMING_MAX 10 /* Maximum local host IP addresses */ #define ND_REQUEST_MAX 5 /* Max set of offload params */ - - /* AOAC wake event flag */ #define WAKE_EVENT_NLO_DISCOVERY_BIT 1 #define WAKE_EVENT_AP_ASSOCIATION_LOST_BIT 2 @@ -1932,7 +2007,6 @@ #define WAKE_EVENT_4WAY_HANDSHAKE_REQUEST_BIT 8 #define WAKE_EVENT_NET_PACKET_BIT 0x10 - #define MAX_NUM_WOL_PATTERN 22 /* LOGO requirements min 22 */ @@ -1972,7 +2046,6 @@ #define BCM_DCS_IOVAR 0x1 #define BCM_DCS_UNKNOWN 0xFF - #ifdef PROP_TXSTATUS /* Bit definitions for tlv iovar */ /* @@ -2036,6 +2109,7 @@ #define NET_DETECT_MAX_CHANNELS 50 #endif /* NET_DETECT */ + /* Bit masks for radio disabled status - returned by WL_GET_RADIO */ #define WL_RADIO_SW_DISABLE (1<<0) #define WL_RADIO_HW_DISABLE (1<<1) @@ -2060,8 +2134,18 @@ /* Override bit for WLC_SET_TXPWR. 
if set, ignore other level limits */ #define WL_TXPWR_OVERRIDE (1U<<31) -#define WL_TXPWR_NEG (1U<<30) +#define WL_TXPWR_2G (1U<<30) +#define WL_TXPWR_5G (1U<<29) +#define WL_TXPWR_NEG (1U<<28) +#define WL_TXPWR_MASK (~(0x7<<29)) +#define WL_TXPWR_CORE_MAX (3) +#define WL_TXPWR_CORE0_MASK (0x000000FF) +#define WL_TXPWR_CORE0_SHIFT (0) +#define WL_TXPWR_CORE1_MASK (0x0000FF00) +#define WL_TXPWR_CORE1_SHIFT (8) +#define WL_TXPWR_CORE2_MASK (0x00FF0000) +#define WL_TXPWR_CORE2_SHIFT (16) /* phy types (returned by WLC_GET_PHYTPE) */ #define WLC_PHY_TYPE_A 0 @@ -2112,4 +2196,15 @@ #define AP_ISOLATE_SENDUP_ALL 0x01 #define AP_ISOLATE_SENDUP_MCAST 0x02 +/* Type values for the wl_pwrstats_t data field */ +#define WL_PWRSTATS_TYPE_PHY 0 /**< struct wl_pwr_phy_stats */ +#define WL_PWRSTATS_TYPE_SCAN 1 /**< struct wl_pwr_scan_stats */ +#define WL_PWRSTATS_TYPE_USB_HSIC 2 /**< struct wl_pwr_usb_hsic_stats */ +#define WL_PWRSTATS_TYPE_PM_AWAKE1 3 /**< struct wl_pwr_pm_awake_stats_v1 */ +#define WL_PWRSTATS_TYPE_CONNECTION 4 /* struct wl_pwr_connect_stats; assoc and key-exch time */ +#define WL_PWRSTATS_TYPE_PCIE 6 /**< struct wl_pwr_pcie_stats */ +#define WL_PWRSTATS_TYPE_PM_AWAKE2 7 /**< struct wl_pwr_pm_awake_stats_v2 */ +#define WL_PWRSTATS_TYPE_SDIO 8 /* struct wl_pwr_sdio_stats */ +#define WL_PWRSTATS_TYPE_MIMO_PS_METRICS 9 /* struct wl_mimo_meas_metrics_t */ + #endif /* wlioctl_defs_h */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/wlioctl_utils.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/wlioctl_utils.h index c3fe428580b4..797531cface7 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/wlioctl_utils.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/wlioctl_utils.h @@ -1,7 +1,7 @@ /* * Custom OID/ioctl related helper functions. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -22,7 +22,7 @@ * other than the GPL, without Broadcom's express prior written consent. * <> * - * $Id: wlioctl_utils.h 555740 2015-05-11 10:16:23Z $ + * $Id: wlioctl_utils.h 614820 2016-01-23 17:16:17Z $ */ #ifndef _wlioctl_utils_h_ @@ -44,10 +44,18 @@ extern int cca_analyze(cca_congest_channel_req_t *input[], int num_chans, extern int wl_cntbuf_to_xtlv_format(void *ctx, void *cntbuf, int buflen, uint32 corerev); +extern const char * wl_get_reinit_rc_name(int rc); + /* Get data pointer of wlc layer counters tuple from xtlv formatted counters IOVar buffer. */ #define GET_WLCCNT_FROM_CNTBUF(cntbuf) \ bcm_get_data_from_xtlv_buf(((wl_cnt_info_t *)cntbuf)->data, \ ((wl_cnt_info_t *)cntbuf)->datalen, WL_CNT_XTLV_WLC, \ NULL, BCM_XTLV_OPTION_ALIGN32) +#define CHK_CNTBUF_DATALEN(cntbuf, ioctl_buflen) do { \ + if (((wl_cnt_info_t *)cntbuf)->datalen + \ + OFFSETOF(wl_cnt_info_t, data) > ioctl_buflen) \ + printf("%s: IOVAR buffer short!\n", __FUNCTION__); \ +} while (0) + #endif /* _wlioctl_utils_h_ */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/wpa.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/wpa.h old mode 100755 new mode 100644 similarity index 95% rename from drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/wpa.h rename to drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/wpa.h index ef5d664dabee..f681ece53dff --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/wpa.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/wpa.h @@ -1,7 +1,7 @@ /* * Fundamental types and constants relating to WPA * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute 
a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,14 +24,14 @@ * * <> * - * $Id: wpa.h 518342 2014-12-01 23:21:41Z $ + * $Id: wpa.h 700076 2017-05-17 14:42:22Z $ */ #ifndef _proto_wpa_h_ #define _proto_wpa_h_ #include -#include +#include /* This marks the start of a packed structure section. */ @@ -125,6 +125,9 @@ typedef BWL_PRE_PACKED_STRUCT struct #define WPA_CIPHER_BIP 6 /* WEP (104-bit) */ #define WPA_CIPHER_TPK 7 /* Group addressed traffic not allowed */ +#define WPA_CIPHER_AES_GCM 8 /* AES (GCM) */ +#define WPA_CIPHER_AES_GCM256 9 /* AES (GCM256) */ + #define IS_WPA_CIPHER(cipher) ((cipher) == WPA_CIPHER_NONE || \ (cipher) == WPA_CIPHER_WEP_40 || \ @@ -132,6 +135,8 @@ typedef BWL_PRE_PACKED_STRUCT struct (cipher) == WPA_CIPHER_TKIP || \ (cipher) == WPA_CIPHER_AES_OCB || \ (cipher) == WPA_CIPHER_AES_CCM || \ + (cipher) == WPA_CIPHER_AES_GCM || \ + (cipher) == WPA_CIPHER_AES_GCM256 || \ (cipher) == WPA_CIPHER_TPK) diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/wps.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/wps.h similarity index 99% rename from drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/wps.h rename to drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/wps.h index 495d7f181fd3..aa4cc1b9f0e0 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/proto/wps.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include/wps.h @@ -1,7 +1,7 @@ /* * WPS IE definitions * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/linux_osl.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/linux_osl.c index 16d873f81643..efbcf36ecc10 100755 --- 
a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/linux_osl.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/linux_osl.c @@ -1,7 +1,7 @@ /* * Linux OS Independent Layer * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: linux_osl.c 602478 2015-11-26 04:46:12Z $ + * $Id: linux_osl.c 680580 2017-01-20 11:49:58Z $ */ #define LINUX_PORT @@ -46,6 +46,7 @@ #include #include #include +#include #include @@ -59,19 +60,18 @@ #include #include #include -#include +#include #include #include #include -#if defined(__ARM_ARCH_7A__) -#include -#include -#endif -#include #endif /* BCM_SECURE_DMA */ #include +#if defined(STB) +#include +extern spinlock_t l2x0_reg_lock; +#endif #ifdef BCM_OBJECT_TRACE #include @@ -108,7 +108,7 @@ #define STATIC_BUF_TOTAL_LEN (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE) typedef struct bcm_static_buf { - struct semaphore static_sem; + spinlock_t static_lock; unsigned char *buf_ptr; unsigned char buf_use[STATIC_BUF_MAX_NUM]; } bcm_static_buf_t; @@ -128,7 +128,7 @@ static bcm_static_buf_t *bcm_static_buf = 0; #ifdef DHD_USE_STATIC_CTRLBUF #define STATIC_PKT_1PAGE_NUM 0 -#define STATIC_PKT_2PAGE_NUM 64 +#define STATIC_PKT_2PAGE_NUM 128 #else #define STATIC_PKT_1PAGE_NUM 8 #define STATIC_PKT_2PAGE_NUM 8 @@ -175,6 +175,7 @@ struct osl_cmn_info { atomic_t pktalloced; /* Number of allocated packet buffers */ spinlock_t dbgmem_lock; bcm_mem_link_t *dbgmem_list; + bcm_mem_link_t *dbgvmem_list; spinlock_t pktalloc_lock; atomic_t refcount; /* Number of references to this shared structure. 
*/ }; @@ -199,11 +200,6 @@ struct osl_info { int ctrace_num; #endif /* BCMDBG_CTRACE */ #ifdef BCM_SECURE_DMA - struct cma_dev *cma; - struct sec_mem_elem *sec_list_512; - struct sec_mem_elem *sec_list_base_512; - struct sec_mem_elem *sec_list_2048; - struct sec_mem_elem *sec_list_base_2048; struct sec_mem_elem *sec_list_4096; struct sec_mem_elem *sec_list_base_4096; phys_addr_t contig_base; @@ -212,24 +208,21 @@ struct osl_info { void *contig_base_alloc_va; phys_addr_t contig_base_alloc_coherent; void *contig_base_alloc_coherent_va; - phys_addr_t contig_delta_va_pa; + void *contig_base_coherent_va; + void *contig_delta_va_pa; struct { phys_addr_t pa; void *va; bool avail; } sec_cma_coherent[SEC_CMA_COHERENT_MAX]; - + int stb_ext_params; #endif /* BCM_SECURE_DMA */ }; #ifdef BCM_SECURE_DMA -phys_addr_t g_contig_delta_va_pa; -static void osl_sec_dma_setup_contig_mem(osl_t *osh, unsigned long memsize, int regn); -static int osl_sec_dma_alloc_contig_mem(osl_t *osh, unsigned long memsize, int regn); -static void osl_sec_dma_free_contig_mem(osl_t *osh, u32 memsize, int regn); static void * osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, bool iscache, bool isdecr); static void osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size); -static void osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, +static int osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem_t **list); static void osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base); @@ -267,12 +260,25 @@ do { \ /* PCMCIA attribute space access macros */ -/* Global ASSERT type flag */ -uint32 g_assert_type = 1; +uint32 g_assert_type = 0; /* By Default Kernel Panic */ + module_param(g_assert_type, int, 0); +#ifdef BCM_SECURE_DMA +#define SECDMA_MODULE_PARAMS 0 +#define SECDMA_EXT_FILE 1 +unsigned long secdma_addr = 0; +unsigned long secdma_addr2 = 0; +u32 secdma_size = 0; +u32 secdma_size2 = 0; 
+module_param(secdma_addr, ulong, 0); +module_param(secdma_size, int, 0); +module_param(secdma_addr2, ulong, 0); +module_param(secdma_size2, int, 0); +static int secdma_found = 0; +#endif /* BCM_SECURE_DMA */ static int16 linuxbcmerrormap[] = -{ 0, /* 0 */ +{ 0, /* 0 */ -EINVAL, /* BCME_ERROR */ -EINVAL, /* BCME_BADARG */ -EINVAL, /* BCME_BADOPTION */ @@ -326,12 +332,19 @@ static int16 linuxbcmerrormap[] = -ERANGE, /* BCME_REPLAY */ -EINVAL, /* BCME_IE_NOTFOUND */ -EINVAL, /* BCME_DATA_NOTFOUND */ + -EINVAL, /* BCME_NOT_GC */ + -EINVAL, /* BCME_PRS_REQ_FAILED */ + -EINVAL, /* BCME_NO_P2P_SE */ + -EINVAL, /* BCME_NOA_PND */ + -EINVAL, /* BCME_FRAG_Q_FAILED */ + -EINVAL, /* BCME_GET_AF_FAILED */ + -EINVAL, /* BCME_MSCH_NOTREADY */ /* When an new error code is added to bcmutils.h, add os * specific error translation here as well */ /* check if BCME_LAST changed since the last time this function was updated */ -#if BCME_LAST != -53 +#if BCME_LAST != -60 #error "You need to add a OS error translation in the linuxbcmerrormap \ for new error code defined in bcmutils.h" #endif @@ -363,6 +376,9 @@ osl_attach(void *pdev, uint bustype, bool pkttag) #endif /* SHARED_OSL_CMN */ osl_t *osh; gfp_t flags; +#ifdef BCM_SECURE_DMA + u32 secdma_memsize; +#endif flags = CAN_SLEEP() ? 
GFP_KERNEL: GFP_ATOMIC; if (!(osh = kmalloc(sizeof(osl_t), flags))) @@ -403,12 +419,63 @@ osl_attach(void *pdev, uint bustype, bool pkttag) osh->magic = OS_HANDLE_MAGIC; #ifdef BCM_SECURE_DMA - osl_sec_dma_setup_contig_mem(osh, CMA_MEMBLOCK, CONT_ARMREGION); - + if ((secdma_addr != 0) && (secdma_size != 0)) { + printk("linux_osl.c: Buffer info passed via module params, using it.\n"); + if (secdma_found == 0) { + osh->contig_base_alloc = (phys_addr_t)secdma_addr; + secdma_memsize = secdma_size; + } else if (secdma_found == 1) { + osh->contig_base_alloc = (phys_addr_t)secdma_addr2; + secdma_memsize = secdma_size2; + } else { + printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found); + kfree(osh); + return NULL; + } + osh->contig_base = (phys_addr_t)osh->contig_base_alloc; + printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize); + printf("linux_osl.c: secdma_cma_addr = 0x%x \n", + (unsigned int)osh->contig_base_alloc); + osh->stb_ext_params = SECDMA_MODULE_PARAMS; + } + else if (stbpriv_init(osh) == 0) { + printk("linux_osl.c: stbpriv.txt found. 
Get buffer info.\n"); + if (secdma_found == 0) { + osh->contig_base_alloc = + (phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr"), NULL, 0); + secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size"), NULL, 0); + } else if (secdma_found == 1) { + osh->contig_base_alloc = + (phys_addr_t)bcm_strtoul(stbparam_get("secdma_cma_addr2"), NULL, 0); + secdma_memsize = bcm_strtoul(stbparam_get("secdma_cma_size2"), NULL, 0); + } else { + printk("linux_osl.c secdma: secDMA instances %d \n", secdma_found); + kfree(osh); + return NULL; + } + osh->contig_base = (phys_addr_t)osh->contig_base_alloc; + printf("linux_osl.c: secdma_cma_size = 0x%x\n", secdma_memsize); + printf("linux_osl.c: secdma_cma_addr = 0x%x \n", + (unsigned int)osh->contig_base_alloc); + osh->stb_ext_params = SECDMA_EXT_FILE; + } + else { + printk("linux_osl.c: secDMA no longer supports internal buffer allocation.\n"); + kfree(osh); + return NULL; + } + secdma_found++; osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh, phys_to_page((u32)osh->contig_base_alloc), CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE); + if (osh->contig_base_alloc_coherent_va == NULL) { + if (osh->cmn) + kfree(osh->cmn); + kfree(osh); + return NULL; + } + osh->contig_base_coherent_va = osh->contig_base_alloc_coherent_va; osh->contig_base_alloc_coherent = osh->contig_base_alloc; osl_sec_dma_init_consistent(osh); @@ -416,15 +483,24 @@ osl_attach(void *pdev, uint bustype, bool pkttag) osh->contig_base_alloc_va = osl_sec_dma_ioremap(osh, phys_to_page((u32)osh->contig_base_alloc), CMA_DMA_DATA_MEMBLOCK, TRUE, FALSE); + if (osh->contig_base_alloc_va == NULL) { + osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK); + if (osh->cmn) + kfree(osh->cmn); + kfree(osh); + return NULL; + } osh->contig_base_va = osh->contig_base_alloc_va; - /* - * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, &osh->sec_list_512); - * osh->sec_list_base_512 = osh->sec_list_512; - * osl_sec_dma_init_elem_mem_block(osh, 
CMA_BUFSIZE_2K, CMA_BUFNUM, &osh->sec_list_2048); - * osh->sec_list_base_2048 = osh->sec_list_2048; - */ - osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096); + if (BCME_OK != osl_sec_dma_init_elem_mem_block(osh, + CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096)) { + osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK); + osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK); + if (osh->cmn) + kfree(osh->cmn); + kfree(osh); + return NULL; + } osh->sec_list_base_4096 = osh->sec_list_4096; #endif /* BCM_SECURE_DMA */ @@ -471,7 +547,7 @@ int osl_static_mem_init(osl_t *osh, void *adapter) printk("alloc static buf at %p!\n", bcm_static_buf); } - sema_init(&bcm_static_buf->static_sem, 1); + spin_lock_init(&bcm_static_buf->static_lock); bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE; } @@ -526,11 +602,12 @@ osl_detach(osl_t *osh) return; #ifdef BCM_SECURE_DMA - osl_sec_dma_free_contig_mem(osh, CMA_MEMBLOCK, CONT_ARMREGION); - osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, osh->sec_list_base_512); - osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, osh->sec_list_base_2048); + if (osh->stb_ext_params == SECDMA_EXT_FILE) + stbpriv_exit(osh); osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, osh->sec_list_base_4096); - osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_MEMBLOCK); + osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK); + osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK); + secdma_found--; #endif /* BCM_SECURE_DMA */ @@ -559,14 +636,82 @@ int osl_static_mem_deinit(osl_t *osh, void *adapter) return 0; } +/* APIs to set/get specific quirks in OSL layer */ +void BCMFASTPATH +osl_flag_set(osl_t *osh, uint32 mask) +{ + osh->flags |= mask; +} + +void +osl_flag_clr(osl_t *osh, uint32 mask) +{ + osh->flags &= ~mask; +} + +#if defined(STB) +inline bool BCMFASTPATH +#else 
+bool +#endif +osl_is_flag_set(osl_t *osh, uint32 mask) +{ + return (osh->flags & mask); +} + + +#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) + +inline int BCMFASTPATH +osl_arch_is_coherent(void) +{ + return 0; +} + +inline int BCMFASTPATH +osl_acp_war_enab(void) +{ + return 0; +} + +inline void BCMFASTPATH +osl_cache_flush(void *va, uint size) +{ + + if (size > 0) + dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_TO_DEVICE); +} + +inline void BCMFASTPATH +osl_cache_inv(void *va, uint size) +{ + + dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_FROM_DEVICE); +} + +inline void BCMFASTPATH +osl_prefetch(const void *ptr) +{ + __asm__ __volatile__("pld\t%0" :: "o"(*(char *)ptr) : "cc"); +} + +#endif + +/* + * To avoid ACP latency, a fwder buf will be sent directly to DDR using + * DDR aliasing into non-ACP address space. Such Fwder buffers must be + * explicitly managed from a coherency perspective. + */ +static inline void BCMFASTPATH +osl_fwderbuf_reset(osl_t *osh, struct sk_buff *skb) +{ +} + static struct sk_buff *osl_alloc_skb(osl_t *osh, unsigned int len) { struct sk_buff *skb; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) gfp_t flags = (in_atomic() || irqs_disabled()) ? 
GFP_ATOMIC : GFP_KERNEL; -#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_ZONE_DMA) - flags |= GFP_ATOMIC; -#endif #ifdef DHD_USE_ATOMIC_PKTGET flags = GFP_ATOMIC; #endif /* DHD_USE_ATOMIC_PKTGET */ @@ -630,6 +775,9 @@ osl_ctfpool_add(osl_t *osh) /* Use bit flag to indicate skb from fast ctfpool */ PKTFAST(osh, skb) = FASTBUF; + /* If ctfpool's osh is a fwder osh, reset the fwder buf */ + osl_fwderbuf_reset(osh->ctfpool->osh, skb); + CTFPOOL_UNLOCK(osh->ctfpool, flags); return skb; @@ -663,6 +811,8 @@ osl_ctfpool_init(osl_t *osh, uint numobj, uint size) osh->ctfpool = kzalloc(sizeof(ctfpool_t), flags); ASSERT(osh->ctfpool); + osh->ctfpool->osh = osh; + osh->ctfpool->max_obj = numobj; osh->ctfpool->obj_size = size; @@ -877,9 +1027,9 @@ osl_pkt_tonative(osl_t *osh, void *pkt) for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) { if (PKTISCHAINED(nskb1)) { nskb2 = PKTCLINK(nskb1); - } - else + } else { nskb2 = NULL; + } DEL_CTRACE(osh, nskb1); } @@ -899,30 +1049,42 @@ osl_pkt_frmnative(osl_t *osh, void *pkt, int line, char *file) osl_pkt_frmnative(osl_t *osh, void *pkt) #endif /* BCMDBG_CTRACE */ { + struct sk_buff *cskb; struct sk_buff *nskb; -#ifdef BCMDBG_CTRACE - struct sk_buff *nskb1, *nskb2; -#endif + unsigned long pktalloced = 0; if (osh->pub.pkttag) OSL_PKTTAG_CLEAR(pkt); - /* Increment the packet counter */ - for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) { - atomic_add(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced); + /* walk the PKTCLINK() list */ + for (cskb = (struct sk_buff *)pkt; + cskb != NULL; + cskb = PKTISCHAINED(cskb) ? 
PKTCLINK(cskb) : NULL) { + + /* walk the pkt buffer list */ + for (nskb = cskb; nskb; nskb = nskb->next) { + + /* Increment the packet counter */ + pktalloced++; + + /* clean the 'prev' pointer + * Kernel 3.18 is leaving skb->prev pointer set to skb + * to indicate a non-fragmented skb + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + nskb->prev = NULL; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) */ + #ifdef BCMDBG_CTRACE - for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) { - if (PKTISCHAINED(nskb1)) { - nskb2 = PKTCLINK(nskb1); - } - else - nskb2 = NULL; - - ADD_CTRACE(osh, nskb1, file, line); - } + ADD_CTRACE(osh, nskb, file, line); #endif /* BCMDBG_CTRACE */ + } } + + /* Increment the packet counter */ + atomic_add(pktalloced, &osh->cmn->pktalloced); + return (void *)pkt; } @@ -1002,6 +1164,9 @@ osl_pktfastfree(osl_t *osh, struct sk_buff *skb) ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb); ASSERT(ctfpool != NULL); + /* if osh is a fwder osh, reset the fwder buf */ + osl_fwderbuf_reset(ctfpool->osh, skb); + /* Add object to the ctfpool */ CTFPOOL_LOCK(ctfpool, flags); skb->next = (struct sk_buff *)ctfpool->head; @@ -1148,14 +1313,14 @@ osl_pktget_static(osl_t *osh, uint len) down(&bcm_static_skb->osl_pkt_sem); if (len <= DHD_SKB_1PAGE_BUFSIZE) { - for (i = 0; i < STATIC_PKT_MAX_NUM; i++) { + for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) { if (bcm_static_skb->skb_4k[i] && bcm_static_skb->pkt_use[i] == 0) { break; } } - if (i != STATIC_PKT_MAX_NUM) { + if (i != STATIC_PKT_1PAGE_NUM) { bcm_static_skb->pkt_use[i] = 1; skb = bcm_static_skb->skb_4k[i]; @@ -1286,8 +1451,8 @@ osl_pktfree_static(osl_t *osh, void *p, bool send) } #endif up(&bcm_static_skb->osl_pkt_sem); - osl_pktfree(osh, p, send); #endif /* DHD_USE_STATIC_CTRLBUF */ + osl_pktfree(osh, p, send); } #endif /* CONFIG_DHD_USE_STATIC_BUF */ @@ -1414,10 +1579,11 @@ osl_malloc(osl_t *osh, uint size) #ifdef CONFIG_DHD_USE_STATIC_BUF if (bcm_static_buf) { + unsigned long irq_flags; int i = 0; if 
((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE)) { - down(&bcm_static_buf->static_sem); + spin_lock_irqsave(&bcm_static_buf->static_lock, irq_flags); for (i = 0; i < STATIC_BUF_MAX_NUM; i++) { @@ -1427,13 +1593,13 @@ osl_malloc(osl_t *osh, uint size) if (i == STATIC_BUF_MAX_NUM) { - up(&bcm_static_buf->static_sem); + spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags); printk("all static buff in use!\n"); goto original; } bcm_static_buf->buf_use[i] = 1; - up(&bcm_static_buf->static_sem); + spin_unlock_irqrestore(&bcm_static_buf->static_lock, irq_flags); bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size); if (osh) @@ -1475,6 +1641,8 @@ void osl_mfree(osl_t *osh, void *addr, uint size) { #ifdef CONFIG_DHD_USE_STATIC_BUF + unsigned long flags; + if (bcm_static_buf) { if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr @@ -1484,9 +1652,9 @@ osl_mfree(osl_t *osh, void *addr, uint size) buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE; - down(&bcm_static_buf->static_sem); + spin_lock_irqsave(&bcm_static_buf->static_lock, flags); bcm_static_buf->buf_use[buf_idx] = 0; - up(&bcm_static_buf->static_sem); + spin_unlock_irqrestore(&bcm_static_buf->static_lock, flags); if (osh && osh->cmn) { ASSERT(osh->magic == OS_HANDLE_MAGIC); @@ -1506,6 +1674,52 @@ osl_mfree(osl_t *osh, void *addr, uint size) kfree(addr); } +void * +osl_vmalloc(osl_t *osh, uint size) +{ + void *addr; + + /* only ASSERT if osh is defined */ + if (osh) + ASSERT(osh->magic == OS_HANDLE_MAGIC); + if ((addr = vmalloc(size)) == NULL) { + if (osh) + osh->failed++; + return (NULL); + } + if (osh && osh->cmn) + atomic_add(size, &osh->cmn->malloced); + + return (addr); +} + +void * +osl_vmallocz(osl_t *osh, uint size) +{ + void *ptr; + + ptr = osl_vmalloc(osh, size); + + if (ptr != NULL) { + bzero(ptr, size); + } + + return ptr; +} + +void +osl_vmfree(osl_t *osh, void *addr, uint size) +{ + if (osh && osh->cmn) { + ASSERT(osh->magic == OS_HANDLE_MAGIC); + + 
ASSERT(size <= osl_malloced(osh)); + + atomic_sub(size, &osh->cmn->malloced); + } + vfree(addr); +} + uint osl_check_memleak(osl_t *osh) { @@ -1561,7 +1775,7 @@ osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced #ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL flags = GFP_ATOMIC; #else - flags = GFP_KERNEL; + flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC; #endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */ va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags); #ifdef BCMDMA64OSL @@ -1606,10 +1820,9 @@ dmaaddr_t BCMFASTPATH osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah) { int dir; -#ifdef BCMDMA64OSL - dmaaddr_t ret; - dma_addr_t map_addr; -#endif /* BCMDMA64OSL */ + dmaaddr_t ret_addr; + dma_addr_t map_addr; + int ret; ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE; @@ -1617,14 +1830,24 @@ osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_ -#ifdef BCMDMA64OSL map_addr = pci_map_single(osh->pdev, va, size, dir); - PHYSADDRLOSET(ret, map_addr & 0xffffffff); - PHYSADDRHISET(ret, (map_addr >> 32) & 0xffffffff); - return ret; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + ret = pci_dma_mapping_error(osh->pdev, map_addr); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 5)) + ret = pci_dma_mapping_error(map_addr); #else - return (pci_map_single(osh->pdev, va, size, dir)); -#endif /* BCMDMA64OSL */ + ret = 0; +#endif + if (ret) { + printk("%s: Failed to map memory\n", __FUNCTION__); + PHYSADDRLOSET(ret_addr, 0); + PHYSADDRHISET(ret_addr, 0); + } else { + PHYSADDRLOSET(ret_addr, map_addr & 0xffffffff); + PHYSADDRHISET(ret_addr, (map_addr >> 32) & 0xffffffff); + } + + return ret_addr; } void BCMFASTPATH @@ -1654,142 +1877,16 @@ osl_cpu_relax(void) cpu_relax(); } - -#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING) || \ - defined(CONFIG_ARCH_MSM8996) || 
defined(CONFIG_SOC_EXYNOS8890)) - -#include - -/* - * Note that its gauranteed that the Ring is cache line aligned, but - * the messages are not. And we see that __dma_inv_range in - * arch/arm64/mm/cache.S invalidates only if the request size is - * cache line aligned. If not, it will Clean and invalidate. - * So we'll better invalidate the whole ring. - * - * Also, the latest Kernel versions invoke cache maintenance operations - * from arch/arm64/mm/dma-mapping.c, __swiotlb_sync_single_for_device - * Only if is_device_dma_coherent returns 0. Since we don't have BSP - * source, assuming that its the case, since we pass NULL for the dev ptr - */ -inline void BCMFASTPATH -osl_cache_flush(void *va, uint size) +extern void osl_preempt_disable(osl_t *osh) { - /* - * using long for address arithmatic is OK, in linux - * 32 bit its 4 bytes and 64 bit its 8 bytes - */ - unsigned long end_cache_line_start; - unsigned long end_addr; - unsigned long next_cache_line_start; - - end_addr = (unsigned long)va + size; - - /* Start address beyond the cache line we plan to operate */ - end_cache_line_start = (end_addr & ~(L1_CACHE_BYTES - 1)); - next_cache_line_start = end_cache_line_start + L1_CACHE_BYTES; - - /* Align the start address to cache line boundary */ - va = (void *)((unsigned long)va & ~(L1_CACHE_BYTES - 1)); - - /* Ensure that size is also aligned and extends partial line to full */ - size = next_cache_line_start - (unsigned long)va; - -#ifndef BCM_SECURE_DMA - -#ifdef CONFIG_ARM64 - /* - * virt_to_dma is not present in arm64/include/dma-mapping.h - * So have to convert the va to pa first and then get the dma addr - * of the same. 
- */ - { - phys_addr_t pa; - dma_addr_t dma_addr; - pa = virt_to_phys(va); - dma_addr = phys_to_dma(NULL, pa); - if (size > 0) - dma_sync_single_for_device(OSH_NULL, dma_addr, size, DMA_TX); - } -#else - if (size > 0) - dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_TX); -#endif /* !CONFIG_ARM64 */ -#else - phys_addr_t orig_pa = (phys_addr_t)(va - g_contig_delta_va_pa); - if (size > 0) - dma_sync_single_for_device(OSH_NULL, orig_pa, size, DMA_TX); -#endif /* defined BCM_SECURE_DMA */ + preempt_disable(); } -inline void BCMFASTPATH -osl_cache_inv(void *va, uint size) +extern void osl_preempt_enable(osl_t *osh) { - /* - * using long for address arithmatic is OK, in linux - * 32 bit its 4 bytes and 64 bit its 8 bytes - */ - unsigned long end_cache_line_start; - unsigned long end_addr; - unsigned long next_cache_line_start; - - end_addr = (unsigned long)va + size; - - /* Start address beyond the cache line we plan to operate */ - end_cache_line_start = (end_addr & ~(L1_CACHE_BYTES - 1)); - next_cache_line_start = end_cache_line_start + L1_CACHE_BYTES; - - /* Align the start address to cache line boundary */ - va = (void *)((unsigned long)va & ~(L1_CACHE_BYTES - 1)); - - /* Ensure that size is also aligned and extends partial line to full */ - size = next_cache_line_start - (unsigned long)va; - -#ifndef BCM_SECURE_DMA - -#ifdef CONFIG_ARM64 - /* - * virt_to_dma is not present in arm64/include/dma-mapping.h - * So have to convert the va to pa first and then get the dma addr - * of the same. 
- */ - { - phys_addr_t pa; - dma_addr_t dma_addr; - pa = virt_to_phys(va); - dma_addr = phys_to_dma(NULL, pa); - dma_sync_single_for_cpu(OSH_NULL, dma_addr, size, DMA_RX); - } -#else - dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_RX); -#endif /* !CONFIG_ARM64 */ -#else - phys_addr_t orig_pa = (phys_addr_t)(va - g_contig_delta_va_pa); - dma_sync_single_for_cpu(OSH_NULL, orig_pa, size, DMA_RX); -#endif /* defined BCM_SECURE_DMA */ + preempt_enable(); } -inline void osl_prefetch(const void *ptr) -{ - /* PLD instruction is not applicable in ARM 64. We don't care for now */ -#ifndef CONFIG_ARM64 - __asm__ __volatile__("pld\t%0" :: "o"(*(const char *)ptr) : "cc"); -#endif -} - -int osl_arch_is_coherent(void) -{ - return 0; -} - - -inline int osl_acp_war_enab(void) -{ - return 0; -} - -#endif - #if defined(BCMASSERT_LOG) void osl_assert(const char *exp, const char *file, int line) @@ -1811,12 +1908,13 @@ osl_assert(const char *exp, const char *file, int line) #endif /* BCMASSERT_LOG */ -#if defined(BCMASSERT_LOG) switch (g_assert_type) { case 0: panic("%s", tempbuf); break; case 1: + /* fall through */ + case 3: printk("%s", tempbuf); break; case 2: @@ -1826,8 +1924,6 @@ osl_assert(const char *exp, const char *file, int line) default: break; } -#endif - } #endif @@ -1854,6 +1950,17 @@ osl_sleep(uint ms) msleep(ms); } +uint64 +osl_sysuptime_us(void) +{ + struct timeval tv; + uint64 usec; + + do_gettimeofday(&tv); + /* tv_usec content is fraction of a second */ + usec = (uint64)tv.tv_sec * 1000000ul + tv.tv_usec; + return usec; +} /* Clone a packet. 
@@ -2073,70 +2180,44 @@ osl_os_image_size(void *image) /* Linux Kernel: File Operations: end */ - -/* APIs to set/get specific quirks in OSL layer */ -void -osl_flag_set(osl_t *osh, uint32 mask) +#if (defined(STB) && defined(__arm__)) +inline void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size) { - osh->flags |= mask; -} + unsigned long flags = 0; + int pci_access = 0; +#if defined(BCM_GMAC3) + const int acp_war_enab = 1; +#else /* !BCM_GMAC3 */ + int acp_war_enab = ACP_WAR_ENAB(); +#endif /* !BCM_GMAC3 */ -bool -osl_is_flag_set(osl_t *osh, uint32 mask) -{ - return (osh->flags & mask); + if (osh && BUSTYPE(osh->bustype) == PCI_BUS) + pci_access = 1; + + if (pci_access && acp_war_enab) + spin_lock_irqsave(&l2x0_reg_lock, flags); + + switch (size) { + case sizeof(uint8): + *(uint8*)v = readb((volatile uint8*)(addr)); + break; + case sizeof(uint16): + *(uint16*)v = readw((volatile uint16*)(addr)); + break; + case sizeof(uint32): + *(uint32*)v = readl((volatile uint32*)(addr)); + break; + case sizeof(uint64): + *(uint64*)v = *((volatile uint64*)(addr)); + break; + } + + if (pci_access && acp_war_enab) + spin_unlock_irqrestore(&l2x0_reg_lock, flags); } +#endif #ifdef BCM_SECURE_DMA - -static void -osl_sec_dma_setup_contig_mem(osl_t *osh, unsigned long memsize, int regn) -{ - int ret; - -#if defined(__ARM_ARCH_7A__) - if (regn == CONT_ARMREGION) { - ret = osl_sec_dma_alloc_contig_mem(osh, memsize, regn); - if (ret != BCME_OK) - printk("linux_osl.c: CMA memory access failed\n"); - } -#endif - /* implement the MIPS Here */ -} - -static int -osl_sec_dma_alloc_contig_mem(osl_t *osh, unsigned long memsize, int regn) -{ - u64 addr; - - printk("linux_osl.c: The value of cma mem block size = %ld\n", memsize); - osh->cma = cma_dev_get_cma_dev(regn); - printk("The value of cma = %p\n", osh->cma); - if (!osh->cma) { - printk("linux_osl.c:contig_region index is invalid\n"); - return BCME_ERROR; - } - if (cma_dev_get_mem(osh->cma, &addr, (u32)memsize, SEC_DMA_ALIGN) < 0) { 
- printk("linux_osl.c: contiguous memory block allocation failure\n"); - return BCME_ERROR; - } - osh->contig_base_alloc = (phys_addr_t)addr; - osh->contig_base = (phys_addr_t)osh->contig_base_alloc; - printk("contig base alloc=%lx \n", (ulong)osh->contig_base_alloc); - - return BCME_OK; -} - -static void -osl_sec_dma_free_contig_mem(osl_t *osh, u32 memsize, int regn) -{ - int ret; - - ret = cma_dev_put_mem(osh->cma, (u64)osh->contig_base, memsize); - if (ret) - printf("%s contig base free failed\n", __FUNCTION__); -} - static void * osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, bool iscache, bool isdecr) { @@ -2159,19 +2240,16 @@ osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, bool iscache, bo if (iscache) { addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, __pgprot(PAGE_KERNEL)); if (isdecr) { - osh->contig_delta_va_pa = (phys_addr_t)(addr - page_to_phys(page)); - g_contig_delta_va_pa = osh->contig_delta_va_pa; + osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page)); } - } - else { + } else { #if defined(__ARM_ARCH_7A__) addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, pgprot_noncached(__pgprot(PAGE_KERNEL))); #endif if (isdecr) { - osh->contig_delta_va_pa = (phys_addr_t)(addr - page_to_phys(page)); - g_contig_delta_va_pa = osh->contig_delta_va_pa; + osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page)); } } @@ -2185,10 +2263,11 @@ osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size) vunmap(contig_base_va); } -static void +static int osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem_t **list) { int i; + int ret = BCME_OK; sec_mem_elem_t *sec_mem_elem; if ((sec_mem_elem = kmalloc(sizeof(sec_mem_elem_t)*(max), GFP_ATOMIC)) != NULL) { @@ -2198,25 +2277,29 @@ osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem for (i = 0; i < max-1; i++) { sec_mem_elem->next = (sec_mem_elem + 1); sec_mem_elem->size = mbsize; - sec_mem_elem->pa_cma = 
(u32)osh->contig_base_alloc; + sec_mem_elem->pa_cma = osh->contig_base_alloc; sec_mem_elem->vac = osh->contig_base_alloc_va; + sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma); osh->contig_base_alloc += mbsize; - osh->contig_base_alloc_va += mbsize; + osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va + mbsize); sec_mem_elem = sec_mem_elem + 1; } sec_mem_elem->next = NULL; sec_mem_elem->size = mbsize; - sec_mem_elem->pa_cma = (u32)osh->contig_base_alloc; + sec_mem_elem->pa_cma = osh->contig_base_alloc; sec_mem_elem->vac = osh->contig_base_alloc_va; + sec_mem_elem->pa_cma_page = phys_to_page(sec_mem_elem->pa_cma); osh->contig_base_alloc += mbsize; - osh->contig_base_alloc_va += mbsize; + osh->contig_base_alloc_va = ((uint8 *)osh->contig_base_alloc_va + mbsize); - } - else + } else { printf("%s sec mem elem kmalloc failed\n", __FUNCTION__); + ret = BCME_ERROR; + } + return ret; } @@ -2233,32 +2316,21 @@ osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction, { sec_mem_elem_t *sec_mem_elem = NULL; - if (size <= 512 && osh->sec_list_512) { - sec_mem_elem = osh->sec_list_512; - osh->sec_list_512 = sec_mem_elem->next; - } - else if (size <= 2048 && osh->sec_list_2048) { - sec_mem_elem = osh->sec_list_2048; - osh->sec_list_2048 = sec_mem_elem->next; - } - else if (osh->sec_list_4096) { + ASSERT(osh->sec_list_4096); sec_mem_elem = osh->sec_list_4096; osh->sec_list_4096 = sec_mem_elem->next; - } else { - printf("%s No matching Pool available size=%d \n", __FUNCTION__, size); - return NULL; - } - if (sec_mem_elem != NULL) { sec_mem_elem->next = NULL; if (ptr_cma_info->sec_alloc_list_tail) { ptr_cma_info->sec_alloc_list_tail->next = sec_mem_elem; + ptr_cma_info->sec_alloc_list_tail = sec_mem_elem; } - - ptr_cma_info->sec_alloc_list_tail = sec_mem_elem; - if (ptr_cma_info->sec_alloc_list == NULL) + else { + /* First allocation: If tail is NULL, sec_alloc_list MUST also be NULL */ + ASSERT(ptr_cma_info->sec_alloc_list == NULL); 
ptr_cma_info->sec_alloc_list = sec_mem_elem; + ptr_cma_info->sec_alloc_list_tail = sec_mem_elem; } return sec_mem_elem; } @@ -2268,21 +2340,8 @@ osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem) { sec_mem_elem->dma_handle = 0x0; sec_mem_elem->va = NULL; - - if (sec_mem_elem->size == 512) { - sec_mem_elem->next = osh->sec_list_512; - osh->sec_list_512 = sec_mem_elem; - } - else if (sec_mem_elem->size == 2048) { - sec_mem_elem->next = osh->sec_list_2048; - osh->sec_list_2048 = sec_mem_elem; - } - else if (sec_mem_elem->size == 4096) { sec_mem_elem->next = osh->sec_list_4096; osh->sec_list_4096 = sec_mem_elem; - } - else - printf("%s free failed size=%d \n", __FUNCTION__, sec_mem_elem->size); } static sec_mem_elem_t * BCMFASTPATH @@ -2302,6 +2361,7 @@ osl_sec_dma_find_rem_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info, dma_add return sec_mem_elem; } + sec_mem_elem = sec_mem_elem->next; while (sec_mem_elem != NULL) { @@ -2350,7 +2410,7 @@ osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, int direction, void *p, sec_mem_elem_t *sec_mem_elem; struct page *pa_cma_page; uint loffset; - void *vaorig = va + size; + void *vaorig = ((uint8 *)va + size); dma_addr_t dma_handle = 0x0; /* packet will be the one added with osl_sec_dma_map() just before this call */ @@ -2361,7 +2421,7 @@ osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, int direction, void *p, pa_cma_page = phys_to_page(sec_mem_elem->pa_cma); loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1)); - dma_handle = dma_map_page(osh->cma->dev, pa_cma_page, loffset, size, + dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size, (direction == DMA_TX ? 
DMA_TO_DEVICE:DMA_FROM_DEVICE)); } else { @@ -2379,22 +2439,16 @@ osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, sec_mem_elem_t *sec_mem_elem; struct page *pa_cma_page; void *pa_cma_kmap_va = NULL; - int *fragva; uint buflen = 0; - struct sk_buff *skb; dma_addr_t dma_handle = 0x0; uint loffset; - int i = 0; + ASSERT((direction == DMA_RX) || (direction == DMA_TX)); sec_mem_elem = osl_sec_dma_alloc_mem_elem(osh, va, size, direction, ptr_cma_info, offset); - if (sec_mem_elem == NULL) { - printk("linux_osl.c: osl_sec_dma_map - cma allocation failed\n"); - return 0; - } sec_mem_elem->va = va; sec_mem_elem->direction = direction; - pa_cma_page = phys_to_page(sec_mem_elem->pa_cma); + pa_cma_page = sec_mem_elem->pa_cma_page; loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1)); /* pa_cma_kmap_va = kmap_atomic(pa_cma_page); @@ -2402,55 +2456,31 @@ osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, */ pa_cma_kmap_va = sec_mem_elem->vac; + pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + offset); + buflen = size; if (direction == DMA_TX) { + memcpy((uint8*)pa_cma_kmap_va+offset, va, size); - if (p == NULL) { - - memcpy(pa_cma_kmap_va+offset, va, size); - buflen = size; - } - else { - for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) { - if (skb_is_nonlinear(skb)) { - - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - skb_frag_t *f = &skb_shinfo(skb)->frags[i]; - fragva = kmap_atomic(skb_frag_page(f)); - memcpy((pa_cma_kmap_va+offset+buflen), - (fragva + f->page_offset), skb_frag_size(f)); - kunmap_atomic(fragva); - buflen += skb_frag_size(f); - } - } - else { - memcpy((pa_cma_kmap_va+offset+buflen), skb->data, skb->len); - buflen += skb->len; - } - } - - } if (dmah) { dmah->nsegs = 1; dmah->origsize = buflen; } } - - else if (direction == DMA_RX) + else { - buflen = size; if ((p != NULL) && (dmah != NULL)) { dmah->nsegs = 1; dmah->origsize = buflen; } + *(uint32 *)(pa_cma_kmap_va) = 0x0; } 
- if (direction == DMA_RX || direction == DMA_TX) { - dma_handle = dma_map_page(osh->cma->dev, pa_cma_page, loffset+offset, buflen, + if (direction == DMA_RX) { + flush_kernel_vmap_range(pa_cma_kmap_va, sizeof(int)); + } + dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset+offset, buflen, (direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE)); - - } if (dmah) { dmah->segs[0].addr = dma_handle; dmah->segs[0].length = buflen; @@ -2469,11 +2499,11 @@ osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, hndd dma_addr_t dma_handle = 0x0; uint loffset; - pa_cma = (phys_addr_t)(va - osh->contig_delta_va_pa); + pa_cma = ((uint8 *)va - (uint8 *)osh->contig_delta_va_pa); pa_cma_page = phys_to_page(pa_cma); loffset = pa_cma -(pa_cma & ~(PAGE_SIZE-1)); - dma_handle = dma_map_page(osh->cma->dev, pa_cma_page, loffset, size, + dma_handle = dma_map_page(OSH_NULL, pa_cma_page, loffset, size, (direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE)); return dma_handle; @@ -2484,29 +2514,21 @@ osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction, void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset) { sec_mem_elem_t *sec_mem_elem; - struct page *pa_cma_page; void *pa_cma_kmap_va = NULL; uint buflen = 0; dma_addr_t pa_cma; void *va; - uint loffset = 0; int read_count = 0; BCM_REFERENCE(buflen); BCM_REFERENCE(read_count); sec_mem_elem = osl_sec_dma_find_rem_elem(osh, ptr_cma_info, dma_handle); - if (sec_mem_elem == NULL) { - printf("%s sec_mem_elem is NULL and dma_handle =0x%lx and dir=%d\n", - __FUNCTION__, (ulong)dma_handle, direction); - return; - } + ASSERT(sec_mem_elem); va = sec_mem_elem->va; - va -= offset; + va = (uint8 *)va - offset; pa_cma = sec_mem_elem->pa_cma; - pa_cma_page = phys_to_page(pa_cma); - loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1)); if (direction == DMA_RX) { @@ -2518,12 +2540,22 @@ void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset) pa_cma_kmap_va = 
sec_mem_elem->vac; - dma_unmap_page(osh->cma->dev, pa_cma, size, DMA_FROM_DEVICE); + do { + invalidate_kernel_vmap_range(pa_cma_kmap_va, sizeof(int)); + + buflen = *(uint *)(pa_cma_kmap_va); + if (buflen) + break; + + OSL_DELAY(1); + read_count++; + } while (read_count < 200); + dma_unmap_page(OSH_NULL, pa_cma, size, DMA_FROM_DEVICE); memcpy(va, pa_cma_kmap_va, size); /* kunmap_atomic(pa_cma_kmap_va); */ } } else { - dma_unmap_page(osh->cma->dev, pa_cma, size+offset, DMA_TO_DEVICE); + dma_unmap_page(OSH_NULL, pa_cma, size+offset, DMA_TO_DEVICE); } osl_sec_dma_free_mem_elem(osh, sec_mem_elem); @@ -2539,7 +2571,7 @@ osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info) while (sec_mem_elem != NULL) { - dma_unmap_page(osh->cma->dev, sec_mem_elem->pa_cma, sec_mem_elem->size, + dma_unmap_page(OSH_NULL, sec_mem_elem->pa_cma, sec_mem_elem->size, sec_mem_elem->direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE); osl_sec_dma_free_mem_elem(osh, sec_mem_elem); @@ -2558,7 +2590,7 @@ osl_sec_dma_init_consistent(osl_t *osh) osh->sec_cma_coherent[i].avail = TRUE; osh->sec_cma_coherent[i].va = temp_va; osh->sec_cma_coherent[i].pa = temp_pa; - temp_va += SEC_CMA_COHERENT_BLK; + temp_va = ((uint8 *)temp_va)+SEC_CMA_COHERENT_BLK; temp_pa += SEC_CMA_COHERENT_BLK; } } @@ -2648,3 +2680,91 @@ osl_pkt_orphan_partial(struct sk_buff *skb, int tsq) atomic_sub(fraction, &skb->sk->sk_wmem_alloc); } #endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */ + +/* timer apis */ +/* Note: All timer api's are thread unsafe and should be protected with locks by caller */ + +osl_timer_t * +osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg) +{ + osl_timer_t *t; + BCM_REFERENCE(fn); + if ((t = MALLOCZ(NULL, sizeof(osl_timer_t))) == NULL) { + printk(KERN_ERR "osl_timer_init: malloced failed for osl_timer_t\n"); + return (NULL); + } + bzero(t, sizeof(osl_timer_t)); + if ((t->timer = MALLOCZ(NULL, sizeof(struct timer_list))) == NULL) { + printk(KERN_ERR "osl_timer_init: malloc 
failed\n"); + MFREE(NULL, t, sizeof(osl_timer_t)); + return (NULL); + } + t->timer->data = (ulong)arg; + t->timer->function = (linux_timer_fn)fn; + t->set = TRUE; + + init_timer(t->timer); + + return (t); +} + +void +osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic) +{ + + if (t == NULL) { + printf("%s: Timer handle is NULL\n", __FUNCTION__); + return; + } + ASSERT(!t->set); + + t->set = TRUE; + if (periodic) { + printf("Periodic timers are not supported by Linux timer apis\n"); + } + t->timer->expires = jiffies + ms*HZ/1000; + + add_timer(t->timer); + + return; +} + +void +osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic) +{ + + if (t == NULL) { + printf("%s: Timer handle is NULL\n", __FUNCTION__); + return; + } + if (periodic) { + printf("Periodic timers are not supported by Linux timer apis\n"); + } + t->set = TRUE; + t->timer->expires = jiffies + ms*HZ/1000; + + mod_timer(t->timer, t->timer->expires); + + return; +} + +/* + * Return TRUE if timer successfully deleted, FALSE if still pending + */ +bool +osl_timer_del(osl_t *osh, osl_timer_t *t) +{ + if (t == NULL) { + printf("%s: Timer handle is NULL\n", __FUNCTION__); + return (FALSE); + } + if (t->set) { + t->set = FALSE; + if (t->timer) { + del_timer(t->timer); + MFREE(NULL, t->timer, sizeof(struct timer_list)); + } + MFREE(NULL, t, sizeof(osl_timer_t)); + } + return (TRUE); +} diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/pcie_core.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/pcie_core.c index c36bc62ecdb4..63f955cb6693 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/pcie_core.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/pcie_core.c @@ -3,7 +3,7 @@ * Contains PCIe related functions that are shared between different driver models (e.g. firmware * builds, DHD builds, BMAC builds), in order to avoid code duplication. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -26,7 +26,7 @@ * * <> * - * $Id: pcie_core.c 591285 2015-10-07 11:56:29Z $ + * $Id: pcie_core.c 658668 2016-09-09 00:42:11Z $ */ #include @@ -37,6 +37,7 @@ #include #include #include +#include #include "pcie_core.h" @@ -112,4 +113,23 @@ void pcie_serdes_iddqdisable(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs) si_setcoreidx(sih, origidx); } + +#define PCIE_PMCR_REFUP_MASK 0x3f0001e0 +#define PCIE_PMCR_REFEXT_MASK 0x400000 +#define PCIE_PMCR_REFUP_100US 0x38000080 +#define PCIE_PMCR_REFEXT_100US 0x400000 + +/* Set PCIE TRefUp time to 100us */ +void pcie_set_trefup_time_100us(si_t *sih) +{ + si_corereg(sih, sih->buscoreidx, + OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_PMCR_REFUP); + si_corereg(sih, sih->buscoreidx, + OFFSETOF(sbpcieregs_t, configdata), PCIE_PMCR_REFUP_MASK, PCIE_PMCR_REFUP_100US); + + si_corereg(sih, sih->buscoreidx, + OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_PMCR_REFUP_EXT); + si_corereg(sih, sih->buscoreidx, + OFFSETOF(sbpcieregs_t, configdata), PCIE_PMCR_REFEXT_MASK, PCIE_PMCR_REFEXT_100US); +} #endif /* BCMDRIVER */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/sbutils.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/sbutils.c index 0804ef455135..975ee6afaf08 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/sbutils.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/sbutils.c @@ -2,7 +2,7 @@ * Misc utility routines for accessing chip-specific features * of the SiliconBackplane-based Broadcom chips. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: sbutils.c 514727 2014-11-12 03:02:48Z $ + * $Id: sbutils.c 599296 2015-11-13 06:36:13Z $ */ #include @@ -45,13 +45,13 @@ /* local prototypes */ static uint _sb_coreidx(si_info_t *sii, uint32 sba); -static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, +static uint _sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus, uint32 sbba, uint ncores); static uint32 _sb_coresba(si_info_t *sii); -static void *_sb_setcoreidx(si_info_t *sii, uint coreidx); +static volatile void *_sb_setcoreidx(si_info_t *sii, uint coreidx); #define SET_SBREG(sii, r, mask, val) \ W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val))) -#define REGS2SB(va) (sbconfig_t*) ((int8*)(va) + SBCONFIGOFF) +#define REGS2SB(va) (sbconfig_t*) ((volatile int8*)(va) + SBCONFIGOFF) /* sonicsrev */ #define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT) @@ -65,7 +65,6 @@ static void *_sb_setcoreidx(si_info_t *sii, uint coreidx); static uint32 sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr) { - si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint8 tmp; uint32 val, intr_val = 0; @@ -97,7 +96,6 @@ sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr) static void sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v) { - si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint8 tmp; volatile uint32 dummy; uint32 intr_val = 0; @@ -149,8 +147,7 @@ uint sb_intflag(si_t *sih) { si_info_t *sii = SI_INFO(sih); - si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; - void *corereg; + volatile void *corereg; sbconfig_t *sb; uint origidx, intflag, intr_val = 0; @@ -376,7 +373,7 @@ uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint 
mask, uint val) { uint origidx = 0; - uint32 *r = NULL; + volatile uint32 *r = NULL; uint w; uint intr_val = 0; bool fast = FALSE; @@ -399,7 +396,7 @@ sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) SI_CORE_SIZE); ASSERT(GOODREGS(cores_info->regs[coreidx])); } - r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff); + r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff); } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ @@ -407,17 +404,18 @@ sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) /* Chipc registers are mapped at 12KB */ fast = TRUE; - r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff); + r = (volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_CCREGS_OFFSET + regoff); } else if (sii->pub.buscoreidx == coreidx) { /* pci registers are at either in the last 2KB of an 8KB window * or, in pcie and pci rev 13 at 8KB */ fast = TRUE; if (SI_FAST(sii)) - r = (uint32 *)((char *)sii->curmap + + r = (volatile uint32 *)((volatile char *)sii->curmap + PCI_16KB0_PCIREGS_OFFSET + regoff); else - r = (uint32 *)((char *)sii->curmap + + r = (volatile uint32 *)((volatile char *)sii->curmap + ((regoff >= SBCONFIGOFF) ? 
PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + regoff); @@ -431,7 +429,8 @@ sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) origidx = si_coreidx(&sii->pub); /* switch core */ - r = (uint32*) ((uchar*)sb_setcoreidx(&sii->pub, coreidx) + regoff); + r = (volatile uint32*) ((volatile uchar*)sb_setcoreidx(&sii->pub, coreidx) + + regoff); } ASSERT(r != NULL); @@ -450,12 +449,7 @@ sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) if (regoff >= SBCONFIGOFF) w = R_SBREG(sii, r); else { - if ((CHIPID(sii->pub.chip) == BCM5354_CHIP_ID) && - (coreidx == SI_CC_IDX) && - (regoff == OFFSETOF(chipcregs_t, watchdog))) { - w = val; - } else - w = R_REG(sii->osh, r); + w = R_REG(sii->osh, r); } if (!fast) { @@ -478,10 +472,10 @@ sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) * For accessing registers that would need a core switch, this function will return * NULL. */ -uint32 * +volatile uint32 * sb_corereg_addr(si_t *sih, uint coreidx, uint regoff) { - uint32 *r = NULL; + volatile uint32 *r = NULL; bool fast = FALSE; si_info_t *sii = SI_INFO(sih); si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; @@ -501,7 +495,7 @@ sb_corereg_addr(si_t *sih, uint coreidx, uint regoff) SI_CORE_SIZE); ASSERT(GOODREGS(cores_info->regs[coreidx])); } - r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff); + r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff); } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ @@ -509,17 +503,18 @@ sb_corereg_addr(si_t *sih, uint coreidx, uint regoff) /* Chipc registers are mapped at 12KB */ fast = TRUE; - r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff); + r = (volatile uint32 *)((volatile char *)sii->curmap + + PCI_16KB0_CCREGS_OFFSET + regoff); } else if (sii->pub.buscoreidx == coreidx) { /* pci registers are at either in the last 2KB of an 8KB window * or, 
in pcie and pci rev 13 at 8KB */ fast = TRUE; if (SI_FAST(sii)) - r = (uint32 *)((char *)sii->curmap + + r = (volatile uint32 *)((volatile char *)sii->curmap + PCI_16KB0_PCIREGS_OFFSET + regoff); else - r = (uint32 *)((char *)sii->curmap + + r = (volatile uint32 *)((volatile char *)sii->curmap + ((regoff >= SBCONFIGOFF) ? PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + regoff); @@ -541,7 +536,8 @@ sb_corereg_addr(si_t *sih, uint coreidx, uint regoff) */ #define SB_MAXBUSES 2 static uint -_sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores) +_sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus, + uint32 sbba, uint numcores) { uint next; uint ncc = 0; @@ -587,9 +583,7 @@ _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint num /* Older chips */ uint chip = CHIPID(sii->pub.chip); - if (chip == BCM4306_CHIP_ID) /* < 4306c0 */ - numcores = 6; - else if (chip == BCM4704_CHIP_ID) + if (chip == BCM4704_CHIP_ID) numcores = 9; else if (chip == BCM5365_CHIP_ID) numcores = 7; @@ -633,11 +627,12 @@ _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint num /* scan the sb enumerated space to identify all cores */ void -sb_scan(si_t *sih, void *regs, uint devid) +sb_scan(si_t *sih, volatile void *regs, uint devid) { uint32 origsba; sbconfig_t *sb; si_info_t *sii = SI_INFO(sih); + BCM_REFERENCE(devid); sb = REGS2SB(sii->curmap); @@ -657,7 +652,7 @@ sb_scan(si_t *sih, void *regs, uint devid) * must be called with interrupts off. * Moreover, callers should keep interrupts off during switching out of and back to d11 core */ -void * +volatile void * sb_setcoreidx(si_t *sih, uint coreidx) { si_info_t *sii = SI_INFO(sih); @@ -680,12 +675,12 @@ sb_setcoreidx(si_t *sih, uint coreidx) /* This function changes the logical "focus" to the indicated core. * Return the current core's virtual address. 
*/ -static void * +static volatile void * _sb_setcoreidx(si_info_t *sii, uint coreidx) { si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint32 sbaddr = cores_info->coresba[coreidx]; - void *regs; + volatile void *regs; switch (BUSTYPE(sii->pub.bustype)) { case SI_BUS: @@ -811,7 +806,6 @@ void sb_commit(si_t *sih) { si_info_t *sii = SI_INFO(sih); - si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint origidx; uint intr_val = 0; @@ -973,7 +967,6 @@ uint32 sb_set_initiator_to(si_t *sih, uint32 to, uint idx) { si_info_t *sii = SI_INFO(sih); - si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint origidx; uint intr_val = 0; uint32 tmp, ret = 0xffffffff; diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/siutils.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/siutils.c index 6f461e68c743..18fd64a7d5d3 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/siutils.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/siutils.c @@ -2,7 +2,7 @@ * Misc utility routines for accessing chip-specific features * of the SiliconBackplane-based Broadcom chips. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -25,7 +25,7 @@ * * <> * - * $Id: siutils.c 552034 2015-04-24 19:00:35Z $ + * $Id: siutils.c 668442 2016-11-03 08:42:43Z $ */ #include @@ -37,6 +37,7 @@ #include #include #include +#include #ifdef BCMPCIEDEV #include #endif /* BCMPCIEDEV */ @@ -61,8 +62,17 @@ #ifdef HNDGCI #include #endif /* HNDGCI */ +#ifdef BCMULP +#include +#endif /* BCMULP */ + #include "siutils_priv.h" +#ifdef SECI_UART +/* Defines the set of GPIOs to be used for SECI UART if not specified in NVRAM */ +#define DEFAULT_SECI_UART_PINMUX_43430 0x0102 +static bool force_seci_clk = 0; +#endif /* SECI_UART */ /** * A set of PMU registers is clocked in the ILP domain, which has an implication on register write @@ -79,26 +89,26 @@ (regoff) == OFFSETOF(chipcregs_t, pmuwatchdog) || \ (regoff) == OFFSETOF(chipcregs_t, res_req_timer)) +#define GCI_FEM_CTRL_WAR 0x11111111 + /* local prototypes */ -static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, +static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, volatile void *regs, uint bustype, void *sdh, char **vars, uint *varsz); static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh); static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, - uint *origidx, void *regs); + uint *origidx, volatile void *regs); static bool si_pmu_is_ilp_sensitive(uint32 idx, uint regoff); -#ifdef BCMLTECOEX -static void si_config_gcigpio(si_t *sih, uint32 gci_pos, uint8 gcigpio, - uint8 gpioctl_mask, uint8 gpioctl_val); -#endif /* BCMLTECOEX */ /* global variable to indicate reservation/release of gpio's */ static uint32 si_gpioreservation = 0; /* global flag to prevent shared resources from being initialized multiple times in 
si_attach() */ +static bool si_onetimeinit = FALSE; + #ifdef SR_DEBUG static const uint32 si_power_island_test_array[] = { 0x0000, 0x0001, 0x0010, 0x0011, @@ -110,10 +120,18 @@ static const uint32 si_power_island_test_array[] = { int do_4360_pcie2_war = 0; +#ifdef BCMULP +/* Variable to store boot_type: warm_boot/cold_boot/etc. */ +static int boot_type = 0; +#endif + /* global kernel resource */ static si_info_t ksii; static si_cores_info_t ksii_cores_info; +static const char rstr_rmin[] = "rmin"; +static const char rstr_rmax[] = "rmax"; + /** * Allocate an si handle. This function may be called multiple times. * @@ -126,7 +144,7 @@ static si_cores_info_t ksii_cores_info; * varsz - pointer to int to return the size of the vars */ si_t * -si_attach(uint devid, osl_t *osh, void *regs, +si_attach(uint devid, osl_t *osh, volatile void *regs, uint bustype, void *sdh, char **vars, uint *varsz) { si_info_t *sii; @@ -157,7 +175,7 @@ si_attach(uint devid, osl_t *osh, void *regs, } -static uint32 wd_msticks; /* watchdog timer ticks normalized to ms */ +static uint32 wd_msticks; /**< watchdog timer ticks normalized to ms */ /** generic kernel variant of si_attach() */ si_t * @@ -185,24 +203,27 @@ si_kattach(osl_t *osh) /* save ticks normalized to ms for si_watchdog_ms() */ if (PMUCTL_ENAB(&ksii.pub)) { + { /* based on 32KHz ILP clock */ wd_msticks = 32; + } } else { wd_msticks = ALP_CLOCK / 1000; } ksii_attached = TRUE; SI_MSG(("si_kattach done. 
ccrev = %d, wd_msticks = %d\n", - ksii.pub.ccrev, wd_msticks)); + CCREV(ksii.pub.ccrev), wd_msticks)); } return &ksii.pub; } - static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh) { + BCM_REFERENCE(sdh); + BCM_REFERENCE(devid); /* need to set memseg flag for CF card first before any sb registers access */ if (BUSTYPE(bustype) == PCMCIA_BUS) sii->memseg = TRUE; @@ -270,22 +291,24 @@ si_get_pmu_reg_addr(si_t *sih, uint32 offset) pmuaddr = SI_ENUM_BASE + offset; done: - SI_MSG(("%s: pmuaddr: %x\n", __FUNCTION__, pmuaddr)); + printf("%s: addrRET: %x\n", __FUNCTION__, pmuaddr); return pmuaddr; } static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, - uint *origidx, void *regs) + uint *origidx, volatile void *regs) { si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; bool pci, pcie, pcie_gen2 = FALSE; uint i; uint pciidx, pcieidx, pcirev, pcierev; +#if defined(BCM_BACKPLANE_TIMEOUT) || defined(AXI_TIMEOUTS) /* first, enable backplane timeouts */ - if (CHIPTYPE(sii->pub.socitype) == SOCI_AI) - ai_enable_backplane_timeouts(&sii->pub); + si_slave_wrapper_add(&sii->pub); +#endif + sii->curidx = 0; cc = si_setcoreidx(&sii->pub, SI_CC_IDX); ASSERT((uintptr)cc); @@ -294,14 +317,14 @@ si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, sii->pub.ccrev = (int)si_corerev(&sii->pub); /* get chipcommon chipstatus */ - if (sii->pub.ccrev >= 11) + if (CCREV(sii->pub.ccrev) >= 11) sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus); /* get chipcommon capabilites */ sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities); /* get chipcommon extended capabilities */ - if (sii->pub.ccrev >= 35) + if (CCREV(sii->pub.ccrev) >= 35) sii->pub.cccaps_ext = R_REG(sii->osh, &cc->capabilities_ext); /* get pmu rev and caps */ @@ -309,10 +332,21 @@ si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, if (AOB_ENAB(&sii->pub)) { uint pmucoreidx; pmuregs_t *pmu; + struct 
si_pub *sih = &sii->pub; + pmucoreidx = si_findcoreidx(&sii->pub, PMU_CORE_ID, 0); + if (!GOODIDX(pmucoreidx)) { + SI_ERROR(("si_buscore_setup: si_findcoreidx failed\n")); + return FALSE; + } + pmu = si_setcoreidx(&sii->pub, pmucoreidx); sii->pub.pmucaps = R_REG(sii->osh, &pmu->pmucapabilities); si_setcoreidx(&sii->pub, SI_CC_IDX); + + sii->pub.gcirev = si_corereg(sih, + GCI_CORE_IDX(sih), + GCI_OFFSETOF(sih, gci_corecaps0), 0, 0) & GCI_CAP0_REV_MASK; } else sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities); @@ -320,7 +354,7 @@ si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, } SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n", - sii->pub.ccrev, sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev, + CCREV(sii->pub.ccrev), sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev, sii->pub.pmucaps)); /* figure out bus/orignal core idx */ @@ -341,7 +375,7 @@ si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, /* Display cores found */ SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n", - i, cid, crev, cores_info->coresba[i], cores_info->regs[i])); + i, cid, crev, cores_info->coresba[i], cores_info->regs[i])); if (BUSTYPE(bustype) == SI_BUS) { /* now look at the chipstatus register to figure the pacakge */ @@ -349,9 +383,10 @@ si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, if (cid == PCIE2_CORE_ID) { if (BCM43602_CHIP(sii->pub.chip) || (CHIPID(sii->pub.chip) == BCM4365_CHIP_ID) || + (CHIPID(sii->pub.chip) == BCM4347_CHIP_ID) || (CHIPID(sii->pub.chip) == BCM4366_CHIP_ID) || - ((CHIPID(sii->pub.chip) == BCM4345_CHIP_ID || - CHIPID(sii->pub.chip) == BCM43454_CHIP_ID) && + ((BCM4345_CHIP(sii->pub.chip) || + BCM4349_CHIP(sii->pub.chip)) && CST4345_CHIPMODE_PCIE(sii->pub.chipst))) { pcieidx = i; pcierev = crev; @@ -360,8 +395,7 @@ si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, } } - } - else if (BUSTYPE(bustype) == PCI_BUS) 
{ + } else if (BUSTYPE(bustype) == PCI_BUS) { if (cid == PCI_CORE_ID) { pciidx = i; pcirev = crev; @@ -427,10 +461,6 @@ si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype, sii->pub.buscorerev)); - if (BUSTYPE(sii->pub.bustype) == SI_BUS && (CHIPID(sii->pub.chip) == BCM4712_CHIP_ID) && - (sii->pub.chippkg != BCM4712LARGE_PKG_ID) && (CHIPREV(sii->pub.chiprev) <= 3)) - OR_REG(sii->osh, &cc->slow_clk_ctl, SCC_SS_XTAL); - #if defined(BCMSDIO) /* Make sure any on-chip ARM is off (in case strapping is wrong), or downloaded code was @@ -452,6 +482,7 @@ si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, + uint16 si_chipid(si_t *sih) { @@ -460,6 +491,7 @@ si_chipid(si_t *sih) return (sii->chipnew) ? sii->chipnew : sih->chip; } +/* CHIP_ID's being mapped here should not be used anywhere else in the code */ static void si_chipid_fixup(si_t *sih) { @@ -471,20 +503,41 @@ si_chipid_fixup(si_t *sih) sii->chipnew = sih->chip; /* save it */ sii->pub.chip = BCM43570_CHIP_ID; /* chip class */ break; + case BCM43562_CHIP_ID: case BCM4358_CHIP_ID: case BCM43566_CHIP_ID: sii->chipnew = sih->chip; /* save it */ sii->pub.chip = BCM43569_CHIP_ID; /* chip class */ break; case BCM4356_CHIP_ID: + case BCM4371_CHIP_ID: sii->chipnew = sih->chip; /* save it */ sii->pub.chip = BCM4354_CHIP_ID; /* chip class */ break; + case BCM4357_CHIP_ID: + case BCM4361_CHIP_ID: + sii->chipnew = sih->chip; /* save it */ + sii->pub.chip = BCM4347_CHIP_ID; /* chip class */ + break; default: break; } } +#ifdef BCMULP +static void +si_check_boot_type(si_t *sih, osl_t *osh) +{ + if (sih->pmurev >= 30) { + boot_type = PMU_REG_NEW(sih, swscratch, 0, 0); + } else { + boot_type = CHIPC_REG(sih, flashdata, 0, 0); + } + + SI_ERROR(("%s: boot_type: 0x%08x\n", __func__, boot_type)); +} +#endif /* BCMULP */ + /** * Allocate an si handle. This function may be called multiple times. 
* @@ -492,7 +545,7 @@ si_chipid_fixup(si_t *sih) * function set 'vars' to NULL. */ static si_info_t * -si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, +si_doattach(si_info_t *sii, uint devid, osl_t *osh, volatile void *regs, uint bustype, void *sdh, char **vars, uint *varsz) { struct si_pub *sih = &sii->pub; @@ -508,12 +561,22 @@ si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, savewin = 0; sih->buscoreidx = BADIDX; + sii->device_removed = FALSE; sii->curmap = regs; sii->sdh = sdh; sii->osh = osh; sii->second_bar0win = ~0x0; +#if defined(BCM_BACKPLANE_TIMEOUT) + sih->err_info = MALLOCZ(osh, sizeof(si_axi_error_info_t)); + if (sih->err_info == NULL) { + SI_ERROR(("%s: %d bytes MALLOC FAILED", + __FUNCTION__, sizeof(si_axi_error_info_t))); + return NULL; + } +#endif /* BCM_BACKPLANE_TIMEOUT */ + /* check to see if we are a si core mimic'ing a pci core */ if ((bustype == PCI_BUS) && @@ -576,10 +639,6 @@ si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, si_chipid_fixup(sih); - if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && CHIPREV(sih->chiprev == 0) && - (sih->chippkg != BCM4329_289PIN_PKG_ID)) { - sih->chippkg = BCM4329_182PIN_PKG_ID; - } sih->issim = IS_SIM(sih->chippkg); /* scan for cores */ @@ -593,6 +652,16 @@ si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, else SI_MSG(("Found chip type NAI (0x%08x)\n", w)); /* pass chipc address instead of original core base */ + + sii->axi_wrapper = (axi_wrapper_t *)MALLOCZ(sii->osh, + (sizeof(axi_wrapper_t) * SI_MAX_AXI_WRAPPERS)); + + if (sii->axi_wrapper == NULL) { + SI_ERROR(("%s: %zu bytes MALLOC Failed", __FUNCTION__, + (sizeof(axi_wrapper_t) * SI_MAX_AXI_WRAPPERS))); + return NULL; + } + ai_scan(&sii->pub, (void *)(uintptr)cc, devid); } else if (CHIPTYPE(sii->pub.socitype) == SOCI_UBUS) { SI_MSG(("Found chip type UBUS (0x%08x), chip id = 0x%4x\n", w, sih->chip)); @@ -613,17 +682,18 @@ si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, 
SI_ERROR(("si_doattach: si_buscore_setup failed\n")); goto exit; } +#ifdef BCMULP + si_check_boot_type(sih, osh); + + if (ulp_module_init(osh, sih) != BCME_OK) { + ULP_ERR(("%s: err in ulp_module_init\n", __FUNCTION__)); + goto exit; + } +#endif /* BCMULP */ #if !defined(_CFEZ_) || defined(CFG_WL) - if (CHIPID(sih->chip) == BCM4322_CHIP_ID && (((sih->chipst & CST4322_SPROM_OTP_SEL_MASK) - >> CST4322_SPROM_OTP_SEL_SHIFT) == (CST4322_OTP_PRESENT | - CST4322_SPROM_PRESENT))) { - SI_ERROR(("%s: Invalid setting: both SPROM and OTP strapped.\n", __FUNCTION__)); - return NULL; - } - /* assume current core is CC */ - if ((sii->pub.ccrev == 0x25) && ((CHIPID(sih->chip) == BCM43236_CHIP_ID || + if ((CCREV(sii->pub.ccrev) == 0x25) && ((CHIPID(sih->chip) == BCM43236_CHIP_ID || CHIPID(sih->chip) == BCM43235_CHIP_ID || CHIPID(sih->chip) == BCM43234_CHIP_ID || CHIPID(sih->chip) == BCM43238_CHIP_ID) && @@ -668,9 +738,10 @@ si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, pvars = NULL; BCM_REFERENCE(pvars); + if (!si_onetimeinit) { - if (sii->pub.ccrev >= 20) { + if (CCREV(sii->pub.ccrev) >= 20) { uint32 gpiopullup = 0, gpiopulldown = 0; cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); ASSERT(cc != NULL); @@ -688,6 +759,7 @@ si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, si_setcoreidx(sih, origidx); } + } /* clear any previous epidiag-induced target abort */ ASSERT(!si_taclear(sih, FALSE)); @@ -727,6 +799,19 @@ si_detach(si_t *sih) #endif /* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */ MFREE(sii->osh, cores_info, sizeof(si_cores_info_t)); +#if defined(BCM_BACKPLANE_TIMEOUT) + if (sih->err_info) { + MFREE(sii->osh, sih->err_info, sizeof(si_axi_error_info_t)); + sii->pub.err_info = NULL; + } +#endif /* BCM_BACKPLANE_TIMEOUT */ + + if (sii->axi_wrapper) { + MFREE(sii->osh, sii->axi_wrapper, + (sizeof(axi_wrapper_t) * SI_MAX_AXI_WRAPPERS)); + sii->axi_wrapper = NULL; + } + #if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS) if (sii != &ksii) #endif /* 
!BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */ @@ -856,7 +941,7 @@ si_coreidx(si_t *sih) return sii->curidx; } -void * +volatile void * si_d11_switch_addrbase(si_t *sih, uint coreunit) { return si_setcore(sih, D11_CORE_ID, coreunit); @@ -924,7 +1009,6 @@ si_corerev(si_t *sih) } } - /* return index of coreid or BADIDX if not found */ uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit) @@ -951,18 +1035,27 @@ si_findcoreidx(si_t *sih, uint coreid, uint coreunit) uint si_numcoreunits(si_t *sih, uint coreid) { - si_info_t *sii = SI_INFO(sih); - si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; - uint found = 0; - uint i; + if ((CHIPID(sih->chip) == BCM4347_CHIP_ID) && + (CHIPREV(sih->chiprev) == 0)) { + /* + * 4347TC2 does not have Aux core. + * fixed to 1 here because EROM (using 4349 EROM) has two entries + */ + return 1; + } else { + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint found = 0; + uint i; - for (i = 0; i < sii->numcores; i++) { - if (cores_info->coreid[i] == coreid) { - found++; + for (i = 0; i < sii->numcores; i++) { + if (cores_info->coreid[i] == coreid) { + found++; + } } - } - return found; + return found; + } } /** return total D11 coreunits */ @@ -1008,7 +1101,7 @@ si_wrapperregs(si_t *sih) } /** return current register mapping */ -void * +volatile void * si_coreregs(si_t *sih) { si_info_t *sii; @@ -1019,12 +1112,13 @@ si_coreregs(si_t *sih) return (sii->curmap); } + /** * This function changes logical "focus" to the indicated core; * must be called with interrupts off. 
* Moreover, callers should keep interrupts off during switching out of and back to d11 core */ -void * +volatile void * si_setcore(si_t *sih, uint coreid, uint coreunit) { uint idx; @@ -1045,7 +1139,7 @@ si_setcore(si_t *sih, uint coreid, uint coreunit) } } -void * +volatile void * si_setcoreidx(si_t *sih, uint coreidx) { if (CHIPTYPE(sih->socitype) == SOCI_SB) @@ -1061,12 +1155,11 @@ si_setcoreidx(si_t *sih, uint coreidx) } /** Turn off interrupt as required by sb_setcore, before switch core */ -void * +volatile void * si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val) { - void *cc; + volatile void *cc; si_info_t *sii = SI_INFO(sih); - si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; if (SI_FAST(sii)) { /* Overloading the origidx variable to remember the coreid, @@ -1075,9 +1168,9 @@ si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val) */ *origidx = coreid; if (coreid == CC_CORE_ID) - return (void *)CCREGS_FAST(sii); - else if (coreid == sih->buscoretype) - return (void *)PCIEREGS(sii); + return (volatile void *)CCREGS_FAST(sii); + else if (coreid == BUSCORETYPE(sih->buscoretype)) + return (volatile void *)PCIEREGS(sii); } INTR_OFF(sii, *intr_val); *origidx = sii->curidx; @@ -1092,9 +1185,8 @@ void si_restore_core(si_t *sih, uint coreid, uint intr_val) { si_info_t *sii = SI_INFO(sih); - si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; - if (SI_FAST(sii) && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype))) + if (SI_FAST(sii) && ((coreid == CC_CORE_ID) || (coreid == BUSCORETYPE(sih->buscoretype)))) return; si_setcoreidx(sih, coreid); @@ -1199,6 +1291,20 @@ si_core_sflags(si_t *sih, uint32 mask, uint32 val) } } +void +si_commit(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_commit(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_AI || CHIPTYPE(sih->socitype) == SOCI_NAI) + ; + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + ; + else { + ASSERT(0); + } +} + bool 
si_iscoreup(si_t *sih) { @@ -1251,13 +1357,20 @@ static int si_backplane_addr_sane(uint addr, uint size) bcmerror = BCME_ERROR; } } - return bcmerror; } + +void +si_invalidate_second_bar0win(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + sii->second_bar0win = ~0x0; +} + uint si_backplane_access(si_t *sih, uint addr, uint size, uint *val, bool read) { - uint32 *r = NULL; + volatile uint32 *r = NULL; uint32 region = 0; si_info_t *sii = SI_INFO(sih); @@ -1286,31 +1399,30 @@ si_backplane_access(si_t *sih, uint addr, uint size, uint *val, bool read) * PCI_SECOND_BAR0_OFFSET : secondar bar-0 offset * regoff : actual reg offset */ - r = (uint32 *)((char *)sii->curmap + PCI_SECOND_BAR0_OFFSET + addr); + r = (volatile uint32 *)((volatile char *)sii->curmap + PCI_SECOND_BAR0_OFFSET + addr); SI_VMSG(("si curmap %p region %x regaddr %x effective addr %p READ %d\n", - (char*)sii->curmap, region, addr, r, read)); + (volatile char*)sii->curmap, region, addr, r, read)); switch (size) { case sizeof(uint8) : if (read) - *val = R_REG(sii->osh, (uint8*)r); + *val = R_REG(sii->osh, (volatile uint8*)r); else - W_REG(sii->osh, (uint8*)r, *val); + W_REG(sii->osh, (volatile uint8*)r, *val); break; case sizeof(uint16) : if (read) - *val = R_REG(sii->osh, (uint16*)r); + *val = R_REG(sii->osh, (volatile uint16*)r); else - W_REG(sii->osh, (uint16*)r, *val); + W_REG(sii->osh, (volatile uint16*)r, *val); break; case sizeof(uint32) : if (read) - *val = R_REG(sii->osh, (uint32*)r); + *val = R_REG(sii->osh, (volatile uint32*)r); else - W_REG(sii->osh, (uint32*)r, *val); + W_REG(sii->osh, (volatile uint32*)r, *val); break; - default : SI_ERROR(("Invalid size %d \n", size)); return (BCME_ERROR); @@ -1354,7 +1466,7 @@ si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val) int pmustatus_offset; /* prevent backplane stall on double write to 'ILP domain' registers in the PMU */ - if (mask != 0 && sih->pmurev >= 22 && + if (mask != 0 && PMUREV(sih->pmurev) >= 22 && 
si_pmu_is_ilp_sensitive(idx, regoff)) { pmustatus_offset = AOB_ENAB(sih) ? OFFSETOF(pmuregs_t, pmustatus) : OFFSETOF(chipcregs_t, pmustatus); @@ -1375,7 +1487,7 @@ si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val) * For accessing registers that would need a core switch, this function will return * NULL. */ -uint32 * +volatile uint32 * si_corereg_addr(si_t *sih, uint coreidx, uint regoff) { if (CHIPTYPE(sih->socitype) == SOCI_SB) @@ -1434,6 +1546,58 @@ si_corebist(si_t *sih) return result; } +uint +si_num_slaveports(si_t *sih, uint coreid) +{ + uint idx = si_findcoreidx(sih, coreid, 0); + uint num = 0; + + if ((CHIPTYPE(sih->socitype) == SOCI_AI)) + num = ai_num_slaveports(sih, idx); + + return num; +} + +uint32 +si_get_slaveport_addr(si_t *sih, uint asidx, uint core_id, uint coreunit) +{ + si_info_t *sii = SI_INFO(sih); + uint origidx = sii->curidx; + uint32 addr = 0x0; + + if (!((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))) + goto done; + + si_setcore(sih, core_id, coreunit); + + addr = ai_addrspace(sih, asidx); + + si_setcoreidx(sih, origidx); + +done: + return addr; +} + +uint32 +si_get_d11_slaveport_addr(si_t *sih, uint asidx, uint coreunit) +{ + si_info_t *sii = SI_INFO(sih); + uint origidx = sii->curidx; + uint32 addr = 0x0; + + if (!((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))) + goto done; + + si_setcore(sih, D11_CORE_ID, coreunit); + + addr = ai_addrspace(sih, asidx); + + si_setcoreidx(sih, origidx); + +done: + return addr; +} + static uint32 factor6(uint32 x) { @@ -1541,7 +1705,6 @@ si_clock_rate(uint32 pll_type, uint32 n, uint32 m) return (clock); } - return 0; } /** @@ -1554,7 +1717,10 @@ si_chip_hostif(si_t *sih) uint hosti = 0; switch (CHIPID(sih->chip)) { - + case BCM43018_CHIP_ID: + case BCM43430_CHIP_ID: + hosti = CHIP_HOSTIF_SDIOMODE; + break; case BCM43012_CHIP_ID: hosti = CHIP_HOSTIF_SDIOMODE; break; @@ -1583,8 +1749,7 @@ si_chip_hostif(si_t *sih) hosti = 
CHIP_HOSTIF_PCIEMODE; break; - case BCM4345_CHIP_ID: - case BCM43454_CHIP_ID: + CASE_BCM4345_CHIP: if (CST4345_CHIPMODE_USB20D(sih->chipst) || CST4345_CHIPMODE_HSIC(sih->chipst)) hosti = CHIP_HOSTIF_USBMODE; else if (CST4345_CHIPMODE_SDIOD(sih->chipst)) @@ -1594,15 +1759,21 @@ si_chip_hostif(si_t *sih) break; case BCM4349_CHIP_GRPID: + case BCM53573_CHIP_GRPID: if (CST4349_CHIPMODE_SDIOD(sih->chipst)) hosti = CHIP_HOSTIF_SDIOMODE; else if (CST4349_CHIPMODE_PCIE(sih->chipst)) hosti = CHIP_HOSTIF_PCIEMODE; break; + case BCM4347_CHIP_ID: + if (CST4347_CHIPMODE_SDIOD(sih->chipst)) + hosti = CHIP_HOSTIF_SDIOMODE; + else if (CST4347_CHIPMODE_PCIE(sih->chipst)) + hosti = CHIP_HOSTIF_PCIEMODE; + break; case BCM4350_CHIP_ID: case BCM4354_CHIP_ID: - case BCM4356_CHIP_ID: case BCM43556_CHIP_ID: case BCM43558_CHIP_ID: case BCM43566_CHIP_ID: @@ -1635,20 +1806,11 @@ void si_watchdog(si_t *sih, uint ticks) { uint nb, maxt; + uint pmu_wdt = 1; - if (PMUCTL_ENAB(sih)) { -#if !defined(_CFEZ_) || defined(CFG_WL) - if ((CHIPID(sih->chip) == BCM4319_CHIP_ID) && - (CHIPREV(sih->chiprev) == 0) && (ticks != 0)) { - si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), ~0, 0x2); - si_setcore(sih, USB20D_CORE_ID, 0); - si_core_disable(sih, 1); - si_setcore(sih, CC_CORE_ID, 0); - } -#endif - - nb = (sih->ccrev < 26) ? 16 : ((sih->ccrev >= 37) ? 32 : 24); + if (PMUCTL_ENAB(sih) && pmu_wdt) { + nb = (CCREV(sih->ccrev) < 26) ? 16 : ((CCREV(sih->ccrev) >= 37) ? 32 : 24); /* The mips compiler uses the sllv instruction, * so we specially handle the 32-bit case. 
*/ @@ -1708,14 +1870,14 @@ si_slowclk_src(si_info_t *sii) ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID); - if (sii->pub.ccrev < 6) { + if (CCREV(sii->pub.ccrev) < 6) { if ((BUSTYPE(sii->pub.bustype) == PCI_BUS) && (OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32)) & PCI_CFG_GPIO_SCS)) return (SCC_SS_PCI); else return (SCC_SS_XTAL); - } else if (sii->pub.ccrev < 10) { + } else if (CCREV(sii->pub.ccrev) < 10) { cc = (chipcregs_t *)si_setcoreidx(&sii->pub, sii->curidx); ASSERT(cc); return (R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_SS_MASK); @@ -1736,12 +1898,12 @@ si_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc) ASSERT(R_REG(sii->osh, &cc->capabilities) & CC_CAP_PWR_CTL); slowclk = si_slowclk_src(sii); - if (sii->pub.ccrev < 6) { + if (CCREV(sii->pub.ccrev) < 6) { if (slowclk == SCC_SS_PCI) return (max_freq ? (PCIMAXFREQ / 64) : (PCIMINFREQ / 64)); else return (max_freq ? (XTALMAXFREQ / 32) : (XTALMINFREQ / 32)); - } else if (sii->pub.ccrev < 10) { + } else if (CCREV(sii->pub.ccrev) < 10) { div = 4 * (((R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1); if (slowclk == SCC_SS_LPO) @@ -1779,7 +1941,7 @@ si_clkctl_setdelay(si_info_t *sii, void *chipcregs) pll_delay += XTAL_ON_DELAY; /* Starting with 4318 it is ILP that is used for the delays */ - slowmaxfreq = si_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? FALSE : TRUE, cc); + slowmaxfreq = si_slowclk_freq(sii, (CCREV(sii->pub.ccrev) >= 10) ? 
FALSE : TRUE, cc); pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000; fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000; @@ -1811,7 +1973,7 @@ si_clkctl_init(si_t *sih) ASSERT(cc != NULL); /* set all Instaclk chip ILP to 1 MHz */ - if (sih->ccrev >= 10) + if (CCREV(sih->ccrev) >= 10) SET_REG(sii->osh, &cc->system_clk_ctl, SYCC_CD_MASK, (ILP_DIV_1MHZ << SYCC_CD_SHIFT)); @@ -1825,7 +1987,7 @@ si_clkctl_init(si_t *sih) /** change logical "focus" to the gpio core for optimized access */ -void * +volatile void * si_gpiosetcore(si_t *sih) { return (si_setcoreidx(sih, SI_CC_IDX)); @@ -2004,11 +2166,25 @@ si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority) return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); } +uint32 +si_gpioeventintmask(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + uint regoff; + /* gpios could be shared on router platforms */ + if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? (si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + regoff = OFFSETOF(chipcregs_t, gpioeventintmask); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + /* assign the gpio to an led */ uint32 si_gpioled(si_t *sih, uint32 mask, uint32 val) { - if (sih->ccrev < 16) + if (CCREV(sih->ccrev) < 16) return 0xffffffff; /* gpio led powersave reg */ @@ -2019,7 +2195,7 @@ si_gpioled(si_t *sih, uint32 mask, uint32 val) uint32 si_gpiotimerval(si_t *sih, uint32 mask, uint32 gpiotimerval) { - if (sih->ccrev < 16) + if (CCREV(sih->ccrev) < 16) return 0xffffffff; return (si_corereg(sih, SI_CC_IDX, @@ -2031,7 +2207,7 @@ si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val) { uint offs; - if (sih->ccrev < 20) + if (CCREV(sih->ccrev) < 20) return 0xffffffff; offs = (updown ? 
OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup)); @@ -2043,7 +2219,7 @@ si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val) { uint offs; - if (sih->ccrev < 11) + if (CCREV(sih->ccrev) < 11) return 0xffffffff; if (regtype == GPIO_REGEVT) @@ -2063,7 +2239,7 @@ si_gpio_int_enable(si_t *sih, bool enable) { uint offs; - if (sih->ccrev < 11) + if (CCREV(sih->ccrev) < 11) return 0xffffffff; offs = OFFSETOF(chipcregs_t, intmask); @@ -2072,12 +2248,10 @@ si_gpio_int_enable(si_t *sih, bool enable) /** Return the size of the specified SYSMEM bank */ static uint -sysmem_banksize(si_info_t *sii, sysmemregs_t *regs, uint8 idx, uint8 mem_type) +sysmem_banksize(si_info_t *sii, sysmemregs_t *regs, uint8 idx) { uint banksize, bankinfo; - uint bankidx = idx | (mem_type << SYSMEM_BANKIDX_MEMTYPE_SHIFT); - - ASSERT(mem_type <= SYSMEM_MEMTYPE_DEVRAM); + uint bankidx = idx; W_REG(sii->osh, ®s->bankidx, bankidx); bankinfo = R_REG(sii->osh, ®s->bankinfo); @@ -2090,7 +2264,6 @@ uint32 si_sysmem_size(si_t *sih) { si_info_t *sii = SI_INFO(sih); - si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint origidx; uint intr_val = 0; @@ -2099,7 +2272,7 @@ si_sysmem_size(si_t *sih) uint32 coreinfo; uint memsize = 0; uint8 i; - uint nb; + uint nb, nrb; /* Block ints and save current core */ INTR_OFF(sii, intr_val); @@ -2114,9 +2287,12 @@ si_sysmem_size(si_t *sih) si_core_reset(sih, 0, 0); coreinfo = R_REG(sii->osh, ®s->coreinfo); - nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; + /* Number of ROM banks, SW need to skip the ROM banks. 
*/ + nrb = (coreinfo & SYSMEM_SRCI_ROMNB_MASK) >> SYSMEM_SRCI_ROMNB_SHIFT; + + nb = (coreinfo & SYSMEM_SRCI_SRNB_MASK) >> SYSMEM_SRCI_SRNB_SHIFT; for (i = 0; i < nb; i++) - memsize += sysmem_banksize(sii, regs, i, SYSMEM_MEMTYPE_RAM); + memsize += sysmem_banksize(sii, regs, i + nrb); si_setcoreidx(sih, origidx); @@ -2144,7 +2320,6 @@ socram_banksize(si_info_t *sii, sbsocramregs_t *regs, uint8 idx, uint8 mem_type) void si_socram_set_bankpda(si_t *sih, uint32 bankidx, uint32 bankpda) { si_info_t *sii = SI_INFO(sih); - si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint origidx; uint intr_val = 0; sbsocramregs_t *regs; @@ -2181,7 +2356,6 @@ void si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect, uint8 *remap) { si_info_t *sii = SI_INFO(sih); - si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint origidx; uint intr_val = 0; sbsocramregs_t *regs; @@ -2229,8 +2403,7 @@ si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect, uint8 *remap) (1 << SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT); } W_REG(sii->osh, ®s->bankinfo, bankinfo); - } - else if (i == 0) { + } else if (i == 0) { if (bankinfo & SOCRAM_BANKINFO_DEVRAMSEL_MASK) { *enable = 1; if (bankinfo & SOCRAM_BANKINFO_DEVRAMPRO_MASK) @@ -2255,7 +2428,6 @@ bool si_socdevram_remap_isenb(si_t *sih) { si_info_t *sii = SI_INFO(sih); - si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint origidx; uint intr_val = 0; sbsocramregs_t *regs; @@ -2316,7 +2488,6 @@ uint32 si_socdevram_size(si_t *sih) { si_info_t *sii = SI_INFO(sih); - si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint origidx; uint intr_val = 0; uint32 memsize = 0; @@ -2363,7 +2534,6 @@ uint32 si_socdevram_remap_size(si_t *sih) { si_info_t *sii = SI_INFO(sih); - si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint origidx; uint intr_val = 0; uint32 memsize = 0, banksz; @@ -2429,7 +2599,6 @@ uint32 si_socram_size(si_t *sih) { si_info_t *sii = SI_INFO(sih); - 
si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint origidx; uint intr_val = 0; @@ -2498,10 +2667,9 @@ uint32 si_tcm_size(si_t *sih) { si_info_t *sii = SI_INFO(sih); - si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint origidx; uint intr_val = 0; - uint8 *regs; + volatile uint8 *regs; bool wasup; uint32 corecap; uint memsize = 0; @@ -2510,9 +2678,9 @@ si_tcm_size(si_t *sih) uint32 totb = 0; uint32 bxinfo = 0; uint32 idx = 0; - uint32 *arm_cap_reg; - uint32 *arm_bidx; - uint32 *arm_binfo; + volatile uint32 *arm_cap_reg; + volatile uint32 *arm_bidx; + volatile uint32 *arm_binfo; /* Block ints and save current core */ INTR_OFF(sii, intr_val); @@ -2528,15 +2696,15 @@ si_tcm_size(si_t *sih) if (!(wasup = si_iscoreup(sih))) si_core_reset(sih, SICF_CPUHALT, SICF_CPUHALT); - arm_cap_reg = (uint32 *)(regs + SI_CR4_CAP); + arm_cap_reg = (volatile uint32 *)(regs + SI_CR4_CAP); corecap = R_REG(sii->osh, arm_cap_reg); nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT; nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT; totb = nab + nbb; - arm_bidx = (uint32 *)(regs + SI_CR4_BANKIDX); - arm_binfo = (uint32 *)(regs + SI_CR4_BANKINFO); + arm_bidx = (volatile uint32 *)(regs + SI_CR4_BANKIDX); + arm_binfo = (volatile uint32 *)(regs + SI_CR4_BANKINFO); for (idx = 0; idx < totb; idx++) { W_REG(sii->osh, arm_bidx, idx); @@ -2576,7 +2744,6 @@ uint32 si_socram_srmem_size(si_t *sih) { si_info_t *sii = SI_INFO(sih); - si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint origidx; uint intr_val = 0; @@ -2590,7 +2757,8 @@ si_socram_srmem_size(si_t *sih) return (32 * 1024); } - if (CHIPID(sih->chip) == BCM43430_CHIP_ID) { + if (CHIPID(sih->chip) == BCM43430_CHIP_ID || + CHIPID(sih->chip) == BCM43018_CHIP_ID) { return (64 * 1024); } @@ -2636,7 +2804,6 @@ void si_btcgpiowar(si_t *sih) { si_info_t *sii = SI_INFO(sih); - si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; uint origidx; uint intr_val = 0; 
chipcregs_t *cc; @@ -2667,7 +2834,6 @@ void si_chipcontrl_btshd0_4331(si_t *sih, bool on) { si_info_t *sii = SI_INFO(sih); - si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; chipcregs_t *cc; uint origidx; uint32 val; @@ -2815,22 +2981,6 @@ si_clk_srom4365(si_t *sih) si_setcoreidx(sih, origidx); } -void -si_d11rsdb_core1_alt_reg_clk_dis(si_t *sih) -{ -#if defined(WLRSDB) && !defined(WLRSDB_DISABLED) - ai_d11rsdb_core1_alt_reg_clk_dis(sih); -#endif /* defined(WLRSDB) && !defined(WLRSDB_DISABLED) */ -} - -void -si_d11rsdb_core1_alt_reg_clk_en(si_t *sih) -{ -#if defined(WLRSDB) && !defined(WLRSDB_DISABLED) - ai_d11rsdb_core1_alt_reg_clk_en(sih); -#endif /* defined(WLRSDB) && !defined(WLRSDB_DISABLED) */ -} - void si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl) { @@ -2968,11 +3118,24 @@ si_btcombo_43228_war(si_t *sih) si_setcoreidx(sih, origidx); } +/** cache device removed state */ +void si_set_device_removed(si_t *sih, bool status) +{ + si_info_t *sii = SI_INFO(sih); + + sii->device_removed = status; +} + /** check if the device is removed */ bool si_deviceremoved(si_t *sih) { uint32 w; + si_info_t *sii = SI_INFO(sih); + + if (sii->device_removed) { + return TRUE; + } switch (BUSTYPE(sih->bustype)) { case PCI_BUS: @@ -2985,10 +3148,21 @@ si_deviceremoved(si_t *sih) return FALSE; } +bool +si_is_warmboot(void) +{ + +#ifdef BCMULP + return (boot_type == WARM_BOOT); +#else + return FALSE; +#endif +} + bool si_is_sprom_available(si_t *sih) { - if (sih->ccrev >= 31) { + if (CCREV(sih->ccrev) >= 31) { si_info_t *sii; uint origidx; chipcregs_t *cc; @@ -3007,24 +3181,9 @@ si_is_sprom_available(si_t *sih) } switch (CHIPID(sih->chip)) { - case BCM4312_CHIP_ID: - return ((sih->chipst & CST4312_SPROM_OTP_SEL_MASK) != CST4312_OTP_SEL); - case BCM4325_CHIP_ID: - return (sih->chipst & CST4325_SPROM_SEL) != 0; - case BCM4322_CHIP_ID: case BCM43221_CHIP_ID: case BCM43231_CHIP_ID: - case BCM43222_CHIP_ID: case BCM43111_CHIP_ID: case BCM43112_CHIP_ID: - case 
BCM4342_CHIP_ID: { - uint32 spromotp; - spromotp = (sih->chipst & CST4322_SPROM_OTP_SEL_MASK) >> - CST4322_SPROM_OTP_SEL_SHIFT; - return (spromotp & CST4322_SPROM_PRESENT) != 0; - } - case BCM4329_CHIP_ID: - return (sih->chipst & CST4329_SPROM_SEL) != 0; - case BCM4315_CHIP_ID: - return (sih->chipst & CST4315_SPROM_SEL) != 0; - case BCM4319_CHIP_ID: - return (sih->chipst & CST4319_SPROM_SEL) != 0; + case BCM43018_CHIP_ID: + case BCM43430_CHIP_ID: + return FALSE; case BCM4336_CHIP_ID: case BCM43362_CHIP_ID: return (sih->chipst & CST4336_SPROM_PRESENT) != 0; @@ -3043,16 +3202,18 @@ si_is_sprom_available(si_t *sih) return ((sih->chipst & CST4324_SPROM_MASK) && !(sih->chipst & CST4324_SFLASH_MASK)); case BCM4335_CHIP_ID: - case BCM4345_CHIP_ID: - case BCM43454_CHIP_ID: + CASE_BCM4345_CHIP: return ((sih->chipst & CST4335_SPROM_MASK) && !(sih->chipst & CST4335_SFLASH_MASK)); case BCM4349_CHIP_GRPID: return (sih->chipst & CST4349_SPROM_PRESENT) != 0; + case BCM53573_CHIP_GRPID: + return FALSE; /* SPROM PRESENT is not defined for 53573 as of now */ + case BCM4347_CHIP_ID: + return (sih->chipst & CST4347_SPROM_PRESENT) != 0; break; case BCM4350_CHIP_ID: case BCM4354_CHIP_ID: - case BCM4356_CHIP_ID: case BCM43556_CHIP_ID: case BCM43558_CHIP_ID: case BCM43566_CHIP_ID: @@ -3131,7 +3292,6 @@ si_core_wrapperreg(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 uint origidx, intr_val = 0; uint ret_val; si_info_t *sii = SI_INFO(sih); - si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; origidx = si_coreidx(sih); @@ -3158,8 +3318,7 @@ si_pmu_res_req_timer_clr(si_t *sih) uint32 mask; mask = PRRT_REQ_ACTIVE | PRRT_INTEN | PRRT_HT_REQ; - if (CHIPID(sih->chip) != BCM4328_CHIP_ID) - mask <<= 14; + mask <<= 14; /* clear mask bits */ pmu_corereg(sih, SI_CC_IDX, res_req_timer, mask, 0); /* readback to ensure write completes */ @@ -3235,6 +3394,16 @@ si_survive_perst_war(si_t *sih, bool reset, uint32 sperst_mask, uint32 sperst_va #endif /* SURVIVE_PERST_ENAB */ } 
+/* Caller of this function should make sure is on PCIE core + * Used in pciedev.c. + */ +void +si_pcie_disable_oobselltr(si_t *sih) +{ + ASSERT(si_coreid(sih) == PCIE2_CORE_ID); + si_wrapperreg(sih, AI_OOBSELIND30, ~0, 0); +} + void si_pcie_ltr_war(si_t *sih) { @@ -3261,12 +3430,115 @@ si_pcie_prep_D3(si_t *sih, bool enter_D3) } +#ifdef BCM_BACKPLANE_TIMEOUT +uint32 +si_clear_backplane_to_fast(si_t *sih, void * addr) +{ + if (CHIPTYPE(sih->socitype) == SOCI_AI) { + return ai_clear_backplane_to_fast(sih, addr); + } + + return 0; +} + +const si_axi_error_info_t * si_get_axi_errlog_info(si_t * sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_AI) { + return (const si_axi_error_info_t *)sih->err_info; + } + + return NULL; +} + +void si_reset_axi_errlog_info(si_t * sih) +{ + sih->err_info->count = 0; +} +#endif /* BCM_BACKPLANE_TIMEOUT */ + +#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT) +uint32 +si_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void * wrap) +{ + if (CHIPTYPE(sih->socitype) == SOCI_AI) { + return ai_clear_backplane_to_per_core(sih, coreid, coreunit, wrap); + } + + return AXI_WRAP_STS_NONE; +} +#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */ + +uint32 +si_clear_backplane_to(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_AI) { + return ai_clear_backplane_to(sih); + } + + return 0; +} + +/* + * This routine adds the AXI timeouts for + * chipcommon, pcie and ARM slave wrappers + */ +void +si_slave_wrapper_add(si_t *sih) +{ +#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT) + /* Enable only for AXI */ + if (CHIPTYPE(sih->socitype) != SOCI_AI) { + return; + } + + if (CHIPID(sih->chip) == BCM4345_CHIP_ID && CHIPREV(sih->chiprev) >= 6) { + si_info_t *sii = SI_INFO(sih); + + int wrapper_idx = (int)sii->axi_num_wrappers - 1; + + ASSERT(wrapper_idx >= 0); /* axi_wrapper[] not initialised */ + do { + if (sii->axi_wrapper[wrapper_idx].wrapper_type == AI_SLAVE_WRAPPER && + sii->axi_wrapper[wrapper_idx].cid == 0xfff) { + 
sii->axi_wrapper[wrapper_idx].wrapper_addr = 0x1810b000; + break; + } + } while (wrapper_idx-- > 0); + ASSERT(wrapper_idx >= 0); /* all addresses valid for the chiprev under test */ + } + + /* All required slave wrappers are added in ai_scan */ + ai_enable_backplane_timeouts(sih); +#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */ +} + void si_pll_sr_reinit(si_t *sih) { } + +/* Programming d11 core oob settings for 4364 + * WARs for HW4364-237 and HW4364-166 +*/ +void +si_config_4364_d11_oob(si_t *sih, uint coreid) +{ + uint save_idx; + + save_idx = si_coreidx(sih); + si_setcore(sih, coreid, 0); + si_wrapperreg(sih, AI_OOBSELINC30, ~0, 0x81828180); + si_wrapperreg(sih, AI_OOBSELINC74, ~0, 0x87868183); + si_wrapperreg(sih, AI_OOBSELOUTB74, ~0, 0x84858484); + si_setcore(sih, coreid, 1); + si_wrapperreg(sih, AI_OOBSELINC30, ~0, 0x81828180); + si_wrapperreg(sih, AI_OOBSELINC74, ~0, 0x87868184); + si_wrapperreg(sih, AI_OOBSELOUTB74, ~0, 0x84868484); + si_setcoreidx(sih, save_idx); +} + void si_pll_closeloop(si_t *sih) { @@ -3276,6 +3548,7 @@ si_pll_closeloop(si_t *sih) /* disable PLL open loop operation */ switch (CHIPID(sih->chip)) { #ifdef SAVERESTORE + case BCM43018_CHIP_ID: case BCM43430_CHIP_ID: if (SR_ENAB() && sr_isenab(sih)) { /* read back the pll openloop state */ @@ -3295,3 +3568,158 @@ si_pll_closeloop(si_t *sih) } #endif } + +void +si_update_macclk_mul_fact(si_t *sih, uint32 mul_fact) +{ + si_info_t *sii = SI_INFO(sih); + sii->macclk_mul_fact = mul_fact; +} + +uint32 +si_get_macclk_mul_fact(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + return sii->macclk_mul_fact; +} + + +#if defined(BCMSRPWR) && !defined(BCMSRPWR_DISABLED) +bool _bcmsrpwr = TRUE; +#else +bool _bcmsrpwr = FALSE; +#endif + +uint32 +si_srpwr_request(si_t *sih, uint32 mask, uint32 val) +{ + uint32 r, offset = OFFSETOF(chipcregs_t, powerctl); /* Same 0x1e8 per core */ + uint cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? 
SI_CC_IDX : sih->buscoreidx; + + if (mask || val) { + mask <<= SRPWR_REQON_SHIFT; + val <<= SRPWR_REQON_SHIFT; + + r = ((si_corereg(sih, cidx, offset, 0, 0) & ~mask) | val); + r = si_corereg(sih, cidx, offset, ~0, r); + } else { + r = si_corereg(sih, cidx, offset, 0, 0); + } + + return r; +} + +uint32 +si_srpwr_stat_spinwait(si_t *sih, uint32 mask, uint32 val) +{ + uint32 r, offset = OFFSETOF(chipcregs_t, powerctl); /* Same 0x1e8 per core */ + uint cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? SI_CC_IDX : sih->buscoreidx; + + ASSERT(mask); + ASSERT(val); + + /* spinwait on pwrstatus */ + mask <<= SRPWR_STATUS_SHIFT; + val <<= SRPWR_STATUS_SHIFT; + + SPINWAIT(((si_corereg(sih, cidx, offset, 0, 0) & mask) != val), + PMU_MAX_TRANSITION_DLY); + ASSERT((si_corereg(sih, cidx, offset, 0, 0) & mask) == val); + + r = si_corereg(sih, cidx, offset, 0, 0) & mask; + r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK; + + return r; +} + +uint32 +si_srpwr_stat(si_t *sih) +{ + uint32 r, offset = OFFSETOF(chipcregs_t, powerctl); /* Same 0x1e8 per core */ + uint cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? SI_CC_IDX : sih->buscoreidx; + + r = si_corereg(sih, cidx, offset, 0, 0); + r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK; + + return r; +} + +uint32 +si_srpwr_domain(si_t *sih) +{ + uint32 r, offset = OFFSETOF(chipcregs_t, powerctl); /* Same 0x1e8 per core */ + uint cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? SI_CC_IDX : sih->buscoreidx; + + r = si_corereg(sih, cidx, offset, 0, 0); + r = (r >> SRPWR_DMN_SHIFT) & SRPWR_DMN_ALL_MASK; + + return r; +} + +/* Utility API to read/write the raw registers with absolute address. + * This function can be invoked from either FW or host driver. 
+ */ +uint32 +si_raw_reg(si_t *sih, uint32 reg, uint32 val, uint32 wrire_req) +{ + si_info_t *sii = SI_INFO(sih); + uint32 address_space = reg & ~0xFFF; + volatile uint32 * addr = (void*)(uintptr)(reg); + uint32 prev_value = 0; + uint32 cfg_reg = 0; + + if (sii == NULL) { + return 0; + } + + /* No need to translate the absolute address on SI bus */ + if (BUSTYPE(sih->bustype) == SI_BUS) { + goto skip_cfg; + } + + /* This API supports only the PCI host interface */ + if (BUSTYPE(sih->bustype) != PCI_BUS) { + return ID32_INVALID; + } + + if (PCIE_GEN2(sii)) { + /* Use BAR0 Secondary window is PCIe Gen2. + * Set the secondary BAR0 Window to current register of interest + */ + addr = (volatile uint32*)(((volatile uint8*)sii->curmap) + + PCI_SEC_BAR0_WIN_OFFSET + (reg & 0xfff)); + cfg_reg = PCIE2_BAR0_CORE2_WIN; + + } else { + /* PCIe Gen1 do not have secondary BAR0 window. + * reuse the BAR0 WIN2 + */ + addr = (volatile uint32*)(((volatile uint8*)sii->curmap) + + PCI_BAR0_WIN2_OFFSET + (reg & 0xfff)); + cfg_reg = PCI_BAR0_WIN2; + } + + prev_value = OSL_PCI_READ_CONFIG(sii->osh, cfg_reg, 4); + + if (prev_value != address_space) { + OSL_PCI_WRITE_CONFIG(sii->osh, cfg_reg, + sizeof(uint32), address_space); + } else { + prev_value = 0; + } + +skip_cfg: + if (wrire_req) { + W_REG(sii->osh, addr, val); + } else { + val = R_REG(sii->osh, addr); + } + + if (prev_value) { + /* Restore BAR0 WIN2 for PCIE GEN1 devices */ + OSL_PCI_WRITE_CONFIG(sii->osh, + cfg_reg, sizeof(uint32), prev_value); + } + + return val; +} diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/siutils_priv.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/siutils_priv.h index 41f0d0d321b7..4b887df6ac3f 100755 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/siutils_priv.h +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/siutils_priv.h @@ -1,7 +1,7 @@ /* * Include file private to the SOC Interconnect support files. 
* - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,13 +24,28 @@ * * <> * - * $Id: siutils_priv.h 520760 2014-12-15 00:54:16Z $ + * $Id: siutils_priv.h 625739 2016-03-17 12:28:03Z $ */ #ifndef _siutils_priv_h_ #define _siutils_priv_h_ +#if defined(SI_ERROR_ENFORCE) +#define SI_ERROR(args) printf args +#else #define SI_ERROR(args) printf args +#endif + +#if defined(ENABLE_CORECAPTURE) + +#define SI_PRINT(args) osl_wificc_logDebug args + +#else + +#define SI_PRINT(args) printf args + +#endif /* ENABLE_CORECAPTURE */ + #define SI_MSG(args) @@ -57,63 +72,91 @@ typedef struct gci_gpio_item { struct gci_gpio_item *next; } gci_gpio_item_t; +#define AI_SLAVE_WRAPPER 0 +#define AI_MASTER_WRAPPER 1 + +typedef struct axi_wrapper { + uint32 mfg; + uint32 cid; + uint32 rev; + uint32 wrapper_type; + uint32 wrapper_addr; + uint32 wrapper_size; +} axi_wrapper_t; + +#define SI_MAX_AXI_WRAPPERS 32 +#define AI_REG_READ_TIMEOUT 300 /* in msec */ + +/* for some combo chips, BT side accesses chipcommon->0x190, as a 16 byte addr */ +/* register at 0x19C doesn't exist, so error is logged at the slave wrapper */ +#define BT_CC_SPROM_BADREG_LO 0x18000190 +#define BT_CC_SPROM_BADREG_HI 0 +#define BCM4350_BT_AXI_ID 6 +#define BCM4345_BT_AXI_ID 6 typedef struct si_cores_info { - void *regs[SI_MAXCORES]; /* other regs va */ + volatile void *regs[SI_MAXCORES]; /* other regs va */ - uint coreid[SI_MAXCORES]; /* id of each core */ - uint32 coresba[SI_MAXCORES]; /* backplane address of each core */ - void *regs2[SI_MAXCORES]; /* va of each core second register set (usbh20) */ - uint32 coresba2[SI_MAXCORES]; /* address of each core second register set (usbh20) */ - uint32 coresba_size[SI_MAXCORES]; /* backplane address space size */ - uint32 coresba2_size[SI_MAXCORES]; /* second address space size 
*/ + uint coreid[SI_MAXCORES]; /**< id of each core */ + uint32 coresba[SI_MAXCORES]; /**< backplane address of each core */ + void *regs2[SI_MAXCORES]; /**< va of each core second register set (usbh20) */ + uint32 coresba2[SI_MAXCORES]; /**< address of each core second register set (usbh20) */ + uint32 coresba_size[SI_MAXCORES]; /**< backplane address space size */ + uint32 coresba2_size[SI_MAXCORES]; /**< second address space size */ - void *wrappers[SI_MAXCORES]; /* other cores wrapper va */ - uint32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */ + void *wrappers[SI_MAXCORES]; /**< other cores wrapper va */ + uint32 wrapba[SI_MAXCORES]; /**< address of controlling wrapper */ - void *wrappers2[SI_MAXCORES]; /* other cores wrapper va */ - uint32 wrapba2[SI_MAXCORES]; /* address of controlling wrapper */ + void *wrappers2[SI_MAXCORES]; /**< other cores wrapper va */ + uint32 wrapba2[SI_MAXCORES]; /**< address of controlling wrapper */ - uint32 cia[SI_MAXCORES]; /* erom cia entry for each core */ - uint32 cib[SI_MAXCORES]; /* erom cia entry for each core */ + uint32 cia[SI_MAXCORES]; /**< erom cia entry for each core */ + uint32 cib[SI_MAXCORES]; /**< erom cia entry for each core */ } si_cores_info_t; -/* misc si info needed by some of the routines */ +/** misc si info needed by some of the routines */ typedef struct si_info { - struct si_pub pub; /* back plane public state (must be first field) */ + struct si_pub pub; /**< back plane public state (must be first field) */ - void *osh; /* osl os handle */ - void *sdh; /* bcmsdh handle */ + void *osh; /**< osl os handle */ + void *sdh; /**< bcmsdh handle */ - uint dev_coreid; /* the core provides driver functions */ - void *intr_arg; /* interrupt callback function arg */ - si_intrsoff_t intrsoff_fn; /* turns chip interrupts off */ - si_intrsrestore_t intrsrestore_fn; /* restore chip interrupts */ - si_intrsenabled_t intrsenabled_fn; /* check if interrupts are enabled */ + uint dev_coreid; /**< the core 
provides driver functions */ + void *intr_arg; /**< interrupt callback function arg */ + si_intrsoff_t intrsoff_fn; /**< turns chip interrupts off */ + si_intrsrestore_t intrsrestore_fn; /**< restore chip interrupts */ + si_intrsenabled_t intrsenabled_fn; /**< check if interrupts are enabled */ - void *pch; /* PCI/E core handle */ + void *pch; /**< PCI/E core handle */ - bool memseg; /* flag to toggle MEM_SEG register */ + bool memseg; /**< flag to toggle MEM_SEG register */ char *vars; uint varsz; - void *curmap; /* current regs va */ + volatile void *curmap; /* current regs va */ - uint curidx; /* current core index */ - uint numcores; /* # discovered cores */ + uint curidx; /**< current core index */ + uint numcores; /**< # discovered cores */ - void *curwrap; /* current wrapper va */ + void *curwrap; /**< current wrapper va */ - uint32 oob_router; /* oob router registers for axi */ + uint32 oob_router; /**< oob router registers for axi */ - void *cores_info; - gci_gpio_item_t *gci_gpio_head; /* gci gpio interrupts head */ - uint chipnew; /* new chip number */ - uint second_bar0win; /* Backplane region */ - uint num_br; /* # discovered bridges */ - uint32 br_wrapba[SI_MAXBR]; /* address of bridge controlling wrapper */ + si_cores_info_t *cores_info; + gci_gpio_item_t *gci_gpio_head; /**< gci gpio interrupts head */ + uint chipnew; /**< new chip number */ + uint second_bar0win; /**< Backplane region */ + uint num_br; /**< # discovered bridges */ + uint32 br_wrapba[SI_MAXBR]; /**< address of bridge controlling wrapper */ uint32 xtalfreq; + uint32 macclk_mul_fact; /* Multiplication factor necessary to adjust MAC Clock + * during ULB Mode operation. 
One instance where this is used is configuring TSF L-frac + * register + */ + bool device_removed; + uint axi_num_wrappers; + axi_wrapper_t * axi_wrapper; } si_info_t; @@ -124,7 +167,7 @@ typedef struct si_info { #define GOODREGS(regs) ((regs) != NULL && ISALIGNED((uintptr)(regs), SI_CORE_SIZE)) #define BADCOREADDR 0 #define GOODIDX(idx) (((uint)idx) < SI_MAXCORES) -#define NOREV -1 /* Invalid rev */ +#define NOREV -1 /**< Invalid rev */ #define PCI(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \ ((si)->pub.buscoretype == PCI_CORE_ID)) @@ -139,54 +182,46 @@ typedef struct si_info { #define PCMCIA(si) ((BUSTYPE((si)->pub.bustype) == PCMCIA_BUS) && ((si)->memseg == TRUE)) -/* Newer chips can access PCI/PCIE and CC core without requiring to change - * PCI BAR0 WIN - */ +/** Newer chips can access PCI/PCIE and CC core without requiring to change PCI BAR0 WIN */ #define SI_FAST(si) (PCIE(si) || (PCI(si) && ((si)->pub.buscorerev >= 13))) -#define PCIEREGS(si) (((char *)((si)->curmap) + PCI_16KB0_PCIREGS_OFFSET)) -#define CCREGS_FAST(si) (((char *)((si)->curmap) + PCI_16KB0_CCREGS_OFFSET)) +#define CCREGS_FAST(si) \ + (((si)->curmap == NULL) ? NULL : \ + ((volatile char *)((si)->curmap) + PCI_16KB0_CCREGS_OFFSET)) +#define PCIEREGS(si) (((volatile char *)((si)->curmap) + PCI_16KB0_PCIREGS_OFFSET)) /* * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts before/ * after core switching to avoid invalid register accesss inside ISR. 
*/ #define INTR_OFF(si, intr_val) \ - if ((si)->intrsoff_fn && (cores_info)->coreid[(si)->curidx] == (si)->dev_coreid) { \ + if ((si)->intrsoff_fn && (si)->cores_info->coreid[(si)->curidx] == (si)->dev_coreid) { \ intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); } #define INTR_RESTORE(si, intr_val) \ - if ((si)->intrsrestore_fn && (cores_info)->coreid[(si)->curidx] == (si)->dev_coreid) { \ + if ((si)->intrsrestore_fn && (si)->cores_info->coreid[(si)->curidx] == (si)->dev_coreid) { \ (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); } /* dynamic clock control defines */ -#define LPOMINFREQ 25000 /* low power oscillator min */ -#define LPOMAXFREQ 43000 /* low power oscillator max */ -#define XTALMINFREQ 19800000 /* 20 MHz - 1% */ -#define XTALMAXFREQ 20200000 /* 20 MHz + 1% */ -#define PCIMINFREQ 25000000 /* 25 MHz */ -#define PCIMAXFREQ 34000000 /* 33 MHz + fudge */ +#define LPOMINFREQ 25000 /**< low power oscillator min */ +#define LPOMAXFREQ 43000 /**< low power oscillator max */ +#define XTALMINFREQ 19800000 /**< 20 MHz - 1% */ +#define XTALMAXFREQ 20200000 /**< 20 MHz + 1% */ +#define PCIMINFREQ 25000000 /**< 25 MHz */ +#define PCIMAXFREQ 34000000 /**< 33 MHz + fudge */ -#define ILP_DIV_5MHZ 0 /* ILP = 5 MHz */ -#define ILP_DIV_1MHZ 4 /* ILP = 1 MHz */ - -/* Force fast clock for 4360b0 */ -#define PCI_FORCEHT(si) \ - (((PCIE_GEN1(si)) && (CHIPID(si->pub.chip) == BCM4311_CHIP_ID) && \ - ((CHIPREV(si->pub.chiprev) <= 1))) || \ - ((PCI(si) || PCIE_GEN1(si)) && (CHIPID(si->pub.chip) == BCM4321_CHIP_ID)) || \ - (PCIE_GEN1(si) && (CHIPID(si->pub.chip) == BCM4716_CHIP_ID)) || \ - (PCIE_GEN1(si) && (CHIPID(si->pub.chip) == BCM4748_CHIP_ID))) +#define ILP_DIV_5MHZ 0 /**< ILP = 5 MHz */ +#define ILP_DIV_1MHZ 4 /**< ILP = 1 MHz */ /* GPIO Based LED powersave defines */ -#define DEFAULT_GPIO_ONTIME 10 /* Default: 10% on */ -#define DEFAULT_GPIO_OFFTIME 90 /* Default: 10% on */ +#define DEFAULT_GPIO_ONTIME 10 /**< Default: 10% on */ +#define DEFAULT_GPIO_OFFTIME 90 /**< 
Default: 10% on */ #ifndef DEFAULT_GPIOTIMERVAL #define DEFAULT_GPIOTIMERVAL ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME) #endif /* Silicon Backplane externs */ -extern void sb_scan(si_t *sih, void *regs, uint devid); +extern void sb_scan(si_t *sih, volatile void *regs, uint devid); extern uint sb_coreid(si_t *sih); extern uint sb_intflag(si_t *sih); extern uint sb_flag(si_t *sih); @@ -194,9 +229,9 @@ extern void sb_setint(si_t *sih, int siflag); extern uint sb_corevendor(si_t *sih); extern uint sb_corerev(si_t *sih); extern uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); -extern uint32 *sb_corereg_addr(si_t *sih, uint coreidx, uint regoff); +extern volatile uint32 *sb_corereg_addr(si_t *sih, uint coreidx, uint regoff); extern bool sb_iscoreup(si_t *sih); -extern void *sb_setcoreidx(si_t *sih, uint coreidx); +extern volatile void *sb_setcoreidx(si_t *sih, uint coreidx); extern uint32 sb_core_cflags(si_t *sih, uint32 mask, uint32 val); extern void sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val); extern uint32 sb_core_sflags(si_t *sih, uint32 mask, uint32 val); @@ -237,10 +272,10 @@ extern void ai_setint(si_t *sih, int siflag); extern uint ai_coreidx(si_t *sih); extern uint ai_corevendor(si_t *sih); extern uint ai_corerev(si_t *sih); -extern uint32 *ai_corereg_addr(si_t *sih, uint coreidx, uint regoff); +extern volatile uint32 *ai_corereg_addr(si_t *sih, uint coreidx, uint regoff); extern bool ai_iscoreup(si_t *sih); -extern void *ai_setcoreidx(si_t *sih, uint coreidx); -extern void *ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx); +extern volatile void *ai_setcoreidx(si_t *sih, uint coreidx); +extern volatile void *ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx); extern uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val); extern void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val); extern uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val); @@ -248,9 +283,6 @@ extern uint ai_corereg(si_t *sih, 
uint coreidx, uint regoff, uint mask, uint val extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits); extern void ai_d11rsdb_core_reset(si_t *sih, uint32 bits, uint32 resetbits, void *p, void *s); -extern void ai_d11rsdb_core1_alt_reg_clk_en(si_t *sih); -extern void ai_d11rsdb_core1_alt_reg_clk_dis(si_t *sih); - extern void ai_core_disable(si_t *sih, uint32 bits); extern void ai_d11rsdb_core_disable(const si_info_t *sii, uint32 bits, aidmp_t *pmacai, aidmp_t *smacai); @@ -260,12 +292,23 @@ extern uint32 ai_addrspacesize(si_t *sih, uint asidx); extern void ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size); extern uint ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val); extern void ai_enable_backplane_timeouts(si_t *sih); -extern void ai_clear_backplane_to(si_t *sih); +extern uint32 ai_clear_backplane_to(si_t *sih); +extern uint ai_num_slaveports(si_t *sih, uint coreidx); + +#ifdef BCM_BACKPLANE_TIMEOUT +uint32 ai_clear_backplane_to_fast(si_t *sih, void * addr); +#endif /* BCM_BACKPLANE_TIMEOUT */ + +#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT) +extern uint32 ai_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void * wrap); +#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */ #if defined(BCMDBG_PHYDUMP) extern void ai_dumpregs(si_t *sih, struct bcmstrbuf *b); #endif +extern uint32 ai_wrapper_dump_buf_size(si_t *sih); +extern uint32 ai_wrapper_dump_binary(si_t *sih, uchar *p); #define ub_scan(a, b, c) do {} while (0) #define ub_flag(a) (0) diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/uamp_api.h b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/uamp_api.h deleted file mode 100755 index 0d04a9d86037..000000000000 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/uamp_api.h +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Name: uamp_api.h - * - * Description: Universal AMP API - * - * Copyright (C) 1999-2016, Broadcom Corporation - * - * Unless you and Broadcom execute a separate 
written software license - * agreement governing use of this software, this software is licensed to you - * under the terms of the GNU General Public License version 2 (the "GPL"), - * available at http://www.broadcom.com/licenses/GPLv2.php, with the - * following added to such license: - * - * As a special exception, the copyright holders of this software give you - * permission to link this software with independent modules, and to copy and - * distribute the resulting executable under terms of your choice, provided that - * you also meet, for each linked independent module, the terms and conditions of - * the license of that module. An independent module is a module which is not - * derived from this software. The special exception does not apply to any - * modifications of the software. - * - * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a license - * other than the GPL, without Broadcom's express prior written consent. - * - * - * <> - * - * $Id: uamp_api.h 514727 2014-11-12 03:02:48Z $ - * - */ - - -#ifndef UAMP_API_H -#define UAMP_API_H - - -#include "typedefs.h" - - -/***************************************************************************** -** Constant and Type Definitions -****************************************************************************** -*/ - -#define BT_API - -/* Types. 
*/ -typedef bool BOOLEAN; -typedef uint8 UINT8; -typedef uint16 UINT16; - - -/* UAMP identifiers */ -#define UAMP_ID_1 1 -#define UAMP_ID_2 2 -typedef UINT8 tUAMP_ID; - -/* UAMP event ids (used by UAMP_CBACK) */ -#define UAMP_EVT_RX_READY 0 /* Data from AMP controller is ready to be read */ -#define UAMP_EVT_CTLR_REMOVED 1 /* Controller removed */ -#define UAMP_EVT_CTLR_READY 2 /* Controller added/ready */ -typedef UINT8 tUAMP_EVT; - - -/* UAMP Channels */ -#define UAMP_CH_HCI_CMD 0 /* HCI Command channel */ -#define UAMP_CH_HCI_EVT 1 /* HCI Event channel */ -#define UAMP_CH_HCI_DATA 2 /* HCI ACL Data channel */ -typedef UINT8 tUAMP_CH; - -/* tUAMP_EVT_DATA: union for event-specific data, used by UAMP_CBACK */ -typedef union { - tUAMP_CH channel; /* UAMP_EVT_RX_READY: channel for which rx occured */ -} tUAMP_EVT_DATA; - - -/***************************************************************************** -** -** Function: UAMP_CBACK -** -** Description: Callback for events. Register callback using UAMP_Init. 
-** -** Parameters amp_id: AMP device identifier that generated the event -** amp_evt: event id -** p_amp_evt_data: pointer to event-specific data -** -****************************************************************************** -*/ -typedef void (*tUAMP_CBACK)(tUAMP_ID amp_id, tUAMP_EVT amp_evt, tUAMP_EVT_DATA *p_amp_evt_data); - -/***************************************************************************** -** external function declarations -****************************************************************************** -*/ -#ifdef __cplusplus -extern "C" -{ -#endif - -/***************************************************************************** -** -** Function: UAMP_Init -** -** Description: Initialize UAMP driver -** -** Parameters p_cback: Callback function for UAMP event notification -** -****************************************************************************** -*/ -BT_API BOOLEAN UAMP_Init(tUAMP_CBACK p_cback); - - -/***************************************************************************** -** -** Function: UAMP_Open -** -** Description: Open connection to local AMP device. -** -** Parameters app_id: Application specific AMP identifer. This value -** will be included in AMP messages sent to the -** BTU task, to identify source of the message -** -****************************************************************************** -*/ -BT_API BOOLEAN UAMP_Open(tUAMP_ID amp_id); - -/***************************************************************************** -** -** Function: UAMP_Close -** -** Description: Close connection to local AMP device. -** -** Parameters app_id: Application specific AMP identifer. -** -****************************************************************************** -*/ -BT_API void UAMP_Close(tUAMP_ID amp_id); - - -/***************************************************************************** -** -** Function: UAMP_Write -** -** Description: Send buffer to AMP device. Frees GKI buffer when done. 
-** -** -** Parameters: app_id: AMP identifer. -** p_buf: pointer to buffer to write -** num_bytes: number of bytes to write -** channel: UAMP_CH_HCI_ACL, or UAMP_CH_HCI_CMD -** -** Returns: number of bytes written -** -****************************************************************************** -*/ -BT_API UINT16 UAMP_Write(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 num_bytes, tUAMP_CH channel); - -/***************************************************************************** -** -** Function: UAMP_Read -** -** Description: Read incoming data from AMP. Call after receiving a -** UAMP_EVT_RX_READY callback event. -** -** Parameters: app_id: AMP identifer. -** p_buf: pointer to buffer for holding incoming AMP data -** buf_size: size of p_buf -** channel: UAMP_CH_HCI_ACL, or UAMP_CH_HCI_EVT -** -** Returns: number of bytes read -** -****************************************************************************** -*/ -BT_API UINT16 UAMP_Read(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 buf_size, tUAMP_CH channel); - -#ifdef __cplusplus -} -#endif - -#endif /* UAMP_API_H */ diff --git a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/wl_android.c b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/wl_android.c index 1961e6a7795c..42afe36f8834 100644 --- a/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/wl_android.c +++ b/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/wl_android.c @@ -1,7 +1,7 @@ /* * Linux cfg80211 driver - Android related functions * - * Copyright (C) 1999-2016, Broadcom Corporation + * Copyright (C) 1999-2017, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -24,7 +24,7 @@ * * <> * - * $Id: wl_android.c 608788 2015-12-29 10:59:33Z $ + * $Id: wl_android.c 710862 2017-07-14 07:43:59Z $ */ #include @@ -36,14 +36,16 @@ #include #include +#include #include +#include #include #include #include #include #include #include -#include +#include 
#ifdef PNO_SUPPORT #include #endif @@ -53,12 +55,17 @@ #ifdef WL_CFG80211 #include #endif -#ifdef WL_NAN -#include -#endif /* WL_NAN */ #ifdef DHDTCPACK_SUPPRESS #include #endif /* DHDTCPACK_SUPPRESS */ +#include +#ifdef DHD_PKT_LOGGING +#include +#endif /* DHD_PKT_LOGGING */ + +#if defined(STAT_REPORT) +#include +#endif /* STAT_REPORT */ #ifndef WL_CFG80211 #define htod32(i) i @@ -91,6 +98,7 @@ uint android_msg_level = ANDROID_ERROR_LEVEL; #define CMD_BTCOEXMODE "BTCOEXMODE" #define CMD_SETSUSPENDOPT "SETSUSPENDOPT" #define CMD_SETSUSPENDMODE "SETSUSPENDMODE" +#define CMD_MAXDTIM_IN_SUSPEND "MAX_DTIM_IN_SUSPEND" #define CMD_P2P_DEV_ADDR "P2P_DEV_ADDR" #define CMD_SETFWPATH "SETFWPATH" #define CMD_SETBAND "SETBAND" @@ -104,13 +112,11 @@ uint android_msg_level = ANDROID_ERROR_LEVEL; #define CMD_P2P_LISTEN_OFFLOAD "P2P_LO_" #define CMD_P2P_SET_PS "P2P_SET_PS" #define CMD_P2P_ECSA "P2P_ECSA" +#define CMD_P2P_INC_BW "P2P_INCREASE_BW" #define CMD_SET_AP_WPS_P2P_IE "SET_AP_WPS_P2P_IE" #define CMD_SETROAMMODE "SETROAMMODE" #define CMD_SETIBSSBEACONOUIDATA "SETIBSSBEACONOUIDATA" #define CMD_MIRACAST "MIRACAST" -#ifdef WL_NAN -#define CMD_NAN "NAN_" -#endif /* WL_NAN */ #define CMD_COUNTRY_DELIMITER "/" #ifdef WL11ULB #define CMD_ULB_MODE "ULB_MODE" @@ -176,32 +182,39 @@ uint android_msg_level = ANDROID_ERROR_LEVEL; #define CMD_SET_SCSCAN "SETSINGLEANT" #define CMD_GET_SCSCAN "GETSINGLEANT" - -/* FCC_PWR_LIMIT_2G */ -#define CUSTOMER_HW4_ENABLE 0 -#define CUSTOMER_HW4_DISABLE -1 -#define CUSTOMER_HW4_EN_CONVERT(i) (i += 1) - #ifdef WLTDLS #define CMD_TDLS_RESET "TDLS_RESET" #endif /* WLTDLS */ -#ifdef IPV6_NDO_SUPPORT -#define CMD_NDRA_LIMIT "NDRA_LIMIT" -#endif /* IPV6_NDO_SUPPORT */ +#ifdef FCC_PWR_LIMIT_2G +#define CMD_GET_FCC_PWR_LIMIT_2G "GET_FCC_CHANNEL" +#define CMD_SET_FCC_PWR_LIMIT_2G "SET_FCC_CHANNEL" +/* CUSTOMER_HW4's value differs from BRCM FW value for enable/disable */ +#define CUSTOMER_HW4_ENABLE 0 +#define CUSTOMER_HW4_DISABLE -1 +#define 
CUSTOMER_HW4_EN_CONVERT(i) (i += 1) +#endif /* FCC_PWR_LIMIT_2G */ #endif /* CUSTOMER_HW4_PRIVATE_CMD */ + #define CMD_ROAM_OFFLOAD "SETROAMOFFLOAD" -#define CMD_ROAM_OFFLOAD_APLIST "SETROAMOFFLAPLIST" #define CMD_INTERFACE_CREATE "INTERFACE_CREATE" #define CMD_INTERFACE_DELETE "INTERFACE_DELETE" +#define CMD_GET_LINK_STATUS "GETLINKSTATUS" #if defined(DHD_ENABLE_BIGDATA_LOGGING) #define CMD_GET_BSS_INFO "GETBSSINFO" #define CMD_GET_ASSOC_REJECT_INFO "GETASSOCREJECTINFO" #endif /* DHD_ENABLE_BIGDATA_LOGGING */ +#define CMD_GET_STA_INFO "GETSTAINFO" + +/* related with CMD_GET_LINK_STATUS */ +#define WL_ANDROID_LINK_VHT 0x01 +#define WL_ANDROID_LINK_MIMO 0x02 +#define WL_ANDROID_LINK_AP_VHT_SUPPORT 0x04 +#define WL_ANDROID_LINK_AP_MIMO_SUPPORT 0x08 #ifdef P2PRESP_WFDIE_SRC #define CMD_P2P_SET_WFDIE_RESP "P2P_SET_WFDIE_RESP" @@ -214,6 +227,8 @@ uint android_msg_level = ANDROID_ERROR_LEVEL; #define CMD_WBTEXT_WEIGHT_CONFIG "WBTEXT_WEIGHT_CONFIG" #define CMD_WBTEXT_TABLE_CONFIG "WBTEXT_TABLE_CONFIG" #define CMD_WBTEXT_DELTA_CONFIG "WBTEXT_DELTA_CONFIG" +#define CMD_WBTEXT_BTM_TIMER_THRESHOLD "WBTEXT_BTM_TIMER_THRESHOLD" +#define CMD_WBTEXT_BTM_DELTA "WBTEXT_BTM_DELTA" #ifdef WLWFDS #define CMD_ADD_WFDS_HASH "ADD_WFDS_HASH" @@ -230,6 +245,25 @@ uint android_msg_level = ANDROID_ERROR_LEVEL; #define CMD_MURX_BFE_CAP "MURX_BFE_CAP" +#ifdef SUPPORT_AP_HIGHER_BEACONRATE +#define CMD_SET_AP_BEACONRATE "SET_AP_BEACONRATE" +#define CMD_GET_AP_BASICRATE "GET_AP_BASICRATE" +#endif /* SUPPORT_AP_HIGHER_BEACONRATE */ + +#ifdef SUPPORT_AP_RADIO_PWRSAVE +#define CMD_SET_AP_RPS "SET_AP_RPS" +#define CMD_GET_AP_RPS "GET_AP_RPS" +#define CMD_SET_AP_RPS_PARAMS "SET_AP_RPS_PARAMS" +#endif /* SUPPORT_AP_RADIO_PWRSAVE */ + +#ifdef SUPPORT_RSSI_LOGGING +#define CMD_SET_RSSI_LOGGING "SET_RSSI_LOGGING" +#define CMD_GET_RSSI_LOGGING "GET_RSSI_LOGGING" +#define CMD_GET_RSSI_PER_ANT "GET_RSSI_PER_ANT" +#endif /* SUPPORT_RSSI_LOGGING */ + +#define CMD_GET_SNR "GET_SNR" + /* miracast related 
definition */ #define MIRACAST_MODE_OFF 0 #define MIRACAST_MODE_SOURCE 1 @@ -267,6 +301,11 @@ struct connection_stats { }; #endif /* CONNECTION_STATISTICS */ +#ifdef SUPPORT_LQCM +#define CMD_SET_LQCM_ENABLE "SET_LQCM_ENABLE" +#define CMD_GET_LQCM_REPORT "GET_LQCM_REPORT" +#endif + static LIST_HEAD(miracast_resume_list); #ifdef WL_CFG80211 static u8 miracast_cur_mode; @@ -277,9 +316,13 @@ static u8 miracast_cur_mode; extern void dhd_schedule_log_dump(dhd_pub_t *dhdp); extern int dhd_bus_mem_dump(dhd_pub_t *dhd); #endif /* DHD_LOG_DUMP */ -#ifdef DHD_TRACE_WAKE_LOCK -extern void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp); -#endif /* DHD_TRACE_WAKE_LOCK */ + +#ifdef DHD_HANG_SEND_UP_TEST +#define CMD_MAKE_HANG "MAKE_HANG" +#endif /* CMD_DHD_HANG_SEND_UP_TEST */ +#ifdef DHD_DEBUG_UART +extern bool dhd_debug_uart_is_running(struct net_device *dev); +#endif /* DHD_DEBUG_UART */ struct io_cfg { s8 *iovar; @@ -290,20 +333,6 @@ struct io_cfg { struct list_head list; }; -typedef struct _android_wifi_priv_cmd { - char *buf; - int used_len; - int total_len; -} android_wifi_priv_cmd; - -#ifdef CONFIG_COMPAT -typedef struct _compat_android_wifi_priv_cmd { - compat_caddr_t buf; - int used_len; - int total_len; -} compat_android_wifi_priv_cmd; -#endif /* CONFIG_COMPAT */ - #if defined(BCMFW_ROAM_ENABLE) #define CMD_SET_ROAMPREF "SET_ROAMPREF" @@ -318,6 +347,102 @@ typedef struct _compat_android_wifi_priv_cmd { (JOIN_PREF_WPA_TUPLE_SIZE * JOIN_PREF_MAX_WPA_TUPLES)) #endif /* BCMFW_ROAM_ENABLE */ +#ifdef WL_NATOE + +#define CMD_NATOE "NATOE" + +#define NATOE_MAX_PORT_NUM 65535 + +/* natoe command info structure */ +typedef struct wl_natoe_cmd_info { + uint8 *command; /* pointer to the actual command */ + uint16 tot_len; /* total length of the command */ + uint16 bytes_written; /* Bytes written for get response */ +} wl_natoe_cmd_info_t; + +typedef struct wl_natoe_sub_cmd wl_natoe_sub_cmd_t; +typedef int (natoe_cmd_handler_t)(struct net_device *dev, + const wl_natoe_sub_cmd_t *cmd, 
char *command, wl_natoe_cmd_info_t *cmd_info); + +struct wl_natoe_sub_cmd { + char *name; + uint8 version; /* cmd version */ + uint16 id; /* id for the dongle f/w switch/case */ + uint16 type; /* base type of argument */ + natoe_cmd_handler_t *handler; /* cmd handler */ +}; + +#define WL_ANDROID_NATOE_FUNC(suffix) wl_android_natoe_subcmd_ ##suffix +static int wl_android_process_natoe_cmd(struct net_device *dev, + char *command, int total_len); +static int wl_android_natoe_subcmd_enable(struct net_device *dev, + const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info); +static int wl_android_natoe_subcmd_config_ips(struct net_device *dev, + const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info); +static int wl_android_natoe_subcmd_config_ports(struct net_device *dev, + const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info); +static int wl_android_natoe_subcmd_dbg_stats(struct net_device *dev, + const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info); +static int wl_android_natoe_subcmd_tbl_cnt(struct net_device *dev, + const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info); + +static const wl_natoe_sub_cmd_t natoe_cmd_list[] = { + /* wl natoe enable [0/1] or new: "wl natoe [0/1]" */ + {"enable", 0x01, WL_NATOE_CMD_ENABLE, + IOVT_BUFFER, WL_ANDROID_NATOE_FUNC(enable) + }, + {"config_ips", 0x01, WL_NATOE_CMD_CONFIG_IPS, + IOVT_BUFFER, WL_ANDROID_NATOE_FUNC(config_ips) + }, + {"config_ports", 0x01, WL_NATOE_CMD_CONFIG_PORTS, + IOVT_BUFFER, WL_ANDROID_NATOE_FUNC(config_ports) + }, + {"stats", 0x01, WL_NATOE_CMD_DBG_STATS, + IOVT_BUFFER, WL_ANDROID_NATOE_FUNC(dbg_stats) + }, + {"tbl_cnt", 0x01, WL_NATOE_CMD_TBL_CNT, + IOVT_BUFFER, WL_ANDROID_NATOE_FUNC(tbl_cnt) + }, + {NULL, 0, 0, 0, NULL} +}; + +#endif /* WL_NATOE */ + +#ifdef SET_PCIE_IRQ_CPU_CORE +#define CMD_PCIE_IRQ_CORE "PCIE_IRQ_CORE" +#endif /* SET_PCIE_IRQ_CPU_CORE */ + +#ifdef WLADPS_PRIVATE_CMD +#define 
CMD_SET_ADPS "SET_ADPS" +#define CMD_GET_ADPS "GET_ADPS" +#endif /* WLADPS_PRIVATE_CMD */ + +#ifdef DHD_PKT_LOGGING +#define CMD_PKTLOG_FILTER_ENABLE "PKTLOG_FILTER_ENABLE" +#define CMD_PKTLOG_FILTER_DISABLE "PKTLOG_FILTER_DISABLE" +#define CMD_PKTLOG_FILTER_PATTERN_ENABLE "PKTLOG_FILTER_PATTERN_ENABLE" +#define CMD_PKTLOG_FILTER_PATTERN_DISABLE "PKTLOG_FILTER_PATTERN_DISABLE" +#define CMD_PKTLOG_FILTER_ADD "PKTLOG_FILTER_ADD" +#define CMD_PKTLOG_FILTER_INFO "PKTLOG_FILTER_INFO" +#define CMD_PKTLOG_START "PKTLOG_START" +#define CMD_PKTLOG_STOP "PKTLOG_STOP" +#define CMD_PKTLOG_FILTER_EXIST "PKTLOG_FILTER_EXIST" +#endif /* DHD_PKT_LOGGING */ + +#if defined(STAT_REPORT) +#define CMD_STAT_REPORT_GET_START "STAT_REPORT_GET_START" +#define CMD_STAT_REPORT_GET_NEXT "STAT_REPORT_GET_NEXT" +#endif /* STAT_REPORT */ + + +#ifdef SUPPORT_LQCM +#define LQCM_ENAB_MASK 0x000000FF /* LQCM enable flag mask */ +#define LQCM_TX_INDEX_MASK 0x0000FF00 /* LQCM tx index mask */ +#define LQCM_RX_INDEX_MASK 0x00FF0000 /* LQCM rx index mask */ + +#define LQCM_TX_INDEX_SHIFT 8 /* LQCM tx index shift */ +#define LQCM_RX_INDEX_SHIFT 16 /* LQCM rx index shift */ +#endif /* SUPPORT_LQCM */ /** * Extern function declarations (TODO: move them to dhd_linux.h) @@ -338,8 +463,16 @@ int wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len) { return 0; } int wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len) { return 0; } +int wl_cfg80211_increase_p2p_bw(struct net_device *net, char* buf, int len) +{ return 0; } #endif /* WL_CFG80211 */ - +#ifdef WBTEXT +static int wl_android_wbtext(struct net_device *dev, char *command, int total_len); +static int wl_cfg80211_wbtext_btm_timer_threshold(struct net_device *dev, + char *command, int total_len); +static int wl_cfg80211_wbtext_btm_delta(struct net_device *dev, + char *command, int total_len); +#endif /* WBTEXT */ #ifdef ENABLE_4335BT_WAR extern int bcm_bt_lock(int cookie); @@ -407,8 +540,10 @@ static int 
wl_android_get_link_speed(struct net_device *net, char *command, int int error; error = wldev_get_link_speed(net, &link_speed); - if (error) + if (error) { + ANDROID_ERROR(("Get linkspeed failed \n")); return -1; + } /* Convert Kbps to Android Mbps */ link_speed = link_speed / 1000; @@ -419,11 +554,16 @@ static int wl_android_get_link_speed(struct net_device *net, char *command, int static int wl_android_get_rssi(struct net_device *net, char *command, int total_len) { - wlc_ssid_t ssid = {0}; + wlc_ssid_t ssid = {0, {0}}; int bytes_written = 0; int error = 0; scb_val_t scbval; char *delim = NULL; + struct net_device *target_ndev = net; +#ifdef WL_VIRTUAL_APSTA + char *pos = NULL; + struct bcm_cfg80211 *cfg; +#endif /* WL_VIRTUAL_APSTA */ delim = strchr(command, ' '); /* For Ap mode rssi command would be @@ -445,29 +585,57 @@ static int wl_android_get_rssi(struct net_device *net, char *command, int total_ } scbval.val = htod32(0); ANDROID_TRACE(("%s: address:"MACDBG, __FUNCTION__, MAC2STRDBG(scbval.ea.octet))); +#ifdef WL_VIRTUAL_APSTA + /* RSDB AP may have another virtual interface + * In this case, format of private command is as following, + * DRIVER rssi + */ + + /* Current position is start of MAC address string */ + pos = delim; + delim = strchr(pos, ' '); + if (delim) { + /* skip space from delim after finding char */ + delim++; + if (strnlen(delim, IFNAMSIZ)) { + cfg = wl_get_cfg(net); + target_ndev = wl_get_ap_netdev(cfg, delim); + if (target_ndev == NULL) + target_ndev = net; + } + } +#endif /* WL_VIRTUAL_APSTA */ } else { /* STA/GC mode */ memset(&scbval, 0, sizeof(scb_val_t)); } - error = wldev_get_rssi(net, &scbval); + error = wldev_get_rssi(target_ndev, &scbval); if (error) return -1; #if defined(RSSIOFFSET) scbval.val = wl_update_rssi_offset(net, scbval.val); #endif - error = wldev_get_ssid(net, &ssid); + error = wldev_get_ssid(target_ndev, &ssid); if (error) return -1; if ((ssid.SSID_len == 0) || (ssid.SSID_len > DOT11_MAX_SSID_LEN)) { 
ANDROID_ERROR(("%s: wldev_get_ssid failed\n", __FUNCTION__)); + } else if (total_len <= ssid.SSID_len) { + return -ENOMEM; } else { memcpy(command, ssid.SSID, ssid.SSID_len); bytes_written = ssid.SSID_len; } - bytes_written += snprintf(&command[bytes_written], total_len, " rssi %d", scbval.val); + if ((total_len - bytes_written) < (strlen(" rssi -XXX") + 1)) + return -ENOMEM; + + bytes_written += scnprintf(&command[bytes_written], total_len - bytes_written, + " rssi %d", scbval.val); + command[bytes_written] = '\0'; + ANDROID_TRACE(("%s: command result is %s (%d)\n", __FUNCTION__, command, bytes_written)); return bytes_written; } @@ -520,11 +688,11 @@ static int wl_android_set_suspendmode(struct net_device *dev, char *command, int #ifdef WL_CFG80211 int wl_android_get_80211_mode(struct net_device *dev, char *command, int total_len) { - uint8 mode[4]; + uint8 mode[5]; int error = 0; int bytes_written = 0; - error = wldev_get_mode(dev, mode); + error = wldev_get_mode(dev, mode, sizeof(mode)); if (error) return -1; @@ -631,7 +799,7 @@ int wl_android_get_assoclist(struct net_device *dev, char *command, int total_le assoc_maclist->count = htod32(MAX_NUM_OF_ASSOCLIST); - error = wldev_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, sizeof(mac_buf), false); + error = wldev_ioctl_get(dev, WLC_GET_ASSOCLIST, assoc_maclist, sizeof(mac_buf)); if (error) return -1; @@ -724,6 +892,24 @@ static int wl_android_set_csa(struct net_device *dev, char *command, int total_l } #endif +static int +wl_android_set_max_dtim(struct net_device *dev, char *command, int total_len) +{ + int ret = 0; + int dtim_flag; + + dtim_flag = *(command + strlen(CMD_MAXDTIM_IN_SUSPEND) + 1) - '0'; + + if (!(ret = net_os_set_max_dtim_enable(dev, dtim_flag))) { + ANDROID_TRACE(("%s: use Max bcn_li_dtim in suspend %s\n", + __FUNCTION__, (dtim_flag ? 
"Enable" : "Disable"))); + } else { + ANDROID_ERROR(("%s: failed %d\n", __FUNCTION__, ret)); + } + + return ret; +} + static int wl_android_get_band(struct net_device *dev, char *command, int total_len) { uint band; @@ -739,6 +925,23 @@ static int wl_android_get_band(struct net_device *dev, char *command, int total_ #ifdef CUSTOMER_HW4_PRIVATE_CMD +#ifdef WLTDLS +int wl_android_tdls_reset(struct net_device *dev) +{ + int ret = 0; + ret = dhd_tdls_enable(dev, false, false, NULL); + if (ret < 0) { + ANDROID_ERROR(("Disable tdls failed. %d\n", ret)); + return ret; + } + ret = dhd_tdls_enable(dev, true, true, NULL); + if (ret < 0) { + ANDROID_ERROR(("enable tdls failed. %d\n", ret)); + return ret; + } + return 0; +} +#endif /* WLTDLS */ #ifdef FCC_PWR_LIMIT_2G int wl_android_set_fcc_pwr_limit_2g(struct net_device *dev, char *command, int total_len) @@ -785,175 +988,193 @@ wl_android_get_fcc_pwr_limit_2g(struct net_device *dev, char *command, int total } #endif /* FCC_PWR_LIMIT_2G */ -#ifdef IPV6_NDO_SUPPORT -int -wl_android_nd_ra_limit(struct net_device *dev, char *command, int total_len) +s32 +wl_cfg80211_get_sta_info(struct net_device *dev, char* command, int total_len) { - int err = 0; - int bytes_written = 0; - uint tokens; - char *pos, *token, *delim; - char smbuf[WLC_IOCTL_SMLEN]; - char param[ND_PARAM_SIZE+1], value[ND_VALUE_SIZE+1]; - uint16 type = 0xff, min = 0, per = 0, hold = 0; - nd_ra_ol_limits_t ra_ol_limit; + static char iovar_buf[WLC_IOCTL_MAXLEN]; + int bytes_written = -1, ret = 0; + char *pcmd = command; + char *str; + sta_info_t *sta = NULL; + wl_cnt_wlc_t* wlc_cnt = NULL; + struct ether_addr mac; - WL_TRACE(("command=%s, len=%d\n", command, total_len)); - pos = command + strlen(CMD_NDRA_LIMIT) + 1; - memset(&ra_ol_limit, 0, sizeof(nd_ra_ol_limits_t)); + /* Client information */ + uint16 cap = 0; + uint32 rxrtry = 0; + uint32 rxmulti = 0; - if (!strncmp(pos, ND_RA_OL_SET, strlen(ND_RA_OL_SET))) { - WL_TRACE(("SET NDRA_LIMIT\n")); - pos += 
strlen(ND_RA_OL_SET) + 1; - while ((token = strsep(&pos, ND_PARAMS_DELIMETER)) != NULL) { - memset(param, 0, sizeof(param)); - memset(value, 0, sizeof(value)); - - delim = strchr(token, ND_PARAM_VALUE_DELLIMETER); - if (delim != NULL) - *delim = ' '; - - tokens = sscanf(token, ND_LIMIT_STR_FMT, param, value); - if (!strncmp(param, ND_RA_TYPE, strlen(ND_RA_TYPE))) { - type = simple_strtol(value, NULL, 0); - } else if (!strncmp(param, ND_RA_MIN_TIME, strlen(ND_RA_MIN_TIME))) { - min = simple_strtol(value, NULL, 0); - } else if (!strncmp(param, ND_RA_PER, strlen(ND_RA_PER))) { - per = simple_strtol(value, NULL, 0); - if (per > 100) { - ANDROID_ERROR(("Invalid PERCENT %d\n", per)); - err = BCME_BADARG; - goto exit; - } - } else if (!strncmp(param, ND_RA_HOLD, strlen(ND_RA_HOLD))) { - hold = simple_strtol(value, NULL, 0); - } + ANDROID_TRACE(("%s\n", command)); + str = bcmstrtok(&pcmd, " ", NULL); + if (str) { + str = bcmstrtok(&pcmd, " ", NULL); + /* If GETSTAINFO subcmd name is not provided, return error */ + if (str == NULL) { + ANDROID_ERROR(("GETSTAINFO subcmd not provided %s\n", __FUNCTION__)); + goto error; } - ra_ol_limit.version = htod32(ND_RA_OL_LIMITS_VER); - ra_ol_limit.type = htod32(type); - if (type == ND_RA_OL_LIMITS_REL_TYPE) { - if ((min == 0) || (per == 0)) { - ANDROID_ERROR(("Invalid min_time %d, percent %d\n", min, per)); - err = BCME_BADARG; - goto exit; + memset(&mac, 0, ETHER_ADDR_LEN); + if ((bcm_ether_atoe((str), &mac))) { + /* get the sta info */ + ret = wldev_iovar_getbuf(dev, "sta_info", + (struct ether_addr *)mac.octet, + ETHER_ADDR_LEN, iovar_buf, WLC_IOCTL_SMLEN, NULL); + if (ret < 0) { + ANDROID_ERROR(("Get sta_info ERR %d\n", ret)); + goto error; } - ra_ol_limit.length = htod32(ND_RA_OL_LIMITS_REL_TYPE_LEN); - ra_ol_limit.limits.lifetime_relative.min_time = htod32(min); - ra_ol_limit.limits.lifetime_relative.lifetime_percent = htod32(per); - } else if (type == ND_RA_OL_LIMITS_FIXED_TYPE) { - if (hold == 0) { - ANDROID_ERROR(("Invalid 
hold_time %d\n", hold)); - err = BCME_BADARG; - goto exit; + + sta = (sta_info_t *)iovar_buf; + cap = dtoh16(sta->cap); + rxrtry = dtoh32(sta->rx_pkts_retried); + rxmulti = dtoh32(sta->rx_mcast_pkts); + } else if ((!strncmp(str, "all", 3)) || (!strncmp(str, "ALL", 3))) { + /* get counters info */ + ret = wldev_iovar_getbuf(dev, "counters", NULL, 0, + iovar_buf, WLC_IOCTL_MAXLEN, NULL); + if (unlikely(ret)) { + ANDROID_ERROR(("counters error (%d) - size = %zu\n", + ret, sizeof(wl_cnt_wlc_t))); + goto error; } - ra_ol_limit.length = htod32(ND_RA_OL_LIMITS_FIXED_TYPE_LEN); - ra_ol_limit.limits.fixed.hold_time = htod32(hold); + ret = wl_cntbuf_to_xtlv_format(NULL, iovar_buf, WL_CNTBUF_MAX_SIZE, 0); + if (ret != BCME_OK) { + ANDROID_ERROR(("wl_cntbuf_to_xtlv_format ERR %d\n", ret)); + goto error; + } + if (!(wlc_cnt = GET_WLCCNT_FROM_CNTBUF(iovar_buf))) { + ANDROID_ERROR(("wlc_cnt NULL!\n")); + goto error; + } + + rxrtry = dtoh32(wlc_cnt->rxrtry); + rxmulti = dtoh32(wlc_cnt->rxmulti); } else { - ANDROID_ERROR(("unknown TYPE %d\n", type)); - err = BCME_BADARG; - goto exit; + ANDROID_ERROR(("Get address fail\n")); + goto error; } - - err = wldev_iovar_setbuf(dev, "nd_ra_limit_intv", &ra_ol_limit, - sizeof(nd_ra_ol_limits_t), smbuf, sizeof(smbuf), NULL); - if (err) { - ANDROID_ERROR(("Failed to set nd_ra_limit_intv, error = %d\n", err)); - goto exit; - } - - WL_TRACE(("TYPE %d, MIN %d, PER %d, HOLD %d\n", type, min, per, hold)); - } else if (!strncmp(pos, ND_RA_OL_GET, strlen(ND_RA_OL_GET))) { - WL_TRACE(("GET NDRA_LIMIT\n")); - err = wldev_iovar_getbuf(dev, "nd_ra_limit_intv", NULL, 0, - smbuf, sizeof(smbuf), NULL); - if (err) { - ANDROID_ERROR(("Failed to get nd_ra_limit_intv, error = %d\n", err)); - goto exit; - } - - memcpy(&ra_ol_limit, (uint8 *)smbuf, sizeof(nd_ra_ol_limits_t)); - type = ra_ol_limit.type; - if (ra_ol_limit.version != ND_RA_OL_LIMITS_VER) { - ANDROID_ERROR(("Invalid Version %d\n", ra_ol_limit.version)); - err = BCME_VERSION; - goto exit; - } - - if 
(ra_ol_limit.type == ND_RA_OL_LIMITS_REL_TYPE) { - min = ra_ol_limit.limits.lifetime_relative.min_time; - per = ra_ol_limit.limits.lifetime_relative.lifetime_percent; - ANDROID_ERROR(("TYPE %d, MIN %d, PER %d\n", type, min, per)); - bytes_written = snprintf(command, total_len, - "%s GET TYPE %d, MIN %d, PER %d", CMD_NDRA_LIMIT, type, min, per); - } else if (ra_ol_limit.type == ND_RA_OL_LIMITS_FIXED_TYPE) { - hold = ra_ol_limit.limits.fixed.hold_time; - ANDROID_ERROR(("TYPE %d, HOLD %d\n", type, hold)); - bytes_written = snprintf(command, total_len, - "%s GET TYPE %d, HOLD %d", CMD_NDRA_LIMIT, type, hold); - } else { - ANDROID_ERROR(("unknown TYPE %d\n", type)); - err = BCME_ERROR; - goto exit; - } - - return bytes_written; } else { - ANDROID_ERROR(("unknown command\n")); - err = BCME_ERROR; - goto exit; + ANDROID_ERROR(("Command ERR\n")); + goto error; } -exit: - return err; + bytes_written = snprintf(command, total_len, + "%s %s Rx_Retry_Pkts=%d Rx_BcMc_Pkts=%d CAP=%04x\n", + CMD_GET_STA_INFO, str, rxrtry, rxmulti, cap); + + ANDROID_TRACE(("%s", command)); + +error: + return bytes_written; } -#endif /* IPV6_NDO_SUPPORT */ -#ifdef WLTDLS -int wl_android_tdls_reset(struct net_device *dev) -{ - int ret = 0; - ret = dhd_tdls_enable(dev, false, false, NULL); - if (ret < 0) { - ANDROID_ERROR(("Disable tdls failed. %d\n", ret)); - return ret; - } - ret = dhd_tdls_enable(dev, true, true, NULL); - if (ret < 0) { - ANDROID_ERROR(("enable tdls failed. 
%d\n", ret)); - return ret; - } - return 0; -} -#endif /* WLTDLS */ #endif /* CUSTOMER_HW4_PRIVATE_CMD */ + +#ifdef WBTEXT static int wl_android_wbtext(struct net_device *dev, char *command, int total_len) { - int error = 0, argc = 0; + int error = BCME_OK, argc = 0; int data, bytes_written; + int roam_trigger[2]; + dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev); - argc = sscanf(command+sizeof("WBTEXT_ENABLE"), "%d", &data); + argc = sscanf(command+sizeof(CMD_WBTEXT_ENABLE), "%d", &data); if (!argc) { error = wldev_iovar_getint(dev, "wnm_bsstrans_resp", &data); if (error) { ANDROID_ERROR(("%s: Failed to set wbtext error = %d\n", __FUNCTION__, error)); + return error; } bytes_written = snprintf(command, total_len, "WBTEXT %s\n", - (data == WL_BSSTRANS_POLICY_PRODUCT)? "ENABLED" : "DISABLED"); + (data == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT)? + "ENABLED" : "DISABLED"); return bytes_written; } else { - if (data) - data = WL_BSSTRANS_POLICY_PRODUCT; + if (data) { + data = WL_BSSTRANS_POLICY_PRODUCT_WBTEXT; + } - error = wldev_iovar_setint(dev, "wnm_bsstrans_resp", data); - if (error) { + if ((error = wldev_iovar_setint(dev, "wnm_bsstrans_resp", data)) != BCME_OK) { ANDROID_ERROR(("%s: Failed to set wbtext error = %d\n", __FUNCTION__, error)); + return error; + } + + if (data) { + /* reset roam_prof when wbtext is on */ + if ((error = wl_cfg80211_wbtext_set_default(dev)) != BCME_OK) { + return error; + } + dhdp->wbtext_support = TRUE; + } else { + /* reset legacy roam trigger when wbtext is off */ + roam_trigger[0] = DEFAULT_ROAM_TRIGGER_VALUE; + roam_trigger[1] = WLC_BAND_ALL; + if ((error = wldev_ioctl_set(dev, WLC_SET_ROAM_TRIGGER, roam_trigger, + sizeof(roam_trigger))) != BCME_OK) { + ANDROID_ERROR(("%s: Failed to reset roam trigger = %d\n", + __FUNCTION__, error)); + return error; + } + dhdp->wbtext_support = FALSE; } } return error; } +static int wl_cfg80211_wbtext_btm_timer_threshold(struct net_device *dev, + char *command, int total_len) +{ + int error = BCME_OK, argc 
= 0; + int data, bytes_written; + + argc = sscanf(command, CMD_WBTEXT_BTM_TIMER_THRESHOLD " %d\n", &data); + if (!argc) { + error = wldev_iovar_getint(dev, "wnm_bsstrans_timer_threshold", &data); + if (error) { + ANDROID_ERROR(("Failed to get wnm_bsstrans_timer_threshold (%d)\n", error)); + return error; + } + bytes_written = snprintf(command, total_len, "%d\n", data); + return bytes_written; + } else { + if ((error = wldev_iovar_setint(dev, "wnm_bsstrans_timer_threshold", + data)) != BCME_OK) { + ANDROID_ERROR(("Failed to set wnm_bsstrans_timer_threshold (%d)\n", error)); + return error; + } + } + return error; +} + +static int wl_cfg80211_wbtext_btm_delta(struct net_device *dev, + char *command, int total_len) +{ + int error = BCME_OK, argc = 0; + int data = 0, bytes_written; + + argc = sscanf(command, CMD_WBTEXT_BTM_DELTA " %d\n", &data); + if (!argc) { + error = wldev_iovar_getint(dev, "wnm_btmdelta", &data); + if (error) { + ANDROID_ERROR(("Failed to get wnm_btmdelta (%d)\n", error)); + return error; + } + bytes_written = snprintf(command, total_len, "%d\n", data); + return bytes_written; + } else { + if ((error = wldev_iovar_setint(dev, "wnm_btmdelta", + data)) != BCME_OK) { + ANDROID_ERROR(("Failed to set wnm_btmdelta (%d)\n", error)); + return error; + } + } + return error; +} + +#endif /* WBTEXT */ + #ifdef PNO_SUPPORT #define PNO_PARAM_SIZE 50 #define VALUE_SIZE 50 @@ -964,9 +1185,9 @@ wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len) int err = BCME_OK; uint i, tokens; char *pos, *pos2, *token, *token2, *delim; - char param[PNO_PARAM_SIZE], value[VALUE_SIZE]; + char param[PNO_PARAM_SIZE+1], value[VALUE_SIZE+1]; struct dhd_pno_batch_params batch_params; - ANDROID_INFO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len)); + DHD_PNO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len)); if (total_len < strlen(CMD_WLS_BATCHING)) { ANDROID_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len)); err = 
BCME_ERROR; @@ -991,13 +1212,13 @@ wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len) tokens = sscanf(token, LIMIT_STR_FMT, param, value); if (!strncmp(param, PNO_PARAM_SCANFREQ, strlen(PNO_PARAM_SCANFREQ))) { batch_params.scan_fr = simple_strtol(value, NULL, 0); - ANDROID_INFO(("scan_freq : %d\n", batch_params.scan_fr)); + DHD_PNO(("scan_freq : %d\n", batch_params.scan_fr)); } else if (!strncmp(param, PNO_PARAM_BESTN, strlen(PNO_PARAM_BESTN))) { batch_params.bestn = simple_strtol(value, NULL, 0); - ANDROID_INFO(("bestn : %d\n", batch_params.bestn)); + DHD_PNO(("bestn : %d\n", batch_params.bestn)); } else if (!strncmp(param, PNO_PARAM_MSCAN, strlen(PNO_PARAM_MSCAN))) { batch_params.mscan = simple_strtol(value, NULL, 0); - ANDROID_INFO(("mscan : %d\n", batch_params.mscan)); + DHD_PNO(("mscan : %d\n", batch_params.mscan)); } else if (!strncmp(param, PNO_PARAM_CHANNEL, strlen(PNO_PARAM_CHANNEL))) { i = 0; pos2 = value; @@ -1008,8 +1229,8 @@ wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len) " <> params\n", __FUNCTION__)); goto exit; } - while ((token2 = strsep(&pos2, - PNO_PARAM_CHANNEL_DELIMETER)) != NULL) { + while ((token2 = strsep(&pos2, + PNO_PARAM_CHANNEL_DELIMETER)) != NULL) { if (token2 == NULL || !*token2) break; if (*token2 == '\0') @@ -1017,19 +1238,26 @@ wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len) if (*token2 == 'A' || *token2 == 'B') { batch_params.band = (*token2 == 'A')? WLC_BAND_5G : WLC_BAND_2G; - ANDROID_INFO(("band : %s\n", + DHD_PNO(("band : %s\n", (*token2 == 'A')? 
"A" : "B")); } else { + if ((batch_params.nchan >= WL_NUMCHANNELS) || + (i >= WL_NUMCHANNELS)) { + ANDROID_ERROR(("Too many nchan %d\n", + batch_params.nchan)); + err = BCME_BUFTOOSHORT; + goto exit; + } batch_params.chan_list[i++] = - simple_strtol(token2, NULL, 0); + simple_strtol(token2, NULL, 0); batch_params.nchan++; - ANDROID_INFO(("channel :%d\n", - batch_params.chan_list[i-1])); + DHD_PNO(("channel :%d\n", + batch_params.chan_list[i-1])); } } } else if (!strncmp(param, PNO_PARAM_RTT, strlen(PNO_PARAM_RTT))) { batch_params.rtt = simple_strtol(value, NULL, 0); - ANDROID_INFO(("rtt : %d\n", batch_params.rtt)); + DHD_PNO(("rtt : %d\n", batch_params.rtt)); } else { ANDROID_ERROR(("%s : unknown param: %s\n", __FUNCTION__, param)); err = BCME_ERROR; @@ -1099,7 +1327,7 @@ static int wl_android_set_pno_setup(struct net_device *dev, char *command, int t 0x00 }; #endif /* PNO_SET_DEBUG */ - ANDROID_INFO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len)); + DHD_PNO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len)); if (total_len < (strlen(CMD_PNOSETUP_SET) + sizeof(cmd_tlv_t))) { ANDROID_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len)); @@ -1122,7 +1350,7 @@ static int wl_android_set_pno_setup(struct net_device *dev, char *command, int t str_ptr += sizeof(cmd_tlv_t); tlv_size_left -= sizeof(cmd_tlv_t); - if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local, + if ((nssid = wl_parse_ssid_list_tlv(&str_ptr, ssids_local, MAX_PFN_LIST_COUNT, &tlv_size_left)) <= 0) { ANDROID_ERROR(("SSID is not presented or corrupted ret=%d\n", nssid)); goto exit_proc; @@ -1134,7 +1362,7 @@ static int wl_android_set_pno_setup(struct net_device *dev, char *command, int t } str_ptr++; pno_time = simple_strtoul(str_ptr, &str_ptr, 16); - ANDROID_INFO(("%s: pno_time=%d\n", __FUNCTION__, pno_time)); + DHD_PNO(("%s: pno_time=%d\n", __FUNCTION__, pno_time)); if (str_ptr[0] != 0) { if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) { @@ -1144,7 +1372,7 @@ 
static int wl_android_set_pno_setup(struct net_device *dev, char *command, int t } str_ptr++; pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16); - ANDROID_INFO(("%s :got pno_repeat=%d\n", __FUNCTION__, pno_repeat)); + DHD_PNO(("%s :got pno_repeat=%d\n", __FUNCTION__, pno_repeat)); if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) { ANDROID_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n", __FUNCTION__)); @@ -1152,7 +1380,7 @@ static int wl_android_set_pno_setup(struct net_device *dev, char *command, int t } str_ptr++; pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16); - ANDROID_INFO(("%s: pno_freq_expo_max=%d\n", + DHD_PNO(("%s: pno_freq_expo_max=%d\n", __FUNCTION__, pno_freq_expo_max)); } } @@ -1192,21 +1420,21 @@ wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist * struct maclist *assoc_maclist = (struct maclist *)mac_buf; /* set filtering mode */ - if ((ret = wldev_ioctl(dev, WLC_SET_MACMODE, &macmode, sizeof(macmode), true)) != 0) { + if ((ret = wldev_ioctl_set(dev, WLC_SET_MACMODE, &macmode, sizeof(macmode)) != 0)) { ANDROID_ERROR(("%s : WLC_SET_MACMODE error=%d\n", __FUNCTION__, ret)); return ret; } if (macmode != MACLIST_MODE_DISABLED) { /* set the MAC filter list */ - if ((ret = wldev_ioctl(dev, WLC_SET_MACLIST, maclist, - sizeof(int) + sizeof(struct ether_addr) * maclist->count, true)) != 0) { + if ((ret = wldev_ioctl_set(dev, WLC_SET_MACLIST, maclist, + sizeof(int) + sizeof(struct ether_addr) * maclist->count)) != 0) { ANDROID_ERROR(("%s : WLC_SET_MACLIST error=%d\n", __FUNCTION__, ret)); return ret; } /* get the current list of associated STAs */ assoc_maclist->count = MAX_NUM_OF_ASSOCLIST; - if ((ret = wldev_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, - sizeof(mac_buf), false)) != 0) { + if ((ret = wldev_ioctl_get(dev, WLC_GET_ASSOCLIST, assoc_maclist, + sizeof(mac_buf))) != 0) { ANDROID_ERROR(("%s : WLC_GET_ASSOCLIST error=%d\n", __FUNCTION__, ret)); return ret; } @@ -1235,9 +1463,9 @@ wl_android_set_ap_mac_list(struct 
net_device *dev, int macmode, struct maclist * scbval.val = htod32(1); memcpy(&scbval.ea, &assoc_maclist->ea[i], ETHER_ADDR_LEN); - if ((ret = wldev_ioctl(dev, + if ((ret = wldev_ioctl_set(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, - &scbval, sizeof(scb_val_t), true)) != 0) + &scbval, sizeof(scb_val_t))) != 0) ANDROID_ERROR(("%s WLC_SCB_DEAUTHENTICATE error=%d\n", __FUNCTION__, ret)); } @@ -1252,7 +1480,7 @@ wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist * * */ static int -wl_android_set_mac_address_filter(struct net_device *dev, const char* str) +wl_android_set_mac_address_filter(struct net_device *dev, char* str) { int i; int ret = 0; @@ -1260,7 +1488,7 @@ wl_android_set_mac_address_filter(struct net_device *dev, const char* str) int macmode = MACLIST_MODE_DISABLED; struct maclist *list; char eabuf[ETHER_ADDR_STR_LEN]; - char *token; + const char *token; /* string should look like below (macmode/macnum/maclist) */ /* 1 2 00:11:22:33:44:55 00:11:22:33:44:ff */ @@ -1339,10 +1567,11 @@ int wl_android_wifi_on(struct net_device *dev) printf("%s in 2: g_wifi_on=%d\n", __FUNCTION__, g_wifi_on); if (!g_wifi_on) { do { - dhd_net_wifi_platform_set_power(dev, TRUE, WIFI_TURNON_DELAY); + if (!dhd_net_wifi_platform_set_power(dev, TRUE, WIFI_TURNON_DELAY)) { #ifdef BCMSDIO ret = dhd_net_bus_resume(dev, 0); #endif /* BCMSDIO */ + } #ifdef BCMPCIE ret = dhd_net_bus_devreset(dev, FALSE); #endif /* BCMPCIE */ @@ -1413,12 +1642,19 @@ int wl_android_wifi_off(struct net_device *dev, bool on_failure) } printf("%s in 1\n", __FUNCTION__); +#if defined(BCMPCIE) && defined(DHD_DEBUG_UART) + ret = dhd_debug_uart_is_running(dev); + if (ret) { + ANDROID_ERROR(("%s - Debug UART App is running\n", __FUNCTION__)); + return -EBUSY; + } +#endif /* BCMPCIE && DHD_DEBUG_UART */ dhd_net_if_lock(dev); printf("%s in 2: g_wifi_on=%d, on_failure=%d\n", __FUNCTION__, g_wifi_on, on_failure); if (g_wifi_on || on_failure) { #if defined(BCMSDIO) || defined(BCMPCIE) ret = 
dhd_net_bus_devreset(dev, TRUE); -#ifdef BCMSDIO +#if defined(BCMSDIO) dhd_net_bus_suspend(dev); #endif /* BCMSDIO */ #endif /* BCMSDIO || BCMPCIE */ @@ -1450,7 +1686,6 @@ wl_chanim_stats(struct net_device *dev, u8 *chan_idle) chanim_stats_t *stats; memset(¶m, 0, sizeof(param)); - memset(result, 0, sizeof(result)); param.buflen = htod32(sizeof(wl_chanim_stats_t)); param.count = htod32(WL_CHANIM_COUNT_ONE); @@ -1497,7 +1732,8 @@ wl_chanim_stats(struct net_device *dev, u8 *chan_idle) static int wl_android_get_connection_stats(struct net_device *dev, char *command, int total_len) { - wl_cnt_t* cnt = NULL; + static char iovar_buf[WLC_IOCTL_MAXLEN]; + wl_cnt_wlc_t* wlc_cnt = NULL; #ifndef DISABLE_IF_COUNTERS wl_if_stats_t* if_stats = NULL; #endif /* DISABLE_IF_COUNTERS */ @@ -1540,37 +1776,35 @@ wl_android_get_connection_stats(struct net_device *dev, char *command, int total /* In case if_stats IOVAR is not supported, get information from counters. */ #endif /* DISABLE_IF_COUNTERS */ - if ((cnt = kmalloc(sizeof(*cnt), GFP_KERNEL)) == NULL) { - ANDROID_ERROR(("%s(%d): kmalloc failed\n", __FUNCTION__, __LINE__)); - goto error; - } - memset(cnt, 0, sizeof(*cnt)); - ret = wldev_iovar_getbuf(dev, "counters", NULL, 0, - (char *)cnt, sizeof(wl_cnt_t), NULL); - if (ret) { - ANDROID_ERROR(("%s: wldev_iovar_getbuf() failed, ret=%d\n", - __FUNCTION__, ret)); + iovar_buf, WLC_IOCTL_MAXLEN, NULL); + if (unlikely(ret)) { + ANDROID_ERROR(("counters error (%d) - size = %zu\n", ret, sizeof(wl_cnt_wlc_t))); + goto error; + } + ret = wl_cntbuf_to_xtlv_format(NULL, iovar_buf, WL_CNTBUF_MAX_SIZE, 0); + if (ret != BCME_OK) { + ANDROID_ERROR(("%s wl_cntbuf_to_xtlv_format ERR %d\n", + __FUNCTION__, ret)); goto error; } - if (dtoh16(cnt->version) > WL_CNT_T_VERSION) { - ANDROID_ERROR(("%s: incorrect version of wl_cnt_t, expected=%u got=%u\n", - __FUNCTION__, WL_CNT_T_VERSION, cnt->version)); + if (!(wlc_cnt = GET_WLCCNT_FROM_CNTBUF(iovar_buf))) { + ANDROID_ERROR(("%s wlc_cnt NULL!\n", 
__FUNCTION__)); goto error; } - output->txframe = dtoh32(cnt->txframe); - output->txbyte = dtoh32(cnt->txbyte); - output->txerror = dtoh32(cnt->txerror); - output->rxframe = dtoh32(cnt->rxframe); - output->rxbyte = dtoh32(cnt->rxbyte); - output->txfail = dtoh32(cnt->txfail); - output->txretry = dtoh32(cnt->txretry); - output->txretrie = dtoh32(cnt->txretrie); - output->txrts = dtoh32(cnt->txrts); - output->txnocts = dtoh32(cnt->txnocts); - output->txexptime = dtoh32(cnt->txexptime); + output->txframe = dtoh32(wlc_cnt->txframe); + output->txbyte = dtoh32(wlc_cnt->txbyte); + output->txerror = dtoh32(wlc_cnt->txerror); + output->rxframe = dtoh32(wlc_cnt->rxframe); + output->rxbyte = dtoh32(wlc_cnt->rxbyte); + output->txfail = dtoh32(wlc_cnt->txfail); + output->txretry = dtoh32(wlc_cnt->txretry); + output->txretrie = dtoh32(wlc_cnt->txretrie); + output->txrts = dtoh32(wlc_cnt->txrts); + output->txnocts = dtoh32(wlc_cnt->txnocts); + output->txexptime = dtoh32(wlc_cnt->txexptime); #ifndef DISABLE_IF_COUNTERS } else { /* Populate from if_stats. 
*/ @@ -1588,10 +1822,15 @@ wl_android_get_connection_stats(struct net_device *dev, char *command, int total output->txfail = (uint32)dtoh64(if_stats->txfail); output->txretry = (uint32)dtoh64(if_stats->txretry); output->txretrie = (uint32)dtoh64(if_stats->txretrie); - /* Unavailable */ - output->txrts = 0; - output->txnocts = 0; - output->txexptime = 0; + if (dtoh16(if_stats->length) > OFFSETOF(wl_if_stats_t, txexptime)) { + output->txexptime = (uint32)dtoh64(if_stats->txexptime); + output->txrts = (uint32)dtoh64(if_stats->txrts); + output->txnocts = (uint32)dtoh64(if_stats->txnocts); + } else { + output->txexptime = 0; + output->txrts = 0; + output->txnocts = 0; + } } #endif /* DISABLE_IF_COUNTERS */ @@ -1618,18 +1857,623 @@ error: kfree(if_stats); } #endif /* DISABLE_IF_COUNTERS */ - if (cnt) { - kfree(cnt); - } return bytes_written; } #endif /* CONNECTION_STATISTICS */ +#ifdef WL_NATOE +static int +wl_android_process_natoe_cmd(struct net_device *dev, char *command, int total_len) +{ + int ret = BCME_ERROR; + char *pcmd = command; + char *str = NULL; + wl_natoe_cmd_info_t cmd_info; + const wl_natoe_sub_cmd_t *natoe_cmd = &natoe_cmd_list[0]; + + /* skip to cmd name after "natoe" */ + str = bcmstrtok(&pcmd, " ", NULL); + + /* If natoe subcmd name is not provided, return error */ + if (*pcmd == '\0') { + ANDROID_ERROR(("natoe subcmd not provided %s\n", __FUNCTION__)); + ret = -EINVAL; + return ret; + } + + /* get the natoe command name to str */ + str = bcmstrtok(&pcmd, " ", NULL); + + while (natoe_cmd->name != NULL) { + if (strcmp(natoe_cmd->name, str) == 0) { + /* dispacth cmd to appropriate handler */ + if (natoe_cmd->handler) { + cmd_info.command = command; + cmd_info.tot_len = total_len; + ret = natoe_cmd->handler(dev, natoe_cmd, pcmd, &cmd_info); + } + return ret; + } + natoe_cmd++; + } + return ret; +} + +static int +wlu_natoe_set_vars_cbfn(void *ctx, uint8 *data, uint16 type, uint16 len) +{ + int res = BCME_OK; + wl_natoe_cmd_info_t *cmd_info = 
(wl_natoe_cmd_info_t *)ctx; + uint8 *command = cmd_info->command; + uint16 total_len = cmd_info->tot_len; + uint16 bytes_written = 0; + + UNUSED_PARAMETER(len); + + switch (type) { + + case WL_NATOE_XTLV_ENABLE: + { + bytes_written = snprintf(command, total_len, "natoe: %s\n", + *data?"enabled":"disabled"); + cmd_info->bytes_written = bytes_written; + break; + } + + case WL_NATOE_XTLV_CONFIG_IPS: + { + wl_natoe_config_ips_t *config_ips; + uint8 buf[16]; + + config_ips = (wl_natoe_config_ips_t *)data; + bcm_ip_ntoa((struct ipv4_addr *)&config_ips->sta_ip, buf); + bytes_written = snprintf(command, total_len, "sta ip: %s\n", buf); + bcm_ip_ntoa((struct ipv4_addr *)&config_ips->sta_netmask, buf); + bytes_written += snprintf(command + bytes_written, total_len, + "sta netmask: %s\n", buf); + bcm_ip_ntoa((struct ipv4_addr *)&config_ips->sta_router_ip, buf); + bytes_written += snprintf(command + bytes_written, total_len, + "sta router ip: %s\n", buf); + bcm_ip_ntoa((struct ipv4_addr *)&config_ips->sta_dnsip, buf); + bytes_written += snprintf(command + bytes_written, total_len, + "sta dns ip: %s\n", buf); + bcm_ip_ntoa((struct ipv4_addr *)&config_ips->ap_ip, buf); + bytes_written += snprintf(command + bytes_written, total_len, + "ap ip: %s\n", buf); + bcm_ip_ntoa((struct ipv4_addr *)&config_ips->ap_netmask, buf); + bytes_written += snprintf(command + bytes_written, total_len, + "ap netmask: %s\n", buf); + cmd_info->bytes_written = bytes_written; + break; + } + + case WL_NATOE_XTLV_CONFIG_PORTS: + { + wl_natoe_ports_config_t *ports_config; + + ports_config = (wl_natoe_ports_config_t *)data; + bytes_written = snprintf(command, total_len, "starting port num: %d\n", + dtoh16(ports_config->start_port_num)); + bytes_written += snprintf(command + bytes_written, total_len, + "number of ports: %d\n", dtoh16(ports_config->no_of_ports)); + cmd_info->bytes_written = bytes_written; + break; + } + + case WL_NATOE_XTLV_DBG_STATS: + { + char *stats_dump = (char *)data; + + bytes_written = 
snprintf(command, total_len, "%s\n", stats_dump); + cmd_info->bytes_written = bytes_written; + break; + } + + case WL_NATOE_XTLV_TBL_CNT: + { + bytes_written = snprintf(command, total_len, "natoe max tbl entries: %d\n", + dtoh32(*(uint32 *)data)); + cmd_info->bytes_written = bytes_written; + break; + } + + default: + /* ignore */ + break; + } + + return res; +} + +/* + * --- common for all natoe get commands ---- + */ +static int +wl_natoe_get_ioctl(struct net_device *dev, wl_natoe_ioc_t *natoe_ioc, + uint16 iocsz, uint8 *buf, uint16 buflen, wl_natoe_cmd_info_t *cmd_info) +{ + /* for gets we only need to pass ioc header */ + wl_natoe_ioc_t *iocresp = (wl_natoe_ioc_t *)buf; + int res; + + /* send getbuf natoe iovar */ + res = wldev_iovar_getbuf(dev, "natoe", natoe_ioc, iocsz, buf, + buflen, NULL); + + /* check the response buff */ + if ((res == BCME_OK)) { + /* scans ioctl tlvbuf f& invokes the cbfn for processing */ + res = bcm_unpack_xtlv_buf(cmd_info, iocresp->data, iocresp->len, + BCM_XTLV_OPTION_ALIGN32, wlu_natoe_set_vars_cbfn); + + if (res == BCME_OK) { + res = cmd_info->bytes_written; + } + } + else + { + ANDROID_ERROR(("%s: get command failed code %d\n", __FUNCTION__, res)); + res = BCME_ERROR; + } + + return res; +} + +static int +wl_android_natoe_subcmd_enable(struct net_device *dev, const wl_natoe_sub_cmd_t *cmd, + char *command, wl_natoe_cmd_info_t *cmd_info) +{ + int ret = BCME_OK; + wl_natoe_ioc_t *natoe_ioc; + char *pcmd = command; + uint16 kflags = in_atomic() ? 
GFP_ATOMIC : GFP_KERNEL; + uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ; + uint16 buflen = WL_NATOE_IOC_BUFSZ; + bcm_xtlv_t *pxtlv = NULL; + char *ioctl_buf = NULL; + + ioctl_buf = kzalloc(WLC_IOCTL_MEDLEN, kflags); + if (!ioctl_buf) { + ANDROID_ERROR(("ioctl memory alloc failed\n")); + return -ENOMEM; + } + + /* alloc mem for ioctl headr + tlv data */ + natoe_ioc = kzalloc(iocsz, kflags); + if (!natoe_ioc) { + ANDROID_ERROR(("ioctl header memory alloc failed\n")); + kfree(ioctl_buf); + return -ENOMEM; + } + + /* make up natoe cmd ioctl header */ + natoe_ioc->version = htod16(WL_NATOE_IOCTL_VERSION); + natoe_ioc->id = htod16(cmd->id); + natoe_ioc->len = htod16(WL_NATOE_IOC_BUFSZ); + pxtlv = (bcm_xtlv_t *)natoe_ioc->data; + + if(*pcmd == WL_IOCTL_ACTION_GET) { /* get */ + iocsz = sizeof(*natoe_ioc) + sizeof(*pxtlv); + ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf, + WLC_IOCTL_MEDLEN, cmd_info); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__)); + ret = -EINVAL; + } + } else { /* set */ + uint8 val = bcm_atoi(pcmd); + + /* buflen is max tlv data we can write, it will be decremented as we pack */ + /* save buflen at start */ + uint16 buflen_at_start = buflen; + + /* we'll adjust final ioc size at the end */ + ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &buflen, WL_NATOE_XTLV_ENABLE, + sizeof(uint8), &val, BCM_XTLV_OPTION_ALIGN32); + + if (ret != BCME_OK) { + ret = -EINVAL; + goto exit; + } + + /* adjust iocsz to the end of last data record */ + natoe_ioc->len = (buflen_at_start - buflen); + iocsz = sizeof(*natoe_ioc) + natoe_ioc->len; + + ret = wldev_iovar_setbuf(dev, "natoe", + natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MEDLEN, NULL); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to set iovar %d\n", ret)); + ret = -EINVAL; + } + } + +exit: + kfree(ioctl_buf); + kfree(natoe_ioc); + + return ret; +} + +static int +wl_android_natoe_subcmd_config_ips(struct net_device *dev, + const wl_natoe_sub_cmd_t *cmd, char *command, 
wl_natoe_cmd_info_t *cmd_info) +{ + int ret = BCME_OK; + wl_natoe_config_ips_t config_ips; + wl_natoe_ioc_t *natoe_ioc; + char *pcmd = command; + char *str; + uint16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; + uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ; + uint16 buflen = WL_NATOE_IOC_BUFSZ; + bcm_xtlv_t *pxtlv = NULL; + char *ioctl_buf = NULL; + + ioctl_buf = kzalloc(WLC_IOCTL_MEDLEN, kflags); + if (!ioctl_buf) { + ANDROID_ERROR(("ioctl memory alloc failed\n")); + return -ENOMEM; + } + + /* alloc mem for ioctl headr + tlv data */ + natoe_ioc = kzalloc(iocsz, kflags); + if (!natoe_ioc) { + ANDROID_ERROR(("ioctl header memory alloc failed\n")); + kfree(ioctl_buf); + return -ENOMEM; + } + + /* make up natoe cmd ioctl header */ + natoe_ioc->version = htod16(WL_NATOE_IOCTL_VERSION); + natoe_ioc->id = htod16(cmd->id); + natoe_ioc->len = htod16(WL_NATOE_IOC_BUFSZ); + pxtlv = (bcm_xtlv_t *)natoe_ioc->data; + + if(*pcmd == WL_IOCTL_ACTION_GET) { /* get */ + iocsz = sizeof(*natoe_ioc) + sizeof(*pxtlv); + ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf, + WLC_IOCTL_MEDLEN, cmd_info); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__)); + ret = -EINVAL; + } + } else { /* set */ + /* buflen is max tlv data we can write, it will be decremented as we pack */ + /* save buflen at start */ + uint16 buflen_at_start = buflen; + + memset(&config_ips, 0, sizeof(config_ips)); + + str = bcmstrtok(&pcmd, " ", NULL); + if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.sta_ip)) { + ANDROID_ERROR(("Invalid STA IP addr %s\n", str)); + ret = -EINVAL; + goto exit; + } + + str = bcmstrtok(&pcmd, " ", NULL); + if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.sta_netmask)) { + ANDROID_ERROR(("Invalid STA netmask %s\n", str)); + ret = -EINVAL; + goto exit; + } + + str = bcmstrtok(&pcmd, " ", NULL); + if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.sta_router_ip)) { + ANDROID_ERROR(("Invalid STA router IP 
addr %s\n", str)); + ret = -EINVAL; + goto exit; + } + + str = bcmstrtok(&pcmd, " ", NULL); + if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.sta_dnsip)) { + ANDROID_ERROR(("Invalid STA DNS IP addr %s\n", str)); + ret = -EINVAL; + goto exit; + } + + str = bcmstrtok(&pcmd, " ", NULL); + if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.ap_ip)) { + ANDROID_ERROR(("Invalid AP IP addr %s\n", str)); + ret = -EINVAL; + goto exit; + } + + str = bcmstrtok(&pcmd, " ", NULL); + if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.ap_netmask)) { + ANDROID_ERROR(("Invalid AP netmask %s\n", str)); + ret = -EINVAL; + goto exit; + } + + ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, + &buflen, WL_NATOE_XTLV_CONFIG_IPS, sizeof(config_ips), + &config_ips, BCM_XTLV_OPTION_ALIGN32); + + if (ret != BCME_OK) { + ret = -EINVAL; + goto exit; + } + + /* adjust iocsz to the end of last data record */ + natoe_ioc->len = (buflen_at_start - buflen); + iocsz = sizeof(*natoe_ioc) + natoe_ioc->len; + + ret = wldev_iovar_setbuf(dev, "natoe", + natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MEDLEN, NULL); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to set iovar %d\n", ret)); + ret = -EINVAL; + } + } + +exit: + kfree(ioctl_buf); + kfree(natoe_ioc); + + return ret; +} + +static int +wl_android_natoe_subcmd_config_ports(struct net_device *dev, + const wl_natoe_sub_cmd_t *cmd, char *command, wl_natoe_cmd_info_t *cmd_info) +{ + int ret = BCME_OK; + wl_natoe_ports_config_t ports_config; + wl_natoe_ioc_t *natoe_ioc; + char *pcmd = command; + char *str; + uint16 kflags = in_atomic() ? 
GFP_ATOMIC : GFP_KERNEL; + uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ; + uint16 buflen = WL_NATOE_IOC_BUFSZ; + bcm_xtlv_t *pxtlv = NULL; + char *ioctl_buf = NULL; + + ioctl_buf = kzalloc(WLC_IOCTL_MEDLEN, kflags); + if (!ioctl_buf) { + ANDROID_ERROR(("ioctl memory alloc failed\n")); + return -ENOMEM; + } + + /* alloc mem for ioctl headr + tlv data */ + natoe_ioc = kzalloc(iocsz, kflags); + if (!natoe_ioc) { + ANDROID_ERROR(("ioctl header memory alloc failed\n")); + kfree(ioctl_buf); + return -ENOMEM; + } + + /* make up natoe cmd ioctl header */ + natoe_ioc->version = htod16(WL_NATOE_IOCTL_VERSION); + natoe_ioc->id = htod16(cmd->id); + natoe_ioc->len = htod16(WL_NATOE_IOC_BUFSZ); + pxtlv = (bcm_xtlv_t *)natoe_ioc->data; + + if(*pcmd == WL_IOCTL_ACTION_GET) { /* get */ + iocsz = sizeof(*natoe_ioc) + sizeof(*pxtlv); + ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf, + WLC_IOCTL_MEDLEN, cmd_info); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__)); + ret = -EINVAL; + } + } else { /* set */ + /* buflen is max tlv data we can write, it will be decremented as we pack */ + /* save buflen at start */ + uint16 buflen_at_start = buflen; + + memset(&ports_config, 0, sizeof(ports_config)); + + str = bcmstrtok(&pcmd, " ", NULL); + if (!str) { + ANDROID_ERROR(("Invalid port string %s\n", str)); + ret = -EINVAL; + goto exit; + } + ports_config.start_port_num = htod16(bcm_atoi(str)); + + str = bcmstrtok(&pcmd, " ", NULL); + if (!str) { + ANDROID_ERROR(("Invalid port string %s\n", str)); + ret = -EINVAL; + goto exit; + } + ports_config.no_of_ports = htod16(bcm_atoi(str)); + + if ((uint32)(ports_config.start_port_num + ports_config.no_of_ports) > + NATOE_MAX_PORT_NUM) { + ANDROID_ERROR(("Invalid port configuration\n")); + ret = -EINVAL; + goto exit; + } + ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, + &buflen, WL_NATOE_XTLV_CONFIG_PORTS, sizeof(ports_config), + &ports_config, BCM_XTLV_OPTION_ALIGN32); + + if (ret != BCME_OK) { + ret 
= -EINVAL; + goto exit; + } + + /* adjust iocsz to the end of last data record */ + natoe_ioc->len = (buflen_at_start - buflen); + iocsz = sizeof(*natoe_ioc) + natoe_ioc->len; + + ret = wldev_iovar_setbuf(dev, "natoe", + natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MEDLEN, NULL); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to set iovar %d\n", ret)); + ret = -EINVAL; + } + } + +exit: + kfree(ioctl_buf); + kfree(natoe_ioc); + + return ret; +} + +static int +wl_android_natoe_subcmd_dbg_stats(struct net_device *dev, const wl_natoe_sub_cmd_t *cmd, + char *command, wl_natoe_cmd_info_t *cmd_info) +{ + int ret = BCME_OK; + wl_natoe_ioc_t *natoe_ioc; + char *pcmd = command; + uint16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; + uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_DBG_STATS_BUFSZ; + uint16 buflen = WL_NATOE_DBG_STATS_BUFSZ; + bcm_xtlv_t *pxtlv = NULL; + char *ioctl_buf = NULL; + + ioctl_buf = kzalloc(WLC_IOCTL_MAXLEN, kflags); + if (!ioctl_buf) { + ANDROID_ERROR(("ioctl memory alloc failed\n")); + return -ENOMEM; + } + + /* alloc mem for ioctl headr + tlv data */ + natoe_ioc = kzalloc(iocsz, kflags); + if (!natoe_ioc) { + ANDROID_ERROR(("ioctl header memory alloc failed\n")); + kfree(ioctl_buf); + return -ENOMEM; + } + + /* make up natoe cmd ioctl header */ + natoe_ioc->version = htod16(WL_NATOE_IOCTL_VERSION); + natoe_ioc->id = htod16(cmd->id); + natoe_ioc->len = htod16(WL_NATOE_DBG_STATS_BUFSZ); + pxtlv = (bcm_xtlv_t *)natoe_ioc->data; + + if(*pcmd == WL_IOCTL_ACTION_GET) { /* get */ + iocsz = sizeof(*natoe_ioc) + sizeof(*pxtlv); + ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf, + WLC_IOCTL_MAXLEN, cmd_info); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__)); + ret = -EINVAL; + } + } else { /* set */ + uint8 val = bcm_atoi(pcmd); + + /* buflen is max tlv data we can write, it will be decremented as we pack */ + /* save buflen at start */ + uint16 buflen_at_start = buflen; + + /* we'll adjust final ioc size at the end */ 
+ ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &buflen, WL_NATOE_XTLV_ENABLE, + sizeof(uint8), &val, BCM_XTLV_OPTION_ALIGN32); + + if (ret != BCME_OK) { + ret = -EINVAL; + goto exit; + } + + /* adjust iocsz to the end of last data record */ + natoe_ioc->len = (buflen_at_start - buflen); + iocsz = sizeof(*natoe_ioc) + natoe_ioc->len; + + ret = wldev_iovar_setbuf(dev, "natoe", + natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MAXLEN, NULL); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to set iovar %d\n", ret)); + ret = -EINVAL; + } + } + +exit: + kfree(ioctl_buf); + kfree(natoe_ioc); + + return ret; +} + +static int +wl_android_natoe_subcmd_tbl_cnt(struct net_device *dev, const wl_natoe_sub_cmd_t *cmd, + char *command, wl_natoe_cmd_info_t *cmd_info) +{ + int ret = BCME_OK; + wl_natoe_ioc_t *natoe_ioc; + char *pcmd = command; + uint16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; + uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ; + uint16 buflen = WL_NATOE_IOC_BUFSZ; + bcm_xtlv_t *pxtlv = NULL; + char *ioctl_buf = NULL; + + ioctl_buf = kzalloc(WLC_IOCTL_MEDLEN, kflags); + if (!ioctl_buf) { + ANDROID_ERROR(("ioctl memory alloc failed\n")); + return -ENOMEM; + } + + /* alloc mem for ioctl headr + tlv data */ + natoe_ioc = kzalloc(iocsz, kflags); + if (!natoe_ioc) { + ANDROID_ERROR(("ioctl header memory alloc failed\n")); + kfree(ioctl_buf); + return -ENOMEM; + } + + /* make up natoe cmd ioctl header */ + natoe_ioc->version = htod16(WL_NATOE_IOCTL_VERSION); + natoe_ioc->id = htod16(cmd->id); + natoe_ioc->len = htod16(WL_NATOE_IOC_BUFSZ); + pxtlv = (bcm_xtlv_t *)natoe_ioc->data; + + if(*pcmd == WL_IOCTL_ACTION_GET) { /* get */ + iocsz = sizeof(*natoe_ioc) + sizeof(*pxtlv); + ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf, + WLC_IOCTL_MEDLEN, cmd_info); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__)); + ret = -EINVAL; + } + } else { /* set */ + uint32 val = bcm_atoi(pcmd); + + /* buflen is max tlv data we can write, it will be 
decremented as we pack */ + /* save buflen at start */ + uint16 buflen_at_start = buflen; + + /* we'll adjust final ioc size at the end */ + ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &buflen, WL_NATOE_XTLV_TBL_CNT, + sizeof(uint32), &val, BCM_XTLV_OPTION_ALIGN32); + + if (ret != BCME_OK) { + ret = -EINVAL; + goto exit; + } + + /* adjust iocsz to the end of last data record */ + natoe_ioc->len = (buflen_at_start - buflen); + iocsz = sizeof(*natoe_ioc) + natoe_ioc->len; + + ret = wldev_iovar_setbuf(dev, "natoe", + natoe_ioc, iocsz, ioctl_buf, WLC_IOCTL_MEDLEN, NULL); + if (ret != BCME_OK) { + ANDROID_ERROR(("Fail to set iovar %d\n", ret)); + ret = -EINVAL; + } + } + +exit: + kfree(ioctl_buf); + kfree(natoe_ioc); + + return ret; +} + +#endif /* WL_NATOE */ #ifdef CUSTOMER_HW4_PRIVATE_CMD #endif /* CUSTOMER_HW4_PRIVATE_CMD */ +#if defined(WL_SUPPORT_AUTO_CHANNEL) /* SoftAP feature */ #define APCS_BAND_2G_LEGACY1 20 #define APCS_BAND_2G_LEGACY2 0 @@ -1640,7 +2484,6 @@ error: #define APCS_MAX_RETRY 10 #define APCS_DEFAULT_2G_CH 1 #define APCS_DEFAULT_5G_CH 149 -#if defined(WL_SUPPORT_AUTO_CHANNEL) static int wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str, char* command, int total_len) @@ -1653,6 +2496,7 @@ wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str, u8 *reqbuf = NULL; uint32 band = WLC_BAND_2G; uint32 buf_size; + char *pos = command; if (cmd_str) { ANDROID_INFO(("Command: %s len:%d \n", cmd_str, (int)strlen(cmd_str))); @@ -1683,7 +2527,8 @@ wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str, } ANDROID_INFO(("HAPD_AUTO_CHANNEL = %d, band=%d \n", channel, band)); - if ((ret = wldev_ioctl(dev, WLC_GET_SPECT_MANAGMENT, &spect, sizeof(spect), false)) < 0) { + if ((ret = + wldev_ioctl_get(dev, WLC_GET_SPECT_MANAGMENT, &spect, sizeof(spect))) < 0) { ANDROID_ERROR(("ACS: error getting the spect\n")); goto done; } @@ -1692,7 +2537,7 @@ wl_android_set_auto_channel(struct net_device *dev, const char* 
cmd_str, /* If STA is connected, return is STA channel, else ACS can be issued, * set spect to 0 and proceed with ACS */ - channel = wl_cfg80211_get_sta_channel(); + channel = wl_cfg80211_get_sta_channel(dev); if (channel) { channel = (channel <= CH_MAX_2G_CHANNEL) ? channel : APCS_DEFAULT_2G_CH; goto done2; @@ -1735,8 +2580,8 @@ wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str, } buf_size = (band == WLC_BAND_AUTO) ? sizeof(int) : CHANSPEC_BUF_SIZE; - ret = wldev_ioctl(dev, WLC_START_CHANNEL_SEL, (void *)reqbuf, - buf_size, true); + ret = wldev_ioctl_set(dev, WLC_START_CHANNEL_SEL, (void *)reqbuf, + buf_size); if (ret < 0) { ANDROID_ERROR(("can't start auto channel scan, err = %d\n", ret)); channel = 0; @@ -1756,8 +2601,8 @@ wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str, retry = APCS_MAX_RETRY; while (retry--) { - ret = wldev_ioctl(dev, WLC_GET_CHANNEL_SEL, &chosen, - sizeof(chosen), false); + ret = wldev_ioctl_get(dev, WLC_GET_CHANNEL_SEL, &chosen, + sizeof(chosen)); if (ret < 0) { chosen = 0; } else { @@ -1779,7 +2624,7 @@ wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str, apcs_band = (band == WLC_BAND_AUTO) ? WLC_BAND_2G : band; chosen_band = (channel <= CH_MAX_2G_CHANNEL) ? 
WLC_BAND_2G : WLC_BAND_5G; if (apcs_band == chosen_band) { - ANDROID_ERROR(("selected channel = %d\n", channel)); + printf("%s: selected channel = %d\n", __FUNCTION__, channel); break; } } @@ -1810,7 +2655,11 @@ done2: } if (channel) { - snprintf(command, 4, "%d", channel); + if (channel < 15) + pos += snprintf(pos, total_len, "2g="); + else + pos += snprintf(pos, total_len, "5g="); + pos += snprintf(pos, total_len, "%d", channel); ANDROID_INFO(("command result is %s \n", command)); return strlen(command); } else { @@ -1832,13 +2681,13 @@ wl_android_set_lpc(struct net_device *dev, const char* string_num) lpc_enabled = bcm_atoi(string_num); ANDROID_INFO(("%s : HAPD_LPC_ENABLED = %d\n", __FUNCTION__, lpc_enabled)); - ret = wldev_ioctl(dev, WLC_DOWN, &val, sizeof(s32), true); + ret = wldev_ioctl_set(dev, WLC_DOWN, &val, sizeof(s32)); if (ret < 0) ANDROID_ERROR(("WLC_DOWN error %d\n", ret)); wldev_iovar_setint(dev, "lpc", lpc_enabled); - ret = wldev_ioctl(dev, WLC_UP, &val, sizeof(s32), true); + ret = wldev_ioctl_set(dev, WLC_UP, &val, sizeof(s32)); if (ret < 0) ANDROID_ERROR(("WLC_UP error %d\n", ret)); @@ -1857,11 +2706,11 @@ wl_android_ch_res_rl(struct net_device *dev, bool change) srl = 4; lrl = 2; } - error = wldev_ioctl(dev, WLC_SET_SRL, &srl, sizeof(s32), true); + error = wldev_ioctl_set(dev, WLC_SET_SRL, &srl, sizeof(s32)); if (error) { ANDROID_ERROR(("Failed to set SRL, error = %d\n", error)); } - error = wldev_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(s32), true); + error = wldev_ioctl_set(dev, WLC_SET_LRL, &lrl, sizeof(s32)); if (error) { ANDROID_ERROR(("Failed to set LRL, error = %d\n", error)); } @@ -1869,6 +2718,7 @@ wl_android_ch_res_rl(struct net_device *dev, bool change) } +#ifdef WL_RELMCAST static int wl_android_rmc_enable(struct net_device *net, int rmc_enable) { @@ -1920,12 +2770,13 @@ static int wl_android_set_rmc_event(struct net_device *dev, char *command, int t } /* set pid, and if the event was happened, let's send a notification through netlink */ 
- wl_cfg80211_set_rmc_pid(pid); + wl_cfg80211_set_rmc_pid(dev, pid); ANDROID_TRACE(("RMC pid=%d\n", pid)); return err; } +#endif /* WL_RELMCAST */ int wl_android_get_singlecore_scan(struct net_device *dev, char *command, int total_len) { @@ -2022,7 +2873,7 @@ wl_android_set_sarlimit_txctrl(struct net_device *dev, const char* string_num) int setval = 0; s32 mode = bcm_atoi(string_num); - /* As Samsung specific and their requirement, '0' means activate sarlimit + /* '0' means activate sarlimit * and '-1' means back to normal state (deactivate sarlimit) */ if (mode == 0) { @@ -2078,7 +2929,8 @@ int wl_android_set_ibss_beacon_ouidata(struct net_device *dev, char *command, in s32 iecount; uint32 pktflag; u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; - s32 err = BCME_OK; + s32 err = BCME_OK, bssidx; + struct bcm_cfg80211 *cfg = wl_get_cfg(dev); /* Check the VSIE (Vendor Specific IE) which was added. * If exist then send IOVAR to delete it @@ -2087,8 +2939,17 @@ int wl_android_set_ibss_beacon_ouidata(struct net_device *dev, char *command, in return -EINVAL; } + if (total_len < (strlen(CMD_SETIBSSBEACONOUIDATA) + 1)) { + ANDROID_ERROR(("error. total_len:%d\n", total_len)); + return -EINVAL; + } + pcmd = command + strlen(CMD_SETIBSSBEACONOUIDATA) + 1; for (idx = 0; idx < DOT11_OUI_LEN; idx++) { + if (*pcmd == '\0') { + ANDROID_ERROR(("error while parsing OUI.\n")); + return -EINVAL; + } hex[0] = *pcmd++; hex[1] = *pcmd++; ie_buf[idx] = (uint8)simple_strtoul(hex, NULL, 16); @@ -2100,7 +2961,13 @@ int wl_android_set_ibss_beacon_ouidata(struct net_device *dev, char *command, in ie_buf[idx++] = (uint8)simple_strtoul(hex, NULL, 16); datalen++; } - tot_len = sizeof(vndr_ie_setbuf_t) + (datalen - 1); + + if (datalen <= 0) { + ANDROID_ERROR(("error. 
vndr ie len:%d\n", datalen)); + return -EINVAL; + } + + tot_len = (int)(sizeof(vndr_ie_setbuf_t) + (datalen - 1)); vndr_ie = (vndr_ie_setbuf_t *) kzalloc(tot_len, kflags); if (!vndr_ie) { ANDROID_ERROR(("IE memory alloc failed\n")); @@ -2138,9 +3005,15 @@ int wl_android_set_ibss_beacon_ouidata(struct net_device *dev, char *command, in return -ENOMEM; } memset(ioctl_buf, 0, WLC_IOCTL_MEDLEN); /* init the buffer */ - err = wldev_iovar_setbuf(dev, "ie", vndr_ie, tot_len, ioctl_buf, WLC_IOCTL_MEDLEN, NULL); - + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + ANDROID_ERROR(("Find index failed\n")); + err = BCME_ERROR; + goto end; + } + err = wldev_iovar_setbuf_bsscfg(dev, "vndr_ie", vndr_ie, tot_len, ioctl_buf, + WLC_IOCTL_MEDLEN, bssidx, &cfg->ioctl_buf_sync); +end: if (err != BCME_OK) { err = -EINVAL; if (vndr_ie) { @@ -2149,7 +3022,7 @@ int wl_android_set_ibss_beacon_ouidata(struct net_device *dev, char *command, in } else { /* do NOT free 'vndr_ie' for the next process */ - wl_cfg80211_ibss_vsie_set_buffer(vndr_ie, tot_len); + wl_cfg80211_ibss_vsie_set_buffer(dev, vndr_ie, tot_len); } if (ioctl_buf) { @@ -2183,6 +3056,11 @@ wl_android_set_roampref(struct net_device *dev, char *command, int total_len) total_len_left = total_len - strlen(CMD_SET_ROAMPREF) + 1; num_akm_suites = simple_strtoul(pcmd, NULL, 16); + if (num_akm_suites > MAX_NUM_SUITES) { + ANDROID_ERROR(("too many AKM suites = %d\n", num_akm_suites)); + return -1; + } + /* Increment for number of AKM suites field + space */ pcmd += 3; total_len_left -= 3; @@ -2322,13 +3200,13 @@ wl_android_iolist_add(struct net_device *dev, struct list_head *head, struct io_ ret = -ENOMEM; goto error; } - ret = wldev_ioctl(dev, config->ioctl, resume_cfg->arg, config->len, false); + ret = wldev_ioctl_get(dev, config->ioctl, resume_cfg->arg, config->len); if (ret) { ANDROID_ERROR(("%s: Failed to get ioctl %d\n", __FUNCTION__, config->ioctl)); goto error; } - ret = wldev_ioctl(dev, config->ioctl + 1, 
config->arg, config->len, true); + ret = wldev_ioctl_set(dev, config->ioctl + 1, config->arg, config->len); if (ret) { ANDROID_ERROR(("%s: Failed to set %s to %d\n", __FUNCTION__, config->iovar, config->param)); @@ -2356,16 +3234,23 @@ wl_android_iolist_resume(struct net_device *dev, struct list_head *head) struct list_head *cur, *q; s32 ret = 0; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif list_for_each_safe(cur, q, head) { config = list_entry(cur, struct io_cfg, list); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif if (config->iovar) { if (!ret) ret = wldev_iovar_setint(dev, config->iovar, config->param); } else { if (!ret) - ret = wldev_ioctl(dev, config->ioctl + 1, - config->arg, config->len, true); + ret = wldev_ioctl_set(dev, config->ioctl + 1, + config->arg, config->len); if (config->ioctl + 1 == WLC_SET_PM) wl_cfg80211_update_power_mode(dev); kfree(config->arg); @@ -2439,7 +3324,7 @@ wl_android_set_miracast(struct net_device *dev, char *command, int total_len) /* setting mchan_algo to platform specific value */ config.iovar = "mchan_algo"; - ret = wldev_ioctl(dev, WLC_GET_BCNPRD, &val, sizeof(int), false); + ret = wldev_ioctl_get(dev, WLC_GET_BCNPRD, &val, sizeof(int)); if (!ret && val > 100) { config.param = 0; ANDROID_ERROR(("%s: Connected station's beacon interval: " @@ -2482,7 +3367,7 @@ wl_android_set_miracast(struct net_device *dev, char *command, int total_len) } /* tunr off pm */ - ret = wldev_ioctl(dev, WLC_GET_PM, &val, sizeof(val), false); + ret = wldev_ioctl_get(dev, WLC_GET_PM, &val, sizeof(val)); if (ret) { goto resume; } @@ -2563,7 +3448,7 @@ static void wl_netlink_deinit(void) } s32 -wl_netlink_send_msg(int pid, int type, int seq, void *data, size_t size) +wl_netlink_send_msg(int pid, int type, int seq, const void *data, size_t size) { struct sk_buff *skb = NULL; struct nlmsghdr *nlh = NULL; @@ -2603,22 
+3488,16 @@ nlmsg_failure: int wl_keep_alive_set(struct net_device *dev, char* extra, int total_len) { - char buf[256]; - const char *str; wl_mkeep_alive_pkt_t mkeep_alive_pkt; - wl_mkeep_alive_pkt_t *mkeep_alive_pktp; - int buf_len; - int str_len; - int res = -1; + int ret; uint period_msec = 0; + char *buf; - if (extra == NULL) - { + if (extra == NULL) { ANDROID_ERROR(("%s: extra is NULL\n", __FUNCTION__)); return -1; } - if (sscanf(extra, "%d", &period_msec) != 1) - { + if (sscanf(extra, "%d", &period_msec) != 1) { ANDROID_ERROR(("%s: sscanf error. check period_msec value\n", __FUNCTION__)); return -EINVAL; } @@ -2626,106 +3505,29 @@ int wl_keep_alive_set(struct net_device *dev, char* extra, int total_len) memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t)); - str = "mkeep_alive"; - str_len = strlen(str); - strncpy(buf, str, str_len); - buf[ str_len ] = '\0'; - mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1); mkeep_alive_pkt.period_msec = period_msec; - buf_len = str_len + 1; mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION); mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN); /* Setup keep alive zero for null packet generation */ mkeep_alive_pkt.keep_alive_id = 0; mkeep_alive_pkt.len_bytes = 0; - buf_len += WL_MKEEP_ALIVE_FIXED_LEN; - /* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and - * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no - * guarantee that the buffer is properly aligned. - */ - memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN); - if ((res = wldev_ioctl(dev, WLC_SET_VAR, buf, buf_len, TRUE)) < 0) - { - ANDROID_ERROR(("%s:keep_alive set failed. 
res[%d]\n", __FUNCTION__, res)); + buf = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL); + if (!buf) { + ANDROID_ERROR(("%s: buffer alloc failed\n", __FUNCTION__)); + return BCME_NOMEM; } + ret = wldev_iovar_setbuf(dev, "mkeep_alive", (char *)&mkeep_alive_pkt, + WL_MKEEP_ALIVE_FIXED_LEN, buf, WLC_IOCTL_SMLEN, NULL); + if (ret < 0) + ANDROID_ERROR(("%s:keep_alive set failed:%d\n", __FUNCTION__, ret)); else - { - ANDROID_ERROR(("%s:keep_alive set ok. res[%d]\n", __FUNCTION__, res)); - } - - return res; + ANDROID_TRACE(("%s:keep_alive set ok\n", __FUNCTION__)); + kfree(buf); + return ret; } -#ifdef WL_CFG80211 -static const char * -get_string_by_separator(char *result, int result_len, const char *src, char separator) -{ - char *end = result + result_len - 1; - while ((result != end) && (*src != separator) && (*src)) { - *result++ = *src++; - } - *result = 0; - if (*src == separator) { - ++src; - } - return src; -} - -int -wl_android_set_roam_offload_bssid_list(struct net_device *dev, const char *cmd) -{ - char sbuf[32]; - int i, cnt, size, err, ioctl_buf_len; - roamoffl_bssid_list_t *bssid_list; - const char *str = cmd; - char *ioctl_buf; - dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(); - - str = get_string_by_separator(sbuf, 32, str, ','); - cnt = bcm_atoi(sbuf); - cnt = MIN(cnt, MAX_ROAMOFFL_BSSID_NUM); - - if ((cnt > 0) && - (((dhdp->op_mode & DHD_FLAG_STA_MODE) && (dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) || - FALSE)) { - ANDROID_ERROR(("Can't set ROAMOFFL_BSSID when enabled STA-SoftAP or WES\n")); - return -EINVAL; - } - - size = sizeof(int32) + sizeof(struct ether_addr) * cnt; - ANDROID_ERROR(("ROAM OFFLOAD BSSID LIST %d BSSIDs, size %d\n", cnt, size)); - bssid_list = kmalloc(size, GFP_KERNEL); - if (bssid_list == NULL) { - ANDROID_ERROR(("%s: memory alloc for bssid list(%d) failed\n", - __FUNCTION__, size)); - return -ENOMEM; - } - ioctl_buf_len = size + 64; - ioctl_buf = kmalloc(ioctl_buf_len, GFP_KERNEL); - if (ioctl_buf == NULL) { - ANDROID_ERROR(("%s: memory alloc for 
ioctl_buf(%d) failed\n", - __FUNCTION__, ioctl_buf_len)); - kfree(bssid_list); - return -ENOMEM; - } - - for (i = 0; i < cnt; i++) { - str = get_string_by_separator(sbuf, 32, str, ','); - bcm_ether_atoe(sbuf, &bssid_list->bssid[i]); - } - - bssid_list->cnt = (int32)cnt; - err = wldev_iovar_setbuf(dev, "roamoffl_bssid_list", - bssid_list, size, ioctl_buf, ioctl_buf_len, NULL); - kfree(bssid_list); - kfree(ioctl_buf); - - return err; -} -#endif - #ifdef P2PRESP_WFDIE_SRC static int wl_android_get_wfdie_resp(struct net_device *dev, char *command, int total_len) { @@ -2792,7 +3594,7 @@ wl_android_set_rps_cpus(struct net_device *dev, char *command, int total_len) #if defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE) && defined(WL_CFG80211) if (!error) { - void *dhdp = wl_cfg80211_get_dhdp(); + void *dhdp = wl_cfg80211_get_dhdp(net); if (enable) { ANDROID_TRACE(("%s : set ack suppress. TCPACK_SUP_HOLD.\n", __FUNCTION__)); dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_HOLD); @@ -2806,6 +3608,132 @@ wl_android_set_rps_cpus(struct net_device *dev, char *command, int total_len) return error; } #endif /* SET_RPS_CPUS */ + +static int wl_android_get_link_status(struct net_device *dev, char *command, + int total_len) +{ + int bytes_written, error, result = 0, single_stream, stf = -1, i, nss = 0, mcs_map; + uint32 rspec; + uint encode, rate, txexp; + struct wl_bss_info *bi; + int datalen = sizeof(uint32) + sizeof(wl_bss_info_t); + char buf[datalen]; + + /* get BSS information */ + *(u32 *) buf = htod32(datalen); + error = wldev_ioctl_get(dev, WLC_GET_BSS_INFO, (void *)buf, datalen); + if (unlikely(error)) { + ANDROID_ERROR(("Could not get bss info %d\n", error)); + return -1; + } + + bi = (struct wl_bss_info *) (buf + sizeof(uint32)); + + for (i = 0; i < ETHER_ADDR_LEN; i++) { + if (bi->BSSID.octet[i] > 0) { + break; + } + } + + if (i == ETHER_ADDR_LEN) { + ANDROID_TRACE(("No BSSID\n")); + return -1; + } + + /* check VHT capability at beacon */ + if (bi->vht_cap) { + if 
(CHSPEC_IS5G(bi->chanspec)) { + result |= WL_ANDROID_LINK_AP_VHT_SUPPORT; + } + } + + /* get a rspec (radio spectrum) rate */ + error = wldev_iovar_getint(dev, "nrate", &rspec); + if (unlikely(error) || rspec == 0) { + ANDROID_ERROR(("get link status error (%d)\n", error)); + return -1; + } + + encode = (rspec & WL_RSPEC_ENCODING_MASK); + rate = (rspec & WL_RSPEC_RATE_MASK); + txexp = (rspec & WL_RSPEC_TXEXP_MASK) >> WL_RSPEC_TXEXP_SHIFT; + + switch (encode) { + case WL_RSPEC_ENCODE_HT: + /* check Rx MCS Map for HT */ + for (i = 0; i < MAX_STREAMS_SUPPORTED; i++) { + int8 bitmap = 0xFF; + if (i == MAX_STREAMS_SUPPORTED-1) { + bitmap = 0x7F; + } + if (bi->basic_mcs[i] & bitmap) { + nss++; + } + } + break; + case WL_RSPEC_ENCODE_VHT: + /* check Rx MCS Map for VHT */ + for (i = 1; i <= VHT_CAP_MCS_MAP_NSS_MAX; i++) { + mcs_map = VHT_MCS_MAP_GET_MCS_PER_SS(i, dtoh16(bi->vht_rxmcsmap)); + if (mcs_map != VHT_CAP_MCS_MAP_NONE) { + nss++; + } + } + break; + } + + /* check MIMO capability with nss in beacon */ + if (nss > 1) { + result |= WL_ANDROID_LINK_AP_MIMO_SUPPORT; + } + + single_stream = (encode == WL_RSPEC_ENCODE_RATE) || + ((encode == WL_RSPEC_ENCODE_HT) && rate < 8) || + ((encode == WL_RSPEC_ENCODE_VHT) && + ((rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT) == 1); + + if (txexp == 0) { + if ((rspec & WL_RSPEC_STBC) && single_stream) { + stf = OLD_NRATE_STF_STBC; + } else { + stf = (single_stream) ? 
OLD_NRATE_STF_SISO : OLD_NRATE_STF_SDM; + } + } else if (txexp == 1 && single_stream) { + stf = OLD_NRATE_STF_CDD; + } + + /* check 11ac (VHT) */ + if (encode == WL_RSPEC_ENCODE_VHT) { + if (CHSPEC_IS5G(bi->chanspec)) { + result |= WL_ANDROID_LINK_VHT; + } + } + + /* check MIMO */ + if (result & WL_ANDROID_LINK_AP_MIMO_SUPPORT) { + switch (stf) { + case OLD_NRATE_STF_SISO: + break; + case OLD_NRATE_STF_CDD: + case OLD_NRATE_STF_STBC: + result |= WL_ANDROID_LINK_MIMO; + break; + case OLD_NRATE_STF_SDM: + if (!single_stream) { + result |= WL_ANDROID_LINK_MIMO; + } + break; + } + } + + ANDROID_TRACE(("%s:result=%d, stf=%d, single_stream=%d, mcs map=%d\n", + __FUNCTION__, result, stf, single_stream, nss)); + + bytes_written = sprintf(command, "%s %d", CMD_GET_LINK_STATUS, result); + + return bytes_written; +} + #ifdef P2P_LISTEN_OFFLOADING s32 wl_cfg80211_p2plo_offload(struct net_device *dev, char *cmd, char* buf, int len) @@ -2827,38 +3755,881 @@ wl_cfg80211_p2plo_offload(struct net_device *dev, char *cmd, char* buf, int len) #endif /* P2P_LISTEN_OFFLOADING */ #ifdef WL_CFG80211 +#ifdef BCM4359_CHIP int wl_android_murx_bfe_cap(struct net_device *dev, int val) { int err = BCME_OK; - int iface_count = wl_cfg80211_iface_count(); + int iface_count = wl_cfg80211_iface_count(dev); + struct ether_addr bssid; + wl_reassoc_params_t params; if (iface_count > 1) { - ANDROID_ERROR(("%s: murx_bfe_cap change is not allowed when " - "there are multiple interfaces\n", __FUNCTION__)); + ANDROID_ERROR(("murx_bfe_cap change is not allowed when " + "there are multiple interfaces\n")); return -EINVAL; } /* Now there is only single interface */ err = wldev_iovar_setint(dev, "murx_bfe_cap", val); - if (err) { - ANDROID_ERROR(("%s: Failed to set murx_bfe_cap IOVAR to %d," - "error %d\n", __FUNCTION__, val, err)); - err = -EINVAL; + if (unlikely(err)) { + ANDROID_ERROR(("Failed to set murx_bfe_cap IOVAR to %d," + "error %d\n", val, err)); + return err; + } + + /* If successful intiate a 
reassoc */ + memset(&bssid, 0, ETHER_ADDR_LEN); + if ((err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN)) < 0) { + ANDROID_ERROR(("Failed to get bssid, error=%d\n", err)); + return err; + } + + bzero(¶ms, sizeof(wl_reassoc_params_t)); + memcpy(¶ms.bssid, &bssid, ETHER_ADDR_LEN); + + if ((err = wldev_ioctl_set(dev, WLC_REASSOC, ¶ms, + sizeof(wl_reassoc_params_t))) < 0) { + ANDROID_ERROR(("reassoc failed err:%d \n", err)); + } else { + ANDROID_TRACE(("reassoc issued successfully\n")); + } + + return err; +} +#endif /* BCM4359_CHIP */ +#endif + +#ifdef SUPPORT_AP_HIGHER_BEACONRATE +int +wl_android_set_ap_beaconrate(struct net_device *dev, char *command) +{ + int rate = 0; + char *pos, *token; + char *ifname = NULL; + int err = BCME_OK; + + /* + * DRIVER SET_AP_BEACONRATE + */ + pos = command; + + /* drop command */ + token = bcmstrtok(&pos, " ", NULL); + + /* Rate */ + token = bcmstrtok(&pos, " ", NULL); + if (!token) + return -EINVAL; + rate = bcm_atoi(token); + + /* get the interface name */ + token = bcmstrtok(&pos, " ", NULL); + if (!token) + return -EINVAL; + ifname = token; + + ANDROID_TRACE(("rate %d, ifacename %s\n", rate, ifname)); + + err = wl_set_ap_beacon_rate(dev, rate, ifname); + if (unlikely(err)) { + ANDROID_ERROR(("Failed to set ap beacon rate to %d, error = %d\n", rate, err)); + } + + return err; +} + +int wl_android_get_ap_basicrate(struct net_device *dev, char *command, int total_len) +{ + char *pos, *token; + char *ifname = NULL; + int bytes_written = 0; + /* + * DRIVER GET_AP_BASICRATE + */ + pos = command; + + /* drop command */ + token = bcmstrtok(&pos, " ", NULL); + + /* get the interface name */ + token = bcmstrtok(&pos, " ", NULL); + if (!token) + return -EINVAL; + ifname = token; + + ANDROID_TRACE(("ifacename %s\n", ifname)); + + bytes_written = wl_get_ap_basic_rate(dev, command, ifname, total_len); + if (bytes_written < 1) { + ANDROID_ERROR(("Failed to get ap basic rate, error = %d\n", bytes_written)); + return -EPROTO; + } 
+ + return bytes_written; + +} +#endif /* SUPPORT_AP_HIGHER_BEACONRATE */ + +#ifdef SUPPORT_AP_RADIO_PWRSAVE +int +wl_android_get_ap_rps(struct net_device *dev, char *command, int total_len) +{ + char *pos, *token; + char *ifname = NULL; + int bytes_written = 0; + /* + * DRIVER GET_AP_RPS + */ + pos = command; + + /* drop command */ + token = bcmstrtok(&pos, " ", NULL); + + /* get the interface name */ + token = bcmstrtok(&pos, " ", NULL); + if (!token) + return -EINVAL; + ifname = token; + + ANDROID_TRACE(("ifacename %s\n", ifname)); + + bytes_written = wl_get_ap_rps(dev, command, ifname, total_len); + if (bytes_written < 1) { + ANDROID_ERROR(("Failed to get rps, error = %d\n", bytes_written)); + return -EPROTO; + } + + return bytes_written; + +} + +int +wl_android_set_ap_rps(struct net_device *dev, char *command, int total_len) +{ + int enable = 0; + char *pos, *token; + char *ifname = NULL; + int err = BCME_OK; + + /* + * DRIVER SET_AP_RPS <0/1> + */ + pos = command; + + /* drop command */ + token = bcmstrtok(&pos, " ", NULL); + + /* Enable */ + token = bcmstrtok(&pos, " ", NULL); + if (!token) + return -EINVAL; + enable = bcm_atoi(token); + + /* get the interface name */ + token = bcmstrtok(&pos, " ", NULL); + if (!token) + return -EINVAL; + ifname = token; + + ANDROID_TRACE(("enable %d, ifacename %s\n", enable, ifname)); + + err = wl_set_ap_rps(dev, enable? 
TRUE: FALSE, ifname); + if (unlikely(err)) { + ANDROID_ERROR(("Failed to set rps, enable %d, error = %d\n", enable, err)); + } + + return err; +} + +int +wl_android_set_ap_rps_params(struct net_device *dev, char *command, int total_len) +{ + ap_rps_info_t rps; + char *pos, *token; + char *ifname = NULL; + int err = BCME_OK; + + memset(&rps, 0, sizeof(rps)); + /* + * DRIVER SET_AP_RPS_PARAMS + */ + pos = command; + + /* drop command */ + token = bcmstrtok(&pos, " ", NULL); + + /* pps */ + token = bcmstrtok(&pos, " ", NULL); + if (!token) + return -EINVAL; + rps.pps = bcm_atoi(token); + + /* level */ + token = bcmstrtok(&pos, " ", NULL); + if (!token) + return -EINVAL; + rps.level = bcm_atoi(token); + + /* quiettime */ + token = bcmstrtok(&pos, " ", NULL); + if (!token) + return -EINVAL; + rps.quiet_time = bcm_atoi(token); + + /* sta assoc check */ + token = bcmstrtok(&pos, " ", NULL); + if (!token) + return -EINVAL; + rps.sta_assoc_check = bcm_atoi(token); + + /* get the interface name */ + token = bcmstrtok(&pos, " ", NULL); + if (!token) + return -EINVAL; + ifname = token; + + ANDROID_TRACE(("pps %d, level %d, quiettime %d, sta_assoc_check %d, " + "ifacename %s\n", rps.pps, rps.level, rps.quiet_time, + rps.sta_assoc_check, ifname)); + + err = wl_update_ap_rps_params(dev, &rps, ifname); + if (unlikely(err)) { + ANDROID_ERROR(("Failed to update rps, pps %d, level %d, quiettime %d, " + "sta_assoc_check %d, err = %d\n", rps.pps, rps.level, rps.quiet_time, + rps.sta_assoc_check, err)); + } + + return err; +} +#endif /* SUPPORT_AP_RADIO_PWRSAVE */ + +#ifdef SUPPORT_RSSI_LOGGING +int +wl_android_get_rssi_per_ant(struct net_device *dev, char *command, int total_len) +{ + wl_rssi_ant_mimo_t rssi_ant_mimo; + char *ifname = NULL; + char *peer_mac = NULL; + char *mimo_cmd = "mimo"; + char *pos, *token; + int err = BCME_OK; + int bytes_written = 0; + bool mimo_rssi = FALSE; + + memset(&rssi_ant_mimo, 0, sizeof(wl_rssi_ant_mimo_t)); + /* + * STA I/F: DRIVER GET_RSSI_PER_ANT + * 
AP/GO I/F: DRIVER GET_RSSI_PER_ANT + */ + pos = command; + + /* drop command */ + token = bcmstrtok(&pos, " ", NULL); + + /* get the interface name */ + token = bcmstrtok(&pos, " ", NULL); + if (!token) { + ANDROID_ERROR(("Invalid arguments\n")); + return -EINVAL; + } + ifname = token; + + /* Optional: Check the MIMO RSSI mode or peer MAC address */ + token = bcmstrtok(&pos, " ", NULL); + if (token) { + /* Check the MIMO RSSI mode */ + if (strncmp(token, mimo_cmd, strlen(mimo_cmd)) == 0) { + mimo_rssi = TRUE; + } else { + peer_mac = token; + } + } + + /* Optional: Check the MIMO RSSI mode - RSSI sum across antennas */ + token = bcmstrtok(&pos, " ", NULL); + if (token && strncmp(token, mimo_cmd, strlen(mimo_cmd)) == 0) { + mimo_rssi = TRUE; + } + + err = wl_get_rssi_per_ant(dev, ifname, peer_mac, &rssi_ant_mimo); + if (unlikely(err)) { + ANDROID_ERROR(("Failed to get RSSI info, err=%d\n", err)); + return err; + } + + /* Parse the results */ + ANDROID_TRACE(("ifname %s, version %d, count %d, mimo rssi %d\n", + ifname, rssi_ant_mimo.version, rssi_ant_mimo.count, mimo_rssi)); + if (mimo_rssi) { + ANDROID_TRACE(("MIMO RSSI: %d\n", rssi_ant_mimo.rssi_sum)); + bytes_written = snprintf(command, total_len, "%s MIMO %d", + CMD_GET_RSSI_PER_ANT, rssi_ant_mimo.rssi_sum); + } else { + int cnt; + bytes_written = snprintf(command, total_len, "%s PER_ANT ", CMD_GET_RSSI_PER_ANT); + for (cnt = 0; cnt < rssi_ant_mimo.count; cnt++) { + ANDROID_TRACE(("RSSI[%d]: %d\n", cnt, rssi_ant_mimo.rssi_ant[cnt])); + bytes_written += snprintf(command + bytes_written, total_len - bytes_written, + "%d ", rssi_ant_mimo.rssi_ant[cnt]); + } + } + + return bytes_written; +} + +int +wl_android_set_rssi_logging(struct net_device *dev, char *command, int total_len) +{ + rssilog_set_param_t set_param; + char *pos, *token; + int err = BCME_OK; + + memset(&set_param, 0, sizeof(rssilog_set_param_t)); + /* + * DRIVER SET_RSSI_LOGGING