| | 1 | /*- |
| | 2 | * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting |
| | 3 | * All rights reserved. |
| | 4 | * |
| | 5 | * Redistribution and use in source and binary forms, with or without |
| | 6 | * modification, are permitted provided that the following conditions |
| | 7 | * are met: |
| | 8 | * 1. Redistributions of source code must retain the above copyright |
| | 9 | * notice, this list of conditions and the following disclaimer, |
| | 10 | * without modification. |
| | 11 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer |
| | 12 | * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any |
| | 13 | * redistribution must be conditioned upon including a substantially |
| | 14 | * similar Disclaimer requirement for further binary redistribution. |
| | 15 | * 3. Neither the names of the above-listed copyright holders nor the names |
| | 16 | * of any contributors may be used to endorse or promote products derived |
| | 17 | * from this software without specific prior written permission. |
| | 18 | * |
| | 19 | * Alternatively, this software may be distributed under the terms of the |
| | 20 | * GNU General Public License ("GPL") version 2 as published by the Free |
| | 21 | * Software Foundation. |
| | 22 | * |
| | 23 | * NO WARRANTY |
| | 24 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| | 25 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| | 26 | * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY |
| | 27 | * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL |
| | 28 | * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, |
| | 29 | * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
| | 30 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
| | 31 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER |
| | 32 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| | 33 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
| | 34 | * THE POSSIBILITY OF SUCH DAMAGES. |
| | 35 | * |
| | 36 | * $Id: if_ath.c 2616 2007-07-26 13:42:40Z mrenzmann $ |
| | 37 | */ |
| | 38 | |
| | 39 | /* |
| | 40 | * Driver for the Atheros Wireless LAN controller. |
| | 41 | * |
| | 42 | * This software is derived from work of Atsushi Onoe; his contribution |
| | 43 | * is greatly appreciated. |
| | 44 | */ |
| | 45 | #include "opt_ah.h" |
| | 46 | |
| | 47 | #ifndef AUTOCONF_INCLUDED |
| | 48 | #include <linux/config.h> |
| | 49 | #endif |
| | 50 | #include <linux/version.h> |
| | 51 | #include <linux/module.h> |
| | 52 | #include <linux/init.h> |
| | 53 | #include <linux/skbuff.h> |
| | 54 | #include <linux/netdevice.h> |
| | 55 | #include <linux/etherdevice.h> |
| | 56 | #include <linux/random.h> |
| | 57 | #include <linux/delay.h> |
| | 58 | #include <linux/cache.h> |
| | 59 | #include <linux/sysctl.h> |
| | 60 | #include <linux/proc_fs.h> |
| | 61 | #include <linux/if_arp.h> |
| | 62 | #include <linux/rtnetlink.h> |
| | 63 | #include <asm/uaccess.h> |
| | 64 | |
| | 65 | #include "if_ethersubr.h" /* for ETHER_IS_MULTICAST */ |
| | 66 | #include "if_media.h" |
| | 67 | #include "if_llc.h" |
| | 68 | |
| | 69 | #include <net80211/ieee80211_radiotap.h> |
| | 70 | #include <net80211/ieee80211_var.h> |
| | 71 | #include <net80211/ieee80211_monitor.h> |
| | 72 | #include <net80211/ieee80211_rate.h> |
| | 73 | |
| | 74 | #ifdef USE_HEADERLEN_RESV |
| | 75 | #include <net80211/if_llc.h> |
| | 76 | #endif |
| | 77 | |
| | 78 | #define AR_DEBUG |
| | 79 | |
| | 80 | #include "net80211/if_athproto.h" |
| | 81 | #include "if_athvar.h" |
| | 82 | #include "ah_desc.h" |
| | 83 | #include "ah_devid.h" /* XXX to identify chipset */ |
| | 84 | |
| | 85 | #ifdef ATH_PCI /* PCI BUS */ |
| | 86 | #include "if_ath_pci.h" |
| | 87 | #endif /* PCI BUS */ |
| | 88 | #ifdef ATH_AHB /* AHB BUS */ |
| | 89 | #include "if_ath_ahb.h" |
| | 90 | #endif /* AHB BUS */ |
| | 91 | |
| | 92 | #ifdef ATH_TX99_DIAG |
| | 93 | #include "ath_tx99.h" |
| | 94 | #endif |
| | 95 | |
/*
 * Unaligned little-endian accessors.  The value is assembled one byte
 * at a time, so these are safe on architectures that fault on
 * unaligned loads (frame payloads / information elements need not be
 * aligned).
 */
#define LE_READ_2(p)							\
	((u_int16_t)							\
	((((u_int8_t *)(p))[0]      ) | (((u_int8_t *)(p))[1] <<  8)))
#define LE_READ_4(p)							\
	((u_int32_t)							\
	((((u_int8_t *)(p))[0]      ) | (((u_int8_t *)(p))[1] <<  8) | \
	 (((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24)))
| | 104 | |
/*
 * Default rate control algorithm: taken from the kernel configuration
 * when CONFIG_ATHEROS_RATE_DEFAULT is set, otherwise "sample".
 * Overridable at module load time via the "ratectl" parameter.
 */
#ifdef CONFIG_ATHEROS_RATE_DEFAULT
#define DEF_RATE_CTL CONFIG_ATHEROS_RATE_DEFAULT
#else
#define DEF_RATE_CTL "sample"
#endif
| | 111 | |
/* Event codes passed to ath_led_event() to drive LED blinking. */
enum {
	ATH_LED_TX,	/* transmit activity */
	ATH_LED_RX,	/* receive activity */
	ATH_LED_POLL,	/* periodic poll -- presumably idle blink; see ath_led_event() */
};
| | 117 | |
| | 118 | static struct ieee80211vap *ath_vap_create(struct ieee80211com *, |
| | 119 | const char *, int, int, int, struct net_device *); |
| | 120 | static void ath_vap_delete(struct ieee80211vap *); |
| | 121 | static int ath_init(struct net_device *); |
| | 122 | static int ath_set_ack_bitrate(struct ath_softc *, int); |
| | 123 | static int ath_reset(struct net_device *); |
| | 124 | static void ath_fatal_tasklet(TQUEUE_ARG); |
| | 125 | static void ath_rxorn_tasklet(TQUEUE_ARG); |
| | 126 | static void ath_bmiss_tasklet(TQUEUE_ARG); |
| | 127 | static void ath_bstuck_tasklet(TQUEUE_ARG); |
| | 128 | static void ath_radar_task(struct work_struct *); |
| | 129 | static void ath_dfs_test_return(unsigned long); |
| | 130 | |
| | 131 | static int ath_stop_locked(struct net_device *); |
| | 132 | static int ath_stop(struct net_device *); |
| | 133 | #if 0 |
| | 134 | static void ath_initkeytable(struct ath_softc *); |
| | 135 | #endif |
| | 136 | static int ath_key_alloc(struct ieee80211vap *, const struct ieee80211_key *); |
| | 137 | static int ath_key_delete(struct ieee80211vap *, const struct ieee80211_key *, |
| | 138 | struct ieee80211_node *); |
| | 139 | static int ath_key_set(struct ieee80211vap *, const struct ieee80211_key *, |
| | 140 | const u_int8_t mac[IEEE80211_ADDR_LEN]); |
| | 141 | static void ath_key_update_begin(struct ieee80211vap *); |
| | 142 | static void ath_key_update_end(struct ieee80211vap *); |
| | 143 | static void ath_mode_init(struct net_device *); |
| | 144 | static void ath_setslottime(struct ath_softc *); |
| | 145 | static void ath_updateslot(struct net_device *); |
| | 146 | static int ath_beaconq_setup(struct ath_hal *); |
| | 147 | static int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *); |
| | 148 | #ifdef ATH_SUPERG_DYNTURBO |
| | 149 | static void ath_beacon_dturbo_update(struct ieee80211vap *, int *, u_int8_t); |
| | 150 | static void ath_beacon_dturbo_config(struct ieee80211vap *, u_int32_t); |
| | 151 | static void ath_turbo_switch_mode(unsigned long); |
| | 152 | static int ath_check_beacon_done(struct ath_softc *); |
| | 153 | #endif |
| | 154 | static void ath_beacon_send(struct ath_softc *, int *); |
| | 155 | static void ath_beacon_start_adhoc(struct ath_softc *, struct ieee80211vap *); |
| | 156 | static void ath_beacon_return(struct ath_softc *, struct ath_buf *); |
| | 157 | static void ath_beacon_free(struct ath_softc *); |
| | 158 | static void ath_beacon_config(struct ath_softc *, struct ieee80211vap *); |
| | 159 | static int ath_desc_alloc(struct ath_softc *); |
| | 160 | static void ath_desc_free(struct ath_softc *); |
| | 161 | static void ath_desc_swap(struct ath_desc *); |
| | 162 | static struct ieee80211_node *ath_node_alloc(struct ieee80211_node_table *, |
| | 163 | struct ieee80211vap *); |
| | 164 | static void ath_node_cleanup(struct ieee80211_node *); |
| | 165 | static void ath_node_free(struct ieee80211_node *); |
| | 166 | static u_int8_t ath_node_getrssi(const struct ieee80211_node *); |
| | 167 | static int ath_rxbuf_init(struct ath_softc *, struct ath_buf *); |
| | 168 | static void ath_recv_mgmt(struct ieee80211_node *, struct sk_buff *, int, |
| | 169 | int, u_int32_t); |
| | 170 | static void ath_setdefantenna(struct ath_softc *, u_int); |
| | 171 | static struct ath_txq *ath_txq_setup(struct ath_softc *, int, int); |
| | 172 | static void ath_rx_tasklet(TQUEUE_ARG); |
| | 173 | static int ath_hardstart(struct sk_buff *, struct net_device *); |
| | 174 | static int ath_mgtstart(struct ieee80211com *, struct sk_buff *); |
| | 175 | #ifdef ATH_SUPERG_COMP |
| | 176 | static u_int32_t ath_get_icvlen(struct ieee80211_key *); |
| | 177 | static u_int32_t ath_get_ivlen(struct ieee80211_key *); |
| | 178 | static void ath_setup_comp(struct ieee80211_node *, int); |
| | 179 | static void ath_comp_set(struct ieee80211vap *, struct ieee80211_node *, int); |
| | 180 | #endif |
| | 181 | static int ath_tx_setup(struct ath_softc *, int, int); |
| | 182 | static int ath_wme_update(struct ieee80211com *); |
| | 183 | static void ath_uapsd_flush(struct ieee80211_node *); |
| | 184 | static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *); |
| | 185 | static void ath_tx_cleanup(struct ath_softc *); |
| | 186 | static void ath_tx_uapsdqueue(struct ath_softc *, struct ath_node *, |
| | 187 | struct ath_buf *); |
| | 188 | |
| | 189 | static int ath_tx_start(struct net_device *, struct ieee80211_node *, |
| | 190 | struct ath_buf *, struct sk_buff *, int); |
| | 191 | static void ath_tx_tasklet_q0(TQUEUE_ARG); |
| | 192 | static void ath_tx_tasklet_q0123(TQUEUE_ARG); |
| | 193 | static void ath_tx_tasklet(TQUEUE_ARG); |
| | 194 | static void ath_tx_timeout(struct net_device *); |
| | 195 | static void ath_tx_draintxq(struct ath_softc *, struct ath_txq *); |
| | 196 | static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); |
| | 197 | static void ath_draintxq(struct ath_softc *); |
| | 198 | static __inline void ath_tx_txqaddbuf(struct ath_softc *, struct ieee80211_node *, |
| | 199 | struct ath_txq *, struct ath_buf *, struct ath_desc *, int); |
| | 200 | static void ath_stoprecv(struct ath_softc *); |
| | 201 | static int ath_startrecv(struct ath_softc *); |
| | 202 | static void ath_flushrecv(struct ath_softc *); |
| | 203 | static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *); |
| | 204 | static void ath_calibrate(unsigned long); |
| | 205 | static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int); |
| | 206 | |
| | 207 | static void ath_scan_start(struct ieee80211com *); |
| | 208 | static void ath_scan_end(struct ieee80211com *); |
| | 209 | static void ath_set_channel(struct ieee80211com *); |
| | 210 | static void ath_set_coverageclass(struct ieee80211com *); |
| | 211 | static u_int ath_mhz2ieee(struct ieee80211com *, u_int, u_int); |
| | 212 | #ifdef ATH_SUPERG_FF |
| | 213 | static int athff_can_aggregate(struct ath_softc *, struct ether_header *, |
| | 214 | struct ath_node *, struct sk_buff *, u_int16_t, int *); |
| | 215 | #endif |
| | 216 | static struct net_device_stats *ath_getstats(struct net_device *); |
| | 217 | static void ath_setup_stationkey(struct ieee80211_node *); |
| | 218 | static void ath_setup_stationwepkey(struct ieee80211_node *); |
| | 219 | static void ath_setup_keycacheslot(struct ath_softc *, struct ieee80211_node *); |
| | 220 | static void ath_newassoc(struct ieee80211_node *, int); |
| | 221 | static int ath_getchannels(struct net_device *, u_int, HAL_BOOL, HAL_BOOL); |
| | 222 | static void ath_led_event(struct ath_softc *, int); |
| | 223 | static void ath_update_txpow(struct ath_softc *); |
| | 224 | |
| | 225 | static int ath_set_mac_address(struct net_device *, void *); |
| | 226 | static int ath_change_mtu(struct net_device *, int); |
| | 227 | static int ath_ioctl(struct net_device *, struct ifreq *, int); |
| | 228 | |
| | 229 | static int ath_rate_setup(struct net_device *, u_int); |
| | 230 | static void ath_setup_subrates(struct net_device *); |
| | 231 | #ifdef ATH_SUPERG_XR |
| | 232 | static int ath_xr_rate_setup(struct net_device *); |
| | 233 | static void ath_grppoll_txq_setup(struct ath_softc *, int, int); |
| | 234 | static void ath_grppoll_start(struct ieee80211vap *, int); |
| | 235 | static void ath_grppoll_stop(struct ieee80211vap *); |
| | 236 | static u_int8_t ath_node_move_data(const struct ieee80211_node *); |
| | 237 | static void ath_grppoll_txq_update(struct ath_softc *, int); |
| | 238 | static void ath_grppoll_period_update(struct ath_softc *); |
| | 239 | #endif |
| | 240 | static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); |
| | 241 | |
| | 242 | static void ath_dynamic_sysctl_register(struct ath_softc *); |
| | 243 | static void ath_dynamic_sysctl_unregister(struct ath_softc *); |
| | 244 | static void ath_announce(struct net_device *); |
| | 245 | static int ath_descdma_setup(struct ath_softc *, struct ath_descdma *, |
| | 246 | ath_bufhead *, const char *, int, int); |
| | 247 | static void ath_descdma_cleanup(struct ath_softc *, struct ath_descdma *, |
| | 248 | ath_bufhead *, int); |
| | 249 | static void ath_check_dfs_clear(unsigned long); |
| | 250 | static const char *ath_get_hal_status_desc(HAL_STATUS status); |
| | 251 | static int ath_rcv_dev_event(struct notifier_block *, unsigned long, void *); |
| | 252 | |
static int ath_calinterval = ATH_SHORT_CALINTERVAL;		/*
								 * calibrate every 30 secs in steady state
								 * but check every second at first.
								 */
static int ath_countrycode = CTRY_DEFAULT;	/* country code */
static int ath_outdoor = AH_FALSE;		/* enable outdoor use */
static int ath_xchanmode = AH_TRUE;		/* enable extended channels */
/* Module parameters (registered below).  For the integer parameters a
 * value of -1 means "not set": ath_attach() only overrides the
 * ath_* defaults above when the parameter is != -1. */
static char *autocreate = NULL;			/* mode of initially created VAP */
static char *ratectl = DEF_RATE_CTL;		/* rate control algorithm name */
static int rfkill = -1;				/* rfkill capability override */
static int countrycode = -1;			/* country code override */
static int outdoor = -1;			/* outdoor-use override */
static int xchanmode = -1;			/* extended-channel override */
| | 266 | |
/*
 * Human-readable descriptions of HAL status codes, indexed by the
 * HAL_STATUS value (used by ath_get_hal_status_desc()).
 * NOTE(review): this table must stay in sync with the HAL_STATUS enum;
 * presumably the lookup bounds-checks the index -- verify in
 * ath_get_hal_status_desc().
 */
static const char *hal_status_desc[] = {
	"No error",
	"No hardware present or device not yet supported",
	"Memory allocation failed",
	"Hardware didn't respond as expected",
	"EEPROM magic number invalid",
	"EEPROM version invalid",
	"EEPROM unreadable",
	"EEPROM checksum invalid",
	"EEPROM read problem",
	"EEPROM mac address invalid",
	"EEPROM size not supported",
	"Attempt to change write-locked EEPROM",
	"Invalid parameter to function",
	"Hardware revision not supported",
	"Hardware self-test failed",
	"Operation incomplete"
};
| | 285 | |
/* Notifier-chain hook: ath_rcv_dev_event() is invoked on network
 * device events (registered with the netdev notifier chain). */
static struct notifier_block ath_event_block = {
	.notifier_call = ath_rcv_dev_event
};
| | 289 | |
/*
 * Module parameter registration.  Kernels older than 2.5.52 only have
 * the legacy MODULE_PARM() interface; newer kernels use module_param()
 * from <linux/moduleparam.h>.  All parameters are root-writable (0600).
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,52))
MODULE_PARM(countrycode, "i");
MODULE_PARM(outdoor, "i");
MODULE_PARM(xchanmode, "i");
MODULE_PARM(rfkill, "i");
MODULE_PARM(autocreate, "s");
MODULE_PARM(ratectl, "s");
#else
#include <linux/moduleparam.h>
module_param(countrycode, int, 0600);
module_param(outdoor, int, 0600);
module_param(xchanmode, int, 0600);
module_param(rfkill, int, 0600);
module_param(autocreate, charp, 0600);
module_param(ratectl, charp, 0600);
#endif
MODULE_PARM_DESC(countrycode, "Override default country code");
MODULE_PARM_DESC(outdoor, "Enable/disable outdoor use");
MODULE_PARM_DESC(xchanmode, "Enable/disable extended channel mode");
MODULE_PARM_DESC(rfkill, "Enable/disable RFKILL capability");
MODULE_PARM_DESC(autocreate, "Create ath device in [sta|ap|wds|adhoc|ahdemo|monitor] mode. defaults to sta, use 'none' to disable");
MODULE_PARM_DESC(ratectl, "Rate control algorithm [amrr|onoe|sample], defaults to '" DEF_RATE_CTL "'");
| | 312 | |
/* Load-time debug mask; copied into sc->sc_debug by ath_attach(). */
static int ath_debug = 0;
#ifdef AR_DEBUG
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,52))
MODULE_PARM(ath_debug, "i");
#else
module_param(ath_debug, int, 0600);
#endif
MODULE_PARM_DESC(ath_debug, "Load-time debug output enable");

/* Nonzero when any debug category in _m is enabled for this softc. */
#define	IFF_DUMPPKTS(sc, _m) \
	((sc->sc_debug & _m))
static void ath_printrxbuf(struct ath_buf *, int);
static void ath_printtxbuf(struct ath_buf *, int);
/* Debug category bits for sc->sc_debug; tested by DPRINTF() et al. */
enum {
	ATH_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
	ATH_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
	ATH_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
	ATH_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
	ATH_DEBUG_RATE		= 0x00000010,	/* rate control */
	ATH_DEBUG_RESET		= 0x00000020,	/* reset processing */
	/* 0x00000040 was ATH_DEBUG_MODE */
	ATH_DEBUG_BEACON 	= 0x00000080,	/* beacon handling */
	ATH_DEBUG_WATCHDOG 	= 0x00000100,	/* watchdog timeout */
	ATH_DEBUG_INTR		= 0x00001000,	/* ISR */
	ATH_DEBUG_TX_PROC	= 0x00002000,	/* tx ISR proc */
	ATH_DEBUG_RX_PROC	= 0x00004000,	/* rx ISR proc */
	ATH_DEBUG_BEACON_PROC	= 0x00008000,	/* beacon ISR proc */
	ATH_DEBUG_CALIBRATE	= 0x00010000,	/* periodic calibration */
	ATH_DEBUG_KEYCACHE	= 0x00020000,	/* key cache management */
	ATH_DEBUG_STATE		= 0x00040000,	/* 802.11 state transitions */
	ATH_DEBUG_NODE		= 0x00080000,	/* node management */
	ATH_DEBUG_LED		= 0x00100000,	/* led management */
	ATH_DEBUG_FF		= 0x00200000,	/* fast frames */
	ATH_DEBUG_TURBO		= 0x00400000,	/* turbo/dynamic turbo */
	ATH_DEBUG_UAPSD		= 0x00800000,	/* uapsd */
	ATH_DEBUG_DOTH		= 0x01000000,	/* 11.h */
	ATH_DEBUG_FATAL		= 0x80000000,	/* fatal errors */
	ATH_DEBUG_ANY		= 0xffffffff
};
/*
 * Emit a debug message when any bit of _m is enabled in sc->sc_debug.
 * NB: use GCC's ", ## __VA_ARGS__" extension so the macro can also be
 * invoked with only a format string; plain C99 __VA_ARGS__ would leave
 * a trailing comma behind the format and fail to compile in that case.
 */
#define DPRINTF(sc, _m, _fmt, ...) do {				\
	if (sc->sc_debug & (_m))				\
		printk(_fmt, ##__VA_ARGS__);			\
} while (0)
/* Trace key-cache operations when ATH_DEBUG_KEYCACHE is enabled. */
#define KEYPRINTF(sc, ix, hk, mac) do {				\
	if (sc->sc_debug & ATH_DEBUG_KEYCACHE)			\
		ath_keyprint(sc, __func__, ix, hk, mac);	\
} while (0)
#else /* defined(AR_DEBUG) */
/* Debug disabled: defer dump decision to net80211, compile the
 * debug-print macros away entirely. */
#define	IFF_DUMPPKTS(sc, _m)	netif_msg_dumppkts(&sc->sc_ic)
#define	DPRINTF(sc, _m, _fmt, ...)
#define	KEYPRINTF(sc, k, ix, mac)
#endif /* defined(AR_DEBUG) */
| | 365 | |
/*
 * Configure an XR VAP: clamp the fragmentation threshold on channels
 * carrying a 4ms transmit-burst limit, and -- if XR group polling is
 * not yet running -- set up the group-poll tx queue, start polling,
 * and enable the XRPOLL bit in the hardware rx filter.
 */
#define ATH_SETUP_XR_VAP(sc,vap,rfilt) \
	do { \
		if (sc->sc_curchan.privFlags & CHANNEL_4MS_LIMIT) \
			vap->iv_fragthreshold = XR_4MS_FRAG_THRESHOLD; \
		else \
			vap->iv_fragthreshold = vap->iv_xrvap->iv_fragthreshold; \
		if (!sc->sc_xrgrppoll) { \
			ath_grppoll_txq_setup(sc, HAL_TX_QUEUE_DATA, GRP_POLL_PERIOD_NO_XR_STA(sc)); \
			ath_grppoll_start(vap, sc->sc_xrpollcount); \
			ath_hal_setrxfilter(sc->sc_ah, rfilt|HAL_RX_FILTER_XRPOLL); \
		} \
	} while(0)
| | 378 | |
| | 379 | /* |
| | 380 | * Define the scheme that we select MAC address for multiple BSS on the same radio. |
| | 381 | * The very first VAP will just use the MAC address from the EEPROM. |
| | 382 | * For the next 3 VAPs, we set the U/L bit (bit 1) in MAC address, |
| | 383 | * and use the next two bits as the index of the VAP. |
| | 384 | */ |
| | 385 | #define ATH_SET_VAP_BSSID_MASK(bssid_mask) ((bssid_mask)[0] &= ~(((ATH_BCBUF-1)<<2)|0x02)) |
| | 386 | #define ATH_GET_VAP_ID(bssid) ((bssid)[0] >> 2) |
| | 387 | #define ATH_SET_VAP_BSSID(bssid, id) \ |
| | 388 | do { \ |
| | 389 | if (id) \ |
| | 390 | (bssid)[0] |= (((id) << 2) | 0x02); \ |
| | 391 | } while(0) |
| | 392 | |
| | 393 | int |
| | 394 | ath_attach(u_int16_t devid, struct net_device *dev, HAL_BUS_TAG tag) |
| | 395 | { |
| | 396 | struct ath_softc *sc = dev->priv; |
| | 397 | struct ieee80211com *ic = &sc->sc_ic; |
| | 398 | struct ath_hal *ah; |
| | 399 | HAL_STATUS status; |
| | 400 | int error = 0, i; |
| | 401 | int autocreatemode = IEEE80211_M_STA; |
| | 402 | u_int8_t csz; |
| | 403 | |
| | 404 | sc->devid = devid; |
| | 405 | sc->sc_debug = ath_debug; |
| | 406 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid); |
| | 407 | |
| | 408 | /* |
| | 409 | * Cache line size is used to size and align various |
| | 410 | * structures used to communicate with the hardware. |
| | 411 | */ |
| | 412 | bus_read_cachesize(sc, &csz); |
| | 413 | /* XXX assert csz is non-zero */ |
| | 414 | sc->sc_cachelsz = csz << 2; /* convert to bytes */ |
| | 415 | |
| | 416 | ATH_LOCK_INIT(sc); |
| | 417 | ATH_TXBUF_LOCK_INIT(sc); |
| | 418 | ATH_RXBUF_LOCK_INIT(sc); |
| | 419 | |
| | 420 | ATH_INIT_TQUEUE(&sc->sc_rxtq, ath_rx_tasklet, dev); |
| | 421 | ATH_INIT_TQUEUE(&sc->sc_txtq, ath_tx_tasklet, dev); |
| | 422 | ATH_INIT_TQUEUE(&sc->sc_bmisstq, ath_bmiss_tasklet, dev); |
| | 423 | ATH_INIT_TQUEUE(&sc->sc_bstucktq,ath_bstuck_tasklet, dev); |
| | 424 | ATH_INIT_TQUEUE(&sc->sc_rxorntq, ath_rxorn_tasklet, dev); |
| | 425 | ATH_INIT_TQUEUE(&sc->sc_fataltq, ath_fatal_tasklet, dev); |
| | 426 | ATH_INIT_WORK(&sc->sc_radartask, ath_radar_task); |
| | 427 | |
| | 428 | /* |
| | 429 | * Attach the HAL and verify ABI compatibility by checking |
| | 430 | * the HAL's ABI signature against the one the driver was |
| | 431 | * compiled with. A mismatch indicates the driver was |
| | 432 | * built with an ah.h that does not correspond to the HAL |
| | 433 | * module loaded in the kernel. |
| | 434 | */ |
| | 435 | ah = _ath_hal_attach(devid, sc, tag, sc->sc_iobase, &status); |
| | 436 | if (ah == NULL) { |
| | 437 | printk(KERN_ERR "%s: unable to attach hardware: '%s' (HAL status %u)\n", |
| | 438 | dev->name, ath_get_hal_status_desc(status), status); |
| | 439 | error = ENXIO; |
| | 440 | goto bad; |
| | 441 | } |
| | 442 | if (ah->ah_abi != HAL_ABI_VERSION) { |
| | 443 | printk(KERN_ERR "%s: HAL ABI mismatch; " |
| | 444 | "driver expects 0x%x, HAL reports 0x%x\n", |
| | 445 | dev->name, HAL_ABI_VERSION, ah->ah_abi); |
| | 446 | error = ENXIO; /* XXX */ |
| | 447 | goto bad; |
| | 448 | } |
| | 449 | sc->sc_ah = ah; |
| | 450 | |
| | 451 | /* |
| | 452 | * Check if the MAC has multi-rate retry support. |
| | 453 | * We do this by trying to setup a fake extended |
| | 454 | * descriptor. MAC's that don't have support will |
| | 455 | * return false w/o doing anything. MAC's that do |
| | 456 | * support it will return true w/o doing anything. |
| | 457 | */ |
| | 458 | sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0); |
| | 459 | |
| | 460 | /* |
| | 461 | * Check if the device has hardware counters for PHY |
| | 462 | * errors. If so we need to enable the MIB interrupt |
| | 463 | * so we can act on stat triggers. |
| | 464 | */ |
| | 465 | if (ath_hal_hwphycounters(ah)) |
| | 466 | sc->sc_needmib = 1; |
| | 467 | |
| | 468 | /* |
| | 469 | * Get the hardware key cache size. |
| | 470 | */ |
| | 471 | sc->sc_keymax = ath_hal_keycachesize(ah); |
| | 472 | if (sc->sc_keymax > ATH_KEYMAX) { |
| | 473 | printk("%s: Warning, using only %u entries in %u key cache\n", |
| | 474 | dev->name, ATH_KEYMAX, sc->sc_keymax); |
| | 475 | sc->sc_keymax = ATH_KEYMAX; |
| | 476 | } |
| | 477 | /* |
| | 478 | * Reset the key cache since some parts do not |
| | 479 | * reset the contents on initial power up. |
| | 480 | */ |
| | 481 | for (i = 0; i < sc->sc_keymax; i++) |
| | 482 | ath_hal_keyreset(ah, i); |
| | 483 | |
| | 484 | /* |
| | 485 | * Collect the channel list using the default country |
| | 486 | * code and including outdoor channels. The 802.11 layer |
| | 487 | * is responsible for filtering this list based on settings |
| | 488 | * like the phy mode. |
| | 489 | */ |
| | 490 | if (countrycode != -1) |
| | 491 | ath_countrycode = countrycode; |
| | 492 | if (outdoor != -1) |
| | 493 | ath_outdoor = outdoor; |
| | 494 | if (xchanmode != -1) |
| | 495 | ath_xchanmode = xchanmode; |
| | 496 | error = ath_getchannels(dev, ath_countrycode, |
| | 497 | ath_outdoor, ath_xchanmode); |
| | 498 | if (error != 0) |
| | 499 | goto bad; |
| | 500 | |
| | 501 | ic->ic_country_code = ath_countrycode; |
| | 502 | ic->ic_country_outdoor = ath_outdoor; |
| | 503 | |
| | 504 | if (rfkill != -1) { |
| | 505 | printk(KERN_INFO "ath_pci: switching rfkill capability %s\n", |
| | 506 | rfkill ? "on" : "off"); |
| | 507 | ath_hal_setrfsilent(ah, rfkill); |
| | 508 | } |
| | 509 | |
| | 510 | /* |
| | 511 | * Setup rate tables for all potential media types. |
| | 512 | */ |
| | 513 | ath_rate_setup(dev, IEEE80211_MODE_11A); |
| | 514 | ath_rate_setup(dev, IEEE80211_MODE_11B); |
| | 515 | ath_rate_setup(dev, IEEE80211_MODE_11G); |
| | 516 | ath_rate_setup(dev, IEEE80211_MODE_TURBO_A); |
| | 517 | ath_rate_setup(dev, IEEE80211_MODE_TURBO_G); |
| | 518 | |
| | 519 | /* Setup for half/quarter rates */ |
| | 520 | ath_setup_subrates(dev); |
| | 521 | |
| | 522 | /* NB: setup here so ath_rate_update is happy */ |
| | 523 | ath_setcurmode(sc, IEEE80211_MODE_11A); |
| | 524 | |
| | 525 | /* |
| | 526 | * Allocate tx+rx descriptors and populate the lists. |
| | 527 | */ |
| | 528 | error = ath_desc_alloc(sc); |
| | 529 | if (error != 0) { |
| | 530 | printk(KERN_ERR "%s: failed to allocate descriptors: %d\n", |
| | 531 | dev->name, error); |
| | 532 | goto bad; |
| | 533 | } |
| | 534 | |
| | 535 | /* |
| | 536 | * Init ic_caps prior to queue init, since WME cap setting |
| | 537 | * depends on queue setup. |
| | 538 | */ |
| | 539 | ic->ic_caps = 0; |
| | 540 | |
| | 541 | /* |
| | 542 | * Allocate hardware transmit queues: one queue for |
| | 543 | * beacon frames and one data queue for each QoS |
| | 544 | * priority. Note that the HAL handles resetting |
| | 545 | * these queues at the needed time. |
| | 546 | * |
| | 547 | * XXX PS-Poll |
| | 548 | */ |
| | 549 | sc->sc_bhalq = ath_beaconq_setup(ah); |
| | 550 | if (sc->sc_bhalq == (u_int) -1) { |
| | 551 | printk(KERN_ERR "%s: unable to setup a beacon xmit queue!\n", |
| | 552 | dev->name); |
| | 553 | error = EIO; |
| | 554 | goto bad2; |
| | 555 | } |
| | 556 | sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0); |
| | 557 | if (sc->sc_cabq == NULL) { |
| | 558 | printk(KERN_ERR "%s: unable to setup CAB xmit queue!\n", |
| | 559 | dev->name); |
| | 560 | error = EIO; |
| | 561 | goto bad2; |
| | 562 | } |
| | 563 | /* NB: ensure BK queue is the lowest priority h/w queue */ |
| | 564 | if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) { |
| | 565 | printk(KERN_ERR "%s: unable to setup xmit queue for %s traffic!\n", |
| | 566 | dev->name, ieee80211_wme_acnames[WME_AC_BK]); |
| | 567 | error = EIO; |
| | 568 | goto bad2; |
| | 569 | } |
| | 570 | if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) || |
| | 571 | !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) || |
| | 572 | !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) { |
| | 573 | /* |
| | 574 | * Not enough hardware tx queues to properly do WME; |
| | 575 | * just punt and assign them all to the same h/w queue. |
| | 576 | * We could do a better job of this if, for example, |
| | 577 | * we allocate queues when we switch from station to |
| | 578 | * AP mode. |
| | 579 | */ |
| | 580 | if (sc->sc_ac2q[WME_AC_VI] != NULL) |
| | 581 | ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]); |
| | 582 | if (sc->sc_ac2q[WME_AC_BE] != NULL) |
| | 583 | ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]); |
| | 584 | sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK]; |
| | 585 | sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK]; |
| | 586 | sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK]; |
| | 587 | } else { |
| | 588 | /* |
| | 589 | * Mark WME capability since we have sufficient |
| | 590 | * hardware queues to do proper priority scheduling. |
| | 591 | */ |
| | 592 | ic->ic_caps |= IEEE80211_C_WME; |
| | 593 | sc->sc_uapsdq = ath_txq_setup(sc, HAL_TX_QUEUE_UAPSD, 0); |
| | 594 | if (sc->sc_uapsdq == NULL) |
| | 595 | DPRINTF(sc, ATH_DEBUG_UAPSD, "%s: unable to setup UAPSD xmit queue!\n", |
| | 596 | __func__); |
| | 597 | else { |
| | 598 | ic->ic_caps |= IEEE80211_C_UAPSD; |
| | 599 | /* |
| | 600 | * default UAPSD on if HW capable |
| | 601 | */ |
| | 602 | IEEE80211_COM_UAPSD_ENABLE(ic); |
| | 603 | } |
| | 604 | } |
| | 605 | #ifdef ATH_SUPERG_XR |
| | 606 | ath_xr_rate_setup(dev); |
| | 607 | sc->sc_xrpollint = XR_DEFAULT_POLL_INTERVAL; |
| | 608 | sc->sc_xrpollcount = XR_DEFAULT_POLL_COUNT; |
| | 609 | strcpy(sc->sc_grppoll_str, XR_DEFAULT_GRPPOLL_RATE_STR); |
| | 610 | sc->sc_grpplq.axq_qnum = -1; |
| | 611 | sc->sc_xrtxq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, HAL_XR_DATA); |
| | 612 | #endif |
| | 613 | |
| | 614 | /* |
| | 615 | * Special case certain configurations. Note the |
| | 616 | * CAB queue is handled by these specially so don't |
| | 617 | * include them when checking the txq setup mask. |
| | 618 | */ |
| | 619 | switch (sc->sc_txqsetup &~ ((1<<sc->sc_cabq->axq_qnum) | |
| | 620 | (sc->sc_uapsdq ? (1<<sc->sc_uapsdq->axq_qnum) : 0))) { |
| | 621 | case 0x01: |
| | 622 | ATH_INIT_TQUEUE(&sc->sc_txtq, ath_tx_tasklet_q0, dev); |
| | 623 | break; |
| | 624 | case 0x0f: |
| | 625 | ATH_INIT_TQUEUE(&sc->sc_txtq, ath_tx_tasklet_q0123, dev); |
| | 626 | break; |
| | 627 | } |
| | 628 | |
| | 629 | sc->sc_setdefantenna = ath_setdefantenna; |
| | 630 | sc->sc_rc = ieee80211_rate_attach(sc, ratectl); |
| | 631 | if (sc->sc_rc == NULL) { |
| | 632 | error = EIO; |
| | 633 | goto bad2; |
| | 634 | } |
| | 635 | |
| | 636 | init_timer(&sc->sc_cal_ch); |
| | 637 | sc->sc_cal_ch.function = ath_calibrate; |
| | 638 | sc->sc_cal_ch.data = (unsigned long) dev; |
| | 639 | |
| | 640 | #ifdef ATH_SUPERG_DYNTURBO |
| | 641 | init_timer(&sc->sc_dturbo_switch_mode); |
| | 642 | sc->sc_dturbo_switch_mode.function = ath_turbo_switch_mode; |
| | 643 | sc->sc_dturbo_switch_mode.data = (unsigned long) dev; |
| | 644 | #endif |
| | 645 | |
| | 646 | sc->sc_blinking = 0; |
| | 647 | sc->sc_ledstate = 1; |
| | 648 | sc->sc_ledon = 0; /* low true */ |
| | 649 | sc->sc_ledidle = msecs_to_jiffies(2700); /* 2.7 sec */ |
| | 650 | sc->sc_dfstesttime = ATH_DFS_TEST_RETURN_PERIOD; |
| | 651 | init_timer(&sc->sc_ledtimer); |
| | 652 | init_timer(&sc->sc_dfswaittimer); |
| | 653 | init_timer(&sc->sc_dfstesttimer); |
| | 654 | sc->sc_ledtimer.data = (unsigned long) sc; |
| | 655 | if (sc->sc_softled) { |
| | 656 | ath_hal_gpioCfgOutput(ah, sc->sc_ledpin); |
| | 657 | ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon); |
| | 658 | } |
| | 659 | |
| | 660 | /* NB: ether_setup is done by bus-specific code */ |
| | 661 | dev->open = ath_init; |
| | 662 | dev->stop = ath_stop; |
| | 663 | dev->hard_start_xmit = ath_hardstart; |
| | 664 | dev->tx_timeout = ath_tx_timeout; |
| | 665 | dev->watchdog_timeo = 5 * HZ; /* XXX */ |
| | 666 | dev->set_multicast_list = ath_mode_init; |
| | 667 | dev->do_ioctl = ath_ioctl; |
| | 668 | dev->get_stats = ath_getstats; |
| | 669 | dev->set_mac_address = ath_set_mac_address; |
| | 670 | dev->change_mtu = ath_change_mtu; |
| | 671 | dev->tx_queue_len = ATH_TXBUF - 1; /* 1 for mgmt frame */ |
| | 672 | #ifdef USE_HEADERLEN_RESV |
| | 673 | dev->hard_header_len += sizeof(struct ieee80211_qosframe) + |
| | 674 | sizeof(struct llc) + |
| | 675 | IEEE80211_ADDR_LEN + |
| | 676 | IEEE80211_WEP_IVLEN + |
| | 677 | IEEE80211_WEP_KIDLEN; |
| | 678 | #ifdef ATH_SUPERG_FF |
| | 679 | dev->hard_header_len += ATH_FF_MAX_HDR; |
| | 680 | #endif |
| | 681 | #endif |
| | 682 | ic->ic_dev = dev; |
| | 683 | ic->ic_mgtstart = ath_mgtstart; |
| | 684 | ic->ic_init = ath_init; |
| | 685 | ic->ic_reset = ath_reset; |
| | 686 | ic->ic_newassoc = ath_newassoc; |
| | 687 | ic->ic_updateslot = ath_updateslot; |
| | 688 | |
| | 689 | ic->ic_wme.wme_update = ath_wme_update; |
| | 690 | ic->ic_uapsd_flush = ath_uapsd_flush; |
| | 691 | |
| | 692 | /* XXX not right but it's not used anywhere important */ |
| | 693 | ic->ic_phytype = IEEE80211_T_OFDM; |
| | 694 | ic->ic_opmode = IEEE80211_M_STA; |
| | 695 | sc->sc_opmode = HAL_M_STA; |
| | 696 | /* |
| | 697 | * Set the Atheros Advanced Capabilities from station config before |
| | 698 | * starting 802.11 state machine. Currently, set only fast-frames |
| | 699 | * capability. |
| | 700 | */ |
| | 701 | ic->ic_ath_cap = 0; |
| | 702 | sc->sc_fftxqmin = ATH_FF_TXQMIN; |
| | 703 | #ifdef ATH_SUPERG_FF |
| | 704 | ic->ic_ath_cap |= (ath_hal_fastframesupported(ah) ? IEEE80211_ATHC_FF : 0); |
| | 705 | #endif |
| | 706 | ic->ic_ath_cap |= (ath_hal_burstsupported(ah) ? IEEE80211_ATHC_BURST : 0); |
| | 707 | |
| | 708 | #ifdef ATH_SUPERG_COMP |
| | 709 | ic->ic_ath_cap |= (ath_hal_compressionsupported(ah) ? IEEE80211_ATHC_COMP : 0); |
| | 710 | #endif |
| | 711 | |
| | 712 | #ifdef ATH_SUPERG_DYNTURBO |
| | 713 | ic->ic_ath_cap |= (ath_hal_turboagsupported(ah) ? (IEEE80211_ATHC_TURBOP | |
| | 714 | IEEE80211_ATHC_AR) : 0); |
| | 715 | #endif |
| | 716 | #ifdef ATH_SUPERG_XR |
| | 717 | ic->ic_ath_cap |= (ath_hal_xrsupported(ah) ? IEEE80211_ATHC_XR : 0); |
| | 718 | #endif |
| | 719 | |
| | 720 | ic->ic_caps |= |
| | 721 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ |
| | 722 | | IEEE80211_C_HOSTAP /* hostap mode */ |
| | 723 | | IEEE80211_C_MONITOR /* monitor mode */ |
| | 724 | | IEEE80211_C_AHDEMO /* adhoc demo mode */ |
| | 725 | | IEEE80211_C_SHPREAMBLE /* short preamble supported */ |
| | 726 | | IEEE80211_C_SHSLOT /* short slot time supported */ |
| | 727 | | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ |
| | 728 | | IEEE80211_C_BGSCAN /* capable of bg scanning */ |
| | 729 | ; |
| | 730 | /* |
| | 731 | * Query the HAL to figure out h/w crypto support. |
| | 732 | */ |
| | 733 | if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP)) |
| | 734 | ic->ic_caps |= IEEE80211_C_WEP; |
| | 735 | if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB)) |
| | 736 | ic->ic_caps |= IEEE80211_C_AES; |
| | 737 | if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM)) |
| | 738 | ic->ic_caps |= IEEE80211_C_AES_CCM; |
| | 739 | if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP)) |
| | 740 | ic->ic_caps |= IEEE80211_C_CKIP; |
| | 741 | if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) { |
| | 742 | ic->ic_caps |= IEEE80211_C_TKIP; |
| | 743 | /* |
| | 744 | * Check if h/w does the MIC and/or whether the |
| | 745 | * separate key cache entries are required to |
| | 746 | * handle both tx+rx MIC keys. |
| | 747 | */ |
| | 748 | if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC)) { |
| | 749 | ic->ic_caps |= IEEE80211_C_TKIPMIC; |
| | 750 | /* |
| | 751 | * Check if h/w does MIC correctly when |
| | 752 | * WMM is turned on. |
| | 753 | */ |
| | 754 | if (ath_hal_wmetkipmic(ah)) |
| | 755 | ic->ic_caps |= IEEE80211_C_WME_TKIPMIC; |
| | 756 | } |
| | 757 | |
| | 758 | /* |
| | 759 | * If the h/w supports storing tx+rx MIC keys |
| | 760 | * in one cache slot automatically enable use. |
| | 761 | */ |
| | 762 | if (ath_hal_hastkipsplit(ah) || |
| | 763 | !ath_hal_settkipsplit(ah, AH_FALSE)) |
| | 764 | sc->sc_splitmic = 1; |
| | 765 | } |
| | 766 | sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR); |
| | 767 | #if 0 |
| | 768 | sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah); |
| | 769 | #endif |
| | 770 | /* |
| | 771 | * Mark key cache slots associated with global keys |
| | 772 | * as in use. If we knew TKIP was not to be used we |
| | 773 | * could leave the +32, +64, and +32+64 slots free. |
| | 774 | */ |
| | 775 | for (i = 0; i < IEEE80211_WEP_NKID; i++) { |
| | 776 | setbit(sc->sc_keymap, i); |
| | 777 | setbit(sc->sc_keymap, i+64); |
| | 778 | if (sc->sc_splitmic) { |
| | 779 | setbit(sc->sc_keymap, i+32); |
| | 780 | setbit(sc->sc_keymap, i+32+64); |
| | 781 | } |
| | 782 | } |
| | 783 | /* |
| | 784 | * TPC support can be done either with a global cap or |
| | 785 | * per-packet support. The latter is not available on |
| | 786 | * all parts. We're a bit pedantic here as all parts |
| | 787 | * support a global cap. |
| | 788 | */ |
| | 789 | sc->sc_hastpc = ath_hal_hastpc(ah); |
| | 790 | if (sc->sc_hastpc || ath_hal_hastxpowlimit(ah)) |
| | 791 | ic->ic_caps |= IEEE80211_C_TXPMGT; |
| | 792 | |
| | 793 | /* |
| | 794 | * Default 11.h to start enabled. |
| | 795 | */ |
| | 796 | ic->ic_flags |= IEEE80211_F_DOTH; |
| | 797 | |
| | 798 | /* |
| | 799 | * Check for misc other capabilities. |
| | 800 | */ |
| | 801 | if (ath_hal_hasbursting(ah)) |
| | 802 | ic->ic_caps |= IEEE80211_C_BURST; |
| | 803 | sc->sc_hasbmask = ath_hal_hasbssidmask(ah); |
| | 804 | sc->sc_hastsfadd = ath_hal_hastsfadjust(ah); |
| | 805 | /* |
| | 806 | * Indicate we need the 802.11 header padded to a |
| | 807 | * 32-bit boundary for 4-address and QoS frames. |
| | 808 | */ |
| | 809 | ic->ic_flags |= IEEE80211_F_DATAPAD; |
| | 810 | |
| | 811 | /* |
| | 812 | * Query the HAL about antenna support |
| | 813 | * Enable rx fast diversity if HAL has support |
| | 814 | */ |
| | 815 | if (ath_hal_hasdiversity(ah)) { |
| | 816 | sc->sc_hasdiversity = 1; |
| | 817 | ath_hal_setdiversity(ah, AH_TRUE); |
| | 818 | sc->sc_diversity = 1; |
| | 819 | } else { |
| | 820 | sc->sc_hasdiversity = 0; |
| | 821 | sc->sc_diversity = 0; |
| | 822 | ath_hal_setdiversity(ah, AH_FALSE); |
| | 823 | } |
| | 824 | sc->sc_defant = ath_hal_getdefantenna(ah); |
| | 825 | |
| | 826 | /* |
| | 827 | * Not all chips have the VEOL support we want to |
| | 828 | * use with IBSS beacons; check here for it. |
| | 829 | */ |
| | 830 | sc->sc_hasveol = ath_hal_hasveol(ah); |
| | 831 | |
| | 832 | /* get mac address from hardware */ |
| | 833 | ath_hal_getmac(ah, ic->ic_myaddr); |
| | 834 | if (sc->sc_hasbmask) { |
| | 835 | ath_hal_getbssidmask(ah, sc->sc_bssidmask); |
| | 836 | ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask); |
| | 837 | ath_hal_setbssidmask(ah, sc->sc_bssidmask); |
| | 838 | } |
| | 839 | IEEE80211_ADDR_COPY(dev->dev_addr, ic->ic_myaddr); |
| | 840 | |
| | 841 | /* call MI attach routine. */ |
| | 842 | ieee80211_ifattach(ic); |
| | 843 | /* override default methods */ |
| | 844 | ic->ic_node_alloc = ath_node_alloc; |
| | 845 | sc->sc_node_free = ic->ic_node_free; |
| | 846 | ic->ic_node_free = ath_node_free; |
| | 847 | ic->ic_node_getrssi = ath_node_getrssi; |
| | 848 | #ifdef ATH_SUPERG_XR |
| | 849 | ic->ic_node_move_data = ath_node_move_data; |
| | 850 | #endif |
| | 851 | sc->sc_node_cleanup = ic->ic_node_cleanup; |
| | 852 | ic->ic_node_cleanup = ath_node_cleanup; |
| | 853 | sc->sc_recv_mgmt = ic->ic_recv_mgmt; |
| | 854 | ic->ic_recv_mgmt = ath_recv_mgmt; |
| | 855 | |
| | 856 | ic->ic_vap_create = ath_vap_create; |
| | 857 | ic->ic_vap_delete = ath_vap_delete; |
| | 858 | |
| | 859 | ic->ic_scan_start = ath_scan_start; |
| | 860 | ic->ic_scan_end = ath_scan_end; |
| | 861 | ic->ic_set_channel = ath_set_channel; |
| | 862 | |
| | 863 | ic->ic_set_coverageclass = ath_set_coverageclass; |
| | 864 | ic->ic_mhz2ieee = ath_mhz2ieee; |
| | 865 | |
| | 866 | if (register_netdev(dev)) { |
| | 867 | printk(KERN_ERR "%s: unable to register device\n", dev->name); |
| | 868 | goto bad3; |
| | 869 | } |
| | 870 | /* |
| | 871 | * Attach dynamic MIB vars and announce support |
| | 872 | * now that we have a device name with unit number. |
| | 873 | */ |
| | 874 | ath_dynamic_sysctl_register(sc); |
| | 875 | ieee80211_announce(ic); |
| | 876 | ath_announce(dev); |
| | 877 | #ifdef ATH_TX99_DIAG |
| | 878 | printk("%s: TX99 support enabled\n", dev->name); |
| | 879 | #endif |
| | 880 | sc->sc_invalid = 0; |
| | 881 | |
| | 882 | if (autocreate) { |
| | 883 | if (!strcmp(autocreate, "none")) |
| | 884 | autocreatemode = -1; |
| | 885 | else if (!strcmp(autocreate, "sta")) |
| | 886 | autocreatemode = IEEE80211_M_STA; |
| | 887 | else if (!strcmp(autocreate, "ap")) |
| | 888 | autocreatemode = IEEE80211_M_HOSTAP; |
| | 889 | else if (!strcmp(autocreate, "adhoc")) |
| | 890 | autocreatemode = IEEE80211_M_IBSS; |
| | 891 | else if (!strcmp(autocreate, "ahdemo")) |
| | 892 | autocreatemode = IEEE80211_M_AHDEMO; |
| | 893 | else if (!strcmp(autocreate, "wds")) |
| | 894 | autocreatemode = IEEE80211_M_WDS; |
| | 895 | else if (!strcmp(autocreate, "monitor")) |
| | 896 | autocreatemode = IEEE80211_M_MONITOR; |
| | 897 | else { |
| | 898 | printk(KERN_INFO "Unknown autocreate mode: %s\n", |
| | 899 | autocreate); |
| | 900 | autocreatemode = -1; |
| | 901 | } |
| | 902 | } |
| | 903 | |
| | 904 | if (autocreatemode != -1) { |
| | 905 | rtnl_lock(); |
| | 906 | error = ieee80211_create_vap(ic, "ath%d", dev, |
| | 907 | autocreatemode, IEEE80211_CLONE_BSSID); |
| | 908 | rtnl_unlock(); |
| | 909 | if (error) |
| | 910 | printk(KERN_ERR "%s: autocreation of VAP failed: %d\n", |
| | 911 | dev->name, error); |
| | 912 | } |
| | 913 | |
| | 914 | return 0; |
| | 915 | bad3: |
| | 916 | ieee80211_ifdetach(ic); |
| | 917 | ieee80211_rate_detach(sc->sc_rc); |
| | 918 | bad2: |
| | 919 | ath_tx_cleanup(sc); |
| | 920 | ath_desc_free(sc); |
| | 921 | bad: |
| | 922 | if (ah) |
| | 923 | ath_hal_detach(ah); |
| | 924 | ATH_TXBUF_LOCK_DESTROY(sc); |
| | 925 | ATH_LOCK_DESTROY(sc); |
| | 926 | sc->sc_invalid = 1; |
| | 927 | |
| | 928 | return error; |
| | 929 | } |
| | 930 | |
/*
 * Detach the driver from the network device: stop the hardware,
 * tear down the 802.11 layer, release the IRQ and all driver
 * resources, and finally unregister the net_device.
 *
 * Returns 0 (always succeeds).
 */
int
ath_detach(struct net_device *dev)
{
	struct ath_softc *sc = dev->priv;
	struct ath_hal *ah = sc->sc_ah;

	/* NB: only a sink for ath_hal_getisr below; value is discarded */
	HAL_INT tmp;
	DPRINTF(sc, ATH_DEBUG_ANY, "%s: flags %x\n", __func__, dev->flags);
	ath_stop(dev);

	/* wake the chip so register accesses during teardown are valid */
	ath_hal_setpower(sc->sc_ah, HAL_PM_AWAKE);
	/* Flush the radar task if it's scheduled */
	if (sc->sc_rtasksched == 1)
		flush_scheduled_work();

	/* mark softc dead so late callbacks/ioctls bail out */
	sc->sc_invalid = 1;

	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the HAL to
	 *   ensure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the HAL is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ieee80211_ifdetach(&sc->sc_ic);

	ath_hal_intrset(ah, 0);		/* disable further intr's */
	ath_hal_getisr(ah, &tmp);	/* clear ISR */
	if(dev->irq) {
		free_irq(dev->irq, dev);
		dev->irq = 0;		/* NB: prevents a double free_irq */
	}
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	ieee80211_rate_detach(sc->sc_rc);
	ath_desc_free(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(ah);

	ath_dynamic_sysctl_unregister(sc);
	ATH_LOCK_DESTROY(sc);
	dev->stop = NULL;	/* prevent calling ath_stop again */
	unregister_netdev(dev);
	return 0;
}
| | 983 | |
/*
 * Create a virtual AP/STA interface (VAP) on top of the shared
 * radio.  Validates the requested opmode against the VAPs already
 * present, allocates a net_device + ath_vap, wires in driver
 * overrides for key management and state transitions, optionally
 * assigns a unique BSSID and a beacon transmit slot, and restarts
 * the rx/tx machinery if the underlying device was running.
 *
 * Returns the new vap, or NULL if the combination of modes is not
 * permitted or allocation fails.
 */
static struct ieee80211vap *
ath_vap_create(struct ieee80211com *ic, const char *name, int unit,
	int opmode, int flags, struct net_device *mdev)
{
	struct ath_softc *sc = ic->ic_dev->priv;
	struct ath_hal *ah = sc->sc_ah;
	struct net_device *dev;
	struct ath_vap *avp;
	struct ieee80211vap *vap;
	int ic_opmode;

	/* quiesce the hardware while we reconfigure */
	if (ic->ic_dev->flags & IFF_RUNNING) {
		/* needs to disable hardware too */
		ath_hal_intrset(ah, 0);		/* disable interrupts */
		ath_draintxq(sc);		/* stop xmit side */
		ath_stoprecv(sc);		/* stop recv side */
	}
	/* XXX ic unlocked and race against add */
	/*
	 * Decide the combined chip opmode (ic_opmode) from the requested
	 * VAP opmode and the VAPs that already exist.
	 */
	switch (opmode) {
	case IEEE80211_M_STA:	/* ap+sta for repeater application */
		if (sc->sc_nstavaps != 0)  /* only one sta regardless */
			return NULL;
		if ((sc->sc_nvaps != 0) && (!(flags & IEEE80211_NO_STABEACONS)))
			return NULL;   /* If using station beacons, must first up */
		if (flags & IEEE80211_NO_STABEACONS) {
			sc->sc_nostabeacons = 1;
			ic_opmode = IEEE80211_M_HOSTAP;	/* Run with chip in AP mode */
		} else
			ic_opmode = opmode;
		break;
	case IEEE80211_M_IBSS:
		if (sc->sc_nvaps != 0)		/* only one */
			return NULL;
		ic_opmode = opmode;
		break;
	case IEEE80211_M_AHDEMO:
	case IEEE80211_M_MONITOR:
		if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
			/* preserve existing mode */
			ic_opmode = ic->ic_opmode;
		} else
			ic_opmode = opmode;
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_WDS:
		/* permit multiple ap's and/or wds links */
		/* XXX sta+ap for repeater/bridge application */
		if ((sc->sc_nvaps != 0) && (ic->ic_opmode == IEEE80211_M_STA))
			return NULL;
		/* XXX not right, beacon buffer is allocated on RUN trans */
		if (opmode == IEEE80211_M_HOSTAP && STAILQ_EMPTY(&sc->sc_bbuf))
			return NULL;
		/*
		 * XXX Not sure if this is correct when operating only
		 * with WDS links.
		 */
		ic_opmode = IEEE80211_M_HOSTAP;

		break;
	default:
		return NULL;
	}

	/* one beacon buffer per VAP caps the total number of VAPs */
	if (sc->sc_nvaps >= ATH_BCBUF) {
		printk(KERN_WARNING "too many virtual ap's (already got %d)\n", sc->sc_nvaps);
		return NULL;
	}

	/* NB: rate-control per-VAP state is carved out of the same allocation */
	dev = alloc_etherdev(sizeof(struct ath_vap) + sc->sc_rc->arc_vap_space);
	if (dev == NULL) {
		/* XXX msg */
		return NULL;
	}

	avp = dev->priv;
	ieee80211_vap_setup(ic, dev, name, unit, opmode, flags);
	/* override with driver methods */
	vap = &avp->av_vap;
	avp->av_newstate = vap->iv_newstate;	/* save for chaining from ath_newstate */
	vap->iv_newstate = ath_newstate;
	vap->iv_key_alloc = ath_key_alloc;
	vap->iv_key_delete = ath_key_delete;
	vap->iv_key_set = ath_key_set;
	vap->iv_key_update_begin = ath_key_update_begin;
	vap->iv_key_update_end = ath_key_update_end;
#ifdef ATH_SUPERG_COMP
	vap->iv_comp_set = ath_comp_set;
#endif

	/* Let rate control register proc entries for the VAP */
	if (sc->sc_rc->ops->dynamic_proc_register)
		sc->sc_rc->ops->dynamic_proc_register(vap);

	/*
	 * Change the interface type for monitor mode.
	 */
	if (opmode == IEEE80211_M_MONITOR)
		dev->type = ARPHRD_IEEE80211_PRISM;
	if ((flags & IEEE80211_CLONE_BSSID) &&
	    sc->sc_nvaps != 0 && opmode != IEEE80211_M_WDS && sc->sc_hasbmask) {
		struct ieee80211vap *v;
		int id_mask, id;

		/*
		 * Hardware supports the bssid mask and a unique
		 * bssid was requested.  Assign a new mac address
		 * and expand our bssid mask to cover the active
		 * virtual ap's with distinct addresses.
		 */

		/* do a full search to mark all the allocated VAPs */
		id_mask = 0;
		TAILQ_FOREACH(v, &ic->ic_vaps, iv_next)
			id_mask |= (1 << ATH_GET_VAP_ID(v->iv_myaddr));

		for (id = 0; id < ATH_BCBUF; id++) {
			/* get the first available slot */
			if ((id_mask & (1 << id)) == 0) {
				ATH_SET_VAP_BSSID(vap->iv_myaddr, id);
				break;
			}
		}
	}
	avp->av_bslot = -1;	/* -1 == no beacon slot assigned */
	STAILQ_INIT(&avp->av_mcastq.axq_q);
	ATH_TXQ_LOCK_INIT(&avp->av_mcastq);
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_IBSS) {
		/*
		 * Allocate beacon state for hostap/ibss.  We know
		 * a buffer is available because of the check above.
		 */
		avp->av_bcbuf = STAILQ_FIRST(&sc->sc_bbuf);
		STAILQ_REMOVE_HEAD(&sc->sc_bbuf, bf_list);
		if (opmode == IEEE80211_M_HOSTAP || !sc->sc_hasveol) {
			int slot;
			/*
			 * Assign the VAP to a beacon xmit slot.  As
			 * above, this cannot fail to find one.
			 */
			avp->av_bslot = 0;
			for (slot = 0; slot < ATH_BCBUF; slot++)
				if (sc->sc_bslot[slot] == NULL) {
					/*
					 * XXX hack, space out slots to better
					 * deal with misses
					 */
					if (slot + 1 < ATH_BCBUF &&
					    sc->sc_bslot[slot+1] == NULL) {
						avp->av_bslot = slot + 1;
						break;
					}
					avp->av_bslot = slot;
					/* NB: keep looking for a double slot */
				}
			KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
				("beacon slot %u not empty?", avp->av_bslot));
			sc->sc_bslot[avp->av_bslot] = vap;
			sc->sc_nbcnvaps++;
		}
		if ((opmode == IEEE80211_M_HOSTAP) && (sc->sc_hastsfadd)) {
			/*
			 * Multiple VAPs are to transmit beacons and we
			 * have h/w support for TSF adjusting; enable use
			 * of staggered beacons.
			 */
			/* XXX check for beacon interval too small */
			sc->sc_stagbeacons = 1;
		}
	}
	if (sc->sc_hastsfadd)
		ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
	SET_NETDEV_DEV(dev, ATH_GET_NETDEV_DEV(mdev));
	/* complete setup */
	(void) ieee80211_vap_attach(vap,
		ieee80211_media_change, ieee80211_media_status);

	ic->ic_opmode = ic_opmode;

	/* NB: WDS links do not count against the VAP limit */
	if (opmode != IEEE80211_M_WDS)
		sc->sc_nvaps++;

	if (opmode == IEEE80211_M_STA)
		sc->sc_nstavaps++;
	else if (opmode == IEEE80211_M_MONITOR)
		sc->sc_nmonvaps++;
	/*
	 * Adhoc demo mode is a pseudo mode; to the HAL it's
	 * just ibss mode and the driver doesn't use management
	 * frames.  Other modes carry over directly to the HAL.
	 */
	if (ic->ic_opmode == IEEE80211_M_AHDEMO)
		sc->sc_opmode = HAL_M_IBSS;
	else
		sc->sc_opmode = (HAL_OPMODE) ic->ic_opmode;	/* NB: compatible */

#ifdef ATH_SUPERG_XR
	if ( vap->iv_flags & IEEE80211_F_XR ) {
		if (ath_descdma_setup(sc, &sc->sc_grppolldma, &sc->sc_grppollbuf,
			"grppoll", (sc->sc_xrpollcount+1) * HAL_ANTENNA_MAX_MODE, 1) != 0)
			printk("%s:grppoll Buf allocation failed \n",__func__);
		if (!sc->sc_xrtxq)
			sc->sc_xrtxq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, HAL_XR_DATA);
		if (sc->sc_hasdiversity) {
			/* Save current diversity state if user destroys XR VAP */
			sc->sc_olddiversity = sc->sc_diversity;
			ath_hal_setdiversity(sc->sc_ah, 0);
			sc->sc_diversity = 0;
		}
	}
#endif
	if (ic->ic_dev->flags & IFF_RUNNING) {
		/* restart hardware */
		if (ath_startrecv(sc) != 0)	/* restart recv */
			printk("%s: %s: unable to start recv logic\n",
				dev->name, __func__);
		if (sc->sc_beacons)
			ath_beacon_config(sc, NULL);	/* restart beacons */
		ath_hal_intrset(ah, sc->sc_imask);
	}

	return vap;
}
| | 1206 | |
/*
 * Destroy a virtual interface: quiesce the hardware, reclaim all
 * per-VAP driver state (mcast queue, beacon slot/buffer, XR
 * resources, application IEs), detach the 802.11 layer state, and
 * restart the rx/tx machinery if the device is still running.
 *
 * NB: order matters — pending tx/mcast frames referencing the VAP
 * must be drained, and beacon state reclaimed, before the VAP
 * instance itself is detached.
 */
static void
ath_vap_delete(struct ieee80211vap *vap)
{
	struct net_device *dev = vap->iv_ic->ic_dev;
	struct ath_softc *sc = dev->priv;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp = ATH_VAP(vap);
	int decrease = 1;	/* cleared for WDS, which never counted in sc_nvaps */
	int i;
	KASSERT(vap->iv_state == IEEE80211_S_INIT, ("VAP not stopped"));

	if (dev->flags & IFF_RUNNING) {
		/*
		 * Quiesce the hardware while we remove the VAP.  In
		 * particular we need to reclaim all references to the
		 * VAP state by any frames pending on the tx queues.
		 *
		 * XXX can we do this w/o affecting other VAPs?
		 */
		ath_hal_intrset(ah, 0);		/* disable interrupts */
		ath_draintxq(sc);		/* stop xmit side */
		ath_stoprecv(sc);		/* stop recv side */
	}

	/*
	 * Reclaim any pending mcast bufs on the VAP.
	 */
	ath_tx_draintxq(sc, &avp->av_mcastq);
	ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq);

	/*
	 * Reclaim beacon state.  Note this must be done before
	 * VAP instance is reclaimed as we may have a reference
	 * to it in the buffer for the beacon frame.
	 */
	if (avp->av_bcbuf != NULL) {
		if (avp->av_bslot != -1) {
			sc->sc_bslot[avp->av_bslot] = NULL;
			sc->sc_nbcnvaps--;
		}
		ath_beacon_return(sc, avp->av_bcbuf);
		avp->av_bcbuf = NULL;
		/* no beaconing VAPs left: staggering no longer needed */
		if (sc->sc_nbcnvaps == 0)
			sc->sc_stagbeacons = 0;
	}
	/* drop per-opmode counters mirroring ath_vap_create */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		sc->sc_nstavaps--;
		if (sc->sc_nostabeacons)
			sc->sc_nostabeacons = 0;
	} else if (vap->iv_opmode == IEEE80211_M_MONITOR) {
		sc->sc_nmonvaps--;
	} else if (vap->iv_opmode == IEEE80211_M_WDS) {
		decrease = 0;
	}
	ieee80211_vap_detach(vap);
	/* NB: memory is reclaimed through dev->destructor callback */
	if (decrease)
		sc->sc_nvaps--;

#ifdef ATH_SUPERG_XR
	/*
	 * If it's an XR VAP, free the memory allocated explicitly.
	 * Since the XR VAP is not registered, OS cannot free the memory.
	 */
	if (vap->iv_flags & IEEE80211_F_XR) {
		ath_grppoll_stop(vap);
		ath_descdma_cleanup(sc, &sc->sc_grppolldma, &sc->sc_grppollbuf, BUS_DMA_FROMDEVICE);
		memset(&sc->sc_grppollbuf, 0, sizeof(sc->sc_grppollbuf));
		memset(&sc->sc_grppolldma, 0, sizeof(sc->sc_grppolldma));
		if (vap->iv_xrvap)
			vap->iv_xrvap->iv_xrvap = NULL;	/* unlink from normal-mode peer */
		kfree(vap->iv_dev);
		ath_tx_cleanupq(sc,sc->sc_xrtxq);
		sc->sc_xrtxq = NULL;
		if (sc->sc_hasdiversity) {
			/* Restore diversity setting to old diversity setting */
			ath_hal_setdiversity(ah, sc->sc_olddiversity);
			sc->sc_diversity = sc->sc_olddiversity;
		}
	}
#endif

	/* release any application-supplied information elements */
	for (i = 0; i < IEEE80211_APPIE_NUM_OF_FRAME; i++) {
		if (vap->app_ie[i].ie != NULL) {
			FREE(vap->app_ie[i].ie, M_DEVBUF);
			vap->app_ie[i].ie = NULL;
			vap->app_ie[i].length = 0;
		}
	}

	if (dev->flags & IFF_RUNNING) {
		/*
		 * Restart rx+tx machines if device is still running.
		 */
		if (ath_startrecv(sc) != 0)	/* restart recv */
			printk("%s: %s: unable to start recv logic\n",
				dev->name, __func__);
		if (sc->sc_beacons)
			ath_beacon_config(sc, NULL);	/* restart beacons */
		ath_hal_intrset(ah, sc->sc_imask);
	}
}
| | 1309 | |
| | 1310 | void |
| | 1311 | ath_suspend(struct net_device *dev) |
| | 1312 | { |
| | 1313 | struct ath_softc *sc = dev->priv; |
| | 1314 | |
| | 1315 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: flags %x\n", __func__, dev->flags); |
| | 1316 | ath_stop(dev); |
| | 1317 | } |
| | 1318 | |
| | 1319 | void |
| | 1320 | ath_resume(struct net_device *dev) |
| | 1321 | { |
| | 1322 | struct ath_softc *sc = dev->priv; |
| | 1323 | |
| | 1324 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: flags %x\n", __func__, dev->flags); |
| | 1325 | ath_init(dev); |
| | 1326 | } |
| | 1327 | |
| | 1328 | static void |
| | 1329 | ath_uapsd_processtriggers(struct ath_softc *sc) |
| | 1330 | { |
| | 1331 | struct ath_hal *ah = sc->sc_ah; |
| | 1332 | struct ath_buf *bf; |
| | 1333 | struct ath_desc *ds; |
| | 1334 | struct sk_buff *skb; |
| | 1335 | struct ieee80211_node *ni; |
| | 1336 | struct ath_node *an; |
| | 1337 | struct ieee80211_qosframe *qwh; |
| | 1338 | struct ath_txq *uapsd_xmit_q = sc->sc_uapsdq; |
| | 1339 | struct ieee80211com *ic = &sc->sc_ic; |
| | 1340 | int ac, retval; |
| | 1341 | u_int8_t tid; |
| | 1342 | u_int16_t frame_seq; |
| | 1343 | u_int64_t tsf; |
| | 1344 | #define PA2DESC(_sc, _pa) \ |
| | 1345 | ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ |
| | 1346 | ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) |
| | 1347 | |
| | 1348 | /* XXXAPSD: build in check against max triggers we could see |
| | 1349 | * based on ic->ic_uapsdmaxtriggers. |
| | 1350 | */ |
| | 1351 | |
| | 1352 | tsf = ath_hal_gettsf64(ah); |
| | 1353 | ATH_RXBUF_LOCK(sc); |
| | 1354 | if (sc->sc_rxbufcur == NULL) |
| | 1355 | sc->sc_rxbufcur = STAILQ_FIRST(&sc->sc_rxbuf); |
| | 1356 | for (bf = sc->sc_rxbufcur; bf; bf = STAILQ_NEXT(bf, bf_list)) { |
| | 1357 | ds = bf->bf_desc; |
| | 1358 | if (ds->ds_link == bf->bf_daddr) { |
| | 1359 | /* NB: never process the self-linked entry at the end */ |
| | 1360 | break; |
| | 1361 | } |
| | 1362 | if (bf->bf_status & ATH_BUFSTATUS_DONE) { |
| | 1363 | /* |
| | 1364 | * already processed this buffer (shouldn't occur if |
| | 1365 | * we change code to always process descriptors in |
| | 1366 | * rx intr handler - as opposed to sometimes processing |
| | 1367 | * in the rx tasklet). |
| | 1368 | */ |
| | 1369 | continue; |
| | 1370 | } |
| | 1371 | skb = bf->bf_skb; |
| | 1372 | if (skb == NULL) { /* XXX ??? can this happen */ |
| | 1373 | printk("%s: no skbuff\n", __func__); |
| | 1374 | continue; |
| | 1375 | } |
| | 1376 | |
| | 1377 | /* |
| | 1378 | * XXXAPSD: consider new HAL call that does only the subset |
| | 1379 | * of ath_hal_rxprocdesc we require for trigger search. |
| | 1380 | */ |
| | 1381 | |
| | 1382 | /* |
| | 1383 | * NB: descriptor memory doesn't need to be sync'd |
| | 1384 | * due to the way it was allocated. |
| | 1385 | */ |
| | 1386 | |
| | 1387 | /* |
| | 1388 | * Must provide the virtual address of the current |
| | 1389 | * descriptor, the physical address, and the virtual |
| | 1390 | * address of the next descriptor in the h/w chain. |
| | 1391 | * This allows the HAL to look ahead to see if the |
| | 1392 | * hardware is done with a descriptor by checking the |
| | 1393 | * done bit in the following descriptor and the address |
| | 1394 | * of the current descriptor the DMA engine is working |
| | 1395 | * on. All this is necessary because of our use of |
| | 1396 | * a self-linked list to avoid rx overruns. |
| | 1397 | */ |
| | 1398 | retval = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr, PA2DESC(sc, ds->ds_link), tsf); |
| | 1399 | if (HAL_EINPROGRESS == retval) |
| | 1400 | break; |
| | 1401 | |
| | 1402 | /* XXX: we do not support frames spanning multiple descriptors */ |
| | 1403 | bf->bf_status |= ATH_BUFSTATUS_DONE; |
| | 1404 | |
| | 1405 | /* errors? */ |
| | 1406 | if (ds->ds_rxstat.rs_status) |
| | 1407 | continue; |
| | 1408 | |
| | 1409 | /* prepare wireless header for examination */ |
| | 1410 | bus_dma_sync_single(sc->sc_bdev, bf->bf_skbaddr, |
| | 1411 | sizeof(struct ieee80211_qosframe), |
| | 1412 | BUS_DMA_FROMDEVICE); |
| | 1413 | qwh = (struct ieee80211_qosframe *) skb->data; |
| | 1414 | |
| | 1415 | /* find the node. it MUST be in the keycache. */ |
| | 1416 | if (ds->ds_rxstat.rs_keyix == HAL_RXKEYIX_INVALID || |
| | 1417 | (ni = sc->sc_keyixmap[ds->ds_rxstat.rs_keyix]) == NULL) { |
| | 1418 | /* |
| | 1419 | * XXX: this can occur if WEP mode is used for non-Atheros clients |
| | 1420 | * (since we do not know which of the 4 WEP keys will be used |
| | 1421 | * at association time, so cannot setup a key-cache entry. |
| | 1422 | * The Atheros client can convey this in the Atheros IE.) |
| | 1423 | * |
| | 1424 | * TODO: The fix is to use the hash lookup on the node here. |
| | 1425 | */ |
| | 1426 | #if 0 |
| | 1427 | /* |
| | 1428 | * This print is very chatty, so removing for now. |
| | 1429 | */ |
| | 1430 | DPRINTF(sc, ATH_DEBUG_UAPSD, "%s: U-APSD node (%s) has invalid keycache entry\n", |
| | 1431 | __func__, ether_sprintf(qwh->i_addr2)); |
| | 1432 | #endif |
| | 1433 | continue; |
| | 1434 | } |
| | 1435 | |
| | 1436 | if (!(ni->ni_flags & IEEE80211_NODE_UAPSD)) |
| | 1437 | continue; |
| | 1438 | |
| | 1439 | /* |
| | 1440 | * Must deal with change of state here, since otherwise there would |
| | 1441 | * be a race (on two quick frames from STA) between this code and the |
| | 1442 | * tasklet where we would: |
| | 1443 | * - miss a trigger on entry to PS if we're already trigger hunting |
| | 1444 | * - generate spurious SP on exit (due to frame following exit frame) |
| | 1445 | */ |
| | 1446 | if (((qwh->i_fc[1] & IEEE80211_FC1_PWR_MGT) ^ |
| | 1447 | (ni->ni_flags & IEEE80211_NODE_PWR_MGT))) { |
| | 1448 | /* |
| | 1449 | * NB: do not require lock here since this runs at intr |
| | 1450 | * "proper" time and cannot be interrupted by rx tasklet |
| | 1451 | * (code there has lock). May want to place a macro here |
| | 1452 | * (that does nothing) to make this more clear. |
| | 1453 | */ |
| | 1454 | ni->ni_flags |= IEEE80211_NODE_PS_CHANGED; |
| | 1455 | ni->ni_pschangeseq = *(__le16 *)(&qwh->i_seq[0]); |
| | 1456 | ni->ni_flags &= ~IEEE80211_NODE_UAPSD_SP; |
| | 1457 | ni->ni_flags ^= IEEE80211_NODE_PWR_MGT; |
| | 1458 | if (qwh->i_fc[1] & IEEE80211_FC1_PWR_MGT) { |
| | 1459 | ni->ni_flags |= IEEE80211_NODE_UAPSD_TRIG; |
| | 1460 | ic->ic_uapsdmaxtriggers++; |
| | 1461 | WME_UAPSD_NODE_TRIGSEQINIT(ni); |
| | 1462 | DPRINTF(sc, ATH_DEBUG_UAPSD, |
| | 1463 | "%s: Node (%s) became U-APSD triggerable (%d)\n", |
| | 1464 | __func__, ether_sprintf(qwh->i_addr2), |
| | 1465 | ic->ic_uapsdmaxtriggers); |
| | 1466 | } else { |
| | 1467 | ni->ni_flags &= ~IEEE80211_NODE_UAPSD_TRIG; |
| | 1468 | ic->ic_uapsdmaxtriggers--; |
| | 1469 | DPRINTF(sc, ATH_DEBUG_UAPSD, |
| | 1470 | "%s: Node (%s) no longer U-APSD triggerable (%d)\n", |
| | 1471 | __func__, ether_sprintf(qwh->i_addr2), |
| | 1472 | ic->ic_uapsdmaxtriggers); |
| | 1473 | /* |
| | 1474 | * XXX: rapidly thrashing sta could get |
| | 1475 | * out-of-order frames due this flush placing |
| | 1476 | * frames on backlogged regular AC queue and |
| | 1477 | * re-entry to PS having fresh arrivals onto |
| | 1478 | * faster UPSD delivery queue. if this is a |
| | 1479 | * big problem we may need to drop these. |
| | 1480 | */ |
| | 1481 | ath_uapsd_flush(ni); |
| | 1482 | } |
| | 1483 | |
| | 1484 | continue; |
| | 1485 | } |
| | 1486 | |
| | 1487 | if (ic->ic_uapsdmaxtriggers == 0) |
| | 1488 | continue; |
| | 1489 | |
| | 1490 | /* make sure the frame is QoS data/null */ |
| | 1491 | /* NB: with current sub-type definitions, the |
| | 1492 | * IEEE80211_FC0_SUBTYPE_QOS check, below, covers the |
| | 1493 | * QoS null case too. |
| | 1494 | */ |
| | 1495 | if (((qwh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_DATA) || |
| | 1496 | !(qwh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)) |
| | 1497 | continue; |
| | 1498 | |
| | 1499 | /* |
| | 1500 | * To be a trigger: |
| | 1501 | * - node is in triggerable state |
| | 1502 | * - QoS data/null frame with triggerable AC |
| | 1503 | */ |
| | 1504 | tid = qwh->i_qos[0] & IEEE80211_QOS_TID; |
| | 1505 | ac = TID_TO_WME_AC(tid); |
| | 1506 | if (!WME_UAPSD_AC_CAN_TRIGGER(ac, ni)) |
| | 1507 | continue; |
| | 1508 | |
| | 1509 | DPRINTF(sc, ATH_DEBUG_UAPSD, |
| | 1510 | "%s: U-APSD trigger detected for node (%s) on AC %d\n", |
| | 1511 | __func__, ether_sprintf(ni->ni_macaddr), ac); |
| | 1512 | if (ni->ni_flags & IEEE80211_NODE_UAPSD_SP) { |
| | 1513 | /* have trigger, but SP in progress, so ignore */ |
| | 1514 | DPRINTF(sc, ATH_DEBUG_UAPSD, |
| | 1515 | "%s: SP already in progress - ignoring\n", |
| | 1516 | __func__); |
| | 1517 | continue; |
| | 1518 | } |
| | 1519 | |
| | 1520 | /* |
| | 1521 | * Detect duplicate triggers and drop if so. |
| | 1522 | */ |
| | 1523 | frame_seq = le16toh(*(__le16 *)qwh->i_seq); |
| | 1524 | if ((qwh->i_fc[1] & IEEE80211_FC1_RETRY) && |
| | 1525 | frame_seq == ni->ni_uapsd_trigseq[ac]) { |
| | 1526 | DPRINTF(sc, ATH_DEBUG_UAPSD, "%s: dropped dup trigger, ac %d, seq %d\n", |
| | 1527 | __func__, ac, frame_seq); |
| | 1528 | continue; |
| | 1529 | } |
| | 1530 | |
| | 1531 | an = ATH_NODE(ni); |
| | 1532 | |
| | 1533 | /* start the SP */ |
| | 1534 | ATH_NODE_UAPSD_LOCK(an); |
| | 1535 | ni->ni_stats.ns_uapsd_triggers++; |
| | 1536 | ni->ni_flags |= IEEE80211_NODE_UAPSD_SP; |
| | 1537 | ni->ni_uapsd_trigseq[ac] = frame_seq; |
| | 1538 | ATH_NODE_UAPSD_UNLOCK(an); |
| | 1539 | |
| | 1540 | ATH_TXQ_LOCK(uapsd_xmit_q); |
| | 1541 | if (STAILQ_EMPTY(&an->an_uapsd_q)) { |
| | 1542 | DPRINTF(sc, ATH_DEBUG_UAPSD, |
| | 1543 | "%s: Queue empty, generating QoS NULL to send\n", |
| | 1544 | __func__); |
| | 1545 | /* |
| | 1546 | * Empty queue, so need to send QoS null on this ac. Make a |
| | 1547 | * call that will dump a QoS null onto the node's queue, then |
| | 1548 | * we can proceed as normal. |
| | 1549 | */ |
| | 1550 | ieee80211_send_qosnulldata(ni, ac); |
| | 1551 | } |
| | 1552 | |
| | 1553 | if (STAILQ_FIRST(&an->an_uapsd_q)) { |
| | 1554 | struct ath_buf *last_buf = STAILQ_LAST(&an->an_uapsd_q, ath_buf, bf_list); |
| | 1555 | struct ath_desc *last_desc = last_buf->bf_desc; |
| | 1556 | struct ieee80211_qosframe *qwhl = (struct ieee80211_qosframe *)last_buf->bf_skb->data; |
| | 1557 | /* |
| | 1558 | * NB: flip the bit to cause intr on the EOSP desc, |
| | 1559 | * which is the last one |
| | 1560 | */ |
| | 1561 | ath_hal_txreqintrdesc(sc->sc_ah, last_desc); |
| | 1562 | qwhl->i_qos[0] |= IEEE80211_QOS_EOSP; |
| | 1563 | |
| | 1564 | if (IEEE80211_VAP_EOSPDROP_ENABLED(ni->ni_vap)) { |
| | 1565 | /* simulate lost EOSP */ |
| | 1566 | qwhl->i_addr1[0] |= 0x40; |
| | 1567 | } |
| | 1568 | |
| | 1569 | /* more data bit only for EOSP frame */ |
| | 1570 | if (an->an_uapsd_overflowqdepth) |
| | 1571 | qwhl->i_fc[1] |= IEEE80211_FC1_MORE_DATA; |
| | 1572 | else if (IEEE80211_NODE_UAPSD_USETIM(ni)) |
| | 1573 | ni->ni_vap->iv_set_tim(ni, 0); |
| | 1574 | |
| | 1575 | ni->ni_stats.ns_tx_uapsd += an->an_uapsd_qdepth; |
| | 1576 | |
| | 1577 | bus_dma_sync_single(sc->sc_bdev, last_buf->bf_skbaddr, |
| | 1578 | sizeof(*qwhl), BUS_DMA_TODEVICE); |
| | 1579 | |
| | 1580 | if (uapsd_xmit_q->axq_link) { |
| | 1581 | #ifdef AH_NEED_DESC_SWAP |
| | 1582 | *uapsd_xmit_q->axq_link = cpu_to_le32(STAILQ_FIRST(&an->an_uapsd_q)->bf_daddr); |
| | 1583 | #else |
| | 1584 | *uapsd_xmit_q->axq_link = STAILQ_FIRST(&an->an_uapsd_q)->bf_daddr; |
| | 1585 | #endif |
| | 1586 | } |
| | 1587 | /* below leaves an_uapsd_q NULL */ |
| | 1588 | STAILQ_CONCAT(&uapsd_xmit_q->axq_q, &an->an_uapsd_q); |
| | 1589 | uapsd_xmit_q->axq_link = &last_desc->ds_link; |
| | 1590 | ath_hal_puttxbuf(sc->sc_ah, |
| | 1591 | uapsd_xmit_q->axq_qnum, |
| | 1592 | (STAILQ_FIRST(&uapsd_xmit_q->axq_q))->bf_daddr); |
| | 1593 | ath_hal_txstart(sc->sc_ah, uapsd_xmit_q->axq_qnum); |
| | 1594 | } |
| | 1595 | an->an_uapsd_qdepth = 0; |
| | 1596 | |
| | 1597 | ATH_TXQ_UNLOCK(uapsd_xmit_q); |
| | 1598 | } |
| | 1599 | sc->sc_rxbufcur = bf; |
| | 1600 | ATH_RXBUF_UNLOCK(sc); |
| | 1601 | #undef PA2DESC |
| | 1602 | } |
| | 1603 | |
| | 1604 | /* |
| | 1605 | * Interrupt handler. Most of the actual processing is deferred. |
| | 1606 | */ |
| | 1607 | irqreturn_t |
| | 1608 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) |
| | 1609 | ath_intr(int irq, void *dev_id) |
| | 1610 | #else |
| | 1611 | ath_intr(int irq, void *dev_id, struct pt_regs *regs) |
| | 1612 | #endif |
| | 1613 | { |
| | 1614 | struct net_device *dev = dev_id; |
| | 1615 | struct ath_softc *sc = dev->priv; |
| | 1616 | struct ath_hal *ah = sc->sc_ah; |
| | 1617 | HAL_INT status; |
| | 1618 | int needmark; |
| | 1619 | |
| | 1620 | if (sc->sc_invalid) { |
| | 1621 | /* |
| | 1622 | * The hardware is not ready/present, don't touch anything. |
| | 1623 | * Note this can happen early on if the IRQ is shared. |
| | 1624 | */ |
| | 1625 | return IRQ_NONE; |
| | 1626 | } |
| | 1627 | if (!ath_hal_intrpend(ah)) /* shared irq, not for us */ |
| | 1628 | return IRQ_NONE; |
| | 1629 | if ((dev->flags & (IFF_RUNNING | IFF_UP)) != (IFF_RUNNING | IFF_UP)) { |
| | 1630 | DPRINTF(sc, ATH_DEBUG_INTR, "%s: flags 0x%x\n", |
| | 1631 | __func__, dev->flags); |
| | 1632 | ath_hal_getisr(ah, &status); /* clear ISR */ |
| | 1633 | ath_hal_intrset(ah, 0); /* disable further intr's */ |
| | 1634 | return IRQ_HANDLED; |
| | 1635 | } |
| | 1636 | needmark = 0; |
| | 1637 | /* |
| | 1638 | * Figure out the reason(s) for the interrupt. Note |
| | 1639 | * that the HAL returns a pseudo-ISR that may include |
| | 1640 | * bits we haven't explicitly enabled so we mask the |
| | 1641 | * value to ensure we only process bits we requested. |
| | 1642 | */ |
| | 1643 | ath_hal_getisr(ah, &status); /* NB: clears ISR too */ |
| | 1644 | DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status); |
| | 1645 | status &= sc->sc_imask; /* discard unasked for bits */ |
| | 1646 | if (status & HAL_INT_FATAL) { |
| | 1647 | sc->sc_stats.ast_hardware++; |
| | 1648 | ath_hal_intrset(ah, 0); /* disable intr's until reset */ |
| | 1649 | ATH_SCHEDULE_TQUEUE(&sc->sc_fataltq, &needmark); |
| | 1650 | } else if (status & HAL_INT_RXORN) { |
| | 1651 | sc->sc_stats.ast_rxorn++; |
| | 1652 | ath_hal_intrset(ah, 0); /* disable intr's until reset */ |
| | 1653 | ATH_SCHEDULE_TQUEUE(&sc->sc_rxorntq, &needmark); |
| | 1654 | } else { |
| | 1655 | if (status & HAL_INT_SWBA) { |
| | 1656 | /* |
| | 1657 | * Software beacon alert--time to send a beacon. |
| | 1658 | * Handle beacon transmission directly; deferring |
| | 1659 | * this is too slow to meet timing constraints |
| | 1660 | * under load. |
| | 1661 | */ |
| | 1662 | ath_beacon_send(sc, &needmark); |
| | 1663 | } |
| | 1664 | if (status & HAL_INT_RXEOL) { |
| | 1665 | /* |
| | 1666 | * NB: the hardware should re-read the link when |
| | 1667 | * RXE bit is written, but it doesn't work at |
| | 1668 | * least on older hardware revs. |
| | 1669 | */ |
| | 1670 | sc->sc_stats.ast_rxeol++; |
| | 1671 | } |
| | 1672 | if (status & HAL_INT_TXURN) { |
| | 1673 | sc->sc_stats.ast_txurn++; |
| | 1674 | /* bump tx trigger level */ |
| | 1675 | ath_hal_updatetxtriglevel(ah, AH_TRUE); |
| | 1676 | } |
| | 1677 | if (status & HAL_INT_RX) { |
| | 1678 | ath_uapsd_processtriggers(sc); |
| | 1679 | /* Get the noise floor data in interrupt context as we can't get it |
| | 1680 | * per frame, so we need to get it as soon as possible (i.e. the tasklet |
| | 1681 | * might take too long to fire */ |
| | 1682 | ath_hal_process_noisefloor(ah); |
| | 1683 | sc->sc_channoise = ath_hal_get_channel_noise(ah, &(sc->sc_curchan)); |
| | 1684 | ATH_SCHEDULE_TQUEUE(&sc->sc_rxtq, &needmark); |
| | 1685 | } |
| | 1686 | if (status & HAL_INT_TX) { |
| | 1687 | #ifdef ATH_SUPERG_DYNTURBO |
| | 1688 | /* |
| | 1689 | * Check if the beacon queue caused the interrupt |
| | 1690 | * when a dynamic turbo switch |
| | 1691 | * is pending so we can initiate the change. |
| | 1692 | * XXX must wait for all VAPs' beacons |
| | 1693 | */ |
| | 1694 | |
| | 1695 | if (sc->sc_dturbo_switch) { |
| | 1696 | u_int32_t txqs = (1 << sc->sc_bhalq); |
| | 1697 | ath_hal_gettxintrtxqs(ah, &txqs); |
| | 1698 | if(txqs & (1 << sc->sc_bhalq)) { |
| | 1699 | sc->sc_dturbo_switch = 0; |
| | 1700 | /* |
| | 1701 | * Hack: defer switch for 10ms to permit slow |
| | 1702 | * clients time to track us. This especially |
| | 1703 | * noticeable with Windows clients. |
| | 1704 | */ |
| | 1705 | mod_timer(&sc->sc_dturbo_switch_mode, |
| | 1706 | jiffies + msecs_to_jiffies(10)); |
| | 1707 | } |
| | 1708 | } |
| | 1709 | #endif |
| | 1710 | ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, &needmark); |
| | 1711 | } |
| | 1712 | if (status & HAL_INT_BMISS) { |
| | 1713 | sc->sc_stats.ast_bmiss++; |
| | 1714 | ATH_SCHEDULE_TQUEUE(&sc->sc_bmisstq, &needmark); |
| | 1715 | } |
| | 1716 | if (status & HAL_INT_MIB) { |
| | 1717 | sc->sc_stats.ast_mib++; |
| | 1718 | /* |
| | 1719 | * Disable interrupts until we service the MIB |
| | 1720 | * interrupt; otherwise it will continue to fire. |
| | 1721 | */ |
| | 1722 | ath_hal_intrset(ah, 0); |
| | 1723 | /* |
| | 1724 | * Let the HAL handle the event. We assume it will |
| | 1725 | * clear whatever condition caused the interrupt. |
| | 1726 | */ |
| | 1727 | ath_hal_mibevent(ah, &sc->sc_halstats); |
| | 1728 | ath_hal_intrset(ah, sc->sc_imask); |
| | 1729 | } |
| | 1730 | } |
| | 1731 | if (needmark) |
| | 1732 | mark_bh(IMMEDIATE_BH); |
| | 1733 | return IRQ_HANDLED; |
| | 1734 | } |
| | 1735 | |
| | 1736 | static void |
| | 1737 | ath_radar_task(struct work_struct *thr) |
| | 1738 | { |
| | 1739 | struct ath_softc *sc = container_of(thr, struct ath_softc, sc_radartask); |
| | 1740 | struct ath_hal *ah = sc->sc_ah; |
| | 1741 | struct ieee80211com *ic = &sc->sc_ic; |
| | 1742 | struct ieee80211_channel ichan; |
| | 1743 | HAL_CHANNEL hchan; |
| | 1744 | |
| | 1745 | sc->sc_rtasksched = 0; |
| | 1746 | if (ath_hal_procdfs(ah, &hchan)) { |
| | 1747 | /* |
| | 1748 | * DFS was found, initiate channel change |
| | 1749 | */ |
| | 1750 | ichan.ic_ieee = ath_hal_mhz2ieee(ah, hchan.channel, hchan.channelFlags); |
| | 1751 | ichan.ic_freq = hchan.channel; |
| | 1752 | ichan.ic_flags = hchan.channelFlags; |
| | 1753 | |
| | 1754 | if ((sc->sc_curchan.channel == hchan.channel) && |
| | 1755 | (sc->sc_curchan.channelFlags == hchan.channel)) { |
| | 1756 | if (hchan.privFlags & CHANNEL_INTERFERENCE) |
| | 1757 | sc->sc_curchan.privFlags |= CHANNEL_INTERFERENCE; |
| | 1758 | } |
| | 1759 | ieee80211_mark_dfs(ic, &ichan); |
| | 1760 | if (((ic->ic_flags_ext & IEEE80211_FEXT_MARKDFS) == 0) && |
| | 1761 | (ic->ic_opmode == IEEE80211_M_HOSTAP)) { |
| | 1762 | sc->sc_dfstest_ieeechan = ic->ic_curchan->ic_ieee; |
| | 1763 | sc->sc_dfstesttimer.function = ath_dfs_test_return; |
| | 1764 | sc->sc_dfstesttimer.expires = jiffies + (sc->sc_dfstesttime * HZ); |
| | 1765 | sc->sc_dfstesttimer.data = (unsigned long)sc; |
| | 1766 | if (sc->sc_dfstest == 0) { |
| | 1767 | sc->sc_dfstest = 1; |
| | 1768 | add_timer(&sc->sc_dfstesttimer); |
| | 1769 | } |
| | 1770 | } |
| | 1771 | } |
| | 1772 | } |
| | 1773 | |
| | 1774 | static void |
| | 1775 | ath_dfs_test_return(unsigned long data) |
| | 1776 | { |
| | 1777 | struct ath_softc *sc = (struct ath_softc *)data; |
| | 1778 | struct ieee80211com *ic = &sc->sc_ic; |
| | 1779 | |
| | 1780 | sc->sc_dfstest = 0; |
| | 1781 | ieee80211_dfs_test_return(ic, sc->sc_dfstest_ieeechan); |
| | 1782 | } |
| | 1783 | |
| | 1784 | static void |
| | 1785 | ath_fatal_tasklet(TQUEUE_ARG data) |
| | 1786 | { |
| | 1787 | struct net_device *dev = (struct net_device *)data; |
| | 1788 | |
| | 1789 | printk("%s: hardware error; resetting\n", dev->name); |
| | 1790 | ath_reset(dev); |
| | 1791 | } |
| | 1792 | |
| | 1793 | static void |
| | 1794 | ath_rxorn_tasklet(TQUEUE_ARG data) |
| | 1795 | { |
| | 1796 | struct net_device *dev = (struct net_device *)data; |
| | 1797 | |
| | 1798 | printk("%s: rx FIFO overrun; resetting\n", dev->name); |
| | 1799 | ath_reset(dev); |
| | 1800 | } |
| | 1801 | |
| | 1802 | static void |
| | 1803 | ath_bmiss_tasklet(TQUEUE_ARG data) |
| | 1804 | { |
| | 1805 | struct net_device *dev = (struct net_device *)data; |
| | 1806 | struct ath_softc *sc = dev->priv; |
| | 1807 | |
| | 1808 | if (time_before(jiffies, sc->sc_ic.ic_bmiss_guard)) { |
| | 1809 | /* Beacon miss interrupt occured too short after last beacon |
| | 1810 | * timer configuration. Ignore it as it could be spurious. */ |
| | 1811 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: ignored\n", __func__); |
| | 1812 | } else { |
| | 1813 | DPRINTF(sc, ATH_DEBUG_ANY, "%s\n", __func__); |
| | 1814 | ieee80211_beacon_miss(&sc->sc_ic); |
| | 1815 | } |
| | 1816 | } |
| | 1817 | |
| | 1818 | static u_int |
| | 1819 | ath_chan2flags(struct ieee80211_channel *chan) |
| | 1820 | { |
| | 1821 | u_int flags; |
| | 1822 | static const u_int modeflags[] = { |
| | 1823 | 0, /* IEEE80211_MODE_AUTO */ |
| | 1824 | CHANNEL_A, /* IEEE80211_MODE_11A */ |
| | 1825 | CHANNEL_B, /* IEEE80211_MODE_11B */ |
| | 1826 | CHANNEL_PUREG, /* IEEE80211_MODE_11G */ |
| | 1827 | 0, /* IEEE80211_MODE_FH */ |
| | 1828 | CHANNEL_108A, /* IEEE80211_MODE_TURBO_A */ |
| | 1829 | CHANNEL_108G, /* IEEE80211_MODE_TURBO_G */ |
| | 1830 | }; |
| | 1831 | |
| | 1832 | flags = modeflags[ieee80211_chan2mode(chan)]; |
| | 1833 | |
| | 1834 | if (IEEE80211_IS_CHAN_HALF(chan)) |
| | 1835 | flags |= CHANNEL_HALF; |
| | 1836 | else if (IEEE80211_IS_CHAN_QUARTER(chan)) |
| | 1837 | flags |= CHANNEL_QUARTER; |
| | 1838 | |
| | 1839 | return flags; |
| | 1840 | } |
| | 1841 | |
| | 1842 | /* |
| | 1843 | * Context: process context |
| | 1844 | */ |
| | 1845 | |
| | 1846 | static int |
| | 1847 | ath_init(struct net_device *dev) |
| | 1848 | { |
| | 1849 | struct ath_softc *sc = dev->priv; |
| | 1850 | struct ieee80211com *ic = &sc->sc_ic; |
| | 1851 | struct ath_hal *ah = sc->sc_ah; |
| | 1852 | HAL_STATUS status; |
| | 1853 | int error = 0; |
| | 1854 | |
| | 1855 | ATH_LOCK(sc); |
| | 1856 | |
| | 1857 | DPRINTF(sc, ATH_DEBUG_RESET, "%s: mode %d\n", __func__, ic->ic_opmode); |
| | 1858 | |
| | 1859 | /* |
| | 1860 | * Stop anything previously setup. This is safe |
| | 1861 | * whether this is the first time through or not. |
| | 1862 | */ |
| | 1863 | ath_stop_locked(dev); |
| | 1864 | |
| | 1865 | #ifdef ATH_CAP_TPC |
| | 1866 | ath_hal_setcapability(sc->sc_ah, HAL_CAP_TPC, 0, 1, NULL); |
| | 1867 | #endif |
| | 1868 | |
| | 1869 | /* Whether we should enable h/w TKIP MIC */ |
| | 1870 | if ((ic->ic_caps & IEEE80211_C_WME) == 0) |
| | 1871 | ath_hal_setcapability(sc->sc_ah, HAL_CAP_TKIP_MIC, 0, 0, NULL); |
| | 1872 | else { |
| | 1873 | if (((ic->ic_caps & IEEE80211_C_WME_TKIPMIC) == 0) && |
| | 1874 | (ic->ic_flags & IEEE80211_F_WME)) |
| | 1875 | ath_hal_setcapability(sc->sc_ah, HAL_CAP_TKIP_MIC, 0, 0, NULL); |
| | 1876 | else |
| | 1877 | ath_hal_setcapability(sc->sc_ah, HAL_CAP_TKIP_MIC, 0, 1, NULL); |
| | 1878 | } |
| | 1879 | |
| | 1880 | /* |
| | 1881 | * Flush the skb's allocated for receive in case the rx |
| | 1882 | * buffer size changes. This could be optimized but for |
| | 1883 | * now we do it each time under the assumption it does |
| | 1884 | * not happen often. |
| | 1885 | */ |
| | 1886 | ath_flushrecv(sc); |
| | 1887 | |
| | 1888 | /* |
| | 1889 | * The basic interface to setting the hardware in a good |
| | 1890 | * state is ``reset''. On return the hardware is known to |
| | 1891 | * be powered up and with interrupts disabled. This must |
| | 1892 | * be followed by initialization of the appropriate bits |
| | 1893 | * and then setup of the interrupt mask. |
| | 1894 | */ |
| | 1895 | sc->sc_curchan.channel = ic->ic_curchan->ic_freq; |
| | 1896 | sc->sc_curchan.channelFlags = ath_chan2flags(ic->ic_curchan); |
| | 1897 | if (!ath_hal_reset(ah, sc->sc_opmode, &sc->sc_curchan, AH_FALSE, &status)) { |
| | 1898 | printk("%s: unable to reset hardware: '%s' (HAL status %u) " |
| | 1899 | "(freq %u flags 0x%x)\n", dev->name, |
| | 1900 | ath_get_hal_status_desc(status), status, |
| | 1901 | sc->sc_curchan.channel, sc->sc_curchan.channelFlags); |
| | 1902 | error = -EIO; |
| | 1903 | goto done; |
| | 1904 | } |
| | 1905 | |
| | 1906 | if (sc->sc_softled) |
| | 1907 | ath_hal_gpioCfgOutput(ah, sc->sc_ledpin); |
| | 1908 | /* |
| | 1909 | * This is needed only to setup initial state |
| | 1910 | * but it's best done after a reset. |
| | 1911 | */ |
| | 1912 | ath_update_txpow(sc); |
| | 1913 | |
| | 1914 | /* Set the default RX antenna; it may get lost on reset. */ |
| | 1915 | ath_setdefantenna(sc, sc->sc_defant); |
| | 1916 | |
| | 1917 | /* |
| | 1918 | * Setup the hardware after reset: the key cache |
| | 1919 | * is filled as needed and the receive engine is |
| | 1920 | * set going. Frame transmit is handled entirely |
| | 1921 | * in the frame output path; there's nothing to do |
| | 1922 | * here except setup the interrupt mask. |
| | 1923 | */ |
| | 1924 | #if 0 |
| | 1925 | ath_initkeytable(sc); /* XXX still needed? */ |
| | 1926 | #endif |
| | 1927 | if (ath_startrecv(sc) != 0) { |
| | 1928 | printk("%s: unable to start recv logic\n", dev->name); |
| | 1929 | error = -EIO; |
| | 1930 | goto done; |
| | 1931 | } |
| | 1932 | /* Enable interrupts. */ |
| | 1933 | sc->sc_imask = HAL_INT_RX | HAL_INT_TX |
| | 1934 | | HAL_INT_RXEOL | HAL_INT_RXORN |
| | 1935 | | HAL_INT_FATAL | HAL_INT_GLOBAL; |
| | 1936 | /* |
| | 1937 | * Enable MIB interrupts when there are hardware phy counters. |
| | 1938 | * Note we only do this (at the moment) for station mode. |
| | 1939 | */ |
| | 1940 | if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) |
| | 1941 | sc->sc_imask |= HAL_INT_MIB; |
| | 1942 | ath_hal_intrset(ah, sc->sc_imask); |
| | 1943 | |
| | 1944 | /* |
| | 1945 | * The hardware should be ready to go now so it's safe |
| | 1946 | * to kick the 802.11 state machine as it's likely to |
| | 1947 | * immediately call back to us to send mgmt frames. |
| | 1948 | */ |
| | 1949 | ath_chan_change(sc, ic->ic_curchan); |
| | 1950 | ath_set_ack_bitrate(sc, sc->sc_ackrate); |
| | 1951 | dev->flags |= IFF_RUNNING; /* we are ready to go */ |
| | 1952 | ieee80211_start_running(ic); /* start all VAPs */ |
| | 1953 | #ifdef ATH_TX99_DIAG |
| | 1954 | if (sc->sc_tx99 != NULL) |
| | 1955 | sc->sc_tx99->start(sc->sc_tx99); |
| | 1956 | #endif |
| | 1957 | done: |
| | 1958 | ATH_UNLOCK(sc); |
| | 1959 | return error; |
| | 1960 | } |
| | 1961 | |
/* Bring the interface down with ATH_LOCK already held.  Tears down the
 * 802.11 layer, interrupts, tx/rx machinery and beacon state in a safe
 * order; steps that touch registers are skipped when the hardware has
 * been marked invalid (e.g. device removed).  Always returns 0.
 *
 * Caller must lock ATH_LOCK
 *
 * Context: softIRQ
 */
static int
ath_stop_locked(struct net_device *dev)
{
	struct ath_softc *sc = dev->priv;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: invalid %u flags 0x%x\n",
		__func__, sc->sc_invalid, dev->flags);

	if (dev->flags & IFF_RUNNING) {
		/*
		 * Shutdown the hardware and driver:
		 *    stop output from above
		 *    reset 802.11 state machine
		 *	(sends station deassoc/deauth frames)
		 *    turn off timers
		 *    disable interrupts
		 *    clear transmit machinery
		 *    clear receive machinery
		 *    turn off the radio
		 *    reclaim beacon resources
		 *
		 * Note that some of this work is not possible if the
		 * hardware is gone (invalid).
		 */
#ifdef ATH_TX99_DIAG
		if (sc->sc_tx99 != NULL)
			sc->sc_tx99->stop(sc->sc_tx99);
#endif
		netif_stop_queue(dev);		/* XXX re-enabled by ath_newstate */
		dev->flags &= ~IFF_RUNNING;	/* NB: avoid recursion */
		ieee80211_stop_running(ic);	/* stop all VAPs */
		if (!sc->sc_invalid) {
			ath_hal_intrset(ah, 0);
			if (sc->sc_softled) {
				/* stop LED blinking and force the LED off */
				del_timer(&sc->sc_ledtimer);
				ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
				sc->sc_blinking = 0;
				sc->sc_ledstate = 1;
			}
		}
		ath_draintxq(sc);
		if (!sc->sc_invalid) {
			ath_stoprecv(sc);
			ath_hal_phydisable(ah);
		} else
			sc->sc_rxlink = NULL;	/* HW gone: just drop the rx chain */
		ath_beacon_free(sc);		/* XXX needed? */
	} else
		ieee80211_stop_running(ic);	/* stop other VAPs */

	if (sc->sc_softled)
		ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);

	return 0;
}
| | 2023 | |
| | 2024 | /* |
| | 2025 | * Stop the device, grabbing the top-level lock to protect |
| | 2026 | * against concurrent entry through ath_init (which can happen |
| | 2027 | * if another thread does a system call and the thread doing the |
| | 2028 | * stop is preempted). |
| | 2029 | */ |
| | 2030 | static int |
| | 2031 | ath_stop(struct net_device *dev) |
| | 2032 | { |
| | 2033 | struct ath_softc *sc = dev->priv; |
| | 2034 | int error; |
| | 2035 | |
| | 2036 | ATH_LOCK(sc); |
| | 2037 | |
| | 2038 | if (!sc->sc_invalid) |
| | 2039 | ath_hal_setpower(sc->sc_ah, HAL_PM_AWAKE); |
| | 2040 | |
| | 2041 | error = ath_stop_locked(dev); |
| | 2042 | #if 0 |
| | 2043 | if (error == 0 && !sc->sc_invalid) { |
| | 2044 | /* |
| | 2045 | * Set the chip in full sleep mode. Note that we are |
| | 2046 | * careful to do this only when bringing the interface |
| | 2047 | * completely to a stop. When the chip is in this state |
| | 2048 | * it must be carefully woken up or references to |
| | 2049 | * registers in the PCI clock domain may freeze the bus |
| | 2050 | * (and system). This varies by chip and is mostly an |
| | 2051 | * issue with newer parts that go to sleep more quickly. |
| | 2052 | */ |
| | 2053 | ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP); |
| | 2054 | } |
| | 2055 | #endif |
| | 2056 | ATH_UNLOCK(sc); |
| | 2057 | |
| | 2058 | return error; |
| | 2059 | } |
| | 2060 | |
| | 2061 | static int |
| | 2062 | ar_device(int devid) |
| | 2063 | { |
| | 2064 | switch (devid) { |
| | 2065 | case AR5210_DEFAULT: |
| | 2066 | case AR5210_PROD: |
| | 2067 | case AR5210_AP: |
| | 2068 | return 5210; |
| | 2069 | case AR5211_DEFAULT: |
| | 2070 | case AR5311_DEVID: |
| | 2071 | case AR5211_LEGACY: |
| | 2072 | case AR5211_FPGA11B: |
| | 2073 | return 5211; |
| | 2074 | case AR5212_DEFAULT: |
| | 2075 | case AR5212_DEVID: |
| | 2076 | case AR5212_FPGA: |
| | 2077 | case AR5212_DEVID_IBM: |
| | 2078 | case AR5212_AR5312_REV2: |
| | 2079 | case AR5212_AR5312_REV7: |
| | 2080 | case AR5212_AR2313_REV8: |
| | 2081 | case AR5212_AR2315_REV6: |
| | 2082 | case AR5212_AR2315_REV7: |
| | 2083 | case AR5212_AR2317_REV1: |
| | 2084 | case AR5212_DEVID_0014: |
| | 2085 | case AR5212_DEVID_0015: |
| | 2086 | case AR5212_DEVID_0016: |
| | 2087 | case AR5212_DEVID_0017: |
| | 2088 | case AR5212_DEVID_0018: |
| | 2089 | case AR5212_DEVID_0019: |
| | 2090 | case AR5212_AR2413: |
| | 2091 | case AR5212_AR5413: |
| | 2092 | case AR5212_AR5424: |
| | 2093 | case AR5212_DEVID_FF19: |
| | 2094 | return 5212; |
| | 2095 | case AR5213_SREV_1_0: |
| | 2096 | case AR5213_SREV_REG: |
| | 2097 | case AR_SUBVENDOR_ID_NOG: |
| | 2098 | case AR_SUBVENDOR_ID_NEW_A: |
| | 2099 | return 5213; |
| | 2100 | default: |
| | 2101 | return 0; /* unknown */ |
| | 2102 | } |
| | 2103 | } |
| | 2104 | |
| | 2105 | |
| | 2106 | static int |
| | 2107 | ath_set_ack_bitrate(struct ath_softc *sc, int high) |
| | 2108 | { |
| | 2109 | struct ath_hal *ah = sc->sc_ah; |
| | 2110 | if (ar_device(sc->devid) == 5212 || ar_device(sc->devid) == 5213) { |
| | 2111 | /* set ack to be sent at low bit-rate */ |
| | 2112 | /* registers taken from the OpenBSD 5212 HAL */ |
| | 2113 | #define AR5K_AR5212_STA_ID1 0x8004 |
| | 2114 | #define AR5K_AR5212_STA_ID1_ACKCTS_6MB 0x01000000 |
| | 2115 | #define AR5K_AR5212_STA_ID1_BASE_RATE_11B 0x02000000 |
| | 2116 | u_int32_t v = AR5K_AR5212_STA_ID1_BASE_RATE_11B | AR5K_AR5212_STA_ID1_ACKCTS_6MB; |
| | 2117 | if (high) { |
| | 2118 | OS_REG_WRITE(ah, AR5K_AR5212_STA_ID1, OS_REG_READ(ah, AR5K_AR5212_STA_ID1) & ~v); |
| | 2119 | } else { |
| | 2120 | OS_REG_WRITE(ah, AR5K_AR5212_STA_ID1, OS_REG_READ(ah, AR5K_AR5212_STA_ID1) | v); |
| | 2121 | } |
| | 2122 | return 0; |
| | 2123 | } |
| | 2124 | return 1; |
| | 2125 | } |
| | 2126 | |
| | 2127 | /* |
| | 2128 | * Reset the hardware w/o losing operational state. This is |
| | 2129 | * basically a more efficient way of doing ath_stop, ath_init, |
| | 2130 | * followed by state transitions to the current 802.11 |
| | 2131 | * operational state. Used to recover from errors rx overrun |
| | 2132 | * and to reset the hardware when rf gain settings must be reset. |
| | 2133 | */ |
| | 2134 | static int |
| | 2135 | ath_reset(struct net_device *dev) |
| | 2136 | { |
| | 2137 | struct ath_softc *sc = dev->priv; |
| | 2138 | struct ieee80211com *ic = &sc->sc_ic; |
| | 2139 | struct ath_hal *ah = sc->sc_ah; |
| | 2140 | struct ieee80211_channel *c; |
| | 2141 | HAL_STATUS status; |
| | 2142 | |
| | 2143 | /* |
| | 2144 | * Convert to a HAL channel description with the flags |
| | 2145 | * constrained to reflect the current operating mode. |
| | 2146 | */ |
| | 2147 | c = ic->ic_curchan; |
| | 2148 | sc->sc_curchan.channel = c->ic_freq; |
| | 2149 | sc->sc_curchan.channelFlags = ath_chan2flags(c); |
| | 2150 | |
| | 2151 | ath_hal_intrset(ah, 0); /* disable interrupts */ |
| | 2152 | ath_draintxq(sc); /* stop xmit side */ |
| | 2153 | ath_stoprecv(sc); /* stop recv side */ |
| | 2154 | /* NB: indicate channel change so we do a full reset */ |
| | 2155 | if (!ath_hal_reset(ah, sc->sc_opmode, &sc->sc_curchan, AH_TRUE, &status)) |
| | 2156 | printk("%s: %s: unable to reset hardware: '%s' (HAL status %u)\n", |
| | 2157 | dev->name, __func__, ath_get_hal_status_desc(status), status); |
| | 2158 | ath_update_txpow(sc); /* update tx power state */ |
| | 2159 | if (ath_startrecv(sc) != 0) /* restart recv */ |
| | 2160 | printk("%s: %s: unable to start recv logic\n", |
| | 2161 | dev->name, __func__); |
| | 2162 | if (sc->sc_softled) |
| | 2163 | ath_hal_gpioCfgOutput(ah, sc->sc_ledpin); |
| | 2164 | |
| | 2165 | /* |
| | 2166 | * We may be doing a reset in response to an ioctl |
| | 2167 | * that changes the channel so update any state that |
| | 2168 | * might change as a result. |
| | 2169 | */ |
| | 2170 | ath_chan_change(sc, c); |
| | 2171 | if (sc->sc_beacons) |
| | 2172 | ath_beacon_config(sc, NULL); /* restart beacons */ |
| | 2173 | ath_hal_intrset(ah, sc->sc_imask); |
| | 2174 | ath_set_ack_bitrate(sc, sc->sc_ackrate); |
| | 2175 | netif_wake_queue(dev); /* restart xmit */ |
| | 2176 | #ifdef ATH_SUPERG_XR |
| | 2177 | /* |
| | 2178 | * restart the group polls. |
| | 2179 | */ |
| | 2180 | if (sc->sc_xrgrppoll) { |
| | 2181 | struct ieee80211vap *vap; |
| | 2182 | TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) |
| | 2183 | if (vap && (vap->iv_flags & IEEE80211_F_XR)) |
| | 2184 | break; |
| | 2185 | ath_grppoll_stop(vap); |
| | 2186 | ath_grppoll_start(vap, sc->sc_xrpollcount); |
| | 2187 | } |
| | 2188 | #endif |
| | 2189 | return 0; |
| | 2190 | } |
| | 2191 | |
| | 2192 | |
/* Swap transmit descriptor words to little-endian, as required by the
 * hardware on big-endian hosts.
 * If the AH_NEED_DESC_SWAP flag is not defined this becomes a "null"
 * (no-op) function.
 */
static __inline void
ath_desc_swap(struct ath_desc *ds)
{
#ifdef AH_NEED_DESC_SWAP
	/* NB: only the first two hw status/control words are swapped here;
	 * presumably the remaining ds_hw words are filled elsewhere. */
	ds->ds_link = cpu_to_le32(ds->ds_link);
	ds->ds_data = cpu_to_le32(ds->ds_data);
	ds->ds_ctl0 = cpu_to_le32(ds->ds_ctl0);
	ds->ds_ctl1 = cpu_to_le32(ds->ds_ctl1);
	ds->ds_hw[0] = cpu_to_le32(ds->ds_hw[0]);
	ds->ds_hw[1] = cpu_to_le32(ds->ds_hw[1]);
#endif
}
| | 2209 | |
| | 2210 | /* |
| | 2211 | * Insert a buffer on a txq |
| | 2212 | * |
| | 2213 | */ |
| | 2214 | static __inline void |
| | 2215 | ath_tx_txqaddbuf(struct ath_softc *sc, struct ieee80211_node *ni, |
| | 2216 | struct ath_txq *txq, struct ath_buf *bf, |
| | 2217 | struct ath_desc *lastds, int framelen) |
| | 2218 | { |
| | 2219 | struct ath_hal *ah = sc->sc_ah; |
| | 2220 | |
| | 2221 | /* |
| | 2222 | * Insert the frame on the outbound list and |
| | 2223 | * pass it on to the hardware. |
| | 2224 | */ |
| | 2225 | ATH_TXQ_LOCK(txq); |
| | 2226 | if (ni && ni->ni_vap && txq == &ATH_VAP(ni->ni_vap)->av_mcastq) { |
| | 2227 | /* |
| | 2228 | * The CAB queue is started from the SWBA handler since |
| | 2229 | * frames only go out on DTIM and to avoid possible races. |
| | 2230 | */ |
| | 2231 | ath_hal_intrset(ah, sc->sc_imask & ~HAL_INT_SWBA); |
| | 2232 | ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); |
| | 2233 | DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: txq depth = %d\n", __func__, txq->axq_depth); |
| | 2234 | if (txq->axq_link != NULL) { |
| | 2235 | #ifdef AH_NEED_DESC_SWAP |
| | 2236 | *txq->axq_link = cpu_to_le32(bf->bf_daddr); |
| | 2237 | #else |
| | 2238 | *txq->axq_link = bf->bf_daddr; |
| | 2239 | #endif |
| | 2240 | DPRINTF(sc, ATH_DEBUG_XMIT, "%s: link[%u](%p)=%llx (%p)\n", |
| | 2241 | __func__, |
| | 2242 | txq->axq_qnum, txq->axq_link, |
| | 2243 | ito64(bf->bf_daddr), bf->bf_desc); |
| | 2244 | } |
| | 2245 | txq->axq_link = &lastds->ds_link; |
| | 2246 | ath_hal_intrset(ah, sc->sc_imask); |
| | 2247 | } else { |
| | 2248 | ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); |
| | 2249 | DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: txq depth = %d\n", __func__, txq->axq_depth); |
| | 2250 | if (txq->axq_link == NULL) { |
| | 2251 | ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); |
| | 2252 | DPRINTF(sc, ATH_DEBUG_XMIT, "%s: TXDP[%u] = %llx (%p)\n", |
| | 2253 | __func__, |
| | 2254 | txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); |
| | 2255 | } else { |
| | 2256 | #ifdef AH_NEED_DESC_SWAP |
| | 2257 | *txq->axq_link = cpu_to_le32(bf->bf_daddr); |
| | 2258 | #else |
| | 2259 | *txq->axq_link = bf->bf_daddr; |
| | 2260 | #endif |
| | 2261 | DPRINTF(sc, ATH_DEBUG_XMIT, "%s: link[%u] (%p)=%llx (%p)\n", |
| | 2262 | __func__, |
| | 2263 | txq->axq_qnum, txq->axq_link, |
| | 2264 | ito64(bf->bf_daddr), bf->bf_desc); |
| | 2265 | } |
| | 2266 | txq->axq_link = &lastds->ds_link; |
| | 2267 | ath_hal_txstart(ah, txq->axq_qnum); |
| | 2268 | sc->sc_dev->trans_start = jiffies; |
| | 2269 | } |
| | 2270 | ATH_TXQ_UNLOCK(txq); |
| | 2271 | |
| | 2272 | sc->sc_devstats.tx_packets++; |
| | 2273 | sc->sc_devstats.tx_bytes += framelen; |
| | 2274 | } |
| | 2275 | |
| | 2276 | static int |
| | 2277 | dot11_to_ratecode(struct ath_softc *sc, const HAL_RATE_TABLE *rt, int dot11) |
| | 2278 | { |
| | 2279 | int index = sc->sc_rixmap[dot11 & IEEE80211_RATE_VAL]; |
| | 2280 | if (index >= 0 && index < rt->rateCount) |
| | 2281 | return rt->info[index].rateCode; |
| | 2282 | |
| | 2283 | return rt->info[sc->sc_minrateix].rateCode; |
| | 2284 | } |
| | 2285 | |
| | 2286 | |
/*
 * Transmit a raw (pre-formed 802.11) frame.  The caller supplies the
 * PHY parameters (rate/retry series, tx power) in skb->cb immediately
 * after the ieee80211_cb; rate control is bypassed entirely.  The
 * frame is mapped for DMA, a single descriptor is built and the buffer
 * is handed to the WME queue selected by skb->priority.
 * Always returns 0 (errors are not currently reported to the caller).
 */
static int
ath_tx_startraw(struct net_device *dev, struct ath_buf *bf, struct sk_buff *skb)
{
	struct ath_softc *sc = dev->priv;
	struct ath_hal *ah = sc->sc_ah;
	/* NB: assumes the caller laid out phy params right after the
	 * ieee80211_cb in skb->cb — TODO confirm against the injector. */
	struct ieee80211_phy_params *ph = (struct ieee80211_phy_params *) (skb->cb + sizeof(struct ieee80211_cb));
	const HAL_RATE_TABLE *rt;
	int pktlen;
	int hdrlen;
	HAL_PKT_TYPE atype;
	u_int flags;
	int keyix;
	int try0;
	int power;
	u_int8_t antenna, txrate;
	struct ath_txq *txq=NULL;
	struct ath_desc *ds=NULL;
	struct ieee80211_frame *wh;

	wh = (struct ieee80211_frame *) skb->data;
	try0 = ph->try0;
	rt = sc->sc_currates;
	txrate = dot11_to_ratecode(sc, rt, ph->rate0);
	/* Clamp requested tx power to 30 dBm (units of 0.5 dBm). */
	power = ph->power > 60 ? 60 : ph->power;
	hdrlen = ieee80211_anyhdrsize(wh);
	pktlen = skb->len + IEEE80211_CRC_LEN;

	/* Raw frames are sent unencrypted: no key cache entry. */
	keyix = HAL_TXKEYIX_INVALID;
	flags = HAL_TXDESC_INTREQ | HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */

	bf->bf_skbaddr = bus_map_single(sc->sc_bdev,
		skb->data, pktlen, BUS_DMA_TODEVICE);
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: skb %p [data %p len %u] skbaddr %llx\n",
		__func__, skb, skb->data, skb->len, ito64(bf->bf_skbaddr));


	bf->bf_skb = skb;
	bf->bf_node = NULL;

#ifdef ATH_SUPERG_FF
	/* Raw frames are never aggregated: exactly one descriptor. */
	bf->bf_numdesc = 1;
#endif

	/* setup descriptors */
	ds = bf->bf_desc;
	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));


	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
		sc->sc_stats.ast_tx_noack++;
		try0 = 1;	/* NB: no retries for unacked frames */
	}
	atype = HAL_PKT_TYPE_NORMAL;		/* default */
	txq = sc->sc_ac2q[skb->priority & 0x3];


	flags |= HAL_TXDESC_INTREQ;
	antenna = sc->sc_txantenna;

	/* XXX check return value? */
	ath_hal_setuptxdesc(ah, ds
		, pktlen	/* packet length */
		, hdrlen	/* header length */
		, atype		/* Atheros packet type */
		, power		/* txpower */
		, txrate, try0	/* series 0 rate/tries */
		, keyix		/* key cache index */
		, antenna	/* antenna mode */
		, flags		/* flags */
		, 0		/* rts/cts rate */
		, 0		/* rts/cts duration */
		, 0		/* comp icv len */
		, 0		/* comp iv len */
		, ATH_COMP_PROC_NO_COMP_NO_CCS	/* comp scheme */
		);

	/* Program the multi-rate retry series only if the caller asked
	 * for a second series; otherwise series 0 alone applies. */
	if (ph->try1) {
		ath_hal_setupxtxdesc(sc->sc_ah, ds
			, dot11_to_ratecode(sc, rt, ph->rate1), ph->try1 /* series 1 */
			, dot11_to_ratecode(sc, rt, ph->rate2), ph->try2 /* series 2 */
			, dot11_to_ratecode(sc, rt, ph->rate3), ph->try3 /* series 3 */
			);
	}
	bf->bf_flags = flags;			/* record for post-processing */

	/* Single-descriptor frame: no link to a next descriptor. */
	ds->ds_link = 0;
	ds->ds_data = bf->bf_skbaddr;

	ath_hal_filltxdesc(ah, ds
		, skb->len	/* segment length */
		, AH_TRUE	/* first segment */
		, AH_TRUE	/* last segment */
		, ds		/* first descriptor */
		);

	/* NB: The desc swap function becomes void,
	 * if descriptor swapping is not enabled
	 */
	ath_desc_swap(ds);

	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: Q%d: %08x %08x %08x %08x %08x %08x\n",
		__func__, M_FLAG_GET(skb, M_UAPSD) ? 0 : txq->axq_qnum, ds->ds_link, ds->ds_data,
		ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]);

	ath_tx_txqaddbuf(sc, NULL, txq, bf, ds, pktlen);
	return 0;
}
| | 2396 | |
| | 2397 | #ifdef ATH_SUPERG_FF |
| | 2398 | /* |
| | 2399 | * Flush FF staging queue. |
| | 2400 | */ |
/* Flush-done predicate that never signals completion: used to drain
 * the fast-frame staging queue completely (see ath_ffstageq_flush). */
static int
ath_ff_neverflushtestdone(struct ath_txq *txq, struct ath_buf *bf)
{
	return 0;
}
| | 2406 | |
| | 2407 | static int |
| | 2408 | ath_ff_ageflushtestdone(struct ath_txq *txq, struct ath_buf *bf) |
| | 2409 | { |
| | 2410 | if ( (txq->axq_totalqueued - bf->bf_queueage) < ATH_FF_STAGEQAGEMAX ) |
| | 2411 | return 1; |
| | 2412 | |
| | 2413 | return 0; |
| | 2414 | } |
| | 2415 | |
| | 2416 | /* Caller must not hold ATH_TXQ_LOCK and ATH_TXBUF_LOCK |
| | 2417 | * |
| | 2418 | * Context: softIRQ |
| | 2419 | */ |
static void
ath_ffstageq_flush(struct ath_softc *sc, struct ath_txq *txq,
	int (*ath_ff_flushdonetest)(struct ath_txq *txq, struct ath_buf *bf))
{
	struct ath_buf *bf_ff = NULL;
	struct ieee80211_node *ni = NULL;
	int pktlen;
	int framecnt;

	/* Pop staged fast-frames from the tail (oldest first) until the
	 * supplied predicate says we are done or the queue is empty. */
	for (;;) {
		ATH_TXQ_LOCK(txq);

		bf_ff = TAILQ_LAST(&txq->axq_stageq, axq_headtype);
		if ((!bf_ff) || ath_ff_flushdonetest(txq, bf_ff))
		{
			ATH_TXQ_UNLOCK(txq);
			break;
		}

		ni = bf_ff->bf_node;
		KASSERT(ATH_NODE(ni)->an_tx_ffbuf[bf_ff->bf_skb->priority],
			("no bf_ff on staging queue %p", bf_ff));
		ATH_NODE(ni)->an_tx_ffbuf[bf_ff->bf_skb->priority] = NULL;
		TAILQ_REMOVE(&txq->axq_stageq, bf_ff, bf_stagelist);

		/* Drop the txq lock before encapsulating/transmitting;
		 * ath_tx_start() takes its own locks. */
		ATH_TXQ_UNLOCK(txq);

		/* encap and xmit */
		bf_ff->bf_skb = ieee80211_encap(ni, bf_ff->bf_skb, &framecnt);
		if (bf_ff->bf_skb == NULL) {
			DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
				"%s: discard, encapsulation failure\n", __func__);
			sc->sc_stats.ast_tx_encap++;
			goto bad;
		}
		pktlen = bf_ff->bf_skb->len;	/* NB: don't reference skb below */
		if (ath_tx_start(sc->sc_dev, ni, bf_ff, bf_ff->bf_skb, 0) == 0)
			continue;
	bad:
		/* Failure path: release the node reference taken with the
		 * staged frame, free the skb and return the tx buffer. */
		ieee80211_free_node(ni);
		if (bf_ff->bf_skb != NULL) {
			dev_kfree_skb(bf_ff->bf_skb);
			bf_ff->bf_skb = NULL;
		}
		bf_ff->bf_node = NULL;

		ATH_TXBUF_LOCK_IRQ(sc);
		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf_ff, bf_list);
		ATH_TXBUF_UNLOCK_IRQ(sc);
	}
}
| | 2471 | #endif |
| | 2472 | |
/* Dequeue one TX buffer from the free list into 'bf' and append it to
 * 'bf_head' (both, along with 'sc' and 'dev', must be in scope at the
 * expansion site).  If this drains the free list the net device queue
 * is stopped and the TX tasklet is scheduled so completed buffers get
 * reclaimed.  'bf' is left NULL when no buffer was available; callers
 * must check for that after expansion. */
#define ATH_HARDSTART_GET_TX_BUF_WITH_LOCK				\
	ATH_TXBUF_LOCK_IRQ(sc);						\
	bf = STAILQ_FIRST(&sc->sc_txbuf);				\
	if (bf != NULL) {						\
		STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);		\
		STAILQ_INSERT_TAIL(&bf_head, bf, bf_list);		\
	}								\
	/* XXX use a counter and leave at least one for mgmt frames */	\
	if (STAILQ_EMPTY(&sc->sc_txbuf)) {				\
		DPRINTF(sc, ATH_DEBUG_XMIT,				\
			"%s: stop queue\n", __func__);			\
		sc->sc_stats.ast_tx_qstop++;				\
		netif_stop_queue(dev);					\
		sc->sc_devstopped = 1;					\
		ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, NULL);		\
	}								\
	ATH_TXBUF_UNLOCK_IRQ(sc);					\
	if (bf == NULL) {		/* NB: should not happen */	\
		DPRINTF(sc,ATH_DEBUG_XMIT,				\
			"%s: discard, no xmit buf\n", __func__);	\
		sc->sc_stats.ast_tx_nobuf++;				\
	}
| | 2495 | |
| | 2496 | /* |
| | 2497 | * Transmit a data packet. On failure caller is |
| | 2498 | * assumed to reclaim the resources. |
| | 2499 | * |
| | 2500 | * Context: process context with BH's disabled |
| | 2501 | */ |
static int
ath_hardstart(struct sk_buff *skb, struct net_device *dev)
{
	struct ath_softc *sc = dev->priv;
	struct ieee80211_node *ni = NULL;
	struct ath_buf *bf = NULL;
	struct ieee80211_cb *cb = (struct ieee80211_cb *) skb->cb;
	struct ether_header *eh;
	STAILQ_HEAD(tmp_bf_head, ath_buf) bf_head;	/* buffers claimed for this skb */
	struct ath_buf *tbf, *tempbf;
	struct sk_buff *tskb;
	int framecnt;
	int requeue = 0;	/* set when the kernel should re-queue the skb */
#ifdef ATH_SUPERG_FF
	int pktlen;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_node *an;
	struct ath_txq *txq = NULL;
	int ff_flush;
	struct ieee80211vap *vap;
#endif

	if ((dev->flags & IFF_RUNNING) == 0 || sc->sc_invalid) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
			"%s: discard, invalid %d flags %x\n",
			__func__, sc->sc_invalid, dev->flags);
		sc->sc_stats.ast_tx_invalid++;
		return -ENETDOWN;
	}

	STAILQ_INIT(&bf_head);

	/* Raw (pre-formed 802.11) frames bypass encapsulation and the
	 * fast-frame machinery entirely. */
	if (cb->flags & M_RAW) {
		ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
		if (bf == NULL)
			goto hardstart_fail;
		ath_tx_startraw(dev, bf,skb);
		return NETDEV_TX_OK;
	}

	eh = (struct ether_header *) skb->data;
	ni = cb->ni;		/* NB: always passed down by 802.11 layer */
	if (ni == NULL) {
		/* NB: this happens if someone marks the underlying device up */
		DPRINTF(sc, ATH_DEBUG_XMIT,
			"%s: discard, no node in cb\n", __func__);
		goto hardstart_fail;
	}
#ifdef ATH_SUPERG_FF
	vap = ni->ni_vap;

	if (M_FLAG_GET(skb, M_UAPSD)) {
		/* bypass FF handling */
		ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
		if (bf == NULL)
			goto hardstart_fail;
		goto ff_bypass;
	}

	/*
	 * Fast frames check.
	 */
	ATH_FF_MAGIC_CLR(skb);
	an = ATH_NODE(ni);

	txq = sc->sc_ac2q[skb->priority];

	/* Tail-drop: when the hardware queue is deep, push back on the
	 * kernel (NETDEV_TX_BUSY via requeue) rather than dropping. */
	if (txq->axq_depth > TAIL_DROP_COUNT) {
		sc->sc_stats.ast_tx_discard++;
		/* queue is full, let the kernel backlog the skb */
		requeue = 1;
		goto hardstart_fail;
	}

	/* NB: use this lock to protect an->an_ff_txbuf in athff_can_aggregate()
	 * call too.
	 */
	ATH_TXQ_LOCK(txq);
	if (athff_can_aggregate(sc, eh, an, skb, vap->iv_fragthreshold, &ff_flush)) {

		if (an->an_tx_ffbuf[skb->priority]) { /* i.e., frame on the staging queue */
			bf = an->an_tx_ffbuf[skb->priority];

			/* get (and remove) the frame from staging queue */
			TAILQ_REMOVE(&txq->axq_stageq, bf, bf_stagelist);
			an->an_tx_ffbuf[skb->priority] = NULL;

			ATH_TXQ_UNLOCK(txq);

			/*
			 * chain skbs and add FF magic
			 *
			 * NB: the arriving skb should not be on a list (skb->list),
			 * so "re-using" the skb next field should be OK.
			 */
			bf->bf_skb->next = skb;
			skb->next = NULL;
			skb = bf->bf_skb;
			ATH_FF_MAGIC_PUT(skb);

			/* decrement extra node reference made when an_tx_ffbuf[] was set */
			//ieee80211_free_node(ni); /* XXX where was it set ? */

			DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
				"%s: aggregating fast-frame\n", __func__);
		} else {
			/* NB: careful grabbing the TX_BUF lock since still holding the txq lock.
			 * this could be avoided by always obtaining the txbuf earlier,
			 * but the "if" portion of this "if/else" clause would then need
			 * to give the buffer back.
			 */
			ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
			if (bf == NULL) {
				ATH_TXQ_UNLOCK(txq);
				goto hardstart_fail;
			}
			DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
				"%s: adding to fast-frame stage Q\n", __func__);

			/* Park the frame on the staging queue hoping a
			 * second frame arrives to aggregate with. */
			bf->bf_skb = skb;
			bf->bf_node = ni;
			bf->bf_queueage = txq->axq_totalqueued;
			an->an_tx_ffbuf[skb->priority] = bf;

			TAILQ_INSERT_HEAD(&txq->axq_stageq, bf, bf_stagelist);

			ATH_TXQ_UNLOCK(txq);

			return NETDEV_TX_OK;
		}
	} else {
		if (ff_flush) {
			/* Aggregation not possible: flush the frame that
			 * was staged for this priority before proceeding. */
			struct ath_buf *bf_ff = an->an_tx_ffbuf[skb->priority];

			TAILQ_REMOVE(&txq->axq_stageq, bf_ff, bf_stagelist);
			an->an_tx_ffbuf[skb->priority] = NULL;

			ATH_TXQ_UNLOCK(txq);

			/* encap and xmit */
			bf_ff->bf_skb = ieee80211_encap(ni, bf_ff->bf_skb, &framecnt);

			if (bf_ff->bf_skb == NULL) {
				DPRINTF(sc, ATH_DEBUG_XMIT,
					"%s: discard, ff flush encap failure\n",
					__func__);
				sc->sc_stats.ast_tx_encap++;
				goto ff_flushbad;
			}
			pktlen = bf_ff->bf_skb->len;	/* NB: don't reference skb below */
			/* NB: ath_tx_start() will use ATH_TXBUF_LOCK_BH(). The _BH
			 * portion is not needed here since we're running at
			 * interrupt time, but should be harmless.
			 */
			if (ath_tx_start(dev, ni, bf_ff, bf_ff->bf_skb, 0))
				goto ff_flushbad;
			goto ff_flushdone;
		ff_flushbad:
			DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
				"%s: ff stageq flush failure\n", __func__);
			ieee80211_free_node(ni);
			if (bf_ff->bf_skb) {
				dev_kfree_skb(bf_ff->bf_skb);
				bf_ff->bf_skb = NULL;
			}
			bf_ff->bf_node = NULL;

			ATH_TXBUF_LOCK(sc);
			STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf_ff, bf_list);
			ATH_TXBUF_UNLOCK(sc);
			goto ff_flushdone;
		}
		/*
		 * XXX: out-of-order condition only occurs for AP mode and multicast.
		 * But, there may be no valid way to get this condition.
		 */
		else if (an->an_tx_ffbuf[skb->priority]) {
			DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
				"%s: Out-Of-Order fast-frame\n", __func__);
			ATH_TXQ_UNLOCK(txq);
		} else
			ATH_TXQ_UNLOCK(txq);

	ff_flushdone:
		ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
		if (bf == NULL)
			goto hardstart_fail;
	}

ff_bypass:

#else /* ATH_SUPERG_FF */

	ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;

#endif /* ATH_SUPERG_FF */

	/*
	 * Encapsulate the packet for transmission.
	 */
	skb = ieee80211_encap(ni, skb, &framecnt);
	if (skb == NULL) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
			"%s: discard, encapsulation failure\n", __func__);
		sc->sc_stats.ast_tx_encap++;
		goto hardstart_fail;
	}

	/* Encapsulation may fragment: one ath_buf per fragment. */
	if (framecnt > 1) {
		int bfcnt;

		/*
		** Allocate 1 ath_buf for each frame given 1 was
		** already alloc'd
		*/
		ATH_TXBUF_LOCK(sc);
		for (bfcnt = 1; bfcnt < framecnt; ++bfcnt) {
			if ((tbf = STAILQ_FIRST(&sc->sc_txbuf)) != NULL) {
				STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
				STAILQ_INSERT_TAIL(&bf_head, tbf, bf_list);
			}
			else
				break;

			/* each fragment carries its own node reference */
			ieee80211_ref_node(ni);
		}

		if (bfcnt != framecnt) {
			if (!STAILQ_EMPTY(&bf_head)) {
				/*
				** Failed to alloc enough ath_bufs;
				** return to sc_txbuf list
				*/
				STAILQ_FOREACH_SAFE(tbf, &bf_head, bf_list, tempbf) {
					STAILQ_INSERT_TAIL(&sc->sc_txbuf, tbf, bf_list);
				}
			}
			ATH_TXBUF_UNLOCK(sc);
			STAILQ_INIT(&bf_head);
			goto hardstart_fail;
		}
		ATH_TXBUF_UNLOCK(sc);

		/* Walk the fragment chain (linked via skb->next), giving
		 * each fragment a buffer and the length of the next one
		 * (needed for the duration field). */
		while ((bf = STAILQ_FIRST(&bf_head)) != NULL && skb != NULL) {
			int nextfraglen = 0;

			STAILQ_REMOVE_HEAD(&bf_head, bf_list);
			tskb = skb->next;
			skb->next = NULL;
			if (tskb)
				nextfraglen = tskb->len;

			if (ath_tx_start(dev, ni, bf, skb, nextfraglen) != 0) {
				STAILQ_INSERT_TAIL(&bf_head, bf, bf_list);
				skb->next = tskb;	/* re-link for cleanup */
				goto hardstart_fail;
			}
			skb = tskb;
		}
	} else {
		if (ath_tx_start(dev, ni, bf, skb, 0) != 0) {
			STAILQ_INSERT_TAIL(&bf_head, bf, bf_list);
			goto hardstart_fail;
		}
	}

#ifdef ATH_SUPERG_FF
	/*
	 * flush out stale FF from staging Q for applicable operational modes.
	 */
	/* XXX: ADHOC mode too? */
	if (txq && ic->ic_opmode == IEEE80211_M_HOSTAP)
		ath_ffstageq_flush(sc, txq, ath_ff_ageflushtestdone);
#endif

	return NETDEV_TX_OK;

hardstart_fail:
	/* Return any claimed tx buffers and drop the node references
	 * they carried; then either ask the kernel to requeue the skb
	 * (queue-full case) or free the whole fragment chain. */
	if (!STAILQ_EMPTY(&bf_head)) {
		ATH_TXBUF_LOCK(sc);
		STAILQ_FOREACH_SAFE(tbf, &bf_head, bf_list, tempbf) {
			tbf->bf_skb = NULL;
			tbf->bf_node = NULL;

			if (ni != NULL)
				ieee80211_free_node(ni);

			STAILQ_INSERT_TAIL(&sc->sc_txbuf, tbf, bf_list);
		}
		ATH_TXBUF_UNLOCK(sc);
	}

	/* let the kernel requeue the skb (don't free it!) */
	if (requeue)
		return NETDEV_TX_BUSY;

	/* free sk_buffs */
	while (skb) {
		tskb = skb->next;
		skb->next = NULL;
		dev_kfree_skb(skb);
		skb = tskb;
	}
	return NETDEV_TX_OK;
}
| | 2807 | #undef ATH_HARDSTART_GET_TX_BUF_WITH_LOCK |
| | 2808 | |
| | 2809 | /* |
| | 2810 | * Transmit a management frame. On failure we reclaim the skbuff. |
| | 2811 | * Note that management frames come directly from the 802.11 layer |
| | 2812 | * and do not honor the send queue flow control. Need to investigate |
| | 2813 | * using priority queuing so management frames can bypass data. |
| | 2814 | * |
| | 2815 | * Context: hwIRQ and softIRQ |
| | 2816 | */ |
static int
ath_mgtstart(struct ieee80211com *ic, struct sk_buff *skb)
{
	struct net_device *dev = ic->ic_dev;
	struct ath_softc *sc = dev->priv;
	struct ieee80211_node *ni = NULL;
	struct ath_buf *bf = NULL;
	struct ieee80211_cb *cb;
	int error;

	if ((dev->flags & IFF_RUNNING) == 0 || sc->sc_invalid) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
			"%s: discard, invalid %d flags %x\n",
			__func__, sc->sc_invalid, dev->flags);
		sc->sc_stats.ast_tx_invalid++;
		error = -ENETDOWN;
		goto bad;
	}
	/*
	 * Grab a TX buffer and associated resources.
	 * NB: unlike the data path, management frames take the last
	 * buffer unconditionally (flow control is not honored here).
	 */
	ATH_TXBUF_LOCK_IRQ(sc);
	bf = STAILQ_FIRST(&sc->sc_txbuf);
	if (bf != NULL)
		STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
	if (STAILQ_EMPTY(&sc->sc_txbuf)) {
		/* Free list drained: stop the data queue and kick the TX
		 * tasklet so completed buffers get reclaimed. */
		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
		sc->sc_stats.ast_tx_qstop++;
		netif_stop_queue(dev);
		sc->sc_devstopped=1;
		ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, NULL);
	}
	ATH_TXBUF_UNLOCK_IRQ(sc);
	if (bf == NULL) {
		printk("ath_mgtstart: discard, no xmit buf\n");
		sc->sc_stats.ast_tx_nobufmgt++;
		error = -ENOBUFS;
		goto bad;
	}

	/*
	 * NB: the referenced node pointer is in the
	 * control block of the sk_buff. This is
	 * placed there by ieee80211_mgmt_output because
	 * we need to hold the reference with the frame.
	 */
	cb = (struct ieee80211_cb *)skb->cb;
	ni = cb->ni;
	error = ath_tx_start(dev, ni, bf, skb, 0);
	if (error == 0) {
		sc->sc_stats.ast_tx_mgmt++;
		return 0;
	}
	/* fall thru... */
bad:
	/* Failure: drop the node reference, return the buffer to the
	 * free list and free the skb (caller does not reclaim it). */
	if (ni != NULL)
		ieee80211_free_node(ni);
	if (bf != NULL) {
		bf->bf_skb = NULL;
		bf->bf_node = NULL;

		ATH_TXBUF_LOCK_IRQ(sc);
		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
		ATH_TXBUF_UNLOCK_IRQ(sc);
	}
	dev_kfree_skb_any(skb);
	skb = NULL;
	return error;
}
| | 2886 | |
| | 2887 | #ifdef AR_DEBUG |
| | 2888 | static void |
| | 2889 | ath_keyprint(struct ath_softc *sc, const char *tag, u_int ix, |
| | 2890 | const HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN]) |
| | 2891 | { |
| | 2892 | static const char *ciphers[] = { |
| | 2893 | "WEP", |
| | 2894 | "AES-OCB", |
| | 2895 | "AES-CCM", |
| | 2896 | "CKIP", |
| | 2897 | "TKIP", |
| | 2898 | "CLR", |
| | 2899 | }; |
| | 2900 | int i, n; |
| | 2901 | |
| | 2902 | printk("%s: [%02u] %-7s ", tag, ix, ciphers[hk->kv_type]); |
| | 2903 | for (i = 0, n = hk->kv_len; i < n; i++) |
| | 2904 | printk("%02x", hk->kv_val[i]); |
| | 2905 | printk(" mac %s", ether_sprintf(mac)); |
| | 2906 | if (hk->kv_type == HAL_CIPHER_TKIP) { |
| | 2907 | printk(" %s ", sc->sc_splitmic ? "mic" : "rxmic"); |
| | 2908 | for (i = 0; i < sizeof(hk->kv_mic); i++) |
| | 2909 | printk("%02x", hk->kv_mic[i]); |
| | 2910 | #if HAL_ABI_VERSION > 0x06052200 |
| | 2911 | if (!sc->sc_splitmic) { |
| | 2912 | printk(" txmic "); |
| | 2913 | for (i = 0; i < sizeof(hk->kv_txmic); i++) |
| | 2914 | printk("%02x", hk->kv_txmic[i]); |
| | 2915 | } |
| | 2916 | #endif |
| | 2917 | } |
| | 2918 | printk("\n"); |
| | 2919 | } |
| | 2920 | #endif |
| | 2921 | |
| | 2922 | /* |
| | 2923 | * Set a TKIP key into the hardware. This handles the |
| | 2924 | * potential distribution of key state to multiple key |
| | 2925 | * cache slots for TKIP. |
| | 2926 | */ |
static int
ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k,
	HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
{
#define	IEEE80211_KEY_XR	(IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV)
	static const u_int8_t zerobssid[IEEE80211_ADDR_LEN];
	struct ath_hal *ah = sc->sc_ah;

	KASSERT(k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP,
		("got a non-TKIP key, cipher %u", k->wk_cipher->ic_cipher));
	/* Key is used for both transmit and receive. */
	if ((k->wk_flags & IEEE80211_KEY_XR) == IEEE80211_KEY_XR) {
		if (sc->sc_splitmic) {
			/*
			 * TX key goes at first index, RX key at the rx index.
			 * The HAL handles the MIC keys at index+64.
			 */
			memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_mic));
			KEYPRINTF(sc, k->wk_keyix, hk, zerobssid);
			if (!ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid))
				return 0;

			/* Re-use hk for the RX half: swap in the RX MIC
			 * and install at keyix+32 with the real MAC. */
			memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
			KEYPRINTF(sc, k->wk_keyix+32, hk, mac);
			/* XXX delete tx key on failure? */
			return ath_hal_keyset(ah, k->wk_keyix+32, hk, mac);
		} else {
			/*
			 * Room for both TX+RX MIC keys in one key cache
			 * slot, just set key at the first index; the HAL
			 * will handle the rest.
			 */
			memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
#if HAL_ABI_VERSION > 0x06052200
			memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic));
#endif
			KEYPRINTF(sc, k->wk_keyix, hk, mac);
			return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
		}
	} else if (k->wk_flags & IEEE80211_KEY_XR) {
		/*
		 * TX/RX key goes at first index.
		 * The HAL handles the MIC keys at index+64.
		 */
		memcpy(hk->kv_mic, k->wk_flags & IEEE80211_KEY_XMIT ?
			k->wk_txmic : k->wk_rxmic, sizeof(hk->kv_mic));
		KEYPRINTF(sc, k->wk_keyix, hk, mac);
		return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
	}
	/* NB: a key with neither XMIT nor RECV set is not installed. */
	return 0;
#undef IEEE80211_KEY_XR
}
| | 2978 | |
| | 2979 | /* |
| | 2980 | * Set a net80211 key into the hardware. This handles the |
| | 2981 | * potential distribution of key state to multiple key |
| | 2982 | * cache slots for TKIP with hardware MIC support. |
| | 2983 | */ |
static int
ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
	const u_int8_t mac0[IEEE80211_ADDR_LEN],
	struct ieee80211_node *bss)
{
#define	N(a)	((int)(sizeof(a)/sizeof(a[0])))
	/* net80211 cipher number -> HAL cipher number */
	static const u_int8_t ciphermap[] = {
		HAL_CIPHER_WEP,		/* IEEE80211_CIPHER_WEP */
		HAL_CIPHER_TKIP,	/* IEEE80211_CIPHER_TKIP */
		HAL_CIPHER_AES_OCB,	/* IEEE80211_CIPHER_AES_OCB */
		HAL_CIPHER_AES_CCM,	/* IEEE80211_CIPHER_AES_CCM */
		(u_int8_t) -1,		/* 4 is not allocated */
		HAL_CIPHER_CKIP,	/* IEEE80211_CIPHER_CKIP */
		HAL_CIPHER_CLR,		/* IEEE80211_CIPHER_NONE */
	};
	struct ath_hal *ah = sc->sc_ah;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	u_int8_t gmac[IEEE80211_ADDR_LEN];
	const u_int8_t *mac;
	HAL_KEYVAL hk;

	memset(&hk, 0, sizeof(hk));
	/*
	 * Software crypto uses a "clear key" so non-crypto
	 * state kept in the key cache is maintained and
	 * so that rx frames have an entry to match.
	 */
	if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
		KASSERT(cip->ic_cipher < N(ciphermap),
			("invalid cipher type %u", cip->ic_cipher));
		hk.kv_type = ciphermap[cip->ic_cipher];
		hk.kv_len = k->wk_keylen;
		memcpy(hk.kv_val, k->wk_key, k->wk_keylen);
	} else
		hk.kv_type = HAL_CIPHER_CLR;

	if ((k->wk_flags & IEEE80211_KEY_GROUP) && sc->sc_mcastkey) {
		/*
		 * Group keys on hardware that supports multicast frame
		 * key search use a mac that is the sender's address with
		 * the high bit set instead of the app-specified address.
		 */
		IEEE80211_ADDR_COPY(gmac, bss->ni_macaddr);
		gmac[0] |= 0x80;
		mac = gmac;
	} else
		mac = mac0;

	/* Hardware TKIP with hardware MIC needs the split-slot setup;
	 * everything else is a single key cache entry. */
	if (hk.kv_type == HAL_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
		return ath_keyset_tkip(sc, k, &hk, mac);
	} else {
		KEYPRINTF(sc, k->wk_keyix, &hk, mac);
		return ath_hal_keyset(ah, k->wk_keyix, &hk, mac);
	}
#undef N
}
| | 3041 | |
| | 3042 | /* |
| | 3043 | * Allocate tx/rx key slots for TKIP. We allocate two slots for |
| | 3044 | * each key, one for decrypt/encrypt and the other for the MIC. |
| | 3045 | */ |
static u_int16_t
key_alloc_2pair(struct ath_softc *sc)
{
#define	N(a)	((int)(sizeof(a)/sizeof(a[0])))
	u_int i, keyix;

	KASSERT(sc->sc_splitmic, ("key cache !split"));
	/* XXX could optimize */
	/* NB: only the first quarter of the bitmap is scanned, since a
	 * free slot must have its +32, +64 and +32+64 partners free too. */
	for (i = 0; i < N(sc->sc_keymap) / 4; i++) {
		u_int8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots in this byte are free.
			 */
			keyix = i * NBBY;
			while (b & 1) {
		again:
				keyix++;
				b >>= 1;
			}
			/* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
			if (isset(sc->sc_keymap, keyix + 32) ||
			    isset(sc->sc_keymap, keyix + 64) ||
			    isset(sc->sc_keymap, keyix + 32 + 64)) {
				/* full pair unavailable */
				/* XXX statistic */
				if (keyix == (i + 1) * NBBY) {
					/* no slots were appropriate, advance */
					continue;
				}
				/* NB: jumps back into the scan loop to try
				 * the next bit of this byte. */
				goto again;
			}
			/* Claim all four slots of the double pair. */
			setbit(sc->sc_keymap, keyix);
			setbit(sc->sc_keymap, keyix + 64);
			setbit(sc->sc_keymap, keyix + 32);
			setbit(sc->sc_keymap, keyix + 32 + 64);
			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
				"%s: key pair %u,%u %u,%u\n",
				__func__, keyix, keyix + 64,
				keyix + 32, keyix + 32 + 64);
			return keyix;
		}
	}
	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
	return IEEE80211_KEYIX_NONE;
#undef N
}
| | 3093 | |
| | 3094 | /* |
| | 3095 | * Allocate tx/rx key slots for TKIP. We allocate two slots for |
| | 3096 | * each key, one for decrypt/encrypt and the other for the MIC. |
| | 3097 | */ |
static u_int16_t
key_alloc_pair(struct ath_softc *sc)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	u_int i, keyix;

	KASSERT(!sc->sc_splitmic, ("key cache split"));
	/* XXX could optimize */
	/* NB: only the first quarter of the bitmap is scanned, since a
	 * free slot must have its +64 MIC partner free too. */
	for (i = 0; i < N(sc->sc_keymap)/4; i++) {
		u_int8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots in this byte are free.
			 */
			keyix = i*NBBY;
			while (b & 1) {
		again:
				keyix++;
				b >>= 1;
			}
			if (isset(sc->sc_keymap, keyix+64)) {
				/* full pair unavailable */
				/* XXX statistic */
				if (keyix == (i+1)*NBBY) {
					/* no slots were appropriate, advance */
					continue;
				}
				/* NB: jumps back into the scan loop to try
				 * the next bit of this byte. */
				goto again;
			}
			/* Claim both slots of the key/MIC pair. */
			setbit(sc->sc_keymap, keyix);
			setbit(sc->sc_keymap, keyix+64);
			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
				"%s: key pair %u,%u\n",
				__func__, keyix, keyix+64);
			return keyix;
		}
	}
	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
	return IEEE80211_KEYIX_NONE;
#undef N
}
| | 3139 | |
| | 3140 | /* |
| | 3141 | * Allocate a single key cache slot. |
| | 3142 | */ |
| | 3143 | static u_int16_t |
| | 3144 | key_alloc_single(struct ath_softc *sc) |
| | 3145 | { |
| | 3146 | #define N(a) ((int)(sizeof(a)/sizeof(a[0]))) |
| | 3147 | u_int i, keyix; |
| | 3148 | |
| | 3149 | /* XXX try i,i+32,i+64,i+32+64 to minimize key pair conflicts */ |
| | 3150 | for (i = 0; i < N(sc->sc_keymap); i++) { |
| | 3151 | u_int8_t b = sc->sc_keymap[i]; |
| | 3152 | if (b != 0xff) { |
| | 3153 | /* |
| | 3154 | * One or more slots are free. |
| | 3155 | */ |
| | 3156 | keyix = i * NBBY; |
| | 3157 | while (b & 1) |
| | 3158 | keyix++, b >>= 1; |
| | 3159 | setbit(sc->sc_keymap, keyix); |
| | 3160 | DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key %u\n", |
| | 3161 | __func__, keyix); |
| | 3162 | return keyix; |
| | 3163 | } |
| | 3164 | } |
| | 3165 | DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of space\n", __func__); |
| | 3166 | return IEEE80211_KEYIX_NONE; |
| | 3167 | #undef N |
| | 3168 | } |
| | 3169 | |
| | 3170 | /* |
| | 3171 | * Allocate one or more key cache slots for a unicast key. The |
| | 3172 | * key itself is needed only to identify the cipher. For hardware |
| | 3173 | * TKIP with split cipher+MIC keys we allocate two key cache slot |
| | 3174 | * pairs so that we can setup separate TX and RX MIC keys. Note |
| | 3175 | * that the MIC key for a TKIP key at slot i is assumed by the |
| | 3176 | * hardware to be at slot i+64. This limits TKIP keys to the first |
| | 3177 | * 64 entries. |
| | 3178 | */ |
| | 3179 | static int |
| | 3180 | ath_key_alloc(struct ieee80211vap *vap, const struct ieee80211_key *k) |
| | 3181 | { |
| | 3182 | struct net_device *dev = vap->iv_ic->ic_dev; |
| | 3183 | struct ath_softc *sc = dev->priv; |
| | 3184 | |
| | 3185 | /* |
| | 3186 | * Group key allocation must be handled specially for |
| | 3187 | * parts that do not support multicast key cache search |
| | 3188 | * functionality. For those parts the key id must match |
| | 3189 | * the h/w key index so lookups find the right key. On |
| | 3190 | * parts w/ the key search facility we install the sender's |
| | 3191 | * mac address (with the high bit set) and let the hardware |
| | 3192 | * find the key w/o using the key id. This is preferred as |
| | 3193 | * it permits us to support multiple users for adhoc and/or |
| | 3194 | * multi-station operation. |
| | 3195 | */ |
| | 3196 | if ((k->wk_flags & IEEE80211_KEY_GROUP) && !sc->sc_mcastkey) { |
| | 3197 | u_int keyix; |
| | 3198 | |
| | 3199 | if (!(&vap->iv_nw_keys[0] <= k && |
| | 3200 | k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) { |
| | 3201 | /* should not happen */ |
| | 3202 | DPRINTF(sc, ATH_DEBUG_KEYCACHE, |
| | 3203 | "%s: bogus group key\n", __func__); |
| | 3204 | return IEEE80211_KEYIX_NONE; |
| | 3205 | } |
| | 3206 | keyix = k - vap->iv_nw_keys; |
| | 3207 | /* |
| | 3208 | * XXX we pre-allocate the global keys so |
| | 3209 | * have no way to check if they've already been allocated. |
| | 3210 | */ |
| | 3211 | return keyix; |
| | 3212 | } |
| | 3213 | /* |
| | 3214 | * We allocate two pair for TKIP when using the h/w to do |
| | 3215 | * the MIC. For everything else, including software crypto, |
| | 3216 | * we allocate a single entry. Note that s/w crypto requires |
| | 3217 | * a pass-through slot on the 5211 and 5212. The 5210 does |
| | 3218 | * not support pass-through cache entries and we map all |
| | 3219 | * those requests to slot 0. |
| | 3220 | * |
| | 3221 | * Allocate 1 pair of keys for WEP case. Make sure the key |
| | 3222 | * is not a shared-key. |
| | 3223 | */ |
| | 3224 | if (k->wk_flags & IEEE80211_KEY_SWCRYPT) |
| | 3225 | return key_alloc_single(sc); |
| | 3226 | else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP && |
| | 3227 | (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) { |
| | 3228 | if (sc->sc_splitmic) |
| | 3229 | return key_alloc_2pair(sc); |
| | 3230 | else |
| | 3231 | return key_alloc_pair(sc); |
| | 3232 | } else |
| | 3233 | return key_alloc_single(sc); |
| | 3234 | } |
| | 3235 | |
| | 3236 | /* |
| | 3237 | * Delete an entry in the key cache allocated by ath_key_alloc. |
| | 3238 | */ |
| | 3239 | static int |
| | 3240 | ath_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k, |
| | 3241 | struct ieee80211_node *ninfo) |
| | 3242 | { |
| | 3243 | struct net_device *dev = vap->iv_ic->ic_dev; |
| | 3244 | struct ath_softc *sc = dev->priv; |
| | 3245 | struct ath_hal *ah = sc->sc_ah; |
| | 3246 | const struct ieee80211_cipher *cip = k->wk_cipher; |
| | 3247 | struct ieee80211_node *ni; |
| | 3248 | u_int keyix = k->wk_keyix; |
| | 3249 | int rxkeyoff = 0; |
| | 3250 | |
| | 3251 | DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, keyix); |
| | 3252 | |
| | 3253 | ath_hal_keyreset(ah, keyix); |
| | 3254 | /* |
| | 3255 | * Check the key->node map and flush any ref. |
| | 3256 | */ |
| | 3257 | ni = sc->sc_keyixmap[keyix]; |
| | 3258 | if (ni != NULL) { |
| | 3259 | ieee80211_free_node(ni); |
| | 3260 | sc->sc_keyixmap[keyix] = NULL; |
| | 3261 | } |
| | 3262 | /* |
| | 3263 | * Handle split tx/rx keying required for TKIP with h/w MIC. |
| | 3264 | */ |
| | 3265 | if (cip->ic_cipher == IEEE80211_CIPHER_TKIP && |
| | 3266 | (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic) { |
| | 3267 | ath_hal_keyreset(ah, keyix + 32); /* RX key */ |
| | 3268 | ni = sc->sc_keyixmap[keyix + 32]; |
| | 3269 | if (ni != NULL) { /* as above... */ |
| | 3270 | ieee80211_free_node(ni); |
| | 3271 | sc->sc_keyixmap[keyix + 32] = NULL; |
| | 3272 | } |
| | 3273 | } |
| | 3274 | |
| | 3275 | /* Remove receive key entry if one exists for static WEP case */ |
| | 3276 | if (ninfo != NULL) { |
| | 3277 | rxkeyoff = ninfo->ni_rxkeyoff; |
| | 3278 | if (rxkeyoff != 0) { |
| | 3279 | ninfo->ni_rxkeyoff = 0; |
| | 3280 | ath_hal_keyreset(ah, keyix + rxkeyoff); |
| | 3281 | ni = sc->sc_keyixmap[keyix + rxkeyoff]; |
| | 3282 | if (ni != NULL) { /* as above... */ |
| | 3283 | ieee80211_free_node(ni); |
| | 3284 | sc->sc_keyixmap[keyix + rxkeyoff] = NULL; |
| | 3285 | } |
| | 3286 | } |
| | 3287 | } |
| | 3288 | |
| | 3289 | if (keyix >= IEEE80211_WEP_NKID) { |
| | 3290 | /* |
| | 3291 | * Don't touch keymap entries for global keys so |
| | 3292 | * they are never considered for dynamic allocation. |
| | 3293 | */ |
| | 3294 | clrbit(sc->sc_keymap, keyix); |
| | 3295 | if (cip->ic_cipher == IEEE80211_CIPHER_TKIP && |
| | 3296 | (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) { |
| | 3297 | clrbit(sc->sc_keymap, keyix + 64); /* TX key MIC */ |
| | 3298 | if (sc->sc_splitmic) { |
| | 3299 | /* +32 for RX key, +32+64 for RX key MIC */ |
| | 3300 | clrbit(sc->sc_keymap, keyix+32); |
| | 3301 | clrbit(sc->sc_keymap, keyix+32+64); |
| | 3302 | } |
| | 3303 | } |
| | 3304 | |
| | 3305 | if (rxkeyoff != 0) |
| | 3306 | clrbit(sc->sc_keymap, keyix + rxkeyoff);/*RX Key */ |
| | 3307 | } |
| | 3308 | return 1; |
| | 3309 | } |
| | 3310 | |
| | 3311 | /* |
| | 3312 | * Set the key cache contents for the specified key. Key cache |
| | 3313 | * slot(s) must already have been allocated by ath_key_alloc. |
| | 3314 | */ |
| | 3315 | static int |
| | 3316 | ath_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k, |
| | 3317 | const u_int8_t mac[IEEE80211_ADDR_LEN]) |
| | 3318 | { |
| | 3319 | struct net_device *dev = vap->iv_ic->ic_dev; |
| | 3320 | struct ath_softc *sc = dev->priv; |
| | 3321 | |
| | 3322 | return ath_keyset(sc, k, mac, vap->iv_bss); |
| | 3323 | } |
| | 3324 | |
| | 3325 | /* |
| | 3326 | * Block/unblock tx+rx processing while a key change is done. |
| | 3327 | * We assume the caller serializes key management operations |
| | 3328 | * so we only need to worry about synchronization with other |
| | 3329 | * uses that originate in the driver. |
| | 3330 | */ |
| | 3331 | static void |
| | 3332 | ath_key_update_begin(struct ieee80211vap *vap) |
| | 3333 | { |
| | 3334 | struct net_device *dev = vap->iv_ic->ic_dev; |
| | 3335 | struct ath_softc *sc = dev->priv; |
| | 3336 | |
| | 3337 | DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); |
| | 3338 | /* |
| | 3339 | * When called from the rx tasklet we cannot use |
| | 3340 | * tasklet_disable because it will block waiting |
| | 3341 | * for us to complete execution. |
| | 3342 | * |
| | 3343 | * XXX Using in_softirq is not right since we might |
| | 3344 | * be called from other soft irq contexts than |
| | 3345 | * ath_rx_tasklet. |
| | 3346 | */ |
| | 3347 | if (!in_softirq()) |
| | 3348 | tasklet_disable(&sc->sc_rxtq); |
| | 3349 | netif_stop_queue(dev); |
| | 3350 | } |
| | 3351 | |
| | 3352 | static void |
| | 3353 | ath_key_update_end(struct ieee80211vap *vap) |
| | 3354 | { |
| | 3355 | struct net_device *dev = vap->iv_ic->ic_dev; |
| | 3356 | struct ath_softc *sc = dev->priv; |
| | 3357 | |
| | 3358 | DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); |
| | 3359 | netif_start_queue(dev); |
| | 3360 | if (!in_softirq()) /* NB: see above */ |
| | 3361 | tasklet_enable(&sc->sc_rxtq); |
| | 3362 | } |
| | 3363 | |
| | 3364 | /* |
| | 3365 | * Calculate the receive filter according to the |
| | 3366 | * operating mode and state: |
| | 3367 | * |
| | 3368 | * o always accept unicast, broadcast, and multicast traffic |
| | 3369 | * o maintain current state of phy error reception (the HAL |
| | 3370 | * may enable phy error frames for noise immunity work) |
| | 3371 | * o probe request frames are accepted only when operating in |
| | 3372 | * hostap, adhoc, or monitor modes |
| | 3373 | * o enable promiscuous mode according to the interface state |
| | 3374 | * o accept beacons: |
| | 3375 | * - when operating in adhoc mode so the 802.11 layer creates |
| | 3376 | * node table entries for peers, |
| | 3377 | * - when operating in station mode for collecting rssi data when |
| | 3378 | * the station is otherwise quiet, or |
| | 3379 | * - when operating as a repeater so we see repeater-sta beacons |
| | 3380 | * - when scanning |
| | 3381 | */ |
| | 3382 | static u_int32_t |
| | 3383 | ath_calcrxfilter(struct ath_softc *sc) |
| | 3384 | { |
| | 3385 | #define RX_FILTER_PRESERVE (HAL_RX_FILTER_PHYERR | HAL_RX_FILTER_PHYRADAR) |
| | 3386 | struct ieee80211com *ic = &sc->sc_ic; |
| | 3387 | struct net_device *dev = ic->ic_dev; |
| | 3388 | struct ath_hal *ah = sc->sc_ah; |
| | 3389 | u_int32_t rfilt; |
| | 3390 | |
| | 3391 | rfilt = (ath_hal_getrxfilter(ah) & RX_FILTER_PRESERVE) | |
| | 3392 | HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | |
| | 3393 | HAL_RX_FILTER_MCAST; |
| | 3394 | if (ic->ic_opmode != IEEE80211_M_STA) |
| | 3395 | rfilt |= HAL_RX_FILTER_PROBEREQ; |
| | 3396 | if (ic->ic_opmode != IEEE80211_M_HOSTAP && (dev->flags & IFF_PROMISC)) |
| | 3397 | rfilt |= HAL_RX_FILTER_PROM; |
| | 3398 | if (ic->ic_opmode == IEEE80211_M_STA || |
| | 3399 | sc->sc_opmode == HAL_M_IBSS || /* NB: AHDEMO too */ |
| | 3400 | (sc->sc_nostabeacons) || sc->sc_scanning) |
| | 3401 | rfilt |= HAL_RX_FILTER_BEACON; |
| | 3402 | if (sc->sc_nmonvaps > 0) |
| | 3403 | rfilt |= (HAL_RX_FILTER_CONTROL | HAL_RX_FILTER_BEACON | |
| | 3404 | HAL_RX_FILTER_PROBEREQ | HAL_RX_FILTER_PROM); |
| | 3405 | return rfilt; |
| | 3406 | #undef RX_FILTER_PRESERVE |
| | 3407 | } |
| | 3408 | |
| | 3409 | /* |
| | 3410 | * Merge multicast addresses from all VAPs to form the |
| | 3411 | * hardware filter. Ideally we should only inspect our |
| | 3412 | * own list and the 802.11 layer would merge for us but |
| | 3413 | * that's a bit difficult so for now we put the onus on |
| | 3414 | * the driver. |
| | 3415 | */ |
| | 3416 | static void |
| | 3417 | ath_merge_mcast(struct ath_softc *sc, u_int32_t mfilt[2]) |
| | 3418 | { |
| | 3419 | struct ieee80211com *ic = &sc->sc_ic; |
| | 3420 | struct ieee80211vap *vap; |
| | 3421 | struct dev_mc_list *mc; |
| | 3422 | u_int32_t val; |
| | 3423 | u_int8_t pos; |
| | 3424 | |
| | 3425 | mfilt[0] = mfilt[1] = 0; |
| | 3426 | /* XXX locking */ |
| | 3427 | TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { |
| | 3428 | struct net_device *dev = vap->iv_dev; |
| | 3429 | for (mc = dev->mc_list; mc; mc = mc->next) { |
| | 3430 | /* calculate XOR of eight 6-bit values */ |
| | 3431 | val = LE_READ_4(mc->dmi_addr + 0); |
| | 3432 | pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; |
| | 3433 | val = LE_READ_4(mc->dmi_addr + 3); |
| | 3434 | pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; |
| | 3435 | pos &= 0x3f; |
| | 3436 | mfilt[pos / 32] |= (1 << (pos % 32)); |
| | 3437 | } |
| | 3438 | } |
| | 3439 | } |
| | 3440 | |
| | 3441 | static void |
| | 3442 | ath_mode_init(struct net_device *dev) |
| | 3443 | { |
| | 3444 | struct ath_softc *sc = dev->priv; |
| | 3445 | struct ath_hal *ah = sc->sc_ah; |
| | 3446 | u_int32_t rfilt, mfilt[2]; |
| | 3447 | |
| | 3448 | /* configure rx filter */ |
| | 3449 | rfilt = ath_calcrxfilter(sc); |
| | 3450 | ath_hal_setrxfilter(ah, rfilt); |
| | 3451 | |
| | 3452 | /* configure bssid mask */ |
| | 3453 | if (sc->sc_hasbmask) |
| | 3454 | ath_hal_setbssidmask(ah, sc->sc_bssidmask); |
| | 3455 | |
| | 3456 | /* configure operational mode */ |
| | 3457 | ath_hal_setopmode(ah); |
| | 3458 | |
| | 3459 | /* calculate and install multicast filter */ |
| | 3460 | if ((dev->flags & IFF_ALLMULTI) == 0) |
| | 3461 | ath_merge_mcast(sc, mfilt); |
| | 3462 | else |
| | 3463 | mfilt[0] = mfilt[1] = ~0; |
| | 3464 | ath_hal_setmcastfilter(ah, mfilt[0], mfilt[1]); |
| | 3465 | DPRINTF(sc, ATH_DEBUG_STATE, |
| | 3466 | "%s: RX filter 0x%x, MC filter %08x:%08x\n", |
| | 3467 | __func__, rfilt, mfilt[0], mfilt[1]); |
| | 3468 | } |
| | 3469 | |
| | 3470 | /* |
| | 3471 | * Set the slot time based on the current setting. |
| | 3472 | */ |
| | 3473 | static void |
| | 3474 | ath_setslottime(struct ath_softc *sc) |
| | 3475 | { |
| | 3476 | struct ieee80211com *ic = &sc->sc_ic; |
| | 3477 | struct ath_hal *ah = sc->sc_ah; |
| | 3478 | |
| | 3479 | if (sc->sc_slottimeconf > 0) /* manual override */ |
| | 3480 | ath_hal_setslottime(ah, sc->sc_slottimeconf); |
| | 3481 | else if (ic->ic_flags & IEEE80211_F_SHSLOT) |
| | 3482 | ath_hal_setslottime(ah, HAL_SLOT_TIME_9); |
| | 3483 | else |
| | 3484 | ath_hal_setslottime(ah, HAL_SLOT_TIME_20); |
| | 3485 | sc->sc_updateslot = OK; |
| | 3486 | } |
| | 3487 | |
| | 3488 | /* |
| | 3489 | * Callback from the 802.11 layer to update the |
| | 3490 | * slot time based on the current setting. |
| | 3491 | */ |
| | 3492 | static void |
| | 3493 | ath_updateslot(struct net_device *dev) |
| | 3494 | { |
| | 3495 | struct ath_softc *sc = dev->priv; |
| | 3496 | struct ieee80211com *ic = &sc->sc_ic; |
| | 3497 | |
| | 3498 | /* |
| | 3499 | * When not coordinating the BSS, change the hardware |
| | 3500 | * immediately. For other operation we defer the change |
| | 3501 | * until beacon updates have propagated to the stations. |
| | 3502 | */ |
| | 3503 | if (ic->ic_opmode == IEEE80211_M_HOSTAP) |
| | 3504 | sc->sc_updateslot = UPDATE; |
| | 3505 | else if (dev->flags & IFF_RUNNING) |
| | 3506 | ath_setslottime(sc); |
| | 3507 | } |
| | 3508 | |
| | 3509 | #ifdef ATH_SUPERG_DYNTURBO |
| | 3510 | /* |
| | 3511 | * Dynamic turbo support. |
| | 3512 | * XXX much of this could be moved up to the net80211 layer. |
| | 3513 | */ |
| | 3514 | |
| | 3515 | /* |
| | 3516 | * Configure dynamic turbo state on beacon setup. |
| | 3517 | */ |
| | 3518 | static void |
| | 3519 | ath_beacon_dturbo_config(struct ieee80211vap *vap, u_int32_t intval) |
| | 3520 | { |
| | 3521 | #define IS_CAPABLE(vap) \ |
| | 3522 | (vap->iv_bss && (vap->iv_bss->ni_ath_flags & (IEEE80211_ATHC_TURBOP )) == \ |
| | 3523 | (IEEE80211_ATHC_TURBOP)) |
| | 3524 | struct ieee80211com *ic = vap->iv_ic; |
| | 3525 | struct ath_softc *sc = ic->ic_dev->priv; |
| | 3526 | |
| | 3527 | if (ic->ic_opmode == IEEE80211_M_HOSTAP && IS_CAPABLE(vap)) { |
| | 3528 | |
| | 3529 | /* Dynamic Turbo is supported on this channel. */ |
| | 3530 | sc->sc_dturbo = 1; |
| | 3531 | sc->sc_dturbo_tcount = 0; |
| | 3532 | sc->sc_dturbo_switch = 0; |
| | 3533 | sc->sc_ignore_ar = 0; |
| | 3534 | |
| | 3535 | /* Set the initial ATHC_BOOST capability. */ |
| | 3536 | if (ic->ic_bsschan->ic_flags & CHANNEL_TURBO) |
| | 3537 | ic->ic_ath_cap |= IEEE80211_ATHC_BOOST; |
| | 3538 | else |
| | 3539 | ic->ic_ath_cap &= ~IEEE80211_ATHC_BOOST; |
| | 3540 | |
| | 3541 | /* |
| | 3542 | * Calculate time & bandwidth thresholds |
| | 3543 | * |
| | 3544 | * sc_dturbo_base_tmin : ~70 seconds |
| | 3545 | * sc_dturbo_turbo_tmax : ~120 seconds |
| | 3546 | * |
| | 3547 | * NB: scale calculated values to account for staggered |
| | 3548 | * beacon handling |
| | 3549 | */ |
| | 3550 | sc->sc_dturbo_base_tmin = 70 * 1024 / ic->ic_lintval; |
| | 3551 | sc->sc_dturbo_turbo_tmax = 120 * 1024 / ic->ic_lintval; |
| | 3552 | sc->sc_dturbo_turbo_tmin = 5 * 1024 / ic->ic_lintval; |
| | 3553 | /* convert the thresholds from BW/sec to BW/beacon period */ |
| | 3554 | sc->sc_dturbo_bw_base = ATH_TURBO_DN_THRESH/(1024/ic->ic_lintval); |
| | 3555 | sc->sc_dturbo_bw_turbo = ATH_TURBO_UP_THRESH/(1024/ic->ic_lintval); |
| | 3556 | /* time in hold state in number of beacon */ |
| | 3557 | sc->sc_dturbo_hold_max = (ATH_TURBO_PERIOD_HOLD * 1024)/ic->ic_lintval; |
| | 3558 | } else { |
| | 3559 | sc->sc_dturbo = 0; |
| | 3560 | ic->ic_ath_cap &= ~IEEE80211_ATHC_BOOST; |
| | 3561 | } |
| | 3562 | #undef IS_CAPABLE |
| | 3563 | } |
| | 3564 | |
| | 3565 | /* |
| | 3566 | * Update dynamic turbo state at SWBA. We assume we care |
| | 3567 | * called only if dynamic turbo has been enabled (sc_turbo). |
| | 3568 | */ |
| | 3569 | static void |
| | 3570 | ath_beacon_dturbo_update(struct ieee80211vap *vap, int *needmark,u_int8_t dtim) |
| | 3571 | { |
| | 3572 | struct ieee80211com *ic = vap->iv_ic; |
| | 3573 | struct ath_softc *sc = ic->ic_dev->priv; |
| | 3574 | u_int32_t bss_traffic; |
| | 3575 | |
| | 3576 | /* TBD: Age out CHANNEL_INTERFERENCE */ |
| | 3577 | if (sc->sc_ignore_ar) { |
| | 3578 | /* |
| | 3579 | * Ignore AR for this beacon; a dynamic turbo |
| | 3580 | * switch just happened and the information |
| | 3581 | * is invalid. Notify AR support of the channel |
| | 3582 | * change. |
| | 3583 | */ |
| | 3584 | sc->sc_ignore_ar = 0; |
| | 3585 | ath_hal_ar_enable(sc->sc_ah); |
| | 3586 | } |
| | 3587 | sc->sc_dturbo_tcount++; |
| | 3588 | /* |
| | 3589 | * Calculate BSS traffic over the previous interval. |
| | 3590 | */ |
| | 3591 | bss_traffic = (sc->sc_devstats.tx_bytes + sc->sc_devstats.rx_bytes) |
| | 3592 | - sc->sc_dturbo_bytes; |
| | 3593 | sc->sc_dturbo_bytes = sc->sc_devstats.tx_bytes |
| | 3594 | + sc->sc_devstats.rx_bytes; |
| | 3595 | if (ic->ic_ath_cap & IEEE80211_ATHC_BOOST) { |
| | 3596 | /* |
| | 3597 | * before switching to base mode, |
| | 3598 | * make sure that the conditions( low rssi, low bw) to switch mode |
| | 3599 | * hold for some time and time in turbo exceeds minimum turbo time. |
| | 3600 | */ |
| | 3601 | |
| | 3602 | if (sc->sc_dturbo_tcount >= sc->sc_dturbo_turbo_tmin && |
| | 3603 | sc->sc_dturbo_hold ==0 && |
| | 3604 | (bss_traffic < sc->sc_dturbo_bw_base || !sc->sc_rate_recn_state)) { |
| | 3605 | sc->sc_dturbo_hold = 1; |
| | 3606 | } else { |
| | 3607 | if (sc->sc_dturbo_hold && |
| | 3608 | bss_traffic >= sc->sc_dturbo_bw_turbo && sc->sc_rate_recn_state) { |
| | 3609 | /* out of hold state */ |
| | 3610 | sc->sc_dturbo_hold = 0; |
| | 3611 | sc->sc_dturbo_hold_count = sc->sc_dturbo_hold_max; |
| | 3612 | } |
| | 3613 | } |
| | 3614 | if (sc->sc_dturbo_hold && sc->sc_dturbo_hold_count) |
| | 3615 | sc->sc_dturbo_hold_count--; |
| | 3616 | /* |
| | 3617 | * Current Mode: Turbo (i.e. BOOST) |
| | 3618 | * |
| | 3619 | * Transition to base occurs when one of the following |
| | 3620 | * is true: |
| | 3621 | * 1. its a DTIM beacon. |
| | 3622 | * 2. Maximum time in BOOST has elapsed (120 secs). |
| | 3623 | * 3. Channel is marked with interference |
| | 3624 | * 4. Average BSS traffic falls below 4Mbps |
| | 3625 | * 5. RSSI cannot support at least 18 Mbps rate |
| | 3626 | * XXX do bw checks at true beacon interval? |
| | 3627 | */ |
| | 3628 | if (dtim && |
| | 3629 | (sc->sc_dturbo_tcount >= sc->sc_dturbo_turbo_tmax || |
| | 3630 | ((vap->iv_bss->ni_ath_flags & IEEE80211_ATHC_AR) && |
| | 3631 | (sc->sc_curchan.privFlags & CHANNEL_INTERFERENCE) && |
| | 3632 | IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) || |
| | 3633 | !sc->sc_dturbo_hold_count)) { |
| | 3634 | DPRINTF(sc, ATH_DEBUG_TURBO, "%s: Leaving turbo\n", |
| | 3635 | sc->sc_dev->name); |
| | 3636 | ic->ic_ath_cap &= ~IEEE80211_ATHC_BOOST; |
| | 3637 | vap->iv_bss->ni_ath_flags &= ~IEEE80211_ATHC_BOOST; |
| | 3638 | sc->sc_dturbo_tcount = 0; |
| | 3639 | sc->sc_dturbo_switch = 1; |
| | 3640 | } |
| | 3641 | } else { |
| | 3642 | /* |
| | 3643 | * Current Mode: BASE |
| | 3644 | * |
| | 3645 | * Transition to Turbo (i.e. BOOST) when all of the |
| | 3646 | * following are true: |
| | 3647 | * |
| | 3648 | * 1. its a DTIM beacon. |
| | 3649 | * 2. Dwell time at base has exceeded minimum (70 secs) |
| | 3650 | * 3. Only DT-capable stations are associated |
| | 3651 | * 4. Channel is marked interference-free. |
| | 3652 | * 5. BSS data traffic averages at least 6Mbps |
| | 3653 | * 6. RSSI is good enough to support 36Mbps |
| | 3654 | * XXX do bw+rssi checks at true beacon interval? |
| | 3655 | */ |
| | 3656 | if (dtim && |
| | 3657 | (sc->sc_dturbo_tcount >= sc->sc_dturbo_base_tmin && |
| | 3658 | (ic->ic_dt_sta_assoc != 0 && |
| | 3659 | ic->ic_sta_assoc == ic->ic_dt_sta_assoc) && |
| | 3660 | ((vap->iv_bss->ni_ath_flags & IEEE80211_ATHC_AR) == 0 || |
| | 3661 | (sc->sc_curchan.privFlags & CHANNEL_INTERFERENCE) == 0) && |
| | 3662 | bss_traffic >= sc->sc_dturbo_bw_turbo && |
| | 3663 | sc->sc_rate_recn_state)) { |
| | 3664 | DPRINTF(sc, ATH_DEBUG_TURBO, "%s: Entering turbo\n", |
| | 3665 | sc->sc_dev->name); |
| | 3666 | ic->ic_ath_cap |= IEEE80211_ATHC_BOOST; |
| | 3667 | vap->iv_bss->ni_ath_flags |= IEEE80211_ATHC_BOOST; |
| | 3668 | sc->sc_dturbo_tcount = 0; |
| | 3669 | sc->sc_dturbo_switch = 1; |
| | 3670 | sc->sc_dturbo_hold = 0; |
| | 3671 | sc->sc_dturbo_hold_count = sc->sc_dturbo_hold_max; |
| | 3672 | } |
| | 3673 | } |
| | 3674 | } |
| | 3675 | |
| | 3676 | |
| | 3677 | static int |
| | 3678 | ath_check_beacon_done(struct ath_softc *sc) |
| | 3679 | { |
| | 3680 | struct ieee80211vap *vap=NULL; |
| | 3681 | struct ath_vap *avp; |
| | 3682 | struct ath_buf *bf; |
| | 3683 | struct sk_buff *skb; |
| | 3684 | struct ath_desc *ds; |
| | 3685 | struct ath_hal *ah = sc->sc_ah; |
| | 3686 | int slot; |
| | 3687 | |
| | 3688 | /* |
| | 3689 | * check if the last beacon went out with the mode change flag set. |
| | 3690 | */ |
| | 3691 | for (slot = 0; slot < ATH_BCBUF; slot++) { |
| | 3692 | if(sc->sc_bslot[slot]) { |
| | 3693 | vap = sc->sc_bslot[slot]; |
| | 3694 | break; |
| | 3695 | } |
| | 3696 | } |
| | 3697 | if (!vap) |
| | 3698 | return 0; |
| | 3699 | avp = ATH_VAP(vap); |
| | 3700 | bf = avp->av_bcbuf; |
| | 3701 | skb = bf->bf_skb; |
| | 3702 | ds = bf->bf_desc; |
| | 3703 | |
| | 3704 | return (ath_hal_txprocdesc(ah, ds) != HAL_EINPROGRESS); |
| | 3705 | |
| | 3706 | } |
| | 3707 | |
| | 3708 | /* |
| | 3709 | * Effect a turbo mode switch when operating in dynamic |
| | 3710 | * turbo mode. wait for beacon to go out before switching. |
| | 3711 | */ |
| | 3712 | static void |
| | 3713 | ath_turbo_switch_mode(unsigned long data) |
| | 3714 | { |
| | 3715 | struct net_device *dev = (struct net_device *)data; |
| | 3716 | struct ath_softc *sc = dev->priv; |
| | 3717 | struct ieee80211com *ic = &sc->sc_ic; |
| | 3718 | int newflags; |
| | 3719 | |
| | 3720 | KASSERT(ic->ic_opmode == IEEE80211_M_HOSTAP, |
| | 3721 | ("unexpected operating mode %d", ic->ic_opmode)); |
| | 3722 | |
| | 3723 | DPRINTF(sc, ATH_DEBUG_STATE, "%s: dynamic turbo switch to %s mode\n", |
| | 3724 | dev->name, |
| | 3725 | ic->ic_ath_cap & IEEE80211_ATHC_BOOST ? "turbo" : "base"); |
| | 3726 | |
| | 3727 | if (!ath_check_beacon_done(sc)) { |
| | 3728 | /* |
| | 3729 | * beacon did not go out. reschedule tasklet. |
| | 3730 | */ |
| | 3731 | mod_timer(&sc->sc_dturbo_switch_mode, jiffies + msecs_to_jiffies(2)); |
| | 3732 | return; |
| | 3733 | } |
| | 3734 | |
| | 3735 | /* TBD: DTIM adjustments, delay CAB queue tx until after transmit */ |
| | 3736 | newflags = ic->ic_bsschan->ic_flags; |
| | 3737 | if (ic->ic_ath_cap & IEEE80211_ATHC_BOOST) { |
| | 3738 | if (IEEE80211_IS_CHAN_2GHZ(ic->ic_bsschan)) { |
| | 3739 | /* |
| | 3740 | * Ignore AR next beacon. the AR detection |
| | 3741 | * code detects the traffic in normal channel |
| | 3742 | * from stations during transition delays |
| | 3743 | * between AP and station. |
| | 3744 | */ |
| | 3745 | sc->sc_ignore_ar = 1; |
| | 3746 | ath_hal_ar_disable(sc->sc_ah); |
| | 3747 | } |
| | 3748 | newflags |= IEEE80211_CHAN_TURBO; |
| | 3749 | } else |
| | 3750 | newflags &= ~IEEE80211_CHAN_TURBO; |
| | 3751 | ieee80211_dturbo_switch(ic, newflags); |
| | 3752 | /* XXX ieee80211_reset_erp? */ |
| | 3753 | } |
| | 3754 | #endif /* ATH_SUPERG_DYNTURBO */ |
| | 3755 | |
| | 3756 | /* |
| | 3757 | * Setup a h/w transmit queue for beacons. |
| | 3758 | */ |
| | 3759 | static int |
| | 3760 | ath_beaconq_setup(struct ath_hal *ah) |
| | 3761 | { |
| | 3762 | HAL_TXQ_INFO qi; |
| | 3763 | |
| | 3764 | memset(&qi, 0, sizeof(qi)); |
| | 3765 | qi.tqi_aifs = 1; |
| | 3766 | qi.tqi_cwmin = 0; |
| | 3767 | qi.tqi_cwmax = 0; |
| | 3768 | #ifdef ATH_SUPERG_DYNTURBO |
| | 3769 | qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE; |
| | 3770 | #endif |
| | 3771 | /* NB: don't enable any interrupts */ |
| | 3772 | return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi); |
| | 3773 | } |
| | 3774 | |
| | 3775 | /* |
| | 3776 | * Configure IFS parameter for the beacon queue. |
| | 3777 | */ |
| | 3778 | static int |
| | 3779 | ath_beaconq_config(struct ath_softc *sc) |
| | 3780 | { |
| | 3781 | #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) |
| | 3782 | struct ieee80211com *ic = &sc->sc_ic; |
| | 3783 | struct ath_hal *ah = sc->sc_ah; |
| | 3784 | HAL_TXQ_INFO qi; |
| | 3785 | |
| | 3786 | ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi); |
| | 3787 | if (ic->ic_opmode == IEEE80211_M_HOSTAP) { |
| | 3788 | /* |
| | 3789 | * Always burst out beacon and CAB traffic. |
| | 3790 | */ |
| | 3791 | qi.tqi_aifs = 1; |
| | 3792 | qi.tqi_cwmin = 0; |
| | 3793 | qi.tqi_cwmax = 0; |
| | 3794 | } else { |
| | 3795 | struct wmeParams *wmep = |
| | 3796 | &ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE]; |
| | 3797 | /* |
| | 3798 | * Adhoc mode; important thing is to use 2x cwmin. |
| | 3799 | */ |
| | 3800 | qi.tqi_aifs = wmep->wmep_aifsn; |
| | 3801 | qi.tqi_cwmin = 2 * ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); |
| | 3802 | qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); |
| | 3803 | } |
| | 3804 | |
| | 3805 | if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) { |
| | 3806 | printk("%s: unable to update h/w beacon queue parameters\n", |
| | 3807 | sc->sc_dev->name); |
| | 3808 | return 0; |
| | 3809 | } else { |
| | 3810 | ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */ |
| | 3811 | return 1; |
| | 3812 | } |
| | 3813 | #undef ATH_EXPONENT_TO_VALUE |
| | 3814 | } |
| | 3815 | |
| | 3816 | /* |
| | 3817 | * Allocate and setup an initial beacon frame. |
| | 3818 | * |
| | 3819 | * Context: softIRQ |
| | 3820 | */ |
| | 3821 | static int |
| | 3822 | ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni) |
| | 3823 | { |
| | 3824 | struct ath_vap *avp = ATH_VAP(ni->ni_vap); |
| | 3825 | struct ieee80211_frame *wh; |
| | 3826 | struct ath_buf *bf; |
| | 3827 | struct sk_buff *skb; |
| | 3828 | |
| | 3829 | /* |
| | 3830 | * release the previous beacon's skb if it already exists. |
| | 3831 | */ |
| | 3832 | bf = avp->av_bcbuf; |
| | 3833 | if (bf->bf_skb != NULL) { |
| | 3834 | bus_unmap_single(sc->sc_bdev, |
| | 3835 | bf->bf_skbaddr, bf->bf_skb->len, BUS_DMA_TODEVICE); |
| | 3836 | dev_kfree_skb(bf->bf_skb); |
| | 3837 | bf->bf_skb = NULL; |
| | 3838 | } |
| | 3839 | if (bf->bf_node != NULL) { |
| | 3840 | ieee80211_free_node(bf->bf_node); |
| | 3841 | bf->bf_node = NULL; |
| | 3842 | } |
| | 3843 | |
| | 3844 | /* |
| | 3845 | * NB: the beacon data buffer must be 32-bit aligned; |
| | 3846 | * we assume the mbuf routines will return us something |
| | 3847 | * with this alignment (perhaps should assert). |
| | 3848 | */ |
| | 3849 | skb = ieee80211_beacon_alloc(ni, &avp->av_boff); |
| | 3850 | if (skb == NULL) { |
| | 3851 | DPRINTF(sc, ATH_DEBUG_BEACON, "%s: cannot get sk_buff\n", |
| | 3852 | __func__); |
| | 3853 | sc->sc_stats.ast_be_nobuf++; |
| | 3854 | return -ENOMEM; |
| | 3855 | } |
| | 3856 | |
| | 3857 | /* |
| | 3858 | * Calculate a TSF adjustment factor required for |
| | 3859 | * staggered beacons. Note that we assume the format |
| | 3860 | * of the beacon frame leaves the tstamp field immediately |
| | 3861 | * following the header. |
| | 3862 | */ |
| | 3863 | if (sc->sc_stagbeacons && avp->av_bslot > 0) { |
| | 3864 | uint64_t tuadjust; |
| | 3865 | __le64 tsfadjust; |
| | 3866 | /* |
| | 3867 | * The beacon interval is in TU's; the TSF in usecs. |
| | 3868 | * We figure out how many TU's to add to align the |
| | 3869 | * timestamp then convert to TSF units and handle |
| | 3870 | * byte swapping before writing it in the frame. |
| | 3871 | * The hardware will then add this each time a beacon |
| | 3872 | * frame is sent. Note that we align VAPs 1..N |
| | 3873 | * and leave VAP 0 untouched. This means VAP 0 |
| | 3874 | * has a timestamp in one beacon interval while the |
| | 3875 | * others get a timestamp aligned to the next interval. |
| | 3876 | */ |
| | 3877 | tuadjust = (ni->ni_intval * (ATH_BCBUF - avp->av_bslot)) / ATH_BCBUF; |
| | 3878 | tsfadjust = cpu_to_le64(tuadjust << 10); /* TU->TSF */ |
| | 3879 | |
| | 3880 | DPRINTF(sc, ATH_DEBUG_BEACON, |
| | 3881 | "%s: %s beacons, bslot %d intval %u tsfadjust(Kus) %llu\n", |
| | 3882 | __func__, sc->sc_stagbeacons ? "stagger" : "burst", |
| | 3883 | avp->av_bslot, ni->ni_intval, (long long) tuadjust); |
| | 3884 | |
| | 3885 | wh = (struct ieee80211_frame *) skb->data; |
| | 3886 | memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust)); |
| | 3887 | } |
| | 3888 | |
| | 3889 | bf->bf_node = ieee80211_ref_node(ni); |
| | 3890 | bf->bf_skbaddr = bus_map_single(sc->sc_bdev, |
| | 3891 | skb->data, skb->len, BUS_DMA_TODEVICE); |
| | 3892 | bf->bf_skb = skb; |
| | 3893 | |
| | 3894 | return 0; |
| | 3895 | } |
| | 3896 | |
| | 3897 | /* |
| | 3898 | * Setup the beacon frame for transmit. |
| | 3899 | */ |
| | 3900 | static void |
| | 3901 | ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf) |
| | 3902 | { |
| | 3903 | #define USE_SHPREAMBLE(_ic) \ |
| | 3904 | (((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\ |
| | 3905 | == IEEE80211_F_SHPREAMBLE) |
| | 3906 | struct ieee80211_node *ni = bf->bf_node; |
| | 3907 | struct ieee80211com *ic = ni->ni_ic; |
| | 3908 | struct sk_buff *skb = bf->bf_skb; |
| | 3909 | struct ath_hal *ah = sc->sc_ah; |
| | 3910 | struct ath_desc *ds; |
| | 3911 | int flags; |
| | 3912 | int antenna = sc->sc_txantenna; |
| | 3913 | const HAL_RATE_TABLE *rt; |
| | 3914 | u_int8_t rix, rate; |
| | 3915 | int ctsrate = 0; |
| | 3916 | int ctsduration = 0; |
| | 3917 | |
| | 3918 | DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n", |
| | 3919 | __func__, skb, skb->len); |
| | 3920 | |
| | 3921 | /* setup descriptors */ |
| | 3922 | ds = bf->bf_desc; |
| | 3923 | |
| | 3924 | flags = HAL_TXDESC_NOACK; |
| | 3925 | #ifdef ATH_SUPERG_DYNTURBO |
| | 3926 | if (sc->sc_dturbo_switch) |
| | 3927 | flags |= HAL_TXDESC_INTREQ; |
| | 3928 | #endif |
| | 3929 | |
| | 3930 | if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) { |
| | 3931 | ds->ds_link = bf->bf_daddr; /* self-linked */ |
| | 3932 | flags |= HAL_TXDESC_VEOL; |
| | 3933 | /* |
| | 3934 | * Let hardware handle antenna switching if txantenna is not set |
| | 3935 | */ |
| | 3936 | } else { |
| | 3937 | ds->ds_link = 0; |
| | 3938 | /* |
| | 3939 | * Switch antenna every beacon if txantenna is not set |
| | 3940 | * Should only switch every beacon period, not for every |
| | 3941 | * SWBA's |
| | 3942 | * XXX assumes two antenna |
| | 3943 | */ |
| | 3944 | if (antenna == 0) { |
| | 3945 | if (sc->sc_stagbeacons) |
| | 3946 | antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 1 ? 2 : 1); |
| | 3947 | else |
| | 3948 | antenna = (sc->sc_stats.ast_be_xmit & 1 ? 2 : 1); |
| | 3949 | } |
| | 3950 | } |
| | 3951 | |
| | 3952 | ds->ds_data = bf->bf_skbaddr; |
| | 3953 | /* |
| | 3954 | * Calculate rate code. |
| | 3955 | * XXX everything at min xmit rate |
| | 3956 | */ |
| | 3957 | rix = sc->sc_minrateix; |
| | 3958 | rt = sc->sc_currates; |
| | 3959 | rate = rt->info[rix].rateCode; |
| | 3960 | if (USE_SHPREAMBLE(ic)) |
| | 3961 | rate |= rt->info[rix].shortPreamble; |
| | 3962 | #ifdef ATH_SUPERG_XR |
| | 3963 | if (bf->bf_node->ni_vap->iv_flags & IEEE80211_F_XR) { |
| | 3964 | u_int8_t cix; |
| | 3965 | int pktlen; |
| | 3966 | pktlen = skb->len + IEEE80211_CRC_LEN; |
| | 3967 | cix = rt->info[sc->sc_protrix].controlRate; |
| | 3968 | /* for XR VAP use different RTSCTS rates and calculate duration */ |
| | 3969 | ctsrate = rt->info[cix].rateCode; |
| | 3970 | if (USE_SHPREAMBLE(ic)) |
| | 3971 | ctsrate |= rt->info[cix].shortPreamble; |
| | 3972 | flags |= HAL_TXDESC_CTSENA; |
| | 3973 | rt = sc->sc_xr_rates; |
| | 3974 | ctsduration = ath_hal_computetxtime(ah,rt, pktlen, |
| | 3975 | IEEE80211_XR_DEFAULT_RATE_INDEX, AH_FALSE); |
| | 3976 | rate = rt->info[IEEE80211_XR_DEFAULT_RATE_INDEX].rateCode; |
| | 3977 | } |
| | 3978 | #endif |
| | 3979 | ath_hal_setuptxdesc(ah, ds |
| | 3980 | , skb->len + IEEE80211_CRC_LEN /* frame length */ |
| | 3981 | , sizeof(struct ieee80211_frame) /* header length */ |
| | 3982 | , HAL_PKT_TYPE_BEACON /* Atheros packet type */ |
| | 3983 | , ni->ni_txpower /* txpower XXX */ |
| | 3984 | , rate, 1 /* series 0 rate/tries */ |
| | 3985 | , HAL_TXKEYIX_INVALID /* no encryption */ |
| | 3986 | , antenna /* antenna mode */ |
| | 3987 | , flags /* no ack, veol for beacons */ |
| | 3988 | , ctsrate /* rts/cts rate */ |
| | 3989 | , ctsduration /* rts/cts duration */ |
| | 3990 | , 0 /* comp icv len */ |
| | 3991 | , 0 /* comp iv len */ |
| | 3992 | , ATH_COMP_PROC_NO_COMP_NO_CCS /* comp scheme */ |
| | 3993 | ); |
| | 3994 | |
| | 3995 | /* NB: beacon's BufLen must be a multiple of 4 bytes */ |
| | 3996 | ath_hal_filltxdesc(ah, ds |
| | 3997 | , roundup(skb->len, 4) /* buffer length */ |
| | 3998 | , AH_TRUE /* first segment */ |
| | 3999 | , AH_TRUE /* last segment */ |
| | 4000 | , ds /* first descriptor */ |
| | 4001 | ); |
| | 4002 | |
| | 4003 | /* NB: The desc swap function becomes void, |
| | 4004 | * if descriptor swapping is not enabled |
| | 4005 | */ |
| | 4006 | ath_desc_swap(ds); |
| | 4007 | #undef USE_SHPREAMBLE |
| | 4008 | } |
| | 4009 | |
| | 4010 | /* |
| | 4011 | * Generate beacon frame and queue cab data for a VAP. |
| | 4012 | */ |
| | 4013 | static struct ath_buf * |
| | 4014 | ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap, int *needmark) |
| | 4015 | { |
| | 4016 | struct ath_hal *ah = sc->sc_ah; |
| | 4017 | struct ath_buf *bf; |
| | 4018 | struct ieee80211_node *ni; |
| | 4019 | struct ath_vap *avp; |
| | 4020 | struct sk_buff *skb; |
| | 4021 | int ncabq; |
| | 4022 | unsigned int curlen; |
| | 4023 | |
| | 4024 | if (vap->iv_state != IEEE80211_S_RUN) { |
| | 4025 | DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: skip VAP in %s state\n", |
| | 4026 | __func__, ieee80211_state_name[vap->iv_state]); |
| | 4027 | return NULL; |
| | 4028 | } |
| | 4029 | #ifdef ATH_SUPERG_XR |
| | 4030 | if (vap->iv_flags & IEEE80211_F_XR) { |
| | 4031 | vap->iv_xrbcnwait++; |
| | 4032 | /* wait for XR_BEACON_FACTOR times before sending the beacon */ |
| | 4033 | if (vap->iv_xrbcnwait < IEEE80211_XR_BEACON_FACTOR) |
| | 4034 | return NULL; |
| | 4035 | vap->iv_xrbcnwait = 0; |
| | 4036 | } |
| | 4037 | #endif |
| | 4038 | avp = ATH_VAP(vap); |
| | 4039 | if (avp->av_bcbuf == NULL) { |
| | 4040 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: avp=%p av_bcbuf=%p\n", |
| | 4041 | __func__, avp, avp->av_bcbuf); |
| | 4042 | return NULL; |
| | 4043 | } |
| | 4044 | bf = avp->av_bcbuf; |
| | 4045 | ni = bf->bf_node; |
| | 4046 | |
| | 4047 | #ifdef ATH_SUPERG_DYNTURBO |
| | 4048 | /* |
| | 4049 | * If we are using dynamic turbo, update the |
| | 4050 | * capability info and arrange for a mode change |
| | 4051 | * if needed. |
| | 4052 | */ |
| | 4053 | if (sc->sc_dturbo) { |
| | 4054 | u_int8_t dtim; |
| | 4055 | dtim = ((avp->av_boff.bo_tim[2] == 1) || |
| | 4056 | (avp->av_boff.bo_tim[3] == 1)); |
| | 4057 | ath_beacon_dturbo_update(vap, needmark, dtim); |
| | 4058 | } |
| | 4059 | #endif |
| | 4060 | /* |
| | 4061 | * Update dynamic beacon contents. If this returns |
| | 4062 | * non-zero then we need to remap the memory because |
| | 4063 | * the beacon frame changed size (probably because |
| | 4064 | * of the TIM bitmap). |
| | 4065 | */ |
| | 4066 | skb = bf->bf_skb; |
| | 4067 | curlen = skb->len; |
| | 4068 | ncabq = avp->av_mcastq.axq_depth; |
| | 4069 | if (ieee80211_beacon_update(ni, &avp->av_boff, skb, ncabq)) { |
| | 4070 | bus_unmap_single(sc->sc_bdev, |
| | 4071 | bf->bf_skbaddr, curlen, BUS_DMA_TODEVICE); |
| | 4072 | bf->bf_skbaddr = bus_map_single(sc->sc_bdev, |
| | 4073 | skb->data, skb->len, BUS_DMA_TODEVICE); |
| | 4074 | } |
| | 4075 | |
| | 4076 | /* |
| | 4077 | * if the CABQ traffic from previous DTIM is pending and the current |
| | 4078 | * beacon is also a DTIM. |
| | 4079 | * 1) if there is only one VAP let the cab traffic continue. |
| | 4080 | * 2) if there are more than one VAP and we are using staggered |
| | 4081 | * beacons, then drain the cabq by dropping all the frames in |
| | 4082 | * the cabq so that the current VAP's cab traffic can be scheduled. |
| | 4083 | * XXX: Need to handle the last MORE_DATA bit here. |
| | 4084 | */ |
| | 4085 | if (ncabq && (avp->av_boff.bo_tim[4] & 1) && sc->sc_cabq->axq_depth) { |
| | 4086 | if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) { |
| | 4087 | ath_tx_draintxq(sc, sc->sc_cabq); |
| | 4088 | DPRINTF(sc, ATH_DEBUG_BEACON, |
| | 4089 | "%s: flush previous cabq traffic\n", __func__); |
| | 4090 | } |
| | 4091 | } |
| | 4092 | |
| | 4093 | /* |
| | 4094 | * Construct tx descriptor. |
| | 4095 | */ |
| | 4096 | ath_beacon_setup(sc, bf); |
| | 4097 | |
| | 4098 | bus_dma_sync_single(sc->sc_bdev, |
| | 4099 | bf->bf_skbaddr, bf->bf_skb->len, BUS_DMA_TODEVICE); |
| | 4100 | |
| | 4101 | /* |
| | 4102 | * Enable the CAB queue before the beacon queue to |
| | 4103 | * ensure cab frames are triggered by this beacon. |
| | 4104 | */ |
| | 4105 | if (avp->av_boff.bo_tim[4] & 1) { /* NB: only at DTIM */ |
| | 4106 | struct ath_txq *cabq = sc->sc_cabq; |
| | 4107 | struct ath_buf *bfmcast; |
| | 4108 | /* |
| | 4109 | * Move everything from the VAP's mcast queue |
| | 4110 | * to the hardware cab queue. |
| | 4111 | */ |
| | 4112 | ATH_TXQ_LOCK(&avp->av_mcastq); |
| | 4113 | ATH_TXQ_LOCK(cabq); |
| | 4114 | bfmcast = STAILQ_FIRST(&avp->av_mcastq.axq_q); |
| | 4115 | /* link the descriptors */ |
| | 4116 | if (cabq->axq_link == NULL) |
| | 4117 | ath_hal_puttxbuf(ah, cabq->axq_qnum, bfmcast->bf_daddr); |
| | 4118 | else { |
| | 4119 | #ifdef AH_NEED_DESC_SWAP |
| | 4120 | *cabq->axq_link = cpu_to_le32(bfmcast->bf_daddr); |
| | 4121 | #else |
| | 4122 | *cabq->axq_link = bfmcast->bf_daddr; |
| | 4123 | #endif |
| | 4124 | } |
| | 4125 | |
| | 4126 | /* Set the MORE_DATA bit for each packet except the last one */ |
| | 4127 | STAILQ_FOREACH(bfmcast, &avp->av_mcastq.axq_q, bf_list) { |
| | 4128 | if (bfmcast != STAILQ_LAST(&avp->av_mcastq.axq_q, ath_buf, bf_list)) |
| | 4129 | ((struct ieee80211_frame *)bfmcast->bf_skb->data)->i_fc[1] |= IEEE80211_FC1_MORE_DATA; |
| | 4130 | } |
| | 4131 | |
| | 4132 | /* append the private VAP mcast list to the cabq */ |
| | 4133 | ATH_TXQ_MOVE_MCASTQ(&avp->av_mcastq, cabq); |
| | 4134 | /* NB: gated by beacon so safe to start here */ |
| | 4135 | ath_hal_txstart(ah, cabq->axq_qnum); |
| | 4136 | ATH_TXQ_UNLOCK(cabq); |
| | 4137 | ATH_TXQ_UNLOCK(&avp->av_mcastq); |
| | 4138 | } |
| | 4139 | |
| | 4140 | return bf; |
| | 4141 | } |
| | 4142 | |
| | 4143 | /* |
| | 4144 | * Transmit one or more beacon frames at SWBA. Dynamic |
| | 4145 | * updates to the frame contents are done as needed and |
| | 4146 | * the slot time is also adjusted based on current state. |
| | 4147 | */ |
| | 4148 | static void |
| | 4149 | ath_beacon_send(struct ath_softc *sc, int *needmark) |
| | 4150 | { |
| | 4151 | #define TSF_TO_TU(_h,_l) \ |
| | 4152 | ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10)) |
| | 4153 | struct ath_hal *ah = sc->sc_ah; |
| | 4154 | struct ieee80211vap *vap; |
| | 4155 | struct ath_buf *bf; |
| | 4156 | int slot; |
| | 4157 | u_int32_t bfaddr; |
| | 4158 | |
| | 4159 | /* |
| | 4160 | * Check if the previous beacon has gone out. If |
| | 4161 | * not don't try to post another, skip this period |
| | 4162 | * and wait for the next. Missed beacons indicate |
| | 4163 | * a problem and should not occur. If we miss too |
| | 4164 | * many consecutive beacons reset the device. |
| | 4165 | */ |
| | 4166 | if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) { |
| | 4167 | sc->sc_bmisscount++; |
| | 4168 | /* XXX: 802.11h needs the chanchange IE countdown decremented. |
| | 4169 | * We should consider adding a net80211 call to indicate |
| | 4170 | * a beacon miss so appropriate action could be taken |
| | 4171 | * (in that layer). |
| | 4172 | */ |
| | 4173 | DPRINTF(sc, ATH_DEBUG_BEACON_PROC, |
| | 4174 | "%s: missed %u consecutive beacons\n", |
| | 4175 | __func__, sc->sc_bmisscount); |
| | 4176 | if (sc->sc_bmisscount > BSTUCK_THRESH) |
| | 4177 | ATH_SCHEDULE_TQUEUE(&sc->sc_bstucktq, needmark); |
| | 4178 | return; |
| | 4179 | } |
| | 4180 | if (sc->sc_bmisscount != 0) { |
| | 4181 | DPRINTF(sc, ATH_DEBUG_BEACON_PROC, |
| | 4182 | "%s: resume beacon xmit after %u misses\n", |
| | 4183 | __func__, sc->sc_bmisscount); |
| | 4184 | sc->sc_bmisscount = 0; |
| | 4185 | } |
| | 4186 | |
| | 4187 | /* |
| | 4188 | * Generate beacon frames. If we are sending frames |
| | 4189 | * staggered then calculate the slot for this frame based |
| | 4190 | * on the tsf to safeguard against missing an swba. |
| | 4191 | * Otherwise we are bursting all frames together and need |
| | 4192 | * to generate a frame for each VAP that is up and running. |
| | 4193 | */ |
| | 4194 | if (sc->sc_stagbeacons) { /* staggered beacons */ |
| | 4195 | struct ieee80211com *ic = &sc->sc_ic; |
| | 4196 | u_int64_t tsf; |
| | 4197 | u_int32_t tsftu; |
| | 4198 | |
| | 4199 | tsf = ath_hal_gettsf64(ah); |
| | 4200 | tsftu = TSF_TO_TU(tsf >> 32, tsf); |
| | 4201 | slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval; |
| | 4202 | vap = sc->sc_bslot[(slot + 1) % ATH_BCBUF]; |
| | 4203 | DPRINTF(sc, ATH_DEBUG_BEACON_PROC, |
| | 4204 | "%s: slot %d [tsf %llu tsftu %u intval %u] vap %p\n", |
| | 4205 | __func__, slot, (long long) tsf, tsftu, ic->ic_lintval, vap); |
| | 4206 | bfaddr = 0; |
| | 4207 | if (vap != NULL) { |
| | 4208 | bf = ath_beacon_generate(sc, vap, needmark); |
| | 4209 | if (bf != NULL) |
| | 4210 | bfaddr = bf->bf_daddr; |
| | 4211 | } |
| | 4212 | } else { /* burst'd beacons */ |
| | 4213 | u_int32_t *bflink; |
| | 4214 | |
| | 4215 | bflink = &bfaddr; |
| | 4216 | /* XXX rotate/randomize order? */ |
| | 4217 | for (slot = 0; slot < ATH_BCBUF; slot++) { |
| | 4218 | vap = sc->sc_bslot[slot]; |
| | 4219 | if (vap != NULL) { |
| | 4220 | bf = ath_beacon_generate(sc, vap, needmark); |
| | 4221 | if (bf != NULL) { |
| | 4222 | #ifdef AH_NEED_DESC_SWAP |
| | 4223 | if (bflink != &bfaddr) |
| | 4224 | *bflink = cpu_to_le32(bf->bf_daddr); |
| | 4225 | else |
| | 4226 | *bflink = bf->bf_daddr; |
| | 4227 | #else |
| | 4228 | *bflink = bf->bf_daddr; |
| | 4229 | #endif |
| | 4230 | bflink = &bf->bf_desc->ds_link; |
| | 4231 | } |
| | 4232 | } |
| | 4233 | } |
| | 4234 | *bflink = 0; /* link of last frame */ |
| | 4235 | } |
| | 4236 | |
| | 4237 | /* |
| | 4238 | * Handle slot time change when a non-ERP station joins/leaves |
| | 4239 | * an 11g network. The 802.11 layer notifies us via callback, |
| | 4240 | * we mark updateslot, then wait one beacon before effecting |
| | 4241 | * the change. This gives associated stations at least one |
| | 4242 | * beacon interval to note the state change. |
| | 4243 | * |
| | 4244 | * NB: The slot time change state machine is clocked according |
| | 4245 | * to whether we are bursting or staggering beacons. We |
| | 4246 | * recognize the request to update and record the current |
| | 4247 | * slot then don't transition until that slot is reached |
| | 4248 | * again. If we miss a beacon for that slot then we'll be |
| | 4249 | * slow to transition but we'll be sure at least one beacon |
| | 4250 | * interval has passed. When bursting slot is always left |
| | 4251 | * set to ATH_BCBUF so this check is a no-op. |
| | 4252 | */ |
| | 4253 | /* XXX locking */ |
| | 4254 | if (sc->sc_updateslot == UPDATE) { |
| | 4255 | sc->sc_updateslot = COMMIT; /* commit next beacon */ |
| | 4256 | sc->sc_slotupdate = slot; |
| | 4257 | } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot) |
| | 4258 | ath_setslottime(sc); /* commit change to hardware */ |
| | 4259 | |
| | 4260 | if ((!sc->sc_stagbeacons || slot == 0) && (!sc->sc_diversity)) { |
| | 4261 | int otherant; |
| | 4262 | /* |
| | 4263 | * Check recent per-antenna transmit statistics and flip |
| | 4264 | * the default rx antenna if noticeably more frames went out |
| | 4265 | * on the non-default antenna. Only do this if rx diversity |
| | 4266 | * is off. |
| | 4267 | * XXX assumes 2 antennae |
| | 4268 | */ |
| | 4269 | otherant = sc->sc_defant & 1 ? 2 : 1; |
| | 4270 | if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + ATH_ANTENNA_DIFF) { |
| | 4271 | DPRINTF(sc, ATH_DEBUG_BEACON, |
| | 4272 | "%s: flip defant to %u, %u > %u\n", |
| | 4273 | __func__, otherant, sc->sc_ant_tx[otherant], |
| | 4274 | sc->sc_ant_tx[sc->sc_defant]); |
| | 4275 | ath_setdefantenna(sc, otherant); |
| | 4276 | } |
| | 4277 | sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0; |
| | 4278 | } |
| | 4279 | |
| | 4280 | if (bfaddr != 0) { |
| | 4281 | /* |
| | 4282 | * Stop any current DMA and put the new frame(s) on the queue. |
| | 4283 | * This should never fail since we check above that no frames |
| | 4284 | * are still pending on the queue. |
| | 4285 | */ |
| | 4286 | if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) { |
| | 4287 | DPRINTF(sc, ATH_DEBUG_ANY, |
| | 4288 | "%s: beacon queue %u did not stop?\n", |
| | 4289 | __func__, sc->sc_bhalq); |
| | 4290 | /* NB: the HAL still stops DMA, so proceed */ |
| | 4291 | } |
| | 4292 | /* NB: cabq traffic should already be queued and primed */ |
| | 4293 | ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr); |
| | 4294 | ath_hal_txstart(ah, sc->sc_bhalq); |
| | 4295 | |
| | 4296 | sc->sc_stats.ast_be_xmit++; /* XXX per-VAP? */ |
| | 4297 | } |
| | 4298 | #undef TSF_TO_TU |
| | 4299 | } |
| | 4300 | |
| | 4301 | /* |
| | 4302 | * Reset the hardware after detecting beacons have stopped. |
| | 4303 | */ |
| | 4304 | static void |
| | 4305 | ath_bstuck_tasklet(TQUEUE_ARG data) |
| | 4306 | { |
| | 4307 | struct net_device *dev = (struct net_device *)data; |
| | 4308 | struct ath_softc *sc = dev->priv; |
| | 4309 | /* |
| | 4310 | * XXX:if the bmisscount is cleared while the |
| | 4311 | * tasklet execution is pending, the following |
| | 4312 | * check will be true, in which case return |
| | 4313 | * without resetting the driver. |
| | 4314 | */ |
| | 4315 | if (sc->sc_bmisscount <= BSTUCK_THRESH) |
| | 4316 | return; |
| | 4317 | printk("%s: stuck beacon; resetting (bmiss count %u)\n", |
| | 4318 | dev->name, sc->sc_bmisscount); |
| | 4319 | ath_reset(dev); |
| | 4320 | } |
| | 4321 | |
| | 4322 | /* |
| | 4323 | * Startup beacon transmission for adhoc mode when |
| | 4324 | * they are sent entirely by the hardware using the |
| | 4325 | * self-linked descriptor + veol trick. |
| | 4326 | */ |
| | 4327 | static void |
| | 4328 | ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap) |
| | 4329 | { |
| | 4330 | struct ath_hal *ah = sc->sc_ah; |
| | 4331 | struct ath_buf *bf; |
| | 4332 | struct ieee80211_node *ni; |
| | 4333 | struct ath_vap *avp; |
| | 4334 | struct sk_buff *skb; |
| | 4335 | |
| | 4336 | avp = ATH_VAP(vap); |
| | 4337 | if (avp->av_bcbuf == NULL) { |
| | 4338 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: avp=%p av_bcbuf=%p\n", |
| | 4339 | __func__, avp, avp != NULL ? avp->av_bcbuf : NULL); |
| | 4340 | return; |
| | 4341 | } |
| | 4342 | bf = avp->av_bcbuf; |
| | 4343 | ni = bf->bf_node; |
| | 4344 | |
| | 4345 | /* |
| | 4346 | * Update dynamic beacon contents. If this returns |
| | 4347 | * non-zero then we need to remap the memory because |
| | 4348 | * the beacon frame changed size (probably because |
| | 4349 | * of the TIM bitmap). |
| | 4350 | */ |
| | 4351 | skb = bf->bf_skb; |
| | 4352 | if (ieee80211_beacon_update(ni, &avp->av_boff, skb, 0)) { |
| | 4353 | bus_unmap_single(sc->sc_bdev, |
| | 4354 | bf->bf_skbaddr, bf->bf_skb->len, BUS_DMA_TODEVICE); |
| | 4355 | bf->bf_skbaddr = bus_map_single(sc->sc_bdev, |
| | 4356 | skb->data, skb->len, BUS_DMA_TODEVICE); |
| | 4357 | } |
| | 4358 | |
| | 4359 | /* |
| | 4360 | * Construct tx descriptor. |
| | 4361 | */ |
| | 4362 | ath_beacon_setup(sc, bf); |
| | 4363 | |
| | 4364 | bus_dma_sync_single(sc->sc_bdev, |
| | 4365 | bf->bf_skbaddr, bf->bf_skb->len, BUS_DMA_TODEVICE); |
| | 4366 | |
| | 4367 | /* NB: caller is known to have already stopped tx DMA */ |
| | 4368 | ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr); |
| | 4369 | ath_hal_txstart(ah, sc->sc_bhalq); |
| | 4370 | DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: TXDP%u = %llx (%p)\n", __func__, |
| | 4371 | sc->sc_bhalq, ito64(bf->bf_daddr), bf->bf_desc); |
| | 4372 | } |
| | 4373 | |
| | 4374 | /* |
| | 4375 | * Reclaim beacon resources and return buffer to the pool. |
| | 4376 | */ |
| | 4377 | static void |
| | 4378 | ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf) |
| | 4379 | { |
| | 4380 | if (bf->bf_skb != NULL) { |
| | 4381 | bus_unmap_single(sc->sc_bdev, |
| | 4382 | bf->bf_skbaddr, bf->bf_skb->len, BUS_DMA_TODEVICE); |
| | 4383 | dev_kfree_skb(bf->bf_skb); |
| | 4384 | bf->bf_skb = NULL; |
| | 4385 | } |
| | 4386 | if (bf->bf_node != NULL) { |
| | 4387 | ieee80211_free_node(bf->bf_node); |
| | 4388 | bf->bf_node = NULL; |
| | 4389 | } |
| | 4390 | STAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list); |
| | 4391 | } |
| | 4392 | |
| | 4393 | /* |
| | 4394 | * Reclaim all beacon resources. |
| | 4395 | */ |
| | 4396 | static void |
| | 4397 | ath_beacon_free(struct ath_softc *sc) |
| | 4398 | { |
| | 4399 | struct ath_buf *bf; |
| | 4400 | |
| | 4401 | STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) { |
| | 4402 | if (bf->bf_skb != NULL) { |
| | 4403 | bus_unmap_single(sc->sc_bdev, |
| | 4404 | bf->bf_skbaddr, bf->bf_skb->len, BUS_DMA_TODEVICE); |
| | 4405 | dev_kfree_skb(bf->bf_skb); |
| | 4406 | bf->bf_skb = NULL; |
| | 4407 | } |
| | 4408 | if (bf->bf_node != NULL) { |
| | 4409 | ieee80211_free_node(bf->bf_node); |
| | 4410 | bf->bf_node = NULL; |
| | 4411 | } |
| | 4412 | } |
| | 4413 | } |
| | 4414 | |
| | 4415 | /* |
| | 4416 | * Configure the beacon and sleep timers. |
| | 4417 | * |
| | 4418 | * When operating as an AP this resets the TSF and sets |
| | 4419 | * up the hardware to notify us when we need to issue beacons. |
| | 4420 | * |
| | 4421 | * When operating in station mode this sets up the beacon |
| | 4422 | * timers according to the timestamp of the last received |
| | 4423 | * beacon and the current TSF, configures PCF and DTIM |
| | 4424 | * handling, programs the sleep registers so the hardware |
| | 4425 | * will wake up in time to receive beacons, and configures |
| | 4426 | * the beacon miss handling so we'll receive a BMISS |
| | 4427 | * interrupt when we stop seeing beacons from the AP |
| | 4428 | * we've associated with. |
| | 4429 | * |
| | 4430 | * Note : TBTT is Target Beacon Transmission Time (see IEEE |
| | 4431 | * 802.11-1999: 4 & 11.2.1.3). |
| | 4432 | */ |
| | 4433 | static void |
| | 4434 | ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap) |
| | 4435 | { |
| | 4436 | #define TSF_TO_TU(_h,_l) \ |
| | 4437 | ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10)) |
| | 4438 | struct ieee80211com *ic = &sc->sc_ic; |
| | 4439 | struct ath_hal *ah = sc->sc_ah; |
| | 4440 | struct ieee80211_node *ni; |
| | 4441 | u_int32_t nexttbtt = 0; |
| | 4442 | u_int32_t intval; |
| | 4443 | u_int64_t tsf, hw_tsf; |
| | 4444 | u_int32_t tsftu, hw_tsftu; |
| | 4445 | int reset_tsf = 0; |
| | 4446 | |
| | 4447 | if (vap == NULL) |
| | 4448 | vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */ |
| | 4449 | |
| | 4450 | ni = vap->iv_bss; |
| | 4451 | |
| | 4452 | hw_tsf = ath_hal_gettsf64(ah); |
| | 4453 | tsf = le64_to_cpu(ni->ni_tstamp.tsf); |
| | 4454 | hw_tsftu = hw_tsf >> 10; |
| | 4455 | tsftu = tsf >> 10; |
| | 4456 | |
| | 4457 | /* We should reset hw TSF only once, so we increment |
| | 4458 | * ni_tstamp.tsf to avoid resetting the hw TSF multiple |
| | 4459 | * times */ |
| | 4460 | |
| | 4461 | if (tsf == 0) { |
| | 4462 | reset_tsf = 1; |
| | 4463 | ni->ni_tstamp.tsf = cpu_to_le64(1); |
| | 4464 | } |
| | 4465 | |
| | 4466 | /* XXX: Conditionalize multi-bss support? */ |
| | 4467 | if (ic->ic_opmode == IEEE80211_M_HOSTAP) { |
| | 4468 | /* For multi-bss ap support beacons are either staggered |
| | 4469 | * evenly over N slots or burst together. For the former |
| | 4470 | * arrange for the SWBA to be delivered for each slot. |
| | 4471 | * Slots that are not occupied will generate nothing. */ |
| | 4472 | /* NB: the beacon interval is kept internally in TU's */ |
| | 4473 | intval = ic->ic_lintval & HAL_BEACON_PERIOD; |
| | 4474 | if (sc->sc_stagbeacons) |
| | 4475 | intval /= ATH_BCBUF; /* for staggered beacons */ |
| | 4476 | if ((sc->sc_nostabeacons) && |
| | 4477 | (vap->iv_opmode == IEEE80211_M_HOSTAP)) |
| | 4478 | reset_tsf = 1; |
| | 4479 | } else |
| | 4480 | intval = ni->ni_intval & HAL_BEACON_PERIOD; |
| | 4481 | |
| | 4482 | #define FUDGE 2 |
| | 4483 | sc->sc_syncbeacon = 0; |
| | 4484 | |
| | 4485 | if (reset_tsf) { |
| | 4486 | /* We just created the interface and TSF will be reset to |
| | 4487 | * zero, so next beacon will be sent at the next intval |
| | 4488 | * time */ |
| | 4489 | nexttbtt = intval; |
| | 4490 | } else if (intval) { /* NB: can be 0 for monitor mode */ |
| | 4491 | if (tsf == 1) { |
| | 4492 | /* We have not received any beacons or probe responses. |
| | 4493 | * The next TBTT must be at least FUDGE ms ahead of the |
| | 4494 | * hw_tsftu. Also, TSF == 0 is a TBTT - IEEE802.11-1999 |
| | 4495 | * 11.1.2.2, although I'm not sure it applies here... */ |
| | 4496 | nexttbtt = roundup(hw_tsftu + FUDGE, intval); |
| | 4497 | } else { |
| | 4498 | if (tsf > hw_tsf) { |
| | 4499 | /* We received a beacon, but the HW TSF has |
| | 4500 | * not been updated (otherwise hw_tsf > tsf). |
| | 4501 | * We cannot use the hardware TSF, so we wait |
| | 4502 | * to synchronise beacons again. */ |
| | 4503 | sc->sc_syncbeacon = 1; |
| | 4504 | goto ath_beacon_config_debug; |
| | 4505 | } else { |
| | 4506 | /* Normal case: we received a beacon to which |
| | 4507 | * we have synchornised. Make sure that |
| | 4508 | * nexttbtt is at least FUDGE ms ahead of |
| | 4509 | * hw_tsf. */ |
| | 4510 | nexttbtt = tsftu + roundup(hw_tsftu + |
| | 4511 | FUDGE - tsftu, intval); |
| | 4512 | } |
| | 4513 | } |
| | 4514 | } |
| | 4515 | |
| | 4516 | if (ic->ic_opmode == IEEE80211_M_STA && !(sc->sc_nostabeacons)) { |
| | 4517 | HAL_BEACON_STATE bs; |
| | 4518 | int dtimperiod, dtimcount; |
| | 4519 | int cfpperiod, cfpcount; |
| | 4520 | |
| | 4521 | /* Setup DTIM and CFP parameters according to the last beacon |
| | 4522 | * we have received (which may not have happened). */ |
| | 4523 | dtimperiod = vap->iv_dtim_period; |
| | 4524 | if (dtimperiod <= 0) /* NB: 0 if not known */ |
| | 4525 | dtimperiod = 1; |
| | 4526 | dtimcount = vap->iv_dtim_count; |
| | 4527 | if (dtimcount >= dtimperiod) /* NB: sanity check */ |
| | 4528 | dtimcount = 0; /* XXX? */ |
| | 4529 | cfpperiod = 1; /* NB: no PCF support yet */ |
| | 4530 | cfpcount = 0; |
| | 4531 | /* |
| | 4532 | * Pull nexttbtt forward to reflect the current |
| | 4533 | * TSF and calculate dtim+cfp state for the result. |
| | 4534 | */ |
| | 4535 | nexttbtt = tsftu; |
| | 4536 | if (nexttbtt == 0) /* e.g. for ap mode */ |
| | 4537 | nexttbtt = intval; |
| | 4538 | do { |
| | 4539 | nexttbtt += intval; |
| | 4540 | if (--dtimcount < 0) { |
| | 4541 | dtimcount = dtimperiod - 1; |
| | 4542 | if (--cfpcount < 0) |
| | 4543 | cfpcount = cfpperiod - 1; |
| | 4544 | } |
| | 4545 | } while (nexttbtt < hw_tsftu + FUDGE); |
| | 4546 | #undef FUDGE |
| | 4547 | memset(&bs, 0, sizeof(bs)); |
| | 4548 | bs.bs_intval = intval; |
| | 4549 | bs.bs_nexttbtt = nexttbtt; |
| | 4550 | bs.bs_dtimperiod = dtimperiod * intval; |
| | 4551 | bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount * intval; |
| | 4552 | bs.bs_cfpperiod = cfpperiod * bs.bs_dtimperiod; |
| | 4553 | bs.bs_cfpnext = bs.bs_nextdtim + cfpcount * bs.bs_dtimperiod; |
| | 4554 | bs.bs_cfpmaxduration = 0; |
| | 4555 | #if 0 |
| | 4556 | /* |
| | 4557 | * The 802.11 layer records the offset to the DTIM |
| | 4558 | * bitmap while receiving beacons; use it here to |
| | 4559 | * enable h/w detection of our AID being marked in |
| | 4560 | * the bitmap vector (to indicate frames for us are |
| | 4561 | * pending at the AP). |
| | 4562 | * XXX do DTIM handling in s/w to WAR old h/w bugs |
| | 4563 | * XXX enable based on h/w rev for newer chips |
| | 4564 | */ |
| | 4565 | bs.bs_timoffset = ni->ni_timoff; |
| | 4566 | #endif |
| | 4567 | /* |
| | 4568 | * Calculate the number of consecutive beacons to miss |
| | 4569 | * before taking a BMISS interrupt. The configuration |
| | 4570 | * is specified in TU so we only need calculate based |
| | 4571 | * on the beacon interval. Note that we clamp the |
| | 4572 | * result to at most 10 beacons. |
| | 4573 | */ |
| | 4574 | bs.bs_bmissthreshold = howmany(ic->ic_bmisstimeout, intval); |
| | 4575 | if (bs.bs_bmissthreshold > 10) |
| | 4576 | bs.bs_bmissthreshold = 10; |
| | 4577 | else if (bs.bs_bmissthreshold < 2) |
| | 4578 | bs.bs_bmissthreshold = 2; |
| | 4579 | |
| | 4580 | /* |
| | 4581 | * Calculate sleep duration. The configuration is |
| | 4582 | * given in ms. We ensure a multiple of the beacon |
| | 4583 | * period is used. Also, if the sleep duration is |
| | 4584 | * greater than the DTIM period then it makes senses |
| | 4585 | * to make it a multiple of that. |
| | 4586 | * |
| | 4587 | * XXX fixed at 100ms |
| | 4588 | */ |
| | 4589 | bs.bs_sleepduration = |
| | 4590 | roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval); |
| | 4591 | if (bs.bs_sleepduration > bs.bs_dtimperiod) |
| | 4592 | bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod); |
| | 4593 | |
| | 4594 | DPRINTF(sc, ATH_DEBUG_BEACON, |
| | 4595 | "%s: tsf %llu tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n" |
| | 4596 | , __func__ |
| | 4597 | , (long long) tsf, tsftu |
| | 4598 | , bs.bs_intval |
| | 4599 | , bs.bs_nexttbtt |
| | 4600 | , bs.bs_dtimperiod |
| | 4601 | , bs.bs_nextdtim |
| | 4602 | , bs.bs_bmissthreshold |
| | 4603 | , bs.bs_sleepduration |
| | 4604 | , bs.bs_cfpperiod |
| | 4605 | , bs.bs_cfpmaxduration |
| | 4606 | , bs.bs_cfpnext |
| | 4607 | , bs.bs_timoffset |
| | 4608 | ); |
| | 4609 | |
| | 4610 | ic->ic_bmiss_guard = jiffies + |
| | 4611 | IEEE80211_TU_TO_JIFFIES(bs.bs_intval * bs.bs_bmissthreshold); |
| | 4612 | |
| | 4613 | ath_hal_intrset(ah, 0); |
| | 4614 | ath_hal_beacontimers(ah, &bs); |
| | 4615 | sc->sc_imask |= HAL_INT_BMISS; |
| | 4616 | ath_hal_intrset(ah, sc->sc_imask); |
| | 4617 | } else { |
| | 4618 | ath_hal_intrset(ah, 0); |
| | 4619 | if (reset_tsf) |
| | 4620 | intval |= HAL_BEACON_RESET_TSF; |
| | 4621 | if (ic->ic_opmode == IEEE80211_M_IBSS) { |
| | 4622 | /* |
| | 4623 | * In IBSS mode enable the beacon timers but only |
| | 4624 | * enable SWBA interrupts if we need to manually |
| | 4625 | * prepare beacon frames. Otherwise we use a |
| | 4626 | * self-linked tx descriptor and let the hardware |
| | 4627 | * deal with things. |
| | 4628 | */ |
| | 4629 | intval |= HAL_BEACON_ENA; |
| | 4630 | if (!sc->sc_hasveol) |
| | 4631 | sc->sc_imask |= HAL_INT_SWBA; |
| | 4632 | ath_beaconq_config(sc); |
| | 4633 | } else if (ic->ic_opmode == IEEE80211_M_HOSTAP) { |
| | 4634 | /* |
| | 4635 | * In AP mode we enable the beacon timers and |
| | 4636 | * SWBA interrupts to prepare beacon frames. |
| | 4637 | */ |
| | 4638 | intval |= HAL_BEACON_ENA; |
| | 4639 | sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */ |
| | 4640 | ath_beaconq_config(sc); |
| | 4641 | } |
| | 4642 | #ifdef ATH_SUPERG_DYNTURBO |
| | 4643 | ath_beacon_dturbo_config(vap, intval & |
| | 4644 | ~(HAL_BEACON_RESET_TSF | HAL_BEACON_ENA)); |
| | 4645 | #endif |
| | 4646 | ath_hal_beaconinit(ah, nexttbtt, intval); |
| | 4647 | sc->sc_bmisscount = 0; |
| | 4648 | ath_hal_intrset(ah, sc->sc_imask); |
| | 4649 | /* |
| | 4650 | * When using a self-linked beacon descriptor in |
| | 4651 | * ibss mode load it once here. |
| | 4652 | */ |
| | 4653 | if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) |
| | 4654 | ath_beacon_start_adhoc(sc, vap); |
| | 4655 | } |
| | 4656 | #undef TSF_TO_TU |
| | 4657 | |
| | 4658 | ath_beacon_config_debug: |
| | 4659 | /* We print all debug messages here, in order to preserve the |
| | 4660 | * time critical aspect of this function */ |
| | 4661 | DPRINTF(sc, ATH_DEBUG_BEACON, |
| | 4662 | "%s: ni=%p tsf=%llu hw_tsf=%llu tsftu=%u hw_tsftu=%u\n", |
| | 4663 | __func__, ni, tsf, hw_tsf, tsftu, hw_tsftu); |
| | 4664 | |
| | 4665 | if (reset_tsf) |
| | 4666 | /* We just created the interface */ |
| | 4667 | DPRINTF(sc, ATH_DEBUG_BEACON, "%s: first beacon\n", __func__); |
| | 4668 | else if (tsf == 1) |
| | 4669 | /* We do not receive any beacons or probe response */ |
| | 4670 | DPRINTF(sc, ATH_DEBUG_BEACON, |
| | 4671 | "%s: no beacon received...\n",__func__); |
| | 4672 | else if (tsf > hw_tsf) |
| | 4673 | /* We do receive a beacon and the hw TSF has not been updated */ |
| | 4674 | DPRINTF(sc, ATH_DEBUG_BEACON, |
| | 4675 | "%s: beacon received, but TSF is incorrect\n", |
| | 4676 | __func__); |
| | 4677 | else |
| | 4678 | /* We do receive a beacon in the past, normal case */ |
| | 4679 | DPRINTF(sc, ATH_DEBUG_BEACON, |
| | 4680 | "%s: beacon received, TSF is correct\n", |
| | 4681 | __func__); |
| | 4682 | |
| | 4683 | DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt=%u intval=%u\n", |
| | 4684 | __func__, nexttbtt, intval & HAL_BEACON_PERIOD); |
| | 4685 | } |
| | 4686 | |
| | 4687 | static int |
| | 4688 | ath_descdma_setup(struct ath_softc *sc, |
| | 4689 | struct ath_descdma *dd, ath_bufhead *head, |
| | 4690 | const char *name, int nbuf, int ndesc) |
| | 4691 | { |
| | 4692 | #define DS2PHYS(_dd, _ds) \ |
| | 4693 | ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) |
| | 4694 | struct ath_desc *ds; |
| | 4695 | struct ath_buf *bf; |
| | 4696 | int i, bsize, error; |
| | 4697 | |
| | 4698 | DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n", |
| | 4699 | __func__, name, nbuf, ndesc); |
| | 4700 | |
| | 4701 | dd->dd_name = name; |
| | 4702 | dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc; |
| | 4703 | |
| | 4704 | /* allocate descriptors */ |
| | 4705 | dd->dd_desc = bus_alloc_consistent(sc->sc_bdev, |
| | 4706 | dd->dd_desc_len, &dd->dd_desc_paddr); |
| | 4707 | if (dd->dd_desc == NULL) { |
| | 4708 | error = -ENOMEM; |
| | 4709 | goto fail; |
| | 4710 | } |
| | 4711 | ds = dd->dd_desc; |
| | 4712 | DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %llx (%lu)\n", |
| | 4713 | __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, |
| | 4714 | ito64(dd->dd_desc_paddr), /*XXX*/ (u_long) dd->dd_desc_len); |
| | 4715 | |
| | 4716 | /* allocate buffers */ |
| | 4717 | bsize = sizeof(struct ath_buf) * nbuf; |
| | 4718 | bf = kmalloc(bsize, GFP_KERNEL); |
| | 4719 | if (bf == NULL) { |
| | 4720 | error = -ENOMEM; /* XXX different code */ |
| | 4721 | goto fail2; |
| | 4722 | } |
| | 4723 | memset(bf, 0, bsize); |
| | 4724 | dd->dd_bufptr = bf; |
| | 4725 | |
| | 4726 | STAILQ_INIT(head); |
| | 4727 | for (i = 0; i < nbuf; i++, bf++, ds += ndesc) { |
| | 4728 | bf->bf_desc = ds; |
| | 4729 | bf->bf_daddr = DS2PHYS(dd, ds); |
| | 4730 | STAILQ_INSERT_TAIL(head, bf, bf_list); |
| | 4731 | } |
| | 4732 | return 0; |
| | 4733 | fail2: |
| | 4734 | bus_free_consistent(sc->sc_bdev, dd->dd_desc_len, |
| | 4735 | dd->dd_desc, dd->dd_desc_paddr); |
| | 4736 | fail: |
| | 4737 | memset(dd, 0, sizeof(*dd)); |
| | 4738 | return error; |
| | 4739 | #undef DS2PHYS |
| | 4740 | } |
| | 4741 | |
| | 4742 | static void |
| | 4743 | ath_descdma_cleanup(struct ath_softc *sc, |
| | 4744 | struct ath_descdma *dd, ath_bufhead *head, int dir) |
| | 4745 | { |
| | 4746 | struct ath_buf *bf; |
| | 4747 | struct ieee80211_node *ni; |
| | 4748 | |
| | 4749 | STAILQ_FOREACH(bf, head, bf_list) { |
| | 4750 | if (bf->bf_skb != NULL) { |
| | 4751 | /* XXX skb->len is not good enough for rxbuf */ |
| | 4752 | if (dd == &sc->sc_rxdma) |
| | 4753 | bus_unmap_single(sc->sc_bdev, |
| | 4754 | bf->bf_skbaddr, sc->sc_rxbufsize, dir); |
| | 4755 | else |
| | 4756 | bus_unmap_single(sc->sc_bdev, |
| | 4757 | bf->bf_skbaddr, bf->bf_skb->len, dir); |
| | 4758 | dev_kfree_skb(bf->bf_skb); |
| | 4759 | bf->bf_skb = NULL; |
| | 4760 | } |
| | 4761 | ni = bf->bf_node; |
| | 4762 | bf->bf_node = NULL; |
| | 4763 | if (ni != NULL) { |
| | 4764 | /* |
| | 4765 | * Reclaim node reference. |
| | 4766 | */ |
| | 4767 | ieee80211_free_node(ni); |
| | 4768 | } |
| | 4769 | } |
| | 4770 | |
| | 4771 | /* Free memory associated with descriptors */ |
| | 4772 | bus_free_consistent(sc->sc_bdev, dd->dd_desc_len, |
| | 4773 | dd->dd_desc, dd->dd_desc_paddr); |
| | 4774 | |
| | 4775 | STAILQ_INIT(head); |
| | 4776 | kfree(dd->dd_bufptr); |
| | 4777 | memset(dd, 0, sizeof(*dd)); |
| | 4778 | } |
| | 4779 | |
| | 4780 | static int |
| | 4781 | ath_desc_alloc(struct ath_softc *sc) |
| | 4782 | { |
| | 4783 | int error; |
| | 4784 | |
| | 4785 | error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, |
| | 4786 | "rx", ATH_RXBUF, 1); |
| | 4787 | if (error != 0) |
| | 4788 | return error; |
| | 4789 | |
| | 4790 | error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, |
| | 4791 | "tx", ATH_TXBUF, ATH_TXDESC); |
| | 4792 | if (error != 0) { |
| | 4793 | ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, |
| | 4794 | BUS_DMA_FROMDEVICE); |
| | 4795 | return error; |
| | 4796 | } |
| | 4797 | |
| | 4798 | /* XXX allocate beacon state together with VAP */ |
| | 4799 | error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, |
| | 4800 | "beacon", ATH_BCBUF, 1); |
| | 4801 | if (error != 0) { |
| | 4802 | ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf, |
| | 4803 | BUS_DMA_TODEVICE); |
| | 4804 | ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, |
| | 4805 | BUS_DMA_FROMDEVICE); |
| | 4806 | return error; |
| | 4807 | } |
| | 4808 | return 0; |
| | 4809 | } |
| | 4810 | |
| | 4811 | static void |
| | 4812 | ath_desc_free(struct ath_softc *sc) |
| | 4813 | { |
| | 4814 | if (sc->sc_bdma.dd_desc_len != 0) |
| | 4815 | ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf, |
| | 4816 | BUS_DMA_TODEVICE); |
| | 4817 | if (sc->sc_txdma.dd_desc_len != 0) |
| | 4818 | ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf, |
| | 4819 | BUS_DMA_TODEVICE); |
| | 4820 | if (sc->sc_rxdma.dd_desc_len != 0) |
| | 4821 | ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, |
| | 4822 | BUS_DMA_FROMDEVICE); |
| | 4823 | } |
| | 4824 | |
| | 4825 | static struct ieee80211_node * |
| | 4826 | ath_node_alloc(struct ieee80211_node_table *nt,struct ieee80211vap *vap) |
| | 4827 | { |
| | 4828 | struct ath_softc *sc = nt->nt_ic->ic_dev->priv; |
| | 4829 | const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; |
| | 4830 | struct ath_node *an; |
| | 4831 | |
| | 4832 | an = kmalloc(space, GFP_ATOMIC); |
| | 4833 | if (an == NULL) |
| | 4834 | return NULL; |
| | 4835 | memset(an, 0, space); |
| | 4836 | an->an_decomp_index = INVALID_DECOMP_INDEX; |
| | 4837 | an->an_avgrssi = ATH_RSSI_DUMMY_MARKER; |
| | 4838 | an->an_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; |
| | 4839 | an->an_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; |
| | 4840 | an->an_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; |
| | 4841 | /* |
| | 4842 | * ath_rate_node_init needs a VAP pointer in node |
| | 4843 | * to decide which mgt rate to use |
| | 4844 | */ |
| | 4845 | an->an_node.ni_vap = vap; |
| | 4846 | sc->sc_rc->ops->node_init(sc, an); |
| | 4847 | |
| | 4848 | /* U-APSD init */ |
| | 4849 | STAILQ_INIT(&an->an_uapsd_q); |
| | 4850 | an->an_uapsd_qdepth = 0; |
| | 4851 | STAILQ_INIT(&an->an_uapsd_overflowq); |
| | 4852 | an->an_uapsd_overflowqdepth = 0; |
| | 4853 | ATH_NODE_UAPSD_LOCK_INIT(an); |
| | 4854 | |
| | 4855 | DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an); |
| | 4856 | return &an->an_node; |
| | 4857 | } |
| | 4858 | |
| | 4859 | static void |
| | 4860 | ath_node_cleanup(struct ieee80211_node *ni) |
| | 4861 | { |
| | 4862 | struct ieee80211com *ic = ni->ni_ic; |
| | 4863 | struct ath_softc *sc = ni->ni_ic->ic_dev->priv; |
| | 4864 | struct ath_node *an = ATH_NODE(ni); |
| | 4865 | struct ath_buf *bf; |
| | 4866 | |
| | 4867 | /* |
| | 4868 | * U-APSD cleanup |
| | 4869 | */ |
| | 4870 | ATH_NODE_UAPSD_LOCK_IRQ(an); |
| | 4871 | if (ni->ni_flags & IEEE80211_NODE_UAPSD_TRIG) { |
| | 4872 | ni->ni_flags &= ~IEEE80211_NODE_UAPSD_TRIG; |
| | 4873 | ic->ic_uapsdmaxtriggers--; |
| | 4874 | ni->ni_flags &= ~IEEE80211_NODE_UAPSD_SP; |
| | 4875 | } |
| | 4876 | ATH_NODE_UAPSD_UNLOCK_IRQ(an); |
| | 4877 | while (an->an_uapsd_qdepth) { |
| | 4878 | bf = STAILQ_FIRST(&an->an_uapsd_q); |
| | 4879 | STAILQ_REMOVE_HEAD(&an->an_uapsd_q, bf_list); |
| | 4880 | bf->bf_desc->ds_link = 0; |
| | 4881 | |
| | 4882 | dev_kfree_skb_any(bf->bf_skb); |
| | 4883 | bf->bf_skb = NULL; |
| | 4884 | bf->bf_node = NULL; |
| | 4885 | ATH_TXBUF_LOCK_IRQ(sc); |
| | 4886 | STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); |
| | 4887 | ATH_TXBUF_UNLOCK_IRQ(sc); |
| | 4888 | ieee80211_free_node(ni); |
| | 4889 | |
| | 4890 | an->an_uapsd_qdepth--; |
| | 4891 | } |
| | 4892 | |
| | 4893 | while (an->an_uapsd_overflowqdepth) { |
| | 4894 | bf = STAILQ_FIRST(&an->an_uapsd_overflowq); |
| | 4895 | STAILQ_REMOVE_HEAD(&an->an_uapsd_overflowq, bf_list); |
| | 4896 | bf->bf_desc->ds_link = 0; |
| | 4897 | |
| | 4898 | dev_kfree_skb_any(bf->bf_skb); |
| | 4899 | bf->bf_skb = NULL; |
| | 4900 | bf->bf_node = NULL; |
| | 4901 | ATH_TXBUF_LOCK_IRQ(sc); |
| | 4902 | STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); |
| | 4903 | ATH_TXBUF_UNLOCK_IRQ(sc); |
| | 4904 | ieee80211_free_node(ni); |
| | 4905 | |
| | 4906 | an->an_uapsd_overflowqdepth--; |
| | 4907 | } |
| | 4908 | |
| | 4909 | ATH_NODE_UAPSD_LOCK_IRQ(an); |
| | 4910 | sc->sc_node_cleanup(ni); |
| | 4911 | ATH_NODE_UAPSD_UNLOCK_IRQ(an); |
| | 4912 | } |
| | 4913 | |
/*
 * Final node release: let the rate-control module free its per-node
 * state, then hand the node to the net80211 free routine this driver
 * wrapped (sc_node_free).
 */
static void
ath_node_free(struct ieee80211_node *ni)
{
	struct ath_softc *sc = ni->ni_ic->ic_dev->priv;

	sc->sc_rc->ops->node_cleanup(sc, ATH_NODE(ni));
	sc->sc_node_free(ni);
#ifdef ATH_SUPERG_XR
	/* Node count changed; recompute the XR group-poll period. */
	ath_grppoll_period_update(sc);
#endif
}
| | 4925 | |
| | 4926 | static u_int8_t |
| | 4927 | ath_node_getrssi(const struct ieee80211_node *ni) |
| | 4928 | { |
| | 4929 | #define HAL_EP_RND(x, mul) \ |
| | 4930 | ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul)) |
| | 4931 | u_int32_t avgrssi = ATH_NODE_CONST(ni)->an_avgrssi; |
| | 4932 | int32_t rssi; |
| | 4933 | |
| | 4934 | /* |
| | 4935 | * When only one frame is received there will be no state in |
| | 4936 | * avgrssi so fallback on the value recorded by the 802.11 layer. |
| | 4937 | */ |
| | 4938 | if (avgrssi != ATH_RSSI_DUMMY_MARKER) |
| | 4939 | rssi = HAL_EP_RND(avgrssi, HAL_RSSI_EP_MULTIPLIER); |
| | 4940 | else |
| | 4941 | rssi = ni->ni_rssi; |
| | 4942 | /* NB: theoretically we shouldn't need this, but be paranoid */ |
| | 4943 | return rssi < 0 ? 0 : rssi > 127 ? 127 : rssi; |
| | 4944 | #undef HAL_EP_RND |
| | 4945 | } |
| | 4946 | |
| | 4947 | |
#ifdef ATH_SUPERG_XR
/*
 * Stops the txqs and moves data between XR and Normal queues.
 * Also adjusts the rate info in the descriptors.
 *
 * NB: the entire body is compiled out by NOT_YET below, so this
 * function is currently a stub that always returns 0.
 */

static u_int8_t
ath_node_move_data(const struct ieee80211_node *ni)
{
#ifdef NOT_YET
	struct ath_txq *txq = NULL;
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_dev->priv;
	struct ath_buf *bf, *prev, *bf_tmp, *bf_tmp1;
	struct ath_hal *ah = sc->sc_ah;
	struct sk_buff *skb = NULL;
	struct ath_desc *ds;
	HAL_STATUS status;
	int index;

	if (ni->ni_vap->iv_flags & IEEE80211_F_XR) {
		struct ath_txq tmp_q;
		memset(&tmp_q, 0, sizeof(tmp_q));
		STAILQ_INIT(&tmp_q.axq_q);
		/*
		 * move data from Normal txqs to XR queue.
		 */
		printk("move data from NORMAL to XR\n");
		/*
		 * collect all the data towards the node
		 * in to the tmp_q.
		 */
		index = WME_AC_VO;
		/* NOTE(review): 'index' is never decremented inside this
		 * loop, so at most one AC queue is visited before the
		 * txq == sc->sc_ac2q[index] condition ends it — looks
		 * like a latent bug; harmless while under NOT_YET. */
		while (index >= WME_AC_BE && txq != sc->sc_ac2q[index]) {
			txq = sc->sc_ac2q[index];
			ATH_TXQ_LOCK(txq);
			ath_hal_stoptxdma(ah, txq->axq_qnum);
			bf = prev = STAILQ_FIRST(&txq->axq_q);
			/*
			 * skip all the buffers that are done
			 * until the first one that is in progress
			 */
			while (bf) {
#ifdef ATH_SUPERG_FF
				ds = &bf->bf_desc[bf->bf_numdesc - 1];
#else
				ds = bf->bf_desc;	/* NB: last descriptor */
#endif
				status = ath_hal_txprocdesc(ah, ds);
				if (status == HAL_EINPROGRESS)
					break;
				prev = bf;
				bf = STAILQ_NEXT(bf,bf_list);
			}
			/*
			 * save the pointer to the last buf that's
			 * done
			 */
			if (prev == bf)
				bf_tmp = NULL;
			else
				bf_tmp=prev;
			/* Detach this node's frames into tmp_q, relinking
			 * the DMA descriptor chain across the holes. */
			while (bf) {
				if (ni == bf->bf_node) {
					if (prev == bf) {
						ATH_TXQ_REMOVE_HEAD(txq, bf_list);
						STAILQ_INSERT_TAIL(&tmp_q.axq_q, bf, bf_list);
						bf = STAILQ_FIRST(&txq->axq_q);
						prev = bf;
					} else {
						STAILQ_REMOVE_AFTER(&(txq->axq_q), prev, bf_list);
						txq->axq_depth--;
						STAILQ_INSERT_TAIL(&tmp_q.axq_q, bf, bf_list);
						bf = STAILQ_NEXT(prev, bf_list);
						/*
						 * after deleting the node
						 * link the descriptors
						 */
#ifdef ATH_SUPERG_FF
						ds = &prev->bf_desc[prev->bf_numdesc - 1];
#else
						ds = prev->bf_desc;	/* NB: last descriptor */
#endif
#ifdef AH_NEED_DESC_SWAP
						ds->ds_link = cpu_to_le32(bf->bf_daddr);
#else
						ds->ds_link = bf->bf_daddr;
#endif
					}
				} else {
					prev = bf;
					bf = STAILQ_NEXT(bf, bf_list);
				}
			}
			/*
			 * if the last buf was deleted.
			 * set the pointer to the last descriptor.
			 */
			bf = STAILQ_FIRST(&txq->axq_q);
			if (bf) {
				if (prev) {
					bf = STAILQ_NEXT(prev, bf_list);
					if (!bf) { /* prev is the last one on the list */
#ifdef ATH_SUPERG_FF
						ds = &prev->bf_desc[prev->bf_numdesc - 1];
#else
						ds = prev->bf_desc;	/* NB: last descriptor */
#endif
						status = ath_hal_txprocdesc(ah, ds);
						if (status == HAL_EINPROGRESS)
							txq->axq_link = &ds->ds_link;
						else
							txq->axq_link = NULL;
					}
				}
			} else
				txq->axq_link = NULL;

			ATH_TXQ_UNLOCK(txq);
			/*
			 * restart the DMA from the first
			 * buffer that was not DMA'd.
			 */
			if (bf_tmp)
				bf = STAILQ_NEXT(bf_tmp, bf_list);
			else
				bf = STAILQ_FIRST(&txq->axq_q);
			if (bf) {
				ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
				ath_hal_txstart(ah, txq->axq_qnum);
			}
		}
		/*
		 * queue them on to the XR txqueue.
		 * can not directly put them on to the XR txq. since the
		 * skb data size may be greater than the XR fragmentation
		 * threshold size.
		 */
		bf = STAILQ_FIRST(&tmp_q.axq_q);
		index = 0;
		while (bf) {
			skb = bf->bf_skb;
			bf->bf_skb = NULL;
			bf->bf_node = NULL;
			ATH_TXBUF_LOCK(sc);
			STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
			ATH_TXBUF_UNLOCK(sc);
			/* re-submit through the normal start path so the
			 * frame is re-fragmented for XR as needed */
			ath_hardstart(skb,sc->sc_dev);
			ATH_TXQ_REMOVE_HEAD(&tmp_q, bf_list);
			bf = STAILQ_FIRST(&tmp_q.axq_q);
			index++;
		}
		printk("moved %d buffers from NORMAL to XR\n", index);
	} else {
		struct ath_txq wme_tmp_qs[WME_AC_VO+1];
		struct ath_txq *wmeq = NULL, *prevq;
		struct ieee80211_frame *wh;
		struct ath_desc *ds = NULL;
		int count = 0;

		/*
		 * move data from XR txq to Normal txqs.
		 */
		printk("move buffers from XR to NORMAL\n");
		memset(&wme_tmp_qs, 0, sizeof(wme_tmp_qs));
		for (index = 0; index <= WME_AC_VO; index++)
			STAILQ_INIT(&wme_tmp_qs[index].axq_q);
		txq = sc->sc_xrtxq;
		ATH_TXQ_LOCK(txq);
		ath_hal_stoptxdma(ah, txq->axq_qnum);
		bf = prev = STAILQ_FIRST(&txq->axq_q);
		/*
		 * skip all the buffers that are done
		 * until the first one that is in progress
		 */
		while (bf) {
#ifdef ATH_SUPERG_FF
			ds = &bf->bf_desc[bf->bf_numdesc - 1];
#else
			ds = bf->bf_desc;	/* NB: last descriptor */
#endif
			status = ath_hal_txprocdesc(ah, ds);
			if (status == HAL_EINPROGRESS)
				break;
			prev= bf;
			bf = STAILQ_NEXT(bf,bf_list);
		}
		/*
		 * save the pointer to the last buf that's
		 * done
		 */
		if (prev == bf)
			bf_tmp1 = NULL;
		else
			bf_tmp1 = prev;
		/*
		 * collect all the data in to four temp SW queues.
		 */
		while (bf) {
			if (ni == bf->bf_node) {
				if (prev == bf) {
					STAILQ_REMOVE_HEAD(&txq->axq_q,bf_list);
					bf_tmp=bf;
					bf = STAILQ_FIRST(&txq->axq_q);
					prev = bf;
				} else {
					STAILQ_REMOVE_AFTER(&(txq->axq_q),prev,bf_list);
					bf_tmp=bf;
					bf = STAILQ_NEXT(prev,bf_list);
				}
				count++;
				/* classify into a WME AC staging queue by
				 * the skb's priority (QoS frames only) */
				skb = bf_tmp->bf_skb;
				wh = (struct ieee80211_frame *) skb->data;
				if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
					/* XXX validate skb->priority, remove mask */
					wmeq = &wme_tmp_qs[skb->priority & 0x3];
				} else
					wmeq = &wme_tmp_qs[WME_AC_BE];
				STAILQ_INSERT_TAIL(&wmeq->axq_q, bf_tmp, bf_list);
				ds = bf_tmp->bf_desc;
				/*
				 * link the descriptors
				 */
				if (wmeq->axq_link != NULL) {
#ifdef AH_NEED_DESC_SWAP
					*wmeq->axq_link = cpu_to_le32(bf_tmp->bf_daddr);
#else
					*wmeq->axq_link = bf_tmp->bf_daddr;
#endif
					DPRINTF(sc, ATH_DEBUG_XMIT, "%s: link[%u](%p)=%p (%p)\n",
							__func__,
							wmeq->axq_qnum, wmeq->axq_link,
							(caddr_t)bf_tmp->bf_daddr, bf_tmp->bf_desc);
				}
				wmeq->axq_link = &ds->ds_link;
				/*
				 * update the rate information
				 */
			} else {
				prev = bf;
				bf = STAILQ_NEXT(bf, bf_list);
			}
		}
		/*
		 * reset the axq_link pointer to the last descriptor.
		 */
		bf = STAILQ_FIRST(&txq->axq_q);
		if (bf) {
			if (prev) {
				bf = STAILQ_NEXT(prev, bf_list);
				if (!bf) { /* prev is the last one on the list */
#ifdef ATH_SUPERG_FF
					ds = &prev->bf_desc[prev->bf_numdesc - 1];
#else
					ds = prev->bf_desc;	/* NB: last descriptor */
#endif
					status = ath_hal_txprocdesc(ah, ds);
					if (status == HAL_EINPROGRESS)
						txq->axq_link = &ds->ds_link;
					else
						txq->axq_link = NULL;
				}
			}
		} else {
			/*
			 * if the list is empty reset the pointer.
			 */
			txq->axq_link = NULL;
		}
		ATH_TXQ_UNLOCK(txq);
		/*
		 * restart the DMA from the first
		 * buffer that was not DMA'd.
		 */
		if (bf_tmp1)
			bf = STAILQ_NEXT(bf_tmp1,bf_list);
		else
			bf = STAILQ_FIRST(&txq->axq_q);
		if (bf) {
			ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_hal_txstart(ah, txq->axq_qnum);
		}

		/*
		 * move (concant) the lists from the temp sw queues in to
		 * WME queues.
		 */
		index = WME_AC_VO;
		txq = NULL;
		while (index >= WME_AC_BE ) {
			prevq = txq;
			txq = sc->sc_ac2q[index];
			if (txq != prevq) {
				ATH_TXQ_LOCK(txq);
				ath_hal_stoptxdma(ah, txq->axq_qnum);
			}

			wmeq = &wme_tmp_qs[index];
			bf = STAILQ_FIRST(&wmeq->axq_q);
			if (bf) {
				ATH_TXQ_MOVE_Q(wmeq,txq);
				if (txq->axq_link != NULL) {
#ifdef AH_NEED_DESC_SWAP
					*(txq->axq_link) = cpu_to_le32(bf->bf_daddr);
#else
					*(txq->axq_link) = bf->bf_daddr;
#endif
				}
			}
			if (index == WME_AC_BE || txq != prevq) {
				/*
				 * find the first buffer to be DMA'd.
				 */
				bf = STAILQ_FIRST(&txq->axq_q);
				while (bf) {
#ifdef ATH_SUPERG_FF
					ds = &bf->bf_desc[bf->bf_numdesc - 1];
#else
					ds = bf->bf_desc;	/* NB: last descriptor */
#endif
					status = ath_hal_txprocdesc(ah, ds);
					if (status == HAL_EINPROGRESS)
						break;
					bf = STAILQ_NEXT(bf,bf_list);
				}
				if (bf) {
					ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
					ath_hal_txstart(ah, txq->axq_qnum);
				}
				ATH_TXQ_UNLOCK(txq);
			}
			index--;
		}
		printk("moved %d buffers from XR to NORMAL\n", count);
	}
#endif
	return 0;
}
#endif
| | 5287 | |
| | 5288 | static struct sk_buff * |
| | 5289 | ath_alloc_skb(u_int size, u_int align) |
| | 5290 | { |
| | 5291 | struct sk_buff *skb; |
| | 5292 | u_int off; |
| | 5293 | |
| | 5294 | skb = dev_alloc_skb(size + align - 1); |
| | 5295 | if (skb != NULL) { |
| | 5296 | off = ((unsigned long) skb->data) % align; |
| | 5297 | if (off != 0) |
| | 5298 | skb_reserve(skb, align - off); |
| | 5299 | } |
| | 5300 | return skb; |
| | 5301 | } |
| | 5302 | |
/*
 * (Re)initialize an rx buffer: allocate and DMA-map an skb if the
 * buffer does not already have one, then set up its descriptor and
 * splice it onto the end of the hardware rx chain.
 * Returns 0 on success or -ENOMEM if skb allocation fails.
 */
static int
ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_desc *ds;

	skb = bf->bf_skb;
	if (skb == NULL) {
		if (sc->sc_nmonvaps > 0) {
			u_int off;
			/* Worst-case capture header we may later prepend. */
			int extra = A_MAX(sizeof(struct ath_rx_radiotap_header),
					  A_MAX(sizeof(wlan_ng_prism2_header), ATHDESC_HEADER_SIZE));

			/*
			 * Allocate buffer for monitor mode with space for the
			 * wlan-ng style physical layer header at the start.
			 */
			skb = dev_alloc_skb(sc->sc_rxbufsize + extra + sc->sc_cachelsz - 1);
			if (skb == NULL) {
				DPRINTF(sc, ATH_DEBUG_ANY,
					"%s: skbuff alloc of size %u failed\n",
					__func__,
					sc->sc_rxbufsize + extra + sc->sc_cachelsz - 1);
				sc->sc_stats.ast_rx_nobuf++;
				return -ENOMEM;
			}
			/*
			 * Reserve space for the Prism header.
			 */
			skb_reserve(skb, sizeof(wlan_ng_prism2_header));
			/*
			 * Align to cache line.
			 */
			off = ((unsigned long) skb->data) % sc->sc_cachelsz;
			if (off != 0)
				skb_reserve(skb, sc->sc_cachelsz - off);
		} else {
			/*
			 * Cache-line-align.  This is important (for the
			 * 5210 at least) as not doing so causes bogus data
			 * in rx'd frames.
			 */
			skb = ath_alloc_skb(sc->sc_rxbufsize, sc->sc_cachelsz);
			if (skb == NULL) {
				DPRINTF(sc, ATH_DEBUG_ANY,
					"%s: skbuff alloc of size %u failed\n",
					__func__, sc->sc_rxbufsize);
				sc->sc_stats.ast_rx_nobuf++;
				return -ENOMEM;
			}
		}
		skb->dev = sc->sc_dev;
		bf->bf_skb = skb;
		/* Map for device writes; unmapped in ath_descdma_cleanup
		 * (or when the frame is handed up the rx path). */
		bf->bf_skbaddr = bus_map_single(sc->sc_bdev,
			skb->data, sc->sc_rxbufsize, BUS_DMA_FROMDEVICE);
	}

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->bf_desc;
	ds->ds_link = bf->bf_daddr;	/* link to self */
	ds->ds_data = bf->bf_skbaddr;
	ds->ds_vdata = (void *) skb->data;	/* virt addr of buffer */
	ath_hal_setuprxdesc(ah, ds
		, skb_tailroom(skb)	/* buffer size */
		, 0
	);
	/* Patch the previous tail's self-link to point at us, then
	 * record our link word as the new chain tail. */
	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;
	sc->sc_rxlink = &ds->ds_link;
	return 0;
}
| | 5389 | |
| | 5390 | /* |
| | 5391 | * Extend 15-bit time stamp from rx descriptor to |
| | 5392 | * a full 64-bit TSF using the current h/w TSF. |
| | 5393 | */ |
| | 5394 | static __inline u_int64_t |
| | 5395 | ath_extend_tsf(struct ath_hal *ah, u_int32_t rstamp) |
| | 5396 | { |
| | 5397 | u_int64_t tsf; |
| | 5398 | |
| | 5399 | tsf = ath_hal_gettsf64(ah); |
| | 5400 | if ((tsf & 0x7fff) < rstamp) |
| | 5401 | tsf -= 0x8000; |
| | 5402 | return ((tsf &~ 0x7fff) | rstamp); |
| | 5403 | } |
| | 5404 | |
| | 5405 | /* |
| | 5406 | * Add a prism2 header to a received frame and |
| | 5407 | * dispatch it to capture tools like kismet. |
| | 5408 | */ |
| | 5409 | static void |
| | 5410 | ath_rx_capture(struct net_device *dev, struct ath_desc *ds, struct sk_buff *skb) |
| | 5411 | { |
| | 5412 | struct ath_softc *sc = dev->priv; |
| | 5413 | struct ieee80211com *ic = &sc->sc_ic; |
| | 5414 | struct ieee80211_frame *wh = (struct ieee80211_frame *) skb->data; |
| | 5415 | unsigned int headersize = ieee80211_anyhdrsize(wh); |
| | 5416 | int padbytes = roundup(headersize, 4) - headersize; |
| | 5417 | u_int64_t tsf; |
| | 5418 | |
| | 5419 | /* Pass up tsf clock in mactime |
| | 5420 | * Rx descriptor has the low 15 bits of the tsf at |
| | 5421 | * the time the frame was received. Use the current |
| | 5422 | * tsf to extend this to 64 bits. |
| | 5423 | */ |
| | 5424 | tsf = ath_extend_tsf(sc->sc_ah, ds->ds_rxstat.rs_tstamp); |
| | 5425 | |
| | 5426 | KASSERT(ic->ic_flags & IEEE80211_F_DATAPAD, |
| | 5427 | ("data padding not enabled?")); |
| | 5428 | |
| | 5429 | if (padbytes > 0) { |
| | 5430 | /* Remove hw pad bytes */ |
| | 5431 | struct sk_buff *skb1 = skb_copy(skb, GFP_ATOMIC); |
| | 5432 | memmove(skb1->data + padbytes, skb1->data, headersize); |
| | 5433 | skb_pull(skb1, padbytes); |
| | 5434 | ieee80211_input_monitor(ic, skb1, ds, 0, tsf, sc); |
| | 5435 | dev_kfree_skb(skb1); |
| | 5436 | } else { |
| | 5437 | ieee80211_input_monitor(ic, skb, ds, 0, tsf, sc); |
| | 5438 | } |
| | 5439 | } |
| | 5440 | |
| | 5441 | |
/*
 * Dispatch a transmitted frame to any monitor-mode listeners.
 * Strips hardware pad bytes, guarantees headroom for the capture
 * header, and always consumes (frees) the skb before returning.
 */
static void
ath_tx_capture(struct net_device *dev, struct ath_desc *ds, struct sk_buff *skb)
{
	struct ath_softc *sc = dev->priv;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	/* Worst-case capture header we may need to prepend. */
	int extra = A_MAX(sizeof(struct ath_tx_radiotap_header),
			  A_MAX(sizeof(wlan_ng_prism2_header), ATHDESC_HEADER_SIZE));
	u_int64_t tsf;
	u_int32_t tstamp;
	unsigned int headersize;
	int padbytes;

	/* Pass up tsf clock in mactime
	 * TX descriptor contains the transmit time in TU's,
	 * (bits 25-10 of the TSF).
	 */
	tsf = ath_hal_gettsf64(sc->sc_ah);
	tstamp = ds->ds_txstat.ts_tstamp << 10;

	/* Splice the 26-bit descriptor timestamp into the full TSF,
	 * backing up one period if the counter wrapped since tx. */
	if ((tsf & 0x3ffffff) < tstamp)
		tsf -= 0x4000000;
	tsf = ((tsf &~ 0x3ffffff) | tstamp);

	/*
	 * release the owner of this skb since we're basically
	 * recycling it
	 */
	if (atomic_read(&skb->users) != 1) {
		/* Shared skb: must work on a private copy; on copy
		 * failure just drop our reference and bail. */
		struct sk_buff *skb2 = skb;
		skb = skb_copy(skb, GFP_ATOMIC);
		if (skb == NULL) {
			printk("%s:%d %s\n", __FILE__, __LINE__, __func__);
			dev_kfree_skb(skb2);
			return;
		}
		dev_kfree_skb(skb2);
	} else
		skb_orphan(skb);

	wh = (struct ieee80211_frame *) skb->data;
	headersize = ieee80211_anyhdrsize(wh);
	padbytes = roundup(headersize, 4) - headersize;
	if (padbytes > 0) {
		/* Unlike in rx_capture, we're freeing the skb at the end
		 * anyway, so we don't need to worry about using a copy */
		memmove(skb->data + padbytes, skb->data, headersize);
		skb_pull(skb, padbytes);
	}

	/* Make room for the capture header; skip dispatch on failure. */
	if (skb_headroom(skb) < extra &&
	    pskb_expand_head(skb, extra, 0, GFP_ATOMIC)) {
		printk("%s:%d %s\n", __FILE__, __LINE__, __func__);
		goto done;
	}
	ieee80211_input_monitor(ic, skb, ds, 1, tsf, sc);
done:
	dev_kfree_skb(skb);
}
| | 5501 | |
| | 5502 | /* |
| | 5503 | * Intercept management frames to collect beacon rssi data |
| | 5504 | * and to do ibss merges. |
| | 5505 | */ |
| | 5506 | static void |
| | 5507 | ath_recv_mgmt(struct ieee80211_node *ni, struct sk_buff *skb, |
| | 5508 | int subtype, int rssi, u_int32_t rstamp) |
| | 5509 | { |
| | 5510 | struct ath_softc *sc = ni->ni_ic->ic_dev->priv; |
| | 5511 | struct ieee80211vap *vap = ni->ni_vap; |
| | 5512 | |
| | 5513 | /* Call up first so subsequent work can use information |
| | 5514 | * potentially stored in the node (e.g. for ibss merge). */ |
| | 5515 | sc->sc_recv_mgmt(ni, skb, subtype, rssi, rstamp); |
| | 5516 | switch (subtype) { |
| | 5517 | case IEEE80211_FC0_SUBTYPE_BEACON: |
| | 5518 | /* Update RSSI statistics for use by the HAL */ |
| | 5519 | ATH_RSSI_LPF(ATH_NODE(ni)->an_halstats.ns_avgbrssi, rssi); |
| | 5520 | if ((sc->sc_syncbeacon || (vap->iv_flags_ext & IEEE80211_FEXT_APPIE_UPDATE)) && |
| | 5521 | ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) { |
| | 5522 | /* Resync beacon timers using the TSF of the |
| | 5523 | * beacon frame we just received. */ |
| | 5524 | vap->iv_flags_ext &= ~IEEE80211_FEXT_APPIE_UPDATE; |
| | 5525 | ath_beacon_config(sc, vap); |
| | 5526 | } |
| | 5527 | /* NB: Fall Through */ |
| | 5528 | case IEEE80211_FC0_SUBTYPE_PROBE_RESP: |
| | 5529 | if (vap->iv_opmode == IEEE80211_M_IBSS && |
| | 5530 | vap->iv_state == IEEE80211_S_RUN) { |
| | 5531 | /* Don't merge if we have a desired BSSID */ |
| | 5532 | if (vap->iv_flags & IEEE80211_F_DESBSSID) |
| | 5533 | break; |
| | 5534 | |
| | 5535 | /* To handle IBSS merge, we need the struct |
| | 5536 | * ieee80211_node which has been updated with the |
| | 5537 | * BSSID and TSF from the last beacon */ |
| | 5538 | ni = ieee80211_find_rxnode(ni->ni_ic, |
| | 5539 | (const struct ieee80211_frame_min *) skb->data); |
| | 5540 | if (ni == NULL) |
| | 5541 | break; |
| | 5542 | |
| | 5543 | /* Handle IBSS merge as needed; check the TSF on the |
| | 5544 | * frame before attempting the merge. The 802.11 spec |
| | 5545 | * says the station should change it's bssid to match |
| | 5546 | * the oldest station with the same ssid, where oldest |
| | 5547 | * is determined by the tsf. Note that hardware |
| | 5548 | * reconfiguration happens through callback to |
| | 5549 | * ath_newstate as the state machine will go from |
| | 5550 | * RUN -> RUN when this happens. */ |
| | 5551 | DPRINTF(sc, ATH_DEBUG_BEACON, |
| | 5552 | "check for ibss merge for ni=%p TSF1(t4)=%10llu TSF2(t3)=%10llu\n", |
| | 5553 | ni, rtsf, le64_to_cpu(ni->ni_tstamp.tsf)); |
| | 5554 | |
| | 5555 | if (rtsf < le64_to_cpu(ni->ni_tstamp.tsf)) { |
| | 5556 | DPRINTF(sc, ATH_DEBUG_BEACON, |
| | 5557 | "ibss merge, rtsf %10llu local tsf %10llu\n", |
| | 5558 | rtsf, le64_to_cpu(ni->ni_tstamp.tsf)); |
| | 5559 | ieee80211_ibss_merge(ni); |
| | 5560 | } |
| | 5561 | } |
| | 5562 | break; |
| | 5563 | } |
| | 5564 | } |
| | 5565 | |
| | 5566 | static void |
| | 5567 | ath_setdefantenna(struct ath_softc *sc, u_int antenna) |
| | 5568 | { |
| | 5569 | struct ath_hal *ah = sc->sc_ah; |
| | 5570 | |
| | 5571 | /* XXX block beacon interrupts */ |
| | 5572 | ath_hal_setdefantenna(ah, antenna); |
| | 5573 | if (sc->sc_defant != antenna) |
| | 5574 | sc->sc_stats.ast_ant_defswitch++; |
| | 5575 | sc->sc_defant = antenna; |
| | 5576 | sc->sc_rxotherant = 0; |
| | 5577 | } |
| | 5578 | |
/*
 * Receive tasklet: drain completed frames from the rx buffer list.
 * Descriptor status was already collected in the first-level interrupt
 * handler (see the ATH_BUFSTATUS_DONE check below); here we do error
 * accounting, monitor-mode capture, sender-node lookup, and hand each
 * frame up to the 802.11 layer before recycling its buffer back onto
 * the rx list via ath_rxbuf_init() in the loop condition.
 */
static void
ath_rx_tasklet(TQUEUE_ARG data)
{
/* Map a descriptor's physical address back to its virtual address. */
#define PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct net_device *dev = (struct net_device *)data;
	struct ath_buf *bf;
	struct ath_softc *sc = dev->priv;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct sk_buff *skb;
	struct ieee80211_node *ni;
	int len, type;
	u_int phyerr;

	/* Let the 802.11 layer know about the new noise floor */
	ic->ic_channoise = sc->sc_channoise;

	DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s\n", __func__);
	do {
		bf = STAILQ_FIRST(&sc->sc_rxbuf);
		if (bf == NULL) {		/* XXX ??? can this happen */
			printk("%s: no buffer (%s)\n", dev->name, __func__);
			break;
		}

		/*
		 * Descriptors are now processed at in the first-level
		 * interrupt handler to support U-APSD trigger search.
		 * This must also be done even when U-APSD is not active to support
		 * other error handling that requires immediate attention.
		 * We check bf_status to find out if the bf's descriptors have
		 * been processed by the HAL.
		 */
		if (!(bf->bf_status & ATH_BUFSTATUS_DONE))
			break;

		ds = bf->bf_desc;
		if (ds->ds_link == bf->bf_daddr) {
			/* NB: never process the self-linked entry at the end */
			break;
		}
		skb = bf->bf_skb;
		if (skb == NULL) {		/* XXX ??? can this happen */
			printk("%s: no skbuff (%s)\n", dev->name, __func__);
			/* NB: 'continue' jumps to the while condition, which
			 * re-arms this bf with a fresh skb via ath_rxbuf_init() */
			continue;
		}

#ifdef AR_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(bf, 1);
#endif

		if (ds->ds_rxstat.rs_more) {
			/*
			 * Frame spans multiple descriptors; this
			 * cannot happen yet as we don't support
			 * jumbograms.  If not in monitor mode,
			 * discard the frame.
			 */
#ifndef ERROR_FRAMES
			/*
			 * Enable this if you want to see
			 * error frames in Monitor mode.
			 */
			if (ic->ic_opmode != IEEE80211_M_MONITOR) {
				sc->sc_stats.ast_rx_toobig++;
				goto rx_next;
			}
#endif
			/* fall thru for monitor mode handling... */
		} else if (ds->ds_rxstat.rs_status != 0) {
			/* Per-error-type accounting before deciding the frame's fate. */
			if (ds->ds_rxstat.rs_status & HAL_RXERR_CRC)
				sc->sc_stats.ast_rx_crcerr++;
			if (ds->ds_rxstat.rs_status & HAL_RXERR_FIFO)
				sc->sc_stats.ast_rx_fifoerr++;
			if (ds->ds_rxstat.rs_status & HAL_RXERR_PHY) {
				sc->sc_stats.ast_rx_phyerr++;
				phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
				sc->sc_stats.ast_rx_phy[phyerr]++;
			}
			if (ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT) {
				/*
				 * Decrypt error.  If the error occurred
				 * because there was no hardware key, then
				 * let the frame through so the upper layers
				 * can process it.  This is necessary for 5210
				 * parts which have no way to setup a ``clear''
				 * key cache entry.
				 *
				 * XXX do key cache faulting
				 */
				if (ds->ds_rxstat.rs_keyix == HAL_RXKEYIX_INVALID)
					goto rx_accept;
				sc->sc_stats.ast_rx_badcrypt++;
			}
			if (ds->ds_rxstat.rs_status & HAL_RXERR_MIC) {
				sc->sc_stats.ast_rx_badmic++;
				/*
				 * Do minimal work required to hand off
				 * the 802.11 header for notification.
				 */
				/* XXX frag's and QoS frames */
				len = ds->ds_rxstat.rs_datalen;
				if (len >= sizeof (struct ieee80211_frame)) {
					bus_dma_sync_single(sc->sc_bdev,
						bf->bf_skbaddr, len,
						BUS_DMA_FROMDEVICE);
#if 0
/* XXX revalidate MIC, lookup ni to find VAP */
					ieee80211_notify_michael_failure(ic,
					    (struct ieee80211_frame *) skb->data,
					    sc->sc_splitmic ?
					        ds->ds_rxstat.rs_keyix - 32 :
					        ds->ds_rxstat.rs_keyix
					);
#endif
				}
			}
			/*
			 * Reject error frames if we have no vaps that
			 * are operating in monitor mode.
			 */
			if(sc->sc_nmonvaps == 0) goto rx_next;
		}
rx_accept:
		/*
		 * Sync and unmap the frame.  At this point we're
		 * committed to passing the sk_buff somewhere so
		 * clear buf_skb; this means a new sk_buff must be
		 * allocated when the rx descriptor is setup again
		 * to receive another frame.
		 */
		len = ds->ds_rxstat.rs_datalen;
		bus_dma_sync_single(sc->sc_bdev,
			bf->bf_skbaddr, len, BUS_DMA_FROMDEVICE);
		bus_unmap_single(sc->sc_bdev, bf->bf_skbaddr,
			sc->sc_rxbufsize, BUS_DMA_FROMDEVICE);
		bf->bf_skb = NULL;

		sc->sc_stats.ast_ant_rx[ds->ds_rxstat.rs_antenna]++;
		sc->sc_devstats.rx_packets++;
		sc->sc_devstats.rx_bytes += len;

		skb_put(skb, len);
		skb->protocol = __constant_htons(ETH_P_CONTROL);

		if (sc->sc_nmonvaps > 0) {
			/*
			 * Some vap is in monitor mode, so send to
			 * ath_rx_capture for monitor encapsulation
			 */
#if 0
			if (len < IEEE80211_ACK_LEN) {
				DPRINTF(sc, ATH_DEBUG_RECV,
					"%s: runt packet %d\n", __func__, len);
				sc->sc_stats.ast_rx_tooshort++;
				dev_kfree_skb(skb);
				skb = NULL;
				goto rx_next;
			}
#endif
			ath_rx_capture(dev, ds, skb);
			if (sc->sc_ic.ic_opmode == IEEE80211_M_MONITOR) {
				/* no other VAPs need the packet */
				dev_kfree_skb(skb);
				skb = NULL;
				goto rx_next;
			}
		}

		/*
		 * Finished monitor mode handling, now reject
		 * error frames before passing to other vaps
		 */
		if (ds->ds_rxstat.rs_status != 0) {
			dev_kfree_skb(skb);
			skb = NULL;
			goto rx_next;
		}

		/* remove the CRC */
		skb_trim(skb, skb->len - IEEE80211_CRC_LEN);

		/*
		 * From this point on we assume the frame is at least
		 * as large as ieee80211_frame_min; verify that.
		 */
		if (len < IEEE80211_MIN_LEN) {
			DPRINTF(sc, ATH_DEBUG_RECV, "%s: short packet %d\n",
				__func__, len);
			sc->sc_stats.ast_rx_tooshort++;
			dev_kfree_skb(skb);
			skb = NULL;
			goto rx_next;
		}

		/*
		 * Normal receive.
		 */

		if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
			ieee80211_dump_pkt(ic, skb->data, skb->len,
				sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate,
				ds->ds_rxstat.rs_rssi);
		}

		/*
		 * Locate the node for sender, track state, and then
		 * pass the (referenced) node up to the 802.11 layer
		 * for its use.  If the sender is unknown spam the
		 * frame; it'll be dropped where it's not wanted.
		 */
		if (ds->ds_rxstat.rs_keyix != HAL_RXKEYIX_INVALID &&
		    (ni = sc->sc_keyixmap[ds->ds_rxstat.rs_keyix]) != NULL) {
			struct ath_node *an;
			/*
			 * Fast path: node is present in the key map;
			 * grab a reference for processing the frame.
			 */
			an = ATH_NODE(ieee80211_ref_node(ni));
			ATH_RSSI_LPF(an->an_avgrssi, ds->ds_rxstat.rs_rssi);
			type = ieee80211_input(ni, skb,
				ds->ds_rxstat.rs_rssi, ds->ds_rxstat.rs_tstamp);
			ieee80211_free_node(ni);
		} else {
			/*
			 * No key index or no entry, do a lookup and
			 * add the node to the mapping table if possible.
			 */
			ni = ieee80211_find_rxnode(ic,
				(const struct ieee80211_frame_min *) skb->data);
			if (ni != NULL) {
				struct ath_node *an = ATH_NODE(ni);
				u_int16_t keyix;

				ATH_RSSI_LPF(an->an_avgrssi,
					ds->ds_rxstat.rs_rssi);
				type = ieee80211_input(ni, skb,
					ds->ds_rxstat.rs_rssi,
					ds->ds_rxstat.rs_tstamp);
				/*
				 * If the station has a key cache slot assigned
				 * update the key->node mapping table.
				 */
				keyix = ni->ni_ucastkey.wk_keyix;
				if (keyix != IEEE80211_KEYIX_NONE &&
				    sc->sc_keyixmap[keyix] == NULL)
					sc->sc_keyixmap[keyix] = ieee80211_ref_node(ni);
				ieee80211_free_node(ni);
			} else
				type = ieee80211_input_all(ic, skb,
					ds->ds_rxstat.rs_rssi,
					ds->ds_rxstat.rs_tstamp);
		}

		if (sc->sc_diversity) {
			/*
			 * When using hardware fast diversity, change the default rx
			 * antenna if rx diversity chooses the other antenna 3
			 * times in a row.
			 */
			if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
				if (++sc->sc_rxotherant >= 3)
					ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna);
			} else
				sc->sc_rxotherant = 0;
		}
		if (sc->sc_softled) {
			/*
			 * Blink for any data frame.  Otherwise do a
			 * heartbeat-style blink when idle.  The latter
			 * is mainly for station mode where we depend on
			 * periodic beacon frames to trigger the poll event.
			 */
			if (type == IEEE80211_FC0_TYPE_DATA) {
				sc->sc_rxrate = ds->ds_rxstat.rs_rate;
				ath_led_event(sc, ATH_LED_RX);
			} else if (jiffies - sc->sc_ledevent >= sc->sc_ledidle)
				ath_led_event(sc, ATH_LED_POLL);
		}
rx_next:
		/* Recycle the buffer to the tail of the rx list. */
		ATH_RXBUF_LOCK_IRQ(sc);
		STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
		bf->bf_status &= ~ATH_BUFSTATUS_DONE;
		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
		ATH_RXBUF_UNLOCK_IRQ(sc);
	} while (ath_rxbuf_init(sc, bf) == 0);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan);
	if (ath_hal_radar_event(ah)) {
		/* Defer radar handling to process context. */
		sc->sc_rtasksched = 1;
		schedule_work(&sc->sc_radartask);
	}
#undef PA2DESC
}
| | 5878 | |
| | 5879 | #ifdef ATH_SUPERG_XR |
| | 5880 | |
| | 5881 | static void |
| | 5882 | ath_grppoll_period_update(struct ath_softc *sc) |
| | 5883 | { |
| | 5884 | struct ieee80211com *ic = &sc->sc_ic; |
| | 5885 | u_int16_t interval; |
| | 5886 | u_int16_t xrsta; |
| | 5887 | u_int16_t normalsta; |
| | 5888 | u_int16_t allsta; |
| | 5889 | |
| | 5890 | xrsta = ic->ic_xr_sta_assoc; |
| | 5891 | |
| | 5892 | /* |
| | 5893 | * if no stations are in XR mode. |
| | 5894 | * use default poll interval. |
| | 5895 | */ |
| | 5896 | if (xrsta == 0) { |
| | 5897 | if (sc->sc_xrpollint != XR_DEFAULT_POLL_INTERVAL) { |
| | 5898 | sc->sc_xrpollint = XR_DEFAULT_POLL_INTERVAL; |
| | 5899 | ath_grppoll_txq_update(sc,XR_DEFAULT_POLL_INTERVAL); |
| | 5900 | } |
| | 5901 | return; |
| | 5902 | } |
| | 5903 | |
| | 5904 | allsta = ic->ic_sta_assoc; |
| | 5905 | /* |
| | 5906 | * if all the stations are in XR mode. |
| | 5907 | * use minimum poll interval. |
| | 5908 | */ |
| | 5909 | if (allsta == xrsta) { |
| | 5910 | if (sc->sc_xrpollint != XR_MIN_POLL_INTERVAL) { |
| | 5911 | sc->sc_xrpollint = XR_MIN_POLL_INTERVAL; |
| | 5912 | ath_grppoll_txq_update(sc,XR_MIN_POLL_INTERVAL); |
| | 5913 | } |
| | 5914 | return; |
| | 5915 | } |
| | 5916 | |
| | 5917 | normalsta = allsta-xrsta; |
| | 5918 | /* |
| | 5919 | * if stations are in both XR and normal mode. |
| | 5920 | * use some fudge factor. |
| | 5921 | */ |
| | 5922 | interval = XR_DEFAULT_POLL_INTERVAL - |
| | 5923 | ((XR_DEFAULT_POLL_INTERVAL - XR_MIN_POLL_INTERVAL) * xrsta)/(normalsta * XR_GRPPOLL_PERIOD_FACTOR); |
| | 5924 | if (interval < XR_MIN_POLL_INTERVAL) |
| | 5925 | interval = XR_MIN_POLL_INTERVAL; |
| | 5926 | |
| | 5927 | if (sc->sc_xrpollint != interval) { |
| | 5928 | sc->sc_xrpollint = interval; |
| | 5929 | ath_grppoll_txq_update(sc,interval); |
| | 5930 | } |
| | 5931 | |
| | 5932 | /* |
| | 5933 | * XXX: what if stations go to sleep? |
| | 5934 | * ideally the interval should be adjusted dynamically based on |
| | 5935 | * xr and normal upstream traffic. |
| | 5936 | */ |
| | 5937 | } |
| | 5938 | |
| | 5939 | /* |
| | 5940 | * update grppoll period. |
| | 5941 | */ |
| | 5942 | static void |
| | 5943 | ath_grppoll_txq_update(struct ath_softc *sc, int period) |
| | 5944 | { |
| | 5945 | struct ath_hal *ah = sc->sc_ah; |
| | 5946 | HAL_TXQ_INFO qi; |
| | 5947 | struct ath_txq *txq = &sc->sc_grpplq; |
| | 5948 | |
| | 5949 | if (sc->sc_grpplq.axq_qnum == -1) |
| | 5950 | return; |
| | 5951 | |
| | 5952 | memset(&qi, 0, sizeof(qi)); |
| | 5953 | qi.tqi_subtype = 0; |
| | 5954 | qi.tqi_aifs = XR_AIFS; |
| | 5955 | qi.tqi_cwmin = XR_CWMIN_CWMAX; |
| | 5956 | qi.tqi_cwmax = XR_CWMIN_CWMAX; |
| | 5957 | qi.tqi_compBuf = 0; |
| | 5958 | qi.tqi_cbrPeriod = IEEE80211_TU_TO_MS(period) * 1000; /* usec */ |
| | 5959 | qi.tqi_cbrOverflowLimit = 2; |
| | 5960 | ath_hal_settxqueueprops(ah, txq->axq_qnum,&qi); |
| | 5961 | ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ |
| | 5962 | } |
| | 5963 | |
| | 5964 | /* |
| | 5965 | * Setup grppoll h/w transmit queue. |
| | 5966 | */ |
| | 5967 | static void |
| | 5968 | ath_grppoll_txq_setup(struct ath_softc *sc, int qtype, int period) |
| | 5969 | { |
| | 5970 | #define N(a) ((int)(sizeof(a)/sizeof(a[0]))) |
| | 5971 | struct ath_hal *ah = sc->sc_ah; |
| | 5972 | HAL_TXQ_INFO qi; |
| | 5973 | int qnum; |
| | 5974 | u_int compbufsz = 0; |
| | 5975 | char *compbuf = NULL; |
| | 5976 | dma_addr_t compbufp = 0; |
| | 5977 | struct ath_txq *txq = &sc->sc_grpplq; |
| | 5978 | |
| | 5979 | memset(&qi, 0, sizeof(qi)); |
| | 5980 | qi.tqi_subtype = 0; |
| | 5981 | qi.tqi_aifs = XR_AIFS; |
| | 5982 | qi.tqi_cwmin = XR_CWMIN_CWMAX; |
| | 5983 | qi.tqi_cwmax = XR_CWMIN_CWMAX; |
| | 5984 | qi.tqi_compBuf = 0; |
| | 5985 | qi.tqi_cbrPeriod = IEEE80211_TU_TO_MS(period) * 1000; /* usec */ |
| | 5986 | qi.tqi_cbrOverflowLimit = 2; |
| | 5987 | |
| | 5988 | if (sc->sc_grpplq.axq_qnum == -1) { |
| | 5989 | qnum = ath_hal_setuptxqueue(ah, qtype, &qi); |
| | 5990 | if (qnum == -1) |
| | 5991 | return ; |
| | 5992 | if (qnum >= N(sc->sc_txq)) { |
| | 5993 | printk("%s: HAL qnum %u out of range, max %u!\n", |
| | 5994 | sc->sc_dev->name, qnum, N(sc->sc_txq)); |
| | 5995 | ath_hal_releasetxqueue(ah, qnum); |
| | 5996 | return; |
| | 5997 | } |
| | 5998 | |
| | 5999 | txq->axq_qnum = qnum; |
| | 6000 | } |
| | 6001 | txq->axq_link = NULL; |
| | 6002 | STAILQ_INIT(&txq->axq_q); |
| | 6003 | ATH_TXQ_LOCK_INIT(txq); |
| | 6004 | txq->axq_depth = 0; |
| | 6005 | txq->axq_totalqueued = 0; |
| | 6006 | txq->axq_intrcnt = 0; |
| | 6007 | TAILQ_INIT(&txq->axq_stageq); |
| | 6008 | txq->axq_compbuf = compbuf; |
| | 6009 | txq->axq_compbufsz = compbufsz; |
| | 6010 | txq->axq_compbufp = compbufp; |
| | 6011 | ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ |
| | 6012 | #undef N |
| | 6013 | |
| | 6014 | } |
| | 6015 | |
| | 6016 | /* |
| | 6017 | * Setup group poll frames on the group poll queue. |
| | 6018 | */ |
| | 6019 | static void ath_grppoll_start(struct ieee80211vap *vap,int pollcount) |
| | 6020 | { |
| | 6021 | int i, amode; |
| | 6022 | int flags; |
| | 6023 | struct sk_buff *skb = NULL; |
| | 6024 | struct ath_buf *bf, *head = NULL; |
| | 6025 | struct ieee80211com *ic = vap->iv_ic; |
| | 6026 | struct ath_softc *sc = ic->ic_dev->priv; |
| | 6027 | struct ath_hal *ah = sc->sc_ah; |
| | 6028 | u_int8_t rate; |
| | 6029 | int ctsrate = 0; |
| | 6030 | int ctsduration = 0; |
| | 6031 | const HAL_RATE_TABLE *rt; |
| | 6032 | u_int8_t cix, rtindex = 0; |
| | 6033 | u_int type; |
| | 6034 | struct ath_txq *txq = &sc->sc_grpplq; |
| | 6035 | struct ath_desc *ds = NULL; |
| | 6036 | int pktlen = 0, keyix = 0; |
| | 6037 | int pollsperrate, pos; |
| | 6038 | int rates[XR_NUM_RATES]; |
| | 6039 | u_int8_t ratestr[16], numpollstr[16]; |
| | 6040 | typedef struct rate_to_str_map { |
| | 6041 | u_int8_t str[4]; |
| | 6042 | int ratekbps; |
| | 6043 | } RATE_TO_STR_MAP; |
| | 6044 | |
| | 6045 | static const RATE_TO_STR_MAP ratestrmap[] = { |
| | 6046 | {"0.25", 250}, |
| | 6047 | { ".25", 250}, |
| | 6048 | {"0.5", 500}, |
| | 6049 | { ".5", 500}, |
| | 6050 | { "1", 1000}, |
| | 6051 | { "3", 3000}, |
| | 6052 | { "6", 6000}, |
| | 6053 | { "?", 0}, |
| | 6054 | }; |
| | 6055 | |
| | 6056 | #define MAX_GRPPOLL_RATE 5 |
| | 6057 | #define USE_SHPREAMBLE(_ic) \ |
| | 6058 | (((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER)) \ |
| | 6059 | == IEEE80211_F_SHPREAMBLE) |
| | 6060 | |
| | 6061 | if (sc->sc_xrgrppoll) |
| | 6062 | return; |
| | 6063 | |
| | 6064 | memset(&rates, 0, sizeof(rates)); |
| | 6065 | pos = 0; |
| | 6066 | while (sscanf(&(sc->sc_grppoll_str[pos]), "%s %s", ratestr, numpollstr) == 2) { |
| | 6067 | int rtx = 0; |
| | 6068 | while (ratestrmap[rtx].ratekbps != 0) { |
| | 6069 | if (strcmp(ratestrmap[rtx].str, ratestr) == 0) |
| | 6070 | break; |
| | 6071 | rtx++; |
| | 6072 | } |
| | 6073 | sscanf(numpollstr, "%d", &(rates[rtx])); |
| | 6074 | pos += strlen(ratestr) + strlen(numpollstr) + 2; |
| | 6075 | } |
| | 6076 | if (!sc->sc_grppolldma.dd_bufptr) { |
| | 6077 | printk("grppoll_start: grppoll Buf allocation failed\n"); |
| | 6078 | return; |
| | 6079 | } |
| | 6080 | rt = sc->sc_currates; |
| | 6081 | cix = rt->info[sc->sc_protrix].controlRate; |
| | 6082 | ctsrate = rt->info[cix].rateCode; |
| | 6083 | if (USE_SHPREAMBLE(ic)) |
| | 6084 | ctsrate |= rt->info[cix].shortPreamble; |
| | 6085 | rt = sc->sc_xr_rates; |
| | 6086 | /* |
| | 6087 | * queue the group polls for each antenna mode. set the right keycache index for the |
| | 6088 | * broadcast packets. this will ensure that if the first poll |
| | 6089 | * does not elicit a single chirp from any XR station, hardware will |
| | 6090 | * not send the subsequent polls |
| | 6091 | */ |
| | 6092 | pollsperrate = 0; |
| | 6093 | for (amode = HAL_ANTENNA_FIXED_A; amode < HAL_ANTENNA_MAX_MODE ; amode++) { |
| | 6094 | for (i = 0; i < (pollcount + 1); i++) { |
| | 6095 | |
| | 6096 | flags = HAL_TXDESC_NOACK; |
| | 6097 | rate = rt->info[rtindex].rateCode; |
| | 6098 | /* |
| | 6099 | * except for the last one every thing else is a CF poll. |
| | 6100 | * last one is the CF End frame. |
| | 6101 | */ |
| | 6102 | |
| | 6103 | if (i == pollcount) { |
| | 6104 | skb = ieee80211_getcfframe(vap,IEEE80211_FC0_SUBTYPE_CF_END); |
| | 6105 | rate = ctsrate; |
| | 6106 | ctsduration = ath_hal_computetxtime(ah, |
| | 6107 | sc->sc_currates, pktlen, sc->sc_protrix, AH_FALSE); |
| | 6108 | } else { |
| | 6109 | skb = ieee80211_getcfframe(vap, IEEE80211_FC0_SUBTYPE_CFPOLL); |
| | 6110 | pktlen = skb->len + IEEE80211_CRC_LEN; |
| | 6111 | /* |
| | 6112 | * the very first group poll ctsduration should be enough to allow |
| | 6113 | * an auth frame from station. This is to pass the wifi testing (as |
| | 6114 | * some stations in testing do not honor CF_END and rely on CTS duration) |
| | 6115 | */ |
| | 6116 | if (i == 0 && amode == HAL_ANTENNA_FIXED_A) { |
| | 6117 | ctsduration = ath_hal_computetxtime(ah, rt, |
| | 6118 | pktlen, rtindex, |
| | 6119 | AH_FALSE) /*cf-poll time */ |
| | 6120 | + (XR_AIFS + (XR_CWMIN_CWMAX * XR_SLOT_DELAY)) |
| | 6121 | + ath_hal_computetxtime(ah, rt, |
| | 6122 | 2 * (sizeof(struct ieee80211_frame_min) + 6), |
| | 6123 | IEEE80211_XR_DEFAULT_RATE_INDEX, |
| | 6124 | AH_FALSE) /*auth packet time */ |
| | 6125 | + ath_hal_computetxtime(ah, rt, |
| | 6126 | IEEE80211_ACK_LEN, |
| | 6127 | IEEE80211_XR_DEFAULT_RATE_INDEX, |
| | 6128 | AH_FALSE); /*ack frame time */ |
| | 6129 | } else { |
| | 6130 | ctsduration = ath_hal_computetxtime(ah, rt, |
| | 6131 | pktlen, rtindex, |
| | 6132 | AH_FALSE) /*cf-poll time */ |
| | 6133 | + (XR_AIFS + (XR_CWMIN_CWMAX * XR_SLOT_DELAY)) |
| | 6134 | + ath_hal_computetxtime(ah,rt, |
| | 6135 | XR_FRAGMENTATION_THRESHOLD, |
| | 6136 | IEEE80211_XR_DEFAULT_RATE_INDEX, |
| | 6137 | AH_FALSE) /*data packet time */ |
| | 6138 | + ath_hal_computetxtime(ah,rt, |
| | 6139 | IEEE80211_ACK_LEN, |
| | 6140 | IEEE80211_XR_DEFAULT_RATE_INDEX, |
| | 6141 | AH_FALSE); /*ack frame time */ |
| | 6142 | } |
| | 6143 | if ((vap->iv_flags & IEEE80211_F_PRIVACY) && keyix == 0) { |
| | 6144 | struct ieee80211_key *k; |
| | 6145 | k = ieee80211_crypto_encap(vap->iv_bss, skb); |
| | 6146 | if (k) |
| | 6147 | keyix = k->wk_keyix; |
| | 6148 | } |
| | 6149 | } |
| | 6150 | ATH_TXBUF_LOCK_IRQ(sc); |
| | 6151 | bf = STAILQ_FIRST(&sc->sc_grppollbuf); |
| | 6152 | if (bf != NULL) |
| | 6153 | STAILQ_REMOVE_HEAD(&sc->sc_grppollbuf, bf_list); |
| | 6154 | else { |
| | 6155 | DPRINTF(sc, ATH_DEBUG_XMIT, "%s: No more TxBufs\n", __func__); |
| | 6156 | ATH_TXBUF_UNLOCK_IRQ_EARLY(sc); |
| | 6157 | return; |
| | 6158 | } |
| | 6159 | /* XXX use a counter and leave at least one for mgmt frames */ |
| | 6160 | if (STAILQ_EMPTY(&sc->sc_grppollbuf)) { |
| | 6161 | DPRINTF(sc, ATH_DEBUG_XMIT, "%s: No more TxBufs left\n", __func__); |
| | 6162 | ATH_TXBUF_UNLOCK_IRQ_EARLY(sc); |
| | 6163 | return; |
| | 6164 | } |
| | 6165 | ATH_TXBUF_UNLOCK_IRQ(sc); |
| | 6166 | bf->bf_skbaddr = bus_map_single(sc->sc_bdev, |
| | 6167 | skb->data, skb->len, BUS_DMA_TODEVICE); |
| | 6168 | bf->bf_skb = skb; |
| | 6169 | ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); |
| | 6170 | ds = bf->bf_desc; |
| | 6171 | ds->ds_data = bf->bf_skbaddr; |
| | 6172 | if (i == pollcount && amode == (HAL_ANTENNA_MAX_MODE -1)) { |
| | 6173 | type = HAL_PKT_TYPE_NORMAL; |
| | 6174 | flags |= (HAL_TXDESC_CLRDMASK | HAL_TXDESC_VEOL); |
| | 6175 | } else { |
| | 6176 | flags |= HAL_TXDESC_CTSENA; |
| | 6177 | type = HAL_PKT_TYPE_GRP_POLL; |
| | 6178 | } |
| | 6179 | if (i == 0 && amode == HAL_ANTENNA_FIXED_A ) { |
| | 6180 | flags |= HAL_TXDESC_CLRDMASK; |
| | 6181 | head = bf; |
| | 6182 | } |
| | 6183 | ath_hal_setuptxdesc(ah, ds |
| | 6184 | , skb->len + IEEE80211_CRC_LEN /* frame length */ |
| | 6185 | , sizeof(struct ieee80211_frame) /* header length */ |
| | 6186 | , type /* Atheros packet type */ |
| | 6187 | , ic->ic_txpowlimit /* max txpower */ |
| | 6188 | , rate, 0 /* series 0 rate/tries */ |
| | 6189 | , keyix /* HAL_TXKEYIX_INVALID */ /* use key index */ |
| | 6190 | , amode /* antenna mode */ |
| | 6191 | , flags |
| | 6192 | , ctsrate /* rts/cts rate */ |
| | 6193 | , ctsduration /* rts/cts duration */ |
| | 6194 | , 0 /* comp icv len */ |
| | 6195 | , 0 /* comp iv len */ |
| | 6196 | , ATH_COMP_PROC_NO_COMP_NO_CCS /* comp scheme */ |
| | 6197 | ); |
| | 6198 | ath_hal_filltxdesc(ah, ds |
| | 6199 | , roundup(skb->len, 4) /* buffer length */ |
| | 6200 | , AH_TRUE /* first segment */ |
| | 6201 | , AH_TRUE /* last segment */ |
| | 6202 | , ds /* first descriptor */ |
| | 6203 | ); |
| | 6204 | /* NB: The desc swap function becomes void, |
| | 6205 | * if descriptor swapping is not enabled |
| | 6206 | */ |
| | 6207 | ath_desc_swap(ds); |
| | 6208 | if (txq->axq_link) { |
| | 6209 | #ifdef AH_NEED_DESC_SWAP |
| | 6210 | *txq->axq_link = cpu_to_le32(bf->bf_daddr); |
| | 6211 | #else |
| | 6212 | *txq->axq_link = bf->bf_daddr; |
| | 6213 | #endif |
| | 6214 | } |
| | 6215 | txq->axq_link = &ds->ds_link; |
| | 6216 | pollsperrate++; |
| | 6217 | if (pollsperrate > rates[rtindex]) { |
| | 6218 | rtindex = (rtindex + 1) % MAX_GRPPOLL_RATE; |
| | 6219 | pollsperrate = 0; |
| | 6220 | } |
| | 6221 | } |
| | 6222 | } |
| | 6223 | /* make it circular */ |
| | 6224 | #ifdef AH_NEED_DESC_SWAP |
| | 6225 | ds->ds_link = cpu_to_le32(head->bf_daddr); |
| | 6226 | #else |
| | 6227 | ds->ds_link = head->bf_daddr; |
| | 6228 | #endif |
| | 6229 | /* start the queue */ |
| | 6230 | ath_hal_puttxbuf(ah, txq->axq_qnum, head->bf_daddr); |
| | 6231 | ath_hal_txstart(ah, txq->axq_qnum); |
| | 6232 | sc->sc_xrgrppoll = 1; |
| | 6233 | #undef USE_SHPREAMBLE |
| | 6234 | } |
| | 6235 | |
/*
 * Stop XR group polling: halt DMA on the poll queue, return every
 * buffer on it to the free group-poll buffer list, and reset the
 * queue's software state.  No-op if polling is not running.
 */
static void ath_grppoll_stop(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ath_softc *sc = ic->ic_dev->priv;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_txq *txq = &sc->sc_grpplq;
	struct ath_buf *bf;


	if (!sc->sc_xrgrppoll)
		return;
	ath_hal_stoptxdma(ah, txq->axq_qnum);

	/* move the grppool bufs back to the grppollbuf */
	for (;;) {
		ATH_TXQ_LOCK(txq);
		bf = STAILQ_FIRST(&txq->axq_q);
		if (bf == NULL) {
			/* queue drained; clear the link pointer under the lock */
			txq->axq_link = NULL;
			ATH_TXQ_UNLOCK(txq);
			break;
		}
		ATH_TXQ_REMOVE_HEAD(txq, bf_list);
		ATH_TXQ_UNLOCK(txq);
		/* NB: unmap and free outside the queue lock */
		bus_unmap_single(sc->sc_bdev,
			bf->bf_skbaddr, bf->bf_skb->len, BUS_DMA_TODEVICE);
		dev_kfree_skb(bf->bf_skb);
		bf->bf_skb = NULL;
		bf->bf_node = NULL;

		ATH_TXBUF_LOCK(sc);
		STAILQ_INSERT_TAIL(&sc->sc_grppollbuf, bf, bf_list);
		ATH_TXBUF_UNLOCK(sc);
	}
	/* Reset the software queue state for the next ath_grppoll_start(). */
	STAILQ_INIT(&txq->axq_q);
	ATH_TXQ_LOCK_INIT(txq);
	txq->axq_depth = 0;
	txq->axq_totalqueued = 0;
	txq->axq_intrcnt = 0;
	TAILQ_INIT(&txq->axq_stageq);
	sc->sc_xrgrppoll = 0;
}
| | 6278 | #endif |
| | 6279 | |
| | 6280 | /* |
| | 6281 | * Setup a h/w transmit queue. |
| | 6282 | */ |
| | 6283 | static struct ath_txq * |
| | 6284 | ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) |
| | 6285 | { |
| | 6286 | #define N(a) ((int)(sizeof(a)/sizeof(a[0]))) |
| | 6287 | struct ath_hal *ah = sc->sc_ah; |
| | 6288 | HAL_TXQ_INFO qi; |
| | 6289 | int qnum; |
| | 6290 | u_int compbufsz = 0; |
| | 6291 | char *compbuf = NULL; |
| | 6292 | dma_addr_t compbufp = 0; |
| | 6293 | |
| | 6294 | memset(&qi, 0, sizeof(qi)); |
| | 6295 | qi.tqi_subtype = subtype; |
| | 6296 | qi.tqi_aifs = HAL_TXQ_USEDEFAULT; |
| | 6297 | qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; |
| | 6298 | qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; |
| | 6299 | qi.tqi_compBuf = 0; |
| | 6300 | #ifdef ATH_SUPERG_XR |
| | 6301 | if (subtype == HAL_XR_DATA) { |
| | 6302 | qi.tqi_aifs = XR_DATA_AIFS; |
| | 6303 | qi.tqi_cwmin = XR_DATA_CWMIN; |
| | 6304 | qi.tqi_cwmax = XR_DATA_CWMAX; |
| | 6305 | } |
| | 6306 | #endif |
| | 6307 | |
| | 6308 | #ifdef ATH_SUPERG_COMP |
| | 6309 | /* allocate compression scratch buffer for data queues */ |
| | 6310 | if (((qtype == HAL_TX_QUEUE_DATA)|| (qtype == HAL_TX_QUEUE_UAPSD)) |
| | 6311 | && ath_hal_compressionsupported(ah)) { |
| | 6312 | compbufsz = roundup(HAL_COMP_BUF_MAX_SIZE, |
| | 6313 | HAL_COMP_BUF_ALIGN_SIZE) + HAL_COMP_BUF_ALIGN_SIZE; |
| | 6314 | compbuf = (char *)bus_alloc_consistent(sc->sc_bdev, |
| | 6315 | compbufsz, &compbufp); |
| | 6316 | if (compbuf == NULL) |
| | 6317 | sc->sc_ic.ic_ath_cap &= ~IEEE80211_ATHC_COMP; |
| | 6318 | else |
| | 6319 | qi.tqi_compBuf = (u_int32_t)compbufp; |
| | 6320 | } |
| | 6321 | #endif |
| | 6322 | /* |
| | 6323 | * Enable interrupts only for EOL and DESC conditions. |
| | 6324 | * We mark tx descriptors to receive a DESC interrupt |
| | 6325 | * when a tx queue gets deep; otherwise waiting for the |
| | 6326 | * EOL to reap descriptors. Note that this is done to |
| | 6327 | * reduce interrupt load and this only defers reaping |
| | 6328 | * descriptors, never transmitting frames. Aside from |
| | 6329 | * reducing interrupts this also permits more concurrency. |
| | 6330 | * The only potential downside is if the tx queue backs |
| | 6331 | * up in which case the top half of the kernel may backup |
| | 6332 | * due to a lack of tx descriptors. |
| | 6333 | * |
| | 6334 | * The UAPSD queue is an exception, since we take a desc- |
| | 6335 | * based intr on the EOSP frames. |
| | 6336 | */ |
| | 6337 | if (qtype == HAL_TX_QUEUE_UAPSD) |
| | 6338 | qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE; |
| | 6339 | else |
| | 6340 | qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE; |
| | 6341 | qnum = ath_hal_setuptxqueue(ah, qtype, &qi); |
| | 6342 | if (qnum == -1) { |
| | 6343 | /* |
| | 6344 | * NB: don't print a message, this happens |
| | 6345 | * normally on parts with too few tx queues |
| | 6346 | */ |
| | 6347 | #ifdef ATH_SUPERG_COMP |
| | 6348 | if (compbuf) { |
| | 6349 | bus_free_consistent(sc->sc_bdev, compbufsz, |
| | 6350 | compbuf, compbufp); |
| | 6351 | } |
| | 6352 | #endif |
| | 6353 | return NULL; |
| | 6354 | } |
| | 6355 | if (qnum >= N(sc->sc_txq)) { |
| | 6356 | printk("%s: HAL qnum %u out of range, max %u!\n", |
| | 6357 | sc->sc_dev->name, qnum, N(sc->sc_txq)); |
| | 6358 | #ifdef ATH_SUPERG_COMP |
| | 6359 | if (compbuf) { |
| | 6360 | bus_free_consistent(sc->sc_bdev, compbufsz, |
| | 6361 | compbuf, compbufp); |
| | 6362 | } |
| | 6363 | #endif |
| | 6364 | ath_hal_releasetxqueue(ah, qnum); |
| | 6365 | return NULL; |
| | 6366 | } |
| | 6367 | if (!ATH_TXQ_SETUP(sc, qnum)) { |
| | 6368 | struct ath_txq *txq = &sc->sc_txq[qnum]; |
| | 6369 | |
| | 6370 | txq->axq_qnum = qnum; |
| | 6371 | txq->axq_link = NULL; |
| | 6372 | STAILQ_INIT(&txq->axq_q); |
| | 6373 | ATH_TXQ_LOCK_INIT(txq); |
| | 6374 | txq->axq_depth = 0; |
| | 6375 | txq->axq_totalqueued = 0; |
| | 6376 | txq->axq_intrcnt = 0; |
| | 6377 | TAILQ_INIT(&txq->axq_stageq); |
| | 6378 | txq->axq_compbuf = compbuf; |
| | 6379 | txq->axq_compbufsz = compbufsz; |
| | 6380 | txq->axq_compbufp = compbufp; |
| | 6381 | sc->sc_txqsetup |= 1 << qnum; |
| | 6382 | } |
| | 6383 | return &sc->sc_txq[qnum]; |
| | 6384 | #undef N |
| | 6385 | } |
| | 6386 | |
| | 6387 | /* |
| | 6388 | * Setup a hardware data transmit queue for the specified |
| | 6389 | * access control. The HAL may not support all requested |
| | 6390 | * queues in which case it will return a reference to a |
| | 6391 | * previously setup queue. We record the mapping from ac's |
| | 6392 | * to h/w queues for use by ath_tx_start and also track |
| | 6393 | * the set of h/w queues being used to optimize work in the |
| | 6394 | * transmit interrupt handler and related routines. |
| | 6395 | */ |
| | 6396 | static int |
| | 6397 | ath_tx_setup(struct ath_softc *sc, int ac, int haltype) |
| | 6398 | { |
| | 6399 | #define N(a) ((int)(sizeof(a)/sizeof(a[0]))) |
| | 6400 | struct ath_txq *txq; |
| | 6401 | |
| | 6402 | if (ac >= N(sc->sc_ac2q)) { |
| | 6403 | printk("%s: AC %u out of range, max %u!\n", |
| | 6404 | sc->sc_dev->name, ac, (unsigned)N(sc->sc_ac2q)); |
| | 6405 | return 0; |
| | 6406 | } |
| | 6407 | txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); |
| | 6408 | if (txq != NULL) { |
| | 6409 | sc->sc_ac2q[ac] = txq; |
| | 6410 | return 1; |
| | 6411 | } else |
| | 6412 | return 0; |
| | 6413 | #undef N |
| | 6414 | } |
| | 6415 | |
| | 6416 | /* |
| | 6417 | * Update WME parameters for a transmit queue. |
| | 6418 | */ |
| | 6419 | static int |
| | 6420 | ath_txq_update(struct ath_softc *sc, struct ath_txq *txq, int ac) |
| | 6421 | { |
| | 6422 | #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) |
| | 6423 | #define ATH_TXOP_TO_US(v) (v<<5) |
| | 6424 | struct ieee80211com *ic = &sc->sc_ic; |
| | 6425 | struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; |
| | 6426 | struct ath_hal *ah = sc->sc_ah; |
| | 6427 | HAL_TXQ_INFO qi; |
| | 6428 | |
| | 6429 | ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); |
| | 6430 | qi.tqi_aifs = wmep->wmep_aifsn; |
| | 6431 | qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); |
| | 6432 | qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); |
| | 6433 | qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit); |
| | 6434 | |
| | 6435 | if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { |
| | 6436 | printk("%s: unable to update hardware queue " |
| | 6437 | "parameters for %s traffic!\n", |
| | 6438 | sc->sc_dev->name, ieee80211_wme_acnames[ac]); |
| | 6439 | return 0; |
| | 6440 | } else { |
| | 6441 | ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ |
| | 6442 | return 1; |
| | 6443 | } |
| | 6444 | #undef ATH_TXOP_TO_US |
| | 6445 | #undef ATH_EXPONENT_TO_VALUE |
| | 6446 | } |
| | 6447 | |
| | 6448 | /* |
| | 6449 | * Callback from the 802.11 layer to update WME parameters. |
| | 6450 | */ |
| | 6451 | static int |
| | 6452 | ath_wme_update(struct ieee80211com *ic) |
| | 6453 | { |
| | 6454 | struct ath_softc *sc = ic->ic_dev->priv; |
| | 6455 | |
| | 6456 | if (sc->sc_uapsdq) |
| | 6457 | ath_txq_update(sc, sc->sc_uapsdq, WME_AC_VO); |
| | 6458 | |
| | 6459 | return !ath_txq_update(sc, sc->sc_ac2q[WME_AC_BE], WME_AC_BE) || |
| | 6460 | !ath_txq_update(sc, sc->sc_ac2q[WME_AC_BK], WME_AC_BK) || |
| | 6461 | !ath_txq_update(sc, sc->sc_ac2q[WME_AC_VI], WME_AC_VI) || |
| | 6462 | !ath_txq_update(sc, sc->sc_ac2q[WME_AC_VO], WME_AC_VO) ? EIO : 0; |
| | 6463 | } |
| | 6464 | |
| | 6465 | /* |
| | 6466 | * Callback from 802.11 layer to flush a node's U-APSD queues |
| | 6467 | */ |
| | 6468 | static void |
| | 6469 | ath_uapsd_flush(struct ieee80211_node *ni) |
| | 6470 | { |
| | 6471 | struct ath_node *an = ATH_NODE(ni); |
| | 6472 | struct ath_buf *bf; |
| | 6473 | struct ath_softc *sc = ni->ni_ic->ic_dev->priv; |
| | 6474 | struct ath_txq *txq; |
| | 6475 | |
| | 6476 | ATH_NODE_UAPSD_LOCK_IRQ(an); |
| | 6477 | /* |
| | 6478 | * NB: could optimize for successive runs from the same AC |
| | 6479 | * if we can assume that is the most frequent case. |
| | 6480 | */ |
| | 6481 | while (an->an_uapsd_qdepth) { |
| | 6482 | bf = STAILQ_FIRST(&an->an_uapsd_q); |
| | 6483 | STAILQ_REMOVE_HEAD(&an->an_uapsd_q, bf_list); |
| | 6484 | bf->bf_desc->ds_link = 0; |
| | 6485 | txq = sc->sc_ac2q[bf->bf_skb->priority & 0x3]; |
| | 6486 | ath_tx_txqaddbuf(sc, ni, txq, bf, bf->bf_desc, bf->bf_skb->len); |
| | 6487 | an->an_uapsd_qdepth--; |
| | 6488 | } |
| | 6489 | |
| | 6490 | while (an->an_uapsd_overflowqdepth) { |
| | 6491 | bf = STAILQ_FIRST(&an->an_uapsd_overflowq); |
| | 6492 | STAILQ_REMOVE_HEAD(&an->an_uapsd_overflowq, bf_list); |
| | 6493 | bf->bf_desc->ds_link = 0; |
| | 6494 | txq = sc->sc_ac2q[bf->bf_skb->priority & 0x3]; |
| | 6495 | ath_tx_txqaddbuf(sc, ni, txq, bf, bf->bf_desc, bf->bf_skb->len); |
| | 6496 | an->an_uapsd_overflowqdepth--; |
| | 6497 | } |
| | 6498 | if (IEEE80211_NODE_UAPSD_USETIM(ni)) |
| | 6499 | ni->ni_vap->iv_set_tim(ni, 0); |
| | 6500 | ATH_NODE_UAPSD_UNLOCK_IRQ(an); |
| | 6501 | } |
| | 6502 | |
| | 6503 | /* |
| | 6504 | * Reclaim resources for a setup queue. |
| | 6505 | */ |
| | 6506 | static void |
| | 6507 | ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) |
| | 6508 | { |
| | 6509 | |
| | 6510 | #ifdef ATH_SUPERG_COMP |
| | 6511 | /* Release compression buffer */ |
| | 6512 | if (txq->axq_compbuf) { |
| | 6513 | bus_free_consistent(sc->sc_bdev, txq->axq_compbufsz, |
| | 6514 | txq->axq_compbuf, txq->axq_compbufp); |
| | 6515 | txq->axq_compbuf = NULL; |
| | 6516 | } |
| | 6517 | #endif |
| | 6518 | ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); |
| | 6519 | ATH_TXQ_LOCK_DESTROY(txq); |
| | 6520 | sc->sc_txqsetup &= ~(1 << txq->axq_qnum); |
| | 6521 | } |
| | 6522 | |
| | 6523 | /* |
| | 6524 | * Reclaim all tx queue resources. |
| | 6525 | */ |
| | 6526 | static void |
| | 6527 | ath_tx_cleanup(struct ath_softc *sc) |
| | 6528 | { |
| | 6529 | int i; |
| | 6530 | |
| | 6531 | ATH_TXBUF_LOCK_DESTROY(sc); |
| | 6532 | for (i = 0; i < HAL_NUM_TX_QUEUES; i++) |
| | 6533 | if (ATH_TXQ_SETUP(sc, i)) |
| | 6534 | ath_tx_cleanupq(sc, &sc->sc_txq[i]); |
| | 6535 | } |
| | 6536 | |
| | 6537 | #ifdef ATH_SUPERG_COMP |
| | 6538 | static u_int32_t |
| | 6539 | ath_get_icvlen(struct ieee80211_key *k) |
| | 6540 | { |
| | 6541 | const struct ieee80211_cipher *cip = k->wk_cipher; |
| | 6542 | |
| | 6543 | if (cip->ic_cipher == IEEE80211_CIPHER_AES_CCM || |
| | 6544 | cip->ic_cipher == IEEE80211_CIPHER_AES_OCB) |
| | 6545 | return AES_ICV_FIELD_SIZE; |
| | 6546 | |
| | 6547 | return WEP_ICV_FIELD_SIZE; |
| | 6548 | } |
| | 6549 | |
| | 6550 | static u_int32_t |
| | 6551 | ath_get_ivlen(struct ieee80211_key *k) |
| | 6552 | { |
| | 6553 | const struct ieee80211_cipher *cip = k->wk_cipher; |
| | 6554 | u_int32_t ivlen; |
| | 6555 | |
| | 6556 | ivlen = WEP_IV_FIELD_SIZE; |
| | 6557 | |
| | 6558 | if (cip->ic_cipher == IEEE80211_CIPHER_AES_CCM || |
| | 6559 | cip->ic_cipher == IEEE80211_CIPHER_AES_OCB) |
| | 6560 | ivlen += EXT_IV_FIELD_SIZE; |
| | 6561 | |
| | 6562 | return ivlen; |
| | 6563 | } |
| | 6564 | #endif |
| | 6565 | |
| | 6566 | /* |
| | 6567 | * Get transmit rate index using rate in Kbps |
| | 6568 | */ |
| | 6569 | static __inline int |
| | 6570 | ath_tx_findindex(const HAL_RATE_TABLE *rt, int rate) |
| | 6571 | { |
| | 6572 | int i; |
| | 6573 | int ndx = 0; |
| | 6574 | |
| | 6575 | for (i = 0; i < rt->rateCount; i++) { |
| | 6576 | if (rt->info[i].rateKbps == rate) { |
| | 6577 | ndx = i; |
| | 6578 | break; |
| | 6579 | } |
| | 6580 | } |
| | 6581 | |
| | 6582 | return ndx; |
| | 6583 | } |
| | 6584 | |
| | 6585 | /* |
| | 6586 | * Needs external locking! |
| | 6587 | */ |
static void
ath_tx_uapsdqueue(struct ath_softc *sc, struct ath_node *an, struct ath_buf *bf)
{
	struct ath_buf *lastbuf;

	/* case the delivery queue just sent and can move overflow q over */
	if (an->an_uapsd_qdepth == 0 && an->an_uapsd_overflowqdepth != 0) {
		DPRINTF(sc, ATH_DEBUG_UAPSD,
			"%s: delivery Q empty, replacing with overflow Q\n",
			__func__);
		/* promote the whole overflow queue to the delivery queue */
		STAILQ_CONCAT(&an->an_uapsd_q, &an->an_uapsd_overflowq);
		an->an_uapsd_qdepth = an->an_uapsd_overflowqdepth;
		an->an_uapsd_overflowqdepth = 0;
	}

	/* most common case - room on delivery q */
	if (an->an_uapsd_qdepth < an->an_node.ni_uapsd_maxsp) {
		/* add to delivery q */
		/* chain the new buffer's descriptor off the current tail,
		 * so the h/w can walk the queue as one descriptor list */
		if ((lastbuf = STAILQ_LAST(&an->an_uapsd_q, ath_buf, bf_list))) {
#ifdef AH_NEED_DESC_SWAP
			lastbuf->bf_desc->ds_link = cpu_to_le32(bf->bf_daddr);
#else
			lastbuf->bf_desc->ds_link = bf->bf_daddr;
#endif
		}
		STAILQ_INSERT_TAIL(&an->an_uapsd_q, bf, bf_list);
		an->an_uapsd_qdepth++;
		DPRINTF(sc, ATH_DEBUG_UAPSD,
			"%s: added AC %d frame to delivery Q, new depth = %d\n",
			__func__, bf->bf_skb->priority, an->an_uapsd_qdepth);
		return;
	}

	/* check if need to make room on overflow queue */
	if (an->an_uapsd_overflowqdepth == an->an_node.ni_uapsd_maxsp) {
		/*
		 * pop oldest from delivery queue and cleanup
		 */
		lastbuf = STAILQ_FIRST(&an->an_uapsd_q);
		STAILQ_REMOVE_HEAD(&an->an_uapsd_q, bf_list);
		/* frame is dropped: free its skb and node reference, then
		 * recycle the ath_buf onto the free list (needs txbuf lock) */
		dev_kfree_skb(lastbuf->bf_skb);
		lastbuf->bf_skb = NULL;
		ieee80211_free_node(lastbuf->bf_node);
		lastbuf->bf_node = NULL;
		ATH_TXBUF_LOCK_IRQ(sc);
		STAILQ_INSERT_TAIL(&sc->sc_txbuf, lastbuf, bf_list);
		ATH_TXBUF_UNLOCK_IRQ(sc);

		/*
		 * move oldest from overflow to delivery
		 */
		lastbuf = STAILQ_FIRST(&an->an_uapsd_overflowq);
		STAILQ_REMOVE_HEAD(&an->an_uapsd_overflowq, bf_list);
		an->an_uapsd_overflowqdepth--;
		STAILQ_INSERT_TAIL(&an->an_uapsd_q, lastbuf, bf_list);
		DPRINTF(sc, ATH_DEBUG_UAPSD,
			"%s: delivery and overflow Qs full, dropped oldest\n",
			__func__);
	}

	/* add to overflow q */
	/* same descriptor-chaining as the delivery-queue case above */
	if ((lastbuf = STAILQ_LAST(&an->an_uapsd_overflowq, ath_buf, bf_list))) {
#ifdef AH_NEED_DESC_SWAP
		lastbuf->bf_desc->ds_link = cpu_to_le32(bf->bf_daddr);
#else
		lastbuf->bf_desc->ds_link = bf->bf_daddr;
#endif
	}
	STAILQ_INSERT_TAIL(&an->an_uapsd_overflowq, bf, bf_list);
	an->an_uapsd_overflowqdepth++;
	DPRINTF(sc, ATH_DEBUG_UAPSD, "%s: added AC %d to overflow Q, new depth = %d\n",
		__func__, bf->bf_skb->priority, an->an_uapsd_overflowqdepth);

	return;
}
| | 6663 | |
| | 6664 | static int |
| | 6665 | ath_tx_start(struct net_device *dev, struct ieee80211_node *ni, struct ath_buf *bf, struct sk_buff *skb, int nextfraglen) |
| | 6666 | { |
| | 6667 | #define MIN(a,b) ((a) < (b) ? (a) : (b)) |
| | 6668 | struct ath_softc *sc = dev->priv; |
| | 6669 | struct ieee80211com *ic = ni->ni_ic; |
| | 6670 | struct ieee80211vap *vap = ni->ni_vap; |
| | 6671 | struct ath_hal *ah = sc->sc_ah; |
| | 6672 | int isprot, ismcast, keyix, hdrlen, pktlen, try0; |
| | 6673 | u_int8_t rix, txrate, ctsrate; |
| | 6674 | u_int32_t ivlen = 0, icvlen = 0; |
| | 6675 | int comp = ATH_COMP_PROC_NO_COMP_NO_CCS; |
| | 6676 | u_int8_t cix = 0xff; /* NB: silence compiler */ |
| | 6677 | struct ath_desc *ds = NULL; |
| | 6678 | struct ath_txq *txq = NULL; |
| | 6679 | struct ieee80211_frame *wh; |
| | 6680 | u_int subtype, flags, ctsduration; |
| | 6681 | HAL_PKT_TYPE atype; |
| | 6682 | const HAL_RATE_TABLE *rt; |
| | 6683 | HAL_BOOL shortPreamble; |
| | 6684 | struct ath_node *an; |
| | 6685 | struct ath_vap *avp = ATH_VAP(vap); |
| | 6686 | int istxfrag; |
| | 6687 | u_int8_t antenna; |
| | 6688 | |
| | 6689 | wh = (struct ieee80211_frame *) skb->data; |
| | 6690 | isprot = wh->i_fc[1] & IEEE80211_FC1_PROT; |
| | 6691 | ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); |
| | 6692 | hdrlen = ieee80211_anyhdrsize(wh); |
| | 6693 | istxfrag = (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) || |
| | 6694 | (((le16toh(*(__le16 *) &wh->i_seq[0]) >> |
| | 6695 | IEEE80211_SEQ_FRAG_SHIFT) & IEEE80211_SEQ_FRAG_MASK) > 0); |
| | 6696 | |
| | 6697 | pktlen = skb->len; |
| | 6698 | #ifdef ATH_SUPERG_FF |
| | 6699 | { |
| | 6700 | struct sk_buff *skbtmp = skb; |
| | 6701 | while ((skbtmp = skbtmp->next)) |
| | 6702 | pktlen += skbtmp->len; |
| | 6703 | } |
| | 6704 | #endif |
| | 6705 | /* |
| | 6706 | * Packet length must not include any |
| | 6707 | * pad bytes; deduct them here. |
| | 6708 | */ |
| | 6709 | pktlen -= (hdrlen & 3); |
| | 6710 | |
| | 6711 | if (isprot) { |
| | 6712 | const struct ieee80211_cipher *cip; |
| | 6713 | struct ieee80211_key *k; |
| | 6714 | |
| | 6715 | /* |
| | 6716 | * Construct the 802.11 header+trailer for an encrypted |
| | 6717 | * frame. The only reason this can fail is because of an |
| | 6718 | * unknown or unsupported cipher/key type. |
| | 6719 | */ |
| | 6720 | |
| | 6721 | /* FFXXX: change to handle linked skbs */ |
| | 6722 | k = ieee80211_crypto_encap(ni, skb); |
| | 6723 | if (k == NULL) { |
| | 6724 | /* |
| | 6725 | * This can happen when the key is yanked after the |
| | 6726 | * frame was queued. Just discard the frame; the |
| | 6727 | * 802.11 layer counts failures and provides |
| | 6728 | * debugging/diagnostics. |
| | 6729 | */ |
| | 6730 | return -EIO; |
| | 6731 | } |
| | 6732 | /* |
| | 6733 | * Adjust the packet + header lengths for the crypto |
| | 6734 | * additions and calculate the h/w key index. When |
| | 6735 | * a s/w mic is done the frame will have had any mic |
| | 6736 | * added to it prior to entry so skb->len above will |
| | 6737 | * account for it. Otherwise we need to add it to the |
| | 6738 | * packet length. |
| | 6739 | */ |
| | 6740 | cip = k->wk_cipher; |
| | 6741 | hdrlen += cip->ic_header; |
| | 6742 | pktlen += cip->ic_header + cip->ic_trailer; |
| | 6743 | if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0) { |
| | 6744 | if (!istxfrag) |
| | 6745 | pktlen += cip->ic_miclen; |
| | 6746 | else |
| | 6747 | if (cip->ic_cipher != IEEE80211_CIPHER_TKIP) |
| | 6748 | pktlen += cip->ic_miclen; |
| | 6749 | } |
| | 6750 | keyix = k->wk_keyix; |
| | 6751 | |
| | 6752 | #ifdef ATH_SUPERG_COMP |
| | 6753 | icvlen = ath_get_icvlen(k) / 4; |
| | 6754 | ivlen = ath_get_ivlen(k) / 4; |
| | 6755 | #endif |
| | 6756 | /* packet header may have moved, reset our local pointer */ |
| | 6757 | wh = (struct ieee80211_frame *) skb->data; |
| | 6758 | } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) { |
| | 6759 | /* |
| | 6760 | * Use station key cache slot, if assigned. |
| | 6761 | */ |
| | 6762 | keyix = ni->ni_ucastkey.wk_keyix; |
| | 6763 | if (keyix == IEEE80211_KEYIX_NONE) |
| | 6764 | keyix = HAL_TXKEYIX_INVALID; |
| | 6765 | } else |
| | 6766 | keyix = HAL_TXKEYIX_INVALID; |
| | 6767 | |
| | 6768 | pktlen += IEEE80211_CRC_LEN; |
| | 6769 | |
| | 6770 | /* |
| | 6771 | * Load the DMA map so any coalescing is done. This |
| | 6772 | * also calculates the number of descriptors we need. |
| | 6773 | */ |
| | 6774 | #ifndef ATH_SUPERG_FF |
| | 6775 | bf->bf_skbaddr = bus_map_single(sc->sc_bdev, |
| | 6776 | skb->data, pktlen, BUS_DMA_TODEVICE); |
| | 6777 | DPRINTF(sc, ATH_DEBUG_XMIT, "%s: skb %p [data %p len %u] skbaddr %llx\n", |
| | 6778 | __func__, skb, skb->data, skb->len, ito64(bf->bf_skbaddr)); |
| | 6779 | #else /* ATH_SUPERG_FF case */ |
| | 6780 | /* NB: ensure skb->len had been updated for each skb so we don't need pktlen */ |
| | 6781 | { |
| | 6782 | struct sk_buff *skbtmp = skb; |
| | 6783 | int i = 0; |
| | 6784 | |
| | 6785 | bf->bf_skbaddr = bus_map_single(sc->sc_bdev, |
| | 6786 | skb->data, skb->len, BUS_DMA_TODEVICE); |
| | 6787 | DPRINTF(sc, ATH_DEBUG_XMIT, "%s: skb%d %p [data %p len %u] skbaddr %llx\n", |
| | 6788 | __func__, i, skb, skb->data, skb->len, ito64(bf->bf_skbaddr)); |
| | 6789 | while ((skbtmp = skbtmp->next)) { |
| | 6790 | bf->bf_skbaddrff[i++] = bus_map_single(sc->sc_bdev, |
| | 6791 | skbtmp->data, skbtmp->len, BUS_DMA_TODEVICE); |
| | 6792 | DPRINTF(sc, ATH_DEBUG_XMIT, "%s: skb%d %p [data %p len %u] skbaddr %llx\n", |
| | 6793 | __func__, i, skbtmp, skbtmp->data, skbtmp->len, |
| | 6794 | ito64(bf->bf_skbaddrff[i-1])); |
| | 6795 | } |
| | 6796 | bf->bf_numdesc = i + 1; |
| | 6797 | } |
| | 6798 | #endif /* ATH_SUPERG_FF */ |
| | 6799 | bf->bf_skb = skb; |
| | 6800 | bf->bf_node = ni; |
| | 6801 | |
| | 6802 | /* setup descriptors */ |
| | 6803 | ds = bf->bf_desc; |
| | 6804 | #ifdef ATH_SUPERG_XR |
| | 6805 | if(vap->iv_flags & IEEE80211_F_XR ) |
| | 6806 | rt = sc->sc_xr_rates; |
| | 6807 | else |
| | 6808 | rt = sc->sc_currates; |
| | 6809 | #else |
| | 6810 | rt = sc->sc_currates; |
| | 6811 | #endif |
| | 6812 | KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); |
| | 6813 | |
| | 6814 | /* |
| | 6815 | * NB: the 802.11 layer marks whether or not we should |
| | 6816 | * use short preamble based on the current mode and |
| | 6817 | * negotiated parameters. |
| | 6818 | */ |
| | 6819 | if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && |
| | 6820 | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { |
| | 6821 | shortPreamble = AH_TRUE; |
| | 6822 | sc->sc_stats.ast_tx_shortpre++; |
| | 6823 | } else |
| | 6824 | shortPreamble = AH_FALSE; |
| | 6825 | |
| | 6826 | an = ATH_NODE(ni); |
| | 6827 | flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ |
| | 6828 | /* |
| | 6829 | * Calculate Atheros packet type from IEEE80211 packet header, |
| | 6830 | * setup for rate calculations, and select h/w transmit queue. |
| | 6831 | */ |
| | 6832 | switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { |
| | 6833 | case IEEE80211_FC0_TYPE_MGT: |
| | 6834 | subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; |
| | 6835 | if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) |
| | 6836 | atype = HAL_PKT_TYPE_BEACON; |
| | 6837 | else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) |
| | 6838 | atype = HAL_PKT_TYPE_PROBE_RESP; |
| | 6839 | else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) |
| | 6840 | atype = HAL_PKT_TYPE_ATIM; |
| | 6841 | else |
| | 6842 | atype = HAL_PKT_TYPE_NORMAL; /* XXX */ |
| | 6843 | rix = sc->sc_minrateix; |
| | 6844 | txrate = rt->info[rix].rateCode; |
| | 6845 | if (shortPreamble) |
| | 6846 | txrate |= rt->info[rix].shortPreamble; |
| | 6847 | try0 = ATH_TXMAXTRY; |
| | 6848 | |
| | 6849 | if (ni->ni_flags & IEEE80211_NODE_QOS) { |
| | 6850 | /* NB: force all management frames to highest queue */ |
| | 6851 | txq = sc->sc_ac2q[WME_AC_VO]; |
| | 6852 | } else |
| | 6853 | txq = sc->sc_ac2q[WME_AC_BE]; |
| | 6854 | break; |
| | 6855 | case IEEE80211_FC0_TYPE_CTL: |
| | 6856 | atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */ |
| | 6857 | rix = sc->sc_minrateix; |
| | 6858 | txrate = rt->info[rix].rateCode; |
| | 6859 | if (shortPreamble) |
| | 6860 | txrate |= rt->info[rix].shortPreamble; |
| | 6861 | try0 = ATH_TXMAXTRY; |
| | 6862 | |
| | 6863 | if (ni->ni_flags & IEEE80211_NODE_QOS) { |
| | 6864 | /* NB: force all ctl frames to highest queue */ |
| | 6865 | txq = sc->sc_ac2q[WME_AC_VO]; |
| | 6866 | } else |
| | 6867 | txq = sc->sc_ac2q[WME_AC_BE]; |
| | 6868 | break; |
| | 6869 | case IEEE80211_FC0_TYPE_DATA: |
| | 6870 | atype = HAL_PKT_TYPE_NORMAL; /* default */ |
| | 6871 | |
| | 6872 | if (ismcast) { |
| | 6873 | rix = ath_tx_findindex(rt, vap->iv_mcast_rate); |
| | 6874 | txrate = rt->info[rix].rateCode; |
| | 6875 | if (shortPreamble) |
| | 6876 | txrate |= rt->info[rix].shortPreamble; |
| | 6877 | /* |
| | 6878 | * ATH_TXMAXTRY disables Multi-rate retries, which |
| | 6879 | * isn't applicable to mcast packets and overrides |
| | 6880 | * the desired transmission rate for mcast traffic. |
| | 6881 | */ |
| | 6882 | try0 = ATH_TXMAXTRY; |
| | 6883 | } else { |
| | 6884 | /* |
| | 6885 | * Data frames; consult the rate control module. |
| | 6886 | */ |
| | 6887 | sc->sc_rc->ops->findrate(sc, an, shortPreamble, skb->len, |
| | 6888 | &rix, &try0, &txrate); |
| | 6889 | |
| | 6890 | /* Note: HAL does not support distinguishing between short |
| | 6891 | * and long retry. These both are set via try0 here then. |
| | 6892 | * In the openhal we'll fix this ;) */ |
| | 6893 | if (vap->iv_flags & IEEE80211_F_SWRETRY && vap->iv_txmax != try0) |
| | 6894 | try0 = vap->iv_txmax; |
| | 6895 | |
| | 6896 | /* Ratecontrol sometimes returns invalid rate index */ |
| | 6897 | if (rix != 0xff) |
| | 6898 | an->an_prevdatarix = rix; |
| | 6899 | else |
| | 6900 | rix = an->an_prevdatarix; |
| | 6901 | } |
| | 6902 | |
| | 6903 | if (M_FLAG_GET(skb, M_UAPSD)) { |
| | 6904 | /* U-APSD frame, handle txq later */ |
| | 6905 | break; |
| | 6906 | } |
| | 6907 | |
| | 6908 | /* |
| | 6909 | * Default all non-QoS traffic to the best-effort queue. |
| | 6910 | */ |
| | 6911 | if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) { |
| | 6912 | /* XXX validate skb->priority, remove mask */ |
| | 6913 | txq = sc->sc_ac2q[skb->priority & 0x3]; |
| | 6914 | if (ic->ic_wme.wme_wmeChanParams.cap_wmeParams[skb->priority].wmep_noackPolicy) { |
| | 6915 | flags |= HAL_TXDESC_NOACK; |
| | 6916 | sc->sc_stats.ast_tx_noack++; |
| | 6917 | } |
| | 6918 | } else |
| | 6919 | txq = sc->sc_ac2q[WME_AC_BE]; |
| | 6920 | break; |
| | 6921 | default: |
| | 6922 | printk("%s: bogus frame type 0x%x (%s)\n", dev->name, |
| | 6923 | wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); |
| | 6924 | /* XXX statistic */ |
| | 6925 | return -EIO; |
| | 6926 | } |
| | 6927 | |
| | 6928 | #ifdef ATH_SUPERG_XR |
| | 6929 | if (vap->iv_flags & IEEE80211_F_XR ) { |
| | 6930 | txq = sc->sc_xrtxq; |
| | 6931 | if (!txq) |
| | 6932 | txq = sc->sc_ac2q[WME_AC_BK]; |
| | 6933 | flags |= HAL_TXDESC_CTSENA; |
| | 6934 | cix = rt->info[sc->sc_protrix].controlRate; |
| | 6935 | } |
| | 6936 | #endif |
| | 6937 | /* |
| | 6938 | * When servicing one or more stations in power-save mode (or) |
| | 6939 | * if there is some mcast data waiting on mcast queue |
| | 6940 | * (to prevent out of order delivery of mcast,bcast packets) |
| | 6941 | * multicast frames must be buffered until after the beacon. |
| | 6942 | * We use the private mcast queue for that. |
| | 6943 | */ |
| | 6944 | if (ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) { |
| | 6945 | txq = &avp->av_mcastq; |
| | 6946 | /* XXX? more bit in 802.11 frame header */ |
| | 6947 | } |
| | 6948 | |
| | 6949 | /* |
| | 6950 | * Calculate miscellaneous flags. |
| | 6951 | */ |
| | 6952 | if (ismcast) { |
| | 6953 | flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ |
| | 6954 | sc->sc_stats.ast_tx_noack++; |
| | 6955 | try0 = ATH_TXMAXTRY; /* turn off multi-rate retry for multicast traffic */ |
| | 6956 | } else if (pktlen > vap->iv_rtsthreshold) { |
| | 6957 | #ifdef ATH_SUPERG_FF |
| | 6958 | /* we could refine to only check that the frame of interest |
| | 6959 | * is a FF, but this seems inconsistent. |
| | 6960 | */ |
| | 6961 | if (!(vap->iv_ath_cap & ni->ni_ath_flags & IEEE80211_ATHC_FF)) { |
| | 6962 | #endif |
| | 6963 | flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ |
| | 6964 | cix = rt->info[rix].controlRate; |
| | 6965 | sc->sc_stats.ast_tx_rts++; |
| | 6966 | #ifdef ATH_SUPERG_FF |
| | 6967 | } |
| | 6968 | #endif |
| | 6969 | } |
| | 6970 | |
| | 6971 | /* |
| | 6972 | * If 802.11g protection is enabled, determine whether |
| | 6973 | * to use RTS/CTS or just CTS. Note that this is only |
| | 6974 | * done for OFDM unicast frames. |
| | 6975 | */ |
| | 6976 | if ((ic->ic_flags & IEEE80211_F_USEPROT) && |
| | 6977 | rt->info[rix].phy == IEEE80211_T_OFDM && |
| | 6978 | (flags & HAL_TXDESC_NOACK) == 0) { |
| | 6979 | /* XXX fragments must use CCK rates w/ protection */ |
| | 6980 | if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) |
| | 6981 | flags |= HAL_TXDESC_RTSENA; |
| | 6982 | else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) |
| | 6983 | flags |= HAL_TXDESC_CTSENA; |
| | 6984 | |
| | 6985 | if (istxfrag) |
| | 6986 | /* |
| | 6987 | ** if Tx fragment, it would be desirable to |
| | 6988 | ** use highest CCK rate for RTS/CTS. |
| | 6989 | ** However, stations farther away may detect it |
| | 6990 | ** at a lower CCK rate. Therefore, use the |
| | 6991 | ** configured protect rate, which is 2 Mbps |
| | 6992 | ** for 11G. |
| | 6993 | */ |
| | 6994 | cix = rt->info[sc->sc_protrix].controlRate; |
| | 6995 | else |
| | 6996 | cix = rt->info[sc->sc_protrix].controlRate; |
| | 6997 | sc->sc_stats.ast_tx_protect++; |
| | 6998 | } |
| | 6999 | |
| | 7000 | /* |
| | 7001 | * Calculate duration. This logically belongs in the 802.11 |
| | 7002 | * layer but it lacks sufficient information to calculate it. |
| | 7003 | */ |
| | 7004 | if ((flags & HAL_TXDESC_NOACK) == 0 && |
| | 7005 | (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { |
| | 7006 | u_int16_t dur; |
| | 7007 | /* |
| | 7008 | * XXX not right with fragmentation. |
| | 7009 | */ |
| | 7010 | if (shortPreamble) |
| | 7011 | dur = rt->info[rix].spAckDuration; |
| | 7012 | else |
| | 7013 | dur = rt->info[rix].lpAckDuration; |
| | 7014 | |
| | 7015 | if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { |
| | 7016 | dur += dur; /* Add additional 'SIFS + ACK' */ |
| | 7017 | |
| | 7018 | /* |
| | 7019 | ** Compute size of next fragment in order to compute |
| | 7020 | ** durations needed to update NAV. |
| | 7021 | ** The last fragment uses the ACK duration only. |
| | 7022 | ** Add time for next fragment. |
| | 7023 | */ |
| | 7024 | dur += ath_hal_computetxtime(ah, rt, nextfraglen, |
| | 7025 | rix, shortPreamble); |
| | 7026 | } |
| | 7027 | |
| | 7028 | if (istxfrag) { |
| | 7029 | /* |
| | 7030 | ** Force hardware to use computed duration for next |
| | 7031 | ** fragment by disabling multi-rate retry, which |
| | 7032 | ** updates duration based on the multi-rate |
| | 7033 | ** duration table. |
| | 7034 | */ |
| | 7035 | try0 = ATH_TXMAXTRY; |
| | 7036 | } |
| | 7037 | |
| | 7038 | wh->i_dur = cpu_to_le16(dur); |
| | 7039 | } |
| | 7040 | |
| | 7041 | /* |
| | 7042 | * Calculate RTS/CTS rate and duration if needed. |
| | 7043 | */ |
| | 7044 | ctsduration = 0; |
| | 7045 | if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) { |
| | 7046 | /* |
| | 7047 | * CTS transmit rate is derived from the transmit rate |
| | 7048 | * by looking in the h/w rate table. We must also factor |
| | 7049 | * in whether or not a short preamble is to be used. |
| | 7050 | */ |
| | 7051 | /* NB: cix is set above where RTS/CTS is enabled */ |
| | 7052 | KASSERT(cix != 0xff, ("cix not setup")); |
| | 7053 | ctsrate = rt->info[cix].rateCode; |
| | 7054 | /* |
| | 7055 | * Compute the transmit duration based on the frame |
| | 7056 | * size and the size of an ACK frame. We call into the |
| | 7057 | * HAL to do the computation since it depends on the |
| | 7058 | * characteristics of the actual PHY being used. |
| | 7059 | * |
| | 7060 | * NB: CTS is assumed the same size as an ACK so we can |
| | 7061 | * use the precalculated ACK durations. |
| | 7062 | */ |
| | 7063 | if (shortPreamble) { |
| | 7064 | ctsrate |= rt->info[cix].shortPreamble; |
| | 7065 | if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ |
| | 7066 | ctsduration += rt->info[cix].spAckDuration; |
| | 7067 | ctsduration += ath_hal_computetxtime(ah, |
| | 7068 | rt, pktlen, rix, AH_TRUE); |
| | 7069 | if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ |
| | 7070 | ctsduration += rt->info[rix].spAckDuration; |
| | 7071 | } else { |
| | 7072 | if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ |
| | 7073 | ctsduration += rt->info[cix].lpAckDuration; |
| | 7074 | ctsduration += ath_hal_computetxtime(ah, |
| | 7075 | rt, pktlen, rix, AH_FALSE); |
| | 7076 | if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ |
| | 7077 | ctsduration += rt->info[rix].lpAckDuration; |
| | 7078 | } |
| | 7079 | /* |
| | 7080 | * Must disable multi-rate retry when using RTS/CTS. |
| | 7081 | */ |
| | 7082 | try0 = ATH_TXMAXTRY; |
| | 7083 | } else |
| | 7084 | ctsrate = 0; |
| | 7085 | |
| | 7086 | if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) |
| | 7087 | /* FFXXX: need multi-skb version to dump entire FF */ |
| | 7088 | ieee80211_dump_pkt(ic, skb->data, skb->len, |
| | 7089 | sc->sc_hwmap[txrate].ieeerate, -1); |
| | 7090 | |
| | 7091 | /* |
| | 7092 | * Determine if a tx interrupt should be generated for |
| | 7093 | * this descriptor. We take a tx interrupt to reap |
| | 7094 | * descriptors when the h/w hits an EOL condition or |
| | 7095 | * when the descriptor is specifically marked to generate |
| | 7096 | * an interrupt. We periodically mark descriptors in this |
| | 7097 | * way to ensure timely replenishing of the supply needed |
| | 7098 | * for sending frames. Deferring interrupts reduces system |
| | 7099 | * load and potentially allows more concurrent work to be |
| | 7100 | * done, but if done too aggressively, it can cause senders |
| | 7101 | * to backup. |
| | 7102 | * |
| | 7103 | * NB: use >= to deal with sc_txintrperiod changing |
| | 7104 | * dynamically through sysctl. |
| | 7105 | */ |
| | 7106 | if (!M_FLAG_GET(skb, M_UAPSD) && |
| | 7107 | ++txq->axq_intrcnt >= sc->sc_txintrperiod) { |
| | 7108 | flags |= HAL_TXDESC_INTREQ; |
| | 7109 | txq->axq_intrcnt = 0; |
| | 7110 | } |
| | 7111 | |
| | 7112 | #ifdef ATH_SUPERG_COMP |
| | 7113 | if (ATH_NODE(ni)->an_decomp_index != INVALID_DECOMP_INDEX && |
| | 7114 | !ismcast && |
| | 7115 | ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_DATA) && |
| | 7116 | ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) != IEEE80211_FC0_SUBTYPE_NODATA)) { |
| | 7117 | if (pktlen > ATH_COMP_THRESHOLD) |
| | 7118 | comp = ATH_COMP_PROC_COMP_OPTIMAL; |
| | 7119 | else |
| | 7120 | comp = ATH_COMP_PROC_NO_COMP_ADD_CCS; |
| | 7121 | } |
| | 7122 | #endif |
| | 7123 | |
| | 7124 | /* |
| | 7125 | * sc_txantenna == 0 means transmit diversity mode. |
| | 7126 | * sc_txantenna == 1 or sc_txantenna == 2 means the user has selected |
| | 7127 | * the first or second antenna port. |
| | 7128 | * If the user has set the txantenna, use it for multicast frames too. |
| | 7129 | */ |
| | 7130 | if (ismcast && !sc->sc_txantenna) { |
| | 7131 | antenna = sc->sc_mcastantenna + 1; |
| | 7132 | sc->sc_mcastantenna = (sc->sc_mcastantenna + 1) & 0x1; |
| | 7133 | } else |
| | 7134 | antenna = sc->sc_txantenna; |
| | 7135 | |
| | 7136 | /* |
| | 7137 | * Formulate first tx descriptor with tx controls. |
| | 7138 | */ |
| | 7139 | /* XXX check return value? */ |
| | 7140 | ath_hal_setuptxdesc(ah, ds |
| | 7141 | , pktlen /* packet length */ |
| | 7142 | , hdrlen /* header length */ |
| | 7143 | , atype /* Atheros packet type */ |
| | 7144 | , MIN(ni->ni_txpower, 60)/* txpower */ |
| | 7145 | , txrate, try0 /* series 0 rate/tries */ |
| | 7146 | , keyix /* key cache index */ |
| | 7147 | , antenna /* antenna mode */ |
| | 7148 | , flags /* flags */ |
| | 7149 | , ctsrate /* rts/cts rate */ |
| | 7150 | , ctsduration /* rts/cts duration */ |
| | 7151 | , icvlen /* comp icv len */ |
| | 7152 | , ivlen /* comp iv len */ |
| | 7153 | , comp /* comp scheme */ |
| | 7154 | ); |
| | 7155 | bf->bf_flags = flags; /* record for post-processing */ |
| | 7156 | |
| | 7157 | /* |
| | 7158 | * Setup the multi-rate retry state only when we're |
| | 7159 | * going to use it. This assumes ath_hal_setuptxdesc |
| | 7160 | * initializes the descriptors (so we don't have to) |
| | 7161 | * when the hardware supports multi-rate retry and |
| | 7162 | * we don't use it. |
| | 7163 | */ |
| | 7164 | if (try0 != ATH_TXMAXTRY) |
| | 7165 | sc->sc_rc->ops->setupxtxdesc(sc, an, ds, shortPreamble, |
| | 7166 | skb->len, rix); |
| | 7167 | |
| | 7168 | #ifndef ATH_SUPERG_FF |
| | 7169 | ds->ds_link = 0; |
| | 7170 | ds->ds_data = bf->bf_skbaddr; |
| | 7171 | |
| | 7172 | ath_hal_filltxdesc(ah, ds |
| | 7173 | , skb->len /* segment length */ |
| | 7174 | , AH_TRUE /* first segment */ |
| | 7175 | , AH_TRUE /* last segment */ |
| | 7176 | , ds /* first descriptor */ |
| | 7177 | ); |
| | 7178 | |
| | 7179 | /* NB: The desc swap function becomes void, |
| | 7180 | * if descriptor swapping is not enabled |
| | 7181 | */ |
| | 7182 | ath_desc_swap(ds); |
| | 7183 | |
| | 7184 | DPRINTF(sc, ATH_DEBUG_XMIT, "%s: Q%d: %08x %08x %08x %08x %08x %08x\n", |
| | 7185 | __func__, M_FLAG_GET(skb, M_UAPSD) ? 0 : txq->axq_qnum, ds->ds_link, ds->ds_data, |
| | 7186 | ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]); |
| | 7187 | #else /* ATH_SUPERG_FF */ |
| | 7188 | { |
| | 7189 | struct sk_buff *skbtmp = skb; |
| | 7190 | struct ath_desc *ds0 = ds; |
| | 7191 | int i; |
| | 7192 | |
| | 7193 | ds->ds_data = bf->bf_skbaddr; |
| | 7194 | ds->ds_link = (skb->next == NULL) ? 0 : bf->bf_daddr + sizeof(*ds); |
| | 7195 | |
| | 7196 | ath_hal_filltxdesc(ah, ds |
| | 7197 | , skbtmp->len /* segment length */ |
| | 7198 | , AH_TRUE /* first segment */ |
| | 7199 | , skbtmp->next == NULL /* last segment */ |
| | 7200 | , ds /* first descriptor */ |
| | 7201 | ); |
| | 7202 | |
| | 7203 | /* NB: The desc swap function becomes void, |
| | 7204 | * if descriptor swapping is not enabled |
| | 7205 | */ |
| | 7206 | ath_desc_swap(ds); |
| | 7207 | |
| | 7208 | DPRINTF(sc, ATH_DEBUG_XMIT, "%s: Q%d: (ds)%p (lk)%08x (d)%08x (c0)%08x (c1)%08x %08x %08x\n", |
| | 7209 | __func__, M_FLAG_GET(skb, M_UAPSD) ? 0 : txq->axq_qnum, |
| | 7210 | ds, ds->ds_link, ds->ds_data, ds->ds_ctl0, ds->ds_ctl1, |
| | 7211 | ds->ds_hw[0], ds->ds_hw[1]); |
| | 7212 | for (i= 0, skbtmp = skbtmp->next; i < bf->bf_numdesc - 1; i++, skbtmp = skbtmp->next) { |
| | 7213 | ds++; |
| | 7214 | ds->ds_link = skbtmp->next == NULL ? 0 : bf->bf_daddr + sizeof(*ds) * (i + 2); |
| | 7215 | ds->ds_data = bf->bf_skbaddrff[i]; |
| | 7216 | ath_hal_filltxdesc(ah, ds |
| | 7217 | , skbtmp->len /* segment length */ |
| | 7218 | , AH_FALSE /* first segment */ |
| | 7219 | , skbtmp->next == NULL /* last segment */ |
| | 7220 | , ds0 /* first descriptor */ |
| | 7221 | ); |
| | 7222 | |
| | 7223 | /* NB: The desc swap function becomes void, |
| | 7224 | * if descriptor swapping is not enabled |
| | 7225 | */ |
| | 7226 | ath_desc_swap(ds); |
| | 7227 | |
| | 7228 | DPRINTF(sc, ATH_DEBUG_XMIT, "%s: Q%d: %08x %08x %08x %08x %08x %08x\n", |
| | 7229 | __func__, M_FLAG_GET(skb, M_UAPSD) ? 0 : txq->axq_qnum, |
| | 7230 | ds->ds_link, ds->ds_data, ds->ds_ctl0, |
| | 7231 | ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]); |
| | 7232 | } |
| | 7233 | } |
| | 7234 | #endif |
| | 7235 | |
| | 7236 | if (M_FLAG_GET(skb, M_UAPSD)) { |
| | 7237 | /* must lock against interrupt-time processing (i.e., not just tasklet) */ |
| | 7238 | ATH_NODE_UAPSD_LOCK_IRQ(an); |
| | 7239 | DPRINTF(sc, ATH_DEBUG_UAPSD, "%s: Qing U-APSD data frame for node %s \n", |
| | 7240 | __func__, ether_sprintf(an->an_node.ni_macaddr)); |
| | 7241 | ath_tx_uapsdqueue(sc, an, bf); |
| | 7242 | if (IEEE80211_NODE_UAPSD_USETIM(ni) && (an->an_uapsd_qdepth == 1)) |
| | 7243 | vap->iv_set_tim(ni, 1); |
| | 7244 | ATH_NODE_UAPSD_UNLOCK_IRQ(an); |
| | 7245 | |
| | 7246 | return 0; |
| | 7247 | } |
| | 7248 | |
| | 7249 | |
| | 7250 | IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "%s: %p<%s> refcnt %d\n", |
| | 7251 | __func__, vap->iv_bss, ether_sprintf(vap->iv_bss->ni_macaddr), |
| | 7252 | ieee80211_node_refcnt(vap->iv_bss)); |
| | 7253 | |
| | 7254 | |
| | 7255 | ath_tx_txqaddbuf(sc, ni, txq, bf, ds, pktlen); |
| | 7256 | return 0; |
| | 7257 | #undef MIN |
| | 7258 | } |
| | 7259 | |
| | 7260 | /* |
| | 7261 | * Process completed xmit descriptors from the specified queue. |
| | 7262 | * Should only be called from tasklet context |
| | 7263 | */ |
| | 7264 | static void |
| | 7265 | ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) |
| | 7266 | { |
| | 7267 | struct ath_hal *ah = sc->sc_ah; |
| | 7268 | struct ath_buf *bf = NULL; |
| | 7269 | struct ath_desc *ds = NULL; |
| | 7270 | struct ieee80211_node *ni = NULL; |
| | 7271 | struct ath_node *an = NULL; |
| | 7272 | int sr, lr; |
| | 7273 | HAL_STATUS status; |
| | 7274 | int uapsdq = 0; |
| | 7275 | unsigned long uapsdq_lockflags = 0; |
| | 7276 | |
| | 7277 | DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %d (0x%x), link %p\n", __func__, |
| | 7278 | txq->axq_qnum, ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), |
| | 7279 | txq->axq_link); |
| | 7280 | |
| | 7281 | if (txq == sc->sc_uapsdq) { |
| | 7282 | DPRINTF(sc, ATH_DEBUG_UAPSD, "%s: reaping U-APSD txq\n", __func__); |
| | 7283 | uapsdq = 1; |
| | 7284 | } |
| | 7285 | |
| | 7286 | for (;;) { |
| | 7287 | if (uapsdq) |
| | 7288 | ATH_TXQ_UAPSDQ_LOCK_IRQ(txq); |
| | 7289 | else |
| | 7290 | ATH_TXQ_LOCK(txq); |
| | 7291 | txq->axq_intrcnt = 0; /* reset periodic desc intr count */ |
| | 7292 | bf = STAILQ_FIRST(&txq->axq_q); |
| | 7293 | if (bf == NULL) { |
| | 7294 | txq->axq_link = NULL; |
| | 7295 | if (uapsdq) |
| | 7296 | ATH_TXQ_UAPSDQ_UNLOCK_IRQ(txq); |
| | 7297 | else |
| | 7298 | ATH_TXQ_UNLOCK(txq); |
| | 7299 | break; |
| | 7300 | } |
| | 7301 | |
| | 7302 | #ifdef ATH_SUPERG_FF |
| | 7303 | ds = &bf->bf_desc[bf->bf_numdesc - 1]; |
| | 7304 | DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: frame's last desc: %p\n", |
| | 7305 | __func__, ds); |
| | 7306 | #else |
| | 7307 | ds = bf->bf_desc; /* NB: last descriptor */ |
| | 7308 | #endif |
| | 7309 | status = ath_hal_txprocdesc(ah, ds); |
| | 7310 | #ifdef AR_DEBUG |
| | 7311 | if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) |
| | 7312 | ath_printtxbuf(bf, status == HAL_OK); |
| | 7313 | #endif |
| | 7314 | if (status == HAL_EINPROGRESS) { |
| | 7315 | if (uapsdq) |
| | 7316 | ATH_TXQ_UAPSDQ_UNLOCK_IRQ(txq); |
| | 7317 | else |
| | 7318 | ATH_TXQ_UNLOCK(txq); |
| | 7319 | break; |
| | 7320 | } |
| | 7321 | |
| | 7322 | ATH_TXQ_REMOVE_HEAD(txq, bf_list); |
| | 7323 | if (uapsdq) |
| | 7324 | ATH_TXQ_UAPSDQ_UNLOCK_IRQ(txq); |
| | 7325 | else |
| | 7326 | ATH_TXQ_UNLOCK(txq); |
| | 7327 | |
| | 7328 | ni = bf->bf_node; |
| | 7329 | if (ni != NULL) { |
| | 7330 | an = ATH_NODE(ni); |
| | 7331 | if (ds->ds_txstat.ts_status == 0) { |
| | 7332 | u_int8_t txant = ds->ds_txstat.ts_antenna; |
| | 7333 | sc->sc_stats.ast_ant_tx[txant]++; |
| | 7334 | sc->sc_ant_tx[txant]++; |
| | 7335 | #ifdef ATH_SUPERG_FF |
| | 7336 | if (bf->bf_numdesc > 1) |
| | 7337 | ni->ni_vap->iv_stats.is_tx_ffokcnt++; |
| | 7338 | #endif |
| | 7339 | if (ds->ds_txstat.ts_rate & HAL_TXSTAT_ALTRATE) |
| | 7340 | sc->sc_stats.ast_tx_altrate++; |
| | 7341 | sc->sc_stats.ast_tx_rssi = |
| | 7342 | ds->ds_txstat.ts_rssi; |
| | 7343 | ATH_RSSI_LPF(an->an_halstats.ns_avgtxrssi, |
| | 7344 | ds->ds_txstat.ts_rssi); |
| | 7345 | if (bf->bf_skb->priority == WME_AC_VO || |
| | 7346 | bf->bf_skb->priority == WME_AC_VI) |
| | 7347 | ni->ni_ic->ic_wme.wme_hipri_traffic++; |
| | 7348 | ni->ni_inact = ni->ni_inact_reload; |
| | 7349 | } else { |
| | 7350 | #ifdef ATH_SUPERG_FF |
| | 7351 | if (bf->bf_numdesc > 1) |
| | 7352 | ni->ni_vap->iv_stats.is_tx_fferrcnt++; |
| | 7353 | #endif |
| | 7354 | if (ds->ds_txstat.ts_status & HAL_TXERR_XRETRY) { |
| | 7355 | sc->sc_stats.ast_tx_xretries++; |
| | 7356 | if (ni->ni_flags & IEEE80211_NODE_UAPSD_TRIG) { |
| | 7357 | ni->ni_stats.ns_tx_eosplost++; |
| | 7358 | DPRINTF(sc, ATH_DEBUG_UAPSD, |
| | 7359 | "%s: frame in SP retried out, possible EOSP stranded!!!\n", |
| | 7360 | __func__); |
| | 7361 | } |
| | 7362 | } |
| | 7363 | if (ds->ds_txstat.ts_status & HAL_TXERR_FIFO) |
| | 7364 | sc->sc_stats.ast_tx_fifoerr++; |
| | 7365 | if (ds->ds_txstat.ts_status & HAL_TXERR_FILT) |
| | 7366 | sc->sc_stats.ast_tx_filtered++; |
| | 7367 | } |
| | 7368 | sr = ds->ds_txstat.ts_shortretry; |
| | 7369 | lr = ds->ds_txstat.ts_longretry; |
| | 7370 | sc->sc_stats.ast_tx_shortretry += sr; |
| | 7371 | sc->sc_stats.ast_tx_longretry += lr; |
| | 7372 | /* |
| | 7373 | * Hand the descriptor to the rate control algorithm |
| | 7374 | * if the frame wasn't dropped for filtering or sent |
| | 7375 | * w/o waiting for an ack. In those cases the rssi |
| | 7376 | * and retry counts will be meaningless. |
| | 7377 | */ |
| | 7378 | if ((ds->ds_txstat.ts_status & HAL_TXERR_FILT) == 0 && |
| | 7379 | (bf->bf_flags & HAL_TXDESC_NOACK) == 0) |
| | 7380 | sc->sc_rc->ops->tx_complete(sc, an, ds); |
| | 7381 | /* |
| | 7382 | * Reclaim reference to node. |
| | 7383 | * |
| | 7384 | * NB: the node may be reclaimed here if, for example |
| | 7385 | * this is a DEAUTH message that was sent and the |
| | 7386 | * node was timed out due to inactivity. |
| | 7387 | */ |
| | 7388 | ieee80211_free_node(ni); |
| | 7389 | } |
| | 7390 | |
| | 7391 | bus_unmap_single(sc->sc_bdev, bf->bf_skbaddr, |
| | 7392 | bf->bf_skb->len, BUS_DMA_TODEVICE); |
| | 7393 | if (ni && uapsdq) { |
| | 7394 | /* detect EOSP for this node */ |
| | 7395 | struct ieee80211_qosframe *qwh = (struct ieee80211_qosframe *)bf->bf_skb->data; |
| | 7396 | an = ATH_NODE(ni); |
| | 7397 | KASSERT(ni != NULL, ("Processing U-APSD txq for ath_buf with no node!\n")); |
| | 7398 | if (qwh->i_qos[0] & IEEE80211_QOS_EOSP) { |
| | 7399 | DPRINTF(sc, ATH_DEBUG_UAPSD, "%s: EOSP detected for node (%s) on desc %p\n", |
| | 7400 | __func__, ether_sprintf(ni->ni_macaddr), ds); |
| | 7401 | ATH_NODE_UAPSD_LOCK_IRQ(an); |
| | 7402 | ni->ni_flags &= ~IEEE80211_NODE_UAPSD_SP; |
| | 7403 | if (an->an_uapsd_qdepth == 0 && an->an_uapsd_overflowqdepth != 0) { |
| | 7404 | STAILQ_CONCAT(&an->an_uapsd_q, &an->an_uapsd_overflowq); |
| | 7405 | an->an_uapsd_qdepth = an->an_uapsd_overflowqdepth; |
| | 7406 | an->an_uapsd_overflowqdepth = 0; |
| | 7407 | } |
| | 7408 | ATH_NODE_UAPSD_UNLOCK_IRQ(an); |
| | 7409 | } |
| | 7410 | } |
| | 7411 | |
| | 7412 | { |
| | 7413 | struct ieee80211_frame *wh = (struct ieee80211_frame *)bf->bf_skb->data; |
| | 7414 | if ((ds->ds_txstat.ts_seqnum << IEEE80211_SEQ_SEQ_SHIFT) & ~IEEE80211_SEQ_SEQ_MASK) { |
| | 7415 | DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: h/w assigned sequence number is not sane (%d), ignoring it\n", __func__, |
| | 7416 | ds->ds_txstat.ts_seqnum); |
| | 7417 | } else { |
| | 7418 | DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: updating frame's sequence number from %d to %d\n", __func__, |
| | 7419 | (le16toh(*(__le16 *)&wh->i_seq[0]) & IEEE80211_SEQ_SEQ_MASK) >> IEEE80211_SEQ_SEQ_SHIFT, |
| | 7420 | ds->ds_txstat.ts_seqnum); |
| | 7421 | |
| | 7422 | *(__le16 *)&wh->i_seq[0] = htole16( |
| | 7423 | ds->ds_txstat.ts_seqnum << IEEE80211_SEQ_SEQ_SHIFT | |
| | 7424 | (le16toh(*(__le16 *)&wh->i_seq[0]) & ~IEEE80211_SEQ_SEQ_MASK)); |
| | 7425 | } |
| | 7426 | } |
| | 7427 | |
| | 7428 | #ifdef ATH_SUPERG_FF |
| | 7429 | { |
| | 7430 | struct sk_buff *skbfree, *skb = bf->bf_skb; |
| | 7431 | int i; |
| | 7432 | |
| | 7433 | skbfree = skb; |
| | 7434 | skb = skb->next; |
| | 7435 | DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: free skb %p\n", |
| | 7436 | __func__, skbfree); |
| | 7437 | ath_tx_capture(sc->sc_dev, ds, skbfree); |
| | 7438 | for (i = 1; i < bf->bf_numdesc; i++) { |
| | 7439 | bus_unmap_single(sc->sc_bdev, bf->bf_skbaddrff[i-1], |
| | 7440 | bf->bf_skb->len, BUS_DMA_TODEVICE); |
| | 7441 | skbfree = skb; |
| | 7442 | skb = skb->next; |
| | 7443 | DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: free skb %p\n", |
| | 7444 | __func__, skbfree); |
| | 7445 | ath_tx_capture(sc->sc_dev, ds, skbfree); |
| | 7446 | } |
| | 7447 | } |
| | 7448 | bf->bf_numdesc = 0; |
| | 7449 | #else |
| | 7450 | DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: free skb %p\n", __func__, bf->bf_skb); |
| | 7451 | ath_tx_capture(sc->sc_dev, ds, bf->bf_skb); |
| | 7452 | #endif |
| | 7453 | bf->bf_skb = NULL; |
| | 7454 | bf->bf_node = NULL; |
| | 7455 | |
| | 7456 | ATH_TXBUF_LOCK_IRQ(sc); |
| | 7457 | STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); |
| | 7458 | if (sc->sc_devstopped) { |
| | 7459 | ++sc->sc_reapcount; |
| | 7460 | if (sc->sc_reapcount > ATH_TXBUF_FREE_THRESHOLD) { |
| | 7461 | if (!sc->sc_dfswait) |
| | 7462 | netif_start_queue(sc->sc_dev); |
| | 7463 | DPRINTF(sc, ATH_DEBUG_TX_PROC, |
| | 7464 | "%s: tx tasklet restart the queue\n", |
| | 7465 | __func__); |
| | 7466 | sc->sc_reapcount = 0; |
| | 7467 | sc->sc_devstopped = 0; |
| | 7468 | } else |
| | 7469 | ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, NULL); |
| | 7470 | } |
| | 7471 | ATH_TXBUF_UNLOCK_IRQ(sc); |
| | 7472 | } |
| | 7473 | #ifdef ATH_SUPERG_FF |
| | 7474 | /* flush ff staging queue if buffer low */ |
| | 7475 | if (txq->axq_depth <= sc->sc_fftxqmin - 1) { |
| | 7476 | /* NB: consider only flushing a preset number based on age. */ |
| | 7477 | ath_ffstageq_flush(sc, txq, ath_ff_neverflushtestdone); |
| | 7478 | } |
| | 7479 | #endif /* ATH_SUPERG_FF */ |
| | 7480 | } |
| | 7481 | |
| | 7482 | static __inline int |
| | 7483 | txqactive(struct ath_hal *ah, int qnum) |
| | 7484 | { |
| | 7485 | u_int32_t txqs = 1 << qnum; |
| | 7486 | ath_hal_gettxintrtxqs(ah, &txqs); |
| | 7487 | return (txqs & (1 << qnum)); |
| | 7488 | } |
| | 7489 | |
| | 7490 | /* |
| | 7491 | * Deferred processing of transmit interrupt; special-cased |
| | 7492 | * for a single hardware transmit queue (e.g. 5210 and 5211). |
| | 7493 | */ |
| | 7494 | static void |
| | 7495 | ath_tx_tasklet_q0(TQUEUE_ARG data) |
| | 7496 | { |
| | 7497 | struct net_device *dev = (struct net_device *)data; |
| | 7498 | struct ath_softc *sc = dev->priv; |
| | 7499 | |
| | 7500 | if (txqactive(sc->sc_ah, 0)) |
| | 7501 | ath_tx_processq(sc, &sc->sc_txq[0]); |
| | 7502 | if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum)) |
| | 7503 | ath_tx_processq(sc, sc->sc_cabq); |
| | 7504 | |
| | 7505 | netif_wake_queue(dev); |
| | 7506 | |
| | 7507 | if (sc->sc_softled) |
| | 7508 | ath_led_event(sc, ATH_LED_TX); |
| | 7509 | } |
| | 7510 | |
| | 7511 | /* |
| | 7512 | * Deferred processing of transmit interrupt; special-cased |
| | 7513 | * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). |
| | 7514 | */ |
| | 7515 | static void |
| | 7516 | ath_tx_tasklet_q0123(TQUEUE_ARG data) |
| | 7517 | { |
| | 7518 | struct net_device *dev = (struct net_device *)data; |
| | 7519 | struct ath_softc *sc = dev->priv; |
| | 7520 | |
| | 7521 | /* |
| | 7522 | * Process each active queue. |
| | 7523 | */ |
| | 7524 | if (txqactive(sc->sc_ah, 0)) |
| | 7525 | ath_tx_processq(sc, &sc->sc_txq[0]); |
| | 7526 | if (txqactive(sc->sc_ah, 1)) |
| | 7527 | ath_tx_processq(sc, &sc->sc_txq[1]); |
| | 7528 | if (txqactive(sc->sc_ah, 2)) |
| | 7529 | ath_tx_processq(sc, &sc->sc_txq[2]); |
| | 7530 | if (txqactive(sc->sc_ah, 3)) |
| | 7531 | ath_tx_processq(sc, &sc->sc_txq[3]); |
| | 7532 | if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum)) |
| | 7533 | ath_tx_processq(sc, sc->sc_cabq); |
| | 7534 | #ifdef ATH_SUPERG_XR |
| | 7535 | if (sc->sc_xrtxq && txqactive(sc->sc_ah, sc->sc_xrtxq->axq_qnum)) |
| | 7536 | ath_tx_processq(sc, sc->sc_xrtxq); |
| | 7537 | #endif |
| | 7538 | if (sc->sc_uapsdq && txqactive(sc->sc_ah, sc->sc_uapsdq->axq_qnum)) |
| | 7539 | ath_tx_processq(sc, sc->sc_uapsdq); |
| | 7540 | |
| | 7541 | netif_wake_queue(dev); |
| | 7542 | |
| | 7543 | if (sc->sc_softled) |
| | 7544 | ath_led_event(sc, ATH_LED_TX); |
| | 7545 | } |
| | 7546 | |
| | 7547 | /* |
| | 7548 | * Deferred processing of transmit interrupt. |
| | 7549 | */ |
| | 7550 | static void |
| | 7551 | ath_tx_tasklet(TQUEUE_ARG data) |
| | 7552 | { |
| | 7553 | struct net_device *dev = (struct net_device *)data; |
| | 7554 | struct ath_softc *sc = dev->priv; |
| | 7555 | int i; |
| | 7556 | |
| | 7557 | /* |
| | 7558 | * Process each active queue. |
| | 7559 | */ |
| | 7560 | for (i = 0; i < HAL_NUM_TX_QUEUES; i++) |
| | 7561 | if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i)) |
| | 7562 | ath_tx_processq(sc, &sc->sc_txq[i]); |
| | 7563 | #ifdef ATH_SUPERG_XR |
| | 7564 | if (sc->sc_xrtxq && txqactive(sc->sc_ah, sc->sc_xrtxq->axq_qnum)) |
| | 7565 | ath_tx_processq(sc, sc->sc_xrtxq); |
| | 7566 | #endif |
| | 7567 | |
| | 7568 | netif_wake_queue(dev); |
| | 7569 | |
| | 7570 | if (sc->sc_softled) |
| | 7571 | ath_led_event(sc, ATH_LED_TX); |
| | 7572 | } |
| | 7573 | |
| | 7574 | static void |
| | 7575 | ath_tx_timeout(struct net_device *dev) |
| | 7576 | { |
| | 7577 | struct ath_softc *sc = dev->priv; |
| | 7578 | |
| | 7579 | DPRINTF(sc, ATH_DEBUG_WATCHDOG, "%s: %sRUNNING %svalid\n", |
| | 7580 | __func__, (dev->flags & IFF_RUNNING) ? "" : "!", |
| | 7581 | sc->sc_invalid ? "in" : ""); |
| | 7582 | |
| | 7583 | if ((dev->flags & IFF_RUNNING) && !sc->sc_invalid) { |
| | 7584 | sc->sc_stats.ast_watchdog++; |
| | 7585 | ath_reset(dev); /* Avoid taking a semaphore in ath_init */ |
| | 7586 | } |
| | 7587 | } |
| | 7588 | |
| | 7589 | /* |
| | 7590 | * Context: softIRQ and hwIRQ |
| | 7591 | */ |
| | 7592 | static void |
| | 7593 | ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) |
| | 7594 | { |
| | 7595 | struct ath_hal *ah = sc->sc_ah; |
| | 7596 | struct ath_buf *bf; |
| | 7597 | struct sk_buff *skb; |
| | 7598 | #ifdef ATH_SUPERG_FF |
| | 7599 | struct sk_buff *tskb; |
| | 7600 | #endif |
| | 7601 | int i; |
| | 7602 | |
| | 7603 | /* |
| | 7604 | * NB: this assumes output has been stopped and |
| | 7605 | * we do not need to block ath_tx_tasklet |
| | 7606 | */ |
| | 7607 | for (;;) { |
| | 7608 | ATH_TXQ_LOCK(txq); |
| | 7609 | bf = STAILQ_FIRST(&txq->axq_q); |
| | 7610 | if (bf == NULL) { |
| | 7611 | txq->axq_link = NULL; |
| | 7612 | ATH_TXQ_UNLOCK(txq); |
| | 7613 | break; |
| | 7614 | } |
| | 7615 | ATH_TXQ_REMOVE_HEAD(txq, bf_list); |
| | 7616 | ATH_TXQ_UNLOCK(txq); |
| | 7617 | #ifdef AR_DEBUG |
| | 7618 | if (sc->sc_debug & ATH_DEBUG_RESET) |
| | 7619 | ath_printtxbuf(bf, ath_hal_txprocdesc(ah, bf->bf_desc) == HAL_OK); |
| | 7620 | #endif /* AR_DEBUG */ |
| | 7621 | skb = bf->bf_skb->next; |
| | 7622 | bus_unmap_single(sc->sc_bdev, |
| | 7623 | bf->bf_skbaddr, bf->bf_skb->len, BUS_DMA_TODEVICE); |
| | 7624 | dev_kfree_skb_any(bf->bf_skb); |
| | 7625 | i = 0; |
| | 7626 | #ifdef ATH_SUPERG_FF |
| | 7627 | while (skb) { |
| | 7628 | tskb = skb->next; |
| | 7629 | bus_unmap_single(sc->sc_bdev, |
| | 7630 | bf->bf_skbaddrff[i++], skb->len, BUS_DMA_TODEVICE); |
| | 7631 | dev_kfree_skb_any(skb); |
| | 7632 | skb = tskb; |
| | 7633 | } |
| | 7634 | #endif /* ATH_SUPERG_FF */ |
| | 7635 | if (bf->bf_node) |
| | 7636 | ieee80211_free_node(bf->bf_node); |
| | 7637 | |
| | 7638 | bf->bf_skb = NULL; |
| | 7639 | bf->bf_node = NULL; |
| | 7640 | |
| | 7641 | ATH_TXBUF_LOCK(sc); |
| | 7642 | STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); |
| | 7643 | ATH_TXBUF_UNLOCK(sc); |
| | 7644 | } |
| | 7645 | } |
| | 7646 | |
/*
 * Halt DMA on a single h/w transmit queue and log the resulting queue
 * state.  The HAL return value is intentionally discarded; callers
 * (see ath_draintxq) reclaim the s/w queue afterwards regardless.
 */
static void
ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;

	(void) ath_hal_stoptxdma(ah, txq->axq_qnum);
	DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] 0x%x, link %p\n",
		__func__, txq->axq_qnum,
		ath_hal_gettxbuf(ah, txq->axq_qnum), txq->axq_link);
}
| | 7657 | |
| | 7658 | /* |
| | 7659 | * Drain the transmit queues and reclaim resources. |
| | 7660 | */ |
| | 7661 | static void |
| | 7662 | ath_draintxq(struct ath_softc *sc) |
| | 7663 | { |
| | 7664 | struct ath_hal *ah = sc->sc_ah; |
| | 7665 | int i; |
| | 7666 | |
| | 7667 | /* XXX return value */ |
| | 7668 | if (!sc->sc_invalid) { |
| | 7669 | (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); |
| | 7670 | DPRINTF(sc, ATH_DEBUG_RESET, "%s: beacon queue 0x%x\n", |
| | 7671 | __func__, ath_hal_gettxbuf(ah, sc->sc_bhalq)); |
| | 7672 | for (i = 0; i < HAL_NUM_TX_QUEUES; i++) |
| | 7673 | if (ATH_TXQ_SETUP(sc, i)) |
| | 7674 | ath_tx_stopdma(sc, &sc->sc_txq[i]); |
| | 7675 | } |
| | 7676 | sc->sc_dev->trans_start = jiffies; |
| | 7677 | netif_start_queue(sc->sc_dev); /* XXX move to callers */ |
| | 7678 | for (i = 0; i < HAL_NUM_TX_QUEUES; i++) |
| | 7679 | if (ATH_TXQ_SETUP(sc, i)) |
| | 7680 | ath_tx_draintxq(sc, &sc->sc_txq[i]); |
| | 7681 | } |
| | 7682 | |
| | 7683 | /* |
| | 7684 | * Disable the receive h/w in preparation for a reset. |
| | 7685 | */ |
| | 7686 | static void |
| | 7687 | ath_stoprecv(struct ath_softc *sc) |
| | 7688 | { |
| | 7689 | #define PA2DESC(_sc, _pa) \ |
| | 7690 | ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ |
| | 7691 | ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) |
| | 7692 | struct ath_hal *ah = sc->sc_ah; |
| | 7693 | u_int64_t tsf; |
| | 7694 | |
| | 7695 | ath_hal_stoppcurecv(ah); /* disable PCU */ |
| | 7696 | ath_hal_setrxfilter(ah, 0); /* clear recv filter */ |
| | 7697 | ath_hal_stopdmarecv(ah); /* disable DMA engine */ |
| | 7698 | mdelay(3); /* 3 ms is long enough for 1 frame */ |
| | 7699 | tsf = ath_hal_gettsf64(ah); |
| | 7700 | #ifdef AR_DEBUG |
| | 7701 | if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) { |
| | 7702 | struct ath_buf *bf; |
| | 7703 | |
| | 7704 | printk("ath_stoprecv: rx queue 0x%x, link %p\n", |
| | 7705 | ath_hal_getrxbuf(ah), sc->sc_rxlink); |
| | 7706 | STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { |
| | 7707 | struct ath_desc *ds = bf->bf_desc; |
| | 7708 | HAL_STATUS status = ath_hal_rxprocdesc(ah, ds, |
| | 7709 | bf->bf_daddr, PA2DESC(sc, ds->ds_link), tsf); |
| | 7710 | if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL)) |
| | 7711 | ath_printrxbuf(bf, status == HAL_OK); |
| | 7712 | } |
| | 7713 | } |
| | 7714 | #endif |
| | 7715 | sc->sc_rxlink = NULL; /* just in case */ |
| | 7716 | #undef PA2DESC |
| | 7717 | } |
| | 7718 | |
| | 7719 | /* |
| | 7720 | * Enable the receive h/w following a reset. |
| | 7721 | */ |
| | 7722 | static int |
| | 7723 | ath_startrecv(struct ath_softc *sc) |
| | 7724 | { |
| | 7725 | struct ath_hal *ah = sc->sc_ah; |
| | 7726 | struct net_device *dev = sc->sc_dev; |
| | 7727 | struct ath_buf *bf; |
| | 7728 | |
| | 7729 | /* |
| | 7730 | * Cisco's VPN software requires that drivers be able to |
| | 7731 | * receive encapsulated frames that are larger than the MTU. |
| | 7732 | * Since we can't be sure how large a frame we'll get, setup |
| | 7733 | * to handle the larges on possible. |
| | 7734 | */ |
| | 7735 | #ifdef ATH_SUPERG_FF |
| | 7736 | sc->sc_rxbufsize = roundup(ATH_FF_MAX_LEN, sc->sc_cachelsz); |
| | 7737 | #else |
| | 7738 | sc->sc_rxbufsize = roundup(IEEE80211_MAX_LEN, sc->sc_cachelsz); |
| | 7739 | #endif |
| | 7740 | DPRINTF(sc,ATH_DEBUG_RESET, "%s: mtu %u cachelsz %u rxbufsize %u\n", |
| | 7741 | __func__, dev->mtu, sc->sc_cachelsz, sc->sc_rxbufsize); |
| | 7742 | |
| | 7743 | sc->sc_rxlink = NULL; |
| | 7744 | STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { |
| | 7745 | int error = ath_rxbuf_init(sc, bf); |
| | 7746 | ATH_RXBUF_RESET(bf); |
| | 7747 | if (error < 0) |
| | 7748 | return error; |
| | 7749 | } |
| | 7750 | |
| | 7751 | sc->sc_rxbufcur = NULL; |
| | 7752 | |
| | 7753 | bf = STAILQ_FIRST(&sc->sc_rxbuf); |
| | 7754 | ath_hal_putrxbuf(ah, bf->bf_daddr); |
| | 7755 | ath_hal_rxena(ah); /* enable recv descriptors */ |
| | 7756 | ath_mode_init(dev); /* set filters, etc. */ |
| | 7757 | ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */ |
| | 7758 | return 0; |
| | 7759 | } |
| | 7760 | |
| | 7761 | /* |
| | 7762 | * Flush skb's allocate for receive. |
| | 7763 | */ |
| | 7764 | static void |
| | 7765 | ath_flushrecv(struct ath_softc *sc) |
| | 7766 | { |
| | 7767 | struct ath_buf *bf; |
| | 7768 | |
| | 7769 | STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) |
| | 7770 | if (bf->bf_skb != NULL) { |
| | 7771 | bus_unmap_single(sc->sc_bdev, |
| | 7772 | bf->bf_skbaddr, sc->sc_rxbufsize, |
| | 7773 | BUS_DMA_FROMDEVICE); |
| | 7774 | dev_kfree_skb(bf->bf_skb); |
| | 7775 | bf->bf_skb = NULL; |
| | 7776 | } |
| | 7777 | } |
| | 7778 | |
| | 7779 | /* |
| | 7780 | * Update internal state after a channel change. |
| | 7781 | */ |
| | 7782 | static void |
| | 7783 | ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) |
| | 7784 | { |
| | 7785 | struct ieee80211com *ic = &sc->sc_ic; |
| | 7786 | struct net_device *dev = sc->sc_dev; |
| | 7787 | enum ieee80211_phymode mode; |
| | 7788 | |
| | 7789 | mode = ieee80211_chan2mode(chan); |
| | 7790 | |
| | 7791 | ath_rate_setup(dev, mode); |
| | 7792 | ath_setcurmode(sc, mode); |
| | 7793 | |
| | 7794 | #ifdef notyet |
| | 7795 | /* |
| | 7796 | * Update BPF state. |
| | 7797 | */ |
| | 7798 | sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq = |
| | 7799 | htole16(chan->ic_freq); |
| | 7800 | sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags = |
| | 7801 | htole16(chan->ic_flags); |
| | 7802 | #endif |
| | 7803 | if (ic->ic_curchanmaxpwr == 0) |
| | 7804 | ic->ic_curchanmaxpwr = chan->ic_maxregpower; |
| | 7805 | } |
| | 7806 | |
| | 7807 | /* |
| | 7808 | * Set/change channels. If the channel is really being changed, |
| | 7809 | * it's done by resetting the chip. To accomplish this we must |
| | 7810 | * first cleanup any pending DMA, then restart stuff after a la |
| | 7811 | * ath_init. |
| | 7812 | */ |
| | 7813 | static int |
| | 7814 | ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) |
| | 7815 | { |
| | 7816 | struct ath_hal *ah = sc->sc_ah; |
| | 7817 | struct ieee80211com *ic = &sc->sc_ic; |
| | 7818 | struct net_device *dev = sc->sc_dev; |
| | 7819 | HAL_CHANNEL hchan; |
| | 7820 | u_int8_t tswitch = 0; |
| | 7821 | |
| | 7822 | /* |
| | 7823 | * Convert to a HAL channel description with |
| | 7824 | * the flags constrained to reflect the current |
| | 7825 | * operating mode. |
| | 7826 | */ |
| | 7827 | hchan.channel = chan->ic_freq; |
| | 7828 | hchan.channelFlags = ath_chan2flags(chan); |
| | 7829 | KASSERT(hchan.channel != 0, |
| | 7830 | ("bogus channel %u/0x%x", hchan.channel, hchan.channelFlags)); |
| | 7831 | |
| | 7832 | DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz) -> %u (%u MHz)\n", |
| | 7833 | __func__, ath_hal_mhz2ieee(ah, sc->sc_curchan.channel, |
| | 7834 | sc->sc_curchan.channelFlags), sc->sc_curchan.channel, |
| | 7835 | ath_hal_mhz2ieee(ah, hchan.channel, hchan.channelFlags), |
| | 7836 | hchan.channel); |
| | 7837 | /* check if it is turbo mode switch */ |
| | 7838 | if (hchan.channel == sc->sc_curchan.channel && |
| | 7839 | (hchan.channelFlags & IEEE80211_CHAN_TURBO) != (sc->sc_curchan.channelFlags & IEEE80211_CHAN_TURBO)) |
| | 7840 | tswitch = 1; |
| | 7841 | if (hchan.channel != sc->sc_curchan.channel || |
| | 7842 | hchan.channelFlags != sc->sc_curchan.channelFlags) { |
| | 7843 | HAL_STATUS status; |
| | 7844 | |
| | 7845 | /* |
| | 7846 | * To switch channels clear any pending DMA operations; |
| | 7847 | * wait long enough for the RX fifo to drain, reset the |
| | 7848 | * hardware at the new frequency, and then re-enable |
| | 7849 | * the relevant bits of the h/w. |
| | 7850 | */ |
| | 7851 | ath_hal_intrset(ah, 0); /* disable interrupts */ |
| | 7852 | ath_draintxq(sc); /* clear pending tx frames */ |
| | 7853 | ath_stoprecv(sc); /* turn off frame recv */ |
| | 7854 | |
| | 7855 | /* Set coverage class */ |
| | 7856 | if (sc->sc_scanning || !IEEE80211_IS_CHAN_A(chan)) |
| | 7857 | ath_hal_setcoverageclass(sc->sc_ah, 0, 0); |
| | 7858 | else |
| | 7859 | ath_hal_setcoverageclass(sc->sc_ah, ic->ic_coverageclass, 0); |
| | 7860 | |
| | 7861 | if (!ath_hal_reset(ah, sc->sc_opmode, &hchan, AH_TRUE, &status)) { |
| | 7862 | printk("%s: %s: unable to reset channel %u (%u MHz) " |
| | 7863 | "flags 0x%x '%s' (HAL status %u)\n", |
| | 7864 | dev->name, __func__, |
| | 7865 | ieee80211_chan2ieee(ic, chan), chan->ic_freq, |
| | 7866 | hchan.channelFlags, |
| | 7867 | ath_get_hal_status_desc(status), status); |
| | 7868 | return -EIO; |
| | 7869 | } |
| | 7870 | |
| | 7871 | if (sc->sc_softled) |
| | 7872 | ath_hal_gpioCfgOutput(ah, sc->sc_ledpin); |
| | 7873 | |
| | 7874 | sc->sc_curchan = hchan; |
| | 7875 | ath_update_txpow(sc); /* update tx power state */ |
| | 7876 | |
| | 7877 | /* |
| | 7878 | * Re-enable rx framework. |
| | 7879 | */ |
| | 7880 | if (ath_startrecv(sc) != 0) { |
| | 7881 | printk("%s: %s: unable to restart recv logic\n", |
| | 7882 | dev->name, __func__); |
| | 7883 | return -EIO; |
| | 7884 | } |
| | 7885 | |
| | 7886 | /* |
| | 7887 | * Change channels and update the h/w rate map |
| | 7888 | * if we're switching; e.g. 11a to 11b/g. |
| | 7889 | */ |
| | 7890 | ath_chan_change(sc, chan); |
| | 7891 | if (ic->ic_opmode == IEEE80211_M_HOSTAP) { |
| | 7892 | if (sc->sc_curchan.privFlags & CHANNEL_DFS) { |
| | 7893 | if (!(sc->sc_curchan.privFlags & CHANNEL_DFS_CLEAR)) { |
| | 7894 | dev->watchdog_timeo = 120 * HZ; /* set the timeout to normal */ |
| | 7895 | netif_stop_queue(dev); |
| | 7896 | if (sc->sc_dfswait) |
| | 7897 | del_timer_sync(&sc->sc_dfswaittimer); |
| | 7898 | DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s: start dfs wait period\n", |
| | 7899 | __func__, dev->name); |
| | 7900 | sc->sc_dfswait = 1; |
| | 7901 | sc->sc_dfswaittimer.function = ath_check_dfs_clear; |
| | 7902 | sc->sc_dfswaittimer.expires = |
| | 7903 | jiffies + (ATH_DFS_WAIT_POLL_PERIOD * HZ); |
| | 7904 | sc->sc_dfswaittimer.data = (unsigned long)sc; |
| | 7905 | add_timer(&sc->sc_dfswaittimer); |
| | 7906 | } |
| | 7907 | } else |
| | 7908 | if (sc->sc_dfswait == 1) |
| | 7909 | mod_timer(&sc->sc_dfswaittimer, jiffies + 2); |
| | 7910 | } |
| | 7911 | /* |
| | 7912 | * re configure beacons when it is a turbo mode switch. |
| | 7913 | * HW seems to turn off beacons during turbo mode switch. |
| | 7914 | */ |
| | 7915 | if (sc->sc_beacons && tswitch) |
| | 7916 | ath_beacon_config(sc, NULL); |
| | 7917 | |
| | 7918 | /* |
| | 7919 | * Re-enable interrupts. |
| | 7920 | */ |
| | 7921 | ath_hal_intrset(ah, sc->sc_imask); |
| | 7922 | } |
| | 7923 | return 0; |
| | 7924 | } |
| | 7925 | |
| | 7926 | /* |
| | 7927 | * Periodically recalibrate the PHY to account |
| | 7928 | * for temperature/environment changes. |
| | 7929 | */ |
| | 7930 | static void |
| | 7931 | ath_calibrate(unsigned long arg) |
| | 7932 | { |
| | 7933 | struct net_device *dev = (struct net_device *) arg; |
| | 7934 | struct ath_softc *sc = dev->priv; |
| | 7935 | struct ath_hal *ah = sc->sc_ah; |
| | 7936 | struct ieee80211com *ic = &sc->sc_ic; |
| | 7937 | HAL_CHANNEL *chans; |
| | 7938 | u_int32_t nchans; |
| | 7939 | HAL_BOOL isIQdone = AH_FALSE; |
| | 7940 | |
| | 7941 | sc->sc_stats.ast_per_cal++; |
| | 7942 | |
| | 7943 | DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: channel %u/%x\n", |
| | 7944 | __func__, sc->sc_curchan.channel, sc->sc_curchan.channelFlags); |
| | 7945 | |
| | 7946 | if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { |
| | 7947 | /* |
| | 7948 | * Rfgain is out of bounds, reset the chip |
| | 7949 | * to load new gain values. |
| | 7950 | */ |
| | 7951 | sc->sc_stats.ast_per_rfgain++; |
| | 7952 | ath_reset(dev); |
| | 7953 | } |
| | 7954 | if (!ath_hal_calibrate(ah, &sc->sc_curchan, &isIQdone)) { |
| | 7955 | DPRINTF(sc, ATH_DEBUG_ANY, |
| | 7956 | "%s: calibration of channel %u failed\n", |
| | 7957 | __func__, sc->sc_curchan.channel); |
| | 7958 | sc->sc_stats.ast_per_calfail++; |
| | 7959 | } |
| | 7960 | if (ic->ic_opmode == IEEE80211_M_HOSTAP) { |
| | 7961 | chans = kmalloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL), GFP_ATOMIC); |
| | 7962 | if (chans == NULL) { |
| | 7963 | printk("%s: unable to allocate channel table\n", dev->name); |
| | 7964 | return; |
| | 7965 | } |
| | 7966 | nchans = ath_hal_checknol(ah, chans, IEEE80211_CHAN_MAX); |
| | 7967 | if (nchans > 0) { |
| | 7968 | u_int32_t i, j; |
| | 7969 | struct ieee80211_channel *ichan; |
| | 7970 | |
| | 7971 | for (i = 0; i < nchans; i++) { |
| | 7972 | for (j = 0; j < ic->ic_nchans; j++) { |
| | 7973 | ichan = &ic->ic_channels[j]; |
| | 7974 | if (chans[i].channel == ichan->ic_freq) |
| | 7975 | ichan->ic_flags &= ~IEEE80211_CHAN_RADAR; |
| | 7976 | } |
| | 7977 | |
| | 7978 | ichan = ieee80211_find_channel(ic, chans[i].channel, |
| | 7979 | chans[i].channelFlags); |
| | 7980 | if (ichan != NULL) |
| | 7981 | ichan->ic_flags &= ~IEEE80211_CHAN_RADAR; |
| | 7982 | } |
| | 7983 | } |
| | 7984 | kfree(chans); |
| | 7985 | } |
| | 7986 | |
| | 7987 | if (isIQdone == AH_TRUE) |
| | 7988 | ath_calinterval = ATH_LONG_CALINTERVAL; |
| | 7989 | else |
| | 7990 | ath_calinterval = ATH_SHORT_CALINTERVAL; |
| | 7991 | |
| | 7992 | sc->sc_cal_ch.expires = jiffies + (ath_calinterval * HZ); |
| | 7993 | add_timer(&sc->sc_cal_ch); |
| | 7994 | } |
| | 7995 | |
| | 7996 | static void |
| | 7997 | ath_scan_start(struct ieee80211com *ic) |
| | 7998 | { |
| | 7999 | struct net_device *dev = ic->ic_dev; |
| | 8000 | struct ath_softc *sc = dev->priv; |
| | 8001 | struct ath_hal *ah = sc->sc_ah; |
| | 8002 | u_int32_t rfilt; |
| | 8003 | |
| | 8004 | /* XXX calibration timer? */ |
| | 8005 | |
| | 8006 | sc->sc_scanning = 1; |
| | 8007 | sc->sc_syncbeacon = 0; |
| | 8008 | rfilt = ath_calcrxfilter(sc); |
| | 8009 | ath_hal_setrxfilter(ah, rfilt); |
| | 8010 | ath_hal_setassocid(ah, dev->broadcast, 0); |
| | 8011 | |
| | 8012 | DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n", |
| | 8013 | __func__, rfilt, ether_sprintf(dev->broadcast)); |
| | 8014 | } |
| | 8015 | |
| | 8016 | static void |
| | 8017 | ath_scan_end(struct ieee80211com *ic) |
| | 8018 | { |
| | 8019 | struct net_device *dev = ic->ic_dev; |
| | 8020 | struct ath_softc *sc = dev->priv; |
| | 8021 | struct ath_hal *ah = sc->sc_ah; |
| | 8022 | u_int32_t rfilt; |
| | 8023 | |
| | 8024 | sc->sc_scanning = 0; |
| | 8025 | rfilt = ath_calcrxfilter(sc); |
| | 8026 | ath_hal_setrxfilter(ah, rfilt); |
| | 8027 | ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); |
| | 8028 | |
| | 8029 | DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", |
| | 8030 | __func__, rfilt, ether_sprintf(sc->sc_curbssid), |
| | 8031 | sc->sc_curaid); |
| | 8032 | } |
| | 8033 | |
| | 8034 | static void |
| | 8035 | ath_set_channel(struct ieee80211com *ic) |
| | 8036 | { |
| | 8037 | struct net_device *dev = ic->ic_dev; |
| | 8038 | struct ath_softc *sc = dev->priv; |
| | 8039 | |
| | 8040 | (void) ath_chan_set(sc, ic->ic_curchan); |
| | 8041 | /* |
| | 8042 | * If we are returning to our bss channel then mark state |
| | 8043 | * so the next recv'd beacon's tsf will be used to sync the |
| | 8044 | * beacon timers. Note that since we only hear beacons in |
| | 8045 | * sta/ibss mode this has no effect in other operating modes. |
| | 8046 | */ |
| | 8047 | if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan) |
| | 8048 | sc->sc_syncbeacon = 1; |
| | 8049 | } |
| | 8050 | |
| | 8051 | static void |
| | 8052 | ath_set_coverageclass(struct ieee80211com *ic) |
| | 8053 | { |
| | 8054 | struct ath_softc *sc = ic->ic_dev->priv; |
| | 8055 | |
| | 8056 | ath_hal_setcoverageclass(sc->sc_ah, ic->ic_coverageclass, 0); |
| | 8057 | |
| | 8058 | return; |
| | 8059 | } |
| | 8060 | |
| | 8061 | static u_int |
| | 8062 | ath_mhz2ieee(struct ieee80211com *ic, u_int freq, u_int flags) |
| | 8063 | { |
| | 8064 | struct ath_softc *sc = ic->ic_dev->priv; |
| | 8065 | |
| | 8066 | return (ath_hal_mhz2ieee(sc->sc_ah, freq, flags)); |
| | 8067 | } |
| | 8068 | |
| | 8069 | |
| | 8070 | /* |
| | 8071 | * Context: softIRQ and process context |
| | 8072 | */ |
| | 8073 | static int |
| | 8074 | ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) |
| | 8075 | { |
| | 8076 | struct ath_vap *avp = ATH_VAP(vap); |
| | 8077 | struct ieee80211com *ic = vap->iv_ic; |
| | 8078 | struct net_device *dev = ic->ic_dev; |
| | 8079 | struct ath_softc *sc = dev->priv; |
| | 8080 | struct ath_hal *ah = sc->sc_ah; |
| | 8081 | struct ieee80211_node *ni, *wds_ni; |
| | 8082 | int i, error, stamode; |
| | 8083 | u_int32_t rfilt = 0; |
| | 8084 | struct ieee80211vap *tmpvap; |
| | 8085 | static const HAL_LED_STATE leds[] = { |
| | 8086 | HAL_LED_INIT, /* IEEE80211_S_INIT */ |
| | 8087 | HAL_LED_SCAN, /* IEEE80211_S_SCAN */ |
| | 8088 | HAL_LED_AUTH, /* IEEE80211_S_AUTH */ |
| | 8089 | HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ |
| | 8090 | HAL_LED_RUN, /* IEEE80211_S_RUN */ |
| | 8091 | }; |
| | 8092 | |
| | 8093 | DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s: %s -> %s\n", __func__, dev->name, |
| | 8094 | ieee80211_state_name[vap->iv_state], |
| | 8095 | ieee80211_state_name[nstate]); |
| | 8096 | |
| | 8097 | del_timer(&sc->sc_cal_ch); /* periodic calibration timer */ |
| | 8098 | ath_hal_setledstate(ah, leds[nstate]); /* set LED */ |
| | 8099 | netif_stop_queue(dev); /* before we do anything else */ |
| | 8100 | |
| | 8101 | if (nstate == IEEE80211_S_INIT) { |
| | 8102 | /* |
| | 8103 | * if there is no VAP left in RUN state |
| | 8104 | * disable beacon interrupts. |
| | 8105 | */ |
| | 8106 | TAILQ_FOREACH(tmpvap, &ic->ic_vaps, iv_next) { |
| | 8107 | if (tmpvap != vap && tmpvap->iv_state == IEEE80211_S_RUN ) |
| | 8108 | break; |
| | 8109 | } |
| | 8110 | if (!tmpvap) { |
| | 8111 | sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); |
| | 8112 | /* |
| | 8113 | * Disable interrupts. |
| | 8114 | */ |
| | 8115 | ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL); |
| | 8116 | sc->sc_beacons = 0; |
| | 8117 | } |
| | 8118 | /* |
| | 8119 | * Notify the rate control algorithm. |
| | 8120 | */ |
| | 8121 | sc->sc_rc->ops->newstate(vap, nstate); |
| | 8122 | goto done; |
| | 8123 | } |
| | 8124 | ni = vap->iv_bss; |
| | 8125 | |
| | 8126 | rfilt = ath_calcrxfilter(sc); |
| | 8127 | stamode = (vap->iv_opmode == IEEE80211_M_STA || |
| | 8128 | vap->iv_opmode == IEEE80211_M_IBSS || |
| | 8129 | vap->iv_opmode == IEEE80211_M_AHDEMO); |
| | 8130 | if (stamode && nstate == IEEE80211_S_RUN) { |
| | 8131 | sc->sc_curaid = ni->ni_associd; |
| | 8132 | IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid); |
| | 8133 | } else |
| | 8134 | sc->sc_curaid = 0; |
| | 8135 | |
| | 8136 | DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", |
| | 8137 | __func__, rfilt, ether_sprintf(sc->sc_curbssid), |
| | 8138 | sc->sc_curaid); |
| | 8139 | |
| | 8140 | ath_hal_setrxfilter(ah, rfilt); |
| | 8141 | if (stamode) |
| | 8142 | ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); |
| | 8143 | |
| | 8144 | if ((vap->iv_opmode != IEEE80211_M_STA) && |
| | 8145 | (vap->iv_flags & IEEE80211_F_PRIVACY)) { |
| | 8146 | for (i = 0; i < IEEE80211_WEP_NKID; i++) |
| | 8147 | if (ath_hal_keyisvalid(ah, i)) |
| | 8148 | ath_hal_keysetmac(ah, i, ni->ni_bssid); |
| | 8149 | } |
| | 8150 | |
| | 8151 | /* |
| | 8152 | * Notify the rate control algorithm so rates |
| | 8153 | * are setup should ath_beacon_alloc be called. |
| | 8154 | */ |
| | 8155 | sc->sc_rc->ops->newstate(vap, nstate); |
| | 8156 | |
| | 8157 | if (vap->iv_opmode == IEEE80211_M_MONITOR) { |
| | 8158 | /* nothing to do */; |
| | 8159 | } else if (nstate == IEEE80211_S_RUN) { |
| | 8160 | DPRINTF(sc, ATH_DEBUG_STATE, |
| | 8161 | "%s(RUN): ic_flags=0x%08x iv=%d bssid=%s " |
| | 8162 | "capinfo=0x%04x chan=%d\n" |
| | 8163 | , __func__ |
| | 8164 | , vap->iv_flags |
| | 8165 | , ni->ni_intval |
| | 8166 | , ether_sprintf(ni->ni_bssid) |
| | 8167 | , ni->ni_capinfo |
| | 8168 | , ieee80211_chan2ieee(ic, ni->ni_chan)); |
| | 8169 | |
| | 8170 | switch (vap->iv_opmode) { |
| | 8171 | case IEEE80211_M_HOSTAP: |
| | 8172 | case IEEE80211_M_IBSS: |
| | 8173 | /* |
| | 8174 | * Allocate and setup the beacon frame. |
| | 8175 | * |
| | 8176 | * Stop any previous beacon DMA. This may be |
| | 8177 | * necessary, for example, when an ibss merge |
| | 8178 | * causes reconfiguration; there will be a state |
| | 8179 | * transition from RUN->RUN that means we may |
| | 8180 | * be called with beacon transmission active. |
| | 8181 | */ |
| | 8182 | ath_hal_stoptxdma(ah, sc->sc_bhalq); |
| | 8183 | |
| | 8184 | /* Set default key index for static wep case */ |
| | 8185 | ni->ni_ath_defkeyindex = IEEE80211_INVAL_DEFKEY; |
| | 8186 | if (((vap->iv_flags & IEEE80211_F_WPA) == 0) && |
| | 8187 | (ni->ni_authmode != IEEE80211_AUTH_8021X) && |
| | 8188 | (vap->iv_def_txkey != IEEE80211_KEYIX_NONE)) { |
| | 8189 | ni->ni_ath_defkeyindex = vap->iv_def_txkey; |
| | 8190 | } |
| | 8191 | |
| | 8192 | error = ath_beacon_alloc(sc, ni); |
| | 8193 | if (error < 0) |
| | 8194 | goto bad; |
| | 8195 | /* |
| | 8196 | * if the turbo flags have changed, then beacon and turbo |
| | 8197 | * need to be reconfigured. |
| | 8198 | */ |
| | 8199 | if ((sc->sc_dturbo && !(vap->iv_ath_cap & IEEE80211_ATHC_TURBOP)) || |
| | 8200 | (!sc->sc_dturbo && (vap->iv_ath_cap & IEEE80211_ATHC_TURBOP))) |
| | 8201 | sc->sc_beacons = 0; |
| | 8202 | /* |
| | 8203 | * if it is the first AP VAP moving to RUN state then beacon |
| | 8204 | * needs to be reconfigured. |
| | 8205 | */ |
| | 8206 | TAILQ_FOREACH(tmpvap, &ic->ic_vaps, iv_next) { |
| | 8207 | if (tmpvap != vap && tmpvap->iv_state == IEEE80211_S_RUN && |
| | 8208 | tmpvap->iv_opmode == IEEE80211_M_HOSTAP) |
| | 8209 | break; |
| | 8210 | } |
| | 8211 | if (!tmpvap) |
| | 8212 | sc->sc_beacons = 0; |
| | 8213 | break; |
| | 8214 | case IEEE80211_M_STA: |
| | 8215 | #ifdef ATH_SUPERG_COMP |
| | 8216 | /* have we negotiated compression? */ |
| | 8217 | if (!(vap->iv_ath_cap & ni->ni_ath_flags & IEEE80211_NODE_COMP)) |
| | 8218 | ni->ni_ath_flags &= ~IEEE80211_NODE_COMP; |
| | 8219 | #endif |
| | 8220 | /* |
| | 8221 | * Allocate a key cache slot to the station. |
| | 8222 | */ |
| | 8223 | ath_setup_keycacheslot(sc, ni); |
| | 8224 | /* |
| | 8225 | * Record negotiated dynamic turbo state for |
| | 8226 | * use by rate control modules. |
| | 8227 | */ |
| | 8228 | sc->sc_dturbo = |
| | 8229 | (ni->ni_ath_flags & IEEE80211_ATHC_TURBOP) != 0; |
| | 8230 | break; |
| | 8231 | case IEEE80211_M_WDS: |
| | 8232 | wds_ni = ieee80211_find_txnode(vap, vap->wds_mac); |
| | 8233 | if (wds_ni) { |
| | 8234 | /* XXX no rate negotiation; just dup */ |
| | 8235 | wds_ni->ni_rates = vap->iv_bss->ni_rates; |
| | 8236 | /* Depending on the sequence of bringing up devices |
| | 8237 | * it's possible the rates of the root bss isn't |
| | 8238 | * filled yet. |
| | 8239 | */ |
| | 8240 | if (vap->iv_ic->ic_newassoc != NULL && |
| | 8241 | wds_ni->ni_rates.rs_nrates != 0) { |
| | 8242 | /* Fill in the rates based on our own rates |
| | 8243 | * we rely on the rate selection mechanism |
| | 8244 | * to find out which rates actually work! |
| | 8245 | */ |
| | 8246 | vap->iv_ic->ic_newassoc(wds_ni, 1); |
| | 8247 | } |
| | 8248 | } |
| | 8249 | break; |
| | 8250 | default: |
| | 8251 | break; |
| | 8252 | } |
| | 8253 | |
| | 8254 | |
| | 8255 | /* |
| | 8256 | * Configure the beacon and sleep timers. |
| | 8257 | */ |
| | 8258 | if (!sc->sc_beacons && vap->iv_opmode!=IEEE80211_M_WDS) { |
| | 8259 | ath_beacon_config(sc, vap); |
| | 8260 | sc->sc_beacons = 1; |
| | 8261 | } |
| | 8262 | |
| | 8263 | /* |
| | 8264 | * Reset rssi stats; maybe not the best place... |
| | 8265 | */ |
| | 8266 | sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; |
| | 8267 | sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; |
| | 8268 | sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; |
| | 8269 | /* |
| | 8270 | * if it is a DFS channel and has not been checked for radar |
| | 8271 | * do not let the 80211 state machine to go to RUN state. |
| | 8272 | * |
| | 8273 | */ |
| | 8274 | if (sc->sc_dfswait && vap->iv_opmode == IEEE80211_M_HOSTAP ) { |
| | 8275 | /* push the VAP to RUN state once DFS is cleared */ |
| | 8276 | DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s: VAP -> DFS_WAIT\n", |
| | 8277 | __func__, dev->name); |
| | 8278 | avp->av_dfswait_run = 1; |
| | 8279 | return 0; |
| | 8280 | } |
| | 8281 | } else { |
| | 8282 | if (sc->sc_dfswait && |
| | 8283 | vap->iv_opmode == IEEE80211_M_HOSTAP && |
| | 8284 | sc->sc_dfswaittimer.data == (unsigned long)vap) { |
| | 8285 | del_timer_sync(&sc->sc_dfswaittimer); |
| | 8286 | sc->sc_dfswait = 0; |
| | 8287 | DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s: VAP out of DFS_WAIT\n", |
| | 8288 | __func__, dev->name); |
| | 8289 | } |
| | 8290 | /* |
| | 8291 | * XXXX |
| | 8292 | * if it is SCAN state, disable beacons. |
| | 8293 | */ |
| | 8294 | if (nstate == IEEE80211_S_SCAN) { |
| | 8295 | ath_hal_intrset(ah,sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); |
| | 8296 | sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); |
| | 8297 | /* need to reconfigure the beacons when it moves to RUN */ |
| | 8298 | sc->sc_beacons = 0; |
| | 8299 | } |
| | 8300 | avp->av_dfswait_run = 0; /* reset the dfs wait flag */ |
| | 8301 | } |
| | 8302 | done: |
| | 8303 | /* |
| | 8304 | * Invoke the parent method to complete the work. |
| | 8305 | */ |
| | 8306 | error = avp->av_newstate(vap, nstate, arg); |
| | 8307 | |
| | 8308 | /* |
| | 8309 | * Finally, start any timers. |
| | 8310 | */ |
| | 8311 | if (nstate == IEEE80211_S_RUN) { |
| | 8312 | /* start periodic recalibration timer */ |
| | 8313 | mod_timer(&sc->sc_cal_ch, jiffies + (ath_calinterval * HZ)); |
| | 8314 | } |
| | 8315 | |
| | 8316 | #ifdef ATH_SUPERG_XR |
| | 8317 | if (vap->iv_flags & IEEE80211_F_XR && |
| | 8318 | nstate == IEEE80211_S_RUN) |
| | 8319 | ATH_SETUP_XR_VAP(sc,vap,rfilt); |
| | 8320 | if (vap->iv_flags & IEEE80211_F_XR && |
| | 8321 | nstate == IEEE80211_S_INIT && sc->sc_xrgrppoll) |
| | 8322 | ath_grppoll_stop(vap); |
| | 8323 | #endif |
| | 8324 | bad: |
| | 8325 | netif_start_queue(dev); |
| | 8326 | dev->watchdog_timeo = 5 * HZ; /* set the timeout to normal */ |
| | 8327 | return error; |
| | 8328 | } |
| | 8329 | |
| | 8330 | /* |
| | 8331 | * periodically checks for the HAL to set |
| | 8332 | * CHANNEL_DFS_CLEAR flag on current channel. |
| | 8333 | * if the flag is set and a VAP is waiting for it, push |
| | 8334 | * transition the VAP to RUN state. |
| | 8335 | * |
| | 8336 | * Context: Timer (softIRQ) |
| | 8337 | */ |
| | 8338 | static void |
| | 8339 | ath_check_dfs_clear(unsigned long data ) |
| | 8340 | { |
| | 8341 | struct ath_softc *sc = (struct ath_softc *)data; |
| | 8342 | struct ieee80211com *ic = &sc->sc_ic; |
| | 8343 | struct net_device *dev = sc->sc_dev; |
| | 8344 | struct ieee80211vap *vap ; |
| | 8345 | HAL_CHANNEL hchan; |
| | 8346 | |
| | 8347 | if(!sc->sc_dfswait) return; |
| | 8348 | |
| | 8349 | /* if still need to wait */ |
| | 8350 | ath_hal_radar_wait(sc->sc_ah, &hchan); |
| | 8351 | |
| | 8352 | if (hchan.privFlags & CHANNEL_INTERFERENCE) |
| | 8353 | return; |
| | 8354 | |
| | 8355 | if ((hchan.privFlags & CHANNEL_DFS_CLEAR) || |
| | 8356 | (!(hchan.privFlags & CHANNEL_DFS))) { |
| | 8357 | sc->sc_curchan.privFlags |= CHANNEL_DFS_CLEAR; |
| | 8358 | sc->sc_dfswait = 0; |
| | 8359 | TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { |
| | 8360 | struct ath_vap *avp = ATH_VAP(vap); |
| | 8361 | if (avp->av_dfswait_run) { |
| | 8362 | /* re alloc beacons to update new channel info */ |
| | 8363 | int error; |
| | 8364 | error = ath_beacon_alloc(sc, vap->iv_bss); |
| | 8365 | if(error < 0) { |
| | 8366 | /* XXX */ |
| | 8367 | return; |
| | 8368 | } |
| | 8369 | DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s: VAP DFS_WAIT -> RUN\n", |
| | 8370 | __func__, dev->name); |
| | 8371 | avp->av_newstate(vap, IEEE80211_S_RUN, 0); |
| | 8372 | /* start calibration timer */ |
| | 8373 | mod_timer(&sc->sc_cal_ch, jiffies + (ath_calinterval * HZ)); |
| | 8374 | #ifdef ATH_SUPERG_XR |
| | 8375 | if (vap->iv_flags & IEEE80211_F_XR ) { |
| | 8376 | u_int32_t rfilt = 0; |
| | 8377 | rfilt = ath_calcrxfilter(sc); |
| | 8378 | ATH_SETUP_XR_VAP(sc, vap, rfilt); |
| | 8379 | } |
| | 8380 | #endif |
| | 8381 | avp->av_dfswait_run = 0; |
| | 8382 | } |
| | 8383 | } |
| | 8384 | /* start the device */ |
| | 8385 | netif_start_queue(dev); |
| | 8386 | dev->watchdog_timeo = 5 * HZ; /* set the timeout to normal */ |
| | 8387 | } else { |
| | 8388 | /* fire the timer again */ |
| | 8389 | sc->sc_dfswaittimer.expires = jiffies + (ATH_DFS_WAIT_POLL_PERIOD * HZ); |
| | 8390 | sc->sc_dfswaittimer.data = (unsigned long)sc; |
| | 8391 | add_timer(&sc->sc_dfswaittimer); |
| | 8392 | } |
| | 8393 | |
| | 8394 | } |
| | 8395 | |
| | 8396 | #ifdef ATH_SUPERG_COMP |
/* Enable (en != 0) or disable the hardware decompression mask for the
 * given node; invoked after a key has been added or deleted. */
static void
ath_comp_set(struct ieee80211vap *vap, struct ieee80211_node *ni, int en)
{
	ath_setup_comp(ni, en);
}
| | 8407 | |
| | 8408 | /* Set up decompression engine for this node. */ |
| | 8409 | static void |
| | 8410 | ath_setup_comp(struct ieee80211_node *ni, int enable) |
| | 8411 | { |
| | 8412 | #define IEEE80211_KEY_XR (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV) |
| | 8413 | struct ieee80211vap *vap = ni->ni_vap; |
| | 8414 | struct ath_softc *sc = vap->iv_ic->ic_dev->priv; |
| | 8415 | struct ath_node *an = ATH_NODE(ni); |
| | 8416 | u_int16_t keyindex; |
| | 8417 | |
| | 8418 | if (enable) { |
| | 8419 | /* Have we negotiated compression? */ |
| | 8420 | if (!(ni->ni_ath_flags & IEEE80211_NODE_COMP)) |
| | 8421 | return; |
| | 8422 | |
| | 8423 | /* No valid key? */ |
| | 8424 | if (ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE) |
| | 8425 | return; |
| | 8426 | |
| | 8427 | /* Setup decompression mask. |
| | 8428 | * For TKIP and split MIC case, recv. keyindex is at 32 offset |
| | 8429 | * from tx key. |
| | 8430 | */ |
| | 8431 | if ((ni->ni_wpa_ie != NULL) && |
| | 8432 | (ni->ni_rsn.rsn_ucastcipher == IEEE80211_CIPHER_TKIP) && |
| | 8433 | sc->sc_splitmic) { |
| | 8434 | if ((ni->ni_ucastkey.wk_flags & IEEE80211_KEY_XR) |
| | 8435 | == IEEE80211_KEY_XR) |
| | 8436 | keyindex = ni->ni_ucastkey.wk_keyix + 32; |
| | 8437 | else |
| | 8438 | keyindex = ni->ni_ucastkey.wk_keyix; |
| | 8439 | } else |
| | 8440 | keyindex = ni->ni_ucastkey.wk_keyix + ni->ni_rxkeyoff; |
| | 8441 | |
| | 8442 | ath_hal_setdecompmask(sc->sc_ah, keyindex, 1); |
| | 8443 | an->an_decomp_index = keyindex; |
| | 8444 | } else { |
| | 8445 | if (an->an_decomp_index != INVALID_DECOMP_INDEX) { |
| | 8446 | ath_hal_setdecompmask(sc->sc_ah, an->an_decomp_index, 0); |
| | 8447 | an->an_decomp_index = INVALID_DECOMP_INDEX; |
| | 8448 | } |
| | 8449 | } |
| | 8450 | |
| | 8451 | return; |
| | 8452 | #undef IEEE80211_KEY_XR |
| | 8453 | } |
| | 8454 | #endif |
| | 8455 | |
| | 8456 | /* |
| | 8457 | * Allocate a key cache slot to the station so we can |
| | 8458 | * setup a mapping from key index to node. The key cache |
| | 8459 | * slot is needed for managing antenna state and for |
| | 8460 | * compression when stations do not use crypto. We do |
| | 8461 | * it unilaterally here; if crypto is employed this slot |
| | 8462 | * will be reassigned. |
| | 8463 | */ |
| | 8464 | static void |
| | 8465 | ath_setup_stationkey(struct ieee80211_node *ni) |
| | 8466 | { |
| | 8467 | struct ieee80211vap *vap = ni->ni_vap; |
| | 8468 | struct ath_softc *sc = vap->iv_ic->ic_dev->priv; |
| | 8469 | u_int16_t keyix; |
| | 8470 | |
| | 8471 | keyix = ath_key_alloc(vap, &ni->ni_ucastkey); |
| | 8472 | if (keyix == IEEE80211_KEYIX_NONE) { |
| | 8473 | /* |
| | 8474 | * Key cache is full; we'll fall back to doing |
| | 8475 | * the more expensive lookup in software. Note |
| | 8476 | * this also means no h/w compression. |
| | 8477 | */ |
| | 8478 | /* XXX msg+statistic */ |
| | 8479 | return; |
| | 8480 | } else { |
| | 8481 | ni->ni_ucastkey.wk_keyix = keyix; |
| | 8482 | /* NB: this will create a pass-thru key entry */ |
| | 8483 | ath_keyset(sc, &ni->ni_ucastkey, ni->ni_macaddr, vap->iv_bss); |
| | 8484 | |
| | 8485 | #ifdef ATH_SUPERG_COMP |
| | 8486 | /* Enable de-compression logic */ |
| | 8487 | ath_setup_comp(ni, 1); |
| | 8488 | #endif |
| | 8489 | } |
| | 8490 | |
| | 8491 | return; |
| | 8492 | } |
| | 8493 | |
/* Setup WEP key for the station if compression is negotiated.
 * When station and AP are using same default key index, use single key
 * cache entry for receive and transmit, else two key cache entries are
 * created. One for receive with MAC address of station and one for transmit
 * with NULL mac address. On receive key cache entry de-compression mask
 * is enabled.
 * On any allocation failure the COMP flag is stripped from the node.
 */
static void
ath_setup_stationwepkey(struct ieee80211_node *ni)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211_key *ni_key;
	struct ieee80211_key tmpkey;
	struct ieee80211_key *rcv_key, *xmit_key;
	int txkeyidx, rxkeyidx = IEEE80211_KEYIX_NONE, i;
	u_int8_t null_macaddr[IEEE80211_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	KASSERT(ni->ni_ath_defkeyindex < IEEE80211_WEP_NKID,
		("got invalid node key index 0x%x", ni->ni_ath_defkeyindex));
	KASSERT(vap->iv_def_txkey < IEEE80211_WEP_NKID,
		("got invalid vap def key index 0x%x", vap->iv_def_txkey));

	/* Allocate a key slot first */
	if (!ieee80211_crypto_newkey(vap,
		IEEE80211_CIPHER_WEP,
		IEEE80211_KEY_XMIT|IEEE80211_KEY_RECV,
		&ni->ni_ucastkey))
		goto error;

	txkeyidx = ni->ni_ucastkey.wk_keyix;
	xmit_key = &vap->iv_nw_keys[vap->iv_def_txkey];

	/* Do we need separate rx key? */
	if (ni->ni_ath_defkeyindex != vap->iv_def_txkey) {
		/* NB: wk_keyix is cleared so newkey allocates a second,
		 * distinct slot rather than reusing the tx slot */
		ni->ni_ucastkey.wk_keyix = IEEE80211_KEYIX_NONE;
		if (!ieee80211_crypto_newkey(vap,
			IEEE80211_CIPHER_WEP,
			IEEE80211_KEY_XMIT|IEEE80211_KEY_RECV,
			&ni->ni_ucastkey)) {
			/* second allocation failed: restore tx index and
			 * release the first slot before bailing out */
			ni->ni_ucastkey.wk_keyix = txkeyidx;
			ieee80211_crypto_delkey(vap, &ni->ni_ucastkey, ni);
			goto error;
		}
		rxkeyidx = ni->ni_ucastkey.wk_keyix;
		ni->ni_ucastkey.wk_keyix = txkeyidx;

		rcv_key = &vap->iv_nw_keys[ni->ni_ath_defkeyindex];
	} else {
		/* Common key index: one slot serves both directions. */
		rcv_key = xmit_key;
		rxkeyidx = txkeyidx;
	}

	/* Remember receive key offset */
	ni->ni_rxkeyoff = rxkeyidx - txkeyidx;

	/* Setup xmit key */
	ni_key = &ni->ni_ucastkey;
	if (rxkeyidx != txkeyidx)
		ni_key->wk_flags = IEEE80211_KEY_XMIT;
	else
		ni_key->wk_flags = IEEE80211_KEY_XMIT|IEEE80211_KEY_RECV;

	/* Copy key material/state from the VAP's default tx key. */
	ni_key->wk_keylen = xmit_key->wk_keylen;
	for (i = 0; i < IEEE80211_TID_SIZE; i++)
		ni_key->wk_keyrsc[i] = xmit_key->wk_keyrsc[i];
	ni_key->wk_keytsc = 0;
	memset(ni_key->wk_key, 0, sizeof(ni_key->wk_key));
	memcpy(ni_key->wk_key, xmit_key->wk_key, xmit_key->wk_keylen);
	/* Shared slot is keyed to the station MAC; separate tx slot uses
	 * the null MAC address. */
	ieee80211_crypto_setkey(vap, &ni->ni_ucastkey,
		(rxkeyidx == txkeyidx) ? ni->ni_macaddr:null_macaddr, ni);

	if (rxkeyidx != txkeyidx) {
		/* Setup recv key */
		ni_key = &tmpkey;
		ni_key->wk_keyix = rxkeyidx;
		ni_key->wk_flags = IEEE80211_KEY_RECV;
		ni_key->wk_keylen = rcv_key->wk_keylen;
		for(i = 0; i < IEEE80211_TID_SIZE; i++)
			ni_key->wk_keyrsc[i] = rcv_key->wk_keyrsc[i];
		ni_key->wk_keytsc = 0;
		ni_key->wk_cipher = rcv_key->wk_cipher;
		ni_key->wk_private = rcv_key->wk_private;
		memset(ni_key->wk_key, 0, sizeof(ni_key->wk_key));
		memcpy(ni_key->wk_key, rcv_key->wk_key, rcv_key->wk_keylen);
		ieee80211_crypto_setkey(vap, &tmpkey, ni->ni_macaddr, ni);
	}

	return;

error:
	/* could not set up WEP keys: disable compression for this node */
	ni->ni_ath_flags &= ~IEEE80211_NODE_COMP;
	return;
}
| | 8587 | |
| | 8588 | /* Create a keycache entry for given node in clearcase as well as static wep. |
| | 8589 | * Handle compression state if required. |
| | 8590 | * For non clearcase/static wep case, the key is plumbed by hostapd. |
| | 8591 | */ |
| | 8592 | static void |
| | 8593 | ath_setup_keycacheslot(struct ath_softc *sc, struct ieee80211_node *ni) |
| | 8594 | { |
| | 8595 | struct ieee80211vap *vap = ni->ni_vap; |
| | 8596 | |
| | 8597 | if (ni->ni_ucastkey.wk_keyix != IEEE80211_KEYIX_NONE) |
| | 8598 | ieee80211_crypto_delkey(vap, &ni->ni_ucastkey, ni); |
| | 8599 | |
| | 8600 | /* Only for clearcase and WEP case */ |
| | 8601 | if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0 || |
| | 8602 | (ni->ni_ath_defkeyindex != IEEE80211_INVAL_DEFKEY)) { |
| | 8603 | |
| | 8604 | if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) { |
| | 8605 | KASSERT(ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE, |
| | 8606 | ("new node with a ucast key already setup (keyix %u)", |
| | 8607 | ni->ni_ucastkey.wk_keyix)); |
| | 8608 | /* NB: 5210 has no passthru/clr key support */ |
| | 8609 | if (sc->sc_hasclrkey) |
| | 8610 | ath_setup_stationkey(ni); |
| | 8611 | } else |
| | 8612 | ath_setup_stationwepkey(ni); |
| | 8613 | } |
| | 8614 | |
| | 8615 | return; |
| | 8616 | } |
| | 8617 | |
| | 8618 | /* |
| | 8619 | * Setup driver-specific state for a newly associated node. |
| | 8620 | * Note that we're called also on a re-associate, the isnew |
| | 8621 | * param tells us if this is the first time or not. |
| | 8622 | */ |
| | 8623 | static void |
| | 8624 | ath_newassoc(struct ieee80211_node *ni, int isnew) |
| | 8625 | { |
| | 8626 | struct ieee80211com *ic = ni->ni_ic; |
| | 8627 | struct ieee80211vap *vap = ni->ni_vap; |
| | 8628 | struct ath_softc *sc = ic->ic_dev->priv; |
| | 8629 | |
| | 8630 | sc->sc_rc->ops->newassoc(sc, ATH_NODE(ni), isnew); |
| | 8631 | |
| | 8632 | /* are we supporting compression? */ |
| | 8633 | if (!(vap->iv_ath_cap & ni->ni_ath_flags & IEEE80211_NODE_COMP)) |
| | 8634 | ni->ni_ath_flags &= ~IEEE80211_NODE_COMP; |
| | 8635 | |
| | 8636 | /* disable compression for TKIP */ |
| | 8637 | if ((ni->ni_ath_flags & IEEE80211_NODE_COMP) && |
| | 8638 | (ni->ni_wpa_ie != NULL) && |
| | 8639 | (ni->ni_rsn.rsn_ucastcipher == IEEE80211_CIPHER_TKIP)) |
| | 8640 | ni->ni_ath_flags &= ~IEEE80211_NODE_COMP; |
| | 8641 | |
| | 8642 | ath_setup_keycacheslot(sc, ni); |
| | 8643 | #ifdef ATH_SUPERG_XR |
| | 8644 | if (1) { |
| | 8645 | struct ath_node *an = ATH_NODE(ni); |
| | 8646 | if (ic->ic_ath_cap & an->an_node.ni_ath_flags & IEEE80211_ATHC_XR) |
| | 8647 | an->an_minffrate = ATH_MIN_FF_RATE; |
| | 8648 | else |
| | 8649 | an->an_minffrate = 0; |
| | 8650 | ath_grppoll_period_update(sc); |
| | 8651 | } |
| | 8652 | #endif |
| | 8653 | } |
| | 8654 | |
| | 8655 | static int |
| | 8656 | ath_getchannels(struct net_device *dev, u_int cc, |
| | 8657 | HAL_BOOL outdoor, HAL_BOOL xchanmode) |
| | 8658 | { |
| | 8659 | struct ath_softc *sc = dev->priv; |
| | 8660 | struct ieee80211com *ic = &sc->sc_ic; |
| | 8661 | struct ath_hal *ah = sc->sc_ah; |
| | 8662 | HAL_CHANNEL *chans; |
| | 8663 | int i; |
| | 8664 | u_int nchan; |
| | 8665 | |
| | 8666 | chans = kmalloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL), GFP_KERNEL); |
| | 8667 | if (chans == NULL) { |
| | 8668 | printk("%s: unable to allocate channel table\n", dev->name); |
| | 8669 | return -ENOMEM; |
| | 8670 | } |
| | 8671 | if (!ath_hal_init_channels(ah, chans, IEEE80211_CHAN_MAX, &nchan, |
| | 8672 | ic->ic_regclassids, IEEE80211_REGCLASSIDS_MAX, &ic->ic_nregclass, |
| | 8673 | cc, HAL_MODE_ALL, outdoor, xchanmode)) { |
| | 8674 | u_int32_t rd; |
| | 8675 | |
| | 8676 | ath_hal_getregdomain(ah, &rd); |
| | 8677 | printk("%s: unable to collect channel list from HAL; " |
| | 8678 | "regdomain likely %u country code %u\n", |
| | 8679 | dev->name, rd, cc); |
| | 8680 | kfree(chans); |
| | 8681 | return -EINVAL; |
| | 8682 | } |
| | 8683 | /* |
| | 8684 | * Convert HAL channels to ieee80211 ones. |
| | 8685 | */ |
| | 8686 | for (i = 0; i < nchan; i++) { |
| | 8687 | HAL_CHANNEL *c = &chans[i]; |
| | 8688 | struct ieee80211_channel *ichan = &ic->ic_channels[i]; |
| | 8689 | |
| | 8690 | ichan->ic_ieee = ath_hal_mhz2ieee(ah, c->channel, c->channelFlags); |
| | 8691 | ichan->ic_freq = c->channel; |
| | 8692 | ichan->ic_flags = c->channelFlags; |
| | 8693 | ichan->ic_maxregpower = c->maxRegTxPower; /* dBm */ |
| | 8694 | ichan->ic_maxpower = c->maxTxPower; /* 1/4 dBm */ |
| | 8695 | ichan->ic_minpower = c->minTxPower; /* 1/4 dBm */ |
| | 8696 | } |
| | 8697 | ic->ic_nchans = nchan; |
| | 8698 | kfree(chans); |
| | 8699 | return 0; |
| | 8700 | } |
| | 8701 | |
| | 8702 | static void |
| | 8703 | ath_led_done(unsigned long arg) |
| | 8704 | { |
| | 8705 | struct ath_softc *sc = (struct ath_softc *) arg; |
| | 8706 | |
| | 8707 | sc->sc_blinking = 0; |
| | 8708 | } |
| | 8709 | |
| | 8710 | /* |
| | 8711 | * Turn the LED off: flip the pin and then set a timer so no |
| | 8712 | * update will happen for the specified duration. |
| | 8713 | */ |
| | 8714 | static void |
| | 8715 | ath_led_off(unsigned long arg) |
| | 8716 | { |
| | 8717 | struct ath_softc *sc = (struct ath_softc *) arg; |
| | 8718 | |
| | 8719 | ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon); |
| | 8720 | sc->sc_ledtimer.function = ath_led_done; |
| | 8721 | sc->sc_ledtimer.expires = jiffies + sc->sc_ledoff; |
| | 8722 | add_timer(&sc->sc_ledtimer); |
| | 8723 | } |
| | 8724 | |
| | 8725 | /* |
| | 8726 | * Blink the LED according to the specified on/off times. |
| | 8727 | */ |
| | 8728 | static void |
| | 8729 | ath_led_blink(struct ath_softc *sc, int on, int off) |
| | 8730 | { |
| | 8731 | DPRINTF(sc, ATH_DEBUG_LED, "%s: on %u off %u\n", __func__, on, off); |
| | 8732 | ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, sc->sc_ledon); |
| | 8733 | sc->sc_blinking = 1; |
| | 8734 | sc->sc_ledoff = off; |
| | 8735 | sc->sc_ledtimer.function = ath_led_off; |
| | 8736 | sc->sc_ledtimer.expires = jiffies + on; |
| | 8737 | add_timer(&sc->sc_ledtimer); |
| | 8738 | } |
| | 8739 | |
| | 8740 | static void |
| | 8741 | ath_led_event(struct ath_softc *sc, int event) |
| | 8742 | { |
| | 8743 | |
| | 8744 | sc->sc_ledevent = jiffies; /* time of last event */ |
| | 8745 | if (sc->sc_blinking) /* don't interrupt active blink */ |
| | 8746 | return; |
| | 8747 | switch (event) { |
| | 8748 | case ATH_LED_POLL: |
| | 8749 | ath_led_blink(sc, sc->sc_hwmap[0].ledon, |
| | 8750 | sc->sc_hwmap[0].ledoff); |
| | 8751 | break; |
| | 8752 | case ATH_LED_TX: |
| | 8753 | ath_led_blink(sc, sc->sc_hwmap[sc->sc_txrate].ledon, |
| | 8754 | sc->sc_hwmap[sc->sc_txrate].ledoff); |
| | 8755 | break; |
| | 8756 | case ATH_LED_RX: |
| | 8757 | ath_led_blink(sc, sc->sc_hwmap[sc->sc_rxrate].ledon, |
| | 8758 | sc->sc_hwmap[sc->sc_rxrate].ledoff); |
| | 8759 | break; |
| | 8760 | } |
| | 8761 | } |
| | 8762 | |
| | 8763 | static void |
| | 8764 | set_node_txpower(void *arg, struct ieee80211_node *ni) |
| | 8765 | { |
| | 8766 | int *value = (int *)arg; |
| | 8767 | ni->ni_txpower = *value; |
| | 8768 | } |
| | 8769 | |
/* Reconcile the requested tx power limit (ic_newtxpowlimit) with the
 * hardware/regulatory maximum, clamp it, propagate the clamped value to
 * every VAP's bss node (and, without ATH_CAP_TPC, to all nodes), and
 * finally program the HAL.
 *
 * XXX: this function needs some locking to avoid being called twice/interrupted */
static void
ath_update_txpow(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = NULL;
	struct ath_hal *ah = sc->sc_ah;
	u_int32_t txpowlimit = 0;
	u_int32_t maxtxpowlimit = 9999;	/* deliberately above any real limit */
	u_int32_t clamped_txpow = 0;

	/*
	 * Find the maxtxpow of the card and regulatory constraints
	 * (probe by programming an oversized limit, reading back the
	 * value the HAL actually accepted, then restoring the original).
	 */
	(void)ath_hal_getmaxtxpow(ah, &txpowlimit);
	ath_hal_settxpowlimit(ah, maxtxpowlimit);
	(void)ath_hal_getmaxtxpow(ah, &maxtxpowlimit);
	ic->ic_txpowlimit = maxtxpowlimit;
	ath_hal_settxpowlimit(ah, txpowlimit);

 	/*
	 * Make sure the VAP's change is within limits, clamp it otherwise
 	 */
	if (ic->ic_newtxpowlimit > ic->ic_txpowlimit)
		clamped_txpow = ic->ic_txpowlimit;
	else
		clamped_txpow = ic->ic_newtxpowlimit;

	/*
	 * Search for the VAP that needs a txpow change, if any
	 */
	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
#ifdef ATH_CAP_TPC
		/* with TPC only VAPs already at the requested level change */
		if (ic->ic_newtxpowlimit == vap->iv_bss->ni_txpower) {
			vap->iv_bss->ni_txpower = clamped_txpow;
			ieee80211_iterate_nodes(&vap->iv_ic->ic_sta, set_node_txpower, &clamped_txpow);
		}
#else
		vap->iv_bss->ni_txpower = clamped_txpow;
		ieee80211_iterate_nodes(&vap->iv_ic->ic_sta, set_node_txpower, &clamped_txpow);
#endif
	}

	ic->ic_newtxpowlimit = sc->sc_curtxpow = clamped_txpow;

#ifdef ATH_CAP_TPC
	if (ic->ic_newtxpowlimit >= ic->ic_txpowlimit)
		ath_hal_settxpowlimit(ah, ic->ic_newtxpowlimit);
#else
	if (ic->ic_newtxpowlimit != ic->ic_txpowlimit)
		ath_hal_settxpowlimit(ah, ic->ic_newtxpowlimit);
#endif
}
| | 8823 | |
| | 8824 | |
| | 8825 | #ifdef ATH_SUPERG_XR |
| | 8826 | static int |
| | 8827 | ath_xr_rate_setup(struct net_device *dev) |
| | 8828 | { |
| | 8829 | struct ath_softc *sc = dev->priv; |
| | 8830 | struct ath_hal *ah = sc->sc_ah; |
| | 8831 | struct ieee80211com *ic = &sc->sc_ic; |
| | 8832 | const HAL_RATE_TABLE *rt; |
| | 8833 | struct ieee80211_rateset *rs; |
| | 8834 | int i, maxrates; |
| | 8835 | sc->sc_xr_rates = ath_hal_getratetable(ah, HAL_MODE_XR); |
| | 8836 | rt = sc->sc_xr_rates; |
| | 8837 | if (rt == NULL) |
| | 8838 | return 0; |
| | 8839 | if (rt->rateCount > XR_NUM_SUP_RATES) { |
| | 8840 | DPRINTF(sc, ATH_DEBUG_ANY, |
| | 8841 | "%s: rate table too small (%u > %u)\n", |
| | 8842 | __func__, rt->rateCount, IEEE80211_RATE_MAXSIZE); |
| | 8843 | maxrates = IEEE80211_RATE_MAXSIZE; |
| | 8844 | } else |
| | 8845 | maxrates = rt->rateCount; |
| | 8846 | rs = &ic->ic_sup_xr_rates; |
| | 8847 | for (i = 0; i < maxrates; i++) |
| | 8848 | rs->rs_rates[i] = rt->info[i].dot11Rate; |
| | 8849 | rs->rs_nrates = maxrates; |
| | 8850 | return 1; |
| | 8851 | } |
| | 8852 | #endif |
| | 8853 | |
| | 8854 | /* Setup half/quarter rate table support */ |
| | 8855 | static void |
| | 8856 | ath_setup_subrates(struct net_device *dev) |
| | 8857 | { |
| | 8858 | struct ath_softc *sc = dev->priv; |
| | 8859 | struct ath_hal *ah = sc->sc_ah; |
| | 8860 | struct ieee80211com *ic = &sc->sc_ic; |
| | 8861 | const HAL_RATE_TABLE *rt; |
| | 8862 | struct ieee80211_rateset *rs; |
| | 8863 | int i, maxrates; |
| | 8864 | |
| | 8865 | sc->sc_half_rates = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE); |
| | 8866 | rt = sc->sc_half_rates; |
| | 8867 | if (rt != NULL) { |
| | 8868 | if (rt->rateCount > IEEE80211_RATE_MAXSIZE) { |
| | 8869 | DPRINTF(sc, ATH_DEBUG_ANY, |
| | 8870 | "%s: rate table too small (%u > %u)\n", |
| | 8871 | __func__, rt->rateCount, IEEE80211_RATE_MAXSIZE); |
| | 8872 | maxrates = IEEE80211_RATE_MAXSIZE; |
| | 8873 | } else |
| | 8874 | maxrates = rt->rateCount; |
| | 8875 | rs = &ic->ic_sup_half_rates; |
| | 8876 | for (i = 0; i < maxrates; i++) |
| | 8877 | rs->rs_rates[i] = rt->info[i].dot11Rate; |
| | 8878 | rs->rs_nrates = maxrates; |
| | 8879 | } |
| | 8880 | |
| | 8881 | sc->sc_quarter_rates = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE); |
| | 8882 | rt = sc->sc_quarter_rates; |
| | 8883 | if (rt != NULL) { |
| | 8884 | if (rt->rateCount > IEEE80211_RATE_MAXSIZE) { |
| | 8885 | DPRINTF(sc, ATH_DEBUG_ANY, |
| | 8886 | "%s: rate table too small (%u > %u)\n", |
| | 8887 | __func__, rt->rateCount, IEEE80211_RATE_MAXSIZE); |
| | 8888 | maxrates = IEEE80211_RATE_MAXSIZE; |
| | 8889 | } else |
| | 8890 | maxrates = rt->rateCount; |
| | 8891 | rs = &ic->ic_sup_quarter_rates; |
| | 8892 | for (i = 0; i < maxrates; i++) |
| | 8893 | rs->rs_rates[i] = rt->info[i].dot11Rate; |
| | 8894 | rs->rs_nrates = maxrates; |
| | 8895 | } |
| | 8896 | } |
| | 8897 | |
| | 8898 | static int |
| | 8899 | ath_rate_setup(struct net_device *dev, u_int mode) |
| | 8900 | { |
| | 8901 | struct ath_softc *sc = dev->priv; |
| | 8902 | struct ath_hal *ah = sc->sc_ah; |
| | 8903 | struct ieee80211com *ic = &sc->sc_ic; |
| | 8904 | const HAL_RATE_TABLE *rt; |
| | 8905 | struct ieee80211_rateset *rs; |
| | 8906 | int i, maxrates; |
| | 8907 | |
| | 8908 | switch (mode) { |
| | 8909 | case IEEE80211_MODE_11A: |
| | 8910 | sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_11A); |
| | 8911 | break; |
| | 8912 | case IEEE80211_MODE_11B: |
| | 8913 | sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_11B); |
| | 8914 | break; |
| | 8915 | case IEEE80211_MODE_11G: |
| | 8916 | sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_11G); |
| | 8917 | break; |
| | 8918 | case IEEE80211_MODE_TURBO_A: |
| | 8919 | sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_TURBO); |
| | 8920 | break; |
| | 8921 | case IEEE80211_MODE_TURBO_G: |
| | 8922 | sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_108G); |
| | 8923 | break; |
| | 8924 | default: |
| | 8925 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", |
| | 8926 | __func__, mode); |
| | 8927 | return 0; |
| | 8928 | } |
| | 8929 | rt = sc->sc_rates[mode]; |
| | 8930 | if (rt == NULL) |
| | 8931 | return 0; |
| | 8932 | if (rt->rateCount > IEEE80211_RATE_MAXSIZE) { |
| | 8933 | DPRINTF(sc, ATH_DEBUG_ANY, |
| | 8934 | "%s: rate table too small (%u > %u)\n", |
| | 8935 | __func__, rt->rateCount, IEEE80211_RATE_MAXSIZE); |
| | 8936 | maxrates = IEEE80211_RATE_MAXSIZE; |
| | 8937 | } else |
| | 8938 | maxrates = rt->rateCount; |
| | 8939 | rs = &ic->ic_sup_rates[mode]; |
| | 8940 | for (i = 0; i < maxrates; i++) |
| | 8941 | rs->rs_rates[i] = rt->info[i].dot11Rate; |
| | 8942 | rs->rs_nrates = maxrates; |
| | 8943 | return 1; |
| | 8944 | } |
| | 8945 | |
/*
 * Switch the driver's notion of the current phy mode: rebuild the
 * 802.11-rate -> rate-table-index map (sc_rixmap) and the h/w
 * rate-code -> {ieee rate, radiotap flags, LED blink times} map
 * (sc_hwmap), then cache the current rate table and mode.
 */
static void
ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
{
#define N(a) ((int)(sizeof(a)/sizeof(a[0])))
	/* NB: on/off times from the Atheros NDIS driver, w/ permission */
	static const struct {
		u_int rate; /* tx/rx 802.11 rate */
		u_int16_t timeOn; /* LED on time (ms) */
		u_int16_t timeOff; /* LED off time (ms) */
	} blinkrates[] = {
		{ 108, 40, 10 },
		{ 96, 44, 11 },
		{ 72, 50, 13 },
		{ 48, 57, 14 },
		{ 36, 67, 16 },
		{ 24, 80, 20 },
		{ 22, 100, 25 },
		{ 18, 133, 34 },
		{ 12, 160, 40 },
		{ 10, 200, 50 },
		{ 6, 240, 58 },
		{ 4, 267, 66 },
		{ 2, 400, 100 },
		{ 0, 500, 130 },
	};
	const HAL_RATE_TABLE *rt;
	int i, j;

	/* invalidate all entries; 0xff marks "no index" */
	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = sc->sc_rates[mode];
	KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
	for (i = 0; i < rt->rateCount; i++)
		sc->sc_rixmap[rt->info[i].dot11Rate & IEEE80211_RATE_VAL] = i;
	memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
	/* NB: 32 possible h/w rate codes indexed via rateCodeToIndex */
	for (i = 0; i < 32; i++) {
		u_int8_t ix = rt->rateCodeToIndex[i];
		if (ix == 0xff) {
			/* unused rate code: park on the slowest blink rate */
			sc->sc_hwmap[i].ledon = msecs_to_jiffies(500);
			sc->sc_hwmap[i].ledoff = msecs_to_jiffies(130);
			continue;
		}
		sc->sc_hwmap[i].ieeerate =
			rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[ix].shortPreamble ||
		    rt->info[ix].phy == IEEE80211_T_OFDM)
			sc->sc_hwmap[i].flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		/* setup blink rate table to avoid per-packet lookup */
		for (j = 0; j < N(blinkrates) - 1; j++)
			if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
				break;
		/* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overflow */
		sc->sc_hwmap[i].ledon = msecs_to_jiffies(blinkrates[j].timeOn);
		sc->sc_hwmap[i].ledoff = msecs_to_jiffies(blinkrates[j].timeOff);
	}
	sc->sc_currates = rt;
	sc->sc_curmode = mode;
	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 * XXX select protection rate index from rate table.
	 */
	sc->sc_protrix = (mode == IEEE80211_MODE_11G ? 1 : 0);
	/* rate index used to send mgt frames */
	sc->sc_minrateix = 0;
#undef N
}
| | 9013 | |
| | 9014 | #ifdef ATH_SUPERG_FF |
| | 9015 | static u_int32_t |
| | 9016 | athff_approx_txtime(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb) |
| | 9017 | { |
| | 9018 | u_int32_t txtime; |
| | 9019 | u_int32_t framelen; |
| | 9020 | |
| | 9021 | /* |
| | 9022 | * Approximate the frame length to be transmitted. A swag to add |
| | 9023 | * the following maximal values to the skb payload: |
| | 9024 | * - 32: 802.11 encap + CRC |
| | 9025 | * - 24: encryption overhead (if wep bit) |
| | 9026 | * - 4 + 6: fast-frame header and padding |
| | 9027 | * - 16: 2 LLC FF tunnel headers |
| | 9028 | * - 14: 1 802.3 FF tunnel header (skb already accounts for 2nd) |
| | 9029 | */ |
| | 9030 | framelen = skb->len + 32 + 4 + 6 + 16 + 14; |
| | 9031 | if (sc->sc_ic.ic_flags & IEEE80211_F_PRIVACY) |
| | 9032 | framelen += 24; |
| | 9033 | if (an->an_tx_ffbuf[skb->priority]) |
| | 9034 | framelen += an->an_tx_ffbuf[skb->priority]->bf_skb->len; |
| | 9035 | |
| | 9036 | txtime = ath_hal_computetxtime(sc->sc_ah, sc->sc_currates, framelen, |
| | 9037 | an->an_prevdatarix, AH_FALSE); |
| | 9038 | |
| | 9039 | return txtime; |
| | 9040 | } |
| | 9041 | /* |
| | 9042 | * Determine if a data frame may be aggregated via ff tunneling. |
| | 9043 | * |
| | 9044 | * NB: allowing EAPOL frames to be aggregated with other unicast traffic. |
| | 9045 | * Do 802.1x EAPOL frames proceed in the clear? Then they couldn't |
| | 9046 | * be aggregated with other types of frames when encryption is on? |
| | 9047 | * |
| | 9048 | * NB: assumes lock on an_tx_ffbuf effectively held by txq lock mechanism. |
| | 9049 | */ |
/*
 * Decide whether this frame may be fast-frame aggregated.
 * Returns AH_TRUE/AH_FALSE; *flushq is set AH_TRUE when a staged
 * frame should be flushed instead (TxOp would be violated).
 */
static int
athff_can_aggregate(struct ath_softc *sc, struct ether_header *eh,
	struct ath_node *an, struct sk_buff *skb, u_int16_t fragthreshold, int *flushq)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_txq *txq = sc->sc_ac2q[skb->priority];
	struct ath_buf *ffbuf = an->an_tx_ffbuf[skb->priority];
	u_int32_t txoplimit;

#define US_PER_4MS 4000
#define MIN(a,b) ((a) < (b) ? (a) : (b))

	*flushq = AH_FALSE;

	/* NB: 2346 is the 802.11 maximum MPDU size; a smaller threshold
	 * means fragmentation is active, which precludes fast frames */
	if (fragthreshold < 2346)
		return AH_FALSE;

	/* nothing staged and the queue is shallow: no need to aggregate */
	if ((!ffbuf) && (txq->axq_depth < sc->sc_fftxqmin))
		return AH_FALSE;
	/* both our side and the peer must advertise fast-frame support */
	if (!(ic->ic_ath_cap & an->an_node.ni_ath_flags & IEEE80211_ATHC_FF))
		return AH_FALSE;
	/* only station and AP modes aggregate */
	if (!(ic->ic_opmode == IEEE80211_M_STA ||
	    ic->ic_opmode == IEEE80211_M_HOSTAP))
		return AH_FALSE;
	/* never aggregate multicast in AP mode */
	if ((ic->ic_opmode == IEEE80211_M_HOSTAP) &&
	    ETHER_IS_MULTICAST(eh->ether_dhost))
		return AH_FALSE;

#ifdef ATH_SUPERG_XR
	/* current data rate below the per-node fast-frame floor */
	if (sc->sc_currates->info[an->an_prevdatarix].rateKbps < an->an_minffrate)
		return AH_FALSE;
#endif
	txoplimit = IEEE80211_TXOP_TO_US(
		ic->ic_wme.wme_chanParams.cap_wmeParams[skb->priority].wmep_txopLimit);

	/* if the 4 msec limit is set on the channel, take it into account */
	if (sc->sc_curchan.privFlags & CHANNEL_4MS_LIMIT)
		txoplimit = MIN(txoplimit, US_PER_4MS);

	/* would the aggregate exceed the TxOp?  then flush what's staged */
	if (txoplimit != 0 && athff_approx_txtime(sc, an, skb) > txoplimit) {
		DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
			"%s: FF TxOp violation\n", __func__);
		if (ffbuf)
			*flushq = AH_TRUE;
		return AH_FALSE;
	}

	return AH_TRUE;

#undef US_PER_4MS
#undef MIN
}
| | 9102 | #endif |
| | 9103 | |
| | 9104 | #ifdef AR_DEBUG |
| | 9105 | static void |
| | 9106 | ath_printrxbuf(struct ath_buf *bf, int done) |
| | 9107 | { |
| | 9108 | struct ath_desc *ds = bf->bf_desc; |
| | 9109 | |
| | 9110 | printk("R (%p %llx) %08x %08x %08x %08x %08x %08x %c\n", |
| | 9111 | ds, ito64(bf->bf_daddr), |
| | 9112 | ds->ds_link, ds->ds_data, |
| | 9113 | ds->ds_ctl0, ds->ds_ctl1, |
| | 9114 | ds->ds_hw[0], ds->ds_hw[1], |
| | 9115 | !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? '*' : '!'); |
| | 9116 | } |
| | 9117 | |
| | 9118 | static void |
| | 9119 | ath_printtxbuf(struct ath_buf *bf, int done) |
| | 9120 | { |
| | 9121 | struct ath_desc *ds = bf->bf_desc; |
| | 9122 | |
| | 9123 | printk("T (%p %llx) %08x %08x %08x %08x %08x %08x %08x %08x %c\n", |
| | 9124 | ds, ito64(bf->bf_daddr), |
| | 9125 | ds->ds_link, ds->ds_data, |
| | 9126 | ds->ds_ctl0, ds->ds_ctl1, |
| | 9127 | ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3], |
| | 9128 | !done ? ' ' : (ds->ds_txstat.ts_status == 0) ? '*' : '!'); |
| | 9129 | } |
| | 9130 | #endif /* AR_DEBUG */ |
| | 9131 | |
| | 9132 | /* |
| | 9133 | * Return netdevice statistics. |
| | 9134 | */ |
| | 9135 | static struct net_device_stats * |
| | 9136 | ath_getstats(struct net_device *dev) |
| | 9137 | { |
| | 9138 | struct ath_softc *sc = dev->priv; |
| | 9139 | struct net_device_stats *stats = &sc->sc_devstats; |
| | 9140 | |
| | 9141 | /* update according to private statistics */ |
| | 9142 | stats->tx_errors = sc->sc_stats.ast_tx_xretries |
| | 9143 | + sc->sc_stats.ast_tx_fifoerr |
| | 9144 | + sc->sc_stats.ast_tx_filtered; |
| | 9145 | stats->tx_dropped = sc->sc_stats.ast_tx_nobuf |
| | 9146 | + sc->sc_stats.ast_tx_encap |
| | 9147 | + sc->sc_stats.ast_tx_nonode |
| | 9148 | + sc->sc_stats.ast_tx_nobufmgt; |
| | 9149 | stats->rx_errors = sc->sc_stats.ast_rx_fifoerr |
| | 9150 | + sc->sc_stats.ast_rx_badcrypt |
| | 9151 | + sc->sc_stats.ast_rx_badmic; |
| | 9152 | stats->rx_dropped = sc->sc_stats.ast_rx_tooshort; |
| | 9153 | stats->rx_crc_errors = sc->sc_stats.ast_rx_crcerr; |
| | 9154 | |
| | 9155 | return stats; |
| | 9156 | } |
| | 9157 | |
/*
 * Change the device MAC address.  Refused while the interface is up
 * (netif_running); otherwise the new address is propagated to the
 * 802.11 layer, the net_device and the HAL, and the chip is reset
 * if it is marked running.  Returns 0 or a negative errno.
 */
static int
ath_set_mac_address(struct net_device *dev, void *addr)
{
	struct ath_softc *sc = dev->priv;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	struct sockaddr *mac = addr;
	int error = 0;

	if (netif_running(dev)) {
		DPRINTF(sc, ATH_DEBUG_ANY,
			"%s: cannot set address; device running\n", __func__);
		return -EBUSY;
	}
	DPRINTF(sc, ATH_DEBUG_ANY, "%s: %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
		__func__,
		mac->sa_data[0], mac->sa_data[1], mac->sa_data[2],
		mac->sa_data[3], mac->sa_data[4], mac->sa_data[5]);

	ATH_LOCK(sc);
	/* XXX not right for multiple VAPs */
	IEEE80211_ADDR_COPY(ic->ic_myaddr, mac->sa_data);
	IEEE80211_ADDR_COPY(dev->dev_addr, mac->sa_data);
	ath_hal_setmac(ah, dev->dev_addr);
	/* NB: IFF_RUNNING may still be set even though netif is down;
	 * reset so the h/w picks up the new address */
	if ((dev->flags & IFF_RUNNING) && !sc->sc_invalid) {
		error = ath_reset(dev);
	}
	ATH_UNLOCK(sc);

	return error;
}
| | 9189 | |
| | 9190 | static int |
| | 9191 | ath_change_mtu(struct net_device *dev, int mtu) |
| | 9192 | { |
| | 9193 | struct ath_softc *sc = dev->priv; |
| | 9194 | int error = 0; |
| | 9195 | |
| | 9196 | if (!(ATH_MIN_MTU < mtu && mtu <= ATH_MAX_MTU)) { |
| | 9197 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %d, min %u, max %u\n", |
| | 9198 | __func__, mtu, ATH_MIN_MTU, ATH_MAX_MTU); |
| | 9199 | return -EINVAL; |
| | 9200 | } |
| | 9201 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: %d\n", __func__, mtu); |
| | 9202 | |
| | 9203 | ATH_LOCK(sc); |
| | 9204 | dev->mtu = mtu; |
| | 9205 | if ((dev->flags & IFF_RUNNING) && !sc->sc_invalid) { |
| | 9206 | /* NB: the rx buffers may need to be reallocated */ |
| | 9207 | tasklet_disable(&sc->sc_rxtq); |
| | 9208 | error = ath_reset(dev); |
| | 9209 | tasklet_enable(&sc->sc_rxtq); |
| | 9210 | } |
| | 9211 | ATH_UNLOCK(sc); |
| | 9212 | |
| | 9213 | return error; |
| | 9214 | } |
| | 9215 | |
| | 9216 | /* |
| | 9217 | * Diagnostic interface to the HAL. This is used by various |
| | 9218 | * tools to do things like retrieve register contents for |
| | 9219 | * debugging. The mechanism is intentionally opaque so that |
| | 9220 | * it can change frequently w/o concern for compatibility. |
| | 9221 | */ |
/*
 * Handle a HAL diagnostic request: optionally copy a user-supplied
 * input buffer in (ATH_DIAG_IN), optionally allocate an output
 * buffer (ATH_DIAG_DYN), run ath_hal_getdiagstate() and copy the
 * result back to userspace.  Returns 0 or a negative errno.
 *
 * NOTE(review): insize/outsize come straight from userspace and are
 * passed to kmalloc() unbounded — a large value can trigger huge or
 * failing allocations.  Reachable only via CAP_NET_ADMIN (see
 * SIOCGATHDIAG in ath_ioctl), but a sanity bound would be prudent.
 */
static int
ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
{
	struct ath_hal *ah = sc->sc_ah;
	u_int id = ad->ad_id & ATH_DIAG_ID;
	void *indata = NULL;
	void *outdata = NULL;
	u_int32_t insize = ad->ad_in_size;
	u_int32_t outsize = ad->ad_out_size;
	int error = 0;

	if (ad->ad_id & ATH_DIAG_IN) {
		/*
		 * Copy in data.
		 */
		indata = kmalloc(insize, GFP_KERNEL);
		if (indata == NULL) {
			error = -ENOMEM;
			goto bad;
		}
		if (copy_from_user(indata, ad->ad_in_data, insize)) {
			error = -EFAULT;
			goto bad;
		}
	}
	if (ad->ad_id & ATH_DIAG_DYN) {
		/*
		 * Allocate a buffer for the results (otherwise the HAL
		 * returns a pointer to a buffer where we can read the
		 * results). Note that we depend on the HAL leaving this
		 * pointer for us to use below in reclaiming the buffer;
		 * may want to be more defensive.
		 */
		outdata = kmalloc(outsize, GFP_KERNEL);
		if (outdata == NULL) {
			error = -ENOMEM;
			goto bad;
		}
	}
	if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
		/* NB: the HAL may shrink outsize; never copy more than
		 * the caller asked for */
		if (outsize < ad->ad_out_size)
			ad->ad_out_size = outsize;
		if (outdata &&
		    copy_to_user(ad->ad_out_data, outdata, ad->ad_out_size))
			error = -EFAULT;
	} else
		error = -EINVAL;
bad:
	/* goto-cleanup: release whichever buffers were allocated */
	if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
		kfree(indata);
	if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
		kfree(outdata);
	return error;
}
| | 9276 | |
/*
 * Driver ioctl entry point: statistics export, HAL diagnostics
 * (CAP_NET_ADMIN only), ethtool passthrough and VAP creation.
 * Returns 0 or a negative errno; all work is done under ATH_LOCK.
 */
static int
ath_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ath_softc *sc = dev->priv;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	ATH_LOCK(sc);
	switch (cmd) {
	case SIOCGATHSTATS:
		/* refresh the snapshot counters before copying out */
		sc->sc_stats.ast_tx_packets = sc->sc_devstats.tx_packets;
		sc->sc_stats.ast_rx_packets = sc->sc_devstats.rx_packets;
		sc->sc_stats.ast_rx_rssi = ieee80211_getrssi(ic);
		if (copy_to_user(ifr->ifr_data, &sc->sc_stats, sizeof (sc->sc_stats)))
			error = -EFAULT;
		else
			error = 0;
		break;
	case SIOCGATHDIAG:
		/* HAL diagnostics are privileged */
		if (!capable(CAP_NET_ADMIN))
			error = -EPERM;
		else
			error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
		break;
	case SIOCETHTOOL:
		/* NB: deliberately overwrites the 'cmd' parameter with the
		 * ethtool sub-command read from userspace */
		if (copy_from_user(&cmd, ifr->ifr_data, sizeof(cmd)))
			error = -EFAULT;
		else
			error = ath_ioctl_ethtool(sc, cmd, ifr->ifr_data);
		break;
	case SIOC80211IFCREATE:
		error = ieee80211_ioctl_create_vap(ic, ifr, dev);
		break;
	default:
		error = -EINVAL;
		break;
	}
	ATH_UNLOCK(sc);
	return error;
}
| | 9317 | |
| | 9318 | /* |
| | 9319 | * Sysctls are split into ``static'' and ``dynamic'' tables. |
 * The former are defined at module load time and are used to
 * control parameters common to all devices. The latter are
| | 9322 | * tied to particular device instances and come and go with |
| | 9323 | * each device. The split is currently a bit tenuous; many of |
| | 9324 | * the static ones probably should be dynamic but having them |
| | 9325 | * static (e.g. debug) means they can be set after a module is |
| | 9326 | * loaded and before bringing up a device. The alternative |
| | 9327 | * is to add module parameters. |
| | 9328 | */ |
| | 9329 | |
| | 9330 | /* |
| | 9331 | * Dynamic (i.e. per-device) sysctls. These are automatically |
| | 9332 | * mirrored in /proc/sys. |
| | 9333 | */ |
/*
 * Sysctl ids, matched against ctl->ctl_name in ath_sysctl_halparam.
 * NB: the gap between 12 and 18 is intentional — presumably ids of
 * since-removed entries, kept so existing values stay stable.
 */
enum {
	ATH_SLOTTIME = 1,
	ATH_ACKTIMEOUT = 2,
	ATH_CTSTIMEOUT = 3,
	ATH_SOFTLED = 4,
	ATH_LEDPIN = 5,
	ATH_COUNTRYCODE = 6,
	ATH_REGDOMAIN = 7,
	ATH_DEBUG = 8,
	ATH_TXANTENNA = 9,
	ATH_RXANTENNA = 10,
	ATH_DIVERSITY = 11,
	ATH_TXINTRPERIOD = 12,
	ATH_FFTXQMIN = 18,
	ATH_TKIPMIC = 19,
	ATH_XR_POLL_PERIOD = 20,
	ATH_XR_POLL_COUNT = 21,
	ATH_ACKRATE = 22,
};
| | 9353 | |
| | 9354 | static int |
| | 9355 | ATH_SYSCTL_DECL(ath_sysctl_halparam, ctl, write, filp, buffer, lenp, ppos) |
| | 9356 | { |
| | 9357 | struct ath_softc *sc = ctl->extra1; |
| | 9358 | struct ath_hal *ah = sc->sc_ah; |
| | 9359 | u_int val; |
| | 9360 | int ret; |
| | 9361 | |
| | 9362 | ctl->data = &val; |
| | 9363 | ctl->maxlen = sizeof(val); |
| | 9364 | if (write) { |
| | 9365 | ret = ATH_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos); |
| | 9366 | if (ret == 0) { |
| | 9367 | switch (ctl->ctl_name) { |
| | 9368 | case ATH_SLOTTIME: |
| | 9369 | if (val > 0) { |
| | 9370 | if (!ath_hal_setslottime(ah, val)) |
| | 9371 | ret = -EINVAL; |
| | 9372 | else |
| | 9373 | sc->sc_slottimeconf = val; |
| | 9374 | } else { |
| | 9375 | /* disable manual override */ |
| | 9376 | sc->sc_slottimeconf = 0; |
| | 9377 | ath_setslottime(sc); |
| | 9378 | } |
| | 9379 | break; |
| | 9380 | case ATH_ACKTIMEOUT: |
| | 9381 | if (!ath_hal_setacktimeout(ah, val)) |
| | 9382 | ret = -EINVAL; |
| | 9383 | break; |
| | 9384 | case ATH_CTSTIMEOUT: |
| | 9385 | if (!ath_hal_setctstimeout(ah, val)) |
| | 9386 | ret = -EINVAL; |
| | 9387 | break; |
| | 9388 | case ATH_SOFTLED: |
| | 9389 | if (val != sc->sc_softled) { |
| | 9390 | if (val) |
| | 9391 | ath_hal_gpioCfgOutput(ah, sc->sc_ledpin); |
| | 9392 | ath_hal_gpioset(ah, sc->sc_ledpin,!sc->sc_ledon); |
| | 9393 | sc->sc_softled = val; |
| | 9394 | } |
| | 9395 | break; |
| | 9396 | case ATH_LEDPIN: |
| | 9397 | /* XXX validate? */ |
| | 9398 | sc->sc_ledpin = val; |
| | 9399 | break; |
| | 9400 | case ATH_DEBUG: |
| | 9401 | sc->sc_debug = val; |
| | 9402 | break; |
| | 9403 | case ATH_TXANTENNA: |
| | 9404 | /* |
| | 9405 | * antenna can be: |
| | 9406 | * 0 = transmit diversity |
| | 9407 | * 1 = antenna port 1 |
| | 9408 | * 2 = antenna port 2 |
| | 9409 | */ |
| | 9410 | if (val < 0 || val > 2) |
| | 9411 | return -EINVAL; |
| | 9412 | else |
| | 9413 | sc->sc_txantenna = val; |
| | 9414 | break; |
| | 9415 | case ATH_RXANTENNA: |
| | 9416 | /* |
| | 9417 | * antenna can be: |
| | 9418 | * 0 = receive diversity |
| | 9419 | * 1 = antenna port 1 |
| | 9420 | * 2 = antenna port 2 |
| | 9421 | */ |
| | 9422 | if (val < 0 || val > 2) |
| | 9423 | return -EINVAL; |
| | 9424 | else |
| | 9425 | ath_setdefantenna(sc, val); |
| | 9426 | break; |
| | 9427 | case ATH_DIVERSITY: |
| | 9428 | /* |
| | 9429 | * 0 = disallow use of diversity |
| | 9430 | * 1 = allow use of diversity |
| | 9431 | */ |
| | 9432 | if (val < 0 || val > 1) |
| | 9433 | return -EINVAL; |
| | 9434 | /* Don't enable diversity if XR is enabled */ |
| | 9435 | if (((!sc->sc_hasdiversity) || (sc->sc_xrtxq != NULL)) && val) |
| | 9436 | return -EINVAL; |
| | 9437 | sc->sc_diversity = val; |
| | 9438 | ath_hal_setdiversity(ah, val); |
| | 9439 | break; |
| | 9440 | case ATH_TXINTRPERIOD: |
| | 9441 | /* XXX: validate? */ |
| | 9442 | sc->sc_txintrperiod = val; |
| | 9443 | break; |
| | 9444 | case ATH_FFTXQMIN: |
| | 9445 | /* XXX validate? */ |
| | 9446 | sc->sc_fftxqmin = val; |
| | 9447 | break; |
| | 9448 | case ATH_TKIPMIC: { |
| | 9449 | struct ieee80211com *ic = &sc->sc_ic; |
| | 9450 | |
| | 9451 | if (!ath_hal_hastkipmic(ah)) |
| | 9452 | return -EINVAL; |
| | 9453 | ath_hal_settkipmic(ah, val); |
| | 9454 | if (val) |
| | 9455 | ic->ic_caps |= IEEE80211_C_TKIPMIC; |
| | 9456 | else |
| | 9457 | ic->ic_caps &= ~IEEE80211_C_TKIPMIC; |
| | 9458 | break; |
| | 9459 | } |
| | 9460 | #ifdef ATH_SUPERG_XR |
| | 9461 | case ATH_XR_POLL_PERIOD: |
| | 9462 | if (val > XR_MAX_POLL_INTERVAL) |
| | 9463 | val = XR_MAX_POLL_INTERVAL; |
| | 9464 | else if (val < XR_MIN_POLL_INTERVAL) |
| | 9465 | val = XR_MIN_POLL_INTERVAL; |
| | 9466 | sc->sc_xrpollint = val; |
| | 9467 | break; |
| | 9468 | |
| | 9469 | case ATH_XR_POLL_COUNT: |
| | 9470 | if (val > XR_MAX_POLL_COUNT) |
| | 9471 | val = XR_MAX_POLL_COUNT; |
| | 9472 | else if (val < XR_MIN_POLL_COUNT) |
| | 9473 | val = XR_MIN_POLL_COUNT; |
| | 9474 | sc->sc_xrpollcount = val; |
| | 9475 | break; |
| | 9476 | #endif |
| | 9477 | case ATH_ACKRATE: |
| | 9478 | sc->sc_ackrate = val; |
| | 9479 | ath_set_ack_bitrate(sc, sc->sc_ackrate); |
| | 9480 | break; |
| | 9481 | default: |
| | 9482 | return -EINVAL; |
| | 9483 | } |
| | 9484 | } |
| | 9485 | } else { |
| | 9486 | switch (ctl->ctl_name) { |
| | 9487 | case ATH_SLOTTIME: |
| | 9488 | val = ath_hal_getslottime(ah); |
| | 9489 | break; |
| | 9490 | case ATH_ACKTIMEOUT: |
| | 9491 | val = ath_hal_getacktimeout(ah); |
| | 9492 | break; |
| | 9493 | case ATH_CTSTIMEOUT: |
| | 9494 | val = ath_hal_getctstimeout(ah); |
| | 9495 | break; |
| | 9496 | case ATH_SOFTLED: |
| | 9497 | val = sc->sc_softled; |
| | 9498 | break; |
| | 9499 | case ATH_LEDPIN: |
| | 9500 | val = sc->sc_ledpin; |
| | 9501 | break; |
| | 9502 | case ATH_COUNTRYCODE: |
| | 9503 | ath_hal_getcountrycode(ah, &val); |
| | 9504 | break; |
| | 9505 | case ATH_REGDOMAIN: |
| | 9506 | ath_hal_getregdomain(ah, &val); |
| | 9507 | break; |
| | 9508 | case ATH_DEBUG: |
| | 9509 | val = sc->sc_debug; |
| | 9510 | break; |
| | 9511 | case ATH_TXANTENNA: |
| | 9512 | val = sc->sc_txantenna; |
| | 9513 | break; |
| | 9514 | case ATH_RXANTENNA: |
| | 9515 | val = ath_hal_getdefantenna(ah); |
| | 9516 | break; |
| | 9517 | case ATH_DIVERSITY: |
| | 9518 | val = sc->sc_diversity; |
| | 9519 | break; |
| | 9520 | case ATH_TXINTRPERIOD: |
| | 9521 | val = sc->sc_txintrperiod; |
| | 9522 | break; |
| | 9523 | case ATH_FFTXQMIN: |
| | 9524 | val = sc->sc_fftxqmin; |
| | 9525 | break; |
| | 9526 | case ATH_TKIPMIC: |
| | 9527 | val = ath_hal_gettkipmic(ah); |
| | 9528 | break; |
| | 9529 | #ifdef ATH_SUPERG_XR |
| | 9530 | case ATH_XR_POLL_PERIOD: |
| | 9531 | val=sc->sc_xrpollint; |
| | 9532 | break; |
| | 9533 | case ATH_XR_POLL_COUNT: |
| | 9534 | val=sc->sc_xrpollcount; |
| | 9535 | break; |
| | 9536 | #endif |
| | 9537 | case ATH_ACKRATE: |
| | 9538 | val = sc->sc_ackrate; |
| | 9539 | break; |
| | 9540 | default: |
| | 9541 | return -EINVAL; |
| | 9542 | } |
| | 9543 | ret = ATH_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos); |
| | 9544 | } |
| | 9545 | return ret; |
| | 9546 | } |
| | 9547 | |
/* NOTE(review): not referenced in this chunk; presumably used as
 * extra1/extra2 range bounds by sysctl entries elsewhere — verify. */
static int mincalibrate = 1; /* once a second */
static int maxint = 0x7fffffff; /* 32-bit big */

#define CTL_AUTO -2 /* cannot be CTL_ANY or CTL_NONE */
| | 9552 | |
/*
 * Template for the per-device sysctl entries; copied into each
 * device's table by ath_dynamic_sysctl_register(), which also fills
 * in extra1 with the owning softc.  All entries dispatch to
 * ath_sysctl_halparam on their ctl_name.
 */
static const ctl_table ath_sysctl_template[] = {
	{ .ctl_name = ATH_SLOTTIME,
	  .procname = "slottime",
	  .mode = 0644,
	  .proc_handler = ath_sysctl_halparam
	},
	{ .ctl_name = ATH_ACKTIMEOUT,
	  .procname = "acktimeout",
	  .mode = 0644,
	  .proc_handler = ath_sysctl_halparam
	},
	{ .ctl_name = ATH_CTSTIMEOUT,
	  .procname = "ctstimeout",
	  .mode = 0644,
	  .proc_handler = ath_sysctl_halparam
	},
	{ .ctl_name = ATH_SOFTLED,
	  .procname = "softled",
	  .mode = 0644,
	  .proc_handler = ath_sysctl_halparam
	},
	{ .ctl_name = ATH_LEDPIN,
	  .procname = "ledpin",
	  .mode = 0644,
	  .proc_handler = ath_sysctl_halparam
	},
	/* NB: countrycode/regdomain are read-only (0444) */
	{ .ctl_name = ATH_COUNTRYCODE,
	  .procname = "countrycode",
	  .mode = 0444,
	  .proc_handler = ath_sysctl_halparam
	},
	{ .ctl_name = ATH_REGDOMAIN,
	  .procname = "regdomain",
	  .mode = 0444,
	  .proc_handler = ath_sysctl_halparam
	},
#ifdef AR_DEBUG
	{ .ctl_name = ATH_DEBUG,
	  .procname = "debug",
	  .mode = 0644,
	  .proc_handler = ath_sysctl_halparam
	},
#endif
	{ .ctl_name = ATH_TXANTENNA,
	  .procname = "txantenna",
	  .mode = 0644,
	  .proc_handler = ath_sysctl_halparam
	},
	{ .ctl_name = ATH_RXANTENNA,
	  .procname = "rxantenna",
	  .mode = 0644,
	  .proc_handler = ath_sysctl_halparam
	},
	{ .ctl_name = ATH_DIVERSITY,
	  .procname = "diversity",
	  .mode = 0644,
	  .proc_handler = ath_sysctl_halparam
	},
	{ .ctl_name = ATH_TXINTRPERIOD,
	  .procname = "txintrperiod",
	  .mode = 0644,
	  .proc_handler = ath_sysctl_halparam
	},
	{ .ctl_name = ATH_FFTXQMIN,
	  .procname = "fftxqmin",
	  .mode = 0644,
	  .proc_handler = ath_sysctl_halparam
	},
	{ .ctl_name = ATH_TKIPMIC,
	  .procname = "tkipmic",
	  .mode = 0644,
	  .proc_handler = ath_sysctl_halparam
	},
#ifdef ATH_SUPERG_XR
	{ .ctl_name = ATH_XR_POLL_PERIOD,
	  .procname = "xrpollperiod",
	  .mode = 0644,
	  .proc_handler = ath_sysctl_halparam
	},
	{ .ctl_name = ATH_XR_POLL_COUNT,
	  .procname = "xrpollcount",
	  .mode = 0644,
	  .proc_handler = ath_sysctl_halparam
	},
#endif
	{ .ctl_name = ATH_ACKRATE,
	  .procname = "ackrate",
	  .mode = 0644,
	  .proc_handler = ath_sysctl_halparam
	},
	{ 0 }
};
| | 9645 | |
| | 9646 | static void |
| | 9647 | ath_dynamic_sysctl_register(struct ath_softc *sc) |
| | 9648 | { |
| | 9649 | int i, space; |
| | 9650 | char *dev_name = NULL; |
| | 9651 | |
| | 9652 | space = 5 * sizeof(struct ctl_table) + sizeof(ath_sysctl_template); |
| | 9653 | sc->sc_sysctls = kmalloc(space, GFP_KERNEL); |
| | 9654 | if (sc->sc_sysctls == NULL) { |
| | 9655 | printk("%s: no memory for sysctl table!\n", __func__); |
| | 9656 | return; |
| | 9657 | } |
| | 9658 | |
| | 9659 | /* |
| | 9660 | * We want to reserve space for the name of the device separate |
| | 9661 | * from the net_device structure, because when the name is changed |
| | 9662 | * it is changed in the net_device structure and the message given |
| | 9663 | * out. Thus we won't know what the name it used to be if we rely |
| | 9664 | * on it. |
| | 9665 | */ |
| | 9666 | dev_name = kmalloc((strlen(sc->sc_dev->name) + 1) * sizeof(char), GFP_KERNEL); |
| | 9667 | if (dev_name == NULL) { |
| | 9668 | printk("%s: no memory for device name storage!\n", __func__); |
| | 9669 | return; |
| | 9670 | } |
| | 9671 | strncpy(dev_name, sc->sc_dev->name, strlen(sc->sc_dev->name) + 1); |
| | 9672 | |
| | 9673 | /* setup the table */ |
| | 9674 | memset(sc->sc_sysctls, 0, space); |
| | 9675 | sc->sc_sysctls[0].ctl_name = CTL_DEV; |
| | 9676 | sc->sc_sysctls[0].procname = "dev"; |
| | 9677 | sc->sc_sysctls[0].mode = 0555; |
| | 9678 | sc->sc_sysctls[0].child = &sc->sc_sysctls[2]; |
| | 9679 | /* [1] is NULL terminator */ |
| | 9680 | sc->sc_sysctls[2].ctl_name = CTL_AUTO; |
| | 9681 | sc->sc_sysctls[2].procname = dev_name; |
| | 9682 | sc->sc_sysctls[2].mode = 0555; |
| | 9683 | sc->sc_sysctls[2].child = &sc->sc_sysctls[4]; |
| | 9684 | /* [3] is NULL terminator */ |
| | 9685 | /* copy in pre-defined data */ |
| | 9686 | memcpy(&sc->sc_sysctls[4], ath_sysctl_template, |
| | 9687 | sizeof(ath_sysctl_template)); |
| | 9688 | |
| | 9689 | /* add in dynamic data references */ |
| | 9690 | for (i = 4; sc->sc_sysctls[i].ctl_name; i++) |
| | 9691 | if (sc->sc_sysctls[i].extra1 == NULL) |
| | 9692 | sc->sc_sysctls[i].extra1 = sc; |
| | 9693 | |
| | 9694 | /* and register everything */ |
| | 9695 | sc->sc_sysctl_header = ATH_REGISTER_SYSCTL_TABLE(sc->sc_sysctls); |
| | 9696 | if (!sc->sc_sysctl_header) { |
| | 9697 | printk("%s: failed to register sysctls!\n", sc->sc_dev->name); |
| | 9698 | kfree(sc->sc_sysctls); |
| | 9699 | sc->sc_sysctls = NULL; |
| | 9700 | } |
| | 9701 | |
| | 9702 | /* initialize values */ |
| | 9703 | sc->sc_debug = ath_debug; |
| | 9704 | sc->sc_txantenna = 0; /* default to auto-selection */ |
| | 9705 | sc->sc_txintrperiod = ATH_TXQ_INTR_PERIOD; |
| | 9706 | } |
| | 9707 | |
| | 9708 | static void |
| | 9709 | ath_dynamic_sysctl_unregister(struct ath_softc *sc) |
| | 9710 | { |
| | 9711 | if (sc->sc_sysctl_header) { |
| | 9712 | unregister_sysctl_table(sc->sc_sysctl_header); |
| | 9713 | sc->sc_sysctl_header = NULL; |
| | 9714 | } |
| | 9715 | if (sc->sc_sysctls[2].procname) { |
| | 9716 | kfree(sc->sc_sysctls[2].procname); |
| | 9717 | sc->sc_sysctls[2].procname = NULL; |
| | 9718 | } |
| | 9719 | if (sc->sc_sysctls) { |
| | 9720 | kfree(sc->sc_sysctls); |
| | 9721 | sc->sc_sysctls = NULL; |
| | 9722 | } |
| | 9723 | } |
| | 9724 | |
| | 9725 | /* |
| | 9726 | * Announce various information on device/driver attach. |
| | 9727 | */ |
| | 9728 | static void |
| | 9729 | ath_announce(struct net_device *dev) |
| | 9730 | { |
| | 9731 | #define HAL_MODE_DUALBAND (HAL_MODE_11A|HAL_MODE_11B) |
| | 9732 | struct ath_softc *sc = dev->priv; |
| | 9733 | struct ath_hal *ah = sc->sc_ah; |
| | 9734 | u_int modes, cc; |
| | 9735 | |
| | 9736 | printk("%s: mac %d.%d phy %d.%d", dev->name, |
| | 9737 | ah->ah_macVersion, ah->ah_macRev, |
| | 9738 | ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf); |
| | 9739 | /* |
| | 9740 | * Print radio revision(s). We check the wireless modes |
| | 9741 | * to avoid falsely printing revs for inoperable parts. |
| | 9742 | * Dual-band radio revs are returned in the 5 GHz rev number. |
| | 9743 | */ |
| | 9744 | ath_hal_getcountrycode(ah, &cc); |
| | 9745 | modes = ath_hal_getwirelessmodes(ah, cc); |
| | 9746 | if ((modes & HAL_MODE_DUALBAND) == HAL_MODE_DUALBAND) { |
| | 9747 | if (ah->ah_analog5GhzRev && ah->ah_analog2GhzRev) |
| | 9748 | printk(" 5 GHz radio %d.%d 2 GHz radio %d.%d", |
| | 9749 | ah->ah_analog5GhzRev >> 4, |
| | 9750 | ah->ah_analog5GhzRev & 0xf, |
| | 9751 | ah->ah_analog2GhzRev >> 4, |
| | 9752 | ah->ah_analog2GhzRev & 0xf); |
| | 9753 | else |
| | 9754 | printk(" radio %d.%d", ah->ah_analog5GhzRev >> 4, |
| | 9755 | ah->ah_analog5GhzRev & 0xf); |
| | 9756 | } else |
| | 9757 | printk(" radio %d.%d", ah->ah_analog5GhzRev >> 4, |
| | 9758 | ah->ah_analog5GhzRev & 0xf); |
| | 9759 | printk("\n"); |
| | 9760 | if (1/*bootverbose*/) { |
| | 9761 | int i; |
| | 9762 | for (i = 0; i <= WME_AC_VO; i++) { |
| | 9763 | struct ath_txq *txq = sc->sc_ac2q[i]; |
| | 9764 | printk("%s: Use hw queue %u for %s traffic\n", |
| | 9765 | dev->name, txq->axq_qnum, |
| | 9766 | ieee80211_wme_acnames[i]); |
| | 9767 | } |
| | 9768 | printk("%s: Use hw queue %u for CAB traffic\n", dev->name, |
| | 9769 | sc->sc_cabq->axq_qnum); |
| | 9770 | printk("%s: Use hw queue %u for beacons\n", dev->name, |
| | 9771 | sc->sc_bhalq); |
| | 9772 | } |
| | 9773 | #undef HAL_MODE_DUALBAND |
| | 9774 | } |
| | 9775 | |
| | 9776 | /* |
| | 9777 | * Static (i.e. global) sysctls. Note that the HAL sysctls |
| | 9778 | * are located under ours by sharing the setting for DEV_ATH. |
| | 9779 | */ |
enum {
	DEV_ATH = 9, /* XXX must stay in sync with the HAL's notion of DEV_ATH */
};
| | 9783 | |
/* Driver-wide sysctl entries, published under dev.ath (see ath_ath_table). */
static ctl_table ath_static_sysctls[] = {
#ifdef AR_DEBUG
	/* debug message mask; writable */
	{ .ctl_name	= CTL_AUTO,
	   .procname	= "debug",
	   .mode	= 0644,
	   .data	= &ath_debug,
	   .maxlen	= sizeof(ath_debug),
	   .proc_handler= proc_dointvec
	},
#endif
	/* regulatory country code; read-only */
	{ .ctl_name	= CTL_AUTO,
	   .procname	= "countrycode",
	   .mode	= 0444,
	   .data	= &ath_countrycode,
	   .maxlen	= sizeof(ath_countrycode),
	   .proc_handler= proc_dointvec
	},
	/* outdoor operation flag; read-only */
	{ .ctl_name	= CTL_AUTO,
	   .procname	= "outdoor",
	   .mode	= 0444,
	   .data	= &ath_outdoor,
	   .maxlen	= sizeof(ath_outdoor),
	   .proc_handler= proc_dointvec
	},
	/* extended-channel mode flag; read-only */
	{ .ctl_name	= CTL_AUTO,
	   .procname	= "xchanmode",
	   .mode	= 0444,
	   .data	= &ath_xchanmode,
	   .maxlen	= sizeof(ath_xchanmode),
	   .proc_handler= proc_dointvec
	},
	/* calibration interval, clamped to [mincalibrate, maxint]; writable */
	{ .ctl_name	= CTL_AUTO,
	   .procname	= "calibrate",
	   .mode	= 0644,
	   .data	= &ath_calinterval,
	   .maxlen	= sizeof(ath_calinterval),
	   .extra1	= &mincalibrate,
	   .extra2	= &maxint,
	   .proc_handler= proc_dointvec_minmax
	},
	{ 0 }
};
/* "ath" directory node; parents ath_static_sysctls under dev.ath */
static ctl_table ath_ath_table[] = {
	{ .ctl_name	= DEV_ATH,
	  .procname	= "ath",
	  .mode		= 0555,
	  .child	= ath_static_sysctls
	}, { 0 }
};
/* top-level "dev" directory node; root of the driver's static sysctl tree */
static ctl_table ath_root_table[] = {
	{ .ctl_name	= CTL_DEV,
	  .procname	= "dev",
	  .mode		= 0555,
	  .child	= ath_ath_table
	}, { 0 }
};
/* handle returned by sysctl registration; needed for unregister */
static struct ctl_table_header *ath_sysctl_header;
| | 9841 | |
| | 9842 | void |
| | 9843 | ath_sysctl_register(void) |
| | 9844 | { |
| | 9845 | static int initialized = 0; |
| | 9846 | |
| | 9847 | if (!initialized) { |
| | 9848 | register_netdevice_notifier(&ath_event_block); |
| | 9849 | ath_sysctl_header = ATH_REGISTER_SYSCTL_TABLE(ath_root_table); |
| | 9850 | initialized = 1; |
| | 9851 | } |
| | 9852 | } |
| | 9853 | |
| | 9854 | void |
| | 9855 | ath_sysctl_unregister(void) |
| | 9856 | { |
| | 9857 | unregister_netdevice_notifier(&ath_event_block); |
| | 9858 | if (ath_sysctl_header) |
| | 9859 | unregister_sysctl_table(ath_sysctl_header); |
| | 9860 | } |
| | 9861 | |
| | 9862 | static const char* |
| | 9863 | ath_get_hal_status_desc(HAL_STATUS status) |
| | 9864 | { |
| | 9865 | if (status > 0 && status < sizeof(hal_status_desc)/sizeof(char *)) |
| | 9866 | return hal_status_desc[status]; |
| | 9867 | else |
| | 9868 | return ""; |
| | 9869 | } |
| | 9870 | |
| | 9871 | static int |
| | 9872 | ath_rcv_dev_event(struct notifier_block *this, unsigned long event, |
| | 9873 | void *ptr) |
| | 9874 | { |
| | 9875 | struct net_device *dev = (struct net_device *) ptr; |
| | 9876 | struct ath_softc *sc = (struct ath_softc *) dev->priv; |
| | 9877 | |
| | 9878 | if (!dev || !sc || dev->open != &ath_init) |
| | 9879 | return 0; |
| | 9880 | |
| | 9881 | switch (event) { |
| | 9882 | case NETDEV_CHANGENAME: |
| | 9883 | ath_dynamic_sysctl_unregister(sc); |
| | 9884 | ath_dynamic_sysctl_register(sc); |
| | 9885 | return NOTIFY_DONE; |
| | 9886 | default: |
| | 9887 | break; |
| | 9888 | } |
| | 9889 | return 0; |
| | 9890 | } |