Ad-hoc mode beacons are not transmitted for a while after a merge
with another network.

http://madwifi.org/ticket/1033

This patch was rebased for madwifi-0.9.3.3

Signed-off-by: Luis Rodriguez <mcgrof@winlab.rutgers.edu>

diff -Naurp madwifi-0.9.3.3.old/ath/if_ath.c madwifi-0.9.3.3/ath/if_ath.c
--- madwifi-0.9.3.3.old/ath/if_ath.c	2007-07-26 09:42:40.000000000 -0400
+++ madwifi-0.9.3.3/ath/if_ath.c	2007-11-07 18:10:37.000000000 -0500
@@ -4426,6 +4426,9 @@ ath_beacon_free(struct ath_softc *sc)
  * the beacon miss handling so we'll receive a BMISS
  * interrupt when we stop seeing beacons from the AP
  * we've associated with.
+ *
+ * Note: TBTT is Target Beacon Transmission Time (see IEEE
+ * 802.11-1999: 4 & 11.2.1.3).
  */
 static void
 ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
@@ -4435,50 +4438,88 @@ ath_beacon_config(struct ath_softc *sc, 
 	struct ieee80211com *ic = &sc->sc_ic;
 	struct ath_hal *ah = sc->sc_ah;
 	struct ieee80211_node *ni;
-	u_int32_t nexttbtt, intval;
+	u_int32_t nexttbtt = 0;
+	u_int32_t intval;
+	u_int64_t tsf, hw_tsf;
+	u_int32_t tsftu, hw_tsftu;
+	int reset_tsf = 0;
 
 	if (vap == NULL)
 		vap = TAILQ_FIRST(&ic->ic_vaps);   /* XXX */
 
 	ni = vap->iv_bss;
 
-	/* extract tstamp from last beacon and convert to TU */
-	nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4),
-			     LE_READ_4(ni->ni_tstamp.data));
-	/* XXX conditionalize multi-bss support? */
+	hw_tsf = ath_hal_gettsf64(ah);
+	tsf = le64_to_cpu(ni->ni_tstamp.tsf);
+	hw_tsftu = hw_tsf >> 10;
+	tsftu = tsf >> 10;
+
+	/* We should reset hw TSF only once, so we increment
+	 * ni_tstamp.tsf to avoid resetting the hw TSF multiple
+	 * times */
+
+	if (tsf == 0) {
+		reset_tsf = 1;
+		ni->ni_tstamp.tsf = cpu_to_le64(1);
+	}
+
+	/* XXX: Conditionalize multi-bss support? */
 	if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
-		/*
-		 * For multi-bss ap support beacons are either staggered
+		/* For multi-bss ap support beacons are either staggered
 		 * evenly over N slots or burst together.  For the former
 		 * arrange for the SWBA to be delivered for each slot.
-		 * Slots that are not occupied will generate nothing. 
-		 */
+		 * Slots that are not occupied will generate nothing. */
 		/* NB: the beacon interval is kept internally in TU's */
 		intval = ic->ic_lintval & HAL_BEACON_PERIOD;
 		if (sc->sc_stagbeacons)
 			intval /= ATH_BCBUF;	/* for staggered beacons */
 		if ((sc->sc_nostabeacons) &&
-		    (vap->iv_opmode == IEEE80211_M_HOSTAP))
-			nexttbtt = 0;
+			(vap->iv_opmode == IEEE80211_M_HOSTAP))
+			reset_tsf = 1;
 	} else
 		intval = ni->ni_intval & HAL_BEACON_PERIOD;
-	if (nexttbtt == 0)		/* e.g. for ap mode */
+
+#define	FUDGE	2
+	sc->sc_syncbeacon = 0;
+
+	if (reset_tsf) {
+		/* We just created the interface and TSF will be reset to
+		 * zero, so the next beacon will be sent at the next intval
+		 * time */
 		nexttbtt = intval;
-	else if (intval)		/* NB: can be 0 for monitor mode */
-		nexttbtt = roundup(nexttbtt, intval);
-	DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
-		__func__, nexttbtt, intval, ni->ni_intval);
+	} else if (intval) {    /* NB: can be 0 for monitor mode */
+		if (tsf == 1) {
+			/* We have not received any beacons or probe responses.
+			 * The next TBTT must be at least FUDGE ms ahead of the
+			 * hw_tsftu. Also, TSF == 0 is a TBTT - IEEE802.11-1999
+			 * 11.1.2.2, although I'm not sure it applies here... */
+			nexttbtt = roundup(hw_tsftu + FUDGE, intval);
+		} else {
+			if (tsf > hw_tsf) {
+				/* We received a beacon, but the HW TSF has
+				 * not been updated (otherwise hw_tsf > tsf).
+				 * We cannot use the hardware TSF, so we wait
+				 * to synchronise beacons again. */
+				sc->sc_syncbeacon = 1;
+				goto ath_beacon_config_debug;
+			} else {
+				/* Normal case: we received a beacon to which
+				 * we have synchronised. Make sure that
+				 * nexttbtt is at least FUDGE ms ahead of
+				 * hw_tsf. */
+				nexttbtt = tsftu + roundup(hw_tsftu +
+					FUDGE - tsftu, intval);
+			}
+		}
+	}
+
 	if (ic->ic_opmode == IEEE80211_M_STA &&	!(sc->sc_nostabeacons)) {
 		HAL_BEACON_STATE bs;
-		u_int64_t tsf;
-		u_int32_t tsftu;
 		int dtimperiod, dtimcount;
 		int cfpperiod, cfpcount;
 
-		/*
-		 * Setup dtim and cfp parameters according to
-		 * last beacon we received (which may be none).
-		 */
+		/* Setup DTIM and CFP parameters according to the last beacon 
+		 * we have received (which may not have happened). */
 		dtimperiod = vap->iv_dtim_period;
 		if (dtimperiod <= 0)		/* NB: 0 if not known */
 			dtimperiod = 1;
@@ -4487,13 +4528,13 @@ ath_beacon_config(struct ath_softc *sc, 
 			dtimcount = 0;		/* XXX? */
 		cfpperiod = 1;			/* NB: no PCF support yet */
 		cfpcount = 0;
-#define	FUDGE	2
 		/*
 		 * Pull nexttbtt forward to reflect the current
 		 * TSF and calculate dtim+cfp state for the result.
 		 */
-		tsf = ath_hal_gettsf64(ah);
-		tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
+		nexttbtt = tsftu;
+		if (nexttbtt == 0)		/* e.g. for ap mode */
+			nexttbtt = intval;
 		do {
 			nexttbtt += intval;
 			if (--dtimcount < 0) {
@@ -4501,7 +4542,7 @@ ath_beacon_config(struct ath_softc *sc, 
 				if (--cfpcount < 0)
 					cfpcount = cfpperiod - 1;
 			}
-		} while (nexttbtt < tsftu);
+		} while (nexttbtt < hw_tsftu + FUDGE);
 #undef FUDGE
 		memset(&bs, 0, sizeof(bs));
 		bs.bs_intval = intval;
@@ -4575,7 +4616,7 @@ ath_beacon_config(struct ath_softc *sc, 
 		ath_hal_intrset(ah, sc->sc_imask);
 	} else {
 		ath_hal_intrset(ah, 0);
-		if (nexttbtt == intval)
+		if (reset_tsf)
 			intval |= HAL_BEACON_RESET_TSF;
 		if (ic->ic_opmode == IEEE80211_M_IBSS) {
 			/*
@@ -4612,8 +4653,35 @@ ath_beacon_config(struct ath_softc *sc, 
 		if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol)
 			ath_beacon_start_adhoc(sc, vap);
 	}
-	sc->sc_syncbeacon = 0;
 #undef TSF_TO_TU
+
+ath_beacon_config_debug:
+	/* We print all debug messages here, in order to preserve the
+	 * time critical aspect of this function */
+	DPRINTF(sc, ATH_DEBUG_BEACON,
+		"%s: ni=%p tsf=%llu hw_tsf=%llu tsftu=%u hw_tsftu=%u\n",
+		__func__, ni, tsf, hw_tsf, tsftu, hw_tsftu);
+
+	if (reset_tsf)
+		/* We just created the interface */
+		DPRINTF(sc, ATH_DEBUG_BEACON, "%s: first beacon\n", __func__);
+	else if (tsf == 1)
+		/* We have not received any beacons or probe responses */
+		DPRINTF(sc, ATH_DEBUG_BEACON,
+				"%s: no beacon received...\n", __func__);
+	else if (tsf > hw_tsf)
+		/* We received a beacon, but the hw TSF has not been updated */
+		DPRINTF(sc, ATH_DEBUG_BEACON,
+				"%s: beacon received, but TSF is incorrect\n",
+				__func__);
+	else
+		/* We received a beacon in the past, the normal case */
+		DPRINTF(sc, ATH_DEBUG_BEACON,
+				"%s: beacon received, TSF is correct\n",
+				__func__);
+
+	DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt=%u intval=%u\n",
+		__func__, nexttbtt, intval & HAL_BEACON_PERIOD);
 }
 
 static int
@@ -5442,48 +5510,53 @@ ath_recv_mgmt(struct ieee80211_node *ni,
 	struct ath_softc *sc = ni->ni_ic->ic_dev->priv;
 	struct ieee80211vap *vap = ni->ni_vap;
 
-	/*
-	 * Call up first so subsequent work can use information
-	 * potentially stored in the node (e.g. for ibss merge).
-	 */
+	/* Call up first so subsequent work can use information
+	 * potentially stored in the node (e.g. for ibss merge). */
 	sc->sc_recv_mgmt(ni, skb, subtype, rssi, rstamp);
 	switch (subtype) {
 	case IEEE80211_FC0_SUBTYPE_BEACON:
-		/* update rssi statistics for use by the HAL */
+		/* Update RSSI statistics for use by the HAL */
 		ATH_RSSI_LPF(ATH_NODE(ni)->an_halstats.ns_avgbrssi, rssi);
 		if ((sc->sc_syncbeacon || (vap->iv_flags_ext & IEEE80211_FEXT_APPIE_UPDATE)) &&
 		    ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) {
-			/*
-			 * Resync beacon timers using the tsf of the
-			 * beacon frame we just received.
-			 */
+			/* Resync beacon timers using the TSF of the
+			 * beacon frame we just received. */
 			vap->iv_flags_ext &= ~IEEE80211_FEXT_APPIE_UPDATE;
 			ath_beacon_config(sc, vap);
 		}
-		/* fall thru... */
+		/* NB: Fall Through */
 	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
 		if (vap->iv_opmode == IEEE80211_M_IBSS &&
-		    vap->iv_state == IEEE80211_S_RUN) {
-			u_int64_t tsf = ath_extend_tsf(sc->sc_ah, rstamp);
-			/*
-			 * Handle ibss merge as needed; check the tsf on the
+				vap->iv_state == IEEE80211_S_RUN) {
+			/* Don't merge if we have a desired BSSID */
+			if (vap->iv_flags & IEEE80211_F_DESBSSID)
+				break;
+
+			/* To handle IBSS merge, we need the struct
+			 * ieee80211_node which has been updated with the
+			 * BSSID and TSF from the last beacon */
+			ni = ieee80211_find_rxnode(ni->ni_ic,
+				(const struct ieee80211_frame_min *) skb->data);
+			if (ni == NULL)
+				break;
+
+			/* Handle IBSS merge as needed; check the TSF on the
 			 * frame before attempting the merge.  The 802.11 spec
 			 * says the station should change it's bssid to match
 			 * the oldest station with the same ssid, where oldest
 			 * is determined by the tsf.  Note that hardware
 			 * reconfiguration happens through callback to
 			 * ath_newstate as the state machine will go from
-			 * RUN -> RUN when this happens.
-			 */
-			/* jal: added: don't merge if we have a desired
-			   BSSID */
-			if (!(vap->iv_flags & IEEE80211_F_DESBSSID) &&
-				le64_to_cpu(ni->ni_tstamp.tsf) >= tsf) {
-				DPRINTF(sc, ATH_DEBUG_STATE,
-					"ibss merge, rstamp %u tsf %llu "
-					"tstamp %llu\n", rstamp, (long long) tsf,
-					(long long) le64_to_cpu(ni->ni_tstamp.tsf));
-				(void) ieee80211_ibss_merge(ni);
+			 * RUN -> RUN when this happens. */
+			DPRINTF(sc, ATH_DEBUG_BEACON,
+				"check for ibss merge for ni=%p TSF1(t4)=%10u TSF2(t3)=%10llu\n",
+				ni, rstamp, le64_to_cpu(ni->ni_tstamp.tsf));
+
+			if (rstamp < le64_to_cpu(ni->ni_tstamp.tsf)) {
+				DPRINTF(sc, ATH_DEBUG_BEACON,
+				"ibss merge, rstamp %10u local tsf %10llu\n",
+				rstamp, le64_to_cpu(ni->ni_tstamp.tsf));
+				ieee80211_ibss_merge(ni);
 			}
 		}
 		break;
diff -Naurp madwifi-0.9.3.3.old/ath/if_ath.c.orig madwifi-0.9.3.3/ath/if_ath.c.orig
--- madwifi-0.9.3.3.old/ath/if_ath.c.orig	1969-12-31 19:00:00.000000000 -0500
+++ madwifi-0.9.3.3/ath/if_ath.c.orig	2007-11-07 18:10:37.000000000 -0500
@@ -0,0 +1,9890 @@
+/*-
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ *    redistribution must be conditioned upon including a substantially
+ *    similar Disclaimer requirement for further binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: if_ath.c 2616 2007-07-26 13:42:40Z mrenzmann $
+ */
+
+/*
+ * Driver for the Atheros Wireless LAN controller.
+ *
+ * This software is derived from work of Atsushi Onoe; his contribution
+ * is greatly appreciated.
+ */
+#include "opt_ah.h"
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/delay.h>
+#include <linux/cache.h>
+#include <linux/sysctl.h>
+#include <linux/proc_fs.h>
+#include <linux/if_arp.h>
+#include <linux/rtnetlink.h>
+#include <asm/uaccess.h>
+
+#include "if_ethersubr.h"		/* for ETHER_IS_MULTICAST */
+#include "if_media.h"
+#include "if_llc.h"
+
+#include <net80211/ieee80211_radiotap.h>
+#include <net80211/ieee80211_var.h>
+#include <net80211/ieee80211_monitor.h>
+#include <net80211/ieee80211_rate.h>
+
+#ifdef USE_HEADERLEN_RESV
+#include <net80211/if_llc.h>
+#endif
+
+#define	AR_DEBUG
+
+#include "net80211/if_athproto.h"
+#include "if_athvar.h"
+#include "ah_desc.h"
+#include "ah_devid.h"			/* XXX to identify chipset */
+
+#ifdef ATH_PCI		/* PCI BUS */
+#include "if_ath_pci.h"
+#endif			/* PCI BUS */
+#ifdef ATH_AHB		/* AHB BUS */
+#include "if_ath_ahb.h"
+#endif			/* AHB BUS */
+
+#ifdef ATH_TX99_DIAG
+#include "ath_tx99.h"
+#endif
+
+/* unaligned little endian access */
+#define LE_READ_2(p)							\
+	((u_int16_t)							\
+	 ((((u_int8_t *)(p))[0]      ) | (((u_int8_t *)(p))[1] <<  8)))
+#define LE_READ_4(p)							\
+	((u_int32_t)							\
+	 ((((u_int8_t *)(p))[0]      ) | (((u_int8_t *)(p))[1] <<  8) |	\
+	  (((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24)))
+
+/* Default rate control algorithm */
+#ifdef CONFIG_ATHEROS_RATE_DEFAULT
+#define DEF_RATE_CTL CONFIG_ATHEROS_RATE_DEFAULT
+#else
+#define DEF_RATE_CTL "sample"
+#endif
+
+enum {
+	ATH_LED_TX,
+	ATH_LED_RX,
+	ATH_LED_POLL,
+};
+
+static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
+	const char *, int, int, int, struct net_device *);
+static void ath_vap_delete(struct ieee80211vap *);
+static int ath_init(struct net_device *);
+static int ath_set_ack_bitrate(struct ath_softc *, int);
+static int ath_reset(struct net_device *);
+static void ath_fatal_tasklet(TQUEUE_ARG);
+static void ath_rxorn_tasklet(TQUEUE_ARG);
+static void ath_bmiss_tasklet(TQUEUE_ARG);
+static void ath_bstuck_tasklet(TQUEUE_ARG);
+static void ath_radar_task(struct work_struct *);
+static void ath_dfs_test_return(unsigned long);
+
+static int ath_stop_locked(struct net_device *);
+static int ath_stop(struct net_device *);
+#if 0
+static void ath_initkeytable(struct ath_softc *);
+#endif
+static int ath_key_alloc(struct ieee80211vap *, const struct ieee80211_key *);
+static int ath_key_delete(struct ieee80211vap *, const struct ieee80211_key *,
+	struct ieee80211_node *);
+static int ath_key_set(struct ieee80211vap *, const struct ieee80211_key *,
+	const u_int8_t mac[IEEE80211_ADDR_LEN]);
+static void ath_key_update_begin(struct ieee80211vap *);
+static void ath_key_update_end(struct ieee80211vap *);
+static void ath_mode_init(struct net_device *);
+static void ath_setslottime(struct ath_softc *);
+static void ath_updateslot(struct net_device *);
+static int ath_beaconq_setup(struct ath_hal *);
+static int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *);
+#ifdef ATH_SUPERG_DYNTURBO
+static void ath_beacon_dturbo_update(struct ieee80211vap *, int *, u_int8_t);
+static void ath_beacon_dturbo_config(struct ieee80211vap *, u_int32_t);
+static void ath_turbo_switch_mode(unsigned long);
+static int ath_check_beacon_done(struct ath_softc *);
+#endif
+static void ath_beacon_send(struct ath_softc *, int *);
+static void ath_beacon_start_adhoc(struct ath_softc *, struct ieee80211vap *);
+static void ath_beacon_return(struct ath_softc *, struct ath_buf *);
+static void ath_beacon_free(struct ath_softc *);
+static void ath_beacon_config(struct ath_softc *, struct ieee80211vap *);
+static int ath_desc_alloc(struct ath_softc *);
+static void ath_desc_free(struct ath_softc *);
+static void ath_desc_swap(struct ath_desc *);
+static struct ieee80211_node *ath_node_alloc(struct ieee80211_node_table *,
+	struct ieee80211vap *);
+static void ath_node_cleanup(struct ieee80211_node *);
+static void ath_node_free(struct ieee80211_node *);
+static u_int8_t ath_node_getrssi(const struct ieee80211_node *);
+static int ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
+static void ath_recv_mgmt(struct ieee80211_node *, struct sk_buff *, int,
+	int, u_int32_t);
+static void ath_setdefantenna(struct ath_softc *, u_int);
+static struct ath_txq *ath_txq_setup(struct ath_softc *, int, int);
+static void ath_rx_tasklet(TQUEUE_ARG);
+static int ath_hardstart(struct sk_buff *, struct net_device *);
+static int ath_mgtstart(struct ieee80211com *, struct sk_buff *);
+#ifdef ATH_SUPERG_COMP
+static u_int32_t ath_get_icvlen(struct ieee80211_key *);
+static u_int32_t ath_get_ivlen(struct ieee80211_key *);
+static void ath_setup_comp(struct ieee80211_node *, int);
+static void ath_comp_set(struct ieee80211vap *, struct ieee80211_node *, int);	
+#endif
+static int ath_tx_setup(struct ath_softc *, int, int);
+static int ath_wme_update(struct ieee80211com *);
+static void ath_uapsd_flush(struct ieee80211_node *);
+static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
+static void ath_tx_cleanup(struct ath_softc *);
+static void ath_tx_uapsdqueue(struct ath_softc *, struct ath_node *,
+	struct ath_buf *);
+
+static int ath_tx_start(struct net_device *, struct ieee80211_node *,
+	struct ath_buf *, struct sk_buff *, int);
+static void ath_tx_tasklet_q0(TQUEUE_ARG);
+static void ath_tx_tasklet_q0123(TQUEUE_ARG);
+static void ath_tx_tasklet(TQUEUE_ARG);
+static void ath_tx_timeout(struct net_device *);
+static void ath_tx_draintxq(struct ath_softc *, struct ath_txq *);
+static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
+static void ath_draintxq(struct ath_softc *);
+static __inline void ath_tx_txqaddbuf(struct ath_softc *, struct ieee80211_node *,
+	struct ath_txq *, struct ath_buf *, struct ath_desc *, int);
+static void ath_stoprecv(struct ath_softc *);
+static int ath_startrecv(struct ath_softc *);
+static void ath_flushrecv(struct ath_softc *);
+static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
+static void ath_calibrate(unsigned long);
+static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
+
+static void ath_scan_start(struct ieee80211com *);
+static void ath_scan_end(struct ieee80211com *);
+static void ath_set_channel(struct ieee80211com *);
+static void ath_set_coverageclass(struct ieee80211com *);
+static u_int ath_mhz2ieee(struct ieee80211com *, u_int, u_int);
+#ifdef ATH_SUPERG_FF
+static int athff_can_aggregate(struct ath_softc *, struct ether_header *,
+	struct ath_node *, struct sk_buff *, u_int16_t, int *);
+#endif
+static struct net_device_stats *ath_getstats(struct net_device *);
+static void ath_setup_stationkey(struct ieee80211_node *);
+static void ath_setup_stationwepkey(struct ieee80211_node *);
+static void ath_setup_keycacheslot(struct ath_softc *, struct ieee80211_node *);
+static void ath_newassoc(struct ieee80211_node *, int);
+static int ath_getchannels(struct net_device *, u_int, HAL_BOOL, HAL_BOOL);
+static void ath_led_event(struct ath_softc *, int);
+static void ath_update_txpow(struct ath_softc *);
+
+static int ath_set_mac_address(struct net_device *, void *);
+static int ath_change_mtu(struct net_device *, int);
+static int ath_ioctl(struct net_device *, struct ifreq *, int);
+
+static int ath_rate_setup(struct net_device *, u_int);
+static void ath_setup_subrates(struct net_device *);
+#ifdef ATH_SUPERG_XR
+static int ath_xr_rate_setup(struct net_device *);
+static void ath_grppoll_txq_setup(struct ath_softc *, int, int);
+static void ath_grppoll_start(struct ieee80211vap *, int);
+static void ath_grppoll_stop(struct ieee80211vap *);
+static u_int8_t ath_node_move_data(const struct ieee80211_node *);
+static void ath_grppoll_txq_update(struct ath_softc *, int);
+static void ath_grppoll_period_update(struct ath_softc *);
+#endif
+static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);
+
+static void ath_dynamic_sysctl_register(struct ath_softc *);
+static void ath_dynamic_sysctl_unregister(struct ath_softc *);
+static void ath_announce(struct net_device *);
+static int ath_descdma_setup(struct ath_softc *, struct ath_descdma *,
+	ath_bufhead *, const char *, int, int);
+static void ath_descdma_cleanup(struct ath_softc *, struct ath_descdma *,
+	ath_bufhead *, int);
+static void ath_check_dfs_clear(unsigned long);
+static const char *ath_get_hal_status_desc(HAL_STATUS status);
+static int ath_rcv_dev_event(struct notifier_block *, unsigned long, void *);
+	
+static int ath_calinterval = ATH_SHORT_CALINTERVAL;		/*
+								 * calibrate every 30 secs in steady state
+								 * but check every second at first.
+								 */
+static int ath_countrycode = CTRY_DEFAULT;	/* country code */
+static int ath_outdoor = AH_FALSE;		/* enable outdoor use */
+static int ath_xchanmode = AH_TRUE;		/* enable extended channels */
+static char *autocreate = NULL;
+static char *ratectl = DEF_RATE_CTL;
+static int rfkill = -1;
+static int countrycode = -1;
+static int outdoor = -1;
+static int xchanmode = -1;
+
+static const char *hal_status_desc[] = {
+	"No error",
+	"No hardware present or device not yet supported",
+	"Memory allocation failed",
+	"Hardware didn't respond as expected",
+	"EEPROM magic number invalid",
+	"EEPROM version invalid",
+	"EEPROM unreadable",
+	"EEPROM checksum invalid",
+	"EEPROM read problem",
+	"EEPROM mac address invalid",
+	"EEPROM size not supported",
+	"Attempt to change write-locked EEPROM",
+	"Invalid parameter to function",
+	"Hardware revision not supported",
+	"Hardware self-test failed",
+	"Operation incomplete"
+};
+
+static struct notifier_block ath_event_block = {
+        .notifier_call = ath_rcv_dev_event
+};
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,52))
+MODULE_PARM(countrycode, "i");
+MODULE_PARM(outdoor, "i");
+MODULE_PARM(xchanmode, "i");
+MODULE_PARM(rfkill, "i");
+MODULE_PARM(autocreate, "s");
+MODULE_PARM(ratectl, "s");
+#else
+#include <linux/moduleparam.h>
+module_param(countrycode, int, 0600);
+module_param(outdoor, int, 0600);
+module_param(xchanmode, int, 0600);
+module_param(rfkill, int, 0600);
+module_param(autocreate, charp, 0600);
+module_param(ratectl, charp, 0600);
+#endif
+MODULE_PARM_DESC(countrycode, "Override default country code");
+MODULE_PARM_DESC(outdoor, "Enable/disable outdoor use");
+MODULE_PARM_DESC(xchanmode, "Enable/disable extended channel mode");
+MODULE_PARM_DESC(rfkill, "Enable/disable RFKILL capability");
+MODULE_PARM_DESC(autocreate, "Create ath device in [sta|ap|wds|adhoc|ahdemo|monitor] mode. defaults to sta, use 'none' to disable");
+MODULE_PARM_DESC(ratectl, "Rate control algorithm [amrr|onoe|sample], defaults to '" DEF_RATE_CTL "'");
+
+static int	ath_debug = 0;
+#ifdef AR_DEBUG
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,52))
+MODULE_PARM(ath_debug, "i");
+#else
+module_param(ath_debug, int, 0600);
+#endif
+MODULE_PARM_DESC(ath_debug, "Load-time debug output enable");
+
+#define	IFF_DUMPPKTS(sc, _m) \
+	((sc->sc_debug & _m))
+static void ath_printrxbuf(struct ath_buf *, int);
+static void ath_printtxbuf(struct ath_buf *, int);
+enum {
+	ATH_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
+	ATH_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
+	ATH_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
+	ATH_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
+	ATH_DEBUG_RATE		= 0x00000010,	/* rate control */
+	ATH_DEBUG_RESET		= 0x00000020,	/* reset processing */
+	/* 0x00000040 was ATH_DEBUG_MODE */
+	ATH_DEBUG_BEACON 	= 0x00000080,	/* beacon handling */
+	ATH_DEBUG_WATCHDOG 	= 0x00000100,	/* watchdog timeout */
+	ATH_DEBUG_INTR		= 0x00001000,	/* ISR */
+	ATH_DEBUG_TX_PROC	= 0x00002000,	/* tx ISR proc */
+	ATH_DEBUG_RX_PROC	= 0x00004000,	/* rx ISR proc */
+	ATH_DEBUG_BEACON_PROC	= 0x00008000,	/* beacon ISR proc */
+	ATH_DEBUG_CALIBRATE	= 0x00010000,	/* periodic calibration */
+	ATH_DEBUG_KEYCACHE	= 0x00020000,	/* key cache management */
+	ATH_DEBUG_STATE		= 0x00040000,	/* 802.11 state transitions */
+	ATH_DEBUG_NODE		= 0x00080000,	/* node management */
+	ATH_DEBUG_LED		= 0x00100000,	/* led management */
+	ATH_DEBUG_FF		= 0x00200000,	/* fast frames */
+	ATH_DEBUG_TURBO		= 0x00400000,	/* turbo/dynamic turbo */
+	ATH_DEBUG_UAPSD		= 0x00800000,	/* uapsd */
+	ATH_DEBUG_DOTH		= 0x01000000,	/* 11.h */
+	ATH_DEBUG_FATAL		= 0x80000000,	/* fatal errors */
+	ATH_DEBUG_ANY		= 0xffffffff
+};
+#define	DPRINTF(sc, _m, _fmt, ...) do {				\
+	if (sc->sc_debug & (_m))				\
+		printk(_fmt, __VA_ARGS__);			\
+} while (0)
+#define	KEYPRINTF(sc, ix, hk, mac) do {				\
+	if (sc->sc_debug & ATH_DEBUG_KEYCACHE)			\
+		ath_keyprint(sc, __func__, ix, hk, mac);	\
+} while (0)
+#else /* defined(AR_DEBUG) */
+#define	IFF_DUMPPKTS(sc, _m)	netif_msg_dumppkts(&sc->sc_ic)
+#define	DPRINTF(sc, _m, _fmt, ...)
+#define	KEYPRINTF(sc, k, ix, mac)
+#endif /* defined(AR_DEBUG) */
+
+#define ATH_SETUP_XR_VAP(sc,vap,rfilt) \
+	do { \
+		if (sc->sc_curchan.privFlags & CHANNEL_4MS_LIMIT) \
+			vap->iv_fragthreshold = XR_4MS_FRAG_THRESHOLD; \
+		else \
+			vap->iv_fragthreshold = vap->iv_xrvap->iv_fragthreshold; \
+		if (!sc->sc_xrgrppoll) { \
+			ath_grppoll_txq_setup(sc, HAL_TX_QUEUE_DATA, GRP_POLL_PERIOD_NO_XR_STA(sc)); \
+			ath_grppoll_start(vap, sc->sc_xrpollcount); \
+			ath_hal_setrxfilter(sc->sc_ah, rfilt|HAL_RX_FILTER_XRPOLL); \
+		} \
+   	} while(0)
+
+/*
+ * Define the scheme that we select MAC address for multiple BSS on the same radio.
+ * The very first VAP will just use the MAC address from the EEPROM.
+ * For the next 3 VAPs, we set the U/L bit (bit 1) in MAC address,
+ * and use the next two bits as the index of the VAP.
+ */
+#define ATH_SET_VAP_BSSID_MASK(bssid_mask)      ((bssid_mask)[0] &= ~(((ATH_BCBUF-1)<<2)|0x02))
+#define ATH_GET_VAP_ID(bssid)                   ((bssid)[0] >> 2)
+#define ATH_SET_VAP_BSSID(bssid, id)            \
+    	do {                                    \
+		if (id)                            \
+            		(bssid)[0] |= (((id) << 2) | 0x02); \
+	} while(0)
+
+int
+ath_attach(u_int16_t devid, struct net_device *dev, HAL_BUS_TAG tag)
+{
+	struct ath_softc *sc = dev->priv;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ath_hal *ah;
+	HAL_STATUS status;
+	int error = 0, i;
+	int autocreatemode = IEEE80211_M_STA;
+	u_int8_t csz;
+
+	sc->devid = devid;
+	sc->sc_debug = ath_debug;
+	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);
+
+	/*
+	 * Cache line size is used to size and align various
+	 * structures used to communicate with the hardware.
+	 */
+	bus_read_cachesize(sc, &csz);
+	/* XXX assert csz is non-zero */
+	sc->sc_cachelsz = csz << 2;		/* convert to bytes */
+
+	ATH_LOCK_INIT(sc);
+	ATH_TXBUF_LOCK_INIT(sc);
+	ATH_RXBUF_LOCK_INIT(sc);
+
+	ATH_INIT_TQUEUE(&sc->sc_rxtq,	 ath_rx_tasklet,	dev);
+	ATH_INIT_TQUEUE(&sc->sc_txtq,	 ath_tx_tasklet,	dev);
+	ATH_INIT_TQUEUE(&sc->sc_bmisstq, ath_bmiss_tasklet,	dev);
+	ATH_INIT_TQUEUE(&sc->sc_bstucktq,ath_bstuck_tasklet,	dev);
+	ATH_INIT_TQUEUE(&sc->sc_rxorntq, ath_rxorn_tasklet,	dev);
+	ATH_INIT_TQUEUE(&sc->sc_fataltq, ath_fatal_tasklet,	dev);
+	ATH_INIT_WORK(&sc->sc_radartask, ath_radar_task);
+
+	/*
+	 * Attach the HAL and verify ABI compatibility by checking
+	 * the HAL's ABI signature against the one the driver was
+	 * compiled with.  A mismatch indicates the driver was
+	 * built with an ah.h that does not correspond to the HAL
+	 * module loaded in the kernel.
+	 */
+	ah = _ath_hal_attach(devid, sc, tag, sc->sc_iobase, &status);
+	if (ah == NULL) {
+		printk(KERN_ERR "%s: unable to attach hardware: '%s' (HAL status %u)\n",
+			dev->name, ath_get_hal_status_desc(status), status);
+		error = ENXIO;
+		goto bad;
+	}
+	if (ah->ah_abi != HAL_ABI_VERSION) {
+		printk(KERN_ERR "%s: HAL ABI mismatch; "
+			"driver expects 0x%x, HAL reports 0x%x\n",
+			dev->name, HAL_ABI_VERSION, ah->ah_abi);
+		error = ENXIO;		/* XXX */
+		goto bad;
+	}
+	sc->sc_ah = ah;
+
+	/*
+	 * Check if the MAC has multi-rate retry support.
+	 * We do this by trying to setup a fake extended
+	 * descriptor.  MAC's that don't have support will
+	 * return false w/o doing anything.  MAC's that do
+	 * support it will return true w/o doing anything.
+	 */
+	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);
+
+	/*
+	 * Check if the device has hardware counters for PHY
+	 * errors.  If so we need to enable the MIB interrupt
+	 * so we can act on stat triggers.
+	 */
+	if (ath_hal_hwphycounters(ah))
+		sc->sc_needmib = 1;
+
+	/*
+	 * Get the hardware key cache size.
+	 */
+	sc->sc_keymax = ath_hal_keycachesize(ah);
+	if (sc->sc_keymax > ATH_KEYMAX) {
+		printk("%s: Warning, using only %u entries in %u key cache\n",
+			dev->name, ATH_KEYMAX, sc->sc_keymax);
+		sc->sc_keymax = ATH_KEYMAX;
+	}
+	/*
+	 * Reset the key cache since some parts do not
+	 * reset the contents on initial power up.
+	 */
+	for (i = 0; i < sc->sc_keymax; i++)
+		ath_hal_keyreset(ah, i);
+
+	/*
+	 * Collect the channel list using the default country
+	 * code and including outdoor channels.  The 802.11 layer
+	 * is responsible for filtering this list based on settings
+	 * like the phy mode.
+	 */
+	if (countrycode != -1)
+		ath_countrycode = countrycode;
+	if (outdoor != -1)
+		ath_outdoor = outdoor;
+	if (xchanmode != -1)
+		ath_xchanmode = xchanmode;
+	error = ath_getchannels(dev, ath_countrycode,
+			ath_outdoor, ath_xchanmode);
+	if (error != 0)
+		goto bad;
+
+	ic->ic_country_code = ath_countrycode;
+	ic->ic_country_outdoor = ath_outdoor;
+
+	if (rfkill != -1) {
+		printk(KERN_INFO "ath_pci: switching rfkill capability %s\n",
+			rfkill ? "on" : "off");	
+		ath_hal_setrfsilent(ah, rfkill);
+	}
+
+	/*
+	 * Setup rate tables for all potential media types.
+	 */
+	ath_rate_setup(dev, IEEE80211_MODE_11A);
+	ath_rate_setup(dev, IEEE80211_MODE_11B);
+	ath_rate_setup(dev, IEEE80211_MODE_11G);
+	ath_rate_setup(dev, IEEE80211_MODE_TURBO_A);
+	ath_rate_setup(dev, IEEE80211_MODE_TURBO_G);
+
+	/* Setup for half/quarter rates */
+	ath_setup_subrates(dev);
+
+	/* NB: setup here so ath_rate_update is happy */
+	ath_setcurmode(sc, IEEE80211_MODE_11A);
+
+	/*
+	 * Allocate tx+rx descriptors and populate the lists.
+	 */
+	error = ath_desc_alloc(sc);
+	if (error != 0) {
+		printk(KERN_ERR "%s: failed to allocate descriptors: %d\n",
+			dev->name, error);
+		goto bad;
+	}
+
+	/*
+	 * Init ic_caps prior to queue init, since WME cap setting
+	 * depends on queue setup.
+	 */
+	ic->ic_caps = 0;
+
+	/*
+	 * Allocate hardware transmit queues: one queue for
+	 * beacon frames and one data queue for each QoS
+	 * priority.  Note that the HAL handles resetting
+	 * these queues at the needed time.
+	 *
+	 * XXX PS-Poll
+	 */
+	sc->sc_bhalq = ath_beaconq_setup(ah);
+	if (sc->sc_bhalq == (u_int) -1) {
+		printk(KERN_ERR "%s: unable to setup a beacon xmit queue!\n",
+			dev->name);
+		error = EIO;
+		goto bad2;
+	}
+	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
+	if (sc->sc_cabq == NULL) {
+		printk(KERN_ERR "%s: unable to setup CAB xmit queue!\n",
+			dev->name);
+		error = EIO;
+		goto bad2;
+	}
+	/* NB: ensure BK queue is the lowest priority h/w queue */
+	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
+		printk(KERN_ERR "%s: unable to setup xmit queue for %s traffic!\n",
+			dev->name, ieee80211_wme_acnames[WME_AC_BK]);
+		error = EIO;
+		goto bad2;
+	}
+	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
+	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
+	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
+		/*
+		 * Not enough hardware tx queues to properly do WME;
+		 * just punt and assign them all to the same h/w queue.
+		 * We could do a better job of this if, for example,
+		 * we allocate queues when we switch from station to
+		 * AP mode.
+		 */
+		if (sc->sc_ac2q[WME_AC_VI] != NULL)
+			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
+		if (sc->sc_ac2q[WME_AC_BE] != NULL)
+			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
+		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
+		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
+		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
+	} else {
+		/*
+		 * Mark WME capability since we have sufficient
+		 * hardware queues to do proper priority scheduling.
+		 */
+		ic->ic_caps |= IEEE80211_C_WME;
+		sc->sc_uapsdq = ath_txq_setup(sc, HAL_TX_QUEUE_UAPSD, 0);
+		if (sc->sc_uapsdq == NULL)
+			DPRINTF(sc, ATH_DEBUG_UAPSD, "%s: unable to setup UAPSD xmit queue!\n",
+				__func__);
+		else {
+			ic->ic_caps |= IEEE80211_C_UAPSD;
+			/*
+			 * default UAPSD on if HW capable
+			 */
+			IEEE80211_COM_UAPSD_ENABLE(ic);
+		}
+	}
+#ifdef ATH_SUPERG_XR
+	ath_xr_rate_setup(dev);
+	sc->sc_xrpollint = XR_DEFAULT_POLL_INTERVAL;
+	sc->sc_xrpollcount = XR_DEFAULT_POLL_COUNT;
+	strcpy(sc->sc_grppoll_str, XR_DEFAULT_GRPPOLL_RATE_STR);
+	sc->sc_grpplq.axq_qnum = -1;
+	sc->sc_xrtxq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, HAL_XR_DATA);
+#endif
+
+	/*
+	 * Special case certain configurations.  Note the
+	 * CAB queue is handled by these specially so don't
+	 * include them when checking the txq setup mask.
+	 */
+	switch (sc->sc_txqsetup &~ ((1<<sc->sc_cabq->axq_qnum) |
+				(sc->sc_uapsdq ? (1<<sc->sc_uapsdq->axq_qnum) : 0))) {
+	case 0x01:
+		ATH_INIT_TQUEUE(&sc->sc_txtq, ath_tx_tasklet_q0, dev);
+		break;
+	case 0x0f:
+		ATH_INIT_TQUEUE(&sc->sc_txtq, ath_tx_tasklet_q0123, dev);
+		break;
+	}
+
+	sc->sc_setdefantenna = ath_setdefantenna;
+	sc->sc_rc = ieee80211_rate_attach(sc, ratectl);
+	if (sc->sc_rc == NULL) {
+		error = EIO;
+		goto bad2;
+	}
+
+	init_timer(&sc->sc_cal_ch);
+	sc->sc_cal_ch.function = ath_calibrate;
+	sc->sc_cal_ch.data = (unsigned long) dev;
+
+#ifdef ATH_SUPERG_DYNTURBO
+	init_timer(&sc->sc_dturbo_switch_mode);
+	sc->sc_dturbo_switch_mode.function = ath_turbo_switch_mode;
+	sc->sc_dturbo_switch_mode.data = (unsigned long) dev;
+#endif
+
+	sc->sc_blinking = 0;
+	sc->sc_ledstate = 1;
+	sc->sc_ledon = 0;			/* low true */
+	sc->sc_ledidle = msecs_to_jiffies(2700);	/* 2.7 sec */
+	sc->sc_dfstesttime = ATH_DFS_TEST_RETURN_PERIOD;
+	init_timer(&sc->sc_ledtimer);
+	init_timer(&sc->sc_dfswaittimer);
+	init_timer(&sc->sc_dfstesttimer);
+	sc->sc_ledtimer.data = (unsigned long) sc;
+	if (sc->sc_softled) {
+		ath_hal_gpioCfgOutput(ah, sc->sc_ledpin);
+		ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
+	}
+
+	/* NB: ether_setup is done by bus-specific code */
+	dev->open = ath_init;
+	dev->stop = ath_stop;
+	dev->hard_start_xmit = ath_hardstart;
+	dev->tx_timeout = ath_tx_timeout;
+	dev->watchdog_timeo = 5 * HZ;			/* XXX */
+	dev->set_multicast_list = ath_mode_init;
+	dev->do_ioctl = ath_ioctl;
+	dev->get_stats = ath_getstats;
+	dev->set_mac_address = ath_set_mac_address;
+ 	dev->change_mtu = ath_change_mtu;
+	dev->tx_queue_len = ATH_TXBUF - 1;		/* 1 for mgmt frame */
+#ifdef USE_HEADERLEN_RESV
+	dev->hard_header_len += sizeof(struct ieee80211_qosframe) +
+				sizeof(struct llc) +
+				IEEE80211_ADDR_LEN +
+				IEEE80211_WEP_IVLEN +
+				IEEE80211_WEP_KIDLEN;
+#ifdef ATH_SUPERG_FF
+	dev->hard_header_len += ATH_FF_MAX_HDR;
+#endif
+#endif
+	ic->ic_dev = dev;
+	ic->ic_mgtstart = ath_mgtstart;
+	ic->ic_init = ath_init;
+	ic->ic_reset = ath_reset;
+	ic->ic_newassoc = ath_newassoc;
+	ic->ic_updateslot = ath_updateslot;
+
+	ic->ic_wme.wme_update = ath_wme_update;
+	ic->ic_uapsd_flush = ath_uapsd_flush;
+
+	/* XXX not right but it's not used anywhere important */
+	ic->ic_phytype = IEEE80211_T_OFDM;
+	ic->ic_opmode = IEEE80211_M_STA;
+	sc->sc_opmode = HAL_M_STA;
+	/* 
+	 * Set the Atheros Advanced Capabilities from station config before 
+	 * starting 802.11 state machine.  Currently, set only fast-frames 
+	 * capability.
+	 */
+	ic->ic_ath_cap = 0;
+	sc->sc_fftxqmin = ATH_FF_TXQMIN;
+#ifdef ATH_SUPERG_FF
+	ic->ic_ath_cap |= (ath_hal_fastframesupported(ah) ? IEEE80211_ATHC_FF : 0);
+#endif
+	ic->ic_ath_cap |= (ath_hal_burstsupported(ah) ? IEEE80211_ATHC_BURST : 0);
+
+#ifdef ATH_SUPERG_COMP
+	ic->ic_ath_cap |= (ath_hal_compressionsupported(ah) ? IEEE80211_ATHC_COMP : 0); 
+#endif
+
+#ifdef ATH_SUPERG_DYNTURBO
+	ic->ic_ath_cap |= (ath_hal_turboagsupported(ah) ? (IEEE80211_ATHC_TURBOP |
+							IEEE80211_ATHC_AR) : 0);
+#endif
+#ifdef ATH_SUPERG_XR
+	ic->ic_ath_cap |= (ath_hal_xrsupported(ah) ? IEEE80211_ATHC_XR : 0);
+#endif
+
+	ic->ic_caps |=
+		  IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
+		| IEEE80211_C_HOSTAP		/* hostap mode */
+		| IEEE80211_C_MONITOR		/* monitor mode */
+		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
+		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
+		| IEEE80211_C_SHSLOT		/* short slot time supported */
+		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
+		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
+		;
+	/*
+	 * Query the HAL to figure out h/w crypto support.
+	 */
+	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
+		ic->ic_caps |= IEEE80211_C_WEP;
+	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
+		ic->ic_caps |= IEEE80211_C_AES;
+	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
+		ic->ic_caps |= IEEE80211_C_AES_CCM;
+	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
+		ic->ic_caps |= IEEE80211_C_CKIP;
+	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
+		ic->ic_caps |= IEEE80211_C_TKIP;
+		/*
+		 * Check if h/w does the MIC and/or whether the
+		 * separate key cache entries are required to
+		 * handle both tx+rx MIC keys.
+		 */
+		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC)) {
+			ic->ic_caps |= IEEE80211_C_TKIPMIC;
+			/*
+			 * Check if h/w does MIC correctly when
+			 * WMM is turned on.
+			 */
+			if (ath_hal_wmetkipmic(ah))
+				ic->ic_caps |= IEEE80211_C_WME_TKIPMIC;
+		}
+
+		/*
+		 * If the h/w supports storing tx+rx MIC keys
+		 * in one cache slot automatically enable use.
+		 */
+		if (ath_hal_hastkipsplit(ah) ||
+		    !ath_hal_settkipsplit(ah, AH_FALSE))
+			sc->sc_splitmic = 1;
+	}
+	sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
+#if 0
+	sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
+#endif
+	/*
+	 * Mark key cache slots associated with global keys
+	 * as in use.  If we knew TKIP was not to be used we
+	 * could leave the +32, +64, and +32+64 slots free.
+	 */
+	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
+		setbit(sc->sc_keymap, i);
+		setbit(sc->sc_keymap, i+64);
+		if (sc->sc_splitmic) {
+			setbit(sc->sc_keymap, i+32);
+			setbit(sc->sc_keymap, i+32+64);
+		}
+	}
+	/*
+	 * TPC support can be done either with a global cap or
+	 * per-packet support.  The latter is not available on
+	 * all parts.  We're a bit pedantic here as all parts
+	 * support a global cap.
+	 */
+	sc->sc_hastpc = ath_hal_hastpc(ah);
+	if (sc->sc_hastpc || ath_hal_hastxpowlimit(ah))
+		ic->ic_caps |= IEEE80211_C_TXPMGT;
+
+	/*
+	 * Default 11.h to start enabled.
+	 */
+	ic->ic_flags |= IEEE80211_F_DOTH;
+	
+	/*
+	 * Check for misc other capabilities.
+	 */
+	if (ath_hal_hasbursting(ah))
+		ic->ic_caps |= IEEE80211_C_BURST;
+	sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
+	sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
+	/*
+	 * Indicate we need the 802.11 header padded to a
+	 * 32-bit boundary for 4-address and QoS frames.
+	 */
+	ic->ic_flags |= IEEE80211_F_DATAPAD;
+
+	/*
+	 * Query the HAL about antenna support
+	 * Enable rx fast diversity if HAL has support
+	 */
+	if (ath_hal_hasdiversity(ah)) {
+		sc->sc_hasdiversity = 1;
+		ath_hal_setdiversity(ah, AH_TRUE);
+		sc->sc_diversity = 1;
+	} else {
+		sc->sc_hasdiversity = 0;
+		sc->sc_diversity = 0;
+		ath_hal_setdiversity(ah, AH_FALSE);
+	}
+	sc->sc_defant = ath_hal_getdefantenna(ah);
+
+	/*
+	 * Not all chips have the VEOL support we want to
+	 * use with IBSS beacons; check here for it.
+	 */
+	sc->sc_hasveol = ath_hal_hasveol(ah);
+
+	/* get mac address from hardware */
+	ath_hal_getmac(ah, ic->ic_myaddr);
+	if (sc->sc_hasbmask) {
+		ath_hal_getbssidmask(ah, sc->sc_bssidmask);
+		ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
+		ath_hal_setbssidmask(ah, sc->sc_bssidmask);
+	}
+	IEEE80211_ADDR_COPY(dev->dev_addr, ic->ic_myaddr);
+
+	/* call MI attach routine. */
+	ieee80211_ifattach(ic);
+	/* override default methods */
+	ic->ic_node_alloc = ath_node_alloc;
+	sc->sc_node_free = ic->ic_node_free;
+	ic->ic_node_free = ath_node_free;
+	ic->ic_node_getrssi = ath_node_getrssi;
+#ifdef ATH_SUPERG_XR
+	ic->ic_node_move_data = ath_node_move_data;
+#endif
+	sc->sc_node_cleanup = ic->ic_node_cleanup;
+	ic->ic_node_cleanup = ath_node_cleanup;
+	sc->sc_recv_mgmt = ic->ic_recv_mgmt;
+	ic->ic_recv_mgmt = ath_recv_mgmt;
+
+	ic->ic_vap_create = ath_vap_create;
+	ic->ic_vap_delete = ath_vap_delete;
+
+	ic->ic_scan_start = ath_scan_start;
+	ic->ic_scan_end = ath_scan_end;
+	ic->ic_set_channel = ath_set_channel;
+
+	ic->ic_set_coverageclass = ath_set_coverageclass;
+	ic->ic_mhz2ieee = ath_mhz2ieee;
+
+	if (register_netdev(dev)) {
+		printk(KERN_ERR "%s: unable to register device\n", dev->name);
+		goto bad3;
+	}
+	/*
+	 * Attach dynamic MIB vars and announce support
+	 * now that we have a device name with unit number.
+	 */
+	ath_dynamic_sysctl_register(sc);
+	ieee80211_announce(ic);
+	ath_announce(dev);
+#ifdef ATH_TX99_DIAG
+	printk("%s: TX99 support enabled\n", dev->name);
+#endif
+	sc->sc_invalid = 0;
+
+	if (autocreate) {
+		if (!strcmp(autocreate, "none"))
+			autocreatemode = -1;
+		else if (!strcmp(autocreate, "sta"))
+			autocreatemode = IEEE80211_M_STA;
+		else if (!strcmp(autocreate, "ap"))
+			autocreatemode = IEEE80211_M_HOSTAP;
+		else if (!strcmp(autocreate, "adhoc"))
+			autocreatemode = IEEE80211_M_IBSS;
+		else if (!strcmp(autocreate, "ahdemo"))
+			autocreatemode = IEEE80211_M_AHDEMO;
+		else if (!strcmp(autocreate, "wds"))
+			autocreatemode = IEEE80211_M_WDS;
+		else if (!strcmp(autocreate, "monitor"))
+			autocreatemode = IEEE80211_M_MONITOR;
+		else {
+			printk(KERN_INFO "Unknown autocreate mode: %s\n",
+				autocreate);
+			autocreatemode = -1;
+		}
+	}
+	
+	if (autocreatemode != -1) {
+		rtnl_lock();
+		error = ieee80211_create_vap(ic, "ath%d", dev,
+				autocreatemode, IEEE80211_CLONE_BSSID);
+		rtnl_unlock();
+		if (error)
+			printk(KERN_ERR "%s: autocreation of VAP failed: %d\n",
+				dev->name, error);
+	}
+
+	return 0;
+bad3:
+	ieee80211_ifdetach(ic);
+	ieee80211_rate_detach(sc->sc_rc);
+bad2:
+	ath_tx_cleanup(sc);
+	ath_desc_free(sc);
+bad:
+	if (ah)
+		ath_hal_detach(ah);
+	ATH_TXBUF_LOCK_DESTROY(sc);
+	ATH_LOCK_DESTROY(sc);
+	sc->sc_invalid = 1;
+
+	return error;
+}
+
+/*
+ * Detach the driver instance from the network device: stop the
+ * device, flush any scheduled radar work, tear down the 802.11
+ * layer, release the IRQ, free descriptors/tx queues, detach the
+ * HAL and finally unregister the net_device.  Always returns 0.
+ */
+int
+ath_detach(struct net_device *dev)
+{
+	struct ath_softc *sc = dev->priv;
+	struct ath_hal *ah = sc->sc_ah;
+
+	HAL_INT tmp;
+	DPRINTF(sc, ATH_DEBUG_ANY, "%s: flags %x\n", __func__, dev->flags);
+	ath_stop(dev);
+
+	/* NOTE(review): chip is forced awake so that the register
+	 * accesses below (interrupt disable / ISR clear) take effect. */
+	ath_hal_setpower(sc->sc_ah, HAL_PM_AWAKE);
+	/* Flush the radar task if it's scheduled */
+	if (sc->sc_rtasksched == 1)
+		flush_scheduled_work();
+
+	sc->sc_invalid = 1;
+
+	/*
+	 * NB: the order of these is important:
+	 * o call the 802.11 layer before detaching the HAL to
+	 *   ensure callbacks into the driver to delete global
+	 *   key cache entries can be handled
+	 * o reclaim the tx queue data structures after calling
+	 *   the 802.11 layer as we'll get called back to reclaim
+	 *   node state and potentially want to use them
+	 * o to cleanup the tx queues the HAL is called, so detach
+	 *   it last
+	 * Other than that, it's straightforward...
+	 */
+	ieee80211_ifdetach(&sc->sc_ic);
+
+	ath_hal_intrset(ah, 0);		/* disable further intr's */
+	ath_hal_getisr(ah, &tmp);	/* clear ISR */
+	if(dev->irq) {
+		free_irq(dev->irq, dev);
+		dev->irq = 0;
+	}
+#ifdef ATH_TX99_DIAG
+	if (sc->sc_tx99 != NULL)
+		sc->sc_tx99->detach(sc->sc_tx99);
+#endif
+	ieee80211_rate_detach(sc->sc_rc);
+	ath_desc_free(sc);
+	ath_tx_cleanup(sc);
+	ath_hal_detach(ah);
+
+	ath_dynamic_sysctl_unregister(sc);
+	ATH_LOCK_DESTROY(sc);
+	dev->stop = NULL; /* prevent calling ath_stop again */
+	unregister_netdev(dev);
+	return 0;
+}
+
+/*
+ * Create a new virtual interface ("VAP") on top of the device.
+ * Validates the requested opmode against the VAPs already present,
+ * allocates the net_device + ath_vap state, overrides driver methods,
+ * optionally assigns a unique BSSID and a beacon transmit slot, and
+ * restarts the hardware if it was running when we quiesced it.
+ * Returns the new vap, or NULL if the mode combination is not
+ * permitted or allocation fails.
+ */
+static struct ieee80211vap *
+ath_vap_create(struct ieee80211com *ic, const char *name, int unit,
+	int opmode, int flags, struct net_device *mdev)
+{
+	struct ath_softc *sc = ic->ic_dev->priv;
+	struct ath_hal *ah = sc->sc_ah;
+	struct net_device *dev;
+	struct ath_vap *avp;
+	struct ieee80211vap *vap;
+	int ic_opmode;
+
+	if (ic->ic_dev->flags & IFF_RUNNING) {
+		/* needs to disable hardware too */
+		ath_hal_intrset(ah, 0);		/* disable interrupts */
+		ath_draintxq(sc);		/* stop xmit side */
+		ath_stoprecv(sc);		/* stop recv side */
+	}
+	/* XXX ic unlocked and race against add */
+	switch (opmode) {
+	case IEEE80211_M_STA:	/* ap+sta for repeater application */
+		if (sc->sc_nstavaps != 0)  /* only one sta regardless */
+			return NULL;
+		if ((sc->sc_nvaps != 0) && (!(flags & IEEE80211_NO_STABEACONS)))
+			return NULL;   /* If using station beacons, must first up */
+		if (flags & IEEE80211_NO_STABEACONS) {
+			sc->sc_nostabeacons = 1;
+			ic_opmode = IEEE80211_M_HOSTAP;	/* Run with chip in AP mode */
+		} else 
+			ic_opmode = opmode;
+		break;
+	case IEEE80211_M_IBSS:
+		if (sc->sc_nvaps != 0)		/* only one */
+			return NULL;
+		ic_opmode = opmode;
+		break;
+	case IEEE80211_M_AHDEMO:
+	case IEEE80211_M_MONITOR:
+		if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
+			/* preserve existing mode */
+			ic_opmode = ic->ic_opmode;
+		} else
+			ic_opmode = opmode;
+		break;
+	case IEEE80211_M_HOSTAP:
+	case IEEE80211_M_WDS:
+		/* permit multiple ap's and/or wds links */
+		/* XXX sta+ap for repeater/bridge application */
+		if ((sc->sc_nvaps != 0) && (ic->ic_opmode == IEEE80211_M_STA))
+			return NULL;
+		/* XXX not right, beacon buffer is allocated on RUN trans */
+		if (opmode == IEEE80211_M_HOSTAP && STAILQ_EMPTY(&sc->sc_bbuf))
+			return NULL;
+		/*
+		 * XXX Not sure if this is correct when operating only
+		 * with WDS links.
+		 */
+		ic_opmode = IEEE80211_M_HOSTAP;
+
+		break;
+	default:
+		return NULL;
+	}
+
+	/* One beacon buffer per VAP; cap the total VAP count to that. */
+	if (sc->sc_nvaps >= ATH_BCBUF) {
+		printk(KERN_WARNING "too many virtual ap's (already got %d)\n", sc->sc_nvaps);
+		return NULL;
+	}
+
+	dev = alloc_etherdev(sizeof(struct ath_vap) + sc->sc_rc->arc_vap_space);
+	if (dev == NULL) {
+		/* XXX msg */
+		return NULL;
+	}
+	
+	avp = dev->priv;
+	ieee80211_vap_setup(ic, dev, name, unit, opmode, flags);
+	/* override with driver methods */
+	vap = &avp->av_vap;
+	avp->av_newstate = vap->iv_newstate;
+	vap->iv_newstate = ath_newstate;
+	vap->iv_key_alloc = ath_key_alloc;
+	vap->iv_key_delete = ath_key_delete;
+	vap->iv_key_set = ath_key_set;
+	vap->iv_key_update_begin = ath_key_update_begin;
+	vap->iv_key_update_end = ath_key_update_end;
+#ifdef ATH_SUPERG_COMP
+	vap->iv_comp_set = ath_comp_set;
+#endif
+
+	/* Let rate control register proc entries for the VAP */
+	if (sc->sc_rc->ops->dynamic_proc_register)
+		sc->sc_rc->ops->dynamic_proc_register(vap);
+
+	/*
+	 * Change the interface type for monitor mode.
+	 */
+	if (opmode == IEEE80211_M_MONITOR)
+		dev->type = ARPHRD_IEEE80211_PRISM;
+	if ((flags & IEEE80211_CLONE_BSSID) &&
+	    sc->sc_nvaps != 0 && opmode != IEEE80211_M_WDS && sc->sc_hasbmask) {
+		struct ieee80211vap *v;
+		int id_mask, id;
+		
+		/*
+		 * Hardware supports the bssid mask and a unique
+		 * bssid was requested.  Assign a new mac address
+		 * and expand our bssid mask to cover the active
+		 * virtual ap's with distinct addresses.
+		 */
+		
+		/* do a full search to mark all the allocated VAPs */
+		id_mask = 0;
+		TAILQ_FOREACH(v, &ic->ic_vaps, iv_next)
+			id_mask |= (1 << ATH_GET_VAP_ID(v->iv_myaddr));
+		
+		for (id = 0; id < ATH_BCBUF; id++) {
+			/* get the first available slot */
+			if ((id_mask & (1 << id)) == 0) {
+				ATH_SET_VAP_BSSID(vap->iv_myaddr, id);
+				break;
+			}
+		}
+	}
+	avp->av_bslot = -1;
+	STAILQ_INIT(&avp->av_mcastq.axq_q);
+	ATH_TXQ_LOCK_INIT(&avp->av_mcastq);
+	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_IBSS) {
+		/*
+		 * Allocate beacon state for hostap/ibss.  We know
+		 * a buffer is available because of the check above.
+		 */
+		avp->av_bcbuf = STAILQ_FIRST(&sc->sc_bbuf);
+		STAILQ_REMOVE_HEAD(&sc->sc_bbuf, bf_list);
+		if (opmode == IEEE80211_M_HOSTAP || !sc->sc_hasveol) {
+			int slot;
+			/*
+			 * Assign the VAP to a beacon xmit slot.  As
+			 * above, this cannot fail to find one.
+			 */
+			avp->av_bslot = 0;
+			for (slot = 0; slot < ATH_BCBUF; slot++)
+				if (sc->sc_bslot[slot] == NULL) {
+					/*
+					 * XXX hack, space out slots to better
+					 * deal with misses
+					 */
+					if (slot + 1 < ATH_BCBUF &&
+					    sc->sc_bslot[slot+1] == NULL) {
+						avp->av_bslot = slot + 1;
+						break;
+					}
+					avp->av_bslot = slot;
+					/* NB: keep looking for a double slot */
+				}
+			KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
+				("beacon slot %u not empty?", avp->av_bslot));
+			sc->sc_bslot[avp->av_bslot] = vap;
+			sc->sc_nbcnvaps++;
+		}
+		if ((opmode == IEEE80211_M_HOSTAP) && (sc->sc_hastsfadd)) {
+			/*
+			 * Multiple VAPs are to transmit beacons and we
+			 * have h/w support for TSF adjusting; enable use
+			 * of staggered beacons.
+			 */
+			/* XXX check for beacon interval too small */
+			sc->sc_stagbeacons = 1;
+		}
+	}
+	if (sc->sc_hastsfadd)
+		ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
+	SET_NETDEV_DEV(dev, ATH_GET_NETDEV_DEV(mdev));
+	/* complete setup */
+	(void) ieee80211_vap_attach(vap,
+		ieee80211_media_change, ieee80211_media_status);
+
+	ic->ic_opmode = ic_opmode;
+	
+	/* WDS links do not count against the VAP limit */
+	if (opmode != IEEE80211_M_WDS)
+		sc->sc_nvaps++;
+		
+	if (opmode == IEEE80211_M_STA)
+		sc->sc_nstavaps++;
+	else if (opmode == IEEE80211_M_MONITOR)
+		sc->sc_nmonvaps++;
+	/*
+	 * Adhoc demo mode is a pseudo mode; to the HAL it's
+	 * just ibss mode and the driver doesn't use management
+	 * frames.  Other modes carry over directly to the HAL.
+	 */
+	if (ic->ic_opmode == IEEE80211_M_AHDEMO)
+		sc->sc_opmode = HAL_M_IBSS;
+	else
+		sc->sc_opmode = (HAL_OPMODE) ic->ic_opmode;	/* NB: compatible */
+
+#ifdef ATH_SUPERG_XR
+	if ( vap->iv_flags & IEEE80211_F_XR ) {
+		if (ath_descdma_setup(sc, &sc->sc_grppolldma, &sc->sc_grppollbuf,
+			"grppoll", (sc->sc_xrpollcount+1) * HAL_ANTENNA_MAX_MODE, 1) != 0)
+			printk("%s:grppoll Buf allocation failed \n",__func__);
+		if (!sc->sc_xrtxq)
+			sc->sc_xrtxq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, HAL_XR_DATA);
+		if (sc->sc_hasdiversity) {
+			/* Save current diversity state if user destroys XR VAP */
+			sc->sc_olddiversity = sc->sc_diversity;
+			ath_hal_setdiversity(sc->sc_ah, 0);
+			sc->sc_diversity = 0;
+		}
+	}
+#endif
+	if (ic->ic_dev->flags & IFF_RUNNING) {
+		/* restart hardware */
+		if (ath_startrecv(sc) != 0)	/* restart recv */
+			printk("%s: %s: unable to start recv logic\n",
+				dev->name, __func__);
+		if (sc->sc_beacons)
+			ath_beacon_config(sc, NULL);	/* restart beacons */
+		ath_hal_intrset(ah, sc->sc_imask);
+	}
+
+	return vap;
+}
+
+/*
+ * Tear down a VAP created by ath_vap_create(): quiesce the hardware
+ * if it is running, reclaim the VAP's multicast queue and beacon
+ * state, adjust the per-mode VAP counters, detach the VAP from the
+ * 802.11 layer, free any application-supplied IEs, and restart the
+ * rx/tx machinery (and beacons) for the remaining VAPs.
+ */
+static void
+ath_vap_delete(struct ieee80211vap *vap)
+{
+	struct net_device *dev = vap->iv_ic->ic_dev;
+	struct ath_softc *sc = dev->priv;
+	struct ath_hal *ah = sc->sc_ah;
+	struct ath_vap *avp = ATH_VAP(vap);
+	int decrease = 1;
+	int i;
+	KASSERT(vap->iv_state == IEEE80211_S_INIT, ("VAP not stopped"));
+
+	if (dev->flags & IFF_RUNNING) {
+		/*
+		 * Quiesce the hardware while we remove the VAP.  In
+		 * particular we need to reclaim all references to the
+		 * VAP state by any frames pending on the tx queues.
+		 *
+		 * XXX can we do this w/o affecting other VAPs?
+		 */
+		ath_hal_intrset(ah, 0);		/* disable interrupts */
+		ath_draintxq(sc);		/* stop xmit side */
+		ath_stoprecv(sc);		/* stop recv side */
+	}
+
+	/*
+	 * Reclaim any pending mcast bufs on the VAP.
+	 */
+	ath_tx_draintxq(sc, &avp->av_mcastq);
+	ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq);
+
+	/*
+	 * Reclaim beacon state.  Note this must be done before
+	 * VAP instance is reclaimed as we may have a reference
+	 * to it in the buffer for the beacon frame.
+	 */
+	if (avp->av_bcbuf != NULL) {
+		if (avp->av_bslot != -1) {
+			sc->sc_bslot[avp->av_bslot] = NULL;
+			sc->sc_nbcnvaps--;
+		}
+		ath_beacon_return(sc, avp->av_bcbuf);
+		avp->av_bcbuf = NULL;
+		/* last beaconing VAP gone: staggering no longer needed */
+		if (sc->sc_nbcnvaps == 0)
+			sc->sc_stagbeacons = 0;
+	}
+	if (vap->iv_opmode == IEEE80211_M_STA) {
+		sc->sc_nstavaps--;
+		if (sc->sc_nostabeacons)
+			sc->sc_nostabeacons = 0;
+	} else if (vap->iv_opmode == IEEE80211_M_MONITOR) {
+		sc->sc_nmonvaps--;
+	} else if (vap->iv_opmode == IEEE80211_M_WDS) {
+		/* WDS links were never counted in sc_nvaps at create time */
+		decrease = 0;
+	}
+	ieee80211_vap_detach(vap);
+	/* NB: memory is reclaimed through dev->destructor callback */
+	if (decrease)
+		sc->sc_nvaps--;
+
+#ifdef ATH_SUPERG_XR 
+	/*
+	 * If it's an XR VAP, free the memory allocated explicitly.
+	 * Since the XR VAP is not registered, OS cannot free the memory.
+	 */
+	if (vap->iv_flags & IEEE80211_F_XR) {
+		ath_grppoll_stop(vap);
+		ath_descdma_cleanup(sc, &sc->sc_grppolldma, &sc->sc_grppollbuf, BUS_DMA_FROMDEVICE);
+		memset(&sc->sc_grppollbuf, 0, sizeof(sc->sc_grppollbuf));
+		memset(&sc->sc_grppolldma, 0, sizeof(sc->sc_grppolldma));
+		if (vap->iv_xrvap)
+			vap->iv_xrvap->iv_xrvap = NULL;
+		kfree(vap->iv_dev);
+		ath_tx_cleanupq(sc,sc->sc_xrtxq);
+		sc->sc_xrtxq = NULL;
+		if (sc->sc_hasdiversity) {
+			/* Restore diversity setting to old diversity setting */
+			ath_hal_setdiversity(ah, sc->sc_olddiversity);
+			sc->sc_diversity = sc->sc_olddiversity;
+		}
+	}
+#endif
+
+	/* Release any application-installed information elements. */
+	for (i = 0; i < IEEE80211_APPIE_NUM_OF_FRAME; i++) {
+		if (vap->app_ie[i].ie != NULL) {
+			FREE(vap->app_ie[i].ie, M_DEVBUF);
+			vap->app_ie[i].ie = NULL;
+			vap->app_ie[i].length = 0;
+		}
+	}
+
+	if (dev->flags & IFF_RUNNING) {
+		/*
+		 * Restart rx+tx machines if device is still running.
+		 */
+		if (ath_startrecv(sc) != 0)	/* restart recv */
+			printk("%s: %s: unable to start recv logic\n",
+				dev->name, __func__);
+		if (sc->sc_beacons)
+			ath_beacon_config(sc, NULL);	/* restart beacons */
+		ath_hal_intrset(ah, sc->sc_imask);
+	}
+}
+
+/*
+ * Power-management suspend hook: simply stop the device.
+ */
+void
+ath_suspend(struct net_device *dev)
+{
+	struct ath_softc *sc = dev->priv;
+
+	DPRINTF(sc, ATH_DEBUG_ANY, "%s: flags %x\n", __func__, dev->flags);
+	ath_stop(dev);
+}
+
+/*
+ * Power-management resume hook: re-initialize the device.
+ */
+void
+ath_resume(struct net_device *dev)
+{
+	struct ath_softc *sc = dev->priv;
+
+	DPRINTF(sc, ATH_DEBUG_ANY, "%s: flags %x\n", __func__, dev->flags);
+	ath_init(dev);
+}
+
+/*
+ * Walk the rx descriptor list at interrupt time looking for U-APSD
+ * trigger frames from power-save stations.  For each valid trigger,
+ * start a service period: move the station's buffered frames (or a
+ * freshly generated QoS NULL if none are queued) onto the U-APSD
+ * hardware transmit queue with EOSP marked on the final frame, then
+ * kick the transmitter.  Also tracks stations entering/leaving
+ * power-save here to avoid racing with the rx tasklet.
+ */
+static void
+ath_uapsd_processtriggers(struct ath_softc *sc)
+{
+	struct ath_hal *ah = sc->sc_ah;
+	struct ath_buf *bf;
+	struct ath_desc *ds;
+	struct sk_buff *skb;
+	struct ieee80211_node *ni;
+	struct ath_node *an;
+	struct ieee80211_qosframe *qwh;
+	struct ath_txq *uapsd_xmit_q = sc->sc_uapsdq;
+	struct ieee80211com *ic = &sc->sc_ic;
+	int ac, retval;
+	u_int8_t tid;
+	u_int16_t frame_seq;
+	u_int64_t tsf;
+#define	PA2DESC(_sc, _pa) \
+	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
+		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
+
+	/* XXXAPSD: build in check against max triggers we could see
+	 *          based on ic->ic_uapsdmaxtriggers.
+	 */
+
+	/* Snapshot the hardware TSF; handed to the HAL for rx processing. */
+	tsf = ath_hal_gettsf64(ah);
+	ATH_RXBUF_LOCK(sc);
+	if (sc->sc_rxbufcur == NULL)
+		sc->sc_rxbufcur = STAILQ_FIRST(&sc->sc_rxbuf);
+	for (bf = sc->sc_rxbufcur; bf; bf = STAILQ_NEXT(bf, bf_list)) {
+		ds = bf->bf_desc;
+		if (ds->ds_link == bf->bf_daddr) {
+			/* NB: never process the self-linked entry at the end */
+			break;
+		}
+		if (bf->bf_status & ATH_BUFSTATUS_DONE) {
+			/* 
+			 * already processed this buffer (shouldn't occur if
+			 * we change code to always process descriptors in
+			 * rx intr handler - as opposed to sometimes processing
+			 * in the rx tasklet).
+			 */
+			continue;
+		}
+		skb = bf->bf_skb;
+		if (skb == NULL) {		/* XXX ??? can this happen */
+			printk("%s: no skbuff\n", __func__);
+			continue;
+		}
+
+		/*
+		 * XXXAPSD: consider new HAL call that does only the subset
+		 *          of ath_hal_rxprocdesc we require for trigger search.
+		 */
+
+		/* 
+		 * NB: descriptor memory doesn't need to be sync'd
+		 *     due to the way it was allocated. 
+		 */
+
+		/*
+		 * Must provide the virtual address of the current
+		 * descriptor, the physical address, and the virtual
+		 * address of the next descriptor in the h/w chain.
+		 * This allows the HAL to look ahead to see if the
+		 * hardware is done with a descriptor by checking the
+		 * done bit in the following descriptor and the address
+		 * of the current descriptor the DMA engine is working
+		 * on.  All this is necessary because of our use of
+		 * a self-linked list to avoid rx overruns.
+		 */
+		retval = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr, PA2DESC(sc, ds->ds_link), tsf);
+		if (HAL_EINPROGRESS == retval)
+			break;
+
+		/* XXX: we do not support frames spanning multiple descriptors */
+		bf->bf_status |= ATH_BUFSTATUS_DONE;
+
+		/* errors? */
+		if (ds->ds_rxstat.rs_status)
+			continue;
+
+		/* prepare wireless header for examination */
+		bus_dma_sync_single(sc->sc_bdev, bf->bf_skbaddr, 
+							sizeof(struct ieee80211_qosframe), 
+							BUS_DMA_FROMDEVICE);
+		qwh = (struct ieee80211_qosframe *) skb->data;
+
+		/* find the node. it MUST be in the keycache. */
+		if (ds->ds_rxstat.rs_keyix == HAL_RXKEYIX_INVALID ||
+		    (ni = sc->sc_keyixmap[ds->ds_rxstat.rs_keyix]) == NULL) {
+			/* 
+			 * XXX: this can occur if WEP mode is used for non-Atheros clients
+			 *      (since we do not know which of the 4 WEP keys will be used
+			 *      at association time, so cannot setup a key-cache entry.
+			 *      The Atheros client can convey this in the Atheros IE.)
+			 *
+			 * TODO: The fix is to use the hash lookup on the node here.
+			 */
+#if 0
+			/*
+			 * This print is very chatty, so removing for now.
+			 */
+			DPRINTF(sc, ATH_DEBUG_UAPSD, "%s: U-APSD node (%s) has invalid keycache entry\n",
+				__func__, ether_sprintf(qwh->i_addr2));
+#endif
+			continue;
+		}
+		
+		/* only U-APSD capable stations are of interest here */
+		if (!(ni->ni_flags & IEEE80211_NODE_UAPSD))
+			continue;
+		
+		/*
+		 * Must deal with change of state here, since otherwise there would
+		 * be a race (on two quick frames from STA) between this code and the
+		 * tasklet where we would:
+		 *   - miss a trigger on entry to PS if we're already trigger hunting
+		 *   - generate spurious SP on exit (due to frame following exit frame)
+		 */
+		if (((qwh->i_fc[1] & IEEE80211_FC1_PWR_MGT) ^
+		     (ni->ni_flags & IEEE80211_NODE_PWR_MGT))) {
+			/*
+			 * NB: do not require lock here since this runs at intr
+			 * "proper" time and cannot be interrupted by rx tasklet
+			 * (code there has lock). May want to place a macro here
+			 * (that does nothing) to make this more clear.
+			 */
+			ni->ni_flags |= IEEE80211_NODE_PS_CHANGED;
+			ni->ni_pschangeseq = *(__le16 *)(&qwh->i_seq[0]);
+			ni->ni_flags &= ~IEEE80211_NODE_UAPSD_SP;
+			ni->ni_flags ^= IEEE80211_NODE_PWR_MGT;
+			if (qwh->i_fc[1] & IEEE80211_FC1_PWR_MGT) {
+				ni->ni_flags |= IEEE80211_NODE_UAPSD_TRIG;
+				ic->ic_uapsdmaxtriggers++;
+				WME_UAPSD_NODE_TRIGSEQINIT(ni);
+				DPRINTF(sc, ATH_DEBUG_UAPSD,
+					"%s: Node (%s) became U-APSD triggerable (%d)\n", 
+					__func__, ether_sprintf(qwh->i_addr2),
+					ic->ic_uapsdmaxtriggers);
+			} else {
+				ni->ni_flags &= ~IEEE80211_NODE_UAPSD_TRIG;
+				ic->ic_uapsdmaxtriggers--;
+				DPRINTF(sc, ATH_DEBUG_UAPSD,
+					"%s: Node (%s) no longer U-APSD triggerable (%d)\n", 
+					__func__, ether_sprintf(qwh->i_addr2),
+					ic->ic_uapsdmaxtriggers);
+				/* 
+				 * XXX: rapidly thrashing sta could get 
+				 * out-of-order frames due this flush placing
+				 * frames on backlogged regular AC queue and
+				 * re-entry to PS having fresh arrivals onto
+				 * faster UPSD delivery queue. if this is a
+				 * big problem we may need to drop these.
+				 */
+				ath_uapsd_flush(ni);
+			}
+			
+			continue;
+		}
+
+		/* no station currently in trigger-enabled PS: nothing to do */
+		if (ic->ic_uapsdmaxtriggers == 0)
+			continue;
+		
+		/* make sure the frame is QoS data/null */
+		/* NB: with current sub-type definitions, the 
+		 * IEEE80211_FC0_SUBTYPE_QOS check, below, covers the 
+		 * QoS null case too.
+		 */
+		if (((qwh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_DATA) ||
+		     !(qwh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS))
+			continue;
+		
+		/*
+		 * To be a trigger:
+		 *   - node is in triggerable state
+		 *   - QoS data/null frame with triggerable AC
+		 */
+		tid = qwh->i_qos[0] & IEEE80211_QOS_TID;
+		ac = TID_TO_WME_AC(tid);
+		if (!WME_UAPSD_AC_CAN_TRIGGER(ac, ni))
+			continue;
+		
+		DPRINTF(sc, ATH_DEBUG_UAPSD, 
+			"%s: U-APSD trigger detected for node (%s) on AC %d\n",
+			__func__, ether_sprintf(ni->ni_macaddr), ac);
+		if (ni->ni_flags & IEEE80211_NODE_UAPSD_SP) {
+			/* have trigger, but SP in progress, so ignore */
+			DPRINTF(sc, ATH_DEBUG_UAPSD,
+				"%s:   SP already in progress - ignoring\n",
+				__func__);
+			continue;
+		}
+
+		/*
+		 * Detect duplicate triggers and drop if so.
+		 */
+		frame_seq = le16toh(*(__le16 *)qwh->i_seq);
+		if ((qwh->i_fc[1] & IEEE80211_FC1_RETRY) &&
+		    frame_seq == ni->ni_uapsd_trigseq[ac]) {
+			DPRINTF(sc, ATH_DEBUG_UAPSD, "%s: dropped dup trigger, ac %d, seq %d\n",
+				__func__, ac, frame_seq);
+			continue;
+		}
+
+		an = ATH_NODE(ni);
+
+		/* start the SP */
+		ATH_NODE_UAPSD_LOCK(an);
+		ni->ni_stats.ns_uapsd_triggers++;
+		ni->ni_flags |= IEEE80211_NODE_UAPSD_SP;
+		ni->ni_uapsd_trigseq[ac] = frame_seq;
+		ATH_NODE_UAPSD_UNLOCK(an);
+
+		ATH_TXQ_LOCK(uapsd_xmit_q);
+		if (STAILQ_EMPTY(&an->an_uapsd_q)) {
+			DPRINTF(sc, ATH_DEBUG_UAPSD,
+				"%s: Queue empty, generating QoS NULL to send\n",
+				__func__);
+			/* 
+			 * Empty queue, so need to send QoS null on this ac. Make a
+			 * call that will dump a QoS null onto the node's queue, then
+			 * we can proceed as normal.
+			 */
+			ieee80211_send_qosnulldata(ni, ac);
+		}
+
+		if (STAILQ_FIRST(&an->an_uapsd_q)) {
+			struct ath_buf *last_buf = STAILQ_LAST(&an->an_uapsd_q, ath_buf, bf_list);
+			struct ath_desc *last_desc = last_buf->bf_desc;
+			struct ieee80211_qosframe *qwhl = (struct ieee80211_qosframe *)last_buf->bf_skb->data;
+			/* 
+			 * NB: flip the bit to cause intr on the EOSP desc,
+			 * which is the last one
+			 */
+			ath_hal_txreqintrdesc(sc->sc_ah, last_desc);
+			qwhl->i_qos[0] |= IEEE80211_QOS_EOSP;
+
+			if (IEEE80211_VAP_EOSPDROP_ENABLED(ni->ni_vap)) {
+				/* simulate lost EOSP */
+				qwhl->i_addr1[0] |= 0x40;
+			}
+			
+			/* more data bit only for EOSP frame */
+			if (an->an_uapsd_overflowqdepth)
+				qwhl->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
+			else if (IEEE80211_NODE_UAPSD_USETIM(ni))
+				ni->ni_vap->iv_set_tim(ni, 0);
+
+			ni->ni_stats.ns_tx_uapsd += an->an_uapsd_qdepth;
+
+			/* flush the modified header back before handing to DMA */
+			bus_dma_sync_single(sc->sc_bdev, last_buf->bf_skbaddr,
+				sizeof(*qwhl), BUS_DMA_TODEVICE);
+			
+			if (uapsd_xmit_q->axq_link) {
+#ifdef AH_NEED_DESC_SWAP
+				*uapsd_xmit_q->axq_link = cpu_to_le32(STAILQ_FIRST(&an->an_uapsd_q)->bf_daddr);
+#else
+				*uapsd_xmit_q->axq_link = STAILQ_FIRST(&an->an_uapsd_q)->bf_daddr;
+#endif
+			}
+			/* below leaves an_uapsd_q NULL */
+			STAILQ_CONCAT(&uapsd_xmit_q->axq_q, &an->an_uapsd_q);
+			uapsd_xmit_q->axq_link = &last_desc->ds_link;
+			ath_hal_puttxbuf(sc->sc_ah, 
+				uapsd_xmit_q->axq_qnum, 
+				(STAILQ_FIRST(&uapsd_xmit_q->axq_q))->bf_daddr);
+			ath_hal_txstart(sc->sc_ah, uapsd_xmit_q->axq_qnum);
+		}
+		an->an_uapsd_qdepth = 0;
+
+		ATH_TXQ_UNLOCK(uapsd_xmit_q);
+	}
+	/* remember where to resume on the next invocation */
+	sc->sc_rxbufcur = bf;
+	ATH_RXBUF_UNLOCK(sc);
+#undef PA2DESC
+}
+
+/*
+ * Interrupt handler.  Most of the actual processing is deferred.
+ */
+irqreturn_t
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
+ath_intr(int irq, void *dev_id)
+#else
+ath_intr(int irq, void *dev_id, struct pt_regs *regs)
+#endif
+{
+	struct net_device *dev = dev_id;
+	struct ath_softc *sc = dev->priv;
+	struct ath_hal *ah = sc->sc_ah;
+	HAL_INT status;
+	int needmark;	/* nonzero when a bottom half must be kicked */
+
+	if (sc->sc_invalid) {
+		/*
+		 * The hardware is not ready/present, don't touch anything.
+		 * Note this can happen early on if the IRQ is shared.
+		 */
+		return IRQ_NONE;
+	}
+	if (!ath_hal_intrpend(ah))		/* shared irq, not for us */
+		return IRQ_NONE;
+	if ((dev->flags & (IFF_RUNNING | IFF_UP)) != (IFF_RUNNING | IFF_UP)) {
+		DPRINTF(sc, ATH_DEBUG_INTR, "%s: flags 0x%x\n",
+			__func__, dev->flags);
+		ath_hal_getisr(ah, &status);	/* clear ISR */
+		ath_hal_intrset(ah, 0);		/* disable further intr's */
+		return IRQ_HANDLED;
+	}
+	needmark = 0;
+	/*
+	 * Figure out the reason(s) for the interrupt.  Note
+	 * that the HAL returns a pseudo-ISR that may include
+	 * bits we haven't explicitly enabled so we mask the
+	 * value to ensure we only process bits we requested.
+	 */
+	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
+	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
+	status &= sc->sc_imask;			/* discard unasked for bits */
+	if (status & HAL_INT_FATAL) {
+		sc->sc_stats.ast_hardware++;
+		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
+		ATH_SCHEDULE_TQUEUE(&sc->sc_fataltq, &needmark);
+	} else if (status & HAL_INT_RXORN) {
+		sc->sc_stats.ast_rxorn++;
+		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
+		ATH_SCHEDULE_TQUEUE(&sc->sc_rxorntq, &needmark);
+	} else {
+		if (status & HAL_INT_SWBA) {
+			/*
+			 * Software beacon alert--time to send a beacon.
+			 * Handle beacon transmission directly; deferring
+			 * this is too slow to meet timing constraints
+			 * under load.
+			 */
+			ath_beacon_send(sc, &needmark);
+		}
+		if (status & HAL_INT_RXEOL) {
+			/*
+			 * NB: the hardware should re-read the link when
+			 *     RXE bit is written, but it doesn't work at
+			 *     least on older hardware revs.
+			 */
+			sc->sc_stats.ast_rxeol++;
+		}
+		if (status & HAL_INT_TXURN) {
+			sc->sc_stats.ast_txurn++;
+			/* bump tx trigger level */
+			ath_hal_updatetxtriglevel(ah, AH_TRUE);
+		}
+		if (status & HAL_INT_RX) {
+			ath_uapsd_processtriggers(sc);
+			/* Get the noise floor data in interrupt context as we can't get it
+			 * per frame, so we need to get it as soon as possible (i.e. the tasklet
+			 * might take too long to fire). */
+			ath_hal_process_noisefloor(ah);
+			sc->sc_channoise = ath_hal_get_channel_noise(ah, &(sc->sc_curchan));
+			ATH_SCHEDULE_TQUEUE(&sc->sc_rxtq, &needmark);
+		}
+		if (status & HAL_INT_TX) {
+#ifdef ATH_SUPERG_DYNTURBO
+			/*
+			 * Check if the beacon queue caused the interrupt 
+			 * when a dynamic turbo switch
+			 * is pending so we can initiate the change. 
+			 * XXX must wait for all VAPs' beacons
+			 */
+
+			if (sc->sc_dturbo_switch) {
+				u_int32_t txqs = (1 << sc->sc_bhalq);
+				ath_hal_gettxintrtxqs(ah, &txqs);
+				if(txqs & (1 << sc->sc_bhalq)) {
+					sc->sc_dturbo_switch = 0;
+					/*
+					 * Hack: defer switch for 10ms to permit slow
+					 * clients time to track us.  This especially
+					 * noticeable with Windows clients.
+					 */
+					mod_timer(&sc->sc_dturbo_switch_mode,
+							  jiffies + msecs_to_jiffies(10));
+				}
+			} 
+#endif
+			ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, &needmark);
+		}
+		if (status & HAL_INT_BMISS) {
+			sc->sc_stats.ast_bmiss++;
+			ATH_SCHEDULE_TQUEUE(&sc->sc_bmisstq, &needmark);
+		}
+		if (status & HAL_INT_MIB) {
+			sc->sc_stats.ast_mib++;
+			/*
+			 * Disable interrupts until we service the MIB
+			 * interrupt; otherwise it will continue to fire.
+			 */
+			ath_hal_intrset(ah, 0);
+			/*
+			 * Let the HAL handle the event.  We assume it will
+			 * clear whatever condition caused the interrupt.
+			 */
+			ath_hal_mibevent(ah, &sc->sc_halstats);
+			ath_hal_intrset(ah, sc->sc_imask);
+		}
+	}
+	if (needmark)	/* a tasklet was scheduled above; run bottom halves */
+		mark_bh(IMMEDIATE_BH);
+	return IRQ_HANDLED;
+}
+
+static void
+ath_radar_task(struct work_struct *thr)
+{
+	struct ath_softc *sc = container_of(thr, struct ath_softc, sc_radartask);
+	struct ath_hal *ah = sc->sc_ah;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_channel ichan;
+	HAL_CHANNEL hchan;
+
+	sc->sc_rtasksched = 0;	/* allow the task to be scheduled again */
+	if (ath_hal_procdfs(ah, &hchan)) {
+		/*
+		 * DFS was found, initiate channel change
+		 */
+		ichan.ic_ieee = ath_hal_mhz2ieee(ah, hchan.channel, hchan.channelFlags);
+		ichan.ic_freq = hchan.channel;
+		ichan.ic_flags = hchan.channelFlags;
+
+		if ((sc->sc_curchan.channel == hchan.channel) &&
+		    (sc->sc_curchan.channelFlags == hchan.channelFlags)) { /* was hchan.channel: flags never matched */
+			if (hchan.privFlags & CHANNEL_INTERFERENCE)
+				sc->sc_curchan.privFlags |= CHANNEL_INTERFERENCE;
+		}
+		ieee80211_mark_dfs(ic, &ichan);
+		if (((ic->ic_flags_ext & IEEE80211_FEXT_MARKDFS) == 0) &&
+		    (ic->ic_opmode == IEEE80211_M_HOSTAP)) {
+			sc->sc_dfstest_ieeechan = ic->ic_curchan->ic_ieee;	/* channel to return to after the test */
+			sc->sc_dfstesttimer.function = ath_dfs_test_return;
+			sc->sc_dfstesttimer.expires = jiffies + (sc->sc_dfstesttime * HZ);
+			sc->sc_dfstesttimer.data = (unsigned long)sc;
+			if (sc->sc_dfstest == 0) {	/* only one pending test timer at a time */
+				sc->sc_dfstest = 1;
+				add_timer(&sc->sc_dfstesttimer);
+			}
+		}
+	}
+}
+
+static void
+ath_dfs_test_return(unsigned long data)
+{
+	struct ath_softc *sc = (struct ath_softc *)data; 
+	struct ieee80211com *ic = &sc->sc_ic;
+
+	sc->sc_dfstest = 0;	/* timer fired; a new DFS test may be armed */
+	ieee80211_dfs_test_return(ic, sc->sc_dfstest_ieeechan);
+}
+
+static void
+ath_fatal_tasklet(TQUEUE_ARG data)
+{
+	/* Deferred handler for HAL_INT_FATAL: log and fully reset the chip. */
+	struct net_device *ndev = (struct net_device *)data;
+	printk("%s: hardware error; resetting\n", ndev->name);
+	ath_reset(ndev);
+}
+
+static void
+ath_rxorn_tasklet(TQUEUE_ARG data)
+{
+	/* Deferred handler for HAL_INT_RXORN: log the overrun and reset. */
+	struct net_device *ndev = (struct net_device *)data;
+	printk("%s: rx FIFO overrun; resetting\n", ndev->name);
+	ath_reset(ndev);
+}
+
+static void
+ath_bmiss_tasklet(TQUEUE_ARG data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct ath_softc *sc = dev->priv;
+
+	if (time_before(jiffies, sc->sc_ic.ic_bmiss_guard)) {
+		/* Beacon miss interrupt occurred too shortly after the last
+		 * beacon timer configuration. Ignore it as it could be spurious. */
+		DPRINTF(sc, ATH_DEBUG_ANY, "%s: ignored\n", __func__);
+	} else {
+		DPRINTF(sc, ATH_DEBUG_ANY, "%s\n", __func__);
+		ieee80211_beacon_miss(&sc->sc_ic);	/* let the 802.11 layer react */
+	}
+}
+
+static u_int
+ath_chan2flags(struct ieee80211_channel *chan)
+{
+	u_int flags;
+	static const u_int modeflags[] = {
+		0,		/* IEEE80211_MODE_AUTO    */
+		CHANNEL_A,	/* IEEE80211_MODE_11A     */
+		CHANNEL_B,	/* IEEE80211_MODE_11B     */
+		CHANNEL_PUREG,	/* IEEE80211_MODE_11G     */
+		0,		/* IEEE80211_MODE_FH      */
+		CHANNEL_108A,	/* IEEE80211_MODE_TURBO_A */
+		CHANNEL_108G,	/* IEEE80211_MODE_TURBO_G */
+	};
+
+	flags = modeflags[ieee80211_chan2mode(chan)];	/* NB: assumes chan2mode() < table size -- TODO confirm */
+
+	if (IEEE80211_IS_CHAN_HALF(chan))
+		flags |= CHANNEL_HALF;		/* 10 MHz channel */
+	else if (IEEE80211_IS_CHAN_QUARTER(chan))
+		flags |= CHANNEL_QUARTER;	/* 5 MHz channel */
+
+	return flags;
+}
+
+/*
+ * Context: process context
+ */
+
+static int
+ath_init(struct net_device *dev)
+{
+	struct ath_softc *sc = dev->priv;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ath_hal *ah = sc->sc_ah;
+	HAL_STATUS status;
+	int error = 0;
+
+	ATH_LOCK(sc);	/* serialize against ath_stop() */
+
+	DPRINTF(sc, ATH_DEBUG_RESET, "%s: mode %d\n", __func__, ic->ic_opmode);
+
+	/*
+	 * Stop anything previously setup.  This is safe
+	 * whether this is the first time through or not.
+	 */
+	ath_stop_locked(dev);
+
+#ifdef ATH_CAP_TPC
+	ath_hal_setcapability(sc->sc_ah, HAL_CAP_TPC, 0, 1, NULL);
+#endif
+
+	/* Whether we should enable h/w TKIP MIC */
+	if ((ic->ic_caps & IEEE80211_C_WME) == 0)
+		ath_hal_setcapability(sc->sc_ah, HAL_CAP_TKIP_MIC, 0, 0, NULL);
+	else {
+		if (((ic->ic_caps & IEEE80211_C_WME_TKIPMIC) == 0) &&
+		    (ic->ic_flags & IEEE80211_F_WME))
+			ath_hal_setcapability(sc->sc_ah, HAL_CAP_TKIP_MIC, 0, 0, NULL);
+		else
+			ath_hal_setcapability(sc->sc_ah, HAL_CAP_TKIP_MIC, 0, 1, NULL);
+	}
+		
+	/*
+	 * Flush the skb's allocated for receive in case the rx
+	 * buffer size changes.  This could be optimized but for
+	 * now we do it each time under the assumption it does
+	 * not happen often.
+	 */
+	ath_flushrecv(sc);
+
+	/*
+	 * The basic interface to setting the hardware in a good
+	 * state is ``reset''.  On return the hardware is known to
+	 * be powered up and with interrupts disabled.  This must
+	 * be followed by initialization of the appropriate bits
+	 * and then setup of the interrupt mask.
+	 */
+	sc->sc_curchan.channel = ic->ic_curchan->ic_freq;
+	sc->sc_curchan.channelFlags = ath_chan2flags(ic->ic_curchan);
+	if (!ath_hal_reset(ah, sc->sc_opmode, &sc->sc_curchan, AH_FALSE, &status)) {
+		printk("%s: unable to reset hardware: '%s' (HAL status %u) "
+			"(freq %u flags 0x%x)\n", dev->name,
+			ath_get_hal_status_desc(status), status,
+			sc->sc_curchan.channel, sc->sc_curchan.channelFlags);
+		error = -EIO;
+		goto done;
+	}
+
+	if (sc->sc_softled)
+		ath_hal_gpioCfgOutput(ah, sc->sc_ledpin);
+	/*
+	 * This is needed only to setup initial state
+	 * but it's best done after a reset.
+	 */
+	ath_update_txpow(sc);
+
+	/* Set the default RX antenna; it may get lost on reset. */
+	ath_setdefantenna(sc, sc->sc_defant);
+
+	/*
+	 * Setup the hardware after reset: the key cache
+	 * is filled as needed and the receive engine is
+	 * set going.  Frame transmit is handled entirely
+	 * in the frame output path; there's nothing to do
+	 * here except setup the interrupt mask.
+	 */
+#if 0
+	ath_initkeytable(sc);		/* XXX still needed? */
+#endif
+	if (ath_startrecv(sc) != 0) {
+		printk("%s: unable to start recv logic\n", dev->name);
+		error = -EIO;
+		goto done;
+	}
+	/* Enable interrupts. */
+	sc->sc_imask = HAL_INT_RX | HAL_INT_TX
+		  | HAL_INT_RXEOL | HAL_INT_RXORN
+		  | HAL_INT_FATAL | HAL_INT_GLOBAL;
+	/*
+	 * Enable MIB interrupts when there are hardware phy counters.
+	 * Note we only do this (at the moment) for station mode.
+	 */
+	if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
+		sc->sc_imask |= HAL_INT_MIB;
+	ath_hal_intrset(ah, sc->sc_imask);
+
+	/*
+	 * The hardware should be ready to go now so it's safe
+	 * to kick the 802.11 state machine as it's likely to
+	 * immediately call back to us to send mgmt frames.
+	 */
+	ath_chan_change(sc, ic->ic_curchan);
+	ath_set_ack_bitrate(sc, sc->sc_ackrate);
+	dev->flags |= IFF_RUNNING;		/* we are ready to go */
+	ieee80211_start_running(ic);		/* start all VAPs */
+#ifdef ATH_TX99_DIAG
+	if (sc->sc_tx99 != NULL)
+		sc->sc_tx99->start(sc->sc_tx99);
+#endif
+done:
+	ATH_UNLOCK(sc);
+	return error;	/* 0 on success, -EIO on hardware failure */
+}
+
+/* Caller must lock ATH_LOCK 
+ *
+ * Context: softIRQ
+ */ 
+static int
+ath_stop_locked(struct net_device *dev)
+{
+	struct ath_softc *sc = dev->priv;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ath_hal *ah = sc->sc_ah;
+
+	DPRINTF(sc, ATH_DEBUG_RESET, "%s: invalid %u flags 0x%x\n",
+		__func__, sc->sc_invalid, dev->flags);
+
+	if (dev->flags & IFF_RUNNING) {
+		/*
+		 * Shutdown the hardware and driver:
+		 *    stop output from above
+		 *    reset 802.11 state machine
+		 *	(sends station deassoc/deauth frames)
+		 *    turn off timers
+		 *    disable interrupts
+		 *    clear transmit machinery
+		 *    clear receive machinery
+		 *    turn off the radio
+		 *    reclaim beacon resources
+		 *
+		 * Note that some of this work is not possible if the
+		 * hardware is gone (invalid).
+		 */
+#ifdef ATH_TX99_DIAG
+		if (sc->sc_tx99 != NULL)
+			sc->sc_tx99->stop(sc->sc_tx99);
+#endif
+		netif_stop_queue(dev);		/* XXX re-enabled by ath_newstate */
+		dev->flags &= ~IFF_RUNNING;	/* NB: avoid recursion */
+		ieee80211_stop_running(ic);	/* stop all VAPs */
+		if (!sc->sc_invalid) {
+			ath_hal_intrset(ah, 0);	/* mask all interrupts */
+			if (sc->sc_softled) {
+				del_timer(&sc->sc_ledtimer);
+				ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
+				sc->sc_blinking = 0;
+				sc->sc_ledstate = 1;
+			}
+		}
+		ath_draintxq(sc);	/* NB: safe even when hardware is gone */
+		if (!sc->sc_invalid) {
+			ath_stoprecv(sc);
+			ath_hal_phydisable(ah);	/* radio off */
+		} else
+			sc->sc_rxlink = NULL;
+		ath_beacon_free(sc);		/* XXX needed? */
+	} else
+		ieee80211_stop_running(ic);	/* stop other VAPs */
+
+	if (sc->sc_softled)
+		ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
+	
+	return 0;	/* always succeeds */
+}
+
+/*
+ * Stop the device, grabbing the top-level lock to protect
+ * against concurrent entry through ath_init (which can happen
+ * if another thread does a system call and the thread doing the
+ * stop is preempted).
+ */
+static int
+ath_stop(struct net_device *dev)
+{
+	struct ath_softc *sc = dev->priv;
+	int error;
+
+	ATH_LOCK(sc);	/* serialize against ath_init() */
+
+	if (!sc->sc_invalid)
+		ath_hal_setpower(sc->sc_ah, HAL_PM_AWAKE);	/* wake chip before touching it */
+
+	error = ath_stop_locked(dev);
+#if 0
+	if (error == 0 && !sc->sc_invalid) {
+		/*
+		 * Set the chip in full sleep mode.  Note that we are
+		 * careful to do this only when bringing the interface
+		 * completely to a stop.  When the chip is in this state
+		 * it must be carefully woken up or references to
+		 * registers in the PCI clock domain may freeze the bus
+		 * (and system).  This varies by chip and is mostly an
+		 * issue with newer parts that go to sleep more quickly.
+		 */
+		ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
+	}
+#endif
+	ATH_UNLOCK(sc);
+
+	return error;	/* propagate ath_stop_locked() result */
+}
+
+static int 
+ar_device(int devid)
+{	/* Map a PCI device id to its Atheros chip family (0 = unknown). */
+	switch (devid) {
+	case AR5210_DEFAULT:
+	case AR5210_PROD:
+	case AR5210_AP:
+		return 5210;
+	case AR5211_DEFAULT:
+	case AR5311_DEVID:
+	case AR5211_LEGACY:
+	case AR5211_FPGA11B:
+		return 5211;
+	case AR5212_DEFAULT:
+	case AR5212_DEVID:
+	case AR5212_FPGA:
+	case AR5212_DEVID_IBM:
+	case AR5212_AR5312_REV2:
+	case AR5212_AR5312_REV7:
+	case AR5212_AR2313_REV8:
+	case AR5212_AR2315_REV6:
+	case AR5212_AR2315_REV7:
+	case AR5212_AR2317_REV1:
+	case AR5212_DEVID_0014:
+	case AR5212_DEVID_0015:
+	case AR5212_DEVID_0016:
+	case AR5212_DEVID_0017:
+	case AR5212_DEVID_0018:
+	case AR5212_DEVID_0019:
+	case AR5212_AR2413:
+	case AR5212_AR5413:
+	case AR5212_AR5424:
+	case AR5212_DEVID_FF19:
+		return 5212;
+	case AR5213_SREV_1_0:
+	case AR5213_SREV_REG:
+	case AR_SUBVENDOR_ID_NOG:
+	case AR_SUBVENDOR_ID_NEW_A:
+		return 5213;
+	default: 
+		return 0; /* unknown */
+	}
+}
+
+
+static int 
+ath_set_ack_bitrate(struct ath_softc *sc, int high) 
+{	/* Select high/low ACK bitrate; XXX ar_device() evaluated twice. */
+	struct ath_hal *ah = sc->sc_ah;
+	if (ar_device(sc->devid) == 5212 || ar_device(sc->devid) == 5213) {
+		/* set ack to be sent at low bit-rate */
+		/* registers taken from the OpenBSD 5212 HAL */
+#define AR5K_AR5212_STA_ID1                     0x8004
+#define AR5K_AR5212_STA_ID1_ACKCTS_6MB          0x01000000
+#define AR5K_AR5212_STA_ID1_BASE_RATE_11B       0x02000000
+		u_int32_t v = AR5K_AR5212_STA_ID1_BASE_RATE_11B | AR5K_AR5212_STA_ID1_ACKCTS_6MB;
+		if (high) {
+			OS_REG_WRITE(ah, AR5K_AR5212_STA_ID1, OS_REG_READ(ah, AR5K_AR5212_STA_ID1) & ~v);	/* clear low-rate bits */
+		} else {
+			OS_REG_WRITE(ah, AR5K_AR5212_STA_ID1, OS_REG_READ(ah, AR5K_AR5212_STA_ID1) | v);	/* force low-rate ACK/CTS */
+		}
+		return 0;	/* applied */
+	}
+	return 1;	/* unsupported chip family */
+}
+
+/*
+ * Reset the hardware w/o losing operational state.  This is
+ * basically a more efficient way of doing ath_stop, ath_init,
+ * followed by state transitions to the current 802.11
+ * operational state.  Used to recover from errors rx overrun
+ * and to reset the hardware when rf gain settings must be reset.
+ */
+static int
+ath_reset(struct net_device *dev)
+{
+	struct ath_softc *sc = dev->priv;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ath_hal *ah = sc->sc_ah;
+	struct ieee80211_channel *c;
+	HAL_STATUS status;
+
+	/*
+	 * Convert to a HAL channel description with the flags
+	 * constrained to reflect the current operating mode.
+	 */
+	c = ic->ic_curchan;
+	sc->sc_curchan.channel = c->ic_freq;
+	sc->sc_curchan.channelFlags = ath_chan2flags(c);
+
+	ath_hal_intrset(ah, 0);		/* disable interrupts */
+	ath_draintxq(sc);		/* stop xmit side */
+	ath_stoprecv(sc);		/* stop recv side */
+	/* NB: indicate channel change so we do a full reset */
+	if (!ath_hal_reset(ah, sc->sc_opmode, &sc->sc_curchan, AH_TRUE, &status))
+		printk("%s: %s: unable to reset hardware: '%s' (HAL status %u)\n",
+			dev->name, __func__, ath_get_hal_status_desc(status), status);
+	ath_update_txpow(sc);		/* update tx power state */
+	if (ath_startrecv(sc) != 0)	/* restart recv */
+		printk("%s: %s: unable to start recv logic\n",
+			dev->name, __func__);
+	if (sc->sc_softled)
+		ath_hal_gpioCfgOutput(ah, sc->sc_ledpin);	/* LED pin lost on reset */
+
+	/*
+	 * We may be doing a reset in response to an ioctl
+	 * that changes the channel so update any state that
+	 * might change as a result.
+	 */
+	ath_chan_change(sc, c);
+	if (sc->sc_beacons)
+		ath_beacon_config(sc, NULL);	/* restart beacons */
+	ath_hal_intrset(ah, sc->sc_imask);	/* re-enable interrupts */
+	ath_set_ack_bitrate(sc, sc->sc_ackrate);
+	netif_wake_queue(dev);		/* restart xmit */
+#ifdef ATH_SUPERG_XR
+	/*
+	 * restart the group polls.
+	 */
+	if (sc->sc_xrgrppoll) {
+		struct ieee80211vap *vap;
+		TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
+			if (vap && (vap->iv_flags & IEEE80211_F_XR))
+				break;
+		ath_grppoll_stop(vap);
+		ath_grppoll_start(vap, sc->sc_xrpollcount);
+	}
+#endif
+	return 0;	/* NB: HAL failures are logged, not propagated */
+}
+
+
+/* Swap transmit descriptor.
+ * if AH_NEED_DESC_SWAP flag is not defined this becomes a "null"
+ * function.
+ */
+static __inline void
+ath_desc_swap(struct ath_desc *ds)
+{
+#ifdef AH_NEED_DESC_SWAP
+	ds->ds_link = cpu_to_le32(ds->ds_link);	/* byte-swap every descriptor word for the hardware */
+	ds->ds_data = cpu_to_le32(ds->ds_data);
+	ds->ds_ctl0 = cpu_to_le32(ds->ds_ctl0);
+	ds->ds_ctl1 = cpu_to_le32(ds->ds_ctl1);
+	ds->ds_hw[0] = cpu_to_le32(ds->ds_hw[0]);
+	ds->ds_hw[1] = cpu_to_le32(ds->ds_hw[1]);
+#endif
+}
+
+/*
+ * Insert a buffer on a txq 
+ * 
+ */
+static __inline void
+ath_tx_txqaddbuf(struct ath_softc *sc, struct ieee80211_node *ni, 
+	struct ath_txq *txq, struct ath_buf *bf, 
+	struct ath_desc *lastds, int framelen)
+{
+	struct ath_hal *ah = sc->sc_ah;
+
+	/*
+	 * Insert the frame on the outbound list and
+	 * pass it on to the hardware.
+	 */
+	ATH_TXQ_LOCK(txq);
+	if (ni && ni->ni_vap && txq == &ATH_VAP(ni->ni_vap)->av_mcastq) {
+		/*
+		 * The CAB queue is started from the SWBA handler since
+		 * frames only go out on DTIM and to avoid possible races.
+		 */
+		ath_hal_intrset(ah, sc->sc_imask & ~HAL_INT_SWBA);	/* block SWBA while linking */
+		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
+		DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: txq depth = %d\n", __func__, txq->axq_depth);
+		if (txq->axq_link != NULL) {
+#ifdef AH_NEED_DESC_SWAP
+			*txq->axq_link = cpu_to_le32(bf->bf_daddr);
+#else
+			*txq->axq_link = bf->bf_daddr;
+#endif
+			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: link[%u](%p)=%llx (%p)\n",
+				__func__,
+				txq->axq_qnum, txq->axq_link,
+				ito64(bf->bf_daddr), bf->bf_desc);
+		}
+		txq->axq_link = &lastds->ds_link;	/* next frame chains off our last descriptor */
+		ath_hal_intrset(ah, sc->sc_imask);
+	} else {
+		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
+		DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: txq depth = %d\n", __func__, txq->axq_depth);
+		if (txq->axq_link == NULL) {
+			ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);	/* queue was empty: prime TXDP */
+			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: TXDP[%u] = %llx (%p)\n",
+				__func__,
+				txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
+		} else {
+#ifdef AH_NEED_DESC_SWAP
+			*txq->axq_link = cpu_to_le32(bf->bf_daddr);
+#else
+			*txq->axq_link = bf->bf_daddr;
+#endif
+			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: link[%u] (%p)=%llx (%p)\n",
+				__func__,
+				txq->axq_qnum, txq->axq_link,
+				ito64(bf->bf_daddr), bf->bf_desc);
+		}
+		txq->axq_link = &lastds->ds_link;
+		ath_hal_txstart(ah, txq->axq_qnum);
+		sc->sc_dev->trans_start = jiffies;	/* feed the netdev tx watchdog */
+	}
+	ATH_TXQ_UNLOCK(txq);
+
+	sc->sc_devstats.tx_packets++;
+	sc->sc_devstats.tx_bytes += framelen;
+}
+
+static int 
+dot11_to_ratecode(struct ath_softc *sc, const HAL_RATE_TABLE *rt, int dot11)
+{
+	/* Map an 802.11 rate value to the HAL rate code, falling back to
+	 * the minimum configured rate when the lookup is out of range. */
+	int rix = sc->sc_rixmap[dot11 & IEEE80211_RATE_VAL];
+	return (rix >= 0 && rix < rt->rateCount)
+		? rt->info[rix].rateCode : rt->info[sc->sc_minrateix].rateCode;
+}
+
+
+static int 
+ath_tx_startraw(struct net_device *dev, struct ath_buf *bf, struct sk_buff *skb) 
+{
+	struct ath_softc *sc = dev->priv;
+	struct ath_hal *ah = sc->sc_ah;
+	struct ieee80211_phy_params *ph = (struct ieee80211_phy_params *) (skb->cb + sizeof(struct ieee80211_cb));
+	const HAL_RATE_TABLE *rt;
+	int pktlen;
+	int hdrlen;
+	HAL_PKT_TYPE atype;
+	u_int flags;
+	int keyix;
+	int try0;
+	int power;
+	u_int8_t antenna, txrate;
+	struct ath_txq *txq=NULL;
+	struct ath_desc *ds=NULL;
+	struct ieee80211_frame *wh; 
+	
+	wh = (struct ieee80211_frame *) skb->data;
+	try0 = ph->try0;	/* caller-supplied per-frame PHY parameters */
+	rt = sc->sc_currates;
+	txrate = dot11_to_ratecode(sc, rt, ph->rate0);
+	power = ph->power > 60 ? 60 : ph->power;	/* clamp tx power to 30 dBm (half-dBm units) -- TODO confirm units */
+	hdrlen = ieee80211_anyhdrsize(wh);
+	pktlen = skb->len + IEEE80211_CRC_LEN;
+	
+	keyix = HAL_TXKEYIX_INVALID;	/* raw frames bypass the key cache */
+	flags = HAL_TXDESC_INTREQ | HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */
+	
+	bf->bf_skbaddr = bus_map_single(sc->sc_bdev,
+					skb->data, pktlen, BUS_DMA_TODEVICE);
+	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: skb %p [data %p len %u] skbaddr %llx\n",
+		__func__, skb, skb->data, skb->len, ito64(bf->bf_skbaddr));
+	
+	
+	bf->bf_skb = skb;
+	bf->bf_node = NULL;	/* no node reference for raw transmit */
+	
+#ifdef ATH_SUPERG_FF
+	bf->bf_numdesc = 1;
+#endif
+	
+	/* setup descriptors */
+	ds = bf->bf_desc;
+	rt = sc->sc_currates;
+	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
+	
+	
+	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
+		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
+		sc->sc_stats.ast_tx_noack++;
+		try0 = 1;	/* no point retrying an unacked frame */
+	}
+	atype = HAL_PKT_TYPE_NORMAL;		/* default */
+	txq = sc->sc_ac2q[skb->priority & 0x3];
+	
+	
+	flags |= HAL_TXDESC_INTREQ;
+	antenna = sc->sc_txantenna;
+	
+	/* XXX check return value? */
+	ath_hal_setuptxdesc(ah, ds
+			    , pktlen	/* packet length */
+			    , hdrlen	/* header length */
+			    , atype	/* Atheros packet type */
+			    , power	/* txpower */
+			    , txrate, try0 /* series 0 rate/tries */
+			    , keyix	/* key cache index */
+			    , antenna	/* antenna mode */
+			    , flags	/* flags */
+			    , 0		/* rts/cts rate */
+			    , 0		/* rts/cts duration */
+			    , 0		/* comp icv len */
+			    , 0		/* comp iv len */
+			    , ATH_COMP_PROC_NO_COMP_NO_CCS /* comp scheme */
+			   );
+
+	if (ph->try1) {
+		ath_hal_setupxtxdesc(sc->sc_ah, ds
+			, dot11_to_ratecode(sc, rt, ph->rate1), ph->try1 /* series 1 */
+			, dot11_to_ratecode(sc, rt, ph->rate2), ph->try2 /* series 2 */
+			, dot11_to_ratecode(sc, rt, ph->rate3), ph->try3 /* series 3 */
+			);	
+	}
+	bf->bf_flags = flags;			/* record for post-processing */
+
+	ds->ds_link = 0;	/* single-descriptor frame */
+	ds->ds_data = bf->bf_skbaddr;
+	
+	ath_hal_filltxdesc(ah, ds
+			   , skb->len	/* segment length */
+			   , AH_TRUE	/* first segment */
+			   , AH_TRUE	/* last segment */
+			   , ds		/* first descriptor */
+			   );
+	
+	/* NB: The desc swap function becomes void, 
+	 * if descriptor swapping is not enabled
+	 */
+	ath_desc_swap(ds);
+	
+	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: Q%d: %08x %08x %08x %08x %08x %08x\n",
+		__func__, M_FLAG_GET(skb, M_UAPSD) ? 0 : txq->axq_qnum, ds->ds_link, ds->ds_data,
+		ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]);
+		
+	ath_tx_txqaddbuf(sc, NULL, txq, bf, ds, pktlen);
+	return 0;	/* always succeeds; DMA/HAL errors are not checked here */
+}
+
+#ifdef ATH_SUPERG_FF
+/*
+ * Flush FF staging queue.
+ */
+static int
+ath_ff_neverflushtestdone(struct ath_txq *txq, struct ath_buf *bf)
+{
+	return 0;	/* never "done": caller flushes the whole staging queue */
+}
+
+static int
+ath_ff_ageflushtestdone(struct ath_txq *txq, struct ath_buf *bf)
+{
+	/* Stop flushing (return 1) once the staged frame is still younger
+	 * than the maximum staging age; 0 means keep flushing. */
+	return ((txq->axq_totalqueued - bf->bf_queueage) < ATH_FF_STAGEQAGEMAX)
+		? 1 : 0;
+}
+
+/* Caller must not hold ATH_TXQ_LOCK and ATH_TXBUF_LOCK
+ *
+ * Context: softIRQ
+ */
+static void
+ath_ffstageq_flush(struct ath_softc *sc, struct ath_txq *txq,
+	int (*ath_ff_flushdonetest)(struct ath_txq *txq, struct ath_buf *bf))
+{
+	struct ath_buf *bf_ff = NULL;
+	struct ieee80211_node *ni = NULL;
+	int pktlen;
+	int framecnt;
+
+	for (;;) {
+		ATH_TXQ_LOCK(txq);
+
+		bf_ff = TAILQ_LAST(&txq->axq_stageq, axq_headtype);	/* oldest staged frame */
+		if ((!bf_ff) || ath_ff_flushdonetest(txq, bf_ff))
+		{
+			ATH_TXQ_UNLOCK(txq);
+			break;	/* queue empty or predicate says stop */
+		}
+
+		ni = bf_ff->bf_node;
+		KASSERT(ATH_NODE(ni)->an_tx_ffbuf[bf_ff->bf_skb->priority],
+			("no bf_ff on staging queue %p", bf_ff));
+		ATH_NODE(ni)->an_tx_ffbuf[bf_ff->bf_skb->priority] = NULL;
+		TAILQ_REMOVE(&txq->axq_stageq, bf_ff, bf_stagelist);
+
+		ATH_TXQ_UNLOCK(txq);	/* drop lock before encap/xmit */
+
+		/* encap and xmit */
+		bf_ff->bf_skb = ieee80211_encap(ni, bf_ff->bf_skb, &framecnt);
+		if (bf_ff->bf_skb == NULL) {
+			DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
+				"%s: discard, encapsulation failure\n", __func__);
+			sc->sc_stats.ast_tx_encap++;
+			goto bad;
+		}
+		pktlen = bf_ff->bf_skb->len;	/* NB: don't reference skb below */
+		if (ath_tx_start(sc->sc_dev, ni, bf_ff, bf_ff->bf_skb, 0) == 0)
+			continue;
+	bad:
+		ieee80211_free_node(ni);	/* release ref held by the staging queue */
+		if (bf_ff->bf_skb != NULL) {
+			dev_kfree_skb(bf_ff->bf_skb);
+			bf_ff->bf_skb = NULL;
+		}
+		bf_ff->bf_node = NULL;
+
+		ATH_TXBUF_LOCK_IRQ(sc);
+		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf_ff, bf_list);	/* return buffer to free list */
+		ATH_TXBUF_UNLOCK_IRQ(sc);
+	}
+}
+#endif
+
+#define ATH_HARDSTART_GET_TX_BUF_WITH_LOCK /* dequeue a free tx buf into bf; may stop the tx queue */ \
+	ATH_TXBUF_LOCK_IRQ(sc);						\
+	bf = STAILQ_FIRST(&sc->sc_txbuf); /* NULL when pool exhausted */ \
+	if (bf != NULL) {						\
+		STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);		\
+		STAILQ_INSERT_TAIL(&bf_head, bf, bf_list);              \
+	}                                                               \
+	/* XXX use a counter and leave at least one for mgmt frames */	\
+	if (STAILQ_EMPTY(&sc->sc_txbuf)) {				\
+		DPRINTF(sc, ATH_DEBUG_XMIT,				\
+			"%s: stop queue\n", __func__);			\
+		sc->sc_stats.ast_tx_qstop++;				\
+		netif_stop_queue(dev); 					\
+		sc->sc_devstopped = 1;					\
+		ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, NULL); 		\
+	}								\
+	ATH_TXBUF_UNLOCK_IRQ(sc);					\
+	if (bf == NULL) {		/* NB: should not happen */	\
+		DPRINTF(sc,ATH_DEBUG_XMIT,				\
+			"%s: discard, no xmit buf\n", __func__);	\
+		sc->sc_stats.ast_tx_nobuf++;				\
+	}
+
+/*
+ * Transmit a data packet.  On failure caller is
+ * assumed to reclaim the resources.
+ *
+ * Context: process context with BH's disabled
+ */
+static int
+ath_hardstart(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ath_softc *sc = dev->priv;
+	struct ieee80211_node *ni = NULL;
+	struct ath_buf *bf = NULL;
+	struct ieee80211_cb *cb = (struct ieee80211_cb *) skb->cb;
+	struct ether_header *eh;
+	STAILQ_HEAD(tmp_bf_head, ath_buf) bf_head;	/* bufs claimed for this skb chain */
+	struct ath_buf *tbf, *tempbf;
+	struct sk_buff *tskb;
+	int framecnt;
+	int requeue = 0;	/* set when the kernel should retry the skb */
+#ifdef ATH_SUPERG_FF
+	int pktlen;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ath_node *an;
+	struct ath_txq *txq = NULL;
+	int ff_flush;
+	struct ieee80211vap *vap;
+#endif
+
+	if ((dev->flags & IFF_RUNNING) == 0 || sc->sc_invalid) {
+		DPRINTF(sc, ATH_DEBUG_XMIT,
+			"%s: discard, invalid %d flags %x\n",
+			__func__, sc->sc_invalid, dev->flags);
+		sc->sc_stats.ast_tx_invalid++;
+		return -ENETDOWN;
+	}
+
+	STAILQ_INIT(&bf_head);
+
+	if (cb->flags & M_RAW) {	/* raw 802.11 frame: bypass encap entirely */
+		ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
+		if (bf == NULL)
+			goto hardstart_fail;
+		ath_tx_startraw(dev, bf,skb);
+		return NETDEV_TX_OK;
+	}
+
+	eh = (struct ether_header *) skb->data;
+	ni = cb->ni;		/* NB: always passed down by 802.11 layer */
+	if (ni == NULL) {
+		/* NB: this happens if someone marks the underlying device up */
+		DPRINTF(sc, ATH_DEBUG_XMIT,
+			"%s: discard, no node in cb\n", __func__);
+		goto hardstart_fail;
+	}
+#ifdef ATH_SUPERG_FF
+	vap = ni->ni_vap;
+
+	if (M_FLAG_GET(skb, M_UAPSD)) {
+		/* bypass FF handling */
+		ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
+		if (bf == NULL)
+			goto hardstart_fail;
+		goto ff_bypass;
+	}
+
+	/*
+	 * Fast frames check.
+	 */
+	ATH_FF_MAGIC_CLR(skb);
+	an = ATH_NODE(ni);
+
+	txq = sc->sc_ac2q[skb->priority];
+
+	if (txq->axq_depth > TAIL_DROP_COUNT) {
+		sc->sc_stats.ast_tx_discard++;
+		/* queue is full, let the kernel backlog the skb */
+		requeue = 1;
+		goto hardstart_fail;
+	}
+
+	/* NB: use this lock to protect an->an_ff_txbuf in athff_can_aggregate()
+	 *     call too.
+	 */
+	ATH_TXQ_LOCK(txq);
+	if (athff_can_aggregate(sc, eh, an, skb, vap->iv_fragthreshold, &ff_flush)) {
+
+		if (an->an_tx_ffbuf[skb->priority]) { /* i.e., frame on the staging queue */
+			bf = an->an_tx_ffbuf[skb->priority];
+
+			/* get (and remove) the frame from staging queue */
+			TAILQ_REMOVE(&txq->axq_stageq, bf, bf_stagelist);
+			an->an_tx_ffbuf[skb->priority] = NULL;
+
+			ATH_TXQ_UNLOCK(txq);
+
+			/*
+			 * chain skbs and add FF magic
+			 *
+			 * NB: the arriving skb should not be on a list (skb->list),
+			 *     so "re-using" the skb next field should be OK.
+			 */
+			bf->bf_skb->next = skb;
+			skb->next = NULL;
+			skb = bf->bf_skb;
+			ATH_FF_MAGIC_PUT(skb);
+
+			/* decrement extra node reference made when an_tx_ffbuf[] was set */
+			//ieee80211_free_node(ni); /* XXX where was it set ? */
+
+			DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
+				"%s: aggregating fast-frame\n", __func__);
+		} else {
+			/* NB: careful grabbing the TX_BUF lock since still holding the txq lock.
+			 *     this could be avoided by always obtaining the txbuf earlier,
+			 *     but the "if" portion of this "if/else" clause would then need
+			 *     to give the buffer back.
+			 */
+			ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
+			if (bf == NULL) {
+				ATH_TXQ_UNLOCK(txq);
+				goto hardstart_fail;
+			}
+			DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
+				"%s: adding to fast-frame stage Q\n", __func__);
+
+			bf->bf_skb = skb;
+			bf->bf_node = ni;
+			bf->bf_queueage = txq->axq_totalqueued;
+			an->an_tx_ffbuf[skb->priority] = bf;
+
+			TAILQ_INSERT_HEAD(&txq->axq_stageq, bf, bf_stagelist);
+
+			ATH_TXQ_UNLOCK(txq);
+
+			return NETDEV_TX_OK;	/* staged; will be sent on a later aggregate or flush */
+		}
+	} else {
+		if (ff_flush) {	/* staged frame can't aggregate: push it out now */
+			struct ath_buf *bf_ff = an->an_tx_ffbuf[skb->priority];
+
+			TAILQ_REMOVE(&txq->axq_stageq, bf_ff, bf_stagelist);
+			an->an_tx_ffbuf[skb->priority] = NULL;
+
+			ATH_TXQ_UNLOCK(txq);
+
+			/* encap and xmit */
+			bf_ff->bf_skb = ieee80211_encap(ni, bf_ff->bf_skb, &framecnt);
+
+			if (bf_ff->bf_skb == NULL) {
+				DPRINTF(sc, ATH_DEBUG_XMIT,
+					"%s: discard, ff flush encap failure\n",
+					__func__);
+				sc->sc_stats.ast_tx_encap++;
+				goto ff_flushbad;
+			}
+			pktlen = bf_ff->bf_skb->len;	/* NB: don't reference skb below */
+			/* NB: ath_tx_start() will use ATH_TXBUF_LOCK_BH(). The _BH
+			 *     portion is not needed here since we're running at
+			 *     interrupt time, but should be harmless.
+			 */
+			if (ath_tx_start(dev, ni, bf_ff, bf_ff->bf_skb, 0))
+				goto ff_flushbad;
+			goto ff_flushdone;
+		ff_flushbad:
+			DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
+				"%s: ff stageq flush failure\n", __func__);
+			ieee80211_free_node(ni);
+			if (bf_ff->bf_skb) {
+				dev_kfree_skb(bf_ff->bf_skb);
+				bf_ff->bf_skb = NULL;
+			}
+			bf_ff->bf_node = NULL;
+
+			ATH_TXBUF_LOCK(sc);
+			STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf_ff, bf_list);
+			ATH_TXBUF_UNLOCK(sc);
+			goto ff_flushdone;
+		}
+		/*
+		 * XXX: out-of-order condition only occurs for AP mode and multicast.
+		 *      But, there may be no valid way to get this condition.
+		 */
+		else if (an->an_tx_ffbuf[skb->priority]) {
+			DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
+				"%s: Out-Of-Order fast-frame\n", __func__);
+			ATH_TXQ_UNLOCK(txq);
+		} else
+			ATH_TXQ_UNLOCK(txq);
+
+	ff_flushdone:
+		ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
+		if (bf == NULL)
+			goto hardstart_fail;
+	}
+
+ff_bypass:
+
+#else /* ATH_SUPERG_FF */
+
+	ATH_HARDSTART_GET_TX_BUF_WITH_LOCK;
+
+#endif /* ATH_SUPERG_FF */
+
+	/*
+	 * Encapsulate the packet for transmission.
+	 */
+	skb = ieee80211_encap(ni, skb, &framecnt);
+	if (skb == NULL) {
+		DPRINTF(sc, ATH_DEBUG_XMIT,
+			"%s: discard, encapsulation failure\n", __func__);
+		sc->sc_stats.ast_tx_encap++;
+		goto hardstart_fail;
+	}
+
+	if (framecnt > 1) {	/* encap produced a fragment chain */
+		int bfcnt;
+
+		/*
+		**  Allocate 1 ath_buf for each frame given 1 was 
+		**  already alloc'd
+		*/
+		ATH_TXBUF_LOCK(sc);
+		for (bfcnt = 1; bfcnt < framecnt; ++bfcnt) {
+			if ((tbf = STAILQ_FIRST(&sc->sc_txbuf)) != NULL) {
+				STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
+				STAILQ_INSERT_TAIL(&bf_head, tbf, bf_list);
+			}
+			else
+				break;
+			
+			ieee80211_ref_node(ni);	/* one node ref per fragment */
+		}
+
+		if (bfcnt != framecnt) {
+			if (!STAILQ_EMPTY(&bf_head)) {
+				/*
+				**  Failed to alloc enough ath_bufs;
+				**  return to sc_txbuf list
+				*/
+				STAILQ_FOREACH_SAFE(tbf, &bf_head, bf_list, tempbf) {
+					STAILQ_INSERT_TAIL(&sc->sc_txbuf, tbf, bf_list);
+				}
+			}
+			ATH_TXBUF_UNLOCK(sc);
+			STAILQ_INIT(&bf_head);
+			goto hardstart_fail;
+		}
+		ATH_TXBUF_UNLOCK(sc);
+
+		while ((bf = STAILQ_FIRST(&bf_head)) != NULL && skb != NULL) {
+			int nextfraglen = 0;
+
+			STAILQ_REMOVE_HEAD(&bf_head, bf_list);
+			tskb = skb->next;	/* detach this fragment from the chain */
+			skb->next = NULL;
+			if (tskb)
+				nextfraglen = tskb->len;
+
+			if (ath_tx_start(dev, ni, bf, skb, nextfraglen) != 0) {
+				STAILQ_INSERT_TAIL(&bf_head, bf, bf_list);	/* give buf back for cleanup */
+				skb->next = tskb;	/* re-chain so the failure path frees all fragments */
+				goto hardstart_fail;
+			}
+			skb = tskb;
+		}
+	} else {
+		if (ath_tx_start(dev, ni, bf, skb, 0) != 0) {
+			STAILQ_INSERT_TAIL(&bf_head, bf, bf_list);
+			goto hardstart_fail;
+		}
+	}
+
+#ifdef ATH_SUPERG_FF
+	/*
+	 * flush out stale FF from staging Q for applicable operational modes.
+	 */
+	/* XXX: ADHOC mode too? */
+	if (txq && ic->ic_opmode == IEEE80211_M_HOSTAP)
+		ath_ffstageq_flush(sc, txq, ath_ff_ageflushtestdone);
+#endif
+
+	return NETDEV_TX_OK;
+
+hardstart_fail:
+	if (!STAILQ_EMPTY(&bf_head)) {	/* return any claimed bufs to the free pool */
+		ATH_TXBUF_LOCK(sc);
+		STAILQ_FOREACH_SAFE(tbf, &bf_head, bf_list, tempbf) {
+			tbf->bf_skb = NULL;
+			tbf->bf_node = NULL;
+			
+			if (ni != NULL) 
+				ieee80211_free_node(ni);	/* drop the per-buf node ref */
+
+			STAILQ_INSERT_TAIL(&sc->sc_txbuf, tbf, bf_list);
+		}
+		ATH_TXBUF_UNLOCK(sc);
+	}
+	
+	/* let the kernel requeue the skb (don't free it!) */
+	if (requeue)
+		return NETDEV_TX_BUSY;
+
+	/* free sk_buffs */
+	while (skb) {
+		tskb = skb->next;
+		skb->next = NULL;
+		dev_kfree_skb(skb);
+		skb = tskb;
+	}
+	return NETDEV_TX_OK;	/* skb consumed (freed) even on failure */
+}
+#undef ATH_HARDSTART_GET_TX_BUF_WITH_LOCK
+
+/*
+ * Transmit a management frame.  On failure we reclaim the skbuff.
+ * Note that management frames come directly from the 802.11 layer
+ * and do not honor the send queue flow control.  Need to investigate
+ * using priority queuing so management frames can bypass data.
+ *
+ * Returns 0 on success or a negative errno on failure.
+ *
+ * Context: hwIRQ and softIRQ
+ */
+static int
+ath_mgtstart(struct ieee80211com *ic, struct sk_buff *skb)
+{
+	struct net_device *dev = ic->ic_dev;
+	struct ath_softc *sc = dev->priv;
+	struct ieee80211_node *ni = NULL;
+	struct ath_buf *bf = NULL;
+	struct ieee80211_cb *cb;
+	int error;
+
+	if ((dev->flags & IFF_RUNNING) == 0 || sc->sc_invalid) {
+		DPRINTF(sc, ATH_DEBUG_XMIT,
+			"%s: discard, invalid %d flags %x\n",
+			__func__, sc->sc_invalid, dev->flags);
+		sc->sc_stats.ast_tx_invalid++;
+		error = -ENETDOWN;
+		goto bad;
+	}
+	/*
+	 * Grab a TX buffer and associated resources.
+	 */
+	ATH_TXBUF_LOCK_IRQ(sc);
+	bf = STAILQ_FIRST(&sc->sc_txbuf);
+	if (bf != NULL)
+		STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
+	/* NB: if we just took the last free buffer, stop the net queue
+	 *     and kick the tx tasklet so completions replenish the list. */
+	if (STAILQ_EMPTY(&sc->sc_txbuf)) {
+		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
+		sc->sc_stats.ast_tx_qstop++;
+		netif_stop_queue(dev);
+		sc->sc_devstopped=1;
+		ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, NULL);
+	}
+	ATH_TXBUF_UNLOCK_IRQ(sc);
+	if (bf == NULL) {
+		printk("ath_mgtstart: discard, no xmit buf\n");
+		sc->sc_stats.ast_tx_nobufmgt++;
+		error = -ENOBUFS;
+		goto bad;
+	}
+
+	/*
+	 * NB: the referenced node pointer is in the
+	 * control block of the sk_buff.  This is
+	 * placed there by ieee80211_mgmt_output because
+	 * we need to hold the reference with the frame.
+	 */
+	cb = (struct ieee80211_cb *)skb->cb;
+	ni = cb->ni;
+	error = ath_tx_start(dev, ni, bf, skb, 0);
+	if (error == 0) {
+		sc->sc_stats.ast_tx_mgmt++;
+		return 0;
+	}
+	/* fall thru... */
+bad:
+	/* Error path: drop the node reference held with the frame,
+	 * return the unused tx buffer and free the skb. */
+	if (ni != NULL)
+		ieee80211_free_node(ni);
+	if (bf != NULL) {
+		bf->bf_skb = NULL;
+		bf->bf_node = NULL;
+
+		ATH_TXBUF_LOCK_IRQ(sc);
+		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
+		ATH_TXBUF_UNLOCK_IRQ(sc);
+	}
+	dev_kfree_skb_any(skb);
+	skb = NULL;
+	return error;
+}
+
+#ifdef AR_DEBUG
+/*
+ * Debug helper: dump a key cache entry (cipher name, key bytes, mac
+ * and, for TKIP, the MIC key material) to the kernel log.
+ * NB: assumes hk->kv_type indexes within ciphers[] — TODO confirm the
+ *     HAL cipher ids are always 0..5 here.
+ */
+static void
+ath_keyprint(struct ath_softc *sc, const char *tag, u_int ix,
+	const HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
+{
+	static const char *ciphers[] = {
+		"WEP",
+		"AES-OCB",
+		"AES-CCM",
+		"CKIP",
+		"TKIP",
+		"CLR",
+	};
+	int i, n;
+
+	printk("%s: [%02u] %-7s ", tag, ix, ciphers[hk->kv_type]);
+	for (i = 0, n = hk->kv_len; i < n; i++)
+		printk("%02x", hk->kv_val[i]);
+	printk(" mac %s", ether_sprintf(mac));
+	if (hk->kv_type == HAL_CIPHER_TKIP) {
+		printk(" %s ", sc->sc_splitmic ? "mic" : "rxmic");
+		for (i = 0; i < sizeof(hk->kv_mic); i++)
+			printk("%02x", hk->kv_mic[i]);
+#if HAL_ABI_VERSION > 0x06052200
+		if (!sc->sc_splitmic) {
+			printk(" txmic ");
+			for (i = 0; i < sizeof(hk->kv_txmic); i++)
+				printk("%02x", hk->kv_txmic[i]);
+		}
+#endif
+	}
+	printk("\n");
+}
+#endif
+
+/*
+ * Set a TKIP key into the hardware.  This handles the
+ * potential distribution of key state to multiple key
+ * cache slots for TKIP.
+ * Returns the HAL status (non-zero on success, 0 on failure or when
+ * the key has neither XMIT nor RECV flags).
+ */
+static int
+ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k,
+	HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
+{
+#define	IEEE80211_KEY_XR	(IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV)
+	static const u_int8_t zerobssid[IEEE80211_ADDR_LEN];
+	struct ath_hal *ah = sc->sc_ah;
+
+	KASSERT(k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP,
+		("got a non-TKIP key, cipher %u", k->wk_cipher->ic_cipher));
+	/* NB: first branch is taken when the key is used for both tx+rx */
+	if ((k->wk_flags & IEEE80211_KEY_XR) == IEEE80211_KEY_XR) {
+		if (sc->sc_splitmic) {
+			/*
+			 * TX key goes at first index, RX key at the rx index.
+			 * The HAL handles the MIC keys at index+64.
+			 */
+			memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_mic));
+			KEYPRINTF(sc, k->wk_keyix, hk, zerobssid);
+			if (!ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid))
+				return 0;
+
+			memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
+			KEYPRINTF(sc, k->wk_keyix+32, hk, mac);
+			/* XXX delete tx key on failure? */
+			return ath_hal_keyset(ah, k->wk_keyix+32, hk, mac);
+		} else {
+			/*
+			 * Room for both TX+RX MIC keys in one key cache
+			 * slot, just set key at the first index; the HAL
+			 * will handle the reset.
+			 */
+			memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
+#if HAL_ABI_VERSION > 0x06052200
+			memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic));
+#endif
+			KEYPRINTF(sc, k->wk_keyix, hk, mac);
+			return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
+		}
+	} else if (k->wk_flags & IEEE80211_KEY_XR) {
+		/*
+		 * TX/RX key goes at first index.
+		 * The HAL handles the MIC keys at index+64.
+		 */
+		memcpy(hk->kv_mic, k->wk_flags & IEEE80211_KEY_XMIT ?
+			k->wk_txmic : k->wk_rxmic, sizeof(hk->kv_mic));
+		KEYPRINTF(sc, k->wk_keyix, hk, mac);
+		return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
+	}
+	return 0;
+#undef IEEE80211_KEY_XR
+}
+
+/*
+ * Set a net80211 key into the hardware.  This handles the
+ * potential distribution of key state to multiple key
+ * cache slots for TKIP with hardware MIC support.
+ * Returns the HAL status (non-zero on success).
+ */
+static int
+ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
+	const u_int8_t mac0[IEEE80211_ADDR_LEN],
+	struct ieee80211_node *bss)
+{
+#define	N(a)	((int)(sizeof(a)/sizeof(a[0])))
+	/* Map net80211 cipher numbers to HAL cipher ids. */
+	static const u_int8_t ciphermap[] = {
+		HAL_CIPHER_WEP,		/* IEEE80211_CIPHER_WEP */
+		HAL_CIPHER_TKIP,	/* IEEE80211_CIPHER_TKIP */
+		HAL_CIPHER_AES_OCB,	/* IEEE80211_CIPHER_AES_OCB */
+		HAL_CIPHER_AES_CCM,	/* IEEE80211_CIPHER_AES_CCM */
+		(u_int8_t) -1,		/* 4 is not allocated */
+		HAL_CIPHER_CKIP,	/* IEEE80211_CIPHER_CKIP */
+		HAL_CIPHER_CLR,		/* IEEE80211_CIPHER_NONE */
+	};
+	struct ath_hal *ah = sc->sc_ah;
+	const struct ieee80211_cipher *cip = k->wk_cipher;
+	u_int8_t gmac[IEEE80211_ADDR_LEN];
+	const u_int8_t *mac;
+	HAL_KEYVAL hk;
+
+	memset(&hk, 0, sizeof(hk));
+	/*
+	 * Software crypto uses a "clear key" so non-crypto
+	 * state kept in the key cache are maintained and
+	 * so that rx frames have an entry to match.
+	 */
+	if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
+		KASSERT(cip->ic_cipher < N(ciphermap),
+			("invalid cipher type %u", cip->ic_cipher));
+		hk.kv_type = ciphermap[cip->ic_cipher];
+		hk.kv_len = k->wk_keylen;
+		memcpy(hk.kv_val, k->wk_key, k->wk_keylen);
+	} else
+		hk.kv_type = HAL_CIPHER_CLR;
+
+	if ((k->wk_flags & IEEE80211_KEY_GROUP) && sc->sc_mcastkey) {
+		/*
+		 * Group keys on hardware that supports multicast frame
+		 * key search use a mac that is the sender's address with
+		 * the high bit set instead of the app-specified address.
+		 */
+		IEEE80211_ADDR_COPY(gmac, bss->ni_macaddr);
+		gmac[0] |= 0x80;
+		mac = gmac;
+	} else
+		mac = mac0;
+
+	if (hk.kv_type == HAL_CIPHER_TKIP &&
+	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
+		/* TKIP with hardware MIC needs the multi-slot treatment */
+		return ath_keyset_tkip(sc, k, &hk, mac);
+	} else {
+		KEYPRINTF(sc, k->wk_keyix, &hk, mac);
+		return ath_hal_keyset(ah, k->wk_keyix, &hk, mac);
+	}
+#undef N
+}
+
+/*
+ * Allocate tx/rx key slots for TKIP.  We allocate two slots for
+ * each key, one for decrypt/encrypt and the other for the MIC.
+ * Split-mic variant: four cache slots are claimed per key
+ * (keyix, keyix+32, keyix+64, keyix+32+64).
+ * Returns the base key index or IEEE80211_KEYIX_NONE when no
+ * suitable group of slots is free.
+ */
+static u_int16_t
+key_alloc_2pair(struct ath_softc *sc)
+{
+#define	N(a)	((int)(sizeof(a)/sizeof(a[0])))
+	u_int i, keyix;
+
+	KASSERT(sc->sc_splitmic, ("key cache !split"));
+	/* XXX could optimize */
+	/* NB: only the first quarter of the keymap is scanned since the
+	 *     companion slots live at fixed offsets +32/+64/+32+64 */
+	for (i = 0; i < N(sc->sc_keymap) / 4; i++) {
+		u_int8_t b = sc->sc_keymap[i];
+		if (b != 0xff) {
+			/*
+			 * One or more slots in this byte are free.
+			 */
+			keyix = i * NBBY;
+			/* NB: 'again' re-enters this bit scan to step past a
+			 *     candidate whose companion slots are taken */
+			while (b & 1) {
+		again:
+				keyix++;
+				b >>= 1;
+			}
+			/* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
+			if (isset(sc->sc_keymap, keyix + 32) ||
+			    isset(sc->sc_keymap, keyix + 64) ||
+			    isset(sc->sc_keymap, keyix + 32 + 64)) {
+				/* full pair unavailable */
+				/* XXX statistic */
+				if (keyix == (i + 1) * NBBY) {
+					/* no slots were appropriate, advance */
+					continue;
+				}
+				goto again;
+			}
+			setbit(sc->sc_keymap, keyix);
+			setbit(sc->sc_keymap, keyix + 64);
+			setbit(sc->sc_keymap, keyix + 32);
+			setbit(sc->sc_keymap, keyix + 32 + 64);
+			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
+				"%s: key pair %u,%u %u,%u\n",
+				__func__, keyix, keyix + 64,
+				keyix + 32, keyix + 32 + 64);
+			return keyix;
+		}
+	}
+	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
+	return IEEE80211_KEYIX_NONE;
+#undef N
+}
+
+/*
+ * Allocate tx/rx key slots for TKIP.  We allocate two slots for
+ * each key, one for decrypt/encrypt and the other for the MIC.
+ * Non-split-mic variant: the MIC key lives at keyix+64 only.
+ * Returns the base key index or IEEE80211_KEYIX_NONE when no
+ * suitable pair of slots is free.
+ */
+static u_int16_t
+key_alloc_pair(struct ath_softc *sc)
+{
+#define	N(a)	(sizeof(a)/sizeof(a[0]))
+	u_int i, keyix;
+
+	KASSERT(!sc->sc_splitmic, ("key cache split"));
+	/* XXX could optimize */
+	for (i = 0; i < N(sc->sc_keymap)/4; i++) {
+		u_int8_t b = sc->sc_keymap[i];
+		if (b != 0xff) {
+			/*
+			 * One or more slots in this byte are free.
+			 */
+			keyix = i*NBBY;
+			/* NB: 'again' re-enters this bit scan to step past a
+			 *     candidate whose MIC slot is taken */
+			while (b & 1) {
+		again:
+				keyix++;
+				b >>= 1;
+			}
+			if (isset(sc->sc_keymap, keyix+64)) {
+				/* full pair unavailable */
+				/* XXX statistic */
+				if (keyix == (i+1)*NBBY) {
+					/* no slots were appropriate, advance */
+					continue;
+				}
+				goto again;
+			}
+			setbit(sc->sc_keymap, keyix);
+			setbit(sc->sc_keymap, keyix+64);
+			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
+				"%s: key pair %u,%u\n",
+				__func__, keyix, keyix+64);
+			return keyix;
+		}
+	}
+	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
+	return IEEE80211_KEYIX_NONE;
+#undef N
+}
+
+/*
+ * Allocate a single key cache slot.
+ * Returns the key index or IEEE80211_KEYIX_NONE when the cache
+ * is full.
+ */
+static u_int16_t
+key_alloc_single(struct ath_softc *sc)
+{
+#define	N(a)	((int)(sizeof(a)/sizeof(a[0])))
+	u_int i, keyix;
+
+	/* XXX try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
+	for (i = 0; i < N(sc->sc_keymap); i++) {
+		u_int8_t b = sc->sc_keymap[i];
+		if (b != 0xff) {
+			/*
+			 * One or more slots are free.
+			 */
+			keyix = i * NBBY;
+			/* walk to the first clear bit in this byte */
+			while (b & 1)
+				keyix++, b >>= 1;
+			setbit(sc->sc_keymap, keyix);
+			DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key %u\n",
+				__func__, keyix);
+			return keyix;
+		}
+	}
+	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of space\n", __func__);
+	return IEEE80211_KEYIX_NONE;
+#undef N
+}
+
+/*
+ * Allocate one or more key cache slots for a unicast key.  The
+ * key itself is needed only to identify the cipher.  For hardware
+ * TKIP with split cipher+MIC keys we allocate two key cache slot
+ * pairs so that we can setup separate TX and RX MIC keys.  Note
+ * that the MIC key for a TKIP key at slot i is assumed by the
+ * hardware to be at slot i+64.  This limits TKIP keys to the first
+ * 64 entries.
+ * Returns the allocated key index or IEEE80211_KEYIX_NONE.
+ */
+static int
+ath_key_alloc(struct ieee80211vap *vap, const struct ieee80211_key *k)
+{
+	struct net_device *dev = vap->iv_ic->ic_dev;
+	struct ath_softc *sc = dev->priv;
+
+	/*
+	 * Group key allocation must be handled specially for
+	 * parts that do not support multicast key cache search
+	 * functionality.  For those parts the key id must match
+	 * the h/w key index so lookups find the right key.  On
+	 * parts w/ the key search facility we install the sender's
+	 * mac address (with the high bit set) and let the hardware
+	 * find the key w/o using the key id.  This is preferred as
+	 * it permits us to support multiple users for adhoc and/or
+	 * multi-station operation.
+	 */
+	if ((k->wk_flags & IEEE80211_KEY_GROUP) && !sc->sc_mcastkey) {
+		u_int keyix;
+
+		/* NB: group keys must come from the VAP's static key array */
+		if (!(&vap->iv_nw_keys[0] <= k &&
+		    k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
+			/* should not happen */
+			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
+				"%s: bogus group key\n", __func__);
+			return IEEE80211_KEYIX_NONE;
+		}
+		keyix = k - vap->iv_nw_keys;
+		/*
+		 * XXX we pre-allocate the global keys so
+		 * have no way to check if they've already been allocated.
+		 */
+		return keyix;
+	}
+	/*
+	 * We allocate two pair for TKIP when using the h/w to do
+	 * the MIC.  For everything else, including software crypto,
+	 * we allocate a single entry.  Note that s/w crypto requires
+	 * a pass-through slot on the 5211 and 5212.  The 5210 does
+	 * not support pass-through cache entries and we map all
+	 * those requests to slot 0.
+	 *
+	 * Allocate 1 pair of keys for WEP case. Make sure the key
+	 * is not a shared-key.
+	 */
+	if (k->wk_flags & IEEE80211_KEY_SWCRYPT)
+		return key_alloc_single(sc);
+	else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
+		(k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
+		if (sc->sc_splitmic)
+			return key_alloc_2pair(sc);
+		else
+			return key_alloc_pair(sc);
+	} else
+		return key_alloc_single(sc);
+}
+
+/*
+ * Delete an entry in the key cache allocated by ath_key_alloc.
+ * Resets the hardware slot(s), drops any node reference cached in
+ * the key->node map and clears the keymap bits.  Always returns 1.
+ */
+static int
+ath_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k,
+				struct ieee80211_node *ninfo)
+{
+	struct net_device *dev = vap->iv_ic->ic_dev;
+	struct ath_softc *sc = dev->priv;
+	struct ath_hal *ah = sc->sc_ah;
+	const struct ieee80211_cipher *cip = k->wk_cipher;
+	struct ieee80211_node *ni;
+	u_int keyix = k->wk_keyix;
+	int rxkeyoff = 0;
+
+	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, keyix);
+
+	ath_hal_keyreset(ah, keyix);
+	/*
+	 * Check the key->node map and flush any ref.
+	 */
+	ni = sc->sc_keyixmap[keyix];
+	if (ni != NULL) {
+		ieee80211_free_node(ni);
+		sc->sc_keyixmap[keyix] = NULL;
+	}
+	/*
+	 * Handle split tx/rx keying required for TKIP with h/w MIC.
+	 */
+	if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
+	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic) {
+		ath_hal_keyreset(ah, keyix + 32);	/* RX key */
+		ni = sc->sc_keyixmap[keyix + 32];
+		if (ni != NULL) {			/* as above... */
+			ieee80211_free_node(ni);
+			sc->sc_keyixmap[keyix + 32] = NULL;
+		}
+	}
+
+	/* Remove receive key entry if one exists for static WEP case */
+	if (ninfo != NULL) {
+		rxkeyoff = ninfo->ni_rxkeyoff;
+		if (rxkeyoff != 0) {
+			ninfo->ni_rxkeyoff = 0;
+			ath_hal_keyreset(ah, keyix + rxkeyoff);
+			ni = sc->sc_keyixmap[keyix + rxkeyoff];
+			if (ni != NULL) {	/* as above... */
+				ieee80211_free_node(ni);
+				sc->sc_keyixmap[keyix + rxkeyoff] = NULL;
+			}
+		}
+	}
+
+	if (keyix >= IEEE80211_WEP_NKID) {
+		/*
+		 * Don't touch keymap entries for global keys so
+		 * they are never considered for dynamic allocation.
+		 */
+		clrbit(sc->sc_keymap, keyix);
+		if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
+		    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
+			clrbit(sc->sc_keymap, keyix + 64);	/* TX key MIC */
+			if (sc->sc_splitmic) {
+				/* +32 for RX key, +32+64 for RX key MIC */
+				clrbit(sc->sc_keymap, keyix+32);
+				clrbit(sc->sc_keymap, keyix+32+64);
+			}
+		}
+
+		if (rxkeyoff != 0)
+			clrbit(sc->sc_keymap, keyix + rxkeyoff);/*RX Key */
+	}
+	return 1;
+}
+
+/*
+ * Set the key cache contents for the specified key.  Key cache
+ * slot(s) must already have been allocated by ath_key_alloc.
+ * Thin wrapper that forwards to ath_keyset with the VAP's bss node.
+ */
+static int
+ath_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
+	const u_int8_t mac[IEEE80211_ADDR_LEN])
+{
+	struct net_device *dev = vap->iv_ic->ic_dev;
+	struct ath_softc *sc = dev->priv;
+
+	return ath_keyset(sc, k, mac, vap->iv_bss);
+}
+
+/*
+ * Block/unblock tx+rx processing while a key change is done.
+ * We assume the caller serializes key management operations
+ * so we only need to worry about synchronization with other
+ * uses that originate in the driver.
+ */
+static void
+ath_key_update_begin(struct ieee80211vap *vap)
+{
+	struct net_device *dev = vap->iv_ic->ic_dev;
+	struct ath_softc *sc = dev->priv;
+
+	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
+	/*
+	 * When called from the rx tasklet we cannot use
+	 * tasklet_disable because it will block waiting
+	 * for us to complete execution.
+	 *
+	 * XXX Using in_softirq is not right since we might
+	 * be called from other soft irq contexts than
+	 * ath_rx_tasklet.
+	 */
+	if (!in_softirq())
+		tasklet_disable(&sc->sc_rxtq);
+	netif_stop_queue(dev);
+}
+
+/* Re-enable tx+rx processing after a key change; pairs with
+ * ath_key_update_begin above. */
+static void
+ath_key_update_end(struct ieee80211vap *vap)
+{
+	struct net_device *dev = vap->iv_ic->ic_dev;
+	struct ath_softc *sc = dev->priv;
+
+	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
+	netif_start_queue(dev);
+	if (!in_softirq())		/* NB: see above */
+		tasklet_enable(&sc->sc_rxtq);
+}
+
+/*
+ * Calculate the receive filter according to the
+ * operating mode and state:
+ *
+ * o always accept unicast, broadcast, and multicast traffic
+ * o maintain current state of phy error reception (the HAL
+ *   may enable phy error frames for noise immunity work)
+ * o probe request frames are accepted only when operating in
+ *   hostap, adhoc, or monitor modes
+ * o enable promiscuous mode according to the interface state
+ * o accept beacons:
+ *   - when operating in adhoc mode so the 802.11 layer creates
+ *     node table entries for peers,
+ *   - when operating in station mode for collecting rssi data when
+ *     the station is otherwise quiet, or
+ *   - when operating as a repeater so we see repeater-sta beacons
+ *   - when scanning
+ *
+ * Returns the computed HAL rx filter bits (not written to h/w here).
+ */
+static u_int32_t
+ath_calcrxfilter(struct ath_softc *sc)
+{
+#define	RX_FILTER_PRESERVE	(HAL_RX_FILTER_PHYERR | HAL_RX_FILTER_PHYRADAR)
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct net_device *dev = ic->ic_dev;
+	struct ath_hal *ah = sc->sc_ah;
+	u_int32_t rfilt;
+
+	rfilt = (ath_hal_getrxfilter(ah) & RX_FILTER_PRESERVE) |
+		 HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST |
+		 HAL_RX_FILTER_MCAST;
+	if (ic->ic_opmode != IEEE80211_M_STA)
+		rfilt |= HAL_RX_FILTER_PROBEREQ;
+	if (ic->ic_opmode != IEEE80211_M_HOSTAP && (dev->flags & IFF_PROMISC))
+		rfilt |= HAL_RX_FILTER_PROM;
+	if (ic->ic_opmode == IEEE80211_M_STA ||
+	    sc->sc_opmode == HAL_M_IBSS ||	/* NB: AHDEMO too */
+	    (sc->sc_nostabeacons) || sc->sc_scanning)
+		rfilt |= HAL_RX_FILTER_BEACON;
+	/* NB: monitor VAPs want everything */
+	if (sc->sc_nmonvaps > 0) 
+		rfilt |= (HAL_RX_FILTER_CONTROL | HAL_RX_FILTER_BEACON | 
+			  HAL_RX_FILTER_PROBEREQ | HAL_RX_FILTER_PROM);
+	return rfilt;
+#undef RX_FILTER_PRESERVE
+}
+
+/*
+ * Merge multicast addresses from all VAPs to form the
+ * hardware filter.  Ideally we should only inspect our
+ * own list and the 802.11 layer would merge for us but
+ * that's a bit difficult so for now we put the onus on
+ * the driver.
+ *
+ * mfilt[2] receives the 64-bit hardware hash filter (two 32-bit
+ * words); each multicast address sets one bit.
+ */
+static void
+ath_merge_mcast(struct ath_softc *sc, u_int32_t mfilt[2])
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211vap *vap;
+	struct dev_mc_list *mc;
+	u_int32_t val;
+	u_int8_t pos;
+
+	mfilt[0] = mfilt[1] = 0;
+	/* XXX locking */
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+		struct net_device *dev = vap->iv_dev;
+		for (mc = dev->mc_list; mc; mc = mc->next) {
+			/* calculate XOR of eight 6-bit values */
+			/* NB: the two 4-byte reads at +0 and +3 overlap;
+			 *     together they cover the 6-byte address */
+			val = LE_READ_4(mc->dmi_addr + 0);
+			pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
+			val = LE_READ_4(mc->dmi_addr + 3);
+			pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
+			pos &= 0x3f;
+			mfilt[pos / 32] |= (1 << (pos % 32));
+		}
+	}
+}
+
+/* Push the current rx filter, bssid mask, operating mode and
+ * multicast filter down to the hardware. */
+static void
+ath_mode_init(struct net_device *dev)
+{
+	struct ath_softc *sc = dev->priv;
+	struct ath_hal *ah = sc->sc_ah;
+	u_int32_t rfilt, mfilt[2];
+
+	/* configure rx filter */
+	rfilt = ath_calcrxfilter(sc);
+	ath_hal_setrxfilter(ah, rfilt);
+
+	/* configure bssid mask */
+	if (sc->sc_hasbmask)
+		ath_hal_setbssidmask(ah, sc->sc_bssidmask);
+
+	/* configure operational mode */
+	ath_hal_setopmode(ah);
+
+	/* calculate and install multicast filter */
+	if ((dev->flags & IFF_ALLMULTI) == 0)
+		ath_merge_mcast(sc, mfilt);
+	else
+		mfilt[0] = mfilt[1] = ~0;	/* accept all multicast */
+	ath_hal_setmcastfilter(ah, mfilt[0], mfilt[1]);
+	DPRINTF(sc, ATH_DEBUG_STATE,
+	     "%s: RX filter 0x%x, MC filter %08x:%08x\n",
+	     __func__, rfilt, mfilt[0], mfilt[1]);
+}
+
+/*
+ * Set the slot time based on the current setting.
+ * A manual override (sc_slottimeconf) wins; otherwise use 9us when
+ * short slot is negotiated, 20us otherwise.
+ */
+static void
+ath_setslottime(struct ath_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ath_hal *ah = sc->sc_ah;
+
+	if (sc->sc_slottimeconf > 0) /* manual override */
+		ath_hal_setslottime(ah, sc->sc_slottimeconf);
+	else if (ic->ic_flags & IEEE80211_F_SHSLOT)
+		ath_hal_setslottime(ah, HAL_SLOT_TIME_9);
+	else
+		ath_hal_setslottime(ah, HAL_SLOT_TIME_20);
+	sc->sc_updateslot = OK;
+}
+
+/*
+ * Callback from the 802.11 layer to update the
+ * slot time based on the current setting.
+ */
+static void
+ath_updateslot(struct net_device *dev)
+{
+	struct ath_softc *sc = dev->priv;
+	struct ieee80211com *ic = &sc->sc_ic;
+
+	/*
+	 * When not coordinating the BSS, change the hardware
+	 * immediately.  For other operation we defer the change
+	 * until beacon updates have propagated to the stations.
+	 */
+	if (ic->ic_opmode == IEEE80211_M_HOSTAP)
+		sc->sc_updateslot = UPDATE;	/* applied later at beacon time */
+	else if (dev->flags & IFF_RUNNING)
+		ath_setslottime(sc);
+}
+
+#ifdef ATH_SUPERG_DYNTURBO
+/*
+ * Dynamic turbo support.
+ * XXX much of this could be moved up to the net80211 layer.
+ */
+
+/*
+ * Configure dynamic turbo state on beacon setup.
+ * Enables dturbo only for hostap VAPs whose bss advertises the
+ * TURBOP capability; computes the time/bandwidth switch thresholds
+ * in units of beacon intervals.
+ */
+static void
+ath_beacon_dturbo_config(struct ieee80211vap *vap, u_int32_t intval)
+{
+#define	IS_CAPABLE(vap) \
+	(vap->iv_bss && (vap->iv_bss->ni_ath_flags & (IEEE80211_ATHC_TURBOP )) == \
+		(IEEE80211_ATHC_TURBOP))
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ath_softc *sc = ic->ic_dev->priv;
+
+	if (ic->ic_opmode == IEEE80211_M_HOSTAP && IS_CAPABLE(vap)) {
+
+		/* Dynamic Turbo is supported on this channel. */
+		sc->sc_dturbo = 1;
+		sc->sc_dturbo_tcount = 0;
+		sc->sc_dturbo_switch = 0;
+		sc->sc_ignore_ar = 0;
+
+		/* Set the initial ATHC_BOOST capability. */
+		if (ic->ic_bsschan->ic_flags & CHANNEL_TURBO)
+			ic->ic_ath_cap |=  IEEE80211_ATHC_BOOST;
+		else
+			ic->ic_ath_cap &= ~IEEE80211_ATHC_BOOST;
+
+		/*
+		 * Calculate time & bandwidth thresholds
+		 *
+		 * sc_dturbo_base_tmin  :  ~70 seconds
+		 * sc_dturbo_turbo_tmax : ~120 seconds
+		 *
+		 * NB: scale calculated values to account for staggered
+		 *     beacon handling
+		 */
+		/* NB: 1024 TU per second; ic_lintval is the beacon interval
+		 *     in TU, so these are counts of beacon intervals */
+		sc->sc_dturbo_base_tmin  = 70  * 1024 / ic->ic_lintval;
+		sc->sc_dturbo_turbo_tmax = 120 * 1024 / ic->ic_lintval;
+		sc->sc_dturbo_turbo_tmin = 5 * 1024 / ic->ic_lintval;
+		/* convert the thresholds from BW/sec to BW/beacon period */
+		sc->sc_dturbo_bw_base    = ATH_TURBO_DN_THRESH/(1024/ic->ic_lintval);  
+		sc->sc_dturbo_bw_turbo   = ATH_TURBO_UP_THRESH/(1024/ic->ic_lintval); 
+		/* time in hold state in number of beacon */
+		sc->sc_dturbo_hold_max   = (ATH_TURBO_PERIOD_HOLD * 1024)/ic->ic_lintval;
+	} else {
+		sc->sc_dturbo = 0;
+		ic->ic_ath_cap &= ~IEEE80211_ATHC_BOOST;
+	}
+#undef IS_CAPABLE
+}
+
+/*
+ * Update dynamic turbo state at SWBA (software beacon alert).  We
+ * assume we are called only if dynamic turbo has been enabled
+ * (sc_dturbo).
+ */
+static void
+ath_beacon_dturbo_update(struct ieee80211vap *vap, int *needmark,u_int8_t dtim)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ath_softc *sc = ic->ic_dev->priv;
+	u_int32_t bss_traffic;
+
+	/* TBD: Age out CHANNEL_INTERFERENCE */
+	if (sc->sc_ignore_ar) {
+		/* 
+		 * Ignore AR for this beacon; a dynamic turbo
+		 * switch just happened and the information
+		 * is invalid.  Notify AR support of the channel
+		 * change.
+		 */
+		sc->sc_ignore_ar = 0;
+		ath_hal_ar_enable(sc->sc_ah);
+	}
+	sc->sc_dturbo_tcount++;
+	/*
+	 * Calculate BSS traffic over the previous interval.
+	 */
+	bss_traffic = (sc->sc_devstats.tx_bytes + sc->sc_devstats.rx_bytes)
+		    - sc->sc_dturbo_bytes;
+	sc->sc_dturbo_bytes = sc->sc_devstats.tx_bytes
+			    + sc->sc_devstats.rx_bytes;
+	if (ic->ic_ath_cap & IEEE80211_ATHC_BOOST) {
+		/*
+		 * Before switching to base mode, make sure the conditions
+		 * (low rssi, low bw) to switch mode hold for some time and
+		 * that time in turbo exceeds the minimum turbo time.
+		 */
+
+		if (sc->sc_dturbo_tcount >= sc->sc_dturbo_turbo_tmin && 
+		   sc->sc_dturbo_hold ==0 &&
+		   (bss_traffic < sc->sc_dturbo_bw_base || !sc->sc_rate_recn_state)) {
+			sc->sc_dturbo_hold = 1;
+		} else {
+			if (sc->sc_dturbo_hold &&
+			   bss_traffic >= sc->sc_dturbo_bw_turbo && sc->sc_rate_recn_state) {
+				/* out of hold state */
+				sc->sc_dturbo_hold = 0;
+				sc->sc_dturbo_hold_count = sc->sc_dturbo_hold_max;
+			}
+		}
+		if (sc->sc_dturbo_hold && sc->sc_dturbo_hold_count)
+			sc->sc_dturbo_hold_count--;
+		/*
+		 * Current Mode: Turbo (i.e. BOOST)
+		 *
+		 * Transition to base occurs when one of the following
+		 * is true:
+		 *    1. its a DTIM beacon. 
+		 *    2. Maximum time in BOOST has elapsed (120 secs).
+		 *    3. Channel is marked with interference
+		 *    4. Average BSS traffic falls below 4Mbps 
+		 *    5. RSSI cannot support at least 18 Mbps rate 
+		 * XXX do bw checks at true beacon interval?
+		 */
+		if (dtim && 
+			(sc->sc_dturbo_tcount >= sc->sc_dturbo_turbo_tmax ||
+			 ((vap->iv_bss->ni_ath_flags & IEEE80211_ATHC_AR) && 
+			  (sc->sc_curchan.privFlags & CHANNEL_INTERFERENCE) &&
+			  IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) || 
+			 !sc->sc_dturbo_hold_count)) {
+			DPRINTF(sc, ATH_DEBUG_TURBO, "%s: Leaving turbo\n",
+					sc->sc_dev->name);
+			ic->ic_ath_cap &= ~IEEE80211_ATHC_BOOST;
+			vap->iv_bss->ni_ath_flags &= ~IEEE80211_ATHC_BOOST;
+			sc->sc_dturbo_tcount = 0;
+			sc->sc_dturbo_switch = 1;
+		}
+	} else {
+		/*
+		 * Current Mode: BASE
+		 *
+		 * Transition to Turbo (i.e. BOOST) when all of the
+		 * following are true:
+		 *
+		 * 1. its a DTIM beacon. 
+		 * 2. Dwell time at base has exceeded minimum (70 secs)
+		 * 3. Only DT-capable stations are associated
+		 * 4. Channel is marked interference-free.
+		 * 5. BSS data traffic averages at least 6Mbps 
+		 * 6. RSSI is good enough to support 36Mbps 
+		 * XXX do bw+rssi checks at true beacon interval?
+		 */
+		if (dtim && 
+			(sc->sc_dturbo_tcount >= sc->sc_dturbo_base_tmin &&
+			 (ic->ic_dt_sta_assoc != 0 &&
+			  ic->ic_sta_assoc == ic->ic_dt_sta_assoc) &&
+			 ((vap->iv_bss->ni_ath_flags & IEEE80211_ATHC_AR) == 0 || 
+			  (sc->sc_curchan.privFlags & CHANNEL_INTERFERENCE) == 0) &&
+			 bss_traffic >= sc->sc_dturbo_bw_turbo && 
+			 sc->sc_rate_recn_state)) {
+			DPRINTF(sc, ATH_DEBUG_TURBO, "%s: Entering turbo\n",
+					sc->sc_dev->name);
+			ic->ic_ath_cap |= IEEE80211_ATHC_BOOST;
+			vap->iv_bss->ni_ath_flags |= IEEE80211_ATHC_BOOST;
+			sc->sc_dturbo_tcount = 0;
+			sc->sc_dturbo_switch = 1;
+			sc->sc_dturbo_hold = 0;
+			sc->sc_dturbo_hold_count = sc->sc_dturbo_hold_max;
+		}
+	}
+}
+
+
+/*
+ * Check whether the last queued beacon has completed transmission,
+ * i.e. its tx descriptor is no longer HAL_EINPROGRESS.  Returns
+ * non-zero when done, 0 when still pending or when no beacon VAP
+ * exists.  Used to delay a dynamic turbo switch until the beacon
+ * announcing the mode change has gone out.
+ */
+static int 
+ath_check_beacon_done(struct ath_softc *sc)
+{
+	struct ieee80211vap *vap=NULL;
+	struct ath_vap *avp;
+	struct ath_buf *bf;
+	struct sk_buff *skb;
+	struct ath_desc *ds;
+	struct ath_hal *ah = sc->sc_ah;
+	int slot;
+
+	/*
+	 * check if the last beacon went out with the mode change flag set.
+	 */
+	/* NB: any occupied beacon slot will do; pick the first */
+	for (slot = 0; slot < ATH_BCBUF; slot++) {
+		if(sc->sc_bslot[slot]) { 
+			vap = sc->sc_bslot[slot];
+			break;
+		}
+	}
+	if (!vap)
+		 return 0;
+	avp = ATH_VAP(vap);
+	bf = avp->av_bcbuf;
+	skb = bf->bf_skb;	/* NB: fetched but not used below */
+	ds = bf->bf_desc;
+
+	return (ath_hal_txprocdesc(ah, ds) != HAL_EINPROGRESS);
+
+}
+
+/*
+ * Effect a turbo mode switch when operating in dynamic
+ * turbo mode. wait for beacon to go out before switching.
+ *
+ * Timer callback: 'data' is the net_device pointer.  Re-arms itself
+ * (2ms) until the beacon carrying the announcement has been sent.
+ */
+static void
+ath_turbo_switch_mode(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct ath_softc *sc = dev->priv;
+	struct ieee80211com *ic = &sc->sc_ic;
+	int newflags;
+
+	KASSERT(ic->ic_opmode == IEEE80211_M_HOSTAP,
+		("unexpected operating mode %d", ic->ic_opmode));
+
+	DPRINTF(sc, ATH_DEBUG_STATE, "%s: dynamic turbo switch to %s mode\n",
+		dev->name,
+		ic->ic_ath_cap & IEEE80211_ATHC_BOOST ? "turbo" : "base");
+
+	if (!ath_check_beacon_done(sc)) {
+		/* 
+		 * beacon did not go out. reschedule tasklet.
+		 */
+		mod_timer(&sc->sc_dturbo_switch_mode, jiffies + msecs_to_jiffies(2));
+		return;
+	}
+
+	/* TBD: DTIM adjustments, delay CAB queue tx until after transmit */
+	newflags = ic->ic_bsschan->ic_flags;
+	if (ic->ic_ath_cap & IEEE80211_ATHC_BOOST) {
+		if (IEEE80211_IS_CHAN_2GHZ(ic->ic_bsschan)) {
+			/*
+			 * Ignore AR next beacon. the AR detection
+			 * code detects the traffic in normal channel
+			 * from stations during transition delays
+			 * between AP and station.
+			 */
+			sc->sc_ignore_ar = 1;
+			ath_hal_ar_disable(sc->sc_ah);
+		}
+		newflags |= IEEE80211_CHAN_TURBO;
+	} else
+		newflags &= ~IEEE80211_CHAN_TURBO;
+	ieee80211_dturbo_switch(ic, newflags);
+	/* XXX ieee80211_reset_erp? */
+}
+#endif /* ATH_SUPERG_DYNTURBO */
+
+/*
+ * Setup a h/w transmit queue for beacons.
+ * Returns the HAL queue number (or the HAL's failure value — see
+ * ath_hal_setuptxqueue).
+ */
+static int
+ath_beaconq_setup(struct ath_hal *ah)
+{
+	HAL_TXQ_INFO qi;
+
+	memset(&qi, 0, sizeof(qi));
+	qi.tqi_aifs = 1;
+	qi.tqi_cwmin = 0;
+	qi.tqi_cwmax = 0;
+#ifdef ATH_SUPERG_DYNTURBO
+	/* descriptor interrupt needed so dturbo can see beacon completion */
+	qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE;
+#endif
+	/* NB: don't enable any interrupts */
+	return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
+}
+
+/*
+ * Configure IFS parameter for the beacon queue.
+ * Returns 1 on success, 0 if the HAL rejected the queue parameters.
+ */
+static int
+ath_beaconq_config(struct ath_softc *sc)
+{
+#define	ATH_EXPONENT_TO_VALUE(v)	((1<<v)-1)
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ath_hal *ah = sc->sc_ah;
+	HAL_TXQ_INFO qi;
+
+	ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi);
+	if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
+		/*
+		 * Always burst out beacon and CAB traffic.
+		 */
+		qi.tqi_aifs = 1;
+		qi.tqi_cwmin = 0;
+		qi.tqi_cwmax = 0;
+	} else {
+		struct wmeParams *wmep =
+			&ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE];
+		/*
+		 * Adhoc mode; important thing is to use 2x cwmin.
+		 */
+		qi.tqi_aifs = wmep->wmep_aifsn;
+		qi.tqi_cwmin = 2 * ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
+		qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
+	}
+
+	if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) {
+		printk("%s: unable to update h/w beacon queue parameters\n",
+			sc->sc_dev->name);
+		return 0;
+	} else {
+		ath_hal_resettxqueue(ah, sc->sc_bhalq);	/* push to h/w */
+		return 1;
+	}
+#undef ATH_EXPONENT_TO_VALUE
+}
+
+/*
+ * Allocate and setup an initial beacon frame.
+ * Builds the beacon skb for the VAP owning 'ni', maps it for DMA and
+ * stores it in the VAP's beacon buffer, releasing any previous one.
+ * Returns 0 on success or -ENOMEM.
+ *
+ * Context: softIRQ
+ */
+static int
+ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
+{
+	struct ath_vap *avp = ATH_VAP(ni->ni_vap);
+	struct ieee80211_frame *wh;
+	struct ath_buf *bf;
+	struct sk_buff *skb;
+
+	/*
+	 * release the previous beacon's skb if it already exists.
+	 */
+	bf = avp->av_bcbuf;
+	if (bf->bf_skb != NULL) {
+		bus_unmap_single(sc->sc_bdev,
+		    bf->bf_skbaddr, bf->bf_skb->len, BUS_DMA_TODEVICE);
+		dev_kfree_skb(bf->bf_skb);
+		bf->bf_skb = NULL;
+	}
+	if (bf->bf_node != NULL) {
+		ieee80211_free_node(bf->bf_node);
+		bf->bf_node = NULL;
+	}
+
+	/*
+	 * NB: the beacon data buffer must be 32-bit aligned;
+	 * we assume the mbuf routines will return us something
+	 * with this alignment (perhaps should assert).
+	 */
+	skb = ieee80211_beacon_alloc(ni, &avp->av_boff);
+	if (skb == NULL) {
+		DPRINTF(sc, ATH_DEBUG_BEACON, "%s: cannot get sk_buff\n",
+			__func__);
+		sc->sc_stats.ast_be_nobuf++;
+		return -ENOMEM;
+	}
+
+	/*
+	 * Calculate a TSF adjustment factor required for
+	 * staggered beacons.  Note that we assume the format
+	 * of the beacon frame leaves the tstamp field immediately
+	 * following the header.
+	 */
+	if (sc->sc_stagbeacons && avp->av_bslot > 0) {
+		uint64_t tuadjust;
+		__le64 tsfadjust;
+		/*
+		 * The beacon interval is in TU's; the TSF in usecs.
+		 * We figure out how many TU's to add to align the
+		 * timestamp then convert to TSF units and handle
+		 * byte swapping before writing it in the frame.
+		 * The hardware will then add this each time a beacon
+		 * frame is sent.  Note that we align VAPs 1..N
+		 * and leave VAP 0 untouched.  This means VAP 0
+		 * has a timestamp in one beacon interval while the
+		 * others get a timestamp aligned to the next interval.
+		 */
+		tuadjust = (ni->ni_intval * (ATH_BCBUF - avp->av_bslot)) / ATH_BCBUF;
+		tsfadjust = cpu_to_le64(tuadjust << 10);	/* TU->TSF */
+
+		DPRINTF(sc, ATH_DEBUG_BEACON,
+			"%s: %s beacons, bslot %d intval %u tsfadjust(Kus) %llu\n",
+			__func__, sc->sc_stagbeacons ? "stagger" : "burst",
+			avp->av_bslot, ni->ni_intval, (long long) tuadjust);
+
+		/* NB: tstamp assumed to immediately follow the 802.11 header */
+		wh = (struct ieee80211_frame *) skb->data;
+		memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust));
+	}
+
+	bf->bf_node = ieee80211_ref_node(ni);
+	bf->bf_skbaddr = bus_map_single(sc->sc_bdev,
+		skb->data, skb->len, BUS_DMA_TODEVICE);
+	bf->bf_skb = skb;
+
+	return 0;
+}
+
+/*
+ * Setup the beacon frame for transmit.
+ *
+ * Fills in the hardware tx descriptor attached to 'bf' for the
+ * DMA-mapped beacon skb: link field (self-linked for VEOL adhoc
+ * beacons), antenna selection, rate (minimum xmit rate) and the
+ * HAL descriptor words.  Does not start DMA.
+ */
+static void
+ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
+{
+#define	USE_SHPREAMBLE(_ic) \
+	(((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
+		== IEEE80211_F_SHPREAMBLE)
+	struct ieee80211_node *ni = bf->bf_node;
+	struct ieee80211com *ic = ni->ni_ic;
+	struct sk_buff *skb = bf->bf_skb;
+	struct ath_hal *ah = sc->sc_ah;
+	struct ath_desc *ds;
+	int flags;
+	int antenna = sc->sc_txantenna;
+	const HAL_RATE_TABLE *rt;
+	u_int8_t rix, rate;
+	int ctsrate = 0;
+	int ctsduration = 0;
+
+	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n",
+		__func__, skb, skb->len);
+
+	/* setup descriptors */
+	ds = bf->bf_desc;
+
+	/* beacons are never ACKed */
+	flags = HAL_TXDESC_NOACK;
+#ifdef ATH_SUPERG_DYNTURBO
+	if (sc->sc_dturbo_switch)
+		flags |= HAL_TXDESC_INTREQ;
+#endif
+
+	if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
+		ds->ds_link = bf->bf_daddr;	/* self-linked */
+		flags |= HAL_TXDESC_VEOL;
+		/*
+		 * Let hardware handle antenna switching if txantenna is not set
+		 */
+	} else {
+		ds->ds_link = 0;
+		/*
+		 * Switch antenna every beacon if txantenna is not set
+		 * Should only switch every beacon period, not for every
+		 * SWBA's
+		 * XXX assumes two antenna
+		 */
+		if (antenna == 0) {
+			if (sc->sc_stagbeacons)
+				antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 1 ? 2 : 1);
+			else
+				antenna = (sc->sc_stats.ast_be_xmit & 1 ? 2 : 1);
+		}
+	}
+
+	ds->ds_data = bf->bf_skbaddr;
+	/*
+	 * Calculate rate code.
+	 * XXX everything at min xmit rate
+	 */
+	rix = sc->sc_minrateix;
+	rt = sc->sc_currates;
+	rate = rt->info[rix].rateCode;
+	if (USE_SHPREAMBLE(ic))
+		rate |= rt->info[rix].shortPreamble;
+#ifdef ATH_SUPERG_XR
+	if (bf->bf_node->ni_vap->iv_flags & IEEE80211_F_XR) {
+		u_int8_t cix;
+		int pktlen;
+		pktlen = skb->len + IEEE80211_CRC_LEN;
+		cix = rt->info[sc->sc_protrix].controlRate;
+		/* for XR VAP use different RTSCTS rates and calculate duration */
+		ctsrate = rt->info[cix].rateCode;
+		if (USE_SHPREAMBLE(ic))
+			ctsrate |= rt->info[cix].shortPreamble;
+		flags |= HAL_TXDESC_CTSENA;
+		rt = sc->sc_xr_rates;
+		ctsduration = ath_hal_computetxtime(ah,rt, pktlen,
+			IEEE80211_XR_DEFAULT_RATE_INDEX, AH_FALSE);
+		rate = rt->info[IEEE80211_XR_DEFAULT_RATE_INDEX].rateCode;
+	}
+#endif
+	ath_hal_setuptxdesc(ah, ds
+		, skb->len + IEEE80211_CRC_LEN	/* frame length */
+		, sizeof(struct ieee80211_frame)	/* header length */
+		, HAL_PKT_TYPE_BEACON		/* Atheros packet type */
+		, ni->ni_txpower		/* txpower XXX */
+		, rate, 1			/* series 0 rate/tries */
+		, HAL_TXKEYIX_INVALID		/* no encryption */
+		, antenna			/* antenna mode */
+		, flags				/* no ack, veol for beacons */
+		, ctsrate			/* rts/cts rate */
+		, ctsduration			/* rts/cts duration */
+		, 0				/* comp icv len */
+		, 0				/* comp iv len */
+		, ATH_COMP_PROC_NO_COMP_NO_CCS	/* comp scheme */
+	);
+
+	/* NB: beacon's BufLen must be a multiple of 4 bytes */
+	ath_hal_filltxdesc(ah, ds
+		, roundup(skb->len, 4)	/* buffer length */
+		, AH_TRUE		/* first segment */
+		, AH_TRUE		/* last segment */
+		, ds			/* first descriptor */
+	);
+
+	/* NB: The desc swap function becomes void, 
+	 * if descriptor swapping is not enabled
+	 */
+	ath_desc_swap(ds);
+#undef USE_SHPREAMBLE
+}
+
+/*
+ * Generate beacon frame and queue cab data for a VAP.
+ *
+ * Returns the beacon buffer ready for transmit, or NULL when no
+ * beacon should go out (VAP not RUN, XR pacing, no beacon buffer).
+ * At a DTIM the VAP's private multicast queue is moved onto the
+ * hardware CAB queue so the frames are gated by this beacon.
+ */
+static struct ath_buf *
+ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap, int *needmark)
+{
+	struct ath_hal *ah = sc->sc_ah;
+	struct ath_buf *bf;
+	struct ieee80211_node *ni;
+	struct ath_vap *avp;
+	struct sk_buff *skb;
+	int ncabq;
+	unsigned int curlen;
+
+	if (vap->iv_state != IEEE80211_S_RUN) {
+		DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: skip VAP in %s state\n",
+			__func__, ieee80211_state_name[vap->iv_state]);
+		return NULL;
+	}
+#ifdef ATH_SUPERG_XR
+	if (vap->iv_flags & IEEE80211_F_XR) {
+		vap->iv_xrbcnwait++;
+		/* wait for XR_BEACON_FACTOR times before sending the beacon */
+		if (vap->iv_xrbcnwait < IEEE80211_XR_BEACON_FACTOR)
+			return NULL;
+		vap->iv_xrbcnwait = 0;
+	}
+#endif
+	avp = ATH_VAP(vap);
+	if (avp->av_bcbuf == NULL) {
+		DPRINTF(sc, ATH_DEBUG_ANY, "%s: avp=%p av_bcbuf=%p\n",
+			 __func__, avp, avp->av_bcbuf);
+		return NULL;
+	}
+	bf = avp->av_bcbuf;
+	ni = bf->bf_node;
+
+#ifdef ATH_SUPERG_DYNTURBO
+	/* 
+	 * If we are using dynamic turbo, update the
+	 * capability info and arrange for a mode change
+	 * if needed.
+	 */
+	if (sc->sc_dturbo) {
+		u_int8_t dtim;
+		dtim = ((avp->av_boff.bo_tim[2] == 1) ||
+			(avp->av_boff.bo_tim[3] == 1));
+		ath_beacon_dturbo_update(vap, needmark, dtim);
+	}
+#endif
+	/*
+	 * Update dynamic beacon contents.  If this returns
+	 * non-zero then we need to remap the memory because
+	 * the beacon frame changed size (probably because
+	 * of the TIM bitmap).
+	 */
+	skb = bf->bf_skb;
+	curlen = skb->len;
+	ncabq = avp->av_mcastq.axq_depth;
+	if (ieee80211_beacon_update(ni, &avp->av_boff, skb, ncabq)) {
+		bus_unmap_single(sc->sc_bdev,
+			bf->bf_skbaddr, curlen, BUS_DMA_TODEVICE);
+		bf->bf_skbaddr = bus_map_single(sc->sc_bdev,
+			skb->data, skb->len, BUS_DMA_TODEVICE);
+	}
+
+	/*
+	 * if the CABQ traffic from previous DTIM is pending and the current
+	 * beacon is also a DTIM. 
+	 *  1) if there is only one VAP let the cab traffic continue. 
+	 *  2) if there are more than one VAP and we are using staggered
+	 *     beacons, then drain the cabq by dropping all the frames in
+	 *     the cabq so that the current VAP's cab traffic can be scheduled.
+	 * XXX: Need to handle the last MORE_DATA bit here.
+	 */
+	if (ncabq && (avp->av_boff.bo_tim[4] & 1) && sc->sc_cabq->axq_depth) {
+		if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) {
+			ath_tx_draintxq(sc, sc->sc_cabq);
+			DPRINTF(sc, ATH_DEBUG_BEACON,
+				"%s: flush previous cabq traffic\n", __func__);
+		}
+	}
+
+	/*
+	 * Construct tx descriptor.
+	 */
+	ath_beacon_setup(sc, bf);
+
+	bus_dma_sync_single(sc->sc_bdev,
+		bf->bf_skbaddr, bf->bf_skb->len, BUS_DMA_TODEVICE);
+
+	/*
+	 * Enable the CAB queue before the beacon queue to
+	 * ensure cab frames are triggered by this beacon.
+	 */
+	if (avp->av_boff.bo_tim[4] & 1)	{	/* NB: only at DTIM */
+		struct ath_txq *cabq = sc->sc_cabq;
+		struct ath_buf *bfmcast;
+		/*
+		 * Move everything from the VAP's mcast queue 
+		 * to the hardware cab queue.
+		 */
+		ATH_TXQ_LOCK(&avp->av_mcastq);
+		ATH_TXQ_LOCK(cabq);
+		bfmcast = STAILQ_FIRST(&avp->av_mcastq.axq_q);
+		/* NB: the mcast queue may be empty even at a DTIM; the
+		 * previous code dereferenced bfmcast unconditionally and
+		 * would oops on a NULL pointer here. */
+		if (bfmcast != NULL) {
+			/* link the descriptors */
+			if (cabq->axq_link == NULL)
+				ath_hal_puttxbuf(ah, cabq->axq_qnum, bfmcast->bf_daddr);
+			else {
+#ifdef AH_NEED_DESC_SWAP
+				*cabq->axq_link = cpu_to_le32(bfmcast->bf_daddr);
+#else
+				*cabq->axq_link = bfmcast->bf_daddr;
+#endif
+			}
+
+			/* Set the MORE_DATA bit for each packet except the last one */
+			STAILQ_FOREACH(bfmcast, &avp->av_mcastq.axq_q, bf_list) {
+				if (bfmcast != STAILQ_LAST(&avp->av_mcastq.axq_q, ath_buf, bf_list))
+					((struct ieee80211_frame *)bfmcast->bf_skb->data)->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
+			}
+
+			/* append the private VAP mcast list to the cabq */
+			ATH_TXQ_MOVE_MCASTQ(&avp->av_mcastq, cabq);
+			/* NB: gated by beacon so safe to start here */
+			ath_hal_txstart(ah, cabq->axq_qnum);
+		}
+		ATH_TXQ_UNLOCK(cabq);
+		ATH_TXQ_UNLOCK(&avp->av_mcastq);
+	}
+
+	return bf;
+}
+
+/*
+ * Transmit one or more beacon frames at SWBA.  Dynamic
+ * updates to the frame contents are done as needed and
+ * the slot time is also adjusted based on current state.
+ *
+ * With staggered beacons only the VAP owning the current TSF
+ * slot transmits; when bursting, one frame per running VAP is
+ * chained together and handed to the hardware in one go.
+ */
+static void
+ath_beacon_send(struct ath_softc *sc, int *needmark)
+{
+#define	TSF_TO_TU(_h,_l) \
+	((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
+	struct ath_hal *ah = sc->sc_ah;
+	struct ieee80211vap *vap;
+	struct ath_buf *bf;
+	int slot;
+	u_int32_t bfaddr;
+
+	/*
+	 * Check if the previous beacon has gone out.  If
+	 * not don't try to post another, skip this period
+	 * and wait for the next.  Missed beacons indicate
+	 * a problem and should not occur.  If we miss too
+	 * many consecutive beacons reset the device.
+	 */
+	if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
+		sc->sc_bmisscount++;
+		/* XXX: 802.11h needs the chanchange IE countdown decremented.
+		 *      We should consider adding a net80211 call to indicate
+		 *      a beacon miss so appropriate action could be taken
+		 *      (in that layer).
+		 */
+		DPRINTF(sc, ATH_DEBUG_BEACON_PROC,
+			"%s: missed %u consecutive beacons\n",
+			__func__, sc->sc_bmisscount);
+		if (sc->sc_bmisscount > BSTUCK_THRESH)
+			ATH_SCHEDULE_TQUEUE(&sc->sc_bstucktq, needmark);
+		return;
+	}
+	if (sc->sc_bmisscount != 0) {
+		DPRINTF(sc, ATH_DEBUG_BEACON_PROC,
+			"%s: resume beacon xmit after %u misses\n",
+			__func__, sc->sc_bmisscount);
+		sc->sc_bmisscount = 0;
+	}
+
+	/*
+	 * Generate beacon frames.  If we are sending frames
+	 * staggered then calculate the slot for this frame based
+	 * on the tsf to safeguard against missing an swba.
+	 * Otherwise we are bursting all frames together and need
+	 * to generate a frame for each VAP that is up and running.
+	 */
+	if (sc->sc_stagbeacons) {		/* staggered beacons */
+		struct ieee80211com *ic = &sc->sc_ic;
+		u_int64_t tsf;
+		u_int32_t tsftu;
+
+		tsf = ath_hal_gettsf64(ah);
+		tsftu = TSF_TO_TU(tsf >> 32, tsf);
+		slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval;
+		/* NOTE(review): the +1 offset appears to compensate for the
+		 * stagger alignment of VAP slots — confirm against the slot
+		 * assignment in ath_beacon_config/ath_beacon_alloc. */
+		vap = sc->sc_bslot[(slot + 1) % ATH_BCBUF];
+		DPRINTF(sc, ATH_DEBUG_BEACON_PROC,
+			"%s: slot %d [tsf %llu tsftu %u intval %u] vap %p\n",
+			__func__, slot, (long long) tsf, tsftu, ic->ic_lintval, vap);
+		bfaddr = 0;
+		if (vap != NULL) {
+			bf = ath_beacon_generate(sc, vap, needmark);
+			if (bf != NULL)
+				bfaddr = bf->bf_daddr;
+		}
+	} else {				/* burst'd beacons */
+		u_int32_t *bflink;
+
+		/* chain each VAP's beacon descriptor onto the previous
+		 * one via ds_link; bfaddr ends up as the head of chain */
+		bflink = &bfaddr;
+		/* XXX rotate/randomize order? */
+		for (slot = 0; slot < ATH_BCBUF; slot++) {
+			vap = sc->sc_bslot[slot];
+			if (vap != NULL) {
+				bf = ath_beacon_generate(sc, vap, needmark);
+				if (bf != NULL) {
+#ifdef AH_NEED_DESC_SWAP
+					if (bflink != &bfaddr)
+						*bflink = cpu_to_le32(bf->bf_daddr);
+					else
+						*bflink = bf->bf_daddr;
+#else
+					*bflink = bf->bf_daddr;
+#endif
+					bflink = &bf->bf_desc->ds_link;
+				}
+			}
+		}
+		*bflink = 0;			/* link of last frame */
+	}
+
+	/*
+	 * Handle slot time change when a non-ERP station joins/leaves
+	 * an 11g network.  The 802.11 layer notifies us via callback,
+	 * we mark updateslot, then wait one beacon before effecting
+	 * the change.  This gives associated stations at least one
+	 * beacon interval to note the state change.
+	 *
+	 * NB: The slot time change state machine is clocked according
+	 *     to whether we are bursting or staggering beacons.  We
+	 *     recognize the request to update and record the current
+	 *     slot then don't transition until that slot is reached
+	 *     again.  If we miss a beacon for that slot then we'll be
+	 *     slow to transition but we'll be sure at least one beacon
+	 *     interval has passed.  When bursting slot is always left
+	 *     set to ATH_BCBUF so this check is a no-op.
+	 */
+	/* XXX locking */
+	if (sc->sc_updateslot == UPDATE) {
+		sc->sc_updateslot = COMMIT;	/* commit next beacon */
+		sc->sc_slotupdate = slot;
+	} else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
+		ath_setslottime(sc);		/* commit change to hardware */
+
+	if ((!sc->sc_stagbeacons || slot == 0) && (!sc->sc_diversity)) {
+		int otherant;
+		/*
+		 * Check recent per-antenna transmit statistics and flip
+		 * the default rx antenna if noticeably more frames went out
+		 * on the non-default antenna.  Only do this if rx diversity
+		 * is off.
+		 * XXX assumes 2 antennae
+		 */
+		otherant = sc->sc_defant & 1 ? 2 : 1;
+		if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + ATH_ANTENNA_DIFF) {
+			DPRINTF(sc, ATH_DEBUG_BEACON,
+				"%s: flip defant to %u, %u > %u\n",
+				__func__, otherant, sc->sc_ant_tx[otherant],
+				sc->sc_ant_tx[sc->sc_defant]);
+			ath_setdefantenna(sc, otherant);
+		}
+		sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
+	}
+
+	if (bfaddr != 0) {
+		/*
+		 * Stop any current DMA and put the new frame(s) on the queue.
+		 * This should never fail since we check above that no frames
+		 * are still pending on the queue.
+		 */
+		if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
+			DPRINTF(sc, ATH_DEBUG_ANY,
+				"%s: beacon queue %u did not stop?\n",
+				__func__, sc->sc_bhalq);
+			/* NB: the HAL still stops DMA, so proceed */
+		}
+		/* NB: cabq traffic should already be queued and primed */
+		ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr);
+		ath_hal_txstart(ah, sc->sc_bhalq);
+
+		sc->sc_stats.ast_be_xmit++;		/* XXX per-VAP? */
+	}
+#undef TSF_TO_TU
+}
+
+/*
+ * Tasklet: reset the hardware after beacons have been detected
+ * as stuck (ath_beacon_send counted too many consecutive misses
+ * and scheduled us).
+ */
+static void
+ath_bstuck_tasklet(TQUEUE_ARG data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct ath_softc *sc = dev->priv;
+
+	/* If the miss counter was cleared while this tasklet was
+	 * pending there is nothing to recover from; only reset the
+	 * driver when the threshold is still exceeded. */
+	if (sc->sc_bmisscount > BSTUCK_THRESH) {
+		printk("%s: stuck beacon; resetting (bmiss count %u)\n",
+			dev->name, sc->sc_bmisscount);
+		ath_reset(dev);
+	}
+}
+
+/*
+ * Startup beacon transmission for adhoc mode when
+ * they are sent entirely by the hardware using the
+ * self-linked descriptor + veol trick.
+ *
+ * Refreshes the beacon contents, rebuilds the tx descriptor and
+ * loads it into the beacon queue once; the self-linked descriptor
+ * then lets the hardware retransmit it without software involvement.
+ */
+static void
+ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap)
+{
+	struct ath_hal *ah = sc->sc_ah;
+	struct ath_buf *bf;
+	struct ieee80211_node *ni;
+	struct ath_vap *avp;
+	struct sk_buff *skb;
+
+	avp = ATH_VAP(vap);
+	if (avp->av_bcbuf == NULL) {
+		DPRINTF(sc, ATH_DEBUG_ANY, "%s: avp=%p av_bcbuf=%p\n",
+			 __func__, avp, avp != NULL ? avp->av_bcbuf : NULL);
+		return;
+	}
+	bf = avp->av_bcbuf;
+	ni = bf->bf_node;
+
+	/*
+	 * Update dynamic beacon contents.  If this returns
+	 * non-zero then we need to remap the memory because
+	 * the beacon frame changed size (probably because
+	 * of the TIM bitmap).
+	 */
+	skb = bf->bf_skb;
+	if (ieee80211_beacon_update(ni, &avp->av_boff, skb, 0)) {
+		bus_unmap_single(sc->sc_bdev,
+			bf->bf_skbaddr, bf->bf_skb->len, BUS_DMA_TODEVICE);
+		bf->bf_skbaddr = bus_map_single(sc->sc_bdev,
+			skb->data, skb->len, BUS_DMA_TODEVICE);
+	}
+
+	/*
+	 * Construct tx descriptor.
+	 */
+	ath_beacon_setup(sc, bf);
+
+	/* flush CPU writes to the mapped frame before the h/w reads it */
+	bus_dma_sync_single(sc->sc_bdev,
+		bf->bf_skbaddr, bf->bf_skb->len, BUS_DMA_TODEVICE);
+
+	/* NB: caller is known to have already stopped tx DMA */
+	ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
+	ath_hal_txstart(ah, sc->sc_bhalq);
+	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: TXDP%u = %llx (%p)\n", __func__,
+		sc->sc_bhalq, ito64(bf->bf_daddr), bf->bf_desc);
+}
+
+/*
+ * Reclaim the resources held by a beacon buffer (DMA mapping, skb
+ * and node reference) and return the buffer to the free pool.
+ */
+static void
+ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf)
+{
+	struct sk_buff *skb = bf->bf_skb;
+	struct ieee80211_node *ni = bf->bf_node;
+
+	if (skb != NULL) {
+		/* unmap before freeing the frame */
+		bus_unmap_single(sc->sc_bdev,
+		    bf->bf_skbaddr, skb->len, BUS_DMA_TODEVICE);
+		dev_kfree_skb(skb);
+		bf->bf_skb = NULL;
+	}
+	if (ni != NULL) {
+		ieee80211_free_node(ni);
+		bf->bf_node = NULL;
+	}
+	STAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list);
+}
+
+/*
+ * Reclaim the resources (DMA mappings, skbs, node references) of
+ * every buffer on the beacon list; the buffers themselves remain
+ * on sc_bbuf.
+ */
+static void
+ath_beacon_free(struct ath_softc *sc)
+{
+	struct ath_buf *bf;
+
+	STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
+		struct sk_buff *skb = bf->bf_skb;
+
+		if (skb != NULL) {
+			bus_unmap_single(sc->sc_bdev,
+				bf->bf_skbaddr, skb->len, BUS_DMA_TODEVICE);
+			dev_kfree_skb(skb);
+			bf->bf_skb = NULL;
+		}
+		if (bf->bf_node != NULL) {
+			ieee80211_free_node(bf->bf_node);
+			bf->bf_node = NULL;
+		}
+	}
+}
+
+/*
+ * Configure the beacon and sleep timers.
+ *
+ * When operating as an AP this resets the TSF and sets
+ * up the hardware to notify us when we need to issue beacons.
+ *
+ * When operating in station mode this sets up the beacon
+ * timers according to the timestamp of the last received
+ * beacon and the current TSF, configures PCF and DTIM
+ * handling, programs the sleep registers so the hardware
+ * will wake up in time to receive beacons, and configures
+ * the beacon miss handling so we'll receive a BMISS
+ * interrupt when we stop seeing beacons from the AP
+ * we've associated with.
+ *
+ * Note: TBTT is Target Beacon Transmission Time (see IEEE
+ * 802.11-1999: 4 & 11.2.1.3).
+ */
+static void
+ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
+{
+/* NB: this macro is no longer referenced in the body below (the
+ * tsf >> 10 conversions are used instead); kept for symmetry with
+ * ath_beacon_send. */
+#define	TSF_TO_TU(_h,_l) \
+	((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ath_hal *ah = sc->sc_ah;
+	struct ieee80211_node *ni;
+	u_int32_t nexttbtt = 0;
+	u_int32_t intval;
+	u_int64_t tsf, hw_tsf;	/* last-beacon TSF vs. current hardware TSF */
+	u_int32_t tsftu, hw_tsftu;	/* same values converted to TU (1024us) */
+	int reset_tsf = 0;
+
+	if (vap == NULL)
+		vap = TAILQ_FIRST(&ic->ic_vaps);   /* XXX */
+
+	ni = vap->iv_bss;
+
+	hw_tsf = ath_hal_gettsf64(ah);
+	tsf = le64_to_cpu(ni->ni_tstamp.tsf);
+	hw_tsftu = hw_tsf >> 10;
+	tsftu = tsf >> 10;
+
+	/* We should reset hw TSF only once, so we increment
+	 * ni_tstamp.tsf to avoid resetting the hw TSF multiple
+	 * times (tsf == 1 below then means "no beacon received yet") */
+
+	if (tsf == 0) {
+		reset_tsf = 1;
+		ni->ni_tstamp.tsf = cpu_to_le64(1);
+	}
+
+	/* XXX: Conditionalize multi-bss support? */
+	if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
+		/* For multi-bss ap support beacons are either staggered
+		 * evenly over N slots or burst together.  For the former
+		 * arrange for the SWBA to be delivered for each slot.
+		 * Slots that are not occupied will generate nothing. */
+		/* NB: the beacon interval is kept internally in TU's */
+		intval = ic->ic_lintval & HAL_BEACON_PERIOD;
+		if (sc->sc_stagbeacons)
+			intval /= ATH_BCBUF;	/* for staggered beacons */
+		if ((sc->sc_nostabeacons) &&
+			(vap->iv_opmode == IEEE80211_M_HOSTAP))
+			reset_tsf = 1;
+	} else
+		intval = ni->ni_intval & HAL_BEACON_PERIOD;
+
+/* minimum headroom (in TU) required between now and the next TBTT */
+#define        FUDGE   2
+	sc->sc_syncbeacon = 0;
+
+	if (reset_tsf) {
+		/* We just created the interface and TSF will be reset to
+		* zero, so next beacon will be sent at the next intval
+		* time */
+		nexttbtt = intval;
+	} else if (intval) {    /* NB: can be 0 for monitor mode */
+		if (tsf == 1) {
+			/* We have not received any beacons or probe responses. 
+			* The next TBTT must be at least FUDGE ms ahead of the 
+			* hw_tsftu. Also, TSF == 0 is a TBTT - IEEE802.11-1999
+			* 11.1.2.2, although I'm not sure it applies here... */
+			nexttbtt = roundup(hw_tsftu + FUDGE, intval);
+		} else {
+			if (tsf > hw_tsf) {
+				/* We received a beacon, but the HW TSF has 
+				* not been updated (otherwise hw_tsf > tsf).
+				* We cannot use the hardware TSF, so we wait 
+				* to synchronise beacons again. */
+				sc->sc_syncbeacon = 1;
+				goto ath_beacon_config_debug;
+			} else {
+				/* Normal case: we received a beacon to which
+				* we have synchronised. Make sure that 
+				* nexttbtt is at least FUDGE ms ahead of 
+				* hw_tsf. */
+				nexttbtt = tsftu + roundup(hw_tsftu + 
+				FUDGE - tsftu, intval);
+			}
+		}
+	}
+
+	if (ic->ic_opmode == IEEE80211_M_STA &&	!(sc->sc_nostabeacons)) {
+		HAL_BEACON_STATE bs;
+		int dtimperiod, dtimcount;
+		int cfpperiod, cfpcount;
+
+		/* Setup DTIM and CFP parameters according to the last beacon 
+		 * we have received (which may not have happened). */
+		dtimperiod = vap->iv_dtim_period;
+		if (dtimperiod <= 0)		/* NB: 0 if not known */
+			dtimperiod = 1;
+		dtimcount = vap->iv_dtim_count;
+		if (dtimcount >= dtimperiod)	/* NB: sanity check */
+			dtimcount = 0;		/* XXX? */
+		cfpperiod = 1;			/* NB: no PCF support yet */
+		cfpcount = 0;
+		/*
+		 * Pull nexttbtt forward to reflect the current
+		 * TSF and calculate dtim+cfp state for the result.
+		 */
+		nexttbtt = tsftu;
+		if (nexttbtt == 0)		/* e.g. for ap mode */
+			nexttbtt = intval;
+		do {
+			nexttbtt += intval;
+			if (--dtimcount < 0) {
+				dtimcount = dtimperiod - 1;
+				if (--cfpcount < 0)
+					cfpcount = cfpperiod - 1;
+			}
+		} while (nexttbtt < hw_tsftu + FUDGE);
+#undef FUDGE
+		memset(&bs, 0, sizeof(bs));
+		bs.bs_intval = intval;
+		bs.bs_nexttbtt = nexttbtt;
+		bs.bs_dtimperiod = dtimperiod * intval;
+		bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount * intval;
+		bs.bs_cfpperiod = cfpperiod * bs.bs_dtimperiod;
+		bs.bs_cfpnext = bs.bs_nextdtim + cfpcount * bs.bs_dtimperiod;
+		bs.bs_cfpmaxduration = 0;
+#if 0
+		/*
+		 * The 802.11 layer records the offset to the DTIM
+		 * bitmap while receiving beacons; use it here to
+		 * enable h/w detection of our AID being marked in
+		 * the bitmap vector (to indicate frames for us are
+		 * pending at the AP).
+		 * XXX do DTIM handling in s/w to WAR old h/w bugs
+		 * XXX enable based on h/w rev for newer chips
+		 */
+		bs.bs_timoffset = ni->ni_timoff;
+#endif
+		/*
+		 * Calculate the number of consecutive beacons to miss
+		 * before taking a BMISS interrupt.  The configuration
+		 * is specified in TU so we only need calculate based
+		 * on the beacon interval.  Note that we clamp the
+		 * result to at most 10 beacons.
+		 */
+		bs.bs_bmissthreshold = howmany(ic->ic_bmisstimeout, intval);
+		if (bs.bs_bmissthreshold > 10)
+			bs.bs_bmissthreshold = 10;
+		else if (bs.bs_bmissthreshold < 2)
+			bs.bs_bmissthreshold = 2;
+
+		/*
+		 * Calculate sleep duration.  The configuration is
+		 * given in ms.  We ensure a multiple of the beacon
+		 * period is used.  Also, if the sleep duration is
+		 * greater than the DTIM period then it makes senses
+		 * to make it a multiple of that.
+		 *
+		 * XXX fixed at 100ms
+		 */
+		bs.bs_sleepduration =
+			roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval);
+		if (bs.bs_sleepduration > bs.bs_dtimperiod)
+			bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);
+
+		DPRINTF(sc, ATH_DEBUG_BEACON, 
+			"%s: tsf %llu tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
+			, __func__
+			, (long long) tsf, tsftu
+			, bs.bs_intval
+			, bs.bs_nexttbtt
+			, bs.bs_dtimperiod
+			, bs.bs_nextdtim
+			, bs.bs_bmissthreshold
+			, bs.bs_sleepduration
+			, bs.bs_cfpperiod
+			, bs.bs_cfpmaxduration
+			, bs.bs_cfpnext
+			, bs.bs_timoffset
+		);
+
+		/* hold off s/w bmiss processing until the h/w has had a
+		 * fair chance to observe the configured threshold */
+		ic->ic_bmiss_guard = jiffies +
+			IEEE80211_TU_TO_JIFFIES(bs.bs_intval * bs.bs_bmissthreshold);
+
+		ath_hal_intrset(ah, 0);
+		ath_hal_beacontimers(ah, &bs);
+		sc->sc_imask |= HAL_INT_BMISS;
+		ath_hal_intrset(ah, sc->sc_imask);
+	} else {
+		ath_hal_intrset(ah, 0);
+		if (reset_tsf)
+			intval |= HAL_BEACON_RESET_TSF;
+		if (ic->ic_opmode == IEEE80211_M_IBSS) {
+			/*
+			 * In IBSS mode enable the beacon timers but only
+			 * enable SWBA interrupts if we need to manually
+			 * prepare beacon frames.  Otherwise we use a
+			 * self-linked tx descriptor and let the hardware
+			 * deal with things.
+			 */
+			intval |= HAL_BEACON_ENA;
+			if (!sc->sc_hasveol)
+				sc->sc_imask |= HAL_INT_SWBA;
+			ath_beaconq_config(sc);
+		} else if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
+			/*
+			 * In AP mode we enable the beacon timers and
+			 * SWBA interrupts to prepare beacon frames.
+			 */
+			intval |= HAL_BEACON_ENA;
+			sc->sc_imask |= HAL_INT_SWBA;	/* beacon prepare */
+			ath_beaconq_config(sc);
+		}
+#ifdef ATH_SUPERG_DYNTURBO
+		ath_beacon_dturbo_config(vap, intval & 
+				~(HAL_BEACON_RESET_TSF | HAL_BEACON_ENA));
+#endif
+		ath_hal_beaconinit(ah, nexttbtt, intval);
+		sc->sc_bmisscount = 0;
+		ath_hal_intrset(ah, sc->sc_imask);
+		/*
+		 * When using a self-linked beacon descriptor in
+		 * ibss mode load it once here.
+		 */
+		if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol)
+			ath_beacon_start_adhoc(sc, vap);
+	}
+#undef TSF_TO_TU
+
+	ath_beacon_config_debug:
+	/* We print all debug messages here, in order to preserve the
+	 * time critical aspect of this function */
+	DPRINTF(sc, ATH_DEBUG_BEACON,
+		"%s: ni=%p tsf=%llu hw_tsf=%llu tsftu=%u hw_tsftu=%u\n",
+		__func__, ni, tsf, hw_tsf, tsftu, hw_tsftu);
+
+	if (reset_tsf)
+		/* We just created the interface */
+		DPRINTF(sc, ATH_DEBUG_BEACON, "%s: first beacon\n", __func__);
+	else if (tsf == 1)
+		/* We have not received any beacons or probe responses */
+		DPRINTF(sc, ATH_DEBUG_BEACON,
+				"%s: no beacon received...\n",__func__);
+	else if (tsf > hw_tsf)
+		/* We received a beacon but the hw TSF has not been updated */
+		DPRINTF(sc, ATH_DEBUG_BEACON,
+				"%s: beacon received, but TSF is incorrect\n", 
+				__func__);
+	else
+		/* We received a beacon in the past; normal case */
+		DPRINTF(sc, ATH_DEBUG_BEACON,
+				"%s: beacon received, TSF is correct\n", 
+				__func__);
+
+	DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt=%u intval=%u\n",
+		__func__, nexttbtt, intval & HAL_BEACON_PERIOD);
+}
+
+/*
+ * Allocate a consistent-DMA descriptor region and an array of ath_buf
+ * entries (one per 'nbuf', each owning 'ndesc' descriptors), linking
+ * each buffer's bf_desc/bf_daddr and queueing them onto 'head'.
+ * Returns 0, or -ENOMEM with *dd zeroed on failure.
+ */
+static int
+ath_descdma_setup(struct ath_softc *sc,
+	struct ath_descdma *dd, ath_bufhead *head,
+	const char *name, int nbuf, int ndesc)
+{
+/* translate a descriptor's kernel address into its bus address */
+#define	DS2PHYS(_dd, _ds) \
+	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
+	struct ath_desc *ds;
+	struct ath_buf *bf;
+	int i, bsize, error;
+
+	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n",
+		__func__, name, nbuf, ndesc);
+
+	dd->dd_name = name;
+	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
+
+	/* allocate descriptors */
+	dd->dd_desc = bus_alloc_consistent(sc->sc_bdev,
+		dd->dd_desc_len, &dd->dd_desc_paddr);
+	if (dd->dd_desc == NULL) {
+		error = -ENOMEM;
+		goto fail;
+	}
+	ds = dd->dd_desc;
+	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %llx (%lu)\n",
+		__func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
+		ito64(dd->dd_desc_paddr), /*XXX*/ (u_long) dd->dd_desc_len);
+
+	/* allocate buffers */
+	bsize = sizeof(struct ath_buf) * nbuf;
+	bf = kmalloc(bsize, GFP_KERNEL);
+	if (bf == NULL) {
+		error = -ENOMEM;		/* XXX different code */
+		goto fail2;
+	}
+	memset(bf, 0, bsize);
+	dd->dd_bufptr = bf;
+
+	/* hand each buffer its slice of the descriptor region */
+	STAILQ_INIT(head);
+	for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
+		bf->bf_desc = ds;
+		bf->bf_daddr = DS2PHYS(dd, ds);
+		STAILQ_INSERT_TAIL(head, bf, bf_list);
+	}
+	return 0;
+fail2:
+	bus_free_consistent(sc->sc_bdev, dd->dd_desc_len,
+		dd->dd_desc, dd->dd_desc_paddr);
+fail:
+	memset(dd, 0, sizeof(*dd));
+	return error;
+#undef DS2PHYS
+}
+
+/*
+ * Tear down a descriptor DMA region created by ath_descdma_setup():
+ * unmap and free any skb still attached to each buffer, drop node
+ * references, release the consistent descriptor memory and the
+ * ath_buf array, then zero *dd.  'dir' is the DMA direction used
+ * when the buffers were mapped.
+ */
+static void
+ath_descdma_cleanup(struct ath_softc *sc,
+	struct ath_descdma *dd, ath_bufhead *head, int dir)
+{
+	struct ath_buf *bf;
+	struct ieee80211_node *ni;
+
+	STAILQ_FOREACH(bf, head, bf_list) {
+		if (bf->bf_skb != NULL) {
+			/* XXX skb->len is not good enough for rxbuf:
+			 * rx skbs were mapped with sc_rxbufsize, not
+			 * their current length */
+			if (dd == &sc->sc_rxdma)
+				bus_unmap_single(sc->sc_bdev,
+					bf->bf_skbaddr, sc->sc_rxbufsize, dir);
+			else
+				bus_unmap_single(sc->sc_bdev,
+					bf->bf_skbaddr, bf->bf_skb->len, dir);
+			dev_kfree_skb(bf->bf_skb);
+			bf->bf_skb = NULL;
+		}
+		ni = bf->bf_node;
+		bf->bf_node = NULL;
+		if (ni != NULL) {
+			/*
+			 * Reclaim node reference.
+			 */
+			ieee80211_free_node(ni);
+		}
+	}
+
+	/* Free memory associated with descriptors */
+	bus_free_consistent(sc->sc_bdev, dd->dd_desc_len,
+		dd->dd_desc, dd->dd_desc_paddr);
+
+	STAILQ_INIT(head);
+	kfree(dd->dd_bufptr);
+	memset(dd, 0, sizeof(*dd));
+}
+
+/*
+ * Allocate the rx, tx and beacon descriptor/buffer pools.  On any
+ * failure the pools allocated so far are torn down again.  Returns
+ * 0 on success or the error from ath_descdma_setup().
+ */
+static int
+ath_desc_alloc(struct ath_softc *sc)
+{
+	int error;
+
+	error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
+			"rx", ATH_RXBUF, 1);
+	if (error != 0)
+		goto out;
+
+	error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
+			"tx", ATH_TXBUF, ATH_TXDESC);
+	if (error != 0)
+		goto fail_rx;
+
+	/* XXX allocate beacon state together with VAP */
+	error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
+			"beacon", ATH_BCBUF, 1);
+	if (error != 0)
+		goto fail_tx;
+
+	return 0;
+
+fail_tx:
+	ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf,
+		BUS_DMA_TODEVICE);
+fail_rx:
+	ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
+		BUS_DMA_FROMDEVICE);
+out:
+	return error;
+}
+
+/*
+ * Release all descriptor pools allocated by ath_desc_alloc().
+ * A non-zero dd_desc_len marks a pool that was actually set up,
+ * so partial allocations are handled safely.
+ */
+static void
+ath_desc_free(struct ath_softc *sc)
+{
+	if (sc->sc_bdma.dd_desc_len != 0)
+		ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf,
+			BUS_DMA_TODEVICE);
+	if (sc->sc_txdma.dd_desc_len != 0)
+		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf,
+			BUS_DMA_TODEVICE);
+	if (sc->sc_rxdma.dd_desc_len != 0)
+		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
+			BUS_DMA_FROMDEVICE);
+}
+
+/*
+ * Allocate a driver node (ath_node wrapping ieee80211_node) plus the
+ * rate-control module's per-node state (arc_space).  Uses GFP_ATOMIC
+ * since this may be called from non-sleepable context.  Returns the
+ * embedded ieee80211_node, or NULL on allocation failure.
+ */
+static struct ieee80211_node *
+ath_node_alloc(struct ieee80211_node_table *nt,struct ieee80211vap *vap)
+{
+	struct ath_softc *sc = nt->nt_ic->ic_dev->priv;
+	const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
+	struct ath_node *an;
+
+	an = kmalloc(space, GFP_ATOMIC);
+	if (an == NULL)
+		return NULL;
+	memset(an, 0, space);
+	an->an_decomp_index = INVALID_DECOMP_INDEX;
+	/* seed the averaged RSSI trackers with the "no data yet" marker */
+	an->an_avgrssi = ATH_RSSI_DUMMY_MARKER;
+	an->an_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
+	an->an_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
+	an->an_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
+	/*
+	 * ath_rate_node_init needs a VAP pointer in node
+	 * to decide which mgt rate to use
+	 */
+	an->an_node.ni_vap = vap;
+	sc->sc_rc->ops->node_init(sc, an);
+
+	/* U-APSD init */
+	STAILQ_INIT(&an->an_uapsd_q);
+	an->an_uapsd_qdepth = 0;
+	STAILQ_INIT(&an->an_uapsd_overflowq);
+	an->an_uapsd_overflowqdepth = 0;
+	ATH_NODE_UAPSD_LOCK_INIT(an);
+
+	DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
+	return &an->an_node;
+}
+
+/*
+ * Reclaim per-node U-APSD state: clear the node's trigger/SP flags,
+ * flush both U-APSD queues back onto the free tx buffer list (dropping
+ * one node reference per queued frame), then chain to the saved
+ * 802.11-layer cleanup handler.
+ */
+static void
+ath_node_cleanup(struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ath_softc *sc = ni->ni_ic->ic_dev->priv;
+	struct ath_node *an = ATH_NODE(ni);
+	struct ath_buf *bf;
+	
+	/*
+	 * U-APSD cleanup
+	 */
+	ATH_NODE_UAPSD_LOCK_IRQ(an);
+	if (ni->ni_flags & IEEE80211_NODE_UAPSD_TRIG) {
+		ni->ni_flags &= ~IEEE80211_NODE_UAPSD_TRIG;
+		ic->ic_uapsdmaxtriggers--;
+		ni->ni_flags &= ~IEEE80211_NODE_UAPSD_SP;
+	}
+	ATH_NODE_UAPSD_UNLOCK_IRQ(an);
+	/* drain the primary U-APSD queue */
+	while (an->an_uapsd_qdepth) {
+		bf = STAILQ_FIRST(&an->an_uapsd_q);
+		STAILQ_REMOVE_HEAD(&an->an_uapsd_q, bf_list);
+		bf->bf_desc->ds_link = 0;
+
+		dev_kfree_skb_any(bf->bf_skb);
+		bf->bf_skb = NULL;
+		bf->bf_node = NULL;
+		ATH_TXBUF_LOCK_IRQ(sc);
+		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
+		ATH_TXBUF_UNLOCK_IRQ(sc);
+		/* each queued frame held a node reference */
+		ieee80211_free_node(ni);
+
+		an->an_uapsd_qdepth--;
+	}
+
+	/* drain the overflow queue the same way */
+	while (an->an_uapsd_overflowqdepth) {
+		bf = STAILQ_FIRST(&an->an_uapsd_overflowq);
+		STAILQ_REMOVE_HEAD(&an->an_uapsd_overflowq, bf_list);
+		bf->bf_desc->ds_link = 0;
+
+		dev_kfree_skb_any(bf->bf_skb);
+		bf->bf_skb = NULL;
+		bf->bf_node = NULL;
+		ATH_TXBUF_LOCK_IRQ(sc);
+		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
+		ATH_TXBUF_UNLOCK_IRQ(sc);
+		ieee80211_free_node(ni);
+
+		an->an_uapsd_overflowqdepth--;
+	}
+
+	/* call the saved 802.11-layer cleanup under the U-APSD lock */
+	ATH_NODE_UAPSD_LOCK_IRQ(an);
+	sc->sc_node_cleanup(ni);
+	ATH_NODE_UAPSD_UNLOCK_IRQ(an);
+}
+
+/* Final node teardown: release the rate-control module's per-node
+ * state, then chain to the net80211 free routine saved at attach. */
+static void
+ath_node_free(struct ieee80211_node *ni)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ath_softc *sc = ic->ic_dev->priv;
+
+	/* Rate-control state must be torn down before the node memory. */
+	sc->sc_rc->ops->node_cleanup(sc, ATH_NODE(ni));
+	sc->sc_node_free(ni);
+#ifdef ATH_SUPERG_XR
+	/* The station mix changed; recompute the XR group-poll period. */
+	ath_grppoll_period_update(sc);
+#endif
+}
+
+/* Return the node's RSSI: the exponentially-filtered average kept in
+ * fixed point (rounded to nearest), or the 802.11 layer's last sample
+ * when no filtered state exists yet; clamped to 0..127. */
+static u_int8_t
+ath_node_getrssi(const struct ieee80211_node *ni)
+{
+	const u_int32_t avgrssi = ATH_NODE_CONST(ni)->an_avgrssi;
+	int32_t rssi;
+
+	if (avgrssi == ATH_RSSI_DUMMY_MARKER) {
+		/* Only one frame seen so far: no filter state; fall back
+		 * on the value recorded by the 802.11 layer. */
+		rssi = ni->ni_rssi;
+	} else {
+		/* Round the fixed-point average to the nearest integer. */
+		if ((avgrssi % HAL_RSSI_EP_MULTIPLIER) >=
+		    (HAL_RSSI_EP_MULTIPLIER / 2))
+			rssi = (avgrssi + HAL_RSSI_EP_MULTIPLIER - 1) /
+				HAL_RSSI_EP_MULTIPLIER;
+		else
+			rssi = avgrssi / HAL_RSSI_EP_MULTIPLIER;
+	}
+	/* NB: theoretically we shouldn't need this, but be paranoid */
+	if (rssi < 0)
+		return 0;
+	if (rssi > 127)
+		return 127;
+	return rssi;
+}
+
+
+#ifdef ATH_SUPERG_XR
+/*
+ * Stops the txqs and moves data between XR and Normal queues.
+ * Also adjusts the rate info in the descriptors.
+ */
+
+/*
+ * Move all tx buffers destined for 'ni' between the XR queue and the
+ * normal WME queues when the node changes mode, relinking descriptor
+ * chains and restarting DMA.  NB: the whole body is compiled out
+ * (#ifdef NOT_YET); as shipped this function always returns 0.
+ */
+static u_int8_t
+ath_node_move_data(const struct ieee80211_node *ni)
+{
+#ifdef NOT_YET
+	struct ath_txq *txq = NULL; 
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ath_softc *sc = ic->ic_dev->priv;
+	struct ath_buf *bf, *prev, *bf_tmp, *bf_tmp1;
+	struct ath_hal *ah = sc->sc_ah;
+	struct sk_buff *skb = NULL;
+	struct ath_desc *ds;
+	HAL_STATUS status;
+	int index;
+
+	if (ni->ni_vap->iv_flags & IEEE80211_F_XR) {
+		struct ath_txq tmp_q; 
+		memset(&tmp_q, 0, sizeof(tmp_q));
+		STAILQ_INIT(&tmp_q.axq_q);
+		/*
+		 * move data from Normal txqs to XR queue.
+		 */
+		printk("move data from NORMAL to XR\n");
+		/*
+		 * collect all the data towards the node
+		 * in to the tmp_q.
+		 */
+		index = WME_AC_VO;
+		/* NOTE(review): 'index' is never decremented in this loop,
+		 * and after the first pass txq == sc->sc_ac2q[index], so
+		 * only the WME_AC_VO queue is processed — confirm whether
+		 * walking VO down to BE was intended. */
+		while (index >= WME_AC_BE && txq != sc->sc_ac2q[index]) { 
+			txq = sc->sc_ac2q[index]; 
+			ATH_TXQ_LOCK(txq);
+			ath_hal_stoptxdma(ah, txq->axq_qnum);
+			bf = prev = STAILQ_FIRST(&txq->axq_q);
+			/*
+			 * skip all the buffers that are done
+			 * until the first one that is in progress
+			 */
+			while (bf) {
+#ifdef ATH_SUPERG_FF
+				ds = &bf->bf_desc[bf->bf_numdesc - 1];
+#else
+				ds = bf->bf_desc;		/* NB: last descriptor */
+#endif
+				status = ath_hal_txprocdesc(ah, ds);
+				if (status == HAL_EINPROGRESS)
+					break; 
+				prev = bf;
+				bf = STAILQ_NEXT(bf,bf_list);
+			}
+			/* 
+			 * save the pointer to the last buf that's
+			 * done
+			 */
+			if (prev == bf)
+				bf_tmp = NULL;  
+			else
+				bf_tmp=prev;
+			while (bf) {
+				if (ni == bf->bf_node) {
+					if (prev == bf) {
+						ATH_TXQ_REMOVE_HEAD(txq, bf_list);
+						STAILQ_INSERT_TAIL(&tmp_q.axq_q, bf, bf_list);
+						bf = STAILQ_FIRST(&txq->axq_q);
+						prev = bf;
+					} else {
+						STAILQ_REMOVE_AFTER(&(txq->axq_q), prev, bf_list);
+						txq->axq_depth--;
+						STAILQ_INSERT_TAIL(&tmp_q.axq_q, bf, bf_list);
+						bf = STAILQ_NEXT(prev, bf_list);
+						/*
+						 * after deleting the node
+						 * link the descriptors
+						 */
+#ifdef ATH_SUPERG_FF
+						ds = &prev->bf_desc[prev->bf_numdesc - 1];
+#else
+						ds = prev->bf_desc;	/* NB: last descriptor */
+#endif
+#ifdef AH_NEED_DESC_SWAP
+						ds->ds_link = cpu_to_le32(bf->bf_daddr);
+#else
+						ds->ds_link = bf->bf_daddr;
+#endif
+					}
+				} else {
+					prev = bf;
+					bf = STAILQ_NEXT(bf, bf_list);
+				}
+			}
+			/*
+			 * if the last buf was deleted.
+			 * set the pointer to the last descriptor.
+			 */
+			bf = STAILQ_FIRST(&txq->axq_q);
+			if (bf) {
+				if (prev) {
+					bf = STAILQ_NEXT(prev, bf_list);
+					if (!bf) { /* prev is the last one on the list */
+#ifdef ATH_SUPERG_FF
+						ds = &prev->bf_desc[prev->bf_numdesc - 1];
+#else
+						ds = prev->bf_desc;	/* NB: last descriptor */
+#endif
+						status = ath_hal_txprocdesc(ah, ds);
+						if (status == HAL_EINPROGRESS) 
+							txq->axq_link = &ds->ds_link;
+						else
+							txq->axq_link = NULL;	
+					}
+				} 
+			} else
+				txq->axq_link = NULL;
+
+			ATH_TXQ_UNLOCK(txq);
+			/*
+			 * restart the DMA from the first 
+			 * buffer that was not DMA'd.
+			 */
+			if (bf_tmp)
+				bf = STAILQ_NEXT(bf_tmp, bf_list);
+			else
+				bf = STAILQ_FIRST(&txq->axq_q);
+			if (bf) {	
+				ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
+				ath_hal_txstart(ah, txq->axq_qnum);
+			}
+		}
+		/* 
+		 * queue them on to the XR txqueue. 
+		 * can not directly put them on to the XR txq. since the
+		 * skb data size may be greater than the XR fragmentation
+		 * threshold size.
+		 */
+		bf  = STAILQ_FIRST(&tmp_q.axq_q);
+		index = 0;
+		while (bf) {
+			skb = bf->bf_skb;
+			bf->bf_skb = NULL;
+			bf->bf_node = NULL;
+			ATH_TXBUF_LOCK(sc);
+			STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
+			ATH_TXBUF_UNLOCK(sc);
+			/* re-send via the normal start path so the frame is
+			 * re-fragmented for the XR threshold */
+			ath_hardstart(skb,sc->sc_dev);
+			ATH_TXQ_REMOVE_HEAD(&tmp_q, bf_list);
+			bf = STAILQ_FIRST(&tmp_q.axq_q);
+			index++;
+		}
+		printk("moved %d buffers from NORMAL to XR\n", index);
+	} else {
+		struct ath_txq wme_tmp_qs[WME_AC_VO+1]; 
+		struct ath_txq *wmeq = NULL, *prevq; 
+		struct ieee80211_frame *wh;
+		struct ath_desc *ds = NULL;
+		int count = 0;
+
+		/*
+		 * move data from XR txq to Normal txqs.
+		 */
+		printk("move buffers from XR to NORMAL\n");
+		memset(&wme_tmp_qs, 0, sizeof(wme_tmp_qs));
+		for (index = 0; index <= WME_AC_VO; index++)
+			STAILQ_INIT(&wme_tmp_qs[index].axq_q);
+		txq = sc->sc_xrtxq; 
+		ATH_TXQ_LOCK(txq);
+		ath_hal_stoptxdma(ah, txq->axq_qnum);
+		bf = prev = STAILQ_FIRST(&txq->axq_q);
+		/*
+		 * skip all the buffers that are done
+		 * until the first one that is in progress
+		 */
+		while (bf) {
+#ifdef ATH_SUPERG_FF
+			ds = &bf->bf_desc[bf->bf_numdesc - 1];
+#else
+			ds = bf->bf_desc;		/* NB: last descriptor */
+#endif
+			status = ath_hal_txprocdesc(ah, ds);
+			if (status == HAL_EINPROGRESS)
+				break;
+			prev= bf;
+			bf = STAILQ_NEXT(bf,bf_list);
+		}
+		/* 
+		 * save the pointer to the last buf that's
+		 * done
+		 */
+		if (prev == bf)
+			bf_tmp1 = NULL;  
+		else
+			bf_tmp1 = prev;
+		/*
+		 * collect all the data in to four temp SW queues.
+		 */
+		while (bf) {
+			if (ni == bf->bf_node) {
+				if (prev == bf) {
+					STAILQ_REMOVE_HEAD(&txq->axq_q,bf_list);
+					bf_tmp=bf;
+					bf = STAILQ_FIRST(&txq->axq_q);
+					prev = bf;
+				} else {
+					STAILQ_REMOVE_AFTER(&(txq->axq_q),prev,bf_list);
+					bf_tmp=bf;
+					bf = STAILQ_NEXT(prev,bf_list);
+				}
+				count++;
+				skb = bf_tmp->bf_skb;
+				wh = (struct ieee80211_frame *) skb->data;
+				if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
+					/* XXX validate skb->priority, remove mask */
+					wmeq = &wme_tmp_qs[skb->priority & 0x3];
+				} else
+					wmeq = &wme_tmp_qs[WME_AC_BE];
+				STAILQ_INSERT_TAIL(&wmeq->axq_q, bf_tmp, bf_list);
+				ds = bf_tmp->bf_desc;
+				/* 
+				 * link the descriptors
+				 */
+				if (wmeq->axq_link != NULL) {
+#ifdef AH_NEED_DESC_SWAP
+					*wmeq->axq_link = cpu_to_le32(bf_tmp->bf_daddr);
+#else
+					*wmeq->axq_link = bf_tmp->bf_daddr;
+#endif
+					DPRINTF(sc, ATH_DEBUG_XMIT, "%s: link[%u](%p)=%p (%p)\n",
+							__func__,
+							wmeq->axq_qnum, wmeq->axq_link,
+							(caddr_t)bf_tmp->bf_daddr, bf_tmp->bf_desc);
+				}
+				wmeq->axq_link = &ds->ds_link;
+				/* 
+				 * update the rate information  
+				 */
+			} else {
+				prev = bf;
+				bf = STAILQ_NEXT(bf, bf_list);
+			}
+		}
+		/*
+		 * reset the axq_link pointer to the last descriptor.
+		 */
+		bf = STAILQ_FIRST(&txq->axq_q);
+		if (bf) {
+			if (prev) {
+				bf = STAILQ_NEXT(prev, bf_list);
+				if (!bf) { /* prev is the last one on the list */
+#ifdef ATH_SUPERG_FF
+					ds = &prev->bf_desc[prev->bf_numdesc - 1];
+#else
+					ds = prev->bf_desc;	/* NB: last descriptor */
+#endif
+					status = ath_hal_txprocdesc(ah, ds);
+					if (status == HAL_EINPROGRESS) 
+						txq->axq_link = &ds->ds_link;
+					else
+						txq->axq_link = NULL;
+				} 
+			} 
+		} else {
+			/*
+			 * if the list is empty reset the pointer.
+			 */
+			txq->axq_link = NULL;
+		}
+		ATH_TXQ_UNLOCK(txq);
+		/*
+		 * restart the DMA from the first 
+		 * buffer that was not DMA'd.
+		 */
+		if (bf_tmp1)
+			bf = STAILQ_NEXT(bf_tmp1,bf_list);
+		else
+			bf = STAILQ_FIRST(&txq->axq_q);
+		if (bf) {	
+			ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
+			ath_hal_txstart(ah, txq->axq_qnum);
+		}
+
+		/* 
+		 * move (concant) the lists from the temp sw queues in to
+		 * WME queues.  Walks WME_AC_VO down to WME_AC_BE; 'prevq'
+		 * avoids stopping/starting the same hw queue twice when
+		 * two ACs map to one hw queue.
+		 */
+		index = WME_AC_VO;
+		txq = NULL;
+		while (index >= WME_AC_BE ) { 
+			prevq = txq;
+			txq = sc->sc_ac2q[index];
+			if (txq != prevq) {
+				ATH_TXQ_LOCK(txq);
+				ath_hal_stoptxdma(ah, txq->axq_qnum);
+			}
+			
+			wmeq = &wme_tmp_qs[index];
+			bf = STAILQ_FIRST(&wmeq->axq_q);
+			if (bf) {
+				ATH_TXQ_MOVE_Q(wmeq,txq);
+				if (txq->axq_link != NULL) {
+#ifdef AH_NEED_DESC_SWAP
+					*(txq->axq_link) = cpu_to_le32(bf->bf_daddr);
+#else
+					*(txq->axq_link) = bf->bf_daddr;
+#endif
+				} 
+			}
+			if (index == WME_AC_BE || txq != prevq) {
+				/* 
+				 * find the first buffer to be DMA'd.
+				 */
+				bf = STAILQ_FIRST(&txq->axq_q);
+				while (bf) {
+#ifdef ATH_SUPERG_FF
+					ds = &bf->bf_desc[bf->bf_numdesc - 1];
+#else
+					ds = bf->bf_desc;	/* NB: last descriptor */
+#endif
+					status = ath_hal_txprocdesc(ah, ds);
+					if (status == HAL_EINPROGRESS)
+						break; 
+					bf = STAILQ_NEXT(bf,bf_list);
+				}
+				if (bf) {
+					ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
+					ath_hal_txstart(ah, txq->axq_qnum);
+				}
+				ATH_TXQ_UNLOCK(txq);
+			}
+			index--;
+		}
+		printk("moved %d buffers from XR to NORMAL\n", count);
+	}
+#endif
+	return 0;
+}
+#endif
+
+/* Allocate an skb of at least 'size' bytes whose data pointer is
+ * aligned to 'align' bytes: over-allocate by align-1, then reserve
+ * the slack needed to reach the next aligned address. */
+static struct sk_buff *
+ath_alloc_skb(u_int size, u_int align)
+{
+	struct sk_buff *skb;
+	u_int misalign;
+
+	skb = dev_alloc_skb(size + align - 1);
+	if (skb == NULL)
+		return NULL;
+	misalign = ((unsigned long) skb->data) % align;
+	if (misalign != 0)
+		skb_reserve(skb, align - misalign);
+	return skb;
+}
+
+/*
+ * Attach a receive skb to an rx descriptor and append the descriptor
+ * to the hardware rx chain.  If the ath_buf has no skb yet, one is
+ * allocated (with extra headroom for capture headers when a monitor
+ * VAP exists) and DMA-mapped.  Returns 0, or -ENOMEM on allocation
+ * failure.
+ */
+static int
+ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
+{
+	struct ath_hal *ah = sc->sc_ah;
+	struct sk_buff *skb;
+	struct ath_desc *ds;
+
+	skb = bf->bf_skb;
+	if (skb == NULL) {
+ 		if (sc->sc_nmonvaps > 0) {
+ 			u_int off;
+			int extra = A_MAX(sizeof(struct ath_rx_radiotap_header), 
+					  A_MAX(sizeof(wlan_ng_prism2_header), ATHDESC_HEADER_SIZE));
+						
+ 			/*
+ 			 * Allocate buffer for monitor mode with space for the
+			 * wlan-ng style physical layer header at the start.
+ 			 */
+ 			skb = dev_alloc_skb(sc->sc_rxbufsize + extra + sc->sc_cachelsz - 1);
+ 			if (skb == NULL) {
+ 				DPRINTF(sc, ATH_DEBUG_ANY,
+					"%s: skbuff alloc of size %u failed\n",
+					__func__,
+					sc->sc_rxbufsize + extra + sc->sc_cachelsz - 1);
+ 				sc->sc_stats.ast_rx_nobuf++;
+ 				return -ENOMEM;
+ 			}
+ 			/*
+			 * Reserve space for the Prism header.
+ 			 */
+ 			skb_reserve(skb, sizeof(wlan_ng_prism2_header));
+			/*
+ 			 * Align to cache line.
+			 */
+ 			off = ((unsigned long) skb->data) % sc->sc_cachelsz;
+ 			if (off != 0)
+ 				skb_reserve(skb, sc->sc_cachelsz - off);
+		} else {
+			/*
+			 * Cache-line-align.  This is important (for the
+			 * 5210 at least) as not doing so causes bogus data
+			 * in rx'd frames.
+			 */
+			skb = ath_alloc_skb(sc->sc_rxbufsize, sc->sc_cachelsz);
+			if (skb == NULL) {
+				DPRINTF(sc, ATH_DEBUG_ANY,
+					"%s: skbuff alloc of size %u failed\n",
+					__func__, sc->sc_rxbufsize);
+				sc->sc_stats.ast_rx_nobuf++;
+				return -ENOMEM;
+			}
+		}
+		skb->dev = sc->sc_dev;
+		bf->bf_skb = skb;
+		/* map the buffer for DMA from the device */
+		bf->bf_skbaddr = bus_map_single(sc->sc_bdev,
+			skb->data, sc->sc_rxbufsize, BUS_DMA_FROMDEVICE);
+	}
+
+	/*
+	 * Setup descriptors.  For receive we always terminate
+	 * the descriptor list with a self-linked entry so we'll
+	 * not get overrun under high load (as can happen with a
+	 * 5212 when ANI processing enables PHY error frames).
+	 *
+	 * To ensure the last descriptor is self-linked we create
+	 * each descriptor as self-linked and add it to the end.  As
+	 * each additional descriptor is added the previous self-linked
+	 * entry is ``fixed'' naturally.  This should be safe even
+	 * if DMA is happening.  When processing RX interrupts we
+	 * never remove/process the last, self-linked, entry on the
+	 * descriptor list.  This ensures the hardware always has
+	 * someplace to write a new frame.
+	 */
+	ds = bf->bf_desc;
+	ds->ds_link = bf->bf_daddr;		/* link to self */
+	ds->ds_data = bf->bf_skbaddr;
+	ds->ds_vdata = (void *) skb->data;	/* virt addr of buffer */
+	ath_hal_setuprxdesc(ah, ds
+		, skb_tailroom(skb)		/* buffer size */
+		, 0
+	);
+	/* splice this descriptor onto the end of the current rx chain */
+	if (sc->sc_rxlink != NULL)
+		*sc->sc_rxlink = bf->bf_daddr;
+	sc->sc_rxlink = &ds->ds_link;
+	return 0;
+}
+
+/*
+ * Extend the 15-bit timestamp snapshot from an rx descriptor to a full
+ * 64-bit TSF using the current hardware TSF as reference.  If the low
+ * 15 bits of the current TSF have already wrapped past the snapshot,
+ * back the reference up by one 0x8000 window first.
+ */
+static __inline u_int64_t
+ath_extend_tsf(struct ath_hal *ah, u_int32_t rstamp)
+{
+	u_int64_t now = ath_hal_gettsf64(ah);
+
+	if ((now & 0x7fff) < rstamp)
+		now -= 0x8000;
+	return (now & ~((u_int64_t)0x7fff)) | rstamp;
+}
+
+/*
+ * Add a prism2 header to a received frame and
+ * dispatch it to capture tools like kismet.
+ */
+static void
+ath_rx_capture(struct net_device *dev, struct ath_desc *ds, struct sk_buff *skb)
+{
+	struct ath_softc *sc = dev->priv;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_frame *wh = (struct ieee80211_frame *) skb->data;
+	unsigned int headersize = ieee80211_anyhdrsize(wh);
+	int padbytes = roundup(headersize, 4) - headersize;
+	u_int64_t tsf;
+
+	/* Pass up tsf clock in mactime
+	 * Rx descriptor has the low 15 bits of the tsf at
+	 * the time the frame was received.  Use the current
+	 * tsf to extend this to 64 bits.
+	 */
+	tsf = ath_extend_tsf(sc->sc_ah, ds->ds_rxstat.rs_tstamp);
+
+	KASSERT(ic->ic_flags & IEEE80211_F_DATAPAD,
+		("data padding not enabled?"));
+
+	if (padbytes > 0) {
+		/* Remove hw pad bytes from a private copy; the caller may
+		 * still hand the original skb to other consumers. */
+		struct sk_buff *skb1 = skb_copy(skb, GFP_ATOMIC);
+		if (skb1 == NULL)
+			return;		/* XXX out of memory - drop this capture */
+		memmove(skb1->data + padbytes, skb1->data, headersize);
+		skb_pull(skb1, padbytes);
+		ieee80211_input_monitor(ic, skb1, ds, 0, tsf, sc);
+		dev_kfree_skb(skb1);
+	} else {
+		ieee80211_input_monitor(ic, skb, ds, 0, tsf, sc);
+	}
+}
+
+
+/*
+ * Prepare a just-transmitted frame for the monitor path: extend the
+ * TX timestamp (TUs, bits 25-10 of the TSF) to a 64-bit mactime,
+ * take ownership of the skb (copying it if shared), strip the h/w
+ * pad bytes, and hand it to ieee80211_input_monitor.  Always consumes
+ * the skb.
+ */
+static void
+ath_tx_capture(struct net_device *dev, struct ath_desc *ds, struct sk_buff *skb)
+{
+	struct ath_softc *sc = dev->priv;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211_frame *wh;
+	int extra = A_MAX(sizeof(struct ath_tx_radiotap_header), 
+			  A_MAX(sizeof(wlan_ng_prism2_header), ATHDESC_HEADER_SIZE));
+	u_int64_t tsf;
+	u_int32_t tstamp;
+	unsigned int headersize;
+	int padbytes;
+	
+	/* Pass up tsf clock in mactime
+	 * TX descriptor contains the transmit time in TU's,
+	 * (bits 25-10 of the TSF).
+	 */
+	tsf = ath_hal_gettsf64(sc->sc_ah);
+	tstamp = ds->ds_txstat.ts_tstamp << 10;
+	
+	/* If the 26-bit window of the current TSF has already wrapped
+	 * past the tx timestamp, back up one window before splicing. */
+	if ((tsf & 0x3ffffff) < tstamp)
+		tsf -= 0x4000000;
+	tsf = ((tsf &~ 0x3ffffff) | tstamp);
+
+	/* Release the other owners of this skb since we're basically
+	 * recycling it: copy if shared, otherwise just orphan it. */
+	if (atomic_read(&skb->users) != 1) {
+		struct sk_buff *skb2 = skb;
+		skb = skb_copy(skb, GFP_ATOMIC);
+		if (skb == NULL) {
+			printk("%s:%d %s\n", __FILE__, __LINE__, __func__);
+			dev_kfree_skb(skb2);
+			return;
+		}
+		dev_kfree_skb(skb2);
+	} else
+		skb_orphan(skb);
+
+	wh = (struct ieee80211_frame *) skb->data;
+	headersize = ieee80211_anyhdrsize(wh);
+	padbytes = roundup(headersize, 4) - headersize;
+	if (padbytes > 0) {
+		/* Unlike in rx_capture, we're freeing the skb at the end
+		 * anyway, so we don't need to worry about using a copy */
+		memmove(skb->data + padbytes, skb->data, headersize);
+		skb_pull(skb, padbytes);
+	}
+	
+	/* Make room for the capture header the monitor path prepends. */
+	if (skb_headroom(skb) < extra &&
+	    pskb_expand_head(skb, extra, 0, GFP_ATOMIC)) {
+		printk("%s:%d %s\n", __FILE__, __LINE__, __func__);
+		goto done;
+	}
+	ieee80211_input_monitor(ic, skb, ds, 1, tsf, sc);
+ done:
+	dev_kfree_skb(skb);
+}
+
+/*
+ * Intercept management frames to collect beacon rssi data
+ * and to do ibss merges.
+ */
+static void
+ath_recv_mgmt(struct ieee80211_node *ni, struct sk_buff *skb,
+	int subtype, int rssi, u_int32_t rstamp)
+{
+	struct ath_softc *sc = ni->ni_ic->ic_dev->priv;
+	struct ieee80211vap *vap = ni->ni_vap;
+	/* Extend the descriptor's 15-bit rx timestamp to a full 64-bit
+	 * TSF so it can be compared against the beacon's TSF below.
+	 * Fix: rtsf was referenced below without ever being defined. */
+	u_int64_t rtsf = ath_extend_tsf(sc->sc_ah, rstamp);
+
+	/* Call up first so subsequent work can use information
+	 * potentially stored in the node (e.g. for ibss merge). */
+	sc->sc_recv_mgmt(ni, skb, subtype, rssi, rstamp);
+	switch (subtype) {
+	case IEEE80211_FC0_SUBTYPE_BEACON:
+		/* Update RSSI statistics for use by the HAL */
+		ATH_RSSI_LPF(ATH_NODE(ni)->an_halstats.ns_avgbrssi, rssi);
+		if ((sc->sc_syncbeacon || (vap->iv_flags_ext & IEEE80211_FEXT_APPIE_UPDATE)) &&
+		    ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) {
+			/* Resync beacon timers using the TSF of the
+			 * beacon frame we just received. */
+			vap->iv_flags_ext &= ~IEEE80211_FEXT_APPIE_UPDATE;
+			ath_beacon_config(sc, vap);
+		}
+		/* NB: Fall Through */
+	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
+		if (vap->iv_opmode == IEEE80211_M_IBSS &&
+				vap->iv_state == IEEE80211_S_RUN) {
+			/* Don't merge if we have a desired BSSID */
+			if (vap->iv_flags & IEEE80211_F_DESBSSID)
+				break;
+
+			/* To handle IBSS merge, we need the struct
+			 * ieee80211_node which has been updated with the
+			 * BSSID and TSF from the last beacon */
+			ni = ieee80211_find_rxnode(ni->ni_ic,
+				(const struct ieee80211_frame_min *) skb->data);
+			if (ni == NULL)
+				break;
+
+			/* Handle IBSS merge as needed; check the TSF on the
+			 * frame before attempting the merge.  The 802.11 spec
+			 * says the station should change it's bssid to match
+			 * the oldest station with the same ssid, where oldest
+			 * is determined by the tsf.  Note that hardware
+			 * reconfiguration happens through callback to
+			 * ath_newstate as the state machine will go from
+			 * RUN -> RUN when this happens. */
+			DPRINTF(sc, ATH_DEBUG_BEACON,
+				"check for ibss merge for ni=%p TSF1(t4)=%10llu TSF2(t3)=%10llu\n",
+				ni, rtsf, le64_to_cpu(ni->ni_tstamp.tsf));
+
+			if (rtsf < le64_to_cpu(ni->ni_tstamp.tsf)) {
+				DPRINTF(sc, ATH_DEBUG_BEACON,
+				"ibss merge, rtsf %10llu local tsf %10llu\n",
+				rtsf, le64_to_cpu(ni->ni_tstamp.tsf));
+				ieee80211_ibss_merge(ni);
+			}
+		}
+		break;
+	}
+}
+
+/* Program 'antenna' as the hardware's default rx antenna, counting
+ * actual switches for statistics and resetting the diversity vote. */
+static void
+ath_setdefantenna(struct ath_softc *sc, u_int antenna)
+{
+	struct ath_hal *ah = sc->sc_ah;
+
+	/* XXX block beacon interrupts */
+	ath_hal_setdefantenna(ah, antenna);
+	if (antenna != sc->sc_defant)
+		sc->sc_stats.ast_ant_defswitch++;
+	sc->sc_defant = antenna;
+	sc->sc_rxotherant = 0;
+}
+
+/*
+ * Receive tasklet: walk the rx buffer list processing completed
+ * frames (descriptor status was already collected in the first-level
+ * interrupt handler and latched in bf_status), dispatch them to the
+ * monitor path and/or the 802.11 layer, then recycle each buffer to
+ * the tail of the list via ath_rxbuf_init.
+ */
+static void
+ath_rx_tasklet(TQUEUE_ARG data)
+{
+/* Map a hardware descriptor's physical address back to its virtual
+ * address inside the rx descriptor DMA block. */
+#define	PA2DESC(_sc, _pa) \
+	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
+		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
+	struct net_device *dev = (struct net_device *)data;
+	struct ath_buf *bf;
+	struct ath_softc *sc = dev->priv;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ath_hal *ah = sc->sc_ah;
+	struct ath_desc *ds;
+	struct sk_buff *skb;
+	struct ieee80211_node *ni;
+	int len, type;
+	u_int phyerr;
+
+	/* Let the 802.11 layer know about the new noise floor */
+	ic->ic_channoise = sc->sc_channoise;
+	
+	DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s\n", __func__);
+	do {
+		bf = STAILQ_FIRST(&sc->sc_rxbuf);
+		if (bf == NULL) {		/* XXX ??? can this happen */
+			printk("%s: no buffer (%s)\n", dev->name, __func__);
+			break;
+		}
+
+		/*
+		 * Descriptors are now processed at in the first-level
+		 * interrupt handler to support U-APSD trigger search.
+		 * This must also be done even when U-APSD is not active to support
+		 * other error handling that requires immediate attention.
+		 * We check bf_status to find out if the bf's descriptors have 
+		 * been processed by the HAL.
+		 */
+		if (!(bf->bf_status & ATH_BUFSTATUS_DONE))
+			break;
+		
+		ds = bf->bf_desc;
+		if (ds->ds_link == bf->bf_daddr) {
+			/* NB: never process the self-linked entry at the end */
+			break;
+		}
+		skb = bf->bf_skb;
+		if (skb == NULL) {		/* XXX ??? can this happen */
+			printk("%s: no skbuff (%s)\n", dev->name, __func__);
+			continue;
+		}
+
+#ifdef AR_DEBUG
+		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
+			ath_printrxbuf(bf, 1);
+#endif
+
+		if (ds->ds_rxstat.rs_more) {
+			/*
+			 * Frame spans multiple descriptors; this
+			 * cannot happen yet as we don't support
+			 * jumbograms.  If not in monitor mode,
+			 * discard the frame.
+			 */
+#ifndef ERROR_FRAMES
+			/*
+			 * Enable this if you want to see
+			 * error frames in Monitor mode.
+			 */
+			if (ic->ic_opmode != IEEE80211_M_MONITOR) {
+				sc->sc_stats.ast_rx_toobig++;
+				goto rx_next;
+			}
+#endif
+			/* fall thru for monitor mode handling... */
+		} else if (ds->ds_rxstat.rs_status != 0) {
+			/* Account each error class before deciding whether
+			 * any monitor VAP still wants the frame. */
+			if (ds->ds_rxstat.rs_status & HAL_RXERR_CRC)
+				sc->sc_stats.ast_rx_crcerr++;
+			if (ds->ds_rxstat.rs_status & HAL_RXERR_FIFO)
+				sc->sc_stats.ast_rx_fifoerr++;
+			if (ds->ds_rxstat.rs_status & HAL_RXERR_PHY) {
+				sc->sc_stats.ast_rx_phyerr++;
+				phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
+				sc->sc_stats.ast_rx_phy[phyerr]++;
+			}
+			if (ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT) {
+				/*
+				 * Decrypt error.  If the error occurred
+				 * because there was no hardware key, then
+				 * let the frame through so the upper layers
+				 * can process it.  This is necessary for 5210
+				 * parts which have no way to setup a ``clear''
+				 * key cache entry.
+				 *
+				 * XXX do key cache faulting
+				 */
+				if (ds->ds_rxstat.rs_keyix == HAL_RXKEYIX_INVALID)
+					goto rx_accept;
+				sc->sc_stats.ast_rx_badcrypt++;
+			}
+			if (ds->ds_rxstat.rs_status & HAL_RXERR_MIC) {
+				sc->sc_stats.ast_rx_badmic++;
+				/*
+				 * Do minimal work required to hand off
+				 * the 802.11 header for notification.
+				 */
+				/* XXX frag's and QoS frames */
+				len = ds->ds_rxstat.rs_datalen;
+				if (len >= sizeof (struct ieee80211_frame)) {
+					bus_dma_sync_single(sc->sc_bdev,
+					    bf->bf_skbaddr, len,
+					    BUS_DMA_FROMDEVICE);
+#if 0
+/* XXX revalidate MIC, lookup ni to find VAP */
+					ieee80211_notify_michael_failure(ic,
+					    (struct ieee80211_frame *) skb->data,
+					    sc->sc_splitmic ?
+					        ds->ds_rxstat.rs_keyix - 32 :
+					        ds->ds_rxstat.rs_keyix
+					);
+#endif
+				}
+			}
+			/*
+			 * Reject error frames if we have no vaps that 
+			 * are operating in monitor mode.
+			 */
+			if(sc->sc_nmonvaps == 0) goto rx_next;
+		}
+rx_accept:
+		/*
+		 * Sync and unmap the frame.  At this point we're
+		 * committed to passing the sk_buff somewhere so
+		 * clear buf_skb; this means a new sk_buff must be
+		 * allocated when the rx descriptor is setup again
+		 * to receive another frame.
+		 */
+		len = ds->ds_rxstat.rs_datalen;
+		bus_dma_sync_single(sc->sc_bdev,
+			bf->bf_skbaddr, len, BUS_DMA_FROMDEVICE);
+		bus_unmap_single(sc->sc_bdev, bf->bf_skbaddr,
+			sc->sc_rxbufsize, BUS_DMA_FROMDEVICE);
+		bf->bf_skb = NULL;
+
+		sc->sc_stats.ast_ant_rx[ds->ds_rxstat.rs_antenna]++;
+		sc->sc_devstats.rx_packets++;
+		sc->sc_devstats.rx_bytes += len;
+
+		skb_put(skb, len);
+		skb->protocol = __constant_htons(ETH_P_CONTROL);
+
+		if (sc->sc_nmonvaps > 0) {
+			/* 
+			 * Some vap is in monitor mode, so send to
+			 * ath_rx_capture for monitor encapsulation
+			 */
+#if 0
+			if (len < IEEE80211_ACK_LEN) {
+				DPRINTF(sc, ATH_DEBUG_RECV,
+					"%s: runt packet %d\n", __func__, len);
+				sc->sc_stats.ast_rx_tooshort++;
+				dev_kfree_skb(skb);
+				skb = NULL;
+				goto rx_next;
+			}
+#endif
+			ath_rx_capture(dev, ds, skb);
+			if (sc->sc_ic.ic_opmode == IEEE80211_M_MONITOR) {
+				/* no other VAPs need the packet */
+				dev_kfree_skb(skb);
+				skb = NULL;
+				goto rx_next;
+			}
+		}
+
+		/*
+		 * Finished monitor mode handling, now reject
+		 * error frames before passing to other vaps
+		 */
+		if (ds->ds_rxstat.rs_status != 0) {
+			dev_kfree_skb(skb);
+			skb = NULL;
+			goto rx_next;
+		}
+		
+		/* remove the CRC */
+		skb_trim(skb, skb->len - IEEE80211_CRC_LEN);
+
+		/*
+		 * From this point on we assume the frame is at least
+		 * as large as ieee80211_frame_min; verify that.
+		 */
+		if (len < IEEE80211_MIN_LEN) {
+			DPRINTF(sc, ATH_DEBUG_RECV, "%s: short packet %d\n",
+				__func__, len);
+			sc->sc_stats.ast_rx_tooshort++;
+			dev_kfree_skb(skb);
+			skb = NULL;
+			goto rx_next;
+		}
+
+		/*
+		 * Normal receive.
+		 */
+
+		if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
+			ieee80211_dump_pkt(ic, skb->data, skb->len,
+				   sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate,
+				   ds->ds_rxstat.rs_rssi);
+		}
+
+		/*
+		 * Locate the node for sender, track state, and then
+		 * pass the (referenced) node up to the 802.11 layer
+		 * for its use.  If the sender is unknown spam the
+		 * frame; it'll be dropped where it's not wanted.
+		 */
+		if (ds->ds_rxstat.rs_keyix != HAL_RXKEYIX_INVALID &&
+		    (ni = sc->sc_keyixmap[ds->ds_rxstat.rs_keyix]) != NULL) {
+			struct ath_node *an;
+			/*
+			 * Fast path: node is present in the key map;
+			 * grab a reference for processing the frame.
+			 */
+			an = ATH_NODE(ieee80211_ref_node(ni));
+			ATH_RSSI_LPF(an->an_avgrssi, ds->ds_rxstat.rs_rssi);
+			type = ieee80211_input(ni, skb,
+				ds->ds_rxstat.rs_rssi, ds->ds_rxstat.rs_tstamp);
+			ieee80211_free_node(ni);
+		} else {
+			/*
+			 * No key index or no entry, do a lookup and
+			 * add the node to the mapping table if possible.
+			 */
+			ni = ieee80211_find_rxnode(ic, 
+				(const struct ieee80211_frame_min *) skb->data);
+			if (ni != NULL) {
+				struct ath_node *an = ATH_NODE(ni);
+				u_int16_t keyix;
+
+				ATH_RSSI_LPF(an->an_avgrssi,
+					ds->ds_rxstat.rs_rssi);
+				type = ieee80211_input(ni, skb,
+					ds->ds_rxstat.rs_rssi,
+					ds->ds_rxstat.rs_tstamp);
+				/*
+				 * If the station has a key cache slot assigned
+				 * update the key->node mapping table.
+				 */
+				keyix = ni->ni_ucastkey.wk_keyix;
+				if (keyix != IEEE80211_KEYIX_NONE &&
+				    sc->sc_keyixmap[keyix] == NULL)
+					sc->sc_keyixmap[keyix] = ieee80211_ref_node(ni);
+				ieee80211_free_node(ni); 
+			} else
+				type = ieee80211_input_all(ic, skb,
+					ds->ds_rxstat.rs_rssi,
+					ds->ds_rxstat.rs_tstamp);
+		}
+
+		if (sc->sc_diversity) {
+			/*
+			 * When using hardware fast diversity, change the default rx
+			 * antenna if rx diversity chooses the other antenna 3
+			 * times in a row.
+			 */
+			if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
+				if (++sc->sc_rxotherant >= 3)
+					ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna);
+			} else
+				sc->sc_rxotherant = 0;
+		}
+		if (sc->sc_softled) {
+			/*
+			 * Blink for any data frame.  Otherwise do a
+			 * heartbeat-style blink when idle.  The latter
+			 * is mainly for station mode where we depend on
+			 * periodic beacon frames to trigger the poll event.
+			 */
+			if (type == IEEE80211_FC0_TYPE_DATA) {
+				sc->sc_rxrate = ds->ds_rxstat.rs_rate;
+				ath_led_event(sc, ATH_LED_RX);
+			} else if (jiffies - sc->sc_ledevent >= sc->sc_ledidle)
+				ath_led_event(sc, ATH_LED_POLL);
+		}
+rx_next:
+		/* Rotate the consumed buffer to the tail of the rx list;
+		 * ath_rxbuf_init (loop condition) re-arms it with a fresh
+		 * skb and splices it back onto the hardware chain. */
+		ATH_RXBUF_LOCK_IRQ(sc);
+		STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
+		bf->bf_status &= ~ATH_BUFSTATUS_DONE;
+		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
+		ATH_RXBUF_UNLOCK_IRQ(sc);
+	} while (ath_rxbuf_init(sc, bf) == 0);
+	
+	/* rx signal state monitoring */
+	ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan);
+	if (ath_hal_radar_event(ah)) {
+		sc->sc_rtasksched = 1;
+		schedule_work(&sc->sc_radartask);
+	}
+#undef PA2DESC
+}
+
+#ifdef ATH_SUPERG_XR
+
+/*
+ * Recompute the XR group-poll interval from the current station mix
+ * and reprogram the poll queue whenever it changes:
+ *  - no XR stations:   default interval
+ *  - only XR stations: minimum interval
+ *  - mixed:            interpolate between the two with a fudge factor
+ */
+static void 
+ath_grppoll_period_update(struct ath_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	int interval;
+	u_int16_t xrsta;
+	u_int16_t normalsta;
+	u_int16_t allsta;
+
+	xrsta = ic->ic_xr_sta_assoc;
+
+	/*
+	 * if no stations are in XR mode.
+	 * use default poll interval.
+	 */
+	if (xrsta == 0) { 
+		if (sc->sc_xrpollint != XR_DEFAULT_POLL_INTERVAL) {
+			sc->sc_xrpollint = XR_DEFAULT_POLL_INTERVAL;
+			ath_grppoll_txq_update(sc,XR_DEFAULT_POLL_INTERVAL);
+		}
+		return;
+	}
+
+	allsta = ic->ic_sta_assoc;
+	/*
+	 * if all the stations are in XR mode.
+	 * use minimum poll interval.
+	 */
+	if (allsta == xrsta) { 
+		if (sc->sc_xrpollint != XR_MIN_POLL_INTERVAL) {
+			sc->sc_xrpollint = XR_MIN_POLL_INTERVAL;
+			ath_grppoll_txq_update(sc,XR_MIN_POLL_INTERVAL);
+		}
+		return;
+	}
+
+	normalsta = allsta - xrsta;
+	/*
+	 * Stations are in both XR and normal mode: use some fudge
+	 * factor.  NB: compute and clamp in a signed int; the previous
+	 * u_int16_t arithmetic could go negative and wrap to a huge
+	 * bogus interval when XR stations greatly outnumber normal
+	 * ones, defeating the XR_MIN_POLL_INTERVAL clamp below.
+	 */
+	interval = XR_DEFAULT_POLL_INTERVAL -
+          ((XR_DEFAULT_POLL_INTERVAL - XR_MIN_POLL_INTERVAL) * xrsta)/(normalsta * XR_GRPPOLL_PERIOD_FACTOR);
+	if (interval < XR_MIN_POLL_INTERVAL)
+		interval = XR_MIN_POLL_INTERVAL;
+	
+	if (sc->sc_xrpollint != interval) {
+		sc->sc_xrpollint = interval;
+		ath_grppoll_txq_update(sc,interval);
+	}
+
+	/*
+	 * XXX: what if stations go to sleep?
+	 * ideally the interval should be adjusted dynamically based on
+	 * xr and normal upstream traffic.
+	 */
+}
+
+/*
+ * Reprogram the already-created group-poll hardware queue with a new
+ * poll period (the CBR period is programmed in microseconds).  No-op
+ * until ath_grppoll_txq_setup has created the queue.
+ */
+static void 
+ath_grppoll_txq_update(struct ath_softc *sc, int period)
+{
+	struct ath_hal *ah = sc->sc_ah;
+	struct ath_txq *txq = &sc->sc_grpplq;
+	HAL_TXQ_INFO qinfo;
+
+	if (txq->axq_qnum == -1)
+		return;
+
+	memset(&qinfo, 0, sizeof(qinfo));
+	qinfo.tqi_subtype = 0;
+	qinfo.tqi_aifs = XR_AIFS;
+	qinfo.tqi_cwmin = XR_CWMIN_CWMAX;
+	qinfo.tqi_cwmax = XR_CWMIN_CWMAX;
+	qinfo.tqi_compBuf = 0;
+	qinfo.tqi_cbrPeriod = IEEE80211_TU_TO_MS(period) * 1000;	/* usec */
+	qinfo.tqi_cbrOverflowLimit = 2;
+	ath_hal_settxqueueprops(ah, txq->axq_qnum, &qinfo);
+	ath_hal_resettxqueue(ah, txq->axq_qnum);	/* push to h/w */
+}
+
+/*
+ * Setup the group-poll hardware transmit queue: allocate a HAL queue
+ * of the given type on first use (sc_grpplq.axq_qnum == -1), then
+ * (re)initialize the software queue state and push the CBR poll
+ * period (in usec) to the hardware.
+ */
+static void 
+ath_grppoll_txq_setup(struct ath_softc *sc, int qtype, int period)
+{
+#define	N(a)	((int)(sizeof(a)/sizeof(a[0])))
+	struct ath_hal *ah = sc->sc_ah;
+	HAL_TXQ_INFO qi;
+	int qnum;
+	/* NOTE(review): compression-buffer fields stay zero here —
+	 * presumably descriptor compression is unused for the poll
+	 * queue; confirm. */
+	u_int compbufsz = 0;
+	char *compbuf = NULL;
+	dma_addr_t compbufp = 0;
+	struct ath_txq *txq = &sc->sc_grpplq;
+
+	memset(&qi, 0, sizeof(qi));
+	qi.tqi_subtype = 0;
+	qi.tqi_aifs = XR_AIFS;
+	qi.tqi_cwmin = XR_CWMIN_CWMAX;
+	qi.tqi_cwmax = XR_CWMIN_CWMAX;
+	qi.tqi_compBuf = 0;
+	qi.tqi_cbrPeriod = IEEE80211_TU_TO_MS(period) * 1000; /* usec */
+	qi.tqi_cbrOverflowLimit = 2;
+
+	/* Allocate the HAL queue only once; later calls just reprogram. */
+	if (sc->sc_grpplq.axq_qnum == -1) {
+		qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
+		if (qnum == -1)
+			return ;
+		if (qnum >= N(sc->sc_txq)) {
+			printk("%s: HAL qnum %u out of range, max %u!\n",
+				   sc->sc_dev->name, qnum, N(sc->sc_txq));
+			ath_hal_releasetxqueue(ah, qnum);
+			return;
+		}
+
+		txq->axq_qnum = qnum;
+	}
+	txq->axq_link = NULL;
+	STAILQ_INIT(&txq->axq_q);
+	ATH_TXQ_LOCK_INIT(txq);
+	txq->axq_depth = 0;
+	txq->axq_totalqueued = 0;
+	txq->axq_intrcnt = 0;
+	TAILQ_INIT(&txq->axq_stageq);
+	txq->axq_compbuf = compbuf;
+	txq->axq_compbufsz = compbufsz;
+	txq->axq_compbufp = compbufp;
+	ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
+#undef N
+
+}
+
+/*
+ * Setup group poll frames on the group poll queue.
+ */
+static void ath_grppoll_start(struct ieee80211vap *vap,int pollcount)
+{
+	int i, amode;
+	int flags;
+	struct sk_buff *skb = NULL;
+	struct ath_buf *bf, *head = NULL;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ath_softc *sc = ic->ic_dev->priv;
+	struct ath_hal *ah = sc->sc_ah;
+	u_int8_t rate;
+	int ctsrate = 0;
+	int ctsduration = 0;
+	const HAL_RATE_TABLE *rt;
+	u_int8_t cix, rtindex = 0;
+	u_int type;
+	struct ath_txq *txq = &sc->sc_grpplq;
+	struct ath_desc *ds = NULL;
+	int pktlen = 0, keyix = 0;
+	int pollsperrate, pos;
+	int rates[XR_NUM_RATES];
+	u_int8_t ratestr[16], numpollstr[16];
+	typedef struct rate_to_str_map {
+		u_int8_t str[4];
+		int ratekbps;
+	} RATE_TO_STR_MAP;
+
+	/* Map from the textual rate spelling used in sc_grppoll_str to
+	 * a rate in Kbps; the "?" / 0 entry terminates the table. */
+	static const RATE_TO_STR_MAP ratestrmap[] = {
+		{"0.25",    250},
+		{ ".25",    250},
+		{"0.5",     500},
+		{ ".5",     500},
+		{  "1",    1000},
+		{  "3",    3000},
+		{  "6",    6000},
+		{  "?",       0},
+	};
+
+#define MAX_GRPPOLL_RATE 5
+#define	USE_SHPREAMBLE(_ic) \
+	(((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER)) \
+		== IEEE80211_F_SHPREAMBLE)
+
+	/* Already running -- nothing to do. */
+	if (sc->sc_xrgrppoll)
+		return; 
+
+	/* Parse sc_grppoll_str as whitespace-separated "rate count"
+	 * pairs into rates[], indexed by the matched ratestrmap entry.
+	 * NOTE(review): the "%s" conversions write into 16-byte local
+	 * buffers with no field width -- a malformed/overlong poll
+	 * string would overflow them; confirm the string's origin is
+	 * trusted (e.g. a bounded sysctl). */
+	memset(&rates, 0, sizeof(rates));
+	pos = 0;
+	while (sscanf(&(sc->sc_grppoll_str[pos]), "%s %s", ratestr, numpollstr) == 2) {
+		int rtx = 0;
+		while (ratestrmap[rtx].ratekbps != 0) {
+			if (strcmp(ratestrmap[rtx].str, ratestr) == 0)
+				break;
+			rtx++;
+		}
+		sscanf(numpollstr, "%d", &(rates[rtx]));
+		pos += strlen(ratestr) + strlen(numpollstr) + 2;
+	}
+	if (!sc->sc_grppolldma.dd_bufptr) {
+		printk("grppoll_start: grppoll Buf allocation failed\n");
+		return;
+	}
+	/* CTS is sent at the configured protection rate from the
+	 * normal rate table; the polls themselves use the XR table. */
+	rt = sc->sc_currates;
+	cix = rt->info[sc->sc_protrix].controlRate;
+	ctsrate = rt->info[cix].rateCode;
+	if (USE_SHPREAMBLE(ic))
+			ctsrate |= rt->info[cix].shortPreamble;
+	rt = sc->sc_xr_rates;
+	/*
+	 * queue the group polls for each antenna mode. set the right keycache index for the
+	 * broadcast packets. this will ensure that if the first poll
+	 * does not elicit a single chirp from any XR station, hardware will
+	 * not send the subsequent polls
+	 */
+	pollsperrate = 0;
+	for (amode = HAL_ANTENNA_FIXED_A; amode < HAL_ANTENNA_MAX_MODE ; amode++) {
+		for (i = 0; i < (pollcount + 1); i++) {
+
+			flags = HAL_TXDESC_NOACK;
+			rate = rt->info[rtindex].rateCode;
+			/* 
+			 * except for the last one every thing else is a CF poll.
+			 * last one is  the CF End frame.
+			 */
+
+			if (i == pollcount) {
+				skb = ieee80211_getcfframe(vap,IEEE80211_FC0_SUBTYPE_CF_END);
+				rate = ctsrate;
+				ctsduration = ath_hal_computetxtime(ah,
+					sc->sc_currates, pktlen, sc->sc_protrix, AH_FALSE);
+			} else {
+				skb = ieee80211_getcfframe(vap, IEEE80211_FC0_SUBTYPE_CFPOLL);
+				pktlen = skb->len + IEEE80211_CRC_LEN;
+				/*
+				 * the very first group poll ctsduration  should be enough to allow
+				 * an auth frame from station. This is to pass the wifi testing (as 
+				 * some stations in testing do not honor CF_END and rely on CTS duration)
+				 */
+				if (i == 0 && amode == HAL_ANTENNA_FIXED_A) {
+					ctsduration = ath_hal_computetxtime(ah,	rt,
+							pktlen,	rtindex,
+							AH_FALSE) /*cf-poll time */
+						+ (XR_AIFS + (XR_CWMIN_CWMAX * XR_SLOT_DELAY))  
+						+ ath_hal_computetxtime(ah, rt,
+							2 * (sizeof(struct ieee80211_frame_min) + 6),
+							IEEE80211_XR_DEFAULT_RATE_INDEX,
+							AH_FALSE) /*auth packet time */
+						+ ath_hal_computetxtime(ah, rt,
+							IEEE80211_ACK_LEN,
+							IEEE80211_XR_DEFAULT_RATE_INDEX,
+							AH_FALSE); /*ack frame time */ 
+				} else {
+					ctsduration = ath_hal_computetxtime(ah, rt,
+							pktlen, rtindex,
+							AH_FALSE) /*cf-poll time */
+						+ (XR_AIFS + (XR_CWMIN_CWMAX * XR_SLOT_DELAY))  
+						+ ath_hal_computetxtime(ah,rt,
+							XR_FRAGMENTATION_THRESHOLD,
+							IEEE80211_XR_DEFAULT_RATE_INDEX,
+							AH_FALSE) /*data packet time */
+						+ ath_hal_computetxtime(ah,rt,
+							IEEE80211_ACK_LEN,
+							IEEE80211_XR_DEFAULT_RATE_INDEX,
+							AH_FALSE); /*ack frame time */ 
+				}
+				/* Encrypt the first poll once to learn the
+				 * broadcast key index; reused for the rest. */
+				if ((vap->iv_flags & IEEE80211_F_PRIVACY) && keyix == 0) {
+					struct ieee80211_key *k;
+					k = ieee80211_crypto_encap(vap->iv_bss, skb);
+					if (k)
+						keyix = k->wk_keyix;
+				}
+			}
+			ATH_TXBUF_LOCK_IRQ(sc);					
+			bf = STAILQ_FIRST(&sc->sc_grppollbuf);
+			if (bf != NULL)
+				STAILQ_REMOVE_HEAD(&sc->sc_grppollbuf, bf_list);
+			else {
+				DPRINTF(sc, ATH_DEBUG_XMIT, "%s: No more TxBufs\n", __func__);
+				ATH_TXBUF_UNLOCK_IRQ_EARLY(sc);
+				return;
+			}
+			/* XXX use a counter and leave at least one for mgmt frames */
+			if (STAILQ_EMPTY(&sc->sc_grppollbuf)) {				
+				DPRINTF(sc, ATH_DEBUG_XMIT, "%s: No more TxBufs left\n", __func__);
+				ATH_TXBUF_UNLOCK_IRQ_EARLY(sc);
+				return;
+			}					
+			ATH_TXBUF_UNLOCK_IRQ(sc);
+			bf->bf_skbaddr = bus_map_single(sc->sc_bdev,
+				skb->data, skb->len, BUS_DMA_TODEVICE);
+			bf->bf_skb = skb;
+			ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
+			ds = bf->bf_desc;
+			ds->ds_data = bf->bf_skbaddr;
+			/* The very last descriptor (CF-End of the last antenna
+			 * mode) gets VEOL; all others are group polls with CTS. */
+			if (i == pollcount && amode == (HAL_ANTENNA_MAX_MODE -1)) {
+				type = HAL_PKT_TYPE_NORMAL;
+				flags |= (HAL_TXDESC_CLRDMASK | HAL_TXDESC_VEOL);
+			} else {
+				flags |= HAL_TXDESC_CTSENA;
+				type = HAL_PKT_TYPE_GRP_POLL;
+			}
+			/* Remember the first descriptor: it both starts the DMA
+			 * chain and closes the circular list at the end. */
+			if (i == 0 && amode == HAL_ANTENNA_FIXED_A ) {
+				flags |= HAL_TXDESC_CLRDMASK;  
+				head = bf;
+			}
+			ath_hal_setuptxdesc(ah, ds
+				, skb->len + IEEE80211_CRC_LEN	/* frame length */
+				, sizeof(struct ieee80211_frame)	/* header length */
+				, type			/* Atheros packet type */
+				, ic->ic_txpowlimit	/* max txpower */
+				, rate, 0		/* series 0 rate/tries */
+				, keyix /* HAL_TXKEYIX_INVALID */		/* use key index */
+				, amode			/* antenna mode */
+				, flags				
+				, ctsrate		/* rts/cts rate */
+				, ctsduration		/* rts/cts duration */
+				, 0			/* comp icv len */
+				, 0			/* comp iv len */
+				, ATH_COMP_PROC_NO_COMP_NO_CCS	/* comp scheme */
+				);
+			ath_hal_filltxdesc(ah, ds
+				, roundup(skb->len, 4)	/* buffer length */
+				, AH_TRUE		/* first segment */
+				, AH_TRUE		/* last segment */
+				, ds			/* first descriptor */
+				);
+			/* NB: The desc swap function becomes void, 
+	 		* if descriptor swapping is not enabled
+	 		*/
+			ath_desc_swap(ds);
+			if (txq->axq_link) {
+#ifdef AH_NEED_DESC_SWAP
+				*txq->axq_link = cpu_to_le32(bf->bf_daddr);
+#else
+				*txq->axq_link = bf->bf_daddr;
+#endif
+			}
+			txq->axq_link = &ds->ds_link;
+			/* Advance to the next configured rate after rates[rtindex]
+			 * polls have been queued at the current one. */
+			pollsperrate++;
+			if (pollsperrate > rates[rtindex]) {
+				rtindex = (rtindex + 1) % MAX_GRPPOLL_RATE;
+				pollsperrate = 0;
+			}
+		}
+	}
+	/* make it circular */
+#ifdef AH_NEED_DESC_SWAP
+	ds->ds_link = cpu_to_le32(head->bf_daddr);
+#else
+	ds->ds_link = head->bf_daddr;
+#endif
+	/* start the queue */
+	ath_hal_puttxbuf(ah, txq->axq_qnum, head->bf_daddr);
+	ath_hal_txstart(ah, txq->axq_qnum);
+	sc->sc_xrgrppoll = 1;
+#undef USE_SHPREAMBLE
+}
+
+/*
+ * Stop the XR group-poll queue: halt its DMA, unmap and free every
+ * queued frame, return the ath_bufs to sc_grppollbuf, and reset the
+ * software queue state so ath_grppoll_start() can run again.
+ */
+static void ath_grppoll_stop(struct ieee80211vap *vap)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ath_softc *sc = ic->ic_dev->priv;
+	struct ath_hal *ah = sc->sc_ah;
+	struct ath_txq *txq = &sc->sc_grpplq;
+	struct ath_buf *bf;
+	
+	
+	if (!sc->sc_xrgrppoll)
+		return; 
+	ath_hal_stoptxdma(ah, txq->axq_qnum);
+
+	/* move the grppool bufs back to the grppollbuf */
+	for (;;) {
+		ATH_TXQ_LOCK(txq);
+		bf = STAILQ_FIRST(&txq->axq_q);
+		if (bf == NULL) {
+			txq->axq_link = NULL;
+			ATH_TXQ_UNLOCK(txq);
+			break;
+		}
+		ATH_TXQ_REMOVE_HEAD(txq, bf_list);
+		ATH_TXQ_UNLOCK(txq);
+		bus_unmap_single(sc->sc_bdev,
+			bf->bf_skbaddr, bf->bf_skb->len, BUS_DMA_TODEVICE);
+		dev_kfree_skb(bf->bf_skb);
+		bf->bf_skb = NULL;
+		bf->bf_node = NULL;
+
+		ATH_TXBUF_LOCK(sc);
+		STAILQ_INSERT_TAIL(&sc->sc_grppollbuf, bf, bf_list);
+		ATH_TXBUF_UNLOCK(sc);
+	}
+	/* NOTE(review): ATH_TXQ_LOCK_INIT below re-initializes the lock
+	 * that was just used above -- confirm no concurrent user. */
+	STAILQ_INIT(&txq->axq_q);
+	ATH_TXQ_LOCK_INIT(txq);
+	txq->axq_depth = 0;
+	txq->axq_totalqueued = 0;
+	txq->axq_intrcnt = 0;
+	TAILQ_INIT(&txq->axq_stageq);
+	sc->sc_xrgrppoll = 0;
+}
+#endif
+
+/*
+ * Setup a h/w transmit queue.
+ */
+static struct ath_txq *
+ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
+{
+#define	N(a)	((int)(sizeof(a)/sizeof(a[0])))
+	struct ath_hal *ah = sc->sc_ah;
+	HAL_TXQ_INFO qi;
+	int qnum;
+	u_int compbufsz = 0;
+	char *compbuf = NULL;
+	dma_addr_t compbufp = 0;
+
+	/* Let the HAL choose default contention parameters unless this
+	 * is an XR data queue, which uses fixed XR values. */
+	memset(&qi, 0, sizeof(qi));
+	qi.tqi_subtype = subtype;
+	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
+	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
+	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
+	qi.tqi_compBuf = 0;
+#ifdef ATH_SUPERG_XR
+	if (subtype == HAL_XR_DATA) {
+		qi.tqi_aifs  = XR_DATA_AIFS;
+		qi.tqi_cwmin = XR_DATA_CWMIN;
+		qi.tqi_cwmax = XR_DATA_CWMAX;
+	}
+#endif
+
+#ifdef ATH_SUPERG_COMP
+	/* allocate compression scratch buffer for data queues */
+	if (((qtype == HAL_TX_QUEUE_DATA)|| (qtype == HAL_TX_QUEUE_UAPSD)) 
+	    && ath_hal_compressionsupported(ah)) {
+		compbufsz = roundup(HAL_COMP_BUF_MAX_SIZE, 
+			HAL_COMP_BUF_ALIGN_SIZE) + HAL_COMP_BUF_ALIGN_SIZE;
+		compbuf = (char *)bus_alloc_consistent(sc->sc_bdev,
+			compbufsz, &compbufp);
+		/* If allocation fails, degrade gracefully by turning
+		 * off the compression capability instead of failing. */
+		if (compbuf == NULL)
+			sc->sc_ic.ic_ath_cap &= ~IEEE80211_ATHC_COMP;	
+		else
+			qi.tqi_compBuf = (u_int32_t)compbufp;
+	} 
+#endif
+	/*
+	 * Enable interrupts only for EOL and DESC conditions.
+	 * We mark tx descriptors to receive a DESC interrupt
+	 * when a tx queue gets deep; otherwise waiting for the
+	 * EOL to reap descriptors.  Note that this is done to
+	 * reduce interrupt load and this only defers reaping
+	 * descriptors, never transmitting frames.  Aside from
+	 * reducing interrupts this also permits more concurrency.
+	 * The only potential downside is if the tx queue backs
+	 * up in which case the top half of the kernel may backup
+	 * due to a lack of tx descriptors.
+	 *
+	 * The UAPSD queue is an exception, since we take a desc-
+	 * based intr on the EOSP frames.
+	 */
+	if (qtype == HAL_TX_QUEUE_UAPSD)
+		qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE;
+	else
+		qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE;
+	qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
+	if (qnum == -1) {
+		/*
+		 * NB: don't print a message, this happens
+		 * normally on parts with too few tx queues
+		 */
+#ifdef ATH_SUPERG_COMP
+		/* Release the scratch buffer on every failure path. */
+		if (compbuf) {
+			bus_free_consistent(sc->sc_bdev, compbufsz,
+				compbuf, compbufp);
+		}
+#endif
+		return NULL;
+	}
+	if (qnum >= N(sc->sc_txq)) {
+		printk("%s: HAL qnum %u out of range, max %u!\n",
+			sc->sc_dev->name, qnum, N(sc->sc_txq));
+#ifdef ATH_SUPERG_COMP
+		if (compbuf) {
+			bus_free_consistent(sc->sc_bdev, compbufsz,
+				compbuf, compbufp);
+		}
+#endif
+		ath_hal_releasetxqueue(ah, qnum);
+		return NULL;
+	}
+	/* NB: the HAL may return a previously-setup queue; only
+	 * initialize software state the first time we see a qnum. */
+	if (!ATH_TXQ_SETUP(sc, qnum)) {
+		struct ath_txq *txq = &sc->sc_txq[qnum];
+
+		txq->axq_qnum = qnum;
+		txq->axq_link = NULL;
+		STAILQ_INIT(&txq->axq_q);
+		ATH_TXQ_LOCK_INIT(txq);
+		txq->axq_depth = 0;
+		txq->axq_totalqueued = 0;
+		txq->axq_intrcnt = 0;
+		TAILQ_INIT(&txq->axq_stageq);
+		txq->axq_compbuf = compbuf;
+		txq->axq_compbufsz = compbufsz;
+		txq->axq_compbufp = compbufp;
+		sc->sc_txqsetup |= 1 << qnum;
+	}
+	return &sc->sc_txq[qnum];
+#undef N
+}
+
+/*
+ * Setup a hardware data transmit queue for the specified
+ * access control.  The HAL may not support all requested
+ * queues in which case it will return a reference to a
+ * previously setup queue.  We record the mapping from ac's
+ * to h/w queues for use by ath_tx_start and also track
+ * the set of h/w queues being used to optimize work in the
+ * transmit interrupt handler and related routines.
+ */
+static int
+ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
+{
+#define	N(a)	((int)(sizeof(a)/sizeof(a[0])))
+	struct ath_txq *txq;
+
+	/* Returns 1 on success, 0 on failure (bad AC or no h/w queue). */
+	if (ac >= N(sc->sc_ac2q)) {
+		printk("%s: AC %u out of range, max %u!\n",
+		       sc->sc_dev->name, ac, (unsigned)N(sc->sc_ac2q));
+		return 0;
+	}
+	txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
+	if (txq != NULL) {
+		sc->sc_ac2q[ac] = txq;
+		return 1;
+	} else
+		return 0;
+#undef N
+}
+
+/*
+ * Update WME parameters for a transmit queue.
+ */
+static int
+ath_txq_update(struct ath_softc *sc, struct ath_txq *txq, int ac)
+{
+/* WME encodes cwmin/cwmax as log2 exponents and TXOP in 32us units. */
+#define	ATH_EXPONENT_TO_VALUE(v)	((1<<v)-1)
+#define	ATH_TXOP_TO_US(v)		(v<<5)
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
+	struct ath_hal *ah = sc->sc_ah;
+	HAL_TXQ_INFO qi;
+
+	/* Read-modify-write the queue properties so fields we do not
+	 * touch keep their current values. */
+	ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
+	qi.tqi_aifs = wmep->wmep_aifsn;
+	qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
+	qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
+	qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
+
+	/* Returns 1 on success, 0 on failure. */
+	if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
+		printk("%s: unable to update hardware queue "
+			"parameters for %s traffic!\n",
+			sc->sc_dev->name, ieee80211_wme_acnames[ac]);
+		return 0;
+	} else {
+		ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
+		return 1;
+	}
+#undef ATH_TXOP_TO_US
+#undef ATH_EXPONENT_TO_VALUE
+}
+
+/*
+ * Callback from the 802.11 layer to update WME parameters.
+ */
+static int
+ath_wme_update(struct ieee80211com *ic)
+{
+	struct ath_softc *sc = ic->ic_dev->priv;
+
+	/* The U-APSD queue mirrors the VO parameters when present. */
+	if (sc->sc_uapsdq)
+		ath_txq_update(sc, sc->sc_uapsdq, WME_AC_VO);
+
+	/* Returns 0 on success, EIO if any per-AC update failed. */
+	return !ath_txq_update(sc, sc->sc_ac2q[WME_AC_BE], WME_AC_BE) ||
+	    !ath_txq_update(sc, sc->sc_ac2q[WME_AC_BK], WME_AC_BK) ||
+	    !ath_txq_update(sc, sc->sc_ac2q[WME_AC_VI], WME_AC_VI) ||
+	    !ath_txq_update(sc, sc->sc_ac2q[WME_AC_VO], WME_AC_VO) ? EIO : 0;
+}
+
+/*
+ * Callback from 802.11 layer to flush a node's U-APSD queues
+ */
+static void	
+ath_uapsd_flush(struct ieee80211_node *ni)
+{
+	struct ath_node *an = ATH_NODE(ni);
+	struct ath_buf *bf;
+	struct ath_softc *sc = ni->ni_ic->ic_dev->priv;
+	struct ath_txq *txq;
+
+	ATH_NODE_UAPSD_LOCK_IRQ(an);
+	/*
+	 * NB: could optimize for successive runs from the same AC
+	 *     if we can assume that is the most frequent case.
+	 */
+	/* Drain the delivery queue: each buffered frame is detached
+	 * (ds_link cleared) and requeued on the h/w queue for its AC. */
+	while (an->an_uapsd_qdepth) {
+		bf = STAILQ_FIRST(&an->an_uapsd_q);
+		STAILQ_REMOVE_HEAD(&an->an_uapsd_q, bf_list);
+		bf->bf_desc->ds_link = 0;
+		txq = sc->sc_ac2q[bf->bf_skb->priority & 0x3];
+		ath_tx_txqaddbuf(sc, ni, txq, bf, bf->bf_desc, bf->bf_skb->len);
+		an->an_uapsd_qdepth--;
+	}
+
+	/* Likewise drain the overflow queue. */
+	while (an->an_uapsd_overflowqdepth) {
+		bf = STAILQ_FIRST(&an->an_uapsd_overflowq);
+		STAILQ_REMOVE_HEAD(&an->an_uapsd_overflowq, bf_list);
+		bf->bf_desc->ds_link = 0;
+		txq = sc->sc_ac2q[bf->bf_skb->priority & 0x3];
+		ath_tx_txqaddbuf(sc, ni, txq, bf, bf->bf_desc, bf->bf_skb->len);
+		an->an_uapsd_overflowqdepth--;
+	}
+	/* Nothing buffered any more: clear the node's TIM bit. */
+	if (IEEE80211_NODE_UAPSD_USETIM(ni))
+		ni->ni_vap->iv_set_tim(ni, 0);
+	ATH_NODE_UAPSD_UNLOCK_IRQ(an);
+}
+
+/*
+ * Reclaim resources for a setup queue.
+ */
+static void
+ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
+{
+
+#ifdef ATH_SUPERG_COMP
+	/* Release compression buffer */
+	if (txq->axq_compbuf) {
+		bus_free_consistent(sc->sc_bdev, txq->axq_compbufsz,
+			txq->axq_compbuf, txq->axq_compbufp);
+		txq->axq_compbuf = NULL;
+	}
+#endif
+	/* Return the queue to the HAL and mark it torn down. */
+	ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
+	ATH_TXQ_LOCK_DESTROY(txq);
+	sc->sc_txqsetup &= ~(1 << txq->axq_qnum);
+}
+
+/*
+ * Reclaim all tx queue resources.
+ */
+static void
+ath_tx_cleanup(struct ath_softc *sc)
+{
+	int i;
+
+	ATH_TXBUF_LOCK_DESTROY(sc);
+	/* Tear down every h/w queue that was set up (per sc_txqsetup). */
+	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
+		if (ATH_TXQ_SETUP(sc, i))
+			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
+}
+
+#ifdef ATH_SUPERG_COMP
+/*
+ * Return the ICV trailer length (bytes) for the key's cipher:
+ * AES (CCM/OCB) uses the AES ICV size, everything else the WEP size.
+ */
+static u_int32_t
+ath_get_icvlen(struct ieee80211_key *k)
+{
+	const struct ieee80211_cipher *cip = k->wk_cipher;
+
+	if (cip->ic_cipher == IEEE80211_CIPHER_AES_CCM ||
+		cip->ic_cipher == IEEE80211_CIPHER_AES_OCB)
+		return AES_ICV_FIELD_SIZE;
+
+	return WEP_ICV_FIELD_SIZE;
+}
+
+/*
+ * Return the IV header length (bytes) for the key's cipher: the WEP
+ * IV size, plus the extended IV for AES (CCM/OCB) ciphers.
+ */
+static u_int32_t
+ath_get_ivlen(struct ieee80211_key *k)
+{
+	const struct ieee80211_cipher *cip = k->wk_cipher;
+	u_int32_t ivlen;
+
+	ivlen = WEP_IV_FIELD_SIZE;
+
+	if (cip->ic_cipher == IEEE80211_CIPHER_AES_CCM ||
+		cip->ic_cipher == IEEE80211_CIPHER_AES_OCB)
+		ivlen += EXT_IV_FIELD_SIZE;
+	
+	return ivlen;
+}
+#endif
+
+/*
+ * Get transmit rate index using rate in Kbps
+ */
+static __inline int
+ath_tx_findindex(const HAL_RATE_TABLE *rt, int rate)
+{
+	int i;
+	int ndx = 0;
+
+	/* Linear search; NB: silently falls back to index 0 when
+	 * 'rate' (Kbps) is not present in the table. */
+	for (i = 0; i < rt->rateCount; i++) {
+		if (rt->info[i].rateKbps == rate) {
+			ndx = i;
+			break;
+		}
+	}
+
+	return ndx;
+}
+
+/*
+ * Needs external locking!
+ */
+static void
+ath_tx_uapsdqueue(struct ath_softc *sc, struct ath_node *an, struct ath_buf *bf)
+{
+	struct ath_buf *lastbuf;
+
+	/* case the delivery queue just sent and can move overflow q over */
+	if (an->an_uapsd_qdepth == 0 && an->an_uapsd_overflowqdepth != 0) {
+		DPRINTF(sc, ATH_DEBUG_UAPSD,
+			"%s: delivery Q empty, replacing with overflow Q\n",
+			__func__);
+		STAILQ_CONCAT(&an->an_uapsd_q, &an->an_uapsd_overflowq);
+		an->an_uapsd_qdepth = an->an_uapsd_overflowqdepth;
+		an->an_uapsd_overflowqdepth = 0;
+	}
+
+	/* most common case - room on delivery q */
+	if (an->an_uapsd_qdepth < an->an_node.ni_uapsd_maxsp) {
+		/* add to delivery q */
+		/* Link the new descriptor off the current tail so the
+		 * h/w can chase the chain when the SP is delivered. */
+		if ((lastbuf = STAILQ_LAST(&an->an_uapsd_q, ath_buf, bf_list))) {
+#ifdef AH_NEED_DESC_SWAP
+			lastbuf->bf_desc->ds_link = cpu_to_le32(bf->bf_daddr);
+#else
+			lastbuf->bf_desc->ds_link = bf->bf_daddr;
+#endif
+		}
+		STAILQ_INSERT_TAIL(&an->an_uapsd_q, bf, bf_list);
+		an->an_uapsd_qdepth++;
+		DPRINTF(sc, ATH_DEBUG_UAPSD,
+				"%s: added AC %d frame to delivery Q, new depth = %d\n", 
+				__func__, bf->bf_skb->priority, an->an_uapsd_qdepth);
+		return;
+	}
+	
+	/* check if need to make room on overflow queue */
+	if (an->an_uapsd_overflowqdepth == an->an_node.ni_uapsd_maxsp) {
+		/* 
+		 *  pop oldest from delivery queue and cleanup
+		 */ 
+		lastbuf = STAILQ_FIRST(&an->an_uapsd_q);
+		STAILQ_REMOVE_HEAD(&an->an_uapsd_q, bf_list);
+		dev_kfree_skb(lastbuf->bf_skb);
+		lastbuf->bf_skb = NULL;
+		ieee80211_free_node(lastbuf->bf_node);
+		lastbuf->bf_node = NULL;
+		/* Return the freed ath_buf to the global tx pool. */
+		ATH_TXBUF_LOCK_IRQ(sc);
+		STAILQ_INSERT_TAIL(&sc->sc_txbuf, lastbuf, bf_list);
+		ATH_TXBUF_UNLOCK_IRQ(sc);
+		
+		/*
+		 *  move oldest from overflow to delivery
+		 */
+		lastbuf = STAILQ_FIRST(&an->an_uapsd_overflowq);
+		STAILQ_REMOVE_HEAD(&an->an_uapsd_overflowq, bf_list);
+		an->an_uapsd_overflowqdepth--;
+		STAILQ_INSERT_TAIL(&an->an_uapsd_q, lastbuf, bf_list);
+		DPRINTF(sc, ATH_DEBUG_UAPSD,
+			"%s: delivery and overflow Qs full, dropped oldest\n",
+			__func__);
+	}
+
+	/* add to overflow q */
+	if ((lastbuf = STAILQ_LAST(&an->an_uapsd_overflowq, ath_buf, bf_list))) {
+#ifdef AH_NEED_DESC_SWAP
+		lastbuf->bf_desc->ds_link = cpu_to_le32(bf->bf_daddr);
+#else
+		lastbuf->bf_desc->ds_link = bf->bf_daddr;
+#endif
+	}
+	STAILQ_INSERT_TAIL(&an->an_uapsd_overflowq, bf, bf_list);
+	an->an_uapsd_overflowqdepth++;
+	DPRINTF(sc, ATH_DEBUG_UAPSD, "%s: added AC %d to overflow Q, new depth = %d\n",
+		__func__, bf->bf_skb->priority, an->an_uapsd_overflowqdepth);
+
+	return;
+}
+
+static int
+ath_tx_start(struct net_device *dev, struct ieee80211_node *ni, struct ath_buf *bf, struct sk_buff *skb, int nextfraglen)
+{
+#define	MIN(a,b)	((a) < (b) ? (a) : (b))
+	struct ath_softc *sc = dev->priv;
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ath_hal *ah = sc->sc_ah;
+	int isprot, ismcast, keyix, hdrlen, pktlen, try0;
+	u_int8_t rix, txrate, ctsrate;
+	u_int32_t ivlen = 0, icvlen = 0;
+	int comp = ATH_COMP_PROC_NO_COMP_NO_CCS;
+	u_int8_t cix = 0xff;		/* NB: silence compiler */
+	struct ath_desc *ds = NULL;
+	struct ath_txq *txq = NULL;
+	struct ieee80211_frame *wh;
+	u_int subtype, flags, ctsduration;
+	HAL_PKT_TYPE atype;
+	const HAL_RATE_TABLE *rt;
+	HAL_BOOL shortPreamble;
+	struct ath_node *an;
+	struct ath_vap *avp = ATH_VAP(vap);
+	int istxfrag;
+	u_int8_t antenna;
+
+	wh = (struct ieee80211_frame *) skb->data;
+	isprot = wh->i_fc[1] & IEEE80211_FC1_PROT;
+	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
+	hdrlen = ieee80211_anyhdrsize(wh);
+	istxfrag = (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) || 
+		(((le16toh(*(__le16 *) &wh->i_seq[0]) >> 
+		IEEE80211_SEQ_FRAG_SHIFT) & IEEE80211_SEQ_FRAG_MASK) > 0);
+
+	pktlen = skb->len;
+#ifdef ATH_SUPERG_FF
+	{
+		struct sk_buff *skbtmp = skb;
+		while ((skbtmp = skbtmp->next))
+			pktlen += skbtmp->len;
+	}
+#endif
+	/*
+	 * Packet length must not include any
+	 * pad bytes; deduct them here.
+	 */
+	pktlen -= (hdrlen & 3);
+
+	if (isprot) {
+		const struct ieee80211_cipher *cip;
+		struct ieee80211_key *k;
+
+		/*
+		 * Construct the 802.11 header+trailer for an encrypted
+		 * frame. The only reason this can fail is because of an
+		 * unknown or unsupported cipher/key type.
+		 */
+
+		/* FFXXX: change to handle linked skbs */
+		k = ieee80211_crypto_encap(ni, skb);
+		if (k == NULL) {
+			/*
+			 * This can happen when the key is yanked after the
+			 * frame was queued.  Just discard the frame; the
+			 * 802.11 layer counts failures and provides
+			 * debugging/diagnostics.
+			 */
+			return -EIO;
+		}
+		/*
+		 * Adjust the packet + header lengths for the crypto
+		 * additions and calculate the h/w key index.  When
+		 * a s/w mic is done the frame will have had any mic
+		 * added to it prior to entry so skb->len above will
+		 * account for it. Otherwise we need to add it to the
+		 * packet length.
+		 */
+		cip = k->wk_cipher;
+		hdrlen += cip->ic_header;
+		pktlen += cip->ic_header + cip->ic_trailer;
+		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
+			if (!istxfrag)
+				pktlen += cip->ic_miclen;
+			else
+				if (cip->ic_cipher != IEEE80211_CIPHER_TKIP)
+					pktlen += cip->ic_miclen;
+		}
+		keyix = k->wk_keyix;
+
+#ifdef ATH_SUPERG_COMP
+		icvlen = ath_get_icvlen(k) / 4;
+		ivlen = ath_get_ivlen(k) / 4;
+#endif
+		/* packet header may have moved, reset our local pointer */
+		wh = (struct ieee80211_frame *) skb->data;
+	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
+		/*
+		 * Use station key cache slot, if assigned.
+		 */
+		keyix = ni->ni_ucastkey.wk_keyix;
+		if (keyix == IEEE80211_KEYIX_NONE)
+			keyix = HAL_TXKEYIX_INVALID;
+	} else
+		keyix = HAL_TXKEYIX_INVALID;
+
+	pktlen += IEEE80211_CRC_LEN;
+
+	/*
+	 * Load the DMA map so any coalescing is done.  This
+	 * also calculates the number of descriptors we need.
+	 */
+#ifndef ATH_SUPERG_FF
+	bf->bf_skbaddr = bus_map_single(sc->sc_bdev,
+		skb->data, pktlen, BUS_DMA_TODEVICE);
+	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: skb %p [data %p len %u] skbaddr %llx\n",
+		__func__, skb, skb->data, skb->len, ito64(bf->bf_skbaddr));
+#else /* ATH_SUPERG_FF case */
+	/* NB: ensure skb->len had been updated for each skb so we don't need pktlen */
+	{
+		struct sk_buff *skbtmp = skb;
+		int i = 0;
+
+		bf->bf_skbaddr = bus_map_single(sc->sc_bdev,
+			skb->data, skb->len, BUS_DMA_TODEVICE);
+ 		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: skb%d %p [data %p len %u] skbaddr %llx\n",
+			__func__, i, skb, skb->data, skb->len, ito64(bf->bf_skbaddr));
+		while ((skbtmp = skbtmp->next)) {
+			bf->bf_skbaddrff[i++] = bus_map_single(sc->sc_bdev,
+				skbtmp->data, skbtmp->len, BUS_DMA_TODEVICE);
+			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: skb%d %p [data %p len %u] skbaddr %llx\n",
+				__func__, i, skbtmp, skbtmp->data, skbtmp->len,
+				ito64(bf->bf_skbaddrff[i-1]));
+		}
+		bf->bf_numdesc = i + 1;
+	}
+#endif /* ATH_SUPERG_FF */
+	bf->bf_skb = skb;
+	bf->bf_node = ni;
+
+	/* setup descriptors */
+	ds = bf->bf_desc;
+#ifdef ATH_SUPERG_XR
+	if(vap->iv_flags & IEEE80211_F_XR ) 
+		rt = sc->sc_xr_rates;
+	else
+		rt = sc->sc_currates;
+#else
+	rt = sc->sc_currates;
+#endif
+	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
+
+	/*
+	 * NB: the 802.11 layer marks whether or not we should
+	 * use short preamble based on the current mode and
+	 * negotiated parameters.
+	 */
+	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
+	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
+		shortPreamble = AH_TRUE;
+		sc->sc_stats.ast_tx_shortpre++;
+	} else
+		shortPreamble = AH_FALSE;
+
+	an = ATH_NODE(ni);
+	flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
+	/*
+	 * Calculate Atheros packet type from IEEE80211 packet header,
+	 * setup for rate calculations, and select h/w transmit queue.
+	 */
+	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
+	case IEEE80211_FC0_TYPE_MGT:
+		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
+			atype = HAL_PKT_TYPE_BEACON;
+		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
+			atype = HAL_PKT_TYPE_PROBE_RESP;
+		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
+			atype = HAL_PKT_TYPE_ATIM;
+		else
+			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
+		rix = sc->sc_minrateix;
+		txrate = rt->info[rix].rateCode;
+		if (shortPreamble)
+			txrate |= rt->info[rix].shortPreamble;
+		try0 = ATH_TXMAXTRY;
+
+		if (ni->ni_flags & IEEE80211_NODE_QOS) {
+			/* NB: force all management frames to highest queue */
+			txq = sc->sc_ac2q[WME_AC_VO];
+		} else
+			txq = sc->sc_ac2q[WME_AC_BE];
+		break;
+	case IEEE80211_FC0_TYPE_CTL:
+		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
+		rix = sc->sc_minrateix;
+		txrate = rt->info[rix].rateCode;
+		if (shortPreamble)
+			txrate |= rt->info[rix].shortPreamble;
+		try0 = ATH_TXMAXTRY;
+
+		if (ni->ni_flags & IEEE80211_NODE_QOS) {
+			/* NB: force all ctl frames to highest queue */
+			txq = sc->sc_ac2q[WME_AC_VO];
+		} else
+			txq = sc->sc_ac2q[WME_AC_BE];
+		break;
+	case IEEE80211_FC0_TYPE_DATA:
+		atype = HAL_PKT_TYPE_NORMAL;		/* default */
+		
+		if (ismcast) {
+			rix = ath_tx_findindex(rt, vap->iv_mcast_rate);
+			txrate = rt->info[rix].rateCode;
+			if (shortPreamble)
+				txrate |= rt->info[rix].shortPreamble;
+			/* 
+			 * ATH_TXMAXTRY disables Multi-rate retries, which
+			 * isn't applicable to mcast packets and overrides
+			 * the desired transmission rate for mcast traffic.
+			 */
+			try0 = ATH_TXMAXTRY;
+		} else {
+			/*
+			 * Data frames; consult the rate control module.
+			 */
+			sc->sc_rc->ops->findrate(sc, an, shortPreamble, skb->len,
+				&rix, &try0, &txrate);
+
+			/* Note: HAL does not support distinguishing between short
+ 			 * and long retry. These both are set via try0 here then.
+ 			 * In the openhal we'll fix this ;) */
+			if (vap->iv_flags & IEEE80211_F_SWRETRY && vap->iv_txmax != try0)
+				try0 = vap->iv_txmax;
+
+			/* Ratecontrol sometimes returns invalid rate index */
+			if (rix != 0xff)
+				an->an_prevdatarix = rix;
+			else
+				rix = an->an_prevdatarix;
+		}
+
+		if (M_FLAG_GET(skb, M_UAPSD)) {
+			/* U-APSD frame, handle txq later */
+			break;
+		}
+
+		/*
+		 * Default all non-QoS traffic to the best-effort queue.
+		 */
+		if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
+			/* XXX validate skb->priority, remove mask */
+			txq = sc->sc_ac2q[skb->priority & 0x3];
+			if (ic->ic_wme.wme_wmeChanParams.cap_wmeParams[skb->priority].wmep_noackPolicy) {
+				flags |= HAL_TXDESC_NOACK;
+				sc->sc_stats.ast_tx_noack++;
+			}
+		} else
+			txq = sc->sc_ac2q[WME_AC_BE];
+		break;
+	default:
+		printk("%s: bogus frame type 0x%x (%s)\n", dev->name,
+			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
+		/* XXX statistic */
+		return -EIO;
+	}
+
+#ifdef ATH_SUPERG_XR 
+	if (vap->iv_flags & IEEE80211_F_XR ) {
+		txq = sc->sc_xrtxq;
+		if (!txq)
+			txq = sc->sc_ac2q[WME_AC_BK];
+		flags |= HAL_TXDESC_CTSENA;
+		cix = rt->info[sc->sc_protrix].controlRate;
+	}
+#endif
+	/*
+	 * When servicing one or more stations in power-save mode (or)
+	 * if there is some mcast data waiting on mcast queue
+	 * (to prevent out of order delivery of mcast,bcast packets)
+	 * multicast frames must be buffered until after the beacon.
+	 * We use the private mcast queue for that.
+	 */
+	if (ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
+		txq = &avp->av_mcastq;
+		/* XXX? more bit in 802.11 frame header */
+	}
+
+	/*
+	 * Calculate miscellaneous flags.
+	 */
+	if (ismcast) {
+		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
+		sc->sc_stats.ast_tx_noack++;
+		try0 = ATH_TXMAXTRY;    /* turn off multi-rate retry for multicast traffic */
+	 } else if (pktlen > vap->iv_rtsthreshold) { 
+#ifdef ATH_SUPERG_FF
+		/* we could refine to only check that the frame of interest
+		 * is a FF, but this seems inconsistent.
+		 */
+		if (!(vap->iv_ath_cap & ni->ni_ath_flags & IEEE80211_ATHC_FF)) {
+#endif
+			flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
+			cix = rt->info[rix].controlRate;
+			sc->sc_stats.ast_tx_rts++;
+#ifdef ATH_SUPERG_FF
+		}
+#endif
+	}
+
+	/*
+	 * If 802.11g protection is enabled, determine whether
+	 * to use RTS/CTS or just CTS.  Note that this is only
+	 * done for OFDM unicast frames.
+	 */
+	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
+	    rt->info[rix].phy == IEEE80211_T_OFDM &&
+	    (flags & HAL_TXDESC_NOACK) == 0) {
+		/* XXX fragments must use CCK rates w/ protection */
+		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
+			flags |= HAL_TXDESC_RTSENA;
+		else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
+			flags |= HAL_TXDESC_CTSENA;
+
+		if (istxfrag)
+			/*
+			**  if Tx fragment, it would be desirable to 
+			**  use highest CCK rate for RTS/CTS.
+			**  However, stations farther away may detect it
+			**  at a lower CCK rate. Therefore, use the 
+			**  configured protect rate, which is 2 Mbps
+			**  for 11G.
+			*/
+			cix = rt->info[sc->sc_protrix].controlRate;
+		else
+			cix = rt->info[sc->sc_protrix].controlRate;
+		sc->sc_stats.ast_tx_protect++;
+	}
+
+	/*
+	 * Calculate duration.  This logically belongs in the 802.11
+	 * layer but it lacks sufficient information to calculate it.
+	 */
+	if ((flags & HAL_TXDESC_NOACK) == 0 &&
+	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
+		u_int16_t dur;
+		/*
+		 * XXX not right with fragmentation.
+		 */
+		if (shortPreamble)
+			dur = rt->info[rix].spAckDuration;
+		else
+			dur = rt->info[rix].lpAckDuration;
+
+		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
+			dur += dur;  /* Add additional 'SIFS + ACK' */
+
+			/*
+			** Compute size of next fragment in order to compute
+			** durations needed to update NAV.
+			** The last fragment uses the ACK duration only.
+			** Add time for next fragment.
+			*/
+			dur += ath_hal_computetxtime(ah, rt, nextfraglen, 
+				rix, shortPreamble);
+		}
+
+		if (istxfrag) {
+			/*
+			**  Force hardware to use computed duration for next
+			**  fragment by disabling multi-rate retry, which
+			**  updates duration based on the multi-rate
+			**  duration table.
+			*/
+			try0 = ATH_TXMAXTRY;
+		}
+
+		wh->i_dur = cpu_to_le16(dur);
+	}
+
+	/*
+	 * Calculate RTS/CTS rate and duration if needed.
+	 */
+	ctsduration = 0;
+	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
+		/*
+		 * CTS transmit rate is derived from the transmit rate
+		 * by looking in the h/w rate table.  We must also factor
+		 * in whether or not a short preamble is to be used.
+		 */
+		/* NB: cix is set above where RTS/CTS is enabled */
+		KASSERT(cix != 0xff, ("cix not setup"));
+		ctsrate = rt->info[cix].rateCode;
+		/*
+		 * Compute the transmit duration based on the frame
+		 * size and the size of an ACK frame.  We call into the
+		 * HAL to do the computation since it depends on the
+		 * characteristics of the actual PHY being used.
+		 *
+		 * NB: CTS is assumed the same size as an ACK so we can
+		 *     use the precalculated ACK durations.
+		 */
+		if (shortPreamble) {
+			ctsrate |= rt->info[cix].shortPreamble;
+			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
+				ctsduration += rt->info[cix].spAckDuration;
+			ctsduration += ath_hal_computetxtime(ah,
+				rt, pktlen, rix, AH_TRUE);
+			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
+				ctsduration += rt->info[rix].spAckDuration;
+		} else {
+			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
+				ctsduration += rt->info[cix].lpAckDuration;
+			ctsduration += ath_hal_computetxtime(ah,
+				rt, pktlen, rix, AH_FALSE);
+			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
+				ctsduration += rt->info[rix].lpAckDuration;
+		}
+		/*
+		 * Must disable multi-rate retry when using RTS/CTS.
+		 */
+		try0 = ATH_TXMAXTRY;
+	} else 
+		ctsrate = 0;
+
+	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
+		/* FFXXX: need multi-skb version to dump entire FF */
+		ieee80211_dump_pkt(ic, skb->data, skb->len,
+			sc->sc_hwmap[txrate].ieeerate, -1);
+
+	/*
+	 * Determine if a tx interrupt should be generated for
+	 * this descriptor.  We take a tx interrupt to reap
+	 * descriptors when the h/w hits an EOL condition or
+	 * when the descriptor is specifically marked to generate
+	 * an interrupt.  We periodically mark descriptors in this
+	 * way to ensure timely replenishing of the supply needed
+	 * for sending frames.  Deferring interrupts reduces system
+	 * load and potentially allows more concurrent work to be
+	 * done, but if done too aggressively, it can cause senders
+	 * to backup.
+	 *
+	 * NB: use >= to deal with sc_txintrperiod changing
+	 *     dynamically through sysctl.
+	 */
+	if (!M_FLAG_GET(skb, M_UAPSD) &&
+		++txq->axq_intrcnt >= sc->sc_txintrperiod) {
+		flags |= HAL_TXDESC_INTREQ;
+		txq->axq_intrcnt = 0;
+	}
+
+#ifdef ATH_SUPERG_COMP
+	if (ATH_NODE(ni)->an_decomp_index != INVALID_DECOMP_INDEX && 
+	    !ismcast &&
+	    ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_DATA) &&
+	    ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) != IEEE80211_FC0_SUBTYPE_NODATA)) {
+		if (pktlen > ATH_COMP_THRESHOLD)
+			comp = ATH_COMP_PROC_COMP_OPTIMAL;
+		else
+			comp = ATH_COMP_PROC_NO_COMP_ADD_CCS;
+	} 
+#endif
+
+	/*
+	 * sc_txantenna == 0 means transmit diversity mode.
+	 * sc_txantenna == 1 or sc_txantenna == 2 means the user has selected
+	 * the first or second antenna port.
+	 * If the user has set the txantenna, use it for multicast frames too.
+	 */
+	if (ismcast && !sc->sc_txantenna) {
+		antenna = sc->sc_mcastantenna + 1;
+		sc->sc_mcastantenna = (sc->sc_mcastantenna + 1) & 0x1;
+	} else
+		antenna = sc->sc_txantenna;
+
+	/*
+	 * Formulate first tx descriptor with tx controls.
+	 */
+	/* XXX check return value? */
+	ath_hal_setuptxdesc(ah, ds
+			    , pktlen		/* packet length */
+			    , hdrlen		/* header length */
+			    , atype		/* Atheros packet type */
+			    , MIN(ni->ni_txpower, 60)/* txpower */
+			    , txrate, try0	/* series 0 rate/tries */
+			    , keyix		/* key cache index */
+			    , antenna		/* antenna mode */
+			    , flags		/* flags */
+			    , ctsrate		/* rts/cts rate */
+			    , ctsduration	/* rts/cts duration */
+			    , icvlen		/* comp icv len */
+			    , ivlen		/* comp iv len */
+			    , comp		/* comp scheme */
+		);
+	bf->bf_flags = flags;			/* record for post-processing */
+
+	/*
+	 * Setup the multi-rate retry state only when we're
+	 * going to use it.  This assumes ath_hal_setuptxdesc
+	 * initializes the descriptors (so we don't have to)
+	 * when the hardware supports multi-rate retry and
+	 * we don't use it.
+	 */
+	if (try0 != ATH_TXMAXTRY)
+		sc->sc_rc->ops->setupxtxdesc(sc, an, ds, shortPreamble,
+					     skb->len, rix);
+
+#ifndef ATH_SUPERG_FF
+	ds->ds_link = 0;
+	ds->ds_data = bf->bf_skbaddr;
+
+	ath_hal_filltxdesc(ah, ds
+			   , skb->len	/* segment length */
+			   , AH_TRUE	/* first segment */
+			   , AH_TRUE	/* last segment */
+			   , ds		/* first descriptor */
+		);
+
+	/* NB: The desc swap function becomes a no-op
+	 * if descriptor swapping is not enabled.
+	 */
+	ath_desc_swap(ds);
+
+	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: Q%d: %08x %08x %08x %08x %08x %08x\n",
+	    __func__, M_FLAG_GET(skb, M_UAPSD) ? 0 : txq->axq_qnum, ds->ds_link, ds->ds_data,
+	    ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]);
+#else /* ATH_SUPERG_FF */
+	{
+		struct sk_buff *skbtmp = skb;
+		struct ath_desc *ds0 = ds;
+		int i;
+
+		ds->ds_data = bf->bf_skbaddr;
+		ds->ds_link = (skb->next == NULL) ? 0 : bf->bf_daddr + sizeof(*ds);
+
+		ath_hal_filltxdesc(ah, ds
+			, skbtmp->len		/* segment length */
+			, AH_TRUE		/* first segment */
+			, skbtmp->next == NULL	/* last segment */
+			, ds			/* first descriptor */
+		);
+
+		/* NB: The desc swap function becomes void, 
+		 * if descriptor swapping is not enabled
+		 */
+		ath_desc_swap(ds);
+
+		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: Q%d: (ds)%p (lk)%08x (d)%08x (c0)%08x (c1)%08x %08x %08x\n",
+			__func__, M_FLAG_GET(skb, M_UAPSD) ? 0 : txq->axq_qnum,
+			ds, ds->ds_link, ds->ds_data, ds->ds_ctl0, ds->ds_ctl1,
+			ds->ds_hw[0], ds->ds_hw[1]);
+		for (i= 0, skbtmp = skbtmp->next; i < bf->bf_numdesc - 1; i++, skbtmp = skbtmp->next) {
+			ds++;
+			ds->ds_link = skbtmp->next == NULL ? 0 : bf->bf_daddr + sizeof(*ds) * (i + 2);
+			ds->ds_data = bf->bf_skbaddrff[i];
+			ath_hal_filltxdesc(ah, ds
+				, skbtmp->len		/* segment length */
+				, AH_FALSE		/* first segment */
+				, skbtmp->next == NULL	/* last segment */
+				, ds0			/* first descriptor */
+			);
+
+			/* NB: The desc swap function becomes void, 
+		 	 * if descriptor swapping is not enabled
+		 	 */
+			ath_desc_swap(ds);
+
+			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: Q%d: %08x %08x %08x %08x %08x %08x\n",
+				__func__, M_FLAG_GET(skb, M_UAPSD) ? 0 : txq->axq_qnum,
+				ds->ds_link, ds->ds_data, ds->ds_ctl0,
+				ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]);
+		}
+	}
+#endif
+
+	if (M_FLAG_GET(skb, M_UAPSD)) {
+		/* must lock against interrupt-time processing (i.e., not just tasklet) */
+		ATH_NODE_UAPSD_LOCK_IRQ(an);
+		DPRINTF(sc, ATH_DEBUG_UAPSD, "%s: Qing U-APSD data frame for node %s \n", 
+			__func__, ether_sprintf(an->an_node.ni_macaddr));
+		ath_tx_uapsdqueue(sc, an, bf);
+		if (IEEE80211_NODE_UAPSD_USETIM(ni) && (an->an_uapsd_qdepth == 1))
+			vap->iv_set_tim(ni, 1);
+		ATH_NODE_UAPSD_UNLOCK_IRQ(an);
+
+		return 0;
+	}
+	
+	
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "%s: %p<%s> refcnt %d\n",
+		__func__, vap->iv_bss, ether_sprintf(vap->iv_bss->ni_macaddr),
+		ieee80211_node_refcnt(vap->iv_bss));
+
+
+	ath_tx_txqaddbuf(sc, ni, txq, bf, ds, pktlen);
+	return 0;
+#undef MIN
+}
+
+/*
+ * Process completed xmit descriptors from the specified queue.
+ * Should only be called from tasklet context
+ */
+static void
+ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
+{
+	struct ath_hal *ah = sc->sc_ah;
+	struct ath_buf *bf = NULL;
+	struct ath_desc *ds = NULL;
+	struct ieee80211_node *ni = NULL;
+	struct ath_node *an = NULL;
+	int sr, lr;
+	HAL_STATUS status;
+	int uapsdq = 0;
+	unsigned long uapsdq_lockflags = 0;
+
+	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %d (0x%x), link %p\n", __func__,
+		txq->axq_qnum, ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
+		txq->axq_link);
+
+	if (txq == sc->sc_uapsdq) {
+		DPRINTF(sc, ATH_DEBUG_UAPSD, "%s: reaping U-APSD txq\n", __func__);
+		uapsdq = 1;
+	}
+
+	for (;;) {
+		if (uapsdq)
+			ATH_TXQ_UAPSDQ_LOCK_IRQ(txq);
+		else
+			ATH_TXQ_LOCK(txq);
+		txq->axq_intrcnt = 0; /* reset periodic desc intr count */
+		bf = STAILQ_FIRST(&txq->axq_q);
+		if (bf == NULL) {
+			txq->axq_link = NULL;
+			if (uapsdq)
+				ATH_TXQ_UAPSDQ_UNLOCK_IRQ(txq);
+			else
+				ATH_TXQ_UNLOCK(txq);
+			break;
+		}
+
+#ifdef ATH_SUPERG_FF
+		ds = &bf->bf_desc[bf->bf_numdesc - 1];
+		DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: frame's last desc: %p\n",
+			__func__, ds);
+#else
+		ds = bf->bf_desc;		/* NB: last descriptor */
+#endif
+		status = ath_hal_txprocdesc(ah, ds);
+#ifdef AR_DEBUG
+		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
+			ath_printtxbuf(bf, status == HAL_OK);
+#endif
+		if (status == HAL_EINPROGRESS) {
+			if (uapsdq)
+				ATH_TXQ_UAPSDQ_UNLOCK_IRQ(txq);
+			else
+				ATH_TXQ_UNLOCK(txq);
+			break;
+		}
+
+		ATH_TXQ_REMOVE_HEAD(txq, bf_list);
+		if (uapsdq)
+			ATH_TXQ_UAPSDQ_UNLOCK_IRQ(txq);
+		else
+			ATH_TXQ_UNLOCK(txq);
+
+		ni = bf->bf_node;
+		if (ni != NULL) {
+			an = ATH_NODE(ni);
+			if (ds->ds_txstat.ts_status == 0) {
+				u_int8_t txant = ds->ds_txstat.ts_antenna;
+				sc->sc_stats.ast_ant_tx[txant]++;
+				sc->sc_ant_tx[txant]++;
+#ifdef ATH_SUPERG_FF
+				if (bf->bf_numdesc > 1)
+					ni->ni_vap->iv_stats.is_tx_ffokcnt++;
+#endif
+				if (ds->ds_txstat.ts_rate & HAL_TXSTAT_ALTRATE)
+					sc->sc_stats.ast_tx_altrate++;
+				sc->sc_stats.ast_tx_rssi =
+					ds->ds_txstat.ts_rssi;
+				ATH_RSSI_LPF(an->an_halstats.ns_avgtxrssi,
+					ds->ds_txstat.ts_rssi);
+				if (bf->bf_skb->priority == WME_AC_VO ||
+				    bf->bf_skb->priority == WME_AC_VI)
+					ni->ni_ic->ic_wme.wme_hipri_traffic++;
+				ni->ni_inact = ni->ni_inact_reload;
+			} else {
+#ifdef ATH_SUPERG_FF
+				if (bf->bf_numdesc > 1)
+					ni->ni_vap->iv_stats.is_tx_fferrcnt++;
+#endif
+				if (ds->ds_txstat.ts_status & HAL_TXERR_XRETRY) {
+					sc->sc_stats.ast_tx_xretries++;
+					if (ni->ni_flags & IEEE80211_NODE_UAPSD_TRIG) {
+						ni->ni_stats.ns_tx_eosplost++;
+						DPRINTF(sc, ATH_DEBUG_UAPSD,
+							"%s: frame in SP retried out, possible EOSP stranded!!!\n",
+							__func__);
+					}
+				}
+				if (ds->ds_txstat.ts_status & HAL_TXERR_FIFO)
+					sc->sc_stats.ast_tx_fifoerr++;
+				if (ds->ds_txstat.ts_status & HAL_TXERR_FILT)
+					sc->sc_stats.ast_tx_filtered++;
+			}
+			sr = ds->ds_txstat.ts_shortretry;
+			lr = ds->ds_txstat.ts_longretry;
+			sc->sc_stats.ast_tx_shortretry += sr;
+			sc->sc_stats.ast_tx_longretry += lr;
+			/*
+			 * Hand the descriptor to the rate control algorithm
+			 * if the frame wasn't dropped for filtering or sent
+			 * w/o waiting for an ack.  In those cases the rssi
+			 * and retry counts will be meaningless.
+			 */
+			if ((ds->ds_txstat.ts_status & HAL_TXERR_FILT) == 0 &&
+			    (bf->bf_flags & HAL_TXDESC_NOACK) == 0)
+				sc->sc_rc->ops->tx_complete(sc, an, ds);
+			/*
+			 * Reclaim reference to node.
+			 *
+			 * NB: the node may be reclaimed here if, for example
+			 *     this is a DEAUTH message that was sent and the
+			 *     node was timed out due to inactivity.
+			 */
+			 ieee80211_free_node(ni); 
+		}
+
+		bus_unmap_single(sc->sc_bdev, bf->bf_skbaddr, 
+                                 bf->bf_skb->len, BUS_DMA_TODEVICE);
+		if (ni && uapsdq) {
+			/* detect EOSP for this node */
+			struct ieee80211_qosframe *qwh = (struct ieee80211_qosframe *)bf->bf_skb->data;
+			an = ATH_NODE(ni);
+			KASSERT(ni != NULL, ("Processing U-APSD txq for ath_buf with no node!\n"));
+			if (qwh->i_qos[0] & IEEE80211_QOS_EOSP) {
+				DPRINTF(sc, ATH_DEBUG_UAPSD, "%s: EOSP detected for node (%s) on desc %p\n", 
+					__func__, ether_sprintf(ni->ni_macaddr), ds);
+				ATH_NODE_UAPSD_LOCK_IRQ(an);
+				ni->ni_flags &= ~IEEE80211_NODE_UAPSD_SP;
+				if (an->an_uapsd_qdepth == 0 && an->an_uapsd_overflowqdepth != 0) {
+					STAILQ_CONCAT(&an->an_uapsd_q, &an->an_uapsd_overflowq);
+					an->an_uapsd_qdepth = an->an_uapsd_overflowqdepth;
+					an->an_uapsd_overflowqdepth = 0;
+				}
+				ATH_NODE_UAPSD_UNLOCK_IRQ(an);
+			}
+		}
+
+		{
+			struct ieee80211_frame *wh = (struct ieee80211_frame *)bf->bf_skb->data;
+			if ((ds->ds_txstat.ts_seqnum << IEEE80211_SEQ_SEQ_SHIFT) & ~IEEE80211_SEQ_SEQ_MASK) {
+				DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: h/w assigned sequence number is not sane (%d), ignoring it\n", __func__,
+				        ds->ds_txstat.ts_seqnum);
+			} else {
+				DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: updating frame's sequence number from %d to %d\n", __func__, 
+				        (le16toh(*(__le16 *)&wh->i_seq[0]) & IEEE80211_SEQ_SEQ_MASK) >> IEEE80211_SEQ_SEQ_SHIFT,
+				        ds->ds_txstat.ts_seqnum);
+
+				*(__le16 *)&wh->i_seq[0] = htole16(
+					ds->ds_txstat.ts_seqnum << IEEE80211_SEQ_SEQ_SHIFT |
+					(le16toh(*(__le16 *)&wh->i_seq[0]) & ~IEEE80211_SEQ_SEQ_MASK));
+			}
+		}
+
+#ifdef ATH_SUPERG_FF
+		{
+			struct sk_buff *skbfree, *skb = bf->bf_skb;
+			int i;
+
+			skbfree = skb;
+			skb = skb->next;
+			DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: free skb %p\n",
+				__func__, skbfree);
+			ath_tx_capture(sc->sc_dev, ds, skbfree);
+			for (i = 1; i < bf->bf_numdesc; i++) {
+				bus_unmap_single(sc->sc_bdev, bf->bf_skbaddrff[i-1],
+					bf->bf_skb->len, BUS_DMA_TODEVICE);
+				skbfree = skb;
+				skb = skb->next;
+				DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: free skb %p\n",
+					__func__, skbfree);
+				ath_tx_capture(sc->sc_dev, ds, skbfree);
+			}
+		}
+		bf->bf_numdesc = 0;
+#else
+		DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: free skb %p\n", __func__, bf->bf_skb);
+		ath_tx_capture(sc->sc_dev, ds, bf->bf_skb);
+#endif
+		bf->bf_skb = NULL;
+		bf->bf_node = NULL;
+
+		ATH_TXBUF_LOCK_IRQ(sc);
+		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
+		if (sc->sc_devstopped) {
+			++sc->sc_reapcount;
+			if (sc->sc_reapcount > ATH_TXBUF_FREE_THRESHOLD) {
+				if (!sc->sc_dfswait)
+					netif_start_queue(sc->sc_dev);
+				DPRINTF(sc, ATH_DEBUG_TX_PROC,
+					"%s: tx tasklet restart the queue\n",
+					__func__);
+				sc->sc_reapcount = 0;
+				sc->sc_devstopped = 0;
+			} else
+				ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, NULL);
+		}
+		ATH_TXBUF_UNLOCK_IRQ(sc);
+	}
+#ifdef ATH_SUPERG_FF
+	/* flush ff staging queue if buffer low */
+	if (txq->axq_depth <= sc->sc_fftxqmin - 1) {
+		/* NB: consider only flushing a preset number based on age. */
+		ath_ffstageq_flush(sc, txq, ath_ff_neverflushtestdone);
+	}
+#endif /* ATH_SUPERG_FF */
+}
+
+static __inline int
+txqactive(struct ath_hal *ah, int qnum)
+{
+	u_int32_t txqs = 1 << qnum;
+	ath_hal_gettxintrtxqs(ah, &txqs);
+	return (txqs & (1 << qnum));
+}
+
+/*
+ * Deferred processing of transmit interrupt; special-cased
+ * for a single hardware transmit queue (e.g. 5210 and 5211).
+ */
+static void
+ath_tx_tasklet_q0(TQUEUE_ARG data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct ath_softc *sc = dev->priv;
+
+	if (txqactive(sc->sc_ah, 0))
+		ath_tx_processq(sc, &sc->sc_txq[0]);
+	if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
+		ath_tx_processq(sc, sc->sc_cabq);
+
+	netif_wake_queue(dev);
+
+	if (sc->sc_softled)
+		ath_led_event(sc, ATH_LED_TX);
+}
+
+/*
+ * Deferred processing of transmit interrupt; special-cased
+ * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
+ */
+static void
+ath_tx_tasklet_q0123(TQUEUE_ARG data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct ath_softc *sc = dev->priv;
+
+	/*
+	 * Process each active queue.
+	 */
+	if (txqactive(sc->sc_ah, 0))
+		ath_tx_processq(sc, &sc->sc_txq[0]);
+	if (txqactive(sc->sc_ah, 1))
+		ath_tx_processq(sc, &sc->sc_txq[1]);
+	if (txqactive(sc->sc_ah, 2))
+		ath_tx_processq(sc, &sc->sc_txq[2]);
+	if (txqactive(sc->sc_ah, 3))
+		ath_tx_processq(sc, &sc->sc_txq[3]);
+	if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
+		ath_tx_processq(sc, sc->sc_cabq);
+#ifdef ATH_SUPERG_XR
+	if (sc->sc_xrtxq && txqactive(sc->sc_ah, sc->sc_xrtxq->axq_qnum))
+		ath_tx_processq(sc, sc->sc_xrtxq);
+#endif
+	if (sc->sc_uapsdq && txqactive(sc->sc_ah, sc->sc_uapsdq->axq_qnum))
+		ath_tx_processq(sc, sc->sc_uapsdq);
+
+	netif_wake_queue(dev);
+
+	if (sc->sc_softled)
+		ath_led_event(sc, ATH_LED_TX);
+}
+
+/*
+ * Deferred processing of transmit interrupt.
+ */
+static void
+ath_tx_tasklet(TQUEUE_ARG data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct ath_softc *sc = dev->priv;
+	int i;
+
+	/*
+	 * Process each active queue.
+	 */
+	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
+		if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
+			ath_tx_processq(sc, &sc->sc_txq[i]);
+#ifdef ATH_SUPERG_XR
+	if (sc->sc_xrtxq && txqactive(sc->sc_ah, sc->sc_xrtxq->axq_qnum))
+		ath_tx_processq(sc, sc->sc_xrtxq);
+#endif
+
+	netif_wake_queue(dev);
+
+	if (sc->sc_softled)
+		ath_led_event(sc, ATH_LED_TX);
+}
+
+static void
+ath_tx_timeout(struct net_device *dev)
+{
+	struct ath_softc *sc = dev->priv;
+
+	DPRINTF(sc, ATH_DEBUG_WATCHDOG, "%s: %sRUNNING %svalid\n",
+		__func__, (dev->flags & IFF_RUNNING) ? "" : "!",
+		sc->sc_invalid ? "in" : "");
+
+	if ((dev->flags & IFF_RUNNING) && !sc->sc_invalid) {
+		sc->sc_stats.ast_watchdog++;
+		ath_reset(dev);	/* Avoid taking a semaphore in ath_init */
+	}
+}
+
+/* 
+ * Context: softIRQ and hwIRQ
+ */
+static void
+ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
+{
+	struct ath_hal *ah = sc->sc_ah;
+	struct ath_buf *bf;
+	struct sk_buff *skb;
+#ifdef ATH_SUPERG_FF
+	struct sk_buff *tskb;
+#endif
+	int i;
+
+	/*
+	 * NB: this assumes output has been stopped and
+	 *     we do not need to block ath_tx_tasklet
+	 */
+	for (;;) {
+		ATH_TXQ_LOCK(txq);
+		bf = STAILQ_FIRST(&txq->axq_q);
+		if (bf == NULL) {
+			txq->axq_link = NULL;
+			ATH_TXQ_UNLOCK(txq);
+			break;
+		}
+		ATH_TXQ_REMOVE_HEAD(txq, bf_list);
+		ATH_TXQ_UNLOCK(txq);
+#ifdef AR_DEBUG
+		if (sc->sc_debug & ATH_DEBUG_RESET)
+			ath_printtxbuf(bf, ath_hal_txprocdesc(ah, bf->bf_desc) == HAL_OK);
+#endif /* AR_DEBUG */
+		skb = bf->bf_skb->next;
+		bus_unmap_single(sc->sc_bdev,
+			bf->bf_skbaddr, bf->bf_skb->len, BUS_DMA_TODEVICE);
+		dev_kfree_skb_any(bf->bf_skb);
+		i = 0;
+#ifdef ATH_SUPERG_FF
+		while (skb) {
+			tskb = skb->next;
+			bus_unmap_single(sc->sc_bdev,
+				 bf->bf_skbaddrff[i++], skb->len, BUS_DMA_TODEVICE);
+			dev_kfree_skb_any(skb);
+			skb = tskb;
+		}
+#endif /* ATH_SUPERG_FF */
+		if (bf->bf_node)
+			ieee80211_free_node(bf->bf_node);
+
+		bf->bf_skb = NULL;
+		bf->bf_node = NULL;
+
+		ATH_TXBUF_LOCK(sc);
+		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
+		ATH_TXBUF_UNLOCK(sc);
+	}
+}
+
+static void
+ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
+{
+	struct ath_hal *ah = sc->sc_ah;
+
+	(void) ath_hal_stoptxdma(ah, txq->axq_qnum);
+	DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] 0x%x, link %p\n",
+		__func__, txq->axq_qnum,
+		ath_hal_gettxbuf(ah, txq->axq_qnum), txq->axq_link);
+}
+
+/*
+ * Drain the transmit queues and reclaim resources.
+ */
+static void
+ath_draintxq(struct ath_softc *sc)
+{
+	struct ath_hal *ah = sc->sc_ah;
+	int i;
+
+	/* XXX return value */
+	if (!sc->sc_invalid) {
+		(void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
+		DPRINTF(sc, ATH_DEBUG_RESET, "%s: beacon queue 0x%x\n",
+			__func__, ath_hal_gettxbuf(ah, sc->sc_bhalq));
+		for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
+			if (ATH_TXQ_SETUP(sc, i))
+				ath_tx_stopdma(sc, &sc->sc_txq[i]);
+	}
+	sc->sc_dev->trans_start = jiffies;
+	netif_start_queue(sc->sc_dev);		/* XXX move to callers */
+	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
+		if (ATH_TXQ_SETUP(sc, i))
+			ath_tx_draintxq(sc, &sc->sc_txq[i]);
+}
+
+/*
+ * Disable the receive h/w in preparation for a reset.
+ */
+static void
+ath_stoprecv(struct ath_softc *sc)
+{
+#define	PA2DESC(_sc, _pa) \
+	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
+		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
+	struct ath_hal *ah = sc->sc_ah;
+	u_int64_t tsf;
+
+	ath_hal_stoppcurecv(ah);	/* disable PCU */
+	ath_hal_setrxfilter(ah, 0);	/* clear recv filter */
+	ath_hal_stopdmarecv(ah);	/* disable DMA engine */
+	mdelay(3);			/* 3 ms is long enough for 1 frame */
+	tsf = ath_hal_gettsf64(ah);
+#ifdef AR_DEBUG
+	if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
+		struct ath_buf *bf;
+
+		printk("ath_stoprecv: rx queue 0x%x, link %p\n",
+			ath_hal_getrxbuf(ah), sc->sc_rxlink);
+		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
+			struct ath_desc *ds = bf->bf_desc;
+			HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
+				bf->bf_daddr, PA2DESC(sc, ds->ds_link), tsf);
+			if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
+				ath_printrxbuf(bf, status == HAL_OK);
+		}
+	}
+#endif
+	sc->sc_rxlink = NULL;		/* just in case */
+#undef PA2DESC
+}
+
+/*
+ * Enable the receive h/w following a reset.
+ */
+static int
+ath_startrecv(struct ath_softc *sc)
+{
+	struct ath_hal *ah = sc->sc_ah;
+	struct net_device *dev = sc->sc_dev;
+	struct ath_buf *bf;
+
+	/*
+	 * Cisco's VPN software requires that drivers be able to
+	 * receive encapsulated frames that are larger than the MTU.
+	 * Since we can't be sure how large a frame we'll get, set up
+	 * to handle the largest frame possible.
+	 */
+#ifdef ATH_SUPERG_FF
+	sc->sc_rxbufsize = roundup(ATH_FF_MAX_LEN, sc->sc_cachelsz);
+#else
+	sc->sc_rxbufsize = roundup(IEEE80211_MAX_LEN, sc->sc_cachelsz);
+#endif
+	DPRINTF(sc,ATH_DEBUG_RESET, "%s: mtu %u cachelsz %u rxbufsize %u\n",
+		__func__, dev->mtu, sc->sc_cachelsz, sc->sc_rxbufsize);
+
+	sc->sc_rxlink = NULL;
+	STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
+		int error = ath_rxbuf_init(sc, bf);
+		ATH_RXBUF_RESET(bf);
+		if (error < 0)
+			return error;
+	}
+
+	sc->sc_rxbufcur = NULL;
+
+	bf = STAILQ_FIRST(&sc->sc_rxbuf);
+	ath_hal_putrxbuf(ah, bf->bf_daddr);
+	ath_hal_rxena(ah);		/* enable recv descriptors */
+	ath_mode_init(dev);		/* set filters, etc. */
+	ath_hal_startpcurecv(ah);	/* re-enable PCU/DMA engine */
+	return 0;
+}
+
+/*
+ * Flush skbs allocated for receive.
+ */
+static void
+ath_flushrecv(struct ath_softc *sc)
+{
+	struct ath_buf *bf;
+
+	STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list)
+		if (bf->bf_skb != NULL) {
+			bus_unmap_single(sc->sc_bdev,
+				bf->bf_skbaddr, sc->sc_rxbufsize,
+				BUS_DMA_FROMDEVICE);
+			dev_kfree_skb(bf->bf_skb);
+			bf->bf_skb = NULL;
+		}
+}
+
+/*
+ * Update internal state after a channel change.
+ */
+static void
+ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct net_device *dev = sc->sc_dev;
+	enum ieee80211_phymode mode;
+
+	mode = ieee80211_chan2mode(chan);
+
+	ath_rate_setup(dev, mode);
+	ath_setcurmode(sc, mode);
+
+#ifdef notyet
+	/*
+	 * Update BPF state.
+	 */
+	sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq =
+		htole16(chan->ic_freq);
+	sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags =
+		htole16(chan->ic_flags);
+#endif
+	if (ic->ic_curchanmaxpwr == 0)
+		ic->ic_curchanmaxpwr = chan->ic_maxregpower;
+}
+
+/*
+ * Set/change channels.  If the channel is really being changed,
+ * it's done by resetting the chip.  To accomplish this we must
+ * first cleanup any pending DMA, then restart stuff after a la
+ * ath_init.
+ */
+static int
+ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
+{
+	struct ath_hal *ah = sc->sc_ah;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct net_device *dev = sc->sc_dev;
+	HAL_CHANNEL hchan;
+	u_int8_t tswitch = 0;
+
+	/*
+	 * Convert to a HAL channel description with
+	 * the flags constrained to reflect the current
+	 * operating mode.
+	 */
+	hchan.channel = chan->ic_freq;
+	hchan.channelFlags = ath_chan2flags(chan);
+	KASSERT(hchan.channel != 0,
+		("bogus channel %u/0x%x", hchan.channel, hchan.channelFlags));
+
+	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz) -> %u (%u MHz)\n",
+		__func__, ath_hal_mhz2ieee(ah, sc->sc_curchan.channel,
+		sc->sc_curchan.channelFlags), sc->sc_curchan.channel,
+		ath_hal_mhz2ieee(ah, hchan.channel, hchan.channelFlags),
+		hchan.channel);
+	/* check if it is turbo mode switch */
+	if (hchan.channel == sc->sc_curchan.channel &&
+	   (hchan.channelFlags & IEEE80211_CHAN_TURBO) != (sc->sc_curchan.channelFlags & IEEE80211_CHAN_TURBO)) 
+		tswitch = 1;
+	if (hchan.channel != sc->sc_curchan.channel ||
+	    hchan.channelFlags != sc->sc_curchan.channelFlags) {
+		HAL_STATUS status;
+
+		/*
+		 * To switch channels clear any pending DMA operations;
+		 * wait long enough for the RX fifo to drain, reset the
+		 * hardware at the new frequency, and then re-enable
+		 * the relevant bits of the h/w.
+		 */
+		ath_hal_intrset(ah, 0);	/* disable interrupts */
+		ath_draintxq(sc);	/* clear pending tx frames */
+		ath_stoprecv(sc);	/* turn off frame recv */
+		
+		/* Set coverage class */
+		if (sc->sc_scanning || !IEEE80211_IS_CHAN_A(chan))
+			ath_hal_setcoverageclass(sc->sc_ah, 0, 0);
+		else
+			ath_hal_setcoverageclass(sc->sc_ah, ic->ic_coverageclass, 0);
+
+		if (!ath_hal_reset(ah, sc->sc_opmode, &hchan, AH_TRUE, &status)) {
+			printk("%s: %s: unable to reset channel %u (%u MHz) "
+				"flags 0x%x '%s' (HAL status %u)\n",
+				dev->name, __func__,
+				ieee80211_chan2ieee(ic, chan), chan->ic_freq,
+			        hchan.channelFlags,
+				ath_get_hal_status_desc(status), status);
+			return -EIO;
+		}
+
+		if (sc->sc_softled)
+			ath_hal_gpioCfgOutput(ah, sc->sc_ledpin);
+		
+		sc->sc_curchan = hchan;
+		ath_update_txpow(sc);		/* update tx power state */
+
+		/*
+		 * Re-enable rx framework.
+		 */
+		if (ath_startrecv(sc) != 0) {
+			printk("%s: %s: unable to restart recv logic\n",
+				dev->name, __func__);
+			return -EIO;
+		}
+
+		/*
+		 * Change channels and update the h/w rate map
+		 * if we're switching; e.g. 11a to 11b/g.
+		 */
+		ath_chan_change(sc, chan);
+		if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
+			if (sc->sc_curchan.privFlags & CHANNEL_DFS) {
+				if (!(sc->sc_curchan.privFlags & CHANNEL_DFS_CLEAR)) { 
+					dev->watchdog_timeo = 120 * HZ;	/* set the timeout to normal */
+					netif_stop_queue(dev);			
+					if (sc->sc_dfswait)
+						del_timer_sync(&sc->sc_dfswaittimer);
+					DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s: start dfs wait period\n",
+						__func__, dev->name);
+					sc->sc_dfswait = 1;
+					sc->sc_dfswaittimer.function = ath_check_dfs_clear;
+					sc->sc_dfswaittimer.expires = 
+						jiffies + (ATH_DFS_WAIT_POLL_PERIOD * HZ);
+					sc->sc_dfswaittimer.data = (unsigned long)sc;
+					add_timer(&sc->sc_dfswaittimer);
+				}
+			} else
+				if (sc->sc_dfswait == 1)
+					mod_timer(&sc->sc_dfswaittimer, jiffies + 2);
+		}
+		/*
+		 * Reconfigure beacons when it is a turbo mode switch.
+		 * HW seems to turn off beacons during turbo mode switch.
+		 */
+		if (sc->sc_beacons && tswitch) 
+			ath_beacon_config(sc, NULL);	
+
+		/*
+		 * Re-enable interrupts.
+		 */
+		ath_hal_intrset(ah, sc->sc_imask);
+	}
+	return 0;
+}
+
+/*
+ * Periodically recalibrate the PHY to account
+ * for temperature/environment changes.
+ */
+static void
+ath_calibrate(unsigned long arg)
+{
+	struct net_device *dev = (struct net_device *) arg;
+	struct ath_softc *sc = dev->priv;
+	struct ath_hal *ah = sc->sc_ah;
+	struct ieee80211com *ic = &sc->sc_ic;
+	HAL_CHANNEL *chans;
+	u_int32_t nchans;
+	HAL_BOOL isIQdone = AH_FALSE;
+
+	sc->sc_stats.ast_per_cal++;
+
+	DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: channel %u/%x\n",
+		__func__, sc->sc_curchan.channel, sc->sc_curchan.channelFlags);
+
+	if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
+		/*
+		 * Rfgain is out of bounds, reset the chip
+		 * to load new gain values.
+		 */
+		sc->sc_stats.ast_per_rfgain++;
+		ath_reset(dev);
+	}
+	if (!ath_hal_calibrate(ah, &sc->sc_curchan, &isIQdone)) {
+		DPRINTF(sc, ATH_DEBUG_ANY,
+			"%s: calibration of channel %u failed\n",
+			__func__, sc->sc_curchan.channel);
+		sc->sc_stats.ast_per_calfail++;
+	}
+	if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
+		chans = kmalloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL), GFP_ATOMIC);
+		if (chans == NULL) {
+			printk("%s: unable to allocate channel table\n", dev->name);
+			return;
+		}
+		nchans = ath_hal_checknol(ah, chans, IEEE80211_CHAN_MAX);
+		if (nchans > 0) {
+			u_int32_t i, j;
+			struct ieee80211_channel *ichan;
+			
+			for (i = 0; i < nchans; i++) {
+				for (j = 0; j < ic->ic_nchans; j++) {
+					ichan = &ic->ic_channels[j];
+					if (chans[i].channel == ichan->ic_freq)
+						ichan->ic_flags &= ~IEEE80211_CHAN_RADAR;
+				}
+
+				ichan = ieee80211_find_channel(ic, chans[i].channel,
+					chans[i].channelFlags);
+				if (ichan != NULL)
+					ichan->ic_flags &= ~IEEE80211_CHAN_RADAR;
+			}
+		}
+		kfree(chans);
+	}
+
+	if (isIQdone == AH_TRUE)
+		ath_calinterval = ATH_LONG_CALINTERVAL;
+	else
+		ath_calinterval = ATH_SHORT_CALINTERVAL;
+
+	sc->sc_cal_ch.expires = jiffies + (ath_calinterval * HZ);
+	add_timer(&sc->sc_cal_ch);
+}
+
+static void
+ath_scan_start(struct ieee80211com *ic)
+{
+	struct net_device *dev = ic->ic_dev;
+	struct ath_softc *sc = dev->priv;
+	struct ath_hal *ah = sc->sc_ah;
+	u_int32_t rfilt;
+
+	/* XXX calibration timer? */
+
+	sc->sc_scanning = 1;
+	sc->sc_syncbeacon = 0;
+	rfilt = ath_calcrxfilter(sc);
+	ath_hal_setrxfilter(ah, rfilt);
+	ath_hal_setassocid(ah, dev->broadcast, 0);
+
+	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
+		 __func__, rfilt, ether_sprintf(dev->broadcast));
+}
+
+static void
+ath_scan_end(struct ieee80211com *ic)
+{
+	struct net_device *dev = ic->ic_dev;
+	struct ath_softc *sc = dev->priv;
+	struct ath_hal *ah = sc->sc_ah;
+	u_int32_t rfilt;
+
+	sc->sc_scanning = 0;
+	rfilt = ath_calcrxfilter(sc);
+	ath_hal_setrxfilter(ah, rfilt);
+	ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
+
+	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
+		 __func__, rfilt, ether_sprintf(sc->sc_curbssid),
+		 sc->sc_curaid);
+}
+
+static void
+ath_set_channel(struct ieee80211com *ic)
+{
+	struct net_device *dev = ic->ic_dev;
+	struct ath_softc *sc = dev->priv;
+
+	(void) ath_chan_set(sc, ic->ic_curchan);
+	/*
+	 * If we are returning to our bss channel then mark state
+	 * so the next recv'd beacon's tsf will be used to sync the
+	 * beacon timers.  Note that since we only hear beacons in
+	 * sta/ibss mode this has no effect in other operating modes.
+	 */
+	if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
+		sc->sc_syncbeacon = 1;
+}
+
+static void
+ath_set_coverageclass(struct ieee80211com *ic) 
+{
+	struct ath_softc *sc = ic->ic_dev->priv;
+
+	ath_hal_setcoverageclass(sc->sc_ah, ic->ic_coverageclass, 0);
+	
+	return;
+}
+
+static u_int
+ath_mhz2ieee(struct ieee80211com *ic, u_int freq, u_int flags)
+{
+	struct ath_softc *sc = ic->ic_dev->priv;
+
+ 	return (ath_hal_mhz2ieee(sc->sc_ah, freq, flags));
+}
+
+
+/*
+ * Context: softIRQ and process context
+ */
+static int
+ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+	struct ath_vap *avp = ATH_VAP(vap);
+	struct ieee80211com *ic = vap->iv_ic;
+	struct net_device *dev = ic->ic_dev;
+	struct ath_softc *sc = dev->priv;
+	struct ath_hal *ah = sc->sc_ah;
+	struct ieee80211_node *ni, *wds_ni;
+	int i, error, stamode;
+	u_int32_t rfilt = 0;
+	struct ieee80211vap *tmpvap;
+	static const HAL_LED_STATE leds[] = {
+		HAL_LED_INIT,	/* IEEE80211_S_INIT */
+		HAL_LED_SCAN,	/* IEEE80211_S_SCAN */
+		HAL_LED_AUTH,	/* IEEE80211_S_AUTH */
+		HAL_LED_ASSOC, 	/* IEEE80211_S_ASSOC */
+		HAL_LED_RUN, 	/* IEEE80211_S_RUN */
+	};
+
+	DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s: %s -> %s\n", __func__, dev->name,
+		ieee80211_state_name[vap->iv_state],
+		ieee80211_state_name[nstate]);
+
+	del_timer(&sc->sc_cal_ch);		/* periodic calibration timer */
+	ath_hal_setledstate(ah, leds[nstate]);	/* set LED */
+	netif_stop_queue(dev);			/* before we do anything else */
+
+	if (nstate == IEEE80211_S_INIT) {
+		/*
+		 * if there is no VAP left in RUN state
+		 * disable beacon interrupts.
+		 */
+		TAILQ_FOREACH(tmpvap, &ic->ic_vaps, iv_next) {
+			if (tmpvap != vap && tmpvap->iv_state == IEEE80211_S_RUN )
+				break;
+		}
+		if (!tmpvap) {
+			sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
+			/*
+			 * Disable interrupts.
+			 */
+			ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
+			sc->sc_beacons = 0;
+		}
+		/*
+		 * Notify the rate control algorithm.
+		 */
+		sc->sc_rc->ops->newstate(vap, nstate);
+		goto done;
+	}
+	ni = vap->iv_bss;
+
+	rfilt = ath_calcrxfilter(sc);
+	stamode = (vap->iv_opmode == IEEE80211_M_STA ||
+		   vap->iv_opmode == IEEE80211_M_IBSS ||
+		   vap->iv_opmode == IEEE80211_M_AHDEMO);
+	if (stamode && nstate == IEEE80211_S_RUN) {
+		sc->sc_curaid = ni->ni_associd;
+		IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
+	} else
+		sc->sc_curaid = 0;
+
+	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
+		 __func__, rfilt, ether_sprintf(sc->sc_curbssid),
+		 sc->sc_curaid);
+
+	ath_hal_setrxfilter(ah, rfilt);
+	if (stamode)
+		ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
+
+	if ((vap->iv_opmode != IEEE80211_M_STA) &&
+		 (vap->iv_flags & IEEE80211_F_PRIVACY)) {
+		for (i = 0; i < IEEE80211_WEP_NKID; i++)
+			if (ath_hal_keyisvalid(ah, i))
+				ath_hal_keysetmac(ah, i, ni->ni_bssid);
+	}
+
+	/*
+	 * Notify the rate control algorithm so rates
+	 * are setup should ath_beacon_alloc be called.
+	 */
+	sc->sc_rc->ops->newstate(vap, nstate);
+
+	if (vap->iv_opmode == IEEE80211_M_MONITOR) {
+		/* nothing to do */;
+	} else if (nstate == IEEE80211_S_RUN) {
+		DPRINTF(sc, ATH_DEBUG_STATE,
+			"%s(RUN): ic_flags=0x%08x iv=%d bssid=%s "
+			"capinfo=0x%04x chan=%d\n"
+			 , __func__
+			 , vap->iv_flags
+			 , ni->ni_intval
+			 , ether_sprintf(ni->ni_bssid)
+			 , ni->ni_capinfo
+			 , ieee80211_chan2ieee(ic, ni->ni_chan));
+
+		switch (vap->iv_opmode) {
+		case IEEE80211_M_HOSTAP:
+		case IEEE80211_M_IBSS:
+			/*
+			 * Allocate and setup the beacon frame.
+			 *
+			 * Stop any previous beacon DMA.  This may be
+			 * necessary, for example, when an ibss merge
+			 * causes reconfiguration; there will be a state
+			 * transition from RUN->RUN that means we may
+			 * be called with beacon transmission active.
+			 */
+			ath_hal_stoptxdma(ah, sc->sc_bhalq);
+
+        		/* Set default key index for static wep case */
+			ni->ni_ath_defkeyindex = IEEE80211_INVAL_DEFKEY;
+			if (((vap->iv_flags & IEEE80211_F_WPA) == 0) &&
+			    (ni->ni_authmode != IEEE80211_AUTH_8021X) &&
+			    (vap->iv_def_txkey != IEEE80211_KEYIX_NONE)) {
+                       		ni->ni_ath_defkeyindex = vap->iv_def_txkey;
+			}
+
+			error = ath_beacon_alloc(sc, ni);
+			if (error < 0)
+				goto bad;
+			/* 
+			 * if the turbo flags have changed, then beacon and turbo
+			 * need to be reconfigured.
+			 */
+			if ((sc->sc_dturbo && !(vap->iv_ath_cap & IEEE80211_ATHC_TURBOP)) || 
+				(!sc->sc_dturbo && (vap->iv_ath_cap & IEEE80211_ATHC_TURBOP))) 
+				sc->sc_beacons = 0;
+			/* 
+			 * if it is the first AP VAP moving to RUN state then beacon 
+			 * needs to be reconfigured.
+			 */
+			TAILQ_FOREACH(tmpvap, &ic->ic_vaps, iv_next) {
+				if (tmpvap != vap && tmpvap->iv_state == IEEE80211_S_RUN &&
+					tmpvap->iv_opmode == IEEE80211_M_HOSTAP)
+					break;
+			}
+			if (!tmpvap) 
+				sc->sc_beacons = 0;
+			break;
+		case IEEE80211_M_STA:
+#ifdef ATH_SUPERG_COMP
+			/* have we negotiated compression? */
+			if (!(vap->iv_ath_cap & ni->ni_ath_flags & IEEE80211_NODE_COMP))
+				ni->ni_ath_flags &= ~IEEE80211_NODE_COMP;
+#endif
+			/*
+			 * Allocate a key cache slot to the station.
+			 */
+			ath_setup_keycacheslot(sc, ni);
+			/*
+			 * Record negotiated dynamic turbo state for
+			 * use by rate control modules.
+			 */
+			sc->sc_dturbo =
+				(ni->ni_ath_flags & IEEE80211_ATHC_TURBOP) != 0;
+			break;
+		case IEEE80211_M_WDS:
+			wds_ni = ieee80211_find_txnode(vap, vap->wds_mac);
+			if (wds_ni) {
+				/* XXX no rate negotiation; just dup */
+				wds_ni->ni_rates = vap->iv_bss->ni_rates;
+				/* Depending on the sequence of bringing up devices
+				 * it's possible the rates of the root bss isn't
+				 * filled yet. 
+				 */
+				if (vap->iv_ic->ic_newassoc != NULL && 
+				    wds_ni->ni_rates.rs_nrates != 0) {
+					/* Fill in the rates based on our own rates
+					 * we rely on the rate selection mechanism
+					 * to find out which rates actually work!
+					 */
+					vap->iv_ic->ic_newassoc(wds_ni, 1);
+				}
+			}
+			break;
+		default:
+			break;
+		}
+
+
+		/*
+		 * Configure the beacon and sleep timers.
+		 */
+		if (!sc->sc_beacons && vap->iv_opmode!=IEEE80211_M_WDS) {
+			ath_beacon_config(sc, vap);
+			sc->sc_beacons = 1;
+		}
+
+		/*
+		 * Reset rssi stats; maybe not the best place...
+		 */
+		sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
+		sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
+		sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
+		/* 
+		 * if it is a DFS channel and has not been checked for radar 
+		 * do not let the 80211 state machine to go to RUN state.
+		 *
+		 */
+		if (sc->sc_dfswait && vap->iv_opmode == IEEE80211_M_HOSTAP ) {
+			/* push the VAP to RUN state once DFS is cleared */
+			DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s: VAP  -> DFS_WAIT\n",
+				__func__, dev->name);
+			avp->av_dfswait_run = 1; 
+			return 0;
+		}
+	} else {
+		if (sc->sc_dfswait &&
+			vap->iv_opmode == IEEE80211_M_HOSTAP &&
+			sc->sc_dfswaittimer.data == (unsigned long)vap) {
+			del_timer_sync(&sc->sc_dfswaittimer);
+			sc->sc_dfswait = 0;
+			DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s: VAP  out of DFS_WAIT\n",
+				__func__, dev->name);
+		}
+		/*
+		 *  XXXX
+		 * if it is SCAN state, disable beacons. 
+		 */
+		if (nstate == IEEE80211_S_SCAN) {
+			ath_hal_intrset(ah,sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
+			sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
+			/* need to reconfigure the beacons when it moves to RUN */
+			sc->sc_beacons = 0; 
+		}
+		avp->av_dfswait_run = 0; /* reset the dfs wait flag */ 
+	}
+done:
+	/*
+	 * Invoke the parent method to complete the work.
+	 */
+	error = avp->av_newstate(vap, nstate, arg);
+
+	/*
+	 * Finally, start any timers.
+	 */
+	if (nstate == IEEE80211_S_RUN) {
+		/* start periodic recalibration timer */
+		mod_timer(&sc->sc_cal_ch, jiffies + (ath_calinterval * HZ));
+	}
+
+#ifdef ATH_SUPERG_XR
+	if (vap->iv_flags & IEEE80211_F_XR &&
+		nstate == IEEE80211_S_RUN)
+		ATH_SETUP_XR_VAP(sc,vap,rfilt);
+	if (vap->iv_flags & IEEE80211_F_XR &&
+		nstate == IEEE80211_S_INIT && sc->sc_xrgrppoll)
+		ath_grppoll_stop(vap);
+#endif
+bad:
+	netif_start_queue(dev);
+	dev->watchdog_timeo = 5 * HZ;		/* set the timeout to normal */
+	return error;
+}
+
+/*
+ * periodically checks for the HAL to set
+ * CHANNEL_DFS_CLEAR flag on current channel.
+ * if the flag is set and a VAP is waiting for it, push 
+ * transition the VAP to RUN state.
+ *
+ * Context: Timer (softIRQ)
+ */
+static void 
+ath_check_dfs_clear(unsigned long data ) 
+{
+	struct ath_softc *sc = (struct ath_softc *)data; 
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct net_device *dev = sc->sc_dev;
+	struct ieee80211vap *vap ;
+	HAL_CHANNEL hchan;
+
+	/* No VAP is in a DFS wait period; nothing to do. */
+	if(!sc->sc_dfswait) return;
+
+	/* if still need to wait */
+	ath_hal_radar_wait(sc->sc_ah, &hchan);
+
+	/* NOTE(review): on interference we return without re-arming the
+	 * poll timer — presumably another path restarts the DFS wait;
+	 * confirm, otherwise the wait state could stall here. */
+	if (hchan.privFlags & CHANNEL_INTERFERENCE)
+		return; 
+
+	if ((hchan.privFlags & CHANNEL_DFS_CLEAR) ||
+	    (!(hchan.privFlags & CHANNEL_DFS))) { 
+		/* Channel is clear (or was never a DFS channel): promote
+		 * every VAP parked in DFS_WAIT to RUN. */
+		sc->sc_curchan.privFlags |= CHANNEL_DFS_CLEAR;
+		sc->sc_dfswait = 0;
+		TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+			struct ath_vap *avp = ATH_VAP(vap);
+			if (avp->av_dfswait_run) {
+				/* re alloc beacons to update new channel info */
+				int error;
+				error = ath_beacon_alloc(sc, vap->iv_bss);
+				if(error < 0) {
+					/* XXX */
+					return;
+				}
+				DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s: VAP DFS_WAIT -> RUN\n",
+					__func__, dev->name);
+				avp->av_newstate(vap, IEEE80211_S_RUN, 0);
+				/* start calibration timer */
+				mod_timer(&sc->sc_cal_ch, jiffies + (ath_calinterval * HZ));
+#ifdef ATH_SUPERG_XR
+				if (vap->iv_flags & IEEE80211_F_XR ) {
+					u_int32_t rfilt = 0;
+					rfilt = ath_calcrxfilter(sc);
+					ATH_SETUP_XR_VAP(sc, vap, rfilt);
+				}
+#endif
+				avp->av_dfswait_run = 0;
+			}
+		}
+		/* start the device */
+		netif_start_queue(dev);
+		dev->watchdog_timeo = 5 * HZ;	/* set the timeout to normal */
+	} else {
+		/* fire the timer again */
+		sc->sc_dfswaittimer.expires = jiffies + (ATH_DFS_WAIT_POLL_PERIOD * HZ);
+		sc->sc_dfswaittimer.data = (unsigned long)sc;
+		add_timer(&sc->sc_dfswaittimer);
+	}
+
+}
+
+#ifdef ATH_SUPERG_COMP
+/* Enable/Disable de-compression mask for given node.
+ * The routine is invoked after addition or deletion of the
+ * key.
+ */
+static void
+ath_comp_set(struct ieee80211vap *vap, struct ieee80211_node *ni, int en)
+{
+	/* Thin wrapper: vap is unused; all work is per-node. */
+	ath_setup_comp(ni, en);
+	return;
+}
+
+/* Set up decompression engine for this node.
+ * enable != 0: install the h/w decompression mask for the node's unicast
+ * key (no-op unless compression was negotiated and a valid key exists).
+ * enable == 0: clear any previously installed mask.
+ */
+static void
+ath_setup_comp(struct ieee80211_node *ni, int enable)
+{
+#define	IEEE80211_KEY_XR	(IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV)
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ath_softc *sc = vap->iv_ic->ic_dev->priv;
+	struct ath_node *an = ATH_NODE(ni);
+	u_int16_t keyindex;
+
+	if (enable) {
+		/* Have we negotiated compression? */
+		if (!(ni->ni_ath_flags & IEEE80211_NODE_COMP))
+			return;
+
+		/* No valid key? */
+		if (ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
+			return;
+
+		/* Setup decompression mask.
+		 * For TKIP and split MIC case, recv. keyindex is at 32 offset
+		 * from tx key.
+		 */
+		if ((ni->ni_wpa_ie != NULL) &&
+		    (ni->ni_rsn.rsn_ucastcipher == IEEE80211_CIPHER_TKIP) &&
+		    sc->sc_splitmic) {
+			if ((ni->ni_ucastkey.wk_flags & IEEE80211_KEY_XR) 
+							== IEEE80211_KEY_XR)
+				keyindex = ni->ni_ucastkey.wk_keyix + 32;
+			else
+				keyindex = ni->ni_ucastkey.wk_keyix;
+		} else
+			keyindex = ni->ni_ucastkey.wk_keyix + ni->ni_rxkeyoff;
+
+		ath_hal_setdecompmask(sc->sc_ah, keyindex, 1);
+		/* Cache the index so disable can find it later. */
+		an->an_decomp_index = keyindex;
+	} else {
+		/* Only clear if a mask was previously installed. */
+		if (an->an_decomp_index != INVALID_DECOMP_INDEX) {
+			ath_hal_setdecompmask(sc->sc_ah, an->an_decomp_index, 0);
+			an->an_decomp_index = INVALID_DECOMP_INDEX;
+		}
+	}
+
+	return;
+#undef IEEE80211_KEY_XR
+}
+#endif
+
+/*
+ * Allocate a key cache slot to the station so we can
+ * setup a mapping from key index to node. The key cache
+ * slot is needed for managing antenna state and for
+ * compression when stations do not use crypto.  We do
+ * it unilaterally here; if crypto is employed this slot
+ * will be reassigned.
+ */
+static void
+ath_setup_stationkey(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ath_softc *sc = vap->iv_ic->ic_dev->priv;
+	u_int16_t keyix;
+
+	keyix = ath_key_alloc(vap, &ni->ni_ucastkey);
+	if (keyix == IEEE80211_KEYIX_NONE) {
+		/*
+		 * Key cache is full; we'll fall back to doing
+		 * the more expensive lookup in software.  Note
+		 * this also means no h/w compression.
+		 */
+		/* XXX msg+statistic */
+		/* Silent degradation is deliberate: the node still works,
+		 * just without a h/w key slot. */
+		return;
+	} else {
+		ni->ni_ucastkey.wk_keyix = keyix;
+		/* NB: this will create a pass-thru key entry */
+		ath_keyset(sc, &ni->ni_ucastkey, ni->ni_macaddr, vap->iv_bss);
+
+#ifdef ATH_SUPERG_COMP
+		/* Enable de-compression logic */
+		ath_setup_comp(ni, 1);
+#endif
+	}
+	
+	return;
+}
+
+/* Setup WEP key for the station if compression is negotiated.
+ * When station and AP are using same default key index, use single key
+ * cache entry for receive and transmit, else two key cache entries are
+ * created. One for receive with MAC address of station and one for transmit
+ * with NULL mac address. On receive key cache entry de-compression mask
+ * is enabled.
+ *
+ * On any allocation failure the COMP flag is cleared for the node
+ * (see error label) so compression is simply not used.
+ */
+static void
+ath_setup_stationwepkey(struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211_key *ni_key;
+	struct ieee80211_key tmpkey;
+	struct ieee80211_key *rcv_key, *xmit_key;
+	int txkeyidx, rxkeyidx = IEEE80211_KEYIX_NONE, i;
+	u_int8_t null_macaddr[IEEE80211_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
+
+	KASSERT(ni->ni_ath_defkeyindex < IEEE80211_WEP_NKID,
+		("got invalid node key index 0x%x", ni->ni_ath_defkeyindex));
+	KASSERT(vap->iv_def_txkey < IEEE80211_WEP_NKID,
+		("got invalid vap def key index 0x%x", vap->iv_def_txkey));
+
+	/* Allocate a key slot first */
+	if (!ieee80211_crypto_newkey(vap, 
+		IEEE80211_CIPHER_WEP, 
+		IEEE80211_KEY_XMIT|IEEE80211_KEY_RECV, 
+		&ni->ni_ucastkey))
+		goto error;
+
+	txkeyidx = ni->ni_ucastkey.wk_keyix;
+	xmit_key = &vap->iv_nw_keys[vap->iv_def_txkey];
+
+	/* Do we need separate rx key? */
+	if (ni->ni_ath_defkeyindex != vap->iv_def_txkey) {
+		/* Temporarily mark the key unallocated so newkey grabs a
+		 * second slot; restore txkeyidx afterwards. */
+		ni->ni_ucastkey.wk_keyix = IEEE80211_KEYIX_NONE;
+		if (!ieee80211_crypto_newkey(vap, 
+			IEEE80211_CIPHER_WEP, 
+			IEEE80211_KEY_XMIT|IEEE80211_KEY_RECV,
+			&ni->ni_ucastkey)) {
+			/* Second slot failed: release the first and bail. */
+			ni->ni_ucastkey.wk_keyix = txkeyidx;
+			ieee80211_crypto_delkey(vap, &ni->ni_ucastkey, ni);
+			goto error;
+		}
+		rxkeyidx = ni->ni_ucastkey.wk_keyix;
+		ni->ni_ucastkey.wk_keyix = txkeyidx;
+
+		rcv_key = &vap->iv_nw_keys[ni->ni_ath_defkeyindex];
+	} else {
+		rcv_key = xmit_key;
+		rxkeyidx = txkeyidx;
+	}
+
+	/* Remember receive key offset */
+	ni->ni_rxkeyoff = rxkeyidx - txkeyidx;
+
+	/* Setup xmit key */
+	ni_key = &ni->ni_ucastkey;
+	if (rxkeyidx != txkeyidx)
+		ni_key->wk_flags = IEEE80211_KEY_XMIT;
+	else
+		ni_key->wk_flags = IEEE80211_KEY_XMIT|IEEE80211_KEY_RECV;
+
+	ni_key->wk_keylen = xmit_key->wk_keylen;
+	for (i = 0; i < IEEE80211_TID_SIZE; i++)
+		ni_key->wk_keyrsc[i] = xmit_key->wk_keyrsc[i];
+	ni_key->wk_keytsc = 0; 
+	memset(ni_key->wk_key, 0, sizeof(ni_key->wk_key));
+	memcpy(ni_key->wk_key, xmit_key->wk_key, xmit_key->wk_keylen);
+	/* Single-slot case keys on the station MAC; dual-slot tx entry
+	 * uses the null MAC (rx entry below carries the station MAC). */
+	ieee80211_crypto_setkey(vap, &ni->ni_ucastkey, 
+		(rxkeyidx == txkeyidx) ? ni->ni_macaddr:null_macaddr, ni);
+
+	if (rxkeyidx != txkeyidx) {
+		/* Setup recv key */
+		ni_key = &tmpkey;
+		ni_key->wk_keyix = rxkeyidx;
+		ni_key->wk_flags = IEEE80211_KEY_RECV;
+		ni_key->wk_keylen = rcv_key->wk_keylen;
+		for(i = 0; i < IEEE80211_TID_SIZE; i++)
+			ni_key->wk_keyrsc[i] = rcv_key->wk_keyrsc[i];
+		ni_key->wk_keytsc = 0; 
+		ni_key->wk_cipher = rcv_key->wk_cipher; 
+		ni_key->wk_private = rcv_key->wk_private; 
+		memset(ni_key->wk_key, 0, sizeof(ni_key->wk_key));
+		memcpy(ni_key->wk_key, rcv_key->wk_key, rcv_key->wk_keylen);
+		ieee80211_crypto_setkey(vap, &tmpkey, ni->ni_macaddr, ni);
+	}
+
+	return;
+
+error:
+	/* Could not plumb the key(s): disable compression for this node. */
+	ni->ni_ath_flags &= ~IEEE80211_NODE_COMP;
+	return;
+}
+
+/* Create a keycache entry for given node in clearcase as well as static wep.
+ * Handle compression state if required.
+ * For non clearcase/static wep case, the key is plumbed by hostapd.
+ */
+static void
+ath_setup_keycacheslot(struct ath_softc *sc, struct ieee80211_node *ni)
+{
+	struct ieee80211vap *vap = ni->ni_vap;
+
+	/* Drop any stale unicast key before (re)plumbing. */
+	if (ni->ni_ucastkey.wk_keyix != IEEE80211_KEYIX_NONE)
+		ieee80211_crypto_delkey(vap, &ni->ni_ucastkey, ni);
+
+	/* Only for clearcase and WEP case */
+	if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0 ||
+		(ni->ni_ath_defkeyindex != IEEE80211_INVAL_DEFKEY)) {
+
+		if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) {
+			KASSERT(ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE,
+		    		("new node with a ucast key already setup (keyix %u)",
+		    		  ni->ni_ucastkey.wk_keyix));
+			/* NB: 5210 has no passthru/clr key support */
+			if (sc->sc_hasclrkey)
+				ath_setup_stationkey(ni);
+		} else
+			ath_setup_stationwepkey(ni);
+	}
+
+	return;
+}
+
+/*
+ * Setup driver-specific state for a newly associated node.
+ * Note that we're called also on a re-associate, the isnew
+ * param tells us if this is the first time or not.
+ */
+static void
+ath_newassoc(struct ieee80211_node *ni, int isnew)
+{
+	struct ieee80211com *ic = ni->ni_ic;
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ath_softc *sc = ic->ic_dev->priv;
+
+	/* Let the rate control module set up per-node rate state. */
+	sc->sc_rc->ops->newassoc(sc, ATH_NODE(ni), isnew);
+
+	/* are we supporting compression? */
+	if (!(vap->iv_ath_cap & ni->ni_ath_flags & IEEE80211_NODE_COMP))
+		ni->ni_ath_flags &= ~IEEE80211_NODE_COMP;
+
+	/* disable compression for TKIP */
+	if ((ni->ni_ath_flags & IEEE80211_NODE_COMP) &&
+		(ni->ni_wpa_ie != NULL) &&
+		(ni->ni_rsn.rsn_ucastcipher == IEEE80211_CIPHER_TKIP))
+		ni->ni_ath_flags &= ~IEEE80211_NODE_COMP;
+
+	ath_setup_keycacheslot(sc, ni);
+#ifdef ATH_SUPERG_XR
+	if (1) {
+		/* Track an XR minimum fast-frame rate for this node and
+		 * refresh the group-poll period. */
+		struct ath_node *an = ATH_NODE(ni);
+		if (ic->ic_ath_cap & an->an_node.ni_ath_flags & IEEE80211_ATHC_XR)
+			an->an_minffrate = ATH_MIN_FF_RATE;
+		else
+			an->an_minffrate = 0;
+		ath_grppoll_period_update(sc);
+	}
+#endif
+}
+
+/* Query the HAL for the legal channel list for country code `cc' and
+ * populate ic->ic_channels.  Returns 0 on success, -ENOMEM/-EINVAL on
+ * failure.  The temporary HAL_CHANNEL table is heap-allocated because
+ * IEEE80211_CHAN_MAX entries are too large for the kernel stack. */
+static int
+ath_getchannels(struct net_device *dev, u_int cc,
+	HAL_BOOL outdoor, HAL_BOOL xchanmode)
+{
+	struct ath_softc *sc = dev->priv;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ath_hal *ah = sc->sc_ah;
+	HAL_CHANNEL *chans;
+	/* NOTE(review): `i' is signed while `nchan' is u_int — harmless at
+	 * realistic channel counts, but a signed/unsigned compare. */
+	int i;
+	u_int nchan;
+
+	chans = kmalloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL), GFP_KERNEL);
+	if (chans == NULL) {
+		printk("%s: unable to allocate channel table\n", dev->name);
+		return -ENOMEM;
+	}
+	if (!ath_hal_init_channels(ah, chans, IEEE80211_CHAN_MAX, &nchan,
+	    ic->ic_regclassids, IEEE80211_REGCLASSIDS_MAX, &ic->ic_nregclass,
+	    cc, HAL_MODE_ALL, outdoor, xchanmode)) {
+		u_int32_t rd;
+
+		ath_hal_getregdomain(ah, &rd);
+		printk("%s: unable to collect channel list from HAL; "
+			"regdomain likely %u country code %u\n",
+			dev->name, rd, cc);
+		kfree(chans);
+		return -EINVAL;
+	}
+	/*
+	 * Convert HAL channels to ieee80211 ones.
+	 */
+	for (i = 0; i < nchan; i++) {
+		HAL_CHANNEL *c = &chans[i];
+		struct ieee80211_channel *ichan = &ic->ic_channels[i];
+
+		ichan->ic_ieee = ath_hal_mhz2ieee(ah, c->channel, c->channelFlags);
+		ichan->ic_freq = c->channel;
+		ichan->ic_flags = c->channelFlags;
+		ichan->ic_maxregpower = c->maxRegTxPower;	/* dBm */
+		ichan->ic_maxpower = c->maxTxPower;		    /* 1/4 dBm */
+		ichan->ic_minpower = c->minTxPower;		    /* 1/4 dBm */
+	}
+	ic->ic_nchans = nchan;
+	kfree(chans);
+	return 0;
+}
+
+/* Timer callback: end of a blink cycle; allow new LED events. */
+static void
+ath_led_done(unsigned long arg)
+{
+	struct ath_softc *sc = (struct ath_softc *) arg;
+
+	sc->sc_blinking = 0;
+}
+
+/*
+ * Turn the LED off: flip the pin and then set a timer so no
+ * update will happen for the specified duration.
+ * (Timer callback; chains to ath_led_done after sc_ledoff jiffies.)
+ */
+static void
+ath_led_off(unsigned long arg)
+{
+	struct ath_softc *sc = (struct ath_softc *) arg;
+
+	/* !sc_ledon is the inactive GPIO level (polarity-aware). */
+	ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon);
+	sc->sc_ledtimer.function = ath_led_done;
+	sc->sc_ledtimer.expires = jiffies + sc->sc_ledoff;
+	add_timer(&sc->sc_ledtimer);
+}
+
+/*
+ * Blink the LED according to the specified on/off times (in jiffies).
+ * Drives the pin active now and arms the one-shot timer chain
+ * (ath_led_off -> ath_led_done) to complete the cycle.
+ */
+static void
+ath_led_blink(struct ath_softc *sc, int on, int off)
+{
+	DPRINTF(sc, ATH_DEBUG_LED, "%s: on %u off %u\n", __func__, on, off);
+	ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, sc->sc_ledon);
+	sc->sc_blinking = 1;
+	sc->sc_ledoff = off;
+	sc->sc_ledtimer.function = ath_led_off;
+	sc->sc_ledtimer.expires = jiffies + on;
+	add_timer(&sc->sc_ledtimer);
+}
+
+/* Map a driver event (poll/tx/rx) to a blink pattern from sc_hwmap.
+ * Events arriving while a blink is in progress are dropped. */
+static void
+ath_led_event(struct ath_softc *sc, int event)
+{
+
+	sc->sc_ledevent = jiffies;	/* time of last event */
+	if (sc->sc_blinking)		/* don't interrupt active blink */
+		return;
+	switch (event) {
+	case ATH_LED_POLL:
+		ath_led_blink(sc, sc->sc_hwmap[0].ledon,
+			sc->sc_hwmap[0].ledoff);
+		break;
+	case ATH_LED_TX:
+		ath_led_blink(sc, sc->sc_hwmap[sc->sc_txrate].ledon,
+			sc->sc_hwmap[sc->sc_txrate].ledoff);
+		break;
+	case ATH_LED_RX:
+		ath_led_blink(sc, sc->sc_hwmap[sc->sc_rxrate].ledon,
+			sc->sc_hwmap[sc->sc_rxrate].ledoff);
+		break;
+	}
+}
+
+/* ieee80211_iterate_nodes callback: set every node's tx power to *arg. */
+static void
+set_node_txpower(void *arg, struct ieee80211_node *ni)
+{
+	int *value = (int *)arg;
+	ni->ni_txpower = *value;
+}
+
+/* XXX: this function needs some locking to avoid being called twice/interrupted */
+/* Recompute and apply the transmit power limit: probe the hardware
+ * maximum, clamp the requested ic_newtxpowlimit to it, propagate the
+ * clamped value to every VAP/node, then program the HAL. */
+static void
+ath_update_txpow(struct ath_softc *sc)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ieee80211vap *vap = NULL;
+	struct ath_hal *ah = sc->sc_ah;
+	u_int32_t txpowlimit = 0;
+	u_int32_t maxtxpowlimit = 9999;
+	u_int32_t clamped_txpow = 0;
+
+	/*
+	 * Find the maxtxpow of the card and regulatory constraints
+	 * (temporarily program an absurdly high limit, read back what
+	 * the HAL actually accepted, then restore the previous limit).
+	 */
+	(void)ath_hal_getmaxtxpow(ah, &txpowlimit);
+	ath_hal_settxpowlimit(ah, maxtxpowlimit);
+	(void)ath_hal_getmaxtxpow(ah, &maxtxpowlimit);
+	ic->ic_txpowlimit = maxtxpowlimit;
+	ath_hal_settxpowlimit(ah, txpowlimit);
+ 	
+ 	/*
+	 * Make sure the VAP's change is within limits, clamp it otherwise
+ 	 */
+	if (ic->ic_newtxpowlimit > ic->ic_txpowlimit)
+		clamped_txpow = ic->ic_txpowlimit;
+	else
+		clamped_txpow = ic->ic_newtxpowlimit;
+	
+	/*
+	 * Search for the VAP that needs a txpow change, if any
+	 */
+	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+#ifdef ATH_CAP_TPC
+		if (ic->ic_newtxpowlimit == vap->iv_bss->ni_txpower) {
+			vap->iv_bss->ni_txpower = clamped_txpow;
+			ieee80211_iterate_nodes(&vap->iv_ic->ic_sta, set_node_txpower, &clamped_txpow);
+		}
+#else
+		vap->iv_bss->ni_txpower = clamped_txpow;
+		ieee80211_iterate_nodes(&vap->iv_ic->ic_sta, set_node_txpower, &clamped_txpow);
+#endif
+	}
+ 	
+	ic->ic_newtxpowlimit = sc->sc_curtxpow = clamped_txpow;
+
+	/* With TPC the HAL limit only needs raising when the request meets
+	 * or exceeds the board limit; without TPC any change is programmed. */
+#ifdef ATH_CAP_TPC
+	if (ic->ic_newtxpowlimit >= ic->ic_txpowlimit)
+		ath_hal_settxpowlimit(ah, ic->ic_newtxpowlimit);
+#else
+	if (ic->ic_newtxpowlimit != ic->ic_txpowlimit)
+		ath_hal_settxpowlimit(ah, ic->ic_newtxpowlimit);
+#endif
+}
+ 
+
+#ifdef ATH_SUPERG_XR
+/* Fetch the XR rate table from the HAL and copy it into
+ * ic_sup_xr_rates.  Returns 1 on success, 0 if the HAL has no XR table. */
+static int
+ath_xr_rate_setup(struct net_device *dev)
+{
+	struct ath_softc *sc = dev->priv;
+	struct ath_hal *ah = sc->sc_ah;
+	struct ieee80211com *ic = &sc->sc_ic;
+	const HAL_RATE_TABLE *rt;
+	struct ieee80211_rateset *rs;
+	int i, maxrates;
+	sc->sc_xr_rates = ath_hal_getratetable(ah, HAL_MODE_XR);
+	rt = sc->sc_xr_rates;
+	if (rt == NULL)
+		return 0;
+	/* NOTE(review): the guard tests XR_NUM_SUP_RATES but then clamps
+	 * (and prints) IEEE80211_RATE_MAXSIZE — inconsistent; confirm
+	 * which bound rs_rates can actually hold before changing. */
+	if (rt->rateCount > XR_NUM_SUP_RATES) {
+		DPRINTF(sc, ATH_DEBUG_ANY,
+			"%s: rate table too small (%u > %u)\n",
+			__func__, rt->rateCount, IEEE80211_RATE_MAXSIZE);
+		maxrates = IEEE80211_RATE_MAXSIZE;
+	} else
+		maxrates = rt->rateCount;
+	rs = &ic->ic_sup_xr_rates;
+	for (i = 0; i < maxrates; i++)
+		rs->rs_rates[i] = rt->info[i].dot11Rate;
+	rs->rs_nrates = maxrates;
+	return 1;
+}
+#endif
+
+/* Setup half/quarter rate table support.
+ * Each table is optional: absence (NULL from the HAL) is not an error,
+ * the corresponding rateset is simply left untouched. */
+static void
+ath_setup_subrates(struct net_device *dev)
+{
+	struct ath_softc *sc = dev->priv;
+	struct ath_hal *ah = sc->sc_ah;
+	struct ieee80211com *ic = &sc->sc_ic;
+	const HAL_RATE_TABLE *rt;
+	struct ieee80211_rateset *rs;
+	int i, maxrates;
+
+	sc->sc_half_rates = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
+	rt = sc->sc_half_rates;
+	if (rt != NULL) {
+		if (rt->rateCount > IEEE80211_RATE_MAXSIZE) {
+			DPRINTF(sc, ATH_DEBUG_ANY,
+				"%s: rate table too small (%u > %u)\n",
+			       __func__, rt->rateCount, IEEE80211_RATE_MAXSIZE);
+			maxrates = IEEE80211_RATE_MAXSIZE;
+		} else
+			maxrates = rt->rateCount;
+		rs = &ic->ic_sup_half_rates;
+		for (i = 0; i < maxrates; i++)
+			rs->rs_rates[i] = rt->info[i].dot11Rate;
+		rs->rs_nrates = maxrates;
+	}
+
+	sc->sc_quarter_rates = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
+	rt = sc->sc_quarter_rates;
+	if (rt != NULL) {
+		if (rt->rateCount > IEEE80211_RATE_MAXSIZE) {
+			DPRINTF(sc, ATH_DEBUG_ANY,
+				"%s: rate table too small (%u > %u)\n",
+			       __func__, rt->rateCount, IEEE80211_RATE_MAXSIZE);
+			maxrates = IEEE80211_RATE_MAXSIZE;
+		} else
+			maxrates = rt->rateCount;
+		rs = &ic->ic_sup_quarter_rates;
+		for (i = 0; i < maxrates; i++)
+			rs->rs_rates[i] = rt->info[i].dot11Rate;
+		rs->rs_nrates = maxrates;
+	}
+}
+
+/* Load the HAL rate table for the given 802.11 phy mode and copy it
+ * into ic_sup_rates[mode].  Returns 1 on success, 0 for an unknown
+ * mode or a missing HAL table. */
+static int
+ath_rate_setup(struct net_device *dev, u_int mode)
+{
+	struct ath_softc *sc = dev->priv;
+	struct ath_hal *ah = sc->sc_ah;
+	struct ieee80211com *ic = &sc->sc_ic;
+	const HAL_RATE_TABLE *rt;
+	struct ieee80211_rateset *rs;
+	int i, maxrates;
+
+	switch (mode) {
+	case IEEE80211_MODE_11A:
+		sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_11A);
+		break;
+	case IEEE80211_MODE_11B:
+		sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_11B);
+		break;
+	case IEEE80211_MODE_11G:
+		sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_11G);
+		break;
+	case IEEE80211_MODE_TURBO_A:
+		sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_TURBO);
+		break;
+	case IEEE80211_MODE_TURBO_G:
+		sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_108G);
+		break;
+	default:
+		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
+			__func__, mode);
+		return 0;
+	}
+	rt = sc->sc_rates[mode];
+	if (rt == NULL)
+		return 0;
+	/* Clamp to what the rateset array can hold. */
+	if (rt->rateCount > IEEE80211_RATE_MAXSIZE) {
+		DPRINTF(sc, ATH_DEBUG_ANY,
+			"%s: rate table too small (%u > %u)\n",
+			__func__, rt->rateCount, IEEE80211_RATE_MAXSIZE);
+		maxrates = IEEE80211_RATE_MAXSIZE;
+	} else
+		maxrates = rt->rateCount;
+	rs = &ic->ic_sup_rates[mode];
+	for (i = 0; i < maxrates; i++)
+		rs->rs_rates[i] = rt->info[i].dot11Rate;
+	rs->rs_nrates = maxrates;
+	return 1;
+}
+
+/* Switch the driver's current phy mode: rebuild the rate-code lookup
+ * tables (sc_rixmap, sc_hwmap), the per-rate LED blink timings, and the
+ * protection/management rate indices. */
+static void
+ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
+{
+#define	N(a)	((int)(sizeof(a)/sizeof(a[0])))
+	/* NB: on/off times from the Atheros NDIS driver, w/ permission */
+	static const struct {
+		u_int		rate;		/* tx/rx 802.11 rate */
+		u_int16_t	timeOn;		/* LED on time (ms) */
+		u_int16_t	timeOff;	/* LED off time (ms) */
+	} blinkrates[] = {
+		{ 108,  40,  10 },
+		{  96,  44,  11 },
+		{  72,  50,  13 },
+		{  48,  57,  14 },
+		{  36,  67,  16 },
+		{  24,  80,  20 },
+		{  22, 100,  25 },
+		{  18, 133,  34 },
+		{  12, 160,  40 },
+		{  10, 200,  50 },
+		{   6, 240,  58 },
+		{   4, 267,  66 },
+		{   2, 400, 100 },
+		{   0, 500, 130 },
+	};
+	const HAL_RATE_TABLE *rt;
+	int i, j;
+
+	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
+	rt = sc->sc_rates[mode];
+	KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
+	/* sc_rixmap: 802.11 rate value -> index into the HAL rate table. */
+	for (i = 0; i < rt->rateCount; i++)
+		sc->sc_rixmap[rt->info[i].dot11Rate & IEEE80211_RATE_VAL] = i;
+	memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
+	/* sc_hwmap: h/w rate code (0..31) -> ieee rate, radiotap flags,
+	 * and LED blink timings. */
+	for (i = 0; i < 32; i++) {
+		u_int8_t ix = rt->rateCodeToIndex[i];
+		if (ix == 0xff) {
+			/* Unused rate code: give it the slowest blink. */
+			sc->sc_hwmap[i].ledon = msecs_to_jiffies(500);
+			sc->sc_hwmap[i].ledoff = msecs_to_jiffies(130);
+			continue;
+		}
+		sc->sc_hwmap[i].ieeerate =
+			rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
+		if (rt->info[ix].shortPreamble ||
+		    rt->info[ix].phy == IEEE80211_T_OFDM)
+			sc->sc_hwmap[i].flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
+		/* setup blink rate table to avoid per-packet lookup */
+		for (j = 0; j < N(blinkrates) - 1; j++)
+			if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
+				break;
+		/* NB: this uses the last entry if the rate isn't found */
+		/* XXX beware of overflow */
+		sc->sc_hwmap[i].ledon = msecs_to_jiffies(blinkrates[j].timeOn);
+		sc->sc_hwmap[i].ledoff = msecs_to_jiffies(blinkrates[j].timeOff);
+	}
+	sc->sc_currates = rt;
+	sc->sc_curmode = mode;
+	/*
+	 * All protection frames are transmitted at 2Mb/s for
+	 * 11g, otherwise at 1Mb/s.
+	 * XXX select protection rate index from rate table.
+	 */
+	sc->sc_protrix = (mode == IEEE80211_MODE_11G ? 1 : 0);
+	/* rate index used to send mgt frames */
+	sc->sc_minrateix = 0;
+#undef N
+}
+
+#ifdef ATH_SUPERG_FF
+/* Estimate the airtime (us) of a prospective fast-frame aggregate
+ * containing skb plus any frame already staged in an_tx_ffbuf. */
+static u_int32_t
+athff_approx_txtime(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
+{
+	u_int32_t txtime;
+	u_int32_t framelen;
+
+	/*
+	 * Approximate the frame length to be transmitted. A swag to add
+	 * the following maximal values to the skb payload:
+	 *   - 32: 802.11 encap + CRC
+	 *   - 24: encryption overhead (if wep bit)
+	 *   - 4 + 6: fast-frame header and padding
+	 *   - 16: 2 LLC FF tunnel headers
+	 *   - 14: 1 802.3 FF tunnel header (skb already accounts for 2nd)
+	 */
+	framelen = skb->len + 32 + 4 + 6 + 16 + 14;
+	if (sc->sc_ic.ic_flags & IEEE80211_F_PRIVACY)
+		framelen += 24;
+	/* Include the frame already staged for this priority, if any. */
+	if (an->an_tx_ffbuf[skb->priority])
+		framelen += an->an_tx_ffbuf[skb->priority]->bf_skb->len;
+
+	/* Use the node's previous data rate as the estimate's rate. */
+	txtime = ath_hal_computetxtime(sc->sc_ah, sc->sc_currates, framelen,
+		an->an_prevdatarix, AH_FALSE);
+
+	return txtime;
+}
+/*
+ * Determine if a data frame may be aggregated via ff tunneling.
+ * Returns AH_TRUE/AH_FALSE; on a TxOp violation with a staged frame,
+ * *flushq is set so the caller flushes the staging buffer.
+ *
+ *  NB: allowing EAPOL frames to be aggregated with other unicast traffic.
+ *      Do 802.1x EAPOL frames proceed in the clear? Then they couldn't
+ *      be aggregated with other types of frames when encryption is on?
+ *
+ *  NB: assumes lock on an_tx_ffbuf effectively held by txq lock mechanism.
+ */
+static int
+athff_can_aggregate(struct ath_softc *sc, struct ether_header *eh,
+		    struct ath_node *an, struct sk_buff *skb, u_int16_t fragthreshold, int *flushq)
+{
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ath_txq *txq = sc->sc_ac2q[skb->priority];
+	struct ath_buf *ffbuf = an->an_tx_ffbuf[skb->priority];
+	u_int32_t txoplimit;
+
+#define US_PER_4MS 4000
+#define	MIN(a,b)	((a) < (b) ? (a) : (b))
+
+	*flushq = AH_FALSE;
+
+	/* No FF when fragmentation is in effect (threshold below max MPDU). */
+	if (fragthreshold < 2346)
+	    return AH_FALSE;
+
+	/* Without a staged frame, only aggregate when the queue is busy
+	 * enough to benefit. */
+	if ((!ffbuf) && (txq->axq_depth < sc->sc_fftxqmin))
+		return AH_FALSE;
+	if (!(ic->ic_ath_cap & an->an_node.ni_ath_flags & IEEE80211_ATHC_FF))
+		return AH_FALSE;
+	if (!(ic->ic_opmode == IEEE80211_M_STA ||
+		  ic->ic_opmode == IEEE80211_M_HOSTAP))
+		return AH_FALSE;
+	if ((ic->ic_opmode == IEEE80211_M_HOSTAP) &&
+		  ETHER_IS_MULTICAST(eh->ether_dhost))
+		return AH_FALSE;
+
+#ifdef ATH_SUPERG_XR
+	if (sc->sc_currates->info[an->an_prevdatarix].rateKbps < an->an_minffrate)
+		return AH_FALSE;
+#endif
+	txoplimit = IEEE80211_TXOP_TO_US(
+		ic->ic_wme.wme_chanParams.cap_wmeParams[skb->priority].wmep_txopLimit);
+
+	/* if the 4 msec limit is set on the channel, take it into account */
+	if (sc->sc_curchan.privFlags & CHANNEL_4MS_LIMIT)
+		txoplimit = MIN(txoplimit, US_PER_4MS); 
+
+	if (txoplimit != 0 && athff_approx_txtime(sc, an, skb) > txoplimit) {
+		DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
+			"%s: FF TxOp violation\n", __func__);
+		if (ffbuf)
+			*flushq = AH_TRUE;
+		return AH_FALSE;
+	}
+
+	return AH_TRUE;
+
+#undef US_PER_4MS
+#undef MIN
+}
+#endif
+
+#ifdef AR_DEBUG
+/* Debug dump of one rx descriptor; trailing char is ' ' (pending),
+ * '*' (completed ok) or '!' (completed with error status). */
+static void
+ath_printrxbuf(struct ath_buf *bf, int done)
+{
+	struct ath_desc *ds = bf->bf_desc;
+
+	printk("R (%p %llx) %08x %08x %08x %08x %08x %08x %c\n",
+	    ds, ito64(bf->bf_daddr),
+	    ds->ds_link, ds->ds_data,
+	    ds->ds_ctl0, ds->ds_ctl1,
+	    ds->ds_hw[0], ds->ds_hw[1],
+	    !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? '*' : '!');
+}
+
+/* Debug dump of one tx descriptor; same status-char convention as
+ * ath_printrxbuf but with four hw words. */
+static void
+ath_printtxbuf(struct ath_buf *bf, int done)
+{
+	struct ath_desc *ds = bf->bf_desc;
+
+	printk("T (%p %llx) %08x %08x %08x %08x %08x %08x %08x %08x %c\n",
+	    ds, ito64(bf->bf_daddr),
+	    ds->ds_link, ds->ds_data,
+	    ds->ds_ctl0, ds->ds_ctl1,
+	    ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3],
+	    !done ? ' ' : (ds->ds_txstat.ts_status == 0) ? '*' : '!');
+}
+#endif /* AR_DEBUG */
+
+/*
+ * Return netdevice statistics.
+ * Refreshes the error/drop counters in sc_devstats from the driver's
+ * private ath statistics before handing the struct back.
+ */
+static struct net_device_stats *
+ath_getstats(struct net_device *dev)
+{
+	struct ath_softc *sc = dev->priv;
+	struct net_device_stats *stats = &sc->sc_devstats;
+
+	/* update according to private statistics */
+	stats->tx_errors = sc->sc_stats.ast_tx_xretries
+			 + sc->sc_stats.ast_tx_fifoerr
+			 + sc->sc_stats.ast_tx_filtered;
+	stats->tx_dropped = sc->sc_stats.ast_tx_nobuf
+			+ sc->sc_stats.ast_tx_encap
+			+ sc->sc_stats.ast_tx_nonode
+			+ sc->sc_stats.ast_tx_nobufmgt;
+	stats->rx_errors = sc->sc_stats.ast_rx_fifoerr
+			+ sc->sc_stats.ast_rx_badcrypt
+			+ sc->sc_stats.ast_rx_badmic;
+	stats->rx_dropped = sc->sc_stats.ast_rx_tooshort;
+	stats->rx_crc_errors = sc->sc_stats.ast_rx_crcerr;
+
+	return stats;
+}
+
+/* net_device handler: change the interface MAC address.
+ * Refused with -EBUSY while the interface is up; otherwise programs
+ * the HAL and resets the chip if it is running. */
+static int
+ath_set_mac_address(struct net_device *dev, void *addr)
+{
+	struct ath_softc *sc = dev->priv;
+	struct ieee80211com *ic = &sc->sc_ic;
+	struct ath_hal *ah = sc->sc_ah;
+	struct sockaddr *mac = addr;
+	int error = 0;
+
+	if (netif_running(dev)) {
+		DPRINTF(sc, ATH_DEBUG_ANY,
+			"%s: cannot set address; device running\n", __func__);
+		return -EBUSY;
+	}
+	DPRINTF(sc, ATH_DEBUG_ANY, "%s: %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
+		__func__,
+		mac->sa_data[0], mac->sa_data[1], mac->sa_data[2],
+		mac->sa_data[3], mac->sa_data[4], mac->sa_data[5]);
+
+	ATH_LOCK(sc);
+	/* XXX not right for multiple VAPs */
+	IEEE80211_ADDR_COPY(ic->ic_myaddr, mac->sa_data);
+	IEEE80211_ADDR_COPY(dev->dev_addr, mac->sa_data);
+	ath_hal_setmac(ah, dev->dev_addr);
+	if ((dev->flags & IFF_RUNNING) && !sc->sc_invalid) {
+		error = ath_reset(dev);
+	}
+	ATH_UNLOCK(sc);
+
+	return error;
+}
+
+/* net_device handler: change the MTU.
+ * Validates against ATH_MIN_MTU/ATH_MAX_MTU, then resets the device
+ * (with the rx tasklet quiesced) so rx buffers can be resized. */
+static int
+ath_change_mtu(struct net_device *dev, int mtu)
+{
+	struct ath_softc *sc = dev->priv;
+	int error = 0;
+
+	if (!(ATH_MIN_MTU < mtu && mtu <= ATH_MAX_MTU)) {
+		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %d, min %u, max %u\n",
+			__func__, mtu, ATH_MIN_MTU, ATH_MAX_MTU);
+		return -EINVAL;
+	}
+	DPRINTF(sc, ATH_DEBUG_ANY, "%s: %d\n", __func__, mtu);
+
+	ATH_LOCK(sc);
+	dev->mtu = mtu;
+	if ((dev->flags & IFF_RUNNING) && !sc->sc_invalid) {
+		/* NB: the rx buffers may need to be reallocated */
+		tasklet_disable(&sc->sc_rxtq);
+		error = ath_reset(dev);
+		tasklet_enable(&sc->sc_rxtq);
+	}
+	ATH_UNLOCK(sc);
+
+	return error;
+}
+
+/*
+ * Diagnostic interface to the HAL.  This is used by various
+ * tools to do things like retrieve register contents for
+ * debugging.  The mechanism is intentionally opaque so that
+ * it can change frequently w/o concern for compatibility.
+ *
+ * NOTE(review): insize/outsize come straight from userspace and are
+ * passed to kmalloc unbounded — consider a sanity cap; caller already
+ * gates on CAP_NET_ADMIN which mitigates but does not remove this.
+ */
+static int
+ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
+{
+	struct ath_hal *ah = sc->sc_ah;
+	u_int id = ad->ad_id & ATH_DIAG_ID;
+	void *indata = NULL;
+	void *outdata = NULL;
+	u_int32_t insize = ad->ad_in_size;
+	u_int32_t outsize = ad->ad_out_size;
+	int error = 0;
+
+	if (ad->ad_id & ATH_DIAG_IN) {
+		/*
+		 * Copy in data.
+		 */
+		indata = kmalloc(insize, GFP_KERNEL);
+		if (indata == NULL) {
+			error = -ENOMEM;
+			goto bad;
+		}
+		if (copy_from_user(indata, ad->ad_in_data, insize)) {
+			error = -EFAULT;
+			goto bad;
+		}
+	}
+	if (ad->ad_id & ATH_DIAG_DYN) {
+		/*
+		 * Allocate a buffer for the results (otherwise the HAL
+		 * returns a pointer to a buffer where we can read the
+		 * results).  Note that we depend on the HAL leaving this
+		 * pointer for us to use below in reclaiming the buffer;
+		 * may want to be more defensive.
+		 */
+		outdata = kmalloc(outsize, GFP_KERNEL);
+		if (outdata == NULL) {
+			error = -ENOMEM;
+			goto bad;
+		}
+	}
+	if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
+		if (outsize < ad->ad_out_size)
+			ad->ad_out_size = outsize;
+		if (outdata &&
+		    copy_to_user(ad->ad_out_data, outdata, ad->ad_out_size))
+			error = -EFAULT;
+	} else
+		error = -EINVAL;
+bad:
+	/* goto-cleanup: free whichever buffers were allocated. */
+	if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
+		kfree(indata);
+	if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
+		kfree(outdata);
+	return error;
+}
+
+static int
+ath_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)	/* driver-private ioctl dispatch */
+{
+	struct ath_softc *sc = dev->priv;
+	struct ieee80211com *ic = &sc->sc_ic;
+	int error;
+
+	ATH_LOCK(sc);
+	switch (cmd) {
+	case SIOCGATHSTATS:
+		sc->sc_stats.ast_tx_packets = sc->sc_devstats.tx_packets;
+		sc->sc_stats.ast_rx_packets = sc->sc_devstats.rx_packets;
+		sc->sc_stats.ast_rx_rssi = ieee80211_getrssi(ic);
+		if (copy_to_user(ifr->ifr_data, &sc->sc_stats, sizeof (sc->sc_stats)))
+			error = -EFAULT;
+		else
+			error = 0;
+		break;
+	case SIOCGATHDIAG:
+		if (!capable(CAP_NET_ADMIN))	/* HAL diag access is privileged */
+			error = -EPERM;
+		else
+			error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
+		break;
+	case SIOCETHTOOL:
+		if (copy_from_user(&cmd, ifr->ifr_data, sizeof(cmd)))	/* NB: reuses 'cmd' for the ethtool sub-command */
+			error = -EFAULT;
+		else
+			error = ath_ioctl_ethtool(sc, cmd, ifr->ifr_data);
+		break;
+	case SIOC80211IFCREATE:
+		error = ieee80211_ioctl_create_vap(ic, ifr, dev); 
+		break;
+	default:
+		error = -EINVAL;
+		break;
+	}
+	ATH_UNLOCK(sc);
+	return error;
+}
+
+/*
+ * Sysctls are split into ``static'' and ``dynamic'' tables.
+ * The former are defined at module load time and are used
+ * control parameters common to all devices.  The latter are
+ * tied to particular device instances and come and go with
+ * each device.  The split is currently a bit tenuous; many of
+ * the static ones probably should be dynamic but having them
+ * static (e.g. debug) means they can be set after a module is
+ * loaded and before bringing up a device.  The alternative
+ * is to add module parameters.
+ */
+
+/*
+ * Dynamic (i.e. per-device) sysctls.  These are automatically
+ * mirrored in /proc/sys.
+ */
+enum {
+	ATH_SLOTTIME		= 1,
+	ATH_ACKTIMEOUT		= 2,
+	ATH_CTSTIMEOUT		= 3,
+	ATH_SOFTLED		= 4,
+	ATH_LEDPIN		= 5,
+	ATH_COUNTRYCODE		= 6,
+	ATH_REGDOMAIN		= 7,
+	ATH_DEBUG		= 8,
+	ATH_TXANTENNA		= 9,
+	ATH_RXANTENNA		= 10,
+	ATH_DIVERSITY		= 11,
+	ATH_TXINTRPERIOD 	= 12,
+	ATH_FFTXQMIN		= 18,
+	ATH_TKIPMIC		= 19,
+	ATH_XR_POLL_PERIOD 	= 20,
+	ATH_XR_POLL_COUNT 	= 21,
+	ATH_ACKRATE             = 22,
+};
+
+static int
+ATH_SYSCTL_DECL(ath_sysctl_halparam, ctl, write, filp, buffer, lenp, ppos)
+{
+	struct ath_softc *sc = ctl->extra1;
+	struct ath_hal *ah = sc->sc_ah;
+	u_int val;
+	int ret;
+
+	ctl->data = &val;
+	ctl->maxlen = sizeof(val);
+	if (write) {
+		ret = ATH_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos);
+		if (ret == 0) {
+			switch (ctl->ctl_name) {
+			case ATH_SLOTTIME:
+				if (val > 0) {
+					if (!ath_hal_setslottime(ah, val))
+						ret = -EINVAL;
+					else
+						sc->sc_slottimeconf = val;
+				} else {
+					/* disable manual override */
+					sc->sc_slottimeconf = 0;
+					ath_setslottime(sc);
+				}
+				break;
+			case ATH_ACKTIMEOUT:
+				if (!ath_hal_setacktimeout(ah, val))
+					ret = -EINVAL;
+				break;
+			case ATH_CTSTIMEOUT:
+				if (!ath_hal_setctstimeout(ah, val))
+					ret = -EINVAL;
+				break;
+			case ATH_SOFTLED:
+				if (val != sc->sc_softled) {
+					if (val)
+						ath_hal_gpioCfgOutput(ah, sc->sc_ledpin);
+					ath_hal_gpioset(ah, sc->sc_ledpin,!sc->sc_ledon);
+					sc->sc_softled = val;
+				}
+				break;
+			case ATH_LEDPIN:
+				/* XXX validate? */
+				sc->sc_ledpin = val;
+				break;
+			case ATH_DEBUG:
+				sc->sc_debug = val;
+				break;
+			case ATH_TXANTENNA:
+				/*
+				 * antenna can be:
+				 * 0 = transmit diversity
+				 * 1 = antenna port 1
+				 * 2 = antenna port 2
+				 */
+				if (val < 0 || val > 2)
+					return -EINVAL;
+				else
+					sc->sc_txantenna = val;
+				break;
+			case ATH_RXANTENNA:
+				/*
+				 * antenna can be:
+				 * 0 = receive diversity
+				 * 1 = antenna port 1
+				 * 2 = antenna port 2
+				 */
+				if (val < 0 || val > 2)
+					return -EINVAL;
+				else
+					ath_setdefantenna(sc, val);
+				break;
+			case ATH_DIVERSITY:
+				/*
+				 * 0 = disallow use of diversity
+				 * 1 = allow use of diversity
+				 */
+				if (val < 0 || val > 1)
+					return -EINVAL;
+				/* Don't enable diversity if XR is enabled */
+				if (((!sc->sc_hasdiversity) || (sc->sc_xrtxq != NULL)) && val)
+					return -EINVAL;
+				sc->sc_diversity = val;
+				ath_hal_setdiversity(ah, val);
+				break;
+			case ATH_TXINTRPERIOD:
+				/* XXX: validate? */
+				sc->sc_txintrperiod = val;
+				break;
+			case ATH_FFTXQMIN:
+				/* XXX validate? */
+				sc->sc_fftxqmin = val;
+				break;
+			case ATH_TKIPMIC: {
+				struct ieee80211com *ic = &sc->sc_ic;
+
+				if (!ath_hal_hastkipmic(ah))
+					return -EINVAL;
+				ath_hal_settkipmic(ah, val);
+				if (val)
+					ic->ic_caps |= IEEE80211_C_TKIPMIC;
+				else
+					ic->ic_caps &= ~IEEE80211_C_TKIPMIC;
+				break;
+			}
+#ifdef ATH_SUPERG_XR
+			case ATH_XR_POLL_PERIOD: 
+				if (val > XR_MAX_POLL_INTERVAL)
+					val = XR_MAX_POLL_INTERVAL;
+				else if (val < XR_MIN_POLL_INTERVAL)
+					val = XR_MIN_POLL_INTERVAL;
+				sc->sc_xrpollint = val;
+				break;
+
+			case ATH_XR_POLL_COUNT: 
+				if (val > XR_MAX_POLL_COUNT)
+					val = XR_MAX_POLL_COUNT;
+				else if (val < XR_MIN_POLL_COUNT)
+					val = XR_MIN_POLL_COUNT;
+				sc->sc_xrpollcount = val;
+				break;
+#endif
+			case ATH_ACKRATE:
+				sc->sc_ackrate = val;
+				ath_set_ack_bitrate(sc, sc->sc_ackrate);
+				break;
+			default:
+				return -EINVAL;
+			}
+		}
+	} else {
+		switch (ctl->ctl_name) {
+		case ATH_SLOTTIME:
+			val = ath_hal_getslottime(ah);
+			break;
+		case ATH_ACKTIMEOUT:
+			val = ath_hal_getacktimeout(ah);
+			break;
+		case ATH_CTSTIMEOUT:
+			val = ath_hal_getctstimeout(ah);
+			break;
+		case ATH_SOFTLED:
+			val = sc->sc_softled;
+			break;
+		case ATH_LEDPIN:
+			val = sc->sc_ledpin;
+			break;
+		case ATH_COUNTRYCODE:
+			ath_hal_getcountrycode(ah, &val);
+			break;
+		case ATH_REGDOMAIN:
+			ath_hal_getregdomain(ah, &val);
+			break;
+		case ATH_DEBUG:
+			val = sc->sc_debug;
+			break;
+		case ATH_TXANTENNA:
+			val = sc->sc_txantenna;
+			break;
+		case ATH_RXANTENNA:
+			val = ath_hal_getdefantenna(ah);
+			break;
+		case ATH_DIVERSITY:
+			val = sc->sc_diversity;
+			break;
+		case ATH_TXINTRPERIOD:
+			val = sc->sc_txintrperiod;
+			break;
+		case ATH_FFTXQMIN:
+			val = sc->sc_fftxqmin;
+			break;
+		case ATH_TKIPMIC:
+			val = ath_hal_gettkipmic(ah);
+			break;
+#ifdef ATH_SUPERG_XR
+		case ATH_XR_POLL_PERIOD: 
+			val=sc->sc_xrpollint;
+			break;
+		case ATH_XR_POLL_COUNT: 
+			val=sc->sc_xrpollcount;
+			break;
+#endif
+		case ATH_ACKRATE:
+			val = sc->sc_ackrate;
+			break;
+		default:
+			return -EINVAL;
+		}
+		ret = ATH_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos);
+	}
+	return ret;
+}
+
+static int mincalibrate = 1;			/* once a second */
+static int maxint = 0x7fffffff;		/* 32-bit big */
+
+#define	CTL_AUTO	-2	/* cannot be CTL_ANY or CTL_NONE */
+
+static const ctl_table ath_sysctl_template[] = {
+	{ .ctl_name	= ATH_SLOTTIME,
+	  .procname	= "slottime",
+	  .mode		= 0644,
+	  .proc_handler	= ath_sysctl_halparam
+	},
+	{ .ctl_name	= ATH_ACKTIMEOUT,
+	  .procname	= "acktimeout",
+	  .mode		= 0644,
+	  .proc_handler	= ath_sysctl_halparam
+	},
+	{ .ctl_name	= ATH_CTSTIMEOUT,
+	  .procname	= "ctstimeout",
+	  .mode		= 0644,
+	  .proc_handler	= ath_sysctl_halparam
+	},
+	{ .ctl_name	= ATH_SOFTLED,
+	  .procname	= "softled",
+	  .mode		= 0644,
+	  .proc_handler	= ath_sysctl_halparam
+	},
+	{ .ctl_name	= ATH_LEDPIN,
+	  .procname	= "ledpin",
+	  .mode		= 0644,
+	  .proc_handler	= ath_sysctl_halparam
+	},
+	{ .ctl_name	= ATH_COUNTRYCODE,
+	  .procname	= "countrycode",
+	  .mode		= 0444,
+	  .proc_handler	= ath_sysctl_halparam
+	},
+	{ .ctl_name	= ATH_REGDOMAIN,
+	  .procname	= "regdomain",
+	  .mode		= 0444,
+	  .proc_handler	= ath_sysctl_halparam
+	},
+#ifdef AR_DEBUG
+	{ .ctl_name	= ATH_DEBUG,
+	  .procname	= "debug",
+	  .mode		= 0644,
+	  .proc_handler	= ath_sysctl_halparam
+	},
+#endif
+	{ .ctl_name	= ATH_TXANTENNA,
+	  .procname	= "txantenna",
+	  .mode		= 0644,
+	  .proc_handler	= ath_sysctl_halparam
+	},
+	{ .ctl_name	= ATH_RXANTENNA,
+	  .procname	= "rxantenna",
+	  .mode		= 0644,
+	  .proc_handler	= ath_sysctl_halparam
+	},
+	{ .ctl_name	= ATH_DIVERSITY,
+	  .procname	= "diversity",
+	  .mode		= 0644,
+	  .proc_handler	= ath_sysctl_halparam
+	},
+	{ .ctl_name	= ATH_TXINTRPERIOD,
+	  .procname	= "txintrperiod",
+	  .mode		= 0644,
+	  .proc_handler	= ath_sysctl_halparam
+	},
+	{ .ctl_name	= ATH_FFTXQMIN,
+	  .procname	= "fftxqmin",
+	  .mode		= 0644,
+	  .proc_handler	= ath_sysctl_halparam
+	},
+	{ .ctl_name	= ATH_TKIPMIC,
+	  .procname	= "tkipmic",
+	  .mode		= 0644,
+	  .proc_handler	= ath_sysctl_halparam
+	},
+#ifdef ATH_SUPERG_XR
+	{ .ctl_name	= ATH_XR_POLL_PERIOD,
+	  .procname	= "xrpollperiod",
+	  .mode		= 0644,
+	  .proc_handler	= ath_sysctl_halparam
+	},
+	{ .ctl_name	= ATH_XR_POLL_COUNT,
+	  .procname	= "xrpollcount",
+	  .mode		= 0644,
+	  .proc_handler	= ath_sysctl_halparam
+	},
+#endif
+	{ .ctl_name	= ATH_ACKRATE,
+	  .procname	= "ackrate",
+	  .mode		= 0644,
+	  .proc_handler	= ath_sysctl_halparam
+	},
+	{ 0 }
+};
+
+static void
+ath_dynamic_sysctl_register(struct ath_softc *sc)
+{
+	int i, space;
+	char *dev_name = NULL;
+
+	space = 5 * sizeof(struct ctl_table) + sizeof(ath_sysctl_template);
+	sc->sc_sysctls = kmalloc(space, GFP_KERNEL);
+	if (sc->sc_sysctls == NULL) {
+		printk("%s: no memory for sysctl table!\n", __func__);
+		return;
+	}
+
+	/* Keep a private copy of the device name: the net_device name
+	 * may be changed later (NETDEV_CHANGENAME), and we would then
+	 * have no way of knowing which name this sysctl tree was
+	 * registered under. */
+	dev_name = kmalloc(strlen(sc->sc_dev->name) + 1, GFP_KERNEL);
+	if (dev_name == NULL) {
+		printk("%s: no memory for device name storage!\n", __func__);
+		kfree(sc->sc_sysctls);		/* NB: don't leak the table */
+		sc->sc_sysctls = NULL;
+		return;
+	}
+	strncpy(dev_name, sc->sc_dev->name, strlen(sc->sc_dev->name) + 1);
+
+	/* setup the table */
+	memset(sc->sc_sysctls, 0, space);
+	sc->sc_sysctls[0].ctl_name = CTL_DEV;
+	sc->sc_sysctls[0].procname = "dev";
+	sc->sc_sysctls[0].mode = 0555;
+	sc->sc_sysctls[0].child = &sc->sc_sysctls[2];
+	/* [1] is NULL terminator */
+	sc->sc_sysctls[2].ctl_name = CTL_AUTO;
+	sc->sc_sysctls[2].procname = dev_name;
+	sc->sc_sysctls[2].mode = 0555;
+	sc->sc_sysctls[2].child = &sc->sc_sysctls[4];
+	/* [3] is NULL terminator */
+	/* copy in pre-defined data */
+	memcpy(&sc->sc_sysctls[4], ath_sysctl_template,
+		sizeof(ath_sysctl_template));
+
+	/* add in dynamic data references */
+	for (i = 4; sc->sc_sysctls[i].ctl_name; i++)
+		if (sc->sc_sysctls[i].extra1 == NULL)
+			sc->sc_sysctls[i].extra1 = sc;
+
+	/* and register everything */
+	sc->sc_sysctl_header = ATH_REGISTER_SYSCTL_TABLE(sc->sc_sysctls);
+	if (!sc->sc_sysctl_header) {
+		printk("%s: failed to register sysctls!\n", sc->sc_dev->name);
+		kfree(dev_name);		/* NB: free the name copy too */
+		kfree(sc->sc_sysctls);
+		sc->sc_sysctls = NULL;
+	}
+
+	/* initialize values */
+	sc->sc_debug = ath_debug;
+	sc->sc_txantenna = 0;		/* default to auto-selection */
+	sc->sc_txintrperiod = ATH_TXQ_INTR_PERIOD;
+}
+
+static void
+ath_dynamic_sysctl_unregister(struct ath_softc *sc)
+{
+	if (sc->sc_sysctl_header) {
+		unregister_sysctl_table(sc->sc_sysctl_header);
+		sc->sc_sysctl_header = NULL;
+	}
+	/* NB: sc_sysctls is NULL when registration failed or never ran;
+	 * dereferencing [2].procname unconditionally would oops here. */
+	if (sc->sc_sysctls) {
+		if (sc->sc_sysctls[2].procname)
+			kfree(sc->sc_sysctls[2].procname);
+		kfree(sc->sc_sysctls);
+		sc->sc_sysctls = NULL;
+	}
+}
+
+/*
+ * Announce various information on device/driver attach.
+ */
+static void
+ath_announce(struct net_device *dev)
+{
+#define	HAL_MODE_DUALBAND	(HAL_MODE_11A|HAL_MODE_11B)
+	struct ath_softc *sc = dev->priv;
+	struct ath_hal *ah = sc->sc_ah;
+	u_int modes, cc;
+
+	printk("%s: mac %d.%d phy %d.%d", dev->name,
+		ah->ah_macVersion, ah->ah_macRev,
+		ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
+	/*
+	 * Print radio revision(s).  We check the wireless modes
+	 * to avoid falsely printing revs for inoperable parts.
+	 * Dual-band radio revs are returned in the 5 GHz rev number.
+	 */
+	ath_hal_getcountrycode(ah, &cc);
+	modes = ath_hal_getwirelessmodes(ah, cc);
+	if ((modes & HAL_MODE_DUALBAND) == HAL_MODE_DUALBAND) {
+		if (ah->ah_analog5GhzRev && ah->ah_analog2GhzRev)
+			printk(" 5 GHz radio %d.%d 2 GHz radio %d.%d",
+				ah->ah_analog5GhzRev >> 4,
+				ah->ah_analog5GhzRev & 0xf,
+				ah->ah_analog2GhzRev >> 4,
+				ah->ah_analog2GhzRev & 0xf);
+		else
+			printk(" radio %d.%d", ah->ah_analog5GhzRev >> 4,
+				ah->ah_analog5GhzRev & 0xf);
+	} else
+		printk(" radio %d.%d", ah->ah_analog5GhzRev >> 4,
+			ah->ah_analog5GhzRev & 0xf);
+	printk("\n");
+	if (1/*bootverbose*/) {
+		int i;
+		for (i = 0; i <= WME_AC_VO; i++) {
+			struct ath_txq *txq = sc->sc_ac2q[i];
+			printk("%s: Use hw queue %u for %s traffic\n",
+				dev->name, txq->axq_qnum,
+				ieee80211_wme_acnames[i]);
+		}
+		printk("%s: Use hw queue %u for CAB traffic\n", dev->name,
+			sc->sc_cabq->axq_qnum);
+		printk("%s: Use hw queue %u for beacons\n", dev->name,
+			sc->sc_bhalq);
+	}
+#undef HAL_MODE_DUALBAND
+}
+
+/*
+ * Static (i.e. global) sysctls.  Note that the HAL sysctls
+ * are located under ours by sharing the setting for DEV_ATH.
+ */
+enum {
+	DEV_ATH		= 9,			/* XXX known by HAL */
+};
+
+static ctl_table ath_static_sysctls[] = {
+#ifdef AR_DEBUG
+	{ .ctl_name	= CTL_AUTO,
+	  .procname	= "debug",
+	  .mode		= 0644,
+	  .data		= &ath_debug,
+	  .maxlen	= sizeof(ath_debug),
+	  .proc_handler	= proc_dointvec
+	},
+#endif
+	{ .ctl_name	= CTL_AUTO,
+	  .procname	= "countrycode",
+	  .mode		= 0444,
+	  .data		= &ath_countrycode,
+	  .maxlen	= sizeof(ath_countrycode),
+	  .proc_handler	= proc_dointvec
+	},
+	{ .ctl_name	= CTL_AUTO,
+	  .procname	= "outdoor",
+	  .mode		= 0444,
+	  .data		= &ath_outdoor,
+	  .maxlen	= sizeof(ath_outdoor),
+	  .proc_handler	= proc_dointvec
+	},
+	{ .ctl_name	= CTL_AUTO,
+	  .procname	= "xchanmode",
+	  .mode		= 0444,
+	  .data		= &ath_xchanmode,
+	  .maxlen	= sizeof(ath_xchanmode),
+	  .proc_handler	= proc_dointvec
+	},
+	{ .ctl_name	= CTL_AUTO,
+	  .procname	= "calibrate",
+	  .mode		= 0644,
+	  .data		= &ath_calinterval,
+	  .maxlen	= sizeof(ath_calinterval),
+	  .extra1	= &mincalibrate,
+	  .extra2	= &maxint,
+	  .proc_handler	= proc_dointvec_minmax
+	},
+	{ 0 }
+};
+static ctl_table ath_ath_table[] = {
+	{ .ctl_name	= DEV_ATH,
+	  .procname	= "ath",
+	  .mode		= 0555,
+	  .child	= ath_static_sysctls
+	}, { 0 }
+};
+static ctl_table ath_root_table[] = {
+	{ .ctl_name	= CTL_DEV,
+	  .procname	= "dev",
+	  .mode		= 0555,
+	  .child	= ath_ath_table
+	}, { 0 }
+};
+static struct ctl_table_header *ath_sysctl_header;
+
+void
+ath_sysctl_register(void)
+{
+	static int initialized = 0;	/* run-once guard; called from module init */
+
+	if (!initialized) {
+		register_netdevice_notifier(&ath_event_block);
+		ath_sysctl_header = ATH_REGISTER_SYSCTL_TABLE(ath_root_table);
+		initialized = 1;
+	}
+}
+
+void
+ath_sysctl_unregister(void)
+{
+	unregister_netdevice_notifier(&ath_event_block);
+	if (ath_sysctl_header)
+		unregister_sysctl_table(ath_sysctl_header);
+}
+
+static const char* 
+ath_get_hal_status_desc(HAL_STATUS status)	/* map a HAL status code to its description text */
+{
+	if (status > 0 && status < sizeof(hal_status_desc)/sizeof(char *))	/* NB: status 0 (success) yields "" -- presumably intentional, confirm */
+		return hal_status_desc[status];
+	else
+		return "";
+}
+
+static int
+ath_rcv_dev_event(struct notifier_block *this, unsigned long event,
+	void *ptr)
+{
+	struct net_device *dev = (struct net_device *) ptr;
+	struct ath_softc *sc;
+
+	/* NB: validate dev BEFORE dereferencing it for ->priv */
+	if (!dev || dev->open != &ath_init)
+		return 0;
+	sc = (struct ath_softc *) dev->priv;
+	if (sc == NULL)
+		return 0;
+	if (event == NETDEV_CHANGENAME) {
+		ath_dynamic_sysctl_unregister(sc);
+		ath_dynamic_sysctl_register(sc);
+		return NOTIFY_DONE;
+	}
+	return 0;
+}
diff -Naurp madwifi-0.9.3.3.old/net80211/ieee80211_linux.c madwifi-0.9.3.3/net80211/ieee80211_linux.c
--- madwifi-0.9.3.3.old/net80211/ieee80211_linux.c	2007-10-17 17:48:20.000000000 -0400
+++ madwifi-0.9.3.3/net80211/ieee80211_linux.c	2007-11-07 17:27:19.000000000 -0500
@@ -360,20 +360,23 @@ proc_read_nodes(struct ieee80211vap *vap
 
         //IEEE80211_NODE_LOCK(nt);                                                                               
         TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
+		struct timespec t;
                 /* Assume each node needs 500 bytes */
                 if (buf + space < p + 500)
                         break;
 
-		if (ni->ni_vap == vap &&
-		    0 != memcmp(vap->iv_myaddr, ni->ni_macaddr, IEEE80211_ADDR_LEN)) {
-			struct timespec t;
+		if ((ni->ni_vap == vap) && (memcmp(vap->iv_myaddr, 
+				ni->ni_macaddr, IEEE80211_ADDR_LEN) != 0)) {
 			jiffies_to_timespec(jiffies - ni->ni_last_rx, &t);
-			p += sprintf(p, "macaddr: <%s>\n", ether_sprintf(ni->ni_macaddr));
+			p += sprintf(p, "ni: %p%s vap: %p macaddr: <%s>\n",
+				ni, ni == ni->ni_vap->iv_bss ? "*" : " ",
+				ni->ni_vap, ether_sprintf(ni->ni_macaddr));
 			p += sprintf(p, " rssi %d\n", ni->ni_rssi);
 			
 			p += sprintf(p, " last_rx %ld.%06ld\n", 
 				     t.tv_sec, t.tv_nsec / 1000);
-
+			p += sprintf(p, " ni_tstamp %10llu ni_rtsf %10u\n",
+				le64_to_cpu(ni->ni_tstamp.tsf), ni->ni_rstamp);
 		}
         }
         //IEEE80211_NODE_UNLOCK(nt);                                                                             
diff -Naurp madwifi-0.9.3.3.old/net80211/ieee80211_node.c madwifi-0.9.3.3/net80211/ieee80211_node.c
--- madwifi-0.9.3.3.old/net80211/ieee80211_node.c	2007-11-07 18:10:13.000000000 -0500
+++ madwifi-0.9.3.3/net80211/ieee80211_node.c	2007-11-07 16:25:23.000000000 -0500
@@ -547,8 +547,7 @@ ieee80211_ibss_merge(struct ieee80211_no
 	struct ieee80211vap *vap = ni->ni_vap;
 	struct ieee80211com *ic = ni->ni_ic;
 
-	if (ni == vap->iv_bss ||
-	    IEEE80211_ADDR_EQ(ni->ni_bssid, vap->iv_bss->ni_bssid)) {
+	if (ni == vap->iv_bss) {
 		/* unchanged, nothing to do */
 		return 0;
 	}
diff -Naurp madwifi-0.9.3.3.old/net80211/ieee80211_wireless.c.orig madwifi-0.9.3.3/net80211/ieee80211_wireless.c.orig
--- madwifi-0.9.3.3.old/net80211/ieee80211_wireless.c.orig	1969-12-31 19:00:00.000000000 -0500
+++ madwifi-0.9.3.3/net80211/ieee80211_wireless.c.orig	2007-11-07 18:10:37.000000000 -0500
@@ -0,0 +1,5464 @@
+/*-
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ *    redistribution must be conditioned upon including a substantially
+ *    similar Disclaimer requirement for further binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $Id: ieee80211_wireless.c 2614 2007-07-26 12:58:47Z mrenzmann $
+ */
+
+/*
+ * Wireless extensions support for 802.11 common code.
+ */
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/utsname.h>
+#include <linux/if_arp.h>		/* XXX for ARPHRD_ETHER */
+#include <linux/delay.h>
+
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+
+#if WIRELESS_EXT < 14
+#error "Wireless extensions v14 or better is needed."
+#endif
+
+#include <asm/uaccess.h>
+
+#include <net80211/if_media.h>
+#include <net80211/if_athproto.h>
+
+#include <net80211/ieee80211_var.h>
+#include <net80211/ieee80211_linux.h>
+
+#define	IS_UP(_dev) \
+	(((_dev)->flags & (IFF_RUNNING|IFF_UP)) == (IFF_RUNNING|IFF_UP))
+#define	IS_UP_AUTO(_vap) \
+	(IS_UP((_vap)->iv_dev) && \
+	 (_vap)->iv_ic->ic_roaming == IEEE80211_ROAMING_AUTO)
+#define	RESCAN	1
+
+static void
+pre_announced_chanswitch(struct net_device *dev, u_int32_t channel, u_int32_t tbtt);
+
+static void
+preempt_scan(struct net_device *dev, int max_grace, int max_wait)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	int total_delay = 0;
+	int canceled = 0, ready = 0;
+	while (!ready && total_delay < max_grace + max_wait) {
+		if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) {
+			ready = 1;
+		} else {
+			if (!canceled && total_delay > max_grace) {
+				/* Cancel any existing active scan, so that
+				 * any new parameters in this scan ioctl
+				 * (or the defaults) can be honored, then
+				 * wait around a while to see if the scan
+				 * cancels properly. */
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+					"%s: cancel pending scan request\n", __func__);
+				(void) ieee80211_cancel_scan(vap);
+				canceled = 1;
+			}
+			mdelay(1);
+			total_delay += 1;
+		}
+	}
+	if (!ready) {
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+			"%s: Timeout canceling current scan.\n",
+			__func__);
+	}
+}
+	
+static struct iw_statistics *
+ieee80211_iw_getstats(struct net_device *dev)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct iw_statistics *is = &vap->iv_iwstats;
+	struct ieee80211com *ic = vap->iv_ic;
+	
+	set_quality(&is->qual, ieee80211_getrssi(vap->iv_ic), 
+			ic->ic_channoise);
+	is->status = vap->iv_state;
+	is->discard.nwid = vap->iv_stats.is_rx_wrongbss +
+		vap->iv_stats.is_rx_ssidmismatch;
+	is->discard.code = vap->iv_stats.is_rx_wepfail +
+		vap->iv_stats.is_rx_decryptcrc;
+	is->discard.fragment = 0;
+	is->discard.retries = 0;
+	is->discard.misc = 0;
+
+	is->miss.beacon = 0;
+
+	return is;
+}
+
+static int
+ieee80211_ioctl_giwname(struct net_device *dev, struct iw_request_info *info,
+	char *name, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211_channel *c = vap->iv_ic->ic_curchan;
+
+	if (IEEE80211_IS_CHAN_108G(c))
+		strncpy(name, "IEEE 802.11Tg", IFNAMSIZ);
+	else if (IEEE80211_IS_CHAN_108A(c))
+		strncpy(name, "IEEE 802.11Ta", IFNAMSIZ);
+	else if (IEEE80211_IS_CHAN_TURBO(c))
+		strncpy(name, "IEEE 802.11T", IFNAMSIZ);
+	else if (IEEE80211_IS_CHAN_ANYG(c))
+		strncpy(name, "IEEE 802.11g", IFNAMSIZ);
+	else if (IEEE80211_IS_CHAN_A(c))
+		strncpy(name, "IEEE 802.11a", IFNAMSIZ);
+	else if (IEEE80211_IS_CHAN_B(c))
+		strncpy(name, "IEEE 802.11b", IFNAMSIZ);
+	else
+		strncpy(name, "IEEE 802.11", IFNAMSIZ);
+	/* XXX FHSS */
+	return 0;
+}
+
+/*
+ * Get a key index from a request.  If nothing is
+ * specified in the request we use the current xmit
+ * key index.  Otherwise we just convert the index
+ * to be base zero.
+ */
+static int
+getiwkeyix(struct ieee80211vap *vap, const struct iw_point* erq, int *kix)
+{
+	/* Map the 1-based index in erq->flags to 0-based; use the default tx key when absent. */
+	int kid = erq->flags & IW_ENCODE_INDEX;
+
+	if (1 <= kid && kid <= IEEE80211_WEP_NKID)
+		kid--;
+	else {
+		kid = vap->iv_def_txkey;
+		if (kid == IEEE80211_KEYIX_NONE)
+			kid = 0;
+	}
+	if (kid < 0 || kid >= IEEE80211_WEP_NKID)
+		return -EINVAL;
+	*kix = kid;
+	return 0;
+}
+
+static int
+ieee80211_ioctl_siwencode(struct net_device *dev,
+	struct iw_request_info *info, struct iw_point *erq, char *keybuf)
+{
+	struct ieee80211vap *vap = dev->priv;
+	int kid, error;
+	int wepchange = 0;
+
+	if ((erq->flags & IW_ENCODE_DISABLED) == 0) {
+		/*
+		 * Enable crypto, set key contents, and
+		 * set the default transmit key.
+		 */
+		error = getiwkeyix(vap, erq, &kid);
+		if (error < 0)
+			return error;
+		if (erq->length > IEEE80211_KEYBUF_SIZE)
+			return -EINVAL;
+		/* XXX no way to install 0-length key */
+		ieee80211_key_update_begin(vap);
+		if (erq->length > 0) {
+			struct ieee80211_key *k = &vap->iv_nw_keys[kid];
+
+			/*
+			 * Set key contents.  This interface only supports WEP.
+			 * Indicate intended key index.			 
+			 */
+			k->wk_keyix = kid;
+			if (ieee80211_crypto_newkey(vap, IEEE80211_CIPHER_WEP,
+			    IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV, k)) {
+				k->wk_keylen = erq->length;
+				memcpy(k->wk_key, keybuf, erq->length);
+				memset(k->wk_key + erq->length, 0,
+					IEEE80211_KEYBUF_SIZE - erq->length);
+				if (!ieee80211_crypto_setkey(vap, k, vap->iv_myaddr, NULL))
+					error = -EINVAL;		/* XXX */
+			} else
+				error = -EINVAL;
+		} else {
+			/*
+			 * When the length is zero the request only changes
+			 * the default transmit key.  Verify the new key has
+			 * a non-zero length.
+			 */
+			if (vap->iv_nw_keys[kid].wk_keylen == 0)
+				error = -EINVAL;
+		}
+		if (error == 0) {
+			/*
+			 * The default transmit key is only changed when:
+			 * 1. Privacy is enabled and no key matter is
+			 *    specified.
+			 * 2. Privacy is currently disabled.
+			 * This is deduced from the iwconfig man page.
+			 */
+			if (erq->length == 0 ||
+			    (vap->iv_flags & IEEE80211_F_PRIVACY) == 0)
+				vap->iv_def_txkey = kid;
+			wepchange = (vap->iv_flags & IEEE80211_F_PRIVACY) == 0;
+			vap->iv_flags |= IEEE80211_F_PRIVACY;
+		}
+		ieee80211_key_update_end(vap);
+	} else {
+		if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0)
+			return 0;
+		vap->iv_flags &= ~IEEE80211_F_PRIVACY;
+		wepchange = 1;
+		error = 0;
+	}
+	if (error == 0) {
+		/* Set policy for unencrypted frames */	
+		if ((erq->flags & IW_ENCODE_OPEN) && 
+		    (!(erq->flags & IW_ENCODE_RESTRICTED))) {
+			vap->iv_flags &= ~IEEE80211_F_DROPUNENC;
+		} else if (!(erq->flags & IW_ENCODE_OPEN) && 
+			 (erq->flags & IW_ENCODE_RESTRICTED)) {
+			vap->iv_flags |= IEEE80211_F_DROPUNENC;
+		} else {
+			/* Default policy */
+			if (vap->iv_flags & IEEE80211_F_PRIVACY) 
+				vap->iv_flags |= IEEE80211_F_DROPUNENC;
+			else
+				vap->iv_flags &= ~IEEE80211_F_DROPUNENC;
+		}	
+	}
+	if (error == 0 && IS_UP(vap->iv_dev)) {
+		/*
+		 * Device is up and running; we must kick it to
+		 * effect the change.  If we're enabling/disabling
+		 * crypto use then we must re-initialize the device
+		 * so the 802.11 state machine is reset.  Otherwise
+		 * the key state should have been updated above.
+		 */
+		if (wepchange && IS_UP_AUTO(vap))
+			ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+	}
+#ifdef ATH_SUPERG_XR
+	/* set the same params on the xr vap device if exists */
+	if (!error && vap->iv_xrvap && !(vap->iv_flags & IEEE80211_F_XR))
+		ieee80211_ioctl_siwencode(vap->iv_xrvap->iv_dev, info, erq, keybuf);
+#endif
+	return error;
+}
+
+static int
+ieee80211_ioctl_giwencode(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *erq, char *key)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211_key *k;
+	int error, kid;
+
+	if (vap->iv_flags & IEEE80211_F_PRIVACY) {
+		error = getiwkeyix(vap, erq, &kid);
+		if (error < 0)
+			return error;
+		k = &vap->iv_nw_keys[kid];
+		/* XXX no way to return cipher/key type */
+
+		erq->flags = kid + 1;			/* NB: base 1 */
+		if (erq->length > k->wk_keylen)
+			erq->length = k->wk_keylen;
+		memcpy(key, k->wk_key, erq->length);
+		erq->flags |= IW_ENCODE_ENABLED;
+	} else {
+		erq->length = 0;
+		erq->flags = IW_ENCODE_DISABLED;
+	}
+	if (vap->iv_flags & IEEE80211_F_DROPUNENC)
+		erq->flags |= IW_ENCODE_RESTRICTED;
+	else
+		erq->flags |= IW_ENCODE_OPEN;
+	return 0;
+}
+
+#ifndef ifr_media
+#define	ifr_media	ifr_ifru.ifru_ivalue
+#endif
+
+static int
+ieee80211_ioctl_siwrate(struct net_device *dev, struct iw_request_info *info,
+	struct iw_param *rrq, char *extra)
+{
+	static const u_int mopts[] = {
+		IFM_AUTO,
+		IFM_IEEE80211_11A,
+		IFM_IEEE80211_11B,
+		IFM_IEEE80211_11G,
+		IFM_IEEE80211_FH,
+		IFM_IEEE80211_11A | IFM_IEEE80211_TURBO,
+		IFM_IEEE80211_11G | IFM_IEEE80211_TURBO,
+	};
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ifreq ifr;
+	int rate, retv;
+
+	if (vap->iv_media.ifm_cur == NULL)
+		return -EINVAL;
+	memset(&ifr, 0, sizeof(ifr));
+	ifr.ifr_media = vap->iv_media.ifm_cur->ifm_media &~ (IFM_MMASK|IFM_TMASK);
+	ifr.ifr_media |= mopts[vap->iv_des_mode];
+	if (rrq->fixed) {
+		/* XXX fudge checking rates */
+		rate = ieee80211_rate2media(ic, 2 * rrq->value / 1000000,
+			vap->iv_des_mode);
+		if (rate == IFM_AUTO)		/* NB: unknown rate */
+			return -EINVAL;
+	} else
+		rate = IFM_AUTO;
+	ifr.ifr_media |= IFM_SUBTYPE(rate);
+
+	/* refresh media capabilities based on channel */
+	ifmedia_removeall(&vap->iv_media);
+	(void) ieee80211_media_setup(ic, &vap->iv_media,
+		vap->iv_caps, vap->iv_media.ifm_change, vap->iv_media.ifm_status);
+
+	retv = ifmedia_ioctl(vap->iv_dev, &ifr, &vap->iv_media, SIOCSIFMEDIA);
+	if (retv == -ENETRESET)
+		retv = IS_UP_AUTO(vap) ? ieee80211_open(vap->iv_dev) : 0;
+	return retv;
+}
+
+static int
+ieee80211_ioctl_giwrate(struct net_device *dev,	struct iw_request_info *info,
+	struct iw_param *rrq, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ifmediareq imr;
+	int rate;
+
+	memset(&imr, 0, sizeof(imr));
+	vap->iv_media.ifm_status(vap->iv_dev, &imr);
+
+	rrq->fixed = IFM_SUBTYPE(vap->iv_media.ifm_media) != IFM_AUTO;
+	/* media status will have the current xmit rate if available */
+	rate = ieee80211_media2rate(imr.ifm_active);
+	if (rate == -1)		/* IFM_AUTO */
+		rate = 0;
+	rrq->value = 1000000 * (rate / 2);
+
+	return 0;
+}
+
+static int
+ieee80211_ioctl_siwsens(struct net_device *dev,	struct iw_request_info *info,
+	struct iw_param *sens, char *extra)
+{
+	return -EOPNOTSUPP;
+}
+
+static int
+ieee80211_ioctl_giwsens(struct net_device *dev,	struct iw_request_info *info,
+	struct iw_param *sens, char *extra)
+{
+	sens->value = 1;
+	sens->fixed = 1;
+
+	return 0;
+}
+
+static int
+ieee80211_ioctl_siwrts(struct net_device *dev, struct iw_request_info *info,
+	struct iw_param *rts, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	u16 val;
+
+	if (rts->disabled)
+		val = IEEE80211_RTS_MAX;
+	else if (IEEE80211_RTS_MIN <= rts->value &&
+	    rts->value <= IEEE80211_RTS_MAX)
+		val = rts->value;
+	else
+		return -EINVAL;
+	if (val != vap->iv_rtsthreshold) {
+		vap->iv_rtsthreshold = val;
+		if (IS_UP(vap->iv_dev))
+			return ic->ic_reset(ic->ic_dev);
+	}
+	return 0;
+}
+
+static int
+ieee80211_ioctl_giwrts(struct net_device *dev, struct iw_request_info *info,
+	struct iw_param *rts, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+
+	rts->value = vap->iv_rtsthreshold;
+	rts->disabled = (rts->value == IEEE80211_RTS_MAX);
+	rts->fixed = 1;
+
+	return 0;
+}
+
+static int
+ieee80211_ioctl_siwfrag(struct net_device *dev,	struct iw_request_info *info,
+	struct iw_param *rts, char *extra)
+{	/* SIOCSIWFRAG: set fragmentation threshold (256..2346, forced even). */
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	u16 val;
+
+	if (rts->disabled)
+		val = 2346;	/* 2346 == max MPDU size == fragmentation off */
+	else if (rts->value < 256 || rts->value > 2346)
+		return -EINVAL;
+	else
+		val = (rts->value & ~0x1);	/* threshold must be even */
+
+	if (val != vap->iv_fragthreshold) {
+		vap->iv_fragthreshold = val;
+		if (IS_UP(ic->ic_dev))
+			return ic->ic_reset(ic->ic_dev);	/* apply immediately */
+	}
+	return 0;
+}
+
+static int
+ieee80211_ioctl_giwfrag(struct net_device *dev,	struct iw_request_info *info,
+	struct iw_param *rts, char *extra)
+{	/* SIOCGIWFRAG: report the fragmentation threshold (2346 == off). */
+	struct ieee80211vap *vap = dev->priv;
+
+	rts->value = vap->iv_fragthreshold;
+	rts->disabled = (rts->value == 2346);
+	rts->fixed = 1;
+
+	return 0;
+}
+
+static int
+ieee80211_ioctl_siwap(struct net_device *dev, struct iw_request_info *info,
+	struct sockaddr *ap_addr, char *extra)
+{	/* SIOCSIWAP: set the desired BSSID (or WDS peer MAC). */
+	struct ieee80211vap *vap = dev->priv;
+
+	/* NB: should not be set when in AP mode */
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+		return -EINVAL;
+
+	if (vap->iv_opmode == IEEE80211_M_WDS)
+		IEEE80211_ADDR_COPY(vap->wds_mac, &ap_addr->sa_data);
+
+	/* 
+	 * zero address corresponds to 'iwconfig ath0 ap off', which means 
+	 * enable automatic choice of AP without actually forcing a
+	 * reassociation.  
+	 *
+	 * broadcast address corresponds to 'iwconfig ath0 ap any', which
+	 * means scan for the current best AP.
+	 *
+	 * anything else specifies a particular AP.
+	 */
+	vap->iv_flags &= ~IEEE80211_F_DESBSSID;
+	if (!IEEE80211_ADDR_NULL(&ap_addr->sa_data)) {
+		if (!IEEE80211_ADDR_EQ(vap->iv_des_bssid, (u_int8_t*) "\xff\xff\xff\xff\xff\xff"))	/* NOTE(review): this tests the OLD iv_des_bssid, not the incoming ap_addr->sa_data -- looks inverted w.r.t. the "ap any" comment above; confirm intent */
+			vap->iv_flags |= IEEE80211_F_DESBSSID;
+		
+		IEEE80211_ADDR_COPY(vap->iv_des_bssid, &ap_addr->sa_data);
+		if (IS_UP_AUTO(vap))
+			ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);	/* rescan for the new BSSID */
+	}
+	return 0;
+}
+
+static int
+ieee80211_ioctl_giwap(struct net_device *dev, struct iw_request_info *info,
+	struct sockaddr *ap_addr, char *extra)
+{	/* SIOCGIWAP: report the current BSSID / WDS peer / desired BSSID. */
+	struct ieee80211vap *vap = dev->priv;
+
+	if (vap->iv_flags & IEEE80211_F_DESBSSID)
+		IEEE80211_ADDR_COPY(&ap_addr->sa_data, vap->iv_des_bssid);
+	else {
+		if (vap->iv_state == IEEE80211_S_RUN)
+			if (vap->iv_opmode != IEEE80211_M_WDS) 
+				IEEE80211_ADDR_COPY(&ap_addr->sa_data, vap->iv_bss->ni_bssid);
+			else	/* WDS: report the configured peer MAC */
+				IEEE80211_ADDR_COPY(&ap_addr->sa_data, vap->wds_mac);
+		else	/* not associated: report the null address */
+			IEEE80211_ADDR_SET_NULL(&ap_addr->sa_data);
+	}
+	ap_addr->sa_family = ARPHRD_ETHER;
+	return 0;
+}
+
+static int
+ieee80211_ioctl_siwnickn(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *nickname)
+{	/* SIOCSIWNICKN: set the station nickname (max IEEE80211_NWID_LEN). */
+	struct ieee80211vap *vap = dev->priv;
+
+	if (data->length > IEEE80211_NWID_LEN)
+		return -EINVAL;
+
+	memset(vap->iv_nickname, 0, IEEE80211_NWID_LEN);	/* zero-pad the tail */
+	memcpy(vap->iv_nickname, nickname, data->length);
+	vap->iv_nicknamelen = data->length;
+
+	return 0;
+}
+
+static int
+ieee80211_ioctl_giwnickn(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *nickname)
+{	/* SIOCGIWNICKN: copy out the nickname, NUL-terminated. */
+	struct ieee80211vap *vap = dev->priv;
+
+	if (data->length > vap->iv_nicknamelen + 1)
+		data->length = vap->iv_nicknamelen + 1;	/* +1 for the terminator */
+	if (data->length > 0) {
+		memcpy(nickname, vap->iv_nickname, data->length - 1); /* XXX: strcpy? */
+		nickname[data->length-1] = '\0';
+	}
+	return 0;
+}
+
+static int
+find11gchannel(struct ieee80211com *ic, int i, int freq)
+{	/* Return 1 if an 11g channel with this frequency exists at index >= i. */
+	for (; i < ic->ic_nchans; i++) {
+		const struct ieee80211_channel *c = &ic->ic_channels[i];
+		if (c->ic_freq == freq && IEEE80211_IS_CHAN_ANYG(c))
+			return 1;
+	}
+	return 0;
+}
+
+static struct ieee80211_channel *
+findchannel(struct ieee80211com *ic, int ieee, int mode)
+{	/* Locate the channel with IEEE number 'ieee' usable in 'mode', or NULL. */
+	static const u_int chanflags[] = {
+		0,			/* IEEE80211_MODE_AUTO */
+		IEEE80211_CHAN_A,	/* IEEE80211_MODE_11A */
+		IEEE80211_CHAN_B,	/* IEEE80211_MODE_11B */
+		IEEE80211_CHAN_PUREG,	/* IEEE80211_MODE_11G */
+		IEEE80211_CHAN_FHSS,	/* IEEE80211_MODE_FH */
+		IEEE80211_CHAN_108A,	/* IEEE80211_MODE_TURBO_A */
+		IEEE80211_CHAN_108G,	/* IEEE80211_MODE_TURBO_G */
+		IEEE80211_CHAN_ST,	/* IEEE80211_MODE_TURBO_STATIC_A */
+	};
+	u_int modeflags;
+	int i;
+
+	modeflags = chanflags[mode];	/* NB: caller must pass a valid mode index */
+	for (i = 0; i < ic->ic_nchans; i++) {
+		struct ieee80211_channel *c = &ic->ic_channels[i];
+
+		if (c->ic_ieee != ieee)
+			continue;
+		if (mode == IEEE80211_MODE_AUTO) {
+			/* ignore turbo channels for autoselect */
+			if (!(ic->ic_ath_cap & IEEE80211_ATHC_TURBOP) &&
+			    IEEE80211_IS_CHAN_TURBO(c))
+				continue;
+			/*
+			 * XXX special-case 11b/g channels so we
+			 *     always select the g channel if both
+			 *     are present.
+			 */
+			if (!IEEE80211_IS_CHAN_B(c) ||
+			    !find11gchannel(ic, i + 1, c->ic_freq))
+				return c;
+		} else {
+			if ((c->ic_flags & modeflags) == modeflags)
+				return c;
+		}
+	}
+	return NULL;	/* no matching channel */
+}
+
+#define	IEEE80211_MODE_TURBO_STATIC_A	IEEE80211_MODE_MAX
+static int
+ieee80211_check_mode_consistency(struct ieee80211com *ic, int mode,
+	struct ieee80211_channel *c)
+{	/* Return 0 if channel c is usable in 'mode', 1 if they conflict. */
+	if (c == IEEE80211_CHAN_ANYC)
+		return 0;
+	switch (mode) {
+	case IEEE80211_MODE_11B:
+		if (IEEE80211_IS_CHAN_B(c)) 
+			return 0;
+		else
+			return 1;
+		break;
+	case IEEE80211_MODE_11G:
+		if (IEEE80211_IS_CHAN_ANYG(c)) 
+			return 0;
+		else
+			return 1;
+		break;
+	case IEEE80211_MODE_11A:
+		if (IEEE80211_IS_CHAN_A(c)) 
+			return 0;
+		else
+			return 1;
+		break;
+	case IEEE80211_MODE_TURBO_STATIC_A:
+		if (IEEE80211_IS_CHAN_A(c) && IEEE80211_IS_CHAN_STURBO(c)) 
+			return 0;
+		else
+			return 1;
+		break;
+	case IEEE80211_MODE_AUTO:
+		return 0;	/* auto mode accepts any channel */
+		break;
+	}
+	return 1;	/* unknown mode: treat as inconsistent */
+}
+#undef	IEEE80211_MODE_TURBO_STATIC_A
+
+static int
+ieee80211_ioctl_siwfreq(struct net_device *dev, struct iw_request_info *info,
+	struct iw_freq *freq, char *extra)
+{	/* SIOCSIWFREQ: set desired channel (by IEEE number or frequency). */
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_channel *c, *c2;
+	int i;
+	
+	if (freq->e > 1)
+		return -EINVAL;
+	if (freq->e == 1)	/* frequency given in units of 10^e Hz */
+		i = (ic->ic_mhz2ieee)(ic, freq->m / 100000, 0);
+	else
+		i = freq->m;	/* plain IEEE channel number */
+	if (i != 0) {
+		if (i > IEEE80211_CHAN_MAX)
+			return -EINVAL;
+		c = findchannel(ic, i, vap->iv_des_mode);
+		if (c == NULL) {
+			c = findchannel(ic, i, IEEE80211_MODE_AUTO);
+			if (c == NULL)			/* no channel */
+				return -EINVAL;
+		}
+		/*
+		 * Fine tune channel selection based on desired mode:
+		 *   if 11b is requested, find the 11b version of any
+		 *      11g channel returned,
+		 *   if static turbo, find the turbo version of any
+		 *	11a channel return,
+		 *   otherwise we should be ok with what we've got.
+		 */
+		switch (vap->iv_des_mode) {
+		case IEEE80211_MODE_11B:
+			if (IEEE80211_IS_CHAN_ANYG(c)) {
+				c2 = findchannel(ic, i, IEEE80211_MODE_11B);
+				/* NB: should not happen, =>'s 11g w/o 11b */
+				if (c2 != NULL)
+					c = c2;
+			}
+			break;
+		case IEEE80211_MODE_TURBO_A:
+			if (IEEE80211_IS_CHAN_A(c)) {
+				c2 = findchannel(ic, i, IEEE80211_MODE_TURBO_A);
+				if (c2 != NULL)
+					c = c2;
+			}
+			break;
+		default:		/* NB: no static turboG */
+			break;
+		}
+		if (ieee80211_check_mode_consistency(ic,vap->iv_des_mode,c)) {
+			if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+				return -EINVAL;	/* AP may not run on a conflicting channel */
+		}
+		if (vap->iv_state == IEEE80211_S_RUN && c == ic->ic_bsschan)
+			return 0;			/* no change, return */
+
+		/* Don't allow to change to channel with radar found */
+		if (c->ic_flags & IEEE80211_CHAN_RADAR)
+			return -EINVAL;
+
+		/*
+		 * Mark desired channel and if running force a
+		 * radio change.
+		 */
+		vap->iv_des_chan = c;
+	} else {
+		/*
+		 * Interpret channel 0 to mean "no desired channel";
+		 * otherwise there's no way to undo fixing the desired
+		 * channel.
+		 */
+		if (vap->iv_des_chan == IEEE80211_CHAN_ANYC)
+			return 0;
+		vap->iv_des_chan = IEEE80211_CHAN_ANYC;
+	}
+#if 0
+	if (vap->iv_des_chan != IEEE80211_CHAN_ANYC) {
+		int mode = ieee80211_chan2mode(vap->iv_des_chan);
+		if (mode != ic->ic_curmode)
+			ieee80211_setmode(ic, mode);
+	}
+#endif
+	if ((vap->iv_opmode == IEEE80211_M_MONITOR ||
+	    vap->iv_opmode == IEEE80211_M_WDS) &&
+	    vap->iv_des_chan != IEEE80211_CHAN_ANYC) {
+		/* Monitor and wds modes can switch directly. */
+		ic->ic_curchan = vap->iv_des_chan;
+		if (vap->iv_state == IEEE80211_S_RUN) {
+			ic->ic_set_channel(ic);
+		}
+	} else if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+		/* Need to use channel switch announcement on beacon if we are 
+		 * up and running.  We use ic_set_channel directly if we are 
+		 * "running" but not "up".  Otherwise, iv_des_chan will take
+		 * effect when we are transitioned to RUN state later. */
+		if (IS_UP(vap->iv_dev)) {
+			pre_announced_chanswitch(dev, ieee80211_chan2ieee(ic, vap->iv_des_chan), IEEE80211_DEFAULT_CHANCHANGE_TBTT_COUNT);
+		}
+		else if (vap->iv_state == IEEE80211_S_RUN) {
+			ic->ic_curchan = vap->iv_des_chan;
+			ic->ic_set_channel(ic);
+		}
+	} else {
+		/* Need to go through the state machine in case we need
+		 * to reassociate or the like.  The state machine will
+		 * pickup the desired channel and avoid scanning. */
+		if (IS_UP_AUTO(vap))
+			ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+	}
+	return 0;
+}
+
+static int
+ieee80211_ioctl_giwfreq(struct net_device *dev, struct iw_request_info *info,
+	struct iw_freq *freq, char *extra)
+{	/* SIOCGIWFREQ: report current/desired frequency in units of 10^1 Hz. */
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (vap->iv_state == IEEE80211_S_RUN &&
+	    vap->iv_opmode != IEEE80211_M_MONITOR) {
+		/*
+		 * NB: use curchan for monitor mode so you can see
+		 *     manual scanning by apps like kismet.
+		 */
+		KASSERT(ic->ic_bsschan != IEEE80211_CHAN_ANYC,
+			("bss channel not set"));
+		freq->m = ic->ic_bsschan->ic_freq;
+	} else if (vap->iv_state != IEEE80211_S_INIT)	/* e.g. when scanning */
+		freq->m = ic->ic_curchan->ic_freq;
+	else if (vap->iv_des_chan != IEEE80211_CHAN_ANYC)
+		freq->m = vap->iv_des_chan->ic_freq;
+	else
+		freq->m = 0;	/* no channel known yet */
+	freq->m *= 100000;	/* MHz -> 10^1 Hz units (with e = 1 below) */
+	freq->e = 1;
+
+	return 0;
+}
+
+#ifdef ATH_SUPERG_XR 
+/*
+ * Copy desired ssid state from one vap to another.
+ */
+static void
+copy_des_ssid(struct ieee80211vap *dst, const struct ieee80211vap *src)
+{	/* NB: copies only the first iv_des_nssid entries */
+	dst->iv_des_nssid = src->iv_des_nssid;
+	memcpy(dst->iv_des_ssid, src->iv_des_ssid,
+	    src->iv_des_nssid * sizeof(src->iv_des_ssid[0]));
+}
+#endif /* ATH_SUPERG_XR */
+
+static int
+ieee80211_ioctl_siwessid(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *ssid)
+{	/* SIOCSIWESSID: set the desired SSID (flags == 0 means "any"). */
+	struct ieee80211vap *vap = dev->priv;
+
+	if (vap->iv_opmode == IEEE80211_M_WDS)
+		return -EOPNOTSUPP;	/* WDS links have no SSID */
+
+	if (data->flags == 0)		/* ANY */
+		vap->iv_des_nssid = 0;
+	else {
+		if (data->length > IEEE80211_NWID_LEN)
+			data->length = IEEE80211_NWID_LEN;
+		/* NB: always use entry 0 */
+		memcpy(vap->iv_des_ssid[0].ssid, ssid, data->length);
+		vap->iv_des_ssid[0].len = data->length;
+		vap->iv_des_nssid = 1;
+		/*
+		 * Deduct a trailing \0 since iwconfig passes a string
+		 * length that includes this.  Unfortunately this means
+		 * that specifying a string with multiple trailing \0's
+		 * won't be handled correctly.  Not sure there's a good
+		 * solution; the API is botched (the length should be
+		 * exactly those bytes that are meaningful and not include
+		 * extraneous stuff).
+		 */
+		if (data->length > 0 &&
+		    vap->iv_des_ssid[0].ssid[data->length - 1] == '\0')
+			vap->iv_des_ssid[0].len--;
+	}
+#ifdef ATH_SUPERG_XR 
+	if (vap->iv_xrvap != NULL && !(vap->iv_flags & IEEE80211_F_XR)) {
+		if (data->flags == 0)
+			vap->iv_des_nssid = 0;
+		else
+			copy_des_ssid(vap->iv_xrvap, vap);	/* mirror to the XR vap */
+	}
+#endif
+	return IS_UP_AUTO(vap) ? ieee80211_init(vap->iv_dev, RESCAN) : 0;
+}
+
+static int
+ieee80211_ioctl_giwessid(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *essid)
+{	/* SIOCGIWESSID: report desired SSID (AP) or current/desired SSID (STA). */
+	struct ieee80211vap *vap = dev->priv;
+
+	if (vap->iv_opmode == IEEE80211_M_WDS)
+		return -EOPNOTSUPP;	/* WDS links have no SSID */
+
+	data->flags = 1;		/* active */
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+		if (vap->iv_des_nssid > 0) {
+			if (data->length > vap->iv_des_ssid[0].len)
+				data->length = vap->iv_des_ssid[0].len;
+			memcpy(essid, vap->iv_des_ssid[0].ssid, data->length);
+		} else
+			data->length = 0;
+	} else {
+		if (vap->iv_des_nssid == 0) {
+			/* no desired SSID set: report the associated BSS's SSID */
+			if (data->length > vap->iv_bss->ni_esslen)
+				data->length = vap->iv_bss->ni_esslen;
+			memcpy(essid, vap->iv_bss->ni_essid, data->length);
+		} else {
+			if (data->length > vap->iv_des_ssid[0].len)
+				data->length = vap->iv_des_ssid[0].len;
+			memcpy(essid, vap->iv_des_ssid[0].ssid, data->length);
+		}
+	}
+	return 0;
+}
+
+static int
+ieee80211_ioctl_giwrange(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *extra)
+{	/* SIOCGIWRANGE: fill in the capability/range structure for userspace. */
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni = vap->iv_bss;
+	struct iw_range *range = (struct iw_range *) extra;
+	struct ieee80211_rateset *rs;
+	u_int8_t reported[IEEE80211_CHAN_BYTES];	/* XXX stack usage? */
+	int i, r;
+	int step = 0;
+
+	data->length = sizeof(struct iw_range);
+	memset(range, 0, sizeof(struct iw_range));
+
+	/* txpower (128 values, but will print out only IW_MAX_TXPOWER) */
+#if WIRELESS_EXT >= 10
+	range->num_txpower = (ic->ic_txpowlimit >= 8) ? IW_MAX_TXPOWER : ic->ic_txpowlimit;
+	step = ic->ic_txpowlimit / (2 * (IW_MAX_TXPOWER - 1));
+ 
+	range->txpower[0] = 0;
+	for (i = 1; i < IW_MAX_TXPOWER; i++)
+		range->txpower[i] = (ic->ic_txpowlimit/2)
+			- (IW_MAX_TXPOWER - i - 1) * step;
+#endif
+
+
+	range->txpower_capa = IW_TXPOW_DBM;
+
+	if (vap->iv_opmode == IEEE80211_M_STA ||
+	    vap->iv_opmode == IEEE80211_M_IBSS) {
+		range->min_pmp = 1 * 1024;
+		range->max_pmp = 65535 * 1024;
+		range->min_pmt = 1 * 1024;
+		range->max_pmt = 1000 * 1024;
+		range->pmp_flags = IW_POWER_PERIOD;
+		range->pmt_flags = IW_POWER_TIMEOUT;
+		range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT |
+			IW_POWER_UNICAST_R | IW_POWER_ALL_R;
+	}
+
+	range->we_version_compiled = WIRELESS_EXT;
+	range->we_version_source = 13;
+
+	range->retry_capa = IW_RETRY_LIMIT;
+	range->retry_flags = IW_RETRY_LIMIT;
+	range->min_retry = 0;
+	range->max_retry = 255;
+
+	range->num_channels = ic->ic_nchans;
+
+	range->num_frequency = 0;
+	memset(reported, 0, sizeof(reported));
+	for (i = 0; i < ic->ic_nchans; i++) {
+		const struct ieee80211_channel *c = &ic->ic_channels[i];
+
+		/* discard if previously reported (e.g. b/g) */
+		if (isclr(reported, c->ic_ieee)) {
+			setbit(reported, c->ic_ieee);
+			range->freq[range->num_frequency].i = c->ic_ieee;
+			range->freq[range->num_frequency].m =
+				ic->ic_channels[i].ic_freq * 100000;
+			range->freq[range->num_frequency].e = 1;
+			if (++range->num_frequency == IW_MAX_FREQUENCIES)
+				break;
+		}
+	}
+
+	/* Atheros' RSSI value is SNR: 0 -> 60 for old chipsets. Range 
+	 * for newer chipsets is unknown. This value is arbitrarily chosen 
+	 * to give an indication that full rate will be available and to be 
+	 * a practicable maximum. */
+	range->max_qual.qual  = 70;
+#if WIRELESS_EXT >= 19
+	/* XXX: This should be updated to use the current noise floor. */
+	/* These are negative full bytes.
+	 * Min. quality is noise + 1 */
+	range->max_qual.updated |= IW_QUAL_DBM;
+	range->max_qual.level = ATH_DEFAULT_NOISE + 1;
+	range->max_qual.noise = ATH_DEFAULT_NOISE;
+#else
+	/* Values larger than the maximum are assumed to be absolute */
+	range->max_qual.level = 0;
+	range->max_qual.noise = 0;
+#endif
+
+	range->sensitivity = 1;
+
+	range->max_encoding_tokens = IEEE80211_WEP_NKID;
+	/* XXX query driver to find out supported key sizes */
+	range->num_encoding_sizes = 3;
+	range->encoding_size[0] = 5;		/* 40-bit */
+	range->encoding_size[1] = 13;		/* 104-bit */
+	range->encoding_size[2] = 16;		/* 128-bit */
+
+	/* XXX this only works for station mode */
+	rs = &ni->ni_rates;
+	range->num_bitrates = rs->rs_nrates;
+	if (range->num_bitrates > IW_MAX_BITRATES)
+		range->num_bitrates = IW_MAX_BITRATES;
+	for (i = 0; i < range->num_bitrates; i++) {
+		r = rs->rs_rates[i] & IEEE80211_RATE_VAL;
+		range->bitrate[i] = (r * 1000000) / 2;	/* 500 Kb/s units -> bps */
+	}
+
+	/* estimated maximum TCP throughput values (bps) */
+	range->throughput = 5500000;
+
+	range->min_rts = 0;
+	range->max_rts = 2347;
+	range->min_frag = 256;
+	range->max_frag = 2346;
+
+#if WIRELESS_EXT >= 17
+	/* Event capability (kernel) */
+	IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
+   
+	/* Event capability (driver) */
+	if (vap->iv_opmode == IEEE80211_M_STA ||
+		 vap->iv_opmode == IEEE80211_M_IBSS ||
+		 vap->iv_opmode == IEEE80211_M_AHDEMO) {
+		/* for now, only ibss, ahdemo, sta has this cap */
+		IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
+	}
+
+	if (vap->iv_opmode == IEEE80211_M_STA) {
+		/* for sta only */
+		IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP); 
+		IW_EVENT_CAPA_SET(range->event_capa, IWEVREGISTERED);
+		IW_EVENT_CAPA_SET(range->event_capa, IWEVEXPIRED);
+	}
+	
+	/* this is used for reporting replay failure, which is used by the different encoding schemes */
+	IW_EVENT_CAPA_SET(range->event_capa, IWEVCUSTOM);
+#endif
+
+#if WIRELESS_EXT >= 18
+	/* report supported WPA/WPA2 capabilities to userspace */
+	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
+               IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
+#endif
+	
+	return 0;
+}
+
+static int
+ieee80211_ioctl_setspy(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *extra)
+{	/* SIOCSIWSPY: record a list of MAC addresses to monitor RSSI for. */
+	/* save the list of node addresses */
+	struct ieee80211vap *vap = dev->priv;
+	struct sockaddr address[IW_MAX_SPY];
+	unsigned int number = data->length;
+	int i;	/* NB: signed vs unsigned 'number'; safe since number <= IW_MAX_SPY */
+
+	if (number > IW_MAX_SPY)
+		return -E2BIG;
+	
+	/* get the addresses into the driver */
+	if (data->pointer) {
+		if (copy_from_user(address, data->pointer,
+		    sizeof(struct sockaddr) * number))
+			return -EFAULT;
+        } else
+		return -EFAULT;
+
+	/* copy the MAC addresses into a list */
+	if (number > 0) {
+		/* extract the MAC addresses */
+		for (i = 0; i < number; i++)
+			memcpy(&vap->iv_spy.mac[i * IEEE80211_ADDR_LEN],
+				address[i].sa_data, IEEE80211_ADDR_LEN);
+		/* init rssi timestamps */
+		memset(vap->iv_spy.ts_rssi, 0, IW_MAX_SPY * sizeof(u_int32_t));
+	}
+	vap->iv_spy.num = number;
+
+	return 0;
+}
+
+static int
+ieee80211_ioctl_getspy(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *extra)
+{	/* SIOCGIWSPY: report per-address quality for the recorded spy list. */
+	/*
+	 * locate nodes by mac (ieee80211_find_node()),
+	 * copy out rssi, set updated flag appropriately
+	 */
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211_node_table *nt = &vap->iv_ic->ic_sta;
+	struct ieee80211_node *ni;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct sockaddr *address;
+	struct iw_quality *spy_stat;
+	unsigned int number = vap->iv_spy.num;
+	int i;
+
+	address = (struct sockaddr *) extra;
+	spy_stat = (struct iw_quality *) (extra + number * sizeof(struct sockaddr));
+
+	for (i = 0; i < number; i++) {
+		memcpy(address[i].sa_data, &vap->iv_spy.mac[i * IEEE80211_ADDR_LEN],
+			IEEE80211_ADDR_LEN);
+		address[i].sa_family = AF_PACKET;
+	}
+
+	/* locate a node, read its rssi, check if updated, convert to dBm */
+	for (i = 0; i < number; i++) {
+		ni = ieee80211_find_node(nt, &vap->iv_spy.mac[i * IEEE80211_ADDR_LEN]);
+		/* TODO: free node ? */
+		/* check we are associated w/ this vap */
+		if (ni && (ni->ni_vap == vap)) {
+			set_quality(&spy_stat[i], ni->ni_rssi, ic->ic_channoise);
+			if (ni->ni_rstamp != vap->iv_spy.ts_rssi[i]) {
+				vap->iv_spy.ts_rssi[i] = ni->ni_rstamp;	/* new sample seen */
+			} else {
+				spy_stat[i].updated = 0;	/* stale since last read */
+			}
+		} else {
+			spy_stat[i].updated = IW_QUAL_ALL_INVALID;
+		}
+	}
+
+	/* copy results to userspace */
+	data->length = number;
+	return 0;
+}
+
+#if WIRELESS_EXT >= 16
+/* Enhanced iwspy support */
+static int
+ieee80211_ioctl_setthrspy(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *extra)
+{	/* SIOCSIWTHRSPY: set (or clear) the iwspy RSSI threshold pair. */
+	struct ieee80211vap *vap = dev->priv;
+	struct iw_thrspy threshold;
+
+	if (data->length != 1)
+		return -EINVAL;
+	
+	/* get the threshold values into the driver */
+	if (data->pointer) {
+		if (copy_from_user(&threshold, data->pointer,
+		    sizeof(struct iw_thrspy)))
+			return -EFAULT;
+        } else
+		return -EINVAL;
+		
+	if (threshold.low.level == 0) {
+		/* disable threshold */
+		vap->iv_spy.thr_low = 0;
+		vap->iv_spy.thr_high = 0;
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG,
+			"%s: disabled iw_spy threshold\n", __func__);
+	} else {
+		/* We are passed a signal level/strength - calculate 
+		 * corresponding RSSI values */
+		/* XXX: We should use current noise value. */
+		vap->iv_spy.thr_low = threshold.low.level + ATH_DEFAULT_NOISE;
+		vap->iv_spy.thr_high = threshold.high.level + ATH_DEFAULT_NOISE;
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG,
+			"%s: enabled iw_spy threshold\n", __func__);
+	}
+
+	return 0;
+}
+
+static int
+ieee80211_ioctl_getthrspy(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *extra)
+{	/* SIOCGIWTHRSPY: report the configured iwspy threshold pair. */
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct iw_thrspy *threshold;	
+	
+	threshold = (struct iw_thrspy *) extra;
+
+	/* set threshold values */
+	set_quality(&(threshold->low), vap->iv_spy.thr_low, ic->ic_channoise);
+	set_quality(&(threshold->high), vap->iv_spy.thr_high, ic->ic_channoise);
+
+	/* copy results to userspace */
+	data->length = 1;
+	
+	return 0;
+}
+#endif
+
+static int
+ieee80211_ioctl_siwmode(struct net_device *dev, struct iw_request_info *info,
+	__u32 *mode, char *extra)
+{	/* SIOCSIWMODE: the vap's mode is fixed at creation; only accept a
+	 * request that matches the current media mode, reject anything else. */
+	struct ieee80211vap *vap = dev->priv;
+	struct ifmediareq imr;
+	int valid = 0;
+	
+	memset(&imr, 0, sizeof(imr));
+	vap->iv_media.ifm_status(vap->iv_dev, &imr);
+	
+	if (imr.ifm_active & IFM_IEEE80211_HOSTAP)
+		valid = (*mode == IW_MODE_MASTER);
+#if WIRELESS_EXT >= 15
+	else if (imr.ifm_active & IFM_IEEE80211_MONITOR)
+		valid = (*mode == IW_MODE_MONITOR);
+#endif
+	else if (imr.ifm_active & IFM_IEEE80211_ADHOC)
+		valid = (*mode == IW_MODE_ADHOC);
+	else if (imr.ifm_active & IFM_IEEE80211_WDS)
+		valid = (*mode == IW_MODE_REPEAT);
+	else
+		valid = (*mode == IW_MODE_INFRA);
+		
+	return valid ? 0 : -EINVAL;	
+}
+
+static int
+ieee80211_ioctl_giwmode(struct net_device *dev,	struct iw_request_info *info,
+	__u32 *mode, char *extra)
+{	/* SIOCGIWMODE: map the active media flags to a wireless-ext mode. */
+	struct ieee80211vap *vap = dev->priv;
+	struct ifmediareq imr;
+
+	memset(&imr, 0, sizeof(imr));
+	vap->iv_media.ifm_status(vap->iv_dev, &imr);
+
+	if (imr.ifm_active & IFM_IEEE80211_HOSTAP)
+		*mode = IW_MODE_MASTER;
+#if WIRELESS_EXT >= 15
+	else if (imr.ifm_active & IFM_IEEE80211_MONITOR)
+		*mode = IW_MODE_MONITOR;
+#endif
+	else if (imr.ifm_active & IFM_IEEE80211_ADHOC)
+		*mode = IW_MODE_ADHOC;
+	else if (imr.ifm_active & IFM_IEEE80211_WDS)
+		*mode = IW_MODE_REPEAT;
+	else
+		*mode = IW_MODE_INFRA;
+	return 0;
+}
+
+static int
+ieee80211_ioctl_siwpower(struct net_device *dev, struct iw_request_info *info,
+	struct iw_param *wrq, char *extra)
+{	/* SIOCSIWPOWER: enable/disable power management and set its timing. */
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+
+	/* XXX: These values, flags, and caps do not seem to be used elsewhere 
+	 * at all? */
+
+	if ((ic->ic_caps & IEEE80211_C_PMGT) == 0)
+		return -EOPNOTSUPP;
+	
+	if (wrq->disabled) {
+		if (ic->ic_flags & IEEE80211_F_PMGTON)
+			ic->ic_flags &= ~IEEE80211_F_PMGTON;
+	} else {
+	switch (wrq->flags & IW_POWER_MODE) {
+	case IW_POWER_UNICAST_R:
+	case IW_POWER_ALL_R:
+	case IW_POWER_ON:
+			if (wrq->flags & IW_POWER_PERIOD) {
+				if (IEEE80211_BINTVAL_VALID(wrq->value))
+					ic->ic_lintval = IEEE80211_MS_TO_TU(wrq->value);
+				else
+		return -EINVAL;
+	}
+			if (wrq->flags & IW_POWER_TIMEOUT)
+		ic->ic_holdover = IEEE80211_MS_TO_TU(wrq->value);
+			
+		ic->ic_flags |= IEEE80211_F_PMGTON;
+			break;
+		default:
+			return -EINVAL;
+	}
+	}
+	
+	return IS_UP(ic->ic_dev) ? ic->ic_reset(ic->ic_dev) : 0;
+}
+
+static int
+ieee80211_ioctl_giwpower(struct net_device *dev, struct iw_request_info *info,
+	struct iw_param *rrq, char *extra)
+{	/* SIOCGIWPOWER: report power-management state and timing in ms. */
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+
+	rrq->disabled = (ic->ic_flags & IEEE80211_F_PMGTON) == 0;
+	if (!rrq->disabled) {
+		switch (rrq->flags & IW_POWER_TYPE) {
+		case IW_POWER_TIMEOUT:
+			rrq->flags = IW_POWER_TIMEOUT;
+			rrq->value = IEEE80211_TU_TO_MS(ic->ic_holdover);
+			break;
+		case IW_POWER_PERIOD:
+			rrq->flags = IW_POWER_PERIOD;
+			rrq->value = IEEE80211_TU_TO_MS(ic->ic_lintval);
+			break;
+		}
+		rrq->flags |= IW_POWER_ALL_R;	/* receive all frames while asleep */
+	}
+	return 0;
+}
+
+/* WE-21 added the IW_RETRY_SHORT/IW_RETRY_LONG retry modifiers.
+ * Note: IW_RETRY_SHORT/IW_RETRY_LONG is purely a userspace improvement, so
+ * we can simply provide the defines required for its support here; a user
+ * with an older kernel but newer WE tools will still benefit from this. */
+#if WIRELESS_EXT < 21
+/* Retry limits and lifetime flags available */
+#ifndef IW_RETRY_LIFETIME /* Can't pinpoint when this guy was introduced */
+#define IW_RETRY_LIFETIME       0x2000  /* Maximum duration of retries in us */
+#endif /* IW_RETRY_LIFETIME */
+#define IW_RETRY_SHORT          0x0010  /* Value is for short packets  */
+#define IW_RETRY_LONG           0x0020  /* Value is for long packets */
+#endif /* WIRELESS_EXT < 21 */
+
+static int
+ieee80211_ioctl_siwretry(struct net_device *dev, struct iw_request_info *info,
+	struct iw_param *rrq, char *extra)
+{	/* SIOCSIWRETRY: configure software retry limits (short/long/both). */
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (rrq->disabled) {
+		if (vap->iv_flags & IEEE80211_F_SWRETRY) {
+			vap->iv_flags &= ~IEEE80211_F_SWRETRY;
+			goto done;
+		}
+		/* Already disabled in iv_flags, nothing to do */
+		return 0;
+	}
+	if ((vap->iv_caps & IEEE80211_C_SWRETRY) == 0)
+		return -EOPNOTSUPP;
+	if (rrq->value < 0)
+		return -EINVAL;
+	if (rrq->flags & IW_RETRY_LIMIT) {	/* NB: must test the type bit, not exact equality, or the SHORT/LONG modifier bits make this never match */
+		if (rrq->value >= 0) {	/* NB: always true after the check above */
+			if (rrq->flags & IW_RETRY_SHORT)
+				vap->iv_txmin = rrq->value;
+			else if (rrq->flags & IW_RETRY_LONG)
+				vap->iv_txmax = rrq->value;
+			else {	/* no modifier: set both limits */
+				vap->iv_txmin = rrq->value;
+				vap->iv_txmax = rrq->value;
+			}
+			vap->iv_flags |= IEEE80211_F_SWRETRY;
+		} else {
+			vap->iv_flags &= ~IEEE80211_F_SWRETRY;
+		}
+	}
+	if (rrq->flags & IW_RETRY_LIFETIME)
+		vap->iv_txlifetime = 0;
+done:
+	return IS_UP(ic->ic_dev) ? ic->ic_reset(ic->ic_dev) : 0;
+}
+
+static int
+ieee80211_ioctl_giwretry(struct net_device *dev, struct iw_request_info *info,
+	struct iw_param *rrq, char *extra)
+{	/* SIOCGIWRETRY: report software retry limits / lifetime. */
+	struct ieee80211vap *vap = dev->priv;
+
+	rrq->disabled = (vap->iv_flags & IEEE80211_F_SWRETRY) == 0;
+	if (!rrq->disabled) {
+		switch (rrq->flags & IW_RETRY_TYPE) {
+		case IW_RETRY_LIFETIME:
+			rrq->flags = IW_RETRY_LIFETIME;
+			rrq->value = IEEE80211_TU_TO_MS(vap->iv_txlifetime);
+			break;
+		case IW_RETRY_LIMIT:
+			/* NB: examine request modifier bits before overwriting flags */
+			switch (rrq->flags & IW_RETRY_MODIFIER) {
+			case IW_RETRY_MIN:
+				rrq->flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
+				rrq->value = vap->iv_txmin;
+				break;
+			default:	/* IW_RETRY_MAX or no modifier */
+				rrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+				rrq->value = vap->iv_txmax;
+				break;
+			}
+			break;
+		}
+	}
+	return 0;
+}
+
+static int
+ieee80211_ioctl_siwtxpow(struct net_device *dev, struct iw_request_info *info,
+	struct iw_param *rrq, char *extra)
+{	/* SIOCSIWTXPOW: fix/unfix the transmit power (dBm) or disable tx. */
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	int fixed, disabled;
+
+	fixed = (ic->ic_flags & IEEE80211_F_TXPOW_FIXED);
+	disabled = (fixed && vap->iv_bss->ni_txpower == 0);
+	if (rrq->disabled) {
+		if (!disabled) {
+			if ((ic->ic_caps & IEEE80211_C_TXPMGT) == 0)
+				return -EOPNOTSUPP;
+			ic->ic_flags |= IEEE80211_F_TXPOW_FIXED;
+			vap->iv_bss->ni_txpower = 0;	/* fixed at zero == disabled */
+			goto done;
+		}
+		return 0;
+	}
+
+	if (rrq->fixed) {
+		if ((ic->ic_caps & IEEE80211_C_TXPMGT) == 0)
+			return -EOPNOTSUPP;
+		if (rrq->flags != IW_TXPOW_DBM)
+			return -EOPNOTSUPP;
+		if (ic->ic_bsschan != IEEE80211_CHAN_ANYC) {
+			if (ic->ic_bsschan->ic_maxregpower >= rrq->value &&
+			    ic->ic_txpowlimit/2 >= rrq->value) {	/* limits kept in 0.5 dBm units */
+ 			        vap->iv_bss->ni_txpower = 2 * rrq->value;
+				ic->ic_newtxpowlimit = 2 * rrq->value;
+ 				ic->ic_flags |= IEEE80211_F_TXPOW_FIXED;
+ 			} else
+				return -EINVAL;
+		} else {
+			/*
+			 * No channel set yet
+			 */
+			if (ic->ic_txpowlimit/2 >= rrq->value) {
+				vap->iv_bss->ni_txpower = 2 * rrq->value;
+				ic->ic_newtxpowlimit = 2 * rrq->value;
+				ic->ic_flags |= IEEE80211_F_TXPOW_FIXED;
+			}
+			else
+				return -EINVAL;
+		}
+	} else {
+		if (!fixed)		/* no change */
+			return 0;
+		ic->ic_flags &= ~IEEE80211_F_TXPOW_FIXED;
+	}
+done:
+	return IS_UP(ic->ic_dev) ? ic->ic_reset(ic->ic_dev) : 0;
+}
+
+static int
+ieee80211_ioctl_giwtxpow(struct net_device *dev, struct iw_request_info *info,
+	struct iw_param *rrq, char *extra)
+{	/* SIOCGIWTXPOW: report transmit power in dBm (stored in 0.5 dBm units). */
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+
+	rrq->value = vap->iv_bss->ni_txpower / 2;
+	rrq->fixed = (ic->ic_flags & IEEE80211_F_TXPOW_FIXED) != 0;
+	rrq->disabled = (rrq->fixed && rrq->value == 0);
+	rrq->flags = IW_TXPOW_DBM;
+	return 0;
+}
+
+struct waplistreq {	/* XXX: not the right place for declaration? */
+	struct ieee80211vap *vap;	/* vap the scan iteration runs on */
+	struct sockaddr addr[IW_MAX_AP];	/* collected AP/BSS addresses */
+	struct iw_quality qual[IW_MAX_AP];	/* matching signal quality */
+	int i;	/* number of entries filled so far */
+};
+
+static int
+waplist_cb(void *arg, const struct ieee80211_scan_entry *se)
+{	/* Scan iterator: append one entry to the waplistreq, up to IW_MAX_AP. */
+	struct waplistreq *req = arg;
+	int i = req->i;
+
+	if (i >= IW_MAX_AP)
+		return 0;	/* list full; ignore further entries */
+	req->addr[i].sa_family = ARPHRD_ETHER;
+	if (req->vap->iv_opmode == IEEE80211_M_HOSTAP)
+		IEEE80211_ADDR_COPY(req->addr[i].sa_data, se->se_macaddr);
+	else
+		IEEE80211_ADDR_COPY(req->addr[i].sa_data, se->se_bssid);
+	set_quality(&req->qual[i], se->se_rssi, ATH_DEFAULT_NOISE);
+	req->i = i + 1;
+
+	return 0;
+}
+
+static int
+ieee80211_ioctl_iwaplist(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *extra)
+{	/* SIOCGIWAPLIST: report scanned APs, addresses first then qualities. */
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct waplistreq req;		/* XXX off stack */
+
+	req.vap = vap;
+	req.i = 0;
+	ieee80211_scan_iterate(ic, waplist_cb, &req);
+
+	data->length = req.i;
+	memcpy(extra, &req.addr, req.i * sizeof(req.addr[0]));
+	data->flags = 1;		/* signal quality present (sort of) */
+	memcpy(extra + req.i * sizeof(req.addr[0]), &req.qual,
+		req.i * sizeof(req.qual[0]));
+
+	return 0;
+}
+
+#ifdef SIOCGIWSCAN
+static int
+ieee80211_ioctl_siwscan(struct net_device *dev,	struct iw_request_info *info,
+	struct iw_point *data, char *extra)
+{	/* SIOCSIWSCAN: kick off an active scan (optionally for one ESSID). */
+	struct ieee80211vap *vap = dev->priv;
+
+	/*
+	 * XXX don't permit a scan to be started unless we
+	 * know the device is ready.  For the moment this means
+	 * the device is marked up as this is the required to
+	 * initialize the hardware.  It would be better to permit
+	 * scanning prior to being up but that'll require some
+	 * changes to the infrastructure.
+	 */
+	if (!IS_UP(vap->iv_dev))
+		return -ENETDOWN;	/* XXX */
+	/* XXX always manual... */
+	IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+		"%s: active scan request\n", __func__);
+	preempt_scan(dev, 100, 100);	/* cancel any scan already in progress */
+#if WIRELESS_EXT > 17
+	if (data && (data->flags & IW_SCAN_THIS_ESSID)) {
+		struct iw_scan_req req;
+		struct ieee80211_scan_ssid ssid;
+		int copyLength;
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+			"%s: SCAN_THIS_ESSID requested\n", __func__);
+		if (data->length > sizeof req) {
+			copyLength = sizeof req;
+		} else {
+			copyLength = data->length;
+		}
+		memset(&req, 0, sizeof req);
+		if (copy_from_user(&req, data->pointer, copyLength))
+			return -EFAULT;
+		memcpy(&ssid.ssid, req.essid, sizeof ssid.ssid);
+		ssid.len = req.essid_len;	/* NOTE(review): not clamped to sizeof ssid.ssid here -- presumably WE caps essid_len at IW_ESSID_MAX_SIZE; confirm */
+		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+				  "%s: requesting scan of essid '%s'\n", __func__, ssid.ssid);
+		(void) ieee80211_start_scan(vap,
+			IEEE80211_SCAN_ACTIVE |
+			IEEE80211_SCAN_NOPICK |
+			IEEE80211_SCAN_ONCE, IEEE80211_SCAN_FOREVER,
+			1, &ssid);
+		return 0;
+	}
+#endif		 
+	(void) ieee80211_start_scan(vap, IEEE80211_SCAN_ACTIVE |
+		IEEE80211_SCAN_NOPICK |	IEEE80211_SCAN_ONCE,
+		IEEE80211_SCAN_FOREVER,
+		/* XXX use ioctl params */
+		vap->iv_des_nssid, vap->iv_des_ssid);
+	return 0;
+}
+
+#if WIRELESS_EXT > 14
+/*
+ * Encode a WPA or RSN information element as a custom
+ * element using the hostap format.
+ */
+static u_int
+encode_ie(void *buf, size_t bufsize, const u_int8_t *ie, size_t ielen,
+	const char *leader, size_t leader_len)
+{
+	u_int8_t *p;
+	int i;
+
+	if (bufsize < leader_len)
+		return 0;
+	p = buf;
+	memcpy(p, leader, leader_len);
+	bufsize -= leader_len;
+	p += leader_len;
+	for (i = 0; i < ielen && bufsize > 2; i++) {
+		p += sprintf(p, "%02x", ie[i]);
+		bufsize -= 2;
+	}
+	return (i == ielen ? p - (u_int8_t *)buf : 0);
+}
+#endif /* WIRELESS_EXT > 14 */
+
+struct iwscanreq {		/* XXX: right place for this declaration? */
+	struct ieee80211vap *vap;
+	char *current_ev;
+	char *end_buf;
+	int mode;
+};
+
+static int
+giwscan_cb(void *arg, const struct ieee80211_scan_entry *se)
+{
+	struct iwscanreq *req = arg;
+	struct ieee80211vap *vap = req->vap;
+	char *current_ev = req->current_ev;
+	char *end_buf = req->end_buf;
+	char *last_ev;
+#if WIRELESS_EXT > 14
+#define MAX_IE_LENGTH 64 * 2 + 30
+	char buf[MAX_IE_LENGTH];
+#ifndef IWEVGENIE
+	static const char rsn_leader[] = "rsn_ie=";
+	static const char wpa_leader[] = "wpa_ie=";
+#endif
+#endif
+	struct iw_event iwe;
+	char *current_val;
+	int j;
+
+	if (current_ev >= end_buf)
+		return E2BIG;
+	/* WPA/!WPA sort criteria */
+	if ((req->mode != 0) ^ (se->se_wpa_ie != NULL))
+		return 0;
+
+	memset(&iwe, 0, sizeof(iwe));
+	last_ev = current_ev;
+	iwe.cmd = SIOCGIWAP;
+	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+		IEEE80211_ADDR_COPY(iwe.u.ap_addr.sa_data, se->se_macaddr);
+	else
+		IEEE80211_ADDR_COPY(iwe.u.ap_addr.sa_data, se->se_bssid);
+	current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_ADDR_LEN);
+
+	/* We ran out of space in the buffer. */
+	if (last_ev == current_ev)
+	  return E2BIG;
+
+	memset(&iwe, 0, sizeof(iwe));
+	last_ev = current_ev;
+	iwe.cmd = SIOCGIWESSID;
+	iwe.u.data.flags = 1;
+	if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+		iwe.u.data.length = vap->iv_des_nssid > 0 ?
+			vap->iv_des_ssid[0].len : 0;
+		current_ev = iwe_stream_add_point(current_ev,
+			end_buf, &iwe, vap->iv_des_ssid[0].ssid);
+	} else {
+		iwe.u.data.length = se->se_ssid[1];
+		current_ev = iwe_stream_add_point(current_ev,
+			end_buf, &iwe, (char *) se->se_ssid+2);
+	}
+
+	/* We ran out of space in the buffer. */
+	if (last_ev == current_ev)
+	  return E2BIG;
+
+	if (se->se_capinfo & (IEEE80211_CAPINFO_ESS|IEEE80211_CAPINFO_IBSS)) {
+		memset(&iwe, 0, sizeof(iwe));
+		last_ev = current_ev;
+		iwe.cmd = SIOCGIWMODE;
+		iwe.u.mode = se->se_capinfo & IEEE80211_CAPINFO_ESS ?
+			IW_MODE_MASTER : IW_MODE_ADHOC;
+		current_ev = iwe_stream_add_event(current_ev,
+			end_buf, &iwe, IW_EV_UINT_LEN);
+
+		/* We ran out of space in the buffer. */
+		if (last_ev == current_ev)
+		  return E2BIG;
+	}
+
+	memset(&iwe, 0, sizeof(iwe));
+	last_ev = current_ev;
+	iwe.cmd = SIOCGIWFREQ;
+	iwe.u.freq.m = se->se_chan->ic_freq * 100000;
+	iwe.u.freq.e = 1;
+	current_ev = iwe_stream_add_event(current_ev,
+		end_buf, &iwe, IW_EV_FREQ_LEN);
+
+	/* We ran out of space in the buffer. */
+	if (last_ev == current_ev)
+	  return E2BIG;
+
+	memset(&iwe, 0, sizeof(iwe));
+	last_ev = current_ev;
+	iwe.cmd = IWEVQUAL;
+	set_quality(&iwe.u.qual, se->se_rssi, ATH_DEFAULT_NOISE);
+	current_ev = iwe_stream_add_event(current_ev,
+		end_buf, &iwe, IW_EV_QUAL_LEN);
+
+	/* We ran out of space in the buffer */
+	if (last_ev == current_ev)
+	  return E2BIG;
+
+	memset(&iwe, 0, sizeof(iwe));
+	last_ev = current_ev;
+	iwe.cmd = SIOCGIWENCODE;
+	if (se->se_capinfo & IEEE80211_CAPINFO_PRIVACY)
+		iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+	else
+		iwe.u.data.flags = IW_ENCODE_DISABLED;
+	iwe.u.data.length = 0;
+	current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, "");
+
+	/* We ran out of space in the buffer. */
+	if (last_ev == current_ev)
+	  return E2BIG;
+
+	memset(&iwe, 0, sizeof(iwe));
+	last_ev = current_ev;
+	iwe.cmd = SIOCGIWRATE;
+	current_val = current_ev + IW_EV_LCP_LEN;
+	/* NB: not sorted, does it matter? */
+	for (j = 0; j < se->se_rates[1]; j++) {
+		int r = se->se_rates[2 + j] & IEEE80211_RATE_VAL;
+		if (r != 0) {
+			iwe.u.bitrate.value = r * (1000000 / 2);
+			current_val = iwe_stream_add_value(current_ev,
+				current_val, end_buf, &iwe,
+				IW_EV_PARAM_LEN);
+		}
+	}
+	for (j = 0; j < se->se_xrates[1]; j++) {
+		int r = se->se_xrates[2+j] & IEEE80211_RATE_VAL;
+		if (r != 0) {
+			iwe.u.bitrate.value = r * (1000000 / 2);
+			current_val = iwe_stream_add_value(current_ev,
+				current_val, end_buf, &iwe,
+				IW_EV_PARAM_LEN);
+		}
+	}
+	/* remove fixed header if no rates were added */
+	if ((current_val - current_ev) > IW_EV_LCP_LEN) {
+		current_ev = current_val;
+	} else {
+	  /* We ran out of space in the buffer. */
+	  if (last_ev == current_ev)
+	    return E2BIG;
+	}
+
+#if WIRELESS_EXT > 14
+	memset(&iwe, 0, sizeof(iwe));
+	last_ev = current_ev;
+	iwe.cmd = IWEVCUSTOM;
+	snprintf(buf, sizeof(buf), "bcn_int=%d", se->se_intval);
+	iwe.u.data.length = strlen(buf);
+	current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, buf);
+
+	/* We ran out of space in the buffer. */
+	if (last_ev == current_ev)
+	  return E2BIG;
+
+	if (se->se_rsn_ie != NULL) {
+	  last_ev = current_ev;
+#ifdef IWEVGENIE
+		memset(&iwe, 0, sizeof(iwe));
+		if ((se->se_rsn_ie[1] + 2) > MAX_IE_LENGTH)
+			return E2BIG;
+		memcpy(buf, se->se_rsn_ie, se->se_rsn_ie[1] + 2);
+		iwe.cmd = IWEVGENIE;
+		iwe.u.data.length = se->se_rsn_ie[1] + 2;
+#else	
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = IWEVCUSTOM;
+		if (se->se_rsn_ie[0] == IEEE80211_ELEMID_RSN)
+			iwe.u.data.length = encode_ie(buf, sizeof(buf),
+				se->se_rsn_ie, se->se_rsn_ie[1] + 2,
+				rsn_leader, sizeof(rsn_leader) - 1);
+#endif
+		if (iwe.u.data.length != 0) {
+			current_ev = iwe_stream_add_point(current_ev, end_buf,
+				&iwe, buf);
+			
+			/* We ran out of space in the buffer */
+			if (last_ev == current_ev)
+			  return E2BIG;
+		}
+	}
+
+	if (se->se_wpa_ie != NULL) {
+	  last_ev = current_ev;
+#ifdef IWEVGENIE
+		memset(&iwe, 0, sizeof(iwe));
+		if ((se->se_wpa_ie[1] + 2) > MAX_IE_LENGTH)
+			return E2BIG;
+		memcpy(buf, se->se_wpa_ie, se->se_wpa_ie[1] + 2);
+		iwe.cmd = IWEVGENIE;
+		iwe.u.data.length = se->se_wpa_ie[1] + 2;
+#else
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = IWEVCUSTOM;
+		iwe.u.data.length = encode_ie(buf, sizeof(buf),
+			se->se_wpa_ie, se->se_wpa_ie[1] + 2,
+			wpa_leader, sizeof(wpa_leader) - 1);
+#endif
+		if (iwe.u.data.length != 0) {
+			current_ev = iwe_stream_add_point(current_ev, end_buf,
+				&iwe, buf);
+			
+			/* We ran out of space in the buffer. */
+			if (last_ev == current_ev)
+			  return E2BIG;
+		}
+
+	}
+	if (se->se_wme_ie != NULL) {
+		static const char wme_leader[] = "wme_ie=";
+
+		memset(&iwe, 0, sizeof(iwe));
+		last_ev = current_ev;
+		iwe.cmd = IWEVCUSTOM;
+		iwe.u.data.length = encode_ie(buf, sizeof(buf),
+			se->se_wme_ie, se->se_wme_ie[1] + 2,
+			wme_leader, sizeof(wme_leader) - 1);
+		if (iwe.u.data.length != 0) {
+			current_ev = iwe_stream_add_point(current_ev, end_buf,
+				&iwe, buf);
+
+			/* We ran out of space in the buffer. */
+			if (last_ev == current_ev)
+			  return E2BIG;
+		}
+	}
+	if (se->se_ath_ie != NULL) {
+		static const char ath_leader[] = "ath_ie=";
+
+		memset(&iwe, 0, sizeof(iwe));
+		last_ev = current_ev;
+		iwe.cmd = IWEVCUSTOM;
+		iwe.u.data.length = encode_ie(buf, sizeof(buf),
+			se->se_ath_ie, se->se_ath_ie[1] + 2,
+			ath_leader, sizeof(ath_leader) - 1);
+		if (iwe.u.data.length != 0) {
+			current_ev = iwe_stream_add_point(current_ev, end_buf,
+				&iwe, buf);
+
+			/* We ran out of space in the buffer. */
+			if (last_ev == current_ev)
+			  return E2BIG;
+		}
+	}
+#endif /* WIRELESS_EXT > 14 */
+	req->current_ev = current_ev;
+
+	return 0;
+}
+
+static int
+ieee80211_ioctl_giwscan(struct net_device *dev,	struct iw_request_info *info,
+	struct iw_point *data, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct iwscanreq req;
+	int res = 0;
+
+	req.vap = vap;
+	req.current_ev = extra;
+	if (data->length == 0) {
+	  req.end_buf = extra + IW_SCAN_MAX_DATA;
+	} else {
+	  req.end_buf = extra + data->length;
+	}
+
+	/*
+	 * NB: This is no longer needed, as long as the caller supports
+	 * large scan results.
+	 *
+	 * Do two passes to ensure WPA/non-WPA scan candidates
+	 * are sorted to the front.  This is a hack to deal with
+	 * the wireless extensions capping scan results at
+	 * IW_SCAN_MAX_DATA bytes.  In densely populated environments
+	 * it's easy to overflow this buffer (especially with WPA/RSN
+	 * information elements).  Note this sorting hack does not
+	 * guarantee we won't overflow anyway.
+	 */
+	req.mode = vap->iv_flags & IEEE80211_F_WPA;
+	res = ieee80211_scan_iterate(ic, giwscan_cb, &req);
+	if (res == 0) {
+	  req.mode = req.mode ? 0 : IEEE80211_F_WPA;
+	  res = ieee80211_scan_iterate(ic, giwscan_cb, &req);
+	}
+
+	data->length = req.current_ev - extra;
+
+	if (res != 0) {
+	  return -res;
+	}
+
+	return res;
+}
+#endif /* SIOCGIWSCAN */
+
+static int
+cipher2cap(int cipher)
+{
+	switch (cipher) {
+	case IEEE80211_CIPHER_WEP:	return IEEE80211_C_WEP;
+	case IEEE80211_CIPHER_AES_OCB:	return IEEE80211_C_AES;
+	case IEEE80211_CIPHER_AES_CCM:	return IEEE80211_C_AES_CCM;
+	case IEEE80211_CIPHER_CKIP:	return IEEE80211_C_CKIP;
+	case IEEE80211_CIPHER_TKIP:	return IEEE80211_C_TKIP;
+	}
+	return 0;
+}
+
+#define	IEEE80211_MODE_TURBO_STATIC_A	IEEE80211_MODE_MAX
+
+static int
+ieee80211_convert_mode(const char *mode)
+{
+#define TOUPPER(c) ((((c) > 0x60) && ((c) < 0x7b)) ? ((c) - 0x20) : (c))
+	static const struct {
+		char *name;
+		int mode;
+	} mappings[] = {
+		/* NB: need to order longest strings first for overlaps */
+		{ "11AST" , IEEE80211_MODE_TURBO_STATIC_A },
+		{ "AUTO"  , IEEE80211_MODE_AUTO },
+		{ "11A"   , IEEE80211_MODE_11A },
+		{ "11B"   , IEEE80211_MODE_11B },
+		{ "11G"   , IEEE80211_MODE_11G },
+		{ "FH"    , IEEE80211_MODE_FH },
+		{ "0"     , IEEE80211_MODE_AUTO },
+		{ "1"     , IEEE80211_MODE_11A },
+		{ "2"     , IEEE80211_MODE_11B },
+		{ "3"     , IEEE80211_MODE_11G },
+		{ "4"     , IEEE80211_MODE_FH },
+		{ "5"     , IEEE80211_MODE_TURBO_STATIC_A },
+		{ NULL }
+	};
+	int i, j;
+	const char *cp;
+
+	for (i = 0; mappings[i].name != NULL; i++) {
+		cp = mappings[i].name;
+		for (j = 0; j < strlen(mode) + 1; j++) {
+			/* convert user-specified string to upper case */
+			if (TOUPPER(mode[j]) != cp[j])
+				break;
+			if (cp[j] == '\0')
+				return mappings[i].mode;
+		}
+	}
+	return -1;
+#undef TOUPPER
+}
+
+static int
+ieee80211_ioctl_setmode(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *wri, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ifreq ifr;
+	char s[6];		/* big enough for ``11adt'' */
+	int retv, mode, ifr_mode, itr_count;
+
+	if (ic->ic_media.ifm_cur == NULL)
+		return -EINVAL;
+	if (wri->length > sizeof(s))		/* silently truncate */
+		wri->length = sizeof(s);
+	if (copy_from_user(s, wri->pointer, wri->length))
+		return -EINVAL;
+	s[sizeof(s)-1] = '\0';			/* ensure null termination */
+	mode = ieee80211_convert_mode(s);
+	if (mode < 0)
+		return -EINVAL;
+
+	if(ieee80211_check_mode_consistency(ic,mode,vap->iv_des_chan)) { 
+		/*
+		 * error in AP mode.
+		 * overwrite channel selection in other modes.
+		 */
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+			return -EINVAL;
+		else
+			vap->iv_des_chan=IEEE80211_CHAN_ANYC;
+	}
+
+	ifr_mode = mode;
+	memset(&ifr, 0, sizeof(ifr));
+	ifr.ifr_media = ic->ic_media.ifm_cur->ifm_media &~ IFM_MMASK;
+	if (mode == IEEE80211_MODE_TURBO_STATIC_A)
+		ifr_mode = IEEE80211_MODE_11A;
+	ifr.ifr_media |= IFM_MAKEMODE(ifr_mode);
+	retv = ifmedia_ioctl(ic->ic_dev, &ifr, &ic->ic_media, SIOCSIFMEDIA);
+	if ((!retv || retv == -ENETRESET) &&  mode != vap->iv_des_mode) {
+		ieee80211_scan_flush(ic);	/* NB: could optimize */
+		vap->iv_des_mode = mode;
+		if (IS_UP_AUTO(vap)) {
+			ieee80211_cancel_scan(vap);
+			itr_count = 0;
+			while((ic->ic_flags & IEEE80211_F_SCAN) != 0) {
+				mdelay(1);
+				if (itr_count < 100) {
+					itr_count++;
+					continue;
+				}
+				IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
+				  "%s: Timeout canceling current scan.\n",
+				  __func__);
+                    		return -ETIMEDOUT;
+			}
+			ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+		}
+		retv = 0;
+	}
+#ifdef ATH_SUPERG_XR
+	/* set the same params on the xr vap device if exists */
+	if (vap->iv_xrvap && !(vap->iv_flags & IEEE80211_F_XR))
+		vap->iv_xrvap->iv_des_mode = mode;
+#endif
+	return -retv;
+}
+#undef IEEE80211_MODE_TURBO_STATIC_A
+
+#ifdef ATH_SUPERG_XR
+static void
+ieee80211_setupxr(struct ieee80211vap *vap)
+{
+	struct net_device *dev = vap->iv_dev;
+	struct ieee80211com *ic = vap->iv_ic;
+	if (!(vap->iv_flags & IEEE80211_F_XR)) {
+		if ((vap->iv_ath_cap & IEEE80211_ATHC_XR) && !vap->iv_xrvap) { 
+			char name[IFNAMSIZ];
+			strcpy(name, dev->name);
+			strcat(name, "-xr");
+			/*
+			 * Create a new XR VAP. If the normal VAP is already up,
+			 * bring up the XR VAP as well.
+			 */
+			vap->iv_ath_cap &= ~IEEE80211_ATHC_TURBOP; /* turn off turbo */ 
+			ieee80211_scan_flush(ic);	/* NB: could optimize */
+			 
+			vap->iv_xrvap = ic->ic_vap_create(ic, name, vap->iv_unit,
+				IEEE80211_M_HOSTAP,IEEE80211_VAP_XR |
+				IEEE80211_CLONE_BSSID, dev);
+			if (!vap->iv_xrvap)
+				return;
+			vap->iv_xrvap->iv_fragthreshold = IEEE80211_XR_FRAG_THRESHOLD;
+			copy_des_ssid(vap->iv_xrvap, vap);
+			vap->iv_xrvap->iv_des_mode = vap->iv_des_mode;
+		} else if (!(vap->iv_ath_cap & IEEE80211_ATHC_XR) && vap->iv_xrvap) { 
+			/*
+			 * Destroy the XR VAP. If the XR VAP is up, bring
+			 * it down before destroying.
+			 */
+			if (vap->iv_xrvap) {
+				ieee80211_stop(vap->iv_xrvap->iv_dev);
+				ic->ic_vap_delete(vap->iv_xrvap);
+			}
+			vap->iv_xrvap = NULL;
+		}
+	}
+}
+#endif
+
+static int
+ieee80211_setathcap(struct ieee80211vap *vap, int cap, int setting)
+{
+	struct ieee80211com *ic = vap->iv_ic;
+	int ocap;
+
+	if ((ic->ic_ath_cap & cap) == 0)
+		return -EINVAL;
+	ocap = vap->iv_ath_cap;
+	if (setting)
+		vap->iv_ath_cap |= cap;
+	else
+		vap->iv_ath_cap &= ~cap;
+	return (vap->iv_ath_cap != ocap ? ENETRESET : 0);
+}
+
+static int
+ieee80211_set_turbo(struct net_device *dev, int flag)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ifreq ifr;
+	struct ieee80211vap *tmpvap = dev->priv;
+	int nvap = 0;
+
+	TAILQ_FOREACH(tmpvap, &ic->ic_vaps, iv_next)
+		nvap++;
+	if (nvap > 1 && flag )
+		return -EINVAL;
+	ifr.ifr_media = ic->ic_media.ifm_cur->ifm_media &~ IFM_MMASK;
+	if (flag)
+		ifr.ifr_media |= IFM_IEEE80211_TURBO;
+	else
+		ifr.ifr_media &= ~IFM_IEEE80211_TURBO;
+	(void) ifmedia_ioctl(ic->ic_dev, &ifr, &ic->ic_media, SIOCSIFMEDIA);
+
+	return 0;
+}
+
+static int
+ieee80211_ioctl_setparam(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_rsnparms *rsn = &vap->iv_bss->ni_rsn;
+	int *i = (int *) extra;
+	int param = i[0];		/* parameter id is 1st */
+	int value = i[1];		/* NB: most values are TYPE_INT */
+	int retv = 0;
+	int j, caps;
+	const struct ieee80211_authenticator *auth;
+	const struct ieee80211_aclator *acl;
+
+	switch (param) {
+	case IEEE80211_PARAM_AUTHMODE:
+		switch (value) {
+		case IEEE80211_AUTH_WPA:	/* WPA */
+		case IEEE80211_AUTH_8021X:	/* 802.1x */
+		case IEEE80211_AUTH_OPEN:	/* open */
+		case IEEE80211_AUTH_SHARED:	/* shared-key */
+		case IEEE80211_AUTH_AUTO:	/* auto */
+			auth = ieee80211_authenticator_get(value);
+			if (auth == NULL)
+				return -EINVAL;
+			break;
+		default:
+			return -EINVAL;
+		}
+		switch (value) {
+		case IEEE80211_AUTH_WPA:	/* WPA w/ 802.1x */
+			value = IEEE80211_AUTH_8021X;
+			break;
+		case IEEE80211_AUTH_OPEN:	/* open */
+			vap->iv_flags &= ~(IEEE80211_F_WPA);
+			break;
+		case IEEE80211_AUTH_SHARED:	/* shared-key */
+		case IEEE80211_AUTH_AUTO:	/* auto */
+		case IEEE80211_AUTH_8021X:	/* 802.1x */
+			vap->iv_flags &= ~IEEE80211_F_WPA;
+			break;
+		}
+		/* NB: authenticator attach/detach happens on state change */
+		vap->iv_bss->ni_authmode = value;
+		/* XXX mixed/mode/usage? */
+		vap->iv_auth = auth;
+		retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_PROTMODE:
+		if (value > IEEE80211_PROT_RTSCTS)
+			return -EINVAL;
+		ic->ic_protmode = value;
+		/* NB: if not operating in 11g this can wait */
+		if (ic->ic_bsschan != IEEE80211_CHAN_ANYC &&
+		    IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan))
+			retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_MCASTCIPHER:
+		if ((vap->iv_caps & cipher2cap(value)) == 0 &&
+		    !ieee80211_crypto_available(value))
+			return -EINVAL;
+		rsn->rsn_mcastcipher = value;
+		if (vap->iv_flags & IEEE80211_F_WPA)
+			retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_MCASTKEYLEN:
+		if (!(0 < value && value <= IEEE80211_KEYBUF_SIZE))
+			return -EINVAL;
+		/* XXX no way to verify driver capability */
+		rsn->rsn_mcastkeylen = value;
+		if (vap->iv_flags & IEEE80211_F_WPA)
+			retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_UCASTCIPHERS:
+		/*
+		 * NB: this logic intentionally ignores unknown and
+		 * unsupported ciphers so folks can specify 0xff or
+		 * similar and get all available ciphers.
+		 */
+		/* caps are really ciphers */
+		caps = 0;
+		for (j = 1; j < 32; j++)	/* NB: skip WEP */
+			if ((value & (1 << j)) &&
+			    ((vap->iv_caps & cipher2cap(j)) ||
+			     ieee80211_crypto_available(j)))
+				caps |= 1 << j;
+		if (caps == 0)			/* nothing available */
+			return -EINVAL;
+		/* XXX verify ciphers ok for unicast use? */
+		/* XXX disallow if running as it'll have no effect */
+		rsn->rsn_ucastcipherset = caps;
+		if (vap->iv_flags & IEEE80211_F_WPA)
+			retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_UCASTCIPHER:
+		if ((vap->iv_caps & cipher2cap(value)) == 0 &&
+		    !ieee80211_crypto_available(value))
+			return -EINVAL;
+		rsn->rsn_ucastcipher = value;
+		if (vap->iv_flags & IEEE80211_F_WPA)
+			retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_UCASTKEYLEN:
+		if (!(0 < value && value <= IEEE80211_KEYBUF_SIZE))
+			return -EINVAL;
+		/* XXX no way to verify driver capability */
+		rsn->rsn_ucastkeylen = value;
+		break;
+	case IEEE80211_PARAM_KEYMGTALGS:
+		/* XXX check */
+		rsn->rsn_keymgmtset = value;
+		if (vap->iv_flags & IEEE80211_F_WPA)
+			retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_RSNCAPS:
+		/* XXX check */
+		rsn->rsn_caps = value;
+		if (vap->iv_flags & IEEE80211_F_WPA)
+			retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_WPA:
+		if (value > 3)
+			return -EINVAL;
+		/* XXX verify ciphers available */
+		vap->iv_flags &= ~IEEE80211_F_WPA;
+		switch (value) {
+		case 1:
+			vap->iv_flags |= IEEE80211_F_WPA1;
+			break;
+		case 2:
+			vap->iv_flags |= IEEE80211_F_WPA2;
+			break;
+		case 3:
+			vap->iv_flags |= IEEE80211_F_WPA1 | IEEE80211_F_WPA2;
+			break;
+		}
+		retv = ENETRESET;		/* XXX? */
+		break;
+	case IEEE80211_PARAM_ROAMING:
+		if (!(IEEE80211_ROAMING_DEVICE <= value &&
+		    value <= IEEE80211_ROAMING_MANUAL))
+			return -EINVAL;
+		ic->ic_roaming = value;
+		break;
+	case IEEE80211_PARAM_PRIVACY:
+		if (value) {
+			/* XXX check for key state? */
+			vap->iv_flags |= IEEE80211_F_PRIVACY;
+		} else
+			vap->iv_flags &= ~IEEE80211_F_PRIVACY;
+		break;
+	case IEEE80211_PARAM_DROPUNENCRYPTED:
+		if (value)
+			vap->iv_flags |= IEEE80211_F_DROPUNENC;
+		else
+			vap->iv_flags &= ~IEEE80211_F_DROPUNENC;
+		break;
+	case IEEE80211_PARAM_DROPUNENC_EAPOL:
+		if (value)
+			IEEE80211_VAP_DROPUNENC_EAPOL_ENABLE(vap);
+		else
+			IEEE80211_VAP_DROPUNENC_EAPOL_DISABLE(vap);
+		break;
+	case IEEE80211_PARAM_COUNTERMEASURES:
+		if (value) {
+			if ((vap->iv_flags & IEEE80211_F_WPA) == 0)
+				return -EINVAL;
+			vap->iv_flags |= IEEE80211_F_COUNTERM;
+		} else
+			vap->iv_flags &= ~IEEE80211_F_COUNTERM;
+		break;
+	case IEEE80211_PARAM_DRIVER_CAPS:
+		vap->iv_caps = value;		/* NB: for testing */
+		break;
+	case IEEE80211_PARAM_MACCMD:
+		acl = vap->iv_acl;
+		switch (value) {
+		case IEEE80211_MACCMD_POLICY_OPEN:
+		case IEEE80211_MACCMD_POLICY_ALLOW:
+		case IEEE80211_MACCMD_POLICY_DENY:
+			if (acl == NULL) {
+				acl = ieee80211_aclator_get("mac");
+				if (acl == NULL || !acl->iac_attach(vap))
+					return -EINVAL;
+				vap->iv_acl = acl;
+			}
+			acl->iac_setpolicy(vap, value);
+			break;
+		case IEEE80211_MACCMD_FLUSH:
+			if (acl != NULL)
+				acl->iac_flush(vap);
+			/* NB: silently ignore when not in use */
+			break;
+		case IEEE80211_MACCMD_DETACH:
+			if (acl != NULL) {
+				vap->iv_acl = NULL;
+				acl->iac_detach(vap);
+			}
+			break;
+		}
+		break;
+	case IEEE80211_PARAM_WMM:
+		if (ic->ic_caps & IEEE80211_C_WME){
+			if (value) {
+				vap->iv_flags |= IEEE80211_F_WME;
+				vap->iv_ic->ic_flags |= IEEE80211_F_WME; /* XXX needed by ic_reset */
+			} else {
+				vap->iv_flags &= ~IEEE80211_F_WME;
+				vap->iv_ic->ic_flags &= ~IEEE80211_F_WME; /* XXX needed by ic_reset */
+			}
+			retv = ENETRESET;	/* Renegotiate for capabilities */
+		}
+		break;
+	case IEEE80211_PARAM_HIDESSID:
+		if (value)
+			vap->iv_flags |= IEEE80211_F_HIDESSID;
+		else
+			vap->iv_flags &= ~IEEE80211_F_HIDESSID;
+		retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_APBRIDGE:
+		if (value == 0)
+			vap->iv_flags |= IEEE80211_F_NOBRIDGE;
+		else
+			vap->iv_flags &= ~IEEE80211_F_NOBRIDGE;
+		break;
+	case IEEE80211_PARAM_INACT:
+		vap->iv_inact_run = value / IEEE80211_INACT_WAIT;
+		break;
+	case IEEE80211_PARAM_INACT_AUTH:
+		vap->iv_inact_auth = value / IEEE80211_INACT_WAIT;
+		break;
+	case IEEE80211_PARAM_INACT_INIT:
+		vap->iv_inact_init = value / IEEE80211_INACT_WAIT;
+		break;
+	case IEEE80211_PARAM_ABOLT:
+		caps = 0;
+		/*
+		 * Map abolt settings to capability bits;
+		 * this also strips unknown/unwanted bits.
+		 */
+		if (value & IEEE80211_ABOLT_TURBO_PRIME)
+			caps |= IEEE80211_ATHC_TURBOP;
+		if (value & IEEE80211_ABOLT_COMPRESSION)
+			caps |= IEEE80211_ATHC_COMP;
+		if (value & IEEE80211_ABOLT_FAST_FRAME)
+			caps |= IEEE80211_ATHC_FF;
+		if (value & IEEE80211_ABOLT_XR)
+			caps |= IEEE80211_ATHC_XR;
+		if (value & IEEE80211_ABOLT_AR)
+			caps |= IEEE80211_ATHC_AR;
+		if (value & IEEE80211_ABOLT_BURST)
+			caps |= IEEE80211_ATHC_BURST;
+		/* verify requested capabilities are supported */
+		if ((caps & ic->ic_ath_cap) != caps)
+			return -EINVAL;
+		if (vap->iv_ath_cap != caps) {
+			if ((vap->iv_ath_cap ^ caps) & IEEE80211_ATHC_TURBOP) {
+				/* no turbo and XR at the same time */
+				if ((caps & IEEE80211_ATHC_TURBOP) && (caps & IEEE80211_ATHC_XR))
+					return -EINVAL;
+				if (ieee80211_set_turbo(dev,  caps & IEEE80211_ATHC_TURBOP))
+					return -EINVAL;
+				ieee80211_scan_flush(ic);
+			}
+			vap->iv_ath_cap = caps;
+#ifdef ATH_SUPERG_XR
+			ieee80211_setupxr(vap);
+#endif
+			retv = ENETRESET;
+		}
+		break;
+	case IEEE80211_PARAM_DTIM_PERIOD:
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP &&
+		    vap->iv_opmode != IEEE80211_M_IBSS)
+			return -EINVAL;
+		if (IEEE80211_DTIM_MIN <= value &&
+		    value <= IEEE80211_DTIM_MAX) {
+			vap->iv_dtim_period = value;
+			retv = ENETRESET;		/* requires restart */
+		} else
+			retv = EINVAL;
+		break;
+	case IEEE80211_PARAM_BEACON_INTERVAL:
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP &&
+		    vap->iv_opmode != IEEE80211_M_IBSS)
+			return -EINVAL;
+		if (IEEE80211_BINTVAL_VALID(value)) {
+			ic->ic_lintval = value;		/* XXX multi-bss */
+			retv = ENETRESET;		/* requires restart */
+		} else
+			retv = EINVAL;
+		break;
+	case IEEE80211_PARAM_DOTH:
+		if (value)
+			ic->ic_flags |= IEEE80211_F_DOTH;
+		else
+			ic->ic_flags &= ~IEEE80211_F_DOTH;
+		retv = ENETRESET;	/* XXX: need something this drastic? */
+		break;
+	case IEEE80211_PARAM_SHPREAMBLE:
+		if (value) {
+			ic->ic_caps |= IEEE80211_C_SHPREAMBLE;
+			ic->ic_flags |= IEEE80211_F_SHPREAMBLE;
+			ic->ic_flags &= ~IEEE80211_F_USEBARKER;
+		} else {
+			ic->ic_caps &= ~IEEE80211_C_SHPREAMBLE;
+			ic->ic_flags &= ~IEEE80211_F_SHPREAMBLE;
+			ic->ic_flags |= IEEE80211_F_USEBARKER;
+		}	
+		retv = ENETRESET;	/* requires restart */
+		break;
+	case IEEE80211_PARAM_PWRTARGET:
+		ic->ic_curchanmaxpwr = value;
+		break;
+	case IEEE80211_PARAM_GENREASSOC:
+		IEEE80211_SEND_MGMT(vap->iv_bss, IEEE80211_FC0_SUBTYPE_REASSOC_REQ, 0);
+		break;
+	case IEEE80211_PARAM_COMPRESSION:
+		retv = ieee80211_setathcap(vap, IEEE80211_ATHC_COMP, value);
+		break;
+	case IEEE80211_PARAM_FF:
+		retv = ieee80211_setathcap(vap, IEEE80211_ATHC_FF, value);
+		break;
+	case IEEE80211_PARAM_TURBO:
+		retv = ieee80211_setathcap(vap, IEEE80211_ATHC_TURBOP, value);
+		if (retv == ENETRESET) {
+			/* no turbo and XR at the same time */
+			if ((vap->iv_ath_cap & IEEE80211_ATHC_XR) && value)
+				return -EINVAL;
+			if (ieee80211_set_turbo(dev,value))
+				return -EINVAL;
+			ieee80211_scan_flush(ic);
+		}
+		break;
+	case IEEE80211_PARAM_XR:
+		/* no turbo and XR at the same time */
+		if ((vap->iv_ath_cap & IEEE80211_ATHC_TURBOP) && value)
+			return -EINVAL;
+		retv = ieee80211_setathcap(vap, IEEE80211_ATHC_XR, value);
+#ifdef ATH_SUPERG_XR
+		ieee80211_setupxr(vap);
+#endif
+		break;
+	case IEEE80211_PARAM_BURST:
+		retv = ieee80211_setathcap(vap, IEEE80211_ATHC_BURST, value);
+		break;
+	case IEEE80211_PARAM_AR:
+		retv = ieee80211_setathcap(vap, IEEE80211_ATHC_AR, value);
+		break;
+	case IEEE80211_PARAM_PUREG:
+		if (value)
+			vap->iv_flags |= IEEE80211_F_PUREG;
+		else
+			vap->iv_flags &= ~IEEE80211_F_PUREG;
+		/* NB: reset only if we're operating on an 11g channel */
+		if (ic->ic_bsschan != IEEE80211_CHAN_ANYC &&
+		    IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan))
+			retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_WDS:
+		if (value)
+			vap->iv_flags_ext |= IEEE80211_FEXT_WDS;
+		else
+			vap->iv_flags_ext &= ~IEEE80211_FEXT_WDS;
+		break;
+	case IEEE80211_PARAM_BGSCAN:
+		if (value) {
+			if ((vap->iv_caps & IEEE80211_C_BGSCAN) == 0)
+				return -EINVAL;
+			vap->iv_flags |= IEEE80211_F_BGSCAN;
+		} else {
+			/* XXX racey? */
+			vap->iv_flags &= ~IEEE80211_F_BGSCAN;
+			ieee80211_cancel_scan(vap);	/* anything current */
+		}
+		break;
+	case IEEE80211_PARAM_BGSCAN_IDLE:
+		if (value >= IEEE80211_BGSCAN_IDLE_MIN)
+			vap->iv_bgscanidle = msecs_to_jiffies(value);
+		else
+			retv = EINVAL;
+		break;
+	case IEEE80211_PARAM_BGSCAN_INTERVAL:
+		if (value >= IEEE80211_BGSCAN_INTVAL_MIN)
+			vap->iv_bgscanintvl = value * HZ;
+		else
+			retv = EINVAL;
+		break;
+	case IEEE80211_PARAM_MCAST_RATE:
+		/* units are in KILObits per second */
+		if (value >= 256 && value <= 54000)
+			vap->iv_mcast_rate = value;
+		else
+			retv = EINVAL;
+		break;
+	case IEEE80211_PARAM_COVERAGE_CLASS:
+		if (value >= 0 && value <= IEEE80211_COVERAGE_CLASS_MAX) {
+			ic->ic_coverageclass = value;
+			if (IS_UP_AUTO(vap))
+				ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+			retv = 0;
+		} else
+			retv = EINVAL;
+		break;
+	case IEEE80211_PARAM_COUNTRY_IE:
+		if (value)
+			ic->ic_flags_ext |= IEEE80211_FEXT_COUNTRYIE;
+		else
+			ic->ic_flags_ext &= ~IEEE80211_FEXT_COUNTRYIE;
+		retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_REGCLASS:
+		if (value)
+			ic->ic_flags_ext |= IEEE80211_FEXT_REGCLASS;
+		else
+			ic->ic_flags_ext &= ~IEEE80211_FEXT_REGCLASS;
+		retv = ENETRESET;
+		break;
+	case IEEE80211_PARAM_SCANVALID:
+		vap->iv_scanvalid = value * HZ;
+		break;
+	case IEEE80211_PARAM_ROAM_RSSI_11A:
+		vap->iv_roam.rssi11a = value;
+		break;
+	case IEEE80211_PARAM_ROAM_RSSI_11B:
+		vap->iv_roam.rssi11bOnly = value;
+		break;
+	case IEEE80211_PARAM_ROAM_RSSI_11G:
+		vap->iv_roam.rssi11b = value;
+		break;
+	case IEEE80211_PARAM_ROAM_RATE_11A:
+		vap->iv_roam.rate11a = value;
+		break;
+	case IEEE80211_PARAM_ROAM_RATE_11B:
+		vap->iv_roam.rate11bOnly = value;
+		break;
+	case IEEE80211_PARAM_ROAM_RATE_11G:
+		vap->iv_roam.rate11b = value;
+		break;
+	case IEEE80211_PARAM_UAPSDINFO:
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			if (ic->ic_caps & IEEE80211_C_UAPSD) {
+				if (value)
+					IEEE80211_VAP_UAPSD_ENABLE(vap);
+				else
+					IEEE80211_VAP_UAPSD_DISABLE(vap);
+				retv = ENETRESET;
+			}
+		} else if (vap->iv_opmode == IEEE80211_M_STA) {
+			vap->iv_uapsdinfo = value;
+			IEEE80211_VAP_UAPSD_ENABLE(vap);
+			retv = ENETRESET;
+		}
+		break;
+	case IEEE80211_PARAM_SLEEP:
+		/* XXX: Forced sleep for testing. Does not actually place the
+		 *      HW in sleep mode yet. This only makes sense for STAs.
+		 */
+		if (value) {
+			/* goto sleep */
+			IEEE80211_VAP_GOTOSLEEP(vap);
+		} else {
+			/* wakeup */
+			IEEE80211_VAP_WAKEUP(vap);
+		}
+		ieee80211_send_nulldata(ieee80211_ref_node(vap->iv_bss));
+		break;
+	case IEEE80211_PARAM_QOSNULL:
+		/* Force a QoS Null for testing. */
+		ieee80211_send_qosnulldata(vap->iv_bss, value);
+		break;
+	case IEEE80211_PARAM_PSPOLL:
+		/* Force a PS-POLL for testing. */
+		ieee80211_send_pspoll(vap->iv_bss);
+		break;
+	case IEEE80211_PARAM_EOSPDROP:
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			if (value)
+				IEEE80211_VAP_EOSPDROP_ENABLE(vap);
+			else
+				IEEE80211_VAP_EOSPDROP_DISABLE(vap);
+		}
+		break;
+	case IEEE80211_PARAM_MARKDFS:
+		if (value)
+			ic->ic_flags_ext |= IEEE80211_FEXT_MARKDFS;
+		else
+			ic->ic_flags_ext &= ~IEEE80211_FEXT_MARKDFS;
+		break;
+	default:
+		retv = EOPNOTSUPP;
+		break;
+	}
+#ifdef ATH_SUPERG_XR
+	/* set the same params on the xr vap device if exists */
+	if (vap->iv_xrvap && !(vap->iv_flags & IEEE80211_F_XR)) {
+		ieee80211_ioctl_setparam(vap->iv_xrvap->iv_dev,info,w,extra);
+		vap->iv_xrvap->iv_ath_cap &= IEEE80211_ATHC_XR; /* XR vap does not support  any superG features */
+	} 
+	/*
+	 * Do not reset the XR VAP, which is automatically
+	 * reset by the state machine now.
+	 */
+	if (!vap->iv_xrvap || (vap->iv_xrvap && !(vap->iv_flags & IEEE80211_F_XR))) {
+		if (retv == ENETRESET)
+			retv = IS_UP_AUTO(vap) ? ieee80211_open(vap->iv_dev) : 0;
+	}
+#else
+	/* XXX should any of these cause a rescan? */
+	if (retv == ENETRESET)
+		retv = IS_UP_AUTO(vap) ? ieee80211_open(vap->iv_dev) : 0;
+#endif
+	return -retv;
+}
+
+#if 0
+static int
+cap2cipher(int flag)
+{
+	switch (flag) {
+	case IEEE80211_C_WEP:		return IEEE80211_CIPHER_WEP;
+	case IEEE80211_C_AES:		return IEEE80211_CIPHER_AES_OCB;
+	case IEEE80211_C_AES_CCM:	return IEEE80211_CIPHER_AES_CCM;
+	case IEEE80211_C_CKIP:		return IEEE80211_CIPHER_CKIP;
+	case IEEE80211_C_TKIP:		return IEEE80211_CIPHER_TKIP;
+	}
+	return -1;
+}
+#endif
+
+static int
+ieee80211_ioctl_getmode(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *wri, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ifmediareq imr;
+
+	ic->ic_media.ifm_status(ic->ic_dev, &imr);
+	switch (IFM_MODE(imr.ifm_active)) {
+	case IFM_IEEE80211_11A:
+		strcpy(extra, "11a");
+		break;
+	case IFM_IEEE80211_11B:
+		strcpy(extra, "11b");
+		break;
+	case IFM_IEEE80211_11G:
+		strcpy(extra, "11g");
+		break;
+	case IFM_IEEE80211_FH:
+		strcpy(extra, "FH");
+		break;
+	case IFM_AUTO:
+		strcpy(extra, "auto");
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (ic->ic_media.ifm_media & IFM_IEEE80211_TURBO) {
+		if (vap->iv_ath_cap & IEEE80211_ATHC_TURBOP)
+			strcat(extra, "T");
+		else
+			strcat(extra, "ST");
+	}
+	wri->length = strlen(extra);
+	return 0;
+}
+
+/* Generic parameter-get ioctl: param[0] selects which of the many
+ * IEEE80211_PARAM_* values to read; the result is written back into
+ * param[0].  Returns 0, or -EOPNOTSUPP for an unknown parameter id. */
+static int
+ieee80211_ioctl_getparam(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_rsnparms *rsn = &vap->iv_bss->ni_rsn;
+	int *param = (int *) extra;
+	
+	switch (param[0]) {
+	case IEEE80211_PARAM_AUTHMODE:
+		if (vap->iv_flags & IEEE80211_F_WPA)
+			param[0] = IEEE80211_AUTH_WPA;
+		else
+			param[0] = vap->iv_bss->ni_authmode;
+		break;
+	case IEEE80211_PARAM_PROTMODE:
+		param[0] = ic->ic_protmode;
+		break;
+	case IEEE80211_PARAM_MCASTCIPHER:
+		param[0] = rsn->rsn_mcastcipher;
+		break;
+	case IEEE80211_PARAM_MCASTKEYLEN:
+		param[0] = rsn->rsn_mcastkeylen;
+		break;
+	case IEEE80211_PARAM_UCASTCIPHERS:
+		param[0] = rsn->rsn_ucastcipherset;
+		break;
+	case IEEE80211_PARAM_UCASTCIPHER:
+		param[0] = rsn->rsn_ucastcipher;
+		break;
+	case IEEE80211_PARAM_UCASTKEYLEN:
+		param[0] = rsn->rsn_ucastkeylen;
+		break;
+	case IEEE80211_PARAM_KEYMGTALGS:
+		param[0] = rsn->rsn_keymgmtset;
+		break;
+	case IEEE80211_PARAM_RSNCAPS:
+		param[0] = rsn->rsn_caps;
+		break;
+	case IEEE80211_PARAM_WPA:
+		/* Encode WPA1/WPA2 flags as 0..3 (bit 0 = WPA1, bit 1 = WPA2) */
+		switch (vap->iv_flags & IEEE80211_F_WPA) {
+		case IEEE80211_F_WPA1:
+			param[0] = 1;
+			break;
+		case IEEE80211_F_WPA2:
+			param[0] = 2;
+			break;
+		case IEEE80211_F_WPA1 | IEEE80211_F_WPA2:
+			param[0] = 3;
+			break;
+		default:
+			param[0] = 0;
+			break;
+		}
+		break;
+	case IEEE80211_PARAM_ROAMING:
+		param[0] = ic->ic_roaming;
+		break;
+	case IEEE80211_PARAM_PRIVACY:
+		param[0] = (vap->iv_flags & IEEE80211_F_PRIVACY) != 0;
+		break;
+	case IEEE80211_PARAM_DROPUNENCRYPTED:
+		param[0] = (vap->iv_flags & IEEE80211_F_DROPUNENC) != 0;
+		break;
+	case IEEE80211_PARAM_DROPUNENC_EAPOL:
+		param[0] = IEEE80211_VAP_DROPUNENC_EAPOL(vap);
+		break;
+	case IEEE80211_PARAM_COUNTERMEASURES:
+		param[0] = (vap->iv_flags & IEEE80211_F_COUNTERM) != 0;
+		break;
+	case IEEE80211_PARAM_DRIVER_CAPS:
+		param[0] = vap->iv_caps;
+		break;
+	case IEEE80211_PARAM_WMM:
+		param[0] = (vap->iv_flags & IEEE80211_F_WME) != 0;
+		break;
+	case IEEE80211_PARAM_HIDESSID:
+		param[0] = (vap->iv_flags & IEEE80211_F_HIDESSID) != 0;
+		break;
+	case IEEE80211_PARAM_APBRIDGE:
+		param[0] = (vap->iv_flags & IEEE80211_F_NOBRIDGE) == 0;
+		break;
+	case IEEE80211_PARAM_INACT:
+		param[0] = vap->iv_inact_run * IEEE80211_INACT_WAIT;
+		break;
+	case IEEE80211_PARAM_INACT_AUTH:
+		param[0] = vap->iv_inact_auth * IEEE80211_INACT_WAIT;
+		break;
+	case IEEE80211_PARAM_INACT_INIT:
+		param[0] = vap->iv_inact_init * IEEE80211_INACT_WAIT;
+		break;
+	case IEEE80211_PARAM_ABOLT:
+		/*
+		 * Map capability bits to abolt settings.
+		 */
+		param[0] = 0;
+		if (vap->iv_ath_cap & IEEE80211_ATHC_COMP)
+			param[0] |= IEEE80211_ABOLT_COMPRESSION;
+		if (vap->iv_ath_cap & IEEE80211_ATHC_FF)
+			param[0] |= IEEE80211_ABOLT_FAST_FRAME;
+		if (vap->iv_ath_cap & IEEE80211_ATHC_XR)
+			param[0] |= IEEE80211_ABOLT_XR;
+		if (vap->iv_ath_cap & IEEE80211_ATHC_BURST)
+			param[0] |= IEEE80211_ABOLT_BURST;
+		if (vap->iv_ath_cap & IEEE80211_ATHC_TURBOP)
+			param[0] |= IEEE80211_ABOLT_TURBO_PRIME;
+		if (vap->iv_ath_cap & IEEE80211_ATHC_AR)
+			param[0] |= IEEE80211_ABOLT_AR;
+		break;
+	case IEEE80211_PARAM_COMPRESSION:
+		param[0] = (vap->iv_ath_cap & IEEE80211_ATHC_COMP) != 0;
+		break;
+	case IEEE80211_PARAM_FF:
+		param[0] = (vap->iv_ath_cap & IEEE80211_ATHC_FF) != 0;
+		break;
+	case IEEE80211_PARAM_XR:
+		param[0] = (vap->iv_ath_cap & IEEE80211_ATHC_XR) != 0;
+		break;
+	case IEEE80211_PARAM_BURST:
+		param[0] = (vap->iv_ath_cap & IEEE80211_ATHC_BURST) != 0;
+		break;
+	case IEEE80211_PARAM_AR:
+		param[0] = (vap->iv_ath_cap & IEEE80211_ATHC_AR) != 0;
+		break;
+	case IEEE80211_PARAM_TURBO:
+		param[0] = (vap->iv_ath_cap & IEEE80211_ATHC_TURBOP) != 0;
+		break;
+	case IEEE80211_PARAM_DTIM_PERIOD:
+		param[0] = vap->iv_dtim_period;
+		break;
+	case IEEE80211_PARAM_BEACON_INTERVAL:
+		/* NB: get from ic_bss for station mode */
+		param[0] = vap->iv_bss->ni_intval;
+		break;
+	case IEEE80211_PARAM_DOTH:
+		param[0] = (ic->ic_flags & IEEE80211_F_DOTH) != 0;
+		break;
+	case IEEE80211_PARAM_SHPREAMBLE:
+		param[0] = (ic->ic_caps & IEEE80211_C_SHPREAMBLE) != 0;
+		break;
+	case IEEE80211_PARAM_PWRTARGET:
+		param[0] = ic->ic_curchanmaxpwr;
+		break;
+	case IEEE80211_PARAM_PUREG:
+		param[0] = (vap->iv_flags & IEEE80211_F_PUREG) != 0;
+		break;
+	case IEEE80211_PARAM_WDS:
+		param[0] = ((vap->iv_flags_ext & IEEE80211_FEXT_WDS) == IEEE80211_FEXT_WDS);
+		break;
+	case IEEE80211_PARAM_BGSCAN:
+		param[0] = (vap->iv_flags & IEEE80211_F_BGSCAN) != 0;
+		break;
+	case IEEE80211_PARAM_BGSCAN_IDLE:
+		param[0] = jiffies_to_msecs(vap->iv_bgscanidle); /* ms */
+		break;
+	case IEEE80211_PARAM_BGSCAN_INTERVAL:
+		param[0] = vap->iv_bgscanintvl / HZ;	/* seconds */
+		break;
+	case IEEE80211_PARAM_MCAST_RATE:
+		param[0] = vap->iv_mcast_rate;	/* NOTE(review): a rate, not seconds — stale copy/paste comment */
+		break;
+	case IEEE80211_PARAM_COVERAGE_CLASS:
+		param[0] = ic->ic_coverageclass;
+		break;
+	case IEEE80211_PARAM_COUNTRY_IE:
+		param[0] = (ic->ic_flags_ext & IEEE80211_FEXT_COUNTRYIE) != 0;
+		break;
+	case IEEE80211_PARAM_REGCLASS:
+		param[0] = (ic->ic_flags_ext & IEEE80211_FEXT_REGCLASS) != 0;
+		break;
+	case IEEE80211_PARAM_SCANVALID:
+		param[0] = vap->iv_scanvalid / HZ;	/* seconds */
+		break;
+	case IEEE80211_PARAM_ROAM_RSSI_11A:
+		param[0] = vap->iv_roam.rssi11a;
+		break;
+	case IEEE80211_PARAM_ROAM_RSSI_11B:
+		param[0] = vap->iv_roam.rssi11bOnly;
+		break;
+	case IEEE80211_PARAM_ROAM_RSSI_11G:
+		param[0] = vap->iv_roam.rssi11b;
+		break;
+	case IEEE80211_PARAM_ROAM_RATE_11A:
+		param[0] = vap->iv_roam.rate11a;
+		break;
+	case IEEE80211_PARAM_ROAM_RATE_11B:
+		param[0] = vap->iv_roam.rate11bOnly;
+		break;
+	case IEEE80211_PARAM_ROAM_RATE_11G:
+		param[0] = vap->iv_roam.rate11b;
+		break;
+	case IEEE80211_PARAM_UAPSDINFO:
+		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+			if (IEEE80211_VAP_UAPSD_ENABLED(vap))
+				param[0] = 1;
+			else
+				param[0] = 0;
+		} else if (vap->iv_opmode == IEEE80211_M_STA)
+			param[0] = vap->iv_uapsdinfo;
+		break;
+	case IEEE80211_PARAM_SLEEP:
+		param[0] = vap->iv_bss->ni_flags & IEEE80211_NODE_PWR_MGT;
+		break;
+	case IEEE80211_PARAM_EOSPDROP:
+		param[0] = IEEE80211_VAP_EOSPDROP_ENABLED(vap);
+		break;
+	case IEEE80211_PARAM_MARKDFS:
+		if (ic->ic_flags_ext & IEEE80211_FEXT_MARKDFS)
+			param[0] = 1;
+		else
+			param[0] = 0;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+/* returns non-zero if ID is for a system IE (not for app use) —
+ * i.e. an element the 802.11 stack itself generates/consumes, which
+ * applications must not inject via the app-IE ioctls. */
+static int
+is_sys_ie(u_int8_t ie_id)
+{
+	/* XXX review this list */
+	switch (ie_id) {
+	case IEEE80211_ELEMID_SSID:
+	case IEEE80211_ELEMID_RATES:
+	case IEEE80211_ELEMID_FHPARMS:
+	case IEEE80211_ELEMID_DSPARMS:
+	case IEEE80211_ELEMID_CFPARMS:
+	case IEEE80211_ELEMID_TIM:
+	case IEEE80211_ELEMID_IBSSPARMS:
+	case IEEE80211_ELEMID_COUNTRY:
+	case IEEE80211_ELEMID_REQINFO:
+	case IEEE80211_ELEMID_CHALLENGE:
+	case IEEE80211_ELEMID_PWRCNSTR:
+	case IEEE80211_ELEMID_PWRCAP:
+	case IEEE80211_ELEMID_TPCREQ:
+	case IEEE80211_ELEMID_TPCREP:
+	case IEEE80211_ELEMID_SUPPCHAN:
+	case IEEE80211_ELEMID_CHANSWITCHANN:
+	case IEEE80211_ELEMID_MEASREQ:
+	case IEEE80211_ELEMID_MEASREP:
+	case IEEE80211_ELEMID_QUIET:
+	case IEEE80211_ELEMID_IBSSDFS:
+	case IEEE80211_ELEMID_ERP:
+	case IEEE80211_ELEMID_RSN:
+	case IEEE80211_ELEMID_XRATES:
+	case IEEE80211_ELEMID_TPC:
+	case IEEE80211_ELEMID_CCKM:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/* returns non-zero if the buffer appears to contain a valid IE list:
+ * walks the (id, len, data) elements and requires the buffer to be
+ * consumed exactly, with no element overrunning the end.  When
+ * exclude_sys_ies is set, any system IE (see is_sys_ie) also makes
+ * the list invalid. */
+static int
+is_valid_ie_list(u_int32_t buf_len, void *buf, int exclude_sys_ies)
+{
+	struct ieee80211_ie *ie = (struct ieee80211_ie *)buf;
+
+	while (buf_len >= sizeof(*ie)) {
+		int ie_elem_len = sizeof(*ie) + ie->len;
+		if (buf_len < ie_elem_len)
+			break;
+		if (exclude_sys_ies && is_sys_ie(ie->id))
+			break;
+		buf_len -= ie_elem_len;
+		ie = (struct ieee80211_ie *)(ie->info + ie->len);
+	}
+
+	/* valid only if the walk consumed the buffer exactly */
+	return (buf_len == 0) ? 1 : 0;
+}
+
+/* Install an application-supplied optional IE blob to be appended to
+ * (re)association requests.  STA mode only; the buffer must parse as
+ * a well-formed IE list.  Replaces (and frees) any previous blob. */
+static int
+ieee80211_ioctl_setoptie(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *wri, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	void *ie;
+
+	/*
+	 * NB: Doing this for ap operation could be useful (e.g. for
+	 *     WPA and/or WME) except that it typically is worthless
+	 *     without being able to intervene when processing
+	 *     association response frames--so disallow it for now.
+	 */
+	if (vap->iv_opmode != IEEE80211_M_STA)
+		return -EINVAL;
+	if (! is_valid_ie_list(wri->length, extra, 0))
+		return -EINVAL;
+	/* NB: wri->length is validated by the wireless extensions code */
+	MALLOC(ie, void *, wri->length, M_DEVBUF, M_WAITOK);
+	if (ie == NULL)
+		return -ENOMEM;
+	memcpy(ie, extra, wri->length);
+	/* swap in the new blob before releasing the old one */
+	if (vap->iv_opt_ie != NULL)
+		FREE(vap->iv_opt_ie, M_DEVBUF);
+	vap->iv_opt_ie = ie;
+	vap->iv_opt_ie_len = wri->length;
+#ifdef ATH_SUPERG_XR
+	/* set the same params on the xr vap device if exists */
+	if (vap->iv_xrvap && !(vap->iv_flags & IEEE80211_F_XR))
+		ieee80211_ioctl_setoptie(vap->iv_xrvap->iv_dev, info, wri, extra);
+#endif
+	return 0;
+}
+
+/* Return the previously installed optional IE blob (length 0 if none).
+ * NOTE(review): copies iv_opt_ie_len bytes into 'extra' without
+ * checking the caller-provided wri->length — presumably the wireless
+ * extensions layer sizes the buffer; verify against the dispatcher. */
+static int
+ieee80211_ioctl_getoptie(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *wri, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+
+	if (vap->iv_opt_ie == NULL) {
+		wri->length = 0;
+		return 0;
+	}
+	wri->length = vap->iv_opt_ie_len;
+	memcpy(extra, vap->iv_opt_ie, vap->iv_opt_ie_len);
+	return 0;
+}
+
+/* the following functions are used by the set/get appiebuf functions */
+/* Validate and install an application IE buffer for the given frame
+ * type slot; system IEs are rejected.  Replaces (and frees) any
+ * previously installed buffer for that slot. */
+static int
+add_app_ie(unsigned int frame_type_index, struct ieee80211vap *vap,
+	struct ieee80211req_getset_appiebuf *iebuf)
+{
+	struct ieee80211_ie *ie;
+
+	if (! is_valid_ie_list(iebuf->app_buflen, iebuf->app_buf, 1))
+		return -EINVAL;
+	/* NB: data.length is validated by the wireless extensions code */
+	MALLOC(ie, struct ieee80211_ie *, iebuf->app_buflen, M_DEVBUF, M_WAITOK);
+	if (ie == NULL)
+		return -ENOMEM;
+
+	memcpy(ie, iebuf->app_buf, iebuf->app_buflen);
+	if (vap->app_ie[frame_type_index].ie != NULL)
+		FREE(vap->app_ie[frame_type_index].ie, M_DEVBUF);
+	vap->app_ie[frame_type_index].ie = ie;
+	vap->app_ie[frame_type_index].length = iebuf->app_buflen;
+
+	return 0;
+}
+
+/* Free and clear the application IE installed for the given frame
+ * type slot; a no-op if none is installed.  Always returns 0. */
+static int
+remove_app_ie(unsigned int frame_type_index, struct ieee80211vap *vap)
+{
+	struct ieee80211_app_ie_t *app_ie = &vap->app_ie[frame_type_index];
+	if (app_ie->ie != NULL) {
+		FREE(app_ie->ie, M_DEVBUF);
+		app_ie->ie = NULL;
+		app_ie->length = 0;
+	}
+	return 0;
+}
+
+/* Copy the application IE for the given frame type slot into iebuf,
+ * updating app_buflen to the actual length.  Fails with -EINVAL if
+ * the caller's buffer is too small. */
+static int
+get_app_ie(unsigned int frame_type_index, struct ieee80211vap *vap,
+	struct ieee80211req_getset_appiebuf *iebuf)
+{
+	struct ieee80211_app_ie_t *app_ie = &vap->app_ie[frame_type_index];
+	if (iebuf->app_buflen < app_ie->length)
+		return -EINVAL;
+
+	iebuf->app_buflen = app_ie->length;
+	memcpy(iebuf->app_buf, app_ie->ie, app_ie->length);
+	return 0;
+}
+
+/* Install (non-zero app_buflen) or remove (zero) an application IE
+ * for one frame type.  Beacon/probe-resp/assoc-resp slots require
+ * hostap mode; probe-req/assoc-req require station mode.  A beacon
+ * change flags the vap so the beacon frame gets rebuilt. */
+static int
+ieee80211_ioctl_setappiebuf(struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *data, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211req_getset_appiebuf *iebuf =
+		(struct ieee80211req_getset_appiebuf *)extra;
+	enum ieee80211_opmode chk_opmode;
+	int iebuf_len;
+	int rc = 0;
+
+	/* payload length must exactly match the declared IE length */
+	iebuf_len = data->length - sizeof(struct ieee80211req_getset_appiebuf);
+	if ( iebuf_len < 0 || iebuf_len != iebuf->app_buflen ||
+		 iebuf->app_buflen > IEEE80211_APPIE_MAX )
+		return -EINVAL;
+
+	switch (iebuf->app_frmtype) {
+	case IEEE80211_APPIE_FRAME_BEACON:
+	case IEEE80211_APPIE_FRAME_PROBE_RESP:
+	case IEEE80211_APPIE_FRAME_ASSOC_RESP:
+		chk_opmode = IEEE80211_M_HOSTAP;
+		break;
+	case IEEE80211_APPIE_FRAME_PROBE_REQ:
+	case IEEE80211_APPIE_FRAME_ASSOC_REQ:
+		chk_opmode = IEEE80211_M_STA;
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (vap->iv_opmode != chk_opmode)
+		return -EINVAL;
+
+	if (iebuf->app_buflen)
+		rc = add_app_ie(iebuf->app_frmtype, vap, iebuf);
+	else
+		rc = remove_app_ie(iebuf->app_frmtype, vap);
+	if ((iebuf->app_frmtype == IEEE80211_APPIE_FRAME_BEACON) && (rc == 0))
+		vap->iv_flags_ext |= IEEE80211_FEXT_APPIE_UPDATE;
+
+	return rc;
+}
+
+/* Fetch the application IE installed for one frame type.  The request
+ * header is re-read from userspace to learn the requested frame type
+ * and buffer size; the reply length is clamped to the caller's buffer.
+ * Frame-type/opmode pairing rules mirror setappiebuf. */
+static int
+ieee80211_ioctl_getappiebuf(struct net_device *dev, struct iw_request_info *info,
+	struct iw_point *data, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211req_getset_appiebuf *iebuf =
+		(struct ieee80211req_getset_appiebuf *)extra;
+	int max_iebuf_len;
+	int rc = 0;
+
+	max_iebuf_len = data->length - sizeof(struct ieee80211req_getset_appiebuf);
+	if (max_iebuf_len < 0)
+		return -EINVAL;
+	if (copy_from_user(iebuf, data->pointer, sizeof(struct ieee80211req_getset_appiebuf)))
+		return -EFAULT;		
+	if (iebuf->app_buflen > max_iebuf_len)
+		iebuf->app_buflen = max_iebuf_len;
+
+	switch (iebuf->app_frmtype) {
+	case IEEE80211_APPIE_FRAME_BEACON:
+	case IEEE80211_APPIE_FRAME_PROBE_RESP:
+	case IEEE80211_APPIE_FRAME_ASSOC_RESP:
+		if (vap->iv_opmode == IEEE80211_M_STA)
+			return -EINVAL;
+		break;
+	case IEEE80211_APPIE_FRAME_PROBE_REQ:
+	case IEEE80211_APPIE_FRAME_ASSOC_REQ:
+		if (vap->iv_opmode != IEEE80211_M_STA)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	rc = get_app_ie(iebuf->app_frmtype, vap, iebuf);
+	
+	data->length = sizeof(struct ieee80211req_getset_appiebuf) + iebuf->app_buflen;
+	
+	return rc;
+}
+
+/* Set the management-frame filter mask forwarded to applications;
+ * rejects masks with bits outside IEEE80211_FILTER_TYPE_ALL.
+ * NB: the NULL check short-circuits before app_filter is dereferenced. */
+static int
+ieee80211_ioctl_setfilter(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211req_set_filter *app_filter = (struct ieee80211req_set_filter *)extra;
+
+	if ((extra == NULL) || (app_filter->app_filterype & ~IEEE80211_FILTER_TYPE_ALL))
+		return -EINVAL;
+
+	vap->app_filter = app_filter->app_filterype;
+
+	return 0;
+}
+
+/* Install a cipher key.  A key index of IEEE80211_KEYIX_NONE selects
+ * the per-node unicast key (looked up by MAC address); otherwise the
+ * index selects one of the vap's group/WEP key slots.  Returns 0 on
+ * success or a negative errno.
+ *
+ * Fix: the STA-mode path took a reference on the bss node via
+ * ieee80211_ref_node() and then returned -EADDRNOTAVAIL without
+ * releasing it, leaking a node reference on every mismatched-BSSID
+ * request.  The reference is now dropped before the error return. */
+static int
+ieee80211_ioctl_setkey(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211req_key *ik = (struct ieee80211req_key *)extra;
+	struct ieee80211_node *ni;
+	struct ieee80211_key *wk;
+	u_int16_t kid;
+	int error, flags, i;
+
+	/* NB: cipher support is verified by ieee80211_crypt_newkey */
+	/* NB: this also checks ik->ik_keylen > sizeof(wk->wk_key) */
+	if (ik->ik_keylen > sizeof(ik->ik_keydata))
+		return -E2BIG;
+	kid = ik->ik_keyix;
+	if (kid == IEEE80211_KEYIX_NONE) {
+		/* XXX unicast keys currently must be tx/rx */
+		if (ik->ik_flags != (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV))
+			return -EINVAL;
+		if (vap->iv_opmode == IEEE80211_M_STA) {
+			ni = ieee80211_ref_node(vap->iv_bss);
+			if (!IEEE80211_ADDR_EQ(ik->ik_macaddr, ni->ni_bssid)) {
+				/* release the ref taken above; previously leaked */
+				ieee80211_free_node(ni);
+				return -EADDRNOTAVAIL;
+			}
+		} else
+			ni = ieee80211_find_node(&ic->ic_sta, ik->ik_macaddr);
+		if (ni == NULL)
+			return -ENOENT;
+		wk = &ni->ni_ucastkey;
+	} else {
+		if (kid >= IEEE80211_WEP_NKID)
+			return -EINVAL;
+		wk = &vap->iv_nw_keys[kid];
+		ni = NULL;
+		/* XXX auto-add group key flag until applications are updated */
+		if ((ik->ik_flags & IEEE80211_KEY_XMIT) == 0)	/* XXX */
+			ik->ik_flags |= IEEE80211_KEY_GROUP;	/* XXX */
+	}
+	error = 0;
+	flags = ik->ik_flags & IEEE80211_KEY_COMMON;
+	ieee80211_key_update_begin(vap);
+	if (ieee80211_crypto_newkey(vap, ik->ik_type, flags, wk)) {
+		wk->wk_keylen = ik->ik_keylen;
+		/* NB: MIC presence is implied by cipher type */
+		if (wk->wk_keylen > IEEE80211_KEYBUF_SIZE)
+			wk->wk_keylen = IEEE80211_KEYBUF_SIZE;
+		/* seed every TID's replay counter from the supplied RSC */
+		for(i = 0; i < IEEE80211_TID_SIZE; i++)
+			wk->wk_keyrsc[i] = ik->ik_keyrsc;
+		wk->wk_keytsc = 0;			/* new key, reset */
+		memset(wk->wk_key, 0, sizeof(wk->wk_key));
+		memcpy(wk->wk_key, ik->ik_keydata, ik->ik_keylen);
+		if (!ieee80211_crypto_setkey(vap, wk,
+		    ni != NULL ? ni->ni_macaddr : ik->ik_macaddr, ni))
+			error = -EIO;
+		else if ((ik->ik_flags & IEEE80211_KEY_DEFAULT))
+			vap->iv_def_txkey = kid;
+	} else
+		error = -ENXIO;
+	ieee80211_key_update_end(vap);
+	if (ni != NULL)
+		ieee80211_free_node(ni);
+#ifdef ATH_SUPERG_XR
+	/* set the same params on the xr vap device if exists */
+	if (vap->iv_xrvap && !(vap->iv_flags & IEEE80211_F_XR))
+		ieee80211_ioctl_setkey(vap->iv_xrvap->iv_dev, info, w, extra);
+#endif
+	return error;
+}
+
+/* Read back a cipher key.  Key data, RSC and TSC are only disclosed
+ * to CAP_NET_ADMIN callers; others get zeroed secret fields.  For
+ * TKIP the MIC key is appended after the cipher key. */
+static int
+ieee80211_ioctl_getkey(struct net_device *dev, struct iwreq *iwr)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni;
+	struct ieee80211req_key ik;
+	struct ieee80211_key *wk;
+	const struct ieee80211_cipher *cip;
+	u_int kid;
+
+	if (iwr->u.data.length != sizeof(ik))
+		return -EINVAL;
+	if (copy_from_user(&ik, iwr->u.data.pointer, sizeof(ik)))
+		return -EFAULT;
+	kid = ik.ik_keyix;
+	if (kid == IEEE80211_KEYIX_NONE) {
+		/* per-node unicast key, selected by MAC address */
+		ni = ieee80211_find_node(&ic->ic_sta, ik.ik_macaddr);
+		if (ni == NULL)
+			return -EINVAL;		/* XXX */
+		wk = &ni->ni_ucastkey;
+	} else {
+		if (kid >= IEEE80211_WEP_NKID)
+			return -EINVAL;
+		wk = &vap->iv_nw_keys[kid];
+		IEEE80211_ADDR_COPY(&ik.ik_macaddr, vap->iv_bss->ni_macaddr);
+		ni = NULL;
+	}
+	cip = wk->wk_cipher;
+	ik.ik_type = cip->ic_cipher;
+	ik.ik_keylen = wk->wk_keylen;
+	ik.ik_flags = wk->wk_flags & (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV);
+	if (wk->wk_keyix == vap->iv_def_txkey)
+		ik.ik_flags |= IEEE80211_KEY_DEFAULT;
+	if (capable(CAP_NET_ADMIN)) {
+		/* NB: only root can read key data */
+		ik.ik_keyrsc = wk->wk_keyrsc[0];
+		ik.ik_keytsc = wk->wk_keytsc;
+		memcpy(ik.ik_keydata, wk->wk_key, wk->wk_keylen);
+		if (cip->ic_cipher == IEEE80211_CIPHER_TKIP) {
+			memcpy(ik.ik_keydata+wk->wk_keylen,
+				wk->wk_key + IEEE80211_KEYBUF_SIZE,
+				IEEE80211_MICBUF_SIZE);
+			ik.ik_keylen += IEEE80211_MICBUF_SIZE;
+		}
+	} else {
+		ik.ik_keyrsc = 0;
+		ik.ik_keytsc = 0;
+		memset(ik.ik_keydata, 0, sizeof(ik.ik_keydata));
+	}
+	if (ni != NULL)
+		ieee80211_free_node(ni);
+	return (copy_to_user(iwr->u.data.pointer, &ik, sizeof(ik)) ? -EFAULT : 0);
+}
+
+/* Delete a cipher key: either a node's unicast key (keyix NONE,
+ * selected by MAC address) or one of the vap's group/WEP key slots. */
+static int
+ieee80211_ioctl_delkey(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211req_del_key *dk = (struct ieee80211req_del_key *)extra;
+	int kid;
+
+	kid = dk->idk_keyix;
+	/* XXX u_int8_t -> u_int16_t */
+	if (dk->idk_keyix == (u_int8_t) IEEE80211_KEYIX_NONE) {
+		struct ieee80211_node *ni;
+
+		ni = ieee80211_find_node(&ic->ic_sta, dk->idk_macaddr);
+		if (ni == NULL)
+			return -EINVAL;		/* XXX */
+		/* XXX error return */
+		ieee80211_crypto_delkey(vap, &ni->ni_ucastkey, ni);
+		ieee80211_free_node(ni);
+	} else {
+		if (kid >= IEEE80211_WEP_NKID)
+			return -EINVAL;
+		/* XXX error return */
+		ieee80211_crypto_delkey(vap, &vap->iv_nw_keys[kid], NULL);
+	}
+	return 0;
+}
+
+/* Node-iterator callback for MLME deauth/disassoc requests: send the
+ * appropriate management frame to an associated station, then tear
+ * down its state via ieee80211_node_leave(). */
+static void
+domlme(void *arg, struct ieee80211_node *ni)
+{
+	struct ieee80211req_mlme *mlme = arg;
+
+	if (ni->ni_associd != 0) {
+		IEEE80211_SEND_MGMT(ni,
+			mlme->im_op == IEEE80211_MLME_DEAUTH ?
+				IEEE80211_FC0_SUBTYPE_DEAUTH :
+				IEEE80211_FC0_SUBTYPE_DISASSOC,
+			mlme->im_reason);
+	}
+	ieee80211_node_leave(ni);
+}
+
+/* Scan-table lookup request/result: 'mac' (and optionally 'essid' of
+ * length 'esslen') are the match criteria; 'se' receives the matching
+ * scan entry, if any. */
+struct scanlookup {		/* XXX: right place for declaration? */
+	const u_int8_t *mac;
+	int esslen;
+	const u_int8_t *essid;
+	const struct ieee80211_scan_entry *se;
+};
+
+/*
+ * Match mac address and any ssid.
+ */
+static int
+mlmelookup(void *arg, const struct ieee80211_scan_entry *se)
+{
+	struct scanlookup *look = arg;
+
+	if (!IEEE80211_ADDR_EQ(look->mac, se->se_macaddr))
+		return 0;
+	/* esslen == 0 matches any ssid; se_ssid is a raw IE (id, len, data) */
+	if (look->esslen != 0) {
+		if (se->se_ssid[1] != look->esslen)
+			return 0;
+		if (memcmp(look->essid, se->se_ssid + 2, look->esslen))
+			return 0;
+	}
+	look->se = se;
+
+	return 0;
+}
+
+/* MLME operation dispatch (associate, deauth/disassoc, (un)authorize,
+ * clear per-station stats).  The interface must be up.  The ASSOC case
+ * busy-waits (with rescheduling) for the join state machine to finish
+ * before reporting its result. */
+static int
+ieee80211_ioctl_setmlme(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211req_mlme *mlme = (struct ieee80211req_mlme *)extra;
+	struct ieee80211_node *ni;
+
+	if (!IS_UP(dev))
+		return -EINVAL;
+	switch (mlme->im_op) {
+	case IEEE80211_MLME_ASSOC:
+		if (vap->iv_opmode == IEEE80211_M_STA) {
+			struct scanlookup lookup;
+
+			lookup.se = NULL;
+			lookup.mac = mlme->im_macaddr;
+			/* XXX use revised api w/ explicit ssid */
+			lookup.esslen = vap->iv_des_ssid[0].len;
+			lookup.essid = vap->iv_des_ssid[0].ssid;
+			ieee80211_scan_iterate(ic, mlmelookup, &lookup);
+			if (lookup.se != NULL) {
+				vap->iv_nsdone = 0;
+				vap->iv_nsparams.result = 0;
+				/* spin until the join completes (iv_nsdone set) */
+				if (ieee80211_sta_join(vap, lookup.se))
+					while (!vap->iv_nsdone)
+						IEEE80211_RESCHEDULE();
+				if (vap->iv_nsparams.result)
+					return 0;
+			}
+		}
+		return -EINVAL;
+	case IEEE80211_MLME_DISASSOC:
+	case IEEE80211_MLME_DEAUTH:
+		switch (vap->iv_opmode) {
+		case IEEE80211_M_STA:
+			/* XXX not quite right */
+			ieee80211_new_state(vap, IEEE80211_S_INIT,
+				mlme->im_reason);
+			break;
+		case IEEE80211_M_HOSTAP:
+			/* NB: the broadcast address means do 'em all */
+			if (!IEEE80211_ADDR_EQ(mlme->im_macaddr, vap->iv_dev->broadcast)) {
+				ni = ieee80211_find_node(&ic->ic_sta,
+					mlme->im_macaddr);
+				if (ni == NULL)
+					return -EINVAL;
+				/* only act on stations belonging to this vap */
+				if (dev == ni->ni_vap->iv_dev)
+					domlme(mlme, ni);
+				ieee80211_free_node(ni);
+			} else
+				ieee80211_iterate_dev_nodes(dev, &ic->ic_sta, domlme, mlme);
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case IEEE80211_MLME_AUTHORIZE:
+	case IEEE80211_MLME_UNAUTHORIZE:
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+			return -EINVAL;
+		ni = ieee80211_find_node(&ic->ic_sta, mlme->im_macaddr);
+		if (ni == NULL)
+			return -EINVAL;
+		if (mlme->im_op == IEEE80211_MLME_AUTHORIZE)
+			ieee80211_node_authorize(ni);
+		else
+			ieee80211_node_unauthorize(ni);
+		ieee80211_free_node(ni);
+		break;
+	case IEEE80211_MLME_CLEAR_STATS:
+		if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+			return -EINVAL;
+		ni = ieee80211_find_node(&ic->ic_sta, mlme->im_macaddr);
+		if (ni == NULL)
+			return -ENOENT;
+		
+		/* clear statistics */
+		memset(&ni->ni_stats, 0, sizeof(struct ieee80211_nodestats));
+		ieee80211_free_node(ni);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* Attach a WDS peer MAC address to this vap; only one peer may be set
+ * at a time.  Resets the device if it is up so the change takes effect.
+ * NOTE(review): failure returns a bare -1 rather than a -Exxx errno. */
+static int
+ieee80211_ioctl_wdsmac(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct sockaddr *sa = (struct sockaddr *)extra;
+	struct ieee80211com *ic = vap->iv_ic;
+
+	if (!IEEE80211_ADDR_NULL(vap->wds_mac)) {
+		printk("%s: Failed to add WDS MAC: %s\n", dev->name,
+			ether_sprintf(sa->sa_data));
+		printk("%s: Device already has WDS mac address attached,"
+			" remove first\n", dev->name);
+		return -1;
+	}
+
+	memcpy(vap->wds_mac, sa->sa_data, IEEE80211_ADDR_LEN);
+
+	printk("%s: Added WDS MAC: %s\n", dev->name,
+		ether_sprintf(vap->wds_mac));
+
+	if (IS_UP(vap->iv_dev))
+		return ic->ic_reset(ic->ic_dev);
+
+	return 0;
+}
+
+/* Detach the WDS peer MAC address from this vap.  Succeeds trivially
+ * if none is set; resets the device if it is up.  Returns -1 if the
+ * supplied address does not match the attached one. */
+static int
+ieee80211_ioctl_wdsdelmac(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct sockaddr *sa = (struct sockaddr *)extra;
+	struct ieee80211com *ic = vap->iv_ic;
+
+	/* WDS Mac address filed already? */
+	if (IEEE80211_ADDR_NULL(vap->wds_mac))
+		return 0;
+
+	/* Compare suplied MAC address with WDS MAC of this interface 
+	 * remove when mac address is known
+	 */
+	if (memcmp(vap->wds_mac, sa->sa_data, IEEE80211_ADDR_LEN) == 0) {
+		memset(vap->wds_mac, 0x00, IEEE80211_ADDR_LEN);
+		if (IS_UP(vap->iv_dev))
+			return ic->ic_reset(ic->ic_dev);
+		return 0;			 
+	}
+
+	printk("%s: WDS MAC address %s is not known by this interface\n",
+	       dev->name, ether_sprintf(sa->sa_data));
+
+	return -1;
+}
+
+/*
+ * kick associated station with the given MAC address.
+ * Implemented as a disassociate MLME request forwarded to
+ * ieee80211_ioctl_setmlme(); only ethernet addresses are accepted.
+ */
+static int
+ieee80211_ioctl_kickmac(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct sockaddr *sa = (struct sockaddr *)extra;
+	struct ieee80211req_mlme mlme;
+	
+	if (sa->sa_family != ARPHRD_ETHER)
+		return -EINVAL;
+
+	/* Setup a MLME request for disassociation of the given MAC */
+	mlme.im_op = IEEE80211_MLME_DISASSOC;
+	mlme.im_reason = IEEE80211_REASON_UNSPECIFIED;
+	IEEE80211_ADDR_COPY(&(mlme.im_macaddr), sa->sa_data);
+
+	/* Send the MLME request and return the result. */
+	return ieee80211_ioctl_setmlme(dev, info, w, (char *)&mlme);
+}
+
+/* Add a MAC address to the vap's ACL, lazily attaching the "mac"
+ * aclator policy module on first use. */
+static int
+ieee80211_ioctl_addmac(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct sockaddr *sa = (struct sockaddr *)extra;
+	const struct ieee80211_aclator *acl = vap->iv_acl;
+
+	if (acl == NULL) {
+		acl = ieee80211_aclator_get("mac");
+		if (acl == NULL || !acl->iac_attach(vap))
+			return -EINVAL;
+		vap->iv_acl = acl;
+	}
+	acl->iac_add(vap, sa->sa_data);
+	return 0;
+}
+
+/* Remove a MAC address from the vap's ACL; mirrors addmac, including
+ * the lazy attach of the "mac" aclator policy module. */
+static int
+ieee80211_ioctl_delmac(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct sockaddr *sa = (struct sockaddr *)extra;
+	const struct ieee80211_aclator *acl = vap->iv_acl;
+
+	if (acl == NULL) {
+		acl = ieee80211_aclator_get("mac");
+		if (acl == NULL || !acl->iac_attach(vap))
+			return -EINVAL;
+		vap->iv_acl = acl;
+	}
+	acl->iac_remove(vap, sa->sa_data);
+	return 0;
+}
+
+/* Set the active channel list from a user-supplied bitmap.  Requested
+ * channels not available on the device are silently dropped; at least
+ * one valid channel must remain.  Invalidates the bss channel if it
+ * was excluded, and kicks off a rescan when the vap is up in auto mode. */
+static int
+ieee80211_ioctl_setchanlist(struct net_device *dev,
+	struct iw_request_info *info, void *w, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211req_chanlist *list =
+		(struct ieee80211req_chanlist *)extra;
+	u_char chanlist[IEEE80211_CHAN_BYTES];
+	int i, j, nchan;
+
+	memset(chanlist, 0, sizeof(chanlist));
+	/*
+	 * Since channel 0 is not available for DS, channel 1
+	 * is assigned to LSB on WaveLAN.
+	 */
+	if (ic->ic_phytype == IEEE80211_T_DS)
+		i = 1;
+	else
+		i = 0;
+	nchan = 0;
+	for (j = 0; i <= IEEE80211_CHAN_MAX; i++, j++) {
+		/*
+		 * NB: silently discard unavailable channels so users
+		 *     can specify 1-255 to get all available channels.
+		 */
+		if (isset(list->ic_channels, j) && isset(ic->ic_chan_avail, i)) {
+			setbit(chanlist, i);
+			nchan++;
+		}
+	}
+	if (nchan == 0)			/* no valid channels, disallow */
+		return -EINVAL;
+	if (ic->ic_bsschan != IEEE80211_CHAN_ANYC &&	/* XXX */
+	    isclr(chanlist, ic->ic_bsschan->ic_ieee))
+		ic->ic_bsschan = IEEE80211_CHAN_ANYC;	/* invalidate */
+	memcpy(ic->ic_chan_active, chanlist, sizeof(ic->ic_chan_active));
+	if (IS_UP_AUTO(vap))
+		ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+	return 0;
+}
+
+/* Copy the active channel bitmap out to the caller. */
+static int
+ieee80211_ioctl_getchanlist(struct net_device *dev,
+	struct iw_request_info *info, void *w, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+
+	memcpy(extra, ic->ic_chan_active, sizeof(ic->ic_chan_active));
+	return 0;
+}
+
+/* Report one entry per distinct IEEE channel number, preferring the
+ * turbo variant over non-turbo and 11g over 11b for the same number.
+ * NB: the missing 'else' after the 11a branch is harmless — a channel
+ * cannot satisfy both IS_CHAN_A and IS_CHAN_ANYG. */
+static int
+ieee80211_ioctl_getchaninfo(struct net_device *dev,
+	struct iw_request_info *info, void *w, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211req_chaninfo *chans =
+		(struct ieee80211req_chaninfo *) extra;
+	u_int8_t reported[IEEE80211_CHAN_BYTES];	/* XXX stack usage? */
+	int i;
+
+	memset(chans, 0, sizeof(*chans));
+	memset(reported, 0, sizeof(reported));
+	for (i = 0; i < ic->ic_nchans; i++) {
+		const struct ieee80211_channel *c = &ic->ic_channels[i];
+		const struct ieee80211_channel *c1 = c;
+
+		if (isclr(reported, c->ic_ieee)) {
+			setbit(reported, c->ic_ieee);
+
+			/* pick turbo channel over non-turbo channel, and
+			 * 11g channel over 11b channel */
+			if (IEEE80211_IS_CHAN_A(c))
+				c1 = findchannel(ic, c->ic_ieee, IEEE80211_MODE_TURBO_A);
+			if (IEEE80211_IS_CHAN_ANYG(c))
+				c1 = findchannel(ic, c->ic_ieee, IEEE80211_MODE_TURBO_G);
+			else if (IEEE80211_IS_CHAN_B(c)) {
+				c1 = findchannel(ic, c->ic_ieee, IEEE80211_MODE_TURBO_G);
+				if (!c1)
+					c1 = findchannel(ic, c->ic_ieee, IEEE80211_MODE_11G);
+			}
+
+			/* findchannel may fail; fall back to the original */
+			if (c1)
+				c = c1;
+			chans->ic_chans[chans->ic_nchans].ic_ieee = c->ic_ieee;
+			chans->ic_chans[chans->ic_nchans].ic_freq = c->ic_freq;
+			chans->ic_chans[chans->ic_nchans].ic_flags = c->ic_flags;
+			if (++chans->ic_nchans >= IEEE80211_CHAN_MAX)
+				break;
+		}
+	}
+	return 0;
+}
+
+/* Set one WMM/WME parameter: param[0] = which parameter, param[1] =
+ * access class (defaults to best-effort if out of range), param[2] =
+ * BSS (advertised) vs channel (local) parameter set, param[3] = value.
+ * BSS-side writes also update the active set unless aggressive mode
+ * (WME_F_AGGRMODE) owns it. */
+static int
+ieee80211_ioctl_setwmmparams(struct net_device *dev,
+	struct iw_request_info *info, void *w, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	int *param = (int *) extra;
+	int ac = (param[1] >= 0 && param[1] < WME_NUM_AC) ?
+		param[1] : WME_AC_BE;
+	int bss = param[2]; 
+	struct ieee80211_wme_state *wme = &vap->iv_ic->ic_wme;
+
+	switch (param[0]) {
+        case IEEE80211_WMMPARAMS_CWMIN:
+		if (param[3] < 0 || param[3] > 15) 
+			return -EINVAL;
+        	if (bss) {
+			wme->wme_wmeBssChanParams.cap_wmeParams[ac].wmep_logcwmin = param[3];
+			if ((wme->wme_flags & WME_F_AGGRMODE) == 0)
+				wme->wme_bssChanParams.cap_wmeParams[ac].wmep_logcwmin = param[3];
+		} else {
+			wme->wme_wmeChanParams.cap_wmeParams[ac].wmep_logcwmin = param[3];
+			wme->wme_chanParams.cap_wmeParams[ac].wmep_logcwmin = param[3];
+		}
+		ieee80211_wme_updateparams(vap);	
+		break;
+	case IEEE80211_WMMPARAMS_CWMAX:
+		if (param[3] < 0 || param[3] > 15) 
+			return -EINVAL;
+        	if (bss) {
+			wme->wme_wmeBssChanParams.cap_wmeParams[ac].wmep_logcwmax = param[3];
+			if ((wme->wme_flags & WME_F_AGGRMODE) == 0)
+				wme->wme_bssChanParams.cap_wmeParams[ac].wmep_logcwmax = param[3];
+		} else {
+			wme->wme_wmeChanParams.cap_wmeParams[ac].wmep_logcwmax = param[3];
+			wme->wme_chanParams.cap_wmeParams[ac].wmep_logcwmax = param[3];
+		}
+		ieee80211_wme_updateparams(vap);	
+		break;
+        case IEEE80211_WMMPARAMS_AIFS:
+		if (param[3] < 0 || param[3] > 15) 
+			return -EINVAL;	
+        	if (bss) {
+			wme->wme_wmeBssChanParams.cap_wmeParams[ac].wmep_aifsn = param[3];
+			if ((wme->wme_flags & WME_F_AGGRMODE) == 0)
+				wme->wme_bssChanParams.cap_wmeParams[ac].wmep_aifsn = param[3];
+		} else {
+			wme->wme_wmeChanParams.cap_wmeParams[ac].wmep_aifsn = param[3];
+			wme->wme_chanParams.cap_wmeParams[ac].wmep_aifsn = param[3];
+		}
+		ieee80211_wme_updateparams(vap);	
+		break;
+        case IEEE80211_WMMPARAMS_TXOPLIMIT:
+		if (param[3] < 0 || param[3] > 8192) 
+			return -EINVAL;
+        	if (bss) {
+			wme->wme_wmeBssChanParams.cap_wmeParams[ac].wmep_txopLimit 
+				= IEEE80211_US_TO_TXOP(param[3]);
+			if ((wme->wme_flags & WME_F_AGGRMODE) == 0)
+				wme->wme_bssChanParams.cap_wmeParams[ac].wmep_txopLimit =
+					IEEE80211_US_TO_TXOP(param[3]);
+		} else {
+			wme->wme_wmeChanParams.cap_wmeParams[ac].wmep_txopLimit 
+				= IEEE80211_US_TO_TXOP(param[3]);
+			wme->wme_chanParams.cap_wmeParams[ac].wmep_txopLimit 
+				= IEEE80211_US_TO_TXOP(param[3]);
+		}
+		ieee80211_wme_updateparams(vap);	
+		break;
+        case IEEE80211_WMMPARAMS_ACM:
+		if (!bss || param[3] < 0 || param[3] > 1) 
+			return -EINVAL;
+        	/* ACM bit applies to BSS case only */
+		wme->wme_wmeBssChanParams.cap_wmeParams[ac].wmep_acm = param[3];
+		if ((wme->wme_flags & WME_F_AGGRMODE) == 0)
+			wme->wme_bssChanParams.cap_wmeParams[ac].wmep_acm = param[3];
+		break;
+        case IEEE80211_WMMPARAMS_NOACKPOLICY:
+		if (bss || param[3] < 0 || param[3] > 1) 
+			return -EINVAL;	
+        	/* ack policy applies to non-BSS case only */
+		wme->wme_wmeChanParams.cap_wmeParams[ac].wmep_noackPolicy = param[3];
+		wme->wme_chanParams.cap_wmeParams[ac].wmep_noackPolicy = param[3];
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+/* Read back one WMM/WME parameter; argument layout matches
+ * setwmmparams (param[1] = access class, param[2] = BSS vs channel
+ * set).  Result is written into param[0]. */
+static int
+ieee80211_ioctl_getwmmparams(struct net_device *dev,
+	struct iw_request_info *info, void *w, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	int *param = (int *) extra;
+	int ac = (param[1] >= 0 && param[1] < WME_NUM_AC) ?
+		param[1] : WME_AC_BE;
+	struct ieee80211_wme_state *wme = &vap->iv_ic->ic_wme;
+	struct chanAccParams *chanParams = (param[2] == 0) ? 
+		&(wme->wme_chanParams) : &(wme->wme_bssChanParams);
+
+	switch (param[0]) {
+        case IEEE80211_WMMPARAMS_CWMIN: 
+		param[0] = chanParams->cap_wmeParams[ac].wmep_logcwmin;
+		break;
+        case IEEE80211_WMMPARAMS_CWMAX: 
+		param[0] = chanParams->cap_wmeParams[ac].wmep_logcwmax;
+		break;
+        case IEEE80211_WMMPARAMS_AIFS: 
+		param[0] = chanParams->cap_wmeParams[ac].wmep_aifsn;
+		break;
+        case IEEE80211_WMMPARAMS_TXOPLIMIT: 
+		param[0] = IEEE80211_TXOP_TO_US(chanParams->cap_wmeParams[ac].wmep_txopLimit);
+		break;
+        case IEEE80211_WMMPARAMS_ACM: 
+		param[0] = wme->wme_wmeBssChanParams.cap_wmeParams[ac].wmep_acm;
+		break;
+        case IEEE80211_WMMPARAMS_NOACKPOLICY: 
+		param[0] = wme->wme_wmeChanParams.cap_wmeParams[ac].wmep_noackPolicy;
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+/*
+ * Return the WPA and RSN IEs cached for the station whose MAC address
+ * is passed in from userspace (only the first IEEE80211_ADDR_LEN bytes
+ * of the request are read; the rest of the struct is output only).
+ */
+static int
+ieee80211_ioctl_getwpaie(struct net_device *dev, struct iwreq *iwr)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni;
+	struct ieee80211req_wpaie wpaie;
+
+	if (iwr->u.data.length != sizeof(wpaie))
+		return -EINVAL;
+	if (copy_from_user(&wpaie, iwr->u.data.pointer, IEEE80211_ADDR_LEN))
+		return -EFAULT;
+	ni = ieee80211_find_node(&ic->ic_sta, wpaie.wpa_macaddr);
+	if (ni == NULL)
+		return -EINVAL;		/* XXX */
+	memset(wpaie.wpa_ie, 0, sizeof(wpaie.wpa_ie));
+	if (ni->ni_wpa_ie != NULL) {
+		/* IE length byte + 2 for the tag/len header; clamp to buffer */
+		int ielen = ni->ni_wpa_ie[1] + 2;
+		if (ielen > sizeof(wpaie.wpa_ie))
+			ielen = sizeof(wpaie.wpa_ie);
+		memcpy(wpaie.wpa_ie, ni->ni_wpa_ie, ielen);
+	}
+	if (ni->ni_rsn_ie != NULL) {
+		int ielen = ni->ni_rsn_ie[1] + 2;
+		if (ielen > sizeof(wpaie.rsn_ie))
+			ielen = sizeof(wpaie.rsn_ie);
+		memcpy(wpaie.rsn_ie, ni->ni_rsn_ie, ielen);
+	}
+	/* drop the reference taken by ieee80211_find_node() */
+	ieee80211_free_node(ni);
+	return (copy_to_user(iwr->u.data.pointer, &wpaie, sizeof(wpaie)) ?
+		-EFAULT : 0);
+}
+
+/*
+ * Copy out the statistics block (ni_stats) of the station identified
+ * by the MAC address at the start of the user buffer.  The statistics
+ * are written after the MAC address (at offset 'off') in the same
+ * user buffer.
+ */
+static int
+ieee80211_ioctl_getstastats(struct net_device *dev, struct iwreq *iwr)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211_node *ni;
+	u_int8_t macaddr[IEEE80211_ADDR_LEN];
+	const int off = __offsetof(struct ieee80211req_sta_stats, is_stats);
+	int error;
+
+	if (iwr->u.data.length < off)
+		return -EINVAL;
+	if (copy_from_user(macaddr, iwr->u.data.pointer, IEEE80211_ADDR_LEN))
+		return -EFAULT;
+	ni = ieee80211_find_node(&ic->ic_sta, macaddr);
+	if (ni == NULL)
+		return -EINVAL;		/* XXX */
+	if (iwr->u.data.length > sizeof(struct ieee80211req_sta_stats))
+		iwr->u.data.length = sizeof(struct ieee80211req_sta_stats);
+	/* NB: copy out only the statistics */
+	error = copy_to_user(iwr->u.data.pointer + off, &ni->ni_stats,
+		iwr->u.data.length - off);
+	ieee80211_free_node(ni);
+	return (error ? -EFAULT : 0);
+}
+
+/* Cursor for the two-pass scan-result copy-out below. */
+struct scanreq {			/* XXX: right place for declaration? */
+	struct ieee80211req_scan_result *sr;	/* next record to fill */
+	size_t space;				/* bytes remaining in buffer */
+};
+
+/*
+ * Compute the space one scan entry occupies in the copy-out buffer:
+ * fixed header + SSID + all cached IEs, rounded up to 32-bit alignment.
+ * The total IE length is also returned through *ielen.
+ */
+static size_t
+scan_space(const struct ieee80211_scan_entry *se, int *ielen)
+{
+	*ielen = 0;
+	if (se->se_rsn_ie != NULL)
+		*ielen += 2 + se->se_rsn_ie[1];
+	if (se->se_wpa_ie != NULL)
+		*ielen += 2 + se->se_wpa_ie[1];
+	if (se->se_wme_ie != NULL)
+		*ielen += 2 + se->se_wme_ie[1];
+	if (se->se_ath_ie != NULL)
+		*ielen += 2 + se->se_ath_ie[1];
+	return roundup(sizeof(struct ieee80211req_scan_result) +
+		se->se_ssid[1] + *ielen, sizeof(u_int32_t));
+}
+
+/* Scan-iteration callback (pass 1): accumulate required buffer space. */
+static int
+get_scan_space(void *arg, const struct ieee80211_scan_entry *se)
+{
+	struct scanreq *req = arg;
+	int ielen;
+
+	req->space += scan_space(se, &ielen);
+
+	return 0;
+}
+
+/*
+ * Scan-iteration callback (pass 2): serialize one scan entry into the
+ * buffer at req->sr and advance the cursor.  Entries that no longer fit
+ * (the scan list may have grown between the two passes) are skipped.
+ */
+static int
+get_scan_result(void *arg, const struct ieee80211_scan_entry *se)
+{
+	struct scanreq *req = arg;
+	struct ieee80211req_scan_result *sr;
+	int ielen, len, nr, nxr;
+	u_int8_t *cp;
+
+	len = scan_space(se, &ielen);
+	if (len > req->space) {
+	  /* NOTE(review): printk without a KERN_ level; consider KERN_DEBUG */
+	  printk("[madwifi] %s() : Not enough space.\n", __FUNCTION__);
+		return 0;
+	}
+
+	sr = req->sr;
+	memset(sr, 0, sizeof(*sr));
+	sr->isr_ssid_len = se->se_ssid[1];
+	/* XXX watch for overflow */
+	sr->isr_ie_len = ielen;
+	sr->isr_len = len;
+	sr->isr_freq = se->se_chan->ic_freq;
+	sr->isr_flags = se->se_chan->ic_flags;
+	sr->isr_rssi = se->se_rssi;
+	sr->isr_intval = se->se_intval;
+	sr->isr_capinfo = se->se_capinfo;
+	sr->isr_erp = se->se_erp;
+	IEEE80211_ADDR_COPY(sr->isr_bssid, se->se_bssid);
+	/* XXX bounds check */
+	nr = se->se_rates[1];
+	memcpy(sr->isr_rates, se->se_rates + 2, nr);
+	nxr = se->se_xrates[1];
+	memcpy(sr->isr_rates+nr, se->se_xrates + 2, nxr);
+	sr->isr_nrates = nr + nxr;
+
+	/* variable-length data follows the fixed record: SSID then IEs */
+	cp = (u_int8_t *)(sr + 1);
+	memcpy(cp, se->se_ssid + 2, sr->isr_ssid_len);
+	cp += sr->isr_ssid_len;
+	if (se->se_rsn_ie != NULL) {
+		memcpy(cp, se->se_rsn_ie, 2 + se->se_rsn_ie[1]);
+		cp += 2 + se->se_rsn_ie[1];
+	}
+	if (se->se_wpa_ie != NULL) {
+		memcpy(cp, se->se_wpa_ie, 2 + se->se_wpa_ie[1]);
+		cp += 2 + se->se_wpa_ie[1];
+	}
+	if (se->se_wme_ie != NULL) {
+		memcpy(cp, se->se_wme_ie, 2 + se->se_wme_ie[1]);
+		cp += 2 + se->se_wme_ie[1];
+	}
+	if (se->se_ath_ie != NULL) {
+		memcpy(cp, se->se_ath_ie, 2 + se->se_ath_ie[1]);
+		cp += 2 + se->se_ath_ie[1];
+	}
+
+	req->space -= len;
+	req->sr = (struct ieee80211req_scan_result *)(((u_int8_t *)sr) + len);
+
+	return 0;
+}
+
+/*
+ * Handler for IEEE80211_IOCTL_SCAN_RESULTS: two passes over the scan
+ * cache — size everything, allocate a bounce buffer, fill it, then
+ * copy to userspace.
+ */
+static int
+ieee80211_ioctl_getscanresults(struct net_device *dev, struct iwreq *iwr)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct scanreq req;
+	int error;
+
+	if (iwr->u.data.length < sizeof(struct scanreq))
+		return -EFAULT;
+
+	error = 0;
+	req.space = 0;
+	ieee80211_scan_iterate(ic, get_scan_space, &req);
+	/* never copy out more than the caller's buffer can hold */
+	if (req.space > iwr->u.data.length)
+		req.space = iwr->u.data.length;
+	if (req.space > 0) {
+		size_t space;
+		void *p;
+
+		space = req.space;
+		MALLOC(p, void *, space, M_TEMP, M_WAITOK);
+		if (p == NULL)
+			return -ENOMEM;
+		req.sr = p;
+		ieee80211_scan_iterate(ic, get_scan_result, &req);
+		/* actual bytes produced = allocated - leftover */
+		iwr->u.data.length = space - req.space;
+		error = copy_to_user(iwr->u.data.pointer, p, iwr->u.data.length);
+		FREE(p, M_TEMP);
+	} else
+		iwr->u.data.length = 0;
+
+	return (error ? -EFAULT : 0);
+}
+
+/* Cursor for the two-pass station-info copy-out below. */
+struct stainforeq {		/* XXX: right place for declaration? */
+	struct ieee80211vap *vap;	/* vap whose stations we report */
+	struct ieee80211req_sta_info *si;	/* next record to fill */
+	size_t	space;			/* bytes remaining in buffer */
+};
+
+/*
+ * Compute the space one station record occupies: fixed header plus all
+ * cached IEs, rounded up to 32-bit alignment; IE total returned via *ielen.
+ */
+static size_t
+sta_space(const struct ieee80211_node *ni, size_t *ielen)
+{
+	*ielen = 0;
+	if (ni->ni_rsn_ie != NULL)
+		*ielen += 2+ni->ni_rsn_ie[1];
+	if (ni->ni_wpa_ie != NULL)
+		*ielen += 2+ni->ni_wpa_ie[1];
+	if (ni->ni_wme_ie != NULL)
+		*ielen += 2+ni->ni_wme_ie[1];
+	if (ni->ni_ath_ie != NULL)
+		*ielen += 2+ni->ni_ath_ie[1];
+	return roundup(sizeof(struct ieee80211req_sta_info) + *ielen,
+		      sizeof(u_int32_t));
+}
+
+/* Node-iteration callback (pass 1): accumulate required buffer space
+ * for stations belonging to the requested vap (or its XR companion). */
+static void
+get_sta_space(void *arg, struct ieee80211_node *ni)
+{
+	struct stainforeq *req = arg;
+	struct ieee80211vap *vap = ni->ni_vap;
+	size_t ielen;
+
+	if (vap != req->vap && vap != req->vap->iv_xrvap)	/* only entries for this vap */
+		return;
+	if ((vap->iv_opmode == IEEE80211_M_HOSTAP ||
+	     vap->iv_opmode == IEEE80211_M_WDS) &&
+	    ni->ni_associd == 0)				/* only associated stations or a WDS peer */
+		return;
+	req->space += sta_space(ni, &ielen);
+}
+
+/*
+ * Node-iteration callback (pass 2): serialize one station's info record
+ * (struct ieee80211req_sta_info followed by its IEs) at req->si and
+ * advance the cursor.  Filtering must match get_sta_space() so both
+ * passes see the same set of nodes.
+ */
+static void
+get_sta_info(void *arg, struct ieee80211_node *ni)
+{
+	struct stainforeq *req = arg;
+	struct ieee80211vap *vap = ni->ni_vap;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct ieee80211req_sta_info *si;
+	size_t ielen, len;
+	u_int8_t *cp;
+
+	if (vap != req->vap && vap != req->vap->iv_xrvap)	/* only entries for this vap (or) xrvap */
+		return;
+	if ((vap->iv_opmode == IEEE80211_M_HOSTAP ||
+	     vap->iv_opmode == IEEE80211_M_WDS) &&
+	    ni->ni_associd == 0)				/* only associated stations or a WDS peer */
+		return;
+	if (ni->ni_chan == IEEE80211_CHAN_ANYC)			/* XXX bogus entry */
+		return;
+	len = sta_space(ni, &ielen);
+	if (len > req->space)
+		return;
+	si = req->si;
+	si->isi_len = len;
+	si->isi_ie_len = ielen;
+	si->isi_freq = ni->ni_chan->ic_freq;
+	si->isi_flags = ni->ni_chan->ic_flags;
+	si->isi_state = ni->ni_flags;
+	si->isi_authmode = ni->ni_authmode;
+	si->isi_rssi = ic->ic_node_getrssi(ni);
+	si->isi_capinfo = ni->ni_capinfo;
+	si->isi_athflags = ni->ni_ath_flags;
+	si->isi_erp = ni->ni_erp;
+	IEEE80211_ADDR_COPY(si->isi_macaddr, ni->ni_macaddr);
+	/* the isi_rates array holds at most 15 entries */
+	si->isi_nrates = ni->ni_rates.rs_nrates;
+	if (si->isi_nrates > 15)
+		si->isi_nrates = 15;
+	memcpy(si->isi_rates, ni->ni_rates.rs_rates, si->isi_nrates);
+	si->isi_txrate = ni->ni_txrate;
+	si->isi_ie_len = ielen;
+	si->isi_associd = ni->ni_associd;
+	si->isi_txpower = ni->ni_txpower;
+	si->isi_vlan = ni->ni_vlan;
+	/* QoS stations track per-TID sequence numbers; others only TID 0 */
+	if (ni->ni_flags & IEEE80211_NODE_QOS) {
+		memcpy(si->isi_txseqs, ni->ni_txseqs, sizeof(ni->ni_txseqs));
+		memcpy(si->isi_rxseqs, ni->ni_rxseqs, sizeof(ni->ni_rxseqs));
+	} else {
+		si->isi_txseqs[0] = ni->ni_txseqs[0];
+		si->isi_rxseqs[0] = ni->ni_rxseqs[0];
+	}
+	si->isi_uapsd = ni->ni_uapsd;
+	if ( vap == req->vap->iv_xrvap)		
+		si->isi_opmode = IEEE80211_STA_OPMODE_XR;
+	else
+		si->isi_opmode = IEEE80211_STA_OPMODE_NORMAL;
+	/* NB: leave all cases in case we relax ni_associd == 0 check */
+	if (ieee80211_node_is_authorized(ni))
+		si->isi_inact = vap->iv_inact_run;
+	else if (ni->ni_associd != 0)
+		si->isi_inact = vap->iv_inact_auth;
+	else
+		si->isi_inact = vap->iv_inact_init;
+	si->isi_inact = (si->isi_inact - ni->ni_inact) * IEEE80211_INACT_WAIT;
+
+	/* IEs follow the fixed record */
+	cp = (u_int8_t *)(si+1);
+	if (ni->ni_rsn_ie != NULL) {
+		memcpy(cp, ni->ni_rsn_ie, 2 + ni->ni_rsn_ie[1]);
+		cp += 2 + ni->ni_rsn_ie[1];
+        }
+	if (ni->ni_wpa_ie != NULL) {
+		memcpy(cp, ni->ni_wpa_ie, 2 + ni->ni_wpa_ie[1]);
+		cp += 2 + ni->ni_wpa_ie[1];
+	}
+	if (ni->ni_wme_ie != NULL) {
+		memcpy(cp, ni->ni_wme_ie, 2 + ni->ni_wme_ie[1]);
+		cp += 2 + ni->ni_wme_ie[1];
+	}
+	if (ni->ni_ath_ie != NULL) {
+		memcpy(cp, ni->ni_ath_ie, 2 + ni->ni_ath_ie[1]);
+		cp += 2 + ni->ni_ath_ie[1];
+	}
+
+	req->si = (struct ieee80211req_sta_info *)(((u_int8_t *)si) + len);
+	req->space -= len;
+}
+
+/*
+ * Handler for IEEE80211_IOCTL_STA_INFO: copy station records (struct
+ * ieee80211req_sta_info plus IEs) for all of this vap's stations into
+ * the user buffer.  Two passes: size estimate, then fill a bounce
+ * buffer and copy it out.
+ */
+static int
+ieee80211_ioctl_getstainfo(struct net_device *dev, struct iwreq *iwr)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	struct stainforeq req;
+	int error;
+
+	if (iwr->u.data.length < sizeof(struct stainforeq))
+		return -EFAULT;
+
+	/* estimate space required for station info */
+	error = 0;
+	req.space = sizeof(struct stainforeq);
+	req.vap = vap;
+	ieee80211_iterate_nodes(&ic->ic_sta, get_sta_space, &req);
+	if (req.space > iwr->u.data.length)
+		req.space = iwr->u.data.length;
+	if (req.space > 0) {
+		size_t space;
+		void *p;
+
+		space = req.space;
+		MALLOC(p, void *, space, M_TEMP, M_WAITOK);
+		if (p == NULL)		/* allocation can fail; match getscanresults */
+			return -ENOMEM;
+		req.si = (struct ieee80211req_sta_info *)p;
+		ieee80211_iterate_nodes(&ic->ic_sta, get_sta_info, &req);
+		/* actual bytes produced = allocated - leftover */
+		iwr->u.data.length = space - req.space;
+		error = copy_to_user(iwr->u.data.pointer, p, iwr->u.data.length);
+		FREE(p, M_TEMP);
+	} else
+		iwr->u.data.length = 0;
+
+	return (error ? -EFAULT : 0);
+}
+
+/* Arm a pre-announced (802.11h) channel switch: record the target
+ * channel and TBTT count, and flag the beacon to carry a CSA IE. */
+static void
+pre_announced_chanswitch(struct net_device *dev, u_int32_t channel, u_int32_t tbtt) {
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	/* now flag the beacon update to include the channel switch IE */
+	ic->ic_flags |= IEEE80211_F_CHANSWITCH;
+	ic->ic_chanchange_chan = channel;
+	ic->ic_chanchange_tbtt = tbtt;
+}
+
+/*
+ * Ioctl entry point for a manual channel switch announcement.
+ * param[0] = target channel, param[1] = TBTT count before switching.
+ * Silently ignored unless 802.11h (DOTH) support is enabled.
+ */
+static int
+ieee80211_ioctl_chanswitch(struct net_device *dev, struct iw_request_info *info,
+	void *w, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct ieee80211com *ic = vap->iv_ic;
+	int *param = (int *) extra;
+
+	if (!(ic->ic_flags & IEEE80211_F_DOTH))
+		return 0;
+
+	pre_announced_chanswitch(dev, param[0], param[1]);
+	
+	return 0;
+}
+
+#if WIRELESS_EXT >= 18
+/*
+ * SIOCSIWMLME: translate a wireless-extensions MLME request
+ * (deauth/disassoc) into the madwifi-private setmlme format and
+ * forward it.  Other MLME operations are rejected.
+ */
+static int
+ieee80211_ioctl_siwmlme(struct net_device *dev,
+	struct iw_request_info *info, struct iw_point *erq, char *data)
+{
+	struct ieee80211req_mlme mlme;
+	struct iw_mlme *wextmlme = (struct iw_mlme *)data;
+
+	memset(&mlme, 0, sizeof(mlme));
+
+	switch(wextmlme->cmd) {
+	case IW_MLME_DEAUTH:
+		mlme.im_op = IEEE80211_MLME_DEAUTH;
+		break;
+	case IW_MLME_DISASSOC:
+		mlme.im_op = IEEE80211_MLME_DISASSOC;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	mlme.im_reason = wextmlme->reason_code;
+
+	memcpy(mlme.im_macaddr, wextmlme->addr.sa_data, IEEE80211_ADDR_LEN);
+
+	return ieee80211_ioctl_setmlme(dev, NULL, NULL, (char*)&mlme);
+}
+
+
+/* SIOCGIWGENIE: return the configured opt IE via the private getoptie
+ * handler, after verifying the caller's buffer is large enough. */
+static int
+ieee80211_ioctl_giwgenie(struct net_device *dev,
+	struct iw_request_info *info, struct iw_point *out, char *buf)
+{
+	struct ieee80211vap *vap = dev->priv;
+
+	if (out->length < vap->iv_opt_ie_len)
+		return -E2BIG;
+
+	return ieee80211_ioctl_getoptie(dev, info, out, buf);
+}
+
+/* SIOCSIWGENIE: thin wrapper around the private setoptie handler. */
+static int
+ieee80211_ioctl_siwgenie(struct net_device *dev,
+	struct iw_request_info *info, struct iw_point *erq, char *data)
+{
+	return ieee80211_ioctl_setoptie(dev, info, erq, data);
+}
+
+
+/* SIOCSIWAUTH / IW_AUTH_WPA_VERSION: map the WE version flag bits onto
+ * the madwifi WPA param (0 = off, 1 = WPA1, 2 = WPA2, 3 = both). */
+static int
+siwauth_wpa_version(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int ver = erq->value;
+	int args[2];
+
+	args[0] = IEEE80211_PARAM_WPA;
+
+	if ((ver & IW_AUTH_WPA_VERSION_WPA) && (ver & IW_AUTH_WPA_VERSION_WPA2))
+		args[1] = 3;
+	else if (ver & IW_AUTH_WPA_VERSION_WPA2)
+		args[1] = 2;
+	else if (ver & IW_AUTH_WPA_VERSION_WPA)
+		args[1] = 1;
+	else
+		args[1] = 0;
+
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+/* Map a WE IW_AUTH_CIPHER_* code to the net80211 cipher id, or -1 if
+ * there is no equivalent. */
+static int
+iwcipher2ieee80211cipher(int iwciph)
+{
+	switch(iwciph) {
+	case IW_AUTH_CIPHER_NONE:
+		return IEEE80211_CIPHER_NONE;
+	case IW_AUTH_CIPHER_WEP40:
+	case IW_AUTH_CIPHER_WEP104:
+		return IEEE80211_CIPHER_WEP;
+	case IW_AUTH_CIPHER_TKIP:
+		return IEEE80211_CIPHER_TKIP;
+	case IW_AUTH_CIPHER_CCMP:
+		return IEEE80211_CIPHER_AES_CCM;
+	}
+	return -1;
+}
+
+/* Inverse of iwcipher2ieee80211cipher(); WEP always reports the 104-bit
+ * code since net80211 does not distinguish key lengths here. */
+static int
+ieee80211cipher2iwcipher(int ieee80211ciph)
+{
+	switch(ieee80211ciph) {
+	case IEEE80211_CIPHER_NONE:
+		return IW_AUTH_CIPHER_NONE;
+	case IEEE80211_CIPHER_WEP:
+		return IW_AUTH_CIPHER_WEP104;
+	case IEEE80211_CIPHER_TKIP:
+		return IW_AUTH_CIPHER_TKIP;
+	case IEEE80211_CIPHER_AES_CCM:
+		return IW_AUTH_CIPHER_CCMP;
+	}
+	return -1;
+}
+
+/* TODO We don't enforce wep key lengths. */
+static int
+siwauth_cipher_pairwise(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int iwciph = erq->value;
+	int args[2];
+
+	args[0] = IEEE80211_PARAM_UCASTCIPHER;
+	args[1] = iwcipher2ieee80211cipher(iwciph);
+	if (args[1] < 0) {
+		printk(KERN_WARNING "%s: unknown pairwise cipher %d\n", 
+		       dev->name, iwciph);
+		return -EINVAL;
+	}
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+/* TODO We don't enforce wep key lengths. */
+/* SIOCSIWAUTH / IW_AUTH_CIPHER_GROUP: set the multicast cipher. */
+static int
+siwauth_cipher_group(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int iwciph = erq->value;
+	int args[2];
+
+	args[0] = IEEE80211_PARAM_MCASTCIPHER;
+	args[1] = iwcipher2ieee80211cipher(iwciph);
+	if (args[1] < 0) {
+		printk(KERN_WARNING "%s: unknown group cipher %d\n", 
+		       dev->name, iwciph);
+		return -EINVAL;
+	}
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+/* SIOCSIWAUTH / IW_AUTH_KEY_MGMT: translate WE key-management flags
+ * (802.1X / PSK) into the madwifi WPA_ASE_* bitmask. */
+static int
+siwauth_key_mgmt(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int iwkm = erq->value;
+	int args[2];
+
+	args[0] = IEEE80211_PARAM_KEYMGTALGS;
+	args[1] = WPA_ASE_NONE;
+	if (iwkm & IW_AUTH_KEY_MGMT_802_1X) 
+		args[1] |= WPA_ASE_8021X_UNSPEC;
+	if (iwkm & IW_AUTH_KEY_MGMT_PSK)
+		args[1] |= WPA_ASE_8021X_PSK;
+
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+/* SIOCSIWAUTH / IW_AUTH_TKIP_COUNTERMEASURES: pass through verbatim. */
+static int
+siwauth_tkip_countermeasures(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int args[2];
+	args[0] = IEEE80211_PARAM_COUNTERMEASURES;
+	args[1] = erq->value;
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+/* SIOCSIWAUTH / IW_AUTH_DROP_UNENCRYPTED: pass through verbatim. */
+static int
+siwauth_drop_unencrypted(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int args[2];
+	args[0] = IEEE80211_PARAM_DROPUNENCRYPTED;
+	args[1] = erq->value;
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+
+/* SIOCSIWAUTH / IW_AUTH_80211_AUTH_ALG: map WE auth-algorithm flags to
+ * a madwifi auth mode.  LEAP wins over shared/open; shared+open means
+ * automatic selection. */
+static int
+siwauth_80211_auth_alg(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+#define VALID_ALGS_MASK (IW_AUTH_ALG_OPEN_SYSTEM|IW_AUTH_ALG_SHARED_KEY|IW_AUTH_ALG_LEAP)
+	int mode = erq->value;
+	int args[2];
+
+	args[0] = IEEE80211_PARAM_AUTHMODE;
+
+	if (mode & ~VALID_ALGS_MASK) {
+		return -EINVAL;
+	}
+	if (mode & IW_AUTH_ALG_LEAP) {
+		args[1] = IEEE80211_AUTH_8021X;
+	} else if ((mode & IW_AUTH_ALG_SHARED_KEY) &&
+		  (mode & IW_AUTH_ALG_OPEN_SYSTEM)) {
+		args[1] = IEEE80211_AUTH_AUTO;
+	} else if (mode & IW_AUTH_ALG_SHARED_KEY) {
+		args[1] = IEEE80211_AUTH_SHARED;
+	} else {
+		args[1] = IEEE80211_AUTH_OPEN;
+	}
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+/* SIOCSIWAUTH / IW_AUTH_WPA_ENABLED: enable (WPA1+WPA2) or disable WPA. */
+static int
+siwauth_wpa_enabled(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int enabled = erq->value;
+	int args[2];
+
+	args[0] = IEEE80211_PARAM_WPA;
+	if (enabled) 
+		args[1] = 3; /* enable WPA1 and WPA2 */
+	else
+		args[1] = 0; /* disable WPA1 and WPA2 */
+
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+/* SIOCSIWAUTH / IW_AUTH_RX_UNENCRYPTED_EAPOL: normalize to 0/1 and set
+ * the drop-unencrypted-EAPOL parameter. */
+static int
+siwauth_rx_unencrypted_eapol(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int rxunenc = erq->value;
+	int args[2];
+
+	args[0] = IEEE80211_PARAM_DROPUNENC_EAPOL;
+	if (rxunenc) 
+		args[1] = 1;
+	else
+		args[1] = 0;
+
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+/* SIOCSIWAUTH / IW_AUTH_ROAMING_CONTROL: enable maps to AUTO roaming,
+ * disable to MANUAL; anything else is rejected. */
+static int
+siwauth_roaming_control(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int roam = erq->value;
+	int args[2];
+
+	args[0] = IEEE80211_PARAM_ROAMING;
+	switch(roam) {
+	case IW_AUTH_ROAMING_ENABLE:
+		args[1] = IEEE80211_ROAMING_AUTO;
+		break;
+	case IW_AUTH_ROAMING_DISABLE:
+		args[1] = IEEE80211_ROAMING_MANUAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+/* SIOCSIWAUTH / IW_AUTH_PRIVACY_INVOKED: pass through verbatim. */
+static int
+siwauth_privacy_invoked(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int args[2];
+	args[0] = IEEE80211_PARAM_PRIVACY;
+	args[1] = erq->value;
+	return ieee80211_ioctl_setparam(dev, NULL, NULL, (char*)args);
+}
+
+/* 
+ * If this function is invoked it means someone is using the wireless extensions
+ * API instead of the private madwifi ioctls.  That's fine.  We translate their
+ * request into the format used by the private ioctls.  Note that the 
+ * iw_request_info and iw_param structures are not the same ones as the 
+ * private ioctl handler expects.  Luckily, the private ioctl handler doesn't
+ * do anything with those at the moment.  We pass NULL for those, because in 
+ * case someone does modify the ioctl handler to use those values, a null 
+ * pointer will be easier to debug than other bad behavior.
+ */
+static int
+ieee80211_ioctl_siwauth(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int rc = -EINVAL;
+
+	/* dispatch on the low bits of flags (the IW_AUTH parameter index) */
+	switch(erq->flags & IW_AUTH_INDEX) {
+	case IW_AUTH_WPA_VERSION:
+		rc = siwauth_wpa_version(dev, info, erq, buf);
+		break;
+	case IW_AUTH_CIPHER_PAIRWISE:
+		rc = siwauth_cipher_pairwise(dev, info, erq, buf);
+		break;
+	case IW_AUTH_CIPHER_GROUP:
+		rc = siwauth_cipher_group(dev, info, erq, buf);
+		break;
+	case IW_AUTH_KEY_MGMT:
+		rc = siwauth_key_mgmt(dev, info, erq, buf);
+		break;
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		rc = siwauth_tkip_countermeasures(dev, info, erq, buf);
+		break;
+	case IW_AUTH_DROP_UNENCRYPTED:
+		rc = siwauth_drop_unencrypted(dev, info, erq, buf);
+		break;
+	case IW_AUTH_80211_AUTH_ALG:
+		rc = siwauth_80211_auth_alg(dev, info, erq, buf);
+		break;
+	case IW_AUTH_WPA_ENABLED:
+		rc = siwauth_wpa_enabled(dev, info, erq, buf);
+		break;
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		rc = siwauth_rx_unencrypted_eapol(dev, info, erq, buf);
+		break;
+	case IW_AUTH_ROAMING_CONTROL:
+		rc = siwauth_roaming_control(dev, info, erq, buf);
+		break;
+	case IW_AUTH_PRIVACY_INVOKED:
+		rc = siwauth_privacy_invoked(dev, info, erq, buf);
+		break;
+	default:
+		printk(KERN_WARNING "%s: unknown SIOCSIWAUTH flag %d\n",
+			dev->name, erq->flags);
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * SIOCGIWAUTH / IW_AUTH_WPA_VERSION: report which WPA versions are
+ * enabled, translating the madwifi IEEE80211_PARAM_WPA value
+ * (0 = off, 1 = WPA1, 2 = WPA2, 3 = both) into IW_AUTH_WPA_VERSION_*
+ * flag bits — the inverse of siwauth_wpa_version().
+ */
+static int
+giwauth_wpa_version(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int ver;
+	int rc;
+	int arg = IEEE80211_PARAM_WPA;
+
+	rc = ieee80211_ioctl_getparam(dev, NULL, NULL, (char*)&arg);
+	if (rc)
+		return rc;
+
+	switch(arg) {
+	case 1:
+	    	ver = IW_AUTH_WPA_VERSION_WPA;
+		break;
+	case 2:
+	    	ver = IW_AUTH_WPA_VERSION_WPA2;
+		break;
+	case 3:
+		/* was IW_AUTH_WPA_VERSION (the parameter index, 0), which
+		 * reported the wrong bitmask when both versions are on */
+	    	ver = IW_AUTH_WPA_VERSION_WPA|IW_AUTH_WPA_VERSION_WPA2;
+		break;
+	default:
+		ver = IW_AUTH_WPA_VERSION_DISABLED;
+		break;
+	}
+
+	erq->value = ver;
+	return rc;
+}
+
+/* SIOCGIWAUTH / IW_AUTH_CIPHER_PAIRWISE: report the unicast cipher. */
+static int
+giwauth_cipher_pairwise(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int rc;
+	int arg = IEEE80211_PARAM_UCASTCIPHER;
+
+	rc = ieee80211_ioctl_getparam(dev, NULL, NULL, (char*)&arg);
+	if (rc)
+		return rc;
+
+	erq->value = ieee80211cipher2iwcipher(arg);
+	if (erq->value < 0)
+		return -EINVAL;
+	return 0;
+}
+
+
+/* SIOCGIWAUTH / IW_AUTH_CIPHER_GROUP: report the multicast cipher. */
+static int
+giwauth_cipher_group(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int rc;
+	int arg = IEEE80211_PARAM_MCASTCIPHER;
+
+	rc = ieee80211_ioctl_getparam(dev, NULL, NULL, (char*)&arg);
+	if (rc)
+		return rc;
+
+	erq->value = ieee80211cipher2iwcipher(arg);
+	if (erq->value < 0)
+		return -EINVAL;
+	return 0;
+}
+
+/* SIOCGIWAUTH / IW_AUTH_KEY_MGMT: translate the WPA_ASE_* bitmask back
+ * into WE key-management flags. */
+static int
+giwauth_key_mgmt(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int arg;
+	int rc;
+
+	arg = IEEE80211_PARAM_KEYMGTALGS;
+	rc = ieee80211_ioctl_getparam(dev, NULL, NULL, (char*)&arg);
+	if (rc) 
+		return rc;
+	erq->value = 0;
+	if (arg & WPA_ASE_8021X_UNSPEC)
+		erq->value |= IW_AUTH_KEY_MGMT_802_1X;
+	if (arg & WPA_ASE_8021X_PSK)
+		erq->value |= IW_AUTH_KEY_MGMT_PSK;
+	return 0;
+}
+
+/* SIOCGIWAUTH / IW_AUTH_TKIP_COUNTERMEASURES: report current setting. */
+static int
+giwauth_tkip_countermeasures(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int arg;
+	int rc;
+
+	arg = IEEE80211_PARAM_COUNTERMEASURES;
+	rc = ieee80211_ioctl_getparam(dev, NULL, NULL, (char*)&arg);
+	if (rc) 
+		return rc;
+	erq->value = arg;
+	return 0;
+}
+
+/* SIOCGIWAUTH / IW_AUTH_DROP_UNENCRYPTED: report current setting. */
+static int
+giwauth_drop_unencrypted(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int arg;
+	int rc;
+	arg = IEEE80211_PARAM_DROPUNENCRYPTED;
+	rc = ieee80211_ioctl_getparam(dev, NULL, NULL, (char*)&arg);
+	if (rc)
+		return rc;
+	erq->value = arg;
+	return 0;
+}
+
+/* SIOCGIWAUTH / IW_AUTH_80211_AUTH_ALG: not implemented. */
+static int
+giwauth_80211_auth_alg(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	return -EOPNOTSUPP;
+}
+
+/* SIOCGIWAUTH / IW_AUTH_WPA_ENABLED: report the raw WPA param value
+ * (0 = off, 1 = WPA1, 2 = WPA2, 3 = both). */
+static int
+giwauth_wpa_enabled(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int rc;
+	int arg = IEEE80211_PARAM_WPA;
+
+	rc = ieee80211_ioctl_getparam(dev, NULL, NULL, (char*)&arg);
+	if (rc)
+		return rc;
+
+	erq->value = arg;
+	return 0;
+
+}
+
+/* SIOCGIWAUTH / IW_AUTH_RX_UNENCRYPTED_EAPOL: not implemented. */
+static int
+giwauth_rx_unencrypted_eapol(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	return -EOPNOTSUPP;
+}
+
+/* SIOCGIWAUTH / IW_AUTH_ROAMING_CONTROL: DEVICE/AUTO report as enabled,
+ * everything else (MANUAL) as disabled. */
+static int
+giwauth_roaming_control(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int rc;
+	int arg;
+
+	arg = IEEE80211_PARAM_ROAMING;
+	rc = ieee80211_ioctl_getparam(dev, NULL, NULL, (char*)&arg);
+	if (rc)
+		return rc;
+
+	switch(arg) {
+	case IEEE80211_ROAMING_DEVICE:
+	case IEEE80211_ROAMING_AUTO:
+		erq->value = IW_AUTH_ROAMING_ENABLE;
+		break;
+	default:
+		erq->value = IW_AUTH_ROAMING_DISABLE;
+		break;
+	}
+
+	return 0;
+}
+
+/* SIOCGIWAUTH / IW_AUTH_PRIVACY_INVOKED: report current setting. */
+static int
+giwauth_privacy_invoked(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int rc;
+	int arg;
+	arg = IEEE80211_PARAM_PRIVACY;
+	rc = ieee80211_ioctl_getparam(dev, NULL, NULL, (char*)&arg);
+	if (rc)
+		return rc;
+	erq->value = arg;
+	return 0;
+}
+
+/*
+ * SIOCGIWAUTH entry point: dispatch on the IW_AUTH parameter index to
+ * the matching giwauth_* helper (mirror of ieee80211_ioctl_siwauth).
+ */
+static int
+ieee80211_ioctl_giwauth(struct net_device *dev,
+	struct iw_request_info *info, struct iw_param *erq, char *buf)
+{
+	int rc = -EOPNOTSUPP;
+
+	switch(erq->flags & IW_AUTH_INDEX) {
+	case IW_AUTH_WPA_VERSION:
+		rc = giwauth_wpa_version(dev, info, erq, buf);
+		break;
+	case IW_AUTH_CIPHER_PAIRWISE:
+		rc = giwauth_cipher_pairwise(dev, info, erq, buf);
+		break;
+	case IW_AUTH_CIPHER_GROUP:
+		rc = giwauth_cipher_group(dev, info, erq, buf);
+		break;
+	case IW_AUTH_KEY_MGMT:
+		rc = giwauth_key_mgmt(dev, info, erq, buf);
+		break;
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		rc = giwauth_tkip_countermeasures(dev, info, erq, buf);
+		break;
+	case IW_AUTH_DROP_UNENCRYPTED:
+		rc = giwauth_drop_unencrypted(dev, info, erq, buf);
+		break;
+	case IW_AUTH_80211_AUTH_ALG:
+		rc = giwauth_80211_auth_alg(dev, info, erq, buf);
+		break;
+	case IW_AUTH_WPA_ENABLED:
+		rc = giwauth_wpa_enabled(dev, info, erq, buf);
+		break;
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		rc = giwauth_rx_unencrypted_eapol(dev, info, erq, buf);
+		break;
+	case IW_AUTH_ROAMING_CONTROL:
+		rc = giwauth_roaming_control(dev, info, erq, buf);
+		break;
+	case IW_AUTH_PRIVACY_INVOKED:
+		rc = giwauth_privacy_invoked(dev, info, erq, buf);
+		break;
+	default:
+		printk(KERN_WARNING "%s: unknown SIOCGIWAUTH flag %d\n",
+			dev->name, erq->flags);
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * Retrieve information about a key.  Open question: should we allow
+ * callers to retrieve unicast keys based on a supplied MAC address?
+ * The ipw2200 reference implementation doesn't, so we don't either.
+ */
+static int
+ieee80211_ioctl_giwencodeext(struct net_device *dev, 
+	struct iw_request_info *info, struct iw_point *erq, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct iw_encode_ext *ext;
+	struct ieee80211_key *wk;
+	int error;
+	int kid;
+	int max_key_len;
+	
+	/* key material is sensitive; restrict to privileged callers */
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* key bytes are appended after the iw_encode_ext header */
+	max_key_len = erq->length - sizeof(*ext);
+	if (max_key_len < 0) 
+		return -EINVAL;
+	ext = (struct iw_encode_ext *)extra;
+
+	error = getiwkeyix(vap, erq, &kid);
+	if (error < 0)
+		return error;
+
+	wk = &vap->iv_nw_keys[kid];
+	if (wk->wk_keylen > max_key_len)
+		return -E2BIG;
+
+	/* WE convention: key index is reported 1-based in flags */
+	erq->flags = kid+1;
+	memset(ext, 0, sizeof(*ext));
+
+	ext->key_len = wk->wk_keylen;
+	memcpy(ext->key, wk->wk_key, wk->wk_keylen);
+
+	/* flags */
+	if (wk->wk_flags & IEEE80211_KEY_GROUP)
+		ext->ext_flags |= IW_ENCODE_EXT_GROUP_KEY;
+
+	/* algorithm */
+	switch(wk->wk_cipher->ic_cipher) {
+	case IEEE80211_CIPHER_NONE:
+		ext->alg = IW_ENCODE_ALG_NONE;
+		erq->flags |= IW_ENCODE_DISABLED;
+		break;
+	case IEEE80211_CIPHER_WEP:
+		ext->alg = IW_ENCODE_ALG_WEP;
+		break;
+	case IEEE80211_CIPHER_TKIP:
+		ext->alg = IW_ENCODE_ALG_TKIP;
+		break;
+	case IEEE80211_CIPHER_AES_OCB:
+	case IEEE80211_CIPHER_AES_CCM:
+	case IEEE80211_CIPHER_CKIP:
+		/* NOTE(review): OCB and CKIP are also reported as CCMP here
+		 * since WE has no codes for them */
+		ext->alg = IW_ENCODE_ALG_CCMP;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * SIOCSIWENCODEEXT: install, replace, or delete a key.  The request is
+ * translated into one of three private-ioctl forms: delkey (alg NONE),
+ * the legacy SIOCSIWENCODE path (static WEP with broadcast address),
+ * or setkey (everything else).
+ */
+static int
+ieee80211_ioctl_siwencodeext(struct net_device *dev,
+	struct iw_request_info *info, struct iw_point *erq, char *extra)
+{
+	struct ieee80211vap *vap = dev->priv;
+	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+	struct ieee80211req_key kr;
+	int error;
+	int kid;
+	error = getiwkeyix(vap, erq, &kid);
+	if (error < 0)
+		return error;
+
+	/* key bytes are appended after the iw_encode_ext header */
+	if (ext->key_len > (erq->length - sizeof(struct iw_encode_ext)))
+	   	return -EINVAL;
+
+	if (ext->alg == IW_ENCODE_ALG_NONE) {
+		/* convert to the format used by IEEE_80211_IOCTL_DELKEY */
+		struct ieee80211req_del_key dk;
+		
+		memset(&dk, 0, sizeof(dk));
+		dk.idk_keyix = kid;
+		memcpy(&dk.idk_macaddr, ext->addr.sa_data, IEEE80211_ADDR_LEN);
+
+		return ieee80211_ioctl_delkey(dev, NULL, NULL, (char*)&dk);
+	}
+
+	/* TODO This memcmp for the broadcast address seems hackish, but
+	 * mimics what wpa supplicant was doing.  The wpa supplicant comments
+	 * make it sound like they were having trouble with 
+	 * IEEE80211_IOCTL_SETKEY and static WEP keys.  It might be worth
+	 * figuring out what their trouble was so the rest of this function
+	 * can be implemented in terms of ieee80211_ioctl_setkey */
+	if (ext->alg == IW_ENCODE_ALG_WEP &&
+	    memcmp(ext->addr.sa_data, "\xff\xff\xff\xff\xff\xff", 
+		   IEEE80211_ADDR_LEN) == 0) {
+		/* convert to the format used by SIOCSIWENCODE.  The old
+		 * format just had the key in the extra buf, whereas the
+		 * new format has the key tacked on to the end of the
+		 * iw_encode_ext structure */
+		struct iw_request_info oldinfo;
+		struct iw_point olderq;
+		char *key;
+
+		memset(&oldinfo, 0, sizeof(oldinfo));
+		oldinfo.cmd = SIOCSIWENCODE;
+		oldinfo.flags = info->flags;
+
+		memset(&olderq, 0, sizeof(olderq));
+		olderq.flags = erq->flags;
+		olderq.pointer = erq->pointer;
+		olderq.length = ext->key_len;
+
+		key = ext->key;
+
+		return ieee80211_ioctl_siwencode(dev, &oldinfo, &olderq, key);
+	}
+
+	/* convert to the format used by IEEE_80211_IOCTL_SETKEY */
+	memset(&kr, 0, sizeof(kr));
+
+	switch(ext->alg) {
+	case IW_ENCODE_ALG_WEP:
+		kr.ik_type = IEEE80211_CIPHER_WEP;
+		break;
+	case IW_ENCODE_ALG_TKIP:
+		kr.ik_type = IEEE80211_CIPHER_TKIP;
+		break;
+	case IW_ENCODE_ALG_CCMP:
+		kr.ik_type = IEEE80211_CIPHER_AES_CCM;
+		break;
+	default:
+		printk(KERN_WARNING "%s: unknown algorithm %d\n",
+		       dev->name, ext->alg);
+		return -EINVAL;
+	}
+
+	kr.ik_keyix = kid;
+
+	if (ext->key_len > sizeof(kr.ik_keydata)) {
+		printk(KERN_WARNING "%s: key size %d is too large\n",
+		       dev->name, ext->key_len);
+		return -E2BIG;
+	}
+	memcpy(kr.ik_keydata, ext->key, ext->key_len);
+	kr.ik_keylen = ext->key_len;
+
+	kr.ik_flags = IEEE80211_KEY_RECV;
+
+	if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
+		kr.ik_flags |= IEEE80211_KEY_GROUP;
+
+	if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+		kr.ik_flags |= IEEE80211_KEY_XMIT | IEEE80211_KEY_DEFAULT;
+		memcpy(kr.ik_macaddr, ext->addr.sa_data, IEEE80211_ADDR_LEN);
+	}
+
+	/* seed the replay counter from the supplied RX sequence number */
+	if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
+		memcpy(&kr.ik_keyrsc, ext->rx_seq, sizeof(kr.ik_keyrsc));
+	}
+
+	return ieee80211_ioctl_setkey(dev, NULL, NULL, (char*)&kr);
+}
+#endif /* WIRELESS_EXT >= 18 */
+
+/* Argument-type descriptors for the private-ioctl table below: each
+ * encodes "byte buffer of the given request struct's size" for WE. */
+#define	IW_PRIV_TYPE_OPTIE	IW_PRIV_TYPE_BYTE | IEEE80211_MAX_OPT_IE
+#define	IW_PRIV_TYPE_KEY \
+	IW_PRIV_TYPE_BYTE | sizeof(struct ieee80211req_key)
+#define	IW_PRIV_TYPE_DELKEY \
+	IW_PRIV_TYPE_BYTE | sizeof(struct ieee80211req_del_key)
+#define	IW_PRIV_TYPE_MLME \
+	IW_PRIV_TYPE_BYTE | sizeof(struct ieee80211req_mlme)
+#define	IW_PRIV_TYPE_CHANLIST \
+	IW_PRIV_TYPE_BYTE | sizeof(struct ieee80211req_chanlist)
+#define	IW_PRIV_TYPE_CHANINFO \
+	IW_PRIV_TYPE_BYTE | sizeof(struct ieee80211req_chaninfo)
+#define IW_PRIV_TYPE_APPIEBUF \
+	(IW_PRIV_TYPE_BYTE | (sizeof(struct ieee80211req_getset_appiebuf) + IEEE80211_APPIE_MAX))
+#define IW_PRIV_TYPE_FILTER \
+	IW_PRIV_TYPE_BYTE | sizeof(struct ieee80211req_set_filter)
+
+/* Table describing the private ioctls (names and argument types/sizes)
+ * that is advertised to user space for iwpriv(8). */
+static const struct iw_priv_args ieee80211_priv_args[] = {
+	/* NB: setoptie & getoptie are !IW_PRIV_SIZE_FIXED */
+	{ IEEE80211_IOCTL_SETOPTIE,
+	  IW_PRIV_TYPE_OPTIE, 0,			"setoptie" },
+	{ IEEE80211_IOCTL_GETOPTIE,
+	  0, IW_PRIV_TYPE_OPTIE,			"getoptie" },
+	{ IEEE80211_IOCTL_SETKEY,
+	  IW_PRIV_TYPE_KEY | IW_PRIV_SIZE_FIXED, 0,	"setkey" },
+	{ IEEE80211_IOCTL_DELKEY,
+	  IW_PRIV_TYPE_DELKEY | IW_PRIV_SIZE_FIXED, 0,	"delkey" },
+	{ IEEE80211_IOCTL_SETMLME,
+	  IW_PRIV_TYPE_MLME | IW_PRIV_SIZE_FIXED, 0,	"setmlme" },
+	{ IEEE80211_IOCTL_ADDMAC,
+	  IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0,"addmac" },
+	{ IEEE80211_IOCTL_DELMAC,
+	  IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0,"delmac" },
+	{ IEEE80211_IOCTL_KICKMAC,
+	  IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "kickmac"},
+	{ IEEE80211_IOCTL_WDSADDMAC,
+	  IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0,"wds_add" },
+	{ IEEE80211_IOCTL_WDSDELMAC,
+	  IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0,"wds_del" },
+	{ IEEE80211_IOCTL_SETCHANLIST,
+	  IW_PRIV_TYPE_CHANLIST | IW_PRIV_SIZE_FIXED, 0,"setchanlist" },
+	{ IEEE80211_IOCTL_GETCHANLIST,
+	  0, IW_PRIV_TYPE_CHANLIST | IW_PRIV_SIZE_FIXED,"getchanlist" },
+	{ IEEE80211_IOCTL_GETCHANINFO,
+	  0, IW_PRIV_TYPE_CHANINFO | IW_PRIV_SIZE_FIXED,"getchaninfo" },
+	{ IEEE80211_IOCTL_SETMODE,
+	  IW_PRIV_TYPE_CHAR |  6, 0, "mode" },
+	{ IEEE80211_IOCTL_GETMODE,
+	  0, IW_PRIV_TYPE_CHAR | 6, "get_mode" },
+#if WIRELESS_EXT >= 12
+	{ IEEE80211_IOCTL_SETWMMPARAMS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 4, 0,"setwmmparams" },
+	{ IEEE80211_IOCTL_GETWMMPARAMS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,   "getwmmparams" },
+	/*
+	 * These depend on sub-ioctl support, which was added in version 12.
+	 */
+	{ IEEE80211_IOCTL_SETWMMPARAMS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0,"" },
+	{ IEEE80211_IOCTL_GETWMMPARAMS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,   "" },
+	/* sub-ioctl handlers */
+	{ IEEE80211_WMMPARAMS_CWMIN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0,"cwmin" },
+	{ IEEE80211_WMMPARAMS_CWMIN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,   "get_cwmin" },
+	{ IEEE80211_WMMPARAMS_CWMAX,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0,"cwmax" },
+	{ IEEE80211_WMMPARAMS_CWMAX,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,   "get_cwmax" },
+	{ IEEE80211_WMMPARAMS_AIFS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0,"aifs" },
+	{ IEEE80211_WMMPARAMS_AIFS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,   "get_aifs" },
+	{ IEEE80211_WMMPARAMS_TXOPLIMIT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0,"txoplimit" },
+	{ IEEE80211_WMMPARAMS_TXOPLIMIT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,   "get_txoplimit" },
+	{ IEEE80211_WMMPARAMS_ACM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0,"acm" },
+	{ IEEE80211_WMMPARAMS_ACM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,   "get_acm" },
+	{ IEEE80211_WMMPARAMS_NOACKPOLICY,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0,"noackpolicy" },
+	{ IEEE80211_WMMPARAMS_NOACKPOLICY,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,   "get_noackpolicy" },
+
+	{ IEEE80211_IOCTL_SETPARAM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "setparam" },
+	/*
+	 * These depend on sub-ioctl support, which was added in version 12.
+	 */
+	{ IEEE80211_IOCTL_GETPARAM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,	"getparam" },
+
+	/* sub-ioctl handlers */
+	{ IEEE80211_IOCTL_SETPARAM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "" },
+	{ IEEE80211_IOCTL_GETPARAM,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "" },
+
+	/* sub-ioctl definitions */
+	{ IEEE80211_PARAM_AUTHMODE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "authmode" },
+	{ IEEE80211_PARAM_AUTHMODE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_authmode" },
+	{ IEEE80211_PARAM_PROTMODE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "protmode" },
+	{ IEEE80211_PARAM_PROTMODE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_protmode" },
+	{ IEEE80211_PARAM_MCASTCIPHER,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mcastcipher" },
+	{ IEEE80211_PARAM_MCASTCIPHER,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_mcastcipher" },
+	{ IEEE80211_PARAM_MCASTKEYLEN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mcastkeylen" },
+	{ IEEE80211_PARAM_MCASTKEYLEN,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_mcastkeylen" },
+	{ IEEE80211_PARAM_UCASTCIPHERS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ucastciphers" },
+	{ IEEE80211_PARAM_UCASTCIPHERS,
+	/*
+	 * NB: can't use "get_ucastciphers" since iwpriv command names
+	 *     must be <IFNAMSIZ, which is 16.
+	 */
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_uciphers" },
+	{ IEEE80211_PARAM_UCASTCIPHER,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ucastcipher" },
+	{ IEEE80211_PARAM_UCASTCIPHER,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ucastcipher" },
+	{ IEEE80211_PARAM_UCASTKEYLEN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ucastkeylen" },
+	{ IEEE80211_PARAM_UCASTKEYLEN,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ucastkeylen" },
+	{ IEEE80211_PARAM_KEYMGTALGS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "keymgtalgs" },
+	{ IEEE80211_PARAM_KEYMGTALGS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_keymgtalgs" },
+	{ IEEE80211_PARAM_RSNCAPS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rsncaps" },
+	{ IEEE80211_PARAM_RSNCAPS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rsncaps" },
+	{ IEEE80211_PARAM_ROAMING,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "hostroaming" },
+	{ IEEE80211_PARAM_ROAMING,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_hostroaming" },
+	{ IEEE80211_PARAM_PRIVACY,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "privacy" },
+	{ IEEE80211_PARAM_PRIVACY,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_privacy" },
+	{ IEEE80211_PARAM_COUNTERMEASURES,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "countermeasures" },
+	{ IEEE80211_PARAM_COUNTERMEASURES,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_countermeas" },
+	{ IEEE80211_PARAM_DROPUNENCRYPTED,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dropunencrypted" },
+	{ IEEE80211_PARAM_DROPUNENCRYPTED,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_dropunencry" },
+	{ IEEE80211_PARAM_WPA,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wpa" },
+	{ IEEE80211_PARAM_WPA,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_wpa" },
+	{ IEEE80211_PARAM_DRIVER_CAPS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "driver_caps" },
+	{ IEEE80211_PARAM_DRIVER_CAPS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_driver_caps" },
+	{ IEEE80211_PARAM_MACCMD,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "maccmd" },
+	{ IEEE80211_PARAM_WMM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wmm" },
+	{ IEEE80211_PARAM_WMM,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_wmm" },
+	{ IEEE80211_PARAM_HIDESSID,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "hide_ssid" },
+	{ IEEE80211_PARAM_HIDESSID,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_hide_ssid" },
+	{ IEEE80211_PARAM_APBRIDGE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ap_bridge" },
+	{ IEEE80211_PARAM_APBRIDGE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ap_bridge" },
+	{ IEEE80211_PARAM_INACT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "inact" },
+	{ IEEE80211_PARAM_INACT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_inact" },
+	{ IEEE80211_PARAM_INACT_AUTH,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "inact_auth" },
+	{ IEEE80211_PARAM_INACT_AUTH,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_inact_auth" },
+	{ IEEE80211_PARAM_INACT_INIT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "inact_init" },
+	{ IEEE80211_PARAM_INACT_INIT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_inact_init" },
+	{ IEEE80211_PARAM_ABOLT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "abolt" },
+	{ IEEE80211_PARAM_ABOLT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_abolt" },
+	{ IEEE80211_PARAM_DTIM_PERIOD,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dtim_period" },
+	{ IEEE80211_PARAM_DTIM_PERIOD,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_dtim_period" },
+	/* XXX bintval chosen to avoid 16-char limit */
+	{ IEEE80211_PARAM_BEACON_INTERVAL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bintval" },
+	{ IEEE80211_PARAM_BEACON_INTERVAL,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bintval" },
+	{ IEEE80211_PARAM_DOTH,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "doth" },
+	{ IEEE80211_PARAM_DOTH,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_doth" },
+	{ IEEE80211_PARAM_PWRTARGET,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "doth_pwrtgt" },
+	{ IEEE80211_PARAM_PWRTARGET,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_doth_pwrtgt" },
+	{ IEEE80211_PARAM_GENREASSOC,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "doth_reassoc" },
+	{ IEEE80211_PARAM_COMPRESSION,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "compression" },
+	{ IEEE80211_PARAM_COMPRESSION,0,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_compression" },
+	{ IEEE80211_PARAM_FF,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ff" },
+	{ IEEE80211_PARAM_FF,0,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ff" },
+	{ IEEE80211_PARAM_TURBO,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "turbo" },
+	{ IEEE80211_PARAM_TURBO,0,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_turbo" },
+	{ IEEE80211_PARAM_XR,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "xr" },
+	{ IEEE80211_PARAM_XR,0,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_xr" },
+	{ IEEE80211_PARAM_BURST,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "burst" },
+	{ IEEE80211_PARAM_BURST,0,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_burst" },
+	{ IEEE80211_IOCTL_CHANSWITCH,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0,	"doth_chanswitch" },
+	{ IEEE80211_PARAM_PUREG,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "pureg" },
+	{ IEEE80211_PARAM_PUREG,0,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_pureg" },
+	{ IEEE80211_PARAM_AR,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ar" },
+	{ IEEE80211_PARAM_AR,0,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ar" },
+	{ IEEE80211_PARAM_WDS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wds" },
+	{ IEEE80211_PARAM_WDS,0,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_wds" },
+	{ IEEE80211_PARAM_BGSCAN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bgscan" },
+	{ IEEE80211_PARAM_BGSCAN,0,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bgscan" },
+	{ IEEE80211_PARAM_BGSCAN_IDLE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bgscanidle" },
+	{ IEEE80211_PARAM_BGSCAN_IDLE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bgscanidle" },
+	{ IEEE80211_PARAM_BGSCAN_INTERVAL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bgscanintvl" },
+	{ IEEE80211_PARAM_BGSCAN_INTERVAL,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_bgscanintvl" },
+	{ IEEE80211_PARAM_MCAST_RATE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "mcast_rate" },
+	{ IEEE80211_PARAM_MCAST_RATE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_mcast_rate" },
+	{ IEEE80211_PARAM_COVERAGE_CLASS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "coverageclass" },
+	{ IEEE80211_PARAM_COVERAGE_CLASS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_coveragecls" },
+	{ IEEE80211_PARAM_COUNTRY_IE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "countryie" },
+	{ IEEE80211_PARAM_COUNTRY_IE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_countryie" },
+	{ IEEE80211_PARAM_SCANVALID,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "scanvalid" },
+	{ IEEE80211_PARAM_SCANVALID,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_scanvalid" },
+	{ IEEE80211_PARAM_REGCLASS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "regclass" },
+	{ IEEE80211_PARAM_REGCLASS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_regclass" },
+	{ IEEE80211_PARAM_DROPUNENC_EAPOL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dropunenceapol" },
+	{ IEEE80211_PARAM_DROPUNENC_EAPOL,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_dropunencea" },
+	{ IEEE80211_PARAM_SHPREAMBLE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "shpreamble" },
+	{ IEEE80211_PARAM_SHPREAMBLE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_shpreamble" },
+	/*
+	 * NB: these should be roamrssi* etc, but iwpriv usurps all
+	 *     strings that start with roam!
+	 */
+	{ IEEE80211_PARAM_ROAM_RSSI_11A,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rssi11a" },
+	{ IEEE80211_PARAM_ROAM_RSSI_11A,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rssi11a" },
+	{ IEEE80211_PARAM_ROAM_RSSI_11B,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rssi11b" },
+	{ IEEE80211_PARAM_ROAM_RSSI_11B,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rssi11b" },
+	{ IEEE80211_PARAM_ROAM_RSSI_11G,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rssi11g" },
+	{ IEEE80211_PARAM_ROAM_RSSI_11G,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rssi11g" },
+	{ IEEE80211_PARAM_ROAM_RATE_11A,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rate11a" },
+	{ IEEE80211_PARAM_ROAM_RATE_11A,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rate11a" },
+	{ IEEE80211_PARAM_ROAM_RATE_11B,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rate11b" },
+	{ IEEE80211_PARAM_ROAM_RATE_11B,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rate11b" },
+	{ IEEE80211_PARAM_ROAM_RATE_11G,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rate11g" },
+	{ IEEE80211_PARAM_ROAM_RATE_11G,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_rate11g" },
+	{ IEEE80211_PARAM_UAPSDINFO,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "uapsd" },
+	{ IEEE80211_PARAM_UAPSDINFO,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_uapsd" },
+	{ IEEE80211_PARAM_SLEEP,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "sleep" },
+	{ IEEE80211_PARAM_SLEEP,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_sleep" },
+	{ IEEE80211_PARAM_QOSNULL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "qosnull" },
+	{ IEEE80211_PARAM_PSPOLL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "pspoll" },
+	{ IEEE80211_PARAM_EOSPDROP,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "eospdrop" },
+	{ IEEE80211_PARAM_EOSPDROP,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_eospdrop" },
+	{ IEEE80211_PARAM_MARKDFS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "markdfs" },
+	{ IEEE80211_PARAM_MARKDFS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_markdfs" },
+	{ IEEE80211_IOCTL_SET_APPIEBUF,
+	  IW_PRIV_TYPE_APPIEBUF, 0, "setiebuf" },
+	{ IEEE80211_IOCTL_GET_APPIEBUF,
+	  0, IW_PRIV_TYPE_APPIEBUF, "getiebuf" },
+	{ IEEE80211_IOCTL_FILTERFRAME,
+	  IW_PRIV_TYPE_FILTER , 0, "setfilter" },
+
+#endif /* WIRELESS_EXT >= 12 */
+};
+
+/* Standard wireless-extensions handlers, one slot per SIOC*IW* ordinal.
+ * NULL entries are either unimplemented or serviced by kernel code. */
+static const iw_handler ieee80211_handlers[] = {
+	(iw_handler) NULL,				/* SIOCSIWCOMMIT */
+	(iw_handler) ieee80211_ioctl_giwname,		/* SIOCGIWNAME */
+	(iw_handler) NULL,				/* SIOCSIWNWID */
+	(iw_handler) NULL,				/* SIOCGIWNWID */
+	(iw_handler) ieee80211_ioctl_siwfreq,		/* SIOCSIWFREQ */
+	(iw_handler) ieee80211_ioctl_giwfreq,		/* SIOCGIWFREQ */
+	(iw_handler) ieee80211_ioctl_siwmode,		/* SIOCSIWMODE */
+	(iw_handler) ieee80211_ioctl_giwmode,		/* SIOCGIWMODE */
+	(iw_handler) ieee80211_ioctl_siwsens,		/* SIOCSIWSENS */
+	(iw_handler) ieee80211_ioctl_giwsens,		/* SIOCGIWSENS */
+	(iw_handler) NULL /* not used */,		/* SIOCSIWRANGE */
+	(iw_handler) ieee80211_ioctl_giwrange,		/* SIOCGIWRANGE */
+	(iw_handler) NULL /* not used */,		/* SIOCSIWPRIV */
+	(iw_handler) NULL /* kernel code */,		/* SIOCGIWPRIV */
+	(iw_handler) NULL /* not used */,		/* SIOCSIWSTATS */
+	(iw_handler) NULL /* kernel code */,		/* SIOCGIWSTATS */
+	(iw_handler) ieee80211_ioctl_setspy,		/* SIOCSIWSPY */
+	(iw_handler) ieee80211_ioctl_getspy,		/* SIOCGIWSPY */
+#if WIRELESS_EXT >= 16
+	(iw_handler) ieee80211_ioctl_setthrspy,		/* SIOCSIWTHRSPY */
+	(iw_handler) ieee80211_ioctl_getthrspy,		/* SIOCGIWTHRSPY */
+#else
+	(iw_handler) NULL,				/* SIOCSIWTHRSPY */
+	(iw_handler) NULL,				/* SIOCGIWTHRSPY */
+#endif
+	(iw_handler) ieee80211_ioctl_siwap,		/* SIOCSIWAP */
+	(iw_handler) ieee80211_ioctl_giwap,		/* SIOCGIWAP */
+#ifdef SIOCSIWMLME
+	(iw_handler) ieee80211_ioctl_siwmlme,		/* SIOCSIWMLME */
+#else
+	(iw_handler) NULL,				/* -- hole -- */
+#endif
+	(iw_handler) ieee80211_ioctl_iwaplist,		/* SIOCGIWAPLIST */
+#ifdef SIOCGIWSCAN
+	(iw_handler) ieee80211_ioctl_siwscan,		/* SIOCSIWSCAN */
+	(iw_handler) ieee80211_ioctl_giwscan,		/* SIOCGIWSCAN */
+#else
+	(iw_handler) NULL,				/* SIOCSIWSCAN */
+	(iw_handler) NULL,				/* SIOCGIWSCAN */
+#endif /* SIOCGIWSCAN */
+	(iw_handler) ieee80211_ioctl_siwessid,		/* SIOCSIWESSID */
+	(iw_handler) ieee80211_ioctl_giwessid,		/* SIOCGIWESSID */
+	(iw_handler) ieee80211_ioctl_siwnickn,		/* SIOCSIWNICKN */
+	(iw_handler) ieee80211_ioctl_giwnickn,		/* SIOCGIWNICKN */
+	(iw_handler) NULL,				/* -- hole -- */
+	(iw_handler) NULL,				/* -- hole -- */
+	(iw_handler) ieee80211_ioctl_siwrate,		/* SIOCSIWRATE */
+	(iw_handler) ieee80211_ioctl_giwrate,		/* SIOCGIWRATE */
+	(iw_handler) ieee80211_ioctl_siwrts,		/* SIOCSIWRTS */
+	(iw_handler) ieee80211_ioctl_giwrts,		/* SIOCGIWRTS */
+	(iw_handler) ieee80211_ioctl_siwfrag,		/* SIOCSIWFRAG */
+	(iw_handler) ieee80211_ioctl_giwfrag,		/* SIOCGIWFRAG */
+	(iw_handler) ieee80211_ioctl_siwtxpow,		/* SIOCSIWTXPOW */
+	(iw_handler) ieee80211_ioctl_giwtxpow,		/* SIOCGIWTXPOW */
+	(iw_handler) ieee80211_ioctl_siwretry,		/* SIOCSIWRETRY */
+	(iw_handler) ieee80211_ioctl_giwretry,		/* SIOCGIWRETRY */
+	(iw_handler) ieee80211_ioctl_siwencode,		/* SIOCSIWENCODE */
+	(iw_handler) ieee80211_ioctl_giwencode,		/* SIOCGIWENCODE */
+	(iw_handler) ieee80211_ioctl_siwpower,		/* SIOCSIWPOWER */
+	(iw_handler) ieee80211_ioctl_giwpower,		/* SIOCGIWPOWER */
+	(iw_handler) NULL,				/* -- hole -- */
+	(iw_handler) NULL,				/* -- hole -- */
+#if WIRELESS_EXT >= 18
+	(iw_handler) ieee80211_ioctl_siwgenie,		/* SIOCSIWGENIE */
+	(iw_handler) ieee80211_ioctl_giwgenie,		/* SIOCGIWGENIE */
+	(iw_handler) ieee80211_ioctl_siwauth,		/* SIOCSIWAUTH */
+	(iw_handler) ieee80211_ioctl_giwauth,		/* SIOCGIWAUTH */
+	(iw_handler) ieee80211_ioctl_siwencodeext,	/* SIOCSIWENCODEEXT */
+	(iw_handler) ieee80211_ioctl_giwencodeext,	/* SIOCGIWENCODEEXT */
+#endif /* WIRELESS_EXT >= 18 */
+};
+/* Private ioctl handlers, indexed by offset from SIOCIWFIRSTPRIV; the slot
+ * layout must match the ioctl numbers advertised in ieee80211_priv_args. */
+static const iw_handler ieee80211_priv_handlers[] = {
+	(iw_handler) ieee80211_ioctl_setparam,		/* SIOCIWFIRSTPRIV+0 */
+	(iw_handler) ieee80211_ioctl_getparam,		/* SIOCIWFIRSTPRIV+1 */
+	(iw_handler) ieee80211_ioctl_setmode,		/* SIOCIWFIRSTPRIV+2 */
+	(iw_handler) ieee80211_ioctl_getmode,		/* SIOCIWFIRSTPRIV+3 */
+	(iw_handler) ieee80211_ioctl_setwmmparams,	/* SIOCIWFIRSTPRIV+4 */
+	(iw_handler) ieee80211_ioctl_getwmmparams,	/* SIOCIWFIRSTPRIV+5 */
+	(iw_handler) ieee80211_ioctl_setchanlist,	/* SIOCIWFIRSTPRIV+6 */
+	(iw_handler) ieee80211_ioctl_getchanlist,	/* SIOCIWFIRSTPRIV+7 */
+	(iw_handler) ieee80211_ioctl_chanswitch,	/* SIOCIWFIRSTPRIV+8 */
+	(iw_handler) ieee80211_ioctl_getappiebuf,	/* SIOCIWFIRSTPRIV+9 */
+	(iw_handler) ieee80211_ioctl_setappiebuf,	/* SIOCIWFIRSTPRIV+10 */
+	(iw_handler) ieee80211_ioctl_getscanresults,	/* SIOCIWFIRSTPRIV+11 */
+	(iw_handler) ieee80211_ioctl_setfilter,		/* SIOCIWFIRSTPRIV+12 */
+	(iw_handler) ieee80211_ioctl_getchaninfo,	/* SIOCIWFIRSTPRIV+13 */
+	(iw_handler) ieee80211_ioctl_setoptie,		/* SIOCIWFIRSTPRIV+14 */
+	(iw_handler) ieee80211_ioctl_getoptie,		/* SIOCIWFIRSTPRIV+15 */
+	(iw_handler) ieee80211_ioctl_setmlme,		/* SIOCIWFIRSTPRIV+16 */
+	(iw_handler) NULL,				/* SIOCIWFIRSTPRIV+17 */
+	(iw_handler) ieee80211_ioctl_setkey,		/* SIOCIWFIRSTPRIV+18 */
+	(iw_handler) NULL,				/* SIOCIWFIRSTPRIV+19 */
+	(iw_handler) ieee80211_ioctl_delkey,		/* SIOCIWFIRSTPRIV+20 */
+	(iw_handler) NULL,				/* SIOCIWFIRSTPRIV+21 */
+	(iw_handler) ieee80211_ioctl_addmac,		/* SIOCIWFIRSTPRIV+22 */
+	(iw_handler) NULL,				/* SIOCIWFIRSTPRIV+23 */
+	(iw_handler) ieee80211_ioctl_delmac,		/* SIOCIWFIRSTPRIV+24 */
+	(iw_handler) NULL,				/* SIOCIWFIRSTPRIV+25 */
+	(iw_handler) ieee80211_ioctl_wdsmac,		/* SIOCIWFIRSTPRIV+26 */
+	(iw_handler) NULL,				/* SIOCIWFIRSTPRIV+27 */
+	(iw_handler) ieee80211_ioctl_wdsdelmac,		/* SIOCIWFIRSTPRIV+28 */
+	(iw_handler) NULL,				/* SIOCIWFIRSTPRIV+29 */
+	(iw_handler) ieee80211_ioctl_kickmac,		/* SIOCIWFIRSTPRIV+30 */
+	(iw_handler) NULL,				/* SIOCIWFIRSTPRIV+31 */
+};
+/* Wireless-extensions registration record installed on each VAP's
+ * net_device by ieee80211_ioctl_vattach(). */
+static struct iw_handler_def ieee80211_iw_handler_def = {
+#define	N(a)	(sizeof (a) / sizeof (a[0]))
+	.standard		= (iw_handler *) ieee80211_handlers,
+	.num_standard		= N(ieee80211_handlers),
+	.private		= (iw_handler *) ieee80211_priv_handlers,
+	.num_private		= N(ieee80211_priv_handlers),
+	.private_args		= (struct iw_priv_args *) ieee80211_priv_args,
+	.num_private_args	= N(ieee80211_priv_args),
+#if IW_HANDLER_VERSION >= 7
+	.get_wireless_stats	= ieee80211_iw_getstats,
+#endif
+#undef N
+};
+
+static	void ieee80211_delete_wlanunit(u_int);	/* forward decl: reclaim a wlan unit number */
+
+/*
+ * Handle private ioctl requests arriving via the net_device do_ioctl
+ * hook: statistics retrieval, VAP destruction, and key/IE/station
+ * query ioctls.  Returns 0 or a negative errno.
+ */
+static int
+ieee80211_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct ieee80211vap *vap = dev->priv;
+
+	switch (cmd) {
+	case SIOCG80211STATS:
+		return copy_to_user(ifr->ifr_data, &vap->iv_stats,
+			sizeof (vap->iv_stats)) ? -EFAULT : 0;
+	case SIOC80211IFDESTROY:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		ieee80211_stop(vap->iv_dev);	/* force state before cleanup */
+		/* NB: no need to save iv_unit here; the unit is reclaimed
+		 * by ieee80211_ioctl_vdetach (the old local copy was dead
+		 * code and is gone). */
+		vap->iv_ic->ic_vap_delete(vap);
+		return 0;
+	case IEEE80211_IOCTL_GETKEY:
+		return ieee80211_ioctl_getkey(dev, (struct iwreq *) ifr);
+	case IEEE80211_IOCTL_GETWPAIE:
+		return ieee80211_ioctl_getwpaie(dev, (struct iwreq *) ifr);
+	case IEEE80211_IOCTL_STA_STATS:
+		return ieee80211_ioctl_getstastats(dev, (struct iwreq *) ifr);
+	case IEEE80211_IOCTL_STA_INFO:
+		return ieee80211_ioctl_getstainfo(dev, (struct iwreq *) ifr);
+	case IEEE80211_IOCTL_SCAN_RESULTS:
+		return ieee80211_ioctl_getscanresults(dev, (struct iwreq *)ifr);
+	}
+	return -EOPNOTSUPP;
+}
+
+static u_int8_t wlan_units[32];		/* allocation bitmap: 32 bytes * NBBY = 256 units */
+
+/*
+ * Allocate a new unit number.  If the map is full return -1;
+ * otherwise the allocated unit number is returned.
+ */
+static int
+ieee80211_new_wlanunit(void)
+{
+#define	N(a)	(sizeof(a)/sizeof(a[0]))
+	u_int unit;
+	u_int8_t b;
+	int i;
+
+	/* NB: covered by rtnl_lock */
+	unit = 0;
+	/* skip over bytes with every bit set (no free unit there) */
+	for (i = 0; i < N(wlan_units) && wlan_units[i] == 0xff; i++)
+		unit += NBBY;
+	if (i == N(wlan_units))
+		return -1;
+	/* locate the first clear bit within the byte */
+	for (b = wlan_units[i]; b & 1; b >>= 1)
+		unit++;
+	setbit(wlan_units, unit);
+
+	return unit;
+#undef N
+}
+
+/*
+ * Reclaim the specified unit number (undoes ieee80211_new_wlanunit).
+ * Asserts the unit is in range and currently allocated.
+ */
+static void
+ieee80211_delete_wlanunit(u_int unit)
+{
+	/* NB: covered by rtnl_lock */
+	KASSERT(unit < sizeof(wlan_units) * NBBY, ("invalid wlan unit %u", unit));
+	KASSERT(isset(wlan_units, unit), ("wlan unit %u not allocated", unit));
+	clrbit(wlan_units, unit);
+}
+
+/*
+ * Create a virtual AP in response to an ioctl request.  This is
+ * public as it must be implemented outside our control (e.g. in
+ * the driver).  Returns 0 or a negative errno.
+ */
+int
+ieee80211_ioctl_create_vap(struct ieee80211com *ic, struct ifreq *ifr, struct net_device *mdev)
+{
+	struct ieee80211_clone_params cp;
+	struct ieee80211vap *vap;
+	char name[IFNAMSIZ];
+	int unit;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+	if (copy_from_user(&cp, ifr->ifr_data, sizeof(cp)))
+		return -EFAULT;
+
+	unit = ieee80211_new_wlanunit();
+	if (unit == -1)
+		return -EIO;		/* XXX */
+	/* NB: strncpy does not NUL-terminate when the (userspace-supplied)
+	 * source fills the buffer; terminate explicitly. */
+	strncpy(name, cp.icp_name, sizeof(name));
+	name[sizeof(name) - 1] = '\0';
+
+	vap = ic->ic_vap_create(ic, name, unit, cp.icp_opmode, cp.icp_flags, mdev);
+	if (vap == NULL) {
+		ieee80211_delete_wlanunit(unit);
+		return -EIO;
+	}
+	/* return final device name */
+	strncpy(ifr->ifr_name, vap->iv_dev->name, IFNAMSIZ);
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_ioctl_create_vap);
+
+/*
+ * Create a virtual AP without an ioctl request (in-kernel callers).
+ * This is public as it must be implemented outside our control
+ * (e.g. in the driver).
+ * Must be called with rtnl_lock held.
+ * Returns 0 or a negative errno.
+ */
+int
+ieee80211_create_vap(struct ieee80211com *ic, char *name,
+	struct net_device *mdev, int opmode, int opflags)
+{
+	struct ieee80211vap *vap;
+	int unit;
+
+	if ((unit = ieee80211_new_wlanunit()) == -1)
+		return -EIO;		/* XXX */
+
+	/* release the unit if the driver refuses to create the VAP */
+	if ((vap = ic->ic_vap_create(ic, name, unit, opmode, opflags, mdev)) == NULL) {
+		ieee80211_delete_wlanunit(unit);
+		return -EIO;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ieee80211_create_vap);
+
+
+/* Attach the ioctl entry point and wireless-extensions handlers to
+ * the VAP's net_device. */
+void
+ieee80211_ioctl_vattach(struct ieee80211vap *vap)
+{
+	struct net_device *dev = vap->iv_dev;
+
+	dev->do_ioctl = ieee80211_ioctl;
+#if IW_HANDLER_VERSION < 7
+	/* older WE versions take the stats callback on the net_device itself */
+	dev->get_wireless_stats = ieee80211_iw_getstats;
+#endif
+	dev->wireless_handlers = &ieee80211_iw_handler_def;
+}
+
+/* Release the VAP's wlan unit number, if one was allocated for it. */
+void
+ieee80211_ioctl_vdetach(struct ieee80211vap *vap)
+{
+	if ((vap->iv_unit != -1) && isset(wlan_units, vap->iv_unit))
+		ieee80211_delete_wlanunit(vap->iv_unit);
+}
+
