1 /*
   2  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
   3  * Use is subject to license terms.
   4  */
   5 
   6 /*
   7  * Copyright (c) 2002-2004 Sam Leffler, Errno Consulting
   8  * All rights reserved.
   9  *
  10  * Redistribution and use in source and binary forms, with or without
  11  * modification, are permitted provided that the following conditions
  12  * are met:
  13  * 1. Redistributions of source code must retain the above copyright
  14  * notice, this list of conditions and the following disclaimer,
  15  * without modification.
  16  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
  17  * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
  18  * redistribution must be conditioned upon including a substantially
  19  * similar Disclaimer requirement for further binary redistribution.
  20  * 3. Neither the names of the above-listed copyright holders nor the names
  21  * of any contributors may be used to endorse or promote products derived
  22  * from this software without specific prior written permission.
  23  *
  24  * NO WARRANTY
  25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  27  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
  28  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
  29  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
  30  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
  33  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  35  * THE POSSIBILITY OF SUCH DAMAGES.
  36  *
  37  */
  38 
  39 /*
  40  * Driver for the Atheros Wireless LAN controller.
  41  *
  42  * The Atheros driver calls into net80211 module for IEEE80211 protocol
  43  * management functionalities. The driver includes a LLD(Low Level Driver)
  44  * part to implement H/W related operations.
  45  * The following is the high level structure of ath driver.
  46  * (The arrows between modules indicate function call direction.)
  47  *
  48  *
  49  *                                                  |
  50  *                                                  | GLD thread
  51  *                                                  V
  52  *         ==================  =========================================
  53  *         |                |  |[1]                                    |
  54  *         |                |  |  GLDv3 Callback functions registered  |
  55  *         |   Net80211     |  =========================       by      |
  56  *         |    module      |          |               |     driver    |
  57  *         |                |          V               |               |
  58  *         |                |========================  |               |
  59  *         |   Functions exported by net80211       |  |               |
  60  *         |                                        |  |               |
  61  *         ==========================================  =================
  62  *                         |                                  |
  63  *                         V                                  |
  64  *         +----------------------------------+               |
  65  *         |[2]                               |               |
  66  *         |    Net80211 Callback functions   |               |
  67  *         |      registered by LLD           |               |
  68  *         +----------------------------------+               |
  69  *                         |                                  |
  70  *                         V                                  v
  71  *         +-----------------------------------------------------------+
  72  *         |[3]                                                        |
  73  *         |                LLD Internal functions                     |
  74  *         |                                                           |
  75  *         +-----------------------------------------------------------+
  76  *                                    ^
  77  *                                    | Software interrupt thread
  78  *                                    |
  79  *
  80  * The short description of each module is as below:
  81  *      Module 1: GLD callback functions, which are intercepting the calls from
  82  *                GLD to LLD.
  83  *      Module 2: Net80211 callback functions registered by LLD, which
  84  *                calls into LLD for H/W related functions needed by net80211.
 *      Module 3: LLD Internal functions, which are responsible for
 *                allocating descriptor/buffer, handling interrupt and other H/W
  87  *                operations.
  88  *
  89  * All functions are running in 3 types of thread:
  90  * 1. GLD callbacks threads, such as ioctl, intr, etc.
 * 2. Clock interrupt thread which is responsible for scan, rate control and
 *    calibration.
  93  * 3. Software Interrupt thread originated in LLD.
  94  *
  95  * The lock strategy is as below:
 * There are 4 queues for tx; each queue has one asc_txqlock[i] to
 *      prevent conflicting access to queue resources from different threads.
  98  *
  99  * All the transmit buffers are contained in asc_txbuf which are
 100  *      protected by asc_txbuflock.
 101  *
 * All the receive buffers are contained in asc_rxbuf which are protected
 *      by asc_rxbuflock.
 104  *
 105  * In ath struct, asc_genlock is a general lock, protecting most other
 106  *      operational data in ath_softc struct and HAL accesses.
 *      It is acquired by the interrupt handler and most "mode-ctrl" routines.
 108  *
 109  * Any of the locks can be acquired singly, but where multiple
 110  * locks are acquired, they *must* be in the order:
 111  *    asc_genlock >> asc_txqlock[i] >> asc_txbuflock >> asc_rxbuflock
 112  */
 113 
 114 #include <sys/param.h>
 115 #include <sys/types.h>
 116 #include <sys/signal.h>
 117 #include <sys/stream.h>
 118 #include <sys/termio.h>
 119 #include <sys/errno.h>
 120 #include <sys/file.h>
 121 #include <sys/cmn_err.h>
 122 #include <sys/stropts.h>
 123 #include <sys/strsubr.h>
 124 #include <sys/strtty.h>
 125 #include <sys/kbio.h>
 126 #include <sys/cred.h>
 127 #include <sys/stat.h>
 128 #include <sys/consdev.h>
 129 #include <sys/kmem.h>
 130 #include <sys/modctl.h>
 131 #include <sys/ddi.h>
 132 #include <sys/sunddi.h>
 133 #include <sys/pci.h>
 134 #include <sys/errno.h>
 135 #include <sys/mac_provider.h>
 136 #include <sys/dlpi.h>
 137 #include <sys/ethernet.h>
 138 #include <sys/list.h>
 139 #include <sys/byteorder.h>
 140 #include <sys/strsun.h>
 141 #include <sys/policy.h>
 142 #include <inet/common.h>
 143 #include <inet/nd.h>
 144 #include <inet/mi.h>
 145 #include <inet/wifi_ioctl.h>
 146 #include <sys/mac_wifi.h>
 147 #include "ath_hal.h"
 148 #include "ath_impl.h"
 149 #include "ath_aux.h"
 150 #include "ath_rate.h"
 151 
 152 #define ATH_MAX_RSSI    63      /* max rssi */
 153 
 154 extern void ath_halfix_init(void);
 155 extern void ath_halfix_finit(void);
 156 extern int32_t ath_getset(ath_t *asc, mblk_t *mp, uint32_t cmd);
 157 
 158 /*
 159  * PIO access attributes for registers
 160  */
static ddi_device_acc_attr_t ath_reg_accattr = {
        DDI_DEVICE_ATTR_V0,
        DDI_STRUCTURE_LE_ACC,   /* registers are little-endian */
        DDI_STRICTORDER_ACC     /* no reordering of PIO accesses */
};
 166 
 167 /*
 168  * DMA access attributes for descriptors: NOT to be byte swapped.
 169  */
static ddi_device_acc_attr_t ath_desc_accattr = {
        DDI_DEVICE_ATTR_V0,
        DDI_STRUCTURE_LE_ACC,   /* descriptors are little-endian (no swap) */
        DDI_STRICTORDER_ACC     /* strict ordering for descriptor updates */
};
 175 
 176 /*
 177  * DMA attributes for rx/tx buffers
 178  */
static ddi_dma_attr_t ath_dma_attr = {
        DMA_ATTR_V0,            /* version number */
        0,                      /* low address */
        0xffffffffU,            /* high address (32-bit DMA range) */
        0x3ffffU,               /* counter register max */
        1,                      /* alignment (none required) */
        0xFFF,                  /* burst sizes */
        1,                      /* minimum transfer size */
        0x3ffffU,               /* max transfer size */
        0xffffffffU,            /* address register max */
        1,                      /* no scatter-gather (single cookie) */
        1,                      /* granularity of device */
        0,                      /* DMA flags */
};
 193 
static ddi_dma_attr_t ath_desc_dma_attr = {
        DMA_ATTR_V0,            /* version number */
        0,                      /* low address */
        0xffffffffU,            /* high address (32-bit DMA range) */
        0xffffffffU,            /* counter register max */
        0x1000,                 /* alignment: 4KB for the descriptor area */
        0xFFF,                  /* burst sizes */
        1,                      /* minimum transfer size */
        0xffffffffU,            /* max transfer size */
        0xffffffffU,            /* address register max */
        1,                      /* no scatter-gather (single cookie) */
        1,                      /* granularity of device */
        0,                      /* DMA flags */
};
 208 
 209 static kmutex_t ath_loglock;
 210 static void *ath_soft_state_p = NULL;
 211 static int ath_dwelltime = 150;         /* scan interval, ms */
 212 
 213 static int      ath_m_stat(void *,  uint_t, uint64_t *);
 214 static int      ath_m_start(void *);
 215 static void     ath_m_stop(void *);
 216 static int      ath_m_promisc(void *, boolean_t);
 217 static int      ath_m_multicst(void *, boolean_t, const uint8_t *);
 218 static int      ath_m_unicst(void *, const uint8_t *);
 219 static mblk_t   *ath_m_tx(void *, mblk_t *);
 220 static void     ath_m_ioctl(void *, queue_t *, mblk_t *);
 221 static int      ath_m_setprop(void *, const char *, mac_prop_id_t,
 222     uint_t, const void *);
 223 static int      ath_m_getprop(void *, const char *, mac_prop_id_t,
 224     uint_t, void *);
 225 static void     ath_m_propinfo(void *, const char *, mac_prop_id_t,
 226     mac_prop_info_handle_t);
 227 
/* GLDv3 entry points registered with the MAC layer (Module 1 above). */
static mac_callbacks_t ath_m_callbacks = {
        MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
        ath_m_stat,
        ath_m_start,
        ath_m_stop,
        ath_m_promisc,
        ath_m_multicst,
        ath_m_unicst,
        ath_m_tx,
        NULL,           /* reserved entry -- TODO confirm against mac_callbacks_t */
        ath_m_ioctl,
        NULL,           /* mc_getcapab */
        NULL,           /* presumably mc_open -- verify struct layout */
        NULL,           /* presumably mc_close -- verify struct layout */
        ath_m_setprop,
        ath_m_getprop,
        ath_m_propinfo
};
 246 
 247 /*
 248  * Available debug flags:
 249  * ATH_DBG_INIT, ATH_DBG_GLD, ATH_DBG_HAL, ATH_DBG_INT, ATH_DBG_ATTACH,
 250  * ATH_DBG_DETACH, ATH_DBG_AUX, ATH_DBG_WIFICFG, ATH_DBG_OSDEP
 251  */
 252 uint32_t ath_dbg_flags = 0;
 253 
 254 /*
 255  * Exception/warning cases not leading to panic.
 256  */
 257 void
 258 ath_problem(const int8_t *fmt, ...)
 259 {
 260         va_list args;
 261 
 262         mutex_enter(&ath_loglock);
 263 
 264         va_start(args, fmt);
 265         vcmn_err(CE_WARN, fmt, args);
 266         va_end(args);
 267 
 268         mutex_exit(&ath_loglock);
 269 }
 270 
 271 /*
 272  * Normal log information independent of debug.
 273  */
 274 void
 275 ath_log(const int8_t *fmt, ...)
 276 {
 277         va_list args;
 278 
 279         mutex_enter(&ath_loglock);
 280 
 281         va_start(args, fmt);
 282         vcmn_err(CE_CONT, fmt, args);
 283         va_end(args);
 284 
 285         mutex_exit(&ath_loglock);
 286 }
 287 
 288 void
 289 ath_dbg(uint32_t dbg_flags, const int8_t *fmt, ...)
 290 {
 291         va_list args;
 292 
 293         if (dbg_flags & ath_dbg_flags) {
 294                 mutex_enter(&ath_loglock);
 295                 va_start(args, fmt);
 296                 vcmn_err(CE_CONT, fmt, args);
 297                 va_end(args);
 298                 mutex_exit(&ath_loglock);
 299         }
 300 }
 301 
 302 void
 303 ath_setup_desc(ath_t *asc, struct ath_buf *bf)
 304 {
 305         struct ath_desc *ds;
 306 
 307         ds = bf->bf_desc;
 308         ds->ds_link = bf->bf_daddr;
 309         ds->ds_data = bf->bf_dma.cookie.dmac_address;
 310         ATH_HAL_SETUPRXDESC(asc->asc_ah, ds,
 311             bf->bf_dma.alength,              /* buffer size */
 312             0);
 313 
 314         if (asc->asc_rxlink != NULL)
 315                 *asc->asc_rxlink = bf->bf_daddr;
 316         asc->asc_rxlink = &ds->ds_link;
 317 }
 318 
 319 
 320 /*
 321  * Allocate an area of memory and a DMA handle for accessing it
 322  */
 323 static int
 324 ath_alloc_dma_mem(dev_info_t *devinfo, ddi_dma_attr_t *dma_attr, size_t memsize,
 325     ddi_device_acc_attr_t *attr_p, uint_t alloc_flags,
 326     uint_t bind_flags, dma_area_t *dma_p)
 327 {
 328         int err;
 329 
 330         /*
 331          * Allocate handle
 332          */
 333         err = ddi_dma_alloc_handle(devinfo, dma_attr,
 334             DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
 335         if (err != DDI_SUCCESS)
 336                 return (DDI_FAILURE);
 337 
 338         /*
 339          * Allocate memory
 340          */
 341         err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
 342             alloc_flags, DDI_DMA_SLEEP, NULL, &dma_p->mem_va,
 343             &dma_p->alength, &dma_p->acc_hdl);
 344         if (err != DDI_SUCCESS)
 345                 return (DDI_FAILURE);
 346 
 347         /*
 348          * Bind the two together
 349          */
 350         err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
 351             dma_p->mem_va, dma_p->alength, bind_flags,
 352             DDI_DMA_SLEEP, NULL, &dma_p->cookie, &dma_p->ncookies);
 353         if (err != DDI_DMA_MAPPED)
 354                 return (DDI_FAILURE);
 355 
 356         dma_p->nslots = ~0U;
 357         dma_p->size = ~0U;
 358         dma_p->token = ~0U;
 359         dma_p->offset = 0;
 360         return (DDI_SUCCESS);
 361 }
 362 
 363 /*
 364  * Free one allocated area of DMAable memory
 365  */
 366 static void
 367 ath_free_dma_mem(dma_area_t *dma_p)
 368 {
 369         if (dma_p->dma_hdl != NULL) {
 370                 (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
 371                 if (dma_p->acc_hdl != NULL) {
 372                         ddi_dma_mem_free(&dma_p->acc_hdl);
 373                         dma_p->acc_hdl = NULL;
 374                 }
 375                 ddi_dma_free_handle(&dma_p->dma_hdl);
 376                 dma_p->ncookies = 0;
 377                 dma_p->dma_hdl = NULL;
 378         }
 379 }
 380 
 381 
 382 /*
 383  * Initialize tx/rx buffer list. Allocate DMA memory for
 384  * each buffer.
 385  */
 386 static int
 387 ath_buflist_setup(dev_info_t *devinfo, ath_t *asc, list_t *bflist,
 388     struct ath_buf **pbf, struct ath_desc **pds, int nbuf, uint_t dmabflags)
 389 {
 390         int i, err;
 391         struct ath_buf *bf = *pbf;
 392         struct ath_desc *ds = *pds;
 393 
 394         list_create(bflist, sizeof (struct ath_buf),
 395             offsetof(struct ath_buf, bf_node));
 396         for (i = 0; i < nbuf; i++, bf++, ds++) {
 397                 bf->bf_desc = ds;
 398                 bf->bf_daddr = asc->asc_desc_dma.cookie.dmac_address +
 399                     ((uintptr_t)ds - (uintptr_t)asc->asc_desc);
 400                 list_insert_tail(bflist, bf);
 401 
 402                 /* alloc DMA memory */
 403                 err = ath_alloc_dma_mem(devinfo, &ath_dma_attr,
 404                     asc->asc_dmabuf_size, &ath_desc_accattr, DDI_DMA_STREAMING,
 405                     dmabflags, &bf->bf_dma);
 406                 if (err != DDI_SUCCESS)
 407                         return (err);
 408         }
 409         *pbf = bf;
 410         *pds = ds;
 411 
 412         return (DDI_SUCCESS);
 413 }
 414 
 415 /*
 416  * Destroy tx/rx buffer list. Free DMA memory.
 417  */
 418 static void
 419 ath_buflist_cleanup(list_t *buflist)
 420 {
 421         struct ath_buf *bf;
 422 
 423         if (!buflist)
 424                 return;
 425 
 426         bf = list_head(buflist);
 427         while (bf != NULL) {
 428                 if (bf->bf_m != NULL) {
 429                         freemsg(bf->bf_m);
 430                         bf->bf_m = NULL;
 431                 }
 432                 /* Free DMA buffer */
 433                 ath_free_dma_mem(&bf->bf_dma);
 434                 if (bf->bf_in != NULL) {
 435                         ieee80211_free_node(bf->bf_in);
 436                         bf->bf_in = NULL;
 437                 }
 438                 list_remove(buflist, bf);
 439                 bf = list_head(buflist);
 440         }
 441         list_destroy(buflist);
 442 }
 443 
 444 
/*
 * Release everything set up by ath_desc_alloc(): the tx/rx buffer
 * lists (including per-buffer DMA memory), the shared descriptor DMA
 * area, and the ath_buf bookkeeping array.  Order matters: buffers
 * reference the descriptor area, so they are cleaned up first.
 */
static void
ath_desc_free(ath_t *asc)
{
        ath_buflist_cleanup(&asc->asc_txbuf_list);
        ath_buflist_cleanup(&asc->asc_rxbuf_list);

        /* Free descriptor DMA buffer */
        ath_free_dma_mem(&asc->asc_desc_dma);

        kmem_free((void *)asc->asc_vbufptr, asc->asc_vbuflen);
        asc->asc_vbufptr = NULL;
}
 457 
 458 static int
 459 ath_desc_alloc(dev_info_t *devinfo, ath_t *asc)
 460 {
 461         int err;
 462         size_t size;
 463         struct ath_desc *ds;
 464         struct ath_buf *bf;
 465 
 466         size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF);
 467 
 468         err = ath_alloc_dma_mem(devinfo, &ath_desc_dma_attr, size,
 469             &ath_desc_accattr, DDI_DMA_CONSISTENT,
 470             DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &asc->asc_desc_dma);
 471 
 472         /* virtual address of the first descriptor */
 473         asc->asc_desc = (struct ath_desc *)asc->asc_desc_dma.mem_va;
 474 
 475         ds = asc->asc_desc;
 476         ATH_DEBUG((ATH_DBG_INIT, "ath: ath_desc_alloc(): DMA map: "
 477             "%p (%d) -> %p\n",
 478             asc->asc_desc, asc->asc_desc_dma.alength,
 479             asc->asc_desc_dma.cookie.dmac_address));
 480 
 481         /* allocate data structures to describe TX/RX DMA buffers */
 482         asc->asc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF);
 483         bf = (struct ath_buf *)kmem_zalloc(asc->asc_vbuflen, KM_SLEEP);
 484         asc->asc_vbufptr = bf;
 485 
 486         /* DMA buffer size for each TX/RX packet */
 487         asc->asc_dmabuf_size = roundup(1000 + sizeof (struct ieee80211_frame) +
 488             IEEE80211_MTU + IEEE80211_CRC_LEN +
 489             (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
 490             IEEE80211_WEP_CRCLEN), asc->asc_cachelsz);
 491 
 492         /* create RX buffer list */
 493         err = ath_buflist_setup(devinfo, asc, &asc->asc_rxbuf_list, &bf, &ds,
 494             ATH_RXBUF, DDI_DMA_READ | DDI_DMA_STREAMING);
 495         if (err != DDI_SUCCESS) {
 496                 ath_desc_free(asc);
 497                 return (err);
 498         }
 499 
 500         /* create TX buffer list */
 501         err = ath_buflist_setup(devinfo, asc, &asc->asc_txbuf_list, &bf, &ds,
 502             ATH_TXBUF, DDI_DMA_STREAMING);
 503         if (err != DDI_SUCCESS) {
 504                 ath_desc_free(asc);
 505                 return (err);
 506         }
 507 
 508 
 509         return (DDI_SUCCESS);
 510 }
 511 
/*
 * Debug dump of one rx descriptor.  The trailing character encodes the
 * descriptor state: ' ' not yet done, '*' done without error, '!' done
 * with an error status.
 */
static void
ath_printrxbuf(struct ath_buf *bf, int32_t done)
{
        struct ath_desc *ds = bf->bf_desc;
        const struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;

        ATH_DEBUG((ATH_DBG_RECV, "ath: R (%p %p) %08x %08x %08x "
            "%08x %08x %08x %c\n",
            ds, bf->bf_daddr,
            ds->ds_link, ds->ds_data,
            ds->ds_ctl0, ds->ds_ctl1,
            ds->ds_hw[0], ds->ds_hw[1],
            !done ? ' ' : (rs->rs_status == 0) ? '*' : '!'));
}
 526 
 527 static void
 528 ath_rx_handler(ath_t *asc)
 529 {
 530         ieee80211com_t *ic = (ieee80211com_t *)asc;
 531         struct ath_buf *bf;
 532         struct ath_hal *ah = asc->asc_ah;
 533         struct ath_desc *ds;
 534         struct ath_rx_status *rs;
 535         mblk_t *rx_mp;
 536         struct ieee80211_frame *wh;
 537         int32_t len, loop = 1;
 538         uint8_t phyerr;
 539         HAL_STATUS status;
 540         HAL_NODE_STATS hal_node_stats;
 541         struct ieee80211_node *in;
 542 
 543         do {
 544                 mutex_enter(&asc->asc_rxbuflock);
 545                 bf = list_head(&asc->asc_rxbuf_list);
 546                 if (bf == NULL) {
 547                         ATH_DEBUG((ATH_DBG_RECV, "ath: ath_rx_handler(): "
 548                             "no buffer\n"));
 549                         mutex_exit(&asc->asc_rxbuflock);
 550                         break;
 551                 }
 552                 ASSERT(bf->bf_dma.cookie.dmac_address != NULL);
 553                 ds = bf->bf_desc;
 554                 if (ds->ds_link == bf->bf_daddr) {
 555                         /*
 556                          * Never process the self-linked entry at the end,
 557                          * this may be met at heavy load.
 558                          */
 559                         mutex_exit(&asc->asc_rxbuflock);
 560                         break;
 561                 }
 562 
 563                 rs = &bf->bf_status.ds_rxstat;
 564                 status = ATH_HAL_RXPROCDESC(ah, ds,
 565                     bf->bf_daddr,
 566                     ATH_PA2DESC(asc, ds->ds_link), rs);
 567                 if (status == HAL_EINPROGRESS) {
 568                         mutex_exit(&asc->asc_rxbuflock);
 569                         break;
 570                 }
 571                 list_remove(&asc->asc_rxbuf_list, bf);
 572                 mutex_exit(&asc->asc_rxbuflock);
 573 
 574                 if (rs->rs_status != 0) {
 575                         if (rs->rs_status & HAL_RXERR_CRC)
 576                                 asc->asc_stats.ast_rx_crcerr++;
 577                         if (rs->rs_status & HAL_RXERR_FIFO)
 578                                 asc->asc_stats.ast_rx_fifoerr++;
 579                         if (rs->rs_status & HAL_RXERR_DECRYPT)
 580                                 asc->asc_stats.ast_rx_badcrypt++;
 581                         if (rs->rs_status & HAL_RXERR_PHY) {
 582                                 asc->asc_stats.ast_rx_phyerr++;
 583                                 phyerr = rs->rs_phyerr & 0x1f;
 584                                 asc->asc_stats.ast_rx_phy[phyerr]++;
 585                         }
 586                         goto rx_next;
 587                 }
 588                 len = rs->rs_datalen;
 589 
 590                 /* less than sizeof(struct ieee80211_frame) */
 591                 if (len < 20) {
 592                         asc->asc_stats.ast_rx_tooshort++;
 593                         goto rx_next;
 594                 }
 595 
 596                 if ((rx_mp = allocb(asc->asc_dmabuf_size, BPRI_MED)) == NULL) {
 597                         ath_problem("ath: ath_rx_handler(): "
 598                             "allocing mblk buffer failed.\n");
 599                         return;
 600                 }
 601 
 602                 ATH_DMA_SYNC(bf->bf_dma, DDI_DMA_SYNC_FORCPU);
 603                 bcopy(bf->bf_dma.mem_va, rx_mp->b_rptr, len);
 604 
 605                 rx_mp->b_wptr += len;
 606                 wh = (struct ieee80211_frame *)rx_mp->b_rptr;
 607                 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
 608                     IEEE80211_FC0_TYPE_CTL) {
 609                         /*
 610                          * Ignore control frame received in promisc mode.
 611                          */
 612                         freemsg(rx_mp);
 613                         goto rx_next;
 614                 }
 615                 /* Remove the CRC at the end of IEEE80211 frame */
 616                 rx_mp->b_wptr -= IEEE80211_CRC_LEN;
 617 #ifdef DEBUG
 618                 ath_printrxbuf(bf, status == HAL_OK);
 619 #endif /* DEBUG */
 620                 /*
 621                  * Locate the node for sender, track state, and then
 622                  * pass the (referenced) node up to the 802.11 layer
 623                  * for its use.
 624                  */
 625                 in = ieee80211_find_rxnode(ic, wh);
 626 
 627                 /*
 628                  * Send frame up for processing.
 629                  */
 630                 (void) ieee80211_input(ic, rx_mp, in,
 631                     rs->rs_rssi, rs->rs_tstamp);
 632 
 633                 ieee80211_free_node(in);
 634 
 635 rx_next:
 636                 mutex_enter(&asc->asc_rxbuflock);
 637                 list_insert_tail(&asc->asc_rxbuf_list, bf);
 638                 mutex_exit(&asc->asc_rxbuflock);
 639                 ath_setup_desc(asc, bf);
 640         } while (loop);
 641 
 642         /* rx signal state monitoring */
 643         ATH_HAL_RXMONITOR(ah, &hal_node_stats, &asc->asc_curchan);
 644 }
 645 
/*
 * Debug dump of one tx descriptor.  The trailing character encodes the
 * descriptor state: ' ' not yet done, '*' done without error, '!' done
 * with an error status.
 */
static void
ath_printtxbuf(struct ath_buf *bf, int done)
{
        struct ath_desc *ds = bf->bf_desc;
        const struct ath_tx_status *ts = &bf->bf_status.ds_txstat;

        ATH_DEBUG((ATH_DBG_SEND, "ath: T(%p %p) %08x %08x %08x %08x %08x"
            " %08x %08x %08x %c\n",
            ds, bf->bf_daddr,
            ds->ds_link, ds->ds_data,
            ds->ds_ctl0, ds->ds_ctl1,
            ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3],
            !done ? ' ' : (ts->ts_status == 0) ? '*' : '!'));
}
 660 
 661 /*
 662  * The input parameter mp has following assumption:
 663  * For data packets, GLDv3 mac_wifi plugin allocates and fills the
 664  * ieee80211 header. For management packets, net80211 allocates and
 665  * fills the ieee80211 header. In both cases, enough spaces in the
 666  * header are left for encryption option.
 667  */
 668 static int32_t
 669 ath_tx_start(ath_t *asc, struct ieee80211_node *in, struct ath_buf *bf,
 670     mblk_t *mp)
 671 {
 672         ieee80211com_t *ic = (ieee80211com_t *)asc;
 673         struct ieee80211_frame *wh;
 674         struct ath_hal *ah = asc->asc_ah;
 675         uint32_t subtype, flags, ctsduration;
 676         int32_t keyix, iswep, hdrlen, pktlen, mblen, mbslen, try0;
 677         uint8_t rix, cix, txrate, ctsrate;
 678         struct ath_desc *ds;
 679         struct ath_txq *txq;
 680         HAL_PKT_TYPE atype;
 681         const HAL_RATE_TABLE *rt;
 682         HAL_BOOL shortPreamble;
 683         struct ath_node *an;
 684         caddr_t dest;
 685 
 686         /*
 687          * CRC are added by H/W, not encaped by driver,
 688          * but we must count it in pkt length.
 689          */
 690         pktlen = IEEE80211_CRC_LEN;
 691 
 692         wh = (struct ieee80211_frame *)mp->b_rptr;
 693         iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
 694         keyix = HAL_TXKEYIX_INVALID;
 695         hdrlen = sizeof (struct ieee80211_frame);
 696         if (iswep != 0) {
 697                 const struct ieee80211_cipher *cip;
 698                 struct ieee80211_key *k;
 699 
 700                 /*
 701                  * Construct the 802.11 header+trailer for an encrypted
 702                  * frame. The only reason this can fail is because of an
 703                  * unknown or unsupported cipher/key type.
 704                  */
 705                 k = ieee80211_crypto_encap(ic, mp);
 706                 if (k == NULL) {
 707                         ATH_DEBUG((ATH_DBG_AUX, "crypto_encap failed\n"));
 708                         /*
 709                          * This can happen when the key is yanked after the
 710                          * frame was queued.  Just discard the frame; the
 711                          * 802.11 layer counts failures and provides
 712                          * debugging/diagnostics.
 713                          */
 714                         return (EIO);
 715                 }
 716                 cip = k->wk_cipher;
 717                 /*
 718                  * Adjust the packet + header lengths for the crypto
 719                  * additions and calculate the h/w key index.  When
 720                  * a s/w mic is done the frame will have had any mic
 721                  * added to it prior to entry so m0->m_pkthdr.len above will
 722                  * account for it. Otherwise we need to add it to the
 723                  * packet length.
 724                  */
 725                 hdrlen += cip->ic_header;
 726                 pktlen += cip->ic_trailer;
 727                 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0)
 728                         pktlen += cip->ic_miclen;
 729                 keyix = k->wk_keyix;
 730 
 731                 /* packet header may have moved, reset our local pointer */
 732                 wh = (struct ieee80211_frame *)mp->b_rptr;
 733         }
 734 
 735         dest = bf->bf_dma.mem_va;
 736         for (; mp != NULL; mp = mp->b_cont) {
 737                 mblen = MBLKL(mp);
 738                 bcopy(mp->b_rptr, dest, mblen);
 739                 dest += mblen;
 740         }
 741         mbslen = (uintptr_t)dest - (uintptr_t)bf->bf_dma.mem_va;
 742         pktlen += mbslen;
 743 
 744         bf->bf_in = in;
 745 
 746         /* setup descriptors */
 747         ds = bf->bf_desc;
 748         rt = asc->asc_currates;
 749         ASSERT(rt != NULL);
 750 
 751         /*
 752          * The 802.11 layer marks whether or not we should
 753          * use short preamble based on the current mode and
 754          * negotiated parameters.
 755          */
 756         if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
 757             (in->in_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
 758                 shortPreamble = AH_TRUE;
 759                 asc->asc_stats.ast_tx_shortpre++;
 760         } else {
 761                 shortPreamble = AH_FALSE;
 762         }
 763 
 764         an = ATH_NODE(in);
 765 
 766         /*
 767          * Calculate Atheros packet type from IEEE80211 packet header
 768          * and setup for rate calculations.
 769          */
 770         switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
 771         case IEEE80211_FC0_TYPE_MGT:
 772                 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
 773                 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
 774                         atype = HAL_PKT_TYPE_BEACON;
 775                 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
 776                         atype = HAL_PKT_TYPE_PROBE_RESP;
 777                 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
 778                         atype = HAL_PKT_TYPE_ATIM;
 779                 else
 780                         atype = HAL_PKT_TYPE_NORMAL;
 781                 rix = 0;        /* lowest rate */
 782                 try0 = ATH_TXMAXTRY;
 783                 if (shortPreamble)
 784                         txrate = an->an_tx_mgtratesp;
 785                 else
 786                         txrate = an->an_tx_mgtrate;
 787                 /* force all ctl frames to highest queue */
 788                 txq = asc->asc_ac2q[WME_AC_VO];
 789                 break;
 790         case IEEE80211_FC0_TYPE_CTL:
 791                 atype = HAL_PKT_TYPE_PSPOLL;
 792                 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
 793                 rix = 0;        /* lowest rate */
 794                 try0 = ATH_TXMAXTRY;
 795                 if (shortPreamble)
 796                         txrate = an->an_tx_mgtratesp;
 797                 else
 798                         txrate = an->an_tx_mgtrate;
 799                 /* force all ctl frames to highest queue */
 800                 txq = asc->asc_ac2q[WME_AC_VO];
 801                 break;
 802         case IEEE80211_FC0_TYPE_DATA:
 803                 atype = HAL_PKT_TYPE_NORMAL;
 804                 rix = an->an_tx_rix0;
 805                 try0 = an->an_tx_try0;
 806                 if (shortPreamble)
 807                         txrate = an->an_tx_rate0sp;
 808                 else
 809                         txrate = an->an_tx_rate0;
 810                 /* Always use background queue */
 811                 txq = asc->asc_ac2q[WME_AC_BK];
 812                 break;
 813         default:
 814                 /* Unknown 802.11 frame */
 815                 asc->asc_stats.ast_tx_invalid++;
 816                 return (1);
 817         }
 818         /*
 819          * Calculate miscellaneous flags.
 820          */
 821         flags = HAL_TXDESC_CLRDMASK;
 822         if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
 823                 flags |= HAL_TXDESC_NOACK;      /* no ack on broad/multicast */
 824                 asc->asc_stats.ast_tx_noack++;
 825         } else if (pktlen > ic->ic_rtsthreshold) {
 826                 flags |= HAL_TXDESC_RTSENA;     /* RTS based on frame length */
 827                 asc->asc_stats.ast_tx_rts++;
 828         }
 829 
 830         /*
 831          * Calculate duration.  This logically belongs in the 802.11
 832          * layer but it lacks sufficient information to calculate it.
 833          */
 834         if ((flags & HAL_TXDESC_NOACK) == 0 &&
 835             (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
 836             IEEE80211_FC0_TYPE_CTL) {
 837                 uint16_t dur;
 838                 dur = ath_hal_computetxtime(ah, rt, IEEE80211_ACK_SIZE,
 839                     rix, shortPreamble);
 840                 /* LINTED E_BAD_PTR_CAST_ALIGN */
 841                 *(uint16_t *)wh->i_dur = LE_16(dur);
 842         }
 843 
 844         /*
 845          * Calculate RTS/CTS rate and duration if needed.
 846          */
 847         ctsduration = 0;
 848         if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
 849                 /*
 850                  * CTS transmit rate is derived from the transmit rate
 851                  * by looking in the h/w rate table.  We must also factor
 852                  * in whether or not a short preamble is to be used.
 853                  */
 854                 cix = rt->info[rix].controlRate;
 855                 ctsrate = rt->info[cix].rateCode;
 856                 if (shortPreamble)
 857                         ctsrate |= rt->info[cix].shortPreamble;
 858                 /*
 859                  * Compute the transmit duration based on the size
 860                  * of an ACK frame.  We call into the HAL to do the
 861                  * computation since it depends on the characteristics
 862                  * of the actual PHY being used.
 863                  */
 864                 if (flags & HAL_TXDESC_RTSENA) {    /* SIFS + CTS */
 865                         ctsduration += ath_hal_computetxtime(ah,
 866                             rt, IEEE80211_ACK_SIZE, cix, shortPreamble);
 867                 }
 868                 /* SIFS + data */
 869                 ctsduration += ath_hal_computetxtime(ah,
 870                     rt, pktlen, rix, shortPreamble);
 871                 if ((flags & HAL_TXDESC_NOACK) == 0) {  /* SIFS + ACK */
 872                         ctsduration += ath_hal_computetxtime(ah,
 873                             rt, IEEE80211_ACK_SIZE, cix, shortPreamble);
 874                 }
 875         } else
 876                 ctsrate = 0;
 877 
 878         if (++txq->axq_intrcnt >= ATH_TXINTR_PERIOD) {
 879                 flags |= HAL_TXDESC_INTREQ;
 880                 txq->axq_intrcnt = 0;
 881         }
 882 
 883         /*
 884          * Formulate first tx descriptor with tx controls.
 885          */
 886         ATH_HAL_SETUPTXDESC(ah, ds,
 887             pktlen,                     /* packet length */
 888             hdrlen,                     /* header length */
 889             atype,                      /* Atheros packet type */
 890             MIN(in->in_txpower, 60), /* txpower */
 891             txrate, try0,               /* series 0 rate/tries */
 892             keyix,                      /* key cache index */
 893             an->an_tx_antenna,               /* antenna mode */
 894             flags,                      /* flags */
 895             ctsrate,                    /* rts/cts rate */
 896             ctsduration);               /* rts/cts duration */
 897         bf->bf_flags = flags;
 898 
 899         /* LINTED E_BAD_PTR_CAST_ALIGN */
 900         ATH_DEBUG((ATH_DBG_SEND, "ath: ath_xmit(): to %s totlen=%d "
 901             "an->an_tx_rate1sp=%d tx_rate2sp=%d tx_rate3sp=%d "
 902             "qnum=%d rix=%d sht=%d dur = %d\n",
 903             ieee80211_macaddr_sprintf(wh->i_addr1), mbslen, an->an_tx_rate1sp,
 904             an->an_tx_rate2sp, an->an_tx_rate3sp,
 905             txq->axq_qnum, rix, shortPreamble, *(uint16_t *)wh->i_dur));
 906 
 907         /*
 908          * Setup the multi-rate retry state only when we're
 909          * going to use it.  This assumes ath_hal_setuptxdesc
 910          * initializes the descriptors (so we don't have to)
 911          * when the hardware supports multi-rate retry and
 912          * we don't use it.
 913          */
 914         if (try0 != ATH_TXMAXTRY)
 915                 ATH_HAL_SETUPXTXDESC(ah, ds,
 916                     an->an_tx_rate1sp, 2,    /* series 1 */
 917                     an->an_tx_rate2sp, 2,    /* series 2 */
 918                     an->an_tx_rate3sp, 2);   /* series 3 */
 919 
 920         ds->ds_link = 0;
 921         ds->ds_data = bf->bf_dma.cookie.dmac_address;
 922         ATH_HAL_FILLTXDESC(ah, ds,
 923             mbslen,             /* segment length */
 924             AH_TRUE,            /* first segment */
 925             AH_TRUE,            /* last segment */
 926             ds);                /* first descriptor */
 927 
 928         ATH_DMA_SYNC(bf->bf_dma, DDI_DMA_SYNC_FORDEV);
 929 
 930         mutex_enter(&txq->axq_lock);
 931         list_insert_tail(&txq->axq_list, bf);
 932         if (txq->axq_link == NULL) {
 933                 ATH_HAL_PUTTXBUF(ah, txq->axq_qnum, bf->bf_daddr);
 934         } else {
 935                 *txq->axq_link = bf->bf_daddr;
 936         }
 937         txq->axq_link = &ds->ds_link;
 938         mutex_exit(&txq->axq_lock);
 939 
 940         ATH_HAL_TXSTART(ah, txq->axq_qnum);
 941 
 942         ic->ic_stats.is_tx_frags++;
 943         ic->ic_stats.is_tx_bytes += pktlen;
 944 
 945         return (0);
 946 }
 947 
 948 /*
 949  * Transmit a management frame.  On failure we reclaim the skbuff.
 950  * Note that management frames come directly from the 802.11 layer
 951  * and do not honor the send queue flow control.  Need to investigate
 952  * using priority queueing so management frames can bypass data.
 953  */
static int
ath_xmit(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
{
        ath_t *asc = (ath_t *)ic;
        struct ath_hal *ah = asc->asc_ah;
        struct ieee80211_node *in = NULL;
        struct ath_buf *bf = NULL;
        struct ieee80211_frame *wh;
        int error = 0;

        /* Callers must hand us a single message, not a chain. */
        ASSERT(mp->b_next == NULL);

        if (!ATH_IS_RUNNING(asc)) {
                /*
                 * Device is down.  Non-data frames are consumed here;
                 * data frames are left intact so the caller (ath_m_tx)
                 * can dispose of the whole chain itself.
                 */
                if ((type & IEEE80211_FC0_TYPE_MASK) !=
                    IEEE80211_FC0_TYPE_DATA) {
                        freemsg(mp);
                }
                return (ENXIO);
        }

        /* Grab a TX buffer */
        mutex_enter(&asc->asc_txbuflock);
        bf = list_head(&asc->asc_txbuf_list);
        if (bf != NULL)
                list_remove(&asc->asc_txbuf_list, bf);
        if (list_empty(&asc->asc_txbuf_list)) {
                /*
                 * We took the last buffer; note the stall.  TX
                 * completion (ath_tx_processq) replenishes the list.
                 * NB: debug tag below predates this function's rename.
                 */
                ATH_DEBUG((ATH_DBG_SEND, "ath: ath_mgmt_send(): "
                    "stop queue\n"));
                asc->asc_stats.ast_tx_qstop++;
        }
        mutex_exit(&asc->asc_txbuflock);
        if (bf == NULL) {
                ATH_DEBUG((ATH_DBG_SEND, "ath: ath_mgmt_send(): discard, "
                    "no xmit buf\n"));
                ic->ic_stats.is_tx_nobuf++;
                if ((type & IEEE80211_FC0_TYPE_MASK) ==
                    IEEE80211_FC0_TYPE_DATA) {
                        /*
                         * Data frame: request a mac_tx_update() when a
                         * buffer frees up, and keep the message so the
                         * caller can requeue it.
                         */
                        asc->asc_stats.ast_tx_nobuf++;
                        mutex_enter(&asc->asc_resched_lock);
                        asc->asc_resched_needed = B_TRUE;
                        mutex_exit(&asc->asc_resched_lock);
                } else {
                        /* Management frames are simply dropped. */
                        asc->asc_stats.ast_tx_nobufmgt++;
                        freemsg(mp);
                }
                return (ENOMEM);
        }

        wh = (struct ieee80211_frame *)mp->b_rptr;

        /* Locate node */
        in = ieee80211_find_txnode(ic,  wh->i_addr1);
        if (in == NULL) {
                error = EIO;
                goto bad;
        }

        /* Node is in active use; reset its inactivity counter. */
        in->in_inact = 0;
        switch (type & IEEE80211_FC0_TYPE_MASK) {
        case IEEE80211_FC0_TYPE_DATA:
                (void) ieee80211_encap(ic, mp, in);
                break;
        default:
                if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
                    IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
                        /* fill time stamp */
                        uint64_t tsf;
                        uint32_t *tstamp;

                        tsf = ATH_HAL_GETTSF64(ah);
                        /* adjust 100us delay to xmit */
                        tsf += 100;
                        /* LINTED E_BAD_PTR_CAST_ALIGN */
                        tstamp = (uint32_t *)&wh[1];
                        /* Timestamp body follows the header, LE order. */
                        tstamp[0] = LE_32(tsf & 0xffffffff);
                        tstamp[1] = LE_32(tsf >> 32);
                }
                asc->asc_stats.ast_tx_mgmt++;
                break;
        }

        error = ath_tx_start(asc, in, bf, mp);
        if (error != 0) {
bad:
                /*
                 * NB: "bad" is also entered by goto from above, with
                 * bf already off the free list; return it here.
                 */
                ic->ic_stats.is_tx_failed++;
                if (bf != NULL) {
                        mutex_enter(&asc->asc_txbuflock);
                        list_insert_tail(&asc->asc_txbuf_list, bf);
                        mutex_exit(&asc->asc_txbuflock);
                }
        }
        if (in != NULL)
                ieee80211_free_node(in);
        /*
         * Free mp for non-data frames and for successfully-sent data
         * (presumably ath_tx_start copied the payload into bf's DMA
         * buffer — TODO confirm); a failed data frame is left for the
         * caller to requeue.
         */
        if ((type & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_DATA ||
            error == 0) {
                freemsg(mp);
        }

        return (error);
}
1054 
1055 static mblk_t *
1056 ath_m_tx(void *arg, mblk_t *mp)
1057 {
1058         ath_t *asc = arg;
1059         ieee80211com_t *ic = (ieee80211com_t *)asc;
1060         mblk_t *next;
1061         int error = 0;
1062 
1063         /*
1064          * No data frames go out unless we're associated; this
1065          * should not happen as the 802.11 layer does not enable
1066          * the xmit queue until we enter the RUN state.
1067          */
1068         if (ic->ic_state != IEEE80211_S_RUN) {
1069                 ATH_DEBUG((ATH_DBG_SEND, "ath: ath_m_tx(): "
1070                     "discard, state %u\n", ic->ic_state));
1071                 asc->asc_stats.ast_tx_discard++;
1072                 freemsgchain(mp);
1073                 return (NULL);
1074         }
1075 
1076         while (mp != NULL) {
1077                 next = mp->b_next;
1078                 mp->b_next = NULL;
1079                 error = ath_xmit(ic, mp, IEEE80211_FC0_TYPE_DATA);
1080                 if (error != 0) {
1081                         mp->b_next = next;
1082                         if (error == ENOMEM) {
1083                                 break;
1084                         } else {
1085                                 freemsgchain(mp);       /* CR6501759 issues */
1086                                 return (NULL);
1087                         }
1088                 }
1089                 mp = next;
1090         }
1091 
1092         return (mp);
1093 }
1094 
/*
 * Drain completed frames from the given h/w transmit queue: harvest
 * descriptor status, update per-node and driver statistics, recycle
 * the ath_bufs onto the free list, and restart any stalled outbound
 * traffic.  Returns the number of acknowledged frames counted for
 * the rate control / beacon-miss workaround accounting.
 */
static int
ath_tx_processq(ath_t *asc, struct ath_txq *txq)
{
        ieee80211com_t *ic = (ieee80211com_t *)asc;
        struct ath_hal *ah = asc->asc_ah;
        struct ath_buf *bf;
        struct ath_desc *ds;
        struct ieee80211_node *in;
        int32_t sr, lr, nacked = 0;
        struct ath_tx_status *ts;
        HAL_STATUS status;
        struct ath_node *an;

        for (;;) {
                mutex_enter(&txq->axq_lock);
                bf = list_head(&txq->axq_list);
                if (bf == NULL) {
                        /* Queue fully drained; forget the link pointer. */
                        txq->axq_link = NULL;
                        mutex_exit(&txq->axq_lock);
                        break;
                }
                ds = bf->bf_desc;    /* last descriptor */
                ts = &bf->bf_status.ds_txstat;
                status = ATH_HAL_TXPROCDESC(ah, ds, ts);
#ifdef DEBUG
                ath_printtxbuf(bf, status == HAL_OK);
#endif
                if (status == HAL_EINPROGRESS) {
                        /* Hardware hasn't finished this one; stop here. */
                        mutex_exit(&txq->axq_lock);
                        break;
                }
                list_remove(&txq->axq_list, bf);
                mutex_exit(&txq->axq_lock);
                in = bf->bf_in;
                if (in != NULL) {
                        an = ATH_NODE(in);
                        /* Successful transmission */
                        if (ts->ts_status == 0) {
                                an->an_tx_ok++;
                                /* Remember the antenna that worked. */
                                an->an_tx_antenna = ts->ts_antenna;
                                if (ts->ts_rate & HAL_TXSTAT_ALTRATE)
                                        asc->asc_stats.ast_tx_altrate++;
                                asc->asc_stats.ast_tx_rssidelta =
                                    ts->ts_rssi - asc->asc_stats.ast_tx_rssi;
                                asc->asc_stats.ast_tx_rssi = ts->ts_rssi;
                        } else {
                                an->an_tx_err++;
                                if (ts->ts_status & HAL_TXERR_XRETRY)
                                        asc->asc_stats.ast_tx_xretries++;
                                if (ts->ts_status & HAL_TXERR_FIFO)
                                        asc->asc_stats.ast_tx_fifoerr++;
                                if (ts->ts_status & HAL_TXERR_FILT)
                                        asc->asc_stats.ast_tx_filtered++;
                                an->an_tx_antenna = 0;       /* invalidate */
                        }
                        sr = ts->ts_shortretry;
                        lr = ts->ts_longretry;
                        asc->asc_stats.ast_tx_shortretry += sr;
                        asc->asc_stats.ast_tx_longretry += lr;
                        /*
                         * Hand the descriptor to the rate control algorithm.
                         * NOTE(review): an_tx_ok/an_tx_err were already
                         * bumped above and are bumped again here for
                         * non-filtered, ACK-expecting frames — looks like
                         * a double count; confirm against the rate control
                         * module before changing.
                         */
                        if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
                            (bf->bf_flags & HAL_TXDESC_NOACK) == 0) {
                                /*
                                 * If frame was ack'd update the last rx time
                                 * used to workaround phantom bmiss interrupts.
                                 */
                                if (ts->ts_status == 0) {
                                        nacked++;
                                        an->an_tx_ok++;
                                } else {
                                        an->an_tx_err++;
                                }
                                an->an_tx_retr += sr + lr;
                        }
                }
                /* Drop the node ref and recycle the buffer. */
                bf->bf_in = NULL;
                mutex_enter(&asc->asc_txbuflock);
                list_insert_tail(&asc->asc_txbuf_list, bf);
                mutex_exit(&asc->asc_txbuflock);
                /*
                 * Reschedule stalled outbound packets
                 */
                mutex_enter(&asc->asc_resched_lock);
                if (asc->asc_resched_needed) {
                        asc->asc_resched_needed = B_FALSE;
                        mac_tx_update(ic->ic_mach);
                }
                mutex_exit(&asc->asc_resched_lock);
        }
        return (nacked);
}
1188 
1189 
1190 static void
1191 ath_tx_handler(ath_t *asc)
1192 {
1193         int i;
1194 
1195         /*
1196          * Process each active queue.
1197          */
1198         for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
1199                 if (ATH_TXQ_SETUP(asc, i)) {
1200                         (void) ath_tx_processq(asc, &asc->asc_txq[i]);
1201                 }
1202         }
1203 }
1204 
1205 static struct ieee80211_node *
1206 ath_node_alloc(ieee80211com_t *ic)
1207 {
1208         struct ath_node *an;
1209         ath_t *asc = (ath_t *)ic;
1210 
1211         an = kmem_zalloc(sizeof (struct ath_node), KM_SLEEP);
1212         ath_rate_update(asc, &an->an_node, 0);
1213         return (&an->an_node);
1214 }
1215 
/*
 * net80211 node free callback.  Before releasing the node, scrub any
 * references to it from buffers still pending on the h/w transmit
 * queues so that later completion processing (ath_tx_processq) does
 * not touch freed memory.
 */
static void
ath_node_free(struct ieee80211_node *in)
{
        ieee80211com_t *ic = in->in_ic;
        ath_t *asc = (ath_t *)ic;
        struct ath_buf *bf;
        struct ath_txq *txq;
        int32_t i;

        for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(asc, i)) {
                        txq = &asc->asc_txq[i];
                        mutex_enter(&txq->axq_lock);
                        bf = list_head(&txq->axq_list);
                        while (bf != NULL) {
                                if (bf->bf_in == in) {
                                        bf->bf_in = NULL;
                                }
                                bf = list_next(&txq->axq_list, bf);
                        }
                        mutex_exit(&txq->axq_lock);
                }
        }
        /* Let the 802.11 layer tear down its per-node state first. */
        ic->ic_node_cleanup(in);
        if (in->in_wpa_ie != NULL)
                ieee80211_free(in->in_wpa_ie);
        /* The node is embedded in the ath_node we allocated. */
        kmem_free(in, sizeof (struct ath_node));
}
1244 
1245 static void
1246 ath_next_scan(void *arg)
1247 {
1248         ieee80211com_t *ic = arg;
1249         ath_t *asc = (ath_t *)ic;
1250 
1251         asc->asc_scan_timer = 0;
1252         if (ic->ic_state == IEEE80211_S_SCAN) {
1253                 asc->asc_scan_timer = timeout(ath_next_scan, (void *)asc,
1254                     drv_usectohz(ath_dwelltime * 1000));
1255                 ieee80211_next_scan(ic);
1256         }
1257 }
1258 
/*
 * Cancel the AP/neighbor scan timer.  The callback (ath_next_scan)
 * may rearm the timer concurrently, so keep cancelling until the id
 * we just cancelled is still the one recorded, i.e. no new timeout
 * was scheduled behind our back.
 */
static void
ath_stop_scantimer(ath_t *asc)
{
        timeout_id_t tmp_id = 0;

        while ((asc->asc_scan_timer != 0) && (tmp_id != asc->asc_scan_timer)) {
                tmp_id = asc->asc_scan_timer;
                (void) untimeout(tmp_id);
        }
        asc->asc_scan_timer = 0;
}
1270 
/*
 * 802.11 state transition handler.  Programs the hardware (LED,
 * channel, rx filter, bssid, keys, beacon timers) for the new state,
 * then chains to the saved net80211 handler and starts any timers
 * the new state needs.  Returns 0 or an error from the channel set
 * or the parent handler.
 */
static int32_t
ath_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
{
        ath_t *asc = (ath_t *)ic;
        struct ath_hal *ah = asc->asc_ah;
        struct ieee80211_node *in;
        int32_t i, error;
        uint8_t *bssid;
        uint32_t rfilt;
        enum ieee80211_state ostate;

        /* LED state table, indexed directly by ieee80211_state. */
        static const HAL_LED_STATE leds[] = {
            HAL_LED_INIT,       /* IEEE80211_S_INIT */
            HAL_LED_SCAN,       /* IEEE80211_S_SCAN */
            HAL_LED_AUTH,       /* IEEE80211_S_AUTH */
            HAL_LED_ASSOC,      /* IEEE80211_S_ASSOC */
            HAL_LED_RUN,        /* IEEE80211_S_RUN */
        };
        if (!ATH_IS_RUNNING(asc))
                return (0);

        ostate = ic->ic_state;
        /* Any transition away from SCAN stops the scan timer. */
        if (nstate != IEEE80211_S_SCAN)
                ath_stop_scantimer(asc);

        ATH_LOCK(asc);
        ATH_HAL_SETLEDSTATE(ah, leds[nstate]);  /* set LED */

        if (nstate == IEEE80211_S_INIT) {
                asc->asc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
                /*
                 * Disable interrupts.
                 */
                ATH_HAL_INTRSET(ah, asc->asc_imask &~ HAL_INT_GLOBAL);
                ATH_UNLOCK(asc);
                /* Skip chip programming; just notify the 802.11 layer. */
                goto done;
        }
        in = ic->ic_bss;
        error = ath_chan_set(asc, ic->ic_curchan);
        if (error != 0) {
                if (nstate != IEEE80211_S_SCAN) {
                        /*
                         * Channel change failed outside of a scan:
                         * restore the channel state and bail without
                         * invoking the parent handler.
                         */
                        ATH_UNLOCK(asc);
                        ieee80211_reset_chan(ic);
                        goto bad;
                }
        }

        rfilt = ath_calcrxfilter(asc);

        /* While scanning, use our own MAC address as the "bssid". */
        if (nstate == IEEE80211_S_SCAN)
                bssid = ic->ic_macaddr;
        else
                bssid = in->in_bssid;
        ATH_HAL_SETRXFILTER(ah, rfilt);

        /* Only a non-IBSS RUN state carries a real association id. */
        if (nstate == IEEE80211_S_RUN && ic->ic_opmode != IEEE80211_M_IBSS)
                ATH_HAL_SETASSOCID(ah, bssid, in->in_associd);
        else
                ATH_HAL_SETASSOCID(ah, bssid, 0);
        if (ic->ic_flags & IEEE80211_F_PRIVACY) {
                /* Retarget any valid WEP keys at the new bssid. */
                for (i = 0; i < IEEE80211_WEP_NKID; i++) {
                        if (ATH_HAL_KEYISVALID(ah, i))
                                ATH_HAL_KEYSETMAC(ah, i, bssid);
                }
        }

        if ((nstate == IEEE80211_S_RUN) &&
            (ostate != IEEE80211_S_RUN)) {
                /* Configure the beacon and sleep timers. */
                ath_beacon_config(asc);
        } else {
                /* Not (newly) running: mask beacon/bmiss interrupts. */
                asc->asc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
                ATH_HAL_INTRSET(ah, asc->asc_imask);
        }
        /*
         * Reset the rate control state.
         */
        ath_rate_ctl_reset(asc, nstate);

        ATH_UNLOCK(asc);
done:
        /*
         * Invoke the parent method to complete the work.
         */
        error = asc->asc_newstate(ic, nstate, arg);
        /*
         * Finally, start any timers.
         */
        if (nstate == IEEE80211_S_RUN) {
                ieee80211_start_watchdog(ic, 1);
        } else if ((nstate == IEEE80211_S_SCAN) && (ostate != nstate)) {
                /* start ap/neighbor scan timer */
                ASSERT(asc->asc_scan_timer == 0);
                asc->asc_scan_timer = timeout(ath_next_scan, (void *)asc,
                    drv_usectohz(ath_dwelltime * 1000));
        }
bad:
        return (error);
}
1370 
1371 /*
1372  * Periodically recalibrate the PHY to account
1373  * for temperature/environment changes.
1374  */
static void
ath_calibrate(ath_t *asc)
{
        struct ath_hal *ah = asc->asc_ah;
        HAL_BOOL iqcaldone;

        asc->asc_stats.ast_per_cal++;

        if (ATH_HAL_GETRFGAIN(ah) == HAL_RFGAIN_NEED_CHANGE) {
                /*
                 * Rfgain is out of bounds, reset the chip
                 * to load new gain values.
                 */
                ATH_DEBUG((ATH_DBG_HAL, "ath: ath_calibrate(): "
                    "Need change RFgain\n"));
                asc->asc_stats.ast_per_rfgain++;
                (void) ath_reset(&asc->asc_isc);
        }
        /*
         * Run the HAL's periodic calibration on the current channel.
         * NB: iqcaldone is an output of the HAL call; its value is
         * not examined here, only calibration failure is recorded.
         */
        if (!ATH_HAL_CALIBRATE(ah, &asc->asc_curchan, &iqcaldone)) {
                ATH_DEBUG((ATH_DBG_HAL, "ath: ath_calibrate(): "
                    "calibration of channel %u failed\n",
                    asc->asc_curchan.channel));
                asc->asc_stats.ast_per_calfail++;
        }
}
1400 
/*
 * Periodic watchdog timer callback.  While in RUN state it drives
 * PHY recalibration and the transmit rate control, then chains to
 * the net80211 watchdog and rearms itself via
 * ieee80211_start_watchdog.
 */
static void
ath_watchdog(void *arg)
{
        ath_t *asc = arg;
        ieee80211com_t *ic = &asc->asc_isc;
        int ntimer = 0;

        ATH_LOCK(asc);
        ic->ic_watchdog_timer = 0;
        if (!ATH_IS_RUNNING(asc)) {
                ATH_UNLOCK(asc);
                return;
        }

        if (ic->ic_state == IEEE80211_S_RUN) {
                /* periodic recalibration */
                ath_calibrate(asc);

                /*
                 * Start the background rate control thread if we
                 * are not configured to use a fixed xmit rate.
                 */
                if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
                        asc->asc_stats.ast_rate_calls ++;
                        /*
                         * STA mode has a single peer (the bss); other
                         * modes walk the whole node table.
                         */
                        if (ic->ic_opmode == IEEE80211_M_STA)
                                ath_rate_ctl(ic, ic->ic_bss);
                        else
                                ieee80211_iterate_nodes(&ic->ic_sta,
                                    ath_rate_ctl, asc);
                }

                /* Rearm for another interval only while running. */
                ntimer = 1;
        }
        ATH_UNLOCK(asc);

        ieee80211_watchdog(ic);
        if (ntimer != 0)
                ieee80211_start_watchdog(ic, ntimer);
}
1440 
1441 static void
1442 ath_tx_proc(void *arg)
1443 {
1444         ath_t *asc = arg;
1445         ath_tx_handler(asc);
1446 }
1447 
1448 
/*
 * Hard interrupt handler.  Reads and dispatches the interrupt status:
 * fatal/rx-overrun conditions reset the chip, receive work is pushed
 * to the soft interrupt, transmit completion to the taskq.  Returns
 * DDI_INTR_CLAIMED/UNCLAIMED for the (possibly shared) interrupt line.
 */
static uint_t
ath_intr(caddr_t arg)
{
        /* LINTED E_BAD_PTR_CAST_ALIGN */
        ath_t *asc = (ath_t *)arg;
        struct ath_hal *ah = asc->asc_ah;
        HAL_INT status;
        ieee80211com_t *ic = (ieee80211com_t *)asc;

        ATH_LOCK(asc);

        if (!ATH_IS_RUNNING(asc)) {
                /*
                 * The hardware is not ready/present, don't touch anything.
                 * Note this can happen early on if the IRQ is shared.
                 */
                ATH_UNLOCK(asc);
                return (DDI_INTR_UNCLAIMED);
        }

        if (!ATH_HAL_INTRPEND(ah)) {    /* shared irq, not for us */
                ATH_UNLOCK(asc);
                return (DDI_INTR_UNCLAIMED);
        }

        /* Fetch (and thereby acknowledge) the interrupt status. */
        ATH_HAL_GETISR(ah, &status);
        /* Only act on interrupt sources we have enabled. */
        status &= asc->asc_imask;
        if (status & HAL_INT_FATAL) {
                asc->asc_stats.ast_hardware++;
                goto reset;
        } else if (status & HAL_INT_RXORN) {
                asc->asc_stats.ast_rxorn++;
                goto reset;
        } else {
                if (status & HAL_INT_RXEOL) {
                        /*
                         * Out of rx descriptors; drop the link so rx
                         * setup starts a fresh descriptor chain.
                         */
                        asc->asc_stats.ast_rxeol++;
                        asc->asc_rxlink = NULL;
                }
                if (status & HAL_INT_TXURN) {
                        /* TX FIFO underrun: bump the trigger level. */
                        asc->asc_stats.ast_txurn++;
                        ATH_HAL_UPDATETXTRIGLEVEL(ah, AH_TRUE);
                }

                if (status & HAL_INT_RX) {
                        /* Defer receive processing to the softint. */
                        asc->asc_rx_pend = 1;
                        ddi_trigger_softintr(asc->asc_softint_id);
                }
                if (status & HAL_INT_TX) {
                        /* Defer transmit completion to the taskq. */
                        if (ddi_taskq_dispatch(asc->asc_tq, ath_tx_proc,
                            asc, DDI_NOSLEEP) != DDI_SUCCESS) {
                                ath_problem("ath: ath_intr(): "
                                    "No memory available for tx taskq\n");
                        }
                }
                ATH_UNLOCK(asc);

                if (status & HAL_INT_SWBA) {
                        /* This will occur only in Host-AP or Ad-Hoc mode */
                        return (DDI_INTR_CLAIMED);
                }

                if (status & HAL_INT_BMISS) {
                        /* Beacon miss: fall back to ASSOC to re-sync. */
                        if (ic->ic_state == IEEE80211_S_RUN) {
                                (void) ieee80211_new_state(ic,
                                    IEEE80211_S_ASSOC, -1);
                        }
                }

        }

        return (DDI_INTR_CLAIMED);
reset:
        /* NB: reached with ATH_LOCK still held; unlock after reset. */
        (void) ath_reset(ic);
        ATH_UNLOCK(asc);
        return (DDI_INTR_CLAIMED);
}
1525 
1526 static uint_t
1527 ath_softint_handler(caddr_t data)
1528 {
1529         /* LINTED E_BAD_PTR_CAST_ALIGN */
1530         ath_t *asc = (ath_t *)data;
1531 
1532         /*
1533          * Check if the soft interrupt is triggered by another
1534          * driver at the same level.
1535          */
1536         ATH_LOCK(asc);
1537         if (asc->asc_rx_pend) { /* Soft interrupt for this driver */
1538                 asc->asc_rx_pend = 0;
1539                 ATH_UNLOCK(asc);
1540                 ath_rx_handler(asc);
1541                 return (DDI_INTR_CLAIMED);
1542         }
1543         ATH_UNLOCK(asc);
1544         return (DDI_INTR_UNCLAIMED);
1545 }
1546 
1547 /*
1548  * following are gld callback routine
1549  * ath_gld_send, ath_gld_ioctl, ath_gld_gstat
1550  * are listed in other corresponding sections.
1551  * reset the hardware w/o losing operational state.  this is
1552  * basically a more efficient way of doing ath_gld_stop, ath_gld_start,
1553  * followed by state transitions to the current 802.11
1554  * operational state.  used to recover from errors rx overrun
1555  * and to reset the hardware when rf gain settings must be reset.
1556  */
1557 
/*
 * Stop the device with ATH_LOCK held (asserted below).  No-op if the
 * device is already stopped.
 */
static void
ath_stop_locked(ath_t *asc)
{
        ieee80211com_t *ic = (ieee80211com_t *)asc;
        struct ath_hal *ah = asc->asc_ah;

        ATH_LOCK_ASSERT(asc);
        if (!asc->asc_isrunning)
                return;

        /*
         * Shutdown the hardware and driver:
         *    reset 802.11 state machine
         *    turn off timers
         *    disable interrupts
         *    turn off the radio
         *    clear transmit machinery
         *    clear receive machinery
         *    drain and release tx queues
         *    reclaim beacon resources
         *    power down hardware
         *
         * Note that some of this work is not possible if the
         * hardware is gone (invalid).
         */
        /*
         * Drop the driver lock around the 802.11 calls: the state
         * machine may call back into us (e.g. ath_newstate takes
         * ATH_LOCK itself).
         */
        ATH_UNLOCK(asc);
        ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
        ieee80211_stop_watchdog(ic);
        ATH_LOCK(asc);
        ATH_HAL_INTRSET(ah, 0);
        ath_draintxq(asc);
        if (!asc->asc_invalid) {
                ath_stoprecv(asc);
                ATH_HAL_PHYDISABLE(ah);
        } else {
                /* Hardware gone; just forget the rx descriptor chain. */
                asc->asc_rxlink = NULL;
        }
        asc->asc_isrunning = 0;
}
1597 
1598 static void
1599 ath_m_stop(void *arg)
1600 {
1601         ath_t *asc = arg;
1602         struct ath_hal *ah = asc->asc_ah;
1603 
1604         ATH_LOCK(asc);
1605         ath_stop_locked(asc);
1606         ATH_HAL_SETPOWER(ah, HAL_PM_AWAKE);
1607         asc->asc_invalid = 1;
1608         ATH_UNLOCK(asc);
1609 }
1610 
1611 static int
1612 ath_start_locked(ath_t *asc)
1613 {
1614         ieee80211com_t *ic = (ieee80211com_t *)asc;
1615         struct ath_hal *ah = asc->asc_ah;
1616         HAL_STATUS status;
1617 
1618         ATH_LOCK_ASSERT(asc);
1619 
1620         /*
1621          * The basic interface to setting the hardware in a good
1622          * state is ``reset''.  On return the hardware is known to
1623          * be powered up and with interrupts disabled.  This must
1624          * be followed by initialization of the appropriate bits
1625          * and then setup of the interrupt mask.
1626          */
1627         asc->asc_curchan.channel = ic->ic_curchan->ich_freq;
1628         asc->asc_curchan.channelFlags = ath_chan2flags(ic, ic->ic_curchan);
1629         if (!ATH_HAL_RESET(ah, (HAL_OPMODE)ic->ic_opmode,
1630             &asc->asc_curchan, AH_FALSE, &status)) {
1631                 ATH_DEBUG((ATH_DBG_HAL, "ath: ath_m_start(): "
1632                     "reset hardware failed: '%s' (HAL status %u)\n",
1633                     ath_get_hal_status_desc(status), status));
1634                 return (ENOTACTIVE);
1635         }
1636 
1637         (void) ath_startrecv(asc);
1638 
1639         /*
1640          * Enable interrupts.
1641          */
1642         asc->asc_imask = HAL_INT_RX | HAL_INT_TX
1643             | HAL_INT_RXEOL | HAL_INT_RXORN
1644             | HAL_INT_FATAL | HAL_INT_GLOBAL;
1645         ATH_HAL_INTRSET(ah, asc->asc_imask);
1646 
1647         /*
1648          * The hardware should be ready to go now so it's safe
1649          * to kick the 802.11 state machine as it's likely to
1650          * immediately call back to us to send mgmt frames.
1651          */
1652         ath_chan_change(asc, ic->ic_curchan);
1653 
1654         asc->asc_isrunning = 1;
1655 
1656         return (0);
1657 }
1658 
1659 int
1660 ath_m_start(void *arg)
1661 {
1662         ath_t *asc = arg;
1663         int err;
1664 
1665         ATH_LOCK(asc);
1666         /*
1667          * Stop anything previously setup.  This is safe
1668          * whether this is the first time through or not.
1669          */
1670         ath_stop_locked(asc);
1671 
1672         if ((err = ath_start_locked(asc)) != 0) {
1673                 ATH_UNLOCK(asc);
1674                 return (err);
1675         }
1676 
1677         asc->asc_invalid = 0;
1678         ATH_UNLOCK(asc);
1679 
1680         return (0);
1681 }
1682 
1683 
1684 static int
1685 ath_m_unicst(void *arg, const uint8_t *macaddr)
1686 {
1687         ath_t *asc = arg;
1688         struct ath_hal *ah = asc->asc_ah;
1689 
1690         ATH_DEBUG((ATH_DBG_GLD, "ath: ath_gld_saddr(): "
1691             "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
1692             macaddr[0], macaddr[1], macaddr[2],
1693             macaddr[3], macaddr[4], macaddr[5]));
1694 
1695         ATH_LOCK(asc);
1696         IEEE80211_ADDR_COPY(asc->asc_isc.ic_macaddr, macaddr);
1697         ATH_HAL_SETMAC(ah, asc->asc_isc.ic_macaddr);
1698 
1699         (void) ath_reset(&asc->asc_isc);
1700         ATH_UNLOCK(asc);
1701         return (0);
1702 }
1703 
1704 static int
1705 ath_m_promisc(void *arg, boolean_t on)
1706 {
1707         ath_t *asc = arg;
1708         struct ath_hal *ah = asc->asc_ah;
1709         uint32_t rfilt;
1710 
1711         ATH_LOCK(asc);
1712         rfilt = ATH_HAL_GETRXFILTER(ah);
1713         if (on)
1714                 rfilt |= HAL_RX_FILTER_PROM;
1715         else
1716                 rfilt &= ~HAL_RX_FILTER_PROM;
1717         asc->asc_promisc = on;
1718         ATH_HAL_SETRXFILTER(ah, rfilt);
1719         ATH_UNLOCK(asc);
1720 
1721         return (0);
1722 }
1723 
1724 static int
1725 ath_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
1726 {
1727         ath_t *asc = arg;
1728         struct ath_hal *ah = asc->asc_ah;
1729         uint32_t val, index, bit;
1730         uint8_t pos;
1731         uint32_t *mfilt = asc->asc_mcast_hash;
1732 
1733         ATH_LOCK(asc);
1734 
1735         /* calculate XOR of eight 6bit values */
1736         val = ATH_LE_READ_4(mca + 0);
1737         pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
1738         val = ATH_LE_READ_4(mca + 3);
1739         pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
1740         pos &= 0x3f;
1741         index = pos / 32;
1742         bit = 1 << (pos % 32);
1743 
1744         if (add) {      /* enable multicast */
1745                 asc->asc_mcast_refs[pos]++;
1746                 mfilt[index] |= bit;
1747         } else {        /* disable multicast */
1748                 if (--asc->asc_mcast_refs[pos] == 0)
1749                         mfilt[index] &= ~bit;
1750         }
1751         ATH_HAL_SETMCASTFILTER(ah, mfilt[0], mfilt[1]);
1752 
1753         ATH_UNLOCK(asc);
1754         return (0);
1755 }
1756 /*
1757  * callback functions for /get/set properties
1758  */
1759 static int
1760 ath_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
1761     uint_t wldp_length, const void *wldp_buf)
1762 {
1763         ath_t   *asc = arg;
1764         int     err;
1765 
1766         err = ieee80211_setprop(&asc->asc_isc, pr_name, wldp_pr_num,
1767             wldp_length, wldp_buf);
1768 
1769         ATH_LOCK(asc);
1770 
1771         if (err == ENETRESET) {
1772                 if (ATH_IS_RUNNING(asc)) {
1773                         ATH_UNLOCK(asc);
1774                         (void) ath_m_start(asc);
1775                         (void) ieee80211_new_state(&asc->asc_isc,
1776                             IEEE80211_S_SCAN, -1);
1777                         ATH_LOCK(asc);
1778                 }
1779                 err = 0;
1780         }
1781 
1782         ATH_UNLOCK(asc);
1783 
1784         return (err);
1785 }
1786 
1787 static int
1788 ath_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
1789     uint_t wldp_length, void *wldp_buf)
1790 {
1791         ath_t   *asc = arg;
1792         int     err = 0;
1793 
1794         err = ieee80211_getprop(&asc->asc_isc, pr_name, wldp_pr_num,
1795             wldp_length, wldp_buf);
1796 
1797         return (err);
1798 }
1799 
1800 static void
1801 ath_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
1802     mac_prop_info_handle_t mph)
1803 {
1804         ath_t   *asc = arg;
1805 
1806         ieee80211_propinfo(&asc->asc_isc, pr_name, wldp_pr_num, mph);
1807 }
1808 
1809 static void
1810 ath_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1811 {
1812         ath_t *asc = arg;
1813         int32_t err;
1814 
1815         err = ieee80211_ioctl(&asc->asc_isc, wq, mp);
1816         ATH_LOCK(asc);
1817         if (err == ENETRESET) {
1818                 if (ATH_IS_RUNNING(asc)) {
1819                         ATH_UNLOCK(asc);
1820                         (void) ath_m_start(asc);
1821                         (void) ieee80211_new_state(&asc->asc_isc,
1822                             IEEE80211_S_SCAN, -1);
1823                         ATH_LOCK(asc);
1824                 }
1825         }
1826         ATH_UNLOCK(asc);
1827 }
1828 
1829 static int
1830 ath_m_stat(void *arg, uint_t stat, uint64_t *val)
1831 {
1832         ath_t *asc = arg;
1833         ieee80211com_t *ic = (ieee80211com_t *)asc;
1834         struct ieee80211_node *in = ic->ic_bss;
1835         struct ieee80211_rateset *rs = &in->in_rates;
1836 
1837         ATH_LOCK(asc);
1838         switch (stat) {
1839         case MAC_STAT_IFSPEED:
1840                 *val = (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL) / 2 *
1841                     1000000ull;
1842                 break;
1843         case MAC_STAT_NOXMTBUF:
1844                 *val = asc->asc_stats.ast_tx_nobuf +
1845                     asc->asc_stats.ast_tx_nobufmgt;
1846                 break;
1847         case MAC_STAT_IERRORS:
1848                 *val = asc->asc_stats.ast_rx_tooshort;
1849                 break;
1850         case MAC_STAT_RBYTES:
1851                 *val = ic->ic_stats.is_rx_bytes;
1852                 break;
1853         case MAC_STAT_IPACKETS:
1854                 *val = ic->ic_stats.is_rx_frags;
1855                 break;
1856         case MAC_STAT_OBYTES:
1857                 *val = ic->ic_stats.is_tx_bytes;
1858                 break;
1859         case MAC_STAT_OPACKETS:
1860                 *val = ic->ic_stats.is_tx_frags;
1861                 break;
1862         case MAC_STAT_OERRORS:
1863         case WIFI_STAT_TX_FAILED:
1864                 *val = asc->asc_stats.ast_tx_fifoerr +
1865                     asc->asc_stats.ast_tx_xretries +
1866                     asc->asc_stats.ast_tx_discard;
1867                 break;
1868         case WIFI_STAT_TX_RETRANS:
1869                 *val = asc->asc_stats.ast_tx_xretries;
1870                 break;
1871         case WIFI_STAT_FCS_ERRORS:
1872                 *val = asc->asc_stats.ast_rx_crcerr;
1873                 break;
1874         case WIFI_STAT_WEP_ERRORS:
1875                 *val = asc->asc_stats.ast_rx_badcrypt;
1876                 break;
1877         case WIFI_STAT_TX_FRAGS:
1878         case WIFI_STAT_MCAST_TX:
1879         case WIFI_STAT_RTS_SUCCESS:
1880         case WIFI_STAT_RTS_FAILURE:
1881         case WIFI_STAT_ACK_FAILURE:
1882         case WIFI_STAT_RX_FRAGS:
1883         case WIFI_STAT_MCAST_RX:
1884         case WIFI_STAT_RX_DUPS:
1885                 ATH_UNLOCK(asc);
1886                 return (ieee80211_stat(ic, stat, val));
1887         default:
1888                 ATH_UNLOCK(asc);
1889                 return (ENOTSUP);
1890         }
1891         ATH_UNLOCK(asc);
1892 
1893         return (0);
1894 }
1895 
1896 static int
1897 ath_pci_setup(ath_t *asc)
1898 {
1899         uint16_t command;
1900 
1901         /*
1902          * Enable memory mapping and bus mastering
1903          */
1904         ASSERT(asc != NULL);
1905         command = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_COMM);
1906         command |= PCI_COMM_MAE | PCI_COMM_ME;
1907         pci_config_put16(asc->asc_cfg_handle, PCI_CONF_COMM, command);
1908         command = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_COMM);
1909         if ((command & PCI_COMM_MAE) == 0) {
1910                 ath_problem("ath: ath_pci_setup(): "
1911                     "failed to enable memory mapping\n");
1912                 return (EIO);
1913         }
1914         if ((command & PCI_COMM_ME) == 0) {
1915                 ath_problem("ath: ath_pci_setup(): "
1916                     "failed to enable bus mastering\n");
1917                 return (EIO);
1918         }
1919         ATH_DEBUG((ATH_DBG_INIT, "ath: ath_pci_setup(): "
1920             "set command reg to 0x%x \n", command));
1921 
1922         return (0);
1923 }
1924 
1925 static int
1926 ath_resume(dev_info_t *devinfo)
1927 {
1928         ath_t *asc;
1929         int ret = DDI_SUCCESS;
1930 
1931         asc = ddi_get_soft_state(ath_soft_state_p, ddi_get_instance(devinfo));
1932         if (asc == NULL) {
1933                 ATH_DEBUG((ATH_DBG_SUSPEND, "ath: ath_resume(): "
1934                     "failed to get soft state\n"));
1935                 return (DDI_FAILURE);
1936         }
1937 
1938         ATH_LOCK(asc);
1939         /*
1940          * Set up config space command register(s). Refuse
1941          * to resume on failure.
1942          */
1943         if (ath_pci_setup(asc) != 0) {
1944                 ATH_DEBUG((ATH_DBG_SUSPEND, "ath: ath_resume(): "
1945                     "ath_pci_setup() failed\n"));
1946                 ATH_UNLOCK(asc);
1947                 return (DDI_FAILURE);
1948         }
1949 
1950         if (!asc->asc_invalid)
1951                 ret = ath_start_locked(asc);
1952         ATH_UNLOCK(asc);
1953 
1954         return (ret);
1955 }
1956 
/*
 * attach(9E) entry point.  DDI_RESUME is forwarded to ath_resume();
 * DDI_ATTACH performs the one-time bring-up: PCI config setup, HAL
 * attach, key cache reset, channel/rate table setup, descriptor,
 * taskq and tx queue allocation, net80211 attach, interrupt
 * registration, and GLDv3 MAC registration.  On any failure the
 * attach_failN labels unwind whatever was completed, in reverse
 * order.
 */
static int
ath_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
        ath_t *asc;
        ieee80211com_t *ic;
        struct ath_hal *ah;
        uint8_t csz;
        HAL_STATUS status;
        caddr_t regs;
        uint32_t i, val;
        uint16_t vendor_id, device_id;
        const char *athname;
        int32_t ath_countrycode = CTRY_DEFAULT; /* country code */
        int32_t err, ath_regdomain = 0; /* regulatory domain */
        char strbuf[32];
        int instance;
        wifi_data_t wd = { 0 };
        mac_register_t *macp;

        switch (cmd) {
        case DDI_ATTACH:
                break;

        case DDI_RESUME:
                return (ath_resume(devinfo));

        default:
                return (DDI_FAILURE);
        }

        instance = ddi_get_instance(devinfo);
        if (ddi_soft_state_zalloc(ath_soft_state_p, instance) != DDI_SUCCESS) {
                ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
                    "Unable to alloc softstate\n"));
                return (DDI_FAILURE);
        }

        asc = ddi_get_soft_state(ath_soft_state_p, ddi_get_instance(devinfo));
        ic = (ieee80211com_t *)asc;
        asc->asc_dev = devinfo;

        /*
         * Initialize all locks up front; the attach_fail0 unwind path
         * destroys them unconditionally.
         */
        mutex_init(&asc->asc_genlock, NULL, MUTEX_DRIVER, NULL);
        mutex_init(&asc->asc_txbuflock, NULL, MUTEX_DRIVER, NULL);
        mutex_init(&asc->asc_rxbuflock, NULL, MUTEX_DRIVER, NULL);
        mutex_init(&asc->asc_resched_lock, NULL, MUTEX_DRIVER, NULL);

        err = pci_config_setup(devinfo, &asc->asc_cfg_handle);
        if (err != DDI_SUCCESS) {
                ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
                    "pci_config_setup() failed"));
                goto attach_fail0;
        }

        if (ath_pci_setup(asc) != 0)
                goto attach_fail1;

        /*
         * Cache line size is used to size and align various
         * structures used to communicate with the hardware.
         */
        csz = pci_config_get8(asc->asc_cfg_handle, PCI_CONF_CACHE_LINESZ);
        if (csz == 0) {
                /*
                 * We must have this setup properly for rx buffer
                 * DMA to work so force a reasonable value here if it
                 * comes up zero.
                 */
                csz = ATH_DEF_CACHE_BYTES / sizeof (uint32_t);
                pci_config_put8(asc->asc_cfg_handle, PCI_CONF_CACHE_LINESZ,
                    csz);
        }
        /* csz is in 32-bit words; keep the byte count for DMA alignment. */
        asc->asc_cachelsz = csz << 2;
        vendor_id = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_VENID);
        device_id = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_DEVID);
        ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): vendor 0x%x, "
            "device id 0x%x, cache size %d\n", vendor_id, device_id, csz));

        athname = ath_hal_probe(vendor_id, device_id);
        ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): athname: %s\n",
            athname ? athname : "Atheros ???"));

        /*
         * NOTE(review): register 0x40 appears to be a device-specific
         * retry/timeout control being cleared here — confirm against the
         * AR5xxx PCI configuration documentation.
         */
        pci_config_put8(asc->asc_cfg_handle, PCI_CONF_LATENCY_TIMER, 0xa8);
        val = pci_config_get32(asc->asc_cfg_handle, 0x40);
        if ((val & 0x0000ff00) != 0)
                pci_config_put32(asc->asc_cfg_handle, 0x40, val & 0xffff00ff);

        err = ddi_regs_map_setup(devinfo, 1,
            &regs, 0, 0, &ath_reg_accattr, &asc->asc_io_handle);
        ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
            "regs map1 = %x err=%d\n", regs, err));
        if (err != DDI_SUCCESS) {
                ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
                    "ddi_regs_map_setup() failed"));
                goto attach_fail1;
        }

        ah = ath_hal_attach(device_id, asc, 0, regs, &status);
        if (ah == NULL) {
                ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
                    "unable to attach hw: '%s' (HAL status %u)\n",
                    ath_get_hal_status_desc(status), status));
                goto attach_fail2;
        }
        ATH_DEBUG((ATH_DBG_ATTACH, "mac %d.%d phy %d.%d",
            ah->ah_macVersion, ah->ah_macRev,
            ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf));
        /* Interrupts stay masked until the softint/intr handlers exist. */
        ATH_HAL_INTRSET(ah, 0);
        asc->asc_ah = ah;

        if (ah->ah_abi != HAL_ABI_VERSION) {
                ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
                    "HAL ABI mismatch detected (0x%x != 0x%x)\n",
                    ah->ah_abi, HAL_ABI_VERSION));
                goto attach_fail3;
        }

        ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
            "HAL ABI version 0x%x\n", ah->ah_abi));
        ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
            "HAL mac version %d.%d, phy version %d.%d\n",
            ah->ah_macVersion, ah->ah_macRev,
            ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf));
        if (ah->ah_analog5GhzRev)
                ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
                    "HAL 5ghz radio version %d.%d\n",
                    ah->ah_analog5GhzRev >> 4,
                    ah->ah_analog5GhzRev & 0xf));
        if (ah->ah_analog2GhzRev)
                ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
                    "HAL 2ghz radio version %d.%d\n",
                    ah->ah_analog2GhzRev >> 4,
                    ah->ah_analog2GhzRev & 0xf));

        /*
         * Check if the MAC has multi-rate retry support.
         * We do this by trying to setup a fake extended
         * descriptor.  MAC's that don't have support will
         * return false w/o doing anything.  MAC's that do
         * support it will return true w/o doing anything.
         */
        asc->asc_mrretry = ATH_HAL_SETUPXTXDESC(ah, NULL, 0, 0, 0, 0, 0, 0);
        ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
            "multi rate retry support=%x\n",
            asc->asc_mrretry));

        /*
         * Get the hardware key cache size.
         */
        asc->asc_keymax = ATH_HAL_KEYCACHESIZE(ah);
        if (asc->asc_keymax > sizeof (asc->asc_keymap) * NBBY) {
                /* Clamp to the bits available in the software keymap. */
                ATH_DEBUG((ATH_DBG_ATTACH, "ath_attach:"
                    " Warning, using only %u entries in %u key cache\n",
                    sizeof (asc->asc_keymap) * NBBY, asc->asc_keymax));
                asc->asc_keymax = sizeof (asc->asc_keymap) * NBBY;
        }
        /*
         * Reset the key cache since some parts do not
         * reset the contents on initial power up.
         */
        for (i = 0; i < asc->asc_keymax; i++)
                ATH_HAL_KEYRESET(ah, i);

        ATH_HAL_GETREGDOMAIN(ah, (uint32_t *)&ath_regdomain);
        ATH_HAL_GETCOUNTRYCODE(ah, &ath_countrycode);
        /*
         * Collect the channel list using the default country
         * code and including outdoor channels.  The 802.11 layer
         * is resposible for filtering this list to a set of
         * channels that it considers ok to use.
         */
        asc->asc_have11g = 0;

        /* enable outdoor use, enable extended channels */
        err = ath_getchannels(asc, ath_countrycode, AH_FALSE, AH_TRUE);
        if (err != 0)
                goto attach_fail3;

        /*
         * Setup rate tables for all potential media types.
         */
        ath_rate_setup(asc, IEEE80211_MODE_11A);
        ath_rate_setup(asc, IEEE80211_MODE_11B);
        ath_rate_setup(asc, IEEE80211_MODE_11G);
        ath_rate_setup(asc, IEEE80211_MODE_TURBO_A);

        /* Setup here so ath_rate_update is happy */
        ath_setcurmode(asc, IEEE80211_MODE_11A);

        err = ath_desc_alloc(devinfo, asc);
        if (err != DDI_SUCCESS) {
                ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
                    "failed to allocate descriptors: %d\n", err));
                goto attach_fail3;
        }

        if ((asc->asc_tq = ddi_taskq_create(devinfo, "ath_taskq", 1,
            TASKQ_DEFAULTPRI, 0)) == NULL) {
                goto attach_fail4;
        }
        /* Setup transmit queues in the HAL */
        if (ath_txq_setup(asc))
                goto attach_fail4;

        ATH_HAL_GETMAC(ah, ic->ic_macaddr);

        /*
         * Initialize pointers to device specific functions which
         * will be used by the generic layer.
         */
        /* 11g support is identified when we fetch the channel set */
        if (asc->asc_have11g)
                ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
                    IEEE80211_C_SHSLOT;         /* short slot time */
        /*
         * Query the hal to figure out h/w crypto support.
         */
        if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_WEP))
                ic->ic_caps |= IEEE80211_C_WEP;
        if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_AES_OCB))
                ic->ic_caps |= IEEE80211_C_AES;
        if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_AES_CCM)) {
                ATH_DEBUG((ATH_DBG_ATTACH, "Atheros support H/W CCMP\n"));
                ic->ic_caps |= IEEE80211_C_AES_CCM;
        }
        if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_CKIP))
                ic->ic_caps |= IEEE80211_C_CKIP;
        if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_TKIP)) {
                ATH_DEBUG((ATH_DBG_ATTACH, "Atheros support H/W TKIP\n"));
                ic->ic_caps |= IEEE80211_C_TKIP;
                /*
                 * Check if h/w does the MIC and/or whether the
                 * separate key cache entries are required to
                 * handle both tx+rx MIC keys.
                 */
                if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_MIC)) {
                        ATH_DEBUG((ATH_DBG_ATTACH, "Support H/W TKIP MIC\n"));
                        ic->ic_caps |= IEEE80211_C_TKIPMIC;
                }

                /*
                 * If the h/w supports storing tx+rx MIC keys
                 * in one cache slot automatically enable use.
                 */
                if (ATH_HAL_HASTKIPSPLIT(ah) ||
                    !ATH_HAL_SETTKIPSPLIT(ah, AH_FALSE)) {
                        asc->asc_splitmic = 1;
                }
        }
        ic->ic_caps |= IEEE80211_C_WPA;      /* Support WPA/WPA2 */

        asc->asc_hasclrkey = ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_CLR);
        /*
         * Mark key cache slots associated with global keys
         * as in use.  If we knew TKIP was not to be used we
         * could leave the +32, +64, and +32+64 slots free.
         */
        for (i = 0; i < IEEE80211_WEP_NKID; i++) {
                setbit(asc->asc_keymap, i);
                setbit(asc->asc_keymap, i+64);
                if (asc->asc_splitmic) {
                        setbit(asc->asc_keymap, i+32);
                        setbit(asc->asc_keymap, i+32+64);
                }
        }

        ic->ic_phytype = IEEE80211_T_OFDM;
        ic->ic_opmode = IEEE80211_M_STA;
        ic->ic_state = IEEE80211_S_INIT;
        ic->ic_maxrssi = ATH_MAX_RSSI;
        ic->ic_set_shortslot = ath_set_shortslot;
        ic->ic_xmit = ath_xmit;
        ieee80211_attach(ic);

        /* different instance has different WPA door */
        (void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
            ddi_driver_name(devinfo),
            ddi_get_instance(devinfo));

        /* Override 80211 default routines */
        ic->ic_reset = ath_reset;
        /* Save the generic newstate handler; ath_newstate chains to it. */
        asc->asc_newstate = ic->ic_newstate;
        ic->ic_newstate = ath_newstate;
        ic->ic_watchdog = ath_watchdog;
        ic->ic_node_alloc = ath_node_alloc;
        ic->ic_node_free = ath_node_free;
        ic->ic_crypto.cs_key_alloc = ath_key_alloc;
        ic->ic_crypto.cs_key_delete = ath_key_delete;
        ic->ic_crypto.cs_key_set = ath_key_set;
        ieee80211_media_init(ic);
        /*
         * initialize default tx key
         */
        ic->ic_def_txkey = 0;

        asc->asc_rx_pend = 0;
        ATH_HAL_INTRSET(ah, 0);
        err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW,
            &asc->asc_softint_id, NULL, 0, ath_softint_handler, (caddr_t)asc);
        if (err != DDI_SUCCESS) {
                ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
                    "ddi_add_softintr() failed\n"));
                goto attach_fail5;
        }

        if (ddi_get_iblock_cookie(devinfo, 0, &asc->asc_iblock)
            != DDI_SUCCESS) {
                ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
                    "Can not get iblock cookie for INT\n"));
                goto attach_fail6;
        }

        if (ddi_add_intr(devinfo, 0, NULL, NULL, ath_intr,
            (caddr_t)asc) != DDI_SUCCESS) {
                ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
                    "Can not set intr for ATH driver\n"));
                goto attach_fail6;
        }

        /*
         * Provide initial settings for the WiFi plugin; whenever this
         * information changes, we need to call mac_plugindata_update()
         */
        wd.wd_opmode = ic->ic_opmode;
        wd.wd_secalloc = WIFI_SEC_NONE;
        IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid);

        if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
                ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
                    "MAC version mismatch\n"));
                goto attach_fail7;
        }

        macp->m_type_ident   = MAC_PLUGIN_IDENT_WIFI;
        macp->m_driver               = asc;
        macp->m_dip          = devinfo;
        macp->m_src_addr     = ic->ic_macaddr;
        macp->m_callbacks    = &ath_m_callbacks;
        macp->m_min_sdu              = 0;
        macp->m_max_sdu              = IEEE80211_MTU;
        macp->m_pdata                = &wd;
        macp->m_pdata_size   = sizeof (wd);

        err = mac_register(macp, &ic->ic_mach);
        mac_free(macp);
        if (err != 0) {
                ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
                    "mac_register err %x\n", err));
                goto attach_fail7;
        }

        /* Create minor node of type DDI_NT_NET_WIFI */
        (void) snprintf(strbuf, sizeof (strbuf), "%s%d",
            ATH_NODENAME, instance);
        err = ddi_create_minor_node(devinfo, strbuf, S_IFCHR,
            instance + 1, DDI_NT_NET_WIFI, 0);
        if (err != DDI_SUCCESS)
                /* Non-fatal: continue the attach with a warning. */
                ATH_DEBUG((ATH_DBG_ATTACH, "WARN: ath: ath_attach(): "
                    "Create minor node failed - %d\n", err));

        mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
        /* Device stays invalid/stopped until the first ath_m_start(). */
        asc->asc_invalid = 1;
        asc->asc_isrunning = 0;
        asc->asc_promisc = B_FALSE;
        bzero(asc->asc_mcast_refs, sizeof (asc->asc_mcast_refs));
        bzero(asc->asc_mcast_hash, sizeof (asc->asc_mcast_hash));
        return (DDI_SUCCESS);
attach_fail7:
        ddi_remove_intr(devinfo, 0, asc->asc_iblock);
attach_fail6:
        ddi_remove_softintr(asc->asc_softint_id);
attach_fail5:
        (void) ieee80211_detach(ic);
attach_fail4:
        ath_desc_free(asc);
        if (asc->asc_tq)
                ddi_taskq_destroy(asc->asc_tq);
attach_fail3:
        ah->ah_detach(asc->asc_ah);
attach_fail2:
        ddi_regs_map_free(&asc->asc_io_handle);
attach_fail1:
        pci_config_teardown(&asc->asc_cfg_handle);
attach_fail0:
        asc->asc_invalid = 1;
        mutex_destroy(&asc->asc_txbuflock);
        /* Tear down any per-queue locks created by ath_txq_setup(). */
        for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(asc, i)) {
                        struct ath_txq *txq = &asc->asc_txq[i];
                        mutex_destroy(&txq->axq_lock);
                }
        }
        mutex_destroy(&asc->asc_rxbuflock);
        mutex_destroy(&asc->asc_genlock);
        mutex_destroy(&asc->asc_resched_lock);
        ddi_soft_state_free(ath_soft_state_p, instance);

        return (DDI_FAILURE);
}
2355 
2356 /*
2357  * Suspend transmit/receive for powerdown
2358  */
2359 static int
2360 ath_suspend(ath_t *asc)
2361 {
2362         ATH_LOCK(asc);
2363         ath_stop_locked(asc);
2364         ATH_UNLOCK(asc);
2365         ATH_DEBUG((ATH_DBG_SUSPEND, "ath: suspended.\n"));
2366 
2367         return (DDI_SUCCESS);
2368 }
2369 
2370 static int32_t
ath_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	/*
	 * detach(9E) entry point: suspend the device (DDI_SUSPEND) or
	 * release every per-instance resource (DDI_DETACH) in roughly
	 * the reverse order of attach.  Returns DDI_SUCCESS on success,
	 * DDI_FAILURE if the mac layer refuses to shut down the datapath
	 * or the command is unrecognized.
	 */
	ath_t *asc;

	asc = ddi_get_soft_state(ath_soft_state_p, ddi_get_instance(devinfo));
	ASSERT(asc != NULL);

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (ath_suspend(asc));

	default:
		return (DDI_FAILURE);
	}

	/*
	 * Ask the mac layer to quiesce the datapath first; if it cannot
	 * (e.g. the interface is still plumbed/in use) refuse the detach
	 * before any state has been torn down.
	 */
	if (mac_disable(asc->asc_isc.ic_mach) != 0)
		return (DDI_FAILURE);

	ath_stop_scantimer(asc);

	/* disable interrupts */
	ATH_HAL_INTRSET(asc->asc_ah, 0);

	/*
	 * Unregister from the MAC layer subsystem
	 */
	(void) mac_unregister(asc->asc_isc.ic_mach);

	/* free interrupt resources */
	ddi_remove_intr(devinfo, 0, asc->asc_iblock);
	ddi_remove_softintr(asc->asc_softint_id);

	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 */
	ieee80211_detach(&asc->asc_isc);
	ath_desc_free(asc);
	ddi_taskq_destroy(asc->asc_tq);
	ath_txq_cleanup(asc);
	asc->asc_ah->ah_detach(asc->asc_ah);

	/* free io handle */
	ddi_regs_map_free(&asc->asc_io_handle);
	pci_config_teardown(&asc->asc_cfg_handle);

	/* destroy locks */
	mutex_destroy(&asc->asc_rxbuflock);
	mutex_destroy(&asc->asc_genlock);
	mutex_destroy(&asc->asc_resched_lock);

	/* release the minor node and the per-instance soft state */
	ddi_remove_minor_node(devinfo, NULL);
	ddi_soft_state_free(ath_soft_state_p, ddi_get_instance(devinfo));

	return (DDI_SUCCESS);
}
2437 
2438 /*
2439  * quiesce(9E) entry point.
2440  *
2441  * This function is called when the system is single-threaded at high
2442  * PIL with preemption disabled. Therefore, this function must not be
2443  * blocked.
2444  *
2445  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
2446  * DDI_FAILURE indicates an error condition and should almost never happen.
2447  */
2448 static int32_t
2449 ath_quiesce(dev_info_t *devinfo)
2450 {
2451         ath_t           *asc;
2452         struct ath_hal  *ah;
2453         int             i;
2454 
2455         asc = ddi_get_soft_state(ath_soft_state_p, ddi_get_instance(devinfo));
2456 
2457         if (asc == NULL || (ah = asc->asc_ah) == NULL)
2458                 return (DDI_FAILURE);
2459 
2460         /*
2461          * Disable interrupts
2462          */
2463         ATH_HAL_INTRSET(ah, 0);
2464 
2465         /*
2466          * Disable TX HW
2467          */
2468         for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
2469                 if (ATH_TXQ_SETUP(asc, i)) {
2470                         ATH_HAL_STOPTXDMA(ah, asc->asc_txq[i].axq_qnum);
2471                 }
2472         }
2473 
2474         /*
2475          * Disable RX HW
2476          */
2477         ATH_HAL_STOPPCURECV(ah);
2478         ATH_HAL_SETRXFILTER(ah, 0);
2479         ATH_HAL_STOPDMARECV(ah);
2480         drv_usecwait(3000);
2481 
2482         /*
2483          * Power down HW
2484          */
2485         ATH_HAL_PHYDISABLE(ah);
2486 
2487         return (DDI_SUCCESS);
2488 }
2489 
/*
 * Device operations vector.  D_MP marks the driver MT-safe and
 * ath_quiesce supplies the quiesce(9E) handler used by fast reboot.
 * NOTE(review): the remaining slots appear to be the usual
 * identify/probe/reset/getinfo placeholders (nulldev/nodev/NULL) —
 * confirm against the DDI_DEFINE_STREAM_OPS macro definition.
 */
DDI_DEFINE_STREAM_OPS(ath_dev_ops, nulldev, nulldev, ath_attach, ath_detach,
    nodev, NULL, D_MP, NULL, ath_quiesce);
2492 
/*
 * Driver-module description handed to the module framework via the
 * modlinkage below; the string shows up in modinfo(1M) output.
 */
static struct modldrv ath_modldrv = {
	&mod_driverops,             /* Type of module.  This one is a driver */
	"ath driver 1.4/HAL 0.10.5.6",          /* short description */
	&ath_dev_ops                /* driver specific ops */
};
2498 
/*
 * Module linkage: exactly one linkage structure (the driver above),
 * NULL-terminated, passed to mod_install()/mod_remove()/mod_info().
 */
static struct modlinkage modlinkage = {
	MODREV_1, { (void *)&ath_modldrv, NULL }
};
2502 
2503 
/*
 * _info(9E): report module information by delegating to mod_info()
 * with our modlinkage.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
2509 
2510 int
2511 _init(void)
2512 {
2513         int status;
2514 
2515         status = ddi_soft_state_init(&ath_soft_state_p, sizeof (ath_t), 1);
2516         if (status != 0)
2517                 return (status);
2518 
2519         mutex_init(&ath_loglock, NULL, MUTEX_DRIVER, NULL);
2520         ath_halfix_init();
2521         mac_init_ops(&ath_dev_ops, "ath");
2522         status = mod_install(&modlinkage);
2523         if (status != 0) {
2524                 mac_fini_ops(&ath_dev_ops);
2525                 ath_halfix_finit();
2526                 mutex_destroy(&ath_loglock);
2527                 ddi_soft_state_fini(&ath_soft_state_p);
2528         }
2529 
2530         return (status);
2531 }
2532 
2533 int
2534 _fini(void)
2535 {
2536         int status;
2537 
2538         status = mod_remove(&modlinkage);
2539         if (status == 0) {
2540                 mac_fini_ops(&ath_dev_ops);
2541                 ath_halfix_finit();
2542                 mutex_destroy(&ath_loglock);
2543                 ddi_soft_state_fini(&ath_soft_state_p);
2544         }
2545         return (status);
2546 }