Print this page
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/ath/ath_main.c
+++ new/usr/src/uts/common/io/ath/ath_main.c
1 1 /*
2 2 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
3 3 * Use is subject to license terms.
4 4 */
5 5
6 6 /*
7 7 * Copyright (c) 2002-2004 Sam Leffler, Errno Consulting
8 8 * All rights reserved.
9 9 *
10 10 * Redistribution and use in source and binary forms, with or without
11 11 * modification, are permitted provided that the following conditions
12 12 * are met:
13 13 * 1. Redistributions of source code must retain the above copyright
14 14 * notice, this list of conditions and the following disclaimer,
15 15 * without modification.
16 16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
17 17 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
18 18 * redistribution must be conditioned upon including a substantially
19 19 * similar Disclaimer requirement for further binary redistribution.
20 20 * 3. Neither the names of the above-listed copyright holders nor the names
21 21 * of any contributors may be used to endorse or promote products derived
22 22 * from this software without specific prior written permission.
23 23 *
24 24 * NO WARRANTY
25 25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 27 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
28 28 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
29 29 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
30 30 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
33 33 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
35 35 * THE POSSIBILITY OF SUCH DAMAGES.
36 36 *
37 37 */
38 38
39 39 /*
40 40 * Driver for the Atheros Wireless LAN controller.
41 41 *
42 42 * The Atheros driver calls into net80211 module for IEEE80211 protocol
43 43 * management functionalities. The driver includes a LLD(Low Level Driver)
44 44 * part to implement H/W related operations.
45 45 * The following is the high level structure of ath driver.
46 46 * (The arrows between modules indicate function call direction.)
47 47 *
48 48 *
49 49 * |
50 50 * | GLD thread
51 51 * V
52 52 * ================== =========================================
53 53 * | | |[1] |
54 54 * | | | GLDv3 Callback functions registered |
55 55 * | Net80211 | ========================= by |
56 56 * | module | | | driver |
57 57 * | | V | |
58 58 * | |======================== | |
59 59 * | Functions exported by net80211 | | |
60 60 * | | | |
61 61 * ========================================== =================
62 62 * | |
63 63 * V |
64 64 * +----------------------------------+ |
65 65 * |[2] | |
66 66 * | Net80211 Callback functions | |
67 67 * | registered by LLD | |
68 68 * +----------------------------------+ |
69 69 * | |
70 70 * V v
71 71 * +-----------------------------------------------------------+
72 72 * |[3] |
73 73 * | LLD Internal functions |
74 74 * | |
75 75 * +-----------------------------------------------------------+
76 76 * ^
77 77 * | Software interrupt thread
78 78 * |
79 79 *
80 80 * The short description of each module is as below:
81 81 * Module 1: GLD callback functions, which are intercepting the calls from
82 82 * GLD to LLD.
83 83 * Module 2: Net80211 callback functions registered by LLD, which
84 84 * calls into LLD for H/W related functions needed by net80211.
85 85  * Module 3: LLD Internal functions, which are responsible for allocating
86 86 * descriptor/buffer, handling interrupt and other H/W
87 87 * operations.
88 88 *
89 89 * All functions are running in 3 types of thread:
90 90 * 1. GLD callbacks threads, such as ioctl, intr, etc.
91 91  * 2. Clock interrupt thread which is responsible for scan, rate control and
92 92 * calibration.
93 93 * 3. Software Interrupt thread originated in LLD.
94 94 *
95 95 * The lock strategy is as below:
96 96  * There are 4 queues for tx; each queue has one asc_txqlock[i] to
97 97 * prevent conflicts access to queue resource from different thread.
98 98 *
99 99 * All the transmit buffers are contained in asc_txbuf which are
100 100 * protected by asc_txbuflock.
101 101 *
102 102 * Each receive buffers are contained in asc_rxbuf which are protected
103 103 * by asc_rxbuflock.
104 104 *
105 105 * In ath struct, asc_genlock is a general lock, protecting most other
106 106 * operational data in ath_softc struct and HAL accesses.
107 107  * It is acquired by the interrupt handler and most "mode-ctrl" routines.
108 108 *
109 109 * Any of the locks can be acquired singly, but where multiple
110 110 * locks are acquired, they *must* be in the order:
111 111 * asc_genlock >> asc_txqlock[i] >> asc_txbuflock >> asc_rxbuflock
112 112 */
113 113
114 114 #include <sys/param.h>
115 115 #include <sys/types.h>
116 116 #include <sys/signal.h>
117 117 #include <sys/stream.h>
118 118 #include <sys/termio.h>
119 119 #include <sys/errno.h>
120 120 #include <sys/file.h>
121 121 #include <sys/cmn_err.h>
122 122 #include <sys/stropts.h>
123 123 #include <sys/strsubr.h>
124 124 #include <sys/strtty.h>
125 125 #include <sys/kbio.h>
126 126 #include <sys/cred.h>
127 127 #include <sys/stat.h>
128 128 #include <sys/consdev.h>
129 129 #include <sys/kmem.h>
130 130 #include <sys/modctl.h>
131 131 #include <sys/ddi.h>
132 132 #include <sys/sunddi.h>
133 133 #include <sys/pci.h>
134 134 #include <sys/errno.h>
135 135 #include <sys/mac_provider.h>
136 136 #include <sys/dlpi.h>
137 137 #include <sys/ethernet.h>
138 138 #include <sys/list.h>
139 139 #include <sys/byteorder.h>
140 140 #include <sys/strsun.h>
141 141 #include <sys/policy.h>
142 142 #include <inet/common.h>
143 143 #include <inet/nd.h>
144 144 #include <inet/mi.h>
145 145 #include <inet/wifi_ioctl.h>
146 146 #include <sys/mac_wifi.h>
147 147 #include "ath_hal.h"
148 148 #include "ath_impl.h"
149 149 #include "ath_aux.h"
150 150 #include "ath_rate.h"
151 151
152 152 #define ATH_MAX_RSSI 63 /* max rssi */
153 153
154 154 extern void ath_halfix_init(void);
155 155 extern void ath_halfix_finit(void);
156 156 extern int32_t ath_getset(ath_t *asc, mblk_t *mp, uint32_t cmd);
157 157
158 158 /*
159 159 * PIO access attributes for registers
160 160 */
static ddi_device_acc_attr_t ath_reg_accattr = {
	DDI_DEVICE_ATTR_V0,	/* devacc_attr_version */
	DDI_STRUCTURE_LE_ACC,	/* device registers are little-endian */
	DDI_STRICTORDER_ACC	/* strict ordering: no reordering/merging */
};
166 166
167 167 /*
168 168 * DMA access attributes for descriptors: NOT to be byte swapped.
169 169 */
static ddi_device_acc_attr_t ath_desc_accattr = {
	DDI_DEVICE_ATTR_V0,	/* devacc_attr_version */
	DDI_STRUCTURE_LE_ACC,	/* descriptors are little-endian, no swap */
	DDI_STRICTORDER_ACC	/* strict ordering for descriptor writes */
};
175 175
176 176 /*
177 177 * DMA attributes for rx/tx buffers
178 178 */
static ddi_dma_attr_t ath_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffU,		/* high address: 32-bit DMA only */
	0x3ffffU,		/* counter register max */
	1,			/* alignment: byte-aligned buffers */
	0xFFF,			/* burst sizes */
	1,			/* minimum transfer size */
	0x3ffffU,		/* max transfer size */
	0xffffffffU,		/* address register max */
	1,			/* sgllen: no scatter-gather */
	1,			/* granularity of device */
	0,			/* DMA flags */
};
193 193
static ddi_dma_attr_t ath_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffU,		/* high address: 32-bit DMA only */
	0xffffffffU,		/* counter register max */
	0x1000,			/* alignment: 4KB, for the descriptor ring */
	0xFFF,			/* burst sizes */
	1,			/* minimum transfer size */
	0xffffffffU,		/* max transfer size */
	0xffffffffU,		/* address register max */
	1,			/* sgllen: no scatter-gather */
	1,			/* granularity of device */
	0,			/* DMA flags */
};
208 208
static kmutex_t ath_loglock;		/* serializes ath_problem/ath_log/ath_dbg */
static void *ath_soft_state_p = NULL;	/* soft-state anchor for all instances */
static int ath_dwelltime = 150;		/* scan interval, ms */
212 212
213 213 static int ath_m_stat(void *, uint_t, uint64_t *);
214 214 static int ath_m_start(void *);
215 215 static void ath_m_stop(void *);
216 216 static int ath_m_promisc(void *, boolean_t);
217 217 static int ath_m_multicst(void *, boolean_t, const uint8_t *);
218 218 static int ath_m_unicst(void *, const uint8_t *);
219 219 static mblk_t *ath_m_tx(void *, mblk_t *);
220 220 static void ath_m_ioctl(void *, queue_t *, mblk_t *);
221 221 static int ath_m_setprop(void *, const char *, mac_prop_id_t,
222 222 uint_t, const void *);
223 223 static int ath_m_getprop(void *, const char *, mac_prop_id_t,
224 224 uint_t, void *);
225 225 static void ath_m_propinfo(void *, const char *, mac_prop_id_t,
226 226 mac_prop_info_handle_t);
227 227
static mac_callbacks_t ath_m_callbacks = {
	MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO, /* mc_callbacks */
	ath_m_stat,		/* mc_getstat */
	ath_m_start,		/* mc_start */
	ath_m_stop,		/* mc_stop */
	ath_m_promisc,		/* mc_setpromisc */
	ath_m_multicst,		/* mc_multicst */
	ath_m_unicst,		/* mc_unicst */
	ath_m_tx,		/* mc_tx */
	NULL,			/* NOTE(review): unused slot — confirm field */
	ath_m_ioctl,		/* mc_ioctl */
	NULL,			/* mc_getcapab */
	NULL,			/* presumably mc_open — confirm */
	NULL,			/* presumably mc_close — confirm */
	ath_m_setprop,		/* mc_setprop */
	ath_m_getprop,		/* mc_getprop */
	ath_m_propinfo		/* mc_propinfo */
};
246 246
247 247 /*
248 248 * Available debug flags:
249 249 * ATH_DBG_INIT, ATH_DBG_GLD, ATH_DBG_HAL, ATH_DBG_INT, ATH_DBG_ATTACH,
250 250 * ATH_DBG_DETACH, ATH_DBG_AUX, ATH_DBG_WIFICFG, ATH_DBG_OSDEP
251 251 */
252 252 uint32_t ath_dbg_flags = 0;
253 253
254 254 /*
255 255 * Exception/warning cases not leading to panic.
256 256 */
257 257 void
258 258 ath_problem(const int8_t *fmt, ...)
259 259 {
260 260 va_list args;
261 261
262 262 mutex_enter(&ath_loglock);
263 263
264 264 va_start(args, fmt);
265 265 vcmn_err(CE_WARN, fmt, args);
266 266 va_end(args);
267 267
268 268 mutex_exit(&ath_loglock);
269 269 }
270 270
271 271 /*
272 272 * Normal log information independent of debug.
273 273 */
274 274 void
275 275 ath_log(const int8_t *fmt, ...)
276 276 {
277 277 va_list args;
278 278
279 279 mutex_enter(&ath_loglock);
280 280
281 281 va_start(args, fmt);
282 282 vcmn_err(CE_CONT, fmt, args);
283 283 va_end(args);
284 284
285 285 mutex_exit(&ath_loglock);
286 286 }
287 287
288 288 void
289 289 ath_dbg(uint32_t dbg_flags, const int8_t *fmt, ...)
290 290 {
291 291 va_list args;
292 292
293 293 if (dbg_flags & ath_dbg_flags) {
294 294 mutex_enter(&ath_loglock);
295 295 va_start(args, fmt);
296 296 vcmn_err(CE_CONT, fmt, args);
297 297 va_end(args);
298 298 mutex_exit(&ath_loglock);
299 299 }
300 300 }
301 301
/*
 * (Re)initialize an rx buffer's descriptor and append it to the
 * hardware rx descriptor chain. The descriptor is self-linked
 * (ds_link points at its own physical address) so the chain's tail
 * is recognizable; ath_rx_handler() relies on this to detect the end.
 */
void
ath_setup_desc(ath_t *asc, struct ath_buf *bf)
{
	struct ath_desc *ds;

	ds = bf->bf_desc;
	ds->ds_link = bf->bf_daddr;	/* self-link marks the chain tail */
	ds->ds_data = bf->bf_dma.cookie.dmac_address;
	ATH_HAL_SETUPRXDESC(asc->asc_ah, ds,
	    bf->bf_dma.alength,		/* buffer size */
	    0);

	/* splice this descriptor onto the previous tail, if any */
	if (asc->asc_rxlink != NULL)
		*asc->asc_rxlink = bf->bf_daddr;
	asc->asc_rxlink = &ds->ds_link;
}
318 318
319 319
320 320 /*
321 321 * Allocate an area of memory and a DMA handle for accessing it
322 322 */
323 323 static int
324 324 ath_alloc_dma_mem(dev_info_t *devinfo, ddi_dma_attr_t *dma_attr, size_t memsize,
325 325 ddi_device_acc_attr_t *attr_p, uint_t alloc_flags,
326 326 uint_t bind_flags, dma_area_t *dma_p)
327 327 {
328 328 int err;
329 329
330 330 /*
331 331 * Allocate handle
332 332 */
333 333 err = ddi_dma_alloc_handle(devinfo, dma_attr,
334 334 DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
335 335 if (err != DDI_SUCCESS)
336 336 return (DDI_FAILURE);
337 337
338 338 /*
339 339 * Allocate memory
340 340 */
341 341 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
342 342 alloc_flags, DDI_DMA_SLEEP, NULL, &dma_p->mem_va,
343 343 &dma_p->alength, &dma_p->acc_hdl);
344 344 if (err != DDI_SUCCESS)
345 345 return (DDI_FAILURE);
346 346
347 347 /*
348 348 * Bind the two together
349 349 */
350 350 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
351 351 dma_p->mem_va, dma_p->alength, bind_flags,
352 352 DDI_DMA_SLEEP, NULL, &dma_p->cookie, &dma_p->ncookies);
353 353 if (err != DDI_DMA_MAPPED)
354 354 return (DDI_FAILURE);
355 355
356 356 dma_p->nslots = ~0U;
357 357 dma_p->size = ~0U;
358 358 dma_p->token = ~0U;
359 359 dma_p->offset = 0;
360 360 return (DDI_SUCCESS);
361 361 }
362 362
363 363 /*
364 364 * Free one allocated area of DMAable memory
365 365 */
366 366 static void
367 367 ath_free_dma_mem(dma_area_t *dma_p)
368 368 {
369 369 if (dma_p->dma_hdl != NULL) {
370 370 (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
371 371 if (dma_p->acc_hdl != NULL) {
372 372 ddi_dma_mem_free(&dma_p->acc_hdl);
373 373 dma_p->acc_hdl = NULL;
374 374 }
375 375 ddi_dma_free_handle(&dma_p->dma_hdl);
376 376 dma_p->ncookies = 0;
377 377 dma_p->dma_hdl = NULL;
378 378 }
379 379 }
380 380
381 381
382 382 /*
383 383 * Initialize tx/rx buffer list. Allocate DMA memory for
384 384 * each buffer.
385 385 */
/*
 * Initialize one tx/rx buffer list: create the list, point each
 * ath_buf at its descriptor slot, and allocate a per-packet DMA
 * buffer for it.
 *
 * pbf/pds are in/out cursors into the ath_buf array and descriptor
 * ring: they are advanced past the nbuf entries consumed here so the
 * caller can invoke this routine once for rx and once for tx.
 *
 * On failure the partially-built list is left intact for the caller
 * to tear down (via ath_desc_free -> ath_buflist_cleanup).
 */
static int
ath_buflist_setup(dev_info_t *devinfo, ath_t *asc, list_t *bflist,
    struct ath_buf **pbf, struct ath_desc **pds, int nbuf, uint_t dmabflags)
{
	int i, err;
	struct ath_buf *bf = *pbf;
	struct ath_desc *ds = *pds;

	list_create(bflist, sizeof (struct ath_buf),
	    offsetof(struct ath_buf, bf_node));
	for (i = 0; i < nbuf; i++, bf++, ds++) {
		bf->bf_desc = ds;
		/* physical address = ring base + offset of this slot */
		bf->bf_daddr = asc->asc_desc_dma.cookie.dmac_address +
		    ((uintptr_t)ds - (uintptr_t)asc->asc_desc);
		list_insert_tail(bflist, bf);

		/* alloc DMA memory */
		err = ath_alloc_dma_mem(devinfo, &ath_dma_attr,
		    asc->asc_dmabuf_size, &ath_desc_accattr, DDI_DMA_STREAMING,
		    dmabflags, &bf->bf_dma);
		if (err != DDI_SUCCESS)
			return (err);
	}
	*pbf = bf;
	*pds = ds;

	return (DDI_SUCCESS);
}
414 414
415 415 /*
416 416 * Destroy tx/rx buffer list. Free DMA memory.
417 417 */
418 418 static void
419 419 ath_buflist_cleanup(list_t *buflist)
420 420 {
421 421 struct ath_buf *bf;
422 422
423 423 if (!buflist)
424 424 return;
425 425
426 426 bf = list_head(buflist);
427 427 while (bf != NULL) {
428 428 if (bf->bf_m != NULL) {
429 429 freemsg(bf->bf_m);
430 430 bf->bf_m = NULL;
431 431 }
432 432 /* Free DMA buffer */
433 433 ath_free_dma_mem(&bf->bf_dma);
434 434 if (bf->bf_in != NULL) {
435 435 ieee80211_free_node(bf->bf_in);
436 436 bf->bf_in = NULL;
437 437 }
438 438 list_remove(buflist, bf);
439 439 bf = list_head(buflist);
440 440 }
441 441 list_destroy(buflist);
442 442 }
443 443
444 444
/*
 * Release everything set up by ath_desc_alloc(): the tx/rx buffer
 * lists (with their per-packet DMA memory), the shared descriptor
 * ring, and the ath_buf bookkeeping array.
 */
static void
ath_desc_free(ath_t *asc)
{
	ath_buflist_cleanup(&asc->asc_txbuf_list);
	ath_buflist_cleanup(&asc->asc_rxbuf_list);

	/* Free descriptor DMA buffer */
	ath_free_dma_mem(&asc->asc_desc_dma);

	/* free the ath_buf array allocated in ath_desc_alloc() */
	kmem_free((void *)asc->asc_vbufptr, asc->asc_vbuflen);
	asc->asc_vbufptr = NULL;
}
457 457
458 458 static int
459 459 ath_desc_alloc(dev_info_t *devinfo, ath_t *asc)
460 460 {
461 461 int err;
462 462 size_t size;
463 463 struct ath_desc *ds;
464 464 struct ath_buf *bf;
465 465
466 466 size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF);
467 467
468 468 err = ath_alloc_dma_mem(devinfo, &ath_desc_dma_attr, size,
469 469 &ath_desc_accattr, DDI_DMA_CONSISTENT,
470 470 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &asc->asc_desc_dma);
471 471
472 472 /* virtual address of the first descriptor */
473 473 asc->asc_desc = (struct ath_desc *)asc->asc_desc_dma.mem_va;
474 474
475 475 ds = asc->asc_desc;
476 476 ATH_DEBUG((ATH_DBG_INIT, "ath: ath_desc_alloc(): DMA map: "
477 477 "%p (%d) -> %p\n",
478 478 asc->asc_desc, asc->asc_desc_dma.alength,
479 479 asc->asc_desc_dma.cookie.dmac_address));
480 480
481 481 /* allocate data structures to describe TX/RX DMA buffers */
482 482 asc->asc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF);
483 483 bf = (struct ath_buf *)kmem_zalloc(asc->asc_vbuflen, KM_SLEEP);
484 484 asc->asc_vbufptr = bf;
485 485
486 486 /* DMA buffer size for each TX/RX packet */
487 487 asc->asc_dmabuf_size = roundup(1000 + sizeof (struct ieee80211_frame) +
488 488 IEEE80211_MTU + IEEE80211_CRC_LEN +
489 489 (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
490 490 IEEE80211_WEP_CRCLEN), asc->asc_cachelsz);
491 491
492 492 /* create RX buffer list */
493 493 err = ath_buflist_setup(devinfo, asc, &asc->asc_rxbuf_list, &bf, &ds,
494 494 ATH_RXBUF, DDI_DMA_READ | DDI_DMA_STREAMING);
495 495 if (err != DDI_SUCCESS) {
496 496 ath_desc_free(asc);
497 497 return (err);
498 498 }
499 499
500 500 /* create TX buffer list */
501 501 err = ath_buflist_setup(devinfo, asc, &asc->asc_txbuf_list, &bf, &ds,
502 502 ATH_TXBUF, DDI_DMA_STREAMING);
503 503 if (err != DDI_SUCCESS) {
504 504 ath_desc_free(asc);
505 505 return (err);
506 506 }
507 507
508 508
509 509 return (DDI_SUCCESS);
510 510 }
511 511
/*
 * Debug dump of one rx buffer's descriptor words. The trailing
 * character encodes completion state: ' ' not done, '*' done OK,
 * '!' done with an rx error status.
 */
static void
ath_printrxbuf(struct ath_buf *bf, int32_t done)
{
	struct ath_desc *ds = bf->bf_desc;
	const struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;

	ATH_DEBUG((ATH_DBG_RECV, "ath: R (%p %p) %08x %08x %08x "
	    "%08x %08x %08x %c\n",
	    ds, bf->bf_daddr,
	    ds->ds_link, ds->ds_data,
	    ds->ds_ctl0, ds->ds_ctl1,
	    ds->ds_hw[0], ds->ds_hw[1],
	    !done ? ' ' : (rs->rs_status == 0) ? '*' : '!'));
}
526 526
527 527 static void
528 528 ath_rx_handler(ath_t *asc)
529 529 {
530 530 ieee80211com_t *ic = (ieee80211com_t *)asc;
531 531 struct ath_buf *bf;
532 532 struct ath_hal *ah = asc->asc_ah;
533 533 struct ath_desc *ds;
534 534 struct ath_rx_status *rs;
535 535 mblk_t *rx_mp;
536 536 struct ieee80211_frame *wh;
537 537 int32_t len, loop = 1;
538 538 uint8_t phyerr;
539 539 HAL_STATUS status;
540 540 HAL_NODE_STATS hal_node_stats;
541 541 struct ieee80211_node *in;
542 542
543 543 do {
544 544 mutex_enter(&asc->asc_rxbuflock);
545 545 bf = list_head(&asc->asc_rxbuf_list);
546 546 if (bf == NULL) {
547 547 ATH_DEBUG((ATH_DBG_RECV, "ath: ath_rx_handler(): "
548 548 "no buffer\n"));
549 549 mutex_exit(&asc->asc_rxbuflock);
550 550 break;
551 551 }
552 552 ASSERT(bf->bf_dma.cookie.dmac_address != NULL);
553 553 ds = bf->bf_desc;
554 554 if (ds->ds_link == bf->bf_daddr) {
555 555 /*
556 556 * Never process the self-linked entry at the end,
557 557 * this may be met at heavy load.
558 558 */
559 559 mutex_exit(&asc->asc_rxbuflock);
560 560 break;
561 561 }
562 562
563 563 rs = &bf->bf_status.ds_rxstat;
564 564 status = ATH_HAL_RXPROCDESC(ah, ds,
565 565 bf->bf_daddr,
566 566 ATH_PA2DESC(asc, ds->ds_link), rs);
567 567 if (status == HAL_EINPROGRESS) {
568 568 mutex_exit(&asc->asc_rxbuflock);
569 569 break;
570 570 }
571 571 list_remove(&asc->asc_rxbuf_list, bf);
572 572 mutex_exit(&asc->asc_rxbuflock);
573 573
574 574 if (rs->rs_status != 0) {
575 575 if (rs->rs_status & HAL_RXERR_CRC)
576 576 asc->asc_stats.ast_rx_crcerr++;
577 577 if (rs->rs_status & HAL_RXERR_FIFO)
578 578 asc->asc_stats.ast_rx_fifoerr++;
579 579 if (rs->rs_status & HAL_RXERR_DECRYPT)
580 580 asc->asc_stats.ast_rx_badcrypt++;
581 581 if (rs->rs_status & HAL_RXERR_PHY) {
582 582 asc->asc_stats.ast_rx_phyerr++;
583 583 phyerr = rs->rs_phyerr & 0x1f;
584 584 asc->asc_stats.ast_rx_phy[phyerr]++;
585 585 }
586 586 goto rx_next;
587 587 }
588 588 len = rs->rs_datalen;
589 589
590 590 /* less than sizeof(struct ieee80211_frame) */
591 591 if (len < 20) {
592 592 asc->asc_stats.ast_rx_tooshort++;
593 593 goto rx_next;
594 594 }
595 595
596 596 if ((rx_mp = allocb(asc->asc_dmabuf_size, BPRI_MED)) == NULL) {
597 597 ath_problem("ath: ath_rx_handler(): "
598 598 "allocing mblk buffer failed.\n");
599 599 return;
600 600 }
601 601
602 602 ATH_DMA_SYNC(bf->bf_dma, DDI_DMA_SYNC_FORCPU);
603 603 bcopy(bf->bf_dma.mem_va, rx_mp->b_rptr, len);
604 604
605 605 rx_mp->b_wptr += len;
606 606 wh = (struct ieee80211_frame *)rx_mp->b_rptr;
607 607 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
608 608 IEEE80211_FC0_TYPE_CTL) {
609 609 /*
610 610 * Ignore control frame received in promisc mode.
611 611 */
612 612 freemsg(rx_mp);
613 613 goto rx_next;
614 614 }
615 615 /* Remove the CRC at the end of IEEE80211 frame */
616 616 rx_mp->b_wptr -= IEEE80211_CRC_LEN;
617 617 #ifdef DEBUG
618 618 ath_printrxbuf(bf, status == HAL_OK);
619 619 #endif /* DEBUG */
620 620 /*
621 621 * Locate the node for sender, track state, and then
622 622 * pass the (referenced) node up to the 802.11 layer
623 623 * for its use.
624 624 */
625 625 in = ieee80211_find_rxnode(ic, wh);
626 626
627 627 /*
628 628 * Send frame up for processing.
629 629 */
630 630 (void) ieee80211_input(ic, rx_mp, in,
631 631 rs->rs_rssi, rs->rs_tstamp);
632 632
633 633 ieee80211_free_node(in);
634 634
635 635 rx_next:
636 636 mutex_enter(&asc->asc_rxbuflock);
637 637 list_insert_tail(&asc->asc_rxbuf_list, bf);
638 638 mutex_exit(&asc->asc_rxbuflock);
639 639 ath_setup_desc(asc, bf);
640 640 } while (loop);
641 641
642 642 /* rx signal state monitoring */
643 643 ATH_HAL_RXMONITOR(ah, &hal_node_stats, &asc->asc_curchan);
644 644 }
645 645
/*
 * Debug dump of one tx buffer's descriptor words. The trailing
 * character encodes completion state: ' ' not done, '*' done OK,
 * '!' done with a tx error status.
 */
static void
ath_printtxbuf(struct ath_buf *bf, int done)
{
	struct ath_desc *ds = bf->bf_desc;
	const struct ath_tx_status *ts = &bf->bf_status.ds_txstat;

	ATH_DEBUG((ATH_DBG_SEND, "ath: T(%p %p) %08x %08x %08x %08x %08x"
	    " %08x %08x %08x %c\n",
	    ds, bf->bf_daddr,
	    ds->ds_link, ds->ds_data,
	    ds->ds_ctl0, ds->ds_ctl1,
	    ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3],
	    !done ? ' ' : (ts->ts_status == 0) ? '*' : '!'));
}
660 660
661 661 /*
662 662 * The input parameter mp has following assumption:
663 663 * For data packets, GLDv3 mac_wifi plugin allocates and fills the
664 664 * ieee80211 header. For management packets, net80211 allocates and
665 665 * fills the ieee80211 header. In both cases, enough spaces in the
666 666 * header are left for encryption option.
667 667 */
/*
 * Encrypt (if needed), encapsulate, and queue one frame for
 * transmission on the appropriate hardware tx queue.
 *
 * asc  - softc
 * in   - destination 802.11 node; a reference is stashed in bf->bf_in
 *        and released when tx completes (see ath_buflist_cleanup)
 * bf   - tx buffer already removed from asc_txbuf_list by the caller
 * mp   - frame; header already built by mac_wifi plugin (data) or
 *        net80211 (management), with room left for crypto headers
 *
 * Returns 0 on success, EIO if crypto encap fails, 1 for an unknown
 * 802.11 frame type. Takes asc_txqlock[i] via txq->axq_lock.
 */
static int32_t
ath_tx_start(ath_t *asc, struct ieee80211_node *in, struct ath_buf *bf,
    mblk_t *mp)
{
	ieee80211com_t *ic = (ieee80211com_t *)asc;
	struct ieee80211_frame *wh;
	struct ath_hal *ah = asc->asc_ah;
	uint32_t subtype, flags, ctsduration;
	int32_t keyix, iswep, hdrlen, pktlen, mblen, mbslen, try0;
	uint8_t rix, cix, txrate, ctsrate;
	struct ath_desc *ds;
	struct ath_txq *txq;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	HAL_BOOL shortPreamble;
	struct ath_node *an;
	caddr_t dest;

	/*
	 * CRC are added by H/W, not encaped by driver,
	 * but we must count it in pkt length.
	 */
	pktlen = IEEE80211_CRC_LEN;

	wh = (struct ieee80211_frame *)mp->b_rptr;
	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
	keyix = HAL_TXKEYIX_INVALID;
	hdrlen = sizeof (struct ieee80211_frame);
	if (iswep != 0) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ic, mp);
		if (k == NULL) {
			ATH_DEBUG((ATH_DBG_AUX, "crypto_encap failed\n"));
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued. Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			return (EIO);
		}
		cip = k->wk_cipher;
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index. When
		 * a s/w mic is done the frame will have had any mic
		 * added to it prior to entry so m0->m_pkthdr.len above will
		 * account for it. Otherwise we need to add it to the
		 * packet length.
		 */
		hdrlen += cip->ic_header;
		pktlen += cip->ic_trailer;
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0)
			pktlen += cip->ic_miclen;
		keyix = k->wk_keyix;

		/* packet header may have moved, reset our local pointer */
		wh = (struct ieee80211_frame *)mp->b_rptr;
	}

	/* flatten the (possibly chained) mblk into the tx DMA buffer */
	dest = bf->bf_dma.mem_va;
	for (; mp != NULL; mp = mp->b_cont) {
		mblen = MBLKL(mp);
		bcopy(mp->b_rptr, dest, mblen);
		dest += mblen;
	}
	mbslen = (uintptr_t)dest - (uintptr_t)bf->bf_dma.mem_va;
	pktlen += mbslen;

	/* hold the node reference until tx completion */
	bf->bf_in = in;

	/* setup descriptors */
	ds = bf->bf_desc;
	rt = asc->asc_currates;
	ASSERT(rt != NULL);

	/*
	 * The 802.11 layer marks whether or not we should
	 * use short preamble based on the current mode and
	 * negotiated parameters.
	 */
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    (in->in_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
		shortPreamble = AH_TRUE;
		asc->asc_stats.ast_tx_shortpre++;
	} else {
		shortPreamble = AH_FALSE;
	}

	an = ATH_NODE(in);

	/*
	 * Calculate Atheros packet type from IEEE80211 packet header
	 * and setup for rate calculations.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
			atype = HAL_PKT_TYPE_BEACON;
		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			atype = HAL_PKT_TYPE_PROBE_RESP;
		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
			atype = HAL_PKT_TYPE_ATIM;
		else
			atype = HAL_PKT_TYPE_NORMAL;
		rix = 0;	/* lowest rate */
		try0 = ATH_TXMAXTRY;
		if (shortPreamble)
			txrate = an->an_tx_mgtratesp;
		else
			txrate = an->an_tx_mgtrate;
		/* force all ctl frames to highest queue */
		txq = asc->asc_ac2q[WME_AC_VO];
		break;
	case IEEE80211_FC0_TYPE_CTL:
		atype = HAL_PKT_TYPE_PSPOLL;
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		rix = 0;	/* lowest rate */
		try0 = ATH_TXMAXTRY;
		if (shortPreamble)
			txrate = an->an_tx_mgtratesp;
		else
			txrate = an->an_tx_mgtrate;
		/* force all ctl frames to highest queue */
		txq = asc->asc_ac2q[WME_AC_VO];
		break;
	case IEEE80211_FC0_TYPE_DATA:
		atype = HAL_PKT_TYPE_NORMAL;
		rix = an->an_tx_rix0;
		try0 = an->an_tx_try0;
		if (shortPreamble)
			txrate = an->an_tx_rate0sp;
		else
			txrate = an->an_tx_rate0;
		/* Always use background queue */
		txq = asc->asc_ac2q[WME_AC_BK];
		break;
	default:
		/* Unknown 802.11 frame */
		asc->asc_stats.ast_tx_invalid++;
		return (1);
	}
	/*
	 * Calculate miscellaneous flags.
	 */
	flags = HAL_TXDESC_CLRDMASK;
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
		asc->asc_stats.ast_tx_noack++;
	} else if (pktlen > ic->ic_rtsthreshold) {
		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
		asc->asc_stats.ast_tx_rts++;
	}

	/*
	 * Calculate duration. This logically belongs in the 802.11
	 * layer but it lacks sufficient information to calculate it.
	 */
	if ((flags & HAL_TXDESC_NOACK) == 0 &&
	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
	    IEEE80211_FC0_TYPE_CTL) {
		uint16_t dur;
		dur = ath_hal_computetxtime(ah, rt, IEEE80211_ACK_SIZE,
		    rix, shortPreamble);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		*(uint16_t *)wh->i_dur = LE_16(dur);
	}

	/*
	 * Calculate RTS/CTS rate and duration if needed.
	 */
	ctsduration = 0;
	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
		/*
		 * CTS transmit rate is derived from the transmit rate
		 * by looking in the h/w rate table. We must also factor
		 * in whether or not a short preamble is to be used.
		 */
		cix = rt->info[rix].controlRate;
		ctsrate = rt->info[cix].rateCode;
		if (shortPreamble)
			ctsrate |= rt->info[cix].shortPreamble;
		/*
		 * Compute the transmit duration based on the size
		 * of an ACK frame. We call into the HAL to do the
		 * computation since it depends on the characteristics
		 * of the actual PHY being used.
		 */
		if (flags & HAL_TXDESC_RTSENA) {	/* SIFS + CTS */
			ctsduration += ath_hal_computetxtime(ah,
			    rt, IEEE80211_ACK_SIZE, cix, shortPreamble);
		}
		/* SIFS + data */
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, shortPreamble);
		if ((flags & HAL_TXDESC_NOACK) == 0) {	/* SIFS + ACK */
			ctsduration += ath_hal_computetxtime(ah,
			    rt, IEEE80211_ACK_SIZE, cix, shortPreamble);
		}
	} else
		ctsrate = 0;

	/* request a tx-complete interrupt every ATH_TXINTR_PERIOD frames */
	if (++txq->axq_intrcnt >= ATH_TXINTR_PERIOD) {
		flags |= HAL_TXDESC_INTREQ;
		txq->axq_intrcnt = 0;
	}

	/*
	 * Formulate first tx descriptor with tx controls.
	 */
	ATH_HAL_SETUPTXDESC(ah, ds,
	    pktlen,			/* packet length */
	    hdrlen,			/* header length */
	    atype,			/* Atheros packet type */
	    MIN(in->in_txpower, 60),	/* txpower */
	    txrate, try0,		/* series 0 rate/tries */
	    keyix,			/* key cache index */
	    an->an_tx_antenna,		/* antenna mode */
	    flags,			/* flags */
	    ctsrate,			/* rts/cts rate */
	    ctsduration);		/* rts/cts duration */
	bf->bf_flags = flags;

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	ATH_DEBUG((ATH_DBG_SEND, "ath: ath_xmit(): to %s totlen=%d "
	    "an->an_tx_rate1sp=%d tx_rate2sp=%d tx_rate3sp=%d "
	    "qnum=%d rix=%d sht=%d dur = %d\n",
	    ieee80211_macaddr_sprintf(wh->i_addr1), mbslen, an->an_tx_rate1sp,
	    an->an_tx_rate2sp, an->an_tx_rate3sp,
	    txq->axq_qnum, rix, shortPreamble, *(uint16_t *)wh->i_dur));

	/*
	 * Setup the multi-rate retry state only when we're
	 * going to use it. This assumes ath_hal_setuptxdesc
	 * initializes the descriptors (so we don't have to)
	 * when the hardware supports multi-rate retry and
	 * we don't use it.
	 */
	if (try0 != ATH_TXMAXTRY)
		ATH_HAL_SETUPXTXDESC(ah, ds,
		    an->an_tx_rate1sp, 2,	/* series 1 */
		    an->an_tx_rate2sp, 2,	/* series 2 */
		    an->an_tx_rate3sp, 2);	/* series 3 */

	ds->ds_link = 0;
	ds->ds_data = bf->bf_dma.cookie.dmac_address;
	ATH_HAL_FILLTXDESC(ah, ds,
	    mbslen,		/* segment length */
	    AH_TRUE,		/* first segment */
	    AH_TRUE,		/* last segment */
	    ds);		/* first descriptor */

	ATH_DMA_SYNC(bf->bf_dma, DDI_DMA_SYNC_FORDEV);

	/* append to the queue's descriptor chain under axq_lock */
	mutex_enter(&txq->axq_lock);
	list_insert_tail(&txq->axq_list, bf);
	if (txq->axq_link == NULL) {
		ATH_HAL_PUTTXBUF(ah, txq->axq_qnum, bf->bf_daddr);
	} else {
		*txq->axq_link = bf->bf_daddr;
	}
	txq->axq_link = &ds->ds_link;
	mutex_exit(&txq->axq_lock);

	ATH_HAL_TXSTART(ah, txq->axq_qnum);

	ic->ic_stats.is_tx_frags++;
	ic->ic_stats.is_tx_bytes += pktlen;

	return (0);
}
947 947
/*
 * Transmit a management frame. On failure we reclaim the mblk.
 * Note that management frames come directly from the 802.11 layer
 * and do not honor the send queue flow control. Need to investigate
 * using priority queueing so management frames can bypass data.
 */
/*
 * Transmit a single frame (data or management): take a tx buffer off
 * the free list, look up the destination node, encapsulate data
 * frames (or timestamp probe responses), then hand off to
 * ath_tx_start().
 *
 * Returns 0 on success, ENXIO if the device is not running, ENOMEM
 * when no tx buffer is available (data frames are left for the
 * caller to reschedule), or EIO when the destination node cannot be
 * found.  mp is freed here on success or for failed non-data frames;
 * a failed data frame is returned to the caller intact.
 */
static int
ath_xmit(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
{
	ath_t *asc = (ath_t *)ic;
	struct ath_hal *ah = asc->asc_ah;
	struct ieee80211_node *in = NULL;
	struct ath_buf *bf = NULL;
	struct ieee80211_frame *wh;
	int error = 0;

	/* caller must hand us a single, unlinked message */
	ASSERT(mp->b_next == NULL);

	if (!ATH_IS_RUNNING(asc)) {
		/* free management frames; data frames stay with caller */
		if ((type & IEEE80211_FC0_TYPE_MASK) !=
		    IEEE80211_FC0_TYPE_DATA) {
			freemsg(mp);
		}
		return (ENXIO);
	}

	/* Grab a TX buffer */
	mutex_enter(&asc->asc_txbuflock);
	bf = list_head(&asc->asc_txbuf_list);
	if (bf != NULL)
		list_remove(&asc->asc_txbuf_list, bf);
	if (list_empty(&asc->asc_txbuf_list)) {
		/* NOTE: debug strings retain the legacy ath_mgmt_send name */
		ATH_DEBUG((ATH_DBG_SEND, "ath: ath_mgmt_send(): "
		    "stop queue\n"));
		asc->asc_stats.ast_tx_qstop++;
	}
	mutex_exit(&asc->asc_txbuflock);
	if (bf == NULL) {
		ATH_DEBUG((ATH_DBG_SEND, "ath: ath_mgmt_send(): discard, "
		    "no xmit buf\n"));
		ic->ic_stats.is_tx_nobuf++;
		if ((type & IEEE80211_FC0_TYPE_MASK) ==
		    IEEE80211_FC0_TYPE_DATA) {
			/* data: flag a reschedule instead of dropping */
			asc->asc_stats.ast_tx_nobuf++;
			mutex_enter(&asc->asc_resched_lock);
			asc->asc_resched_needed = B_TRUE;
			mutex_exit(&asc->asc_resched_lock);
		} else {
			/* management frames are simply dropped */
			asc->asc_stats.ast_tx_nobufmgt++;
			freemsg(mp);
		}
		return (ENOMEM);
	}

	wh = (struct ieee80211_frame *)mp->b_rptr;

	/* Locate node */
	in = ieee80211_find_txnode(ic, wh->i_addr1);
	if (in == NULL) {
		error = EIO;
		goto bad;
	}

	in->in_inact = 0;	/* node saw activity; reset inactivity count */
	switch (type & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_DATA:
		(void) ieee80211_encap(ic, mp, in);
		break;
	default:
		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
		    IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
			/* fill time stamp */
			uint64_t tsf;
			uint32_t *tstamp;

			tsf = ATH_HAL_GETTSF64(ah);
			/* adjust 100us delay to xmit */
			tsf += 100;
			/* LINTED E_BAD_PTR_CAST_ALIGN */
			tstamp = (uint32_t *)&wh[1];
			tstamp[0] = LE_32(tsf & 0xffffffff);
			tstamp[1] = LE_32(tsf >> 32);
		}
		asc->asc_stats.ast_tx_mgmt++;
		break;
	}

	error = ath_tx_start(asc, in, bf, mp);
	if (error != 0) {
bad:
		/* return the unused tx buffer to the free list */
		ic->ic_stats.is_tx_failed++;
		if (bf != NULL) {
			mutex_enter(&asc->asc_txbuflock);
			list_insert_tail(&asc->asc_txbuf_list, bf);
			mutex_exit(&asc->asc_txbuflock);
		}
	}
	if (in != NULL)
		ieee80211_free_node(in);
	/*
	 * mp is freed here on success (ath_tx_start presumably copied
	 * the payload into the DMA buffer -- TODO confirm) and for
	 * failed non-data frames; failed data frames go back to the
	 * caller for rescheduling.
	 */
	if ((type & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_DATA ||
	    error == 0) {
		freemsg(mp);
	}

	return (error);
}
1054 1054
1055 1055 static mblk_t *
1056 1056 ath_m_tx(void *arg, mblk_t *mp)
1057 1057 {
1058 1058 ath_t *asc = arg;
1059 1059 ieee80211com_t *ic = (ieee80211com_t *)asc;
1060 1060 mblk_t *next;
1061 1061 int error = 0;
1062 1062
1063 1063 /*
1064 1064 * No data frames go out unless we're associated; this
1065 1065 * should not happen as the 802.11 layer does not enable
1066 1066 * the xmit queue until we enter the RUN state.
1067 1067 */
1068 1068 if (ic->ic_state != IEEE80211_S_RUN) {
1069 1069 ATH_DEBUG((ATH_DBG_SEND, "ath: ath_m_tx(): "
1070 1070 "discard, state %u\n", ic->ic_state));
1071 1071 asc->asc_stats.ast_tx_discard++;
1072 1072 freemsgchain(mp);
1073 1073 return (NULL);
1074 1074 }
1075 1075
1076 1076 while (mp != NULL) {
1077 1077 next = mp->b_next;
1078 1078 mp->b_next = NULL;
1079 1079 error = ath_xmit(ic, mp, IEEE80211_FC0_TYPE_DATA);
1080 1080 if (error != 0) {
1081 1081 mp->b_next = next;
1082 1082 if (error == ENOMEM) {
1083 1083 break;
1084 1084 } else {
1085 1085 freemsgchain(mp); /* CR6501759 issues */
1086 1086 return (NULL);
1087 1087 }
1088 1088 }
1089 1089 mp = next;
1090 1090 }
1091 1091
1092 1092 return (mp);
1093 1093 }
1094 1094
/*
 * Reap completed tx descriptors from one hardware queue: update
 * per-node and driver statistics, account retries for the rate
 * control algorithm, recycle buffers onto the free list and kick
 * any output stalled waiting for buffers.  Returns the number of
 * acknowledged frames (used by the caller to work around phantom
 * beacon-miss interrupts).
 */
static int
ath_tx_processq(ath_t *asc, struct ath_txq *txq)
{
	ieee80211com_t *ic = (ieee80211com_t *)asc;
	struct ath_hal *ah = asc->asc_ah;
	struct ath_buf *bf;
	struct ath_desc *ds;
	struct ieee80211_node *in;
	int32_t sr, lr, nacked = 0;
	struct ath_tx_status *ts;
	HAL_STATUS status;
	struct ath_node *an;

	for (;;) {
		mutex_enter(&txq->axq_lock);
		bf = list_head(&txq->axq_list);
		if (bf == NULL) {
			/* queue drained; drop the stale link pointer */
			txq->axq_link = NULL;
			mutex_exit(&txq->axq_lock);
			break;
		}
		ds = bf->bf_desc;	/* last decriptor */
		ts = &bf->bf_status.ds_txstat;
		status = ATH_HAL_TXPROCDESC(ah, ds, ts);
#ifdef DEBUG
		ath_printtxbuf(bf, status == HAL_OK);
#endif
		if (status == HAL_EINPROGRESS) {
			/* hardware is not done with this frame yet */
			mutex_exit(&txq->axq_lock);
			break;
		}
		list_remove(&txq->axq_list, bf);
		mutex_exit(&txq->axq_lock);
		in = bf->bf_in;
		if (in != NULL) {
			an = ATH_NODE(in);
			/* Successful transmition */
			if (ts->ts_status == 0) {
				an->an_tx_ok++;
				an->an_tx_antenna = ts->ts_antenna;
				if (ts->ts_rate & HAL_TXSTAT_ALTRATE)
					asc->asc_stats.ast_tx_altrate++;
				/* track per-frame rssi trend in the stats */
				asc->asc_stats.ast_tx_rssidelta =
				    ts->ts_rssi - asc->asc_stats.ast_tx_rssi;
				asc->asc_stats.ast_tx_rssi = ts->ts_rssi;
			} else {
				an->an_tx_err++;
				if (ts->ts_status & HAL_TXERR_XRETRY)
					asc->asc_stats.ast_tx_xretries++;
				if (ts->ts_status & HAL_TXERR_FIFO)
					asc->asc_stats.ast_tx_fifoerr++;
				if (ts->ts_status & HAL_TXERR_FILT)
					asc->asc_stats.ast_tx_filtered++;
				an->an_tx_antenna = 0;	/* invalidate */
			}
			sr = ts->ts_shortretry;
			lr = ts->ts_longretry;
			asc->asc_stats.ast_tx_shortretry += sr;
			asc->asc_stats.ast_tx_longretry += lr;
			/*
			 * Hand the descriptor to the rate control algorithm.
			 * NOTE(review): an_tx_ok/an_tx_err are incremented a
			 * second time below for ack-eligible frames (already
			 * bumped above) -- confirm the double count is
			 * intended by the rate control module.
			 */
			if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
			    (bf->bf_flags & HAL_TXDESC_NOACK) == 0) {
				/*
				 * If frame was ack'd update the last rx time
				 * used to workaround phantom bmiss interrupts.
				 */
				if (ts->ts_status == 0) {
					nacked++;
					an->an_tx_ok++;
				} else {
					an->an_tx_err++;
				}
				an->an_tx_retr += sr + lr;
			}
		}
		bf->bf_in = NULL;
		/* return the buffer to the free pool */
		mutex_enter(&asc->asc_txbuflock);
		list_insert_tail(&asc->asc_txbuf_list, bf);
		mutex_exit(&asc->asc_txbuflock);
		/*
		 * Reschedule stalled outbound packets
		 */
		mutex_enter(&asc->asc_resched_lock);
		if (asc->asc_resched_needed) {
			asc->asc_resched_needed = B_FALSE;
			mac_tx_update(ic->ic_mach);
		}
		mutex_exit(&asc->asc_resched_lock);
	}
	return (nacked);
}
1188 1188
1189 1189
1190 1190 static void
1191 1191 ath_tx_handler(ath_t *asc)
1192 1192 {
1193 1193 int i;
1194 1194
1195 1195 /*
1196 1196 * Process each active queue.
1197 1197 */
1198 1198 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
1199 1199 if (ATH_TXQ_SETUP(asc, i)) {
1200 1200 (void) ath_tx_processq(asc, &asc->asc_txq[i]);
1201 1201 }
1202 1202 }
1203 1203 }
1204 1204
1205 1205 static struct ieee80211_node *
1206 1206 ath_node_alloc(ieee80211com_t *ic)
1207 1207 {
1208 1208 struct ath_node *an;
1209 1209 ath_t *asc = (ath_t *)ic;
1210 1210
1211 1211 an = kmem_zalloc(sizeof (struct ath_node), KM_SLEEP);
1212 1212 ath_rate_update(asc, &an->an_node, 0);
1213 1213 return (&an->an_node);
1214 1214 }
1215 1215
1216 1216 static void
1217 1217 ath_node_free(struct ieee80211_node *in)
1218 1218 {
1219 1219 ieee80211com_t *ic = in->in_ic;
1220 1220 ath_t *asc = (ath_t *)ic;
1221 1221 struct ath_buf *bf;
1222 1222 struct ath_txq *txq;
1223 1223 int32_t i;
1224 1224
1225 1225 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
1226 1226 if (ATH_TXQ_SETUP(asc, i)) {
1227 1227 txq = &asc->asc_txq[i];
1228 1228 mutex_enter(&txq->axq_lock);
1229 1229 bf = list_head(&txq->axq_list);
1230 1230 while (bf != NULL) {
1231 1231 if (bf->bf_in == in) {
1232 1232 bf->bf_in = NULL;
1233 1233 }
1234 1234 bf = list_next(&txq->axq_list, bf);
1235 1235 }
1236 1236 mutex_exit(&txq->axq_lock);
1237 1237 }
1238 1238 }
1239 1239 ic->ic_node_cleanup(in);
1240 1240 if (in->in_wpa_ie != NULL)
1241 1241 ieee80211_free(in->in_wpa_ie);
1242 1242 kmem_free(in, sizeof (struct ath_node));
1243 1243 }
1244 1244
1245 1245 static void
1246 1246 ath_next_scan(void *arg)
1247 1247 {
1248 1248 ieee80211com_t *ic = arg;
1249 1249 ath_t *asc = (ath_t *)ic;
1250 1250
1251 1251 asc->asc_scan_timer = 0;
1252 1252 if (ic->ic_state == IEEE80211_S_SCAN) {
1253 1253 asc->asc_scan_timer = timeout(ath_next_scan, (void *)asc,
1254 1254 drv_usectohz(ath_dwelltime * 1000));
1255 1255 ieee80211_next_scan(ic);
1256 1256 }
1257 1257 }
1258 1258
1259 1259 static void
1260 1260 ath_stop_scantimer(ath_t *asc)
1261 1261 {
1262 1262 timeout_id_t tmp_id = 0;
1263 1263
1264 1264 while ((asc->asc_scan_timer != 0) && (tmp_id != asc->asc_scan_timer)) {
1265 1265 tmp_id = asc->asc_scan_timer;
1266 1266 (void) untimeout(tmp_id);
1267 1267 }
1268 1268 asc->asc_scan_timer = 0;
1269 1269 }
1270 1270
/*
 * 802.11 state machine transition hook.  Sets the LED, programs the
 * channel, rx filter, BSSID/association id, WEP key MACs and beacon
 * timers as appropriate for the new state, then chains to the saved
 * net80211 handler and (re)arms the watchdog or scan timer.  Returns
 * 0, or an error from the channel change / parent handler.
 */
static int32_t
ath_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
{
	ath_t *asc = (ath_t *)ic;
	struct ath_hal *ah = asc->asc_ah;
	struct ieee80211_node *in;
	int32_t i, error;
	uint8_t *bssid;
	uint32_t rfilt;
	enum ieee80211_state ostate;

	/* LED state indexed by IEEE80211_S_* (order must match enum) */
	static const HAL_LED_STATE leds[] = {
		HAL_LED_INIT,	/* IEEE80211_S_INIT */
		HAL_LED_SCAN,	/* IEEE80211_S_SCAN */
		HAL_LED_AUTH,	/* IEEE80211_S_AUTH */
		HAL_LED_ASSOC,	/* IEEE80211_S_ASSOC */
		HAL_LED_RUN,	/* IEEE80211_S_RUN */
	};
	if (!ATH_IS_RUNNING(asc))
		return (0);

	ostate = ic->ic_state;
	/* any transition other than into SCAN cancels the scan timer */
	if (nstate != IEEE80211_S_SCAN)
		ath_stop_scantimer(asc);

	ATH_LOCK(asc);
	ATH_HAL_SETLEDSTATE(ah, leds[nstate]);	/* set LED */

	if (nstate == IEEE80211_S_INIT) {
		asc->asc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
		/*
		 * Disable interrupts.
		 */
		ATH_HAL_INTRSET(ah, asc->asc_imask &~ HAL_INT_GLOBAL);
		ATH_UNLOCK(asc);
		goto done;
	}
	in = ic->ic_bss;
	error = ath_chan_set(asc, ic->ic_curchan);
	if (error != 0) {
		if (nstate != IEEE80211_S_SCAN) {
			/* channel change failed: fall back and bail out */
			ATH_UNLOCK(asc);
			ieee80211_reset_chan(ic);
			goto bad;
		}
	}

	rfilt = ath_calcrxfilter(asc);

	/* while scanning use our own address, otherwise the BSSID */
	if (nstate == IEEE80211_S_SCAN)
		bssid = ic->ic_macaddr;
	else
		bssid = in->in_bssid;
	ATH_HAL_SETRXFILTER(ah, rfilt);

	if (nstate == IEEE80211_S_RUN && ic->ic_opmode != IEEE80211_M_IBSS)
		ATH_HAL_SETASSOCID(ah, bssid, in->in_associd);
	else
		ATH_HAL_SETASSOCID(ah, bssid, 0);
	if (ic->ic_flags & IEEE80211_F_PRIVACY) {
		/* retarget any valid WEP keys at the new BSSID */
		for (i = 0; i < IEEE80211_WEP_NKID; i++) {
			if (ATH_HAL_KEYISVALID(ah, i))
				ATH_HAL_KEYSETMAC(ah, i, bssid);
		}
	}

	if ((nstate == IEEE80211_S_RUN) &&
	    (ostate != IEEE80211_S_RUN)) {
		/* Configure the beacon and sleep timers. */
		ath_beacon_config(asc);
	} else {
		/* no beacon/bmiss interrupts outside RUN */
		asc->asc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
		ATH_HAL_INTRSET(ah, asc->asc_imask);
	}
	/*
	 * Reset the rate control state.
	 */
	ath_rate_ctl_reset(asc, nstate);

	ATH_UNLOCK(asc);
done:
	/*
	 * Invoke the parent method to complete the work.
	 */
	error = asc->asc_newstate(ic, nstate, arg);
	/*
	 * Finally, start any timers.
	 */
	if (nstate == IEEE80211_S_RUN) {
		ieee80211_start_watchdog(ic, 1);
	} else if ((nstate == IEEE80211_S_SCAN) && (ostate != nstate)) {
		/* start ap/neighbor scan timer */
		ASSERT(asc->asc_scan_timer == 0);
		asc->asc_scan_timer = timeout(ath_next_scan, (void *)asc,
		    drv_usectohz(ath_dwelltime * 1000));
	}
bad:
	return (error);
}
1370 1370
/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
 * Called from the watchdog while in RUN state.  When the HAL reports
 * the rf gain is out of bounds the whole chip is reset to load new
 * gain values; calibration failures are counted but otherwise
 * non-fatal.
 */
static void
ath_calibrate(ath_t *asc)
{
	struct ath_hal *ah = asc->asc_ah;
	HAL_BOOL iqcaldone;

	asc->asc_stats.ast_per_cal++;

	if (ATH_HAL_GETRFGAIN(ah) == HAL_RFGAIN_NEED_CHANGE) {
		/*
		 * Rfgain is out of bounds, reset the chip
		 * to load new gain values.
		 */
		ATH_DEBUG((ATH_DBG_HAL, "ath: ath_calibrate(): "
		    "Need change RFgain\n"));
		asc->asc_stats.ast_per_rfgain++;
		(void) ath_reset(&asc->asc_isc);
	}
	if (!ATH_HAL_CALIBRATE(ah, &asc->asc_curchan, &iqcaldone)) {
		ATH_DEBUG((ATH_DBG_HAL, "ath: ath_calibrate(): "
		    "calibration of channel %u failed\n",
		    asc->asc_curchan.channel));
		asc->asc_stats.ast_per_calfail++;
	}
}
1400 1400
/*
 * Periodic watchdog: recalibrate the PHY and drive the rate control
 * algorithm while in RUN state, then run the net80211 watchdog and
 * re-arm the timer (only when still in RUN).
 */
static void
ath_watchdog(void *arg)
{
	ath_t *asc = arg;
	ieee80211com_t *ic = &asc->asc_isc;
	int ntimer = 0;

	ATH_LOCK(asc);
	ic->ic_watchdog_timer = 0;
	if (!ATH_IS_RUNNING(asc)) {
		ATH_UNLOCK(asc);
		return;
	}

	if (ic->ic_state == IEEE80211_S_RUN) {
		/* periodic recalibration */
		ath_calibrate(asc);

		/*
		 * Start the background rate control thread if we
		 * are not configured to use a fixed xmit rate.
		 */
		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
			asc->asc_stats.ast_rate_calls ++;
			/* STA mode: only the bss node; else all nodes */
			if (ic->ic_opmode == IEEE80211_M_STA)
				ath_rate_ctl(ic, ic->ic_bss);
			else
				ieee80211_iterate_nodes(&ic->ic_sta,
				    ath_rate_ctl, asc);
		}

		ntimer = 1;	/* re-arm for another interval */
	}
	ATH_UNLOCK(asc);

	ieee80211_watchdog(ic);
	if (ntimer != 0)
		ieee80211_start_watchdog(ic, ntimer);
}
1440 1440
1441 1441 static void
1442 1442 ath_tx_proc(void *arg)
1443 1443 {
1444 1444 ath_t *asc = arg;
1445 1445 ath_tx_handler(asc);
1446 1446 }
1447 1447
1448 1448
/*
 * Hard interrupt handler.  Claims the (possibly shared) interrupt
 * only when the device is running and the HAL reports a pending
 * interrupt.  Fatal/rx-overrun conditions reset the chip; otherwise
 * rx work is deferred to the soft interrupt and tx completion to the
 * driver taskq, and beacon-miss pushes net80211 back to ASSOC state.
 */
static uint_t
ath_intr(caddr_t arg)
{
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	ath_t *asc = (ath_t *)arg;
	struct ath_hal *ah = asc->asc_ah;
	HAL_INT status;
	ieee80211com_t *ic = (ieee80211com_t *)asc;

	ATH_LOCK(asc);

	if (!ATH_IS_RUNNING(asc)) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		ATH_UNLOCK(asc);
		return (DDI_INTR_UNCLAIMED);
	}

	if (!ATH_HAL_INTRPEND(ah)) {	/* shared irq, not for us */
		ATH_UNLOCK(asc);
		return (DDI_INTR_UNCLAIMED);
	}

	ATH_HAL_GETISR(ah, &status);
	status &= asc->asc_imask;	/* only bits we asked for */
	if (status & HAL_INT_FATAL) {
		asc->asc_stats.ast_hardware++;
		goto reset;
	} else if (status & HAL_INT_RXORN) {
		asc->asc_stats.ast_rxorn++;
		goto reset;
	} else {
		if (status & HAL_INT_RXEOL) {
			/* rx descriptor list exhausted; restart the chain */
			asc->asc_stats.ast_rxeol++;
			asc->asc_rxlink = NULL;
		}
		if (status & HAL_INT_TXURN) {
			/* tx fifo underrun; bump the trigger level */
			asc->asc_stats.ast_txurn++;
			ATH_HAL_UPDATETXTRIGLEVEL(ah, AH_TRUE);
		}

		if (status & HAL_INT_RX) {
			/* defer rx processing to the soft interrupt */
			asc->asc_rx_pend = 1;
			ddi_trigger_softintr(asc->asc_softint_id);
		}
		if (status & HAL_INT_TX) {
			/* defer tx completion to the taskq */
			if (ddi_taskq_dispatch(asc->asc_tq, ath_tx_proc,
			    asc, DDI_NOSLEEP) != DDI_SUCCESS) {
				ath_problem("ath: ath_intr(): "
				    "No memory available for tx taskq\n");
			}
		}
		ATH_UNLOCK(asc);

		if (status & HAL_INT_SWBA) {
			/* This will occur only in Host-AP or Ad-Hoc mode */
			return (DDI_INTR_CLAIMED);
		}

		if (status & HAL_INT_BMISS) {
			/* beacon miss: drop back and reassociate */
			if (ic->ic_state == IEEE80211_S_RUN) {
				(void) ieee80211_new_state(ic,
				    IEEE80211_S_ASSOC, -1);
			}
		}

	}

	return (DDI_INTR_CLAIMED);
reset:
	/* fatal error or rx overrun: reset the chip to recover */
	(void) ath_reset(ic);
	ATH_UNLOCK(asc);
	return (DDI_INTR_CLAIMED);
}
1525 1525
1526 1526 static uint_t
1527 1527 ath_softint_handler(caddr_t data)
1528 1528 {
1529 1529 /* LINTED E_BAD_PTR_CAST_ALIGN */
1530 1530 ath_t *asc = (ath_t *)data;
1531 1531
1532 1532 /*
1533 1533 * Check if the soft interrupt is triggered by another
1534 1534 * driver at the same level.
1535 1535 */
1536 1536 ATH_LOCK(asc);
1537 1537 if (asc->asc_rx_pend) { /* Soft interrupt for this driver */
1538 1538 asc->asc_rx_pend = 0;
1539 1539 ATH_UNLOCK(asc);
1540 1540 ath_rx_handler(asc);
1541 1541 return (DDI_INTR_CLAIMED);
1542 1542 }
1543 1543 ATH_UNLOCK(asc);
1544 1544 return (DDI_INTR_UNCLAIMED);
1545 1545 }
1546 1546
/*
 * The following are MAC layer callback routines; the send, ioctl and
 * statistics callbacks (ath_m_tx, ath_m_ioctl, ath_m_stat) are
 * listed in their corresponding sections.  ath_stop_locked() and
 * ath_start_locked() reset the hardware w/o losing operational
 * state -- basically a more efficient stop/start pair followed by
 * state transitions to the current 802.11 operational state.  Used
 * to recover from errors such as rx overrun and to reset the
 * hardware when rf gain settings must be reset.
 */
1557 1557
1558 1558 static void
1559 1559 ath_stop_locked(ath_t *asc)
1560 1560 {
1561 1561 ieee80211com_t *ic = (ieee80211com_t *)asc;
1562 1562 struct ath_hal *ah = asc->asc_ah;
1563 1563
1564 1564 ATH_LOCK_ASSERT(asc);
1565 1565 if (!asc->asc_isrunning)
1566 1566 return;
1567 1567
1568 1568 /*
1569 1569 * Shutdown the hardware and driver:
1570 1570 * reset 802.11 state machine
1571 1571 * turn off timers
1572 1572 * disable interrupts
1573 1573 * turn off the radio
1574 1574 * clear transmit machinery
1575 1575 * clear receive machinery
1576 1576 * drain and release tx queues
1577 1577 * reclaim beacon resources
1578 1578 * power down hardware
1579 1579 *
1580 1580 * Note that some of this work is not possible if the
1581 1581 * hardware is gone (invalid).
1582 1582 */
1583 1583 ATH_UNLOCK(asc);
1584 1584 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
1585 1585 ieee80211_stop_watchdog(ic);
1586 1586 ATH_LOCK(asc);
1587 1587 ATH_HAL_INTRSET(ah, 0);
1588 1588 ath_draintxq(asc);
1589 1589 if (!asc->asc_invalid) {
1590 1590 ath_stoprecv(asc);
1591 1591 ATH_HAL_PHYDISABLE(ah);
1592 1592 } else {
1593 1593 asc->asc_rxlink = NULL;
1594 1594 }
1595 1595 asc->asc_isrunning = 0;
1596 1596 }
1597 1597
/*
 * GLDv3 stop entry point: quiesce the device, leave the chip awake
 * so config space stays accessible, and mark the softc invalid so
 * the hardware is not touched until the next start.
 */
static void
ath_m_stop(void *arg)
{
	ath_t *asc = arg;
	struct ath_hal *ah = asc->asc_ah;

	ATH_LOCK(asc);
	ath_stop_locked(asc);
	ATH_HAL_SETPOWER(ah, HAL_PM_AWAKE);
	asc->asc_invalid = 1;
	ATH_UNLOCK(asc);
}
1610 1610
/*
 * Bring the hardware up with asc_genlock held: reset the chip on the
 * current channel, start rx, program the interrupt mask and notify
 * the 802.11 layer of the channel.  Returns 0 on success or
 * ENOTACTIVE when the HAL reset fails.
 */
static int
ath_start_locked(ath_t *asc)
{
	ieee80211com_t *ic = (ieee80211com_t *)asc;
	struct ath_hal *ah = asc->asc_ah;
	HAL_STATUS status;

	ATH_LOCK_ASSERT(asc);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''. On return the hardware is known to
	 * be powered up and with interrupts disabled. This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	asc->asc_curchan.channel = ic->ic_curchan->ich_freq;
	asc->asc_curchan.channelFlags = ath_chan2flags(ic, ic->ic_curchan);
	if (!ATH_HAL_RESET(ah, (HAL_OPMODE)ic->ic_opmode,
	    &asc->asc_curchan, AH_FALSE, &status)) {
		/* NOTE: message retains the ath_m_start caller's name */
		ATH_DEBUG((ATH_DBG_HAL, "ath: ath_m_start(): "
		    "reset hardware failed: '%s' (HAL status %u)\n",
		    ath_get_hal_status_desc(status), status));
		return (ENOTACTIVE);
	}

	(void) ath_startrecv(asc);

	/*
	 * Enable interrupts.
	 */
	asc->asc_imask = HAL_INT_RX | HAL_INT_TX
	    | HAL_INT_RXEOL | HAL_INT_RXORN
	    | HAL_INT_FATAL | HAL_INT_GLOBAL;
	ATH_HAL_INTRSET(ah, asc->asc_imask);

	/*
	 * The hardware should be ready to go now so it's safe
	 * to kick the 802.11 state machine as it's likely to
	 * immediately call back to us to send mgmt frames.
	 */
	ath_chan_change(asc, ic->ic_curchan);

	asc->asc_isrunning = 1;

	return (0);
}
1658 1658
1659 1659 int
1660 1660 ath_m_start(void *arg)
1661 1661 {
1662 1662 ath_t *asc = arg;
1663 1663 int err;
1664 1664
1665 1665 ATH_LOCK(asc);
1666 1666 /*
1667 1667 * Stop anything previously setup. This is safe
1668 1668 * whether this is the first time through or not.
1669 1669 */
1670 1670 ath_stop_locked(asc);
1671 1671
1672 1672 if ((err = ath_start_locked(asc)) != 0) {
1673 1673 ATH_UNLOCK(asc);
1674 1674 return (err);
1675 1675 }
1676 1676
1677 1677 asc->asc_invalid = 0;
1678 1678 ATH_UNLOCK(asc);
1679 1679
1680 1680 return (0);
1681 1681 }
1682 1682
1683 1683
1684 1684 static int
1685 1685 ath_m_unicst(void *arg, const uint8_t *macaddr)
1686 1686 {
1687 1687 ath_t *asc = arg;
1688 1688 struct ath_hal *ah = asc->asc_ah;
1689 1689
1690 1690 ATH_DEBUG((ATH_DBG_GLD, "ath: ath_gld_saddr(): "
1691 1691 "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
1692 1692 macaddr[0], macaddr[1], macaddr[2],
1693 1693 macaddr[3], macaddr[4], macaddr[5]));
1694 1694
1695 1695 ATH_LOCK(asc);
1696 1696 IEEE80211_ADDR_COPY(asc->asc_isc.ic_macaddr, macaddr);
1697 1697 ATH_HAL_SETMAC(ah, asc->asc_isc.ic_macaddr);
1698 1698
1699 1699 (void) ath_reset(&asc->asc_isc);
1700 1700 ATH_UNLOCK(asc);
1701 1701 return (0);
1702 1702 }
1703 1703
1704 1704 static int
1705 1705 ath_m_promisc(void *arg, boolean_t on)
1706 1706 {
1707 1707 ath_t *asc = arg;
1708 1708 struct ath_hal *ah = asc->asc_ah;
1709 1709 uint32_t rfilt;
1710 1710
1711 1711 ATH_LOCK(asc);
1712 1712 rfilt = ATH_HAL_GETRXFILTER(ah);
1713 1713 if (on)
1714 1714 rfilt |= HAL_RX_FILTER_PROM;
1715 1715 else
1716 1716 rfilt &= ~HAL_RX_FILTER_PROM;
1717 1717 asc->asc_promisc = on;
1718 1718 ATH_HAL_SETRXFILTER(ah, rfilt);
1719 1719 ATH_UNLOCK(asc);
1720 1720
1721 1721 return (0);
1722 1722 }
1723 1723
1724 1724 static int
1725 1725 ath_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
1726 1726 {
1727 1727 ath_t *asc = arg;
1728 1728 struct ath_hal *ah = asc->asc_ah;
1729 1729 uint32_t val, index, bit;
1730 1730 uint8_t pos;
1731 1731 uint32_t *mfilt = asc->asc_mcast_hash;
1732 1732
1733 1733 ATH_LOCK(asc);
1734 1734
1735 1735 /* calculate XOR of eight 6bit values */
1736 1736 val = ATH_LE_READ_4(mca + 0);
1737 1737 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
1738 1738 val = ATH_LE_READ_4(mca + 3);
1739 1739 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
1740 1740 pos &= 0x3f;
1741 1741 index = pos / 32;
1742 1742 bit = 1 << (pos % 32);
1743 1743
1744 1744 if (add) { /* enable multicast */
1745 1745 asc->asc_mcast_refs[pos]++;
1746 1746 mfilt[index] |= bit;
1747 1747 } else { /* disable multicast */
1748 1748 if (--asc->asc_mcast_refs[pos] == 0)
1749 1749 mfilt[index] &= ~bit;
1750 1750 }
1751 1751 ATH_HAL_SETMCASTFILTER(ah, mfilt[0], mfilt[1]);
1752 1752
1753 1753 ATH_UNLOCK(asc);
1754 1754 return (0);
1755 1755 }
/*
 * Callback functions for get/set properties.
 */
1759 1759 static int
1760 1760 ath_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
1761 1761 uint_t wldp_length, const void *wldp_buf)
1762 1762 {
1763 1763 ath_t *asc = arg;
1764 1764 int err;
1765 1765
1766 1766 err = ieee80211_setprop(&asc->asc_isc, pr_name, wldp_pr_num,
1767 1767 wldp_length, wldp_buf);
1768 1768
1769 1769 ATH_LOCK(asc);
1770 1770
1771 1771 if (err == ENETRESET) {
1772 1772 if (ATH_IS_RUNNING(asc)) {
1773 1773 ATH_UNLOCK(asc);
1774 1774 (void) ath_m_start(asc);
1775 1775 (void) ieee80211_new_state(&asc->asc_isc,
1776 1776 IEEE80211_S_SCAN, -1);
1777 1777 ATH_LOCK(asc);
1778 1778 }
1779 1779 err = 0;
1780 1780 }
1781 1781
1782 1782 ATH_UNLOCK(asc);
1783 1783
1784 1784 return (err);
1785 1785 }
1786 1786
1787 1787 static int
1788 1788 ath_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
1789 1789 uint_t wldp_length, void *wldp_buf)
1790 1790 {
1791 1791 ath_t *asc = arg;
1792 1792 int err = 0;
1793 1793
1794 1794 err = ieee80211_getprop(&asc->asc_isc, pr_name, wldp_pr_num,
1795 1795 wldp_length, wldp_buf);
1796 1796
1797 1797 return (err);
1798 1798 }
1799 1799
1800 1800 static void
1801 1801 ath_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
1802 1802 mac_prop_info_handle_t mph)
1803 1803 {
1804 1804 ath_t *asc = arg;
1805 1805
1806 1806 ieee80211_propinfo(&asc->asc_isc, pr_name, wldp_pr_num, mph);
1807 1807 }
1808 1808
1809 1809 static void
1810 1810 ath_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1811 1811 {
1812 1812 ath_t *asc = arg;
1813 1813 int32_t err;
1814 1814
1815 1815 err = ieee80211_ioctl(&asc->asc_isc, wq, mp);
1816 1816 ATH_LOCK(asc);
1817 1817 if (err == ENETRESET) {
1818 1818 if (ATH_IS_RUNNING(asc)) {
1819 1819 ATH_UNLOCK(asc);
1820 1820 (void) ath_m_start(asc);
1821 1821 (void) ieee80211_new_state(&asc->asc_isc,
1822 1822 IEEE80211_S_SCAN, -1);
1823 1823 ATH_LOCK(asc);
1824 1824 }
1825 1825 }
1826 1826 ATH_UNLOCK(asc);
1827 1827 }
1828 1828
/*
 * GLDv3 statistics callback.  Serves MAC and WiFi statistics from
 * the driver counters; a subset of WiFi stats is forwarded to
 * net80211.  Returns 0, or ENOTSUP for unknown statistics.
 */
static int
ath_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	ath_t *asc = arg;
	ieee80211com_t *ic = (ieee80211com_t *)asc;
	struct ieee80211_node *in = ic->ic_bss;
	struct ieee80211_rateset *rs = &in->in_rates;

	ATH_LOCK(asc);
	switch (stat) {
	case MAC_STAT_IFSPEED:
		/* current tx rate in bits/sec (rate table is in 500kb/s) */
		*val = (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL) / 2 *
		    1000000ull;
		break;
	case MAC_STAT_NOXMTBUF:
		*val = asc->asc_stats.ast_tx_nobuf +
		    asc->asc_stats.ast_tx_nobufmgt;
		break;
	case MAC_STAT_IERRORS:
		*val = asc->asc_stats.ast_rx_tooshort;
		break;
	case MAC_STAT_RBYTES:
		*val = ic->ic_stats.is_rx_bytes;
		break;
	case MAC_STAT_IPACKETS:
		*val = ic->ic_stats.is_rx_frags;
		break;
	case MAC_STAT_OBYTES:
		*val = ic->ic_stats.is_tx_bytes;
		break;
	case MAC_STAT_OPACKETS:
		*val = ic->ic_stats.is_tx_frags;
		break;
	case MAC_STAT_OERRORS:
	case WIFI_STAT_TX_FAILED:
		*val = asc->asc_stats.ast_tx_fifoerr +
		    asc->asc_stats.ast_tx_xretries +
		    asc->asc_stats.ast_tx_discard;
		break;
	case WIFI_STAT_TX_RETRANS:
		*val = asc->asc_stats.ast_tx_xretries;
		break;
	case WIFI_STAT_FCS_ERRORS:
		*val = asc->asc_stats.ast_rx_crcerr;
		break;
	case WIFI_STAT_WEP_ERRORS:
		*val = asc->asc_stats.ast_rx_badcrypt;
		break;
	case WIFI_STAT_TX_FRAGS:
	case WIFI_STAT_MCAST_TX:
	case WIFI_STAT_RTS_SUCCESS:
	case WIFI_STAT_RTS_FAILURE:
	case WIFI_STAT_ACK_FAILURE:
	case WIFI_STAT_RX_FRAGS:
	case WIFI_STAT_MCAST_RX:
	case WIFI_STAT_RX_DUPS:
		/* these are maintained by the 802.11 layer */
		ATH_UNLOCK(asc);
		return (ieee80211_stat(ic, stat, val));
	default:
		ATH_UNLOCK(asc);
		return (ENOTSUP);
	}
	ATH_UNLOCK(asc);

	return (0);
}
1895 1895
1896 1896 static int
1897 1897 ath_pci_setup(ath_t *asc)
1898 1898 {
1899 1899 uint16_t command;
1900 1900
1901 1901 /*
1902 1902 * Enable memory mapping and bus mastering
1903 1903 */
1904 1904 ASSERT(asc != NULL);
1905 1905 command = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_COMM);
1906 1906 command |= PCI_COMM_MAE | PCI_COMM_ME;
1907 1907 pci_config_put16(asc->asc_cfg_handle, PCI_CONF_COMM, command);
1908 1908 command = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_COMM);
1909 1909 if ((command & PCI_COMM_MAE) == 0) {
1910 1910 ath_problem("ath: ath_pci_setup(): "
1911 1911 "failed to enable memory mapping\n");
1912 1912 return (EIO);
1913 1913 }
1914 1914 if ((command & PCI_COMM_ME) == 0) {
1915 1915 ath_problem("ath: ath_pci_setup(): "
1916 1916 "failed to enable bus mastering\n");
1917 1917 return (EIO);
1918 1918 }
1919 1919 ATH_DEBUG((ATH_DBG_INIT, "ath: ath_pci_setup(): "
1920 1920 "set command reg to 0x%x \n", command));
1921 1921
1922 1922 return (0);
1923 1923 }
1924 1924
1925 1925 static int
1926 1926 ath_resume(dev_info_t *devinfo)
1927 1927 {
1928 1928 ath_t *asc;
1929 1929 int ret = DDI_SUCCESS;
1930 1930
1931 1931 asc = ddi_get_soft_state(ath_soft_state_p, ddi_get_instance(devinfo));
1932 1932 if (asc == NULL) {
1933 1933 ATH_DEBUG((ATH_DBG_SUSPEND, "ath: ath_resume(): "
1934 1934 "failed to get soft state\n"));
1935 1935 return (DDI_FAILURE);
1936 1936 }
1937 1937
1938 1938 ATH_LOCK(asc);
1939 1939 /*
1940 1940 * Set up config space command register(s). Refuse
1941 1941 * to resume on failure.
1942 1942 */
1943 1943 if (ath_pci_setup(asc) != 0) {
1944 1944 ATH_DEBUG((ATH_DBG_SUSPEND, "ath: ath_resume(): "
1945 1945 "ath_pci_setup() failed\n"));
1946 1946 ATH_UNLOCK(asc);
1947 1947 return (DDI_FAILURE);
1948 1948 }
1949 1949
1950 1950 if (!asc->asc_invalid)
1951 1951 ret = ath_start_locked(asc);
1952 1952 ATH_UNLOCK(asc);
1953 1953
1954 1954 return (ret);
1955 1955 }
1956 1956
1957 1957 static int
1958 1958 ath_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
1959 1959 {
1960 1960 ath_t *asc;
1961 1961 ieee80211com_t *ic;
1962 1962 struct ath_hal *ah;
1963 1963 uint8_t csz;
1964 1964 HAL_STATUS status;
1965 1965 caddr_t regs;
1966 1966 uint32_t i, val;
1967 1967 uint16_t vendor_id, device_id;
1968 1968 const char *athname;
1969 1969 int32_t ath_countrycode = CTRY_DEFAULT; /* country code */
1970 1970 int32_t err, ath_regdomain = 0; /* regulatory domain */
1971 1971 char strbuf[32];
1972 1972 int instance;
1973 1973 wifi_data_t wd = { 0 };
1974 1974 mac_register_t *macp;
1975 1975
1976 1976 switch (cmd) {
1977 1977 case DDI_ATTACH:
1978 1978 break;
1979 1979
1980 1980 case DDI_RESUME:
1981 1981 return (ath_resume(devinfo));
1982 1982
1983 1983 default:
1984 1984 return (DDI_FAILURE);
1985 1985 }
1986 1986
1987 1987 instance = ddi_get_instance(devinfo);
1988 1988 if (ddi_soft_state_zalloc(ath_soft_state_p, instance) != DDI_SUCCESS) {
1989 1989 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
1990 1990 "Unable to alloc softstate\n"));
1991 1991 return (DDI_FAILURE);
1992 1992 }
1993 1993
1994 1994 asc = ddi_get_soft_state(ath_soft_state_p, ddi_get_instance(devinfo));
1995 1995 ic = (ieee80211com_t *)asc;
1996 1996 asc->asc_dev = devinfo;
1997 1997
1998 1998 mutex_init(&asc->asc_genlock, NULL, MUTEX_DRIVER, NULL);
1999 1999 mutex_init(&asc->asc_txbuflock, NULL, MUTEX_DRIVER, NULL);
2000 2000 mutex_init(&asc->asc_rxbuflock, NULL, MUTEX_DRIVER, NULL);
2001 2001 mutex_init(&asc->asc_resched_lock, NULL, MUTEX_DRIVER, NULL);
2002 2002
2003 2003 err = pci_config_setup(devinfo, &asc->asc_cfg_handle);
2004 2004 if (err != DDI_SUCCESS) {
2005 2005 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2006 2006 "pci_config_setup() failed"));
2007 2007 goto attach_fail0;
2008 2008 }
2009 2009
2010 2010 if (ath_pci_setup(asc) != 0)
2011 2011 goto attach_fail1;
2012 2012
2013 2013 /*
2014 2014 * Cache line size is used to size and align various
2015 2015 * structures used to communicate with the hardware.
2016 2016 */
2017 2017 csz = pci_config_get8(asc->asc_cfg_handle, PCI_CONF_CACHE_LINESZ);
2018 2018 if (csz == 0) {
2019 2019 /*
2020 2020 * We must have this setup properly for rx buffer
2021 2021 * DMA to work so force a reasonable value here if it
2022 2022 * comes up zero.
2023 2023 */
2024 2024 csz = ATH_DEF_CACHE_BYTES / sizeof (uint32_t);
2025 2025 pci_config_put8(asc->asc_cfg_handle, PCI_CONF_CACHE_LINESZ,
2026 2026 csz);
2027 2027 }
2028 2028 asc->asc_cachelsz = csz << 2;
2029 2029 vendor_id = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_VENID);
2030 2030 device_id = pci_config_get16(asc->asc_cfg_handle, PCI_CONF_DEVID);
2031 2031 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): vendor 0x%x, "
2032 2032 "device id 0x%x, cache size %d\n", vendor_id, device_id, csz));
2033 2033
2034 2034 athname = ath_hal_probe(vendor_id, device_id);
2035 2035 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): athname: %s\n",
2036 2036 athname ? athname : "Atheros ???"));
2037 2037
2038 2038 pci_config_put8(asc->asc_cfg_handle, PCI_CONF_LATENCY_TIMER, 0xa8);
2039 2039 val = pci_config_get32(asc->asc_cfg_handle, 0x40);
2040 2040 if ((val & 0x0000ff00) != 0)
2041 2041 pci_config_put32(asc->asc_cfg_handle, 0x40, val & 0xffff00ff);
2042 2042
2043 2043 err = ddi_regs_map_setup(devinfo, 1,
2044 2044 ®s, 0, 0, &ath_reg_accattr, &asc->asc_io_handle);
2045 2045 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2046 2046 "regs map1 = %x err=%d\n", regs, err));
2047 2047 if (err != DDI_SUCCESS) {
2048 2048 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2049 2049 "ddi_regs_map_setup() failed"));
2050 2050 goto attach_fail1;
2051 2051 }
2052 2052
2053 2053 ah = ath_hal_attach(device_id, asc, 0, regs, &status);
2054 2054 if (ah == NULL) {
2055 2055 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2056 2056 "unable to attach hw: '%s' (HAL status %u)\n",
2057 2057 ath_get_hal_status_desc(status), status));
2058 2058 goto attach_fail2;
2059 2059 }
2060 2060 ATH_DEBUG((ATH_DBG_ATTACH, "mac %d.%d phy %d.%d",
2061 2061 ah->ah_macVersion, ah->ah_macRev,
2062 2062 ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf));
2063 2063 ATH_HAL_INTRSET(ah, 0);
2064 2064 asc->asc_ah = ah;
2065 2065
2066 2066 if (ah->ah_abi != HAL_ABI_VERSION) {
2067 2067 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2068 2068 "HAL ABI mismatch detected (0x%x != 0x%x)\n",
2069 2069 ah->ah_abi, HAL_ABI_VERSION));
2070 2070 goto attach_fail3;
2071 2071 }
2072 2072
2073 2073 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2074 2074 "HAL ABI version 0x%x\n", ah->ah_abi));
2075 2075 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2076 2076 "HAL mac version %d.%d, phy version %d.%d\n",
2077 2077 ah->ah_macVersion, ah->ah_macRev,
2078 2078 ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf));
2079 2079 if (ah->ah_analog5GhzRev)
2080 2080 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2081 2081 "HAL 5ghz radio version %d.%d\n",
2082 2082 ah->ah_analog5GhzRev >> 4,
2083 2083 ah->ah_analog5GhzRev & 0xf));
2084 2084 if (ah->ah_analog2GhzRev)
2085 2085 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2086 2086 "HAL 2ghz radio version %d.%d\n",
2087 2087 ah->ah_analog2GhzRev >> 4,
2088 2088 ah->ah_analog2GhzRev & 0xf));
2089 2089
2090 2090 /*
2091 2091 * Check if the MAC has multi-rate retry support.
2092 2092 * We do this by trying to setup a fake extended
2093 2093 * descriptor. MAC's that don't have support will
2094 2094 * return false w/o doing anything. MAC's that do
2095 2095 * support it will return true w/o doing anything.
2096 2096 */
2097 2097 asc->asc_mrretry = ATH_HAL_SETUPXTXDESC(ah, NULL, 0, 0, 0, 0, 0, 0);
2098 2098 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2099 2099 "multi rate retry support=%x\n",
2100 2100 asc->asc_mrretry));
2101 2101
2102 2102 /*
2103 2103 * Get the hardware key cache size.
2104 2104 */
2105 2105 asc->asc_keymax = ATH_HAL_KEYCACHESIZE(ah);
2106 2106 if (asc->asc_keymax > sizeof (asc->asc_keymap) * NBBY) {
2107 2107 ATH_DEBUG((ATH_DBG_ATTACH, "ath_attach:"
2108 2108 " Warning, using only %u entries in %u key cache\n",
2109 2109 sizeof (asc->asc_keymap) * NBBY, asc->asc_keymax));
2110 2110 asc->asc_keymax = sizeof (asc->asc_keymap) * NBBY;
2111 2111 }
2112 2112 /*
2113 2113 * Reset the key cache since some parts do not
2114 2114 * reset the contents on initial power up.
2115 2115 */
2116 2116 for (i = 0; i < asc->asc_keymax; i++)
2117 2117 ATH_HAL_KEYRESET(ah, i);
2118 2118
2119 2119 ATH_HAL_GETREGDOMAIN(ah, (uint32_t *)&ath_regdomain);
2120 2120 ATH_HAL_GETCOUNTRYCODE(ah, &ath_countrycode);
2121 2121 /*
2122 2122 * Collect the channel list using the default country
2123 2123 * code and including outdoor channels. The 802.11 layer
2124 2124 * is resposible for filtering this list to a set of
2125 2125 * channels that it considers ok to use.
2126 2126 */
2127 2127 asc->asc_have11g = 0;
2128 2128
2129 2129 /* enable outdoor use, enable extended channels */
2130 2130 err = ath_getchannels(asc, ath_countrycode, AH_FALSE, AH_TRUE);
2131 2131 if (err != 0)
2132 2132 goto attach_fail3;
2133 2133
2134 2134 /*
2135 2135 * Setup rate tables for all potential media types.
2136 2136 */
2137 2137 ath_rate_setup(asc, IEEE80211_MODE_11A);
2138 2138 ath_rate_setup(asc, IEEE80211_MODE_11B);
2139 2139 ath_rate_setup(asc, IEEE80211_MODE_11G);
2140 2140 ath_rate_setup(asc, IEEE80211_MODE_TURBO_A);
2141 2141
2142 2142 /* Setup here so ath_rate_update is happy */
2143 2143 ath_setcurmode(asc, IEEE80211_MODE_11A);
2144 2144
2145 2145 err = ath_desc_alloc(devinfo, asc);
2146 2146 if (err != DDI_SUCCESS) {
2147 2147 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2148 2148 "failed to allocate descriptors: %d\n", err));
2149 2149 goto attach_fail3;
2150 2150 }
2151 2151
2152 2152 if ((asc->asc_tq = ddi_taskq_create(devinfo, "ath_taskq", 1,
2153 2153 TASKQ_DEFAULTPRI, 0)) == NULL) {
2154 2154 goto attach_fail4;
2155 2155 }
2156 2156 /* Setup transmit queues in the HAL */
2157 2157 if (ath_txq_setup(asc))
2158 2158 goto attach_fail4;
2159 2159
2160 2160 ATH_HAL_GETMAC(ah, ic->ic_macaddr);
2161 2161
2162 2162 /*
2163 2163 * Initialize pointers to device specific functions which
2164 2164 * will be used by the generic layer.
2165 2165 */
2166 2166 /* 11g support is identified when we fetch the channel set */
2167 2167 if (asc->asc_have11g)
2168 2168 ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
2169 2169 IEEE80211_C_SHSLOT; /* short slot time */
2170 2170 /*
2171 2171 * Query the hal to figure out h/w crypto support.
2172 2172 */
2173 2173 if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_WEP))
2174 2174 ic->ic_caps |= IEEE80211_C_WEP;
2175 2175 if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_AES_OCB))
2176 2176 ic->ic_caps |= IEEE80211_C_AES;
2177 2177 if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_AES_CCM)) {
2178 2178 ATH_DEBUG((ATH_DBG_ATTACH, "Atheros support H/W CCMP\n"));
2179 2179 ic->ic_caps |= IEEE80211_C_AES_CCM;
2180 2180 }
2181 2181 if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_CKIP))
2182 2182 ic->ic_caps |= IEEE80211_C_CKIP;
2183 2183 if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_TKIP)) {
2184 2184 ATH_DEBUG((ATH_DBG_ATTACH, "Atheros support H/W TKIP\n"));
2185 2185 ic->ic_caps |= IEEE80211_C_TKIP;
2186 2186 /*
2187 2187 * Check if h/w does the MIC and/or whether the
2188 2188 * separate key cache entries are required to
2189 2189 * handle both tx+rx MIC keys.
2190 2190 */
2191 2191 if (ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_MIC)) {
2192 2192 ATH_DEBUG((ATH_DBG_ATTACH, "Support H/W TKIP MIC\n"));
2193 2193 ic->ic_caps |= IEEE80211_C_TKIPMIC;
2194 2194 }
2195 2195
2196 2196 /*
2197 2197 * If the h/w supports storing tx+rx MIC keys
2198 2198 * in one cache slot automatically enable use.
2199 2199 */
2200 2200 if (ATH_HAL_HASTKIPSPLIT(ah) ||
2201 2201 !ATH_HAL_SETTKIPSPLIT(ah, AH_FALSE)) {
2202 2202 asc->asc_splitmic = 1;
2203 2203 }
2204 2204 }
2205 2205 ic->ic_caps |= IEEE80211_C_WPA; /* Support WPA/WPA2 */
2206 2206
2207 2207 asc->asc_hasclrkey = ATH_HAL_CIPHERSUPPORTED(ah, HAL_CIPHER_CLR);
2208 2208 /*
2209 2209 * Mark key cache slots associated with global keys
2210 2210 * as in use. If we knew TKIP was not to be used we
2211 2211 * could leave the +32, +64, and +32+64 slots free.
2212 2212 */
2213 2213 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
2214 2214 setbit(asc->asc_keymap, i);
2215 2215 setbit(asc->asc_keymap, i+64);
2216 2216 if (asc->asc_splitmic) {
2217 2217 setbit(asc->asc_keymap, i+32);
2218 2218 setbit(asc->asc_keymap, i+32+64);
2219 2219 }
2220 2220 }
2221 2221
2222 2222 ic->ic_phytype = IEEE80211_T_OFDM;
2223 2223 ic->ic_opmode = IEEE80211_M_STA;
2224 2224 ic->ic_state = IEEE80211_S_INIT;
2225 2225 ic->ic_maxrssi = ATH_MAX_RSSI;
2226 2226 ic->ic_set_shortslot = ath_set_shortslot;
2227 2227 ic->ic_xmit = ath_xmit;
2228 2228 ieee80211_attach(ic);
2229 2229
2230 2230 /* different instance has different WPA door */
2231 2231 (void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
2232 2232 ddi_driver_name(devinfo),
2233 2233 ddi_get_instance(devinfo));
2234 2234
2235 2235 /* Override 80211 default routines */
2236 2236 ic->ic_reset = ath_reset;
2237 2237 asc->asc_newstate = ic->ic_newstate;
2238 2238 ic->ic_newstate = ath_newstate;
2239 2239 ic->ic_watchdog = ath_watchdog;
2240 2240 ic->ic_node_alloc = ath_node_alloc;
2241 2241 ic->ic_node_free = ath_node_free;
2242 2242 ic->ic_crypto.cs_key_alloc = ath_key_alloc;
2243 2243 ic->ic_crypto.cs_key_delete = ath_key_delete;
2244 2244 ic->ic_crypto.cs_key_set = ath_key_set;
2245 2245 ieee80211_media_init(ic);
2246 2246 /*
2247 2247 * initialize default tx key
2248 2248 */
2249 2249 ic->ic_def_txkey = 0;
2250 2250
2251 2251 asc->asc_rx_pend = 0;
2252 2252 ATH_HAL_INTRSET(ah, 0);
2253 2253 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW,
2254 2254 &asc->asc_softint_id, NULL, 0, ath_softint_handler, (caddr_t)asc);
2255 2255 if (err != DDI_SUCCESS) {
2256 2256 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2257 2257 "ddi_add_softintr() failed\n"));
2258 2258 goto attach_fail5;
2259 2259 }
2260 2260
2261 2261 if (ddi_get_iblock_cookie(devinfo, 0, &asc->asc_iblock)
2262 2262 != DDI_SUCCESS) {
2263 2263 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2264 2264 "Can not get iblock cookie for INT\n"));
2265 2265 goto attach_fail6;
2266 2266 }
2267 2267
2268 2268 if (ddi_add_intr(devinfo, 0, NULL, NULL, ath_intr,
2269 2269 (caddr_t)asc) != DDI_SUCCESS) {
2270 2270 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2271 2271 "Can not set intr for ATH driver\n"));
2272 2272 goto attach_fail6;
2273 2273 }
2274 2274
2275 2275 /*
2276 2276 * Provide initial settings for the WiFi plugin; whenever this
2277 2277 * information changes, we need to call mac_plugindata_update()
2278 2278 */
2279 2279 wd.wd_opmode = ic->ic_opmode;
2280 2280 wd.wd_secalloc = WIFI_SEC_NONE;
2281 2281 IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid);
2282 2282
2283 2283 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
2284 2284 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2285 2285 "MAC version mismatch\n"));
2286 2286 goto attach_fail7;
2287 2287 }
2288 2288
2289 2289 macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI;
2290 2290 macp->m_driver = asc;
2291 2291 macp->m_dip = devinfo;
2292 2292 macp->m_src_addr = ic->ic_macaddr;
2293 2293 macp->m_callbacks = &ath_m_callbacks;
2294 2294 macp->m_min_sdu = 0;
2295 2295 macp->m_max_sdu = IEEE80211_MTU;
2296 2296 macp->m_pdata = &wd;
2297 2297 macp->m_pdata_size = sizeof (wd);
2298 2298
2299 2299 err = mac_register(macp, &ic->ic_mach);
2300 2300 mac_free(macp);
2301 2301 if (err != 0) {
2302 2302 ATH_DEBUG((ATH_DBG_ATTACH, "ath: ath_attach(): "
2303 2303 "mac_register err %x\n", err));
2304 2304 goto attach_fail7;
2305 2305 }
2306 2306
2307 2307 /* Create minor node of type DDI_NT_NET_WIFI */
2308 2308 (void) snprintf(strbuf, sizeof (strbuf), "%s%d",
2309 2309 ATH_NODENAME, instance);
2310 2310 err = ddi_create_minor_node(devinfo, strbuf, S_IFCHR,
2311 2311 instance + 1, DDI_NT_NET_WIFI, 0);
2312 2312 if (err != DDI_SUCCESS)
2313 2313 ATH_DEBUG((ATH_DBG_ATTACH, "WARN: ath: ath_attach(): "
2314 2314 "Create minor node failed - %d\n", err));
2315 2315
2316 2316 mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
2317 2317 asc->asc_invalid = 1;
2318 2318 asc->asc_isrunning = 0;
2319 2319 asc->asc_promisc = B_FALSE;
2320 2320 bzero(asc->asc_mcast_refs, sizeof (asc->asc_mcast_refs));
2321 2321 bzero(asc->asc_mcast_hash, sizeof (asc->asc_mcast_hash));
2322 2322 return (DDI_SUCCESS);
2323 2323 attach_fail7:
2324 2324 ddi_remove_intr(devinfo, 0, asc->asc_iblock);
2325 2325 attach_fail6:
2326 2326 ddi_remove_softintr(asc->asc_softint_id);
2327 2327 attach_fail5:
2328 2328 (void) ieee80211_detach(ic);
2329 2329 attach_fail4:
2330 2330 ath_desc_free(asc);
2331 2331 if (asc->asc_tq)
2332 2332 ddi_taskq_destroy(asc->asc_tq);
2333 2333 attach_fail3:
2334 2334 ah->ah_detach(asc->asc_ah);
2335 2335 attach_fail2:
2336 2336 ddi_regs_map_free(&asc->asc_io_handle);
2337 2337 attach_fail1:
2338 2338 pci_config_teardown(&asc->asc_cfg_handle);
2339 2339 attach_fail0:
2340 2340 asc->asc_invalid = 1;
2341 2341 mutex_destroy(&asc->asc_txbuflock);
2342 2342 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
2343 2343 if (ATH_TXQ_SETUP(asc, i)) {
2344 2344 struct ath_txq *txq = &asc->asc_txq[i];
2345 2345 mutex_destroy(&txq->axq_lock);
2346 2346 }
2347 2347 }
2348 2348 mutex_destroy(&asc->asc_rxbuflock);
2349 2349 mutex_destroy(&asc->asc_genlock);
2350 2350 mutex_destroy(&asc->asc_resched_lock);
2351 2351 ddi_soft_state_free(ath_soft_state_p, instance);
2352 2352
2353 2353 return (DDI_FAILURE);
2354 2354 }
2355 2355
/*
 * Suspend transmit/receive for powerdown
 *
 * Called on DDI_SUSPEND.  Stops the chip with the softc locked
 * (ATH_LOCK); config-space state is re-established later by
 * ath_resume().  Always succeeds.
 */
static int
ath_suspend(ath_t *asc)
{
	ATH_LOCK(asc);
	ath_stop_locked(asc);
	ATH_UNLOCK(asc);
	ATH_DEBUG((ATH_DBG_SUSPEND, "ath: suspended.\n"));

	return (DDI_SUCCESS);
}
2369 2369
/*
 * detach(9E) entry point.
 *
 * DDI_DETACH tears the instance down: unplumb from GLDv3, quiesce
 * interrupts, detach from net80211 and the HAL (in that order -- see
 * the NB comment below), release register/config mappings, destroy
 * locks and free the soft state.  DDI_SUSPEND is delegated to
 * ath_suspend().
 */
static int32_t
ath_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	ath_t *asc;

	asc = ddi_get_soft_state(ath_soft_state_p, ddi_get_instance(devinfo));
	ASSERT(asc != NULL);

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (ath_suspend(asc));

	default:
		return (DDI_FAILURE);
	}

	/* Refuse to detach while the mac layer still has the device open. */
	if (mac_disable(asc->asc_isc.ic_mach) != 0)
		return (DDI_FAILURE);

	ath_stop_scantimer(asc);

	/* disable interrupts */
	ATH_HAL_INTRSET(asc->asc_ah, 0);

	/*
	 * Unregister from the MAC layer subsystem
	 */
	(void) mac_unregister(asc->asc_isc.ic_mach);

	/* free intterrupt resources */
	ddi_remove_intr(devinfo, 0, asc->asc_iblock);
	ddi_remove_softintr(asc->asc_softint_id);

	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 */
	ieee80211_detach(&asc->asc_isc);
	ath_desc_free(asc);
	ddi_taskq_destroy(asc->asc_tq);
	ath_txq_cleanup(asc);
	asc->asc_ah->ah_detach(asc->asc_ah);

	/* free io handle */
	ddi_regs_map_free(&asc->asc_io_handle);
	pci_config_teardown(&asc->asc_cfg_handle);

	/*
	 * destroy locks
	 *
	 * NOTE(review): asc_txbuflock (initialized in ath_attach) is not
	 * destroyed here -- confirm it is torn down elsewhere (e.g. in
	 * ath_desc_free()/ath_txq_cleanup()) or this leaks a mutex.
	 */
	mutex_destroy(&asc->asc_rxbuflock);
	mutex_destroy(&asc->asc_genlock);
	mutex_destroy(&asc->asc_resched_lock);

	ddi_remove_minor_node(devinfo, NULL);
	ddi_soft_state_free(ath_soft_state_p, ddi_get_instance(devinfo));

	return (DDI_SUCCESS);
}
2437 2437
2438 2438 /*
2439 2439 * quiesce(9E) entry point.
2440 2440 *
2441 2441 * This function is called when the system is single-threaded at high
2442 2442 * PIL with preemption disabled. Therefore, this function must not be
2443 2443 * blocked.
2444 2444 *
2445 2445 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
2446 2446 * DDI_FAILURE indicates an error condition and should almost never happen.
2447 2447 */
2448 2448 static int32_t
2449 2449 ath_quiesce(dev_info_t *devinfo)
2450 2450 {
2451 2451 ath_t *asc;
2452 2452 struct ath_hal *ah;
2453 2453 int i;
2454 2454
2455 2455 asc = ddi_get_soft_state(ath_soft_state_p, ddi_get_instance(devinfo));
2456 2456
2457 2457 if (asc == NULL || (ah = asc->asc_ah) == NULL)
2458 2458 return (DDI_FAILURE);
2459 2459
2460 2460 /*
2461 2461 * Disable interrupts
2462 2462 */
2463 2463 ATH_HAL_INTRSET(ah, 0);
2464 2464
2465 2465 /*
2466 2466 * Disable TX HW
2467 2467 */
2468 2468 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
2469 2469 if (ATH_TXQ_SETUP(asc, i)) {
2470 2470 ATH_HAL_STOPTXDMA(ah, asc->asc_txq[i].axq_qnum);
2471 2471 }
2472 2472 }
2473 2473
2474 2474 /*
2475 2475 * Disable RX HW
2476 2476 */
2477 2477 ATH_HAL_STOPPCURECV(ah);
2478 2478 ATH_HAL_SETRXFILTER(ah, 0);
2479 2479 ATH_HAL_STOPDMARECV(ah);
2480 2480 drv_usecwait(3000);
2481 2481
2482 2482 /*
2483 2483 * Power down HW
2484 2484 */
2485 2485 ATH_HAL_PHYDISABLE(ah);
2486 2486
2487 2487 return (DDI_SUCCESS);
2488 2488 }
2489 2489
↓ open down ↓ |
2489 lines elided |
↑ open up ↑ |
2490 2490 DDI_DEFINE_STREAM_OPS(ath_dev_ops, nulldev, nulldev, ath_attach, ath_detach,
2491 2491 nodev, NULL, D_MP, NULL, ath_quiesce);
2492 2492
2493 2493 static struct modldrv ath_modldrv = {
2494 2494 &mod_driverops, /* Type of module. This one is a driver */
2495 2495 "ath driver 1.4/HAL 0.10.5.6", /* short description */
2496 2496 &ath_dev_ops /* driver specific ops */
2497 2497 };
2498 2498
2499 2499 static struct modlinkage modlinkage = {
2500 - MODREV_1, (void *)&ath_modldrv, NULL
2500 + MODREV_1, { (void *)&ath_modldrv, NULL }
2501 2501 };
2502 2502
2503 2503
/*
 * _info(9E): report module information via mod_info(9F).
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
2509 2509
2510 2510 int
2511 2511 _init(void)
2512 2512 {
2513 2513 int status;
2514 2514
2515 2515 status = ddi_soft_state_init(&ath_soft_state_p, sizeof (ath_t), 1);
2516 2516 if (status != 0)
2517 2517 return (status);
2518 2518
2519 2519 mutex_init(&ath_loglock, NULL, MUTEX_DRIVER, NULL);
2520 2520 ath_halfix_init();
2521 2521 mac_init_ops(&ath_dev_ops, "ath");
2522 2522 status = mod_install(&modlinkage);
2523 2523 if (status != 0) {
2524 2524 mac_fini_ops(&ath_dev_ops);
2525 2525 ath_halfix_finit();
2526 2526 mutex_destroy(&ath_loglock);
2527 2527 ddi_soft_state_fini(&ath_soft_state_p);
2528 2528 }
2529 2529
2530 2530 return (status);
2531 2531 }
2532 2532
2533 2533 int
2534 2534 _fini(void)
2535 2535 {
2536 2536 int status;
2537 2537
2538 2538 status = mod_remove(&modlinkage);
2539 2539 if (status == 0) {
2540 2540 mac_fini_ops(&ath_dev_ops);
2541 2541 ath_halfix_finit();
2542 2542 mutex_destroy(&ath_loglock);
2543 2543 ddi_soft_state_fini(&ath_soft_state_p);
2544 2544 }
2545 2545 return (status);
2546 2546 }
↓ open down ↓ |
36 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX