Print this page
7154 arn(7D) walks out of bounds when byteswapping the 4K eeprom
7152 weird condition in arn(7D) needs clarification
7153 delete unused code in arn(7D)
7155 arn(7D) should include the mac fields in the eeprom enumeration
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/arn/arn_main.c
+++ new/usr/src/uts/common/io/arn/arn_main.c
1 1 /*
2 2 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
3 3 * Use is subject to license terms.
4 4 */
5 5
6 6 /*
7 7 * Copyright (c) 2008 Atheros Communications Inc.
8 8 *
9 9 * Permission to use, copy, modify, and/or distribute this software for any
10 10 * purpose with or without fee is hereby granted, provided that the above
11 11 * copyright notice and this permission notice appear in all copies.
12 12 *
13 13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 20 */
21 21
22 22 #include <sys/sysmacros.h>
23 23 #include <sys/param.h>
24 24 #include <sys/types.h>
25 25 #include <sys/signal.h>
26 26 #include <sys/stream.h>
27 27 #include <sys/termio.h>
28 28 #include <sys/errno.h>
29 29 #include <sys/file.h>
30 30 #include <sys/cmn_err.h>
31 31 #include <sys/stropts.h>
32 32 #include <sys/strsubr.h>
33 33 #include <sys/strtty.h>
34 34 #include <sys/kbio.h>
35 35 #include <sys/cred.h>
36 36 #include <sys/stat.h>
37 37 #include <sys/consdev.h>
38 38 #include <sys/kmem.h>
39 39 #include <sys/modctl.h>
40 40 #include <sys/ddi.h>
41 41 #include <sys/sunddi.h>
42 42 #include <sys/pci.h>
43 43 #include <sys/errno.h>
44 44 #include <sys/mac_provider.h>
45 45 #include <sys/dlpi.h>
46 46 #include <sys/ethernet.h>
47 47 #include <sys/list.h>
48 48 #include <sys/byteorder.h>
49 49 #include <sys/strsun.h>
50 50 #include <sys/policy.h>
51 51 #include <inet/common.h>
52 52 #include <inet/nd.h>
53 53 #include <inet/mi.h>
54 54 #include <inet/wifi_ioctl.h>
55 55 #include <sys/mac_wifi.h>
56 56 #include <sys/net80211.h>
57 57 #include <sys/net80211_proto.h>
58 58 #include <sys/net80211_ht.h>
59 59
60 60
61 61 #include "arn_ath9k.h"
62 62 #include "arn_core.h"
63 63 #include "arn_reg.h"
64 64 #include "arn_hw.h"
65 65
66 66 #define ARN_MAX_RSSI 45 /* max rssi */
67 67
68 68 /*
 * Default 11n rates supported by this station.
70 70 */
71 71 extern struct ieee80211_htrateset ieee80211_rateset_11n;
72 72
73 73 /*
74 74 * PIO access attributes for registers
75 75 */
76 76 static ddi_device_acc_attr_t arn_reg_accattr = {
77 77 DDI_DEVICE_ATTR_V0,
78 78 DDI_STRUCTURE_LE_ACC,
79 79 DDI_STRICTORDER_ACC,
80 80 DDI_DEFAULT_ACC
81 81 };
82 82
83 83 /*
84 84 * DMA access attributes for descriptors: NOT to be byte swapped.
85 85 */
86 86 static ddi_device_acc_attr_t arn_desc_accattr = {
87 87 DDI_DEVICE_ATTR_V0,
88 88 DDI_STRUCTURE_LE_ACC,
89 89 DDI_STRICTORDER_ACC,
90 90 DDI_DEFAULT_ACC
91 91 };
92 92
93 93 /*
94 94 * Describes the chip's DMA engine
95 95 */
96 96 static ddi_dma_attr_t arn_dma_attr = {
97 97 DMA_ATTR_V0, /* version number */
98 98 0, /* low address */
99 99 0xffffffffU, /* high address */
100 100 0x3ffffU, /* counter register max */
101 101 1, /* alignment */
102 102 0xFFF, /* burst sizes */
103 103 1, /* minimum transfer size */
104 104 0x3ffffU, /* max transfer size */
105 105 0xffffffffU, /* address register max */
106 106 1, /* no scatter-gather */
107 107 1, /* granularity of device */
108 108 0, /* DMA flags */
109 109 };
110 110
111 111 static ddi_dma_attr_t arn_desc_dma_attr = {
112 112 DMA_ATTR_V0, /* version number */
113 113 0, /* low address */
114 114 0xffffffffU, /* high address */
115 115 0xffffffffU, /* counter register max */
116 116 0x1000, /* alignment */
117 117 0xFFF, /* burst sizes */
118 118 1, /* minimum transfer size */
119 119 0xffffffffU, /* max transfer size */
120 120 0xffffffffU, /* address register max */
121 121 1, /* no scatter-gather */
122 122 1, /* granularity of device */
123 123 0, /* DMA flags */
124 124 };
125 125
126 126 #define ATH_DEF_CACHE_BYTES 32 /* default cache line size */
127 127
128 128 static kmutex_t arn_loglock;
129 129 static void *arn_soft_state_p = NULL;
130 130 static int arn_dwelltime = 200; /* scan interval */
131 131
132 132 static int arn_m_stat(void *, uint_t, uint64_t *);
133 133 static int arn_m_start(void *);
134 134 static void arn_m_stop(void *);
135 135 static int arn_m_promisc(void *, boolean_t);
136 136 static int arn_m_multicst(void *, boolean_t, const uint8_t *);
137 137 static int arn_m_unicst(void *, const uint8_t *);
138 138 static mblk_t *arn_m_tx(void *, mblk_t *);
139 139 static void arn_m_ioctl(void *, queue_t *, mblk_t *);
140 140 static int arn_m_setprop(void *, const char *, mac_prop_id_t,
141 141 uint_t, const void *);
142 142 static int arn_m_getprop(void *, const char *, mac_prop_id_t,
143 143 uint_t, void *);
144 144 static void arn_m_propinfo(void *, const char *, mac_prop_id_t,
145 145 mac_prop_info_handle_t);
146 146
/* MAC Callback Functions */
148 148 static mac_callbacks_t arn_m_callbacks = {
149 149 MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
150 150 arn_m_stat,
151 151 arn_m_start,
152 152 arn_m_stop,
153 153 arn_m_promisc,
154 154 arn_m_multicst,
155 155 arn_m_unicst,
156 156 arn_m_tx,
157 157 NULL,
158 158 arn_m_ioctl,
159 159 NULL,
160 160 NULL,
161 161 NULL,
162 162 arn_m_setprop,
163 163 arn_m_getprop,
164 164 arn_m_propinfo
165 165 };
166 166
167 167 /*
168 168 * ARN_DBG_HW
169 169 * ARN_DBG_REG_IO
170 170 * ARN_DBG_QUEUE
171 171 * ARN_DBG_EEPROM
172 172 * ARN_DBG_XMIT
173 173 * ARN_DBG_RECV
174 174 * ARN_DBG_CALIBRATE
175 175 * ARN_DBG_CHANNEL
176 176 * ARN_DBG_INTERRUPT
177 177 * ARN_DBG_REGULATORY
178 178 * ARN_DBG_ANI
179 179 * ARN_DBG_POWER_MGMT
180 180 * ARN_DBG_KEYCACHE
181 181 * ARN_DBG_BEACON
182 182 * ARN_DBG_RATE
183 183 * ARN_DBG_INIT
184 184 * ARN_DBG_ATTACH
185 185 * ARN_DBG_DEATCH
186 186 * ARN_DBG_AGGR
187 187 * ARN_DBG_RESET
188 188 * ARN_DBG_FATAL
189 189 * ARN_DBG_ANY
190 190 * ARN_DBG_ALL
191 191 */
192 192 uint32_t arn_dbg_mask = 0;
193 193
194 194 /*
195 195 * Exception/warning cases not leading to panic.
196 196 */
197 197 void
198 198 arn_problem(const int8_t *fmt, ...)
199 199 {
200 200 va_list args;
201 201
202 202 mutex_enter(&arn_loglock);
203 203
204 204 va_start(args, fmt);
205 205 vcmn_err(CE_WARN, fmt, args);
206 206 va_end(args);
207 207
208 208 mutex_exit(&arn_loglock);
209 209 }
210 210
211 211 /*
212 212 * Normal log information independent of debug.
213 213 */
214 214 void
215 215 arn_log(const int8_t *fmt, ...)
216 216 {
217 217 va_list args;
218 218
219 219 mutex_enter(&arn_loglock);
220 220
221 221 va_start(args, fmt);
222 222 vcmn_err(CE_CONT, fmt, args);
223 223 va_end(args);
224 224
225 225 mutex_exit(&arn_loglock);
226 226 }
227 227
228 228 void
229 229 arn_dbg(uint32_t dbg_flags, const int8_t *fmt, ...)
230 230 {
231 231 va_list args;
232 232
233 233 if (dbg_flags & arn_dbg_mask) {
234 234 mutex_enter(&arn_loglock);
235 235 va_start(args, fmt);
236 236 vcmn_err(CE_CONT, fmt, args);
237 237 va_end(args);
238 238 mutex_exit(&arn_loglock);
239 239 }
240 240 }
241 241
242 242 /*
243 243 * Read and write, they both share the same lock. We do this to serialize
244 244 * reads and writes on Atheros 802.11n PCI devices only. This is required
245 245 * as the FIFO on these devices can only accept sanely 2 requests. After
246 246 * that the device goes bananas. Serializing the reads/writes prevents this
247 247 * from happening.
248 248 */
249 249 void
250 250 arn_iowrite32(struct ath_hal *ah, uint32_t reg_offset, uint32_t val)
251 251 {
252 252 struct arn_softc *sc = ah->ah_sc;
253 253 if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
254 254 mutex_enter(&sc->sc_serial_rw);
255 255 ddi_put32(sc->sc_io_handle,
256 256 (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)), val);
257 257 mutex_exit(&sc->sc_serial_rw);
258 258 } else {
259 259 ddi_put32(sc->sc_io_handle,
260 260 (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)), val);
261 261 }
262 262 }
263 263
264 264 unsigned int
265 265 arn_ioread32(struct ath_hal *ah, uint32_t reg_offset)
266 266 {
267 267 uint32_t val;
268 268 struct arn_softc *sc = ah->ah_sc;
269 269 if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
270 270 mutex_enter(&sc->sc_serial_rw);
271 271 val = ddi_get32(sc->sc_io_handle,
272 272 (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)));
273 273 mutex_exit(&sc->sc_serial_rw);
274 274 } else {
275 275 val = ddi_get32(sc->sc_io_handle,
276 276 (uint32_t *)((uintptr_t)(sc->mem) + (reg_offset)));
277 277 }
278 278
279 279 return (val);
280 280 }
281 281
282 282 /*
283 283 * Allocate an area of memory and a DMA handle for accessing it
284 284 */
285 285 static int
286 286 arn_alloc_dma_mem(dev_info_t *devinfo, ddi_dma_attr_t *dma_attr, size_t memsize,
287 287 ddi_device_acc_attr_t *attr_p, uint_t alloc_flags,
288 288 uint_t bind_flags, dma_area_t *dma_p)
289 289 {
290 290 int err;
291 291
292 292 /*
293 293 * Allocate handle
294 294 */
295 295 err = ddi_dma_alloc_handle(devinfo, dma_attr,
296 296 DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
297 297 if (err != DDI_SUCCESS)
298 298 return (DDI_FAILURE);
299 299
300 300 /*
301 301 * Allocate memory
302 302 */
303 303 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
304 304 alloc_flags, DDI_DMA_SLEEP, NULL, &dma_p->mem_va,
305 305 &dma_p->alength, &dma_p->acc_hdl);
306 306 if (err != DDI_SUCCESS)
307 307 return (DDI_FAILURE);
308 308
309 309 /*
310 310 * Bind the two together
311 311 */
312 312 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
313 313 dma_p->mem_va, dma_p->alength, bind_flags,
314 314 DDI_DMA_SLEEP, NULL, &dma_p->cookie, &dma_p->ncookies);
315 315 if (err != DDI_DMA_MAPPED)
316 316 return (DDI_FAILURE);
317 317
318 318 dma_p->nslots = ~0U;
319 319 dma_p->size = ~0U;
320 320 dma_p->token = ~0U;
321 321 dma_p->offset = 0;
322 322 return (DDI_SUCCESS);
323 323 }
324 324
/*
 * Free one allocated area of DMAable memory
 */
static void
arn_free_dma_mem(dma_area_t *dma_p)
{
	/*
	 * Teardown mirrors arn_alloc_dma_mem() in reverse order:
	 * unbind the handle first, then release the memory, then the
	 * handle itself.  A NULL dma_hdl marks an area that was never
	 * allocated (or was already freed), so the routine is safely
	 * idempotent.
	 */
	if (dma_p->dma_hdl != NULL) {
		(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
		if (dma_p->acc_hdl != NULL) {
			ddi_dma_mem_free(&dma_p->acc_hdl);
			dma_p->acc_hdl = NULL;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->ncookies = 0;
		dma_p->dma_hdl = NULL;
	}
}
342 342
/*
 * Initialize a tx, rx or beacon buffer list and allocate a DMA
 * buffer of 'buflen' bytes for each of the 'nbuf' entries.
 *
 * *pbf and *pds walk a caller-provided array of ath_buf structures
 * and the (already DMA-mapped) descriptor array; on success they are
 * advanced past the entries consumed here, so the caller can carve
 * several lists out of the same two arrays with successive calls.
 *
 * On failure the partially built list is left in place; the caller
 * is expected to clean up via arn_buflist_cleanup()/arn_desc_free().
 */
static int
arn_buflist_setup(dev_info_t *devinfo,
    struct arn_softc *sc,
    list_t *bflist,
    struct ath_buf **pbf,
    struct ath_desc **pds,
    int nbuf,
    uint_t dmabflags,
    uint32_t buflen)
{
	int i, err;
	struct ath_buf *bf = *pbf;
	struct ath_desc *ds = *pds;

	list_create(bflist, sizeof (struct ath_buf),
	    offsetof(struct ath_buf, bf_node));
	for (i = 0; i < nbuf; i++, bf++, ds++) {
		bf->bf_desc = ds;
		/*
		 * Device-visible address of this descriptor: base
		 * cookie address plus the descriptor's byte offset
		 * within the descriptor array.
		 */
		bf->bf_daddr = sc->sc_desc_dma.cookie.dmac_address +
		    ((uintptr_t)ds - (uintptr_t)sc->sc_desc);
		list_insert_tail(bflist, bf);

		/* alloc DMA memory */
		err = arn_alloc_dma_mem(devinfo, &arn_dma_attr,
		    buflen, &arn_desc_accattr, DDI_DMA_STREAMING,
		    dmabflags, &bf->bf_dma);
		if (err != DDI_SUCCESS)
			return (err);
	}
	*pbf = bf;
	*pds = ds;

	return (DDI_SUCCESS);
}
381 381
382 382 /*
383 383 * Destroy tx, rx or beacon buffer list. Free DMA memory.
384 384 */
385 385 static void
386 386 arn_buflist_cleanup(list_t *buflist)
387 387 {
388 388 struct ath_buf *bf;
389 389
390 390 if (!buflist)
391 391 return;
392 392
393 393 bf = list_head(buflist);
394 394 while (bf != NULL) {
395 395 if (bf->bf_m != NULL) {
396 396 freemsg(bf->bf_m);
397 397 bf->bf_m = NULL;
398 398 }
399 399 /* Free DMA buffer */
400 400 arn_free_dma_mem(&bf->bf_dma);
401 401 if (bf->bf_in != NULL) {
402 402 ieee80211_free_node(bf->bf_in);
403 403 bf->bf_in = NULL;
404 404 }
405 405 list_remove(buflist, bf);
406 406 bf = list_head(buflist);
407 407 }
408 408 list_destroy(buflist);
409 409 }
410 410
411 411 static void
412 412 arn_desc_free(struct arn_softc *sc)
413 413 {
414 414 arn_buflist_cleanup(&sc->sc_txbuf_list);
415 415 arn_buflist_cleanup(&sc->sc_rxbuf_list);
416 416 #ifdef ARN_IBSS
417 417 arn_buflist_cleanup(&sc->sc_bcbuf_list);
418 418 #endif
419 419
420 420 /* Free descriptor DMA buffer */
421 421 arn_free_dma_mem(&sc->sc_desc_dma);
422 422
423 423 kmem_free((void *)sc->sc_vbufptr, sc->sc_vbuflen);
424 424 sc->sc_vbufptr = NULL;
425 425 }
426 426
427 427 static int
428 428 arn_desc_alloc(dev_info_t *devinfo, struct arn_softc *sc)
429 429 {
430 430 int err;
431 431 size_t size;
432 432 struct ath_desc *ds;
433 433 struct ath_buf *bf;
434 434
435 435 #ifdef ARN_IBSS
436 436 size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF);
437 437 #else
438 438 size = sizeof (struct ath_desc) * (ATH_TXBUF + ATH_RXBUF);
439 439 #endif
440 440
441 441 err = arn_alloc_dma_mem(devinfo, &arn_desc_dma_attr, size,
442 442 &arn_desc_accattr, DDI_DMA_CONSISTENT,
443 443 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &sc->sc_desc_dma);
444 444
445 445 /* virtual address of the first descriptor */
446 446 sc->sc_desc = (struct ath_desc *)sc->sc_desc_dma.mem_va;
447 447
448 448 ds = sc->sc_desc;
449 449 ARN_DBG((ARN_DBG_INIT, "arn: arn_desc_alloc(): DMA map: "
450 450 "%p (%d) -> %p\n",
451 451 sc->sc_desc, sc->sc_desc_dma.alength,
452 452 sc->sc_desc_dma.cookie.dmac_address));
453 453
454 454 /* allocate data structures to describe TX/RX DMA buffers */
455 455 #ifdef ARN_IBSS
456 456 sc->sc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF +
457 457 ATH_BCBUF);
458 458 #else
459 459 sc->sc_vbuflen = sizeof (struct ath_buf) * (ATH_TXBUF + ATH_RXBUF);
460 460 #endif
461 461 bf = (struct ath_buf *)kmem_zalloc(sc->sc_vbuflen, KM_SLEEP);
462 462 sc->sc_vbufptr = bf;
463 463
464 464 /* DMA buffer size for each TX/RX packet */
465 465 #ifdef ARN_TX_AGGREGRATION
466 466 sc->tx_dmabuf_size =
467 467 roundup((IEEE80211_MAX_MPDU_LEN + 3840 * 2),
468 468 min(sc->sc_cachelsz, (uint16_t)64));
469 469 #else
470 470 sc->tx_dmabuf_size =
471 471 roundup(IEEE80211_MAX_MPDU_LEN, min(sc->sc_cachelsz, (uint16_t)64));
472 472 #endif
473 473 sc->rx_dmabuf_size =
474 474 roundup(IEEE80211_MAX_MPDU_LEN, min(sc->sc_cachelsz, (uint16_t)64));
475 475
476 476 /* create RX buffer list */
477 477 err = arn_buflist_setup(devinfo, sc, &sc->sc_rxbuf_list, &bf, &ds,
478 478 ATH_RXBUF, DDI_DMA_READ | DDI_DMA_STREAMING, sc->rx_dmabuf_size);
479 479 if (err != DDI_SUCCESS) {
480 480 arn_desc_free(sc);
481 481 return (err);
482 482 }
483 483
484 484 /* create TX buffer list */
485 485 err = arn_buflist_setup(devinfo, sc, &sc->sc_txbuf_list, &bf, &ds,
486 486 ATH_TXBUF, DDI_DMA_STREAMING, sc->tx_dmabuf_size);
487 487 if (err != DDI_SUCCESS) {
488 488 arn_desc_free(sc);
489 489 return (err);
490 490 }
491 491
492 492 /* create beacon buffer list */
493 493 #ifdef ARN_IBSS
494 494 err = arn_buflist_setup(devinfo, sc, &sc->sc_bcbuf_list, &bf, &ds,
↓ open down ↓ |
494 lines elided |
↑ open up ↑ |
495 495 ATH_BCBUF, DDI_DMA_STREAMING);
496 496 if (err != DDI_SUCCESS) {
497 497 arn_desc_free(sc);
498 498 return (err);
499 499 }
500 500 #endif
501 501
502 502 return (DDI_SUCCESS);
503 503 }
504 504
505 -static struct ath_rate_table *
506 -/* LINTED E_STATIC_UNUSED */
507 -arn_get_ratetable(struct arn_softc *sc, uint32_t mode)
508 -{
509 - struct ath_rate_table *rate_table = NULL;
510 -
511 - switch (mode) {
512 - case IEEE80211_MODE_11A:
513 - rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
514 - break;
515 - case IEEE80211_MODE_11B:
516 - rate_table = sc->hw_rate_table[ATH9K_MODE_11B];
517 - break;
518 - case IEEE80211_MODE_11G:
519 - rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
520 - break;
521 -#ifdef ARB_11N
522 - case IEEE80211_MODE_11NA_HT20:
523 - rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
524 - break;
525 - case IEEE80211_MODE_11NG_HT20:
526 - rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
527 - break;
528 - case IEEE80211_MODE_11NA_HT40PLUS:
529 - rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
530 - break;
531 - case IEEE80211_MODE_11NA_HT40MINUS:
532 - rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
533 - break;
534 - case IEEE80211_MODE_11NG_HT40PLUS:
535 - rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
536 - break;
537 - case IEEE80211_MODE_11NG_HT40MINUS:
538 - rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
539 - break;
540 -#endif
541 - default:
542 - ARN_DBG((ARN_DBG_FATAL, "arn: arn_get_ratetable(): "
543 - "invalid mode %u\n", mode));
544 - return (NULL);
545 - }
546 -
547 - return (rate_table);
548 -
549 -}
550 -
/*
 * Switch the driver's current PHY mode: rebuild asc_rixmap (dot11
 * rate value -> index into the h/w rate table), and cache the active
 * rate table, mode and protection-frame rate index in the softc.
 */
static void
arn_setcurmode(struct arn_softc *sc, enum wireless_mode mode)
{
	struct ath_rate_table *rt;
	int i;

	/* invalidate every map slot before rebuilding */
	for (i = 0; i < sizeof (sc->asc_rixmap); i++)
		sc->asc_rixmap[i] = 0xff;

	rt = sc->hw_rate_table[mode];
	ASSERT(rt != NULL);

	for (i = 0; i < rt->rate_cnt; i++)
		sc->asc_rixmap[rt->info[i].dot11rate &
		    IEEE80211_RATE_VAL] = (uint8_t)i; /* LINT */

	sc->sc_currates = rt;
	sc->sc_curmode = mode;

	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 * XXX select protection rate index from rate table.
	 */
	sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
}
577 531
578 532 static enum wireless_mode
579 533 arn_chan2mode(struct ath9k_channel *chan)
580 534 {
581 535 if (chan->chanmode == CHANNEL_A)
582 536 return (ATH9K_MODE_11A);
583 537 else if (chan->chanmode == CHANNEL_G)
584 538 return (ATH9K_MODE_11G);
585 539 else if (chan->chanmode == CHANNEL_B)
586 540 return (ATH9K_MODE_11B);
587 541 else if (chan->chanmode == CHANNEL_A_HT20)
588 542 return (ATH9K_MODE_11NA_HT20);
589 543 else if (chan->chanmode == CHANNEL_G_HT20)
590 544 return (ATH9K_MODE_11NG_HT20);
591 545 else if (chan->chanmode == CHANNEL_A_HT40PLUS)
592 546 return (ATH9K_MODE_11NA_HT40PLUS);
593 547 else if (chan->chanmode == CHANNEL_A_HT40MINUS)
594 548 return (ATH9K_MODE_11NA_HT40MINUS);
595 549 else if (chan->chanmode == CHANNEL_G_HT40PLUS)
596 550 return (ATH9K_MODE_11NG_HT40PLUS);
597 551 else if (chan->chanmode == CHANNEL_G_HT40MINUS)
598 552 return (ATH9K_MODE_11NG_HT40MINUS);
599 553
600 554 return (ATH9K_MODE_11B);
601 555 }
602 556
603 557 static void
604 558 arn_update_txpow(struct arn_softc *sc)
605 559 {
606 560 struct ath_hal *ah = sc->sc_ah;
607 561 uint32_t txpow;
608 562
609 563 if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
610 564 (void) ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
611 565 /* read back in case value is clamped */
612 566 (void) ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
613 567 sc->sc_curtxpow = (uint32_t)txpow;
614 568 }
615 569 }
616 570
/*
 * Convert the 802.11n D2.0 "Minimum MPDU Start Spacing" field to
 * microseconds:
 *	0 -> no restriction (0)
 *	1 -> 1/4 us, 2 -> 1/2 us, 3 -> 1 us (all reported as 1, since
 *	     our lower layer calculations limit precision to 1 us)
 *	4 -> 2 us, 5 -> 4 us, 6 -> 8 us, 7 -> 16 us
 * Out-of-range values map to 0.
 */
uint8_t
parse_mpdudensity(uint8_t mpdudensity)
{
	static const uint8_t spacing_us[] = { 0, 1, 1, 1, 2, 4, 8, 16 };

	if (mpdudensity >= sizeof (spacing_us))
		return (0);

	return (spacing_us[mpdudensity]);
}
654 608
655 609 static void
656 610 arn_setup_rates(struct arn_softc *sc, uint32_t mode)
657 611 {
658 612 int i, maxrates;
659 613 struct ath_rate_table *rate_table = NULL;
660 614 struct ieee80211_rateset *rateset;
661 615 ieee80211com_t *ic = (ieee80211com_t *)sc;
662 616
663 617 /* rate_table = arn_get_ratetable(sc, mode); */
664 618 switch (mode) {
665 619 case IEEE80211_MODE_11A:
666 620 rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
667 621 break;
668 622 case IEEE80211_MODE_11B:
669 623 rate_table = sc->hw_rate_table[ATH9K_MODE_11B];
670 624 break;
671 625 case IEEE80211_MODE_11G:
672 626 rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
673 627 break;
674 628 #ifdef ARN_11N
675 629 case IEEE80211_MODE_11NA_HT20:
676 630 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
677 631 break;
678 632 case IEEE80211_MODE_11NG_HT20:
679 633 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
680 634 break;
681 635 case IEEE80211_MODE_11NA_HT40PLUS:
682 636 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
683 637 break;
684 638 case IEEE80211_MODE_11NA_HT40MINUS:
685 639 rate_table = sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
686 640 break;
687 641 case IEEE80211_MODE_11NG_HT40PLUS:
688 642 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
689 643 break;
690 644 case IEEE80211_MODE_11NG_HT40MINUS:
691 645 rate_table = sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
692 646 break;
693 647 #endif
694 648 default:
695 649 ARN_DBG((ARN_DBG_RATE, "arn: arn_get_ratetable(): "
696 650 "invalid mode %u\n", mode));
697 651 break;
698 652 }
699 653 if (rate_table == NULL)
700 654 return;
701 655 if (rate_table->rate_cnt > ATH_RATE_MAX) {
702 656 ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
703 657 "rate table too small (%u > %u)\n",
704 658 rate_table->rate_cnt, IEEE80211_RATE_MAXSIZE));
705 659 maxrates = ATH_RATE_MAX;
706 660 } else
707 661 maxrates = rate_table->rate_cnt;
708 662
709 663 ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
710 664 "maxrates is %d\n", maxrates));
711 665
712 666 rateset = &ic->ic_sup_rates[mode];
713 667 for (i = 0; i < maxrates; i++) {
714 668 rateset->ir_rates[i] = rate_table->info[i].dot11rate;
715 669 ARN_DBG((ARN_DBG_RATE, "arn: arn_rate_setup(): "
716 670 "%d\n", rate_table->info[i].dot11rate));
717 671 }
718 672 rateset->ir_nrates = (uint8_t)maxrates; /* ??? */
719 673 }
720 674
/*
 * Collect the channel list from the HAL regulatory code and publish
 * it into net80211's ic_sup_channels[], keyed by IEEE channel
 * number.  Also records 11g capability (short preamble/slot) when
 * any G channel is present.
 *
 * Returns 0 on success, EINVAL if no channel list could be built.
 */
static int
arn_setup_channels(struct arn_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	int nchan, i, index;
	uint8_t regclassids[ATH_REGCLASSIDS_MAX];
	uint32_t nregclass = 0;
	struct ath9k_channel *c;

	/* Fill in ah->ah_channels */
	if (!ath9k_regd_init_channels(ah, ATH_CHAN_MAX, (uint32_t *)&nchan,
	    regclassids, ATH_REGCLASSIDS_MAX, &nregclass, CTRY_DEFAULT,
	    B_FALSE, 1)) {
		uint32_t rd = ah->ah_currentRD;
		ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_setup_channels(): "
		    "unable to collect channel list; "
		    "regdomain likely %u country code %u\n",
		    rd, CTRY_DEFAULT));
		return (EINVAL);
	}

	ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_setup_channels(): "
	    "number of channel is %d\n", nchan));

	for (i = 0; i < nchan; i++) {
		c = &ah->ah_channels[i];
		uint32_t flags;
		/* frequency -> IEEE channel number (ic_sup_channels index) */
		index = ath9k_hw_mhz2ieee(ah, c->channel, c->channelFlags);

		/*
		 * NOTE(review): this bounds check is '>' while the
		 * array is indexed by 'index'; confirm ic_sup_channels
		 * really has IEEE80211_CHAN_MAX + 1 entries.
		 */
		if (index > IEEE80211_CHAN_MAX) {
			ARN_DBG((ARN_DBG_CHANNEL,
			    "arn: arn_setup_channels(): "
			    "bad hal channel %d (%u/%x) ignored\n",
			    index, c->channel, c->channelFlags));
			continue;
		}
		/* NB: flags are known to be compatible */
		if (index < 0) {
			/*
			 * can't handle frequency <2400MHz (negative
			 * channels) right now
			 */
			ARN_DBG((ARN_DBG_CHANNEL,
			    "arn: arn_setup_channels(): "
			    "hal channel %d (%u/%x) "
			    "cannot be handled, ignored\n",
			    index, c->channel, c->channelFlags));
			continue;
		}

		/*
		 * Calculate net80211 flags; most are compatible
		 * but some need massaging. Note the static turbo
		 * conversion can be removed once net80211 is updated
		 * to understand static vs. dynamic turbo.
		 */

		flags = c->channelFlags & (CHANNEL_ALL | CHANNEL_PASSIVE);

		if (ic->ic_sup_channels[index].ich_freq == 0) {
			ic->ic_sup_channels[index].ich_freq = c->channel;
			ic->ic_sup_channels[index].ich_flags = flags;
		} else {
			/* channels overlap; e.g. 11g and 11b */
			ic->ic_sup_channels[index].ich_flags |= flags;
		}
		if ((c->channelFlags & CHANNEL_G) == CHANNEL_G) {
			sc->sc_have11g = 1;
			ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
			    IEEE80211_C_SHSLOT; /* short slot time */
		}
	}

	return (0);
}
797 751
798 752 uint32_t
799 753 arn_chan2flags(ieee80211com_t *isc, struct ieee80211_channel *chan)
800 754 {
801 755 uint32_t channel_mode;
802 756 switch (ieee80211_chan2mode(isc, chan)) {
803 757 case IEEE80211_MODE_11NA:
804 758 if (chan->ich_flags & IEEE80211_CHAN_HT40U)
805 759 channel_mode = CHANNEL_A_HT40PLUS;
806 760 else if (chan->ich_flags & IEEE80211_CHAN_HT40D)
807 761 channel_mode = CHANNEL_A_HT40MINUS;
808 762 else
809 763 channel_mode = CHANNEL_A_HT20;
810 764 break;
811 765 case IEEE80211_MODE_11NG:
812 766 if (chan->ich_flags & IEEE80211_CHAN_HT40U)
813 767 channel_mode = CHANNEL_G_HT40PLUS;
814 768 else if (chan->ich_flags & IEEE80211_CHAN_HT40D)
815 769 channel_mode = CHANNEL_G_HT40MINUS;
816 770 else
817 771 channel_mode = CHANNEL_G_HT20;
818 772 break;
819 773 case IEEE80211_MODE_TURBO_G:
820 774 case IEEE80211_MODE_STURBO_A:
821 775 case IEEE80211_MODE_TURBO_A:
822 776 channel_mode = 0;
823 777 break;
824 778 case IEEE80211_MODE_11A:
825 779 channel_mode = CHANNEL_A;
826 780 break;
827 781 case IEEE80211_MODE_11G:
828 782 channel_mode = CHANNEL_B;
829 783 break;
830 784 case IEEE80211_MODE_11B:
831 785 channel_mode = CHANNEL_G;
832 786 break;
833 787 case IEEE80211_MODE_FH:
834 788 channel_mode = 0;
835 789 break;
836 790 default:
837 791 break;
838 792 }
839 793
840 794 return (channel_mode);
841 795 }
842 796
843 797 /*
844 798 * Update internal state after a channel change.
845 799 */
846 800 void
847 801 arn_chan_change(struct arn_softc *sc, struct ieee80211_channel *chan)
848 802 {
849 803 struct ieee80211com *ic = &sc->sc_isc;
850 804 enum ieee80211_phymode mode;
851 805 enum wireless_mode wlmode;
852 806
853 807 /*
854 808 * Change channels and update the h/w rate map
855 809 * if we're switching; e.g. 11a to 11b/g.
856 810 */
857 811 mode = ieee80211_chan2mode(ic, chan);
858 812 switch (mode) {
859 813 case IEEE80211_MODE_11A:
860 814 wlmode = ATH9K_MODE_11A;
861 815 break;
862 816 case IEEE80211_MODE_11B:
863 817 wlmode = ATH9K_MODE_11B;
864 818 break;
865 819 case IEEE80211_MODE_11G:
866 820 wlmode = ATH9K_MODE_11B;
867 821 break;
868 822 default:
869 823 break;
870 824 }
871 825 if (wlmode != sc->sc_curmode)
872 826 arn_setcurmode(sc, wlmode);
873 827
874 828 }
875 829
876 830 /*
877 831 * Set/change channels. If the channel is really being changed, it's done
 * by resetting the chip. To accomplish this we must first clean up any pending
879 833 * DMA, then restart stuff.
880 834 */
static int
arn_set_channel(struct arn_softc *sc, struct ath9k_channel *hchan)
{
	struct ath_hal *ah = sc->sc_ah;
	ieee80211com_t *ic = &sc->sc_isc;
	boolean_t fastcc = B_TRUE;	/* try a fast channel change first */
	boolean_t stopped;
	struct ieee80211_channel chan;
	enum wireless_mode curmode;

	if (sc->sc_flags & SC_OP_INVALID)
		return (EIO);

	/*
	 * Reset only when the channel actually changed, or when a
	 * chainmask update / full reset was explicitly requested.
	 */
	if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
	    hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
	    (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
	    (sc->sc_flags & SC_OP_FULL_RESET)) {
		int status;

		/*
		 * This is only performed if the channel settings have
		 * actually changed.
		 *
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
		(void) ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
		arn_draintxq(sc, B_FALSE); /* clear pending tx frames */
		stopped = arn_stoprecv(sc); /* turn off frame recv */

		/*
		 * XXX: do not flush receive queue here. We don't want
		 * to flush data frames already in queue because of
		 * changing channel.
		 */

		/* fast change is unsafe if recv didn't stop cleanly */
		if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
			fastcc = B_FALSE;

		ARN_DBG((ARN_DBG_CHANNEL, "arn: arn_set_channel(): "
		    "(%u MHz) -> (%u MHz), cflags:%x, chanwidth: %d\n",
		    sc->sc_ah->ah_curchan->channel,
		    hchan->channel, hchan->channelFlags, sc->tx_chan_width));

		if (!ath9k_hw_reset(ah, hchan, sc->tx_chan_width,
		    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
		    sc->sc_ht_extprotspacing, fastcc, &status)) {
			ARN_DBG((ARN_DBG_FATAL, "arn: arn_set_channel(): "
			    "unable to reset channel %u (%uMhz) "
			    "flags 0x%x hal status %u\n",
			    ath9k_hw_mhz2ieee(ah, hchan->channel,
			    hchan->channelFlags),
			    hchan->channel, hchan->channelFlags, status));
			return (EIO);
		}

		sc->sc_curchan = *hchan;

		/* both one-shot requests have now been honored */
		sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
		sc->sc_flags &= ~SC_OP_FULL_RESET;

		if (arn_startrecv(sc) != 0) {
			arn_problem("arn: arn_set_channel(): "
			    "unable to restart recv logic\n");
			return (EIO);
		}

		/*
		 * NOTE(review): 'chan' is a stack local, so
		 * ic_ibss_chan is left pointing at storage that dies
		 * when this function returns -- confirm net80211
		 * copies/re-resolves it before use (looks like a
		 * dangling pointer otherwise).
		 */
		chan.ich_freq = hchan->channel;
		chan.ich_flags = hchan->channelFlags;
		ic->ic_ibss_chan = &chan;

		/*
		 * Change channels and update the h/w rate map
		 * if we're switching; e.g. 11a to 11b/g.
		 */
		curmode = arn_chan2mode(hchan);
		if (curmode != sc->sc_curmode)
			arn_setcurmode(sc, arn_chan2mode(hchan));

		arn_update_txpow(sc);

		(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
	}

	return (0);
}
↓ open down ↓ |
408 lines elided |
↑ open up ↑ |
969 923
970 924 /*
971 925 * This routine performs the periodic noise floor calibration function
972 926 * that is used to adjust and optimize the chip performance. This
973 927 * takes environmental changes (location, temperature) into account.
974 928 * When the task is complete, it reschedules itself depending on the
975 929 * appropriate interval that was calculated.
976 930 */
977 931 static void
978 932 arn_ani_calibrate(void *arg)
979 -
980 933 {
981 934 ieee80211com_t *ic = (ieee80211com_t *)arg;
982 935 struct arn_softc *sc = (struct arn_softc *)ic;
983 936 struct ath_hal *ah = sc->sc_ah;
984 937 boolean_t longcal = B_FALSE;
985 938 boolean_t shortcal = B_FALSE;
986 939 boolean_t aniflag = B_FALSE;
987 940 unsigned int timestamp = drv_hztousec(ddi_get_lbolt())/1000;
988 941 uint32_t cal_interval;
989 942
990 943 /*
991 944 * don't calibrate when we're scanning.
992 945 * we are most likely not on our home channel.
993 946 */
994 947 if (ic->ic_state != IEEE80211_S_RUN)
995 948 goto settimer;
996 949
997 950 /* Long calibration runs independently of short calibration. */
998 951 if ((timestamp - sc->sc_ani.sc_longcal_timer) >= ATH_LONG_CALINTERVAL) {
999 952 longcal = B_TRUE;
1000 953 ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1001 954 "%s: longcal @%lu\n", __func__, drv_hztousec));
1002 955 sc->sc_ani.sc_longcal_timer = timestamp;
1003 956 }
1004 957
1005 958 /* Short calibration applies only while sc_caldone is FALSE */
1006 959 if (!sc->sc_ani.sc_caldone) {
1007 960 if ((timestamp - sc->sc_ani.sc_shortcal_timer) >=
1008 961 ATH_SHORT_CALINTERVAL) {
1009 962 shortcal = B_TRUE;
↓ open down ↓ |
20 lines elided |
↑ open up ↑ |
1010 963 ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1011 964 "%s: shortcal @%lu\n",
1012 965 __func__, drv_hztousec));
1013 966 sc->sc_ani.sc_shortcal_timer = timestamp;
1014 967 sc->sc_ani.sc_resetcal_timer = timestamp;
1015 968 }
1016 969 } else {
1017 970 if ((timestamp - sc->sc_ani.sc_resetcal_timer) >=
1018 971 ATH_RESTART_CALINTERVAL) {
1019 972 ath9k_hw_reset_calvalid(ah, ah->ah_curchan,
1020 - &sc->sc_ani.sc_caldone);
973 + &sc->sc_ani.sc_caldone);
1021 974 if (sc->sc_ani.sc_caldone)
1022 975 sc->sc_ani.sc_resetcal_timer = timestamp;
1023 976 }
1024 977 }
1025 978
1026 979 /* Verify whether we must check ANI */
1027 980 if ((timestamp - sc->sc_ani.sc_checkani_timer) >=
1028 981 ATH_ANI_POLLINTERVAL) {
1029 982 aniflag = B_TRUE;
1030 983 sc->sc_ani.sc_checkani_timer = timestamp;
1031 984 }
1032 985
1033 986 /* Skip all processing if there's nothing to do. */
1034 987 if (longcal || shortcal || aniflag) {
1035 988 /* Call ANI routine if necessary */
1036 989 if (aniflag)
1037 990 ath9k_hw_ani_monitor(ah, &sc->sc_halstats,
1038 991 ah->ah_curchan);
1039 992
1040 993 /* Perform calibration if necessary */
1041 994 if (longcal || shortcal) {
1042 995 boolean_t iscaldone = B_FALSE;
1043 996
1044 997 if (ath9k_hw_calibrate(ah, ah->ah_curchan,
1045 998 sc->sc_rx_chainmask, longcal, &iscaldone)) {
1046 999 if (longcal)
1047 1000 sc->sc_ani.sc_noise_floor =
1048 1001 ath9k_hw_getchan_noise(ah,
1049 1002 ah->ah_curchan);
1050 1003
1051 1004 ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1052 1005 "%s: calibrate chan %u/%x nf: %d\n",
1053 1006 __func__,
1054 1007 ah->ah_curchan->channel,
1055 1008 ah->ah_curchan->channelFlags,
1056 1009 sc->sc_ani.sc_noise_floor));
1057 1010 } else {
1058 1011 ARN_DBG((ARN_DBG_CALIBRATE, "arn: "
1059 1012 "%s: calibrate chan %u/%x failed\n",
1060 1013 __func__,
1061 1014 ah->ah_curchan->channel,
1062 1015 ah->ah_curchan->channelFlags));
1063 1016 }
1064 1017 sc->sc_ani.sc_caldone = iscaldone;
1065 1018 }
1066 1019 }
1067 1020
1068 1021 settimer:
1069 1022 /*
1070 1023 * Set timer interval based on previous results.
1071 1024 * The interval must be the shortest necessary to satisfy ANI,
1072 1025 * short calibration and long calibration.
1073 1026 */
1074 1027 cal_interval = ATH_LONG_CALINTERVAL;
1075 1028 if (sc->sc_ah->ah_config.enable_ani)
1076 1029 cal_interval =
1077 1030 min(cal_interval, (uint32_t)ATH_ANI_POLLINTERVAL);
1078 1031
1079 1032 if (!sc->sc_ani.sc_caldone)
1080 1033 cal_interval = min(cal_interval,
1081 1034 (uint32_t)ATH_SHORT_CALINTERVAL);
1082 1035
1083 1036 sc->sc_scan_timer = 0;
1084 1037 sc->sc_scan_timer = timeout(arn_ani_calibrate, (void *)sc,
1085 1038 drv_usectohz(cal_interval * 1000));
1086 1039 }
1087 1040
1088 1041 static void
1089 1042 arn_stop_caltimer(struct arn_softc *sc)
1090 1043 {
1091 1044 timeout_id_t tmp_id = 0;
1092 1045
1093 1046 while ((sc->sc_cal_timer != 0) && (tmp_id != sc->sc_cal_timer)) {
1094 1047 tmp_id = sc->sc_cal_timer;
1095 1048 (void) untimeout(tmp_id);
1096 1049 }
1097 1050 sc->sc_cal_timer = 0;
1098 1051 }
1099 1052
/*
 * Primary interrupt service routine. Reads and decodes the pseudo-ISR
 * from the hal, hands RX work to the soft interrupt and TX work to the
 * driver taskq, and requests a full chip reset on fatal conditions.
 * Returns DDI_INTR_CLAIMED/UNCLAIMED for shared-interrupt dispatch.
 */
static uint_t
arn_isr(caddr_t arg)
{
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	struct arn_softc *sc = (struct arn_softc *)arg;
	struct ath_hal *ah = sc->sc_ah;
	enum ath9k_int status;
	ieee80211com_t *ic = (ieee80211com_t *)sc;

	ARN_LOCK(sc);

	if (sc->sc_flags & SC_OP_INVALID) {
		/*
		 * The hardware is not ready/present, don't
		 * touch anything. Note this can happen early
		 * on if the IRQ is shared.
		 */
		ARN_UNLOCK(sc);
		return (DDI_INTR_UNCLAIMED);
	}
	if (!ath9k_hw_intrpend(ah)) {	/* shared irq, not for us */
		ARN_UNLOCK(sc);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Figure out the reason(s) for the interrupt. Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to insure we only process bits we requested.
	 */
	(void) ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */

	status &= sc->sc_imask; /* discard unasked-for bits */

	/*
	 * If there are no status bits set, then this interrupt was not
	 * for me (should have been caught above).
	 */
	if (!status) {
		ARN_UNLOCK(sc);
		return (DDI_INTR_UNCLAIMED);
	}

	sc->sc_intrstatus = status;

	if (status & ATH9K_INT_FATAL) {
		/* need a chip reset */
		ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
		    "ATH9K_INT_FATAL\n"));
		goto reset;
	} else if (status & ATH9K_INT_RXORN) {
		/* need a chip reset */
		ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
		    "ATH9K_INT_RXORN\n"));
		goto reset;
	} else {
		if (status & ATH9K_INT_RXEOL) {
			/*
			 * NB: the hardware should re-read the link when
			 * RXE bit is written, but it doesn't work
			 * at least on older hardware revs.
			 */
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_RXEOL\n"));
			sc->sc_rxlink = NULL;
		}
		if (status & ATH9K_INT_TXURN) {
			/* bump tx trigger level */
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_TXURN\n"));
			(void) ath9k_hw_updatetxtriglevel(ah, B_TRUE);
		}
		/* XXX: optimize this */
		if (status & ATH9K_INT_RX) {
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_RX\n"));
			/* Defer the RX drain to the soft interrupt. */
			sc->sc_rx_pend = 1;
			ddi_trigger_softintr(sc->sc_softint_id);
		}
		if (status & ATH9K_INT_TX) {
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_TX\n"));
			/* TX completion runs from the taskq context. */
			if (ddi_taskq_dispatch(sc->sc_tq,
			    arn_tx_int_proc, sc, DDI_NOSLEEP) !=
			    DDI_SUCCESS) {
				arn_problem("arn: arn_isr(): "
				    "No memory for tx taskq\n");
			}
		}
#ifdef ARN_ATH9K_INT_MIB
		if (status & ATH9K_INT_MIB) {
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to
			 * fire.
			 */
			(void) ath9k_hw_set_interrupts(ah, 0);
			/*
			 * Let the hal handle the event. We assume
			 * it will clear whatever condition caused
			 * the interrupt.
			 */
			ath9k_hw_procmibevent(ah, &sc->sc_halstats);
			(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_MIB\n"));
		}
#endif

#ifdef ARN_ATH9K_INT_TIM_TIMER
		if (status & ATH9K_INT_TIM_TIMER) {
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_TIM_TIMER\n"));
			if (!(ah->ah_caps.hw_caps &
			    ATH9K_HW_CAP_AUTOSLEEP)) {
				/*
				 * Clear RxAbort bit so that we can
				 * receive frames
				 */
				ath9k_hw_setrxabort(ah, 0);
				goto reset;
			}
		}
#endif

		if (status & ATH9K_INT_BMISS) {
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_BMISS\n"));
#ifdef ARN_HW_BEACON_MISS_HANDLE
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "handle beacon mmiss by H/W mechanism\n"));
			if (ddi_taskq_dispatch(sc->sc_tq, arn_bmiss_proc,
			    sc, DDI_NOSLEEP) != DDI_SUCCESS) {
				arn_problem("arn: arn_isr(): "
				    "No memory available for bmiss taskq\n");
			}
#else
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "handle beacon mmiss by S/W mechanism\n"));
#endif /* ARN_HW_BEACON_MISS_HANDLE */
		}

		ARN_UNLOCK(sc);

		/* NB: lock is dropped before the checks below. */
#ifdef ARN_ATH9K_INT_CST
		/* carrier sense timeout */
		if (status & ATH9K_INT_CST) {
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_CST\n"));
			return (DDI_INTR_CLAIMED);
		}
#endif

		if (status & ATH9K_INT_SWBA) {
			ARN_DBG((ARN_DBG_INTERRUPT, "arn: arn_isr(): "
			    "ATH9K_INT_SWBA\n"));
			/* This will occur only in Host-AP or Ad-Hoc mode */
			return (DDI_INTR_CLAIMED);
		}
	}

	return (DDI_INTR_CLAIMED);
reset:
	/*
	 * Fatal/RXORN/TIM path: reset the chip with ARN_LOCK still held.
	 * NOTE(review): "Rset" in the message below looks like a typo
	 * for "Reset".
	 */
	ARN_DBG((ARN_DBG_INTERRUPT, "Rset for fatal err\n"));
	(void) arn_reset(ic);
	ARN_UNLOCK(sc);
	return (DDI_INTR_CLAIMED);
}
1269 1222
1270 1223 static int
1271 1224 arn_get_channel(struct arn_softc *sc, struct ieee80211_channel *chan)
1272 1225 {
1273 1226 int i;
1274 1227
1275 1228 for (i = 0; i < sc->sc_ah->ah_nchan; i++) {
1276 1229 if (sc->sc_ah->ah_channels[i].channel == chan->ich_freq)
1277 1230 return (i);
1278 1231 }
1279 1232
1280 1233 return (-1);
1281 1234 }
1282 1235
/*
 * Full chip reset: quiesce interrupts, drain the TX queues, stop RX,
 * reset the hal on the current channel, then restart RX, restore the
 * current mode/txpower, re-enable beacons if they were active, and
 * re-enable interrupts. Returns 0 on success or EIO if the hal reset
 * failed (the restart steps are attempted regardless).
 * NOTE(review): arn_isr() calls this with ARN_LOCK held; other callers
 * should do the same — confirm.
 */
int
arn_reset(ieee80211com_t *ic)
{
	struct arn_softc *sc = (struct arn_softc *)ic;
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;

	(void) ath9k_hw_set_interrupts(ah, 0);
	arn_draintxq(sc, 0);
	(void) arn_stoprecv(sc);

	if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan, sc->tx_chan_width,
	    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
	    sc->sc_ht_extprotspacing, B_FALSE, &status)) {
		ARN_DBG((ARN_DBG_RESET, "arn: arn_reset(): "
		    "unable to reset hardware; hal status %u\n", status));
		error = EIO;
	}

	if (arn_startrecv(sc) != 0)
		ARN_DBG((ARN_DBG_RESET, "arn: arn_reset(): "
		    "unable to start recv logic\n"));

	/*
	 * We may be doing a reset in response to a request
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	arn_setcurmode(sc, arn_chan2mode(sc->sc_ah->ah_curchan));

	arn_update_txpow(sc);

	if (sc->sc_flags & SC_OP_BEACONS)
		arn_beacon_config(sc); /* restart beacons */

	(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);

	return (error);
}
1323 1276
1324 1277 int
1325 1278 arn_get_hal_qnum(uint16_t queue, struct arn_softc *sc)
1326 1279 {
1327 1280 int qnum;
1328 1281
1329 1282 switch (queue) {
1330 1283 case WME_AC_VO:
1331 1284 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
1332 1285 break;
1333 1286 case WME_AC_VI:
1334 1287 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
1335 1288 break;
1336 1289 case WME_AC_BE:
1337 1290 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1338 1291 break;
1339 1292 case WME_AC_BK:
1340 1293 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
1341 1294 break;
1342 1295 default:
1343 1296 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1344 1297 break;
1345 1298 }
1346 1299
1347 1300 return (qnum);
1348 1301 }
1349 1302
/* Table mapping MAC/baseband silicon revision ids to part names. */
static struct {
	uint32_t version;
	const char *name;
} ath_mac_bb_names[] = {
	{ AR_SREV_VERSION_5416_PCI, "5416" },
	{ AR_SREV_VERSION_5416_PCIE, "5418" },
	{ AR_SREV_VERSION_9100, "9100" },
	{ AR_SREV_VERSION_9160, "9160" },
	{ AR_SREV_VERSION_9280, "9280" },
	{ AR_SREV_VERSION_9285, "9285" }
};
1361 1314
/*
 * Table mapping RF (radio) major revision ids to part names.
 * NB: version 0 deliberately maps to "5133" as well.
 */
static struct {
	uint16_t version;
	const char *name;
} ath_rf_names[] = {
	{ 0, "5133" },
	{ AR_RAD5133_SREV_MAJOR, "5133" },
	{ AR_RAD5122_SREV_MAJOR, "5122" },
	{ AR_RAD2133_SREV_MAJOR, "2133" },
	{ AR_RAD2122_SREV_MAJOR, "2122" }
};
1372 1325
1373 1326 /*
1374 1327 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
1375 1328 */
1376 1329
1377 1330 static const char *
1378 1331 arn_mac_bb_name(uint32_t mac_bb_version)
1379 1332 {
1380 1333 int i;
1381 1334
1382 1335 for (i = 0; i < ARRAY_SIZE(ath_mac_bb_names); i++) {
1383 1336 if (ath_mac_bb_names[i].version == mac_bb_version) {
1384 1337 return (ath_mac_bb_names[i].name);
1385 1338 }
1386 1339 }
1387 1340
1388 1341 return ("????");
1389 1342 }
1390 1343
1391 1344 /*
1392 1345 * Return the RF name. "????" is returned if the RF is unknown.
1393 1346 */
1394 1347
1395 1348 static const char *
1396 1349 arn_rf_name(uint16_t rf_version)
1397 1350 {
1398 1351 int i;
1399 1352
1400 1353 for (i = 0; i < ARRAY_SIZE(ath_rf_names); i++) {
1401 1354 if (ath_rf_names[i].version == rf_version) {
1402 1355 return (ath_rf_names[i].name);
1403 1356 }
1404 1357 }
1405 1358
1406 1359 return ("????");
1407 1360 }
1408 1361
1409 1362 static void
1410 1363 arn_next_scan(void *arg)
1411 1364 {
1412 1365 ieee80211com_t *ic = arg;
1413 1366 struct arn_softc *sc = (struct arn_softc *)ic;
1414 1367
1415 1368 sc->sc_scan_timer = 0;
1416 1369 if (ic->ic_state == IEEE80211_S_SCAN) {
1417 1370 sc->sc_scan_timer = timeout(arn_next_scan, (void *)sc,
1418 1371 drv_usectohz(arn_dwelltime * 1000));
1419 1372 ieee80211_next_scan(ic);
1420 1373 }
1421 1374 }
1422 1375
1423 1376 static void
1424 1377 arn_stop_scantimer(struct arn_softc *sc)
1425 1378 {
1426 1379 timeout_id_t tmp_id = 0;
1427 1380
1428 1381 while ((sc->sc_scan_timer != 0) && (tmp_id != sc->sc_scan_timer)) {
1429 1382 tmp_id = sc->sc_scan_timer;
1430 1383 (void) untimeout(tmp_id);
1431 1384 }
1432 1385 sc->sc_scan_timer = 0;
1433 1386 }
1434 1387
/*
 * net80211 state-change hook. Handles the driver-side work for each
 * 802.11 state transition (channel programming, RX filter, BSSID,
 * key MACs, beacon setup, timers), then chains to the saved parent
 * sc_newstate handler. Returns 0 or an errno-style value.
 */
static int32_t
arn_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
{
	struct arn_softc *sc = (struct arn_softc *)ic;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_node *in;
	int32_t i, error;
	uint8_t *bssid;
	uint32_t rfilt;
	enum ieee80211_state ostate;
	struct ath9k_channel *channel;
	int pos;

	/* Should set up & init LED here */

	if (sc->sc_flags & SC_OP_INVALID)
		return (0);

	ostate = ic->ic_state;
	ARN_DBG((ARN_DBG_INIT, "arn: arn_newstate(): "
	    "%x -> %x!\n", ostate, nstate));

	ARN_LOCK(sc);

	/* Stop whichever periodic timer no longer applies. */
	if (nstate != IEEE80211_S_SCAN)
		arn_stop_scantimer(sc);
	if (nstate != IEEE80211_S_RUN)
		arn_stop_caltimer(sc);

	/* Should set LED here */

	if (nstate == IEEE80211_S_INIT) {
		sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
		/*
		 * Disable interrupts.
		 */
		(void) ath9k_hw_set_interrupts
		    (ah, sc->sc_imask &~ ATH9K_INT_GLOBAL);

#ifdef ARN_IBSS
		if (ic->ic_opmode == IEEE80211_M_IBSS) {
			(void) ath9k_hw_stoptxdma(ah, sc->sc_beaconq);
			arn_beacon_return(sc);
		}
#endif
		ARN_UNLOCK(sc);
		ieee80211_stop_watchdog(ic);
		goto done;
	}
	in = ic->ic_bss;

	pos = arn_get_channel(sc, ic->ic_curchan);

	if (pos == -1) {
		ARN_DBG((ARN_DBG_FATAL, "arn: "
		    "%s: Invalid channel\n", __func__));
		error = EINVAL;
		ARN_UNLOCK(sc);
		goto bad;
	}

	/* 40MHz-capable peers get the wide MAC mode. */
	if (in->in_htcap & IEEE80211_HTCAP_CHWIDTH40) {
		arn_update_chainmask(sc);
		sc->tx_chan_width = ATH9K_HT_MACMODE_2040;
	} else
		sc->tx_chan_width = ATH9K_HT_MACMODE_20;

	sc->sc_ah->ah_channels[pos].chanmode =
	    arn_chan2flags(ic, ic->ic_curchan);
	channel = &sc->sc_ah->ah_channels[pos];
	/*
	 * NOTE(review): this check is dead — the address of an array
	 * element can never be NULL. Were it ever taken, it would also
	 * `goto bad' with `error' still uninitialized.
	 */
	if (channel == NULL) {
		arn_problem("arn_newstate(): channel == NULL");
		ARN_UNLOCK(sc);
		goto bad;
	}
	error = arn_set_channel(sc, channel);
	if (error != 0) {
		/* A failed channel set is tolerated while scanning. */
		if (nstate != IEEE80211_S_SCAN) {
			ARN_UNLOCK(sc);
			ieee80211_reset_chan(ic);
			goto bad;
		}
	}

	/*
	 * Get the receive filter according to the
	 * operating mode and state
	 */
	rfilt = arn_calcrxfilter(sc);

	if (nstate == IEEE80211_S_SCAN)
		bssid = ic->ic_macaddr;
	else
		bssid = in->in_bssid;

	ath9k_hw_setrxfilter(ah, rfilt);

	if (nstate == IEEE80211_S_RUN && ic->ic_opmode != IEEE80211_M_IBSS)
		ath9k_hw_write_associd(ah, bssid, in->in_associd);
	else
		ath9k_hw_write_associd(ah, bssid, 0);

	/* Check for WLAN_CAPABILITY_PRIVACY ? */
	if (ic->ic_flags & IEEE80211_F_PRIVACY) {
		/* Point the valid hardware key slots at the new BSSID. */
		for (i = 0; i < IEEE80211_WEP_NKID; i++) {
			if (ath9k_hw_keyisvalid(ah, (uint16_t)i))
				(void) ath9k_hw_keysetmac(ah, (uint16_t)i,
				    bssid);
		}
	}

	if (nstate == IEEE80211_S_RUN) {
		switch (ic->ic_opmode) {
#ifdef ARN_IBSS
		case IEEE80211_M_IBSS:
			/*
			 * Allocate and setup the beacon frame.
			 * Stop any previous beacon DMA.
			 */
			(void) ath9k_hw_stoptxdma(ah, sc->sc_beaconq);
			arn_beacon_return(sc);
			error = arn_beacon_alloc(sc, in);
			if (error != 0) {
				ARN_UNLOCK(sc);
				goto bad;
			}
			/*
			 * If joining an adhoc network defer beacon timer
			 * configuration to the next beacon frame so we
			 * have a current TSF to use. Otherwise we're
			 * starting an ibss/bss so there's no need to delay.
			 */
			if (ic->ic_opmode == IEEE80211_M_IBSS &&
			    ic->ic_bss->in_tstamp.tsf != 0) {
				sc->sc_bsync = 1;
			} else {
				arn_beacon_config(sc);
			}
			break;
#endif /* ARN_IBSS */
		case IEEE80211_M_STA:
			if (ostate != IEEE80211_S_RUN) {
				/*
				 * Defer beacon timer configuration to the next
				 * beacon frame so we have a current TSF to use.
				 * Any TSF collected when scanning is likely old
				 */
#ifdef ARN_IBSS
				sc->sc_bsync = 1;
#else
				/* Configure the beacon and sleep timers. */
				arn_beacon_config(sc);
				/* Reset rssi stats */
				sc->sc_halstats.ns_avgbrssi =
				    ATH_RSSI_DUMMY_MARKER;
				sc->sc_halstats.ns_avgrssi =
				    ATH_RSSI_DUMMY_MARKER;
				sc->sc_halstats.ns_avgtxrssi =
				    ATH_RSSI_DUMMY_MARKER;
				sc->sc_halstats.ns_avgtxrate =
				    ATH_RATE_DUMMY_MARKER;
				/* end */

#endif /* ARN_IBSS */
			}
			break;
		default:
			break;
		}
	} else {
		/* Leaving RUN: stop listening for beacon interrupts. */
		sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
		(void) ath9k_hw_set_interrupts(ah, sc->sc_imask);
	}

	/*
	 * Reset the rate control state.
	 */
	arn_rate_ctl_reset(sc, nstate);

	ARN_UNLOCK(sc);
done:
	/*
	 * Invoke the parent method to complete the work.
	 */
	error = sc->sc_newstate(ic, nstate, arg);

	/*
	 * Finally, start any timers.
	 */
	if (nstate == IEEE80211_S_RUN) {
		ieee80211_start_watchdog(ic, 1);
		ASSERT(sc->sc_cal_timer == 0);
		/* Kick off periodic calibration after 100ms. */
		sc->sc_cal_timer = timeout(arn_ani_calibrate, (void *)sc,
		    drv_usectohz(100 * 1000));
	} else if ((nstate == IEEE80211_S_SCAN) && (ostate != nstate)) {
		/* start ap/neighbor scan timer */
		/* ASSERT(sc->sc_scan_timer == 0); */
		if (sc->sc_scan_timer != 0) {
			(void) untimeout(sc->sc_scan_timer);
			sc->sc_scan_timer = 0;
		}
		sc->sc_scan_timer = timeout(arn_next_scan, (void *)sc,
		    drv_usectohz(arn_dwelltime * 1000));
	}

bad:
	return (error);
}
1643 1596
/*
 * Periodic watchdog. While associated (RUN state) it drives the
 * legacy rate-control pass and, unless hardware beacon-miss handling
 * is compiled in, forces a reassociation after ~10s without beacons.
 * It then chains to ieee80211_watchdog() and re-arms itself.
 */
static void
arn_watchdog(void *arg)
{
	struct arn_softc *sc = arg;
	ieee80211com_t *ic = &sc->sc_isc;
	int ntimer = 0;

	ARN_LOCK(sc);
	ic->ic_watchdog_timer = 0;
	if (sc->sc_flags & SC_OP_INVALID) {
		ARN_UNLOCK(sc);
		return;
	}

	if (ic->ic_state == IEEE80211_S_RUN) {
		/*
		 * Start the background rate control thread if we
		 * are not configured to use a fixed xmit rate.
		 */
#ifdef ARN_LEGACY_RC
		if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
			sc->sc_stats.ast_rate_calls ++;
			if (ic->ic_opmode == IEEE80211_M_STA)
				arn_rate_ctl(ic, ic->ic_bss);
			else
				ieee80211_iterate_nodes(&ic->ic_sta,
				    arn_rate_ctl, sc);
		}
#endif /* ARN_LEGACY_RC */

#ifdef ARN_HW_BEACON_MISS_HANDLE
		/* nothing to do here */
#else
		/* currently set 10 seconds as beacon miss threshold */
		if (ic->ic_beaconmiss++ > 100) {
			ARN_DBG((ARN_DBG_BEACON, "arn_watchdog():"
			    "Beacon missed for 10 seconds, run"
			    "ieee80211_new_state(ic, IEEE80211_S_INIT, -1)\n"));
			ARN_UNLOCK(sc);
			(void) ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
			return;
		}
#endif /* ARN_HW_BEACON_MISS_HANDLE */

		ntimer = 1;	/* re-arm for another second */
	}
	ARN_UNLOCK(sc);

	ieee80211_watchdog(ic);
	if (ntimer != 0)
		ieee80211_start_watchdog(ic, ntimer);
}
1696 1649
1697 1650 /* ARGSUSED */
1698 1651 static struct ieee80211_node *
1699 1652 arn_node_alloc(ieee80211com_t *ic)
1700 1653 {
1701 1654 struct ath_node *an;
1702 1655 #ifdef ARN_TX_AGGREGATION
1703 1656 struct arn_softc *sc = (struct arn_softc *)ic;
1704 1657 #endif
1705 1658
1706 1659 an = kmem_zalloc(sizeof (struct ath_node), KM_SLEEP);
1707 1660
1708 1661 /* legacy rate control */
1709 1662 #ifdef ARN_LEGACY_RC
1710 1663 arn_rate_update(sc, &an->an_node, 0);
1711 1664 #endif
1712 1665
1713 1666 #ifdef ARN_TX_AGGREGATION
1714 1667 if (sc->sc_flags & SC_OP_TXAGGR) {
1715 1668 arn_tx_node_init(sc, an);
1716 1669 }
1717 1670 #endif /* ARN_TX_AGGREGATION */
1718 1671
1719 1672 an->last_rssi = ATH_RSSI_DUMMY_MARKER;
1720 1673
1721 1674 return ((an != NULL) ? &an->an_node : NULL);
1722 1675 }
1723 1676
/*
 * net80211 node free hook: scrub any queued TX buffers that still
 * point at this node, run the generic node cleanup, release the IE
 * blobs, and free the enclosing ath_node.
 */
static void
arn_node_free(struct ieee80211_node *in)
{
	ieee80211com_t *ic = in->in_ic;
	struct arn_softc *sc = (struct arn_softc *)ic;
	struct ath_buf *bf;
	struct ath_txq *txq;
	int32_t i;

#ifdef ARN_TX_AGGREGATION
	if (sc->sc_flags & SC_OP_TXAGGR)
		arn_tx_node_cleanup(sc, in);
#endif /* TX_AGGREGATION */

	/*
	 * Walk every active TX queue and clear bf_in on buffers that
	 * reference the node being freed, so completion processing
	 * cannot touch a stale node pointer.
	 */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ARN_TXQ_SETUP(sc, i)) {
			txq = &sc->sc_txq[i];
			mutex_enter(&txq->axq_lock);
			bf = list_head(&txq->axq_list);
			while (bf != NULL) {
				if (bf->bf_in == in) {
					bf->bf_in = NULL;
				}
				bf = list_next(&txq->axq_list, bf);
			}
			mutex_exit(&txq->axq_lock);
		}
	}

	ic->ic_node_cleanup(in);

	if (in->in_wpa_ie != NULL)
		ieee80211_free(in->in_wpa_ie);

	if (in->in_wme_ie != NULL)
		ieee80211_free(in->in_wme_ie);

	if (in->in_htcap_ie != NULL)
		ieee80211_free(in->in_htcap_ie);

	/* The ieee80211_node is the first member of struct ath_node. */
	kmem_free(in, sizeof (struct ath_node));
}
1766 1719
1767 1720 /*
1768 1721 * Allocate tx/rx key slots for TKIP. We allocate one slot for
1769 1722 * each key. MIC is right after the decrypt/encrypt key.
1770 1723 */
/*
 * Find a free key/MIC slot pair (i, i+64) in the key cache bitmap for
 * TKIP without split MIC. On success marks both slots used, stores
 * the index through txkeyix/rxkeyix and returns 1; returns 0 when no
 * pair is free.
 */
static uint16_t
arn_key_alloc_pair(struct arn_softc *sc, ieee80211_keyix *txkeyix,
    ieee80211_keyix *rxkeyix)
{
	uint16_t i, keyix;

	ASSERT(!sc->sc_splitmic);
	/*
	 * Only the first quarter of the bitmap is scanned so that the
	 * paired slot keyix+64 stays inside the map.
	 */
	for (i = 0; i < ARRAY_SIZE(sc->sc_keymap)/4; i++) {
		uint8_t b = sc->sc_keymap[i];
		if (b == 0xff)
			continue;	/* all 8 slots in this byte taken */
		/* b is shifted so bit 0 always tracks slot keyix. */
		for (keyix = i * NBBY; keyix < (i + 1) * NBBY;
		    keyix++, b >>= 1) {
			if ((b & 1) || is_set(keyix+64, sc->sc_keymap)) {
				/* full pair unavailable */
				continue;
			}
			set_bit(keyix, sc->sc_keymap);
			set_bit(keyix+64, sc->sc_keymap);
			ARN_DBG((ARN_DBG_KEYCACHE,
			    "arn_key_alloc_pair(): key pair %u,%u\n",
			    keyix, keyix+64));
			*txkeyix = *rxkeyix = keyix;
			return (1);
		}
	}
	ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_pair():"
	    " out of pair space\n"));

	return (0);
}
1802 1755
1803 1756 /*
1804 1757 * Allocate tx/rx key slots for TKIP. We allocate two slots for
1805 1758 * each key, one for decrypt/encrypt and the other for the MIC.
1806 1759 */
/*
 * Find a free quad of key cache slots (i, i+32, i+64, i+32+64) for
 * TKIP with split MIC: TX key, RX key, and their two MIC slots. On
 * success marks all four used, stores the base index through
 * txkeyix/rxkeyix and returns 1; returns 0 when no quad is free.
 *
 * NB: the `goto again' deliberately jumps back INTO the while body to
 * advance to the next free bit in this byte — do not "clean up" this
 * control flow without retesting.
 */
static int
arn_key_alloc_2pair(struct arn_softc *sc, ieee80211_keyix *txkeyix,
    ieee80211_keyix *rxkeyix)
{
	uint16_t i, keyix;

	ASSERT(sc->sc_splitmic);
	for (i = 0; i < ARRAY_SIZE(sc->sc_keymap)/4; i++) {
		uint8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots in this byte are free.
			 */
			keyix = i*NBBY;
			while (b & 1) {
again:
				keyix++;
				b >>= 1;
			}
			/* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
			if (is_set(keyix+32, sc->sc_keymap) ||
			    is_set(keyix+64, sc->sc_keymap) ||
			    is_set(keyix+32+64, sc->sc_keymap)) {
				/* full pair unavailable */
				if (keyix == (i+1)*NBBY) {
					/* no slots were appropriate, advance */
					continue;
				}
				goto again;
			}
			set_bit(keyix, sc->sc_keymap);
			set_bit(keyix+64, sc->sc_keymap);
			set_bit(keyix+32, sc->sc_keymap);
			set_bit(keyix+32+64, sc->sc_keymap);
			ARN_DBG((ARN_DBG_KEYCACHE,
			    "arn_key_alloc_2pair(): key pair %u,%u %u,%u\n",
			    keyix, keyix+64,
			    keyix+32, keyix+32+64));
			*txkeyix = *rxkeyix = keyix;
			return (1);
		}
	}
	ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_2pair(): "
	    " out of pair space\n"));

	return (0);
}
1854 1807 /*
1855 1808 * Allocate a single key cache slot.
1856 1809 */
1857 1810 static int
1858 1811 arn_key_alloc_single(struct arn_softc *sc, ieee80211_keyix *txkeyix,
1859 1812 ieee80211_keyix *rxkeyix)
1860 1813 {
1861 1814 uint16_t i, keyix;
1862 1815
1863 1816 /* try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
1864 1817 for (i = 0; i < ARRAY_SIZE(sc->sc_keymap); i++) {
1865 1818 uint8_t b = sc->sc_keymap[i];
1866 1819
1867 1820 if (b != 0xff) {
1868 1821 /*
1869 1822 * One or more slots are free.
1870 1823 */
1871 1824 keyix = i*NBBY;
1872 1825 while (b & 1)
1873 1826 keyix++, b >>= 1;
1874 1827 set_bit(keyix, sc->sc_keymap);
1875 1828 ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_alloc_single(): "
1876 1829 "key %u\n", keyix));
1877 1830 *txkeyix = *rxkeyix = keyix;
1878 1831 return (1);
1879 1832 }
1880 1833 }
1881 1834 return (0);
1882 1835 }
1883 1836
1884 1837 /*
1885 1838 * Allocate one or more key cache slots for a unicast key. The
1886 1839 * key itself is needed only to identify the cipher. For hardware
1887 1840 * TKIP with split cipher+MIC keys we allocate two key cache slot
1888 1841 * pairs so that we can setup separate TX and RX MIC keys. Note
1889 1842 * that the MIC key for a TKIP key at slot i is assumed by the
1890 1843 * hardware to be at slot i+64. This limits TKIP keys to the first
1891 1844 * 64 entries.
1892 1845 */
1893 1846 /* ARGSUSED */
1894 1847 int
1895 1848 arn_key_alloc(ieee80211com_t *ic, const struct ieee80211_key *k,
1896 1849 ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1897 1850 {
1898 1851 struct arn_softc *sc = (struct arn_softc *)ic;
1899 1852
1900 1853 /*
1901 1854 * We allocate two pair for TKIP when using the h/w to do
1902 1855 * the MIC. For everything else, including software crypto,
1903 1856 * we allocate a single entry. Note that s/w crypto requires
1904 1857 * a pass-through slot on the 5211 and 5212. The 5210 does
1905 1858 * not support pass-through cache entries and we map all
1906 1859 * those requests to slot 0.
1907 1860 */
1908 1861 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
1909 1862 return (arn_key_alloc_single(sc, keyix, rxkeyix));
1910 1863 } else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
1911 1864 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
1912 1865 if (sc->sc_splitmic)
1913 1866 return (arn_key_alloc_2pair(sc, keyix, rxkeyix));
1914 1867 else
1915 1868 return (arn_key_alloc_pair(sc, keyix, rxkeyix));
1916 1869 } else {
1917 1870 return (arn_key_alloc_single(sc, keyix, rxkeyix));
1918 1871 }
1919 1872 }
1920 1873
1921 1874 /*
1922 1875 * Delete an entry in the key cache allocated by ath_key_alloc.
1923 1876 */
/*
 * Delete a key cache entry previously set up by arn_key_alloc()/
 * arn_key_set(): reset the hardware slot(s) and release the keymap
 * bits for dynamically allocated (non-global) keys. Always returns 1.
 */
int
arn_key_delete(ieee80211com_t *ic, const struct ieee80211_key *k)
{
	struct arn_softc *sc = (struct arn_softc *)ic;
	struct ath_hal *ah = sc->sc_ah;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	ieee80211_keyix keyix = k->wk_keyix;

	ARN_DBG((ARN_DBG_KEYCACHE, "arn_key_delete():"
	    " delete key %u ic_cipher=0x%x\n", keyix, cip->ic_cipher));

	(void) ath9k_hw_keyreset(ah, keyix);
	/*
	 * Handle split tx/rx keying required for TKIP with h/w MIC.
	 */
	if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
		(void) ath9k_hw_keyreset(ah, keyix+32); /* RX key */

	if (keyix >= IEEE80211_WEP_NKID) {
		/*
		 * Don't touch keymap entries for global keys so
		 * they are never considered for dynamic allocation.
		 */
		clr_bit(keyix, sc->sc_keymap);
		if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
		    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
			/*
			 * If splitmic is true +64 is TX key MIC,
			 * else +64 is RX key + RX key MIC.
			 */
			clr_bit(keyix+64, sc->sc_keymap);
			if (sc->sc_splitmic) {
				/* Rx key */
				clr_bit(keyix+32, sc->sc_keymap);
				/* RX key MIC */
				clr_bit(keyix+32+64, sc->sc_keymap);
			}
		}
	}
	return (1);
}
1966 1919
1967 1920 /*
1968 1921 * Set a TKIP key into the hardware. This handles the
1969 1922 * potential distribution of key state to multiple key
1970 1923 * cache slots for TKIP.
1971 1924 */
/*
 * Program a TKIP key into the hardware key cache, distributing the
 * cipher key and the TX/RX MIC keys across the slot layout chosen by
 * arn_key_alloc(). Returns the hal's non-zero/zero success status.
 * Assumes the 256-bit TKIP layout: 16-byte cipher key, then 8-byte
 * TX MIC, then 8-byte RX MIC.
 */
static int
arn_keyset_tkip(struct arn_softc *sc, const struct ieee80211_key *k,
    struct ath9k_keyval *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	uint8_t *key_rxmic = NULL;
	uint8_t *key_txmic = NULL;
	uint8_t *key = (uint8_t *)&(k->wk_key[0]);
	struct ath_hal *ah = sc->sc_ah;

	key_txmic = key + 16;
	key_rxmic = key + 24;

	if (mac == NULL) {
		/* Group key installation */
		(void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
		return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk,
		    mac, B_FALSE));
	}
	if (!sc->sc_splitmic) {
		/*
		 * data key goes at first index,
		 * the hal handles the MIC keys at index+64.
		 */
		(void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));
		(void) memcpy(hk->kv_txmic, key_txmic, sizeof (hk->kv_txmic));
		return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk,
		    mac, B_FALSE));
	}
	/*
	 * TX key goes at first index, RX key at +32.
	 * The hal handles the MIC keys at index+64.
	 */
	(void) memcpy(hk->kv_mic, key_txmic, sizeof (hk->kv_mic));
	if (!(ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk, NULL,
	    B_FALSE))) {
		/* Txmic entry failed. No need to proceed further */
		ARN_DBG((ARN_DBG_KEYCACHE,
		    "%s Setting TX MIC Key Failed\n", __func__));
		return (0);
	}

	/* Reuse hk for the RX half: same buffer, RX MIC this time. */
	(void) memcpy(hk->kv_mic, key_rxmic, sizeof (hk->kv_mic));

	/* XXX delete tx key on failure? */
	return (ath9k_hw_set_keycache_entry(ah, k->wk_keyix, hk, mac, B_FALSE));

}
2019 1972
2020 1973 int
2021 1974 arn_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
2022 1975 const uint8_t mac[IEEE80211_ADDR_LEN])
2023 1976 {
2024 1977 struct arn_softc *sc = (struct arn_softc *)ic;
2025 1978 const struct ieee80211_cipher *cip = k->wk_cipher;
2026 1979 struct ath9k_keyval hk;
2027 1980
2028 1981 /* cipher table */
2029 1982 static const uint8_t ciphermap[] = {
2030 1983 ATH9K_CIPHER_WEP, /* IEEE80211_CIPHER_WEP */
2031 1984 ATH9K_CIPHER_TKIP, /* IEEE80211_CIPHER_TKIP */
2032 1985 ATH9K_CIPHER_AES_OCB, /* IEEE80211_CIPHER_AES_OCB */
2033 1986 ATH9K_CIPHER_AES_CCM, /* IEEE80211_CIPHER_AES_CCM */
2034 1987 ATH9K_CIPHER_CKIP, /* IEEE80211_CIPHER_CKIP */
2035 1988 ATH9K_CIPHER_CLR, /* IEEE80211_CIPHER_NONE */
2036 1989 };
2037 1990
2038 1991 bzero(&hk, sizeof (hk));
2039 1992
2040 1993 /*
2041 1994 * Software crypto uses a "clear key" so non-crypto
2042 1995 * state kept in the key cache are maintainedd so that
2043 1996 * rx frames have an entry to match.
2044 1997 */
2045 1998 if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
2046 1999 ASSERT(cip->ic_cipher < 6);
2047 2000 hk.kv_type = ciphermap[cip->ic_cipher];
2048 2001 hk.kv_len = k->wk_keylen;
2049 2002 bcopy(k->wk_key, hk.kv_val, k->wk_keylen);
2050 2003 } else {
2051 2004 hk.kv_type = ATH9K_CIPHER_CLR;
2052 2005 }
2053 2006
2054 2007 if (hk.kv_type == ATH9K_CIPHER_TKIP &&
2055 2008 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2056 2009 return (arn_keyset_tkip(sc, k, &hk, mac));
2057 2010 } else {
2058 2011 return (ath9k_hw_set_keycache_entry(sc->sc_ah,
2059 2012 k->wk_keyix, &hk, mac, B_FALSE));
2060 2013 }
2061 2014 }
2062 2015
2063 2016 /*
2064 2017 * Enable/Disable short slot timing
2065 2018 */
2066 2019 void
2067 2020 arn_set_shortslot(ieee80211com_t *ic, int onoff)
2068 2021 {
2069 2022 struct ath_hal *ah = ((struct arn_softc *)ic)->sc_ah;
2070 2023
2071 2024 if (onoff)
2072 2025 (void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_9);
2073 2026 else
2074 2027 (void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_20);
2075 2028 }
2076 2029
/*
 * Bring the interface up: map the current net80211 channel onto the
 * HAL channel list, reset the hardware on that channel, start the
 * receive engine and install the interrupt mask.  Returns 0 on
 * success or an errno value.  Caller must hold the softc lock.
 */
static int
arn_open(struct arn_softc *sc)
{
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ieee80211_channel *curchan = ic->ic_curchan;
	struct ath9k_channel *init_channel;
	int error = 0, pos, status;

	ARN_LOCK_ASSERT(sc);

	/* Locate the current channel in the HAL's channel table. */
	pos = arn_get_channel(sc, curchan);
	if (pos == -1) {
		ARN_DBG((ARN_DBG_FATAL, "arn: "
		    "%s: Invalid channel\n", __func__));
		error = EINVAL;
		goto error;
	}

	/* Always start in 20MHz mode; HT40 is negotiated later. */
	sc->tx_chan_width = ATH9K_HT_MACMODE_20;

	if (sc->sc_curmode == ATH9K_MODE_11A) {
		sc->sc_ah->ah_channels[pos].chanmode = CHANNEL_A;
	} else {
		sc->sc_ah->ah_channels[pos].chanmode = CHANNEL_G;
	}

	init_channel = &sc->sc_ah->ah_channels[pos];

	/* Reset SERDES registers */
	ath9k_hw_configpcipowersave(sc->sc_ah, 0);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''. On return the hardware is known to
	 * be powered up and with interrupts disabled. This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	if (!ath9k_hw_reset(sc->sc_ah, init_channel,
	    sc->tx_chan_width, sc->sc_tx_chainmask,
	    sc->sc_rx_chainmask, sc->sc_ht_extprotspacing,
	    B_FALSE, &status)) {
		ARN_DBG((ARN_DBG_FATAL, "arn: "
		    "%s: unable to reset hardware; hal status %u "
		    "(freq %u flags 0x%x)\n", __func__, status,
		    init_channel->channel, init_channel->channelFlags));

		error = EIO;
		goto error;
	}

	/*
	 * This is needed only to setup initial state
	 * but it's best done after a reset.
	 */
	arn_update_txpow(sc);

	/*
	 * Setup the hardware after reset:
	 * The receive engine is set going.
	 * Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (arn_startrecv(sc) != 0) {
		ARN_DBG((ARN_DBG_INIT, "arn: "
		    "%s: unable to start recv logic\n", __func__));
		error = EIO;
		goto error;
	}

	/* Setup our intr mask. */
	sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX |
	    ATH9K_INT_RXEOL | ATH9K_INT_RXORN |
	    ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
#ifdef ARN_ATH9K_HW_CAP_GTT
	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
		sc->sc_imask |= ATH9K_INT_GTT;
#endif

	/*
	 * NOTE(review): this block is guarded by ARN_ATH9K_HW_CAP_GTT
	 * but tests ATH9K_HW_CAP_HT and enables ATH9K_INT_CST; the
	 * guard macro looks copy-pasted from the GTT block above --
	 * confirm which preprocessor symbol was intended.
	 */
#ifdef ARN_ATH9K_HW_CAP_GTT
	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
		sc->sc_imask |= ATH9K_INT_CST;
#endif

	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
#ifdef ARN_ATH9K_INT_MIB
	if (ath9k_hw_phycounters(sc->sc_ah) &&
	    ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
	    (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
		sc->sc_imask |= ATH9K_INT_MIB;
#endif
	/*
	 * Some hardware processes the TIM IE and fires an
	 * interrupt when the TIM bit is set. For hardware
	 * that does, if not overridden by configuration,
	 * enable the TIM interrupt when operating as station.
	 */
#ifdef ARN_ATH9K_INT_TIM
	if ((sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
	    (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
	    !sc->sc_config.swBeaconProcess)
		sc->sc_imask |= ATH9K_INT_TIM;
#endif
	/* Switch PHY mode if the chosen channel demands it. */
	if (arn_chan2mode(init_channel) != sc->sc_curmode)
		arn_setcurmode(sc, arn_chan2mode(init_channel));
	ARN_DBG((ARN_DBG_INIT, "arn: "
	    "%s: current mode after arn_setcurmode is %d\n",
	    __func__, sc->sc_curmode));

	sc->sc_isrunning = 1;

	/* Disable BMISS interrupt when we're not associated */
	sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
	(void) ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);

	return (0);

error:
	return (error);
}
2201 2154
/*
 * Take the interface down: push net80211 back to INIT state, mask
 * all interrupts, and quiesce the tx/rx paths.  Caller must hold the
 * softc lock; it is dropped and reacquired around the net80211 calls
 * (which must not be made with the driver lock held).
 */
static void
arn_close(struct arn_softc *sc)
{
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ath_hal *ah = sc->sc_ah;

	ARN_LOCK_ASSERT(sc);

	/* Nothing to do if we never came up. */
	if (!sc->sc_isrunning)
		return;

	/*
	 * Shutdown the hardware and driver
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	ARN_UNLOCK(sc);
	ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
	ieee80211_stop_watchdog(ic);
	ARN_LOCK(sc);

	/*
	 * make sure h/w will not generate any interrupt
	 * before setting the invalid flag.
	 */
	(void) ath9k_hw_set_interrupts(ah, 0);

	if (!(sc->sc_flags & SC_OP_INVALID)) {
		/* Hardware still present: drain tx, stop rx and PHY. */
		arn_draintxq(sc, 0);
		(void) arn_stoprecv(sc);
		(void) ath9k_hw_phy_disable(ah);
	} else {
		/* Hardware gone; just forget the rx link state. */
		sc->sc_rxlink = NULL;
	}

	sc->sc_isrunning = 0;
}
2239 2192
/*
 * MAC callback functions
 */

/*
 * mc_getstat(9E) entry point: report a single statistic in *val.
 * Counters not tracked by the driver itself are delegated to
 * net80211 via ieee80211_stat().  Returns 0, or ENOTSUP for
 * statistics nobody tracks.
 */
static int
arn_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	struct arn_softc *sc = arg;
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ieee80211_node *in;
	struct ieee80211_rateset *rs;

	ARN_LOCK(sc);
	switch (stat) {
	case MAC_STAT_IFSPEED:
		/* current tx rate in bits/sec: rate units are 500kb/s */
		in = ic->ic_bss;
		rs = &in->in_rates;
		*val = (rs->ir_rates[in->in_txrate] & IEEE80211_RATE_VAL) / 2 *
		    1000000ull;
		break;
	case MAC_STAT_NOXMTBUF:
		*val = sc->sc_stats.ast_tx_nobuf +
		    sc->sc_stats.ast_tx_nobufmgt;
		break;
	case MAC_STAT_IERRORS:
		*val = sc->sc_stats.ast_rx_tooshort;
		break;
	case MAC_STAT_RBYTES:
		*val = ic->ic_stats.is_rx_bytes;
		break;
	case MAC_STAT_IPACKETS:
		*val = ic->ic_stats.is_rx_frags;
		break;
	case MAC_STAT_OBYTES:
		*val = ic->ic_stats.is_tx_bytes;
		break;
	case MAC_STAT_OPACKETS:
		*val = ic->ic_stats.is_tx_frags;
		break;
	case MAC_STAT_OERRORS:
	case WIFI_STAT_TX_FAILED:
		*val = sc->sc_stats.ast_tx_fifoerr +
		    sc->sc_stats.ast_tx_xretries +
		    sc->sc_stats.ast_tx_discard;
		break;
	case WIFI_STAT_TX_RETRANS:
		*val = sc->sc_stats.ast_tx_xretries;
		break;
	case WIFI_STAT_FCS_ERRORS:
		*val = sc->sc_stats.ast_rx_crcerr;
		break;
	case WIFI_STAT_WEP_ERRORS:
		*val = sc->sc_stats.ast_rx_badcrypt;
		break;
	case WIFI_STAT_TX_FRAGS:
	case WIFI_STAT_MCAST_TX:
	case WIFI_STAT_RTS_SUCCESS:
	case WIFI_STAT_RTS_FAILURE:
	case WIFI_STAT_ACK_FAILURE:
	case WIFI_STAT_RX_FRAGS:
	case WIFI_STAT_MCAST_RX:
	case WIFI_STAT_RX_DUPS:
		/* net80211 maintains these; drop our lock before calling */
		ARN_UNLOCK(sc);
		return (ieee80211_stat(ic, stat, val));
	default:
		ARN_UNLOCK(sc);
		return (ENOTSUP);
	}
	ARN_UNLOCK(sc);

	return (0);
}
2311 2264
2312 2265 int
2313 2266 arn_m_start(void *arg)
2314 2267 {
2315 2268 struct arn_softc *sc = arg;
2316 2269 int err = 0;
2317 2270
2318 2271 ARN_LOCK(sc);
2319 2272
2320 2273 /*
2321 2274 * Stop anything previously setup. This is safe
2322 2275 * whether this is the first time through or not.
2323 2276 */
2324 2277
2325 2278 arn_close(sc);
2326 2279
2327 2280 if ((err = arn_open(sc)) != 0) {
2328 2281 ARN_UNLOCK(sc);
2329 2282 return (err);
2330 2283 }
2331 2284
2332 2285 /* H/W is reday now */
2333 2286 sc->sc_flags &= ~SC_OP_INVALID;
2334 2287
2335 2288 ARN_UNLOCK(sc);
2336 2289
2337 2290 return (0);
2338 2291 }
2339 2292
2340 2293 static void
2341 2294 arn_m_stop(void *arg)
2342 2295 {
2343 2296 struct arn_softc *sc = arg;
2344 2297
2345 2298 ARN_LOCK(sc);
2346 2299 arn_close(sc);
2347 2300
2348 2301 /* disable HAL and put h/w to sleep */
2349 2302 (void) ath9k_hw_disable(sc->sc_ah);
2350 2303 ath9k_hw_configpcipowersave(sc->sc_ah, 1);
2351 2304
2352 2305 /* XXX: hardware will not be ready in suspend state */
2353 2306 sc->sc_flags |= SC_OP_INVALID;
2354 2307 ARN_UNLOCK(sc);
2355 2308 }
2356 2309
2357 2310 static int
2358 2311 arn_m_promisc(void *arg, boolean_t on)
2359 2312 {
2360 2313 struct arn_softc *sc = arg;
2361 2314 struct ath_hal *ah = sc->sc_ah;
2362 2315 uint32_t rfilt;
2363 2316
2364 2317 ARN_LOCK(sc);
2365 2318
2366 2319 rfilt = ath9k_hw_getrxfilter(ah);
2367 2320 if (on)
2368 2321 rfilt |= ATH9K_RX_FILTER_PROM;
2369 2322 else
2370 2323 rfilt &= ~ATH9K_RX_FILTER_PROM;
2371 2324 sc->sc_promisc = on;
2372 2325 ath9k_hw_setrxfilter(ah, rfilt);
2373 2326
2374 2327 ARN_UNLOCK(sc);
2375 2328
2376 2329 return (0);
2377 2330 }
2378 2331
2379 2332 static int
2380 2333 arn_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2381 2334 {
2382 2335 struct arn_softc *sc = arg;
2383 2336 struct ath_hal *ah = sc->sc_ah;
2384 2337 uint32_t val, index, bit;
2385 2338 uint8_t pos;
2386 2339 uint32_t *mfilt = sc->sc_mcast_hash;
2387 2340
2388 2341 ARN_LOCK(sc);
2389 2342
2390 2343 /* calculate XOR of eight 6bit values */
2391 2344 val = ARN_LE_READ_32(mca + 0);
2392 2345 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2393 2346 val = ARN_LE_READ_32(mca + 3);
2394 2347 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2395 2348 pos &= 0x3f;
2396 2349 index = pos / 32;
2397 2350 bit = 1 << (pos % 32);
2398 2351
2399 2352 if (add) { /* enable multicast */
2400 2353 sc->sc_mcast_refs[pos]++;
2401 2354 mfilt[index] |= bit;
2402 2355 } else { /* disable multicast */
2403 2356 if (--sc->sc_mcast_refs[pos] == 0)
2404 2357 mfilt[index] &= ~bit;
2405 2358 }
2406 2359 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
2407 2360
2408 2361 ARN_UNLOCK(sc);
2409 2362 return (0);
2410 2363 }
2411 2364
2412 2365 static int
2413 2366 arn_m_unicst(void *arg, const uint8_t *macaddr)
2414 2367 {
2415 2368 struct arn_softc *sc = arg;
2416 2369 struct ath_hal *ah = sc->sc_ah;
2417 2370 ieee80211com_t *ic = (ieee80211com_t *)sc;
2418 2371
2419 2372 ARN_DBG((ARN_DBG_XMIT, "ath: ath_gld_saddr(): "
2420 2373 "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
2421 2374 macaddr[0], macaddr[1], macaddr[2],
2422 2375 macaddr[3], macaddr[4], macaddr[5]));
2423 2376
2424 2377 ARN_LOCK(sc);
2425 2378 IEEE80211_ADDR_COPY(sc->sc_isc.ic_macaddr, macaddr);
2426 2379 (void) ath9k_hw_setmac(ah, sc->sc_isc.ic_macaddr);
2427 2380 (void) arn_reset(ic);
2428 2381 ARN_UNLOCK(sc);
2429 2382 return (0);
2430 2383 }
2431 2384
/*
 * mc_tx(9E) entry point: transmit a chain of message blocks.  Returns
 * the unsent remainder of the chain on ENOMEM so the framework can
 * retry later, or NULL when everything was consumed (sent or dropped).
 */
static mblk_t *
arn_m_tx(void *arg, mblk_t *mp)
{
	struct arn_softc *sc = arg;
	int error = 0;
	mblk_t *next;
	ieee80211com_t *ic = (ieee80211com_t *)sc;

	/*
	 * No data frames go out unless we're associated; this
	 * should not happen as the 802.11 layer does not enable
	 * the xmit queue until we enter the RUN state.
	 */
	if (ic->ic_state != IEEE80211_S_RUN) {
		ARN_DBG((ARN_DBG_XMIT, "arn: arn_m_tx(): "
		    "discard, state %u\n", ic->ic_state));
		sc->sc_stats.ast_tx_discard++;
		freemsgchain(mp);
		return (NULL);
	}

	while (mp != NULL) {
		/* detach the head so arn_tx() sees a single message */
		next = mp->b_next;
		mp->b_next = NULL;
		error = arn_tx(ic, mp, IEEE80211_FC0_TYPE_DATA);
		if (error != 0) {
			/* re-link before deciding what to do with the rest */
			mp->b_next = next;
			if (error == ENOMEM) {
				/* out of tx buffers: return chain for retry */
				break;
			} else {
				/* unrecoverable: drop the whole chain */
				freemsgchain(mp);
				return (NULL);
			}
		}
		mp = next;
	}

	return (mp);
}
2471 2424
2472 2425 static void
2473 2426 arn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2474 2427 {
2475 2428 struct arn_softc *sc = arg;
2476 2429 int32_t err;
2477 2430
2478 2431 err = ieee80211_ioctl(&sc->sc_isc, wq, mp);
2479 2432
2480 2433 ARN_LOCK(sc);
2481 2434 if (err == ENETRESET) {
2482 2435 if (!(sc->sc_flags & SC_OP_INVALID)) {
2483 2436 ARN_UNLOCK(sc);
2484 2437
2485 2438 (void) arn_m_start(sc);
2486 2439
2487 2440 (void) ieee80211_new_state(&sc->sc_isc,
2488 2441 IEEE80211_S_SCAN, -1);
2489 2442 ARN_LOCK(sc);
2490 2443 }
2491 2444 }
2492 2445 ARN_UNLOCK(sc);
2493 2446 }
2494 2447
2495 2448 static int
2496 2449 arn_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2497 2450 uint_t wldp_length, const void *wldp_buf)
2498 2451 {
2499 2452 struct arn_softc *sc = arg;
2500 2453 int err;
2501 2454
2502 2455 err = ieee80211_setprop(&sc->sc_isc, pr_name, wldp_pr_num,
2503 2456 wldp_length, wldp_buf);
2504 2457
2505 2458 ARN_LOCK(sc);
2506 2459
2507 2460 if (err == ENETRESET) {
2508 2461 if (!(sc->sc_flags & SC_OP_INVALID)) {
2509 2462 ARN_UNLOCK(sc);
2510 2463 (void) arn_m_start(sc);
2511 2464 (void) ieee80211_new_state(&sc->sc_isc,
2512 2465 IEEE80211_S_SCAN, -1);
2513 2466 ARN_LOCK(sc);
2514 2467 }
2515 2468 err = 0;
2516 2469 }
2517 2470
2518 2471 ARN_UNLOCK(sc);
2519 2472
2520 2473 return (err);
2521 2474 }
2522 2475
2523 2476 /* ARGSUSED */
2524 2477 static int
2525 2478 arn_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2526 2479 uint_t wldp_length, void *wldp_buf)
2527 2480 {
2528 2481 struct arn_softc *sc = arg;
2529 2482 int err = 0;
2530 2483
2531 2484 err = ieee80211_getprop(&sc->sc_isc, pr_name, wldp_pr_num,
2532 2485 wldp_length, wldp_buf);
2533 2486
2534 2487 return (err);
2535 2488 }
2536 2489
2537 2490 static void
2538 2491 arn_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
2539 2492 mac_prop_info_handle_t prh)
2540 2493 {
2541 2494 struct arn_softc *sc = arg;
2542 2495
2543 2496 ieee80211_propinfo(&sc->sc_isc, pr_name, wldp_pr_num, prh);
2544 2497 }
2545 2498
2546 2499 /* return bus cachesize in 4B word units */
2547 2500 static void
2548 2501 arn_pci_config_cachesize(struct arn_softc *sc)
2549 2502 {
2550 2503 uint8_t csz;
2551 2504
2552 2505 /*
2553 2506 * Cache line size is used to size and align various
2554 2507 * structures used to communicate with the hardware.
2555 2508 */
2556 2509 csz = pci_config_get8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ);
2557 2510 if (csz == 0) {
2558 2511 /*
2559 2512 * We must have this setup properly for rx buffer
2560 2513 * DMA to work so force a reasonable value here if it
2561 2514 * comes up zero.
2562 2515 */
2563 2516 csz = ATH_DEF_CACHE_BYTES / sizeof (uint32_t);
2564 2517 pci_config_put8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ,
2565 2518 csz);
2566 2519 }
2567 2520 sc->sc_cachelsz = csz << 2;
2568 2521 }
2569 2522
2570 2523 static int
2571 2524 arn_pci_setup(struct arn_softc *sc)
2572 2525 {
2573 2526 uint16_t command;
2574 2527
2575 2528 /*
2576 2529 * Enable memory mapping and bus mastering
2577 2530 */
2578 2531 ASSERT(sc != NULL);
2579 2532 command = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_COMM);
2580 2533 command |= PCI_COMM_MAE | PCI_COMM_ME;
2581 2534 pci_config_put16(sc->sc_cfg_handle, PCI_CONF_COMM, command);
2582 2535 command = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_COMM);
2583 2536 if ((command & PCI_COMM_MAE) == 0) {
2584 2537 arn_problem("arn: arn_pci_setup(): "
2585 2538 "failed to enable memory mapping\n");
2586 2539 return (EIO);
2587 2540 }
2588 2541 if ((command & PCI_COMM_ME) == 0) {
2589 2542 arn_problem("arn: arn_pci_setup(): "
2590 2543 "failed to enable bus mastering\n");
2591 2544 return (EIO);
2592 2545 }
2593 2546 ARN_DBG((ARN_DBG_INIT, "arn: arn_pci_setup(): "
2594 2547 "set command reg to 0x%x \n", command));
2595 2548
2596 2549 return (0);
2597 2550 }
2598 2551
2599 2552 static void
2600 2553 arn_get_hw_encap(struct arn_softc *sc)
2601 2554 {
2602 2555 ieee80211com_t *ic;
2603 2556 struct ath_hal *ah;
2604 2557
2605 2558 ic = (ieee80211com_t *)sc;
2606 2559 ah = sc->sc_ah;
2607 2560
2608 2561 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2609 2562 ATH9K_CIPHER_AES_CCM, NULL))
2610 2563 ic->ic_caps |= IEEE80211_C_AES_CCM;
2611 2564 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2612 2565 ATH9K_CIPHER_AES_OCB, NULL))
2613 2566 ic->ic_caps |= IEEE80211_C_AES;
2614 2567 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2615 2568 ATH9K_CIPHER_TKIP, NULL))
2616 2569 ic->ic_caps |= IEEE80211_C_TKIP;
2617 2570 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
2618 2571 ATH9K_CIPHER_WEP, NULL))
2619 2572 ic->ic_caps |= IEEE80211_C_WEP;
2620 2573 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
↓ open down ↓ |
1590 lines elided |
↑ open up ↑ |
2621 2574 ATH9K_CIPHER_MIC, NULL))
2622 2575 ic->ic_caps |= IEEE80211_C_TKIPMIC;
2623 2576 }
2624 2577
2625 2578 static void
2626 2579 arn_setup_ht_cap(struct arn_softc *sc)
2627 2580 {
2628 2581 #define ATH9K_HT_CAP_MAXRXAMPDU_65536 0x3 /* 2 ^ 16 */
2629 2582 #define ATH9K_HT_CAP_MPDUDENSITY_8 0x6 /* 8 usec */
2630 2583
2631 - /* LINTED E_FUNC_SET_NOT_USED */
2632 - uint8_t tx_streams;
2633 2584 uint8_t rx_streams;
2634 2585
2635 2586 arn_ht_conf *ht_info = &sc->sc_ht_conf;
2636 2587
2637 2588 ht_info->ht_supported = B_TRUE;
2638 2589
2639 2590 /* Todo: IEEE80211_HTCAP_SMPS */
2640 2591 ht_info->cap = IEEE80211_HTCAP_CHWIDTH40|
2641 2592 IEEE80211_HTCAP_SHORTGI40 |
2642 2593 IEEE80211_HTCAP_DSSSCCK40;
2643 2594
2644 2595 ht_info->ampdu_factor = ATH9K_HT_CAP_MAXRXAMPDU_65536;
2645 2596 ht_info->ampdu_density = ATH9K_HT_CAP_MPDUDENSITY_8;
2646 2597
2647 2598 /* set up supported mcs set */
2648 2599 (void) memset(&ht_info->rx_mcs_mask, 0, sizeof (ht_info->rx_mcs_mask));
2649 - tx_streams = ISP2(sc->sc_ah->ah_caps.tx_chainmask) ? 1 : 2;
2650 2600 rx_streams = ISP2(sc->sc_ah->ah_caps.rx_chainmask) ? 1 : 2;
2651 2601
2652 2602 ht_info->rx_mcs_mask[0] = 0xff;
2653 2603 if (rx_streams >= 2)
2654 2604 ht_info->rx_mcs_mask[1] = 0xff;
2655 2605 }
2656 2606
2657 2607 /* xxx should be used for ht rate set negotiating ? */
2658 2608 static void
2659 2609 arn_overwrite_11n_rateset(struct arn_softc *sc)
2660 2610 {
2661 2611 uint8_t *ht_rs = sc->sc_ht_conf.rx_mcs_mask;
2662 2612 int mcs_idx, mcs_count = 0;
2663 2613 int i, j;
2664 2614
2665 2615 (void) memset(&ieee80211_rateset_11n, 0,
2666 2616 sizeof (ieee80211_rateset_11n));
2667 2617 for (i = 0; i < 10; i++) {
2668 2618 for (j = 0; j < 8; j++) {
2669 2619 if (ht_rs[i] & (1 << j)) {
2670 2620 mcs_idx = i * 8 + j;
2671 2621 if (mcs_idx >= IEEE80211_HTRATE_MAXSIZE) {
2672 2622 break;
2673 2623 }
2674 2624
2675 2625 ieee80211_rateset_11n.rs_rates[mcs_idx] =
2676 2626 (uint8_t)mcs_idx;
2677 2627 mcs_count++;
2678 2628 }
2679 2629 }
2680 2630 }
2681 2631
2682 2632 ieee80211_rateset_11n.rs_nrates = (uint8_t)mcs_count;
2683 2633
2684 2634 ARN_DBG((ARN_DBG_RATE, "arn_overwrite_11n_rateset(): "
2685 2635 "MCS rate set supported by this station is as follows:\n"));
2686 2636
2687 2637 for (i = 0; i < ieee80211_rateset_11n.rs_nrates; i++) {
2688 2638 ARN_DBG((ARN_DBG_RATE, "MCS rate %d is %d\n",
2689 2639 i, ieee80211_rateset_11n.rs_rates[i]));
2690 2640 }
2691 2641
2692 2642 }
2693 2643
/*
 * Update WME parameters for a transmit queue.
 */

/*
 * Push the WME channel parameters for access category 'ac' into the
 * corresponding hardware transmit queue.  Returns 1 on success,
 * 0 on failure (note: NOT an errno convention -- see arn_wme_update()).
 */
static int
arn_tx_queue_update(struct arn_softc *sc, int ac)
{
#define	ATH_EXPONENT_TO_VALUE(v)	((1<<v)-1)
#define	ATH_TXOP_TO_US(v)		(v<<5)
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ath_txq *txq;
	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
	struct ath_hal *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;

	/* read-modify-write the current hardware queue properties */
	txq = &sc->sc_txq[arn_get_hal_qnum(ac, sc)];
	(void) ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi);

	/*
	 * TXQ_FLAG_TXOKINT_ENABLE = 0x0001
	 * TXQ_FLAG_TXERRINT_ENABLE = 0x0001
	 * TXQ_FLAG_TXDESCINT_ENABLE = 0x0002
	 * TXQ_FLAG_TXEOLINT_ENABLE = 0x0004
	 * TXQ_FLAG_TXURNINT_ENABLE = 0x0008
	 * TXQ_FLAG_BACKOFF_DISABLE = 0x0010
	 * TXQ_FLAG_COMPRESSION_ENABLE = 0x0020
	 * TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE = 0x0040
	 * TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE = 0x0080
	 */

	/* xxx should update these flags here? */
#if 0
	qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
	    TXQ_FLAG_TXERRINT_ENABLE |
	    TXQ_FLAG_TXDESCINT_ENABLE |
	    TXQ_FLAG_TXURNINT_ENABLE;
#endif

	/* translate WME exponent/txop encodings into HAL units */
	qi.tqi_aifs = wmep->wmep_aifsn;
	qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
	qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
	qi.tqi_readyTime = 0;
	qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);

	ARN_DBG((ARN_DBG_INIT,
	    "%s:"
	    "Q%u"
	    "qflags 0x%x"
	    "aifs %u"
	    "cwmin %u"
	    "cwmax %u"
	    "burstTime %u\n",
	    __func__,
	    txq->axq_qnum,
	    qi.tqi_qflags,
	    qi.tqi_aifs,
	    qi.tqi_cwmin,
	    qi.tqi_cwmax,
	    qi.tqi_burstTime));

	if (!ath9k_hw_set_txq_props(ah, txq->axq_qnum, &qi)) {
		arn_problem("unable to update hardware queue "
		    "parameters for %s traffic!\n",
		    ieee80211_wme_acnames[ac]);
		return (0);
	} else {
		/* push to H/W */
		(void) ath9k_hw_resettxqueue(ah, txq->axq_qnum);
		return (1);
	}

#undef ATH_TXOP_TO_US
#undef ATH_EXPONENT_TO_VALUE
}
2767 2717
2768 2718 /* Update WME parameters */
2769 2719 static int
2770 2720 arn_wme_update(ieee80211com_t *ic)
2771 2721 {
2772 2722 struct arn_softc *sc = (struct arn_softc *)ic;
2773 2723
2774 2724 /* updateing */
2775 2725 return (!arn_tx_queue_update(sc, WME_AC_BE) ||
2776 2726 !arn_tx_queue_update(sc, WME_AC_BK) ||
2777 2727 !arn_tx_queue_update(sc, WME_AC_VI) ||
2778 2728 !arn_tx_queue_update(sc, WME_AC_VO) ? EIO : 0);
2779 2729 }
2780 2730
2781 2731 /*
2782 2732 * Update tx/rx chainmask. For legacy association,
2783 2733 * hard code chainmask to 1x1, for 11n association, use
2784 2734 * the chainmask configuration.
2785 2735 */
2786 2736 void
2787 2737 arn_update_chainmask(struct arn_softc *sc)
2788 2738 {
2789 2739 boolean_t is_ht = B_FALSE;
2790 2740 sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
2791 2741
2792 2742 is_ht = sc->sc_ht_conf.ht_supported;
2793 2743 if (is_ht) {
2794 2744 sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
2795 2745 sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
2796 2746 } else {
2797 2747 sc->sc_tx_chainmask = 1;
2798 2748 sc->sc_rx_chainmask = 1;
2799 2749 }
2800 2750
2801 2751 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2802 2752 "tx_chainmask = %d, rx_chainmask = %d\n",
2803 2753 sc->sc_tx_chainmask, sc->sc_rx_chainmask));
2804 2754 }
2805 2755
2806 2756 static int
2807 2757 arn_resume(dev_info_t *devinfo)
2808 2758 {
2809 2759 struct arn_softc *sc;
2810 2760 int ret = DDI_SUCCESS;
2811 2761
2812 2762 sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
2813 2763 if (sc == NULL) {
2814 2764 ARN_DBG((ARN_DBG_INIT, "ath: ath_resume(): "
2815 2765 "failed to get soft state\n"));
2816 2766 return (DDI_FAILURE);
2817 2767 }
2818 2768
2819 2769 ARN_LOCK(sc);
2820 2770 /*
2821 2771 * Set up config space command register(s). Refuse
2822 2772 * to resume on failure.
2823 2773 */
2824 2774 if (arn_pci_setup(sc) != 0) {
2825 2775 ARN_DBG((ARN_DBG_INIT, "ath: ath_resume(): "
2826 2776 "ath_pci_setup() failed\n"));
2827 2777 ARN_UNLOCK(sc);
2828 2778 return (DDI_FAILURE);
2829 2779 }
2830 2780
2831 2781 if (!(sc->sc_flags & SC_OP_INVALID))
2832 2782 ret = arn_open(sc);
2833 2783 ARN_UNLOCK(sc);
2834 2784
2835 2785 return (ret);
2836 2786 }
2837 2787
2838 2788 static int
2839 2789 arn_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
2840 2790 {
2841 2791 struct arn_softc *sc;
2842 2792 int instance;
2843 2793 int status;
2844 2794 int32_t err;
2845 2795 uint16_t vendor_id;
2846 2796 uint16_t device_id;
2847 2797 uint32_t i;
2848 2798 uint32_t val;
2849 2799 char strbuf[32];
2850 2800 ieee80211com_t *ic;
2851 2801 struct ath_hal *ah;
2852 2802 wifi_data_t wd = { 0 };
2853 2803 mac_register_t *macp;
2854 2804
2855 2805 switch (cmd) {
2856 2806 case DDI_ATTACH:
2857 2807 break;
2858 2808 case DDI_RESUME:
2859 2809 return (arn_resume(devinfo));
2860 2810 default:
2861 2811 return (DDI_FAILURE);
2862 2812 }
2863 2813
2864 2814 instance = ddi_get_instance(devinfo);
2865 2815 if (ddi_soft_state_zalloc(arn_soft_state_p, instance) != DDI_SUCCESS) {
2866 2816 ARN_DBG((ARN_DBG_ATTACH, "arn: "
2867 2817 "%s: Unable to alloc softstate\n", __func__));
2868 2818 return (DDI_FAILURE);
2869 2819 }
2870 2820
2871 2821 sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
2872 2822 ic = (ieee80211com_t *)sc;
2873 2823 sc->sc_dev = devinfo;
2874 2824
2875 2825 mutex_init(&sc->sc_genlock, NULL, MUTEX_DRIVER, NULL);
2876 2826 mutex_init(&sc->sc_serial_rw, NULL, MUTEX_DRIVER, NULL);
2877 2827 mutex_init(&sc->sc_txbuflock, NULL, MUTEX_DRIVER, NULL);
2878 2828 mutex_init(&sc->sc_rxbuflock, NULL, MUTEX_DRIVER, NULL);
2879 2829 mutex_init(&sc->sc_resched_lock, NULL, MUTEX_DRIVER, NULL);
2880 2830 #ifdef ARN_IBSS
2881 2831 mutex_init(&sc->sc_bcbuflock, NULL, MUTEX_DRIVER, NULL);
2882 2832 #endif
2883 2833
2884 2834 sc->sc_flags |= SC_OP_INVALID;
2885 2835
2886 2836 err = pci_config_setup(devinfo, &sc->sc_cfg_handle);
2887 2837 if (err != DDI_SUCCESS) {
2888 2838 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2889 2839 "pci_config_setup() failed"));
2890 2840 goto attach_fail0;
2891 2841 }
2892 2842
2893 2843 if (arn_pci_setup(sc) != 0)
2894 2844 goto attach_fail1;
2895 2845
2896 2846 /* Cache line size set up */
2897 2847 arn_pci_config_cachesize(sc);
2898 2848
2899 2849 vendor_id = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_VENID);
2900 2850 device_id = pci_config_get16(sc->sc_cfg_handle, PCI_CONF_DEVID);
2901 2851 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): vendor 0x%x, "
2902 2852 "device id 0x%x, cache size %d\n",
2903 2853 vendor_id, device_id,
2904 2854 pci_config_get8(sc->sc_cfg_handle, PCI_CONF_CACHE_LINESZ)));
2905 2855
2906 2856 pci_config_put8(sc->sc_cfg_handle, PCI_CONF_LATENCY_TIMER, 0xa8);
2907 2857 val = pci_config_get32(sc->sc_cfg_handle, 0x40);
2908 2858 if ((val & 0x0000ff00) != 0)
2909 2859 pci_config_put32(sc->sc_cfg_handle, 0x40, val & 0xffff00ff);
2910 2860
2911 2861 err = ddi_regs_map_setup(devinfo, 1,
2912 2862 &sc->mem, 0, 0, &arn_reg_accattr, &sc->sc_io_handle);
2913 2863 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2914 2864 "regs map1 = %x err=%d\n", sc->mem, err));
2915 2865 if (err != DDI_SUCCESS) {
2916 2866 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2917 2867 "ddi_regs_map_setup() failed"));
2918 2868 goto attach_fail1;
2919 2869 }
2920 2870
2921 2871 ah = ath9k_hw_attach(device_id, sc, sc->mem, &status);
2922 2872 if (ah == NULL) {
2923 2873 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2924 2874 "unable to attach hw: H/W status %u\n",
2925 2875 status));
2926 2876 goto attach_fail2;
2927 2877 }
2928 2878 sc->sc_ah = ah;
2929 2879
2930 2880 ath9k_hw_getmac(ah, ic->ic_macaddr);
2931 2881
2932 2882 /* Get the hardware key cache size. */
2933 2883 sc->sc_keymax = ah->ah_caps.keycache_size;
2934 2884 if (sc->sc_keymax > ATH_KEYMAX) {
2935 2885 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2936 2886 "Warning, using only %u entries in %u key cache\n",
2937 2887 ATH_KEYMAX, sc->sc_keymax));
2938 2888 sc->sc_keymax = ATH_KEYMAX;
2939 2889 }
2940 2890
2941 2891 /*
2942 2892 * Reset the key cache since some parts do not
2943 2893 * reset the contents on initial power up.
2944 2894 */
2945 2895 for (i = 0; i < sc->sc_keymax; i++)
2946 2896 (void) ath9k_hw_keyreset(ah, (uint16_t)i);
2947 2897 /*
2948 2898 * Mark key cache slots associated with global keys
2949 2899 * as in use. If we knew TKIP was not to be used we
2950 2900 * could leave the +32, +64, and +32+64 slots free.
2951 2901 * XXX only for splitmic.
2952 2902 */
2953 2903 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
2954 2904 set_bit(i, sc->sc_keymap);
2955 2905 set_bit(i + 32, sc->sc_keymap);
2956 2906 set_bit(i + 64, sc->sc_keymap);
2957 2907 set_bit(i + 32 + 64, sc->sc_keymap);
2958 2908 }
2959 2909
2960 2910 /* Collect the channel list using the default country code */
2961 2911 err = arn_setup_channels(sc);
2962 2912 if (err == EINVAL) {
2963 2913 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2964 2914 "ERR:arn_setup_channels\n"));
2965 2915 goto attach_fail3;
2966 2916 }
2967 2917
2968 2918 /* default to STA mode */
2969 2919 sc->sc_ah->ah_opmode = ATH9K_M_STA;
2970 2920
2971 2921 /* Setup rate tables */
2972 2922 arn_rate_attach(sc);
2973 2923 arn_setup_rates(sc, IEEE80211_MODE_11A);
2974 2924 arn_setup_rates(sc, IEEE80211_MODE_11B);
2975 2925 arn_setup_rates(sc, IEEE80211_MODE_11G);
2976 2926
2977 2927 /* Setup current mode here */
2978 2928 arn_setcurmode(sc, ATH9K_MODE_11G);
2979 2929
2980 2930 /* 802.11g features */
2981 2931 if (sc->sc_have11g)
2982 2932 ic->ic_caps |= IEEE80211_C_SHPREAMBLE |
2983 2933 IEEE80211_C_SHSLOT; /* short slot time */
2984 2934
2985 2935 /* Temp workaround */
2986 2936 sc->sc_mrretry = 1;
2987 2937 sc->sc_config.ath_aggr_prot = 0;
2988 2938
2989 2939 /* Setup tx/rx descriptors */
2990 2940 err = arn_desc_alloc(devinfo, sc);
2991 2941 if (err != DDI_SUCCESS) {
2992 2942 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
2993 2943 "failed to allocate descriptors: %d\n", err));
2994 2944 goto attach_fail3;
2995 2945 }
2996 2946
2997 2947 if ((sc->sc_tq = ddi_taskq_create(devinfo, "ath_taskq", 1,
2998 2948 TASKQ_DEFAULTPRI, 0)) == NULL) {
2999 2949 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3000 2950 "ERR:ddi_taskq_create\n"));
3001 2951 goto attach_fail4;
3002 2952 }
3003 2953
3004 2954 /*
3005 2955 * Allocate hardware transmit queues: one queue for
3006 2956 * beacon frames and one data queue for each QoS
3007 2957 * priority. Note that the hal handles reseting
3008 2958 * these queues at the needed time.
3009 2959 */
3010 2960 #ifdef ARN_IBSS
3011 2961 sc->sc_beaconq = arn_beaconq_setup(ah);
3012 2962 if (sc->sc_beaconq == (-1)) {
3013 2963 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3014 2964 "unable to setup a beacon xmit queue\n"));
3015 2965 goto attach_fail4;
3016 2966 }
3017 2967 #endif
3018 2968 #ifdef ARN_HOSTAP
3019 2969 sc->sc_cabq = arn_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
3020 2970 if (sc->sc_cabq == NULL) {
3021 2971 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3022 2972 "unable to setup CAB xmit queue\n"));
3023 2973 goto attach_fail4;
3024 2974 }
3025 2975
3026 2976 sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
3027 2977 ath_cabq_update(sc);
3028 2978 #endif
3029 2979
3030 2980 for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
3031 2981 sc->sc_haltype2q[i] = -1;
3032 2982
3033 2983 /* Setup data queues */
3034 2984 /* NB: ensure BK queue is the lowest priority h/w queue */
3035 2985 if (!arn_tx_setup(sc, ATH9K_WME_AC_BK)) {
3036 2986 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3037 2987 "unable to setup xmit queue for BK traffic\n"));
3038 2988 goto attach_fail4;
3039 2989 }
3040 2990 if (!arn_tx_setup(sc, ATH9K_WME_AC_BE)) {
3041 2991 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3042 2992 "unable to setup xmit queue for BE traffic\n"));
3043 2993 goto attach_fail4;
3044 2994 }
3045 2995 if (!arn_tx_setup(sc, ATH9K_WME_AC_VI)) {
3046 2996 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3047 2997 "unable to setup xmit queue for VI traffic\n"));
3048 2998 goto attach_fail4;
3049 2999 }
3050 3000 if (!arn_tx_setup(sc, ATH9K_WME_AC_VO)) {
3051 3001 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3052 3002 "unable to setup xmit queue for VO traffic\n"));
3053 3003 goto attach_fail4;
3054 3004 }
3055 3005
3056 3006 /*
3057 3007 * Initializes the noise floor to a reasonable default value.
3058 3008 * Later on this will be updated during ANI processing.
3059 3009 */
3060 3010
3061 3011 sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR;
3062 3012
3063 3013
3064 3014 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
3065 3015 ATH9K_CIPHER_TKIP, NULL)) {
3066 3016 /*
3067 3017 * Whether we should enable h/w TKIP MIC.
3068 3018 * XXX: if we don't support WME TKIP MIC, then we wouldn't
3069 3019 * report WMM capable, so it's always safe to turn on
3070 3020 * TKIP MIC in this case.
3071 3021 */
3072 3022 (void) ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
3073 3023 0, 1, NULL);
3074 3024 }
3075 3025
3076 3026 /* Get cipher releated capability information */
3077 3027 arn_get_hw_encap(sc);
3078 3028
3079 3029 /*
3080 3030 * Check whether the separate key cache entries
3081 3031 * are required to handle both tx+rx MIC keys.
3082 3032 * With split mic keys the number of stations is limited
3083 3033 * to 27 otherwise 59.
3084 3034 */
3085 3035 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
3086 3036 ATH9K_CIPHER_TKIP, NULL) &&
3087 3037 ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
3088 3038 ATH9K_CIPHER_MIC, NULL) &&
3089 3039 ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
3090 3040 0, NULL))
3091 3041 sc->sc_splitmic = 1;
3092 3042
3093 3043 /* turn on mcast key search if possible */
3094 3044 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
3095 3045 (void) ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
3096 3046 1, NULL);
3097 3047
3098 3048 sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
3099 3049 sc->sc_config.txpowlimit_override = 0;
3100 3050
3101 3051 /* 11n Capabilities */
3102 3052 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
3103 3053 sc->sc_flags |= SC_OP_TXAGGR;
3104 3054 sc->sc_flags |= SC_OP_RXAGGR;
3105 3055 arn_setup_ht_cap(sc);
3106 3056 arn_overwrite_11n_rateset(sc);
3107 3057 }
3108 3058
3109 3059 sc->sc_tx_chainmask = 1;
3110 3060 sc->sc_rx_chainmask = 1;
3111 3061 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3112 3062 "tx_chainmask = %d, rx_chainmask = %d\n",
3113 3063 sc->sc_tx_chainmask, sc->sc_rx_chainmask));
3114 3064
3115 3065 /* arn_update_chainmask(sc); */
3116 3066
3117 3067 (void) ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, B_TRUE, NULL);
3118 3068 sc->sc_defant = ath9k_hw_getdefantenna(ah);
3119 3069
3120 3070 ath9k_hw_getmac(ah, sc->sc_myaddr);
3121 3071 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
3122 3072 ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
3123 3073 ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
3124 3074 (void) ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
3125 3075 }
3126 3076
3127 3077 /* set default value to short slot time */
3128 3078 sc->sc_slottime = ATH9K_SLOT_TIME_9;
3129 3079 (void) ath9k_hw_setslottime(ah, ATH9K_SLOT_TIME_9);
3130 3080
3131 3081 /* initialize beacon slots */
3132 3082 for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
3133 3083 sc->sc_bslot[i] = ATH_IF_ID_ANY;
3134 3084
3135 3085 /* Save MISC configurations */
3136 3086 sc->sc_config.swBeaconProcess = 1;
3137 3087
3138 3088 /* Support QoS/WME */
3139 3089 ic->ic_caps |= IEEE80211_C_WME;
3140 3090 ic->ic_wme.wme_update = arn_wme_update;
3141 3091
3142 3092 /* Support 802.11n/HT */
3143 3093 if (sc->sc_ht_conf.ht_supported) {
3144 3094 ic->ic_htcaps =
3145 3095 IEEE80211_HTCAP_CHWIDTH40 |
3146 3096 IEEE80211_HTCAP_SHORTGI40 |
3147 3097 IEEE80211_HTCAP_DSSSCCK40 |
3148 3098 IEEE80211_HTCAP_MAXAMSDU_7935 |
3149 3099 IEEE80211_HTC_HT |
3150 3100 IEEE80211_HTC_AMSDU |
3151 3101 IEEE80211_HTCAP_RXSTBC_2STREAM;
3152 3102
3153 3103 #ifdef ARN_TX_AGGREGATION
3154 3104 ic->ic_htcaps |= IEEE80211_HTC_AMPDU;
3155 3105 #endif
3156 3106 }
3157 3107
3158 3108 /* Header padding requested by driver */
3159 3109 ic->ic_flags |= IEEE80211_F_DATAPAD;
3160 3110 /* Support WPA/WPA2 */
3161 3111 ic->ic_caps |= IEEE80211_C_WPA;
3162 3112 #if 0
3163 3113 ic->ic_caps |= IEEE80211_C_TXFRAG; /* handle tx frags */
3164 3114 ic->ic_caps |= IEEE80211_C_BGSCAN; /* capable of bg scanning */
3165 3115 #endif
3166 3116 ic->ic_phytype = IEEE80211_T_HT;
3167 3117 ic->ic_opmode = IEEE80211_M_STA;
3168 3118 ic->ic_state = IEEE80211_S_INIT;
3169 3119 ic->ic_maxrssi = ARN_MAX_RSSI;
3170 3120 ic->ic_set_shortslot = arn_set_shortslot;
3171 3121 ic->ic_xmit = arn_tx;
3172 3122 ieee80211_attach(ic);
3173 3123
3174 3124 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3175 3125 "ic->ic_curchan->ich_freq: %d\n", ic->ic_curchan->ich_freq));
3176 3126
3177 3127 /* different instance has different WPA door */
3178 3128 (void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
3179 3129 ddi_driver_name(devinfo),
3180 3130 ddi_get_instance(devinfo));
3181 3131
3182 3132 if (sc->sc_ht_conf.ht_supported) {
3183 3133 sc->sc_recv_action = ic->ic_recv_action;
3184 3134 ic->ic_recv_action = arn_ampdu_recv_action;
3185 3135 // sc->sc_send_action = ic->ic_send_action;
3186 3136 // ic->ic_send_action = arn_ampdu_send_action;
3187 3137
3188 3138 ic->ic_ampdu_rxmax = sc->sc_ht_conf.ampdu_factor;
3189 3139 ic->ic_ampdu_density = sc->sc_ht_conf.ampdu_density;
3190 3140 ic->ic_ampdu_limit = ic->ic_ampdu_rxmax;
3191 3141 }
3192 3142
3193 3143 /* Override 80211 default routines */
3194 3144 sc->sc_newstate = ic->ic_newstate;
3195 3145 ic->ic_newstate = arn_newstate;
3196 3146 #ifdef ARN_IBSS
3197 3147 sc->sc_recv_mgmt = ic->ic_recv_mgmt;
3198 3148 ic->ic_recv_mgmt = arn_recv_mgmt;
3199 3149 #endif
3200 3150 ic->ic_watchdog = arn_watchdog;
3201 3151 ic->ic_node_alloc = arn_node_alloc;
3202 3152 ic->ic_node_free = arn_node_free;
3203 3153 ic->ic_crypto.cs_key_alloc = arn_key_alloc;
3204 3154 ic->ic_crypto.cs_key_delete = arn_key_delete;
3205 3155 ic->ic_crypto.cs_key_set = arn_key_set;
3206 3156
3207 3157 ieee80211_media_init(ic);
3208 3158
3209 3159 /*
3210 3160 * initialize default tx key
3211 3161 */
3212 3162 ic->ic_def_txkey = 0;
3213 3163
3214 3164 sc->sc_rx_pend = 0;
3215 3165 (void) ath9k_hw_set_interrupts(sc->sc_ah, 0);
3216 3166 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW,
3217 3167 &sc->sc_softint_id, NULL, 0, arn_softint_handler, (caddr_t)sc);
3218 3168 if (err != DDI_SUCCESS) {
3219 3169 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3220 3170 "ddi_add_softintr() failed....\n"));
3221 3171 goto attach_fail5;
3222 3172 }
3223 3173
3224 3174 if (ddi_get_iblock_cookie(devinfo, 0, &sc->sc_iblock)
3225 3175 != DDI_SUCCESS) {
3226 3176 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3227 3177 "Can not get iblock cookie for INT\n"));
3228 3178 goto attach_fail6;
3229 3179 }
3230 3180
3231 3181 if (ddi_add_intr(devinfo, 0, NULL, NULL, arn_isr,
3232 3182 (caddr_t)sc) != DDI_SUCCESS) {
3233 3183 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3234 3184 "Can not set intr for ARN driver\n"));
3235 3185 goto attach_fail6;
3236 3186 }
3237 3187
3238 3188 /*
3239 3189 * Provide initial settings for the WiFi plugin; whenever this
3240 3190 * information changes, we need to call mac_plugindata_update()
3241 3191 */
3242 3192 wd.wd_opmode = ic->ic_opmode;
3243 3193 wd.wd_secalloc = WIFI_SEC_NONE;
3244 3194 IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid);
3245 3195
3246 3196 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3247 3197 "IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_bss->in_bssid)"
3248 3198 "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
3249 3199 wd.wd_bssid[0], wd.wd_bssid[1], wd.wd_bssid[2],
3250 3200 wd.wd_bssid[3], wd.wd_bssid[4], wd.wd_bssid[5]));
3251 3201
3252 3202 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
3253 3203 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3254 3204 "MAC version mismatch\n"));
3255 3205 goto attach_fail7;
3256 3206 }
3257 3207
3258 3208 macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI;
3259 3209 macp->m_driver = sc;
3260 3210 macp->m_dip = devinfo;
3261 3211 macp->m_src_addr = ic->ic_macaddr;
3262 3212 macp->m_callbacks = &arn_m_callbacks;
3263 3213 macp->m_min_sdu = 0;
3264 3214 macp->m_max_sdu = IEEE80211_MTU;
3265 3215 macp->m_pdata = &wd;
3266 3216 macp->m_pdata_size = sizeof (wd);
3267 3217
3268 3218 err = mac_register(macp, &ic->ic_mach);
3269 3219 mac_free(macp);
3270 3220 if (err != 0) {
3271 3221 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3272 3222 "mac_register err %x\n", err));
3273 3223 goto attach_fail7;
3274 3224 }
3275 3225
3276 3226 /* Create minor node of type DDI_NT_NET_WIFI */
3277 3227 (void) snprintf(strbuf, sizeof (strbuf), "%s%d",
3278 3228 ARN_NODENAME, instance);
3279 3229 err = ddi_create_minor_node(devinfo, strbuf, S_IFCHR,
3280 3230 instance + 1, DDI_NT_NET_WIFI, 0);
3281 3231 if (err != DDI_SUCCESS)
3282 3232 ARN_DBG((ARN_DBG_ATTACH, "WARN: arn: arn_attach(): "
3283 3233 "Create minor node failed - %d\n", err));
3284 3234
3285 3235 /* Notify link is down now */
3286 3236 mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
3287 3237
3288 3238 sc->sc_promisc = B_FALSE;
3289 3239 bzero(sc->sc_mcast_refs, sizeof (sc->sc_mcast_refs));
3290 3240 bzero(sc->sc_mcast_hash, sizeof (sc->sc_mcast_hash));
3291 3241
3292 3242 ARN_DBG((ARN_DBG_ATTACH, "arn: arn_attach(): "
3293 3243 "Atheros AR%s MAC/BB Rev:%x "
3294 3244 "AR%s RF Rev:%x: mem=0x%lx\n",
3295 3245 arn_mac_bb_name(ah->ah_macVersion),
3296 3246 ah->ah_macRev,
3297 3247 arn_rf_name((ah->ah_analog5GhzRev & AR_RADIO_SREV_MAJOR)),
3298 3248 ah->ah_phyRev,
3299 3249 (unsigned long)sc->mem));
3300 3250
3301 3251 /* XXX: hardware will not be ready until arn_open() being called */
3302 3252 sc->sc_flags |= SC_OP_INVALID;
3303 3253 sc->sc_isrunning = 0;
3304 3254
3305 3255 return (DDI_SUCCESS);
3306 3256
3307 3257 attach_fail7:
3308 3258 ddi_remove_intr(devinfo, 0, sc->sc_iblock);
3309 3259 attach_fail6:
3310 3260 ddi_remove_softintr(sc->sc_softint_id);
3311 3261 attach_fail5:
3312 3262 (void) ieee80211_detach(ic);
3313 3263 attach_fail4:
3314 3264 arn_desc_free(sc);
3315 3265 if (sc->sc_tq)
3316 3266 ddi_taskq_destroy(sc->sc_tq);
3317 3267 attach_fail3:
3318 3268 ath9k_hw_detach(ah);
3319 3269 attach_fail2:
3320 3270 ddi_regs_map_free(&sc->sc_io_handle);
3321 3271 attach_fail1:
3322 3272 pci_config_teardown(&sc->sc_cfg_handle);
3323 3273 attach_fail0:
3324 3274 sc->sc_flags |= SC_OP_INVALID;
3325 3275 /* cleanup tx queues */
3326 3276 mutex_destroy(&sc->sc_txbuflock);
3327 3277 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
3328 3278 if (ARN_TXQ_SETUP(sc, i)) {
3329 3279 /* arn_tx_cleanupq(asc, &asc->sc_txq[i]); */
3330 3280 mutex_destroy(&((&sc->sc_txq[i])->axq_lock));
3331 3281 }
3332 3282 }
3333 3283 mutex_destroy(&sc->sc_rxbuflock);
3334 3284 mutex_destroy(&sc->sc_serial_rw);
3335 3285 mutex_destroy(&sc->sc_genlock);
3336 3286 mutex_destroy(&sc->sc_resched_lock);
3337 3287 #ifdef ARN_IBSS
3338 3288 mutex_destroy(&sc->sc_bcbuflock);
3339 3289 #endif
3340 3290
3341 3291 ddi_soft_state_free(arn_soft_state_p, instance);
3342 3292
3343 3293 return (DDI_FAILURE);
3344 3294
3345 3295 }
3346 3296
3347 3297 /*
3348 3298 * Suspend transmit/receive for powerdown
3349 3299 */
3350 3300 static int
3351 3301 arn_suspend(struct arn_softc *sc)
3352 3302 {
3353 3303 ARN_LOCK(sc);
3354 3304 arn_close(sc);
3355 3305 ARN_UNLOCK(sc);
3356 3306
3357 3307 return (DDI_SUCCESS);
3358 3308 }
3359 3309
/*
 * detach(9E) entry point.  DDI_SUSPEND is delegated to arn_suspend();
 * DDI_DETACH tears the instance down completely, releasing everything
 * arn_attach() acquired in reverse dependency order.
 */
static int32_t
arn_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	struct arn_softc *sc;
	int i;

	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));
	ASSERT(sc != NULL);

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (arn_suspend(sc));

	default:
		return (DDI_FAILURE);
	}

	/* mac_disable() fails while the interface is held open; refuse */
	if (mac_disable(sc->sc_isc.ic_mach) != 0)
		return (DDI_FAILURE);

	arn_stop_scantimer(sc);
	arn_stop_caltimer(sc);

	/* disable interrupts */
	(void) ath9k_hw_set_interrupts(sc->sc_ah, 0);

	/*
	 * Unregister from the MAC layer subsystem
	 */
	(void) mac_unregister(sc->sc_isc.ic_mach);

	/* free interrupt resources */
	ddi_remove_intr(devinfo, 0, sc->sc_iblock);
	ddi_remove_softintr(sc->sc_softint_id);

	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 */
	ieee80211_detach(&sc->sc_isc);

	arn_desc_free(sc);

	ddi_taskq_destroy(sc->sc_tq);

	/* wake the chip before the final hal calls touch it */
	if (!(sc->sc_flags & SC_OP_INVALID))
		(void) ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);

	/* cleanup tx queues */
	mutex_destroy(&sc->sc_txbuflock);
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ARN_TXQ_SETUP(sc, i)) {
			arn_tx_cleanupq(sc, &sc->sc_txq[i]);
			mutex_destroy(&((&sc->sc_txq[i])->axq_lock));
		}
	}

	ath9k_hw_detach(sc->sc_ah);

	/* free io handle */
	ddi_regs_map_free(&sc->sc_io_handle);
	pci_config_teardown(&sc->sc_cfg_handle);

	/* destroy locks */
	mutex_destroy(&sc->sc_genlock);
	mutex_destroy(&sc->sc_serial_rw);
	mutex_destroy(&sc->sc_rxbuflock);
	mutex_destroy(&sc->sc_resched_lock);
#ifdef ARN_IBSS
	mutex_destroy(&sc->sc_bcbuflock);
#endif

	ddi_remove_minor_node(devinfo, NULL);
	ddi_soft_state_free(arn_soft_state_p, ddi_get_instance(devinfo));

	return (DDI_SUCCESS);
}
3447 3397
/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not be
 * blocked.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int32_t
arn_quiesce(dev_info_t *devinfo)
{
	struct arn_softc *sc;
	int i;
	struct ath_hal *ah;

	sc = ddi_get_soft_state(arn_soft_state_p, ddi_get_instance(devinfo));

	/* nothing to quiesce if the instance never fully attached */
	if (sc == NULL || (ah = sc->sc_ah) == NULL)
		return (DDI_FAILURE);

	/*
	 * Disable interrupts
	 */
	(void) ath9k_hw_set_interrupts(ah, 0);

	/*
	 * Disable TX HW
	 */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ARN_TXQ_SETUP(sc, i))
			(void) ath9k_hw_stoptxdma(ah, sc->sc_txq[i].axq_qnum);
	}

	/*
	 * Disable RX HW
	 */
	ath9k_hw_stoppcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	(void) ath9k_hw_stopdmarecv(ah);
	/* busy-wait for DMA to drain; blocking is not allowed here */
	drv_usecwait(3000);

	/*
	 * Power down HW
	 */
	(void) ath9k_hw_phy_disable(ah);

	return (DDI_SUCCESS);
}
3498 3448
/*
 * Device operations table: attach/detach as above, D_MP marks the
 * driver MT-safe, and arn_quiesce supports fast reboot.
 */
DDI_DEFINE_STREAM_OPS(arn_dev_ops, nulldev, nulldev, arn_attach, arn_detach,
    nodev, NULL, D_MP, NULL, arn_quiesce);
3501 3451
/* Module linkage information for the kernel loader. */
static struct modldrv arn_modldrv = {
	&mod_driverops, /* Type of module. This one is a driver */
	"Atheros 9000 series driver", /* short description */
	&arn_dev_ops /* driver specific ops */
};
3507 3457
/* Single-driver module linkage handed to mod_install(9F). */
static struct modlinkage modlinkage = {
	MODREV_1, (void *)&arn_modldrv, NULL
};
3511 3461
/* Report module information via the standard mod_info(9F) helper. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
3517 3467
3518 3468 int
3519 3469 _init(void)
3520 3470 {
3521 3471 int status;
3522 3472
3523 3473 status = ddi_soft_state_init
3524 3474 (&arn_soft_state_p, sizeof (struct arn_softc), 1);
3525 3475 if (status != 0)
3526 3476 return (status);
3527 3477
3528 3478 mutex_init(&arn_loglock, NULL, MUTEX_DRIVER, NULL);
3529 3479 mac_init_ops(&arn_dev_ops, "arn");
3530 3480 status = mod_install(&modlinkage);
3531 3481 if (status != 0) {
3532 3482 mac_fini_ops(&arn_dev_ops);
3533 3483 mutex_destroy(&arn_loglock);
3534 3484 ddi_soft_state_fini(&arn_soft_state_p);
3535 3485 }
3536 3486
3537 3487 return (status);
3538 3488 }
3539 3489
3540 3490 int
3541 3491 _fini(void)
3542 3492 {
3543 3493 int status;
3544 3494
3545 3495 status = mod_remove(&modlinkage);
3546 3496 if (status == 0) {
3547 3497 mac_fini_ops(&arn_dev_ops);
3548 3498 mutex_destroy(&arn_loglock);
3549 3499 ddi_soft_state_fini(&arn_soft_state_p);
3550 3500 }
3551 3501 return (status);
3552 3502 }
↓ open down ↓ |
38 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX