7154 arn(7D) walks out of bounds when byteswapping the 4K eeprom
7152 weird condition in arn(7D) needs clarification
7153 delete unused code in arn(7D)
7155 arn(7D) should include the mac fields in the eeprom enumeration
--- old/usr/src/uts/common/io/arn/arn_xmit.c
+++ new/usr/src/uts/common/io/arn/arn_xmit.c
1 1 /*
2 2 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
3 3 * Use is subject to license terms.
4 4 */
5 5
6 6 /*
7 7 * Copyright (c) 2008 Atheros Communications Inc.
8 8 *
9 9 * Permission to use, copy, modify, and/or distribute this software for any
10 10 * purpose with or without fee is hereby granted, provided that the above
11 11 * copyright notice and this permission notice appear in all copies.
12 12 *
13 13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 20 */
21 21 #include <sys/param.h>
22 22 #include <sys/types.h>
23 23 #include <sys/signal.h>
24 24 #include <sys/stream.h>
25 25 #include <sys/termio.h>
26 26 #include <sys/errno.h>
27 27 #include <sys/file.h>
28 28 #include <sys/cmn_err.h>
29 29 #include <sys/stropts.h>
30 30 #include <sys/strsubr.h>
31 31 #include <sys/strtty.h>
32 32 #include <sys/kbio.h>
33 33 #include <sys/cred.h>
34 34 #include <sys/stat.h>
35 35 #include <sys/consdev.h>
36 36 #include <sys/kmem.h>
37 37 #include <sys/modctl.h>
38 38 #include <sys/ddi.h>
39 39 #include <sys/sunddi.h>
40 40 #include <sys/pci.h>
41 41 #include <sys/errno.h>
42 42 #include <sys/mac_provider.h>
43 43 #include <sys/dlpi.h>
44 44 #include <sys/ethernet.h>
45 45 #include <sys/list.h>
46 46 #include <sys/byteorder.h>
47 47 #include <sys/strsun.h>
48 48 #include <sys/policy.h>
49 49 #include <inet/common.h>
50 50 #include <inet/nd.h>
51 51 #include <inet/mi.h>
52 52 #include <inet/wifi_ioctl.h>
53 53 #include <sys/mac_wifi.h>
54 54
55 55 #include "arn_core.h"
56 56
57 57 #define BITS_PER_BYTE 8
58 58 #define OFDM_PLCP_BITS 22
59 59 #define HT_RC_2_MCS(_rc) ((_rc) & 0x0f)
60 60 #define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
61 61 #define L_STF 8
62 62 #define L_LTF 8
63 63 #define L_SIG 4
64 64 #define HT_SIG 8
65 65 #define HT_STF 4
66 66 #define HT_LTF(_ns) (4 * (_ns))
67 67 #define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
68 68 #define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
69 69 #define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
70 70 #define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
71 71
72 72 #define OFDM_SIFS_TIME 16
73 73
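
Reviewer note: the half-GI macro approximates the 3.6 us symbol time in
integer math, rounding up to whole microseconds. A standalone sanity
check (not driver code):

#include <stdio.h>

#define	SYMBOL_TIME(_ns)	((_ns) << 2)		/* ns * 4 us */
#define	SYMBOL_TIME_HALFGI(_ns)	(((_ns) * 18 + 4) / 5)	/* ns * 3.6 us */

int
main(void)
{
	/* 10 symbols: 40 us at full GI, 36 us (10 * 3.6) at half GI */
	printf("%d %d\n", SYMBOL_TIME(10), SYMBOL_TIME_HALFGI(10));
	/* 3 symbols: 12 us vs 11 us (10.8 rounded up) */
	printf("%d %d\n", SYMBOL_TIME(3), SYMBOL_TIME_HALFGI(3));
	return (0);
}
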
74 74 static uint32_t bits_per_symbol[][2] = {
75 75 /* 20MHz 40MHz */
76 76 { 26, 54 }, /* 0: BPSK */
77 77 { 52, 108 }, /* 1: QPSK 1/2 */
78 78 { 78, 162 }, /* 2: QPSK 3/4 */
79 79 { 104, 216 }, /* 3: 16-QAM 1/2 */
80 80 { 156, 324 }, /* 4: 16-QAM 3/4 */
81 81 { 208, 432 }, /* 5: 64-QAM 2/3 */
82 82 { 234, 486 }, /* 6: 64-QAM 3/4 */
83 83 { 260, 540 }, /* 7: 64-QAM 5/6 */
84 84 { 52, 108 }, /* 8: BPSK */
85 85 { 104, 216 }, /* 9: QPSK 1/2 */
86 86 { 156, 324 }, /* 10: QPSK 3/4 */
87 87 { 208, 432 }, /* 11: 16-QAM 1/2 */
88 88 { 312, 648 }, /* 12: 16-QAM 3/4 */
89 89 { 416, 864 }, /* 13: 64-QAM 2/3 */
90 90 { 468, 972 }, /* 14: 64-QAM 3/4 */
91 91 { 520, 1080 }, /* 15: 64-QAM 5/6 */
92 92 };
93 93
94 94 #define IS_HT_RATE(_rate) ((_rate) & 0x80)
95 95
96 96 #ifdef ARN_TX_AGGREGATION
97 97 static void arn_tx_send_ht_normal(struct arn_softc *sc, struct ath_txq *txq,
98 98 struct ath_atx_tid *tid, list_t *bf_list);
99 99 static void arn_tx_complete_buf(struct arn_softc *sc, struct ath_buf *bf,
100 100 list_t *bf_q, int txok, int sendbar);
101 101 static void arn_tx_txqaddbuf(struct arn_softc *sc, struct ath_txq *txq,
102 102 list_t *buf_list);
103 103 static void arn_buf_set_rate(struct arn_softc *sc, struct ath_buf *bf);
104 104 static int arn_tx_num_badfrms(struct arn_softc *sc,
105 105 struct ath_buf *bf, int txok);
106 106 static void arn_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
107 107 int nbad, int txok, boolean_t update_rc);
108 108 #endif
109 109
110 110 static void
111 111 arn_get_beaconconfig(struct arn_softc *sc, struct ath_beacon_config *conf)
112 112 {
113 113 ieee80211com_t *ic = (ieee80211com_t *)sc;
114 114 struct ieee80211_node *in = ic->ic_bss;
115 115
116 116 /* fill in beacon config data */
117 117
118 118 conf->beacon_interval = in->in_intval ?
119 119 in->in_intval : ATH_DEFAULT_BINTVAL;
120 120 conf->listen_interval = 100;
121 121 conf->dtim_count = 1;
122 122 conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
123 123 }
124 124
125 125 /* Aggregation logic */
126 126
127 127 #ifdef ARN_TX_AGGREGATION
128 128
129 129 /* Check if it's okay to send out aggregates */
130 130 static int
131 131 arn_aggr_query(struct arn_softc *sc, struct ath_node *an, uint8_t tidno)
132 132 {
133 133 struct ath_atx_tid *tid;
134 134 tid = ATH_AN_2_TID(an, tidno);
135 135
136 136 if (tid->state & AGGR_ADDBA_COMPLETE ||
137 137 tid->state & AGGR_ADDBA_PROGRESS)
138 138 return (1);
139 139 else
140 140 return (0);
141 141 }
142 142
143 143 /*
144 144 * queue up a dest/ac pair for tx scheduling
145 145 * NB: must be called with txq lock held
146 146 */
147 147 static void
148 148 arn_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
149 149 {
150 150 struct ath_atx_ac *ac = tid->ac;
151 151
152 152 /* if tid is paused, hold off */
153 153 if (tid->paused)
154 154 return;
155 155
156 156 	/* add tid to ac at most once */
157 157 if (tid->sched)
158 158 return;
159 159
160 160 tid->sched = B_TRUE;
161 161 list_insert_tail(&ac->tid_q, &tid->list);
162 162
163 163 	/* add node ac to txq at most once */
164 164 if (ac->sched)
165 165 return;
166 166
167 167 ac->sched = B_TRUE;
168 168 list_insert_tail(&txq->axq_acq, &ac->list);
169 169 }
170 170
171 171 /* pause a tid */
172 172 static void
173 173 arn_tx_pause_tid(struct arn_softc *sc, struct ath_atx_tid *tid)
174 174 {
175 175 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
176 176
177 177 mutex_enter(&txq->axq_lock);
178 178
179 179 tid->paused++;
180 180
181 181 mutex_exit(&txq->axq_lock);
182 182 }
183 183
184 184 /* resume a tid and schedule aggregate */
185 185 void
186 186 arn_tx_resume_tid(struct arn_softc *sc, struct ath_atx_tid *tid)
187 187 {
188 188 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
189 189
190 190 ASSERT(tid->paused > 0);
191 191 mutex_enter(&txq->axq_lock);
192 192
193 193 tid->paused--;
194 194
195 195 if (tid->paused > 0)
196 196 goto unlock;
197 197
198 198 if (list_empty(&tid->buf_q))
199 199 goto unlock;
200 200
201 201 /*
202 202 * Add this TID to scheduler and try to send out aggregates
203 203 */
204 204 arn_tx_queue_tid(txq, tid);
205 205 arn_txq_schedule(sc, txq);
206 206 unlock:
207 207 mutex_exit(&txq->axq_lock);
208 208 }
209 209
210 210 /* flush tid's software queue and send frames as non-ampdu's */
211 211 static void
212 212 arn_tx_flush_tid(struct arn_softc *sc, struct ath_atx_tid *tid)
213 213 {
214 214 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
215 215 struct ath_buf *bf;
216 216
217 217 list_t list;
218 218 list_create(&list, sizeof (struct ath_buf),
219 219 offsetof(struct ath_buf, bf_node));
220 220
221 221 ASSERT(tid->paused > 0);
222 222 mutex_enter(&txq->axq_lock);
223 223
224 224 tid->paused--;
225 225
226 226 if (tid->paused > 0) {
227 227 mutex_exit(&txq->axq_lock);
228 228 return;
229 229 }
230 230
231 231 while (!list_empty(&tid->buf_q)) {
232 232 bf = list_head(&tid->buf_q);
233 233 ASSERT(!bf_isretried(bf));
234 234 list_remove(&tid->buf_q, bf);
235 235 list_insert_tail(&list, bf);
236 236 arn_tx_send_ht_normal(sc, txq, tid, &list);
237 237 }
238 238
239 239 mutex_exit(&txq->axq_lock);
240 240 }
241 241
242 242 /* Update block ack window */
243 243 static void
244 244 arn_tx_update_baw(struct arn_softc *sc, struct ath_atx_tid *tid, int seqno)
245 245 {
246 246 int index, cindex;
247 247
248 248 index = ATH_BA_INDEX(tid->seq_start, seqno);
249 249 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
250 250
251 251 tid->tx_buf[cindex] = NULL;
252 252
253 253 while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
254 254 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
255 255 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
256 256 }
257 257 }
258 258
259 259 /* Add a sub-frame to block ack window */
260 260 static void
261 261 arn_tx_addto_baw(struct arn_softc *sc, struct ath_atx_tid *tid,
262 262 struct ath_buf *bf)
263 263 {
264 264 int index, cindex;
265 265
266 266 if (bf_isretried(bf))
267 267 return;
268 268
269 269 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
270 270 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
271 271
272 272 ASSERT(tid->tx_buf[cindex] == NULL);
273 273 tid->tx_buf[cindex] = bf;
274 274
275 275 if (index >= ((tid->baw_tail - tid->baw_head) &
276 276 (ATH_TID_MAX_BUFS - 1))) {
277 277 tid->baw_tail = cindex;
278 278 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
279 279 }
280 280 }
281 281
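
Reviewer note: arn_tx_update_baw() and arn_tx_addto_baw() treat tx_buf[]
as a circular window of ATH_TID_MAX_BUFS slots anchored at
baw_head/seq_start; completing the oldest outstanding frame slides both
forward past empty slots. A minimal standalone model, assuming the usual
ath9k-style definitions of ATH_BA_INDEX and INCR (not quoted in this
file) and an illustrative ATH_TID_MAX_BUFS of 64 (a power of two):

#include <stdio.h>

#define	IEEE80211_SEQ_MAX	4096
#define	ATH_TID_MAX_BUFS	64	/* assumed for illustration */
#define	ATH_BA_INDEX(_st, _seq)	(((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
#define	INCR(_x, _max)		((_x) = ((_x) + 1) % (_max))

int
main(void)
{
	void *tx_buf[ATH_TID_MAX_BUFS] = { NULL };
	int seq_start = 4090, baw_head = 0, baw_tail = 6, dummy;

	/* add seqno 4095: the sequence space wraps but the index is 5 */
	tx_buf[(baw_head + ATH_BA_INDEX(seq_start, 4095)) &
	    (ATH_TID_MAX_BUFS - 1)] = &dummy;

	/* complete seqno 4090: the head slot is empty, window slides to 5 */
	while (baw_head != baw_tail && tx_buf[baw_head] == NULL) {
		INCR(seq_start, IEEE80211_SEQ_MAX);
		INCR(baw_head, ATH_TID_MAX_BUFS);
	}
	printf("seq_start=%d baw_head=%d\n", seq_start, baw_head); /* 4095 5 */
	return (0);
}
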
282 282 /*
283 283 * TODO: For frame(s) that are in the retry state, we will reuse the
284 284 * sequence number(s) without setting the retry bit. The
285 285 * alternative is to give up on these and BAR the receiver's window
286 286 * forward.
287 287 */
288 288 static void
289 289 arn_tid_drain(struct arn_softc *sc,
290 290 struct ath_txq *txq,
291 291 struct ath_atx_tid *tid)
292 -
293 292 {
294 293 struct ath_buf *bf;
295 294
296 295 list_t list;
297 296 list_create(&list, sizeof (struct ath_buf),
298 297 offsetof(struct ath_buf, bf_node));
299 298
300 299 for (;;) {
301 300 if (list_empty(&tid->buf_q))
302 301 break;
303 302
304 303 bf = list_head(&tid->buf_q);
305 304 list_remove(&tid->buf_q, bf);
306 305 list_insert_tail(&list, bf);
307 306
308 307 if (bf_isretried(bf))
309 308 arn_tx_update_baw(sc, tid, bf->bf_seqno);
310 309
311 310 mutex_enter(&txq->axq_lock);
312 311 arn_tx_complete_buf(sc, bf, &list, 0, 0);
313 312 mutex_exit(&txq->axq_lock);
314 313 }
315 314
316 315 tid->seq_next = tid->seq_start;
317 316 tid->baw_tail = tid->baw_head;
318 317 }
319 318
320 319 static void
321 320 arn_tx_set_retry(struct arn_softc *sc, struct ath_buf *bf)
322 321 {
323 322 struct ieee80211_frame *wh;
324 323 wh = (struct ieee80211_frame *)bf->bf_dma.mem_va;
325 324
326 325 bf->bf_state.bf_type |= BUF_RETRY;
327 326 bf->bf_retries++;
328 327
329 328 *(uint16_t *)&wh->i_seq[0] |= LE_16(0x0800); /* ??? */
330 329 }
331 330
332 331 static struct ath_buf *
333 332 arn_clone_txbuf(struct arn_softc *sc, struct ath_buf *bf)
334 333 {
335 334 struct ath_buf *tbf;
336 335
337 336 mutex_enter(&sc->sc_txbuflock);
338 337 ASSERT(!list_empty((&sc->sc_txbuf_list)));
339 338
340 339 tbf = list_head(&sc->sc_txbuf_list);
341 340 list_remove(&sc->sc_txbuf_list, tbf);
342 341 mutex_exit(&sc->sc_txbuflock);
343 342
344 343 ATH_TXBUF_RESET(tbf);
345 344
346 345 tbf->bf_daddr = bf->bf_daddr; /* physical addr of desc */
347 346 tbf->bf_dma = bf->bf_dma; /* dma area for buf */
348 347 *(tbf->bf_desc) = *(bf->bf_desc); /* virtual addr of desc */
349 348 tbf->bf_state = bf->bf_state; /* buffer state */
350 349
351 350 return (tbf);
352 351 }
353 352
354 353 static void
355 354 arn_tx_complete_aggr(struct arn_softc *sc, struct ath_txq *txq,
356 355 struct ath_buf *bf, list_t *bf_q, int txok)
357 356 {
358 357 struct ieee80211_node *in;
359 358 struct ath_node *an = NULL;
360 359 struct ath_atx_tid *tid = NULL;
361 360 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
362 361 struct ath_desc *ds = bf_last->bf_desc;
363 362
364 363 list_t list, list_pending;
365 364 uint16_t seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
366 365 uint32_t ba[WME_BA_BMP_SIZE >> 5];
367 366 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
368 367 boolean_t rc_update = B_TRUE;
369 368
370 369 an = ATH_NODE(in); /* Be sure in != NULL */
371 370 tid = ATH_AN_2_TID(an, bf->bf_tidno);
372 371
373 372 isaggr = bf_isaggr(bf);
374 373 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
375 374
376 375 if (isaggr && txok) {
377 376 if (ATH_DS_TX_BA(ds)) {
378 377 seq_st = ATH_DS_BA_SEQ(ds);
379 378 memcpy(ba, ATH_DS_BA_BITMAP(ds),
380 379 WME_BA_BMP_SIZE >> 3);
381 380 } else {
382 381 /*
383 382 * AR5416 can become deaf/mute when BA
384 383 * issue happens. Chip needs to be reset.
385 384 			 * But AP code may have synchronization issues
386 385 			 * when performing an internal reset in this routine.
387 386 * Only enable reset in STA mode for now.
388 387 */
389 388 if (sc->sc_ah->ah_opmode == ATH9K_M_STA)
390 389 needreset = 1;
391 390 }
392 391 }
393 392
394 393 list_create(&list_pending, sizeof (struct ath_buf),
395 394 offsetof(struct ath_buf, bf_node));
396 395 list_create(&list, sizeof (struct ath_buf),
397 396 offsetof(struct ath_buf, bf_node));
398 397
399 398 nbad = arn_tx_num_badfrms(sc, bf, txok);
400 399 while (bf) {
401 400 txfail = txpending = 0;
402 401 bf_next = bf->bf_next;
403 402
404 403 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
405 404 /*
406 405 * transmit completion, subframe is
407 406 * acked by block ack
408 407 */
409 408 acked_cnt++;
410 409 } else if (!isaggr && txok) {
411 410 /* transmit completion */
412 411 acked_cnt++;
413 412 } else {
414 413 if (!(tid->state & AGGR_CLEANUP) &&
415 414 ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
416 415 if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
417 416 arn_tx_set_retry(sc, bf);
418 417 txpending = 1;
419 418 } else {
420 419 bf->bf_state.bf_type |= BUF_XRETRY;
421 420 txfail = 1;
422 421 sendbar = 1;
423 422 txfail_cnt++;
424 423 }
425 424 } else {
426 425 /*
427 426 * cleanup in progress, just fail
428 427 * the un-acked sub-frames
429 428 */
430 429 txfail = 1;
431 430 }
432 431 }
433 432
434 433 if (bf_next == NULL) {
435 434 /* INIT_LIST_HEAD */
436 435 list_create(&list, sizeof (struct ath_buf),
437 436 offsetof(struct ath_buf, bf_node));
438 437 } else {
439 438 ASSERT(!list_empty(bf_q));
440 439 list_remove(bf_q, bf);
441 440 list_insert_tail(&list, bf);
442 441 }
443 442
444 443 if (!txpending) {
445 444 /*
446 445 * complete the acked-ones/xretried ones; update
447 446 * block-ack window
448 447 */
449 448 mutex_enter(&txq->axq_lock);
450 449 arn_tx_update_baw(sc, tid, bf->bf_seqno);
451 450 mutex_exit(&txq->axq_lock);
452 451
453 452 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
454 453 				arn_tx_rc_status(bf, ds, nbad, txok, B_TRUE);
455 454 rc_update = B_FALSE;
456 455 } else {
457 456 				arn_tx_rc_status(bf, ds, nbad, txok, B_FALSE);
458 457 }
459 458
460 459 			arn_tx_complete_buf(sc, bf, &list, !txfail, sendbar);
461 460 } else {
462 461 /* retry the un-acked ones */
463 462 if (bf->bf_next == NULL &&
464 463 bf_last->bf_status & ATH_BUFSTATUS_STALE) {
465 464 struct ath_buf *tbf;
466 465
467 466 tbf = arn_clone_txbuf(sc, bf_last);
468 467 ath9k_hw_cleartxdesc(sc->sc_ah, tbf->bf_desc);
469 468 list_insert_tail(&list, tbf);
470 469 } else {
471 470 /*
472 471 * Clear descriptor status words for
473 472 * software retry
474 473 */
475 474 ath9k_hw_cleartxdesc(sc->sc_ah, bf->bf_desc);
476 475 }
477 476
478 477 /*
479 478 * Put this buffer to the temporary pending
480 479 * queue to retain ordering
481 480 */
482 481 list_splice_tail_init(&list, &list_pending);
483 482 /*
484 483 * Insert src list after dst list.
485 484 * Empty src list thereafter
486 485 */
487 486 list_move_tail(&list_pending, &list);
488 487 /* should re-initialize list here??? */
489 488 }
490 489
491 490 bf = bf_next;
492 491 }
493 492
494 493 if (tid->state & AGGR_CLEANUP) {
495 494 if (tid->baw_head == tid->baw_tail) {
496 495 tid->state &= ~AGGR_ADDBA_COMPLETE;
497 496 tid->addba_exchangeattempts = 0;
498 497 tid->state &= ~AGGR_CLEANUP;
499 498
500 499 /* send buffered frames as singles */
501 500 arn_tx_flush_tid(sc, tid);
502 501 }
503 502 return;
504 503 }
505 504
506 505 /*
507 506 * prepend un-acked frames to the beginning of
508 507 * the pending frame queue
509 508 */
510 509
511 510 if (!list_empty(&list_pending)) {
512 511 mutex_enter(&txq->axq_lock);
513 512 list_move_tail(&list_pending, &tid->buf_q);
514 513 arn_tx_queue_tid(txq, tid);
515 514 mutex_exit(&txq->axq_lock);
516 515 }
517 516 }
518 517
519 518 static uint32_t
520 519 arn_lookup_rate(struct arn_softc *sc, struct ath_buf *bf,
521 520 struct ath_atx_tid *tid)
522 521 {
523 522 struct ath_rate_table *rate_table = sc->sc_currates;
524 523 struct ath9k_tx_rate *rates;
525 524 struct ath_tx_info_priv *tx_info_priv;
526 525 uint32_t max_4ms_framelen, frmlen;
527 526 uint16_t aggr_limit, legacy = 0, maxampdu;
528 527 int i;
529 528
530 529 /* ??? */
531 530 rates = (struct ath9k_tx_rate *)bf->rates;
532 531 tx_info_priv = (struct ath_tx_info_priv *)&bf->tx_info_priv;
533 532
534 533 /*
535 534 * Find the lowest frame length among the rate series that will have a
536 535 * 4ms transmit duration.
537 536 * TODO - TXOP limit needs to be considered.
538 537 */
539 538 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
540 539
541 540 for (i = 0; i < 4; i++) {
542 541 if (rates[i].count) {
543 542 if (!WLAN_RC_PHY_HT
544 543 (rate_table->info[rates[i].idx].phy)) {
545 544 legacy = 1;
546 545 break;
547 546 }
548 547
549 548 frmlen =
550 549 rate_table->info[rates[i].idx].max_4ms_framelen;
551 550 max_4ms_framelen = min(max_4ms_framelen, frmlen);
552 551 }
553 552 }
554 553
555 554 /*
556 555 * limit aggregate size by the minimum rate if rate selected is
557 556 * not a probe rate, if rate selected is a probe rate then
558 557 * avoid aggregation of this packet.
559 558 */
560 559 if (legacy)
561 560 return (0);
562 561
563 562 aggr_limit = min(max_4ms_framelen, (uint32_t)ATH_AMPDU_LIMIT_DEFAULT);
564 563
565 564 /*
566 565 	 * h/w can accept aggregates up to 16-bit lengths (65535).
567 566 	 * The IE, however, can hold up to 65536, which shows up here
568 567 * as zero. Ignore 65536 since we are constrained by hw.
569 568 */
570 569 maxampdu = tid->an->maxampdu;
571 570 if (maxampdu)
572 571 aggr_limit = min(aggr_limit, maxampdu);
573 572
574 573 return (aggr_limit);
575 574 }
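
Reviewer note: a worked example of the clamping above. The
ATH_AMPDU_LIMIT_DEFAULT value is taken as 8192 purely for illustration:

#include <stdio.h>

#define	MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	unsigned int max_4ms_framelen = 12000;	/* lowest cap in the series */
	unsigned int limit_default = 8192;	/* assumed default limit */
	unsigned int maxampdu = 65535;		/* peer ADDBA limit */
	unsigned int aggr_limit = MIN(max_4ms_framelen, limit_default);

	if (maxampdu)	/* zero means the 65536 IE case; hw tops out at 65535 */
		aggr_limit = MIN(aggr_limit, maxampdu);
	printf("aggr_limit=%u\n", aggr_limit);	/* 8192 */
	return (0);
}
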
576 575
577 576 /*
578 577 * Returns the number of delimiters to be added to
579 578 * meet the minimum required mpdudensity.
580 579  * Caller should make sure that the rate is an HT rate.
581 580 */
582 581 static int
583 582 arn_compute_num_delims(struct arn_softc *sc, struct ath_atx_tid *tid,
584 583 struct ath_buf *bf, uint16_t frmlen)
585 584 {
586 585 struct ath_rate_table *rt = sc->sc_currates;
587 586 struct ath9k_tx_rate *rates = (struct ath9k_tx_rate *)bf->rates;
588 587 uint32_t nsymbits, nsymbols, mpdudensity;
589 588 uint16_t minlen;
590 589 uint8_t rc, flags, rix;
591 590 int width, half_gi, ndelim, mindelim;
592 591
593 592 /* Select standard number of delimiters based on frame length alone */
594 593 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
595 594
596 595 /*
597 596 * If encryption enabled, hardware requires some more padding between
598 597 * subframes.
599 598 * TODO - this could be improved to be dependent on the rate.
600 599 * The hardware can keep up at lower rates, but not higher rates
601 600 */
602 601 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
603 602 ndelim += ATH_AGGR_ENCRYPTDELIM;
604 603
605 604 /*
606 605 	 * Convert desired mpdu density from microseconds to bytes based
607 606 * on highest rate in rate series (i.e. first rate) to determine
608 607 * required minimum length for subframe. Take into account
609 608 	 * whether high rate is 20 or 40 MHz and half or full GI.
610 609 */
611 610 mpdudensity = tid->an->mpdudensity;
612 611
613 612 /*
614 613 * If there is no mpdu density restriction, no further calculation
615 614 * is needed.
616 615 */
617 616 if (mpdudensity == 0)
618 617 return (ndelim);
619 618
620 619 rix = rates[0].idx;
621 620 flags = rates[0].flags;
622 621 rc = rt->info[rix].ratecode;
623 622 width = (flags & ATH9K_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
624 623 half_gi = (flags & ATH9K_TX_RC_SHORT_GI) ? 1 : 0;
625 624
626 625 if (half_gi)
627 626 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
628 627 else
629 628 nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);
630 629
631 630 if (nsymbols == 0)
632 631 nsymbols = 1;
633 632
634 633 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
635 634 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
636 635
637 636 if (frmlen < minlen) {
638 637 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
639 638 ndelim = max(mindelim, ndelim);
640 639 }
641 640
642 641 return (ndelim);
643 642 }
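
Reviewer note: worked numbers for the density calculation above. At
MCS 7 / 20 MHz the table gives 260 bits per symbol, so an 8 us density
at full GI demands a 65-byte minimum subframe; a 40-byte subframe then
needs extra delimiters (ATH_AGGR_DELIM_SZ assumed to be 4 bytes here):

#include <stdio.h>

#define	BITS_PER_BYTE			8
#define	NUM_SYMBOLS_PER_USEC(_usec)	((_usec) >> 2)
#define	ATH_AGGR_DELIM_SZ		4	/* assumed for illustration */

int
main(void)
{
	int nsymbols = NUM_SYMBOLS_PER_USEC(8);			/* 2 */
	int nsymbits = 260;					/* MCS 7, 20 MHz */
	int minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;	/* 65 bytes */
	int frmlen = 40, ndelim = 0, mindelim;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;	/* 6 */
		ndelim = (mindelim > ndelim) ? mindelim : ndelim;
	}
	printf("ndelim=%d\n", ndelim);
	return (0);
}
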
644 643
645 644 static enum ATH_AGGR_STATUS
646 645 arn_tx_form_aggr(struct arn_softc *sc, struct ath_atx_tid *tid,
647 646 list_t *bf_q)
648 647 {
649 648 #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
650 649 struct ath_buf *bf, *bf_first, *bf_prev = NULL;
651 650 int rl = 0, nframes = 0, ndelim, prev_al = 0;
652 651 uint16_t aggr_limit = 0, al = 0, bpad = 0,
653 652 al_delta, h_baw = tid->baw_size / 2;
654 653 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
655 654
656 655 bf_first = list_head(&tid->buf_q);
657 656
658 657 do {
659 658 bf = list_head(&tid->buf_q);
660 659
661 660 /* do not step over block-ack window */
662 661 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
663 662 status = ATH_AGGR_BAW_CLOSED;
664 663 break;
665 664 }
666 665
667 666 if (!rl) {
668 667 aggr_limit = arn_lookup_rate(sc, bf, tid);
669 668 rl = 1;
670 669 }
671 670
672 671 /* do not exceed aggregation limit */
673 672 al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;
674 673
675 674 if (nframes &&
676 675 (aggr_limit < (al + bpad + al_delta + prev_al))) {
677 676 status = ATH_AGGR_LIMITED;
678 677 break;
679 678 }
680 679
681 680 /* do not exceed subframe limit */
682 681 if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
683 682 status = ATH_AGGR_LIMITED;
684 683 break;
685 684 }
686 685 nframes++;
687 686
688 687 /* add padding for previous frame to aggregation length */
689 688 al += bpad + al_delta;
690 689
691 690 /*
692 691 * Get the delimiters needed to meet the MPDU
693 692 * density for this node.
694 693 */
695 694 ndelim =
696 695 arn_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
697 696 bpad = PADBYTES(al_delta) + (ndelim << 2);
698 697
699 698 bf->bf_next = NULL;
700 699 bf->bf_desc->ds_link = 0;
701 700
702 701 /* link buffers of this frame to the aggregate */
703 702 arn_tx_addto_baw(sc, tid, bf);
704 703 ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
705 704 list_remove(&tid->buf_q, bf);
706 705 list_insert_tail(bf_q, bf);
707 706 if (bf_prev) {
708 707 bf_prev->bf_next = bf;
709 708 bf_prev->bf_desc->ds_link = bf->bf_daddr;
710 709 }
711 710 bf_prev = bf;
712 711 } while (!list_empty(&tid->buf_q));
713 712
714 713 bf_first->bf_al = al;
715 714 bf_first->bf_nframes = nframes;
716 715
717 716 return (status);
718 717 #undef PADBYTES
719 718 }
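
Reviewer note: PADBYTES rounds each subframe up to a 4-byte boundary
before the delimiters (ndelim << 2 bytes) are appended. A quick check of
the macro:

#include <stdio.h>

#define	PADBYTES(_len)	((4 - ((_len) % 4)) % 4)

int
main(void)
{
	/* aligned lengths need no pad; others pad up to 3 bytes */
	printf("%d %d %d %d\n", PADBYTES(1536), PADBYTES(1537),
	    PADBYTES(1538), PADBYTES(1539));	/* 0 3 2 1 */
	return (0);
}
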
720 719
721 720 static void
722 721 arn_tx_sched_aggr(struct arn_softc *sc, struct ath_txq *txq,
723 722 struct ath_atx_tid *tid)
724 723 {
725 724 struct ath_buf *bf;
726 725 enum ATH_AGGR_STATUS status;
727 726 list_t bf_q;
728 727
729 728 do {
730 729 if (list_empty(&tid->buf_q))
731 730 return;
732 731
733 732 /* INIT_LIST_HEAD */
734 733 list_create(&bf_q, sizeof (struct ath_buf),
735 734 offsetof(struct ath_buf, bf_node));
736 735
737 736 status = arn_tx_form_aggr(sc, tid, &bf_q);
738 737
739 738 /*
740 739 * no frames picked up to be aggregated;
741 740 * block-ack window is not open.
742 741 */
743 742 if (list_empty(&bf_q))
744 743 break;
745 744
746 745 bf = list_head(&bf_q);
747 746 bf->bf_lastbf = list_object(&bf_q, bf->bf_node.list_prev);
748 747
749 748 /* if only one frame, send as non-aggregate */
750 749 if (bf->bf_nframes == 1) {
751 750 bf->bf_state.bf_type &= ~BUF_AGGR;
752 751 ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
753 752 ath_buf_set_rate(sc, bf);
754 753 arn_tx_txqaddbuf(sc, txq, &bf_q);
755 754 continue;
756 755 }
757 756
758 757 /* setup first desc of aggregate */
759 758 bf->bf_state.bf_type |= BUF_AGGR;
760 759 ath_buf_set_rate(sc, bf);
761 760 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);
762 761
763 762 /* anchor last desc of aggregate */
764 763 ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
765 764
766 765 txq->axq_aggr_depth++;
767 766 arn_tx_txqaddbuf(sc, txq, &bf_q);
768 767
769 768 } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
770 769 status != ATH_AGGR_BAW_CLOSED);
771 770 }
772 771
773 772 int
774 773 arn_tx_aggr_start(struct arn_softc *sc, struct ieee80211_node *in,
775 774 uint16_t tid, uint16_t *ssn)
776 775 {
777 776 struct ath_atx_tid *txtid;
778 777 struct ath_node *an;
779 778
780 779 an = ATH_NODE(in);
781 780
782 781 if (sc->sc_flags & SC_OP_TXAGGR) {
783 782 txtid = ATH_AN_2_TID(an, tid);
784 783 txtid->state |= AGGR_ADDBA_PROGRESS;
785 784 arn_tx_pause_tid(sc, txtid);
786 785 *ssn = txtid->seq_start;
787 786 }
788 787
789 788 return (0);
790 789 }
791 790
792 791 int
793 792 arn_tx_aggr_stop(struct arn_softc *sc, struct ieee80211_node *in, uint16_t tid)
794 793 {
795 794 struct ath_node *an = ATH_NODE(in);
796 795 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
797 796 struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
798 797 struct ath_buf *bf;
799 798
800 799 list_t list;
801 800 list_create(&list, sizeof (struct ath_buf),
802 801 offsetof(struct ath_buf, bf_node));
803 802
804 803 if (txtid->state & AGGR_CLEANUP)
805 804 return (0);
806 805
807 806 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
808 807 txtid->addba_exchangeattempts = 0;
809 808 return (0);
810 809 }
811 810
812 811 arn_tx_pause_tid(sc, txtid);
813 812
814 813 /* drop all software retried frames and mark this TID */
815 814 mutex_enter(&txq->axq_lock);
816 815 while (!list_empty(&txtid->buf_q)) {
817 816 /* list_first_entry */
818 817 bf = list_head(&txtid->buf_q);
819 818 if (!bf_isretried(bf)) {
820 819 /*
821 820 * NB: it's based on the assumption that
822 821 * software retried frame will always stay
823 822 * at the head of software queue.
824 823 */
825 824 break;
826 825 }
827 826 list_remove(&txtid->buf_q, bf);
828 827 list_insert_tail(&list, bf);
829 828 arn_tx_update_baw(sc, txtid, bf->bf_seqno);
830 829 		/* to do: ath_tx_complete_buf(sc, bf, &list, 0, 0); */
831 830 }
832 831 mutex_exit(&txq->axq_lock);
833 832
834 833 if (txtid->baw_head != txtid->baw_tail) {
835 834 txtid->state |= AGGR_CLEANUP;
836 835 } else {
837 836 txtid->state &= ~AGGR_ADDBA_COMPLETE;
838 837 txtid->addba_exchangeattempts = 0;
839 838 arn_tx_flush_tid(sc, txtid);
840 839 }
841 840
842 841 return (0);
843 842 }
844 843
845 844 void
846 845 arn_tx_aggr_resume(struct arn_softc *sc,
847 846 struct ieee80211_node *in,
848 847 uint16_t tid)
849 848 {
850 849 struct ath_atx_tid *txtid;
851 850 struct ath_node *an;
852 851
853 852 an = ATH_NODE(in);
854 853
855 854 if (sc->sc_flags & SC_OP_TXAGGR) {
856 855 txtid = ATH_AN_2_TID(an, tid);
857 856 txtid->baw_size = (0x8) << sc->sc_ht_conf.ampdu_factor;
858 857 txtid->state |= AGGR_ADDBA_COMPLETE;
859 858 txtid->state &= ~AGGR_ADDBA_PROGRESS;
860 859 arn_tx_resume_tid(sc, txtid);
861 860 }
862 861 }
863 862
864 863 boolean_t
865 864 arn_tx_aggr_check(struct arn_softc *sc, struct ath_node *an, uint8_t tidno)
866 865 {
867 866 struct ath_atx_tid *txtid;
868 867
869 868 if (!(sc->sc_flags & SC_OP_TXAGGR))
870 869 return (B_FALSE);
871 870
872 871 txtid = ATH_AN_2_TID(an, tidno);
873 872
874 873 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
875 874 if (!(txtid->state & AGGR_ADDBA_PROGRESS) &&
876 875 (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
877 876 txtid->addba_exchangeattempts++;
878 877 return (B_TRUE);
879 878 }
880 879 }
881 880
882 881 return (B_FALSE);
883 882 }
884 883
885 884 /* Queue Management */
886 885
887 886 static void
888 887 arn_txq_drain_pending_buffers(struct arn_softc *sc, struct ath_txq *txq)
889 888 {
890 889 struct ath_atx_ac *ac, *ac_tmp;
891 890 struct ath_atx_tid *tid, *tid_tmp;
892 891
893 892 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq) {
894 893 list_remove(&txq->axq_acq, ac);
895 894 ac->sched = B_FALSE;
896 895 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q) {
897 896 list_remove(&ac->tid_q, tid);
898 897 tid->sched = B_FALSE;
899 898 arn_tid_drain(sc, txq, tid);
900 899 }
901 900 }
902 901 }
903 902
904 903 int
905 904 arn_tx_get_qnum(struct arn_softc *sc, int qtype, int haltype)
906 905 {
907 906 int qnum;
908 907
909 908 switch (qtype) {
910 909 case ATH9K_TX_QUEUE_DATA:
911 910 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
912 911 ARN_DBG((ARN_DBG_FATAL, "arn: arn_tx_get_qnum(): "
913 912 "HAL AC %u out of range, max %zu!\n",
914 913 haltype, ARRAY_SIZE(sc->sc_haltype2q)));
915 914 return (-1);
916 915 }
917 916 qnum = sc->sc_haltype2q[haltype];
918 917 break;
919 918 case ATH9K_TX_QUEUE_BEACON:
920 919 qnum = sc->sc_beaconq;
921 920 break;
922 921 case ATH9K_TX_QUEUE_CAB:
923 922 qnum = sc->sc_cabq->axq_qnum;
924 923 break;
925 924 default:
926 925 qnum = -1;
927 926 }
928 927 return (qnum);
929 928 }
930 929
931 930 struct ath_txq *
932 931 arn_test_get_txq(struct arn_softc *sc, struct ieee80211_node *in,
933 932 struct ieee80211_frame *wh, uint8_t type)
934 933 {
935 934 struct ieee80211_qosframe *qwh = NULL;
936 935 struct ath_txq *txq = NULL;
937 936 int tid = -1;
938 937 int qos_ac;
939 938 int qnum;
940 939
941 940 if (in->in_flags & IEEE80211_NODE_QOS) {
942 941
943 942 if ((type & IEEE80211_FC0_TYPE_MASK) ==
944 943 IEEE80211_FC0_TYPE_DATA) {
945 944
946 945 if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
947 946 qwh = (struct ieee80211_qosframe *)wh;
948 947
949 948 tid = qwh->i_qos[0] & IEEE80211_QOS_TID;
950 949 				switch (tid) {
951 950 				case 1:
952 951 				case 2:
953 952 					qos_ac = WME_AC_BK;
					break;
954 953 				case 0:
955 954 				case 3:
956 955 					qos_ac = WME_AC_BE;
					break;
957 956 				case 4:
958 957 				case 5:
959 958 					qos_ac = WME_AC_VI;
					break;
960 959 				case 6:
961 960 				case 7:
				default:	/* TID is 4 bits; map 8-15 to VO too */
962 961 					qos_ac = WME_AC_VO;
					break;
963 962 				}
964 963 }
965 964 } else {
966 965 qos_ac = WME_AC_VO;
967 966 }
968 967 } else if ((type & IEEE80211_FC0_TYPE_MASK) ==
969 968 IEEE80211_FC0_TYPE_MGT) {
970 969 qos_ac = WME_AC_VO;
971 970 } else if ((type & IEEE80211_FC0_TYPE_MASK) ==
972 971 IEEE80211_FC0_TYPE_CTL) {
973 972 qos_ac = WME_AC_VO;
974 973 } else {
975 974 qos_ac = WME_AC_BK;
976 975 }
977 976 qnum = arn_get_hal_qnum(qos_ac, sc);
978 977 txq = &sc->sc_txq[qnum];
979 978
980 979 mutex_enter(&txq->axq_lock);
981 980
982 981 if (txq->axq_depth >= (ATH_TXBUF - 20)) {
983 982 ARN_DBG((ARN_DBG_XMIT,
984 983 "TX queue: %d is full, depth: %d\n",
985 984 qnum, txq->axq_depth));
986 985 		/* stop the queue */
987 986 sc->sc_resched_needed = B_TRUE;
988 987 txq->stopped = 1;
989 988 mutex_exit(&txq->axq_lock);
990 989 return (NULL);
991 990 }
992 991
993 992 mutex_exit(&txq->axq_lock);
994 993
995 994 return (txq);
996 995 }
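
Reviewer note: the switch above is the standard 802.11e UP-to-AC mapping
(1/2 -> BK, 0/3 -> BE, 4/5 -> VI, 6/7 -> VO). A table-driven sketch of
the same mapping, using the net80211 WME_AC_* constants, would avoid the
fall-through hazard entirely:

static const int up_to_ac[8] = {
	WME_AC_BE, WME_AC_BK, WME_AC_BK, WME_AC_BE,	/* UP 0-3 */
	WME_AC_VI, WME_AC_VI, WME_AC_VO, WME_AC_VO	/* UP 4-7 */
};
/* qos_ac = up_to_ac[tid & 7]; */
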
997 996
998 997 /* Called only when tx aggregation is enabled and HT is supported */
999 998 static void
1000 999 assign_aggr_tid_seqno(struct arn_softc *sc,
1001 1000 struct ath_buf *bf,
1002 1001 struct ieee80211_frame *wh)
1003 1002 {
1004 1003 struct ath_node *an;
1005 1004 struct ath_atx_tid *tid;
1006 1005 struct ieee80211_node *in;
1007 1006 struct ieee80211_qosframe *qwh = NULL;
1008 1007 ieee80211com_t *ic = (ieee80211com_t *)sc;
1009 1008
1010 1009 in = ieee80211_find_txnode(ic, wh->i_addr1);
1011 1010 if (in == NULL) {
1012 1011 arn_problem("assign_aggr_tid_seqno():"
1013 1012 "failed to find tx node\n");
1014 1013 return;
1015 1014 }
1016 1015 an = ATH_NODE(in);
1017 1016
1018 1017 /* Get tidno */
1019 1018 if (in->in_flags & IEEE80211_NODE_QOS) {
1020 1019 if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
1021 1020 qwh = (struct ieee80211_qosframe *)wh;
1022 1021 bf->bf_tidno = qwh->i_qos[0] & IEEE80211_QOS_TID;
1023 1022 }
1024 1023 }
1025 1024
1026 1025 /* Get seqno */
1027 1026 /*
1028 1027 * For HT capable stations, we save tidno for later use.
1029 1028 * We also override seqno set by upper layer with the one
1030 1029 * in tx aggregation state.
1031 1030 *
1032 1031 * If fragmentation is on, the sequence number is
1033 1032 * not overridden, since it has been
1034 1033 * incremented by the fragmentation routine.
1035 1034 *
1036 1035 * FIXME: check if the fragmentation threshold exceeds
1037 1036 * IEEE80211 max.
1038 1037 */
1039 1038 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1040 1039
1041 1040 *(uint16_t *)&wh->i_seq[0] =
1042 1041 LE_16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1043 1042 bf->bf_seqno = tid->seq_next;
1044 1043 /* LINTED E_CONSTANT_CONDITION */
1045 1044 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1046 1045
1047 1046 /* release node */
1048 1047 ieee80211_free_node(in);
1049 1048 }
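
Reviewer note: the override above packs the sequence number into the
high 12 bits of i_seq, leaving the low nibble for the fragment number.
A standalone illustration, assuming the usual net80211 value
IEEE80211_SEQ_SEQ_SHIFT == 4 and a little-endian LE_16:

#include <stdio.h>
#include <stdint.h>

#define	IEEE80211_SEQ_SEQ_SHIFT	4	/* low nibble: fragment number */

int
main(void)
{
	uint16_t seq = 5;
	uint16_t v = (uint16_t)(seq << IEEE80211_SEQ_SEQ_SHIFT); /* 0x0050 */
	uint8_t i_seq[2] = { v & 0xff, v >> 8 };	/* wire order */

	printf("%02x %02x\n", i_seq[0], i_seq[1]);	/* 50 00 */
	return (0);
}
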
1050 1049
1051 1050 /* Compute the number of bad frames */
1052 1051 /* ARGSUSED */
1053 1052 static int
1054 1053 arn_tx_num_badfrms(struct arn_softc *sc, struct ath_buf *bf, int txok)
1055 1054 {
1056 1055 struct ath_buf *bf_last = bf->bf_lastbf;
1057 1056 struct ath_desc *ds = bf_last->bf_desc;
1058 1057 uint16_t seq_st = 0;
1059 1058 uint32_t ba[WME_BA_BMP_SIZE >> 5];
1060 1059 int ba_index;
1061 1060 int nbad = 0;
1062 1061 int isaggr = 0;
1063 1062
1064 1063 if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
1065 1064 return (0);
1066 1065
1067 1066 isaggr = bf_isaggr(bf);
1068 1067 if (isaggr) {
1069 1068 seq_st = ATH_DS_BA_SEQ(ds);
1070 1069 memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
1071 1070 }
1072 1071
1073 1072 while (bf) {
1074 1073 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
1075 1074 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
1076 1075 nbad++;
1077 1076
1078 1077 bf = bf->bf_next;
1079 1078 }
1080 1079
1081 1080 return (nbad);
1082 1081 }
1083 1082
1084 1083 static void
1085 1084 arn_tx_send_ht_normal(struct arn_softc *sc,
1086 1085 struct ath_txq *txq,
1087 1086 struct ath_atx_tid *tid,
1088 1087 list_t *list)
1089 1088 {
1090 1089 struct ath_buf *bf;
1091 1090
1092 1091 bf = list_head(list);
1093 1092 bf->bf_state.bf_type &= ~BUF_AMPDU;
1094 1093
1095 1094 /* update starting sequence number for subsequent ADDBA request */
1096 1095 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
1097 1096
1098 1097 bf->bf_nframes = 1;
1099 1098 bf->bf_lastbf = bf;
1100 1099 ath_buf_set_rate(sc, bf);
1101 1100 arn_tx_txqaddbuf(sc, txq, list);
1102 1101 }
1103 1102
1104 1103 /*
1105 1104 * Insert a chain of ath_buf (descriptors) on a txq and
1106 1105 * assume the descriptors are already chained together by caller.
1107 1106 */
1108 1107 static void
1109 1108 arn_tx_txqaddbuf(struct arn_softc *sc,
1110 1109 struct ath_txq *txq,
1111 1110 list_t *list)
1112 1111 {
1113 1112 struct ath_buf *bf;
1114 1113
1115 1114 /*
1116 1115 * Insert the frame on the outbound list and
1117 1116 * pass it on to the hardware.
1118 1117 */
1119 1118
1120 1119 if (list_empty(list))
1121 1120 return;
1122 1121
1123 1122 bf = list_head(list);
1124 1123
1125 1124 list_splice_tail_init(list, &txq->axq_q);
1126 1125
1127 1126 txq->axq_depth++;
1128 1127 txq->axq_totalqueued++;
1129 1128 txq->axq_linkbuf = list_object(list, txq->axq_q.prev);
1130 1129
1131 1130 ARN_DBG((ARN_DBG_QUEUE,
1132 1131 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth));
1133 1132
1134 1133 if (txq->axq_link == NULL) {
1135 1134 ath9k_hw_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
1136 1135 ARN_DBG((ARN_DBG_XMIT,
1137 1136 "TXDP[%u] = %llx (%p)\n",
1138 1137 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc));
1139 1138 } else {
1140 1139 *txq->axq_link = bf->bf_daddr;
1141 1140 ARN_DBG((ARN_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
1142 1141 txq->axq_qnum, txq->axq_link,
1143 1142 ito64(bf->bf_daddr), bf->bf_desc));
1144 1143 }
1145 1144 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
1146 1145 ath9k_hw_txstart(sc->sc_ah, txq->axq_qnum);
1147 1146 }
1148 1147 #endif /* ARN_TX_AGGREGATION */
1149 1148
1150 -/*
1151 - * ath_pkt_dur - compute packet duration (NB: not NAV)
1152 - * rix - rate index
1153 - * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1154 - * width - 0 for 20 MHz, 1 for 40 MHz
1155 - * half_gi - to use 4us v/s 3.6 us for symbol time
1156 - */
1157 -
1158 -static uint32_t
1159 -/* LINTED E_STATIC_UNUSED */
1160 -arn_pkt_duration(struct arn_softc *sc, uint8_t rix, struct ath_buf *bf,
1161 - int width, int half_gi, boolean_t shortPreamble)
1162 -{
1163 - struct ath_rate_table *rate_table = sc->sc_currates;
1164 - uint32_t nbits, nsymbits, duration, nsymbols;
1165 - uint8_t rc;
1166 - int streams, pktlen;
1167 -
1168 - pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
1169 - rc = rate_table->info[rix].ratecode;
1170 -
1171 - /* for legacy rates, use old function to compute packet duration */
1172 - if (!IS_HT_RATE(rc))
1173 - return (ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen,
1174 - rix, shortPreamble));
1175 -
1176 - /* find number of symbols: PLCP + data */
1177 - nbits = (pktlen << 3) + OFDM_PLCP_BITS;
1178 - nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
1179 - nsymbols = (nbits + nsymbits - 1) / nsymbits;
1180 -
1181 - if (!half_gi)
1182 - duration = SYMBOL_TIME(nsymbols);
1183 - else
1184 - duration = SYMBOL_TIME_HALFGI(nsymbols);
1185 -
1186 - /* addup duration for legacy/ht training and signal fields */
1187 - streams = HT_RC_2_STREAMS(rc);
1188 - duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
1189 -
1190 - return (duration);
1191 -}
1192 -
1193 1149 static struct ath_buf *
1194 1150 arn_tx_get_buffer(struct arn_softc *sc)
1195 1151 {
1196 1152 struct ath_buf *bf = NULL;
1197 1153
1198 1154 mutex_enter(&sc->sc_txbuflock);
1199 1155 bf = list_head(&sc->sc_txbuf_list);
1200 1156 /* Check if a tx buffer is available */
1201 1157 if (bf != NULL)
1202 1158 list_remove(&sc->sc_txbuf_list, bf);
1203 1159 if (list_empty(&sc->sc_txbuf_list)) {
1204 1160 ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx(): "
1205 1161 "stop queue\n"));
1206 1162 sc->sc_stats.ast_tx_qstop++;
1207 1163 }
1208 1164 mutex_exit(&sc->sc_txbuflock);
1209 1165
1210 1166 return (bf);
1211 1167 }
1212 1168
1213 1169 static uint32_t
1214 1170 setup_tx_flags(struct arn_softc *sc,
1215 1171 struct ieee80211_frame *wh,
1216 1172 uint32_t pktlen)
1217 1173 {
1218 1174 int flags = 0;
1219 1175 ieee80211com_t *ic = (ieee80211com_t *)sc;
1220 1176
1221 1177 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1222 1178 flags |= ATH9K_TXDESC_INTREQ;
1223 1179
1224 1180 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1225 1181 flags |= ATH9K_TXDESC_NOACK; /* no ack on broad/multicast */
1226 1182 sc->sc_stats.ast_tx_noack++;
1227 1183 }
1228 1184 if (pktlen > ic->ic_rtsthreshold) {
1229 1185 flags |= ATH9K_TXDESC_RTSENA; /* RTS based on frame length */
1230 1186 sc->sc_stats.ast_tx_rts++;
1231 1187 }
1232 1188
1233 1189 return (flags);
1234 1190 }
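
Reviewer note: a quick illustration of the flag selection above. Every
frame gets CLRDMASK and INTREQ; multicast destinations add NOACK, and
anything longer than the RTS threshold adds RTSENA. For a 2000-byte
unicast frame with ic_rtsthreshold = 512, the result is
ATH9K_TXDESC_CLRDMASK | ATH9K_TXDESC_INTREQ | ATH9K_TXDESC_RTSENA.
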
1235 1191
1236 1192 static void
1237 1193 ath_tx_setup_buffer(struct arn_softc *sc, struct ath_buf *bf,
1238 1194 struct ieee80211_node *in, struct ieee80211_frame *wh,
1239 1195 uint32_t pktlen, uint32_t keytype)
1240 1196 {
1241 1197 ieee80211com_t *ic = (ieee80211com_t *)sc;
1242 1198 int i;
1243 1199
1244 1200 /* Buf reset */
1245 1201 ATH_TXBUF_RESET(bf);
1246 1202 for (i = 0; i < 4; i++) {
1247 1203 bf->rates[i].idx = -1;
1248 1204 bf->rates[i].flags = 0;
1249 1205 bf->rates[i].count = 1;
1250 1206 }
1251 1207
1252 1208 bf->bf_in = in;
1253 1209 /* LINTED E_ASSIGN_NARROW_CONV */
1254 1210 bf->bf_frmlen = pktlen;
1255 1211
1256 1212 /* Frame type */
1257 1213 IEEE80211_IS_DATA(wh) ?
1258 1214 (bf->bf_state.bf_type |= BUF_DATA) :
1259 1215 (bf->bf_state.bf_type &= ~BUF_DATA);
1260 1216 IEEE80211_IS_BACK_REQ(wh) ?
1261 1217 (bf->bf_state.bf_type |= BUF_BAR) :
1262 1218 (bf->bf_state.bf_type &= ~BUF_BAR);
1263 1219 IEEE80211_IS_PSPOLL(wh) ?
1264 1220 (bf->bf_state.bf_type |= BUF_PSPOLL) :
1265 1221 (bf->bf_state.bf_type &= ~BUF_PSPOLL);
1266 1222 /*
1267 1223 * The 802.11 layer marks whether or not we should
1268 1224 * use short preamble based on the current mode and
1269 1225 * negotiated parameters.
1270 1226 */
1271 1227 ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
1272 1228 (in->in_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) ?
1273 1229 (bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
1274 1230 (bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
1275 1231
1276 1232 bf->bf_flags = setup_tx_flags(sc, wh, pktlen);
1277 1233
1278 1234 /* Crypto */
1279 1235 bf->bf_keytype = keytype;
1280 1236
1281 1237 	/* Assign seqno, tidno for tx aggregation */
1282 1238
1283 1239 #ifdef ARN_TX_AGGREGATION
1284 1240 if (ieee80211_is_data_qos(wh) && (sc->sc_flags & SC_OP_TXAGGR))
1285 1241 assign_aggr_tid_seqno(sc, bf, wh);
1286 1242 #endif /* ARN_TX_AGGREGATION */
1287 1243
1288 1244 }
1289 1245
1290 1246 /*
1291 1247 * ath_pkt_dur - compute packet duration (NB: not NAV)
1292 1248 *
1293 1249 * rix - rate index
1294 1250 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1295 1251 * width - 0 for 20 MHz, 1 for 40 MHz
1296 1252 * half_gi - to use 4us v/s 3.6 us for symbol time
1297 1253 */
1298 1254 static uint32_t
1299 1255 ath_pkt_duration(struct arn_softc *sc, uint8_t rix, struct ath_buf *bf,
1300 1256 int width, int half_gi, boolean_t shortPreamble)
1301 1257 {
1302 1258 struct ath_rate_table *rate_table = sc->sc_currates;
1303 1259 uint32_t nbits, nsymbits, duration, nsymbols;
1304 1260 uint8_t rc;
1305 1261 int streams, pktlen;
1306 1262
1307 1263 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
1308 1264 rc = rate_table->info[rix].ratecode;
1309 1265
1310 1266 /* for legacy rates, use old function to compute packet duration */
1311 1267 if (!IS_HT_RATE(rc))
1312 1268 return (ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen,
1313 1269 rix, shortPreamble));
1314 1270
1315 1271 /* find number of symbols: PLCP + data */
1316 1272 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
1317 1273 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
1318 1274 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1319 1275
1320 1276 if (!half_gi)
1321 1277 duration = SYMBOL_TIME(nsymbols);
1322 1278 else
1323 1279 duration = SYMBOL_TIME_HALFGI(nsymbols);
1324 1280
1325 1281 /* addup duration for legacy/ht training and signal fields */
1326 1282 streams = HT_RC_2_STREAMS(rc);
1327 1283 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
1328 1284
1329 1285 return (duration);
1330 1286 }
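
Reviewer note: a worked HT duration, following the arithmetic above. For
a 1500-byte frame at MCS 7 / 20 MHz / full GI / one spatial stream:
12022 bits over 260-bit symbols is 47 symbols (188 us), plus 36 us of
legacy/HT training and signal fields:

#include <stdio.h>

int
main(void)
{
	int nbits = (1500 << 3) + 22;	/* payload bits + OFDM_PLCP_BITS */
	int nsymbits = 260;		/* bits_per_symbol[7][0] */
	int nsymbols = (nbits + nsymbits - 1) / nsymbits;	/* 47 */
	int duration = nsymbols << 2;	/* SYMBOL_TIME: 188 us */

	/* L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(1 stream) */
	duration += 8 + 8 + 4 + 8 + 4 + 4;
	printf("%d us\n", duration);	/* 224 us */
	return (0);
}
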
1331 1287
1332 1288 /* Rate module function to set rate related fields in tx descriptor */
1333 1289 static void
1334 1290 ath_buf_set_rate(struct arn_softc *sc,
1335 -struct ath_buf *bf,
1336 -struct ieee80211_frame *wh)
1291 + struct ath_buf *bf,
1292 + struct ieee80211_frame *wh)
1337 1293 {
1338 1294 struct ath_hal *ah = sc->sc_ah;
1339 1295 struct ath_rate_table *rt;
1340 1296 struct ath_desc *ds = bf->bf_desc;
1341 1297 	struct ath_desc *lastds = bf->bf_desc;	/* temp workaround */
1342 1298 struct ath9k_11n_rate_series series[4];
1343 1299 struct ath9k_tx_rate *rates;
1344 1300 int i, flags, rtsctsena = 0;
1345 1301 uint32_t ctsduration = 0;
1346 1302 uint8_t rix = 0, cix, ctsrate = 0;
1347 1303
1348 1304 (void) memset(series, 0, sizeof (struct ath9k_11n_rate_series) * 4);
1349 1305
1350 1306 rates = bf->rates;
1351 1307
1352 1308 if (IEEE80211_HAS_MOREFRAGS(wh) ||
1353 1309 wh->i_seq[0] & IEEE80211_SEQ_FRAG_MASK) {
1354 1310 rates[1].count = rates[2].count = rates[3].count = 0;
1355 1311 rates[1].idx = rates[2].idx = rates[3].idx = 0;
1356 1312 rates[0].count = ATH_TXMAXTRY;
1357 1313 }
1358 1314
1359 1315 /* get the cix for the lowest valid rix */
1360 1316 rt = sc->sc_currates;
1361 1317 for (i = 3; i >= 0; i--) {
1362 1318 if (rates[i].count && (rates[i].idx >= 0)) {
1363 1319 rix = rates[i].idx;
1364 1320 break;
1365 1321 }
1366 1322 }
1367 1323
1368 1324 flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
1369 1325 cix = rt->info[rix].ctrl_rate;
1370 1326
1371 1327 /*
1372 1328 * If 802.11g protection is enabled, determine whether to use RTS/CTS or
1373 1329 * just CTS. Note that this is only done for OFDM/HT unicast frames.
1374 1330 */
1375 1331 if (sc->sc_protmode != PROT_M_NONE &&
1376 1332 !(bf->bf_flags & ATH9K_TXDESC_NOACK) &&
1377 1333 (rt->info[rix].phy == WLAN_RC_PHY_OFDM ||
1378 1334 WLAN_RC_PHY_HT(rt->info[rix].phy))) {
1379 1335 if (sc->sc_protmode == PROT_M_RTSCTS)
1380 1336 flags = ATH9K_TXDESC_RTSENA;
1381 1337 else if (sc->sc_protmode == PROT_M_CTSONLY)
1382 1338 flags = ATH9K_TXDESC_CTSENA;
1383 1339
1384 1340 cix = rt->info[sc->sc_protrix].ctrl_rate;
1385 1341 rtsctsena = 1;
1386 1342 }
1387 1343
1388 1344 /*
1389 1345 * For 11n, the default behavior is to enable RTS for hw retried frames.
1390 1346 * We enable the global flag here and let rate series flags determine
1391 1347 * which rates will actually use RTS.
1392 1348 */
1393 1349 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
1394 1350 /* 802.11g protection not needed, use our default behavior */
1395 1351 if (!rtsctsena)
1396 1352 flags = ATH9K_TXDESC_RTSENA;
1397 1353 }
1398 1354
1399 1355 /* Set protection if aggregate protection on */
1400 1356 if (sc->sc_config.ath_aggr_prot &&
1401 1357 (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
1402 1358 flags = ATH9K_TXDESC_RTSENA;
1403 1359 cix = rt->info[sc->sc_protrix].ctrl_rate;
1404 1360 rtsctsena = 1;
1405 1361 }
1406 1362
1407 1363 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1408 1364 if (bf_isaggr(bf) && (bf->bf_al > ah->ah_caps.rts_aggr_limit))
1409 1365 flags &= ~(ATH9K_TXDESC_RTSENA);
1410 1366
1411 1367 /*
1412 1368 * CTS transmit rate is derived from the transmit rate by looking in the
1413 1369 * h/w rate table. We must also factor in whether or not a short
1414 1370 * preamble is to be used. NB: cix is set above where RTS/CTS is enabled
1415 1371 */
1416 1372 ctsrate = rt->info[cix].ratecode |
1417 1373 (bf_isshpreamble(bf) ? rt->info[cix].short_preamble : 0);
1418 1374
1419 1375 for (i = 0; i < 4; i++) {
1420 1376 if (!rates[i].count || (rates[i].idx < 0))
1421 1377 continue;
1422 1378
1423 1379 rix = rates[i].idx;
1424 1380
1425 1381 series[i].Rate = rt->info[rix].ratecode |
1426 1382 (bf_isshpreamble(bf) ?
1427 1383 rt->info[rix].short_preamble : 0);
1428 1384
1429 1385 series[i].Tries = rates[i].count;
1430 1386
1431 1387 series[i].RateFlags =
1432 1388 ((rates[i].flags & ATH9K_TX_RC_USE_RTS_CTS) ?
1433 1389 ATH9K_RATESERIES_RTS_CTS : 0) |
1434 1390 ((rates[i].flags & ATH9K_TX_RC_40_MHZ_WIDTH) ?
1435 1391 ATH9K_RATESERIES_2040 : 0) |
1436 1392 ((rates[i].flags & ATH9K_TX_RC_SHORT_GI) ?
1437 1393 ATH9K_RATESERIES_HALFGI : 0);
1438 1394
1439 1395 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
1440 1396 (rates[i].flags & ATH9K_TX_RC_40_MHZ_WIDTH) != 0,
1441 1397 (rates[i].flags & ATH9K_TX_RC_SHORT_GI),
1442 1398 bf_isshpreamble(bf));
1443 1399
1444 1400 series[i].ChSel = sc->sc_tx_chainmask;
1445 1401
1446 1402 if (rtsctsena)
1447 1403 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1448 1404
1449 1405 ARN_DBG((ARN_DBG_RATE,
1450 1406 "series[%d]--flags & ATH9K_TX_RC_USE_RTS_CTS = %08x"
1451 1407 "--flags & ATH9K_TX_RC_40_MHZ_WIDTH = %08x"
1452 1408 "--flags & ATH9K_TX_RC_SHORT_GI = %08x\n",
1453 1409 rates[i].flags & ATH9K_TX_RC_USE_RTS_CTS,
1454 1410 rates[i].flags & ATH9K_TX_RC_40_MHZ_WIDTH,
1455 1411 rates[i].flags & ATH9K_TX_RC_SHORT_GI));
1456 1412
1457 1413 ARN_DBG((ARN_DBG_RATE,
1458 1414 "series[%d]:"
1459 1415 "dot11rate:%d"
1460 1416 "index:%d"
1461 1417 "retry count:%d\n",
1462 1418 i,
1463 1419 (rt->info[rates[i].idx].ratekbps)/1000,
1464 1420 rates[i].idx,
1465 1421 rates[i].count));
1466 1422 }
1467 1423
1468 1424 /* set dur_update_en for l-sig computation except for PS-Poll frames */
1469 1425 ath9k_hw_set11n_ratescenario(ah, ds, lastds, !bf_ispspoll(bf),
1470 1426 ctsrate, ctsduration,
1471 1427 series, 4, flags);
1472 1428
1473 1429 if (sc->sc_config.ath_aggr_prot && flags)
1474 1430 ath9k_hw_set11n_burstduration(ah, ds, 8192);
1475 1431 }
1476 1432
1477 1433 static void
1478 1434 ath_tx_complete(struct arn_softc *sc, struct ath_buf *bf,
1479 1435 struct ath_xmit_status *tx_status)
1480 1436 {
1481 1437 boolean_t is_data = bf_isdata(bf);
1482 1438
1483 1439 ARN_DBG((ARN_DBG_XMIT, "TX complete\n"));
1484 1440
1485 1441 if (tx_status->flags & ATH_TX_BAR)
1486 1442 tx_status->flags &= ~ATH_TX_BAR;
1487 1443
1488 1444 bf->rates[0].count = tx_status->retries + 1;
1489 1445
1490 1446 arn_tx_status(sc, bf, is_data);
1491 1447 }
1492 1448
1493 1449 /* To complete a chain of buffers associated a frame */
1494 1450 static void
1495 1451 ath_tx_complete_buf(struct arn_softc *sc, struct ath_buf *bf,
1496 1452 int txok, int sendbar)
1497 1453 {
1498 1454 struct ath_xmit_status tx_status;
1499 1455
1500 1456 /*
1501 1457 * Set retry information.
1502 1458 * NB: Don't use the information in the descriptor, because the frame
1503 1459 * could be software retried.
1504 1460 */
1505 1461 tx_status.retries = bf->bf_retries;
1506 1462 tx_status.flags = 0;
1507 1463
1508 1464 if (sendbar)
1509 1465 tx_status.flags = ATH_TX_BAR;
1510 1466
1511 1467 if (!txok) {
1512 1468 tx_status.flags |= ATH_TX_ERROR;
1513 1469
1514 1470 if (bf_isxretried(bf))
1515 1471 tx_status.flags |= ATH_TX_XRETRY;
1516 1472 }
1517 1473
1518 1474 /* complete this frame */
1519 1475 ath_tx_complete(sc, bf, &tx_status);
1520 1476
1521 1477 /*
1522 1478 * Return the list of ath_buf of this mpdu to free queue
1523 1479 */
1524 1480 }
1525 1481
1526 1482 static void
1527 1483 arn_tx_stopdma(struct arn_softc *sc, struct ath_txq *txq)
1528 1484 {
1529 1485 struct ath_hal *ah = sc->sc_ah;
1530 1486
1531 1487 (void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1532 1488
1533 1489 	ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx_stopdma(): "
1534 1490 "tx queue [%u] %x, link %p\n",
1535 1491 txq->axq_qnum,
1536 1492 ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link));
1537 1493
1538 1494 }
1539 1495
1540 1496 /* Drain only the data queues */
1541 1497 /* ARGSUSED */
1542 1498 static void
1543 1499 arn_drain_txdataq(struct arn_softc *sc, boolean_t retry_tx)
1544 1500 {
1545 1501 struct ath_hal *ah = sc->sc_ah;
1546 1502 int i, status, npend = 0;
1547 1503
1548 1504 if (!(sc->sc_flags & SC_OP_INVALID)) {
1549 1505 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1550 1506 if (ARN_TXQ_SETUP(sc, i)) {
1551 1507 arn_tx_stopdma(sc, &sc->sc_txq[i]);
1552 1508 /*
1553 1509 * The TxDMA may not really be stopped.
1554 1510 * Double check the hal tx pending count
1555 1511 */
1556 1512 npend += ath9k_hw_numtxpending(ah,
1557 1513 sc->sc_txq[i].axq_qnum);
1558 1514 }
1559 1515 }
1560 1516 }
1561 1517
1562 1518 if (npend) {
1563 1519 /* TxDMA not stopped, reset the hal */
1564 1520 ARN_DBG((ARN_DBG_XMIT, "arn: arn_drain_txdataq(): "
1565 1521 "Unable to stop TxDMA. Reset HAL!\n"));
1566 1522
1567 1523 if (!ath9k_hw_reset(ah,
1568 1524 sc->sc_ah->ah_curchan,
1569 1525 sc->tx_chan_width,
1570 1526 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1571 1527 sc->sc_ht_extprotspacing, B_TRUE, &status)) {
1572 1528 ARN_DBG((ARN_DBG_FATAL, "arn: arn_drain_txdataq(): "
1573 1529 "unable to reset hardware; hal status %u\n",
1574 1530 status));
1575 1531 }
1576 1532 }
1577 1533
1578 1534 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1579 1535 if (ARN_TXQ_SETUP(sc, i))
1580 1536 arn_tx_draintxq(sc, &sc->sc_txq[i]);
1581 1537 }
1582 1538 }
1583 1539
1584 1540 /* Setup a h/w transmit queue */
1585 1541 struct ath_txq *
1586 1542 arn_txq_setup(struct arn_softc *sc, int qtype, int subtype)
1587 1543 {
1588 1544 struct ath_hal *ah = sc->sc_ah;
1589 1545 struct ath9k_tx_queue_info qi;
1590 1546 int qnum;
1591 1547
1592 1548 (void) memset(&qi, 0, sizeof (qi));
1593 1549 qi.tqi_subtype = subtype;
1594 1550 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1595 1551 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1596 1552 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1597 1553 qi.tqi_physCompBuf = 0;
1598 1554
1599 1555 /*
1600 1556 * Enable interrupts only for EOL and DESC conditions.
1601 1557 * We mark tx descriptors to receive a DESC interrupt
1602 1558 * when a tx queue gets deep; otherwise waiting for the
1603 1559 * EOL to reap descriptors. Note that this is done to
1604 1560 * reduce interrupt load and this only defers reaping
1605 1561 * descriptors, never transmitting frames. Aside from
1606 1562 * reducing interrupts this also permits more concurrency.
1607 1563 * The only potential downside is if the tx queue backs
1608 1564 	 * up in which case the top half of the kernel may back up
1609 1565 * due to a lack of tx descriptors.
1610 1566 *
1611 1567 * The UAPSD queue is an exception, since we take a desc-
1612 1568 * based intr on the EOSP frames.
1613 1569 */
1614 1570 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1615 1571 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1616 1572 else
1617 1573 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1618 1574 TXQ_FLAG_TXDESCINT_ENABLE;
1619 1575 qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1620 1576 if (qnum == -1) {
1621 1577 /*
1622 1578 * NB: don't print a message, this happens
1623 1579 * normally on parts with too few tx queues
1624 1580 */
1625 1581 return (NULL);
1626 1582 }
1627 1583 if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
1628 1584 ARN_DBG((ARN_DBG_FATAL, "arn: arn_txq_setup(): "
1629 1585 "hal qnum %u out of range, max %u!\n",
1630 1586 qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq)));
1631 1587 (void) ath9k_hw_releasetxqueue(ah, qnum);
1632 1588 return (NULL);
1633 1589 }
1634 1590 if (!ARN_TXQ_SETUP(sc, qnum)) {
1635 1591 struct ath_txq *txq = &sc->sc_txq[qnum];
1636 1592
1637 1593 txq->axq_qnum = qnum;
1638 1594 txq->axq_intrcnt = 0; /* legacy */
1639 1595 txq->axq_link = NULL;
1640 1596
1641 1597 list_create(&txq->axq_list, sizeof (struct ath_buf),
1642 1598 offsetof(struct ath_buf, bf_node));
1643 1599 list_create(&txq->axq_acq, sizeof (struct ath_buf),
1644 1600 offsetof(struct ath_buf, bf_node));
1645 1601 mutex_init(&txq->axq_lock, NULL, MUTEX_DRIVER, NULL);
1646 1602
1647 1603 txq->axq_depth = 0;
1648 1604 txq->axq_aggr_depth = 0;
1649 1605 txq->axq_totalqueued = 0;
1650 1606 txq->axq_linkbuf = NULL;
1651 1607 sc->sc_txqsetup |= 1<<qnum;
1652 1608 }
1653 1609 return (&sc->sc_txq[qnum]);
1654 1610 }
1655 1611
1656 1612 /* Reclaim resources for a setup queue */
1657 1613
1658 1614 void
1659 1615 arn_tx_cleanupq(struct arn_softc *sc, struct ath_txq *txq)
1660 1616 {
1661 1617 (void) ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1662 1618 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
1663 1619 }
1664 1620
1665 1621 /*
1666 1622 * Setup a hardware data transmit queue for the specified
1667 1623  * access category. The hal may not support all requested
1668 1624 * queues in which case it will return a reference to a
1669 1625 * previously setup queue. We record the mapping from ac's
1670 1626 * to h/w queues for use by arn_tx_start and also track
1671 1627 * the set of h/w queues being used to optimize work in the
1672 1628 * transmit interrupt handler and related routines.
1673 1629 */
1674 1630
1675 1631 int
1676 1632 arn_tx_setup(struct arn_softc *sc, int haltype)
1677 1633 {
1678 1634 struct ath_txq *txq;
1679 1635
1680 1636 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
1681 1637 ARN_DBG((ARN_DBG_FATAL, "arn: arn_tx_setup(): "
1682 1638 "HAL AC %u out of range, max %zu!\n",
1683 1639 haltype, ARRAY_SIZE(sc->sc_haltype2q)));
1684 1640 return (0);
1685 1641 }
1686 1642 txq = arn_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
1687 1643 if (txq != NULL) {
1688 1644 sc->sc_haltype2q[haltype] = txq->axq_qnum;
1689 1645 return (1);
1690 1646 } else
1691 1647 return (0);
1692 1648 }
1693 1649
1694 1650 void
1695 1651 arn_tx_draintxq(struct arn_softc *sc, struct ath_txq *txq)
1696 1652 {
1697 1653 struct ath_buf *bf;
1698 1654
1699 1655 /*
1700 1656 * This assumes output has been stopped.
1701 1657 */
1702 1658 for (;;) {
1703 1659 mutex_enter(&txq->axq_lock);
1704 1660 bf = list_head(&txq->axq_list);
1705 1661 if (bf == NULL) {
1706 1662 txq->axq_link = NULL;
1707 1663 mutex_exit(&txq->axq_lock);
1708 1664 break;
1709 1665 }
1710 1666 list_remove(&txq->axq_list, bf);
1711 1667 mutex_exit(&txq->axq_lock);
1712 1668 bf->bf_in = NULL;
1713 1669 mutex_enter(&sc->sc_txbuflock);
1714 1670 list_insert_tail(&sc->sc_txbuf_list, bf);
1715 1671 mutex_exit(&sc->sc_txbuflock);
1716 1672 }
1717 1673 }
1718 1674
1719 1675 /* Drain the transmit queues and reclaim resources */
1720 1676
1721 1677 void
1722 1678 arn_draintxq(struct arn_softc *sc, boolean_t retry_tx)
1723 1679 {
1724 1680 /*
1725 1681 * stop beacon queue. The beacon will be freed when
1726 1682 * we go to INIT state
1727 1683 */
1728 1684 if (!(sc->sc_flags & SC_OP_INVALID)) {
1729 1685 (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_beaconq);
1730 1686 ARN_DBG((ARN_DBG_XMIT, "arn: arn_draintxq(): "
1731 1687 "beacon queue %x\n",
1732 1688 ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_beaconq)));
1733 1689 }
1734 1690
1735 1691 arn_drain_txdataq(sc, retry_tx);
1736 1692 }
1737 1693
1738 1694 uint32_t
1739 1695 arn_txq_depth(struct arn_softc *sc, int qnum)
1740 1696 {
1741 1697 return (sc->sc_txq[qnum].axq_depth);
1742 1698 }
1743 1699
1744 1700 uint32_t
1745 1701 arn_txq_aggr_depth(struct arn_softc *sc, int qnum)
1746 1702 {
1747 1703 return (sc->sc_txq[qnum].axq_aggr_depth);
1748 1704 }
1749 1705
1750 1706 /* Update parameters for a transmit queue */
1751 1707 int
1752 1708 arn_txq_update(struct arn_softc *sc, int qnum,
1753 1709 struct ath9k_tx_queue_info *qinfo)
1754 1710 {
1755 1711 struct ath_hal *ah = sc->sc_ah;
1756 1712 int error = 0;
1757 1713 struct ath9k_tx_queue_info qi;
1758 1714
1759 1715 if (qnum == sc->sc_beaconq) {
1760 1716 /*
1761 1717 * XXX: for beacon queue, we just save the parameter.
1762 1718 	 * It will be picked up by arn_beaconq_config() when
1763 1719 	 * necessary.
1764 1720 */
1765 1721 sc->sc_beacon_qi = *qinfo;
1766 1722 return (0);
1767 1723 }
1768 1724
1769 1725 ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);
1770 1726
1771 1727 (void) ath9k_hw_get_txq_props(ah, qnum, &qi);
1772 1728 qi.tqi_aifs = qinfo->tqi_aifs;
1773 1729 qi.tqi_cwmin = qinfo->tqi_cwmin;
1774 1730 qi.tqi_cwmax = qinfo->tqi_cwmax;
1775 1731 qi.tqi_burstTime = qinfo->tqi_burstTime;
1776 1732 qi.tqi_readyTime = qinfo->tqi_readyTime;
1777 1733
1778 1734 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
1779 1735 ARN_DBG((ARN_DBG_FATAL,
1780 1736 "Unable to update hardware queue %u!\n", qnum));
1781 1737 error = -EIO;
1782 1738 } else {
1783 1739 (void) ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
1784 1740 }
1785 1741
1786 1742 return (error);
1787 1743 }
1788 1744
1789 1745 int
1790 1746 ath_cabq_update(struct arn_softc *sc)
1791 1747 {
1792 1748 struct ath9k_tx_queue_info qi;
1793 1749 int qnum = sc->sc_cabq->axq_qnum;
1794 1750 struct ath_beacon_config conf;
1795 1751
1796 1752 (void) ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1797 1753 /*
1798 1754 * Ensure the readytime % is within the bounds.
1799 1755 */
1800 1756 if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1801 1757 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1802 1758 else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1803 1759 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
1804 1760
1805 1761 arn_get_beaconconfig(sc, &conf);
1806 1762 qi.tqi_readyTime =
1807 1763 (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
1808 1764 (void) arn_txq_update(sc, qnum, &qi);
1809 1765
1810 1766 return (0);
1811 1767 }
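
As a worked example of the readytime computation above (illustrative numbers,
not taken from the source): with conf.beacon_interval = 100 and
sc_config.cabqReadytime clamped to 10 (percent), tqi_readyTime =
(100 * 10) / 100 = 10, i.e. the CAB queue is granted 10% of each beacon
interval.
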
1812 1768
1813 1769 static uint32_t
1814 1770 arn_tx_get_keytype(const struct ieee80211_cipher *cip)
1815 1771 {
1816 1772 uint32_t index;
1817 1773 static const uint8_t ciphermap[] = {
1818 1774 ATH9K_CIPHER_WEP, /* IEEE80211_CIPHER_WEP */
1819 1775 ATH9K_CIPHER_TKIP, /* IEEE80211_CIPHER_TKIP */
1820 1776 ATH9K_CIPHER_AES_OCB, /* IEEE80211_CIPHER_AES_OCB */
1821 1777 ATH9K_CIPHER_AES_CCM, /* IEEE80211_CIPHER_AES_CCM */
1822 1778 ATH9K_CIPHER_CKIP, /* IEEE80211_CIPHER_CKIP */
1823 1779 ATH9K_CIPHER_CLR, /* IEEE80211_CIPHER_NONE */
1824 1780 };
1825 1781
1826 1782 ASSERT(cip->ic_cipher < ARRAY_SIZE(ciphermap));
1827 1783 index = cip->ic_cipher;
1828 1784
1829 1785 if (ciphermap[index] == ATH9K_CIPHER_WEP)
1830 1786 return (ATH9K_KEY_TYPE_WEP);
1831 1787 else if (ciphermap[index] == ATH9K_CIPHER_TKIP)
1832 1788 return (ATH9K_KEY_TYPE_TKIP);
1833 1789 else if (ciphermap[index] == ATH9K_CIPHER_AES_CCM)
1834 1790 return (ATH9K_KEY_TYPE_AES);
1835 1791
1836 1792 return (ATH9K_KEY_TYPE_CLEAR);
1837 1793
1839 1795 }
1840 1796 /* Display buffer */
1841 1797 void
1842 1798 arn_dump_line(unsigned char *p, uint32_t len, boolean_t isaddress,
1843 1799 uint32_t group)
1844 1800 {
1845 1801 char *pnumeric = "0123456789ABCDEF";
1846 1802 char hex[((2 + 1) * 16) + 1];
1847 1803 char *phex = hex;
1848 1804 char ascii[16 + 1];
1849 1805 char *pascii = ascii;
1850 1806 uint32_t grouped = 0;
1851 1807
1852 1808 if (isaddress) {
1853 1809 		arn_problem("arn: %p: ", (void *)p);
1854 1810 } else {
1855 1811 arn_problem("arn: ");
1856 1812 }
1857 1813
1858 1814 while (len) {
1859 1815 *phex++ = pnumeric[((uint8_t)*p) / 16];
1860 1816 *phex++ = pnumeric[((uint8_t)*p) % 16];
1861 1817 if (++grouped >= group) {
1862 1818 *phex++ = ' ';
1863 1819 grouped = 0;
1864 1820 }
1865 1821
1866 1822 *pascii++ = (*p >= 32 && *p < 128) ? *p : '.';
1867 1823
1868 1824 ++p;
1869 1825 --len;
1870 1826 }
1871 1827
1872 1828 *phex = '\0';
1873 1829 *pascii = '\0';
1874 1830
1875 1831 arn_problem("%-*s|%-*s|\n", (2 * 16) +
1876 1832 (16 / group), hex, 16, ascii);
1877 1833 }
1878 1834
1879 1835 void
1880 1836 arn_dump_pkg(unsigned char *p, uint32_t len, boolean_t isaddress,
1881 1837 uint32_t group)
1882 1838 {
1883 1839 uint32_t perline;
1884 1840 while (len) {
1885 1841 perline = (len < 16) ? len : 16;
1886 1842 arn_dump_line(p, perline, isaddress, group);
1887 1843 len -= perline;
1888 1844 p += perline;
1889 1845 }
1890 1846 }
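
For illustration, the commented-out debug hooks later in this file call the
dumper as below; with isaddress set and group = 1, each 16-byte line comes
out as a buffer address, space-separated hex pairs, and an ASCII column. The
sample output line is a sketch of the expected shape, not captured driver
output.

	arn_dump_pkg((unsigned char *)bf->bf_dma.mem_va, pktlen, 1, 1);

	arn: 0806b000: 48 65 6C 6C 6F 20 77 6F 72 6C 64 21 00 00 00 00 |Hello world!....|
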
1891 1847
1892 1848 /*
1893 1849  * The input parameter mp is assumed to be laid out as follows:
1894 1850  * for data packets, the GLDv3 mac_wifi plugin allocates and fills in
1895 1851  * the ieee80211 header; for management packets, net80211 allocates
1896 1852  * and fills in the ieee80211 header. In both cases, enough space is
1897 1853  * left in the header for the encryption option.
1898 1854 */
1899 1855 static int32_t
1900 1856 arn_tx_start(struct arn_softc *sc, struct ieee80211_node *in,
1901 1857 struct ath_buf *bf, mblk_t *mp)
1902 1858 {
1903 1859 ieee80211com_t *ic = (ieee80211com_t *)sc;
1904 1860 struct ieee80211_frame *wh = (struct ieee80211_frame *)mp->b_rptr;
1905 1861 struct ath_hal *ah = sc->sc_ah;
1906 1862 struct ath_node *an;
1907 1863 struct ath_desc *ds;
1908 1864 struct ath_txq *txq;
1909 1865 struct ath_rate_table *rt;
1910 1866 enum ath9k_pkt_type atype;
1911 1867 	boolean_t shortPreamble = B_FALSE, is_padding = B_FALSE;
1912 1868 uint32_t subtype, keytype = ATH9K_KEY_TYPE_CLEAR;
1913 1869 int32_t keyix, iswep, hdrlen, pktlen, mblen, mbslen;
1914 1870 caddr_t dest;
1915 1871
1916 1872 /*
1917 1873 	 * The CRC is appended by the h/w, not encapsulated by the
1918 1874 	 * driver, but we must count it in the packet length.
1919 1875 */
1920 1876 pktlen = IEEE80211_CRC_LEN;
1921 1877 iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
1922 1878 keyix = ATH9K_TXKEYIX_INVALID;
1923 1879 hdrlen = ieee80211_hdrspace(ic, mp->b_rptr);
1924 1880 	if (hdrlen == 28)	/* QoS header (26 bytes) padded to 28 */
1925 1881 is_padding = B_TRUE;
1926 1882
1927 1883 if (iswep != 0) {
1928 1884 const struct ieee80211_cipher *cip;
1929 1885 struct ieee80211_key *k;
1930 1886
1931 1887 /*
1932 1888 * Construct the 802.11 header+trailer for an encrypted
1933 1889 * frame. The only reason this can fail is because of an
1934 1890 * unknown or unsupported cipher/key type.
1935 1891 */
1936 1892 k = ieee80211_crypto_encap(ic, mp);
1937 1893 if (k == NULL) {
1938 1894 ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx_start "
1939 1895 "crypto_encap failed\n"));
1940 1896 /*
1941 1897 * This can happen when the key is yanked after the
1942 1898 * frame was queued. Just discard the frame; the
1943 1899 * 802.11 layer counts failures and provides
1944 1900 * debugging/diagnostics.
1945 1901 */
1946 1902 return (EIO);
1947 1903 }
1948 1904 cip = k->wk_cipher;
1949 1905
1950 1906 keytype = arn_tx_get_keytype(cip);
1951 1907
1952 1908 /*
1953 1909 		 * Adjust the packet + header lengths for the crypto
1954 1910 		 * additions and calculate the h/w key index. When
1955 1911 		 * a s/w MIC is done, the MIC has already been appended
1956 1912 		 * to the frame before we get here, so the mblk lengths
1957 1913 		 * summed below account for it. Otherwise we need to add
1958 1914 		 * it to the packet length.
1959 1915 */
1960 1916 hdrlen += cip->ic_header;
1961 1917 pktlen += cip->ic_trailer;
1962 1918 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0)
1963 1919 pktlen += cip->ic_miclen;
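		/*
		 * Worked example (typical net80211 TKIP parameters, assumed
		 * here rather than taken from this file): ic_header = 8,
		 * ic_trailer = 4, ic_miclen = 8.  With the MIC done in h/w
		 * (IEEE80211_KEY_SWMIC clear), hdrlen grows by 8 and pktlen
		 * by 4 + 8 = 12.
		 */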
1964 1920
1965 1921 keyix = k->wk_keyix;
1966 1922
1967 1923 /* packet header may have moved, reset our local pointer */
1968 1924 wh = (struct ieee80211_frame *)mp->b_rptr;
1969 1925 }
1970 1926
1971 1927 dest = bf->bf_dma.mem_va;
1972 1928 for (; mp != NULL; mp = mp->b_cont) {
1973 1929 mblen = MBLKL(mp);
1974 1930 bcopy(mp->b_rptr, dest, mblen);
1975 1931 dest += mblen;
1976 1932 }
1977 1933 mbslen = (uintptr_t)dest - (uintptr_t)bf->bf_dma.mem_va;
1978 1934 pktlen += mbslen;
1979 1935 if (is_padding && (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
1980 1936 IEEE80211_FC0_TYPE_DATA)
1981 1937 		pktlen -= 2; /* real pkt len: drop the 2-byte pad */
1982 1938
1983 1939 /* buf setup */
1984 1940 ath_tx_setup_buffer(sc, bf, in, wh, pktlen, keytype);
1985 1941
1986 1942 /* setup descriptors */
1987 1943 ds = bf->bf_desc;
1988 1944 rt = sc->sc_currates;
1989 1945 ASSERT(rt != NULL);
1990 1946
1991 1947 arn_get_rate(sc, bf, wh);
1992 1948 an = (struct ath_node *)(in);
1993 1949
1994 1950 /*
1995 1951 * Calculate Atheros packet type from IEEE80211 packet header
1996 1952 * and setup for rate calculations.
1997 1953 */
1998 1954 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
1999 1955 case IEEE80211_FC0_TYPE_MGT:
2000 1956 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2001 1957 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
2002 1958 atype = ATH9K_PKT_TYPE_BEACON;
2003 1959 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2004 1960 atype = ATH9K_PKT_TYPE_PROBE_RESP;
2005 1961 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
2006 1962 atype = ATH9K_PKT_TYPE_ATIM;
2007 1963 else
2008 1964 atype = ATH9K_PKT_TYPE_NORMAL;
2009 1965
2010 1966 		/* force all mgmt frames to the highest queue */
2011 1967 txq = &sc->sc_txq[arn_get_hal_qnum(WME_AC_VO, sc)];
2012 1968 break;
2013 1969 case IEEE80211_FC0_TYPE_CTL:
2014 1970 atype = ATH9K_PKT_TYPE_PSPOLL;
2015 1971 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2016 1972
2017 1973 /* force all ctl frames to highest queue */
2018 1974 txq = &sc->sc_txq[arn_get_hal_qnum(WME_AC_VO, sc)];
2019 1975 break;
2020 1976 case IEEE80211_FC0_TYPE_DATA:
2021 1977 // arn_dump_pkg((unsigned char *)bf->bf_dma.mem_va,
2022 1978 // pktlen, 1, 1);
2023 1979 atype = ATH9K_PKT_TYPE_NORMAL;
2024 1980
2025 1981 		/* Always use the best-effort (BE) queue */
2026 1982 txq = &sc->sc_txq[arn_get_hal_qnum(WME_AC_BE, sc)];
2027 1983 break;
2028 1984 default:
2029 1985 /* Unknown 802.11 frame */
2030 1986 sc->sc_stats.ast_tx_invalid++;
2031 1987 return (1);
2032 1988 }
2033 1989
2034 1990 /* setup descriptor */
2035 1991 ds->ds_link = 0;
2036 1992 ds->ds_data = bf->bf_dma.cookie.dmac_address;
2037 1993
2038 1994 /*
2039 1995 * Formulate first tx descriptor with tx controls.
2040 1996 */
2041 1997 ath9k_hw_set11n_txdesc(ah, ds,
2042 1998 (pktlen), /* packet length */
2043 1999 atype, /* Atheros packet type */
2044 2000 	    MAX_RATE_POWER, /* maximum tx power */
2045 2001 	    keyix, /* h/w key index; ATH9K_TXKEYIX_INVALID if none */
2046 2002 	    keytype, /* key type; ATH9K_KEY_TYPE_CLEAR if unencrypted */
2047 2003 bf->bf_flags /* flags */);
2048 2004
2049 2005 /* LINTED E_BAD_PTR_CAST_ALIGN */
2050 2006 ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx_start(): to %s totlen=%d "
2051 2007 "an->an_tx_rate1sp=%d tx_rate2sp=%d tx_rate3sp=%d "
2052 2008 "qnum=%d sht=%d dur = %d\n",
2053 2009 ieee80211_macaddr_sprintf(wh->i_addr1), mbslen, an->an_tx_rate1sp,
2054 2010 an->an_tx_rate2sp, an->an_tx_rate3sp,
2055 2011 txq->axq_qnum, shortPreamble, *(uint16_t *)wh->i_dur));
2056 2012
2057 2013 (void) ath9k_hw_filltxdesc(ah, ds,
2058 2014 mbslen, /* segment length */
2059 2015 B_TRUE, /* first segment */
2060 2016 B_TRUE, /* last segment */
2061 2017 ds); /* first descriptor */
2062 2018
2063 2019 /* set rate related fields in tx descriptor */
2064 2020 ath_buf_set_rate(sc, bf, wh);
2065 2021
2066 2022 ARN_DMA_SYNC(bf->bf_dma, DDI_DMA_SYNC_FORDEV);
2067 2023
2068 2024 mutex_enter(&txq->axq_lock);
2069 2025 list_insert_tail(&txq->axq_list, bf);
2070 2026 if (txq->axq_link == NULL) {
2071 2027 (void) ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
2072 2028 } else {
2073 2029 *txq->axq_link = bf->bf_daddr;
2074 2030 }
2075 2031 txq->axq_link = &ds->ds_link;
2076 2032 mutex_exit(&txq->axq_lock);
2077 2033
2078 2034 // arn_dump_pkg((unsigned char *)bf->bf_dma.mem_va, pktlen, 1, 1);
2079 2035
2080 2036 (void) ath9k_hw_txstart(ah, txq->axq_qnum);
2081 2037
2082 2038 ic->ic_stats.is_tx_frags++;
2083 2039 ic->ic_stats.is_tx_bytes += pktlen;
2084 2040
2085 2041 return (0);
2086 2042 }
2087 2043
2088 2044 /*
2089 2045  * Transmit a frame. Management frames come directly from the
2090 2046  * 802.11 layer and do not honor the send queue flow control.
2091 2047  * Management frames are always consumed here; on failure with a
2092 2048  * data frame, mp is left for the caller to handle.
2093 2049  */
2094 2050 int
2095 2051 arn_tx(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
2096 2052 {
2097 2053 struct arn_softc *sc = (struct arn_softc *)ic;
2098 2054 struct ath_hal *ah = sc->sc_ah;
2099 2055 struct ieee80211_node *in = NULL;
2100 2056 struct ath_buf *bf = NULL;
2101 2057 struct ieee80211_frame *wh;
2102 2058 int error = 0;
2103 2059
2104 2060 ASSERT(mp->b_next == NULL);
2105 2061 	/* XXX: revisit this check later */
2106 2062 if (sc->sc_flags & SC_OP_INVALID) {
2107 2063 if ((type & IEEE80211_FC0_TYPE_MASK) !=
2108 2064 IEEE80211_FC0_TYPE_DATA) {
2109 2065 freemsg(mp);
2110 2066 }
2111 2067 return (ENXIO);
2112 2068 }
2113 2069
2114 2070 /* Grab a TX buffer */
2115 2071 bf = arn_tx_get_buffer(sc);
2116 2072 if (bf == NULL) {
2117 2073 ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx(): discard, "
2118 2074 "no xmit buf\n"));
2119 2075 ic->ic_stats.is_tx_nobuf++;
2120 2076 if ((type & IEEE80211_FC0_TYPE_MASK) ==
2121 2077 IEEE80211_FC0_TYPE_DATA) {
2122 2078 sc->sc_stats.ast_tx_nobuf++;
2123 2079 mutex_enter(&sc->sc_resched_lock);
2124 2080 sc->sc_resched_needed = B_TRUE;
2125 2081 mutex_exit(&sc->sc_resched_lock);
2126 2082 } else {
2127 2083 sc->sc_stats.ast_tx_nobufmgt++;
2128 2084 freemsg(mp);
2129 2085 }
2130 2086 return (ENOMEM);
2131 2087 }
2132 2088
2133 2089 wh = (struct ieee80211_frame *)mp->b_rptr;
2134 2090
2135 2091 /* Locate node */
2136 2092 in = ieee80211_find_txnode(ic, wh->i_addr1);
2137 2093 if (in == NULL) {
2138 2094 error = EIO;
2139 2095 goto bad;
2140 2096 }
2141 2097
2142 2098 in->in_inact = 0;
2143 2099 switch (type & IEEE80211_FC0_TYPE_MASK) {
2144 2100 case IEEE80211_FC0_TYPE_DATA:
2145 2101 (void) ieee80211_encap(ic, mp, in);
2146 2102 break;
2147 2103 default:
2148 2104 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2149 2105 IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
2150 2106 /* fill time stamp */
2151 2107 uint64_t tsf;
2152 2108 uint32_t *tstamp;
2153 2109
2154 2110 tsf = ath9k_hw_gettsf64(ah);
2155 2111 			/* add 100us to cover the delay to xmit */
2156 2112 tsf += 100;
2157 2113 /* LINTED E_BAD_PTR_CAST_ALIGN */
2158 2114 tstamp = (uint32_t *)&wh[1];
2159 2115 tstamp[0] = LE_32(tsf & 0xffffffff);
2160 2116 tstamp[1] = LE_32(tsf >> 32);
2161 2117 }
2162 2118 sc->sc_stats.ast_tx_mgmt++;
2163 2119 break;
2164 2120 }
2165 2121
2166 2122 error = arn_tx_start(sc, in, bf, mp);
2167 2123
2168 2124 if (error != 0) {
2169 2125 bad:
2170 2126 ic->ic_stats.is_tx_failed++;
2171 2127 if (bf != NULL) {
2172 2128 mutex_enter(&sc->sc_txbuflock);
2173 2129 list_insert_tail(&sc->sc_txbuf_list, bf);
2174 2130 mutex_exit(&sc->sc_txbuflock);
2175 2131 }
2176 2132 }
2177 2133 if (in != NULL)
2178 2134 ieee80211_free_node(in);
2179 2135 if ((type & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_DATA ||
2180 2136 error == 0) {
2181 2137 freemsg(mp);
2182 2138 }
2183 2139
2184 2140 return (error);
2185 2141 }
2186 2142
2187 2143 static void
2188 2144 arn_printtxbuf(struct ath_buf *bf, int done)
2189 2145 {
2190 2146 struct ath_desc *ds = bf->bf_desc;
2191 2147 const struct ath_tx_status *ts = &ds->ds_txstat;
2192 2148
2193 2149 ARN_DBG((ARN_DBG_XMIT, "arn: T(%p %p) %08x %08x %08x %08x %08x"
2194 2150 " %08x %08x %08x %c\n",
2195 2151 ds, bf->bf_daddr,
2196 2152 ds->ds_link, ds->ds_data,
2197 2153 ds->ds_ctl0, ds->ds_ctl1,
2198 2154 ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3],
2199 2155 !done ? ' ' : (ts->ts_status == 0) ? '*' : '!'));
2200 2156 }
2201 2157
2202 2158 /* ARGSUSED */
2203 2159 static void
2204 2160 ath_tx_rc_status(struct ath_buf *bf,
2205 2161 struct ath_desc *ds,
2206 2162 int nbad,
2207 2163 int txok,
2208 2164 boolean_t update_rc)
2209 2165 {
2210 2166 struct ath_tx_info_priv *tx_info_priv =
2211 2167 (struct ath_tx_info_priv *)&bf->tx_info_priv;
2212 2168
2213 2169 tx_info_priv->update_rc = B_FALSE;
2214 2170
2215 2171 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
2216 2172 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
2217 2173 if (bf_isdata(bf)) {
2218 2174 (void) memcpy(&tx_info_priv->tx, &ds->ds_txstat,
2219 2175 sizeof (tx_info_priv->tx));
2220 2176 tx_info_priv->n_frames = bf->bf_nframes;
2221 2177 tx_info_priv->n_bad_frames = nbad;
2222 2178 tx_info_priv->update_rc = B_TRUE;
2223 2179 }
2224 2180 }
2225 2181 }
2226 2182
2227 2183 /* Process completed xmit descriptors from the specified queue */
2228 2184 static int
2229 2185 arn_tx_processq(struct arn_softc *sc, struct ath_txq *txq)
2230 2186 {
2231 2187 ieee80211com_t *ic = (ieee80211com_t *)sc;
2232 2188 struct ath_hal *ah = sc->sc_ah;
2233 2189 struct ath_buf *bf;
2234 2190 struct ath_desc *ds;
2235 2191 struct ieee80211_node *in;
2236 2192 struct ath_tx_status *ts;
2237 2193 struct ath_node *an;
2238 2194 int32_t sr, lr, nacked = 0;
2239 2195 int txok, nbad = 0;
2240 2196 int status;
2241 2197
2242 2198 for (;;) {
2243 2199 mutex_enter(&txq->axq_lock);
2244 2200 bf = list_head(&txq->axq_list);
2245 2201 if (bf == NULL) {
2246 2202 txq->axq_link = NULL;
2247 2203 /* txq->axq_linkbuf = NULL; */
2248 2204 mutex_exit(&txq->axq_lock);
2249 2205 break;
2250 2206 }
2251 2207 		ds = bf->bf_desc; /* last descriptor */
2252 2208 ts = &ds->ds_txstat;
2253 2209 status = ath9k_hw_txprocdesc(ah, ds);
2254 2210
2255 2211 #ifdef DEBUG
2256 2212 arn_printtxbuf(bf, status == 0);
2257 2213 #endif
2258 2214
2259 2215 if (status == EINPROGRESS) {
2260 2216 mutex_exit(&txq->axq_lock);
2261 2217 break;
2262 2218 }
2263 2219 list_remove(&txq->axq_list, bf);
2264 2220 mutex_exit(&txq->axq_lock);
2265 2221 in = bf->bf_in;
2266 2222 if (in != NULL) {
2267 2223 an = ATH_NODE(in);
2268 2224 			/* Successful transmission */
2269 2225 if (ts->ts_status == 0) {
2270 2226 an->an_tx_ok++;
2271 2227 an->an_tx_antenna = ts->ts_antenna;
2272 2228 sc->sc_stats.ast_tx_rssidelta =
2273 2229 ts->ts_rssi - sc->sc_stats.ast_tx_rssi;
2274 2230 sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
2275 2231 } else {
2276 2232 an->an_tx_err++;
2277 2233 if (ts->ts_status & ATH9K_TXERR_XRETRY) {
2278 2234 sc->sc_stats.ast_tx_xretries++;
2279 2235 }
2280 2236 if (ts->ts_status & ATH9K_TXERR_FIFO) {
2281 2237 sc->sc_stats.ast_tx_fifoerr++;
2282 2238 }
2283 2239 if (ts->ts_status & ATH9K_TXERR_FILT) {
2284 2240 sc->sc_stats.ast_tx_filtered++;
2285 2241 }
2286 2242 an->an_tx_antenna = 0; /* invalidate */
2287 2243 }
2288 2244 sr = ts->ts_shortretry;
2289 2245 lr = ts->ts_longretry;
2290 2246 sc->sc_stats.ast_tx_shortretry += sr;
2291 2247 sc->sc_stats.ast_tx_longretry += lr;
2292 2248 /*
2293 2249 * Hand the descriptor to the rate control algorithm.
2294 2250 */
2295 2251 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
2296 2252 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
2297 2253 /*
2298 2254 * If frame was ack'd update the last rx time
2299 2255 * used to workaround phantom bmiss interrupts.
2300 2256 */
2301 2257 if (ts->ts_status == 0) {
2302 2258 nacked++;
2303 2259 an->an_tx_ok++;
2304 2260 } else {
2305 2261 an->an_tx_err++;
2306 2262 }
2307 2263 an->an_tx_retr += sr + lr;
2308 2264 }
2309 2265 }
2310 2266
2311 2267 txok = (ds->ds_txstat.ts_status == 0);
2312 2268 if (!bf_isampdu(bf)) {
2313 2269 /*
2314 2270 * This frame is sent out as a single frame.
2315 2271 * Use hardware retry status for this frame.
2316 2272 */
2317 2273 bf->bf_retries = ds->ds_txstat.ts_longretry;
2318 2274 if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
2319 2275 bf->bf_state.bf_type |= BUF_XRETRY;
2320 2276 nbad = 0;
2321 2277 }
2322 2278 		ath_tx_rc_status(bf, ds, nbad, txok, B_TRUE);
2323 2279
2324 2280 ath_tx_complete_buf(sc, bf, txok, 0);
2325 2281
2326 2282 // arn_dump_pkg((unsigned char *)bf->bf_dma.mem_va,
2327 2283 // bf->bf_frmlen, 1, 1);
2328 2284
2329 2285 bf->bf_in = NULL;
2330 2286 mutex_enter(&sc->sc_txbuflock);
2331 2287 list_insert_tail(&sc->sc_txbuf_list, bf);
2332 2288 mutex_exit(&sc->sc_txbuflock);
2333 2289
2334 2290 /*
2335 2291 * Reschedule stalled outbound packets
2336 2292 */
2337 2293 mutex_enter(&sc->sc_resched_lock);
2338 2294 if (sc->sc_resched_needed) {
2339 2295 sc->sc_resched_needed = B_FALSE;
2340 2296 mac_tx_update(ic->ic_mach);
2341 2297 }
2342 2298 mutex_exit(&sc->sc_resched_lock);
2343 2299 }
2344 2300
2345 2301 return (nacked);
2346 2302 }
2347 2303
2348 2304 static void
2349 2305 arn_tx_handler(struct arn_softc *sc)
2350 2306 {
2351 2307 int i;
2352 2308 int nacked = 0;
2353 2309 uint32_t qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
2354 2310 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
2355 2311
2356 2312 /*
2357 2313 * Process each active queue.
2358 2314 */
2359 2315 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2360 2316 if (ARN_TXQ_SETUP(sc, i) && (qcumask & (1 << i))) {
2361 2317 nacked += arn_tx_processq(sc, &sc->sc_txq[i]);
2362 2318 }
2363 2319 }
2364 2320
2365 2321 if (nacked)
2366 2322 sc->sc_lastrx = ath9k_hw_gettsf64(sc->sc_ah);
2367 2323 }
2368 2324
2369 2325 /* Deferred processing of transmit interrupt */
2370 2326
2371 2327 void
2372 2328 arn_tx_int_proc(void *arg)
2373 2329 {
2374 2330 struct arn_softc *sc = arg;
2375 2331 arn_tx_handler(sc);
2376 2332 }
2377 2333
2378 2334 /* Node init & cleanup functions */
2379 2335
2380 2336 #ifdef ARN_TX_AGGREGATION
2381 2337 void
2382 2338 arn_tx_node_init(struct arn_softc *sc, struct ath_node *an)
2383 2339 {
2384 2340 struct ath_atx_tid *tid;
2385 2341 struct ath_atx_ac *ac;
2386 2342 int tidno, acno;
2387 2343
2388 2344 for (tidno = 0, tid = &an->tid[tidno]; tidno < WME_NUM_TID;
2389 2345 tidno++, tid++) {
2390 2346 tid->an = an;
2391 2347 tid->tidno = tidno;
2392 2348 tid->seq_start = tid->seq_next = 0;
2393 2349 tid->baw_size = WME_MAX_BA;
2394 2350 tid->baw_head = tid->baw_tail = 0;
2395 2351 tid->sched = B_FALSE;
2396 2352 tid->paused = B_FALSE;
2397 2353 tid->state &= ~AGGR_CLEANUP;
2398 2354 list_create(&tid->buf_q, sizeof (struct ath_buf),
2399 2355 offsetof(struct ath_buf, bf_node));
2400 2356 acno = TID_TO_WME_AC(tidno);
2401 2357 tid->ac = &an->ac[acno];
2402 2358 tid->state &= ~AGGR_ADDBA_COMPLETE;
2403 2359 tid->state &= ~AGGR_ADDBA_PROGRESS;
2404 2360 tid->addba_exchangeattempts = 0;
2405 2361 }
2406 2362
2407 2363 for (acno = 0, ac = &an->ac[acno]; acno < WME_NUM_AC; acno++, ac++) {
2408 2364 ac->sched = B_FALSE;
2409 2365 list_create(&ac->tid_q, sizeof (struct ath_atx_tid),
2410 2366 offsetof(struct ath_atx_tid, list));
2411 2367
2412 2368 switch (acno) {
2413 2369 case WME_AC_BE:
2414 2370 ac->qnum = arn_tx_get_qnum(sc,
2415 2371 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
2416 2372 break;
2417 2373 case WME_AC_BK:
2418 2374 ac->qnum = arn_tx_get_qnum(sc,
2419 2375 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
2420 2376 break;
2421 2377 case WME_AC_VI:
2422 2378 ac->qnum = arn_tx_get_qnum(sc,
2423 2379 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
2424 2380 break;
2425 2381 case WME_AC_VO:
2426 2382 ac->qnum = arn_tx_get_qnum(sc,
2427 2383 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
2428 2384 break;
2429 2385 }
2430 2386 }
2431 2387 }
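
For context, TID_TO_WME_AC() used above is the standard 802.11e UP-to-AC
mapping. A sketch of the usual net80211 definition (assumed here, since the
macro is defined outside this file):

	#define	TID_TO_WME_AC(_tid)				\
		((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE :	\
		((_tid) < 3) ? WME_AC_BK :			\
		((_tid) < 6) ? WME_AC_VI : WME_AC_VO)
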
2432 2388
2433 2389 void
2434 2390 arn_tx_node_cleanup(struct arn_softc *sc, struct ieee80211_node *in)
2435 2391 {
2436 2392 int i;
2437 2393 struct ath_atx_ac *ac, *ac_tmp;
2438 2394 struct ath_atx_tid *tid, *tid_tmp;
2439 2395 	struct ath_txq *txq;
	struct ath_buf *bf;
2440 2396 struct ath_node *an = ATH_NODE(in);
2441 2397
2442 2398 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2443 2399 if (ARN_TXQ_SETUP(sc, i)) {
2444 2400 txq = &sc->sc_txq[i];
2445 2401
2446 2402 mutex_enter(&txq->axq_lock);
2447 2403
2448 2404 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq) {
2449 2405 tid = list_head(&ac->tid_q);
2450 2406 if (tid && tid->an != an)
2451 2407 continue;
2452 2408 list_remove(&txq->axq_acq, ac);
2453 2409 ac->sched = B_FALSE;
2454 2410
2455 2411 list_for_each_entry_safe(tid, tid_tmp,
2456 2412 &ac->tid_q) {
2457 2413 list_remove(&ac->tid_q, tid);
2458 2414 				bf = list_head(&tid->buf_q);
2459 2415 				while (bf != NULL) {
2460 2416 					if (bf->bf_in == in)
2461 2417 						bf->bf_in = NULL;
2462 2418 					bf = list_next(&tid->buf_q, bf);
2463 2419 				}
2464 2420 tid->sched = B_FALSE;
2465 2421 arn_tid_drain(sc, txq, tid);
2466 2422 tid->state &= ~AGGR_ADDBA_COMPLETE;
2467 2423 tid->addba_exchangeattempts = 0;
2468 2424 tid->state &= ~AGGR_CLEANUP;
2469 2425 }
2470 2426 }
2471 2427
2472 2428 mutex_exit(&txq->axq_lock);
2473 2429 }
2474 2430 }
2475 2431 }
2476 2432 #endif /* ARN_TX_AGGREGATION */
1130 lines elided