Print this page
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/iwp/iwp.c
+++ new/usr/src/uts/common/io/iwp/iwp.c
1 1 /*
2 2 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
3 3 * Use is subject to license terms.
4 4 */
5 5
6 6 /*
7 7 * Copyright (c) 2009, Intel Corporation
8 8 * All rights reserved.
9 9 */
10 10
11 11 /*
12 12 * Copyright (c) 2006
13 13 * Copyright (c) 2007
14 14 * Damien Bergamini <damien.bergamini@free.fr>
15 15 *
16 16 * Permission to use, copy, modify, and distribute this software for any
17 17 * purpose with or without fee is hereby granted, provided that the above
18 18 * copyright notice and this permission notice appear in all copies.
19 19 *
20 20 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21 21 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22 22 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23 23 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24 24 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25 25 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26 26 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27 27 */
28 28
29 29 /*
30 30 * Intel(R) WiFi Link 6000 Driver
31 31 */
32 32
33 33 #include <sys/types.h>
34 34 #include <sys/byteorder.h>
35 35 #include <sys/conf.h>
36 36 #include <sys/cmn_err.h>
37 37 #include <sys/stat.h>
38 38 #include <sys/ddi.h>
39 39 #include <sys/sunddi.h>
40 40 #include <sys/strsubr.h>
41 41 #include <sys/ethernet.h>
42 42 #include <inet/common.h>
43 43 #include <inet/nd.h>
44 44 #include <inet/mi.h>
45 45 #include <sys/note.h>
46 46 #include <sys/stream.h>
47 47 #include <sys/strsun.h>
48 48 #include <sys/modctl.h>
49 49 #include <sys/devops.h>
50 50 #include <sys/dlpi.h>
51 51 #include <sys/mac_provider.h>
52 52 #include <sys/mac_wifi.h>
53 53 #include <sys/net80211.h>
54 54 #include <sys/net80211_proto.h>
55 55 #include <sys/varargs.h>
56 56 #include <sys/policy.h>
57 57 #include <sys/pci.h>
58 58
59 59 #include "iwp_calibration.h"
60 60 #include "iwp_hw.h"
61 61 #include "iwp_eeprom.h"
62 62 #include "iwp_var.h"
63 63 #include <inet/wifi_ioctl.h>
64 64
#ifdef DEBUG
/* Debug-message categories: each bit enables one subsystem's output. */
#define	IWP_DEBUG_80211		(1 << 0)
#define	IWP_DEBUG_CMD		(1 << 1)
#define	IWP_DEBUG_DMA		(1 << 2)
#define	IWP_DEBUG_EEPROM	(1 << 3)
#define	IWP_DEBUG_FW		(1 << 4)
#define	IWP_DEBUG_HW		(1 << 5)
#define	IWP_DEBUG_INTR		(1 << 6)
#define	IWP_DEBUG_MRR		(1 << 7)
#define	IWP_DEBUG_PIO		(1 << 8)
#define	IWP_DEBUG_RX		(1 << 9)
#define	IWP_DEBUG_SCAN		(1 << 10)
#define	IWP_DEBUG_TX		(1 << 11)
#define	IWP_DEBUG_RATECTL	(1 << 12)
#define	IWP_DEBUG_RADIO		(1 << 13)
#define	IWP_DEBUG_RESUME	(1 << 14)
#define	IWP_DEBUG_CALIBRATION	(1 << 15)
/*
 * To see the debug messages of a given section, set one or more of
 * the IWP_DEBUG_* bits above in this flag word.
 */
uint32_t iwp_dbg_flags = 0;
/* IWP_DBG((flag, fmt, ...)) expands to iwp_dbg(flag, fmt, ...) when DEBUG */
#define	IWP_DBG(x) \
	iwp_dbg x
#else
#define	IWP_DBG(x)
#endif
92 92
/* Head of the per-instance soft-state list; see ddi_soft_state(9F). */
static void *iwp_soft_state_p = NULL;

/*
 * ucode will be compiled into driver image; the generated initializer
 * for this byte array comes from the included .ucode file.
 */
static uint8_t iwp_fw_bin [] = {
#include "fw-iw/iwp.ucode"
};
101 101
/*
 * DMA attributes for a shared page.
 * All of the attribute sets below restrict DMA to 32-bit addresses
 * and a single segment; they differ only in alignment and burst size.
 */
static ddi_dma_attr_t sh_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x1000,		/* alignment in bytes */
	0x1000,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a keep warm DRAM descriptor
 */
static ddi_dma_attr_t kw_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x1000,		/* alignment in bytes */
	0x1000,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a ring descriptor
 */
static ddi_dma_attr_t ring_desc_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a cmd
 */
static ddi_dma_attr_t cmd_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a rx buffer
 */
static ddi_dma_attr_t rx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	0x100,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for a tx buffer.
 * the maximum number of segments is 4 for the hardware.
 * now all the wifi drivers put the whole frame in a single
 * descriptor, so we define the maximum number of segments 1,
 * just the same as the rx_buffer. we consider leverage the HW
 * ability in the future, that is why we don't define rx and tx
 * buffer_dma_attr as the same.
 */
static ddi_dma_attr_t tx_buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0xffffffffU,	/* maximum DMAable byte count */
	4,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * DMA attributes for text and data part in the firmware
 */
static ddi_dma_attr_t fw_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffU,	/* highest usable address */
	0x7fffffff,	/* maximum DMAable byte count */
	0x10,		/* alignment in bytes */
	0x100,		/* burst sizes (any?) */
	1,		/* minimum transfer */
	0xffffffffU,	/* maximum transfer */
	0xffffffffU,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};

/*
 * regs access attributes: device registers are little-endian and
 * accesses must not be reordered.
 */
static ddi_device_acc_attr_t iwp_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for descriptor (little-endian, strictly ordered)
 */
static ddi_device_acc_attr_t iwp_dma_descattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * DMA access attributes for raw buffers (no byte swapping)
 */
static ddi_device_acc_attr_t iwp_dma_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
263 263
264 264 static int iwp_ring_init(iwp_sc_t *);
265 265 static void iwp_ring_free(iwp_sc_t *);
266 266 static int iwp_alloc_shared(iwp_sc_t *);
267 267 static void iwp_free_shared(iwp_sc_t *);
268 268 static int iwp_alloc_kw(iwp_sc_t *);
269 269 static void iwp_free_kw(iwp_sc_t *);
270 270 static int iwp_alloc_fw_dma(iwp_sc_t *);
271 271 static void iwp_free_fw_dma(iwp_sc_t *);
272 272 static int iwp_alloc_rx_ring(iwp_sc_t *);
273 273 static void iwp_reset_rx_ring(iwp_sc_t *);
274 274 static void iwp_free_rx_ring(iwp_sc_t *);
275 275 static int iwp_alloc_tx_ring(iwp_sc_t *, iwp_tx_ring_t *,
276 276 int, int);
277 277 static void iwp_reset_tx_ring(iwp_sc_t *, iwp_tx_ring_t *);
278 278 static void iwp_free_tx_ring(iwp_tx_ring_t *);
279 279 static ieee80211_node_t *iwp_node_alloc(ieee80211com_t *);
280 280 static void iwp_node_free(ieee80211_node_t *);
281 281 static int iwp_newstate(ieee80211com_t *, enum ieee80211_state, int);
282 282 static void iwp_mac_access_enter(iwp_sc_t *);
283 283 static void iwp_mac_access_exit(iwp_sc_t *);
284 284 static uint32_t iwp_reg_read(iwp_sc_t *, uint32_t);
285 285 static void iwp_reg_write(iwp_sc_t *, uint32_t, uint32_t);
286 286 static int iwp_load_init_firmware(iwp_sc_t *);
287 287 static int iwp_load_run_firmware(iwp_sc_t *);
288 288 static void iwp_tx_intr(iwp_sc_t *, iwp_rx_desc_t *);
289 289 static void iwp_cmd_intr(iwp_sc_t *, iwp_rx_desc_t *);
290 290 static uint_t iwp_intr(caddr_t, caddr_t);
291 291 static int iwp_eep_load(iwp_sc_t *);
292 292 static void iwp_get_mac_from_eep(iwp_sc_t *);
293 293 static int iwp_eep_sem_down(iwp_sc_t *);
294 294 static void iwp_eep_sem_up(iwp_sc_t *);
295 295 static uint_t iwp_rx_softintr(caddr_t, caddr_t);
296 296 static uint8_t iwp_rate_to_plcp(int);
297 297 static int iwp_cmd(iwp_sc_t *, int, const void *, int, int);
298 298 static void iwp_set_led(iwp_sc_t *, uint8_t, uint8_t, uint8_t);
299 299 static int iwp_hw_set_before_auth(iwp_sc_t *);
300 300 static int iwp_scan(iwp_sc_t *);
301 301 static int iwp_config(iwp_sc_t *);
302 302 static void iwp_stop_master(iwp_sc_t *);
303 303 static int iwp_power_up(iwp_sc_t *);
304 304 static int iwp_preinit(iwp_sc_t *);
305 305 static int iwp_init(iwp_sc_t *);
306 306 static void iwp_stop(iwp_sc_t *);
307 307 static int iwp_quiesce(dev_info_t *t);
308 308 static void iwp_amrr_init(iwp_amrr_t *);
309 309 static void iwp_amrr_timeout(iwp_sc_t *);
310 310 static void iwp_amrr_ratectl(void *, ieee80211_node_t *);
311 311 static void iwp_ucode_alive(iwp_sc_t *, iwp_rx_desc_t *);
312 312 static void iwp_rx_phy_intr(iwp_sc_t *, iwp_rx_desc_t *);
313 313 static void iwp_rx_mpdu_intr(iwp_sc_t *, iwp_rx_desc_t *);
314 314 static void iwp_release_calib_buffer(iwp_sc_t *);
315 315 static int iwp_init_common(iwp_sc_t *);
316 316 static uint8_t *iwp_eep_addr_trans(iwp_sc_t *, uint32_t);
317 317 static int iwp_put_seg_fw(iwp_sc_t *, uint32_t, uint32_t, uint32_t);
318 318 static int iwp_alive_common(iwp_sc_t *);
319 319 static void iwp_save_calib_result(iwp_sc_t *, iwp_rx_desc_t *);
320 320 static int iwp_attach(dev_info_t *, ddi_attach_cmd_t);
321 321 static int iwp_detach(dev_info_t *, ddi_detach_cmd_t);
322 322 static void iwp_destroy_locks(iwp_sc_t *);
323 323 static int iwp_send(ieee80211com_t *, mblk_t *, uint8_t);
324 324 static void iwp_thread(iwp_sc_t *);
325 325 static int iwp_run_state_config(iwp_sc_t *);
326 326 static int iwp_fast_recover(iwp_sc_t *);
327 327 static void iwp_overwrite_ic_default(iwp_sc_t *);
328 328 static int iwp_add_ap_sta(iwp_sc_t *);
329 329 static int iwp_alloc_dma_mem(iwp_sc_t *, size_t,
330 330 ddi_dma_attr_t *, ddi_device_acc_attr_t *,
331 331 uint_t, iwp_dma_t *);
332 332 static void iwp_free_dma_mem(iwp_dma_t *);
333 333 static int iwp_eep_ver_chk(iwp_sc_t *);
334 334 static void iwp_set_chip_param(iwp_sc_t *);
335 335
336 336 /*
337 337 * GLD specific operations
338 338 */
339 339 static int iwp_m_stat(void *, uint_t, uint64_t *);
340 340 static int iwp_m_start(void *);
341 341 static void iwp_m_stop(void *);
342 342 static int iwp_m_unicst(void *, const uint8_t *);
343 343 static int iwp_m_multicst(void *, boolean_t, const uint8_t *);
344 344 static int iwp_m_promisc(void *, boolean_t);
345 345 static mblk_t *iwp_m_tx(void *, mblk_t *);
346 346 static void iwp_m_ioctl(void *, queue_t *, mblk_t *);
347 347 static int iwp_m_setprop(void *arg, const char *pr_name,
348 348 mac_prop_id_t wldp_pr_num, uint_t wldp_length, const void *wldp_buf);
349 349 static int iwp_m_getprop(void *arg, const char *pr_name,
350 350 mac_prop_id_t wldp_pr_num, uint_t wldp_length, void *wldp_buf);
351 351 static void iwp_m_propinfo(void *, const char *, mac_prop_id_t,
352 352 mac_prop_info_handle_t);
353 353
/*
 * Supported rates for 802.11b/g modes (in 500Kbps unit).
 * The leading number is the rate count; e.g. 2 => 1Mbps, 22 => 11Mbps.
 */
static const struct ieee80211_rateset iwp_rateset_11b =
	{ 4, { 2, 4, 11, 22 } };

static const struct ieee80211_rateset iwp_rateset_11g =
	{ 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
362 362
363 363 /*
364 364 * For mfthread only
365 365 */
366 366 extern pri_t minclsyspri;
367 367
#define	DRV_NAME_SP	"iwp"

/*
 * Module Loading Data & Entry Points
 */
DDI_DEFINE_STREAM_OPS(iwp_devops, nulldev, nulldev, iwp_attach,
    iwp_detach, nodev, NULL, D_MP, NULL, iwp_quiesce);

static struct modldrv iwp_modldrv = {
	&mod_driverops,		/* drv_modops: this is a device driver */
	"Intel(R) PumaPeak driver(N)",	/* drv_linkinfo */
	&iwp_devops		/* drv_dev_ops */
};

static struct modlinkage iwp_modlinkage = {
	MODREV_1,
	/* ml_linkage is an array; fully braced to avoid -Wmissing-braces */
	{ &iwp_modldrv, NULL }
};
387 386
388 387 int
389 388 _init(void)
390 389 {
391 390 int status;
392 391
393 392 status = ddi_soft_state_init(&iwp_soft_state_p,
394 393 sizeof (iwp_sc_t), 1);
395 394 if (status != DDI_SUCCESS) {
396 395 return (status);
397 396 }
398 397
399 398 mac_init_ops(&iwp_devops, DRV_NAME_SP);
400 399 status = mod_install(&iwp_modlinkage);
401 400 if (status != DDI_SUCCESS) {
402 401 mac_fini_ops(&iwp_devops);
403 402 ddi_soft_state_fini(&iwp_soft_state_p);
404 403 }
405 404
406 405 return (status);
407 406 }
408 407
409 408 int
410 409 _fini(void)
411 410 {
412 411 int status;
413 412
414 413 status = mod_remove(&iwp_modlinkage);
415 414 if (DDI_SUCCESS == status) {
416 415 mac_fini_ops(&iwp_devops);
417 416 ddi_soft_state_fini(&iwp_soft_state_p);
418 417 }
419 418
420 419 return (status);
421 420 }
422 421
/*
 * _info(9E): report module information from the module linkage.
 */
int
_info(struct modinfo *mip)
{
	return (mod_info(&iwp_modlinkage, mip));
}
428 427
/*
 * Mac Call Back entries (GLDv3 mac_callbacks_t, positional initializer).
 * The flag word advertises which optional entry points are provided;
 * the NULL slots are the reserved/getcapab/open/close callbacks this
 * driver does not implement.
 */
mac_callbacks_t iwp_m_callbacks = {
	MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
	iwp_m_stat,	/* get statistics */
	iwp_m_start,	/* start the device */
	iwp_m_stop,	/* stop the device */
	iwp_m_promisc,	/* set promiscuous mode */
	iwp_m_multicst,	/* add/remove multicast address */
	iwp_m_unicst,	/* set unicast address */
	iwp_m_tx,	/* transmit a chain of frames */
	NULL,		/* reserved */
	iwp_m_ioctl,	/* wifi ioctl handling */
	NULL,		/* getcapab: not implemented */
	NULL,		/* open: not implemented */
	NULL,		/* close: not implemented */
	iwp_m_setprop,	/* set property */
	iwp_m_getprop,	/* get property */
	iwp_m_propinfo	/* property information */
};
450 449
#ifdef DEBUG
/*
 * Conditional console logger: forwards the message to vcmn_err(CE_NOTE)
 * only when at least one bit of `flags' is enabled in iwp_dbg_flags.
 */
void
iwp_dbg(uint32_t flags, const char *fmt, ...)
{
	va_list args;

	if ((flags & iwp_dbg_flags) == 0) {
		return;
	}

	va_start(args, fmt);
	vcmn_err(CE_NOTE, fmt, args);
	va_end(args);
}
#endif /* DEBUG */
464 463
/*
 * device operations
 */

/*
 * iwp_attach(9E): per-instance bring-up (DDI_ATTACH) and power resume
 * (DDI_RESUME).  Resources are acquired in a fixed order; any failure
 * unwinds through the attach_fail* labels in strict reverse order of
 * acquisition, so each label releases exactly what was acquired before
 * its corresponding goto.
 */
int
iwp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	iwp_sc_t *sc;
	ieee80211com_t *ic;
	int instance, i;
	char strbuf[32];
	wifi_data_t wd = { 0 };
	mac_register_t *macp;
	int intr_type;
	int intr_count;
	int intr_actual;
	int err = DDI_FAILURE;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		/*
		 * Resume path: re-initialize the chip if it was running
		 * at suspend time, then clear the suspend flag.
		 */
		instance = ddi_get_instance(dip);
		sc = ddi_get_soft_state(iwp_soft_state_p,
		    instance);
		ASSERT(sc != NULL);

		if (sc->sc_flags & IWP_F_RUNNING) {
			(void) iwp_init(sc);
		}

		atomic_and_32(&sc->sc_flags, ~IWP_F_SUSPEND);

		IWP_DBG((IWP_DEBUG_RESUME, "iwp_attach(): "
		    "resume\n"));
		return (DDI_SUCCESS);
	default:
		goto attach_fail1;
	}

	instance = ddi_get_instance(dip);
	err = ddi_soft_state_zalloc(iwp_soft_state_p, instance);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate soft state\n");
		goto attach_fail1;
	}

	sc = ddi_get_soft_state(iwp_soft_state_p, instance);
	ASSERT(sc != NULL);

	sc->sc_dip = dip;

	/*
	 * map configure space
	 */
	err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
	    &iwp_reg_accattr, &sc->sc_cfg_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to map config spaces regs\n");
		goto attach_fail2;
	}

	/*
	 * Reject any PCI device ID that is not one of the supported
	 * 6000-series variants.
	 */
	sc->sc_dev_id = ddi_get16(sc->sc_cfg_handle,
	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_DEVID));
	if ((sc->sc_dev_id != 0x422B) &&
	    (sc->sc_dev_id != 0x422C) &&
	    (sc->sc_dev_id != 0x4238) &&
	    (sc->sc_dev_id != 0x4239) &&
	    (sc->sc_dev_id != 0x008d) &&
	    (sc->sc_dev_id != 0x008e)) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "Do not support this device\n");
		goto attach_fail3;
	}

	iwp_set_chip_param(sc);

	sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
	    (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));

	/*
	 * keep from disturbing C3 state of CPU
	 */
	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base +
	    PCI_CFG_RETRY_TIMEOUT), 0);

	/*
	 * determine the size of buffer for frame and command to ucode
	 * (cache line size register is in 4-byte units; default to 16
	 * when the register reads zero)
	 */
	sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
	    (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
	if (!sc->sc_clsz) {
		sc->sc_clsz = 16;
	}
	sc->sc_clsz = (sc->sc_clsz << 2);

	sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
	    IEEE80211_MTU + IEEE80211_CRC_LEN +
	    (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
	    IEEE80211_WEP_CRCLEN), sc->sc_clsz);

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
	    0, 0, &iwp_reg_accattr, &sc->sc_handle);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to map device regs\n");
		goto attach_fail3;
	}

	/*
	 * this is used to differentiate type of hardware
	 */
	sc->sc_hw_rev = IWP_READ(sc, CSR_HW_REV);

	err = ddi_intr_get_supported_types(dip, &intr_type);
	if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "fixed type interrupt is not supported\n");
		goto attach_fail4;
	}

	err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
	if ((err != DDI_SUCCESS) || (intr_count != 1)) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "no fixed interrupts\n");
		goto attach_fail4;
	}

	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);

	err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
	    intr_count, &intr_actual, 0);
	if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_alloc() failed 0x%x\n", err);
		goto attach_fail5;
	}

	err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_get_pri() failed 0x%x\n", err);
		goto attach_fail6;
	}

	/*
	 * All driver mutexes are initialized at the interrupt priority
	 * so the interrupt handler may take them.
	 */
	mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));
	mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));
	mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_pri));

	cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sc->sc_put_seg_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sc->sc_ucode_cv, NULL, CV_DRIVER, NULL);

	/*
	 * initialize the mfthread
	 */
	cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
	sc->sc_mf_thread = NULL;
	sc->sc_mf_thread_switch = 0;

	/*
	 * Allocate shared buffer for communication between driver and ucode.
	 */
	err = iwp_alloc_shared(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate shared page\n");
		goto attach_fail7;
	}

	(void) memset(sc->sc_shared, 0, sizeof (iwp_shared_t));

	/*
	 * Allocate keep warm page.
	 */
	err = iwp_alloc_kw(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate keep warm page\n");
		goto attach_fail8;
	}

	/*
	 * Do some necessary hardware initializations.
	 */
	err = iwp_preinit(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to initialize hardware\n");
		goto attach_fail9;
	}

	/*
	 * get hardware configurations from eeprom
	 */
	err = iwp_eep_load(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to load eeprom\n");
		goto attach_fail9;
	}

	/*
	 * calibration information from EEPROM
	 */
	sc->sc_eep_calib = (struct iwp_eep_calibration *)
	    iwp_eep_addr_trans(sc, EEP_CALIBRATION);

	err = iwp_eep_ver_chk(sc);
	if (err != IWP_SUCCESS) {
		goto attach_fail9;
	}

	/*
	 * get MAC address of this chipset
	 */
	iwp_get_mac_from_eep(sc);


	/*
	 * initialize TX and RX ring buffers
	 */
	err = iwp_ring_init(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate and initialize ring\n");
		goto attach_fail9;
	}

	sc->sc_hdr = (iwp_firmware_hdr_t *)iwp_fw_bin;

	/*
	 * copy ucode to dma buffer
	 */
	err = iwp_alloc_fw_dma(sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to allocate firmware dma\n");
		goto attach_fail10;
	}

	/*
	 * Initialize the wifi part, which will be used by
	 * 802.11 module
	 */
	ic = &sc->sc_ic;
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;
	ic->ic_maxrssi = 100; /* experimental number */
	ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
	    IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;

	/*
	 * Support WPA/WPA2
	 */
	ic->ic_caps |= IEEE80211_C_WPA;

	/*
	 * set supported .11b and .11g rates
	 */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = iwp_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = iwp_rateset_11g;

	/*
	 * set supported .11b and .11g channels (1 through 11)
	 */
	for (i = 1; i <= 11; i++) {
		ic->ic_sup_channels[i].ich_freq =
		    ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
		ic->ic_sup_channels[i].ich_flags =
		    IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
		    IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
		    IEEE80211_CHAN_PASSIVE;
	}

	ic->ic_ibss_chan = &ic->ic_sup_channels[0];
	ic->ic_xmit = iwp_send;

	/*
	 * attach to 802.11 module
	 */
	ieee80211_attach(ic);

	/*
	 * different instance has different WPA door
	 */
	(void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
	    ddi_driver_name(dip),
	    ddi_get_instance(dip));

	/*
	 * Overwrite 80211 default configurations.
	 */
	iwp_overwrite_ic_default(sc);

	/*
	 * initialize 802.11 module
	 */
	ieee80211_media_init(ic);

	/*
	 * initialize default tx key
	 */
	ic->ic_def_txkey = 0;

	err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
	    iwp_rx_softintr, (caddr_t)sc);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "add soft interrupt failed\n");
		goto attach_fail12;
	}

	err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwp_intr,
	    (caddr_t)sc, NULL);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_add_handle() failed\n");
		goto attach_fail13;
	}

	err = ddi_intr_enable(sc->sc_intr_htable[0]);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "ddi_intr_enable() failed\n");
		goto attach_fail14;
	}

	/*
	 * Initialize pointer to device specific functions
	 */
	wd.wd_secalloc = WIFI_SEC_NONE;
	wd.wd_opmode = ic->ic_opmode;
	IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);

	/*
	 * create relation to GLD
	 */
	macp = mac_alloc(MAC_VERSION);
	if (NULL == macp) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to do mac_alloc()\n");
		goto attach_fail15;
	}

	macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI;
	macp->m_driver = sc;
	macp->m_dip = dip;
	macp->m_src_addr = ic->ic_macaddr;
	macp->m_callbacks = &iwp_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = IEEE80211_MTU;
	macp->m_pdata = &wd;
	macp->m_pdata_size = sizeof (wd);

	/*
	 * Register the macp to mac
	 */
	err = mac_register(macp, &ic->ic_mach);
	mac_free(macp);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to do mac_register()\n");
		goto attach_fail15;
	}

	/*
	 * Create minor node of type DDI_NT_NET_WIFI.
	 * Failure here is reported but deliberately not fatal.
	 */
	(void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_SP"%d", instance);
	err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
	    instance + 1, DDI_NT_NET_WIFI, 0);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_attach(): "
		    "failed to do ddi_create_minor_node()\n");
	}

	/*
	 * Notify link is down now
	 */
	mac_link_update(ic->ic_mach, LINK_STATE_DOWN);

	/*
	 * create the mf thread to handle the link status,
	 * recovery fatal error, etc.
	 */
	sc->sc_mf_thread_switch = 1;
	if (NULL == sc->sc_mf_thread) {
		sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
		    iwp_thread, sc, 0, &p0, TS_RUN, minclsyspri);
	}

	atomic_or_32(&sc->sc_flags, IWP_F_ATTACHED);

	return (DDI_SUCCESS);

	/*
	 * Error unwinding: each label releases the resource acquired
	 * just before the corresponding goto, then falls through.
	 */
attach_fail15:
	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
attach_fail14:
	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
attach_fail13:
	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
	sc->sc_soft_hdl = NULL;
attach_fail12:
	ieee80211_detach(ic);
attach_fail11:
	iwp_free_fw_dma(sc);
attach_fail10:
	iwp_ring_free(sc);
attach_fail9:
	iwp_free_kw(sc);
attach_fail8:
	iwp_free_shared(sc);
attach_fail7:
	iwp_destroy_locks(sc);
attach_fail6:
	(void) ddi_intr_free(sc->sc_intr_htable[0]);
attach_fail5:
	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
attach_fail4:
	ddi_regs_map_free(&sc->sc_handle);
attach_fail3:
	ddi_regs_map_free(&sc->sc_cfg_handle);
attach_fail2:
	ddi_soft_state_free(iwp_soft_state_p, instance);
attach_fail1:
	return (DDI_FAILURE);
}
901 900
/*
 * iwp_detach(9E): per-instance teardown (DDI_DETACH) and suspend
 * (DDI_SUSPEND).  Teardown stops the monitor thread, disables the
 * GLDv3 link, quiesces the chip, and then releases resources in the
 * reverse of the order iwp_attach() acquired them.
 */
int
iwp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	iwp_sc_t *sc;
	ieee80211com_t *ic;
	int err;

	sc = ddi_get_soft_state(iwp_soft_state_p, ddi_get_instance(dip));
	ASSERT(sc != NULL);
	ic = &sc->sc_ic;

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		/*
		 * Suspend path: clear recovery/rate-control work, mark
		 * suspended, and stop the chip if it was running.
		 */
		atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
		atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);

		atomic_or_32(&sc->sc_flags, IWP_F_SUSPEND);

		if (sc->sc_flags & IWP_F_RUNNING) {
			iwp_stop(sc);
			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

		}

		IWP_DBG((IWP_DEBUG_RESUME, "iwp_detach(): "
		    "suspend\n"));
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	if (!(sc->sc_flags & IWP_F_ATTACHED)) {
		return (DDI_FAILURE);
	}

	/*
	 * Destroy the mf_thread: signal it to exit, then wait until it
	 * clears sc_mf_thread (cv_wait_sig() returning 0 means we were
	 * interrupted by a signal and give up waiting).
	 */
	sc->sc_mf_thread_switch = 0;

	mutex_enter(&sc->sc_mt_lock);
	while (sc->sc_mf_thread != NULL) {
		if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0) {
			break;
		}
	}
	mutex_exit(&sc->sc_mt_lock);

	err = mac_disable(sc->sc_ic.ic_mach);
	if (err != DDI_SUCCESS) {
		return (err);
	}

	/*
	 * stop chipset
	 */
	iwp_stop(sc);

	DELAY(500000);

	/*
	 * release buffer for calibration
	 */
	iwp_release_calib_buffer(sc);

	/*
	 * Unregister from GLD
	 */
	(void) mac_unregister(sc->sc_ic.ic_mach);

	mutex_enter(&sc->sc_glock);
	iwp_free_fw_dma(sc);
	iwp_ring_free(sc);
	iwp_free_kw(sc);
	iwp_free_shared(sc);
	mutex_exit(&sc->sc_glock);

	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
	(void) ddi_intr_free(sc->sc_intr_htable[0]);
	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));

	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
	sc->sc_soft_hdl = NULL;

	/*
	 * detach from 80211 module
	 */
	ieee80211_detach(&sc->sc_ic);

	iwp_destroy_locks(sc);

	ddi_regs_map_free(&sc->sc_handle);
	ddi_regs_map_free(&sc->sc_cfg_handle);
	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(iwp_soft_state_p, ddi_get_instance(dip));

	return (DDI_SUCCESS);
}
1003 1002
1004 1003 /*
1005 1004 * destroy all locks
1006 1005 */
1007 1006 static void
1008 1007 iwp_destroy_locks(iwp_sc_t *sc)
1009 1008 {
1010 1009 cv_destroy(&sc->sc_mt_cv);
1011 1010 cv_destroy(&sc->sc_cmd_cv);
1012 1011 cv_destroy(&sc->sc_put_seg_cv);
1013 1012 cv_destroy(&sc->sc_ucode_cv);
1014 1013 mutex_destroy(&sc->sc_mt_lock);
1015 1014 mutex_destroy(&sc->sc_tx_lock);
1016 1015 mutex_destroy(&sc->sc_glock);
1017 1016 }
1018 1017
/*
 * Allocate an area of memory and a DMA handle for accessing it.
 *
 * Standard three-step DDI sequence: allocate a DMA handle, allocate
 * memory that satisfies the handle's constraints, then bind the two
 * together.  On any failure the steps already completed are undone
 * and the handle fields in *dma_p are reset to NULL, so a subsequent
 * iwp_free_dma_mem() on the same descriptor is safe.
 *
 * sc         - driver soft state (supplies the devinfo node)
 * memsize    - requested size in bytes
 * dma_attr_p - DMA engine constraints (alignment, address range, ...)
 * acc_attr_p - device access attributes (endianness, data ordering)
 * dma_flags  - DDI_DMA_* direction and mapping flags for the binding
 * dma_p      - out: filled-in descriptor for the allocated area
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
iwp_alloc_dma_mem(iwp_sc_t *sc, size_t memsize,
    ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
    uint_t dma_flags, iwp_dma_t *dma_p)
{
	caddr_t vaddr;
	int err = DDI_FAILURE;

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS) {
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory; only the consistent/streaming bits of
	 * dma_flags are meaningful here.
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
	    dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS) {
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
		dma_p->acc_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = vaddr;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
	    &dma_p->cookie, &dma_p->ncookies);
	if (err != DDI_DMA_MAPPED) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->acc_hdl = NULL;
		dma_p->dma_hdl = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * nslots/size/token look like placeholder values for fields a
	 * plain buffer does not use — TODO confirm against iwp_dma_t users.
	 */
	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->token = ~0U;
	dma_p->offset = 0;
	return (DDI_SUCCESS);
}
1074 1073
1075 1074 /*
1076 1075 * Free one allocated area of DMAable memory
1077 1076 */
1078 1077 static void
1079 1078 iwp_free_dma_mem(iwp_dma_t *dma_p)
1080 1079 {
1081 1080 if (dma_p->dma_hdl != NULL) {
1082 1081 if (dma_p->ncookies) {
1083 1082 (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
1084 1083 dma_p->ncookies = 0;
1085 1084 }
1086 1085 ddi_dma_free_handle(&dma_p->dma_hdl);
1087 1086 dma_p->dma_hdl = NULL;
1088 1087 }
1089 1088
1090 1089 if (dma_p->acc_hdl != NULL) {
1091 1090 ddi_dma_mem_free(&dma_p->acc_hdl);
1092 1091 dma_p->acc_hdl = NULL;
1093 1092 }
1094 1093 }
1095 1094
1096 1095 /*
1097 1096 * copy ucode into dma buffers
1098 1097 */
1099 1098 static int
1100 1099 iwp_alloc_fw_dma(iwp_sc_t *sc)
1101 1100 {
1102 1101 int err = DDI_FAILURE;
1103 1102 iwp_dma_t *dma_p;
1104 1103 char *t;
1105 1104
1106 1105 /*
1107 1106 * firmware image layout:
1108 1107 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
1109 1108 */
1110 1109
1111 1110 /*
1112 1111 * Check firmware image size.
1113 1112 */
1114 1113 if (LE_32(sc->sc_hdr->init_textsz) > RTC_INST_SIZE) {
1115 1114 cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1116 1115 "firmware init text size 0x%x is too large\n",
1117 1116 LE_32(sc->sc_hdr->init_textsz));
1118 1117
1119 1118 goto fail;
1120 1119 }
1121 1120
1122 1121 if (LE_32(sc->sc_hdr->init_datasz) > RTC_DATA_SIZE) {
1123 1122 cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1124 1123 "firmware init data size 0x%x is too large\n",
1125 1124 LE_32(sc->sc_hdr->init_datasz));
1126 1125
1127 1126 goto fail;
1128 1127 }
1129 1128
1130 1129 if (LE_32(sc->sc_hdr->textsz) > RTC_INST_SIZE) {
1131 1130 cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1132 1131 "firmware text size 0x%x is too large\n",
1133 1132 LE_32(sc->sc_hdr->textsz));
1134 1133
1135 1134 goto fail;
1136 1135 }
1137 1136
1138 1137 if (LE_32(sc->sc_hdr->datasz) > RTC_DATA_SIZE) {
1139 1138 cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1140 1139 "firmware data size 0x%x is too large\n",
1141 1140 LE_32(sc->sc_hdr->datasz));
1142 1141
1143 1142 goto fail;
1144 1143 }
1145 1144
1146 1145 /*
1147 1146 * copy text of runtime ucode
1148 1147 */
1149 1148 t = (char *)(sc->sc_hdr + 1);
1150 1149 err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
1151 1150 &fw_dma_attr, &iwp_dma_accattr,
1152 1151 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1153 1152 &sc->sc_dma_fw_text);
1154 1153 if (err != DDI_SUCCESS) {
1155 1154 cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1156 1155 "failed to allocate text dma memory.\n");
1157 1156 goto fail;
1158 1157 }
1159 1158
1160 1159 dma_p = &sc->sc_dma_fw_text;
1161 1160
1162 1161 IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
1163 1162 "text[ncookies:%d addr:%lx size:%lx]\n",
1164 1163 dma_p->ncookies, dma_p->cookie.dmac_address,
1165 1164 dma_p->cookie.dmac_size));
1166 1165
1167 1166 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));
1168 1167
1169 1168 /*
1170 1169 * copy data and bak-data of runtime ucode
1171 1170 */
1172 1171 t += LE_32(sc->sc_hdr->textsz);
1173 1172 err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1174 1173 &fw_dma_attr, &iwp_dma_accattr,
1175 1174 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1176 1175 &sc->sc_dma_fw_data);
1177 1176 if (err != DDI_SUCCESS) {
1178 1177 cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1179 1178 "failed to allocate data dma memory\n");
1180 1179 goto fail;
1181 1180 }
1182 1181
1183 1182 dma_p = &sc->sc_dma_fw_data;
1184 1183
1185 1184 IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
1186 1185 "data[ncookies:%d addr:%lx size:%lx]\n",
1187 1186 dma_p->ncookies, dma_p->cookie.dmac_address,
1188 1187 dma_p->cookie.dmac_size));
1189 1188
1190 1189 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1191 1190
1192 1191 err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1193 1192 &fw_dma_attr, &iwp_dma_accattr,
1194 1193 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1195 1194 &sc->sc_dma_fw_data_bak);
1196 1195 if (err != DDI_SUCCESS) {
1197 1196 cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1198 1197 "failed to allocate data bakup dma memory\n");
1199 1198 goto fail;
1200 1199 }
1201 1200
1202 1201 dma_p = &sc->sc_dma_fw_data_bak;
1203 1202
1204 1203 IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
1205 1204 "data_bak[ncookies:%d addr:%lx "
1206 1205 "size:%lx]\n",
1207 1206 dma_p->ncookies, dma_p->cookie.dmac_address,
1208 1207 dma_p->cookie.dmac_size));
1209 1208
1210 1209 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1211 1210
1212 1211 /*
1213 1212 * copy text of init ucode
1214 1213 */
1215 1214 t += LE_32(sc->sc_hdr->datasz);
1216 1215 err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
1217 1216 &fw_dma_attr, &iwp_dma_accattr,
1218 1217 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1219 1218 &sc->sc_dma_fw_init_text);
1220 1219 if (err != DDI_SUCCESS) {
1221 1220 cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1222 1221 "failed to allocate init text dma memory\n");
1223 1222 goto fail;
1224 1223 }
1225 1224
1226 1225 dma_p = &sc->sc_dma_fw_init_text;
1227 1226
1228 1227 IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
1229 1228 "init_text[ncookies:%d addr:%lx "
1230 1229 "size:%lx]\n",
1231 1230 dma_p->ncookies, dma_p->cookie.dmac_address,
1232 1231 dma_p->cookie.dmac_size));
1233 1232
1234 1233 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));
1235 1234
1236 1235 /*
1237 1236 * copy data of init ucode
1238 1237 */
1239 1238 t += LE_32(sc->sc_hdr->init_textsz);
1240 1239 err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
1241 1240 &fw_dma_attr, &iwp_dma_accattr,
1242 1241 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1243 1242 &sc->sc_dma_fw_init_data);
1244 1243 if (err != DDI_SUCCESS) {
1245 1244 cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
1246 1245 "failed to allocate init data dma memory\n");
1247 1246 goto fail;
1248 1247 }
1249 1248
1250 1249 dma_p = &sc->sc_dma_fw_init_data;
1251 1250
1252 1251 IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
1253 1252 "init_data[ncookies:%d addr:%lx "
1254 1253 "size:%lx]\n",
1255 1254 dma_p->ncookies, dma_p->cookie.dmac_address,
1256 1255 dma_p->cookie.dmac_size));
1257 1256
1258 1257 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));
1259 1258
1260 1259 sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
1261 1260 fail:
1262 1261 return (err);
1263 1262 }
1264 1263
1265 1264 static void
1266 1265 iwp_free_fw_dma(iwp_sc_t *sc)
1267 1266 {
1268 1267 iwp_free_dma_mem(&sc->sc_dma_fw_text);
1269 1268 iwp_free_dma_mem(&sc->sc_dma_fw_data);
1270 1269 iwp_free_dma_mem(&sc->sc_dma_fw_data_bak);
1271 1270 iwp_free_dma_mem(&sc->sc_dma_fw_init_text);
1272 1271 iwp_free_dma_mem(&sc->sc_dma_fw_init_data);
1273 1272 }
1274 1273
/*
 * Allocate a shared buffer between host and NIC (iwp_shared_t),
 * used for communication areas the hardware reads/writes directly.
 * On failure the partially-allocated area is released here.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE from iwp_alloc_dma_mem().
 */
static int
iwp_alloc_shared(iwp_sc_t *sc)
{
#ifdef DEBUG
	iwp_dma_t *dma_p;
#endif
	int err = DDI_FAILURE;

	/*
	 * must be aligned on a 4K-page boundary
	 * (sh_dma_attr presumably encodes the alignment — confirm)
	 */
	err = iwp_alloc_dma_mem(sc, sizeof (iwp_shared_t),
	    &sh_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_sh);
	if (err != DDI_SUCCESS) {
		goto fail;
	}

	sc->sc_shared = (iwp_shared_t *)sc->sc_dma_sh.mem_va;

	/*
	 * dma_p exists only under DEBUG; IWP_DBG presumably expands to
	 * nothing otherwise, so the reference below compiles away.
	 */
#ifdef DEBUG
	dma_p = &sc->sc_dma_sh;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_shared(): "
	    "sh[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (err);
fail:
	iwp_free_shared(sc);
	return (err);
}
1312 1311
1313 1312 static void
1314 1313 iwp_free_shared(iwp_sc_t *sc)
1315 1314 {
1316 1315 iwp_free_dma_mem(&sc->sc_dma_sh);
1317 1316 }
1318 1317
/*
 * Allocate a keep warm page (IWP_KW_SIZE bytes) for the hardware.
 * On failure the partially-allocated area is released here.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE from iwp_alloc_dma_mem().
 */
static int
iwp_alloc_kw(iwp_sc_t *sc)
{
#ifdef DEBUG
	iwp_dma_t *dma_p;
#endif
	int err = DDI_FAILURE;

	/*
	 * must be aligned on a 4K-page boundary
	 */
	err = iwp_alloc_dma_mem(sc, IWP_KW_SIZE,
	    &kw_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_kw);
	if (err != DDI_SUCCESS) {
		goto fail;
	}

	/* dma_p is DEBUG-only; IWP_DBG presumably compiles away otherwise */
#ifdef DEBUG
	dma_p = &sc->sc_dma_kw;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_kw(): "
	    "kw[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (err);
fail:
	iwp_free_kw(sc);
	return (err);
}
1354 1353
1355 1354 static void
1356 1355 iwp_free_kw(iwp_sc_t *sc)
1357 1356 {
1358 1357 iwp_free_dma_mem(&sc->sc_dma_kw);
1359 1358 }
1360 1359
/*
 * Initialize RX ring buffers.
 *
 * Allocates the RX descriptor ring (RX_QUEUE_SIZE 32-bit entries) and
 * one receive frame buffer per slot, then records each buffer's bus
 * address (shifted) in the corresponding descriptor.  On failure all
 * partially-allocated state is released via iwp_free_rx_ring().
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
iwp_alloc_rx_ring(iwp_sc_t *sc)
{
	iwp_rx_ring_t *ring;
	iwp_rx_data_t *data;
#ifdef DEBUG
	iwp_dma_t *dma_p;
#endif
	int i, err = DDI_FAILURE;

	ring = &sc->sc_rxq;
	ring->cur = 0;

	/*
	 * allocate RX description ring buffer
	 */
	err = iwp_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
	    &ring_desc_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_desc);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
		    "dma alloc rx ring desc "
		    "failed\n"));
		goto fail;
	}

	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
#ifdef DEBUG
	dma_p = &ring->dma_desc;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
	    "rx bd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/*
	 * Allocate Rx frame buffers.
	 */
	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		data = &ring->data[i];
		err = iwp_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
		    &rx_buffer_dma_attr, &iwp_dma_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    &data->dma_data);
		if (err != DDI_SUCCESS) {
			IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
			    "dma alloc rx ring "
			    "buf[%d] failed\n", i));
			goto fail;
		}
		/*
		 * the physical address bit [8-36] are used,
		 * instead of bit [0-31] in 3945.
		 */
		ring->desc[i] = (uint32_t)
		    (data->dma_data.cookie.dmac_address >> 8);
	}

#ifdef DEBUG
	dma_p = &ring->data[0].dma_data;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
	    "rx buffer[0][ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/* push the descriptor table out to the device */
	IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);

	return (err);

fail:
	iwp_free_rx_ring(sc);
	return (err);
}
1440 1439
/*
 * Disable the RX DMA channel.
 *
 * Writes 0 to the channel-0 config register to stop receive DMA, then
 * polls (up to 2000 x 1ms) for the status bit (1 << 24) — presumably
 * the channel-idle indication, confirm against the FH register spec —
 * before resetting the software ring index.
 */
static void
iwp_reset_rx_ring(iwp_sc_t *sc)
{
	int n;

	iwp_mac_access_enter(sc);
	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	for (n = 0; n < 2000; n++) {
		if (IWP_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24)) {
			break;
		}
		DELAY(1000);
	}
#ifdef DEBUG
	if (2000 == n) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_reset_rx_ring(): "
		    "timeout resetting Rx ring\n"));
	}
#endif
	iwp_mac_access_exit(sc);

	sc->sc_rxq.cur = 0;
}
1467 1466
1468 1467 static void
1469 1468 iwp_free_rx_ring(iwp_sc_t *sc)
1470 1469 {
1471 1470 int i;
1472 1471
1473 1472 for (i = 0; i < RX_QUEUE_SIZE; i++) {
1474 1473 if (sc->sc_rxq.data[i].dma_data.dma_hdl) {
1475 1474 IWP_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1476 1475 DDI_DMA_SYNC_FORCPU);
1477 1476 }
1478 1477
1479 1478 iwp_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1480 1479 }
1481 1480
1482 1481 if (sc->sc_rxq.dma_desc.dma_hdl) {
1483 1482 IWP_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1484 1483 }
1485 1484
1486 1485 iwp_free_dma_mem(&sc->sc_rxq.dma_desc);
1487 1486 }
1488 1487
/*
 * Initialize TX ring buffers for one queue.
 *
 * Allocates three DMA resources for the ring: the TX descriptor array,
 * the ucode command array (one iwp_cmd_t per slot), and a frame buffer
 * per slot.  Each slot's iwp_tx_data_t records virtual and bus
 * addresses of its descriptor and command entry.  On any failure all
 * partial state is released through iwp_free_tx_ring().
 *
 * sc    - driver soft state
 * ring  - out: the ring to initialize
 * slots - ring window size (TFD_TX_CMD_SLOTS or TFD_CMD_SLOTS)
 * qid   - hardware queue id
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
iwp_alloc_tx_ring(iwp_sc_t *sc, iwp_tx_ring_t *ring,
    int slots, int qid)
{
	iwp_tx_data_t *data;
	iwp_tx_desc_t *desc_h;
	uint32_t paddr_desc_h;
	iwp_cmd_t *cmd_h;
	uint32_t paddr_cmd_h;
#ifdef DEBUG
	iwp_dma_t *dma_p;
#endif
	int i, err = DDI_FAILURE;
	ring->qid = qid;
	ring->count = TFD_QUEUE_SIZE_MAX;
	ring->window = slots;
	ring->queued = 0;
	ring->cur = 0;
	ring->desc_cur = 0;

	/*
	 * allocate buffer for TX descriptor ring
	 */
	err = iwp_alloc_dma_mem(sc,
	    TFD_QUEUE_SIZE_MAX * sizeof (iwp_tx_desc_t),
	    &ring_desc_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_desc);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
		    "dma alloc tx ring desc[%d] "
		    "failed\n", qid));
		goto fail;
	}

#ifdef DEBUG
	dma_p = &ring->dma_desc;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
	    "tx bd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	desc_h = (iwp_tx_desc_t *)ring->dma_desc.mem_va;
	paddr_desc_h = ring->dma_desc.cookie.dmac_address;

	/*
	 * allocate buffer for ucode command
	 */
	err = iwp_alloc_dma_mem(sc,
	    TFD_QUEUE_SIZE_MAX * sizeof (iwp_cmd_t),
	    &cmd_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_cmd);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
		    "dma alloc tx ring cmd[%d]"
		    " failed\n", qid));
		goto fail;
	}

#ifdef DEBUG
	dma_p = &ring->dma_cmd;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
	    "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	cmd_h = (iwp_cmd_t *)ring->dma_cmd.mem_va;
	paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;

	/*
	 * Allocate Tx frame buffers.
	 * KM_NOSLEEP: may be called from a context where sleeping
	 * is undesirable, hence the explicit NULL check below.
	 */
	ring->data = kmem_zalloc(sizeof (iwp_tx_data_t) * TFD_QUEUE_SIZE_MAX,
	    KM_NOSLEEP);
	if (NULL == ring->data) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
		    "could not allocate "
		    "tx data slots\n"));
		goto fail;
	}

	for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
		data = &ring->data[i];
		err = iwp_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
		    &tx_buffer_dma_attr, &iwp_dma_accattr,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
		    &data->dma_data);
		if (err != DDI_SUCCESS) {
			IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
			    "dma alloc tx "
			    "ring buf[%d] failed\n", i));
			goto fail;
		}

		/*
		 * each slot's bus address is derived from the base
		 * address plus the byte offset of its entry
		 */
		data->desc = desc_h + i;
		data->paddr_desc = paddr_desc_h +
		    _PTRDIFF(data->desc, desc_h);
		data->cmd = cmd_h + i;
		data->paddr_cmd = paddr_cmd_h +
		    _PTRDIFF(data->cmd, cmd_h);
	}
#ifdef DEBUG
	dma_p = &ring->data[0].dma_data;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
	    "tx buffer[0][ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (err);

fail:
	iwp_free_tx_ring(ring);

	return (err);
}
1612 1611
/*
 * Disable one TX DMA channel.
 *
 * Writes 0 to the queue's TX config register to stop the channel,
 * polls (up to 200 x 10us) for the channel-idle status, syncs every
 * frame buffer back to the device, then resets the software indices.
 * The sync loop is skipped when quiescing (IWP_F_QUIESCED), since
 * quiesce(9E) must not block.
 */
static void
iwp_reset_tx_ring(iwp_sc_t *sc, iwp_tx_ring_t *ring)
{
	iwp_tx_data_t *data;
	int i, n;

	iwp_mac_access_enter(sc);

	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
	for (n = 0; n < 200; n++) {
		if (IWP_READ(sc, IWP_FH_TSSR_TX_STATUS_REG) &
		    IWP_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid)) {
			break;
		}
		DELAY(10);
	}

#ifdef DEBUG
	if (200 == n) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_reset_tx_ring(): "
		    "timeout reset tx ring %d\n",
		    ring->qid));
	}
#endif

	iwp_mac_access_exit(sc);

	/* by pass, if it's quiesce */
	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
		for (i = 0; i < ring->count; i++) {
			data = &ring->data[i];
			IWP_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
		}
	}

	ring->queued = 0;
	ring->cur = 0;
	ring->desc_cur = 0;
}
1655 1654
1656 1655 static void
1657 1656 iwp_free_tx_ring(iwp_tx_ring_t *ring)
1658 1657 {
1659 1658 int i;
1660 1659
1661 1660 if (ring->dma_desc.dma_hdl != NULL) {
1662 1661 IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1663 1662 }
1664 1663 iwp_free_dma_mem(&ring->dma_desc);
1665 1664
1666 1665 if (ring->dma_cmd.dma_hdl != NULL) {
1667 1666 IWP_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
1668 1667 }
1669 1668 iwp_free_dma_mem(&ring->dma_cmd);
1670 1669
1671 1670 if (ring->data != NULL) {
1672 1671 for (i = 0; i < ring->count; i++) {
1673 1672 if (ring->data[i].dma_data.dma_hdl) {
1674 1673 IWP_DMA_SYNC(ring->data[i].dma_data,
1675 1674 DDI_DMA_SYNC_FORDEV);
1676 1675 }
1677 1676 iwp_free_dma_mem(&ring->data[i].dma_data);
1678 1677 }
1679 1678 kmem_free(ring->data, ring->count * sizeof (iwp_tx_data_t));
1680 1679 }
1681 1680 }
1682 1681
1683 1682 /*
1684 1683 * initialize TX and RX ring
1685 1684 */
1686 1685 static int
1687 1686 iwp_ring_init(iwp_sc_t *sc)
1688 1687 {
1689 1688 int i, err = DDI_FAILURE;
1690 1689
1691 1690 for (i = 0; i < IWP_NUM_QUEUES; i++) {
1692 1691 if (IWP_CMD_QUEUE_NUM == i) {
1693 1692 continue;
1694 1693 }
1695 1694
1696 1695 err = iwp_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1697 1696 i);
1698 1697 if (err != DDI_SUCCESS) {
1699 1698 goto fail;
1700 1699 }
1701 1700 }
1702 1701
1703 1702 /*
1704 1703 * initialize command queue
1705 1704 */
1706 1705 err = iwp_alloc_tx_ring(sc, &sc->sc_txq[IWP_CMD_QUEUE_NUM],
1707 1706 TFD_CMD_SLOTS, IWP_CMD_QUEUE_NUM);
1708 1707 if (err != DDI_SUCCESS) {
1709 1708 goto fail;
1710 1709 }
1711 1710
1712 1711 err = iwp_alloc_rx_ring(sc);
1713 1712 if (err != DDI_SUCCESS) {
1714 1713 goto fail;
1715 1714 }
1716 1715
1717 1716 fail:
1718 1717 return (err);
1719 1718 }
1720 1719
1721 1720 static void
1722 1721 iwp_ring_free(iwp_sc_t *sc)
1723 1722 {
1724 1723 int i = IWP_NUM_QUEUES;
1725 1724
1726 1725 iwp_free_rx_ring(sc);
1727 1726 while (--i >= 0) {
1728 1727 iwp_free_tx_ring(&sc->sc_txq[i]);
1729 1728 }
1730 1729 }
1731 1730
1732 1731 /* ARGSUSED */
1733 1732 static ieee80211_node_t *
1734 1733 iwp_node_alloc(ieee80211com_t *ic)
1735 1734 {
1736 1735 iwp_amrr_t *amrr;
1737 1736
1738 1737 amrr = kmem_zalloc(sizeof (iwp_amrr_t), KM_SLEEP);
1739 1738 if (NULL == amrr) {
1740 1739 cmn_err(CE_WARN, "iwp_node_alloc(): "
1741 1740 "failed to allocate memory for amrr structure\n");
1742 1741 return (NULL);
1743 1742 }
1744 1743
1745 1744 iwp_amrr_init(amrr);
1746 1745
1747 1746 return (&amrr->in);
1748 1747 }
1749 1748
1750 1749 static void
1751 1750 iwp_node_free(ieee80211_node_t *in)
1752 1751 {
1753 1752 ieee80211com_t *ic;
1754 1753
1755 1754 if ((NULL == in) ||
1756 1755 (NULL == in->in_ic)) {
1757 1756 cmn_err(CE_WARN, "iwp_node_free() "
1758 1757 "Got a NULL point from Net80211 module\n");
1759 1758 return;
1760 1759 }
1761 1760 ic = in->in_ic;
1762 1761
1763 1762 if (ic->ic_node_cleanup != NULL) {
1764 1763 ic->ic_node_cleanup(in);
1765 1764 }
1766 1765
1767 1766 if (in->in_wpa_ie != NULL) {
1768 1767 ieee80211_free(in->in_wpa_ie);
1769 1768 }
1770 1769
1771 1770 if (in->in_wme_ie != NULL) {
1772 1771 ieee80211_free(in->in_wme_ie);
1773 1772 }
1774 1773
1775 1774 if (in->in_htcap_ie != NULL) {
1776 1775 ieee80211_free(in->in_htcap_ie);
1777 1776 }
1778 1777
1779 1778 kmem_free(in, sizeof (iwp_amrr_t));
1780 1779 }
1781 1780
1782 1781
/*
 * Change the station's state.  This function is invoked by the net80211
 * module when the station's state needs to change.
 *
 * Drives the hardware side of each 802.11 state transition (scan setup,
 * pre-auth channel setup, association configuration, rate control, and
 * LED signalling) under sc_glock, then chains to the saved net80211
 * handler sc_newstate().  Note the SCAN->SCAN case drops and re-takes
 * sc_glock around the sc_newstate() callback and returns without
 * reaching the common exit path.
 *
 * Returns IWP_SUCCESS/IWP_FAIL-style codes on internal failure, or
 * whatever sc_newstate() returns on the common path.
 */
static int
iwp_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
{
	iwp_sc_t *sc;
	ieee80211_node_t *in;
	enum ieee80211_state ostate;
	iwp_add_sta_t node;
	int i, err = IWP_FAIL;

	if (NULL == ic) {
		return (err);
	}
	sc = (iwp_sc_t *)ic;
	in = ic->ic_bss;
	ostate = ic->ic_state;

	mutex_enter(&sc->sc_glock);

	switch (nstate) {
	case IEEE80211_S_SCAN:
		switch (ostate) {
		case IEEE80211_S_INIT:
			atomic_or_32(&sc->sc_flags, IWP_F_SCANNING);
			iwp_set_led(sc, 2, 10, 2);

			/*
			 * clear association to receive beacons from
			 * all BSS'es
			 */
			sc->sc_config.assoc_id = 0;
			sc->sc_config.filter_flags &=
			    ~LE_32(RXON_FILTER_ASSOC_MSK);

			IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
			    "config chan %d "
			    "flags %x filter_flags %x\n",
			    LE_16(sc->sc_config.chan),
			    LE_32(sc->sc_config.flags),
			    LE_32(sc->sc_config.filter_flags)));

			err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
			    sizeof (iwp_rxon_cmd_t), 1);
			if (err != IWP_SUCCESS) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not clear association\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				mutex_exit(&sc->sc_glock);
				return (err);
			}

			/* add broadcast node to send probe request */
			(void) memset(&node, 0, sizeof (node));
			(void) memset(&node.sta.addr, 0xff, IEEE80211_ADDR_LEN);
			node.sta.sta_id = IWP_BROADCAST_ID;
			err = iwp_cmd(sc, REPLY_ADD_STA, &node,
			    sizeof (node), 1);
			if (err != IWP_SUCCESS) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not add broadcast node\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				mutex_exit(&sc->sc_glock);
				return (err);
			}
			break;
		case IEEE80211_S_SCAN:
			/*
			 * sc_glock is dropped around the net80211
			 * callback to avoid lock-order issues with
			 * the stack — TODO confirm against other
			 * sc_newstate() callers in this driver.
			 */
			mutex_exit(&sc->sc_glock);
			/* step to next channel before actual FW scan */
			err = sc->sc_newstate(ic, nstate, arg);
			mutex_enter(&sc->sc_glock);
			if ((err != 0) || ((err = iwp_scan(sc)) != 0)) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not initiate scan\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				ieee80211_cancel_scan(ic);
			}
			/* early return: sc_newstate() was already called */
			mutex_exit(&sc->sc_glock);
			return (err);
		default:
			break;
		}
		sc->sc_clk = 0;
		break;

	case IEEE80211_S_AUTH:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}

		/*
		 * reset state to handle reassociations correctly
		 */
		sc->sc_config.assoc_id = 0;
		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);

		/*
		 * before sending authentication and association request frame,
		 * we need do something in the hardware, such as setting the
		 * channel same to the target AP...
		 */
		if ((err = iwp_hw_set_before_auth(sc)) != 0) {
			IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
			    "could not send authentication request\n"));
			mutex_exit(&sc->sc_glock);
			return (err);
		}
		break;

	case IEEE80211_S_RUN:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}

		if (IEEE80211_M_MONITOR == ic->ic_opmode) {
			/* let LED blink when monitoring */
			iwp_set_led(sc, 2, 10, 10);
			break;
		}

		IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
		    "associated.\n"));

		err = iwp_run_state_config(sc);
		if (err != IWP_SUCCESS) {
			cmn_err(CE_WARN, "iwp_newstate(): "
			    "failed to set up association\n");
			mutex_exit(&sc->sc_glock);
			return (err);
		}

		/*
		 * start automatic rate control
		 */
		if (IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) {
			atomic_or_32(&sc->sc_flags, IWP_F_RATE_AUTO_CTL);

			/*
			 * set rate to some reasonable initial value:
			 * highest rate not above 72 (36 Mbps in rate
			 * units — presumably; confirm IEEE80211_RATE)
			 */
			i = in->in_rates.ir_nrates - 1;
			while (i > 0 && IEEE80211_RATE(i) > 72) {
				i--;
			}
			in->in_txrate = i;

		} else {
			atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);
		}

		/*
		 * set LED on after associated
		 */
		iwp_set_led(sc, 2, 0, 1);
		break;

	case IEEE80211_S_INIT:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}
		/*
		 * set LED off after init
		 */
		iwp_set_led(sc, 2, 1, 0);
		break;

	case IEEE80211_S_ASSOC:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}
		break;
	}

	mutex_exit(&sc->sc_glock);

	return (sc->sc_newstate(ic, nstate, arg));
}
1962 1961
/*
 * Exclusive access to MAC begin.
 *
 * Sets the MAC_ACCESS_REQ bit in CSR_GP_CNTRL and polls (up to
 * 1000 x 10us) until the clock-ready/sleep bits equal the
 * access-enabled value.  Paired with iwp_mac_access_exit().
 * On timeout, access proceeds anyway (only a debug message is
 * emitted) — presumably subsequent register accesses may then
 * misbehave; confirm against the CSR spec.
 */
static void
iwp_mac_access_enter(iwp_sc_t *sc)
{
	uint32_t tmp;
	int n;

	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	IWP_WRITE(sc, CSR_GP_CNTRL,
	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* wait until we succeed */
	for (n = 0; n < 1000; n++) {
		if ((IWP_READ(sc, CSR_GP_CNTRL) &
		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN) {
			break;
		}
		DELAY(10);
	}

#ifdef DEBUG
	if (1000 == n) {
		IWP_DBG((IWP_DEBUG_PIO, "iwp_mac_access_enter(): "
		    "could not lock memory\n"));
	}
#endif
}
1994 1993
1995 1994 /*
1996 1995 * exclusive access to mac end.
1997 1996 */
1998 1997 static void
1999 1998 iwp_mac_access_exit(iwp_sc_t *sc)
2000 1999 {
2001 2000 uint32_t tmp = IWP_READ(sc, CSR_GP_CNTRL);
2002 2001 IWP_WRITE(sc, CSR_GP_CNTRL,
2003 2002 tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2004 2003 }
2005 2004
/*
 * This function is kept here (commented out) for future use:
 * static uint32_t
 * iwp_mem_read(iwp_sc_t *sc, uint32_t addr)
 * {
 * 	IWP_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
 * 	return (IWP_READ(sc, HBUS_TARG_MEM_RDAT));
 * }
 */
2015 2014
/*
 * Write one word of MAC memory: latch the target address into the
 * indirect-access address register, then write the data register.
 * Callers presumably hold MAC access (iwp_mac_access_enter()) —
 * confirm at call sites.
 */
static void
iwp_mem_write(iwp_sc_t *sc, uint32_t addr, uint32_t data)
{
	IWP_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
	IWP_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
}
2025 2024
/*
 * Read a MAC (periphery) register through the indirect-access window.
 * The (3 << 24) bits OR'ed into the address are presumably a
 * byte-enable/access-size field required by the HBUS interface —
 * confirm against the hardware spec.
 */
static uint32_t
iwp_reg_read(iwp_sc_t *sc, uint32_t addr)
{
	IWP_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
	return (IWP_READ(sc, HBUS_TARG_PRPH_RDAT));
}
2035 2034
/*
 * Write a MAC (periphery) register through the indirect-access window;
 * counterpart of iwp_reg_read() (same (3 << 24) access-size bits).
 */
static void
iwp_reg_write(iwp_sc_t *sc, uint32_t addr, uint32_t data)
{
	IWP_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
	IWP_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
}
2045 2044
2046 2045
2047 2046 /*
2048 2047 * steps of loading ucode:
2049 2048 * load init ucode=>init alive=>calibrate=>
2050 2049 * receive calibration result=>reinitialize NIC=>
2051 2050 * load runtime ucode=>runtime alive=>
2052 2051 * send calibration result=>running.
2053 2052 */
/*
 * Load the "init" firmware image (init_text and init_data sections)
 * into the NIC via DMA and wait for each transfer to complete.
 *
 * Must be called with sc->sc_glock held: the completion waits use
 * cv_timedwait() on sc_put_seg_cv with sc_glock as the associated
 * mutex.  iwp_intr() sets IWP_F_PUT_SEG and signals the cv when the
 * hardware raises BIT_INT_FH_TX for a finished segment transfer.
 *
 * Returns IWP_SUCCESS when both sections were loaded, IWP_FAIL (or
 * the error from iwp_put_seg_fw()) otherwise.
 */
static int
iwp_load_init_firmware(iwp_sc_t *sc)
{
	int err = IWP_FAIL;
	clock_t clk;

	/* clear any stale completion flag before starting the DMA */
	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load init_text section of uCode to hardware
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_init_text.cookie.dmac_address,
	    RTC_INST_LOWER_BOUND, sc->sc_dma_fw_init_text.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "failed to write init uCode.\n");
		return (err);
	}

	/* allow up to one second for the segment-done interrupt */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/* wait loading init_text until completed or timeout */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "timeout waiting for init uCode load.\n");
		return (IWP_FAIL);
	}

	/* re-arm the completion flag for the next section */
	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load init_data section of uCode to hardware
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_init_data.cookie.dmac_address,
	    RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_init_data.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "failed to write init_data uCode.\n");
		return (err);
	}

	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/*
	 * wait loading init_data until completed or timeout
	 */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "timeout waiting for init_data uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/* err is IWP_SUCCESS when we reach this point */
	return (err);
}
2122 2121
/*
 * Load the runtime firmware image (text and data sections) into the
 * NIC via DMA and wait for each transfer to complete.
 *
 * Must be called with sc->sc_glock held: the completion waits use
 * cv_timedwait() on sc_put_seg_cv with sc_glock as the associated
 * mutex, signalled from iwp_intr() on BIT_INT_FH_TX.
 *
 * Returns IWP_SUCCESS when both sections were loaded, IWP_FAIL (or
 * the error from iwp_put_seg_fw()) otherwise.
 */
static int
iwp_load_run_firmware(iwp_sc_t *sc)
{
	int err = IWP_FAIL;
	clock_t clk;

	/* clear any stale completion flag before starting the DMA */
	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load init_text section of uCode to hardware
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_text.cookie.dmac_address,
	    RTC_INST_LOWER_BOUND, sc->sc_dma_fw_text.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "failed to write run uCode.\n");
		return (err);
	}

	/* allow up to one second for the segment-done interrupt */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/* wait loading run_text until completed or timeout */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "timeout waiting for run uCode load.\n");
		return (IWP_FAIL);
	}

	/* re-arm the completion flag for the next section */
	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load run_data section of uCode to hardware
	 *
	 * NOTE(review): the DMA address comes from sc_dma_fw_data_bak
	 * while the length comes from sc_dma_fw_data.  Presumably the
	 * backup buffer mirrors the data section and the sizes match;
	 * confirm against the allocation code.
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_data_bak.cookie.dmac_address,
	    RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_data.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "failed to write run_data uCode.\n");
		return (err);
	}

	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/*
	 * wait loading run_data until completed or timeout
	 */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "timeout waiting for run_data uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/* err is IWP_SUCCESS when we reach this point */
	return (err);
}
2191 2190
2192 2191 /*
2193 2192 * this function will be invoked to receive phy information
2194 2193 * when a frame is received.
2195 2194 */
2196 2195 static void
2197 2196 iwp_rx_phy_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2198 2197 {
2199 2198
2200 2199 sc->sc_rx_phy_res.flag = 1;
2201 2200
2202 2201 (void) memcpy(sc->sc_rx_phy_res.buf, (uint8_t *)(desc + 1),
2203 2202 sizeof (iwp_rx_phy_res_t));
2204 2203 }
2205 2204
2206 2205 /*
2207 2206 * this function will be invoked to receive body of frame when
2208 2207 * a frame is received.
2209 2208 */
/*
 * Handle a REPLY_RX_MPDU_CMD notification: parse the received frame
 * body that follows the descriptor, derive an RSSI value from the
 * PHY information previously stashed by iwp_rx_phy_intr(), filter
 * out bad frames, and hand good ones to the net80211 layer.
 */
static void
iwp_rx_mpdu_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
{
	ieee80211com_t *ic = &sc->sc_ic;
#ifdef DEBUG
	iwp_rx_ring_t *ring = &sc->sc_rxq;
#endif
	struct ieee80211_frame *wh;
	struct iwp_rx_non_cfg_phy *phyinfo;
	struct iwp_rx_mpdu_body_size *mpdu_size;

	mblk_t *mp;
	int16_t t;
	uint16_t len, rssi, agc;
	uint32_t temp, crc, *tail;
	uint32_t arssi, brssi, crssi, mrssi;
	iwp_rx_phy_res_t *stat;
	ieee80211_node_t *in;

	/*
	 * assuming not 11n here. cope with 11n in phase-II
	 */
	mpdu_size = (struct iwp_rx_mpdu_body_size *)(desc + 1);
	stat = (iwp_rx_phy_res_t *)sc->sc_rx_phy_res.buf;
	/* sanity-check the saved PHY result before trusting it */
	if (stat->cfg_phy_cnt > 20) {
		return;
	}

	/*
	 * Extract AGC and the per-chain (A/B/C) RSSI fields from the
	 * non-config PHY words; values are little-endian on the wire.
	 */
	phyinfo = (struct iwp_rx_non_cfg_phy *)stat->non_cfg_phy;
	temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_AGC_IDX]);
	agc = (temp & IWP_OFDM_AGC_MSK) >> IWP_OFDM_AGC_BIT_POS;

	temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_RSSI_AB_IDX]);
	arssi = (temp & IWP_OFDM_RSSI_A_MSK) >> IWP_OFDM_RSSI_A_BIT_POS;
	brssi = (temp & IWP_OFDM_RSSI_B_MSK) >> IWP_OFDM_RSSI_B_BIT_POS;

	temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_RSSI_C_IDX]);
	crssi = (temp & IWP_OFDM_RSSI_C_MSK) >> IWP_OFDM_RSSI_C_BIT_POS;

	/* use the strongest chain's RSSI */
	mrssi = MAX(arssi, brssi);
	mrssi = MAX(mrssi, crssi);

	t = mrssi - agc - IWP_RSSI_OFFSET;
	/*
	 * convert dBm to percentage
	 * (empirical quadratic mapping, clamped to 1..100 below)
	 */
	rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t)))
	    / (75 * 75);
	if (rssi > 100) {
		rssi = 100;
	}
	if (rssi < 1) {
		rssi = 1;
	}

	/*
	 * size of frame, not include FCS
	 */
	len = LE_16(mpdu_size->byte_count);
	/* the 32-bit status/CRC word sits right after the frame body */
	tail = (uint32_t *)((uint8_t *)(desc + 1) +
	    sizeof (struct iwp_rx_mpdu_body_size) + len);
	bcopy(tail, &crc, 4);

	IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
	    "rx intr: idx=%d phy_len=%x len=%d "
	    "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
	    "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
	    len, stat->rate.r.s.rate, stat->channel,
	    LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
	    stat->cfg_phy_cnt, LE_32(crc)));

	/* drop frames that are too short or larger than our DMA buffer */
	if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "rx frame oversize\n"));
		return;
	}

	/*
	 * discard Rx frames with bad CRC
	 */
	if ((LE_32(crc) &
	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
	    (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "rx crc error tail: %x\n",
		    LE_32(crc)));
		sc->sc_rx_err++;
		return;
	}

	wh = (struct ieee80211_frame *)
	    ((uint8_t *)(desc + 1)+ sizeof (struct iwp_rx_mpdu_body_size));

	/*
	 * On an association response, remember the AID granted by
	 * the AP (located two uint16_t fields past the 802.11 header).
	 */
	if (IEEE80211_FC0_SUBTYPE_ASSOC_RESP == *(uint8_t *)wh) {
		sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "rx : association id = %x\n",
		    sc->sc_assoc_id));
	}

#ifdef DEBUG
	if (iwp_dbg_flags & IWP_DEBUG_RX) {
		ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
	}
#endif

	in = ieee80211_find_rxnode(ic, wh);
	mp = allocb(len, BPRI_MED);
	if (mp) {
		(void) memcpy(mp->b_wptr, wh, len);
		mp->b_wptr += len;

		/*
		 * send the frame to the 802.11 layer
		 */
		(void) ieee80211_input(ic, mp, in, rssi, 0);
	} else {
		sc->sc_rx_nobuf++;
		IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
		    "alloc rx buf failed\n"));
	}

	/*
	 * release node reference
	 */
	ieee80211_free_node(in);
}
2337 2336
2338 2337 /*
2339 2338 * process correlative affairs after a frame is sent.
2340 2339 */
/*
 * Handle a REPLY_TX notification: update the AMRR rate-control
 * counters for the transmitted frame, reset the TX watchdog timer,
 * credit the TX ring, and restart GLD transmission if it had been
 * held back for lack of ring space.
 */
static void
iwp_tx_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	/* low two bits of the qid select one of the TX rings */
	iwp_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
	iwp_tx_stat_t *stat = (iwp_tx_stat_t *)(desc + 1);
	iwp_amrr_t *amrr;

	/* nothing to account against if we have no bss node */
	if (NULL == ic->ic_bss) {
		return;
	}

	amrr = (iwp_amrr_t *)ic->ic_bss;

	amrr->txcnt++;
	IWP_DBG((IWP_DEBUG_RATECTL, "iwp_tx_intr(): "
	    "tx: %d cnt\n", amrr->txcnt));

	/* any hardware retries count against the rate-control state */
	if (stat->ntries > 0) {
		amrr->retrycnt++;
		sc->sc_tx_retries++;
		IWP_DBG((IWP_DEBUG_TX, "iwp_tx_intr(): "
		    "tx: %d retries\n",
		    sc->sc_tx_retries));
	}

	/* a completion means the hardware is alive: clear the watchdog */
	mutex_enter(&sc->sc_mt_lock);
	sc->sc_tx_timer = 0;
	mutex_exit(&sc->sc_mt_lock);

	mutex_enter(&sc->sc_tx_lock);

	ring->queued--;
	if (ring->queued < 0) {
		ring->queued = 0;
	}

	/*
	 * If transmission was previously blocked on a full ring, tell
	 * GLD to resume once the ring has drained below 1/8 occupancy.
	 * sc_tx_lock is dropped around mac_tx_update() to avoid
	 * calling up into the MAC layer with the driver lock held.
	 */
	if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count >> 3))) {
		sc->sc_need_reschedule = 0;
		mutex_exit(&sc->sc_tx_lock);
		mac_tx_update(ic->ic_mach);
		mutex_enter(&sc->sc_tx_lock);
	}

	mutex_exit(&sc->sc_tx_lock);
}
2387 2386
2388 2387 /*
2389 2388 * inform a given command has been executed
2390 2389 */
/*
 * Handle the response to a host command: wake up the thread blocked
 * in command submission waiting on sc_cmd_cv.
 */
static void
iwp_cmd_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
{
	/*
	 * Only responses on queue 4 are treated as command
	 * completions; others are ignored here.
	 */
	if ((desc->hdr.qid & 7) != 4) {
		return;
	}

	/*
	 * sc_cmd_accum tracks outstanding responses that no one is
	 * waiting for (presumably commands issued without waiting);
	 * swallow those instead of signalling.
	 */
	if (sc->sc_cmd_accum > 0) {
		sc->sc_cmd_accum--;
		return;
	}

	mutex_enter(&sc->sc_glock);

	sc->sc_cmd_flag = SC_CMD_FLG_DONE;

	cv_signal(&sc->sc_cmd_cv);

	mutex_exit(&sc->sc_glock);

	IWP_DBG((IWP_DEBUG_CMD, "iwp_cmd_intr(): "
	    "qid=%x idx=%d flags=%x type=0x%x\n",
	    desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
	    desc->hdr.type));
}
2416 2415
2417 2416 /*
2418 2417 * this function will be invoked when alive notification occur.
2419 2418 */
2420 2419 static void
2421 2420 iwp_ucode_alive(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2422 2421 {
2423 2422 uint32_t rv;
2424 2423 struct iwp_calib_cfg_cmd cmd;
2425 2424 struct iwp_alive_resp *ar =
2426 2425 (struct iwp_alive_resp *)(desc + 1);
2427 2426 struct iwp_calib_results *res_p = &sc->sc_calib_results;
2428 2427
2429 2428 /*
2430 2429 * the microcontroller is ready
2431 2430 */
2432 2431 IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2433 2432 "microcode alive notification minor: %x major: %x type: "
2434 2433 "%x subtype: %x\n",
2435 2434 ar->ucode_minor, ar->ucode_minor, ar->ver_type, ar->ver_subtype));
2436 2435
2437 2436 #ifdef DEBUG
2438 2437 if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
2439 2438 IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2440 2439 "microcontroller initialization failed\n"));
2441 2440 }
2442 2441 #endif
2443 2442
2444 2443 /*
2445 2444 * determine if init alive or runtime alive.
2446 2445 */
2447 2446 if (INITIALIZE_SUBTYPE == ar->ver_subtype) {
2448 2447 IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2449 2448 "initialization alive received.\n"));
2450 2449
2451 2450 (void) memcpy(&sc->sc_card_alive_init, ar,
2452 2451 sizeof (struct iwp_init_alive_resp));
2453 2452
2454 2453 /*
2455 2454 * necessary configuration to NIC
2456 2455 */
2457 2456 mutex_enter(&sc->sc_glock);
2458 2457
2459 2458 rv = iwp_alive_common(sc);
2460 2459 if (rv != IWP_SUCCESS) {
2461 2460 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2462 2461 "common alive process failed in init alive.\n");
2463 2462 mutex_exit(&sc->sc_glock);
2464 2463 return;
2465 2464 }
2466 2465
2467 2466 (void) memset(&cmd, 0, sizeof (cmd));
2468 2467
2469 2468 cmd.ucd_calib_cfg.once.is_enable = IWP_CALIB_INIT_CFG_ALL;
2470 2469 cmd.ucd_calib_cfg.once.start = IWP_CALIB_INIT_CFG_ALL;
2471 2470 cmd.ucd_calib_cfg.once.send_res = IWP_CALIB_INIT_CFG_ALL;
2472 2471 cmd.ucd_calib_cfg.flags = IWP_CALIB_INIT_CFG_ALL;
2473 2472
2474 2473 /*
2475 2474 * require ucode execute calibration
2476 2475 */
2477 2476 rv = iwp_cmd(sc, CALIBRATION_CFG_CMD, &cmd, sizeof (cmd), 1);
2478 2477 if (rv != IWP_SUCCESS) {
2479 2478 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2480 2479 "failed to send calibration configure command.\n");
2481 2480 mutex_exit(&sc->sc_glock);
2482 2481 return;
2483 2482 }
2484 2483
2485 2484 mutex_exit(&sc->sc_glock);
2486 2485
2487 2486 } else { /* runtime alive */
2488 2487
2489 2488 IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2490 2489 "runtime alive received.\n"));
2491 2490
2492 2491 (void) memcpy(&sc->sc_card_alive_run, ar,
2493 2492 sizeof (struct iwp_alive_resp));
2494 2493
2495 2494 mutex_enter(&sc->sc_glock);
2496 2495
2497 2496 /*
2498 2497 * necessary configuration to NIC
2499 2498 */
2500 2499 rv = iwp_alive_common(sc);
2501 2500 if (rv != IWP_SUCCESS) {
2502 2501 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2503 2502 "common alive process failed in run alive.\n");
2504 2503 mutex_exit(&sc->sc_glock);
2505 2504 return;
2506 2505 }
2507 2506
2508 2507 /*
2509 2508 * send the result of local oscilator calibration to uCode.
2510 2509 */
2511 2510 if (res_p->lo_res != NULL) {
2512 2511 rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2513 2512 res_p->lo_res, res_p->lo_res_len, 1);
2514 2513 if (rv != IWP_SUCCESS) {
2515 2514 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2516 2515 "failed to send local"
2517 2516 "oscilator calibration command.\n");
2518 2517 mutex_exit(&sc->sc_glock);
2519 2518 return;
2520 2519 }
2521 2520
2522 2521 DELAY(1000);
2523 2522 }
2524 2523
2525 2524 /*
2526 2525 * send the result of TX IQ calibration to uCode.
2527 2526 */
2528 2527 if (res_p->tx_iq_res != NULL) {
2529 2528 rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2530 2529 res_p->tx_iq_res, res_p->tx_iq_res_len, 1);
2531 2530 if (rv != IWP_SUCCESS) {
2532 2531 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2533 2532 "failed to send TX IQ"
2534 2533 "calibration command.\n");
2535 2534 mutex_exit(&sc->sc_glock);
2536 2535 return;
2537 2536 }
2538 2537
2539 2538 DELAY(1000);
2540 2539 }
2541 2540
2542 2541 /*
2543 2542 * send the result of TX IQ perd calibration to uCode.
2544 2543 */
2545 2544 if (res_p->tx_iq_perd_res != NULL) {
2546 2545 rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2547 2546 res_p->tx_iq_perd_res,
2548 2547 res_p->tx_iq_perd_res_len, 1);
2549 2548 if (rv != IWP_SUCCESS) {
2550 2549 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2551 2550 "failed to send TX IQ perd"
2552 2551 "calibration command.\n");
2553 2552 mutex_exit(&sc->sc_glock);
2554 2553 return;
2555 2554 }
2556 2555
2557 2556 DELAY(1000);
2558 2557 }
2559 2558
2560 2559 /*
2561 2560 * send the result of Base Band calibration to uCode.
2562 2561 */
2563 2562 if (res_p->base_band_res != NULL) {
2564 2563 rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2565 2564 res_p->base_band_res,
2566 2565 res_p->base_band_res_len, 1);
2567 2566 if (rv != IWP_SUCCESS) {
2568 2567 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2569 2568 "failed to send Base Band"
2570 2569 "calibration command.\n");
2571 2570 mutex_exit(&sc->sc_glock);
2572 2571 return;
2573 2572 }
2574 2573
2575 2574 DELAY(1000);
2576 2575 }
2577 2576
2578 2577 atomic_or_32(&sc->sc_flags, IWP_F_FW_INIT);
2579 2578 cv_signal(&sc->sc_ucode_cv);
2580 2579
2581 2580 mutex_exit(&sc->sc_glock);
2582 2581 }
2583 2582
2584 2583 }
2585 2584
2586 2585 /*
2587 2586 * deal with receiving frames, command response
2588 2587 * and all notifications from ucode.
2589 2588 */
/* ARGSUSED */
/*
 * Soft interrupt handler: drain the RX queue, dispatching each
 * notification from the firmware (received frames, command
 * responses, scan/state/calibration notifications), then hand the
 * freed ring entries back to the hardware and re-enable interrupts.
 */
static uint_t
iwp_rx_softintr(caddr_t arg, caddr_t unused)
{
	iwp_sc_t *sc;
	ieee80211com_t *ic;
	iwp_rx_desc_t *desc;
	iwp_rx_data_t *data;
	uint32_t index;

	if (NULL == arg) {
		return (DDI_INTR_UNCLAIMED);
	}
	sc = (iwp_sc_t *)arg;
	ic = &sc->sc_ic;

	/*
	 * firmware has moved the index of the rx queue, driver get it,
	 * and deal with it.
	 */
	index = (sc->sc_shared->val0) & 0xfff;

	/* process every descriptor between our cursor and the fw index */
	while (sc->sc_rxq.cur != index) {
		data = &sc->sc_rxq.data[sc->sc_rxq.cur];
		desc = (iwp_rx_desc_t *)data->dma_data.mem_va;

		IWP_DBG((IWP_DEBUG_INTR, "iwp_rx_softintr(): "
		    "rx notification index = %d"
		    " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
		    index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
		    desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));

		/*
		 * a command other than a tx need to be replied
		 */
		if (!(desc->hdr.qid & 0x80) &&
		    (desc->hdr.type != REPLY_SCAN_CMD) &&
		    (desc->hdr.type != REPLY_TX)) {
			iwp_cmd_intr(sc, desc);
		}

		switch (desc->hdr.type) {
		case REPLY_RX_PHY_CMD:
			iwp_rx_phy_intr(sc, desc);
			break;

		case REPLY_RX_MPDU_CMD:
			iwp_rx_mpdu_intr(sc, desc);
			break;

		case REPLY_TX:
			iwp_tx_intr(sc, desc);
			break;

		case REPLY_ALIVE:
			iwp_ucode_alive(sc, desc);
			break;

		case CARD_STATE_NOTIFICATION:
		{
			uint32_t *status = (uint32_t *)(desc + 1);

			IWP_DBG((IWP_DEBUG_RADIO, "iwp_rx_softintr(): "
			    "state changed to %x\n",
			    LE_32(*status)));

			if (LE_32(*status) & 1) {
				/*
				 * the radio button has to be pushed(OFF). It
				 * is considered as a hw error, the
				 * iwp_thread() tries to recover it after the
				 * button is pushed again(ON)
				 */
				cmn_err(CE_NOTE, "iwp_rx_softintr(): "
				    "radio transmitter is off\n");
				sc->sc_ostate = sc->sc_ic.ic_state;
				ieee80211_new_state(&sc->sc_ic,
				    IEEE80211_S_INIT, -1);
				atomic_or_32(&sc->sc_flags,
				    IWP_F_HW_ERR_RECOVER | IWP_F_RADIO_OFF);
			}

			break;
		}

		case SCAN_START_NOTIFICATION:
		{
			iwp_start_scan_t *scan =
			    (iwp_start_scan_t *)(desc + 1);

			IWP_DBG((IWP_DEBUG_SCAN, "iwp_rx_softintr(): "
			    "scanning channel %d status %x\n",
			    scan->chan, LE_32(scan->status)));

			/* track the channel the hardware is scanning */
			ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
			break;
		}

		case SCAN_COMPLETE_NOTIFICATION:
		{
#ifdef DEBUG
			iwp_stop_scan_t *scan =
			    (iwp_stop_scan_t *)(desc + 1);

			IWP_DBG((IWP_DEBUG_SCAN, "iwp_rx_softintr(): "
			    "completed channel %d (burst of %d) status %02x\n",
			    scan->chan, scan->nchan, scan->status));
#endif

			/* picked up by iwp_thread() to advance the scan */
			sc->sc_scan_pending++;
			break;
		}

		case STATISTICS_NOTIFICATION:
		{
			/*
			 * handle statistics notification
			 */
			break;
		}

		case CALIBRATION_RES_NOTIFICATION:
			iwp_save_calib_result(sc, desc);
			break;

		case CALIBRATION_COMPLETE_NOTIFICATION:
			/* init firmware finished calibrating; wake waiters */
			mutex_enter(&sc->sc_glock);
			atomic_or_32(&sc->sc_flags, IWP_F_FW_INIT);
			cv_signal(&sc->sc_ucode_cv);
			mutex_exit(&sc->sc_glock);
			break;

		case MISSED_BEACONS_NOTIFICATION:
		{
			struct iwp_beacon_missed *miss =
			    (struct iwp_beacon_missed *)(desc + 1);

			/* too many consecutive missed beacons: drop the link */
			if ((ic->ic_state == IEEE80211_S_RUN) &&
			    (LE_32(miss->consecutive) > 50)) {
				cmn_err(CE_NOTE, "iwp: iwp_rx_softintr(): "
				    "beacon missed %d/%d\n",
				    LE_32(miss->consecutive),
				    LE_32(miss->total));
				(void) ieee80211_new_state(ic,
				    IEEE80211_S_INIT, -1);
			}
			break;
		}
		}

		sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
	}

	/*
	 * driver dealt with what received in rx queue and tell the information
	 * to the firmware.
	 */
	index = (0 == index) ? RX_QUEUE_SIZE - 1 : index - 1;
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));

	/*
	 * re-enable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	return (DDI_INTR_CLAIMED);
}
2757 2756
2758 2757 /*
2759 2758 * the handle of interrupt
2760 2759 */
/* ARGSUSED */
/*
 * Hard interrupt handler: read and acknowledge the interrupt cause
 * registers, handle fatal firmware errors and RF-kill transitions
 * inline, trigger the RX soft interrupt for receive work, and signal
 * firmware-segment-DMA completions to iwp_load_*_firmware().
 */
static uint_t
iwp_intr(caddr_t arg, caddr_t unused)
{
	iwp_sc_t *sc;
	uint32_t r, rfh;

	if (NULL == arg) {
		return (DDI_INTR_UNCLAIMED);
	}
	sc = (iwp_sc_t *)arg;

	/* 0 means not ours; all-ones means the device is gone */
	r = IWP_READ(sc, CSR_INT);
	if (0 == r || 0xffffffff == r) {
		return (DDI_INTR_UNCLAIMED);
	}

	IWP_DBG((IWP_DEBUG_INTR, "iwp_intr(): "
	    "interrupt reg %x\n", r));

	rfh = IWP_READ(sc, CSR_FH_INT_STATUS);

	IWP_DBG((IWP_DEBUG_INTR, "iwp_intr(): "
	    "FH interrupt reg %x\n", rfh));

	/*
	 * disable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, 0);

	/*
	 * ack interrupts
	 */
	IWP_WRITE(sc, CSR_INT, r);
	IWP_WRITE(sc, CSR_FH_INT_STATUS, rfh);

	/*
	 * Fatal firmware error: stop the hardware and let
	 * iwp_thread() attempt recovery (IWP_F_HW_ERR_RECOVER).
	 */
	if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
		IWP_DBG((IWP_DEBUG_FW, "iwp_intr(): "
		    "fatal firmware error\n"));
		iwp_stop(sc);
		sc->sc_ostate = sc->sc_ic.ic_state;

		/* notify upper layer */
		if (!IWP_CHK_FAST_RECOVER(sc)) {
			ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
		}

		atomic_or_32(&sc->sc_flags, IWP_F_HW_ERR_RECOVER);
		return (DDI_INTR_CLAIMED);
	}

	if (r & BIT_INT_RF_KILL) {
		uint32_t tmp = IWP_READ(sc, CSR_GP_CNTRL);
		/* bit 27 appears to report the RF switch state — verify */
		if (tmp & (1 << 27)) {
			cmn_err(CE_NOTE, "RF switch: radio on\n");
		}
	}

	/* receive work is deferred to the soft interrupt handler */
	if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
	    (rfh & FH_INT_RX_MASK)) {
		(void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
		return (DDI_INTR_CLAIMED);
	}

	/* a firmware segment DMA finished: wake iwp_load_*_firmware() */
	if (r & BIT_INT_FH_TX) {
		mutex_enter(&sc->sc_glock);
		atomic_or_32(&sc->sc_flags, IWP_F_PUT_SEG);
		cv_signal(&sc->sc_put_seg_cv);
		mutex_exit(&sc->sc_glock);
	}

#ifdef DEBUG
	if (r & BIT_INT_ALIVE) {
		IWP_DBG((IWP_DEBUG_FW, "iwp_intr(): "
		    "firmware initialized.\n"));
	}
#endif

	/*
	 * re-enable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	return (DDI_INTR_CLAIMED);
}
2846 2845
2847 2846 static uint8_t
2848 2847 iwp_rate_to_plcp(int rate)
2849 2848 {
2850 2849 uint8_t ret;
2851 2850
2852 2851 switch (rate) {
2853 2852 /*
2854 2853 * CCK rates
2855 2854 */
2856 2855 case 2:
2857 2856 ret = 0xa;
2858 2857 break;
2859 2858
2860 2859 case 4:
2861 2860 ret = 0x14;
2862 2861 break;
2863 2862
2864 2863 case 11:
2865 2864 ret = 0x37;
2866 2865 break;
2867 2866
2868 2867 case 22:
2869 2868 ret = 0x6e;
2870 2869 break;
2871 2870
2872 2871 /*
2873 2872 * OFDM rates
2874 2873 */
2875 2874 case 12:
2876 2875 ret = 0xd;
2877 2876 break;
2878 2877
2879 2878 case 18:
2880 2879 ret = 0xf;
2881 2880 break;
2882 2881
2883 2882 case 24:
2884 2883 ret = 0x5;
2885 2884 break;
2886 2885
2887 2886 case 36:
2888 2887 ret = 0x7;
2889 2888 break;
2890 2889
2891 2890 case 48:
2892 2891 ret = 0x9;
2893 2892 break;
2894 2893
2895 2894 case 72:
2896 2895 ret = 0xb;
2897 2896 break;
2898 2897
2899 2898 case 96:
2900 2899 ret = 0x1;
2901 2900 break;
2902 2901
2903 2902 case 108:
2904 2903 ret = 0x3;
2905 2904 break;
2906 2905
2907 2906 default:
2908 2907 ret = 0;
2909 2908 break;
2910 2909 }
2911 2910
2912 2911 return (ret);
2913 2912 }
2914 2913
2915 2914 /*
2916 2915 * invoked by GLD send frames
2917 2916 */
2918 2917 static mblk_t *
2919 2918 iwp_m_tx(void *arg, mblk_t *mp)
2920 2919 {
2921 2920 iwp_sc_t *sc;
2922 2921 ieee80211com_t *ic;
2923 2922 mblk_t *next;
2924 2923
2925 2924 if (NULL == arg) {
2926 2925 return (NULL);
2927 2926 }
2928 2927 sc = (iwp_sc_t *)arg;
2929 2928 ic = &sc->sc_ic;
2930 2929
2931 2930 if (sc->sc_flags & IWP_F_SUSPEND) {
2932 2931 freemsgchain(mp);
2933 2932 return (NULL);
2934 2933 }
2935 2934
2936 2935 if (ic->ic_state != IEEE80211_S_RUN) {
2937 2936 freemsgchain(mp);
2938 2937 return (NULL);
2939 2938 }
2940 2939
2941 2940 if ((sc->sc_flags & IWP_F_HW_ERR_RECOVER) &&
2942 2941 IWP_CHK_FAST_RECOVER(sc)) {
2943 2942 IWP_DBG((IWP_DEBUG_FW, "iwp_m_tx(): "
2944 2943 "hold queue\n"));
2945 2944 return (mp);
2946 2945 }
2947 2946
2948 2947
2949 2948 while (mp != NULL) {
2950 2949 next = mp->b_next;
2951 2950 mp->b_next = NULL;
2952 2951 if (iwp_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
2953 2952 mp->b_next = next;
2954 2953 break;
2955 2954 }
2956 2955 mp = next;
2957 2956 }
2958 2957
2959 2958 return (mp);
2960 2959 }
2961 2960
2962 2961 /*
2963 2962 * send frames
2964 2963 */
2965 2964 static int
2966 2965 iwp_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
2967 2966 {
2968 2967 iwp_sc_t *sc;
2969 2968 iwp_tx_ring_t *ring;
2970 2969 iwp_tx_desc_t *desc;
2971 2970 iwp_tx_data_t *data;
2972 2971 iwp_tx_data_t *desc_data;
2973 2972 iwp_cmd_t *cmd;
2974 2973 iwp_tx_cmd_t *tx;
2975 2974 ieee80211_node_t *in;
2976 2975 struct ieee80211_frame *wh;
2977 2976 struct ieee80211_key *k = NULL;
2978 2977 mblk_t *m, *m0;
2979 2978 int hdrlen, len, len0, mblen, off, err = IWP_SUCCESS;
2980 2979 uint16_t masks = 0;
2981 2980 uint32_t rate, s_id = 0;
2982 2981
2983 2982 if (NULL == ic) {
2984 2983 return (IWP_FAIL);
2985 2984 }
2986 2985 sc = (iwp_sc_t *)ic;
2987 2986
2988 2987 if (sc->sc_flags & IWP_F_SUSPEND) {
2989 2988 if ((type & IEEE80211_FC0_TYPE_MASK) !=
2990 2989 IEEE80211_FC0_TYPE_DATA) {
2991 2990 freemsg(mp);
2992 2991 }
2993 2992 err = IWP_FAIL;
2994 2993 goto exit;
2995 2994 }
2996 2995
2997 2996 mutex_enter(&sc->sc_tx_lock);
2998 2997 ring = &sc->sc_txq[0];
2999 2998 data = &ring->data[ring->cur];
3000 2999 cmd = data->cmd;
3001 3000 bzero(cmd, sizeof (*cmd));
3002 3001
3003 3002 ring->cur = (ring->cur + 1) % ring->count;
3004 3003
3005 3004 /*
3006 3005 * Need reschedule TX if TX buffer is full.
3007 3006 */
3008 3007 if (ring->queued > ring->count - IWP_MAX_WIN_SIZE) {
3009 3008 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3010 3009 "no txbuf\n"));
3011 3010
3012 3011 sc->sc_need_reschedule = 1;
3013 3012 mutex_exit(&sc->sc_tx_lock);
3014 3013
3015 3014 if ((type & IEEE80211_FC0_TYPE_MASK) !=
3016 3015 IEEE80211_FC0_TYPE_DATA) {
3017 3016 freemsg(mp);
3018 3017 }
3019 3018 sc->sc_tx_nobuf++;
3020 3019 err = IWP_FAIL;
3021 3020 goto exit;
3022 3021 }
3023 3022
3024 3023 ring->queued++;
3025 3024
3026 3025 mutex_exit(&sc->sc_tx_lock);
3027 3026
3028 3027 hdrlen = ieee80211_hdrspace(ic, mp->b_rptr);
3029 3028
3030 3029 m = allocb(msgdsize(mp) + 32, BPRI_MED);
3031 3030 if (NULL == m) { /* can not alloc buf, drop this package */
3032 3031 cmn_err(CE_WARN, "iwp_send(): "
3033 3032 "failed to allocate msgbuf\n");
3034 3033 freemsg(mp);
3035 3034
3036 3035 mutex_enter(&sc->sc_tx_lock);
3037 3036 ring->queued--;
3038 3037 if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3039 3038 sc->sc_need_reschedule = 0;
3040 3039 mutex_exit(&sc->sc_tx_lock);
3041 3040 mac_tx_update(ic->ic_mach);
3042 3041 mutex_enter(&sc->sc_tx_lock);
3043 3042 }
3044 3043 mutex_exit(&sc->sc_tx_lock);
3045 3044
3046 3045 err = IWP_SUCCESS;
3047 3046 goto exit;
3048 3047 }
3049 3048
3050 3049 for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
3051 3050 mblen = MBLKL(m0);
3052 3051 (void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
3053 3052 off += mblen;
3054 3053 }
3055 3054
3056 3055 m->b_wptr += off;
3057 3056
3058 3057 wh = (struct ieee80211_frame *)m->b_rptr;
3059 3058
3060 3059 /*
3061 3060 * determine send which AP or station in IBSS
3062 3061 */
3063 3062 in = ieee80211_find_txnode(ic, wh->i_addr1);
3064 3063 if (NULL == in) {
3065 3064 cmn_err(CE_WARN, "iwp_send(): "
3066 3065 "failed to find tx node\n");
3067 3066 freemsg(mp);
3068 3067 freemsg(m);
3069 3068 sc->sc_tx_err++;
3070 3069
3071 3070 mutex_enter(&sc->sc_tx_lock);
3072 3071 ring->queued--;
3073 3072 if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3074 3073 sc->sc_need_reschedule = 0;
3075 3074 mutex_exit(&sc->sc_tx_lock);
3076 3075 mac_tx_update(ic->ic_mach);
3077 3076 mutex_enter(&sc->sc_tx_lock);
3078 3077 }
3079 3078 mutex_exit(&sc->sc_tx_lock);
3080 3079
3081 3080 err = IWP_SUCCESS;
3082 3081 goto exit;
3083 3082 }
3084 3083
3085 3084 /*
3086 3085 * Net80211 module encapsulate outbound data frames.
3087 3086 * Add some feilds of 80211 frame.
3088 3087 */
3089 3088 if ((type & IEEE80211_FC0_TYPE_MASK) ==
3090 3089 IEEE80211_FC0_TYPE_DATA) {
3091 3090 (void) ieee80211_encap(ic, m, in);
3092 3091 }
3093 3092
3094 3093 freemsg(mp);
3095 3094
3096 3095 cmd->hdr.type = REPLY_TX;
3097 3096 cmd->hdr.flags = 0;
3098 3097 cmd->hdr.qid = ring->qid;
3099 3098
3100 3099 tx = (iwp_tx_cmd_t *)cmd->data;
3101 3100 tx->tx_flags = 0;
3102 3101
3103 3102 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3104 3103 tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
3105 3104 } else {
3106 3105 tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
3107 3106 }
3108 3107
3109 3108 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
3110 3109 k = ieee80211_crypto_encap(ic, m);
3111 3110 if (NULL == k) {
3112 3111 freemsg(m);
3113 3112 sc->sc_tx_err++;
3114 3113
3115 3114 mutex_enter(&sc->sc_tx_lock);
3116 3115 ring->queued--;
3117 3116 if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3118 3117 sc->sc_need_reschedule = 0;
3119 3118 mutex_exit(&sc->sc_tx_lock);
3120 3119 mac_tx_update(ic->ic_mach);
3121 3120 mutex_enter(&sc->sc_tx_lock);
3122 3121 }
3123 3122 mutex_exit(&sc->sc_tx_lock);
3124 3123
3125 3124 err = IWP_SUCCESS;
3126 3125 goto exit;
3127 3126 }
3128 3127
3129 3128 /* packet header may have moved, reset our local pointer */
3130 3129 wh = (struct ieee80211_frame *)m->b_rptr;
3131 3130 }
3132 3131
3133 3132 len = msgdsize(m);
3134 3133
3135 3134 #ifdef DEBUG
3136 3135 if (iwp_dbg_flags & IWP_DEBUG_TX) {
3137 3136 ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
3138 3137 }
3139 3138 #endif
3140 3139
3141 3140 tx->rts_retry_limit = IWP_TX_RTS_RETRY_LIMIT;
3142 3141 tx->data_retry_limit = IWP_TX_DATA_RETRY_LIMIT;
3143 3142
3144 3143 /*
3145 3144 * specific TX parameters for management frames
3146 3145 */
3147 3146 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
3148 3147 IEEE80211_FC0_TYPE_MGT) {
3149 3148 /*
3150 3149 * mgmt frames are sent at 1M
3151 3150 */
3152 3151 if ((in->in_rates.ir_rates[0] &
3153 3152 IEEE80211_RATE_VAL) != 0) {
3154 3153 rate = in->in_rates.ir_rates[0] & IEEE80211_RATE_VAL;
3155 3154 } else {
3156 3155 rate = 2;
3157 3156 }
3158 3157
3159 3158 tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3160 3159
3161 3160 /*
3162 3161 * tell h/w to set timestamp in probe responses
3163 3162 */
3164 3163 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3165 3164 IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
3166 3165 tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);
3167 3166
3168 3167 tx->data_retry_limit = 3;
3169 3168 if (tx->data_retry_limit < tx->rts_retry_limit) {
3170 3169 tx->rts_retry_limit = tx->data_retry_limit;
3171 3170 }
3172 3171 }
3173 3172
3174 3173 if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3175 3174 IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
3176 3175 ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3177 3176 IEEE80211_FC0_SUBTYPE_REASSOC_REQ)) {
3178 3177 tx->timeout.pm_frame_timeout = LE_16(3);
3179 3178 } else {
3180 3179 tx->timeout.pm_frame_timeout = LE_16(2);
3181 3180 }
3182 3181
3183 3182 } else {
3184 3183 /*
3185 3184 * do it here for the software way rate scaling.
3186 3185 * later for rate scaling in hardware.
3187 3186 *
3188 3187 * now the txrate is determined in tx cmd flags, set to the
3189 3188 * max value 54M for 11g and 11M for 11b originally.
3190 3189 */
3191 3190 if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
3192 3191 rate = ic->ic_fixed_rate;
3193 3192 } else {
3194 3193 if ((in->in_rates.ir_rates[in->in_txrate] &
3195 3194 IEEE80211_RATE_VAL) != 0) {
3196 3195 rate = in->in_rates.
3197 3196 ir_rates[in->in_txrate] &
3198 3197 IEEE80211_RATE_VAL;
3199 3198 }
3200 3199 }
3201 3200
3202 3201 tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3203 3202
3204 3203 tx->timeout.pm_frame_timeout = 0;
3205 3204 }
3206 3205
3207 3206 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3208 3207 "tx rate[%d of %d] = %x",
3209 3208 in->in_txrate, in->in_rates.ir_nrates, rate));
3210 3209
3211 3210 len0 = roundup(4 + sizeof (iwp_tx_cmd_t) + hdrlen, 4);
3212 3211 if (len0 != (4 + sizeof (iwp_tx_cmd_t) + hdrlen)) {
3213 3212 tx->tx_flags |= LE_32(TX_CMD_FLG_MH_PAD_MSK);
3214 3213 }
3215 3214
3216 3215 /*
3217 3216 * retrieve destination node's id
3218 3217 */
3219 3218 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3220 3219 tx->sta_id = IWP_BROADCAST_ID;
3221 3220 } else {
3222 3221 tx->sta_id = IWP_AP_ID;
3223 3222 }
3224 3223
3225 3224 if (2 == rate || 4 == rate || 11 == rate || 22 == rate) {
3226 3225 masks |= RATE_MCS_CCK_MSK;
3227 3226 }
3228 3227
3229 3228 masks |= RATE_MCS_ANT_B_MSK;
3230 3229 tx->rate.r.rate_n_flags = LE_32(iwp_rate_to_plcp(rate) | masks);
3231 3230
3232 3231 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3233 3232 "tx flag = %x",
3234 3233 tx->tx_flags));
3235 3234
3236 3235 tx->stop_time.life_time = LE_32(0xffffffff);
3237 3236
3238 3237 tx->len = LE_16(len);
3239 3238
3240 3239 tx->dram_lsb_ptr =
3241 3240 LE_32(data->paddr_cmd + 4 + offsetof(iwp_tx_cmd_t, scratch));
3242 3241 tx->dram_msb_ptr = 0;
3243 3242 tx->driver_txop = 0;
3244 3243 tx->next_frame_len = 0;
3245 3244
3246 3245 (void) memcpy(tx + 1, m->b_rptr, hdrlen);
3247 3246 m->b_rptr += hdrlen;
3248 3247 (void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen);
3249 3248
3250 3249 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3251 3250 "sending data: qid=%d idx=%d len=%d",
3252 3251 ring->qid, ring->cur, len));
3253 3252
3254 3253 /*
3255 3254 * first segment includes the tx cmd plus the 802.11 header,
3256 3255 * the second includes the remaining of the 802.11 frame.
3257 3256 */
3258 3257 mutex_enter(&sc->sc_tx_lock);
3259 3258
3260 3259 cmd->hdr.idx = ring->desc_cur;
3261 3260
3262 3261 desc_data = &ring->data[ring->desc_cur];
3263 3262 desc = desc_data->desc;
3264 3263 bzero(desc, sizeof (*desc));
3265 3264 desc->val0 = 2 << 24;
3266 3265 desc->pa[0].tb1_addr = data->paddr_cmd;
3267 3266 desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
3268 3267 ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
3269 3268 desc->pa[0].val2 =
3270 3269 ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
3271 3270 ((len - hdrlen) << 20);
3272 3271 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3273 3272 "phy addr1 = 0x%x phy addr2 = 0x%x "
3274 3273 "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
3275 3274 data->paddr_cmd, data->dma_data.cookie.dmac_address,
3276 3275 len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));
3277 3276
3278 3277 /*
3279 3278 * kick ring
3280 3279 */
3281 3280 s_id = tx->sta_id;
3282 3281
3283 3282 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3284 3283 tfd_offset[ring->desc_cur].val =
3285 3284 (8 + len) | (s_id << 12);
3286 3285 if (ring->desc_cur < IWP_MAX_WIN_SIZE) {
3287 3286 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3288 3287 tfd_offset[IWP_QUEUE_SIZE + ring->desc_cur].val =
3289 3288 (8 + len) | (s_id << 12);
3290 3289 }
3291 3290
3292 3291 IWP_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
3293 3292 IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
3294 3293
3295 3294 ring->desc_cur = (ring->desc_cur + 1) % ring->count;
3296 3295 IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->desc_cur);
3297 3296
3298 3297 mutex_exit(&sc->sc_tx_lock);
3299 3298 freemsg(m);
3300 3299
3301 3300 /*
3302 3301 * release node reference
3303 3302 */
3304 3303 ieee80211_free_node(in);
3305 3304
3306 3305 ic->ic_stats.is_tx_bytes += len;
3307 3306 ic->ic_stats.is_tx_frags++;
3308 3307
3309 3308 mutex_enter(&sc->sc_mt_lock);
3310 3309 if (0 == sc->sc_tx_timer) {
3311 3310 sc->sc_tx_timer = 4;
3312 3311 }
3313 3312 mutex_exit(&sc->sc_mt_lock);
3314 3313
3315 3314 exit:
3316 3315 return (err);
3317 3316 }
3318 3317
3319 3318 /*
3320 3319 * invoked by GLD to deal with IOCTL affaires
3321 3320 */
3322 3321 static void
3323 3322 iwp_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
3324 3323 {
3325 3324 iwp_sc_t *sc;
3326 3325 ieee80211com_t *ic;
3327 3326 int err = EINVAL;
3328 3327
3329 3328 if (NULL == arg) {
3330 3329 return;
3331 3330 }
3332 3331 sc = (iwp_sc_t *)arg;
3333 3332 ic = &sc->sc_ic;
3334 3333
3335 3334 err = ieee80211_ioctl(ic, wq, mp);
3336 3335 if (ENETRESET == err) {
3337 3336 /*
3338 3337 * This is special for the hidden AP connection.
3339 3338 * In any case, we should make sure only one 'scan'
3340 3339 * in the driver for a 'connect' CLI command. So
3341 3340 * when connecting to a hidden AP, the scan is just
3342 3341 * sent out to the air when we know the desired
3343 3342 * essid of the AP we want to connect.
3344 3343 */
3345 3344 if (ic->ic_des_esslen) {
3346 3345 if (sc->sc_flags & IWP_F_RUNNING) {
3347 3346 iwp_m_stop(sc);
3348 3347 (void) iwp_m_start(sc);
3349 3348 (void) ieee80211_new_state(ic,
3350 3349 IEEE80211_S_SCAN, -1);
3351 3350 }
3352 3351 }
3353 3352 }
3354 3353 }
3355 3354
3356 3355 /*
3357 3356 * Call back functions for get/set proporty
3358 3357 */
3359 3358 static int
3360 3359 iwp_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3361 3360 uint_t wldp_length, void *wldp_buf)
3362 3361 {
3363 3362 iwp_sc_t *sc;
3364 3363 int err = EINVAL;
3365 3364
3366 3365 if (NULL == arg) {
3367 3366 return (EINVAL);
3368 3367 }
3369 3368 sc = (iwp_sc_t *)arg;
3370 3369
3371 3370 err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
3372 3371 wldp_length, wldp_buf);
3373 3372
3374 3373 return (err);
3375 3374 }
3376 3375
3377 3376 static void
3378 3377 iwp_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3379 3378 mac_prop_info_handle_t prh)
3380 3379 {
3381 3380 iwp_sc_t *sc;
3382 3381
3383 3382 sc = (iwp_sc_t *)arg;
3384 3383 ieee80211_propinfo(&sc->sc_ic, pr_name, wldp_pr_num, prh);
3385 3384 }
3386 3385
3387 3386 static int
3388 3387 iwp_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3389 3388 uint_t wldp_length, const void *wldp_buf)
3390 3389 {
3391 3390 iwp_sc_t *sc;
3392 3391 ieee80211com_t *ic;
3393 3392 int err = EINVAL;
3394 3393
3395 3394 if (NULL == arg) {
3396 3395 return (EINVAL);
3397 3396 }
3398 3397 sc = (iwp_sc_t *)arg;
3399 3398 ic = &sc->sc_ic;
3400 3399
3401 3400 err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
3402 3401 wldp_buf);
3403 3402
3404 3403 if (err == ENETRESET) {
3405 3404 if (ic->ic_des_esslen) {
3406 3405 if (sc->sc_flags & IWP_F_RUNNING) {
3407 3406 iwp_m_stop(sc);
3408 3407 (void) iwp_m_start(sc);
3409 3408 (void) ieee80211_new_state(ic,
3410 3409 IEEE80211_S_SCAN, -1);
3411 3410 }
3412 3411 }
3413 3412 err = 0;
3414 3413 }
3415 3414 return (err);
3416 3415 }
3417 3416
3418 3417 /*
3419 3418 * invoked by GLD supply statistics NIC and driver
3420 3419 */
3421 3420 static int
3422 3421 iwp_m_stat(void *arg, uint_t stat, uint64_t *val)
3423 3422 {
3424 3423 iwp_sc_t *sc;
3425 3424 ieee80211com_t *ic;
3426 3425 ieee80211_node_t *in;
3427 3426
3428 3427 if (NULL == arg) {
3429 3428 return (EINVAL);
3430 3429 }
3431 3430 sc = (iwp_sc_t *)arg;
3432 3431 ic = &sc->sc_ic;
3433 3432
3434 3433 mutex_enter(&sc->sc_glock);
3435 3434
3436 3435 switch (stat) {
3437 3436 case MAC_STAT_IFSPEED:
3438 3437 in = ic->ic_bss;
3439 3438 *val = ((IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) ?
3440 3439 IEEE80211_RATE(in->in_txrate) :
3441 3440 ic->ic_fixed_rate) / 2 * 1000000;
3442 3441 break;
3443 3442 case MAC_STAT_NOXMTBUF:
3444 3443 *val = sc->sc_tx_nobuf;
3445 3444 break;
3446 3445 case MAC_STAT_NORCVBUF:
3447 3446 *val = sc->sc_rx_nobuf;
3448 3447 break;
3449 3448 case MAC_STAT_IERRORS:
3450 3449 *val = sc->sc_rx_err;
3451 3450 break;
3452 3451 case MAC_STAT_RBYTES:
3453 3452 *val = ic->ic_stats.is_rx_bytes;
3454 3453 break;
3455 3454 case MAC_STAT_IPACKETS:
3456 3455 *val = ic->ic_stats.is_rx_frags;
3457 3456 break;
3458 3457 case MAC_STAT_OBYTES:
3459 3458 *val = ic->ic_stats.is_tx_bytes;
3460 3459 break;
3461 3460 case MAC_STAT_OPACKETS:
3462 3461 *val = ic->ic_stats.is_tx_frags;
3463 3462 break;
3464 3463 case MAC_STAT_OERRORS:
3465 3464 case WIFI_STAT_TX_FAILED:
3466 3465 *val = sc->sc_tx_err;
3467 3466 break;
3468 3467 case WIFI_STAT_TX_RETRANS:
3469 3468 *val = sc->sc_tx_retries;
3470 3469 break;
3471 3470 case WIFI_STAT_FCS_ERRORS:
3472 3471 case WIFI_STAT_WEP_ERRORS:
3473 3472 case WIFI_STAT_TX_FRAGS:
3474 3473 case WIFI_STAT_MCAST_TX:
3475 3474 case WIFI_STAT_RTS_SUCCESS:
3476 3475 case WIFI_STAT_RTS_FAILURE:
3477 3476 case WIFI_STAT_ACK_FAILURE:
3478 3477 case WIFI_STAT_RX_FRAGS:
3479 3478 case WIFI_STAT_MCAST_RX:
3480 3479 case WIFI_STAT_RX_DUPS:
3481 3480 mutex_exit(&sc->sc_glock);
3482 3481 return (ieee80211_stat(ic, stat, val));
3483 3482 default:
3484 3483 mutex_exit(&sc->sc_glock);
3485 3484 return (ENOTSUP);
3486 3485 }
3487 3486
3488 3487 mutex_exit(&sc->sc_glock);
3489 3488
3490 3489 return (IWP_SUCCESS);
3491 3490
3492 3491 }
3493 3492
3494 3493 /*
3495 3494 * invoked by GLD to start or open NIC
3496 3495 */
3497 3496 static int
3498 3497 iwp_m_start(void *arg)
3499 3498 {
3500 3499 iwp_sc_t *sc;
3501 3500 ieee80211com_t *ic;
3502 3501 int err = IWP_FAIL;
3503 3502
3504 3503 if (NULL == arg) {
3505 3504 return (EINVAL);
3506 3505 }
3507 3506 sc = (iwp_sc_t *)arg;
3508 3507 ic = &sc->sc_ic;
3509 3508
3510 3509 err = iwp_init(sc);
3511 3510 if (err != IWP_SUCCESS) {
3512 3511 /*
3513 3512 * The hw init err(eg. RF is OFF). Return Success to make
3514 3513 * the 'plumb' succeed. The iwp_thread() tries to re-init
3515 3514 * background.
3516 3515 */
3517 3516 atomic_or_32(&sc->sc_flags, IWP_F_HW_ERR_RECOVER);
3518 3517 return (IWP_SUCCESS);
3519 3518 }
3520 3519
3521 3520 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3522 3521
3523 3522 atomic_or_32(&sc->sc_flags, IWP_F_RUNNING);
3524 3523
3525 3524 return (IWP_SUCCESS);
3526 3525 }
3527 3526
3528 3527 /*
3529 3528 * invoked by GLD to stop or down NIC
3530 3529 */
3531 3530 static void
3532 3531 iwp_m_stop(void *arg)
3533 3532 {
3534 3533 iwp_sc_t *sc;
3535 3534 ieee80211com_t *ic;
3536 3535
3537 3536 if (NULL == arg) {
3538 3537 return;
3539 3538 }
3540 3539 sc = (iwp_sc_t *)arg;
3541 3540 ic = &sc->sc_ic;
3542 3541
3543 3542 iwp_stop(sc);
3544 3543
3545 3544 /*
3546 3545 * release buffer for calibration
3547 3546 */
3548 3547 iwp_release_calib_buffer(sc);
3549 3548
3550 3549 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3551 3550
3552 3551 atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
3553 3552 atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);
3554 3553
3555 3554 atomic_and_32(&sc->sc_flags, ~IWP_F_RUNNING);
3556 3555 atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
3557 3556 }
3558 3557
3559 3558 /*
3560 3559 * invoked by GLD to configure NIC
3561 3560 */
3562 3561 static int
3563 3562 iwp_m_unicst(void *arg, const uint8_t *macaddr)
3564 3563 {
3565 3564 iwp_sc_t *sc;
3566 3565 ieee80211com_t *ic;
3567 3566 int err = IWP_SUCCESS;
3568 3567
3569 3568 if (NULL == arg) {
3570 3569 return (EINVAL);
3571 3570 }
3572 3571 sc = (iwp_sc_t *)arg;
3573 3572 ic = &sc->sc_ic;
3574 3573
3575 3574 if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
3576 3575 IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
3577 3576 mutex_enter(&sc->sc_glock);
3578 3577 err = iwp_config(sc);
3579 3578 mutex_exit(&sc->sc_glock);
3580 3579 if (err != IWP_SUCCESS) {
3581 3580 cmn_err(CE_WARN, "iwp_m_unicst(): "
3582 3581 "failed to configure device\n");
3583 3582 goto fail;
3584 3583 }
3585 3584 }
3586 3585
3587 3586 return (err);
3588 3587
3589 3588 fail:
3590 3589 return (err);
3591 3590 }
3592 3591
/*
 * invoked by GLD to update the multicast filter; this driver does no
 * multicast filtering, so this is a no-op that reports success.
 */
/* ARGSUSED */
static int
iwp_m_multicst(void *arg, boolean_t add, const uint8_t *m)
{
	return (IWP_SUCCESS);
}
3599 3598
/*
 * invoked by GLD to toggle promiscuous mode; not supported by this
 * driver, so this is a no-op that reports success.
 */
/* ARGSUSED */
static int
iwp_m_promisc(void *arg, boolean_t on)
{
	return (IWP_SUCCESS);
}
3606 3605
3607 3606 /*
3608 3607 * kernel thread to deal with exceptional situation
3609 3608 */
3610 3609 static void
3611 3610 iwp_thread(iwp_sc_t *sc)
3612 3611 {
3613 3612 ieee80211com_t *ic = &sc->sc_ic;
3614 3613 clock_t clk;
3615 3614 int err, n = 0, timeout = 0;
3616 3615 uint32_t tmp;
3617 3616 #ifdef DEBUG
3618 3617 int times = 0;
3619 3618 #endif
3620 3619
3621 3620 while (sc->sc_mf_thread_switch) {
3622 3621 tmp = IWP_READ(sc, CSR_GP_CNTRL);
3623 3622 if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
3624 3623 atomic_and_32(&sc->sc_flags, ~IWP_F_RADIO_OFF);
3625 3624 } else {
3626 3625 atomic_or_32(&sc->sc_flags, IWP_F_RADIO_OFF);
3627 3626 }
3628 3627
3629 3628 /*
3630 3629 * If in SUSPEND or the RF is OFF, do nothing.
3631 3630 */
3632 3631 if (sc->sc_flags & IWP_F_RADIO_OFF) {
3633 3632 delay(drv_usectohz(100000));
3634 3633 continue;
3635 3634 }
3636 3635
3637 3636 /*
3638 3637 * recovery fatal error
3639 3638 */
3640 3639 if (ic->ic_mach &&
3641 3640 (sc->sc_flags & IWP_F_HW_ERR_RECOVER)) {
3642 3641
3643 3642 IWP_DBG((IWP_DEBUG_FW, "iwp_thread(): "
3644 3643 "try to recover fatal hw error: %d\n", times++));
3645 3644
3646 3645 iwp_stop(sc);
3647 3646
3648 3647 if (IWP_CHK_FAST_RECOVER(sc)) {
3649 3648 /* save runtime configuration */
3650 3649 bcopy(&sc->sc_config, &sc->sc_config_save,
3651 3650 sizeof (sc->sc_config));
3652 3651 } else {
3653 3652 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3654 3653 delay(drv_usectohz(2000000 + n*500000));
3655 3654 }
3656 3655
3657 3656 err = iwp_init(sc);
3658 3657 if (err != IWP_SUCCESS) {
3659 3658 n++;
3660 3659 if (n < 20) {
3661 3660 continue;
3662 3661 }
3663 3662 }
3664 3663
3665 3664 n = 0;
3666 3665 if (!err) {
3667 3666 atomic_or_32(&sc->sc_flags, IWP_F_RUNNING);
3668 3667 }
3669 3668
3670 3669
3671 3670 if (!IWP_CHK_FAST_RECOVER(sc) ||
3672 3671 iwp_fast_recover(sc) != IWP_SUCCESS) {
3673 3672 atomic_and_32(&sc->sc_flags,
3674 3673 ~IWP_F_HW_ERR_RECOVER);
3675 3674
3676 3675 delay(drv_usectohz(2000000));
3677 3676 if (sc->sc_ostate != IEEE80211_S_INIT) {
3678 3677 ieee80211_new_state(ic,
3679 3678 IEEE80211_S_SCAN, 0);
3680 3679 }
3681 3680 }
3682 3681 }
3683 3682
3684 3683 if (ic->ic_mach &&
3685 3684 (sc->sc_flags & IWP_F_SCANNING) && sc->sc_scan_pending) {
3686 3685 IWP_DBG((IWP_DEBUG_SCAN, "iwp_thread(): "
3687 3686 "wait for probe response\n"));
3688 3687
3689 3688 sc->sc_scan_pending--;
3690 3689 delay(drv_usectohz(200000));
3691 3690 ieee80211_next_scan(ic);
3692 3691 }
3693 3692
3694 3693 /*
3695 3694 * rate ctl
3696 3695 */
3697 3696 if (ic->ic_mach &&
3698 3697 (sc->sc_flags & IWP_F_RATE_AUTO_CTL)) {
3699 3698 clk = ddi_get_lbolt();
3700 3699 if (clk > sc->sc_clk + drv_usectohz(1000000)) {
3701 3700 iwp_amrr_timeout(sc);
3702 3701 }
3703 3702 }
3704 3703
3705 3704 delay(drv_usectohz(100000));
3706 3705
3707 3706 mutex_enter(&sc->sc_mt_lock);
3708 3707 if (sc->sc_tx_timer) {
3709 3708 timeout++;
3710 3709 if (10 == timeout) {
3711 3710 sc->sc_tx_timer--;
3712 3711 if (0 == sc->sc_tx_timer) {
3713 3712 atomic_or_32(&sc->sc_flags,
3714 3713 IWP_F_HW_ERR_RECOVER);
3715 3714 sc->sc_ostate = IEEE80211_S_RUN;
3716 3715 IWP_DBG((IWP_DEBUG_FW, "iwp_thread(): "
3717 3716 "try to recover from "
3718 3717 "send fail\n"));
3719 3718 }
3720 3719 timeout = 0;
3721 3720 }
3722 3721 }
3723 3722 mutex_exit(&sc->sc_mt_lock);
3724 3723 }
3725 3724
3726 3725 mutex_enter(&sc->sc_mt_lock);
3727 3726 sc->sc_mf_thread = NULL;
3728 3727 cv_signal(&sc->sc_mt_cv);
3729 3728 mutex_exit(&sc->sc_mt_lock);
3730 3729 }
3731 3730
3732 3731
3733 3732 /*
3734 3733 * Send a command to the ucode.
3735 3734 */
3736 3735 static int
3737 3736 iwp_cmd(iwp_sc_t *sc, int code, const void *buf, int size, int async)
3738 3737 {
3739 3738 iwp_tx_ring_t *ring = &sc->sc_txq[IWP_CMD_QUEUE_NUM];
3740 3739 iwp_tx_desc_t *desc;
3741 3740 iwp_cmd_t *cmd;
3742 3741
3743 3742 ASSERT(size <= sizeof (cmd->data));
3744 3743 ASSERT(mutex_owned(&sc->sc_glock));
3745 3744
3746 3745 IWP_DBG((IWP_DEBUG_CMD, "iwp_cmd() "
3747 3746 "code[%d]", code));
3748 3747 desc = ring->data[ring->cur].desc;
3749 3748 cmd = ring->data[ring->cur].cmd;
3750 3749
3751 3750 cmd->hdr.type = (uint8_t)code;
3752 3751 cmd->hdr.flags = 0;
3753 3752 cmd->hdr.qid = ring->qid;
3754 3753 cmd->hdr.idx = ring->cur;
3755 3754 (void) memcpy(cmd->data, buf, size);
3756 3755 (void) memset(desc, 0, sizeof (*desc));
3757 3756
3758 3757 desc->val0 = 1 << 24;
3759 3758 desc->pa[0].tb1_addr =
3760 3759 (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
3761 3760 desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;
3762 3761
3763 3762 if (async) {
3764 3763 sc->sc_cmd_accum++;
3765 3764 }
3766 3765
3767 3766 /*
3768 3767 * kick cmd ring XXX
3769 3768 */
3770 3769 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3771 3770 tfd_offset[ring->cur].val = 8;
3772 3771 if (ring->cur < IWP_MAX_WIN_SIZE) {
3773 3772 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3774 3773 tfd_offset[IWP_QUEUE_SIZE + ring->cur].val = 8;
3775 3774 }
3776 3775 ring->cur = (ring->cur + 1) % ring->count;
3777 3776 IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3778 3777
3779 3778 if (async) {
3780 3779 return (IWP_SUCCESS);
3781 3780 } else {
3782 3781 clock_t clk;
3783 3782
3784 3783 clk = ddi_get_lbolt() + drv_usectohz(2000000);
3785 3784 while (sc->sc_cmd_flag != SC_CMD_FLG_DONE) {
3786 3785 if (cv_timedwait(&sc->sc_cmd_cv,
3787 3786 &sc->sc_glock, clk) < 0) {
3788 3787 break;
3789 3788 }
3790 3789 }
3791 3790
3792 3791 if (SC_CMD_FLG_DONE == sc->sc_cmd_flag) {
3793 3792 sc->sc_cmd_flag = SC_CMD_FLG_NONE;
3794 3793 return (IWP_SUCCESS);
3795 3794 } else {
3796 3795 sc->sc_cmd_flag = SC_CMD_FLG_NONE;
3797 3796 return (IWP_FAIL);
3798 3797 }
3799 3798 }
3800 3799 }
3801 3800
3802 3801 /*
3803 3802 * require ucode seting led of NIC
3804 3803 */
3805 3804 static void
3806 3805 iwp_set_led(iwp_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
3807 3806 {
3808 3807 iwp_led_cmd_t led;
3809 3808
3810 3809 led.interval = LE_32(100000); /* unit: 100ms */
3811 3810 led.id = id;
3812 3811 led.off = off;
3813 3812 led.on = on;
3814 3813
3815 3814 (void) iwp_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
3816 3815 }
3817 3816
3818 3817 /*
3819 3818 * necessary setting to NIC before authentication
3820 3819 */
3821 3820 static int
3822 3821 iwp_hw_set_before_auth(iwp_sc_t *sc)
3823 3822 {
3824 3823 ieee80211com_t *ic = &sc->sc_ic;
3825 3824 ieee80211_node_t *in = ic->ic_bss;
3826 3825 int err = IWP_FAIL;
3827 3826
3828 3827 /*
3829 3828 * update adapter's configuration according
3830 3829 * the info of target AP
3831 3830 */
3832 3831 IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
3833 3832 sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, in->in_chan));
3834 3833
3835 3834 sc->sc_config.ofdm_ht_triple_stream_basic_rates = 0;
3836 3835 sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0;
3837 3836 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0;
3838 3837
3839 3838 if (IEEE80211_MODE_11B == ic->ic_curmode) {
3840 3839 sc->sc_config.cck_basic_rates = 0x03;
3841 3840 sc->sc_config.ofdm_basic_rates = 0;
3842 3841 } else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
3843 3842 (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
3844 3843 sc->sc_config.cck_basic_rates = 0;
3845 3844 sc->sc_config.ofdm_basic_rates = 0x15;
3846 3845 } else { /* assume 802.11b/g */
3847 3846 sc->sc_config.cck_basic_rates = 0x0f;
3848 3847 sc->sc_config.ofdm_basic_rates = 0xff;
3849 3848 }
3850 3849
3851 3850 sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
3852 3851 RXON_FLG_SHORT_SLOT_MSK);
3853 3852
3854 3853 if (ic->ic_flags & IEEE80211_F_SHSLOT) {
3855 3854 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
3856 3855 } else {
3857 3856 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
3858 3857 }
3859 3858
3860 3859 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
3861 3860 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
3862 3861 } else {
3863 3862 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
3864 3863 }
3865 3864
3866 3865 IWP_DBG((IWP_DEBUG_80211, "iwp_hw_set_before_auth(): "
3867 3866 "config chan %d flags %x "
3868 3867 "filter_flags %x cck %x ofdm %x"
3869 3868 " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n",
3870 3869 LE_16(sc->sc_config.chan), LE_32(sc->sc_config.flags),
3871 3870 LE_32(sc->sc_config.filter_flags),
3872 3871 sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
3873 3872 sc->sc_config.bssid[0], sc->sc_config.bssid[1],
3874 3873 sc->sc_config.bssid[2], sc->sc_config.bssid[3],
3875 3874 sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
3876 3875
3877 3876 err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
3878 3877 sizeof (iwp_rxon_cmd_t), 1);
3879 3878 if (err != IWP_SUCCESS) {
3880 3879 cmn_err(CE_WARN, "iwp_hw_set_before_auth(): "
3881 3880 "failed to config chan%d\n", sc->sc_config.chan);
3882 3881 return (err);
3883 3882 }
3884 3883
3885 3884 /*
3886 3885 * add default AP node
3887 3886 */
3888 3887 err = iwp_add_ap_sta(sc);
3889 3888 if (err != IWP_SUCCESS) {
3890 3889 return (err);
3891 3890 }
3892 3891
3893 3892
3894 3893 return (err);
3895 3894 }
3896 3895
3897 3896 /*
3898 3897 * Send a scan request(assembly scan cmd) to the firmware.
3899 3898 */
3900 3899 static int
3901 3900 iwp_scan(iwp_sc_t *sc)
3902 3901 {
3903 3902 ieee80211com_t *ic = &sc->sc_ic;
3904 3903 iwp_tx_ring_t *ring = &sc->sc_txq[IWP_CMD_QUEUE_NUM];
3905 3904 iwp_tx_desc_t *desc;
3906 3905 iwp_tx_data_t *data;
3907 3906 iwp_cmd_t *cmd;
3908 3907 iwp_scan_hdr_t *hdr;
3909 3908 iwp_scan_chan_t chan;
3910 3909 struct ieee80211_frame *wh;
3911 3910 ieee80211_node_t *in = ic->ic_bss;
3912 3911 uint8_t essid[IEEE80211_NWID_LEN+1];
3913 3912 struct ieee80211_rateset *rs;
3914 3913 enum ieee80211_phymode mode;
3915 3914 uint8_t *frm;
3916 3915 int i, pktlen, nrates;
3917 3916
3918 3917 data = &ring->data[ring->cur];
3919 3918 desc = data->desc;
3920 3919 cmd = (iwp_cmd_t *)data->dma_data.mem_va;
3921 3920
3922 3921 cmd->hdr.type = REPLY_SCAN_CMD;
3923 3922 cmd->hdr.flags = 0;
3924 3923 cmd->hdr.qid = ring->qid;
3925 3924 cmd->hdr.idx = ring->cur | 0x40;
3926 3925
3927 3926 hdr = (iwp_scan_hdr_t *)cmd->data;
3928 3927 (void) memset(hdr, 0, sizeof (iwp_scan_hdr_t));
3929 3928 hdr->nchan = 1;
3930 3929 hdr->quiet_time = LE_16(50);
3931 3930 hdr->quiet_plcp_th = LE_16(1);
3932 3931
3933 3932 hdr->flags = LE_32(RXON_FLG_BAND_24G_MSK);
3934 3933 hdr->rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3935 3934 (0x7 << RXON_RX_CHAIN_VALID_POS) |
3936 3935 (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3937 3936 (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3938 3937
3939 3938 hdr->tx_cmd.tx_flags = LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3940 3939 hdr->tx_cmd.sta_id = IWP_BROADCAST_ID;
3941 3940 hdr->tx_cmd.stop_time.life_time = LE_32(0xffffffff);
3942 3941 hdr->tx_cmd.rate.r.rate_n_flags = LE_32(iwp_rate_to_plcp(2));
3943 3942 hdr->tx_cmd.rate.r.rate_n_flags |=
3944 3943 LE_32(RATE_MCS_ANT_B_MSK |RATE_MCS_CCK_MSK);
3945 3944 hdr->direct_scan[0].len = ic->ic_des_esslen;
3946 3945 hdr->direct_scan[0].id = IEEE80211_ELEMID_SSID;
3947 3946
3948 3947 hdr->filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3949 3948 RXON_FILTER_BCON_AWARE_MSK);
3950 3949
3951 3950 if (ic->ic_des_esslen) {
3952 3951 bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen);
3953 3952 essid[ic->ic_des_esslen] = '\0';
3954 3953 IWP_DBG((IWP_DEBUG_SCAN, "iwp_scan(): "
3955 3954 "directed scan %s\n", essid));
3956 3955
3957 3956 bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
3958 3957 ic->ic_des_esslen);
3959 3958 } else {
3960 3959 bzero(hdr->direct_scan[0].ssid,
3961 3960 sizeof (hdr->direct_scan[0].ssid));
3962 3961 }
3963 3962
3964 3963 /*
3965 3964 * a probe request frame is required after the REPLY_SCAN_CMD
3966 3965 */
3967 3966 wh = (struct ieee80211_frame *)(hdr + 1);
3968 3967 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
3969 3968 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
3970 3969 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
3971 3970 (void) memset(wh->i_addr1, 0xff, 6);
3972 3971 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
3973 3972 (void) memset(wh->i_addr3, 0xff, 6);
3974 3973 *(uint16_t *)&wh->i_dur[0] = 0;
3975 3974 *(uint16_t *)&wh->i_seq[0] = 0;
3976 3975
3977 3976 frm = (uint8_t *)(wh + 1);
3978 3977
3979 3978 /*
3980 3979 * essid IE
3981 3980 */
3982 3981 if (in->in_esslen) {
3983 3982 bcopy(in->in_essid, essid, in->in_esslen);
3984 3983 essid[in->in_esslen] = '\0';
3985 3984 IWP_DBG((IWP_DEBUG_SCAN, "iwp_scan(): "
3986 3985 "probe with ESSID %s\n",
3987 3986 essid));
3988 3987 }
3989 3988 *frm++ = IEEE80211_ELEMID_SSID;
3990 3989 *frm++ = in->in_esslen;
3991 3990 (void) memcpy(frm, in->in_essid, in->in_esslen);
3992 3991 frm += in->in_esslen;
3993 3992
3994 3993 mode = ieee80211_chan2mode(ic, ic->ic_curchan);
3995 3994 rs = &ic->ic_sup_rates[mode];
3996 3995
3997 3996 /*
3998 3997 * supported rates IE
3999 3998 */
4000 3999 *frm++ = IEEE80211_ELEMID_RATES;
4001 4000 nrates = rs->ir_nrates;
4002 4001 if (nrates > IEEE80211_RATE_SIZE) {
4003 4002 nrates = IEEE80211_RATE_SIZE;
4004 4003 }
4005 4004
4006 4005 *frm++ = (uint8_t)nrates;
4007 4006 (void) memcpy(frm, rs->ir_rates, nrates);
4008 4007 frm += nrates;
4009 4008
4010 4009 /*
4011 4010 * supported xrates IE
4012 4011 */
4013 4012 if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
4014 4013 nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
4015 4014 *frm++ = IEEE80211_ELEMID_XRATES;
4016 4015 *frm++ = (uint8_t)nrates;
4017 4016 (void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
4018 4017 frm += nrates;
4019 4018 }
4020 4019
4021 4020 /*
4022 4021 * optionnal IE (usually for wpa)
4023 4022 */
4024 4023 if (ic->ic_opt_ie != NULL) {
4025 4024 (void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
4026 4025 frm += ic->ic_opt_ie_len;
4027 4026 }
4028 4027
4029 4028 /* setup length of probe request */
4030 4029 hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
4031 4030 hdr->len = LE_16(hdr->nchan * sizeof (iwp_scan_chan_t) +
4032 4031 LE_16(hdr->tx_cmd.len) + sizeof (iwp_scan_hdr_t));
4033 4032
4034 4033 /*
4035 4034 * the attribute of the scan channels are required after the probe
4036 4035 * request frame.
4037 4036 */
4038 4037 for (i = 1; i <= hdr->nchan; i++) {
4039 4038 if (ic->ic_des_esslen) {
4040 4039 chan.type = LE_32(3);
4041 4040 } else {
4042 4041 chan.type = LE_32(1);
4043 4042 }
4044 4043
4045 4044 chan.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
4046 4045 chan.tpc.tx_gain = 0x28;
4047 4046 chan.tpc.dsp_atten = 110;
4048 4047 chan.active_dwell = LE_16(50);
4049 4048 chan.passive_dwell = LE_16(120);
4050 4049
4051 4050 bcopy(&chan, frm, sizeof (iwp_scan_chan_t));
4052 4051 frm += sizeof (iwp_scan_chan_t);
4053 4052 }
4054 4053
4055 4054 pktlen = _PTRDIFF(frm, cmd);
4056 4055
4057 4056 (void) memset(desc, 0, sizeof (*desc));
4058 4057 desc->val0 = 1 << 24;
4059 4058 desc->pa[0].tb1_addr =
4060 4059 (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
4061 4060 desc->pa[0].val1 = (pktlen << 4) & 0xfff0;
4062 4061
4063 4062 /*
4064 4063 * maybe for cmd, filling the byte cnt table is not necessary.
4065 4064 * anyway, we fill it here.
4066 4065 */
4067 4066 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
4068 4067 .tfd_offset[ring->cur].val = 8;
4069 4068 if (ring->cur < IWP_MAX_WIN_SIZE) {
4070 4069 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
4071 4070 tfd_offset[IWP_QUEUE_SIZE + ring->cur].val = 8;
4072 4071 }
4073 4072
4074 4073 /*
4075 4074 * kick cmd ring
4076 4075 */
4077 4076 ring->cur = (ring->cur + 1) % ring->count;
4078 4077 IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4079 4078
4080 4079 return (IWP_SUCCESS);
4081 4080 }
4082 4081
/*
 * configure NIC by using ucode commands after loading ucode.
 *
 * Sends, in order: power-table, bluetooth-coexistence, RXON,
 * remove-broadcast-station, then add-broadcast-station.  The order
 * matters: RXON must be in place before station entries are changed.
 * Returns IWP_SUCCESS, or the first failing command's error code.
 */
static int
iwp_config(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwp_powertable_cmd_t powertable;
	iwp_bt_cmd_t bt;
	iwp_add_sta_t node;
	iwp_rem_sta_t rm_sta;
	const uint8_t bcast[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	int err = IWP_FAIL;

	/*
	 * set power mode. Disable power management at present, do it later
	 */
	(void) memset(&powertable, 0, sizeof (powertable));
	powertable.flags = LE_16(0x8);
	err = iwp_cmd(sc, POWER_TABLE_CMD, &powertable,
	    sizeof (powertable), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to set power mode\n");
		return (err);
	}

	/*
	 * configure bt coexistence
	 */
	(void) memset(&bt, 0, sizeof (bt));
	bt.flags = 3;
	bt.lead_time = 0xaa;
	bt.max_kill = 1;
	err = iwp_cmd(sc, REPLY_BT_CONFIG, &bt,
	    sizeof (bt), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to configurate bt coexistence\n");
		return (err);
	}

	/*
	 * configure rxon: start in 2.4GHz band, neither mixed nor
	 * pure-40MHz channel mode.
	 */
	(void) memset(&sc->sc_config, 0, sizeof (iwp_rxon_cmd_t));
	IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
	IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
	sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
	sc->sc_config.flags = LE_32(RXON_FLG_BAND_24G_MSK);
	sc->sc_config.flags &= LE_32(~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
	    RXON_FLG_CHANNEL_MODE_PURE_40_MSK));

	/* device type and RX filters depend on the 802.11 operating mode */
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_DIS_DECRYPT_MSK |
		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
		sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;

		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
		sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_DIS_DECRYPT_MSK |
		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
		break;
	case IEEE80211_M_HOSTAP:
		sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
		break;
	case IEEE80211_M_MONITOR:
		sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
		break;
	}

	/*
	 * Support all CCK rates.
	 */
	sc->sc_config.cck_basic_rates = 0x0f;

	/*
	 * Support all OFDM rates.
	 */
	sc->sc_config.ofdm_basic_rates = 0xff;

	sc->sc_config.rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
	    (0x7 << RXON_RX_CHAIN_VALID_POS) |
	    (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) |
	    (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));

	err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
	    sizeof (iwp_rxon_cmd_t), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to set configure command\n");
		return (err);
	}

	/*
	 * remove all nodes in NIC
	 */
	(void) memset(&rm_sta, 0, sizeof (rm_sta));
	rm_sta.num_sta = 1;
	(void) memcpy(rm_sta.addr, bcast, 6);

	err = iwp_cmd(sc, REPLY_REMOVE_STA, &rm_sta, sizeof (iwp_rem_sta_t), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to remove broadcast node in hardware.\n");
		return (err);
	}

	/*
	 * add broadcast node so that we can send broadcast frame
	 */
	(void) memset(&node, 0, sizeof (node));
	(void) memset(node.sta.addr, 0xff, 6);
	node.mode = 0;
	node.sta.sta_id = IWP_BROADCAST_ID;
	node.station_flags = 0;

	err = iwp_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to add broadcast node\n");
		return (err);
	}

	return (err);
}
4217 4216
4218 4217 /*
4219 4218 * quiesce(9E) entry point.
4220 4219 * This function is called when the system is single-threaded at high
4221 4220 * PIL with preemption disabled. Therefore, this function must not be
4222 4221 * blocked.
4223 4222 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
4224 4223 * DDI_FAILURE indicates an error condition and should almost never happen.
4225 4224 */
4226 4225 static int
4227 4226 iwp_quiesce(dev_info_t *dip)
4228 4227 {
4229 4228 iwp_sc_t *sc;
4230 4229
4231 4230 sc = ddi_get_soft_state(iwp_soft_state_p, ddi_get_instance(dip));
4232 4231 if (NULL == sc) {
4233 4232 return (DDI_FAILURE);
4234 4233 }
4235 4234
4236 4235 #ifdef DEBUG
4237 4236 /* by pass any messages, if it's quiesce */
4238 4237 iwp_dbg_flags = 0;
4239 4238 #endif
4240 4239
4241 4240 /*
4242 4241 * No more blocking is allowed while we are in the
4243 4242 * quiesce(9E) entry point.
4244 4243 */
4245 4244 atomic_or_32(&sc->sc_flags, IWP_F_QUIESCED);
4246 4245
4247 4246 /*
4248 4247 * Disable and mask all interrupts.
4249 4248 */
4250 4249 iwp_stop(sc);
4251 4250
4252 4251 return (DDI_SUCCESS);
4253 4252 }
4254 4253
4255 4254 static void
4256 4255 iwp_stop_master(iwp_sc_t *sc)
4257 4256 {
4258 4257 uint32_t tmp;
4259 4258 int n;
4260 4259
4261 4260 tmp = IWP_READ(sc, CSR_RESET);
4262 4261 IWP_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);
4263 4262
4264 4263 tmp = IWP_READ(sc, CSR_GP_CNTRL);
4265 4264 if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
4266 4265 CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE) {
4267 4266 return;
4268 4267 }
4269 4268
4270 4269 for (n = 0; n < 2000; n++) {
4271 4270 if (IWP_READ(sc, CSR_RESET) &
4272 4271 CSR_RESET_REG_FLAG_MASTER_DISABLED) {
4273 4272 break;
4274 4273 }
4275 4274 DELAY(1000);
4276 4275 }
4277 4276
4278 4277 #ifdef DEBUG
4279 4278 if (2000 == n) {
4280 4279 IWP_DBG((IWP_DEBUG_HW, "iwp_stop_master(): "
4281 4280 "timeout waiting for master stop\n"));
4282 4281 }
4283 4282 #endif
4284 4283 }
4285 4284
4286 4285 static int
4287 4286 iwp_power_up(iwp_sc_t *sc)
4288 4287 {
4289 4288 uint32_t tmp;
4290 4289
4291 4290 iwp_mac_access_enter(sc);
4292 4291 tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
4293 4292 tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
4294 4293 tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
4295 4294 iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4296 4295 iwp_mac_access_exit(sc);
4297 4296
4298 4297 DELAY(5000);
4299 4298 return (IWP_SUCCESS);
4300 4299 }
4301 4300
/*
 * hardware initialization
 *
 * Brings the NIC out of reset: enables clocks, programs the radio
 * configuration read from eeprom, powers up the device and disables
 * PCI L1-active / retry timeouts.  Must run before any ucode load.
 * Returns IWP_SUCCESS, ETIMEDOUT if the MAC clock never becomes
 * ready, or IWP_FAIL on a bad eeprom radio configuration.
 */
static int
iwp_preinit(iwp_sc_t *sc)
{
	int n;
	uint8_t vlink;
	uint16_t radio_cfg;
	uint32_t tmp;

	/*
	 * clear any pending interrupts
	 */
	IWP_WRITE(sc, CSR_INT, 0xffffffff);

	/* disable the L0S exit timer (hardware workaround) */
	tmp = IWP_READ(sc, CSR_GIO_CHICKEN_BITS);
	IWP_WRITE(sc, CSR_GIO_CHICKEN_BITS,
	    tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	IWP_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * wait for clock ready (up to 1000 x 10us)
	 */
	for (n = 0; n < 1000; n++) {
		if (IWP_READ(sc, CSR_GP_CNTRL) &
		    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
			break;
		}
		DELAY(10);
	}

	if (1000 == n) {
		return (ETIMEDOUT);
	}

	iwp_mac_access_enter(sc);

	/* request the DMA clock, then disable L1-active */
	iwp_reg_write(sc, ALM_APMG_CLK_EN, APMG_CLK_REG_VAL_DMA_CLK_RQT);

	DELAY(20);
	tmp = iwp_reg_read(sc, ALM_APMG_PCIDEV_STT);
	iwp_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
	    APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
	iwp_mac_access_exit(sc);

	/* program radio type/step/dash from the eeprom image */
	radio_cfg = IWP_READ_EEP_SHORT(sc, EEP_SP_RADIO_CONFIGURATION);
	if (SP_RADIO_TYPE_MSK(radio_cfg) < SP_RADIO_TYPE_MAX) {
		tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
		IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
		    tmp | SP_RADIO_TYPE_MSK(radio_cfg) |
		    SP_RADIO_STEP_MSK(radio_cfg) |
		    SP_RADIO_DASH_MSK(radio_cfg));
	} else {
		cmn_err(CE_WARN, "iwp_preinit(): "
		    "radio configuration information in eeprom is wrong\n");
		return (IWP_FAIL);
	}


	/* interrupt coalescing timer (units of 32us) */
	IWP_WRITE(sc, CSR_INT_COALESCING, 512 / 32);

	(void) iwp_power_up(sc);

	/* on old chip revisions, clear PCI config bit 11 at offset 0xe8 */
	if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
		tmp = ddi_get32(sc->sc_cfg_handle,
		    (uint32_t *)(sc->sc_cfg_base + 0xe8));
		ddi_put32(sc->sc_cfg_handle,
		    (uint32_t *)(sc->sc_cfg_base + 0xe8),
		    tmp & ~(1 << 11));
	}

	/* disable PCI retry timeout (config offset 0xf0, bit 1) */
	vlink = ddi_get8(sc->sc_cfg_handle,
	    (uint8_t *)(sc->sc_cfg_base + 0xf0));
	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
	    vlink & ~2);

	tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
	tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
	IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG, tmp);

	/*
	 * make sure power supply on each part of the hardware
	 */
	iwp_mac_access_enter(sc);
	tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
	tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
	iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	DELAY(5);

	tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
	tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
	iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	iwp_mac_access_exit(sc);

	/* tell the device which radio SKU (PA type) is fitted */
	if (PA_TYPE_MIX == sc->sc_chip_param.pa_type) {
		IWP_WRITE(sc, CSR_GP_DRIVER_REG,
		    CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_MIX);
	}

	if (PA_TYPE_INTER == sc->sc_chip_param.pa_type) {

		IWP_WRITE(sc, CSR_GP_DRIVER_REG,
		    CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
	}

	return (IWP_SUCCESS);
}
4413 4412
4414 4413 /*
4415 4414 * set up semphore flag to own EEPROM
4416 4415 */
4417 4416 static int
4418 4417 iwp_eep_sem_down(iwp_sc_t *sc)
4419 4418 {
4420 4419 int count1, count2;
4421 4420 uint32_t tmp;
4422 4421
4423 4422 for (count1 = 0; count1 < 1000; count1++) {
4424 4423 tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4425 4424 IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4426 4425 tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
4427 4426
4428 4427 for (count2 = 0; count2 < 2; count2++) {
4429 4428 if (IWP_READ(sc, CSR_HW_IF_CONFIG_REG) &
4430 4429 CSR_HW_IF_CONFIG_REG_EEP_SEM) {
4431 4430 return (IWP_SUCCESS);
4432 4431 }
4433 4432 DELAY(10000);
4434 4433 }
4435 4434 }
4436 4435 return (IWP_FAIL);
4437 4436 }
4438 4437
4439 4438 /*
4440 4439 * reset semphore flag to release EEPROM
4441 4440 */
4442 4441 static void
4443 4442 iwp_eep_sem_up(iwp_sc_t *sc)
4444 4443 {
4445 4444 uint32_t tmp;
4446 4445
4447 4446 tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4448 4447 IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4449 4448 tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
4450 4449 }
4451 4450
/*
 * This function reads all information from eeprom into
 * sc->sc_eep_map, one 16-bit word at a time.
 * Returns IWP_SUCCESS, or IWP_FAIL on a bad eeprom signature,
 * failure to own the eeprom semaphore, or a read timeout.
 */
static int
iwp_eep_load(iwp_sc_t *sc)
{
	int i, rr;
	uint32_t rv, tmp, eep_gp;
	uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
	uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;

	/*
	 * read eeprom gp register in CSR and verify the signature
	 */
	eep_gp = IWP_READ(sc, CSR_EEPROM_GP);
	if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
	    CSR_EEPROM_GP_BAD_SIGNATURE) {
		IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
		    "not find eeprom\n"));
		return (IWP_FAIL);
	}

	/* take the eeprom semaphore before touching the read register */
	rr = iwp_eep_sem_down(sc);
	if (rr != 0) {
		IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
		    "driver failed to own EEPROM\n"));
		return (IWP_FAIL);
	}

	for (addr = 0; addr < eep_sz; addr += 2) {
		/* issue a read for this byte address (addr is in words) */
		IWP_WRITE(sc, CSR_EEPROM_REG, addr<<1);
		tmp = IWP_READ(sc, CSR_EEPROM_REG);
		IWP_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));

		/* poll bit 0 (read-done) up to 10 x 10us */
		for (i = 0; i < 10; i++) {
			rv = IWP_READ(sc, CSR_EEPROM_REG);
			if (rv & 1) {
				break;
			}
			DELAY(10);
		}

		if (!(rv & 1)) {
			IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
			    "time out when read eeprome\n"));
			iwp_eep_sem_up(sc);
			return (IWP_FAIL);
		}

		/* data word is returned in the upper 16 bits */
		eep_p[addr/2] = LE_16(rv >> 16);
	}

	iwp_eep_sem_up(sc);
	return (IWP_SUCCESS);
}
4507 4506
4508 4507 /*
4509 4508 * initialize mac address in ieee80211com_t struct
4510 4509 */
4511 4510 static void
4512 4511 iwp_get_mac_from_eep(iwp_sc_t *sc)
4513 4512 {
4514 4513 ieee80211com_t *ic = &sc->sc_ic;
4515 4514
4516 4515 IEEE80211_ADDR_COPY(ic->ic_macaddr, &sc->sc_eep_map[EEP_MAC_ADDRESS]);
4517 4516
4518 4517 IWP_DBG((IWP_DEBUG_EEPROM, "iwp_get_mac_from_eep(): "
4519 4518 "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
4520 4519 ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
4521 4520 ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
4522 4521 }
4523 4522
/*
 * main initialization function
 *
 * Two-phase firmware boot: load and run the init ucode (for
 * calibration), stop the chipset, then load and run the runtime
 * ucode, and finally configure the device via ucode commands
 * (iwp_config()).  Each "alive" notification is awaited on
 * sc_ucode_cv for at most one second.
 * Returns IWP_SUCCESS or IWP_FAIL.
 */
static int
iwp_init(iwp_sc_t *sc)
{
	int err = IWP_FAIL;
	clock_t clk;

	/*
	 * release buffer for calibration
	 */
	iwp_release_calib_buffer(sc);

	mutex_enter(&sc->sc_glock);
	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);

	err = iwp_init_common(sc);
	if (err != IWP_SUCCESS) {
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * backup ucode data part for future use.
	 */
	(void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
	    sc->sc_dma_fw_data.mem_va,
	    sc->sc_dma_fw_data.alength);

	/* load firmware init segment into NIC */
	err = iwp_load_init_firmware(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to setup init firmware\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * now press "execute" start running
	 */
	IWP_WRITE(sc, CSR_RESET, 0);

	/* wait up to 1s for the init ucode "alive" notification */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);
	while (!(sc->sc_flags & IWP_F_FW_INIT)) {
		if (cv_timedwait(&sc->sc_ucode_cv,
		    &sc->sc_glock, clk) < 0) {
			/* timed out */
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_FW_INIT)) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to process init alive.\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	mutex_exit(&sc->sc_glock);

	/*
	 * stop chipset for initializing chipset again
	 * (iwp_stop() takes sc_glock itself, so it must be dropped first)
	 */
	iwp_stop(sc);

	mutex_enter(&sc->sc_glock);
	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);

	err = iwp_init_common(sc);
	if (err != IWP_SUCCESS) {
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * load firmware run segment into NIC
	 */
	err = iwp_load_run_firmware(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to setup run firmware\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * now press "execute" start running
	 */
	IWP_WRITE(sc, CSR_RESET, 0);

	/* wait up to 1s for the runtime ucode "alive" notification */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);
	while (!(sc->sc_flags & IWP_F_FW_INIT)) {
		if (cv_timedwait(&sc->sc_ucode_cv,
		    &sc->sc_glock, clk) < 0) {
			/* timed out */
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_FW_INIT)) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to process runtime alive.\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	mutex_exit(&sc->sc_glock);

	DELAY(1000);

	mutex_enter(&sc->sc_glock);
	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);

	/*
	 * at this point, the firmware is loaded OK, then config the hardware
	 * with the ucode API, including rxon, txpower, etc.
	 */
	err = iwp_config(sc);
	if (err) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to configure device\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * at this point, hardware may receive beacons :)
	 */
	mutex_exit(&sc->sc_glock);
	return (IWP_SUCCESS);
}
4655 4654
/*
 * stop or disable NIC
 *
 * Disables interrupts, resets all Tx/Rx rings, stops the DMA clock
 * and the bus master, and puts the device into software reset.
 * When called from the quiesce(9E) path (IWP_F_QUIESCED set) the
 * global lock is deliberately not taken, because blocking is
 * forbidden there.
 */
static void
iwp_stop(iwp_sc_t *sc)
{
	uint32_t tmp;
	int i;

	/* by pass if it's quiesced */
	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
		mutex_enter(&sc->sc_glock);
	}

	IWP_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
	/*
	 * disable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, 0);
	IWP_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
	IWP_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);

	/*
	 * reset all Tx rings
	 */
	for (i = 0; i < IWP_NUM_QUEUES; i++) {
		iwp_reset_tx_ring(sc, &sc->sc_txq[i]);
	}

	/*
	 * reset Rx ring
	 */
	iwp_reset_rx_ring(sc);

	/* stop the DMA clock */
	iwp_mac_access_enter(sc);
	iwp_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
	iwp_mac_access_exit(sc);

	DELAY(5);

	iwp_stop_master(sc);

	/* cancel any pending Tx watchdog */
	mutex_enter(&sc->sc_mt_lock);
	sc->sc_tx_timer = 0;
	mutex_exit(&sc->sc_mt_lock);

	tmp = IWP_READ(sc, CSR_RESET);
	IWP_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);

	/* by pass if it's quiesced */
	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
		mutex_exit(&sc->sc_glock);
	}
}
4710 4709
/*
 * Naive implementation of the Adaptive Multi Rate Retry algorithm:
 * "IEEE 802.11 Rate Adaptation: A Practical Approach"
 * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
 * INRIA Sophia - Projet Planete
 * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
 */

/* fewer than 10% of transmissions needed a retry */
#define	is_success(amrr) \
	((amrr)->retrycnt < (amrr)->txcnt / 10)
/* more than a third of transmissions needed a retry */
#define	is_failure(amrr) \
	((amrr)->retrycnt > (amrr)->txcnt / 3)
/* enough samples collected to consider raising the rate */
#define	is_enough(amrr) \
	((amrr)->txcnt > 200)
/* minimum sample count before reacting to failures */
#define	not_very_few(amrr) \
	((amrr)->txcnt > 40)
#define	is_min_rate(in) \
	(0 == (in)->in_txrate)
#define	is_max_rate(in) \
	((in)->in_rates.ir_nrates - 1 == (in)->in_txrate)
#define	increase_rate(in) \
	((in)->in_txrate++)
#define	decrease_rate(in) \
	((in)->in_txrate--)
/*
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement: the original bare { } block followed by the caller's
 * ';' breaks compilation when used unbraced between if and else.
 */
#define	reset_cnt(amrr) \
	do { (amrr)->txcnt = (amrr)->retrycnt = 0; } while (0)

#define	IWP_AMRR_MIN_SUCCESS_THRESHOLD	1
#define	IWP_AMRR_MAX_SUCCESS_THRESHOLD	15
4739 4738
4740 4739 static void
4741 4740 iwp_amrr_init(iwp_amrr_t *amrr)
4742 4741 {
4743 4742 amrr->success = 0;
4744 4743 amrr->recovery = 0;
4745 4744 amrr->txcnt = amrr->retrycnt = 0;
4746 4745 amrr->success_threshold = IWP_AMRR_MIN_SUCCESS_THRESHOLD;
4747 4746 }
4748 4747
4749 4748 static void
4750 4749 iwp_amrr_timeout(iwp_sc_t *sc)
4751 4750 {
4752 4751 ieee80211com_t *ic = &sc->sc_ic;
4753 4752
4754 4753 IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_timeout(): "
4755 4754 "enter\n"));
4756 4755
4757 4756 if (IEEE80211_M_STA == ic->ic_opmode) {
4758 4757 iwp_amrr_ratectl(NULL, ic->ic_bss);
4759 4758 } else {
4760 4759 ieee80211_iterate_nodes(&ic->ic_sta, iwp_amrr_ratectl, NULL);
4761 4760 }
4762 4761
4763 4762 sc->sc_clk = ddi_get_lbolt();
4764 4763 }
4765 4764
/* ARGSUSED */
/*
 * AMRR rate control step for one node.  The iwp_amrr_t state is
 * overlaid on the node structure (first member), hence the cast.
 * On sustained success (low retry ratio over enough samples) the
 * tx rate is raised; on failure the rate is lowered and the success
 * threshold adapted, backing off harder while in "recovery" (i.e.
 * just after a raise that did not stick).
 */
static void
iwp_amrr_ratectl(void *arg, ieee80211_node_t *in)
{
	iwp_amrr_t *amrr = (iwp_amrr_t *)in;
	int need_change = 0;

	if (is_success(amrr) && is_enough(amrr)) {
		amrr->success++;
		if (amrr->success >= amrr->success_threshold &&
		    !is_max_rate(in)) {
			/* probe the next higher rate */
			amrr->recovery = 1;
			amrr->success = 0;
			increase_rate(in);
			IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_ratectl(): "
			    "AMRR increasing rate %d "
			    "(txcnt=%d retrycnt=%d)\n",
			    in->in_txrate, amrr->txcnt,
			    amrr->retrycnt));
			need_change = 1;
		} else {
			amrr->recovery = 0;
		}
	} else if (not_very_few(amrr) && is_failure(amrr)) {
		amrr->success = 0;
		if (!is_min_rate(in)) {
			if (amrr->recovery) {
				/* raise failed: require a longer streak */
				amrr->success_threshold++;
				if (amrr->success_threshold >
				    IWP_AMRR_MAX_SUCCESS_THRESHOLD) {
					amrr->success_threshold =
					    IWP_AMRR_MAX_SUCCESS_THRESHOLD;
				}
			} else {
				amrr->success_threshold =
				    IWP_AMRR_MIN_SUCCESS_THRESHOLD;
			}
			decrease_rate(in);
			IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_ratectl(): "
			    "AMRR decreasing rate %d "
			    "(txcnt=%d retrycnt=%d)\n",
			    in->in_txrate, amrr->txcnt,
			    amrr->retrycnt));
			need_change = 1;
		}
		amrr->recovery = 0;	/* paper is incorrect */
	}

	/* restart the sampling window after a decision or enough data */
	if (is_enough(amrr) || need_change) {
		reset_cnt(amrr);
	}
}
4818 4817
4819 4818 /*
4820 4819 * translate indirect address in eeprom to direct address
4821 4820 * in eeprom and return address of entry whos indirect address
4822 4821 * is indi_addr
4823 4822 */
4824 4823 static uint8_t *
4825 4824 iwp_eep_addr_trans(iwp_sc_t *sc, uint32_t indi_addr)
4826 4825 {
4827 4826 uint32_t di_addr;
4828 4827 uint16_t temp;
4829 4828
4830 4829 if (!(indi_addr & INDIRECT_ADDRESS)) {
4831 4830 di_addr = indi_addr;
4832 4831 return (&sc->sc_eep_map[di_addr]);
4833 4832 }
4834 4833
4835 4834 switch (indi_addr & INDIRECT_TYPE_MSK) {
4836 4835 case INDIRECT_GENERAL:
4837 4836 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_GENERAL);
4838 4837 break;
4839 4838 case INDIRECT_HOST:
4840 4839 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_HOST);
4841 4840 break;
4842 4841 case INDIRECT_REGULATORY:
4843 4842 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_REGULATORY);
4844 4843 break;
4845 4844 case INDIRECT_CALIBRATION:
4846 4845 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_CALIBRATION);
4847 4846 break;
4848 4847 case INDIRECT_PROCESS_ADJST:
4849 4848 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_PROCESS_ADJST);
4850 4849 break;
4851 4850 case INDIRECT_OTHERS:
4852 4851 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_OTHERS);
4853 4852 break;
4854 4853 default:
4855 4854 temp = 0;
4856 4855 cmn_err(CE_WARN, "iwp_eep_addr_trans(): "
4857 4856 "incorrect indirect eeprom address.\n");
4858 4857 break;
4859 4858 }
4860 4859
4861 4860 di_addr = (indi_addr & ADDRESS_MSK) + (temp << 1);
4862 4861
4863 4862 return (&sc->sc_eep_map[di_addr]);
4864 4863 }
4865 4864
/*
 * load a section of ucode into NIC
 *
 * Programs the service DMA channel to copy `len' bytes of firmware
 * from host DRAM (addr_s) to device SRAM (addr_d).  The register
 * write order — pause channel, set destination, set source and
 * length, mark buffer valid, then enable — is mandated by the
 * hardware.  Always returns IWP_SUCCESS.
 */
static int
iwp_put_seg_fw(iwp_sc_t *sc, uint32_t addr_s, uint32_t addr_d, uint32_t len)
{

	iwp_mac_access_enter(sc);

	/* pause the service channel before reprogramming it */
	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(IWP_FH_SRVC_CHNL),
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	/* destination address in device SRAM */
	IWP_WRITE(sc, IWP_FH_SRVC_CHNL_SRAM_ADDR_REG(IWP_FH_SRVC_CHNL), addr_d);

	/* source address in host DRAM and transfer length */
	IWP_WRITE(sc, IWP_FH_TFDIB_CTRL0_REG(IWP_FH_SRVC_CHNL),
	    (addr_s & FH_MEM_TFDIB_DRAM_ADDR_LSB_MASK));

	IWP_WRITE(sc, IWP_FH_TFDIB_CTRL1_REG(IWP_FH_SRVC_CHNL), len);

	/* one buffer, index 0, marked valid */
	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_BUF_STS_REG(IWP_FH_SRVC_CHNL),
	    (1 << IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
	    (1 << IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
	    IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* kick off the transfer; interrupt host at end of TFD */
	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(IWP_FH_SRVC_CHNL),
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL |
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwp_mac_access_exit(sc);

	return (IWP_SUCCESS);
}
4899 4898
/*
 * necessary setting during alive notification
 *
 * After the ucode reports "alive": program the SCD (scheduler)
 * registers and context area so Tx queues work, then send the WiMAX
 * coexistence table and the crystal frequency calibration command.
 * Returns IWP_SUCCESS, or the failing command's error code.
 */
static int
iwp_alive_common(iwp_sc_t *sc)
{
	uint32_t base;
	uint32_t i;
	iwp_wimax_coex_cmd_t w_cmd;
	iwp_calibration_crystal_cmd_t c_cmd;
	uint32_t rv = IWP_FAIL;

	/*
	 * initialize SCD related registers to make TX work.
	 */
	iwp_mac_access_enter(sc);

	/*
	 * read sram address of data base.
	 */
	sc->sc_scd_base = iwp_reg_read(sc, IWP_SCD_SRAM_BASE_ADDR);

	/* zero the SCD context data area ... */
	for (base = sc->sc_scd_base + IWP_SCD_CONTEXT_DATA_OFFSET;
	    base < sc->sc_scd_base + IWP_SCD_TX_STTS_BITMAP_OFFSET;
	    base += 4) {
		iwp_mem_write(sc, base, 0);
	}

	/* ... the Tx status bitmap area ... */
	for (; base < sc->sc_scd_base + IWP_SCD_TRANSLATE_TBL_OFFSET;
	    base += 4) {
		iwp_mem_write(sc, base, 0);
	}

	/* ... and the translate table (one uint16_t per queue) */
	for (i = 0; i < sizeof (uint16_t) * IWP_NUM_QUEUES; i += 4) {
		iwp_mem_write(sc, base + i, 0);
	}

	iwp_reg_write(sc, IWP_SCD_DRAM_BASE_ADDR,
	    sc->sc_dma_sh.cookie.dmac_address >> 10);

	iwp_reg_write(sc, IWP_SCD_QUEUECHAIN_SEL,
	    IWP_SCD_QUEUECHAIN_SEL_ALL(IWP_NUM_QUEUES));

	iwp_reg_write(sc, IWP_SCD_AGGR_SEL, 0);

	/* reset each queue's pointers and program its window/frame limit */
	for (i = 0; i < IWP_NUM_QUEUES; i++) {
		iwp_reg_write(sc, IWP_SCD_QUEUE_RDPTR(i), 0);
		IWP_WRITE(sc, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwp_mem_write(sc, sc->sc_scd_base +
		    IWP_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwp_mem_write(sc, sc->sc_scd_base +
		    IWP_SCD_CONTEXT_QUEUE_OFFSET(i) +
		    sizeof (uint32_t),
		    ((SCD_WIN_SIZE << IWP_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWP_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((SCD_FRAME_LIMIT <<
		    IWP_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWP_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwp_reg_write(sc, IWP_SCD_INTERRUPT_MASK, (1 << IWP_NUM_QUEUES) - 1);

	iwp_reg_write(sc, (IWP_SCD_BASE + 0x10),
	    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));

	IWP_WRITE(sc, HBUS_TARG_WRPTR, (IWP_CMD_QUEUE_NUM << 8));
	iwp_reg_write(sc, IWP_SCD_QUEUE_RDPTR(IWP_CMD_QUEUE_NUM), 0);

	/*
	 * queue 0-7 map to FIFO 0-7 and
	 * all queues work under FIFO mode(none-scheduler_ack)
	 */
	for (i = 0; i < 4; i++) {
		iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(i),
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    ((3-i) << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWP_SCD_QUEUE_STTS_REG_MSK);
	}

	iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(IWP_CMD_QUEUE_NUM),
	    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (IWP_CMD_FIFO_NUM << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWP_SCD_QUEUE_STTS_REG_MSK);

	for (i = 5; i < 7; i++) {
		iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(i),
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (i << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWP_SCD_QUEUE_STTS_REG_MSK);
	}

	iwp_mac_access_exit(sc);

	/* send an (all-zero) WiMAX coexistence priority table */
	(void) memset(&w_cmd, 0, sizeof (w_cmd));

	rv = iwp_cmd(sc, COEX_PRIORITY_TABLE_CMD, &w_cmd, sizeof (w_cmd), 1);
	if (rv != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alive_common(): "
		    "failed to send wimax coexist command.\n");
		return (rv);
	}

	/* crystal calibration values come from the eeprom image */
	(void) memset(&c_cmd, 0, sizeof (c_cmd));

	c_cmd.opCode = PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
	c_cmd.data.cap_pin1 = LE_16(sc->sc_eep_calib->xtal_calib[0]);
	c_cmd.data.cap_pin2 = LE_16(sc->sc_eep_calib->xtal_calib[1]);

	rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &c_cmd, sizeof (c_cmd), 1);
	if (rv != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alive_common(): "
		    "failed to send crystal frq calibration command.\n");
		return (rv);
	}

	/*
	 * make sure crystal frequency calibration ready
	 * before next operations.
	 */
	DELAY(1000);

	return (IWP_SUCCESS);
}
5026 5025
5027 5026 /*
5028 5027 * save results of calibration from ucode
5029 5028 */
5030 5029 static void
5031 5030 iwp_save_calib_result(iwp_sc_t *sc, iwp_rx_desc_t *desc)
5032 5031 {
5033 5032 struct iwp_calib_results *res_p = &sc->sc_calib_results;
5034 5033 struct iwp_calib_hdr *calib_hdr = (struct iwp_calib_hdr *)(desc + 1);
5035 5034 int len = LE_32(desc->len);
5036 5035
5037 5036 /*
5038 5037 * ensure the size of buffer is not too big
5039 5038 */
5040 5039 len = (len & FH_RSCSR_FRAME_SIZE_MASK) - 4;
5041 5040
5042 5041 switch (calib_hdr->op_code) {
5043 5042 case PHY_CALIBRATE_LO_CMD:
5044 5043 if (NULL == res_p->lo_res) {
5045 5044 res_p->lo_res = kmem_alloc(len, KM_NOSLEEP);
5046 5045 }
5047 5046
5048 5047 if (NULL == res_p->lo_res) {
5049 5048 cmn_err(CE_WARN, "iwp_save_calib_result(): "
5050 5049 "failed to allocate memory.\n");
5051 5050 return;
5052 5051 }
5053 5052
5054 5053 res_p->lo_res_len = len;
5055 5054 (void) memcpy(res_p->lo_res, calib_hdr, len);
5056 5055 break;
5057 5056 case PHY_CALIBRATE_TX_IQ_CMD:
5058 5057 if (NULL == res_p->tx_iq_res) {
5059 5058 res_p->tx_iq_res = kmem_alloc(len, KM_NOSLEEP);
5060 5059 }
5061 5060
5062 5061 if (NULL == res_p->tx_iq_res) {
5063 5062 cmn_err(CE_WARN, "iwp_save_calib_result(): "
5064 5063 "failed to allocate memory.\n");
5065 5064 return;
5066 5065 }
5067 5066
5068 5067 res_p->tx_iq_res_len = len;
5069 5068 (void) memcpy(res_p->tx_iq_res, calib_hdr, len);
5070 5069 break;
5071 5070 case PHY_CALIBRATE_TX_IQ_PERD_CMD:
5072 5071 if (NULL == res_p->tx_iq_perd_res) {
5073 5072 res_p->tx_iq_perd_res = kmem_alloc(len, KM_NOSLEEP);
5074 5073 }
5075 5074
5076 5075 if (NULL == res_p->tx_iq_perd_res) {
5077 5076 cmn_err(CE_WARN, "iwp_save_calib_result(): "
5078 5077 "failed to allocate memory.\n");
5079 5078 }
5080 5079
5081 5080 res_p->tx_iq_perd_res_len = len;
5082 5081 (void) memcpy(res_p->tx_iq_perd_res, calib_hdr, len);
5083 5082 break;
5084 5083 case PHY_CALIBRATE_BASE_BAND_CMD:
5085 5084 if (NULL == res_p->base_band_res) {
5086 5085 res_p->base_band_res = kmem_alloc(len, KM_NOSLEEP);
5087 5086 }
5088 5087
5089 5088 if (NULL == res_p->base_band_res) {
5090 5089 cmn_err(CE_WARN, "iwp_save_calib_result(): "
5091 5090 "failed to allocate memory.\n");
5092 5091 }
5093 5092
5094 5093 res_p->base_band_res_len = len;
5095 5094 (void) memcpy(res_p->base_band_res, calib_hdr, len);
5096 5095 break;
5097 5096 default:
5098 5097 cmn_err(CE_WARN, "iwp_save_calib_result(): "
5099 5098 "incorrect calibration type(%d).\n", calib_hdr->op_code);
5100 5099 break;
5101 5100 }
5102 5101
5103 5102 }
5104 5103
5105 5104 static void
5106 5105 iwp_release_calib_buffer(iwp_sc_t *sc)
5107 5106 {
5108 5107 if (sc->sc_calib_results.lo_res != NULL) {
5109 5108 kmem_free(sc->sc_calib_results.lo_res,
5110 5109 sc->sc_calib_results.lo_res_len);
5111 5110 sc->sc_calib_results.lo_res = NULL;
5112 5111 }
5113 5112
5114 5113 if (sc->sc_calib_results.tx_iq_res != NULL) {
5115 5114 kmem_free(sc->sc_calib_results.tx_iq_res,
5116 5115 sc->sc_calib_results.tx_iq_res_len);
5117 5116 sc->sc_calib_results.tx_iq_res = NULL;
5118 5117 }
5119 5118
5120 5119 if (sc->sc_calib_results.tx_iq_perd_res != NULL) {
5121 5120 kmem_free(sc->sc_calib_results.tx_iq_perd_res,
5122 5121 sc->sc_calib_results.tx_iq_perd_res_len);
5123 5122 sc->sc_calib_results.tx_iq_perd_res = NULL;
5124 5123 }
5125 5124
5126 5125 if (sc->sc_calib_results.base_band_res != NULL) {
5127 5126 kmem_free(sc->sc_calib_results.base_band_res,
5128 5127 sc->sc_calib_results.base_band_res_len);
5129 5128 sc->sc_calib_results.base_band_res = NULL;
5130 5129 }
5131 5130
5132 5131 }
5133 5132
/*
 * common section of initialization: checks that the radio is on,
 * then programs the Rx ring, the Tx rings and the keep-warm page
 * into the flow handler, clears RF-kill/command-blocked bits and
 * enables interrupts.  Returns IWP_FAIL if the hardware RF-kill
 * switch is engaged, IWP_SUCCESS otherwise.
 */
static int
iwp_init_common(iwp_sc_t *sc)
{
	int32_t qid;
	uint32_t tmp;

	(void) iwp_preinit(sc);

	/*
	 * bail out early when the hardware RF-kill switch reports
	 * the radio transmitter is disabled
	 */
	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
		cmn_err(CE_NOTE, "iwp_init_common(): "
		    "radio transmitter is off\n");
		return (IWP_FAIL);
	}

	/*
	 * init Rx ring
	 */
	iwp_mac_access_enter(sc);
	/* disable the Rx DMA channel before reprogramming it */
	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
	/* Rx descriptor ring base, 256-byte aligned (address >> 8) */
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
	    sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);

	/* Rx status write-back area inside the shared page (16-byte units) */
	IWP_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
	    ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
	    offsetof(struct iwp_shared, val0)) >> 4));

	/* re-enable channel 0: 4K buffers, interrupt host on Rx */
	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	    IWP_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	    (RX_QUEUE_SIZE_LOG <<
	    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
	iwp_mac_access_exit(sc);
	/* hand all but the last 8 descriptors to the hardware */
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
	    (RX_QUEUE_SIZE - 1) & ~0x7);

	/*
	 * init Tx rings
	 */
	iwp_mac_access_enter(sc);
	iwp_reg_write(sc, IWP_SCD_TXFACT, 0);

	/*
	 * keep warm page
	 */
	IWP_WRITE(sc, IWP_FH_KW_MEM_ADDR_REG,
	    sc->sc_dma_kw.cookie.dmac_address >> 4);

	/* point each Tx DMA channel at its ring and enable it */
	for (qid = 0; qid < IWP_NUM_QUEUES; qid++) {
		IWP_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
		    sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
		IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
		    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
	}

	iwp_mac_access_exit(sc);

	/*
	 * clear "radio off" and "disable command" bits
	 */
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
	    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/*
	 * clear any pending interrupts
	 */
	IWP_WRITE(sc, CSR_INT, 0xffffffff);

	/*
	 * enable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	/*
	 * NOTE(review): the SW RF-kill bit was already cleared above and
	 * is cleared twice more here; the repetition looks redundant —
	 * confirm against the hardware programming reference before
	 * removing either write.
	 */
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return (IWP_SUCCESS);
}
5220 5219
5221 5220 static int
5222 5221 iwp_fast_recover(iwp_sc_t *sc)
5223 5222 {
5224 5223 ieee80211com_t *ic = &sc->sc_ic;
5225 5224 int err = IWP_FAIL;
5226 5225
5227 5226 mutex_enter(&sc->sc_glock);
5228 5227
5229 5228 /* restore runtime configuration */
5230 5229 bcopy(&sc->sc_config_save, &sc->sc_config,
5231 5230 sizeof (sc->sc_config));
5232 5231
5233 5232 sc->sc_config.assoc_id = 0;
5234 5233 sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
5235 5234
5236 5235 if ((err = iwp_hw_set_before_auth(sc)) != IWP_SUCCESS) {
5237 5236 cmn_err(CE_WARN, "iwp_fast_recover(): "
5238 5237 "could not setup authentication\n");
5239 5238 mutex_exit(&sc->sc_glock);
5240 5239 return (err);
5241 5240 }
5242 5241
5243 5242 bcopy(&sc->sc_config_save, &sc->sc_config,
5244 5243 sizeof (sc->sc_config));
5245 5244
5246 5245 /* update adapter's configuration */
5247 5246 err = iwp_run_state_config(sc);
5248 5247 if (err != IWP_SUCCESS) {
5249 5248 cmn_err(CE_WARN, "iwp_fast_recover(): "
5250 5249 "failed to setup association\n");
5251 5250 mutex_exit(&sc->sc_glock);
5252 5251 return (err);
5253 5252 }
5254 5253 /* set LED on */
5255 5254 iwp_set_led(sc, 2, 0, 1);
5256 5255
5257 5256 mutex_exit(&sc->sc_glock);
5258 5257
5259 5258 atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
5260 5259
5261 5260 /* start queue */
5262 5261 IWP_DBG((IWP_DEBUG_FW, "iwp_fast_recover(): "
5263 5262 "resume xmit\n"));
5264 5263 mac_tx_update(ic->ic_mach);
5265 5264
5266 5265 return (IWP_SUCCESS);
5267 5266 }
5268 5267
5269 5268 static int
5270 5269 iwp_run_state_config(iwp_sc_t *sc)
5271 5270 {
5272 5271 struct ieee80211com *ic = &sc->sc_ic;
5273 5272 ieee80211_node_t *in = ic->ic_bss;
5274 5273 int err = IWP_FAIL;
5275 5274
5276 5275 /*
5277 5276 * update adapter's configuration
5278 5277 */
5279 5278 sc->sc_config.assoc_id = in->in_associd & 0x3fff;
5280 5279
5281 5280 /*
5282 5281 * short preamble/slot time are
5283 5282 * negotiated when associating
5284 5283 */
5285 5284 sc->sc_config.flags &=
5286 5285 ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
5287 5286 RXON_FLG_SHORT_SLOT_MSK);
5288 5287
5289 5288 if (ic->ic_flags & IEEE80211_F_SHSLOT) {
5290 5289 sc->sc_config.flags |=
5291 5290 LE_32(RXON_FLG_SHORT_SLOT_MSK);
5292 5291 }
5293 5292
5294 5293 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
5295 5294 sc->sc_config.flags |=
5296 5295 LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
5297 5296 }
5298 5297
5299 5298 sc->sc_config.filter_flags |=
5300 5299 LE_32(RXON_FILTER_ASSOC_MSK);
5301 5300
5302 5301 if (ic->ic_opmode != IEEE80211_M_STA) {
5303 5302 sc->sc_config.filter_flags |=
5304 5303 LE_32(RXON_FILTER_BCON_AWARE_MSK);
5305 5304 }
5306 5305
5307 5306 IWP_DBG((IWP_DEBUG_80211, "iwp_run_state_config(): "
5308 5307 "config chan %d flags %x"
5309 5308 " filter_flags %x\n",
5310 5309 sc->sc_config.chan, sc->sc_config.flags,
5311 5310 sc->sc_config.filter_flags));
5312 5311
5313 5312 err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
5314 5313 sizeof (iwp_rxon_cmd_t), 1);
5315 5314 if (err != IWP_SUCCESS) {
5316 5315 cmn_err(CE_WARN, "iwp_run_state_config(): "
5317 5316 "could not update configuration\n");
5318 5317 return (err);
5319 5318 }
5320 5319
5321 5320 return (err);
5322 5321 }
5323 5322
5324 5323 /*
5325 5324 * This function overwrites default configurations of
5326 5325 * ieee80211com structure in Net80211 module.
5327 5326 */
5328 5327 static void
5329 5328 iwp_overwrite_ic_default(iwp_sc_t *sc)
5330 5329 {
5331 5330 ieee80211com_t *ic = &sc->sc_ic;
5332 5331
5333 5332 sc->sc_newstate = ic->ic_newstate;
5334 5333 ic->ic_newstate = iwp_newstate;
5335 5334 ic->ic_node_alloc = iwp_node_alloc;
5336 5335 ic->ic_node_free = iwp_node_free;
5337 5336 }
5338 5337
5339 5338
5340 5339 /*
5341 5340 * This function adds AP station into hardware.
5342 5341 */
5343 5342 static int
5344 5343 iwp_add_ap_sta(iwp_sc_t *sc)
5345 5344 {
5346 5345 ieee80211com_t *ic = &sc->sc_ic;
5347 5346 ieee80211_node_t *in = ic->ic_bss;
5348 5347 iwp_add_sta_t node;
5349 5348 int err = IWP_FAIL;
5350 5349
5351 5350 /*
5352 5351 * Add AP node into hardware.
5353 5352 */
5354 5353 (void) memset(&node, 0, sizeof (node));
5355 5354 IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
5356 5355 node.mode = STA_MODE_ADD_MSK;
5357 5356 node.sta.sta_id = IWP_AP_ID;
5358 5357
5359 5358 err = iwp_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
5360 5359 if (err != IWP_SUCCESS) {
5361 5360 cmn_err(CE_WARN, "iwp_add_ap_sta(): "
5362 5361 "failed to add AP node\n");
5363 5362 return (err);
5364 5363 }
5365 5364
5366 5365 return (err);
5367 5366 }
5368 5367
5369 5368 /*
5370 5369 * Check EEPROM version and Calibration version.
5371 5370 */
5372 5371 static int
5373 5372 iwp_eep_ver_chk(iwp_sc_t *sc)
5374 5373 {
5375 5374 if ((IWP_READ_EEP_SHORT(sc, EEP_VERSION) < 0x011a) ||
5376 5375 (sc->sc_eep_calib->tx_pow_calib_hdr.calib_version < 4)) {
5377 5376 cmn_err(CE_WARN, "iwp_eep_ver_chk(): "
5378 5377 "unsupported eeprom detected\n");
5379 5378 return (IWP_FAIL);
5380 5379 }
5381 5380
5382 5381 return (IWP_SUCCESS);
5383 5382 }
5384 5383
5385 5384 /*
5386 5385 * Determine parameters for all supported chips.
5387 5386 */
5388 5387 static void
5389 5388 iwp_set_chip_param(iwp_sc_t *sc)
5390 5389 {
5391 5390 if ((0x008d == sc->sc_dev_id) ||
5392 5391 (0x008e == sc->sc_dev_id)) {
5393 5392 sc->sc_chip_param.phy_mode = PHY_MODE_G |
5394 5393 PHY_MODE_A | PHY_MODE_N;
5395 5394
5396 5395 sc->sc_chip_param.tx_ant = ANT_A | ANT_B;
5397 5396 sc->sc_chip_param.rx_ant = ANT_A | ANT_B;
5398 5397
5399 5398 sc->sc_chip_param.pa_type = PA_TYPE_MIX;
5400 5399 }
5401 5400
5402 5401 if ((0x422c == sc->sc_dev_id) ||
5403 5402 (0x4239 == sc->sc_dev_id)) {
5404 5403 sc->sc_chip_param.phy_mode = PHY_MODE_G |
5405 5404 PHY_MODE_A | PHY_MODE_N;
5406 5405
5407 5406 sc->sc_chip_param.tx_ant = ANT_B | ANT_C;
5408 5407 sc->sc_chip_param.rx_ant = ANT_B | ANT_C;
5409 5408
5410 5409 sc->sc_chip_param.pa_type = PA_TYPE_INTER;
5411 5410 }
5412 5411
5413 5412 if ((0x422b == sc->sc_dev_id) ||
5414 5413 (0x4238 == sc->sc_dev_id)) {
5415 5414 sc->sc_chip_param.phy_mode = PHY_MODE_G |
5416 5415 PHY_MODE_A | PHY_MODE_N;
5417 5416
5418 5417 sc->sc_chip_param.tx_ant = ANT_A | ANT_B | ANT_C;
5419 5418 sc->sc_chip_param.rx_ant = ANT_A | ANT_B | ANT_C;
5420 5419
5421 5420 sc->sc_chip_param.pa_type = PA_TYPE_SYSTEM;
5422 5421 }
5423 5422 }
↓ open down ↓ |
5028 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX