7127 remove -Wno-missing-braces from Makefile.uts
1 /*
2 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
3 * Use is subject to license terms.
4 */
5
6 /*
7 * Copyright (c) 2009, Intel Corporation
8 * All rights reserved.
9 */
10
11 /*
12 * Copyright (c) 2006
13 * Copyright (c) 2007
14 * Damien Bergamini <damien.bergamini@free.fr>
15 *
16 * Permission to use, copy, modify, and distribute this software for any
17 * purpose with or without fee is hereby granted, provided that the above
18 * copyright notice and this permission notice appear in all copies.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27 */
28
29 /*
30 * Intel(R) WiFi Link 6000 Driver
31 */
32
33 #include <sys/types.h>
34 #include <sys/byteorder.h>
35 #include <sys/conf.h>
36 #include <sys/cmn_err.h>
37 #include <sys/stat.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/strsubr.h>
41 #include <sys/ethernet.h>
42 #include <inet/common.h>
43 #include <inet/nd.h>
44 #include <inet/mi.h>
45 #include <sys/note.h>
46 #include <sys/stream.h>
47 #include <sys/strsun.h>
48 #include <sys/modctl.h>
49 #include <sys/devops.h>
50 #include <sys/dlpi.h>
51 #include <sys/mac_provider.h>
52 #include <sys/mac_wifi.h>
53 #include <sys/net80211.h>
54 #include <sys/net80211_proto.h>
55 #include <sys/varargs.h>
56 #include <sys/policy.h>
57 #include <sys/pci.h>
58
59 #include "iwp_calibration.h"
60 #include "iwp_hw.h"
61 #include "iwp_eeprom.h"
62 #include "iwp_var.h"
63 #include <inet/wifi_ioctl.h>
64
#ifdef DEBUG
/* Debug-message categories; OR one or more into iwp_dbg_flags to enable. */
#define IWP_DEBUG_80211 (1 << 0)
#define IWP_DEBUG_CMD (1 << 1)
#define IWP_DEBUG_DMA (1 << 2)
#define IWP_DEBUG_EEPROM (1 << 3)
#define IWP_DEBUG_FW (1 << 4)
#define IWP_DEBUG_HW (1 << 5)
#define IWP_DEBUG_INTR (1 << 6)
#define IWP_DEBUG_MRR (1 << 7)
#define IWP_DEBUG_PIO (1 << 8)
#define IWP_DEBUG_RX (1 << 9)
#define IWP_DEBUG_SCAN (1 << 10)
#define IWP_DEBUG_TX (1 << 11)
#define IWP_DEBUG_RATECTL (1 << 12)
#define IWP_DEBUG_RADIO (1 << 13)
#define IWP_DEBUG_RESUME (1 << 14)
#define IWP_DEBUG_CALIBRATION (1 << 15)
/*
 * if want to see debug message of a given section,
 * please set this flag to one of above values
 */
uint32_t iwp_dbg_flags = 0;
/*
 * IWP_DBG takes a double-parenthesized argument list,
 * e.g. IWP_DBG((IWP_DEBUG_TX, "fmt", ...)), so the whole
 * call vanishes in non-DEBUG builds.
 */
#define IWP_DBG(x) \
	iwp_dbg x
#else
#define IWP_DBG(x)
#endif
92
/* per-instance soft-state anchor, managed via ddi_soft_state_*() */
static void *iwp_soft_state_p = NULL;

/*
 * ucode will be compiled into driver image
 * (the included file is a comma-separated byte list)
 */
static uint8_t iwp_fw_bin [] = {
#include "fw-iw/iwp.ucode"
};
101
102 /*
103 * DMA attributes for a shared page
104 */
105 static ddi_dma_attr_t sh_dma_attr = {
106 DMA_ATTR_V0, /* version of this structure */
107 0, /* lowest usable address */
108 0xffffffffU, /* highest usable address */
109 0xffffffffU, /* maximum DMAable byte count */
110 0x1000, /* alignment in bytes */
111 0x1000, /* burst sizes (any?) */
112 1, /* minimum transfer */
113 0xffffffffU, /* maximum transfer */
114 0xffffffffU, /* maximum segment length */
115 1, /* maximum number of segments */
116 1, /* granularity */
117 0, /* flags (reserved) */
118 };
119
120 /*
121 * DMA attributes for a keep warm DRAM descriptor
122 */
123 static ddi_dma_attr_t kw_dma_attr = {
124 DMA_ATTR_V0, /* version of this structure */
125 0, /* lowest usable address */
126 0xffffffffU, /* highest usable address */
127 0xffffffffU, /* maximum DMAable byte count */
128 0x1000, /* alignment in bytes */
129 0x1000, /* burst sizes (any?) */
130 1, /* minimum transfer */
131 0xffffffffU, /* maximum transfer */
132 0xffffffffU, /* maximum segment length */
133 1, /* maximum number of segments */
134 1, /* granularity */
135 0, /* flags (reserved) */
136 };
137
138 /*
139 * DMA attributes for a ring descriptor
140 */
141 static ddi_dma_attr_t ring_desc_dma_attr = {
142 DMA_ATTR_V0, /* version of this structure */
143 0, /* lowest usable address */
144 0xffffffffU, /* highest usable address */
145 0xffffffffU, /* maximum DMAable byte count */
146 0x100, /* alignment in bytes */
147 0x100, /* burst sizes (any?) */
148 1, /* minimum transfer */
149 0xffffffffU, /* maximum transfer */
150 0xffffffffU, /* maximum segment length */
151 1, /* maximum number of segments */
152 1, /* granularity */
153 0, /* flags (reserved) */
154 };
155
156 /*
157 * DMA attributes for a cmd
158 */
159 static ddi_dma_attr_t cmd_dma_attr = {
160 DMA_ATTR_V0, /* version of this structure */
161 0, /* lowest usable address */
162 0xffffffffU, /* highest usable address */
163 0xffffffffU, /* maximum DMAable byte count */
164 4, /* alignment in bytes */
165 0x100, /* burst sizes (any?) */
166 1, /* minimum transfer */
167 0xffffffffU, /* maximum transfer */
168 0xffffffffU, /* maximum segment length */
169 1, /* maximum number of segments */
170 1, /* granularity */
171 0, /* flags (reserved) */
172 };
173
174 /*
175 * DMA attributes for a rx buffer
176 */
177 static ddi_dma_attr_t rx_buffer_dma_attr = {
178 DMA_ATTR_V0, /* version of this structure */
179 0, /* lowest usable address */
180 0xffffffffU, /* highest usable address */
181 0xffffffffU, /* maximum DMAable byte count */
182 0x100, /* alignment in bytes */
183 0x100, /* burst sizes (any?) */
184 1, /* minimum transfer */
185 0xffffffffU, /* maximum transfer */
186 0xffffffffU, /* maximum segment length */
187 1, /* maximum number of segments */
188 1, /* granularity */
189 0, /* flags (reserved) */
190 };
191
192 /*
193 * DMA attributes for a tx buffer.
194 * the maximum number of segments is 4 for the hardware.
195 * now all the wifi drivers put the whole frame in a single
196 * descriptor, so we define the maximum number of segments 1,
197 * just the same as the rx_buffer. we consider leverage the HW
198 * ability in the future, that is why we don't define rx and tx
199 * buffer_dma_attr as the same.
200 */
201 static ddi_dma_attr_t tx_buffer_dma_attr = {
202 DMA_ATTR_V0, /* version of this structure */
203 0, /* lowest usable address */
204 0xffffffffU, /* highest usable address */
205 0xffffffffU, /* maximum DMAable byte count */
206 4, /* alignment in bytes */
207 0x100, /* burst sizes (any?) */
208 1, /* minimum transfer */
209 0xffffffffU, /* maximum transfer */
210 0xffffffffU, /* maximum segment length */
211 1, /* maximum number of segments */
212 1, /* granularity */
213 0, /* flags (reserved) */
214 };
215
216 /*
217 * DMA attributes for text and data part in the firmware
218 */
219 static ddi_dma_attr_t fw_dma_attr = {
220 DMA_ATTR_V0, /* version of this structure */
221 0, /* lowest usable address */
222 0xffffffffU, /* highest usable address */
223 0x7fffffff, /* maximum DMAable byte count */
224 0x10, /* alignment in bytes */
225 0x100, /* burst sizes (any?) */
226 1, /* minimum transfer */
227 0xffffffffU, /* maximum transfer */
228 0xffffffffU, /* maximum segment length */
229 1, /* maximum number of segments */
230 1, /* granularity */
231 0, /* flags (reserved) */
232 };
233
234 /*
235 * regs access attributes
236 */
237 static ddi_device_acc_attr_t iwp_reg_accattr = {
238 DDI_DEVICE_ATTR_V0,
239 DDI_STRUCTURE_LE_ACC,
240 DDI_STRICTORDER_ACC,
241 DDI_DEFAULT_ACC
242 };
243
244 /*
245 * DMA access attributes for descriptor
246 */
247 static ddi_device_acc_attr_t iwp_dma_descattr = {
248 DDI_DEVICE_ATTR_V0,
249 DDI_STRUCTURE_LE_ACC,
250 DDI_STRICTORDER_ACC,
251 DDI_DEFAULT_ACC
252 };
253
254 /*
255 * DMA access attributes
256 */
257 static ddi_device_acc_attr_t iwp_dma_accattr = {
258 DDI_DEVICE_ATTR_V0,
259 DDI_NEVERSWAP_ACC,
260 DDI_STRICTORDER_ACC,
261 DDI_DEFAULT_ACC
262 };
263
264 static int iwp_ring_init(iwp_sc_t *);
265 static void iwp_ring_free(iwp_sc_t *);
266 static int iwp_alloc_shared(iwp_sc_t *);
267 static void iwp_free_shared(iwp_sc_t *);
268 static int iwp_alloc_kw(iwp_sc_t *);
269 static void iwp_free_kw(iwp_sc_t *);
270 static int iwp_alloc_fw_dma(iwp_sc_t *);
271 static void iwp_free_fw_dma(iwp_sc_t *);
272 static int iwp_alloc_rx_ring(iwp_sc_t *);
273 static void iwp_reset_rx_ring(iwp_sc_t *);
274 static void iwp_free_rx_ring(iwp_sc_t *);
275 static int iwp_alloc_tx_ring(iwp_sc_t *, iwp_tx_ring_t *,
276 int, int);
277 static void iwp_reset_tx_ring(iwp_sc_t *, iwp_tx_ring_t *);
278 static void iwp_free_tx_ring(iwp_tx_ring_t *);
279 static ieee80211_node_t *iwp_node_alloc(ieee80211com_t *);
280 static void iwp_node_free(ieee80211_node_t *);
281 static int iwp_newstate(ieee80211com_t *, enum ieee80211_state, int);
282 static void iwp_mac_access_enter(iwp_sc_t *);
283 static void iwp_mac_access_exit(iwp_sc_t *);
284 static uint32_t iwp_reg_read(iwp_sc_t *, uint32_t);
285 static void iwp_reg_write(iwp_sc_t *, uint32_t, uint32_t);
286 static int iwp_load_init_firmware(iwp_sc_t *);
287 static int iwp_load_run_firmware(iwp_sc_t *);
288 static void iwp_tx_intr(iwp_sc_t *, iwp_rx_desc_t *);
289 static void iwp_cmd_intr(iwp_sc_t *, iwp_rx_desc_t *);
290 static uint_t iwp_intr(caddr_t, caddr_t);
291 static int iwp_eep_load(iwp_sc_t *);
292 static void iwp_get_mac_from_eep(iwp_sc_t *);
293 static int iwp_eep_sem_down(iwp_sc_t *);
294 static void iwp_eep_sem_up(iwp_sc_t *);
295 static uint_t iwp_rx_softintr(caddr_t, caddr_t);
296 static uint8_t iwp_rate_to_plcp(int);
297 static int iwp_cmd(iwp_sc_t *, int, const void *, int, int);
298 static void iwp_set_led(iwp_sc_t *, uint8_t, uint8_t, uint8_t);
299 static int iwp_hw_set_before_auth(iwp_sc_t *);
300 static int iwp_scan(iwp_sc_t *);
301 static int iwp_config(iwp_sc_t *);
302 static void iwp_stop_master(iwp_sc_t *);
303 static int iwp_power_up(iwp_sc_t *);
304 static int iwp_preinit(iwp_sc_t *);
305 static int iwp_init(iwp_sc_t *);
306 static void iwp_stop(iwp_sc_t *);
307 static int iwp_quiesce(dev_info_t *t);
308 static void iwp_amrr_init(iwp_amrr_t *);
309 static void iwp_amrr_timeout(iwp_sc_t *);
310 static void iwp_amrr_ratectl(void *, ieee80211_node_t *);
311 static void iwp_ucode_alive(iwp_sc_t *, iwp_rx_desc_t *);
312 static void iwp_rx_phy_intr(iwp_sc_t *, iwp_rx_desc_t *);
313 static void iwp_rx_mpdu_intr(iwp_sc_t *, iwp_rx_desc_t *);
314 static void iwp_release_calib_buffer(iwp_sc_t *);
315 static int iwp_init_common(iwp_sc_t *);
316 static uint8_t *iwp_eep_addr_trans(iwp_sc_t *, uint32_t);
317 static int iwp_put_seg_fw(iwp_sc_t *, uint32_t, uint32_t, uint32_t);
318 static int iwp_alive_common(iwp_sc_t *);
319 static void iwp_save_calib_result(iwp_sc_t *, iwp_rx_desc_t *);
320 static int iwp_attach(dev_info_t *, ddi_attach_cmd_t);
321 static int iwp_detach(dev_info_t *, ddi_detach_cmd_t);
322 static void iwp_destroy_locks(iwp_sc_t *);
323 static int iwp_send(ieee80211com_t *, mblk_t *, uint8_t);
324 static void iwp_thread(iwp_sc_t *);
325 static int iwp_run_state_config(iwp_sc_t *);
326 static int iwp_fast_recover(iwp_sc_t *);
327 static void iwp_overwrite_ic_default(iwp_sc_t *);
328 static int iwp_add_ap_sta(iwp_sc_t *);
329 static int iwp_alloc_dma_mem(iwp_sc_t *, size_t,
330 ddi_dma_attr_t *, ddi_device_acc_attr_t *,
331 uint_t, iwp_dma_t *);
332 static void iwp_free_dma_mem(iwp_dma_t *);
333 static int iwp_eep_ver_chk(iwp_sc_t *);
334 static void iwp_set_chip_param(iwp_sc_t *);
335
336 /*
337 * GLD specific operations
338 */
339 static int iwp_m_stat(void *, uint_t, uint64_t *);
340 static int iwp_m_start(void *);
341 static void iwp_m_stop(void *);
342 static int iwp_m_unicst(void *, const uint8_t *);
343 static int iwp_m_multicst(void *, boolean_t, const uint8_t *);
344 static int iwp_m_promisc(void *, boolean_t);
345 static mblk_t *iwp_m_tx(void *, mblk_t *);
346 static void iwp_m_ioctl(void *, queue_t *, mblk_t *);
347 static int iwp_m_setprop(void *arg, const char *pr_name,
348 mac_prop_id_t wldp_pr_num, uint_t wldp_length, const void *wldp_buf);
349 static int iwp_m_getprop(void *arg, const char *pr_name,
350 mac_prop_id_t wldp_pr_num, uint_t wldp_length, void *wldp_buf);
351 static void iwp_m_propinfo(void *, const char *, mac_prop_id_t,
352 mac_prop_info_handle_t);
353
354 /*
355 * Supported rates for 802.11b/g modes (in 500Kbps unit).
356 */
357 static const struct ieee80211_rateset iwp_rateset_11b =
358 { 4, { 2, 4, 11, 22 } };
359
360 static const struct ieee80211_rateset iwp_rateset_11g =
361 { 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
362
363 /*
364 * For mfthread only
365 */
366 extern pri_t minclsyspri;
367
368 #define DRV_NAME_SP "iwp"
369
370 /*
371 * Module Loading Data & Entry Points
372 */
373 DDI_DEFINE_STREAM_OPS(iwp_devops, nulldev, nulldev, iwp_attach,
374 iwp_detach, nodev, NULL, D_MP, NULL, iwp_quiesce);
375
376 static struct modldrv iwp_modldrv = {
377 &mod_driverops,
378 "Intel(R) PumaPeak driver(N)",
379 &iwp_devops
380 };
381
382 static struct modlinkage iwp_modlinkage = {
383 MODREV_1,
384 { &iwp_modldrv, NULL }
385 };
386
387 int
388 _init(void)
389 {
390 int status;
391
392 status = ddi_soft_state_init(&iwp_soft_state_p,
393 sizeof (iwp_sc_t), 1);
394 if (status != DDI_SUCCESS) {
395 return (status);
396 }
397
398 mac_init_ops(&iwp_devops, DRV_NAME_SP);
399 status = mod_install(&iwp_modlinkage);
400 if (status != DDI_SUCCESS) {
401 mac_fini_ops(&iwp_devops);
402 ddi_soft_state_fini(&iwp_soft_state_p);
403 }
404
405 return (status);
406 }
407
408 int
409 _fini(void)
410 {
411 int status;
412
413 status = mod_remove(&iwp_modlinkage);
414 if (DDI_SUCCESS == status) {
415 mac_fini_ops(&iwp_devops);
416 ddi_soft_state_fini(&iwp_soft_state_p);
417 }
418
419 return (status);
420 }
421
/*
 * _info(9E): report module information via the module linkage.
 */
int
_info(struct modinfo *mip)
{
	return (mod_info(&iwp_modlinkage, mip));
}
427
428 /*
429 * Mac Call Back entries
430 */
431 mac_callbacks_t iwp_m_callbacks = {
432 MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
433 iwp_m_stat,
434 iwp_m_start,
435 iwp_m_stop,
436 iwp_m_promisc,
437 iwp_m_multicst,
438 iwp_m_unicst,
439 iwp_m_tx,
440 NULL,
441 iwp_m_ioctl,
442 NULL,
443 NULL,
444 NULL,
445 iwp_m_setprop,
446 iwp_m_getprop,
447 iwp_m_propinfo
448 };
449
#ifdef DEBUG
/*
 * Emit a driver debug message when the message's category flag is
 * enabled in iwp_dbg_flags; otherwise do nothing.
 */
void
iwp_dbg(uint32_t flags, const char *fmt, ...)
{
	va_list	args;

	if ((flags & iwp_dbg_flags) == 0) {
		return;
	}

	va_start(args, fmt);
	vcmn_err(CE_NOTE, fmt, args);
	va_end(args);
}
#endif /* DEBUG */
463
464 /*
465 * device operations
466 */
467 int
468 iwp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
469 {
470 iwp_sc_t *sc;
471 ieee80211com_t *ic;
472 int instance, i;
473 char strbuf[32];
474 wifi_data_t wd = { 0 };
475 mac_register_t *macp;
476 int intr_type;
477 int intr_count;
478 int intr_actual;
479 int err = DDI_FAILURE;
480
481 switch (cmd) {
482 case DDI_ATTACH:
483 break;
484 case DDI_RESUME:
485 instance = ddi_get_instance(dip);
486 sc = ddi_get_soft_state(iwp_soft_state_p,
487 instance);
488 ASSERT(sc != NULL);
489
490 if (sc->sc_flags & IWP_F_RUNNING) {
491 (void) iwp_init(sc);
492 }
493
494 atomic_and_32(&sc->sc_flags, ~IWP_F_SUSPEND);
495
496 IWP_DBG((IWP_DEBUG_RESUME, "iwp_attach(): "
497 "resume\n"));
498 return (DDI_SUCCESS);
499 default:
500 goto attach_fail1;
501 }
502
503 instance = ddi_get_instance(dip);
504 err = ddi_soft_state_zalloc(iwp_soft_state_p, instance);
505 if (err != DDI_SUCCESS) {
506 cmn_err(CE_WARN, "iwp_attach(): "
507 "failed to allocate soft state\n");
508 goto attach_fail1;
509 }
510
511 sc = ddi_get_soft_state(iwp_soft_state_p, instance);
512 ASSERT(sc != NULL);
513
514 sc->sc_dip = dip;
515
516 /*
517 * map configure space
518 */
519 err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
520 &iwp_reg_accattr, &sc->sc_cfg_handle);
521 if (err != DDI_SUCCESS) {
522 cmn_err(CE_WARN, "iwp_attach(): "
523 "failed to map config spaces regs\n");
524 goto attach_fail2;
525 }
526
527 sc->sc_dev_id = ddi_get16(sc->sc_cfg_handle,
528 (uint16_t *)(sc->sc_cfg_base + PCI_CONF_DEVID));
529 if ((sc->sc_dev_id != 0x422B) &&
530 (sc->sc_dev_id != 0x422C) &&
531 (sc->sc_dev_id != 0x4238) &&
532 (sc->sc_dev_id != 0x4239) &&
533 (sc->sc_dev_id != 0x008d) &&
534 (sc->sc_dev_id != 0x008e)) {
535 cmn_err(CE_WARN, "iwp_attach(): "
536 "Do not support this device\n");
537 goto attach_fail3;
538 }
539
540 iwp_set_chip_param(sc);
541
542 sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
543 (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));
544
545 /*
546 * keep from disturbing C3 state of CPU
547 */
548 ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base +
549 PCI_CFG_RETRY_TIMEOUT), 0);
550
551 /*
552 * determine the size of buffer for frame and command to ucode
553 */
554 sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
555 (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
556 if (!sc->sc_clsz) {
557 sc->sc_clsz = 16;
558 }
559 sc->sc_clsz = (sc->sc_clsz << 2);
560
561 sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
562 IEEE80211_MTU + IEEE80211_CRC_LEN +
563 (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
564 IEEE80211_WEP_CRCLEN), sc->sc_clsz);
565
566 /*
567 * Map operating registers
568 */
569 err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
570 0, 0, &iwp_reg_accattr, &sc->sc_handle);
571 if (err != DDI_SUCCESS) {
572 cmn_err(CE_WARN, "iwp_attach(): "
573 "failed to map device regs\n");
574 goto attach_fail3;
575 }
576
577 /*
578 * this is used to differentiate type of hardware
579 */
580 sc->sc_hw_rev = IWP_READ(sc, CSR_HW_REV);
581
582 err = ddi_intr_get_supported_types(dip, &intr_type);
583 if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
584 cmn_err(CE_WARN, "iwp_attach(): "
585 "fixed type interrupt is not supported\n");
586 goto attach_fail4;
587 }
588
589 err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
590 if ((err != DDI_SUCCESS) || (intr_count != 1)) {
591 cmn_err(CE_WARN, "iwp_attach(): "
592 "no fixed interrupts\n");
593 goto attach_fail4;
594 }
595
596 sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
597
598 err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
599 intr_count, &intr_actual, 0);
600 if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
601 cmn_err(CE_WARN, "iwp_attach(): "
602 "ddi_intr_alloc() failed 0x%x\n", err);
603 goto attach_fail5;
604 }
605
606 err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
607 if (err != DDI_SUCCESS) {
608 cmn_err(CE_WARN, "iwp_attach(): "
609 "ddi_intr_get_pri() failed 0x%x\n", err);
610 goto attach_fail6;
611 }
612
613 mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
614 DDI_INTR_PRI(sc->sc_intr_pri));
615 mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
616 DDI_INTR_PRI(sc->sc_intr_pri));
617 mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
618 DDI_INTR_PRI(sc->sc_intr_pri));
619
620 cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
621 cv_init(&sc->sc_put_seg_cv, NULL, CV_DRIVER, NULL);
622 cv_init(&sc->sc_ucode_cv, NULL, CV_DRIVER, NULL);
623
624 /*
625 * initialize the mfthread
626 */
627 cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
628 sc->sc_mf_thread = NULL;
629 sc->sc_mf_thread_switch = 0;
630
631 /*
632 * Allocate shared buffer for communication between driver and ucode.
633 */
634 err = iwp_alloc_shared(sc);
635 if (err != DDI_SUCCESS) {
636 cmn_err(CE_WARN, "iwp_attach(): "
637 "failed to allocate shared page\n");
638 goto attach_fail7;
639 }
640
641 (void) memset(sc->sc_shared, 0, sizeof (iwp_shared_t));
642
643 /*
644 * Allocate keep warm page.
645 */
646 err = iwp_alloc_kw(sc);
647 if (err != DDI_SUCCESS) {
648 cmn_err(CE_WARN, "iwp_attach(): "
649 "failed to allocate keep warm page\n");
650 goto attach_fail8;
651 }
652
653 /*
654 * Do some necessary hardware initializations.
655 */
656 err = iwp_preinit(sc);
657 if (err != IWP_SUCCESS) {
658 cmn_err(CE_WARN, "iwp_attach(): "
659 "failed to initialize hardware\n");
660 goto attach_fail9;
661 }
662
663 /*
664 * get hardware configurations from eeprom
665 */
666 err = iwp_eep_load(sc);
667 if (err != IWP_SUCCESS) {
668 cmn_err(CE_WARN, "iwp_attach(): "
669 "failed to load eeprom\n");
670 goto attach_fail9;
671 }
672
673 /*
674 * calibration information from EEPROM
675 */
676 sc->sc_eep_calib = (struct iwp_eep_calibration *)
677 iwp_eep_addr_trans(sc, EEP_CALIBRATION);
678
679 err = iwp_eep_ver_chk(sc);
680 if (err != IWP_SUCCESS) {
681 goto attach_fail9;
682 }
683
684 /*
685 * get MAC address of this chipset
686 */
687 iwp_get_mac_from_eep(sc);
688
689
690 /*
691 * initialize TX and RX ring buffers
692 */
693 err = iwp_ring_init(sc);
694 if (err != DDI_SUCCESS) {
695 cmn_err(CE_WARN, "iwp_attach(): "
696 "failed to allocate and initialize ring\n");
697 goto attach_fail9;
698 }
699
700 sc->sc_hdr = (iwp_firmware_hdr_t *)iwp_fw_bin;
701
702 /*
703 * copy ucode to dma buffer
704 */
705 err = iwp_alloc_fw_dma(sc);
706 if (err != DDI_SUCCESS) {
707 cmn_err(CE_WARN, "iwp_attach(): "
708 "failed to allocate firmware dma\n");
709 goto attach_fail10;
710 }
711
712 /*
713 * Initialize the wifi part, which will be used by
714 * 802.11 module
715 */
716 ic = &sc->sc_ic;
717 ic->ic_phytype = IEEE80211_T_OFDM;
718 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
719 ic->ic_state = IEEE80211_S_INIT;
720 ic->ic_maxrssi = 100; /* experimental number */
721 ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
722 IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;
723
724 /*
725 * Support WPA/WPA2
726 */
727 ic->ic_caps |= IEEE80211_C_WPA;
728
729 /*
730 * set supported .11b and .11g rates
731 */
732 ic->ic_sup_rates[IEEE80211_MODE_11B] = iwp_rateset_11b;
733 ic->ic_sup_rates[IEEE80211_MODE_11G] = iwp_rateset_11g;
734
735 /*
736 * set supported .11b and .11g channels (1 through 11)
737 */
738 for (i = 1; i <= 11; i++) {
739 ic->ic_sup_channels[i].ich_freq =
740 ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
741 ic->ic_sup_channels[i].ich_flags =
742 IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
743 IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
744 IEEE80211_CHAN_PASSIVE;
745 }
746
747 ic->ic_ibss_chan = &ic->ic_sup_channels[0];
748 ic->ic_xmit = iwp_send;
749
750 /*
751 * attach to 802.11 module
752 */
753 ieee80211_attach(ic);
754
755 /*
756 * different instance has different WPA door
757 */
758 (void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
759 ddi_driver_name(dip),
760 ddi_get_instance(dip));
761
762 /*
763 * Overwrite 80211 default configurations.
764 */
765 iwp_overwrite_ic_default(sc);
766
767 /*
768 * initialize 802.11 module
769 */
770 ieee80211_media_init(ic);
771
772 /*
773 * initialize default tx key
774 */
775 ic->ic_def_txkey = 0;
776
777 err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
778 iwp_rx_softintr, (caddr_t)sc);
779 if (err != DDI_SUCCESS) {
780 cmn_err(CE_WARN, "iwp_attach(): "
781 "add soft interrupt failed\n");
782 goto attach_fail12;
783 }
784
785 err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwp_intr,
786 (caddr_t)sc, NULL);
787 if (err != DDI_SUCCESS) {
788 cmn_err(CE_WARN, "iwp_attach(): "
789 "ddi_intr_add_handle() failed\n");
790 goto attach_fail13;
791 }
792
793 err = ddi_intr_enable(sc->sc_intr_htable[0]);
794 if (err != DDI_SUCCESS) {
795 cmn_err(CE_WARN, "iwp_attach(): "
796 "ddi_intr_enable() failed\n");
797 goto attach_fail14;
798 }
799
800 /*
801 * Initialize pointer to device specific functions
802 */
803 wd.wd_secalloc = WIFI_SEC_NONE;
804 wd.wd_opmode = ic->ic_opmode;
805 IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);
806
807 /*
808 * create relation to GLD
809 */
810 macp = mac_alloc(MAC_VERSION);
811 if (NULL == macp) {
812 cmn_err(CE_WARN, "iwp_attach(): "
813 "failed to do mac_alloc()\n");
814 goto attach_fail15;
815 }
816
817 macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI;
818 macp->m_driver = sc;
819 macp->m_dip = dip;
820 macp->m_src_addr = ic->ic_macaddr;
821 macp->m_callbacks = &iwp_m_callbacks;
822 macp->m_min_sdu = 0;
823 macp->m_max_sdu = IEEE80211_MTU;
824 macp->m_pdata = &wd;
825 macp->m_pdata_size = sizeof (wd);
826
827 /*
828 * Register the macp to mac
829 */
830 err = mac_register(macp, &ic->ic_mach);
831 mac_free(macp);
832 if (err != DDI_SUCCESS) {
833 cmn_err(CE_WARN, "iwp_attach(): "
834 "failed to do mac_register()\n");
835 goto attach_fail15;
836 }
837
838 /*
839 * Create minor node of type DDI_NT_NET_WIFI
840 */
841 (void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_SP"%d", instance);
842 err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
843 instance + 1, DDI_NT_NET_WIFI, 0);
844 if (err != DDI_SUCCESS) {
845 cmn_err(CE_WARN, "iwp_attach(): "
846 "failed to do ddi_create_minor_node()\n");
847 }
848
849 /*
850 * Notify link is down now
851 */
852 mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
853
854 /*
855 * create the mf thread to handle the link status,
856 * recovery fatal error, etc.
857 */
858 sc->sc_mf_thread_switch = 1;
859 if (NULL == sc->sc_mf_thread) {
860 sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
861 iwp_thread, sc, 0, &p0, TS_RUN, minclsyspri);
862 }
863
864 atomic_or_32(&sc->sc_flags, IWP_F_ATTACHED);
865
866 return (DDI_SUCCESS);
867
868 attach_fail15:
869 (void) ddi_intr_disable(sc->sc_intr_htable[0]);
870 attach_fail14:
871 (void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
872 attach_fail13:
873 (void) ddi_intr_remove_softint(sc->sc_soft_hdl);
874 sc->sc_soft_hdl = NULL;
875 attach_fail12:
876 ieee80211_detach(ic);
877 attach_fail11:
878 iwp_free_fw_dma(sc);
879 attach_fail10:
880 iwp_ring_free(sc);
881 attach_fail9:
882 iwp_free_kw(sc);
883 attach_fail8:
884 iwp_free_shared(sc);
885 attach_fail7:
886 iwp_destroy_locks(sc);
887 attach_fail6:
888 (void) ddi_intr_free(sc->sc_intr_htable[0]);
889 attach_fail5:
890 kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
891 attach_fail4:
892 ddi_regs_map_free(&sc->sc_handle);
893 attach_fail3:
894 ddi_regs_map_free(&sc->sc_cfg_handle);
895 attach_fail2:
896 ddi_soft_state_free(iwp_soft_state_p, instance);
897 attach_fail1:
898 return (DDI_FAILURE);
899 }
900
/*
 * iwp_detach(): detach(9E) entry point.
 *
 * DDI_DETACH: stops the monitor thread and the chipset, then releases
 * every resource acquired in iwp_attach() in reverse order.
 * DDI_SUSPEND: stops the hardware if it is running and marks the
 * softc suspended; iwp_attach(DDI_RESUME) undoes this.
 */
int
iwp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	iwp_sc_t *sc;
	ieee80211com_t *ic;
	int err;

	sc = ddi_get_soft_state(iwp_soft_state_p, ddi_get_instance(dip));
	ASSERT(sc != NULL);
	ic = &sc->sc_ic;

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
		atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);

		atomic_or_32(&sc->sc_flags, IWP_F_SUSPEND);

		if (sc->sc_flags & IWP_F_RUNNING) {
			iwp_stop(sc);
			ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

		}

		IWP_DBG((IWP_DEBUG_RESUME, "iwp_detach(): "
		    "suspend\n"));
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	if (!(sc->sc_flags & IWP_F_ATTACHED)) {
		return (DDI_FAILURE);
	}

	/*
	 * Destroy the mf_thread: clear its run switch and wait for it
	 * to signal sc_mt_cv on exit.
	 */
	sc->sc_mf_thread_switch = 0;

	mutex_enter(&sc->sc_mt_lock);
	while (sc->sc_mf_thread != NULL) {
		if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0) {
			break;
		}
	}
	mutex_exit(&sc->sc_mt_lock);

	err = mac_disable(sc->sc_ic.ic_mach);
	if (err != DDI_SUCCESS) {
		return (err);
	}

	/*
	 * stop chipset
	 */
	iwp_stop(sc);

	DELAY(500000);

	/*
	 * release buffer for calibration
	 */
	iwp_release_calib_buffer(sc);

	/*
	 * Unregister from GLD
	 */
	(void) mac_unregister(sc->sc_ic.ic_mach);

	mutex_enter(&sc->sc_glock);
	iwp_free_fw_dma(sc);
	iwp_ring_free(sc);
	iwp_free_kw(sc);
	iwp_free_shared(sc);
	mutex_exit(&sc->sc_glock);

	(void) ddi_intr_disable(sc->sc_intr_htable[0]);
	(void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
	(void) ddi_intr_free(sc->sc_intr_htable[0]);
	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));

	(void) ddi_intr_remove_softint(sc->sc_soft_hdl);
	sc->sc_soft_hdl = NULL;

	/*
	 * detach from 80211 module
	 */
	ieee80211_detach(&sc->sc_ic);

	iwp_destroy_locks(sc);

	ddi_regs_map_free(&sc->sc_handle);
	ddi_regs_map_free(&sc->sc_cfg_handle);
	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(iwp_soft_state_p, ddi_get_instance(dip));

	return (DDI_SUCCESS);
}
1002
1003 /*
1004 * destroy all locks
1005 */
1006 static void
1007 iwp_destroy_locks(iwp_sc_t *sc)
1008 {
1009 cv_destroy(&sc->sc_mt_cv);
1010 cv_destroy(&sc->sc_cmd_cv);
1011 cv_destroy(&sc->sc_put_seg_cv);
1012 cv_destroy(&sc->sc_ucode_cv);
1013 mutex_destroy(&sc->sc_mt_lock);
1014 mutex_destroy(&sc->sc_tx_lock);
1015 mutex_destroy(&sc->sc_glock);
1016 }
1017
1018 /*
1019 * Allocate an area of memory and a DMA handle for accessing it
1020 */
1021 static int
1022 iwp_alloc_dma_mem(iwp_sc_t *sc, size_t memsize,
1023 ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
1024 uint_t dma_flags, iwp_dma_t *dma_p)
1025 {
1026 caddr_t vaddr;
1027 int err = DDI_FAILURE;
1028
1029 /*
1030 * Allocate handle
1031 */
1032 err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
1033 DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
1034 if (err != DDI_SUCCESS) {
1035 dma_p->dma_hdl = NULL;
1036 return (DDI_FAILURE);
1037 }
1038
1039 /*
1040 * Allocate memory
1041 */
1042 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
1043 dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
1044 DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
1045 if (err != DDI_SUCCESS) {
1046 ddi_dma_free_handle(&dma_p->dma_hdl);
1047 dma_p->dma_hdl = NULL;
1048 dma_p->acc_hdl = NULL;
1049 return (DDI_FAILURE);
1050 }
1051
1052 /*
1053 * Bind the two together
1054 */
1055 dma_p->mem_va = vaddr;
1056 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
1057 vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
1058 &dma_p->cookie, &dma_p->ncookies);
1059 if (err != DDI_DMA_MAPPED) {
1060 ddi_dma_mem_free(&dma_p->acc_hdl);
1061 ddi_dma_free_handle(&dma_p->dma_hdl);
1062 dma_p->acc_hdl = NULL;
1063 dma_p->dma_hdl = NULL;
1064 return (DDI_FAILURE);
1065 }
1066
1067 dma_p->nslots = ~0U;
1068 dma_p->size = ~0U;
1069 dma_p->token = ~0U;
1070 dma_p->offset = 0;
1071 return (DDI_SUCCESS);
1072 }
1073
1074 /*
1075 * Free one allocated area of DMAable memory
1076 */
1077 static void
1078 iwp_free_dma_mem(iwp_dma_t *dma_p)
1079 {
1080 if (dma_p->dma_hdl != NULL) {
1081 if (dma_p->ncookies) {
1082 (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
1083 dma_p->ncookies = 0;
1084 }
1085 ddi_dma_free_handle(&dma_p->dma_hdl);
1086 dma_p->dma_hdl = NULL;
1087 }
1088
1089 if (dma_p->acc_hdl != NULL) {
1090 ddi_dma_mem_free(&dma_p->acc_hdl);
1091 dma_p->acc_hdl = NULL;
1092 }
1093 }
1094
1095 /*
1096 * copy ucode into dma buffers
1097 */
static int
iwp_alloc_fw_dma(iwp_sc_t *sc)
{
	int err = DDI_FAILURE;
	iwp_dma_t *dma_p;
	char *t;

	/*
	 * firmware image layout:
	 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
	 *
	 * 't' walks the image section by section; each section is copied
	 * into its own freshly allocated DMA area.
	 */

	/*
	 * Check every section length against its limit before copying
	 * anything, so we fail early on a corrupt/mismatched image.
	 */
	if (LE_32(sc->sc_hdr->init_textsz) > RTC_INST_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware init text size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->init_textsz));

		goto fail;
	}

	if (LE_32(sc->sc_hdr->init_datasz) > RTC_DATA_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware init data size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->init_datasz));

		goto fail;
	}

	if (LE_32(sc->sc_hdr->textsz) > RTC_INST_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware text size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->textsz));

		goto fail;
	}

	if (LE_32(sc->sc_hdr->datasz) > RTC_DATA_SIZE) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "firmware data size 0x%x is too large\n",
		    LE_32(sc->sc_hdr->datasz));

		goto fail;
	}

	/*
	 * copy text of runtime ucode (sections start right after the
	 * firmware header)
	 */
	t = (char *)(sc->sc_hdr + 1);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_text);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate text dma memory.\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_text;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "text[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));

	/*
	 * copy data and bak-data of runtime ucode.  Note both areas get
	 * the same image bytes: the backup copy is what gets reloaded
	 * into the device later, leaving the pristine copy intact.
	 */
	t += LE_32(sc->sc_hdr->textsz);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_data);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate data dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_data;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "data[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));

	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_data_bak);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate data bakup dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_data_bak;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "data_bak[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));

	/*
	 * copy text of init ucode
	 */
	t += LE_32(sc->sc_hdr->datasz);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_init_text);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate init text dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_init_text;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "init_text[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));

	/*
	 * copy data of init ucode
	 */
	t += LE_32(sc->sc_hdr->init_textsz);
	err = iwp_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
	    &fw_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_fw_init_data);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alloc_fw_dma(): "
		    "failed to allocate init data dma memory\n");
		goto fail;
	}

	dma_p = &sc->sc_dma_fw_init_data;

	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_fw_dma(): "
	    "init_data[ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	(void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));

	/*
	 * The boot section is not copied anywhere; just remember where
	 * it lives inside the firmware image.
	 */
	sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
fail:
	/*
	 * Shared exit: err is DDI_SUCCESS when we fall through from the
	 * last copy above.  On error, partially allocated areas are left
	 * for the caller to release -- NOTE(review): presumed the attach
	 * error path calls iwp_free_fw_dma(), which tolerates
	 * never-allocated entries; confirm against the caller.
	 */
	return (err);
}
1263
1264 static void
1265 iwp_free_fw_dma(iwp_sc_t *sc)
1266 {
1267 iwp_free_dma_mem(&sc->sc_dma_fw_text);
1268 iwp_free_dma_mem(&sc->sc_dma_fw_data);
1269 iwp_free_dma_mem(&sc->sc_dma_fw_data_bak);
1270 iwp_free_dma_mem(&sc->sc_dma_fw_init_text);
1271 iwp_free_dma_mem(&sc->sc_dma_fw_init_data);
1272 }
1273
1274 /*
1275 * Allocate a shared buffer between host and NIC.
1276 */
static int
iwp_alloc_shared(iwp_sc_t *sc)
{
#ifdef DEBUG
	/*
	 * dma_p is only referenced by IWP_DBG(), which compiles away in
	 * non-DEBUG builds, hence the #ifdef around its definition.
	 */
	iwp_dma_t *dma_p;
#endif
	int err = DDI_FAILURE;

	/*
	 * must be aligned on a 4K-page boundary
	 */
	err = iwp_alloc_dma_mem(sc, sizeof (iwp_shared_t),
	    &sh_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_sh);
	if (err != DDI_SUCCESS) {
		goto fail;
	}

	/* keep a typed pointer to the shared area for easy access */
	sc->sc_shared = (iwp_shared_t *)sc->sc_dma_sh.mem_va;

#ifdef DEBUG
	dma_p = &sc->sc_dma_sh;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_shared(): "
	    "sh[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (err);
fail:
	iwp_free_shared(sc);
	return (err);
}
1311
/*
 * Release the host/NIC shared area set up by iwp_alloc_shared().
 */
static void
iwp_free_shared(iwp_sc_t *sc)
{
	iwp_free_dma_mem(&sc->sc_dma_sh);
}
1317
1318 /*
1319 * Allocate a keep warm page.
1320 */
static int
iwp_alloc_kw(iwp_sc_t *sc)
{
#ifdef DEBUG
	/* only referenced by IWP_DBG(), which is a no-op without DEBUG */
	iwp_dma_t *dma_p;
#endif
	int err = DDI_FAILURE;

	/*
	 * must be aligned on a 4K-page boundary
	 */
	err = iwp_alloc_dma_mem(sc, IWP_KW_SIZE,
	    &kw_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &sc->sc_dma_kw);
	if (err != DDI_SUCCESS) {
		goto fail;
	}

#ifdef DEBUG
	dma_p = &sc->sc_dma_kw;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_kw(): "
	    "kw[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (err);
fail:
	iwp_free_kw(sc);
	return (err);
}
1353
/*
 * Release the keep-warm page set up by iwp_alloc_kw().
 */
static void
iwp_free_kw(iwp_sc_t *sc)
{
	iwp_free_dma_mem(&sc->sc_dma_kw);
}
1359
1360 /*
1361 * initialize RX ring buffers
1362 */
static int
iwp_alloc_rx_ring(iwp_sc_t *sc)
{
	iwp_rx_ring_t *ring;
	iwp_rx_data_t *data;
#ifdef DEBUG
	/* only referenced by IWP_DBG(), which is a no-op without DEBUG */
	iwp_dma_t *dma_p;
#endif
	int i, err = DDI_FAILURE;

	ring = &sc->sc_rxq;
	ring->cur = 0;

	/*
	 * allocate RX description ring buffer: one 32-bit descriptor
	 * per RX queue slot
	 */
	err = iwp_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
	    &ring_desc_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_desc);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
		    "dma alloc rx ring desc "
		    "failed\n"));
		goto fail;
	}

	ring->desc = (uint32_t *)ring->dma_desc.mem_va;
#ifdef DEBUG
	dma_p = &ring->dma_desc;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
	    "rx bd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/*
	 * Allocate Rx frame buffers, one per descriptor slot.
	 */
	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		data = &ring->data[i];
		err = iwp_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
		    &rx_buffer_dma_attr, &iwp_dma_accattr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    &data->dma_data);
		if (err != DDI_SUCCESS) {
			IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
			    "dma alloc rx ring "
			    "buf[%d] failed\n", i));
			goto fail;
		}
		/*
		 * This hardware takes bits [8:36] of the buffer's
		 * physical address in each descriptor (unlike the 3945,
		 * which used bits [0:31]), hence the shift by 8.
		 */
		ring->desc[i] = (uint32_t)
		    (data->dma_data.cookie.dmac_address >> 8);
	}

#ifdef DEBUG
	dma_p = &ring->data[0].dma_data;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_rx_ring(): "
	    "rx buffer[0][ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	/* push the filled descriptor ring out to the device */
	IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);

	return (err);

fail:
	iwp_free_rx_ring(sc);
	return (err);
}
1439
1440 /*
1441 * disable RX ring
1442 */
static void
iwp_reset_rx_ring(iwp_sc_t *sc)
{
	int n;

	iwp_mac_access_enter(sc);
	/* stop the RX DMA channel */
	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/*
	 * Poll up to ~2 seconds (2000 x 1ms) for the idle indication in
	 * the RX status register.  NOTE(review): bit 24 presumably is
	 * the channel-0 idle flag -- confirm against the register map.
	 */
	for (n = 0; n < 2000; n++) {
		if (IWP_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24)) {
			break;
		}
		DELAY(1000);
	}
#ifdef DEBUG
	if (2000 == n) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_reset_rx_ring(): "
		    "timeout resetting Rx ring\n"));
	}
#endif
	iwp_mac_access_exit(sc);

	sc->sc_rxq.cur = 0;
}
1466
static void
iwp_free_rx_ring(iwp_sc_t *sc)
{
	int i;

	/*
	 * Release every RX frame buffer, syncing any that were actually
	 * bound before freeing.  iwp_free_dma_mem() tolerates entries
	 * that were never allocated, so this is safe on the partial
	 * allocations left by an iwp_alloc_rx_ring() failure.
	 */
	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		if (sc->sc_rxq.data[i].dma_data.dma_hdl) {
			IWP_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
			    DDI_DMA_SYNC_FORCPU);
		}

		iwp_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
	}

	/* then release the descriptor ring itself */
	if (sc->sc_rxq.dma_desc.dma_hdl) {
		IWP_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
	}

	iwp_free_dma_mem(&sc->sc_rxq.dma_desc);
}
1487
1488 /*
1489 * initialize TX ring buffers
1490 */
/*
 * Set up one TX ring: a hardware descriptor ring, a parallel array of
 * ucode command buffers, and per-slot frame buffers.  'slots' is kept
 * in ring->window; 'qid' identifies the hardware queue.  On failure all
 * partial allocations are released via iwp_free_tx_ring().
 */
static int
iwp_alloc_tx_ring(iwp_sc_t *sc, iwp_tx_ring_t *ring,
    int slots, int qid)
{
	iwp_tx_data_t *data;
	iwp_tx_desc_t *desc_h;
	uint32_t paddr_desc_h;
	iwp_cmd_t *cmd_h;
	uint32_t paddr_cmd_h;
#ifdef DEBUG
	/* only referenced by IWP_DBG(), which is a no-op without DEBUG */
	iwp_dma_t *dma_p;
#endif
	int i, err = DDI_FAILURE;
	ring->qid = qid;
	ring->count = TFD_QUEUE_SIZE_MAX;
	ring->window = slots;
	ring->queued = 0;
	ring->cur = 0;
	ring->desc_cur = 0;

	/*
	 * allocate buffer for TX descriptor ring
	 */
	err = iwp_alloc_dma_mem(sc,
	    TFD_QUEUE_SIZE_MAX * sizeof (iwp_tx_desc_t),
	    &ring_desc_dma_attr, &iwp_dma_descattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_desc);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
		    "dma alloc tx ring desc[%d] "
		    "failed\n", qid));
		goto fail;
	}

#ifdef DEBUG
	dma_p = &ring->dma_desc;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
	    "tx bd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	desc_h = (iwp_tx_desc_t *)ring->dma_desc.mem_va;
	paddr_desc_h = ring->dma_desc.cookie.dmac_address;

	/*
	 * allocate buffer for ucode command
	 */
	err = iwp_alloc_dma_mem(sc,
	    TFD_QUEUE_SIZE_MAX * sizeof (iwp_cmd_t),
	    &cmd_dma_attr, &iwp_dma_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &ring->dma_cmd);
	if (err != DDI_SUCCESS) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
		    "dma alloc tx ring cmd[%d]"
		    " failed\n", qid));
		goto fail;
	}

#ifdef DEBUG
	dma_p = &ring->dma_cmd;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
	    "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	cmd_h = (iwp_cmd_t *)ring->dma_cmd.mem_va;
	paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;

	/*
	 * Allocate Tx frame buffers.
	 * KM_NOSLEEP because this may run in a context where sleeping
	 * for memory is not desirable; failure is handled like any
	 * other allocation failure.
	 */
	ring->data = kmem_zalloc(sizeof (iwp_tx_data_t) * TFD_QUEUE_SIZE_MAX,
	    KM_NOSLEEP);
	if (NULL == ring->data) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
		    "could not allocate "
		    "tx data slots\n"));
		goto fail;
	}

	for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
		data = &ring->data[i];
		err = iwp_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
		    &tx_buffer_dma_attr, &iwp_dma_accattr,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
		    &data->dma_data);
		if (err != DDI_SUCCESS) {
			IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
			    "dma alloc tx "
			    "ring buf[%d] failed\n", i));
			goto fail;
		}

		/*
		 * Point each slot at its descriptor and command buffer,
		 * computing the matching device (physical) addresses
		 * from the byte offset into the shared areas.
		 */
		data->desc = desc_h + i;
		data->paddr_desc = paddr_desc_h +
		    _PTRDIFF(data->desc, desc_h);
		data->cmd = cmd_h + i;
		data->paddr_cmd = paddr_cmd_h +
		    _PTRDIFF(data->cmd, cmd_h);
	}
#ifdef DEBUG
	dma_p = &ring->data[0].dma_data;
#endif
	IWP_DBG((IWP_DEBUG_DMA, "iwp_alloc_tx_ring(): "
	    "tx buffer[0][ncookies:%d addr:%lx "
	    "size:%lx]\n",
	    dma_p->ncookies, dma_p->cookie.dmac_address,
	    dma_p->cookie.dmac_size));

	return (err);

fail:
	iwp_free_tx_ring(ring);

	return (err);
}
1611
1612 /*
1613 * disable TX ring
1614 */
static void
iwp_reset_tx_ring(iwp_sc_t *sc, iwp_tx_ring_t *ring)
{
	iwp_tx_data_t *data;
	int i, n;

	iwp_mac_access_enter(sc);

	/* stop this queue's TX DMA channel */
	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
	/* poll up to ~2ms (200 x 10us) for the channel to go idle */
	for (n = 0; n < 200; n++) {
		if (IWP_READ(sc, IWP_FH_TSSR_TX_STATUS_REG) &
		    IWP_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid)) {
			break;
		}
		DELAY(10);
	}

#ifdef DEBUG
	if (200 == n) {
		IWP_DBG((IWP_DEBUG_DMA, "iwp_reset_tx_ring(): "
		    "timeout reset tx ring %d\n",
		    ring->qid));
	}
#endif

	iwp_mac_access_exit(sc);

	/* by pass, if it's quiesce */
	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
		for (i = 0; i < ring->count; i++) {
			data = &ring->data[i];
			IWP_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
		}
	}

	/* ring is now empty; restart producer/consumer indexes */
	ring->queued = 0;
	ring->cur = 0;
	ring->desc_cur = 0;
}
1654
/*
 * Release everything iwp_alloc_tx_ring() set up for one TX ring.
 * Safe on a partially constructed ring: each piece is checked before
 * the sync, and iwp_free_dma_mem() tolerates never-allocated entries.
 */
static void
iwp_free_tx_ring(iwp_tx_ring_t *ring)
{
	int i;

	if (ring->dma_desc.dma_hdl != NULL) {
		IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
	}
	iwp_free_dma_mem(&ring->dma_desc);

	if (ring->dma_cmd.dma_hdl != NULL) {
		IWP_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
	}
	iwp_free_dma_mem(&ring->dma_cmd);

	if (ring->data != NULL) {
		/*
		 * ring->count is TFD_QUEUE_SIZE_MAX, the size the data
		 * array was allocated with in iwp_alloc_tx_ring().
		 */
		for (i = 0; i < ring->count; i++) {
			if (ring->data[i].dma_data.dma_hdl) {
				IWP_DMA_SYNC(ring->data[i].dma_data,
				    DDI_DMA_SYNC_FORDEV);
			}
			iwp_free_dma_mem(&ring->data[i].dma_data);
		}
		kmem_free(ring->data, ring->count * sizeof (iwp_tx_data_t));
	}
}
1681
1682 /*
1683 * initialize TX and RX ring
1684 */
static int
iwp_ring_init(iwp_sc_t *sc)
{
	int i, err = DDI_FAILURE;

	/*
	 * Set up every data TX queue first; the command queue slot is
	 * skipped here and configured separately below with its own
	 * slot count.
	 */
	for (i = 0; i < IWP_NUM_QUEUES; i++) {
		if (IWP_CMD_QUEUE_NUM == i) {
			continue;
		}

		err = iwp_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
		    i);
		if (err != DDI_SUCCESS) {
			goto fail;
		}
	}

	/*
	 * initialize command queue
	 */
	err = iwp_alloc_tx_ring(sc, &sc->sc_txq[IWP_CMD_QUEUE_NUM],
	    TFD_CMD_SLOTS, IWP_CMD_QUEUE_NUM);
	if (err != DDI_SUCCESS) {
		goto fail;
	}

	err = iwp_alloc_rx_ring(sc);
	if (err != DDI_SUCCESS) {
		goto fail;
	}

	/*
	 * Shared exit for both success and failure; on failure the
	 * caller is responsible for tearing down already-built rings
	 * (via iwp_ring_free()).
	 */
fail:
	return (err);
}
1719
1720 static void
1721 iwp_ring_free(iwp_sc_t *sc)
1722 {
1723 int i = IWP_NUM_QUEUES;
1724
1725 iwp_free_rx_ring(sc);
1726 while (--i >= 0) {
1727 iwp_free_tx_ring(&sc->sc_txq[i]);
1728 }
1729 }
1730
1731 /* ARGSUSED */
1732 static ieee80211_node_t *
1733 iwp_node_alloc(ieee80211com_t *ic)
1734 {
1735 iwp_amrr_t *amrr;
1736
1737 amrr = kmem_zalloc(sizeof (iwp_amrr_t), KM_SLEEP);
1738 if (NULL == amrr) {
1739 cmn_err(CE_WARN, "iwp_node_alloc(): "
1740 "failed to allocate memory for amrr structure\n");
1741 return (NULL);
1742 }
1743
1744 iwp_amrr_init(amrr);
1745
1746 return (&amrr->in);
1747 }
1748
/*
 * net80211 node destructor: run the framework's cleanup hook, release
 * any cached information elements, then free the containing iwp_amrr_t
 * (the node is embedded in it -- see iwp_node_alloc()).
 */
static void
iwp_node_free(ieee80211_node_t *in)
{
	ieee80211com_t *ic;

	/* defensive: never dereference a bad node from net80211 */
	if ((NULL == in) ||
	    (NULL == in->in_ic)) {
		cmn_err(CE_WARN, "iwp_node_free() "
		    "Got a NULL point from Net80211 module\n");
		return;
	}
	ic = in->in_ic;

	if (ic->ic_node_cleanup != NULL) {
		ic->ic_node_cleanup(in);
	}

	if (in->in_wpa_ie != NULL) {
		ieee80211_free(in->in_wpa_ie);
	}

	if (in->in_wme_ie != NULL) {
		ieee80211_free(in->in_wme_ie);
	}

	if (in->in_htcap_ie != NULL) {
		ieee80211_free(in->in_htcap_ie);
	}

	/* the node was allocated as an iwp_amrr_t in iwp_node_alloc() */
	kmem_free(in, sizeof (iwp_amrr_t));
}
1780
1781
1782 /*
1783 * change station's state. this function will be invoked by 80211 module
1784 * when need to change staton's state.
1785 */
static int
iwp_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
{
	iwp_sc_t *sc;
	ieee80211_node_t *in;
	enum ieee80211_state ostate;
	iwp_add_sta_t node;
	int i, err = IWP_FAIL;

	if (NULL == ic) {
		return (err);
	}
	sc = (iwp_sc_t *)ic;
	in = ic->ic_bss;
	ostate = ic->ic_state;

	mutex_enter(&sc->sc_glock);

	switch (nstate) {
	case IEEE80211_S_SCAN:
		switch (ostate) {
		case IEEE80211_S_INIT:
			/* first scan request: mark scanning, blink LED */
			atomic_or_32(&sc->sc_flags, IWP_F_SCANNING);
			iwp_set_led(sc, 2, 10, 2);

			/*
			 * clear association to receive beacons from
			 * all BSS'es
			 */
			sc->sc_config.assoc_id = 0;
			sc->sc_config.filter_flags &=
			    ~LE_32(RXON_FILTER_ASSOC_MSK);

			IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
			    "config chan %d "
			    "flags %x filter_flags %x\n",
			    LE_16(sc->sc_config.chan),
			    LE_32(sc->sc_config.flags),
			    LE_32(sc->sc_config.filter_flags)));

			err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
			    sizeof (iwp_rxon_cmd_t), 1);
			if (err != IWP_SUCCESS) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not clear association\n");
				/* roll back the scanning flag on error */
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				mutex_exit(&sc->sc_glock);
				return (err);
			}

			/* add broadcast node to send probe request */
			(void) memset(&node, 0, sizeof (node));
			(void) memset(&node.sta.addr, 0xff, IEEE80211_ADDR_LEN);
			node.sta.sta_id = IWP_BROADCAST_ID;
			err = iwp_cmd(sc, REPLY_ADD_STA, &node,
			    sizeof (node), 1);
			if (err != IWP_SUCCESS) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not add broadcast node\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				mutex_exit(&sc->sc_glock);
				return (err);
			}
			break;
		case IEEE80211_S_SCAN:
			/*
			 * sc_glock is dropped around the net80211
			 * callback -- NOTE(review): presumably because
			 * sc_newstate() can re-enter the driver; confirm.
			 */
			mutex_exit(&sc->sc_glock);
			/* step to next channel before actual FW scan */
			err = sc->sc_newstate(ic, nstate, arg);
			mutex_enter(&sc->sc_glock);
			if ((err != 0) || ((err = iwp_scan(sc)) != 0)) {
				cmn_err(CE_WARN, "iwp_newstate(): "
				    "could not initiate scan\n");
				atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
				ieee80211_cancel_scan(ic);
			}
			/* net80211 handler already ran above; return now */
			mutex_exit(&sc->sc_glock);
			return (err);
		default:
			break;
		}
		sc->sc_clk = 0;
		break;

	case IEEE80211_S_AUTH:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}

		/*
		 * reset state to handle reassociations correctly
		 */
		sc->sc_config.assoc_id = 0;
		sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);

		/*
		 * before sending authentication and association request frame,
		 * we need do something in the hardware, such as setting the
		 * channel same to the target AP...
		 */
		if ((err = iwp_hw_set_before_auth(sc)) != 0) {
			IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
			    "could not send authentication request\n"));
			mutex_exit(&sc->sc_glock);
			return (err);
		}
		break;

	case IEEE80211_S_RUN:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}

		if (IEEE80211_M_MONITOR == ic->ic_opmode) {
			/* let LED blink when monitoring */
			iwp_set_led(sc, 2, 10, 10);
			break;
		}

		IWP_DBG((IWP_DEBUG_80211, "iwp_newstate(): "
		    "associated.\n"));

		err = iwp_run_state_config(sc);
		if (err != IWP_SUCCESS) {
			cmn_err(CE_WARN, "iwp_newstate(): "
			    "failed to set up association\n");
			mutex_exit(&sc->sc_glock);
			return (err);
		}

		/*
		 * start automatic rate control
		 */
		if (IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) {
			atomic_or_32(&sc->sc_flags, IWP_F_RATE_AUTO_CTL);

			/*
			 * set rate to some reasonable initial value:
			 * the highest basic rate not above 36Mbps
			 * (72 half-Mbps units)
			 */
			i = in->in_rates.ir_nrates - 1;
			while (i > 0 && IEEE80211_RATE(i) > 72) {
				i--;
			}
			in->in_txrate = i;

		} else {
			atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);
		}

		/*
		 * set LED on after associated
		 */
		iwp_set_led(sc, 2, 0, 1);
		break;

	case IEEE80211_S_INIT:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}
		/*
		 * set LED off after init
		 */
		iwp_set_led(sc, 2, 1, 0);
		break;

	case IEEE80211_S_ASSOC:
		if (ostate == IEEE80211_S_SCAN) {
			atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
		}
		break;
	}

	mutex_exit(&sc->sc_glock);

	/* defer to the net80211 default state handler */
	return (sc->sc_newstate(ic, nstate, arg));
}
1961
1962 /*
1963 * exclusive access to mac begin.
1964 */
static void
iwp_mac_access_enter(iwp_sc_t *sc)
{
	uint32_t tmp;
	int n;

	/* request MAC access by setting the wake bit */
	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	IWP_WRITE(sc, CSR_GP_CNTRL,
	    tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* wait until we succeed (up to 1000 x 10us = 10ms) */
	for (n = 0; n < 1000; n++) {
		if ((IWP_READ(sc, CSR_GP_CNTRL) &
		    (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
		    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
		    CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN) {
			break;
		}
		DELAY(10);
	}

#ifdef DEBUG
	if (1000 == n) {
		IWP_DBG((IWP_DEBUG_PIO, "iwp_mac_access_enter(): "
		    "could not lock memory\n"));
	}
#endif
}
1993
1994 /*
1995 * exclusive access to mac end.
1996 */
static void
iwp_mac_access_exit(iwp_sc_t *sc)
{
	/* drop the access request bit set by iwp_mac_access_enter() */
	uint32_t tmp = IWP_READ(sc, CSR_GP_CNTRL);
	IWP_WRITE(sc, CSR_GP_CNTRL,
	    tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
2004
2005 /*
2006 * this function defined here for future use.
2007 * static uint32_t
2008 * iwp_mem_read(iwp_sc_t *sc, uint32_t addr)
2009 * {
2010 * IWP_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
2011 * return (IWP_READ(sc, HBUS_TARG_MEM_RDAT));
2012 * }
2013 */
2014
2015 /*
2016 * write mac memory
2017 */
static void
iwp_mem_write(iwp_sc_t *sc, uint32_t addr, uint32_t data)
{
	/* indirect access: latch the target address, then the data */
	IWP_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
	IWP_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
}
2024
2025 /*
2026 * read mac register
2027 */
static uint32_t
iwp_reg_read(iwp_sc_t *sc, uint32_t addr)
{
	/*
	 * Indirect periphery-register read.  NOTE(review): the
	 * (3 << 24) in the high address bits presumably selects the
	 * access width/mode -- confirm against the hardware register
	 * map; the same value is used in iwp_reg_write().
	 */
	IWP_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
	return (IWP_READ(sc, HBUS_TARG_PRPH_RDAT));
}
2034
2035 /*
2036 * write mac register
2037 */
static void
iwp_reg_write(iwp_sc_t *sc, uint32_t addr, uint32_t data)
{
	/* indirect periphery-register write; see note in iwp_reg_read() */
	IWP_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
	IWP_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
}
2044
2045
2046 /*
2047 * steps of loading ucode:
2048 * load init ucode=>init alive=>calibrate=>
2049 * receive calibration result=>reinitialize NIC=>
2050 * load runtime ucode=>runtime alive=>
2051 * send calibration result=>running.
2052 */
/*
 * Load the init uCode (text then data) into the device.  Each section
 * transfer completes asynchronously: the interrupt handler sets
 * IWP_F_PUT_SEG and signals sc_put_seg_cv.  The caller must hold
 * sc_glock (required by the cv_timedwait() calls below).
 */
static int
iwp_load_init_firmware(iwp_sc_t *sc)
{
	int err = IWP_FAIL;
	clock_t clk;

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load init_text section of uCode to hardware
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_init_text.cookie.dmac_address,
	    RTC_INST_LOWER_BOUND, sc->sc_dma_fw_init_text.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "failed to write init uCode.\n");
		return (err);
	}

	/* one-second deadline for the transfer-complete notification */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/* wait loading init_text until completed or timeout */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "timeout waiting for init uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load init_data section of uCode to hardware
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_init_data.cookie.dmac_address,
	    RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_init_data.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "failed to write init_data uCode.\n");
		return (err);
	}

	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/*
	 * wait loading init_data until completed or timeout
	 */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_init_firmware(): "
		    "timeout waiting for init_data uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	return (err);
}
2121
/*
 * Load the runtime uCode (text then data) into the device, mirroring
 * iwp_load_init_firmware().  The caller must hold sc_glock (required by
 * the cv_timedwait() calls below).
 */
static int
iwp_load_run_firmware(iwp_sc_t *sc)
{
	int err = IWP_FAIL;
	clock_t clk;

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load text section of runtime uCode to hardware
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_text.cookie.dmac_address,
	    RTC_INST_LOWER_BOUND, sc->sc_dma_fw_text.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "failed to write run uCode.\n");
		return (err);
	}

	/* one-second deadline for the transfer-complete notification */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/* wait loading run_text until completed or timeout */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "timeout waiting for run uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	/*
	 * load run_data section of uCode to hardware.
	 * The backup copy of the data section is transferred; its size
	 * equals sc_dma_fw_data's since both were allocated with the
	 * header's datasz (see iwp_alloc_fw_dma()).
	 */
	err = iwp_put_seg_fw(sc, sc->sc_dma_fw_data_bak.cookie.dmac_address,
	    RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_data.cookie.dmac_size);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "failed to write run_data uCode.\n");
		return (err);
	}

	clk = ddi_get_lbolt() + drv_usectohz(1000000);

	/*
	 * wait loading run_data until completed or timeout
	 */
	while (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_PUT_SEG)) {
		cmn_err(CE_WARN, "iwp_load_run_firmware(): "
		    "timeout waiting for run_data uCode load.\n");
		return (IWP_FAIL);
	}

	atomic_and_32(&sc->sc_flags, ~IWP_F_PUT_SEG);

	return (err);
}
2190
2191 /*
2192 * this function will be invoked to receive phy information
2193 * when a frame is received.
2194 */
static void
iwp_rx_phy_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
{

	/* mark the PHY result as valid for the following MPDU intr */
	sc->sc_rx_phy_res.flag = 1;

	/*
	 * Stash the PHY statistics (which follow the descriptor header)
	 * so iwp_rx_mpdu_intr() can compute RSSI for the frame that
	 * arrives next.
	 */
	(void) memcpy(sc->sc_rx_phy_res.buf, (uint8_t *)(desc + 1),
	    sizeof (iwp_rx_phy_res_t));
}
2204
2205 /*
2206 * this function will be invoked to receive body of frame when
2207 * a frame is received.
2208 */
2209 static void
2210 iwp_rx_mpdu_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2211 {
2212 ieee80211com_t *ic = &sc->sc_ic;
2213 #ifdef DEBUG
2214 iwp_rx_ring_t *ring = &sc->sc_rxq;
2215 #endif
2216 struct ieee80211_frame *wh;
2217 struct iwp_rx_non_cfg_phy *phyinfo;
2218 struct iwp_rx_mpdu_body_size *mpdu_size;
2219
2220 mblk_t *mp;
2221 int16_t t;
2222 uint16_t len, rssi, agc;
2223 uint32_t temp, crc, *tail;
2224 uint32_t arssi, brssi, crssi, mrssi;
2225 iwp_rx_phy_res_t *stat;
2226 ieee80211_node_t *in;
2227
2228 /*
2229 * assuming not 11n here. cope with 11n in phase-II
2230 */
2231 mpdu_size = (struct iwp_rx_mpdu_body_size *)(desc + 1);
2232 stat = (iwp_rx_phy_res_t *)sc->sc_rx_phy_res.buf;
2233 if (stat->cfg_phy_cnt > 20) {
2234 return;
2235 }
2236
2237 phyinfo = (struct iwp_rx_non_cfg_phy *)stat->non_cfg_phy;
2238 temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_AGC_IDX]);
2239 agc = (temp & IWP_OFDM_AGC_MSK) >> IWP_OFDM_AGC_BIT_POS;
2240
2241 temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_RSSI_AB_IDX]);
2242 arssi = (temp & IWP_OFDM_RSSI_A_MSK) >> IWP_OFDM_RSSI_A_BIT_POS;
2243 brssi = (temp & IWP_OFDM_RSSI_B_MSK) >> IWP_OFDM_RSSI_B_BIT_POS;
2244
2245 temp = LE_32(phyinfo->non_cfg_phy[IWP_RX_RES_RSSI_C_IDX]);
2246 crssi = (temp & IWP_OFDM_RSSI_C_MSK) >> IWP_OFDM_RSSI_C_BIT_POS;
2247
2248 mrssi = MAX(arssi, brssi);
2249 mrssi = MAX(mrssi, crssi);
2250
2251 t = mrssi - agc - IWP_RSSI_OFFSET;
2252 /*
2253 * convert dBm to percentage
2254 */
2255 rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t)))
2256 / (75 * 75);
2257 if (rssi > 100) {
2258 rssi = 100;
2259 }
2260 if (rssi < 1) {
2261 rssi = 1;
2262 }
2263
2264 /*
2265 * size of frame, not include FCS
2266 */
2267 len = LE_16(mpdu_size->byte_count);
2268 tail = (uint32_t *)((uint8_t *)(desc + 1) +
2269 sizeof (struct iwp_rx_mpdu_body_size) + len);
2270 bcopy(tail, &crc, 4);
2271
2272 IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
2273 "rx intr: idx=%d phy_len=%x len=%d "
2274 "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
2275 "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
2276 len, stat->rate.r.s.rate, stat->channel,
2277 LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
2278 stat->cfg_phy_cnt, LE_32(crc)));
2279
2280 if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
2281 IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
2282 "rx frame oversize\n"));
2283 return;
2284 }
2285
2286 /*
2287 * discard Rx frames with bad CRC
2288 */
2289 if ((LE_32(crc) &
2290 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
2291 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
2292 IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
2293 "rx crc error tail: %x\n",
2294 LE_32(crc)));
2295 sc->sc_rx_err++;
2296 return;
2297 }
2298
2299 wh = (struct ieee80211_frame *)
2300 ((uint8_t *)(desc + 1)+ sizeof (struct iwp_rx_mpdu_body_size));
2301
2302 if (IEEE80211_FC0_SUBTYPE_ASSOC_RESP == *(uint8_t *)wh) {
2303 sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
2304 IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
2305 "rx : association id = %x\n",
2306 sc->sc_assoc_id));
2307 }
2308
2309 #ifdef DEBUG
2310 if (iwp_dbg_flags & IWP_DEBUG_RX) {
2311 ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
2312 }
2313 #endif
2314
2315 in = ieee80211_find_rxnode(ic, wh);
2316 mp = allocb(len, BPRI_MED);
2317 if (mp) {
2318 (void) memcpy(mp->b_wptr, wh, len);
2319 mp->b_wptr += len;
2320
2321 /*
2322 * send the frame to the 802.11 layer
2323 */
2324 (void) ieee80211_input(ic, mp, in, rssi, 0);
2325 } else {
2326 sc->sc_rx_nobuf++;
2327 IWP_DBG((IWP_DEBUG_RX, "iwp_rx_mpdu_intr(): "
2328 "alloc rx buf failed\n"));
2329 }
2330
2331 /*
2332 * release node reference
2333 */
2334 ieee80211_free_node(in);
2335 }
2336
2337 /*
2338 * process correlative affairs after a frame is sent.
2339 */
static void
iwp_tx_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	/*
	 * NOTE(review): qid is masked with 0x3 here but with 7 in
	 * iwp_cmd_intr() -- verify the mask covers every TX queue id.
	 */
	iwp_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
	iwp_tx_stat_t *stat = (iwp_tx_stat_t *)(desc + 1);
	iwp_amrr_t *amrr;

	if (NULL == ic->ic_bss) {
		return;
	}

	/* the bss node embeds the rate-control state */
	amrr = (iwp_amrr_t *)ic->ic_bss;

	amrr->txcnt++;
	IWP_DBG((IWP_DEBUG_RATECTL, "iwp_tx_intr(): "
	    "tx: %d cnt\n", amrr->txcnt));

	/* account retransmissions for rate control and statistics */
	if (stat->ntries > 0) {
		amrr->retrycnt++;
		sc->sc_tx_retries++;
		IWP_DBG((IWP_DEBUG_TX, "iwp_tx_intr(): "
		    "tx: %d retries\n",
		    sc->sc_tx_retries));
	}

	/* a completion means the TX path is alive; clear the watchdog */
	mutex_enter(&sc->sc_mt_lock);
	sc->sc_tx_timer = 0;
	mutex_exit(&sc->sc_mt_lock);

	mutex_enter(&sc->sc_tx_lock);

	ring->queued--;
	if (ring->queued < 0) {
		ring->queued = 0;
	}

	/*
	 * if transmit was throttled, restart it once the ring has
	 * drained below 1/8 of its capacity
	 */
	if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count >> 3))) {
		sc->sc_need_reschedule = 0;
		mutex_exit(&sc->sc_tx_lock);
		mac_tx_update(ic->ic_mach);
		mutex_enter(&sc->sc_tx_lock);
	}

	mutex_exit(&sc->sc_tx_lock);
}
2386
2387 /*
2388 * inform a given command has been executed
2389 */
2390 static void
2391 iwp_cmd_intr(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2392 {
2393 if ((desc->hdr.qid & 7) != 4) {
2394 return;
2395 }
2396
2397 if (sc->sc_cmd_accum > 0) {
2398 sc->sc_cmd_accum--;
2399 return;
2400 }
2401
2402 mutex_enter(&sc->sc_glock);
2403
2404 sc->sc_cmd_flag = SC_CMD_FLG_DONE;
2405
2406 cv_signal(&sc->sc_cmd_cv);
2407
2408 mutex_exit(&sc->sc_glock);
2409
2410 IWP_DBG((IWP_DEBUG_CMD, "iwp_cmd_intr(): "
2411 "qid=%x idx=%d flags=%x type=0x%x\n",
2412 desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
2413 desc->hdr.type));
2414 }
2415
2416 /*
2417 * this function will be invoked when alive notification occur.
2418 */
2419 static void
2420 iwp_ucode_alive(iwp_sc_t *sc, iwp_rx_desc_t *desc)
2421 {
2422 uint32_t rv;
2423 struct iwp_calib_cfg_cmd cmd;
2424 struct iwp_alive_resp *ar =
2425 (struct iwp_alive_resp *)(desc + 1);
2426 struct iwp_calib_results *res_p = &sc->sc_calib_results;
2427
2428 /*
2429 * the microcontroller is ready
2430 */
2431 IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2432 "microcode alive notification minor: %x major: %x type: "
2433 "%x subtype: %x\n",
2434 ar->ucode_minor, ar->ucode_minor, ar->ver_type, ar->ver_subtype));
2435
2436 #ifdef DEBUG
2437 if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
2438 IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2439 "microcontroller initialization failed\n"));
2440 }
2441 #endif
2442
2443 /*
2444 * determine if init alive or runtime alive.
2445 */
2446 if (INITIALIZE_SUBTYPE == ar->ver_subtype) {
2447 IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2448 "initialization alive received.\n"));
2449
2450 (void) memcpy(&sc->sc_card_alive_init, ar,
2451 sizeof (struct iwp_init_alive_resp));
2452
2453 /*
2454 * necessary configuration to NIC
2455 */
2456 mutex_enter(&sc->sc_glock);
2457
2458 rv = iwp_alive_common(sc);
2459 if (rv != IWP_SUCCESS) {
2460 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2461 "common alive process failed in init alive.\n");
2462 mutex_exit(&sc->sc_glock);
2463 return;
2464 }
2465
2466 (void) memset(&cmd, 0, sizeof (cmd));
2467
2468 cmd.ucd_calib_cfg.once.is_enable = IWP_CALIB_INIT_CFG_ALL;
2469 cmd.ucd_calib_cfg.once.start = IWP_CALIB_INIT_CFG_ALL;
2470 cmd.ucd_calib_cfg.once.send_res = IWP_CALIB_INIT_CFG_ALL;
2471 cmd.ucd_calib_cfg.flags = IWP_CALIB_INIT_CFG_ALL;
2472
2473 /*
2474 * require ucode execute calibration
2475 */
2476 rv = iwp_cmd(sc, CALIBRATION_CFG_CMD, &cmd, sizeof (cmd), 1);
2477 if (rv != IWP_SUCCESS) {
2478 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2479 "failed to send calibration configure command.\n");
2480 mutex_exit(&sc->sc_glock);
2481 return;
2482 }
2483
2484 mutex_exit(&sc->sc_glock);
2485
2486 } else { /* runtime alive */
2487
2488 IWP_DBG((IWP_DEBUG_FW, "iwp_ucode_alive(): "
2489 "runtime alive received.\n"));
2490
2491 (void) memcpy(&sc->sc_card_alive_run, ar,
2492 sizeof (struct iwp_alive_resp));
2493
2494 mutex_enter(&sc->sc_glock);
2495
2496 /*
2497 * necessary configuration to NIC
2498 */
2499 rv = iwp_alive_common(sc);
2500 if (rv != IWP_SUCCESS) {
2501 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2502 "common alive process failed in run alive.\n");
2503 mutex_exit(&sc->sc_glock);
2504 return;
2505 }
2506
2507 /*
2508 * send the result of local oscilator calibration to uCode.
2509 */
2510 if (res_p->lo_res != NULL) {
2511 rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2512 res_p->lo_res, res_p->lo_res_len, 1);
2513 if (rv != IWP_SUCCESS) {
2514 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2515 "failed to send local"
2516 "oscilator calibration command.\n");
2517 mutex_exit(&sc->sc_glock);
2518 return;
2519 }
2520
2521 DELAY(1000);
2522 }
2523
2524 /*
2525 * send the result of TX IQ calibration to uCode.
2526 */
2527 if (res_p->tx_iq_res != NULL) {
2528 rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2529 res_p->tx_iq_res, res_p->tx_iq_res_len, 1);
2530 if (rv != IWP_SUCCESS) {
2531 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2532 "failed to send TX IQ"
2533 "calibration command.\n");
2534 mutex_exit(&sc->sc_glock);
2535 return;
2536 }
2537
2538 DELAY(1000);
2539 }
2540
2541 /*
2542 * send the result of TX IQ perd calibration to uCode.
2543 */
2544 if (res_p->tx_iq_perd_res != NULL) {
2545 rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2546 res_p->tx_iq_perd_res,
2547 res_p->tx_iq_perd_res_len, 1);
2548 if (rv != IWP_SUCCESS) {
2549 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2550 "failed to send TX IQ perd"
2551 "calibration command.\n");
2552 mutex_exit(&sc->sc_glock);
2553 return;
2554 }
2555
2556 DELAY(1000);
2557 }
2558
2559 /*
2560 * send the result of Base Band calibration to uCode.
2561 */
2562 if (res_p->base_band_res != NULL) {
2563 rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2564 res_p->base_band_res,
2565 res_p->base_band_res_len, 1);
2566 if (rv != IWP_SUCCESS) {
2567 cmn_err(CE_WARN, "iwp_ucode_alive(): "
2568 "failed to send Base Band"
2569 "calibration command.\n");
2570 mutex_exit(&sc->sc_glock);
2571 return;
2572 }
2573
2574 DELAY(1000);
2575 }
2576
2577 atomic_or_32(&sc->sc_flags, IWP_F_FW_INIT);
2578 cv_signal(&sc->sc_ucode_cv);
2579
2580 mutex_exit(&sc->sc_glock);
2581 }
2582
2583 }
2584
2585 /*
2586 * deal with receiving frames, command response
2587 * and all notifications from ucode.
2588 */
2589 /* ARGSUSED */
2590 static uint_t
2591 iwp_rx_softintr(caddr_t arg, caddr_t unused)
2592 {
2593 iwp_sc_t *sc;
2594 ieee80211com_t *ic;
2595 iwp_rx_desc_t *desc;
2596 iwp_rx_data_t *data;
2597 uint32_t index;
2598
2599 if (NULL == arg) {
2600 return (DDI_INTR_UNCLAIMED);
2601 }
2602 sc = (iwp_sc_t *)arg;
2603 ic = &sc->sc_ic;
2604
2605 /*
2606 * firmware has moved the index of the rx queue, driver get it,
2607 * and deal with it.
2608 */
2609 index = (sc->sc_shared->val0) & 0xfff;
2610
2611 while (sc->sc_rxq.cur != index) {
2612 data = &sc->sc_rxq.data[sc->sc_rxq.cur];
2613 desc = (iwp_rx_desc_t *)data->dma_data.mem_va;
2614
2615 IWP_DBG((IWP_DEBUG_INTR, "iwp_rx_softintr(): "
2616 "rx notification index = %d"
2617 " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
2618 index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
2619 desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));
2620
2621 /*
2622 * a command other than a tx need to be replied
2623 */
2624 if (!(desc->hdr.qid & 0x80) &&
2625 (desc->hdr.type != REPLY_SCAN_CMD) &&
2626 (desc->hdr.type != REPLY_TX)) {
2627 iwp_cmd_intr(sc, desc);
2628 }
2629
2630 switch (desc->hdr.type) {
2631 case REPLY_RX_PHY_CMD:
2632 iwp_rx_phy_intr(sc, desc);
2633 break;
2634
2635 case REPLY_RX_MPDU_CMD:
2636 iwp_rx_mpdu_intr(sc, desc);
2637 break;
2638
2639 case REPLY_TX:
2640 iwp_tx_intr(sc, desc);
2641 break;
2642
2643 case REPLY_ALIVE:
2644 iwp_ucode_alive(sc, desc);
2645 break;
2646
2647 case CARD_STATE_NOTIFICATION:
2648 {
2649 uint32_t *status = (uint32_t *)(desc + 1);
2650
2651 IWP_DBG((IWP_DEBUG_RADIO, "iwp_rx_softintr(): "
2652 "state changed to %x\n",
2653 LE_32(*status)));
2654
2655 if (LE_32(*status) & 1) {
2656 /*
2657 * the radio button has to be pushed(OFF). It
2658 * is considered as a hw error, the
2659 * iwp_thread() tries to recover it after the
2660 * button is pushed again(ON)
2661 */
2662 cmn_err(CE_NOTE, "iwp_rx_softintr(): "
2663 "radio transmitter is off\n");
2664 sc->sc_ostate = sc->sc_ic.ic_state;
2665 ieee80211_new_state(&sc->sc_ic,
2666 IEEE80211_S_INIT, -1);
2667 atomic_or_32(&sc->sc_flags,
2668 IWP_F_HW_ERR_RECOVER | IWP_F_RADIO_OFF);
2669 }
2670
2671 break;
2672 }
2673
2674 case SCAN_START_NOTIFICATION:
2675 {
2676 iwp_start_scan_t *scan =
2677 (iwp_start_scan_t *)(desc + 1);
2678
2679 IWP_DBG((IWP_DEBUG_SCAN, "iwp_rx_softintr(): "
2680 "scanning channel %d status %x\n",
2681 scan->chan, LE_32(scan->status)));
2682
2683 ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
2684 break;
2685 }
2686
2687 case SCAN_COMPLETE_NOTIFICATION:
2688 {
2689 #ifdef DEBUG
2690 iwp_stop_scan_t *scan =
2691 (iwp_stop_scan_t *)(desc + 1);
2692
2693 IWP_DBG((IWP_DEBUG_SCAN, "iwp_rx_softintr(): "
2694 "completed channel %d (burst of %d) status %02x\n",
2695 scan->chan, scan->nchan, scan->status));
2696 #endif
2697
2698 sc->sc_scan_pending++;
2699 break;
2700 }
2701
2702 case STATISTICS_NOTIFICATION:
2703 {
2704 /*
2705 * handle statistics notification
2706 */
2707 break;
2708 }
2709
2710 case CALIBRATION_RES_NOTIFICATION:
2711 iwp_save_calib_result(sc, desc);
2712 break;
2713
2714 case CALIBRATION_COMPLETE_NOTIFICATION:
2715 mutex_enter(&sc->sc_glock);
2716 atomic_or_32(&sc->sc_flags, IWP_F_FW_INIT);
2717 cv_signal(&sc->sc_ucode_cv);
2718 mutex_exit(&sc->sc_glock);
2719 break;
2720
2721 case MISSED_BEACONS_NOTIFICATION:
2722 {
2723 struct iwp_beacon_missed *miss =
2724 (struct iwp_beacon_missed *)(desc + 1);
2725
2726 if ((ic->ic_state == IEEE80211_S_RUN) &&
2727 (LE_32(miss->consecutive) > 50)) {
2728 cmn_err(CE_NOTE, "iwp: iwp_rx_softintr(): "
2729 "beacon missed %d/%d\n",
2730 LE_32(miss->consecutive),
2731 LE_32(miss->total));
2732 (void) ieee80211_new_state(ic,
2733 IEEE80211_S_INIT, -1);
2734 }
2735 break;
2736 }
2737 }
2738
2739 sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
2740 }
2741
2742 /*
2743 * driver dealt with what received in rx queue and tell the information
2744 * to the firmware.
2745 */
2746 index = (0 == index) ? RX_QUEUE_SIZE - 1 : index - 1;
2747 IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));
2748
2749 /*
2750 * re-enable interrupts
2751 */
2752 IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2753
2754 return (DDI_INTR_CLAIMED);
2755 }
2756
2757 /*
2758 * the handle of interrupt
2759 */
2760 /* ARGSUSED */
2761 static uint_t
2762 iwp_intr(caddr_t arg, caddr_t unused)
2763 {
2764 iwp_sc_t *sc;
2765 uint32_t r, rfh;
2766
2767 if (NULL == arg) {
2768 return (DDI_INTR_UNCLAIMED);
2769 }
2770 sc = (iwp_sc_t *)arg;
2771
2772 r = IWP_READ(sc, CSR_INT);
2773 if (0 == r || 0xffffffff == r) {
2774 return (DDI_INTR_UNCLAIMED);
2775 }
2776
2777 IWP_DBG((IWP_DEBUG_INTR, "iwp_intr(): "
2778 "interrupt reg %x\n", r));
2779
2780 rfh = IWP_READ(sc, CSR_FH_INT_STATUS);
2781
2782 IWP_DBG((IWP_DEBUG_INTR, "iwp_intr(): "
2783 "FH interrupt reg %x\n", rfh));
2784
2785 /*
2786 * disable interrupts
2787 */
2788 IWP_WRITE(sc, CSR_INT_MASK, 0);
2789
2790 /*
2791 * ack interrupts
2792 */
2793 IWP_WRITE(sc, CSR_INT, r);
2794 IWP_WRITE(sc, CSR_FH_INT_STATUS, rfh);
2795
2796 if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
2797 IWP_DBG((IWP_DEBUG_FW, "iwp_intr(): "
2798 "fatal firmware error\n"));
2799 iwp_stop(sc);
2800 sc->sc_ostate = sc->sc_ic.ic_state;
2801
2802 /* notify upper layer */
2803 if (!IWP_CHK_FAST_RECOVER(sc)) {
2804 ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
2805 }
2806
2807 atomic_or_32(&sc->sc_flags, IWP_F_HW_ERR_RECOVER);
2808 return (DDI_INTR_CLAIMED);
2809 }
2810
2811 if (r & BIT_INT_RF_KILL) {
2812 uint32_t tmp = IWP_READ(sc, CSR_GP_CNTRL);
2813 if (tmp & (1 << 27)) {
2814 cmn_err(CE_NOTE, "RF switch: radio on\n");
2815 }
2816 }
2817
2818 if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
2819 (rfh & FH_INT_RX_MASK)) {
2820 (void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
2821 return (DDI_INTR_CLAIMED);
2822 }
2823
2824 if (r & BIT_INT_FH_TX) {
2825 mutex_enter(&sc->sc_glock);
2826 atomic_or_32(&sc->sc_flags, IWP_F_PUT_SEG);
2827 cv_signal(&sc->sc_put_seg_cv);
2828 mutex_exit(&sc->sc_glock);
2829 }
2830
2831 #ifdef DEBUG
2832 if (r & BIT_INT_ALIVE) {
2833 IWP_DBG((IWP_DEBUG_FW, "iwp_intr(): "
2834 "firmware initialized.\n"));
2835 }
2836 #endif
2837
2838 /*
2839 * re-enable interrupts
2840 */
2841 IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2842
2843 return (DDI_INTR_CLAIMED);
2844 }
2845
/*
 * Map an 802.11 rate (in 500kb/s units) to the corresponding PLCP
 * signal value expected by the hardware.  Returns 0 for an unknown
 * rate.
 */
static uint8_t
iwp_rate_to_plcp(int rate)
{
	static const struct {
		int	r_rate;
		uint8_t	r_plcp;
	} rate_tbl[] = {
		/*
		 * CCK rates
		 */
		{ 2,	0xa },
		{ 4,	0x14 },
		{ 11,	0x37 },
		{ 22,	0x6e },
		/*
		 * OFDM rates
		 */
		{ 12,	0xd },
		{ 18,	0xf },
		{ 24,	0x5 },
		{ 36,	0x7 },
		{ 48,	0x9 },
		{ 72,	0xb },
		{ 96,	0x1 },
		{ 108,	0x3 }
	};
	size_t i;

	for (i = 0; i < sizeof (rate_tbl) / sizeof (rate_tbl[0]); i++) {
		if (rate_tbl[i].r_rate == rate) {
			return (rate_tbl[i].r_plcp);
		}
	}

	return (0);
}
2913
2914 /*
2915 * invoked by GLD send frames
2916 */
2917 static mblk_t *
2918 iwp_m_tx(void *arg, mblk_t *mp)
2919 {
2920 iwp_sc_t *sc;
2921 ieee80211com_t *ic;
2922 mblk_t *next;
2923
2924 if (NULL == arg) {
2925 return (NULL);
2926 }
2927 sc = (iwp_sc_t *)arg;
2928 ic = &sc->sc_ic;
2929
2930 if (sc->sc_flags & IWP_F_SUSPEND) {
2931 freemsgchain(mp);
2932 return (NULL);
2933 }
2934
2935 if (ic->ic_state != IEEE80211_S_RUN) {
2936 freemsgchain(mp);
2937 return (NULL);
2938 }
2939
2940 if ((sc->sc_flags & IWP_F_HW_ERR_RECOVER) &&
2941 IWP_CHK_FAST_RECOVER(sc)) {
2942 IWP_DBG((IWP_DEBUG_FW, "iwp_m_tx(): "
2943 "hold queue\n"));
2944 return (mp);
2945 }
2946
2947
2948 while (mp != NULL) {
2949 next = mp->b_next;
2950 mp->b_next = NULL;
2951 if (iwp_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
2952 mp->b_next = next;
2953 break;
2954 }
2955 mp = next;
2956 }
2957
2958 return (mp);
2959 }
2960
2961 /*
2962 * send frames
2963 */
2964 static int
2965 iwp_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
2966 {
2967 iwp_sc_t *sc;
2968 iwp_tx_ring_t *ring;
2969 iwp_tx_desc_t *desc;
2970 iwp_tx_data_t *data;
2971 iwp_tx_data_t *desc_data;
2972 iwp_cmd_t *cmd;
2973 iwp_tx_cmd_t *tx;
2974 ieee80211_node_t *in;
2975 struct ieee80211_frame *wh;
2976 struct ieee80211_key *k = NULL;
2977 mblk_t *m, *m0;
2978 int hdrlen, len, len0, mblen, off, err = IWP_SUCCESS;
2979 uint16_t masks = 0;
2980 uint32_t rate, s_id = 0;
2981
2982 if (NULL == ic) {
2983 return (IWP_FAIL);
2984 }
2985 sc = (iwp_sc_t *)ic;
2986
2987 if (sc->sc_flags & IWP_F_SUSPEND) {
2988 if ((type & IEEE80211_FC0_TYPE_MASK) !=
2989 IEEE80211_FC0_TYPE_DATA) {
2990 freemsg(mp);
2991 }
2992 err = IWP_FAIL;
2993 goto exit;
2994 }
2995
2996 mutex_enter(&sc->sc_tx_lock);
2997 ring = &sc->sc_txq[0];
2998 data = &ring->data[ring->cur];
2999 cmd = data->cmd;
3000 bzero(cmd, sizeof (*cmd));
3001
3002 ring->cur = (ring->cur + 1) % ring->count;
3003
3004 /*
3005 * Need reschedule TX if TX buffer is full.
3006 */
3007 if (ring->queued > ring->count - IWP_MAX_WIN_SIZE) {
3008 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3009 "no txbuf\n"));
3010
3011 sc->sc_need_reschedule = 1;
3012 mutex_exit(&sc->sc_tx_lock);
3013
3014 if ((type & IEEE80211_FC0_TYPE_MASK) !=
3015 IEEE80211_FC0_TYPE_DATA) {
3016 freemsg(mp);
3017 }
3018 sc->sc_tx_nobuf++;
3019 err = IWP_FAIL;
3020 goto exit;
3021 }
3022
3023 ring->queued++;
3024
3025 mutex_exit(&sc->sc_tx_lock);
3026
3027 hdrlen = ieee80211_hdrspace(ic, mp->b_rptr);
3028
3029 m = allocb(msgdsize(mp) + 32, BPRI_MED);
3030 if (NULL == m) { /* can not alloc buf, drop this package */
3031 cmn_err(CE_WARN, "iwp_send(): "
3032 "failed to allocate msgbuf\n");
3033 freemsg(mp);
3034
3035 mutex_enter(&sc->sc_tx_lock);
3036 ring->queued--;
3037 if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3038 sc->sc_need_reschedule = 0;
3039 mutex_exit(&sc->sc_tx_lock);
3040 mac_tx_update(ic->ic_mach);
3041 mutex_enter(&sc->sc_tx_lock);
3042 }
3043 mutex_exit(&sc->sc_tx_lock);
3044
3045 err = IWP_SUCCESS;
3046 goto exit;
3047 }
3048
3049 for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
3050 mblen = MBLKL(m0);
3051 (void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
3052 off += mblen;
3053 }
3054
3055 m->b_wptr += off;
3056
3057 wh = (struct ieee80211_frame *)m->b_rptr;
3058
3059 /*
3060 * determine send which AP or station in IBSS
3061 */
3062 in = ieee80211_find_txnode(ic, wh->i_addr1);
3063 if (NULL == in) {
3064 cmn_err(CE_WARN, "iwp_send(): "
3065 "failed to find tx node\n");
3066 freemsg(mp);
3067 freemsg(m);
3068 sc->sc_tx_err++;
3069
3070 mutex_enter(&sc->sc_tx_lock);
3071 ring->queued--;
3072 if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3073 sc->sc_need_reschedule = 0;
3074 mutex_exit(&sc->sc_tx_lock);
3075 mac_tx_update(ic->ic_mach);
3076 mutex_enter(&sc->sc_tx_lock);
3077 }
3078 mutex_exit(&sc->sc_tx_lock);
3079
3080 err = IWP_SUCCESS;
3081 goto exit;
3082 }
3083
3084 /*
3085 * Net80211 module encapsulate outbound data frames.
3086 * Add some feilds of 80211 frame.
3087 */
3088 if ((type & IEEE80211_FC0_TYPE_MASK) ==
3089 IEEE80211_FC0_TYPE_DATA) {
3090 (void) ieee80211_encap(ic, m, in);
3091 }
3092
3093 freemsg(mp);
3094
3095 cmd->hdr.type = REPLY_TX;
3096 cmd->hdr.flags = 0;
3097 cmd->hdr.qid = ring->qid;
3098
3099 tx = (iwp_tx_cmd_t *)cmd->data;
3100 tx->tx_flags = 0;
3101
3102 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3103 tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
3104 } else {
3105 tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
3106 }
3107
3108 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
3109 k = ieee80211_crypto_encap(ic, m);
3110 if (NULL == k) {
3111 freemsg(m);
3112 sc->sc_tx_err++;
3113
3114 mutex_enter(&sc->sc_tx_lock);
3115 ring->queued--;
3116 if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3117 sc->sc_need_reschedule = 0;
3118 mutex_exit(&sc->sc_tx_lock);
3119 mac_tx_update(ic->ic_mach);
3120 mutex_enter(&sc->sc_tx_lock);
3121 }
3122 mutex_exit(&sc->sc_tx_lock);
3123
3124 err = IWP_SUCCESS;
3125 goto exit;
3126 }
3127
3128 /* packet header may have moved, reset our local pointer */
3129 wh = (struct ieee80211_frame *)m->b_rptr;
3130 }
3131
3132 len = msgdsize(m);
3133
3134 #ifdef DEBUG
3135 if (iwp_dbg_flags & IWP_DEBUG_TX) {
3136 ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
3137 }
3138 #endif
3139
3140 tx->rts_retry_limit = IWP_TX_RTS_RETRY_LIMIT;
3141 tx->data_retry_limit = IWP_TX_DATA_RETRY_LIMIT;
3142
3143 /*
3144 * specific TX parameters for management frames
3145 */
3146 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
3147 IEEE80211_FC0_TYPE_MGT) {
3148 /*
3149 * mgmt frames are sent at 1M
3150 */
3151 if ((in->in_rates.ir_rates[0] &
3152 IEEE80211_RATE_VAL) != 0) {
3153 rate = in->in_rates.ir_rates[0] & IEEE80211_RATE_VAL;
3154 } else {
3155 rate = 2;
3156 }
3157
3158 tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3159
3160 /*
3161 * tell h/w to set timestamp in probe responses
3162 */
3163 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3164 IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
3165 tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);
3166
3167 tx->data_retry_limit = 3;
3168 if (tx->data_retry_limit < tx->rts_retry_limit) {
3169 tx->rts_retry_limit = tx->data_retry_limit;
3170 }
3171 }
3172
3173 if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3174 IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
3175 ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3176 IEEE80211_FC0_SUBTYPE_REASSOC_REQ)) {
3177 tx->timeout.pm_frame_timeout = LE_16(3);
3178 } else {
3179 tx->timeout.pm_frame_timeout = LE_16(2);
3180 }
3181
3182 } else {
3183 /*
3184 * do it here for the software way rate scaling.
3185 * later for rate scaling in hardware.
3186 *
3187 * now the txrate is determined in tx cmd flags, set to the
3188 * max value 54M for 11g and 11M for 11b originally.
3189 */
3190 if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
3191 rate = ic->ic_fixed_rate;
3192 } else {
3193 if ((in->in_rates.ir_rates[in->in_txrate] &
3194 IEEE80211_RATE_VAL) != 0) {
3195 rate = in->in_rates.
3196 ir_rates[in->in_txrate] &
3197 IEEE80211_RATE_VAL;
3198 }
3199 }
3200
3201 tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3202
3203 tx->timeout.pm_frame_timeout = 0;
3204 }
3205
3206 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3207 "tx rate[%d of %d] = %x",
3208 in->in_txrate, in->in_rates.ir_nrates, rate));
3209
3210 len0 = roundup(4 + sizeof (iwp_tx_cmd_t) + hdrlen, 4);
3211 if (len0 != (4 + sizeof (iwp_tx_cmd_t) + hdrlen)) {
3212 tx->tx_flags |= LE_32(TX_CMD_FLG_MH_PAD_MSK);
3213 }
3214
3215 /*
3216 * retrieve destination node's id
3217 */
3218 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3219 tx->sta_id = IWP_BROADCAST_ID;
3220 } else {
3221 tx->sta_id = IWP_AP_ID;
3222 }
3223
3224 if (2 == rate || 4 == rate || 11 == rate || 22 == rate) {
3225 masks |= RATE_MCS_CCK_MSK;
3226 }
3227
3228 masks |= RATE_MCS_ANT_B_MSK;
3229 tx->rate.r.rate_n_flags = LE_32(iwp_rate_to_plcp(rate) | masks);
3230
3231 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3232 "tx flag = %x",
3233 tx->tx_flags));
3234
3235 tx->stop_time.life_time = LE_32(0xffffffff);
3236
3237 tx->len = LE_16(len);
3238
3239 tx->dram_lsb_ptr =
3240 LE_32(data->paddr_cmd + 4 + offsetof(iwp_tx_cmd_t, scratch));
3241 tx->dram_msb_ptr = 0;
3242 tx->driver_txop = 0;
3243 tx->next_frame_len = 0;
3244
3245 (void) memcpy(tx + 1, m->b_rptr, hdrlen);
3246 m->b_rptr += hdrlen;
3247 (void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen);
3248
3249 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3250 "sending data: qid=%d idx=%d len=%d",
3251 ring->qid, ring->cur, len));
3252
3253 /*
3254 * first segment includes the tx cmd plus the 802.11 header,
3255 * the second includes the remaining of the 802.11 frame.
3256 */
3257 mutex_enter(&sc->sc_tx_lock);
3258
3259 cmd->hdr.idx = ring->desc_cur;
3260
3261 desc_data = &ring->data[ring->desc_cur];
3262 desc = desc_data->desc;
3263 bzero(desc, sizeof (*desc));
3264 desc->val0 = 2 << 24;
3265 desc->pa[0].tb1_addr = data->paddr_cmd;
3266 desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
3267 ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
3268 desc->pa[0].val2 =
3269 ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
3270 ((len - hdrlen) << 20);
3271 IWP_DBG((IWP_DEBUG_TX, "iwp_send(): "
3272 "phy addr1 = 0x%x phy addr2 = 0x%x "
3273 "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
3274 data->paddr_cmd, data->dma_data.cookie.dmac_address,
3275 len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));
3276
3277 /*
3278 * kick ring
3279 */
3280 s_id = tx->sta_id;
3281
3282 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3283 tfd_offset[ring->desc_cur].val =
3284 (8 + len) | (s_id << 12);
3285 if (ring->desc_cur < IWP_MAX_WIN_SIZE) {
3286 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3287 tfd_offset[IWP_QUEUE_SIZE + ring->desc_cur].val =
3288 (8 + len) | (s_id << 12);
3289 }
3290
3291 IWP_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
3292 IWP_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
3293
3294 ring->desc_cur = (ring->desc_cur + 1) % ring->count;
3295 IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->desc_cur);
3296
3297 mutex_exit(&sc->sc_tx_lock);
3298 freemsg(m);
3299
3300 /*
3301 * release node reference
3302 */
3303 ieee80211_free_node(in);
3304
3305 ic->ic_stats.is_tx_bytes += len;
3306 ic->ic_stats.is_tx_frags++;
3307
3308 mutex_enter(&sc->sc_mt_lock);
3309 if (0 == sc->sc_tx_timer) {
3310 sc->sc_tx_timer = 4;
3311 }
3312 mutex_exit(&sc->sc_mt_lock);
3313
3314 exit:
3315 return (err);
3316 }
3317
3318 /*
3319 * invoked by GLD to deal with IOCTL affaires
3320 */
3321 static void
3322 iwp_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
3323 {
3324 iwp_sc_t *sc;
3325 ieee80211com_t *ic;
3326 int err = EINVAL;
3327
3328 if (NULL == arg) {
3329 return;
3330 }
3331 sc = (iwp_sc_t *)arg;
3332 ic = &sc->sc_ic;
3333
3334 err = ieee80211_ioctl(ic, wq, mp);
3335 if (ENETRESET == err) {
3336 /*
3337 * This is special for the hidden AP connection.
3338 * In any case, we should make sure only one 'scan'
3339 * in the driver for a 'connect' CLI command. So
3340 * when connecting to a hidden AP, the scan is just
3341 * sent out to the air when we know the desired
3342 * essid of the AP we want to connect.
3343 */
3344 if (ic->ic_des_esslen) {
3345 if (sc->sc_flags & IWP_F_RUNNING) {
3346 iwp_m_stop(sc);
3347 (void) iwp_m_start(sc);
3348 (void) ieee80211_new_state(ic,
3349 IEEE80211_S_SCAN, -1);
3350 }
3351 }
3352 }
3353 }
3354
3355 /*
3356 * Call back functions for get/set proporty
3357 */
3358 static int
3359 iwp_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3360 uint_t wldp_length, void *wldp_buf)
3361 {
3362 iwp_sc_t *sc;
3363 int err = EINVAL;
3364
3365 if (NULL == arg) {
3366 return (EINVAL);
3367 }
3368 sc = (iwp_sc_t *)arg;
3369
3370 err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
3371 wldp_length, wldp_buf);
3372
3373 return (err);
3374 }
3375
3376 static void
3377 iwp_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3378 mac_prop_info_handle_t prh)
3379 {
3380 iwp_sc_t *sc;
3381
3382 sc = (iwp_sc_t *)arg;
3383 ieee80211_propinfo(&sc->sc_ic, pr_name, wldp_pr_num, prh);
3384 }
3385
3386 static int
3387 iwp_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3388 uint_t wldp_length, const void *wldp_buf)
3389 {
3390 iwp_sc_t *sc;
3391 ieee80211com_t *ic;
3392 int err = EINVAL;
3393
3394 if (NULL == arg) {
3395 return (EINVAL);
3396 }
3397 sc = (iwp_sc_t *)arg;
3398 ic = &sc->sc_ic;
3399
3400 err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
3401 wldp_buf);
3402
3403 if (err == ENETRESET) {
3404 if (ic->ic_des_esslen) {
3405 if (sc->sc_flags & IWP_F_RUNNING) {
3406 iwp_m_stop(sc);
3407 (void) iwp_m_start(sc);
3408 (void) ieee80211_new_state(ic,
3409 IEEE80211_S_SCAN, -1);
3410 }
3411 }
3412 err = 0;
3413 }
3414 return (err);
3415 }
3416
3417 /*
3418 * invoked by GLD supply statistics NIC and driver
3419 */
3420 static int
3421 iwp_m_stat(void *arg, uint_t stat, uint64_t *val)
3422 {
3423 iwp_sc_t *sc;
3424 ieee80211com_t *ic;
3425 ieee80211_node_t *in;
3426
3427 if (NULL == arg) {
3428 return (EINVAL);
3429 }
3430 sc = (iwp_sc_t *)arg;
3431 ic = &sc->sc_ic;
3432
3433 mutex_enter(&sc->sc_glock);
3434
3435 switch (stat) {
3436 case MAC_STAT_IFSPEED:
3437 in = ic->ic_bss;
3438 *val = ((IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) ?
3439 IEEE80211_RATE(in->in_txrate) :
3440 ic->ic_fixed_rate) / 2 * 1000000;
3441 break;
3442 case MAC_STAT_NOXMTBUF:
3443 *val = sc->sc_tx_nobuf;
3444 break;
3445 case MAC_STAT_NORCVBUF:
3446 *val = sc->sc_rx_nobuf;
3447 break;
3448 case MAC_STAT_IERRORS:
3449 *val = sc->sc_rx_err;
3450 break;
3451 case MAC_STAT_RBYTES:
3452 *val = ic->ic_stats.is_rx_bytes;
3453 break;
3454 case MAC_STAT_IPACKETS:
3455 *val = ic->ic_stats.is_rx_frags;
3456 break;
3457 case MAC_STAT_OBYTES:
3458 *val = ic->ic_stats.is_tx_bytes;
3459 break;
3460 case MAC_STAT_OPACKETS:
3461 *val = ic->ic_stats.is_tx_frags;
3462 break;
3463 case MAC_STAT_OERRORS:
3464 case WIFI_STAT_TX_FAILED:
3465 *val = sc->sc_tx_err;
3466 break;
3467 case WIFI_STAT_TX_RETRANS:
3468 *val = sc->sc_tx_retries;
3469 break;
3470 case WIFI_STAT_FCS_ERRORS:
3471 case WIFI_STAT_WEP_ERRORS:
3472 case WIFI_STAT_TX_FRAGS:
3473 case WIFI_STAT_MCAST_TX:
3474 case WIFI_STAT_RTS_SUCCESS:
3475 case WIFI_STAT_RTS_FAILURE:
3476 case WIFI_STAT_ACK_FAILURE:
3477 case WIFI_STAT_RX_FRAGS:
3478 case WIFI_STAT_MCAST_RX:
3479 case WIFI_STAT_RX_DUPS:
3480 mutex_exit(&sc->sc_glock);
3481 return (ieee80211_stat(ic, stat, val));
3482 default:
3483 mutex_exit(&sc->sc_glock);
3484 return (ENOTSUP);
3485 }
3486
3487 mutex_exit(&sc->sc_glock);
3488
3489 return (IWP_SUCCESS);
3490
3491 }
3492
3493 /*
3494 * invoked by GLD to start or open NIC
3495 */
3496 static int
3497 iwp_m_start(void *arg)
3498 {
3499 iwp_sc_t *sc;
3500 ieee80211com_t *ic;
3501 int err = IWP_FAIL;
3502
3503 if (NULL == arg) {
3504 return (EINVAL);
3505 }
3506 sc = (iwp_sc_t *)arg;
3507 ic = &sc->sc_ic;
3508
3509 err = iwp_init(sc);
3510 if (err != IWP_SUCCESS) {
3511 /*
3512 * The hw init err(eg. RF is OFF). Return Success to make
3513 * the 'plumb' succeed. The iwp_thread() tries to re-init
3514 * background.
3515 */
3516 atomic_or_32(&sc->sc_flags, IWP_F_HW_ERR_RECOVER);
3517 return (IWP_SUCCESS);
3518 }
3519
3520 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3521
3522 atomic_or_32(&sc->sc_flags, IWP_F_RUNNING);
3523
3524 return (IWP_SUCCESS);
3525 }
3526
3527 /*
3528 * invoked by GLD to stop or down NIC
3529 */
3530 static void
3531 iwp_m_stop(void *arg)
3532 {
3533 iwp_sc_t *sc;
3534 ieee80211com_t *ic;
3535
3536 if (NULL == arg) {
3537 return;
3538 }
3539 sc = (iwp_sc_t *)arg;
3540 ic = &sc->sc_ic;
3541
3542 iwp_stop(sc);
3543
3544 /*
3545 * release buffer for calibration
3546 */
3547 iwp_release_calib_buffer(sc);
3548
3549 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3550
3551 atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
3552 atomic_and_32(&sc->sc_flags, ~IWP_F_RATE_AUTO_CTL);
3553
3554 atomic_and_32(&sc->sc_flags, ~IWP_F_RUNNING);
3555 atomic_and_32(&sc->sc_flags, ~IWP_F_SCANNING);
3556 }
3557
3558 /*
3559 * invoked by GLD to configure NIC
3560 */
3561 static int
3562 iwp_m_unicst(void *arg, const uint8_t *macaddr)
3563 {
3564 iwp_sc_t *sc;
3565 ieee80211com_t *ic;
3566 int err = IWP_SUCCESS;
3567
3568 if (NULL == arg) {
3569 return (EINVAL);
3570 }
3571 sc = (iwp_sc_t *)arg;
3572 ic = &sc->sc_ic;
3573
3574 if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
3575 IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
3576 mutex_enter(&sc->sc_glock);
3577 err = iwp_config(sc);
3578 mutex_exit(&sc->sc_glock);
3579 if (err != IWP_SUCCESS) {
3580 cmn_err(CE_WARN, "iwp_m_unicst(): "
3581 "failed to configure device\n");
3582 goto fail;
3583 }
3584 }
3585
3586 return (err);
3587
3588 fail:
3589 return (err);
3590 }
3591
/*
 * GLD multicast callback: multicast filtering is not performed by this
 * driver, so this is a no-op that always succeeds.
 */
/* ARGSUSED */
static int
iwp_m_multicst(void *arg, boolean_t add, const uint8_t *m)
{
	return (IWP_SUCCESS);
}
3598
/*
 * GLD promiscuous-mode callback: promiscuous mode is not supported by
 * this driver, so this is a no-op that always succeeds.
 */
/* ARGSUSED */
static int
iwp_m_promisc(void *arg, boolean_t on)
{
	return (IWP_SUCCESS);
}
3605
3606 /*
3607 * kernel thread to deal with exceptional situation
3608 */
3609 static void
3610 iwp_thread(iwp_sc_t *sc)
3611 {
3612 ieee80211com_t *ic = &sc->sc_ic;
3613 clock_t clk;
3614 int err, n = 0, timeout = 0;
3615 uint32_t tmp;
3616 #ifdef DEBUG
3617 int times = 0;
3618 #endif
3619
3620 while (sc->sc_mf_thread_switch) {
3621 tmp = IWP_READ(sc, CSR_GP_CNTRL);
3622 if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
3623 atomic_and_32(&sc->sc_flags, ~IWP_F_RADIO_OFF);
3624 } else {
3625 atomic_or_32(&sc->sc_flags, IWP_F_RADIO_OFF);
3626 }
3627
3628 /*
3629 * If in SUSPEND or the RF is OFF, do nothing.
3630 */
3631 if (sc->sc_flags & IWP_F_RADIO_OFF) {
3632 delay(drv_usectohz(100000));
3633 continue;
3634 }
3635
3636 /*
3637 * recovery fatal error
3638 */
3639 if (ic->ic_mach &&
3640 (sc->sc_flags & IWP_F_HW_ERR_RECOVER)) {
3641
3642 IWP_DBG((IWP_DEBUG_FW, "iwp_thread(): "
3643 "try to recover fatal hw error: %d\n", times++));
3644
3645 iwp_stop(sc);
3646
3647 if (IWP_CHK_FAST_RECOVER(sc)) {
3648 /* save runtime configuration */
3649 bcopy(&sc->sc_config, &sc->sc_config_save,
3650 sizeof (sc->sc_config));
3651 } else {
3652 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3653 delay(drv_usectohz(2000000 + n*500000));
3654 }
3655
3656 err = iwp_init(sc);
3657 if (err != IWP_SUCCESS) {
3658 n++;
3659 if (n < 20) {
3660 continue;
3661 }
3662 }
3663
3664 n = 0;
3665 if (!err) {
3666 atomic_or_32(&sc->sc_flags, IWP_F_RUNNING);
3667 }
3668
3669
3670 if (!IWP_CHK_FAST_RECOVER(sc) ||
3671 iwp_fast_recover(sc) != IWP_SUCCESS) {
3672 atomic_and_32(&sc->sc_flags,
3673 ~IWP_F_HW_ERR_RECOVER);
3674
3675 delay(drv_usectohz(2000000));
3676 if (sc->sc_ostate != IEEE80211_S_INIT) {
3677 ieee80211_new_state(ic,
3678 IEEE80211_S_SCAN, 0);
3679 }
3680 }
3681 }
3682
3683 if (ic->ic_mach &&
3684 (sc->sc_flags & IWP_F_SCANNING) && sc->sc_scan_pending) {
3685 IWP_DBG((IWP_DEBUG_SCAN, "iwp_thread(): "
3686 "wait for probe response\n"));
3687
3688 sc->sc_scan_pending--;
3689 delay(drv_usectohz(200000));
3690 ieee80211_next_scan(ic);
3691 }
3692
3693 /*
3694 * rate ctl
3695 */
3696 if (ic->ic_mach &&
3697 (sc->sc_flags & IWP_F_RATE_AUTO_CTL)) {
3698 clk = ddi_get_lbolt();
3699 if (clk > sc->sc_clk + drv_usectohz(1000000)) {
3700 iwp_amrr_timeout(sc);
3701 }
3702 }
3703
3704 delay(drv_usectohz(100000));
3705
3706 mutex_enter(&sc->sc_mt_lock);
3707 if (sc->sc_tx_timer) {
3708 timeout++;
3709 if (10 == timeout) {
3710 sc->sc_tx_timer--;
3711 if (0 == sc->sc_tx_timer) {
3712 atomic_or_32(&sc->sc_flags,
3713 IWP_F_HW_ERR_RECOVER);
3714 sc->sc_ostate = IEEE80211_S_RUN;
3715 IWP_DBG((IWP_DEBUG_FW, "iwp_thread(): "
3716 "try to recover from "
3717 "send fail\n"));
3718 }
3719 timeout = 0;
3720 }
3721 }
3722 mutex_exit(&sc->sc_mt_lock);
3723 }
3724
3725 mutex_enter(&sc->sc_mt_lock);
3726 sc->sc_mf_thread = NULL;
3727 cv_signal(&sc->sc_mt_cv);
3728 mutex_exit(&sc->sc_mt_lock);
3729 }
3730
3731
/*
 * Send a command to the ucode.
 *
 * The command payload 'buf' ('size' bytes; must fit in cmd->data) is
 * copied into the next free slot of the command TX ring and handed to
 * the firmware via the write-pointer register.
 *
 * If 'async' is non-zero, return IWP_SUCCESS immediately after kicking
 * the ring (sc_cmd_accum tracks outstanding async commands).  Otherwise
 * wait up to 2 seconds on sc_cmd_cv for the completion notification and
 * return IWP_SUCCESS/IWP_FAIL accordingly.
 *
 * Caller must hold sc_glock.
 */
static int
iwp_cmd(iwp_sc_t *sc, int code, const void *buf, int size, int async)
{
	iwp_tx_ring_t *ring = &sc->sc_txq[IWP_CMD_QUEUE_NUM];
	iwp_tx_desc_t *desc;
	iwp_cmd_t *cmd;

	ASSERT(size <= sizeof (cmd->data));
	ASSERT(mutex_owned(&sc->sc_glock));

	IWP_DBG((IWP_DEBUG_CMD, "iwp_cmd() "
	    "code[%d]", code));
	desc = ring->data[ring->cur].desc;
	cmd = ring->data[ring->cur].cmd;

	/* fill in the command header: type, queue id and ring index */
	cmd->hdr.type = (uint8_t)code;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;
	(void) memcpy(cmd->data, buf, size);
	(void) memset(desc, 0, sizeof (*desc));

	/* one transfer buffer in this descriptor */
	desc->val0 = 1 << 24;
	desc->pa[0].tb1_addr =
	    (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
	/* buffer length = 4-byte command header + payload */
	desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;

	if (async) {
		sc->sc_cmd_accum++;
	}

	/*
	 * kick cmd ring XXX
	 */
	sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
	    tfd_offset[ring->cur].val = 8;
	if (ring->cur < IWP_MAX_WIN_SIZE) {
		/* mirror the entry in the table's wrap-around window */
		sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
		    tfd_offset[IWP_QUEUE_SIZE + ring->cur].val = 8;
	}
	ring->cur = (ring->cur + 1) % ring->count;
	IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	if (async) {
		return (IWP_SUCCESS);
	} else {
		clock_t clk;

		/* synchronous: wait up to 2s for the completion wakeup */
		clk = ddi_get_lbolt() + drv_usectohz(2000000);
		while (sc->sc_cmd_flag != SC_CMD_FLG_DONE) {
			if (cv_timedwait(&sc->sc_cmd_cv,
			    &sc->sc_glock, clk) < 0) {
				/* timed out */
				break;
			}
		}

		if (SC_CMD_FLG_DONE == sc->sc_cmd_flag) {
			sc->sc_cmd_flag = SC_CMD_FLG_NONE;
			return (IWP_SUCCESS);
		} else {
			sc->sc_cmd_flag = SC_CMD_FLG_NONE;
			return (IWP_FAIL);
		}
	}
}
3800
3801 /*
3802 * require ucode seting led of NIC
3803 */
3804 static void
3805 iwp_set_led(iwp_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
3806 {
3807 iwp_led_cmd_t led;
3808
3809 led.interval = LE_32(100000); /* unit: 100ms */
3810 led.id = id;
3811 led.off = off;
3812 led.on = on;
3813
3814 (void) iwp_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
3815 }
3816
3817 /*
3818 * necessary setting to NIC before authentication
3819 */
3820 static int
3821 iwp_hw_set_before_auth(iwp_sc_t *sc)
3822 {
3823 ieee80211com_t *ic = &sc->sc_ic;
3824 ieee80211_node_t *in = ic->ic_bss;
3825 int err = IWP_FAIL;
3826
3827 /*
3828 * update adapter's configuration according
3829 * the info of target AP
3830 */
3831 IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
3832 sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, in->in_chan));
3833
3834 sc->sc_config.ofdm_ht_triple_stream_basic_rates = 0;
3835 sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0;
3836 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0;
3837
3838 if (IEEE80211_MODE_11B == ic->ic_curmode) {
3839 sc->sc_config.cck_basic_rates = 0x03;
3840 sc->sc_config.ofdm_basic_rates = 0;
3841 } else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
3842 (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
3843 sc->sc_config.cck_basic_rates = 0;
3844 sc->sc_config.ofdm_basic_rates = 0x15;
3845 } else { /* assume 802.11b/g */
3846 sc->sc_config.cck_basic_rates = 0x0f;
3847 sc->sc_config.ofdm_basic_rates = 0xff;
3848 }
3849
3850 sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
3851 RXON_FLG_SHORT_SLOT_MSK);
3852
3853 if (ic->ic_flags & IEEE80211_F_SHSLOT) {
3854 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
3855 } else {
3856 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
3857 }
3858
3859 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
3860 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
3861 } else {
3862 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
3863 }
3864
3865 IWP_DBG((IWP_DEBUG_80211, "iwp_hw_set_before_auth(): "
3866 "config chan %d flags %x "
3867 "filter_flags %x cck %x ofdm %x"
3868 " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n",
3869 LE_16(sc->sc_config.chan), LE_32(sc->sc_config.flags),
3870 LE_32(sc->sc_config.filter_flags),
3871 sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
3872 sc->sc_config.bssid[0], sc->sc_config.bssid[1],
3873 sc->sc_config.bssid[2], sc->sc_config.bssid[3],
3874 sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
3875
3876 err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
3877 sizeof (iwp_rxon_cmd_t), 1);
3878 if (err != IWP_SUCCESS) {
3879 cmn_err(CE_WARN, "iwp_hw_set_before_auth(): "
3880 "failed to config chan%d\n", sc->sc_config.chan);
3881 return (err);
3882 }
3883
3884 /*
3885 * add default AP node
3886 */
3887 err = iwp_add_ap_sta(sc);
3888 if (err != IWP_SUCCESS) {
3889 return (err);
3890 }
3891
3892
3893 return (err);
3894 }
3895
3896 /*
3897 * Send a scan request(assembly scan cmd) to the firmware.
3898 */
3899 static int
3900 iwp_scan(iwp_sc_t *sc)
3901 {
3902 ieee80211com_t *ic = &sc->sc_ic;
3903 iwp_tx_ring_t *ring = &sc->sc_txq[IWP_CMD_QUEUE_NUM];
3904 iwp_tx_desc_t *desc;
3905 iwp_tx_data_t *data;
3906 iwp_cmd_t *cmd;
3907 iwp_scan_hdr_t *hdr;
3908 iwp_scan_chan_t chan;
3909 struct ieee80211_frame *wh;
3910 ieee80211_node_t *in = ic->ic_bss;
3911 uint8_t essid[IEEE80211_NWID_LEN+1];
3912 struct ieee80211_rateset *rs;
3913 enum ieee80211_phymode mode;
3914 uint8_t *frm;
3915 int i, pktlen, nrates;
3916
3917 data = &ring->data[ring->cur];
3918 desc = data->desc;
3919 cmd = (iwp_cmd_t *)data->dma_data.mem_va;
3920
3921 cmd->hdr.type = REPLY_SCAN_CMD;
3922 cmd->hdr.flags = 0;
3923 cmd->hdr.qid = ring->qid;
3924 cmd->hdr.idx = ring->cur | 0x40;
3925
3926 hdr = (iwp_scan_hdr_t *)cmd->data;
3927 (void) memset(hdr, 0, sizeof (iwp_scan_hdr_t));
3928 hdr->nchan = 1;
3929 hdr->quiet_time = LE_16(50);
3930 hdr->quiet_plcp_th = LE_16(1);
3931
3932 hdr->flags = LE_32(RXON_FLG_BAND_24G_MSK);
3933 hdr->rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3934 (0x7 << RXON_RX_CHAIN_VALID_POS) |
3935 (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3936 (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3937
3938 hdr->tx_cmd.tx_flags = LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3939 hdr->tx_cmd.sta_id = IWP_BROADCAST_ID;
3940 hdr->tx_cmd.stop_time.life_time = LE_32(0xffffffff);
3941 hdr->tx_cmd.rate.r.rate_n_flags = LE_32(iwp_rate_to_plcp(2));
3942 hdr->tx_cmd.rate.r.rate_n_flags |=
3943 LE_32(RATE_MCS_ANT_B_MSK |RATE_MCS_CCK_MSK);
3944 hdr->direct_scan[0].len = ic->ic_des_esslen;
3945 hdr->direct_scan[0].id = IEEE80211_ELEMID_SSID;
3946
3947 hdr->filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3948 RXON_FILTER_BCON_AWARE_MSK);
3949
3950 if (ic->ic_des_esslen) {
3951 bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen);
3952 essid[ic->ic_des_esslen] = '\0';
3953 IWP_DBG((IWP_DEBUG_SCAN, "iwp_scan(): "
3954 "directed scan %s\n", essid));
3955
3956 bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
3957 ic->ic_des_esslen);
3958 } else {
3959 bzero(hdr->direct_scan[0].ssid,
3960 sizeof (hdr->direct_scan[0].ssid));
3961 }
3962
3963 /*
3964 * a probe request frame is required after the REPLY_SCAN_CMD
3965 */
3966 wh = (struct ieee80211_frame *)(hdr + 1);
3967 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
3968 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
3969 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
3970 (void) memset(wh->i_addr1, 0xff, 6);
3971 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
3972 (void) memset(wh->i_addr3, 0xff, 6);
3973 *(uint16_t *)&wh->i_dur[0] = 0;
3974 *(uint16_t *)&wh->i_seq[0] = 0;
3975
3976 frm = (uint8_t *)(wh + 1);
3977
3978 /*
3979 * essid IE
3980 */
3981 if (in->in_esslen) {
3982 bcopy(in->in_essid, essid, in->in_esslen);
3983 essid[in->in_esslen] = '\0';
3984 IWP_DBG((IWP_DEBUG_SCAN, "iwp_scan(): "
3985 "probe with ESSID %s\n",
3986 essid));
3987 }
3988 *frm++ = IEEE80211_ELEMID_SSID;
3989 *frm++ = in->in_esslen;
3990 (void) memcpy(frm, in->in_essid, in->in_esslen);
3991 frm += in->in_esslen;
3992
3993 mode = ieee80211_chan2mode(ic, ic->ic_curchan);
3994 rs = &ic->ic_sup_rates[mode];
3995
3996 /*
3997 * supported rates IE
3998 */
3999 *frm++ = IEEE80211_ELEMID_RATES;
4000 nrates = rs->ir_nrates;
4001 if (nrates > IEEE80211_RATE_SIZE) {
4002 nrates = IEEE80211_RATE_SIZE;
4003 }
4004
4005 *frm++ = (uint8_t)nrates;
4006 (void) memcpy(frm, rs->ir_rates, nrates);
4007 frm += nrates;
4008
4009 /*
4010 * supported xrates IE
4011 */
4012 if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
4013 nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
4014 *frm++ = IEEE80211_ELEMID_XRATES;
4015 *frm++ = (uint8_t)nrates;
4016 (void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
4017 frm += nrates;
4018 }
4019
4020 /*
4021 * optionnal IE (usually for wpa)
4022 */
4023 if (ic->ic_opt_ie != NULL) {
4024 (void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
4025 frm += ic->ic_opt_ie_len;
4026 }
4027
4028 /* setup length of probe request */
4029 hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
4030 hdr->len = LE_16(hdr->nchan * sizeof (iwp_scan_chan_t) +
4031 LE_16(hdr->tx_cmd.len) + sizeof (iwp_scan_hdr_t));
4032
4033 /*
4034 * the attribute of the scan channels are required after the probe
4035 * request frame.
4036 */
4037 for (i = 1; i <= hdr->nchan; i++) {
4038 if (ic->ic_des_esslen) {
4039 chan.type = LE_32(3);
4040 } else {
4041 chan.type = LE_32(1);
4042 }
4043
4044 chan.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
4045 chan.tpc.tx_gain = 0x28;
4046 chan.tpc.dsp_atten = 110;
4047 chan.active_dwell = LE_16(50);
4048 chan.passive_dwell = LE_16(120);
4049
4050 bcopy(&chan, frm, sizeof (iwp_scan_chan_t));
4051 frm += sizeof (iwp_scan_chan_t);
4052 }
4053
4054 pktlen = _PTRDIFF(frm, cmd);
4055
4056 (void) memset(desc, 0, sizeof (*desc));
4057 desc->val0 = 1 << 24;
4058 desc->pa[0].tb1_addr =
4059 (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
4060 desc->pa[0].val1 = (pktlen << 4) & 0xfff0;
4061
4062 /*
4063 * maybe for cmd, filling the byte cnt table is not necessary.
4064 * anyway, we fill it here.
4065 */
4066 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
4067 .tfd_offset[ring->cur].val = 8;
4068 if (ring->cur < IWP_MAX_WIN_SIZE) {
4069 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
4070 tfd_offset[IWP_QUEUE_SIZE + ring->cur].val = 8;
4071 }
4072
4073 /*
4074 * kick cmd ring
4075 */
4076 ring->cur = (ring->cur + 1) % ring->count;
4077 IWP_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4078
4079 return (IWP_SUCCESS);
4080 }
4081
/*
 * configure NIC by using ucode commands after loading ucode.
 *
 * Sequence: power table -> BT coexistence -> RXON (main radio
 * configuration for the current opmode) -> remove stale station
 * entries -> add the broadcast station.  Returns IWP_SUCCESS, or the
 * error of the first failing command.
 */
static int
iwp_config(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;
	iwp_powertable_cmd_t powertable;
	iwp_bt_cmd_t bt;
	iwp_add_sta_t node;
	iwp_rem_sta_t rm_sta;
	const uint8_t bcast[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	int err = IWP_FAIL;

	/*
	 * set power mode. Disable power management at present, do it later
	 */
	(void) memset(&powertable, 0, sizeof (powertable));
	powertable.flags = LE_16(0x8);
	err = iwp_cmd(sc, POWER_TABLE_CMD, &powertable,
	    sizeof (powertable), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to set power mode\n");
		return (err);
	}

	/*
	 * configure bt coexistence
	 */
	(void) memset(&bt, 0, sizeof (bt));
	bt.flags = 3;
	bt.lead_time = 0xaa;
	bt.max_kill = 1;
	err = iwp_cmd(sc, REPLY_BT_CONFIG, &bt,
	    sizeof (bt), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to configurate bt coexistence\n");
		return (err);
	}

	/*
	 * configure rxon
	 */
	(void) memset(&sc->sc_config, 0, sizeof (iwp_rxon_cmd_t));
	IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
	IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
	sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
	sc->sc_config.flags = LE_32(RXON_FLG_BAND_24G_MSK);
	sc->sc_config.flags &= LE_32(~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
	    RXON_FLG_CHANNEL_MODE_PURE_40_MSK));

	/* device type and filter flags depend on the 802.11 opmode */
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_DIS_DECRYPT_MSK |
		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
		break;
	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
		sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;

		sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
		sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_DIS_DECRYPT_MSK |
		    RXON_FILTER_DIS_GRP_DECRYPT_MSK);
		break;
	case IEEE80211_M_HOSTAP:
		sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
		break;
	case IEEE80211_M_MONITOR:
		sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
		sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
		    RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
		break;
	}

	/*
	 * Support all CCK rates.
	 */
	sc->sc_config.cck_basic_rates = 0x0f;

	/*
	 * Support all OFDM rates.
	 */
	sc->sc_config.ofdm_basic_rates = 0xff;

	sc->sc_config.rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
	    (0x7 << RXON_RX_CHAIN_VALID_POS) |
	    (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) |
	    (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));

	err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
	    sizeof (iwp_rxon_cmd_t), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to set configure command\n");
		return (err);
	}

	/*
	 * remove all nodes in NIC
	 */
	(void) memset(&rm_sta, 0, sizeof (rm_sta));
	rm_sta.num_sta = 1;
	(void) memcpy(rm_sta.addr, bcast, 6);

	err = iwp_cmd(sc, REPLY_REMOVE_STA, &rm_sta, sizeof (iwp_rem_sta_t), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to remove broadcast node in hardware.\n");
		return (err);
	}

	/*
	 * add broadcast node so that we can send broadcast frame
	 */
	(void) memset(&node, 0, sizeof (node));
	(void) memset(node.sta.addr, 0xff, 6);
	node.mode = 0;
	node.sta.sta_id = IWP_BROADCAST_ID;
	node.station_flags = 0;

	err = iwp_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_config(): "
		    "failed to add broadcast node\n");
		return (err);
	}

	return (err);
}
4216
4217 /*
4218 * quiesce(9E) entry point.
4219 * This function is called when the system is single-threaded at high
4220 * PIL with preemption disabled. Therefore, this function must not be
4221 * blocked.
4222 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
4223 * DDI_FAILURE indicates an error condition and should almost never happen.
4224 */
4225 static int
4226 iwp_quiesce(dev_info_t *dip)
4227 {
4228 iwp_sc_t *sc;
4229
4230 sc = ddi_get_soft_state(iwp_soft_state_p, ddi_get_instance(dip));
4231 if (NULL == sc) {
4232 return (DDI_FAILURE);
4233 }
4234
4235 #ifdef DEBUG
4236 /* by pass any messages, if it's quiesce */
4237 iwp_dbg_flags = 0;
4238 #endif
4239
4240 /*
4241 * No more blocking is allowed while we are in the
4242 * quiesce(9E) entry point.
4243 */
4244 atomic_or_32(&sc->sc_flags, IWP_F_QUIESCED);
4245
4246 /*
4247 * Disable and mask all interrupts.
4248 */
4249 iwp_stop(sc);
4250
4251 return (DDI_SUCCESS);
4252 }
4253
/*
 * Request the DMA master to stop and poll until the hardware confirms
 * it is disabled.  If the MAC is in power-save, the stop completes on
 * wakeup, so no polling is needed.
 */
static void
iwp_stop_master(iwp_sc_t *sc)
{
	uint32_t tmp;
	int n;

	/* request master stop */
	tmp = IWP_READ(sc, CSR_RESET);
	IWP_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);

	/* in MAC power-save the stop takes effect on wakeup; don't poll */
	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
	    CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE) {
		return;
	}

	/* poll up to ~2s (2000 * 1ms) for the disabled flag */
	for (n = 0; n < 2000; n++) {
		if (IWP_READ(sc, CSR_RESET) &
		    CSR_RESET_REG_FLAG_MASTER_DISABLED) {
			break;
		}
		DELAY(1000);
	}

#ifdef DEBUG
	if (2000 == n) {
		IWP_DBG((IWP_DEBUG_HW, "iwp_stop_master(): "
		    "timeout waiting for master stop\n"));
	}
#endif
}
4284
4285 static int
4286 iwp_power_up(iwp_sc_t *sc)
4287 {
4288 uint32_t tmp;
4289
4290 iwp_mac_access_enter(sc);
4291 tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
4292 tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
4293 tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
4294 iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4295 iwp_mac_access_exit(sc);
4296
4297 DELAY(5000);
4298 return (IWP_SUCCESS);
4299 }
4300
/*
 * hardware initialization
 *
 * Brings the adapter out of reset: waits for the MAC clock, enables
 * the DMA clock, programs the radio configuration from the EEPROM,
 * powers up from VMAIN, applies PCI workarounds and selects the
 * power-amplifier SKU.  Returns IWP_SUCCESS, ETIMEDOUT if the clock
 * never becomes ready, or IWP_FAIL on a bad EEPROM radio config.
 */
static int
iwp_preinit(iwp_sc_t *sc)
{
	int n;
	uint8_t vlink;
	uint16_t radio_cfg;
	uint32_t tmp;

	/*
	 * clear any pending interrupts
	 */
	IWP_WRITE(sc, CSR_INT, 0xffffffff);

	/* disable the L0s exit timer (chicken bit) */
	tmp = IWP_READ(sc, CSR_GIO_CHICKEN_BITS);
	IWP_WRITE(sc, CSR_GIO_CHICKEN_BITS,
	    tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	IWP_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * wait for clock ready
	 */
	for (n = 0; n < 1000; n++) {
		if (IWP_READ(sc, CSR_GP_CNTRL) &
		    CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
			break;
		}
		DELAY(10);
	}

	if (1000 == n) {
		/* clock never became ready */
		return (ETIMEDOUT);
	}

	iwp_mac_access_enter(sc);

	/* request the DMA clock */
	iwp_reg_write(sc, ALM_APMG_CLK_EN, APMG_CLK_REG_VAL_DMA_CLK_RQT);

	DELAY(20);
	/* keep L1-active disabled while the driver owns the device */
	tmp = iwp_reg_read(sc, ALM_APMG_PCIDEV_STT);
	iwp_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
	    APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
	iwp_mac_access_exit(sc);

	/* program radio type/step/dash from the EEPROM */
	radio_cfg = IWP_READ_EEP_SHORT(sc, EEP_SP_RADIO_CONFIGURATION);
	if (SP_RADIO_TYPE_MSK(radio_cfg) < SP_RADIO_TYPE_MAX) {
		tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
		IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
		    tmp | SP_RADIO_TYPE_MSK(radio_cfg) |
		    SP_RADIO_STEP_MSK(radio_cfg) |
		    SP_RADIO_DASH_MSK(radio_cfg));
	} else {
		cmn_err(CE_WARN, "iwp_preinit(): "
		    "radio configuration information in eeprom is wrong\n");
		return (IWP_FAIL);
	}


	IWP_WRITE(sc, CSR_INT_COALESCING, 512 / 32);

	(void) iwp_power_up(sc);

	/*
	 * PCI config-space workaround for early hardware revisions
	 * (clear bit 11 at offset 0xe8).
	 */
	if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
		tmp = ddi_get32(sc->sc_cfg_handle,
		    (uint32_t *)(sc->sc_cfg_base + 0xe8));
		ddi_put32(sc->sc_cfg_handle,
		    (uint32_t *)(sc->sc_cfg_base + 0xe8),
		    tmp & ~(1 << 11));
	}

	/* clear bit 1 at PCI config offset 0xf0 */
	vlink = ddi_get8(sc->sc_cfg_handle,
	    (uint8_t *)(sc->sc_cfg_base + 0xf0));
	ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
	    vlink & ~2);

	tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
	tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
	IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG, tmp);

	/*
	 * make sure power supply on each part of the hardware
	 */
	iwp_mac_access_enter(sc);
	tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
	tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
	iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	DELAY(5);

	tmp = iwp_reg_read(sc, ALM_APMG_PS_CTL);
	tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
	iwp_reg_write(sc, ALM_APMG_PS_CTL, tmp);
	iwp_mac_access_exit(sc);

	/* select the SKU matching the chip's power-amplifier type */
	if (PA_TYPE_MIX == sc->sc_chip_param.pa_type) {
		IWP_WRITE(sc, CSR_GP_DRIVER_REG,
		    CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_MIX);
	}

	if (PA_TYPE_INTER == sc->sc_chip_param.pa_type) {

		IWP_WRITE(sc, CSR_GP_DRIVER_REG,
		    CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
	}

	return (IWP_SUCCESS);
}
4412
4413 /*
4414 * set up semphore flag to own EEPROM
4415 */
4416 static int
4417 iwp_eep_sem_down(iwp_sc_t *sc)
4418 {
4419 int count1, count2;
4420 uint32_t tmp;
4421
4422 for (count1 = 0; count1 < 1000; count1++) {
4423 tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4424 IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4425 tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
4426
4427 for (count2 = 0; count2 < 2; count2++) {
4428 if (IWP_READ(sc, CSR_HW_IF_CONFIG_REG) &
4429 CSR_HW_IF_CONFIG_REG_EEP_SEM) {
4430 return (IWP_SUCCESS);
4431 }
4432 DELAY(10000);
4433 }
4434 }
4435 return (IWP_FAIL);
4436 }
4437
4438 /*
4439 * reset semphore flag to release EEPROM
4440 */
4441 static void
4442 iwp_eep_sem_up(iwp_sc_t *sc)
4443 {
4444 uint32_t tmp;
4445
4446 tmp = IWP_READ(sc, CSR_HW_IF_CONFIG_REG);
4447 IWP_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4448 tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
4449 }
4450
/*
 * This function read all infomation from eeprom
 *
 * Validates the EEPROM signature, takes the EEPROM semaphore, then
 * reads the whole map one 16-bit word at a time into sc_eep_map
 * (each read returns the data in the upper 16 bits of the register,
 * with bit 0 acting as the "read done" flag).  Returns IWP_SUCCESS
 * or IWP_FAIL.
 */
static int
iwp_eep_load(iwp_sc_t *sc)
{
	int i, rr;
	uint32_t rv, tmp, eep_gp;
	uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
	uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;

	/*
	 * read eeprom gp register in CSR
	 */
	eep_gp = IWP_READ(sc, CSR_EEPROM_GP);
	if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
	    CSR_EEPROM_GP_BAD_SIGNATURE) {
		IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
		    "not find eeprom\n"));
		return (IWP_FAIL);
	}

	/* NOTE(review): assumes IWP_SUCCESS == 0 — the != 0 check */
	rr = iwp_eep_sem_down(sc);
	if (rr != 0) {
		IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
		    "driver failed to own EEPROM\n"));
		return (IWP_FAIL);
	}

	/* one 16-bit word per iteration; 'addr' is a byte offset */
	for (addr = 0; addr < eep_sz; addr += 2) {
		IWP_WRITE(sc, CSR_EEPROM_REG, addr<<1);
		tmp = IWP_READ(sc, CSR_EEPROM_REG);
		IWP_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));

		/* poll bit 0 (read-done) up to 10 times, 10us apart */
		for (i = 0; i < 10; i++) {
			rv = IWP_READ(sc, CSR_EEPROM_REG);
			if (rv & 1) {
				break;
			}
			DELAY(10);
		}

		/* rv holds the last read value when the poll gave up */
		if (!(rv & 1)) {
			IWP_DBG((IWP_DEBUG_EEPROM, "iwp_eep_load(): "
			    "time out when read eeprome\n"));
			iwp_eep_sem_up(sc);
			return (IWP_FAIL);
		}

		/* data word is in the upper half of the register */
		eep_p[addr/2] = LE_16(rv >> 16);
	}

	iwp_eep_sem_up(sc);
	return (IWP_SUCCESS);
}
4506
4507 /*
4508 * initialize mac address in ieee80211com_t struct
4509 */
4510 static void
4511 iwp_get_mac_from_eep(iwp_sc_t *sc)
4512 {
4513 ieee80211com_t *ic = &sc->sc_ic;
4514
4515 IEEE80211_ADDR_COPY(ic->ic_macaddr, &sc->sc_eep_map[EEP_MAC_ADDRESS]);
4516
4517 IWP_DBG((IWP_DEBUG_EEPROM, "iwp_get_mac_from_eep(): "
4518 "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
4519 ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
4520 ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
4521 }
4522
/*
 * main initialization function
 *
 * Two-phase firmware bring-up: load and run the "init" ucode segment,
 * wait for its alive notification, stop the chipset, then load and
 * run the "runtime" segment and wait again.  Once the runtime ucode is
 * alive, configure the hardware with iwp_config().  Each wait is
 * bounded to 1 second on sc_ucode_cv; IWP_F_FW_INIT signals the alive
 * notification.  Returns IWP_SUCCESS or IWP_FAIL.
 */
static int
iwp_init(iwp_sc_t *sc)
{
	int err = IWP_FAIL;
	clock_t clk;

	/*
	 * release buffer for calibration
	 */
	iwp_release_calib_buffer(sc);

	mutex_enter(&sc->sc_glock);
	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);

	err = iwp_init_common(sc);
	if (err != IWP_SUCCESS) {
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * backup ucode data part for future use.
	 */
	(void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
	    sc->sc_dma_fw_data.mem_va,
	    sc->sc_dma_fw_data.alength);

	/* load firmware init segment into NIC */
	err = iwp_load_init_firmware(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to setup init firmware\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * now press "execute" start running
	 */
	IWP_WRITE(sc, CSR_RESET, 0);

	/* wait up to 1s for the init ucode's alive notification */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);
	while (!(sc->sc_flags & IWP_F_FW_INIT)) {
		if (cv_timedwait(&sc->sc_ucode_cv,
		    &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_FW_INIT)) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to process init alive.\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	mutex_exit(&sc->sc_glock);

	/*
	 * stop chipset for initializing chipset again
	 */
	iwp_stop(sc);

	mutex_enter(&sc->sc_glock);
	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);

	err = iwp_init_common(sc);
	if (err != IWP_SUCCESS) {
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * load firmware run segment into NIC
	 */
	err = iwp_load_run_firmware(sc);
	if (err != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to setup run firmware\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * now press "execute" start running
	 */
	IWP_WRITE(sc, CSR_RESET, 0);

	/* wait up to 1s for the runtime ucode's alive notification */
	clk = ddi_get_lbolt() + drv_usectohz(1000000);
	while (!(sc->sc_flags & IWP_F_FW_INIT)) {
		if (cv_timedwait(&sc->sc_ucode_cv,
		    &sc->sc_glock, clk) < 0) {
			break;
		}
	}

	if (!(sc->sc_flags & IWP_F_FW_INIT)) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to process runtime alive.\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	mutex_exit(&sc->sc_glock);

	DELAY(1000);

	mutex_enter(&sc->sc_glock);
	atomic_and_32(&sc->sc_flags, ~IWP_F_FW_INIT);

	/*
	 * at this point, the firmware is loaded OK, then config the hardware
	 * with the ucode API, including rxon, txpower, etc.
	 */
	err = iwp_config(sc);
	if (err) {
		cmn_err(CE_WARN, "iwp_init(): "
		    "failed to configure device\n");
		mutex_exit(&sc->sc_glock);
		return (IWP_FAIL);
	}

	/*
	 * at this point, hardware may receive beacons :)
	 */
	mutex_exit(&sc->sc_glock);
	return (IWP_SUCCESS);
}
4654
/*
 * stop or disable NIC
 *
 * Resets the device, masks all interrupts, drains TX/RX rings,
 * releases the DMA clock, stops the DMA master and finally asserts a
 * software reset.  Skips sc_glock entirely when IWP_F_QUIESCED is set,
 * because quiesce(9E) runs single-threaded and must not block.
 */
static void
iwp_stop(iwp_sc_t *sc)
{
	uint32_t tmp;
	int i;

	/* by pass if it's quiesced */
	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
		mutex_enter(&sc->sc_glock);
	}

	IWP_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
	/*
	 * disable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, 0);
	IWP_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
	IWP_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);

	/*
	 * reset all Tx rings
	 */
	for (i = 0; i < IWP_NUM_QUEUES; i++) {
		iwp_reset_tx_ring(sc, &sc->sc_txq[i]);
	}

	/*
	 * reset Rx ring
	 */
	iwp_reset_rx_ring(sc);

	/* release the DMA clock request */
	iwp_mac_access_enter(sc);
	iwp_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
	iwp_mac_access_exit(sc);

	DELAY(5);

	iwp_stop_master(sc);

	/* cancel any pending TX watchdog */
	mutex_enter(&sc->sc_mt_lock);
	sc->sc_tx_timer = 0;
	mutex_exit(&sc->sc_mt_lock);

	tmp = IWP_READ(sc, CSR_RESET);
	IWP_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);

	/* by pass if it's quiesced */
	if (!(sc->sc_flags & IWP_F_QUIESCED)) {
		mutex_exit(&sc->sc_glock);
	}
}
4709
/*
 * Naive implementation of the Adaptive Multi Rate Retry algorithm:
 * "IEEE 802.11 Rate Adaptation: A Practical Approach"
 * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
 * INRIA Sophia - Projet Planete
 * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
 */
/* fewer than 10% of transmissions needed a retry */
#define	is_success(amrr) \
	((amrr)->retrycnt < (amrr)->txcnt / 10)
/* more than a third of transmissions needed a retry */
#define	is_failure(amrr) \
	((amrr)->retrycnt > (amrr)->txcnt / 3)
/* enough samples for a statistically meaningful decision */
#define	is_enough(amrr) \
	((amrr)->txcnt > 200)
/* minimum sample count before reacting to failures */
#define	not_very_few(amrr) \
	((amrr)->txcnt > 40)
/* node already at the lowest/highest rate index */
#define	is_min_rate(in) \
	(0 == (in)->in_txrate)
#define	is_max_rate(in) \
	((in)->in_rates.ir_nrates - 1 == (in)->in_txrate)
/* move one step up/down the node's rate set */
#define	increase_rate(in) \
	((in)->in_txrate++)
#define	decrease_rate(in) \
	((in)->in_txrate--)
/* restart the tx/retry sample window */
#define	reset_cnt(amrr) \
	{ (amrr)->txcnt = (amrr)->retrycnt = 0; }

/* bounds for the adaptive success threshold (in sample periods) */
#define	IWP_AMRR_MIN_SUCCESS_THRESHOLD	 1
#define	IWP_AMRR_MAX_SUCCESS_THRESHOLD	15
4738
4739 static void
4740 iwp_amrr_init(iwp_amrr_t *amrr)
4741 {
4742 amrr->success = 0;
4743 amrr->recovery = 0;
4744 amrr->txcnt = amrr->retrycnt = 0;
4745 amrr->success_threshold = IWP_AMRR_MIN_SUCCESS_THRESHOLD;
4746 }
4747
4748 static void
4749 iwp_amrr_timeout(iwp_sc_t *sc)
4750 {
4751 ieee80211com_t *ic = &sc->sc_ic;
4752
4753 IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_timeout(): "
4754 "enter\n"));
4755
4756 if (IEEE80211_M_STA == ic->ic_opmode) {
4757 iwp_amrr_ratectl(NULL, ic->ic_bss);
4758 } else {
4759 ieee80211_iterate_nodes(&ic->ic_sta, iwp_amrr_ratectl, NULL);
4760 }
4761
4762 sc->sc_clk = ddi_get_lbolt();
4763 }
4764
/*
 * AMRR decision step for one node.
 *
 * The iwp_amrr_t state is overlaid on the node structure (the cast
 * below relies on that layout).  With enough samples and a low retry
 * ratio, a success streak past success_threshold moves the rate up;
 * with a high retry ratio the rate moves down and, if the last
 * increase failed immediately (recovery), the threshold is raised to
 * make the next increase harder.  The sample window resets after a
 * full period or any rate change.
 */
/* ARGSUSED */
static void
iwp_amrr_ratectl(void *arg, ieee80211_node_t *in)
{
	iwp_amrr_t *amrr = (iwp_amrr_t *)in;
	int need_change = 0;

	if (is_success(amrr) && is_enough(amrr)) {
		amrr->success++;
		if (amrr->success >= amrr->success_threshold &&
		    !is_max_rate(in)) {
			/* sustained success: probe the next rate up */
			amrr->recovery = 1;
			amrr->success = 0;
			increase_rate(in);
			IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_ratectl(): "
			    "AMRR increasing rate %d "
			    "(txcnt=%d retrycnt=%d)\n",
			    in->in_txrate, amrr->txcnt,
			    amrr->retrycnt));
			need_change = 1;
		} else {
			amrr->recovery = 0;
		}
	} else if (not_very_few(amrr) && is_failure(amrr)) {
		amrr->success = 0;
		if (!is_min_rate(in)) {
			if (amrr->recovery) {
				/* last increase failed: back off harder */
				amrr->success_threshold++;
				if (amrr->success_threshold >
				    IWP_AMRR_MAX_SUCCESS_THRESHOLD) {
					amrr->success_threshold =
					    IWP_AMRR_MAX_SUCCESS_THRESHOLD;
				}
			} else {
				amrr->success_threshold =
				    IWP_AMRR_MIN_SUCCESS_THRESHOLD;
			}
			decrease_rate(in);
			IWP_DBG((IWP_DEBUG_RATECTL, "iwp_amrr_ratectl(): "
			    "AMRR decreasing rate %d "
			    "(txcnt=%d retrycnt=%d)\n",
			    in->in_txrate, amrr->txcnt,
			    amrr->retrycnt));
			need_change = 1;
		}
		amrr->recovery = 0; /* paper is incorrect */
	}

	if (is_enough(amrr) || need_change) {
		reset_cnt(amrr);
	}
}
4817
4818 /*
4819 * translate indirect address in eeprom to direct address
4820 * in eeprom and return address of entry whos indirect address
4821 * is indi_addr
4822 */
4823 static uint8_t *
4824 iwp_eep_addr_trans(iwp_sc_t *sc, uint32_t indi_addr)
4825 {
4826 uint32_t di_addr;
4827 uint16_t temp;
4828
4829 if (!(indi_addr & INDIRECT_ADDRESS)) {
4830 di_addr = indi_addr;
4831 return (&sc->sc_eep_map[di_addr]);
4832 }
4833
4834 switch (indi_addr & INDIRECT_TYPE_MSK) {
4835 case INDIRECT_GENERAL:
4836 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_GENERAL);
4837 break;
4838 case INDIRECT_HOST:
4839 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_HOST);
4840 break;
4841 case INDIRECT_REGULATORY:
4842 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_REGULATORY);
4843 break;
4844 case INDIRECT_CALIBRATION:
4845 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_CALIBRATION);
4846 break;
4847 case INDIRECT_PROCESS_ADJST:
4848 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_PROCESS_ADJST);
4849 break;
4850 case INDIRECT_OTHERS:
4851 temp = IWP_READ_EEP_SHORT(sc, EEP_LINK_OTHERS);
4852 break;
4853 default:
4854 temp = 0;
4855 cmn_err(CE_WARN, "iwp_eep_addr_trans(): "
4856 "incorrect indirect eeprom address.\n");
4857 break;
4858 }
4859
4860 di_addr = (indi_addr & ADDRESS_MSK) + (temp << 1);
4861
4862 return (&sc->sc_eep_map[di_addr]);
4863 }
4864
/*
 * loade a section of ucode into NIC
 *
 * Programs the service DMA channel to copy 'len' bytes from host
 * DRAM address 'addr_s' to device SRAM address 'addr_d': pause the
 * channel, set source/destination/length, describe the single
 * transfer buffer, then re-enable the channel to start the copy.
 * Always returns IWP_SUCCESS.
 */
static int
iwp_put_seg_fw(iwp_sc_t *sc, uint32_t addr_s, uint32_t addr_d, uint32_t len)
{

	iwp_mac_access_enter(sc);

	/* pause the service channel while it is reprogrammed */
	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(IWP_FH_SRVC_CHNL),
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	/* destination address in device SRAM */
	IWP_WRITE(sc, IWP_FH_SRVC_CHNL_SRAM_ADDR_REG(IWP_FH_SRVC_CHNL), addr_d);

	/* source address in host DRAM */
	IWP_WRITE(sc, IWP_FH_TFDIB_CTRL0_REG(IWP_FH_SRVC_CHNL),
	    (addr_s & FH_MEM_TFDIB_DRAM_ADDR_LSB_MASK));

	IWP_WRITE(sc, IWP_FH_TFDIB_CTRL1_REG(IWP_FH_SRVC_CHNL), len);

	/* one valid transfer buffer */
	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_BUF_STS_REG(IWP_FH_SRVC_CHNL),
	    (1 << IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
	    (1 << IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
	    IWP_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* re-enable the channel; this kicks off the transfer */
	IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(IWP_FH_SRVC_CHNL),
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL |
	    IWP_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwp_mac_access_exit(sc);

	return (IWP_SUCCESS);
}
4898
4899 /*
4900 * necessary setting during alive notification
4901 */
static int
iwp_alive_common(iwp_sc_t *sc)
{
	uint32_t base;
	uint32_t i;
	iwp_wimax_coex_cmd_t w_cmd;
	iwp_calibration_crystal_cmd_t c_cmd;
	uint32_t rv = IWP_FAIL;

	/*
	 * initialize SCD related registers to make TX work.
	 */
	iwp_mac_access_enter(sc);

	/*
	 * read sram address of data base.
	 */
	sc->sc_scd_base = iwp_reg_read(sc, IWP_SCD_SRAM_BASE_ADDR);

	/*
	 * zero the scheduler context data area, word by word.
	 */
	for (base = sc->sc_scd_base + IWP_SCD_CONTEXT_DATA_OFFSET;
	    base < sc->sc_scd_base + IWP_SCD_TX_STTS_BITMAP_OFFSET;
	    base += 4) {
		iwp_mem_write(sc, base, 0);
	}

	/*
	 * zero the TX status bitmap area that follows it.
	 */
	for (; base < sc->sc_scd_base + IWP_SCD_TRANSLATE_TBL_OFFSET;
	    base += 4) {
		iwp_mem_write(sc, base, 0);
	}

	/*
	 * zero the translate table (one 16-bit entry per queue).
	 */
	for (i = 0; i < sizeof (uint16_t) * IWP_NUM_QUEUES; i += 4) {
		iwp_mem_write(sc, base + i, 0);
	}

	/*
	 * point the scheduler at the shared DRAM area
	 * (address is in 1KB units, hence the >> 10).
	 */
	iwp_reg_write(sc, IWP_SCD_DRAM_BASE_ADDR,
	    sc->sc_dma_sh.cookie.dmac_address >> 10);

	iwp_reg_write(sc, IWP_SCD_QUEUECHAIN_SEL,
	    IWP_SCD_QUEUECHAIN_SEL_ALL(IWP_NUM_QUEUES));

	/*
	 * no aggregation queues.
	 */
	iwp_reg_write(sc, IWP_SCD_AGGR_SEL, 0);

	/*
	 * reset read/write pointers and per-queue context
	 * (window size and frame limit) for every TX queue.
	 */
	for (i = 0; i < IWP_NUM_QUEUES; i++) {
		iwp_reg_write(sc, IWP_SCD_QUEUE_RDPTR(i), 0);
		IWP_WRITE(sc, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwp_mem_write(sc, sc->sc_scd_base +
		    IWP_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwp_mem_write(sc, sc->sc_scd_base +
		    IWP_SCD_CONTEXT_QUEUE_OFFSET(i) +
		    sizeof (uint32_t),
		    ((SCD_WIN_SIZE << IWP_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWP_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((SCD_FRAME_LIMIT <<
		    IWP_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWP_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	/*
	 * enable interrupts for all queues.
	 */
	iwp_reg_write(sc, IWP_SCD_INTERRUPT_MASK, (1 << IWP_NUM_QUEUES) - 1);

	iwp_reg_write(sc, (IWP_SCD_BASE + 0x10),
	    SCD_TXFACT_REG_TXFIFO_MASK(0, 7));

	IWP_WRITE(sc, HBUS_TARG_WRPTR, (IWP_CMD_QUEUE_NUM << 8));
	iwp_reg_write(sc, IWP_SCD_QUEUE_RDPTR(IWP_CMD_QUEUE_NUM), 0);

	/*
	 * queue 0-7 map to FIFO 0-7 and
	 * all queues work under FIFO mode(none-scheduler_ack)
	 *
	 * NOTE(review): the code below actually maps queues 0-3 to
	 * FIFOs 3-0 via (3-i) — confirm the comment vs. the intended
	 * hardware mapping.
	 */
	for (i = 0; i < 4; i++) {
		iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(i),
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    ((3-i) << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWP_SCD_QUEUE_STTS_REG_MSK);
	}

	/*
	 * activate the command queue on its dedicated FIFO.
	 */
	iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(IWP_CMD_QUEUE_NUM),
	    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (IWP_CMD_FIFO_NUM << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWP_SCD_QUEUE_STTS_REG_MSK);

	/*
	 * activate queues 5 and 6 mapped 1:1 onto FIFOs 5 and 6.
	 */
	for (i = 5; i < 7; i++) {
		iwp_reg_write(sc, IWP_SCD_QUEUE_STATUS_BITS(i),
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (i << IWP_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWP_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWP_SCD_QUEUE_STTS_REG_MSK);
	}

	iwp_mac_access_exit(sc);

	/*
	 * an all-zero coexistence priority table disables
	 * wimax coexistence handling in the firmware.
	 */
	(void) memset(&w_cmd, 0, sizeof (w_cmd));

	rv = iwp_cmd(sc, COEX_PRIORITY_TABLE_CMD, &w_cmd, sizeof (w_cmd), 1);
	if (rv != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alive_common(): "
		    "failed to send wimax coexist command.\n");
		return (rv);
	}

	/*
	 * send the crystal frequency calibration values read
	 * from eeprom to the firmware.
	 */
	(void) memset(&c_cmd, 0, sizeof (c_cmd));

	c_cmd.opCode = PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
	c_cmd.data.cap_pin1 = LE_16(sc->sc_eep_calib->xtal_calib[0]);
	c_cmd.data.cap_pin2 = LE_16(sc->sc_eep_calib->xtal_calib[1]);

	rv = iwp_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &c_cmd, sizeof (c_cmd), 1);
	if (rv != IWP_SUCCESS) {
		cmn_err(CE_WARN, "iwp_alive_common(): "
		    "failed to send crystal frq calibration command.\n");
		return (rv);
	}

	/*
	 * make sure crystal frequency calibration ready
	 * before next operations.
	 */
	DELAY(1000);

	return (IWP_SUCCESS);
}
5025
5026 /*
5027 * save results of calibration from ucode
5028 */
5029 static void
5030 iwp_save_calib_result(iwp_sc_t *sc, iwp_rx_desc_t *desc)
5031 {
5032 struct iwp_calib_results *res_p = &sc->sc_calib_results;
5033 struct iwp_calib_hdr *calib_hdr = (struct iwp_calib_hdr *)(desc + 1);
5034 int len = LE_32(desc->len);
5035
5036 /*
5037 * ensure the size of buffer is not too big
5038 */
5039 len = (len & FH_RSCSR_FRAME_SIZE_MASK) - 4;
5040
5041 switch (calib_hdr->op_code) {
5042 case PHY_CALIBRATE_LO_CMD:
5043 if (NULL == res_p->lo_res) {
5044 res_p->lo_res = kmem_alloc(len, KM_NOSLEEP);
5045 }
5046
5047 if (NULL == res_p->lo_res) {
5048 cmn_err(CE_WARN, "iwp_save_calib_result(): "
5049 "failed to allocate memory.\n");
5050 return;
5051 }
5052
5053 res_p->lo_res_len = len;
5054 (void) memcpy(res_p->lo_res, calib_hdr, len);
5055 break;
5056 case PHY_CALIBRATE_TX_IQ_CMD:
5057 if (NULL == res_p->tx_iq_res) {
5058 res_p->tx_iq_res = kmem_alloc(len, KM_NOSLEEP);
5059 }
5060
5061 if (NULL == res_p->tx_iq_res) {
5062 cmn_err(CE_WARN, "iwp_save_calib_result(): "
5063 "failed to allocate memory.\n");
5064 return;
5065 }
5066
5067 res_p->tx_iq_res_len = len;
5068 (void) memcpy(res_p->tx_iq_res, calib_hdr, len);
5069 break;
5070 case PHY_CALIBRATE_TX_IQ_PERD_CMD:
5071 if (NULL == res_p->tx_iq_perd_res) {
5072 res_p->tx_iq_perd_res = kmem_alloc(len, KM_NOSLEEP);
5073 }
5074
5075 if (NULL == res_p->tx_iq_perd_res) {
5076 cmn_err(CE_WARN, "iwp_save_calib_result(): "
5077 "failed to allocate memory.\n");
5078 }
5079
5080 res_p->tx_iq_perd_res_len = len;
5081 (void) memcpy(res_p->tx_iq_perd_res, calib_hdr, len);
5082 break;
5083 case PHY_CALIBRATE_BASE_BAND_CMD:
5084 if (NULL == res_p->base_band_res) {
5085 res_p->base_band_res = kmem_alloc(len, KM_NOSLEEP);
5086 }
5087
5088 if (NULL == res_p->base_band_res) {
5089 cmn_err(CE_WARN, "iwp_save_calib_result(): "
5090 "failed to allocate memory.\n");
5091 }
5092
5093 res_p->base_band_res_len = len;
5094 (void) memcpy(res_p->base_band_res, calib_hdr, len);
5095 break;
5096 default:
5097 cmn_err(CE_WARN, "iwp_save_calib_result(): "
5098 "incorrect calibration type(%d).\n", calib_hdr->op_code);
5099 break;
5100 }
5101
5102 }
5103
5104 static void
5105 iwp_release_calib_buffer(iwp_sc_t *sc)
5106 {
5107 if (sc->sc_calib_results.lo_res != NULL) {
5108 kmem_free(sc->sc_calib_results.lo_res,
5109 sc->sc_calib_results.lo_res_len);
5110 sc->sc_calib_results.lo_res = NULL;
5111 }
5112
5113 if (sc->sc_calib_results.tx_iq_res != NULL) {
5114 kmem_free(sc->sc_calib_results.tx_iq_res,
5115 sc->sc_calib_results.tx_iq_res_len);
5116 sc->sc_calib_results.tx_iq_res = NULL;
5117 }
5118
5119 if (sc->sc_calib_results.tx_iq_perd_res != NULL) {
5120 kmem_free(sc->sc_calib_results.tx_iq_perd_res,
5121 sc->sc_calib_results.tx_iq_perd_res_len);
5122 sc->sc_calib_results.tx_iq_perd_res = NULL;
5123 }
5124
5125 if (sc->sc_calib_results.base_band_res != NULL) {
5126 kmem_free(sc->sc_calib_results.base_band_res,
5127 sc->sc_calib_results.base_band_res_len);
5128 sc->sc_calib_results.base_band_res = NULL;
5129 }
5130
5131 }
5132
5133 /*
5134 * common section of intialization
5135 */
static int
iwp_init_common(iwp_sc_t *sc)
{
	int32_t qid;
	uint32_t tmp;

	(void) iwp_preinit(sc);

	/*
	 * bail out if the hardware RF kill switch is engaged.
	 */
	tmp = IWP_READ(sc, CSR_GP_CNTRL);
	if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
		cmn_err(CE_NOTE, "iwp_init_common(): "
		    "radio transmitter is off\n");
		return (IWP_FAIL);
	}

	/*
	 * init Rx ring
	 */
	iwp_mac_access_enter(sc);
	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
	/*
	 * Rx descriptor ring base (256-byte aligned, hence >> 8).
	 */
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
	    sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);

	/*
	 * Rx status write-back area in the shared page
	 * (16-byte aligned, hence >> 4).
	 */
	IWP_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
	    ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
	    offsetof(struct iwp_shared, val0)) >> 4));

	/*
	 * enable the Rx DMA channel: 4KB buffers, interrupt the
	 * host on completion.
	 */
	IWP_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	    IWP_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	    (RX_QUEUE_SIZE_LOG <<
	    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
	iwp_mac_access_exit(sc);
	/*
	 * hand all but the last few RBDs to the hardware
	 * (write pointer must stay a multiple of 8).
	 */
	IWP_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
	    (RX_QUEUE_SIZE - 1) & ~0x7);

	/*
	 * init Tx rings
	 */
	iwp_mac_access_enter(sc);
	iwp_reg_write(sc, IWP_SCD_TXFACT, 0);

	/*
	 * keep warm page
	 */
	IWP_WRITE(sc, IWP_FH_KW_MEM_ADDR_REG,
	    sc->sc_dma_kw.cookie.dmac_address >> 4);

	/*
	 * program each Tx queue's descriptor ring base and enable
	 * its DMA channel.
	 */
	for (qid = 0; qid < IWP_NUM_QUEUES; qid++) {
		IWP_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
		    sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
		IWP_WRITE(sc, IWP_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
		    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWP_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
	}

	iwp_mac_access_exit(sc);

	/*
	 * clear "radio off" and "disable command" bits
	 */
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
	    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/*
	 * clear any pending interrupts
	 */
	IWP_WRITE(sc, CSR_INT, 0xffffffff);

	/*
	 * enable interrupts
	 */
	IWP_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);

	/*
	 * NOTE(review): the SW RFKILL bit was already cleared above;
	 * these two identical writes look redundant — confirm whether
	 * the repetition is intentional (e.g. hardware quirk).
	 */
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	IWP_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return (IWP_SUCCESS);
}
5219
5220 static int
5221 iwp_fast_recover(iwp_sc_t *sc)
5222 {
5223 ieee80211com_t *ic = &sc->sc_ic;
5224 int err = IWP_FAIL;
5225
5226 mutex_enter(&sc->sc_glock);
5227
5228 /* restore runtime configuration */
5229 bcopy(&sc->sc_config_save, &sc->sc_config,
5230 sizeof (sc->sc_config));
5231
5232 sc->sc_config.assoc_id = 0;
5233 sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
5234
5235 if ((err = iwp_hw_set_before_auth(sc)) != IWP_SUCCESS) {
5236 cmn_err(CE_WARN, "iwp_fast_recover(): "
5237 "could not setup authentication\n");
5238 mutex_exit(&sc->sc_glock);
5239 return (err);
5240 }
5241
5242 bcopy(&sc->sc_config_save, &sc->sc_config,
5243 sizeof (sc->sc_config));
5244
5245 /* update adapter's configuration */
5246 err = iwp_run_state_config(sc);
5247 if (err != IWP_SUCCESS) {
5248 cmn_err(CE_WARN, "iwp_fast_recover(): "
5249 "failed to setup association\n");
5250 mutex_exit(&sc->sc_glock);
5251 return (err);
5252 }
5253 /* set LED on */
5254 iwp_set_led(sc, 2, 0, 1);
5255
5256 mutex_exit(&sc->sc_glock);
5257
5258 atomic_and_32(&sc->sc_flags, ~IWP_F_HW_ERR_RECOVER);
5259
5260 /* start queue */
5261 IWP_DBG((IWP_DEBUG_FW, "iwp_fast_recover(): "
5262 "resume xmit\n"));
5263 mac_tx_update(ic->ic_mach);
5264
5265 return (IWP_SUCCESS);
5266 }
5267
5268 static int
5269 iwp_run_state_config(iwp_sc_t *sc)
5270 {
5271 struct ieee80211com *ic = &sc->sc_ic;
5272 ieee80211_node_t *in = ic->ic_bss;
5273 int err = IWP_FAIL;
5274
5275 /*
5276 * update adapter's configuration
5277 */
5278 sc->sc_config.assoc_id = in->in_associd & 0x3fff;
5279
5280 /*
5281 * short preamble/slot time are
5282 * negotiated when associating
5283 */
5284 sc->sc_config.flags &=
5285 ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
5286 RXON_FLG_SHORT_SLOT_MSK);
5287
5288 if (ic->ic_flags & IEEE80211_F_SHSLOT) {
5289 sc->sc_config.flags |=
5290 LE_32(RXON_FLG_SHORT_SLOT_MSK);
5291 }
5292
5293 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
5294 sc->sc_config.flags |=
5295 LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
5296 }
5297
5298 sc->sc_config.filter_flags |=
5299 LE_32(RXON_FILTER_ASSOC_MSK);
5300
5301 if (ic->ic_opmode != IEEE80211_M_STA) {
5302 sc->sc_config.filter_flags |=
5303 LE_32(RXON_FILTER_BCON_AWARE_MSK);
5304 }
5305
5306 IWP_DBG((IWP_DEBUG_80211, "iwp_run_state_config(): "
5307 "config chan %d flags %x"
5308 " filter_flags %x\n",
5309 sc->sc_config.chan, sc->sc_config.flags,
5310 sc->sc_config.filter_flags));
5311
5312 err = iwp_cmd(sc, REPLY_RXON, &sc->sc_config,
5313 sizeof (iwp_rxon_cmd_t), 1);
5314 if (err != IWP_SUCCESS) {
5315 cmn_err(CE_WARN, "iwp_run_state_config(): "
5316 "could not update configuration\n");
5317 return (err);
5318 }
5319
5320 return (err);
5321 }
5322
5323 /*
5324 * This function overwrites default configurations of
5325 * ieee80211com structure in Net80211 module.
5326 */
static void
iwp_overwrite_ic_default(iwp_sc_t *sc)
{
	ieee80211com_t *ic = &sc->sc_ic;

	/*
	 * save net80211's state handler first so the driver's
	 * iwp_newstate() can chain back to it.
	 */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwp_newstate;
	/* driver-specific node allocation/free hooks */
	ic->ic_node_alloc = iwp_node_alloc;
	ic->ic_node_free = iwp_node_free;
}
5337
5338
5339 /*
5340 * This function adds AP station into hardware.
5341 */
5342 static int
5343 iwp_add_ap_sta(iwp_sc_t *sc)
5344 {
5345 ieee80211com_t *ic = &sc->sc_ic;
5346 ieee80211_node_t *in = ic->ic_bss;
5347 iwp_add_sta_t node;
5348 int err = IWP_FAIL;
5349
5350 /*
5351 * Add AP node into hardware.
5352 */
5353 (void) memset(&node, 0, sizeof (node));
5354 IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
5355 node.mode = STA_MODE_ADD_MSK;
5356 node.sta.sta_id = IWP_AP_ID;
5357
5358 err = iwp_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
5359 if (err != IWP_SUCCESS) {
5360 cmn_err(CE_WARN, "iwp_add_ap_sta(): "
5361 "failed to add AP node\n");
5362 return (err);
5363 }
5364
5365 return (err);
5366 }
5367
5368 /*
5369 * Check EEPROM version and Calibration version.
5370 */
5371 static int
5372 iwp_eep_ver_chk(iwp_sc_t *sc)
5373 {
5374 if ((IWP_READ_EEP_SHORT(sc, EEP_VERSION) < 0x011a) ||
5375 (sc->sc_eep_calib->tx_pow_calib_hdr.calib_version < 4)) {
5376 cmn_err(CE_WARN, "iwp_eep_ver_chk(): "
5377 "unsupported eeprom detected\n");
5378 return (IWP_FAIL);
5379 }
5380
5381 return (IWP_SUCCESS);
5382 }
5383
5384 /*
5385 * Determine parameters for all supported chips.
5386 */
5387 static void
5388 iwp_set_chip_param(iwp_sc_t *sc)
5389 {
5390 if ((0x008d == sc->sc_dev_id) ||
5391 (0x008e == sc->sc_dev_id)) {
5392 sc->sc_chip_param.phy_mode = PHY_MODE_G |
5393 PHY_MODE_A | PHY_MODE_N;
5394
5395 sc->sc_chip_param.tx_ant = ANT_A | ANT_B;
5396 sc->sc_chip_param.rx_ant = ANT_A | ANT_B;
5397
5398 sc->sc_chip_param.pa_type = PA_TYPE_MIX;
5399 }
5400
5401 if ((0x422c == sc->sc_dev_id) ||
5402 (0x4239 == sc->sc_dev_id)) {
5403 sc->sc_chip_param.phy_mode = PHY_MODE_G |
5404 PHY_MODE_A | PHY_MODE_N;
5405
5406 sc->sc_chip_param.tx_ant = ANT_B | ANT_C;
5407 sc->sc_chip_param.rx_ant = ANT_B | ANT_C;
5408
5409 sc->sc_chip_param.pa_type = PA_TYPE_INTER;
5410 }
5411
5412 if ((0x422b == sc->sc_dev_id) ||
5413 (0x4238 == sc->sc_dev_id)) {
5414 sc->sc_chip_param.phy_mode = PHY_MODE_G |
5415 PHY_MODE_A | PHY_MODE_N;
5416
5417 sc->sc_chip_param.tx_ant = ANT_A | ANT_B | ANT_C;
5418 sc->sc_chip_param.rx_ant = ANT_A | ANT_B | ANT_C;
5419
5420 sc->sc_chip_param.pa_type = PA_TYPE_SYSTEM;
5421 }
5422 }
--- EOF ---