1 /*
2 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
3 * Use is subject to license terms.
4 */
5
6 /*
7 * Copyright (c) 2009, Intel Corporation
8 * All rights reserved.
9 */
10
11 /*
12 * Copyright (c) 2006
13 * Copyright (c) 2007
14 * Damien Bergamini <damien.bergamini@free.fr>
15 *
16 * Permission to use, copy, modify, and distribute this software for any
17 * purpose with or without fee is hereby granted, provided that the above
18 * copyright notice and this permission notice appear in all copies.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27 */
28
29 /*
30 * Intel(R) WiFi Link 5100/5300 Driver
31 */
32
33 #include <sys/types.h>
34 #include <sys/byteorder.h>
35 #include <sys/conf.h>
36 #include <sys/cmn_err.h>
37 #include <sys/stat.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/strsubr.h>
41 #include <sys/ethernet.h>
42 #include <inet/common.h>
43 #include <inet/nd.h>
44 #include <inet/mi.h>
45 #include <sys/note.h>
46 #include <sys/stream.h>
47 #include <sys/strsun.h>
48 #include <sys/modctl.h>
49 #include <sys/devops.h>
50 #include <sys/dlpi.h>
51 #include <sys/mac_provider.h>
52 #include <sys/mac_wifi.h>
53 #include <sys/net80211.h>
54 #include <sys/net80211_proto.h>
55 #include <sys/net80211_ht.h>
56 #include <sys/varargs.h>
57 #include <sys/policy.h>
58 #include <sys/pci.h>
59
60 #include "iwh_calibration.h"
61 #include "iwh_hw.h"
62 #include "iwh_eeprom.h"
63 #include "iwh_var.h"
64 #include <inet/wifi_ioctl.h>
65
66 #ifdef DEBUG
67 #define IWH_DEBUG_80211 (1 << 0)
68 #define IWH_DEBUG_CMD (1 << 1)
69 #define IWH_DEBUG_DMA (1 << 2)
70 #define IWH_DEBUG_EEPROM (1 << 3)
71 #define IWH_DEBUG_FW (1 << 4)
72 #define IWH_DEBUG_HW (1 << 5)
73 #define IWH_DEBUG_INTR (1 << 6)
74 #define IWH_DEBUG_MRR (1 << 7)
75 #define IWH_DEBUG_PIO (1 << 8)
76 #define IWH_DEBUG_RX (1 << 9)
77 #define IWH_DEBUG_SCAN (1 << 10)
78 #define IWH_DEBUG_TX (1 << 11)
79 #define IWH_DEBUG_RATECTL (1 << 12)
80 #define IWH_DEBUG_RADIO (1 << 13)
81 #define IWH_DEBUG_RESUME (1 << 14)
82 #define IWH_DEBUG_CALIBRATION (1 << 15)
83 #define IWH_DEBUG_BA (1 << 16)
84 #define IWH_DEBUG_RXON (1 << 17)
85 #define IWH_DEBUG_HWRATE (1 << 18)
86 #define IWH_DEBUG_HTRATE (1 << 19)
87 #define IWH_DEBUG_QOS (1 << 20)
88 /*
89 * To see debug messages for a given section, set this flag to one of
90 * the above values; several sections may be ORed together, e.g.
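 *	iwh_dbg_flags = IWH_DEBUG_TX | IWH_DEBUG_RX;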
91 */
92 uint32_t iwh_dbg_flags = 0;
93 #define IWH_DBG(x) \
94 iwh_dbg x
95 #else
96 #define IWH_DBG(x)
97 #endif
98
99 #define MS(v, f) (((v) & f) >> f##_S)
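/*
 * MS() extracts a right-justified bit field from v: f is the field mask
 * and f##_S the corresponding shift amount.  For example, with the
 * hypothetical definitions
 *	#define	FOO_RATE	0x0000ff00
 *	#define	FOO_RATE_S	8
 * MS(v, FOO_RATE) expands to (((v) & 0x0000ff00) >> 8).
 */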
100
101 static void *iwh_soft_state_p = NULL;
102
103 /*
104 * the ucode images are compiled into the driver image
105 */
106 static uint8_t iwh_fw_5000_bin[] = {
107 #include "fw-iw/fw_5000/iwh_5000.ucode"
108 };
109
110 static uint8_t iwh_fw_5150_bin[] = {
111 #include "fw-iw/fw_5150/iwh_5150.ucode"
112 };
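/*
 * each ucode image above begins with an iwh_firmware_hdr_t that records
 * the sizes of the sections that follow; see iwh_alloc_fw_dma() for how
 * the image is split up
 */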
113
114 /*
115 * DMA attributes for a shared page
116 */
117 static ddi_dma_attr_t sh_dma_attr = {
118 DMA_ATTR_V0, /* version of this structure */
119 0, /* lowest usable address */
120 0xffffffffU, /* highest usable address */
121 0xffffffffU, /* maximum DMAable byte count */
122 0x1000, /* alignment in bytes */
123 0x1000, /* burst sizes (any?) */
124 1, /* minimum transfer */
125 0xffffffffU, /* maximum transfer */
126 0xffffffffU, /* maximum segment length */
127 1, /* maximum number of segments */
128 1, /* granularity */
129 0, /* flags (reserved) */
130 };
131
132 /*
133 * DMA attributes for a keep warm DRAM descriptor
134 */
135 static ddi_dma_attr_t kw_dma_attr = {
136 DMA_ATTR_V0, /* version of this structure */
137 0, /* lowest usable address */
138 0xffffffffU, /* highest usable address */
139 0xffffffffU, /* maximum DMAable byte count */
140 0x1000, /* alignment in bytes */
141 0x1000, /* burst sizes (any?) */
142 1, /* minimum transfer */
143 0xffffffffU, /* maximum transfer */
144 0xffffffffU, /* maximum segment length */
145 1, /* maximum number of segments */
146 1, /* granularity */
147 0, /* flags (reserved) */
148 };
149
150 /*
151 * DMA attributes for a ring descriptor
152 */
153 static ddi_dma_attr_t ring_desc_dma_attr = {
154 DMA_ATTR_V0, /* version of this structure */
155 0, /* lowest usable address */
156 0xffffffffU, /* highest usable address */
157 0xffffffffU, /* maximum DMAable byte count */
158 0x100, /* alignment in bytes */
159 0x100, /* burst sizes (any?) */
160 1, /* minimum transfer */
161 0xffffffffU, /* maximum transfer */
162 0xffffffffU, /* maximum segment length */
163 1, /* maximum number of segments */
164 1, /* granularity */
165 0, /* flags (reserved) */
166 };
167
168 /*
169 * DMA attributes for a cmd
170 */
171 static ddi_dma_attr_t cmd_dma_attr = {
172 DMA_ATTR_V0, /* version of this structure */
173 0, /* lowest usable address */
174 0xffffffffU, /* highest usable address */
175 0xffffffffU, /* maximum DMAable byte count */
176 4, /* alignment in bytes */
177 0x100, /* burst sizes (any?) */
178 1, /* minimum transfer */
179 0xffffffffU, /* maximum transfer */
180 0xffffffffU, /* maximum segment length */
181 1, /* maximum number of segments */
182 1, /* granularity */
183 0, /* flags (reserved) */
184 };
185
186 /*
187 * DMA attributes for a rx buffer
188 */
189 static ddi_dma_attr_t rx_buffer_dma_attr = {
190 DMA_ATTR_V0, /* version of this structure */
191 0, /* lowest usable address */
192 0xffffffffU, /* highest usable address */
193 0xffffffffU, /* maximum DMAable byte count */
194 0x100, /* alignment in bytes */
195 0x100, /* burst sizes (any?) */
196 1, /* minimum transfer */
197 0xffffffffU, /* maximum transfer */
198 0xffffffffU, /* maximum segment length */
199 1, /* maximum number of segments */
200 1, /* granularity */
201 0, /* flags (reserved) */
202 };
203
204 /*
205 * DMA attributes for a tx buffer.
206 * The hardware supports up to 4 segments per descriptor, but all the
207 * wifi drivers currently put the whole frame in a single descriptor,
208 * so the maximum number of segments is set to 1, the same as for the
209 * rx buffer. We may take advantage of the hardware's multi-segment
210 * capability in the future, which is why rx_buffer_dma_attr and
211 * tx_buffer_dma_attr are kept as separate definitions.
212 */
213 static ddi_dma_attr_t tx_buffer_dma_attr = {
214 DMA_ATTR_V0, /* version of this structure */
215 0, /* lowest usable address */
216 0xffffffffU, /* highest usable address */
217 0xffffffffU, /* maximum DMAable byte count */
218 4, /* alignment in bytes */
219 0x100, /* burst sizes (any?) */
220 1, /* minimum transfer */
221 0xffffffffU, /* maximum transfer */
222 0xffffffffU, /* maximum segment length */
223 1, /* maximum number of segments */
224 1, /* granularity */
225 0, /* flags (reserved) */
226 };
227
228 /*
229 * DMA attributes for text and data part in the firmware
230 */
231 static ddi_dma_attr_t fw_dma_attr = {
232 DMA_ATTR_V0, /* version of this structure */
233 0, /* lowest usable address */
234 0xffffffffU, /* highest usable address */
235 0x7fffffff, /* maximum DMAable byte count */
236 0x10, /* alignment in bytes */
237 0x100, /* burst sizes (any?) */
238 1, /* minimum transfer */
239 0xffffffffU, /* maximum transfer */
240 0xffffffffU, /* maximum segment length */
241 1, /* maximum number of segments */
242 1, /* granularity */
243 0, /* flags (reserved) */
244 };
245
246 /*
247 * regs access attributes
248 */
249 static ddi_device_acc_attr_t iwh_reg_accattr = {
250 DDI_DEVICE_ATTR_V0,
251 DDI_STRUCTURE_LE_ACC,
252 DDI_STRICTORDER_ACC,
253 DDI_DEFAULT_ACC
254 };
255
256 /*
257 * DMA access attributes for descriptor
258 */
259 static ddi_device_acc_attr_t iwh_dma_descattr = {
260 DDI_DEVICE_ATTR_V0,
261 DDI_STRUCTURE_LE_ACC,
262 DDI_STRICTORDER_ACC,
263 DDI_DEFAULT_ACC
264 };
265
266 /*
267 * DMA access attributes
268 */
269 static ddi_device_acc_attr_t iwh_dma_accattr = {
270 DDI_DEVICE_ATTR_V0,
271 DDI_NEVERSWAP_ACC,
272 DDI_STRICTORDER_ACC,
273 DDI_DEFAULT_ACC
274 };
275
276 static int iwh_ring_init(iwh_sc_t *);
277 static void iwh_ring_free(iwh_sc_t *);
278 static int iwh_alloc_shared(iwh_sc_t *);
279 static void iwh_free_shared(iwh_sc_t *);
280 static int iwh_alloc_kw(iwh_sc_t *);
281 static void iwh_free_kw(iwh_sc_t *);
282 static int iwh_alloc_fw_dma(iwh_sc_t *);
283 static void iwh_free_fw_dma(iwh_sc_t *);
284 static int iwh_alloc_rx_ring(iwh_sc_t *);
285 static void iwh_reset_rx_ring(iwh_sc_t *);
286 static void iwh_free_rx_ring(iwh_sc_t *);
287 static int iwh_alloc_tx_ring(iwh_sc_t *, iwh_tx_ring_t *,
288 int, int);
289 static void iwh_reset_tx_ring(iwh_sc_t *, iwh_tx_ring_t *);
290 static void iwh_free_tx_ring(iwh_tx_ring_t *);
291 static ieee80211_node_t *iwh_node_alloc(ieee80211com_t *);
292 static void iwh_node_free(ieee80211_node_t *);
293 static int iwh_newstate(ieee80211com_t *, enum ieee80211_state, int);
294 static void iwh_mac_access_enter(iwh_sc_t *);
295 static void iwh_mac_access_exit(iwh_sc_t *);
296 static uint32_t iwh_reg_read(iwh_sc_t *, uint32_t);
297 static void iwh_reg_write(iwh_sc_t *, uint32_t, uint32_t);
298 static int iwh_load_init_firmware(iwh_sc_t *);
299 static int iwh_load_run_firmware(iwh_sc_t *);
300 static void iwh_tx_intr(iwh_sc_t *, iwh_rx_desc_t *);
301 static void iwh_cmd_intr(iwh_sc_t *, iwh_rx_desc_t *);
302 static uint_t iwh_intr(caddr_t, caddr_t);
303 static int iwh_eep_load(iwh_sc_t *);
304 static void iwh_get_mac_from_eep(iwh_sc_t *);
305 static int iwh_eep_sem_down(iwh_sc_t *);
306 static void iwh_eep_sem_up(iwh_sc_t *);
307 static uint_t iwh_rx_softintr(caddr_t, caddr_t);
308 static uint8_t iwh_rate_to_plcp(int);
309 static int iwh_cmd(iwh_sc_t *, int, const void *, int, int);
310 static void iwh_set_led(iwh_sc_t *, uint8_t, uint8_t, uint8_t);
311 static int iwh_hw_set_before_auth(iwh_sc_t *);
312 static int iwh_scan(iwh_sc_t *);
313 static int iwh_config(iwh_sc_t *);
314 static void iwh_stop_master(iwh_sc_t *);
315 static int iwh_power_up(iwh_sc_t *);
316 static int iwh_preinit(iwh_sc_t *);
317 static int iwh_init(iwh_sc_t *);
318 static void iwh_stop(iwh_sc_t *);
319 static int iwh_quiesce(dev_info_t *t);
320 static void iwh_amrr_init(iwh_amrr_t *);
321 static void iwh_amrr_timeout(iwh_sc_t *);
322 static void iwh_amrr_ratectl(void *, ieee80211_node_t *);
323 static void iwh_ucode_alive(iwh_sc_t *, iwh_rx_desc_t *);
324 static void iwh_rx_phy_intr(iwh_sc_t *, iwh_rx_desc_t *);
325 static void iwh_rx_mpdu_intr(iwh_sc_t *, iwh_rx_desc_t *);
326 static void iwh_release_calib_buffer(iwh_sc_t *);
327 static int iwh_init_common(iwh_sc_t *);
328 static uint8_t *iwh_eep_addr_trans(iwh_sc_t *, uint32_t);
329 static int iwh_put_seg_fw(iwh_sc_t *, uint32_t, uint32_t, uint32_t);
330 static int iwh_alive_common(iwh_sc_t *);
331 static void iwh_save_calib_result(iwh_sc_t *, iwh_rx_desc_t *);
332 static int iwh_tx_power_table(iwh_sc_t *, int);
333 static int iwh_attach(dev_info_t *, ddi_attach_cmd_t);
334 static int iwh_detach(dev_info_t *, ddi_detach_cmd_t);
335 static void iwh_destroy_locks(iwh_sc_t *);
336 static int iwh_send(ieee80211com_t *, mblk_t *, uint8_t);
337 static void iwh_thread(iwh_sc_t *);
338 static int iwh_run_state_config(iwh_sc_t *);
339 static int iwh_fast_recover(iwh_sc_t *);
340 static int iwh_wme_update(ieee80211com_t *);
341 static int iwh_qosparam_to_hw(iwh_sc_t *, int);
342 static int iwh_wme_to_qos_ac(int);
343 static uint16_t iwh_cw_e_to_cw(uint8_t);
344 static int iwh_wmeparam_check(struct wmeParams *);
345 static inline int iwh_wme_tid_qos_ac(int);
346 static inline int iwh_qos_ac_to_txq(int);
347 static int iwh_wme_tid_to_txq(int);
348 static void iwh_init_ht_conf(iwh_sc_t *);
349 static void iwh_overwrite_11n_rateset(iwh_sc_t *);
350 static void iwh_overwrite_ic_default(iwh_sc_t *);
351 static void iwh_config_rxon_chain(iwh_sc_t *);
352 static int iwh_add_ap_sta(iwh_sc_t *);
353 static int iwh_ap_lq(iwh_sc_t *);
354 static void iwh_recv_action(struct ieee80211_node *,
355 const uint8_t *, const uint8_t *);
356 static int iwh_send_action(struct ieee80211_node *,
357 int, int, uint16_t[4]);
358 static int iwh_is_max_rate(ieee80211_node_t *);
359 static int iwh_is_min_rate(ieee80211_node_t *);
360 static void iwh_increase_rate(ieee80211_node_t *);
361 static void iwh_decrease_rate(ieee80211_node_t *);
362 static int iwh_alloc_dma_mem(iwh_sc_t *, size_t,
363 ddi_dma_attr_t *, ddi_device_acc_attr_t *,
364 uint_t, iwh_dma_t *);
365 static void iwh_free_dma_mem(iwh_dma_t *);
366 static int iwh_reset_hw(iwh_sc_t *);
367
368 /*
369 * GLD specific operations
370 */
371 static int iwh_m_stat(void *, uint_t, uint64_t *);
372 static int iwh_m_start(void *);
373 static void iwh_m_stop(void *);
374 static int iwh_m_unicst(void *, const uint8_t *);
375 static int iwh_m_multicst(void *, boolean_t, const uint8_t *);
376 static int iwh_m_promisc(void *, boolean_t);
377 static mblk_t *iwh_m_tx(void *, mblk_t *);
378 static void iwh_m_ioctl(void *, queue_t *, mblk_t *);
379 static int iwh_m_setprop(void *arg, const char *pr_name,
380 mac_prop_id_t wldp_pr_num, uint_t wldp_length, const void *wldp_buf);
381 static int iwh_m_getprop(void *arg, const char *pr_name,
382 mac_prop_id_t wldp_pr_num, uint_t wldp_length,
383 void *wldp_buf);
384 static void iwh_m_propinfo(void *arg, const char *pr_name,
385 mac_prop_id_t wldp_pr_num, mac_prop_info_handle_t mph);
386
387 /*
388 * Supported rates for 802.11b/g modes (in 500Kbps unit).
389 */
390 static const struct ieee80211_rateset iwh_rateset_11b =
391 { 4, { 2, 4, 11, 22 } };
392
393 static const struct ieee80211_rateset iwh_rateset_11g =
394 { 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
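/*
 * in 500Kbps units: 2 = 1Mbps, 4 = 2Mbps, 11 = 5.5Mbps, 22 = 11Mbps,
 * ..., 108 = 54Mbps
 */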
395
396 /*
397 * Default 11n rates supported by this station.
398 */
399 extern struct ieee80211_htrateset ieee80211_rateset_11n;
400
401 /*
402 * For mfthread only
403 */
404 extern pri_t minclsyspri;
405
406 #define DRV_NAME_SP "iwh"
407
408 /*
409 * Module Loading Data & Entry Points
410 */
411 DDI_DEFINE_STREAM_OPS(iwh_devops, nulldev, nulldev, iwh_attach,
412 iwh_detach, nodev, NULL, D_MP, NULL, iwh_quiesce);
413
414 static struct modldrv iwh_modldrv = {
415 &mod_driverops,
416 "Intel(R) ShirleyPeak/EchoPeak driver(N)",
417 &iwh_devops
418 };
419
420 static struct modlinkage iwh_modlinkage = {
421 MODREV_1,
422 { &iwh_modldrv, NULL }
423 };
424
425 int
426 _init(void)
427 {
428 int status;
429
430 status = ddi_soft_state_init(&iwh_soft_state_p,
431 sizeof (iwh_sc_t), 1);
432 if (status != DDI_SUCCESS) {
433 return (status);
434 }
435
436 mac_init_ops(&iwh_devops, DRV_NAME_SP);
437 status = mod_install(&iwh_modlinkage);
438 if (status != DDI_SUCCESS) {
439 mac_fini_ops(&iwh_devops);
440 ddi_soft_state_fini(&iwh_soft_state_p);
441 }
442
443 return (status);
444 }
445
446 int
447 _fini(void)
448 {
449 int status;
450
451 status = mod_remove(&iwh_modlinkage);
452 if (DDI_SUCCESS == status) {
453 mac_fini_ops(&iwh_devops);
454 ddi_soft_state_fini(&iwh_soft_state_p);
455 }
456
457 return (status);
458 }
459
460 int
461 _info(struct modinfo *mip)
462 {
463 return (mod_info(&iwh_modlinkage, mip));
464 }
465
466 /*
467 * MAC callback entries
468 */
469 mac_callbacks_t iwh_m_callbacks = {
470 MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
471 iwh_m_stat,
472 iwh_m_start,
473 iwh_m_stop,
474 iwh_m_promisc,
475 iwh_m_multicst,
476 iwh_m_unicst,
477 iwh_m_tx,
478 NULL,
479 iwh_m_ioctl,
480 NULL,
481 NULL,
482 NULL,
483 iwh_m_setprop,
484 iwh_m_getprop,
485 iwh_m_propinfo
486 };
487
488 #ifdef DEBUG
489 void
490 iwh_dbg(uint32_t flags, const char *fmt, ...)
491 {
492 va_list ap;
493
494 if (flags & iwh_dbg_flags) {
495 va_start(ap, fmt);
496 vcmn_err(CE_NOTE, fmt, ap);
497 va_end(ap);
498 }
499 }
500 #endif /* DEBUG */
501
502 /*
503 * device operations
504 */
505 int
506 iwh_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
507 {
508 iwh_sc_t *sc;
509 ieee80211com_t *ic;
510 int instance, i;
511 char strbuf[32];
512 wifi_data_t wd = { 0 };
513 mac_register_t *macp;
514 int intr_type;
515 int intr_count;
516 int intr_actual;
517 int err = DDI_FAILURE;
518
519 switch (cmd) {
520 case DDI_ATTACH:
521 break;
522
523 case DDI_RESUME:
524 instance = ddi_get_instance(dip);
525 sc = ddi_get_soft_state(iwh_soft_state_p,
526 instance);
527 ASSERT(sc != NULL);
528
529 if (sc->sc_flags & IWH_F_RUNNING) {
530 (void) iwh_init(sc);
531 }
532
533 atomic_and_32(&sc->sc_flags, ~IWH_F_SUSPEND);
534
535 IWH_DBG((IWH_DEBUG_RESUME, "iwh_attach(): "
536 "resume\n"));
537 return (DDI_SUCCESS);
538
539 default:
540 goto attach_fail1;
541 }
542
543 instance = ddi_get_instance(dip);
544 err = ddi_soft_state_zalloc(iwh_soft_state_p, instance);
545 if (err != DDI_SUCCESS) {
546 cmn_err(CE_WARN, "iwh_attach(): "
547 "failed to allocate soft state\n");
548 goto attach_fail1;
549 }
550
551 sc = ddi_get_soft_state(iwh_soft_state_p, instance);
552 ASSERT(sc != NULL);
553
554 sc->sc_dip = dip;
555
556 /*
557 * map configuration space
558 */
559 err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
560 &iwh_reg_accattr, &sc->sc_cfg_handle);
561 if (err != DDI_SUCCESS) {
562 cmn_err(CE_WARN, "iwh_attach(): "
563 "failed to map config spaces regs\n");
564 goto attach_fail2;
565 }
566
567 sc->sc_dev_id = ddi_get16(sc->sc_cfg_handle,
568 (uint16_t *)(sc->sc_cfg_base + PCI_CONF_DEVID));
569 if ((sc->sc_dev_id != 0x4232) &&
570 (sc->sc_dev_id != 0x4235) &&
571 (sc->sc_dev_id != 0x4236) &&
572 (sc->sc_dev_id != 0x4237) &&
573 (sc->sc_dev_id != 0x423a) &&
574 (sc->sc_dev_id != 0x423b) &&
575 (sc->sc_dev_id != 0x423c) &&
576 (sc->sc_dev_id != 0x423d)) {
577 cmn_err(CE_WARN, "iwh_attach(): "
578 "this device is not supported\n");
579 goto attach_fail3;
580 }
581
582 iwh_init_ht_conf(sc);
583 iwh_overwrite_11n_rateset(sc);
584
585 sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
586 (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));
587
588 /*
589 * keep PCI retries from disturbing the C3 state of the CPU
590 */
591 ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base +
592 PCI_CFG_RETRY_TIMEOUT), 0);
593
594 /*
595 * determine the size of the buffers used for frames and commands sent to ucode
596 */
597 sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
598 (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
599 if (!sc->sc_clsz) {
600 sc->sc_clsz = 16;
601 }
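/*
 * the PCI cache line size register is in units of 32-bit words;
 * shift left by 2 to convert it to bytes
 */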
602 sc->sc_clsz = (sc->sc_clsz << 2);
603
604 sc->sc_dmabuf_sz = roundup(0x2000 + sizeof (struct ieee80211_frame) +
605 IEEE80211_MTU + IEEE80211_CRC_LEN +
606 (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
607 IEEE80211_WEP_CRCLEN), sc->sc_clsz);
608
609 /*
610 * Map operating registers
611 */
612 err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
613 0, 0, &iwh_reg_accattr, &sc->sc_handle);
614 if (err != DDI_SUCCESS) {
615 cmn_err(CE_WARN, "iwh_attach(): "
616 "failed to map device regs\n");
617 goto attach_fail3;
618 }
619
620 /*
621 * this is used to differentiate the hardware type
622 */
623 sc->sc_hw_rev = IWH_READ(sc, CSR_HW_REV);
624
625 err = ddi_intr_get_supported_types(dip, &intr_type);
626 if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
627 cmn_err(CE_WARN, "iwh_attach(): "
628 "fixed type interrupt is not supported\n");
629 goto attach_fail4;
630 }
631
632 err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
633 if ((err != DDI_SUCCESS) || (intr_count != 1)) {
634 cmn_err(CE_WARN, "iwh_attach(): "
635 "no fixed interrupts\n");
636 goto attach_fail4;
637 }
638
639 sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
640
641 err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
642 intr_count, &intr_actual, 0);
643 if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
644 cmn_err(CE_WARN, "iwh_attach(): "
645 "ddi_intr_alloc() failed 0x%x\n", err);
646 goto attach_fail5;
647 }
648
649 err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
650 if (err != DDI_SUCCESS) {
651 cmn_err(CE_WARN, "iwh_attach(): "
652 "ddi_intr_get_pri() failed 0x%x\n", err);
653 goto attach_fail6;
654 }
655
656 mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
657 DDI_INTR_PRI(sc->sc_intr_pri));
658 mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
659 DDI_INTR_PRI(sc->sc_intr_pri));
660 mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
661 DDI_INTR_PRI(sc->sc_intr_pri));
662
663 cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
664 cv_init(&sc->sc_put_seg_cv, NULL, CV_DRIVER, NULL);
665 cv_init(&sc->sc_ucode_cv, NULL, CV_DRIVER, NULL);
666
667 /*
668 * initialize the mfthread
669 */
670 cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
671 sc->sc_mf_thread = NULL;
672 sc->sc_mf_thread_switch = 0;
673
674 /*
675 * Allocate shared buffer for communication between driver and ucode.
676 */
677 err = iwh_alloc_shared(sc);
678 if (err != DDI_SUCCESS) {
679 cmn_err(CE_WARN, "iwh_attach(): "
680 "failed to allocate shared page\n");
681 goto attach_fail7;
682 }
683
684 (void) memset(sc->sc_shared, 0, sizeof (iwh_shared_t));
685
686 /*
687 * Allocate keep warm page.
688 */
689 err = iwh_alloc_kw(sc);
690 if (err != DDI_SUCCESS) {
691 cmn_err(CE_WARN, "iwh_attach(): "
692 "failed to allocate keep warm page\n");
693 goto attach_fail8;
694 }
695
696 err = iwh_reset_hw(sc);
697 if (err != IWH_SUCCESS) {
698 cmn_err(CE_WARN, "iwh_attach(): "
699 "failed to reset hardware\n");
700 goto attach_fail9;
701 }
702
703 /*
704 * Do some necessary hardware initializations.
705 */
706 err = iwh_preinit(sc);
707 if (err != IWH_SUCCESS) {
708 cmn_err(CE_WARN, "iwh_attach(): "
709 "failed to initialize hardware\n");
710 goto attach_fail9;
711 }
712
713 /*
714 * get hardware configurations from eeprom
715 */
716 err = iwh_eep_load(sc);
717 if (err != IWH_SUCCESS) {
718 cmn_err(CE_WARN, "iwh_attach(): "
719 "failed to load eeprom\n");
720 goto attach_fail9;
721 }
722
723 if (IWH_READ_EEP_SHORT(sc, EEP_VERSION) < 0x011a) {
724 IWH_DBG((IWH_DEBUG_EEPROM, "iwh_attach(): "
725 "unsupported eeprom detected\n"));
726 goto attach_fail9;
727 }
728
729 /*
730 * get MAC address of this chipset
731 */
732 iwh_get_mac_from_eep(sc);
733
734 /*
735 * calibration information from EEPROM
736 */
737 sc->sc_eep_calib = (struct iwh_eep_calibration *)
738 iwh_eep_addr_trans(sc, EEP_CALIBRATION);
739
740 /*
741 * initialize TX and RX ring buffers
742 */
743 err = iwh_ring_init(sc);
744 if (err != DDI_SUCCESS) {
745 cmn_err(CE_WARN, "iwh_attach(): "
746 "failed to allocate and initialize ring\n");
747 goto attach_fail9;
748 }
749
750 if ((0x423c == sc->sc_dev_id) || (0x423d == sc->sc_dev_id)) {
751 sc->sc_hdr = (iwh_firmware_hdr_t *)iwh_fw_5150_bin;
752 } else {
753 sc->sc_hdr = (iwh_firmware_hdr_t *)iwh_fw_5000_bin;
754 }
755
756 /*
757 * copy ucode to dma buffer
758 */
759 err = iwh_alloc_fw_dma(sc);
760 if (err != DDI_SUCCESS) {
761 cmn_err(CE_WARN, "iwh_attach(): "
762 "failed to allocate firmware dma\n");
763 goto attach_fail10;
764 }
765
766 /*
767 * Initialize the wifi part, which will be used by
768 * 802.11 module
769 */
770 ic = &sc->sc_ic;
771 ic->ic_phytype = IEEE80211_T_HT;
772 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
773 ic->ic_state = IEEE80211_S_INIT;
774 ic->ic_maxrssi = 100; /* experimental number */
775 ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
776 IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;
777
778 /*
779 * Support WPA/WPA2
780 */
781 ic->ic_caps |= IEEE80211_C_WPA;
782
783 /*
784 * Support QoS/WME
785 */
786 ic->ic_caps |= IEEE80211_C_WME;
787 ic->ic_wme.wme_update = iwh_wme_update;
788
789 /*
790 * Support 802.11n/HT
791 */
792 if (sc->sc_ht_conf.ht_support) {
793 ic->ic_htcaps = IEEE80211_HTC_HT |
794 IEEE80211_HTC_AMSDU;
795 ic->ic_htcaps |= IEEE80211_HTCAP_MAXAMSDU_7935;
796 }
797
798 /*
799 * set supported .11b and .11g rates
800 */
801 ic->ic_sup_rates[IEEE80211_MODE_11B] = iwh_rateset_11b;
802 ic->ic_sup_rates[IEEE80211_MODE_11G] = iwh_rateset_11g;
803
804 /*
805 * set supported .11b and .11g channels (1 through 11)
806 */
807 for (i = 1; i <= 11; i++) {
808 ic->ic_sup_channels[i].ich_freq =
809 ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
810 ic->ic_sup_channels[i].ich_flags =
811 IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
812 IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
813 IEEE80211_CHAN_PASSIVE;
814
815 if (sc->sc_ht_conf.cap & HT_CAP_SUP_WIDTH) {
816 ic->ic_sup_channels[i].ich_flags |=
817 IEEE80211_CHAN_HT40;
818 } else {
819 ic->ic_sup_channels[i].ich_flags |=
820 IEEE80211_CHAN_HT20;
821 }
822 }
823
824 ic->ic_ibss_chan = &ic->ic_sup_channels[0];
825 ic->ic_xmit = iwh_send;
826
827 /*
828 * attach to 802.11 module
829 */
830 ieee80211_attach(ic);
831
832 /*
833 * each instance has its own WPA door
834 */
835 (void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
836 ddi_driver_name(dip),
837 ddi_get_instance(dip));
838
839 /*
840 * Overwrite 80211 default configurations.
841 */
842 iwh_overwrite_ic_default(sc);
843
844 /*
845 * initialize 802.11 module
846 */
847 ieee80211_media_init(ic);
848
849 /*
850 * initialize default tx key
851 */
852 ic->ic_def_txkey = 0;
853
854 err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
855 iwh_rx_softintr, (caddr_t)sc);
856 if (err != DDI_SUCCESS) {
857 cmn_err(CE_WARN, "iwh_attach(): "
858 "add soft interrupt failed\n");
859 goto attach_fail12;
860 }
861
862 err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwh_intr,
863 (caddr_t)sc, NULL);
864 if (err != DDI_SUCCESS) {
865 cmn_err(CE_WARN, "iwh_attach(): "
866 "ddi_intr_add_handle() failed\n");
867 goto attach_fail13;
868 }
869
870 err = ddi_intr_enable(sc->sc_intr_htable[0]);
871 if (err != DDI_SUCCESS) {
872 cmn_err(CE_WARN, "iwh_attach(): "
873 "ddi_intr_enable() failed\n");
874 goto attach_fail14;
875 }
876
877 /*
878 * Initialize pointer to device specific functions
879 */
880 wd.wd_secalloc = WIFI_SEC_NONE;
881 wd.wd_opmode = ic->ic_opmode;
882 IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);
883
884 /*
885 * create relation to GLD
886 */
887 macp = mac_alloc(MAC_VERSION);
888 if (NULL == macp) {
889 cmn_err(CE_WARN, "iwh_attach(): "
890 "failed to do mac_alloc()\n");
891 goto attach_fail15;
892 }
893
894 macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI;
895 macp->m_driver = sc;
896 macp->m_dip = dip;
897 macp->m_src_addr = ic->ic_macaddr;
898 macp->m_callbacks = &iwh_m_callbacks;
899 macp->m_min_sdu = 0;
900 macp->m_max_sdu = IEEE80211_MTU;
901 macp->m_pdata = &wd;
902 macp->m_pdata_size = sizeof (wd);
903
904 /*
905 * Register the macp to mac
906 */
907 err = mac_register(macp, &ic->ic_mach);
908 mac_free(macp);
909 if (err != DDI_SUCCESS) {
910 cmn_err(CE_WARN, "iwh_attach(): "
911 "failed to do mac_register()\n");
912 goto attach_fail15;
913 }
914
915 /*
916 * Create minor node of type DDI_NT_NET_WIFI
917 */
918 (void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_SP"%d", instance);
919 err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
920 instance + 1, DDI_NT_NET_WIFI, 0);
921 if (err != DDI_SUCCESS) {
922 cmn_err(CE_WARN, "iwh_attach(): "
923 "failed to do ddi_create_minor_node()\n");
924 }
925
926 /*
927 * Notify the GLD that the link is down
928 */
929 mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
930
931 /*
932 * create the mf thread to handle link status updates,
933 * fatal error recovery, etc.
934 */
935 sc->sc_mf_thread_switch = 1;
936 if (NULL == sc->sc_mf_thread) {
937 sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
938 iwh_thread, sc, 0, &p0, TS_RUN, minclsyspri);
939 }
940
941 atomic_or_32(&sc->sc_flags, IWH_F_ATTACHED);
942
943 return (DDI_SUCCESS);
944
945 attach_fail15:
946 (void) ddi_intr_disable(sc->sc_intr_htable[0]);
947
948 attach_fail14:
949 (void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
950
951 attach_fail13:
952 (void) ddi_intr_remove_softint(sc->sc_soft_hdl);
953 sc->sc_soft_hdl = NULL;
954
955 attach_fail12:
956 ieee80211_detach(ic);
957
958 attach_fail11:
959 iwh_free_fw_dma(sc);
960
961 attach_fail10:
962 iwh_ring_free(sc);
963
964 attach_fail9:
965 iwh_free_kw(sc);
966
967 attach_fail8:
968 iwh_free_shared(sc);
969
970 attach_fail7:
971 iwh_destroy_locks(sc);
972
973 attach_fail6:
974 (void) ddi_intr_free(sc->sc_intr_htable[0]);
975
976 attach_fail5:
977 kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
978
979 attach_fail4:
980 ddi_regs_map_free(&sc->sc_handle);
981
982 attach_fail3:
983 ddi_regs_map_free(&sc->sc_cfg_handle);
984
985 attach_fail2:
986 ddi_soft_state_free(iwh_soft_state_p, instance);
987
988 attach_fail1:
989 return (DDI_FAILURE);
990 }
991
992 int
993 iwh_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
994 {
995 iwh_sc_t *sc;
996 ieee80211com_t *ic;
997 int err;
998
999 sc = ddi_get_soft_state(iwh_soft_state_p, ddi_get_instance(dip));
1000 ASSERT(sc != NULL);
1001 ic = &sc->sc_ic;
1002
1003 switch (cmd) {
1004 case DDI_DETACH:
1005 break;
1006
1007 case DDI_SUSPEND:
1008 atomic_and_32(&sc->sc_flags, ~IWH_F_HW_ERR_RECOVER);
1009 atomic_and_32(&sc->sc_flags, ~IWH_F_RATE_AUTO_CTL);
1010
1011 atomic_or_32(&sc->sc_flags, IWH_F_SUSPEND);
1012
1013 if (sc->sc_flags & IWH_F_RUNNING) {
1014 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
1015 iwh_stop(sc);
1016 }
1017
1018 IWH_DBG((IWH_DEBUG_RESUME, "iwh_detach(): "
1019 "suspend\n"));
1020 return (DDI_SUCCESS);
1021
1022 default:
1023 return (DDI_FAILURE);
1024 }
1025
1026 if (!(sc->sc_flags & IWH_F_ATTACHED)) {
1027 return (DDI_FAILURE);
1028 }
1029
1030 /*
1031 * Destroy the mf_thread
1032 */
1033 sc->sc_mf_thread_switch = 0;
1034
1035 mutex_enter(&sc->sc_mt_lock);
1036 while (sc->sc_mf_thread != NULL) {
1037 if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0) {
1038 break;
1039 }
1040 }
1041 mutex_exit(&sc->sc_mt_lock);
1042
1043 err = mac_disable(sc->sc_ic.ic_mach);
1044 if (err != DDI_SUCCESS) {
1045 return (err);
1046 }
1047
1048 /*
1049 * stop chipset
1050 */
1051 iwh_stop(sc);
1052
1053 DELAY(500000);
1054
1055 /*
1056 * release buffer for calibration
1057 */
1058 iwh_release_calib_buffer(sc);
1059
1060 /*
1061 * Unregister from the GLD
1062 */
1063 (void) mac_unregister(sc->sc_ic.ic_mach);
1064
1065 mutex_enter(&sc->sc_glock);
1066 iwh_free_fw_dma(sc);
1067 iwh_ring_free(sc);
1068 iwh_free_kw(sc);
1069 iwh_free_shared(sc);
1070 mutex_exit(&sc->sc_glock);
1071
1072 (void) ddi_intr_disable(sc->sc_intr_htable[0]);
1073 (void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
1074 (void) ddi_intr_free(sc->sc_intr_htable[0]);
1075 kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
1076
1077 (void) ddi_intr_remove_softint(sc->sc_soft_hdl);
1078 sc->sc_soft_hdl = NULL;
1079
1080 /*
1081 * detach from 80211 module
1082 */
1083 ieee80211_detach(&sc->sc_ic);
1084
1085 iwh_destroy_locks(sc);
1086
1087 ddi_regs_map_free(&sc->sc_handle);
1088 ddi_regs_map_free(&sc->sc_cfg_handle);
1089 ddi_remove_minor_node(dip, NULL);
1090 ddi_soft_state_free(iwh_soft_state_p, ddi_get_instance(dip));
1091
1092 return (DDI_SUCCESS);
1093 }
1094
1095 /*
1096 * destroy all locks
1097 */
1098 static void
1099 iwh_destroy_locks(iwh_sc_t *sc)
1100 {
1101 cv_destroy(&sc->sc_mt_cv);
1102 cv_destroy(&sc->sc_cmd_cv);
1103 cv_destroy(&sc->sc_put_seg_cv);
1104 cv_destroy(&sc->sc_ucode_cv);
1105 mutex_destroy(&sc->sc_mt_lock);
1106 mutex_destroy(&sc->sc_tx_lock);
1107 mutex_destroy(&sc->sc_glock);
1108 }
1109
1110 /*
1111 * Allocate an area of memory and a DMA handle for accessing it
1112 */
1113 static int
1114 iwh_alloc_dma_mem(iwh_sc_t *sc, size_t memsize,
1115 ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
1116 uint_t dma_flags, iwh_dma_t *dma_p)
1117 {
1118 caddr_t vaddr;
1119 int err = DDI_FAILURE;
1120
1121 /*
1122 * Allocate handle
1123 */
1124 err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
1125 DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
1126 if (err != DDI_SUCCESS) {
1127 dma_p->dma_hdl = NULL;
1128 return (DDI_FAILURE);
1129 }
1130
1131 /*
1132 * Allocate memory
1133 */
1134 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
1135 dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
1136 DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
1137 if (err != DDI_SUCCESS) {
1138 ddi_dma_free_handle(&dma_p->dma_hdl);
1139 dma_p->dma_hdl = NULL;
1140 dma_p->acc_hdl = NULL;
1141 return (DDI_FAILURE);
1142 }
1143
1144 /*
1145 * Bind the two together
1146 */
1147 dma_p->mem_va = vaddr;
1148 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
1149 vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
1150 &dma_p->cookie, &dma_p->ncookies);
1151 if (err != DDI_DMA_MAPPED) {
1152 ddi_dma_mem_free(&dma_p->acc_hdl);
1153 ddi_dma_free_handle(&dma_p->dma_hdl);
1154 dma_p->acc_hdl = NULL;
1155 dma_p->dma_hdl = NULL;
1156 return (DDI_FAILURE);
1157 }
1158
1159 dma_p->nslots = ~0U;
1160 dma_p->size = ~0U;
1161 dma_p->token = ~0U;
1162 dma_p->offset = 0;
1163 return (DDI_SUCCESS);
1164 }
1165
1166 /*
1167 * Free one allocated area of DMAable memory
1168 */
1169 static void
1170 iwh_free_dma_mem(iwh_dma_t *dma_p)
1171 {
1172 if (dma_p->dma_hdl != NULL) {
1173 if (dma_p->ncookies) {
1174 (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
1175 dma_p->ncookies = 0;
1176 }
1177 ddi_dma_free_handle(&dma_p->dma_hdl);
1178 dma_p->dma_hdl = NULL;
1179 }
1180
1181 if (dma_p->acc_hdl != NULL) {
1182 ddi_dma_mem_free(&dma_p->acc_hdl);
1183 dma_p->acc_hdl = NULL;
1184 }
1185 }
1186
1187 /*
1188 * copy ucode into dma buffers
1189 */
1190 static int
1191 iwh_alloc_fw_dma(iwh_sc_t *sc)
1192 {
1193 int err = DDI_FAILURE;
1194 iwh_dma_t *dma_p;
1195 char *t;
1196
1197 /*
1198 * firmware image layout:
1199 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
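 *
 * the sections are located by walking the image using the sizes recorded
 * in the header (a sketch of what the code below does):
 *	text      = (char *)(hdr + 1);
 *	data      = text + LE_32(hdr->textsz);
 *	init_text = data + LE_32(hdr->datasz);
 *	init_data = init_text + LE_32(hdr->init_textsz);
 *	boot      = init_data + LE_32(hdr->init_datasz);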
1200 */
1201
1202 /*
1203 * copy text of runtime ucode
1204 */
1205 t = (char *)(sc->sc_hdr + 1);
1206 err = iwh_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
1207 &fw_dma_attr, &iwh_dma_accattr,
1208 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1209 &sc->sc_dma_fw_text);
1210 if (err != DDI_SUCCESS) {
1211 cmn_err(CE_WARN, "iwh_alloc_fw_dma(): "
1212 "failed to allocate text dma memory.\n");
1213 goto fail;
1214 }
1215
1216 dma_p = &sc->sc_dma_fw_text;
1217
1218 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_fw_dma(): "
1219 "text[ncookies:%d addr:%lx size:%lx]\n",
1220 dma_p->ncookies, dma_p->cookie.dmac_address,
1221 dma_p->cookie.dmac_size));
1222
1223 bcopy(t, dma_p->mem_va, LE_32(sc->sc_hdr->textsz));
1224
1225 /*
1226 * copy data and backup data of runtime ucode
1227 */
1228 t += LE_32(sc->sc_hdr->textsz);
1229 err = iwh_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1230 &fw_dma_attr, &iwh_dma_accattr,
1231 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1232 &sc->sc_dma_fw_data);
1233 if (err != DDI_SUCCESS) {
1234 cmn_err(CE_WARN, "iwh_alloc_fw_dma(): "
1235 "failed to allocate data dma memory\n");
1236 goto fail;
1237 }
1238
1239 dma_p = &sc->sc_dma_fw_data;
1240
1241 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_fw_dma(): "
1242 "data[ncookies:%d addr:%lx size:%lx]\n",
1243 dma_p->ncookies, dma_p->cookie.dmac_address,
1244 dma_p->cookie.dmac_size));
1245
1246 bcopy(t, dma_p->mem_va, LE_32(sc->sc_hdr->datasz));
1247
1248 err = iwh_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1249 &fw_dma_attr, &iwh_dma_accattr,
1250 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1251 &sc->sc_dma_fw_data_bak);
1252 if (err != DDI_SUCCESS) {
1253 cmn_err(CE_WARN, "iwh_alloc_fw_dma(): "
1254 "failed to allocate data backup dma memory\n");
1255 goto fail;
1256 }
1257
1258 dma_p = &sc->sc_dma_fw_data_bak;
1259
1260 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_fw_dma(): "
1261 "data_bak[ncookies:%d addr:%lx "
1262 "size:%lx]\n",
1263 dma_p->ncookies, dma_p->cookie.dmac_address,
1264 dma_p->cookie.dmac_size));
1265
1266 bcopy(t, dma_p->mem_va, LE_32(sc->sc_hdr->datasz));
1267
1268 /*
1269 * copy text of init ucode
1270 */
1271 t += LE_32(sc->sc_hdr->datasz);
1272 err = iwh_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
1273 &fw_dma_attr, &iwh_dma_accattr,
1274 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1275 &sc->sc_dma_fw_init_text);
1276 if (err != DDI_SUCCESS) {
1277 cmn_err(CE_WARN, "iwh_alloc_fw_dma(): "
1278 "failed to allocate init text dma memory\n");
1279 goto fail;
1280 }
1281
1282 dma_p = &sc->sc_dma_fw_init_text;
1283
1284 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_fw_dma(): "
1285 "init_text[ncookies:%d addr:%lx "
1286 "size:%lx]\n",
1287 dma_p->ncookies, dma_p->cookie.dmac_address,
1288 dma_p->cookie.dmac_size));
1289
1290 bcopy(t, dma_p->mem_va, LE_32(sc->sc_hdr->init_textsz));
1291
1292 /*
1293 * copy data of init ucode
1294 */
1295 t += LE_32(sc->sc_hdr->init_textsz);
1296 err = iwh_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
1297 &fw_dma_attr, &iwh_dma_accattr,
1298 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1299 &sc->sc_dma_fw_init_data);
1300 if (err != DDI_SUCCESS) {
1301 cmn_err(CE_WARN, "iwh_alloc_fw_dma(): "
1302 "failed to allocate init data dma memory\n");
1303 goto fail;
1304 }
1305
1306 dma_p = &sc->sc_dma_fw_init_data;
1307
1308 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_fw_dma(): "
1309 "init_data[ncookies:%d addr:%lx "
1310 "size:%lx]\n",
1311 dma_p->ncookies, dma_p->cookie.dmac_address,
1312 dma_p->cookie.dmac_size));
1313
1314 bcopy(t, dma_p->mem_va, LE_32(sc->sc_hdr->init_datasz));
1315
1316 sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
1317
1318 fail:
1319 return (err);
1320 }
1321
1322 static void
1323 iwh_free_fw_dma(iwh_sc_t *sc)
1324 {
1325 iwh_free_dma_mem(&sc->sc_dma_fw_text);
1326 iwh_free_dma_mem(&sc->sc_dma_fw_data);
1327 iwh_free_dma_mem(&sc->sc_dma_fw_data_bak);
1328 iwh_free_dma_mem(&sc->sc_dma_fw_init_text);
1329 iwh_free_dma_mem(&sc->sc_dma_fw_init_data);
1330 }
1331
1332 /*
1333 * Allocate a shared buffer between host and NIC.
1334 */
1335 static int
1336 iwh_alloc_shared(iwh_sc_t *sc)
1337 {
1338 #ifdef DEBUG
1339 iwh_dma_t *dma_p;
1340 #endif
1341 int err = DDI_FAILURE;
1342
1343 /*
1344 * must be aligned on a 4K-page boundary
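 * (enforced by the 0x1000 alignment in sh_dma_attr)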
1345 */
1346 err = iwh_alloc_dma_mem(sc, sizeof (iwh_shared_t),
1347 &sh_dma_attr, &iwh_dma_descattr,
1348 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1349 &sc->sc_dma_sh);
1350 if (err != DDI_SUCCESS) {
1351 goto fail;
1352 }
1353
1354 sc->sc_shared = (iwh_shared_t *)sc->sc_dma_sh.mem_va;
1355
1356 #ifdef DEBUG
1357 dma_p = &sc->sc_dma_sh;
1358 #endif
1359 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_shared(): "
1360 "sh[ncookies:%d addr:%lx size:%lx]\n",
1361 dma_p->ncookies, dma_p->cookie.dmac_address,
1362 dma_p->cookie.dmac_size));
1363
1364 return (err);
1365
1366 fail:
1367 iwh_free_shared(sc);
1368 return (err);
1369 }
1370
1371 static void
1372 iwh_free_shared(iwh_sc_t *sc)
1373 {
1374 iwh_free_dma_mem(&sc->sc_dma_sh);
1375 }
1376
1377 /*
1378 * Allocate a keep warm page.
1379 */
1380 static int
1381 iwh_alloc_kw(iwh_sc_t *sc)
1382 {
1383 #ifdef DEBUG
1384 iwh_dma_t *dma_p;
1385 #endif
1386 int err = DDI_FAILURE;
1387
1388 /*
1389 * must be aligned on a 4K-page boundary
1390 */
1391 err = iwh_alloc_dma_mem(sc, IWH_KW_SIZE,
1392 &kw_dma_attr, &iwh_dma_descattr,
1393 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1394 &sc->sc_dma_kw);
1395 if (err != DDI_SUCCESS) {
1396 goto fail;
1397 }
1398
1399 #ifdef DEBUG
1400 dma_p = &sc->sc_dma_kw;
1401 #endif
1402 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_kw(): "
1403 "kw[ncookies:%d addr:%lx size:%lx]\n",
1404 dma_p->ncookies, dma_p->cookie.dmac_address,
1405 dma_p->cookie.dmac_size));
1406
1407 return (err);
1408
1409 fail:
1410 iwh_free_kw(sc);
1411 return (err);
1412 }
1413
1414 static void
1415 iwh_free_kw(iwh_sc_t *sc)
1416 {
1417 iwh_free_dma_mem(&sc->sc_dma_kw);
1418 }
1419
1420 /*
1421 * initialize RX ring buffers
1422 */
1423 static int
1424 iwh_alloc_rx_ring(iwh_sc_t *sc)
1425 {
1426 iwh_rx_ring_t *ring;
1427 iwh_rx_data_t *data;
1428 #ifdef DEBUG
1429 iwh_dma_t *dma_p;
1430 #endif
1431 int i, err = DDI_FAILURE;
1432
1433 ring = &sc->sc_rxq;
1434 ring->cur = 0;
1435
1436 /*
1437 * allocate RX descriptor ring buffer
1438 */
1439 err = iwh_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
1440 &ring_desc_dma_attr, &iwh_dma_descattr,
1441 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1442 &ring->dma_desc);
1443 if (err != DDI_SUCCESS) {
1444 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_rx_ring(): "
1445 "dma alloc rx ring desc "
1446 "failed\n"));
1447 goto fail;
1448 }
1449
1450 ring->desc = (uint32_t *)ring->dma_desc.mem_va;
1451 #ifdef DEBUG
1452 dma_p = &ring->dma_desc;
1453 #endif
1454 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_rx_ring(): "
1455 "rx bd[ncookies:%d addr:%lx size:%lx]\n",
1456 dma_p->ncookies, dma_p->cookie.dmac_address,
1457 dma_p->cookie.dmac_size));
1458
1459 /*
1460 * Allocate Rx frame buffers.
1461 */
1462 for (i = 0; i < RX_QUEUE_SIZE; i++) {
1463 data = &ring->data[i];
1464 err = iwh_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1465 &rx_buffer_dma_attr, &iwh_dma_accattr,
1466 DDI_DMA_READ | DDI_DMA_STREAMING,
1467 &data->dma_data);
1468 if (err != DDI_SUCCESS) {
1469 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_rx_ring(): "
1470 "dma alloc rx ring "
1471 "buf[%d] failed\n", i));
1472 goto fail;
1473 }
1474 /*
1475 * bits [8-36] of the physical address are used here (the buffer
1476 * is 256-byte aligned), instead of bits [0-31] as on the 3945.
1477 */
1478 ring->desc[i] = (uint32_t)
1479 (data->dma_data.cookie.dmac_address >> 8);
1480 }
1481
1482 #ifdef DEBUG
1483 dma_p = &ring->data[0].dma_data;
1484 #endif
1485 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_rx_ring(): "
1486 "rx buffer[0][ncookies:%d addr:%lx "
1487 "size:%lx]\n",
1488 dma_p->ncookies, dma_p->cookie.dmac_address,
1489 dma_p->cookie.dmac_size));
1490
1491 IWH_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1492
1493 return (err);
1494
1495 fail:
1496 iwh_free_rx_ring(sc);
1497 return (err);
1498 }
1499
1500 /*
1501 * disable RX ring
1502 */
1503 static void
1504 iwh_reset_rx_ring(iwh_sc_t *sc)
1505 {
1506 int n;
1507
1508 iwh_mac_access_enter(sc);
1509 IWH_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1510 for (n = 0; n < 2000; n++) {
1511 if (IWH_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24)) {
1512 break;
1513 }
1514 DELAY(1000);
1515 }
1516 #ifdef DEBUG
1517 if (2000 == n) {
1518 IWH_DBG((IWH_DEBUG_DMA, "iwh_reset_rx_ring(): "
1519 "timeout resetting Rx ring\n"));
1520 }
1521 #endif
1522 iwh_mac_access_exit(sc);
1523
1524 sc->sc_rxq.cur = 0;
1525 }
1526
1527 static void
1528 iwh_free_rx_ring(iwh_sc_t *sc)
1529 {
1530 int i;
1531
1532 for (i = 0; i < RX_QUEUE_SIZE; i++) {
1533 if (sc->sc_rxq.data[i].dma_data.dma_hdl) {
1534 IWH_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1535 DDI_DMA_SYNC_FORCPU);
1536 }
1537
1538 iwh_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1539 }
1540
1541 if (sc->sc_rxq.dma_desc.dma_hdl) {
1542 IWH_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1543 }
1544
1545 iwh_free_dma_mem(&sc->sc_rxq.dma_desc);
1546 }
1547
1548 /*
1549 * initialize TX ring buffers
1550 */
1551 static int
1552 iwh_alloc_tx_ring(iwh_sc_t *sc, iwh_tx_ring_t *ring,
1553 int slots, int qid)
1554 {
1555 iwh_tx_data_t *data;
1556 iwh_tx_desc_t *desc_h;
1557 uint32_t paddr_desc_h;
1558 iwh_cmd_t *cmd_h;
1559 uint32_t paddr_cmd_h;
1560 #ifdef DEBUG
1561 iwh_dma_t *dma_p;
1562 #endif
1563 int i, err = DDI_FAILURE;
1564
1565 ring->qid = qid;
1566 ring->count = TFD_QUEUE_SIZE_MAX;
1567 ring->window = slots;
1568 ring->queued = 0;
1569 ring->cur = 0;
1570 ring->desc_cur = 0;
1571
1572 /*
1573 * allocate buffer for TX descriptor ring
1574 */
1575 err = iwh_alloc_dma_mem(sc,
1576 TFD_QUEUE_SIZE_MAX * sizeof (iwh_tx_desc_t),
1577 &ring_desc_dma_attr, &iwh_dma_descattr,
1578 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1579 &ring->dma_desc);
1580 if (err != DDI_SUCCESS) {
1581 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1582 "dma alloc tx ring desc[%d] "
1583 "failed\n", qid));
1584 goto fail;
1585 }
1586
1587 #ifdef DEBUG
1588 dma_p = &ring->dma_desc;
1589 #endif
1590 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1591 "tx bd[ncookies:%d addr:%lx size:%lx]\n",
1592 dma_p->ncookies, dma_p->cookie.dmac_address,
1593 dma_p->cookie.dmac_size));
1594
1595 desc_h = (iwh_tx_desc_t *)ring->dma_desc.mem_va;
1596 paddr_desc_h = ring->dma_desc.cookie.dmac_address;
1597
1598 /*
1599 * allocate buffer for ucode command
1600 */
1601 err = iwh_alloc_dma_mem(sc,
1602 TFD_QUEUE_SIZE_MAX * sizeof (iwh_cmd_t),
1603 &cmd_dma_attr, &iwh_dma_accattr,
1604 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1605 &ring->dma_cmd);
1606 if (err != DDI_SUCCESS) {
1607 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1608 "dma alloc tx ring cmd[%d]"
1609 " failed\n", qid));
1610 goto fail;
1611 }
1612
1613 #ifdef DEBUG
1614 dma_p = &ring->dma_cmd;
1615 #endif
1616 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1617 "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
1618 dma_p->ncookies, dma_p->cookie.dmac_address,
1619 dma_p->cookie.dmac_size));
1620
1621 cmd_h = (iwh_cmd_t *)ring->dma_cmd.mem_va;
1622 paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;
1623
1624 /*
1625 * Allocate Tx frame buffers.
1626 */
1627 ring->data = kmem_zalloc(sizeof (iwh_tx_data_t) * TFD_QUEUE_SIZE_MAX,
1628 KM_NOSLEEP);
1629 if (NULL == ring->data) {
1630 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1631 "could not allocate "
1632 "tx data slots\n"));
1633 goto fail;
1634 }
1635
1636 for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
1637 data = &ring->data[i];
1638 err = iwh_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1639 &tx_buffer_dma_attr, &iwh_dma_accattr,
1640 DDI_DMA_WRITE | DDI_DMA_STREAMING,
1641 &data->dma_data);
1642 if (err != DDI_SUCCESS) {
1643 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1644 "dma alloc tx "
1645 "ring buf[%d] failed\n", i));
1646 goto fail;
1647 }
1648
1649 data->desc = desc_h + i;
1650 data->paddr_desc = paddr_desc_h +
1651 _PTRDIFF(data->desc, desc_h);
1652 data->cmd = cmd_h + i;
1653 data->paddr_cmd = paddr_cmd_h +
1654 _PTRDIFF(data->cmd, cmd_h);
1655 }
1656 #ifdef DEBUG
1657 dma_p = &ring->data[0].dma_data;
1658 #endif
1659 IWH_DBG((IWH_DEBUG_DMA, "iwh_alloc_tx_ring(): "
1660 "tx buffer[0][ncookies:%d addr:%lx "
1661 "size:%lx]\n",
1662 dma_p->ncookies, dma_p->cookie.dmac_address,
1663 dma_p->cookie.dmac_size));
1664
1665 return (err);
1666
1667 fail:
1668 iwh_free_tx_ring(ring);
1669
1670 return (err);
1671 }
1672
1673 /*
1674 * disable TX ring
1675 */
1676 static void
1677 iwh_reset_tx_ring(iwh_sc_t *sc, iwh_tx_ring_t *ring)
1678 {
1679 iwh_tx_data_t *data;
1680 int i, n;
1681
1682 iwh_mac_access_enter(sc);
1683
1684 IWH_WRITE(sc, IWH_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
1685 for (n = 0; n < 200; n++) {
1686 if (IWH_READ(sc, IWH_FH_TSSR_TX_STATUS_REG) &
1687 IWH_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid)) {
1688 break;
1689 }
1690 DELAY(10);
1691 }
1692
1693 #ifdef DEBUG
1694 if (200 == n) {
1695 IWH_DBG((IWH_DEBUG_DMA, "iwh_reset_tx_ring(): "
1696 "timeout reset tx ring %d\n",
1697 ring->qid));
1698 }
1699 #endif
1700
1701 iwh_mac_access_exit(sc);
1702
1703 /*
1704 * bypass the DMA sync if we have been quiesced
1705 */
1706 if (!(sc->sc_flags & IWH_F_QUIESCED)) {
1707 for (i = 0; i < ring->count; i++) {
1708 data = &ring->data[i];
1709 IWH_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
1710 }
1711 }
1712
1713 ring->queued = 0;
1714 ring->cur = 0;
1715 ring->desc_cur = 0;
1716 }
1717
1718 static void
1719 iwh_free_tx_ring(iwh_tx_ring_t *ring)
1720 {
1721 int i;
1722
1723 if (ring->dma_desc.dma_hdl != NULL) {
1724 IWH_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1725 }
1726 iwh_free_dma_mem(&ring->dma_desc);
1727
1728 if (ring->dma_cmd.dma_hdl != NULL) {
1729 IWH_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
1730 }
1731 iwh_free_dma_mem(&ring->dma_cmd);
1732
1733 if (ring->data != NULL) {
1734 for (i = 0; i < ring->count; i++) {
1735 if (ring->data[i].dma_data.dma_hdl) {
1736 IWH_DMA_SYNC(ring->data[i].dma_data,
1737 DDI_DMA_SYNC_FORDEV);
1738 }
1739 iwh_free_dma_mem(&ring->data[i].dma_data);
1740 }
1741 kmem_free(ring->data, ring->count * sizeof (iwh_tx_data_t));
1742 }
1743 }
1744
1745 /*
1746 * initialize TX and RX ring
1747 */
1748 static int
1749 iwh_ring_init(iwh_sc_t *sc)
1750 {
1751 int i, err = DDI_FAILURE;
1752
1753 for (i = 0; i < IWH_NUM_QUEUES; i++) {
1754 if (IWH_CMD_QUEUE_NUM == i) {
1755 continue;
1756 }
1757
1758 err = iwh_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1759 i);
1760 if (err != DDI_SUCCESS) {
1761 goto fail;
1762 }
1763 }
1764
1765 /*
1766 * initialize command queue
1767 */
1768 err = iwh_alloc_tx_ring(sc, &sc->sc_txq[IWH_CMD_QUEUE_NUM],
1769 TFD_CMD_SLOTS, IWH_CMD_QUEUE_NUM);
1770 if (err != DDI_SUCCESS) {
1771 goto fail;
1772 }
1773
1774 err = iwh_alloc_rx_ring(sc);
1775 if (err != DDI_SUCCESS) {
1776 goto fail;
1777 }
1778
1779 fail:
1780 return (err);
1781 }
1782
1783 static void
1784 iwh_ring_free(iwh_sc_t *sc)
1785 {
1786 int i = IWH_NUM_QUEUES;
1787
1788 iwh_free_rx_ring(sc);
1789 while (--i >= 0) {
1790 iwh_free_tx_ring(&sc->sc_txq[i]);
1791 }
1792 }
1793
1794 /* ARGSUSED */
1795 static ieee80211_node_t *
1796 iwh_node_alloc(ieee80211com_t *ic)
1797 {
1798 iwh_amrr_t *amrr;
1799
1800 amrr = kmem_zalloc(sizeof (iwh_amrr_t), KM_SLEEP);
1801 if (NULL == amrr) {
1802 cmn_err(CE_WARN, "iwh_node_alloc(): "
1803 "failed to allocate memory for amrr structure\n");
1804 return (NULL);
1805 }
1806
1807 iwh_amrr_init(amrr);
1808
1809 return (&amrr->in);
1810 }
1811
1812 static void
1813 iwh_node_free(ieee80211_node_t *in)
1814 {
1815 ieee80211com_t *ic;
1816
1817 if ((NULL == in) ||
1818 (NULL == in->in_ic)) {
1819 cmn_err(CE_WARN, "iwh_node_free() "
1820 "Got a NULL pointer from the net80211 module\n");
1821 return;
1822 }
1823 ic = in->in_ic;
1824
1825 if (ic->ic_node_cleanup != NULL) {
1826 ic->ic_node_cleanup(in);
1827 }
1828
1829 if (in->in_wpa_ie != NULL) {
1830 ieee80211_free(in->in_wpa_ie);
1831 }
1832
1833 if (in->in_wme_ie != NULL) {
1834 ieee80211_free(in->in_wme_ie);
1835 }
1836
1837 if (in->in_htcap_ie != NULL) {
1838 ieee80211_free(in->in_htcap_ie);
1839 }
1840
1841 kmem_free(in, sizeof (iwh_amrr_t));
1842 }
1843
1844 /*
1845 * Change the station's state. This function is invoked by the net80211
1846 * module when the station's state needs to change.
1847 */
1848 static int
1849 iwh_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1850 {
1851 iwh_sc_t *sc;
1852 ieee80211_node_t *in;
1853 enum ieee80211_state ostate;
1854 iwh_add_sta_t node;
1855 iwh_amrr_t *amrr;
1856 uint8_t r;
1857 int i, err = IWH_FAIL;
1858
1859 if (NULL == ic) {
1860 return (err);
1861 }
1862 sc = (iwh_sc_t *)ic;
1863 in = ic->ic_bss;
1864 ostate = ic->ic_state;
1865
1866 mutex_enter(&sc->sc_glock);
1867
1868 switch (nstate) {
1869 case IEEE80211_S_SCAN:
1870 switch (ostate) {
1871 case IEEE80211_S_INIT:
1872 atomic_or_32(&sc->sc_flags, IWH_F_SCANNING);
1873 iwh_set_led(sc, 2, 10, 2);
1874
1875 /*
1876 * clear association to receive beacons from
1877 * all BSS'es
1878 */
1879 sc->sc_config.assoc_id = 0;
1880 sc->sc_config.filter_flags &=
1881 ~LE_32(RXON_FILTER_ASSOC_MSK);
1882
1883 IWH_DBG((IWH_DEBUG_80211, "iwh_newstate(): "
1884 "config chan %d "
1885 "flags %x filter_flags %x\n",
1886 LE_16(sc->sc_config.chan),
1887 LE_32(sc->sc_config.flags),
1888 LE_32(sc->sc_config.filter_flags)));
1889
1890 err = iwh_cmd(sc, REPLY_RXON, &sc->sc_config,
1891 sizeof (iwh_rxon_cmd_t), 1);
1892 if (err != IWH_SUCCESS) {
1893 cmn_err(CE_WARN, "iwh_newstate(): "
1894 "could not clear association\n");
1895 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
1896 mutex_exit(&sc->sc_glock);
1897 return (err);
1898 }
1899
1900 /*
1901 * add broadcast node to send probe request
1902 */
1903 (void) memset(&node, 0, sizeof (node));
1904 (void) memset(&node.sta.addr, 0xff, IEEE80211_ADDR_LEN);
1905 node.sta.sta_id = IWH_BROADCAST_ID;
1906 err = iwh_cmd(sc, REPLY_ADD_STA, &node,
1907 sizeof (node), 1);
1908 if (err != IWH_SUCCESS) {
1909 cmn_err(CE_WARN, "iwh_newstate(): "
1910 "could not add broadcast node\n");
1911 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
1912 mutex_exit(&sc->sc_glock);
1913 return (err);
1914 }
1915 break;
1916 case IEEE80211_S_SCAN:
1917 mutex_exit(&sc->sc_glock);
1918 /* step to next channel before actual FW scan */
1919 err = sc->sc_newstate(ic, nstate, arg);
1920 mutex_enter(&sc->sc_glock);
1921 if ((err != 0) || ((err = iwh_scan(sc)) != 0)) {
1922 cmn_err(CE_WARN, "iwh_newstate(): "
1923 "could not initiate scan\n");
1924 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
1925 ieee80211_cancel_scan(ic);
1926 }
1927 mutex_exit(&sc->sc_glock);
1928 return (err);
1929 default:
1930 break;
1931 }
1932 sc->sc_clk = 0;
1933 break;
1934
1935 case IEEE80211_S_AUTH:
1936 if (ostate == IEEE80211_S_SCAN) {
1937 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
1938 }
1939
1940 /*
1941 * reset state to handle reassociations correctly
1942 */
1943 sc->sc_config.assoc_id = 0;
1944 sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
1945
1946 /*
1947 * before sending authentication and association request frames,
1948 * some hardware setup is needed, such as tuning to the same
1949 * channel as the target AP...
1950 */
1951 if ((err = iwh_hw_set_before_auth(sc)) != 0) {
1952 IWH_DBG((IWH_DEBUG_80211, "iwh_newstate(): "
1953 "could not send authentication request\n"));
1954 mutex_exit(&sc->sc_glock);
1955 return (err);
1956 }
1957 break;
1958
1959 case IEEE80211_S_RUN:
1960 if (ostate == IEEE80211_S_SCAN) {
1961 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
1962 }
1963
1964 if (IEEE80211_M_MONITOR == ic->ic_opmode) {
1965 /*
1966 * let LED blink when monitoring
1967 */
1968 iwh_set_led(sc, 2, 10, 10);
1969 break;
1970 }
1971
1972 IWH_DBG((IWH_DEBUG_80211, "iwh_newstate(): "
1973 "associated.\n"));
1974
1975 err = iwh_run_state_config(sc);
1976 if (err != IWH_SUCCESS) {
1977 cmn_err(CE_WARN, "iwh_newstate(): "
1978 "failed to set up association\n");
1979 mutex_exit(&sc->sc_glock);
1980 return (err);
1981 }
1982
1983 /*
1984 * start automatic rate control
1985 */
1986 if ((in->in_flags & IEEE80211_NODE_HT) &&
1987 (sc->sc_ht_conf.ht_support) &&
1988 (in->in_htrates.rs_nrates > 0) &&
1989 (in->in_htrates.rs_nrates <= IEEE80211_HTRATE_MAXSIZE)) {
1990 amrr = (iwh_amrr_t *)in;
1991
1992 for (i = in->in_htrates.rs_nrates - 1; i > 0; i--) {
1993
1994 r = in->in_htrates.rs_rates[i] &
1995 IEEE80211_RATE_VAL;
1996 if ((r != 0) && (r <= 0xd) &&
1997 (sc->sc_ht_conf.tx_support_mcs[r/8] &
1998 (1 << (r%8)))) {
1999 amrr->ht_mcs_idx = r;
2000 atomic_or_32(&sc->sc_flags,
2001 IWH_F_RATE_AUTO_CTL);
2002 break;
2003 }
2004 }
2005 } else {
2006 if (IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) {
2007 atomic_or_32(&sc->sc_flags,
2008 IWH_F_RATE_AUTO_CTL);
2009
2010 /*
2011 * set rate to some reasonable initial value
2012 */
2013 i = in->in_rates.ir_nrates - 1;
2014 while (i > 0 && IEEE80211_RATE(i) > 72) {
2015 i--;
2016 }
2017 in->in_txrate = i;
2018
2019 } else {
2020 atomic_and_32(&sc->sc_flags,
2021 ~IWH_F_RATE_AUTO_CTL);
2022 }
2023 }
2024
2025 /*
2026 * set LED on after associated
2027 */
2028 iwh_set_led(sc, 2, 0, 1);
2029 break;
2030
2031 case IEEE80211_S_INIT:
2032 if (ostate == IEEE80211_S_SCAN) {
2033 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
2034 }
2035 /*
2036 * set LED off after init
2037 */
2038 iwh_set_led(sc, 2, 1, 0);
2039 break;
2040
2041 case IEEE80211_S_ASSOC:
2042 if (ostate == IEEE80211_S_SCAN) {
2043 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
2044 }
2045 break;
2046 }
2047
2048 mutex_exit(&sc->sc_glock);
2049
2050 return (sc->sc_newstate(ic, nstate, arg));
2051 }
2052
2053 /*
2054 * begin exclusive access to the MAC
2055 */
2056 static void
2057 iwh_mac_access_enter(iwh_sc_t *sc)
2058 {
2059 uint32_t tmp;
2060 int n;
2061
2062 tmp = IWH_READ(sc, CSR_GP_CNTRL);
2063 IWH_WRITE(sc, CSR_GP_CNTRL,
2064 tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2065
2066 /*
2067 * wait until we succeed
2068 */
2069 for (n = 0; n < 1000; n++) {
2070 if ((IWH_READ(sc, CSR_GP_CNTRL) &
2071 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
2072 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
2073 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN) {
2074 break;
2075 }
2076 DELAY(10);
2077 }
2078
2079 #ifdef DEBUG
2080 if (1000 == n) {
2081 IWH_DBG((IWH_DEBUG_PIO, "iwh_mac_access_enter(): "
2082 "could not lock memory\n"));
2083 }
2084 #endif
2085 }
2086
2087 /*
2088 * end exclusive access to the MAC
2089 */
2090 static void
2091 iwh_mac_access_exit(iwh_sc_t *sc)
2092 {
2093 uint32_t tmp = IWH_READ(sc, CSR_GP_CNTRL);
2094 IWH_WRITE(sc, CSR_GP_CNTRL,
2095 tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2096 }
2097
2098 /*
2099 * this function is defined here for future use:
2100 * static uint32_t
2101 * iwh_mem_read(iwh_sc_t *sc, uint32_t addr)
2102 * {
2103 * IWH_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
2104 * return (IWH_READ(sc, HBUS_TARG_MEM_RDAT));
2105 * }
2106 */
2107
2108 /*
2109 * write mac memory
2110 */
2111 static void
2112 iwh_mem_write(iwh_sc_t *sc, uint32_t addr, uint32_t data)
2113 {
2114 IWH_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
2115 IWH_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
2116 }
2117
2118 /*
2119 * read mac register
2120 */
2121 static uint32_t
2122 iwh_reg_read(iwh_sc_t *sc, uint32_t addr)
2123 {
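/*
 * the (3 << 24) in the periphery register address presumably selects
 * a 4-byte access; the low bits carry the register address itself
 */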
2124 IWH_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
2125 return (IWH_READ(sc, HBUS_TARG_PRPH_RDAT));
2126 }
2127
2128 /*
2129 * write mac register
2130 */
2131 static void
2132 iwh_reg_write(iwh_sc_t *sc, uint32_t addr, uint32_t data)
2133 {
2134 IWH_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
2135 IWH_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
2136 }
2137
2138
2139 /*
2140 * steps of loading ucode:
2141 * load init ucode=>init alive=>calibrate=>
2142 * receive calibration result=>reinitialize NIC=>
2143 * load runtime ucode=>runtime alive=>
2144 * send calibration result=>running.
2145 */
2146 static int
2147 iwh_load_init_firmware(iwh_sc_t *sc)
2148 {
2149 int err = IWH_FAIL;
2150 clock_t clk;
2151
2152 atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2153
2154 /*
2155 * load init_text section of uCode to hardware
2156 */
2157 err = iwh_put_seg_fw(sc, sc->sc_dma_fw_init_text.cookie.dmac_address,
2158 RTC_INST_LOWER_BOUND, sc->sc_dma_fw_init_text.cookie.dmac_size);
2159 if (err != IWH_SUCCESS) {
2160 cmn_err(CE_WARN, "iwh_load_init_firmware(): "
2161 "failed to write init uCode.\n");
2162 return (err);
2163 }
2164
2165 clk = ddi_get_lbolt() + drv_usectohz(1000000);
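	/*
	 * one second timeout; IWH_F_PUT_SEG is set and sc_put_seg_cv is
	 * signalled from iwh_intr() when the FH TX (DMA done) interrupt
	 * arrives.
	 */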
2166
2167 /*
 * wait until the init_text load completes or times out
2169 */
2170 while (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2171 if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2172 break;
2173 }
2174 }
2175
2176 if (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2177 cmn_err(CE_WARN, "iwh_load_init_firmware(): "
2178 "timeout waiting for init uCode load.\n");
2179 return (IWH_FAIL);
2180 }
2181
2182 atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2183
2184 /*
2185 * load init_data section of uCode to hardware
2186 */
2187 err = iwh_put_seg_fw(sc, sc->sc_dma_fw_init_data.cookie.dmac_address,
2188 RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_init_data.cookie.dmac_size);
2189 if (err != IWH_SUCCESS) {
2190 cmn_err(CE_WARN, "iwh_load_init_firmware(): "
2191 "failed to write init_data uCode.\n");
2192 return (err);
2193 }
2194
2195 clk = ddi_get_lbolt() + drv_usectohz(1000000);
2196
2197 /*
 * wait until the init_data load completes or times out
2199 */
2200 while (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2201 if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2202 break;
2203 }
2204 }
2205
2206 if (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2207 cmn_err(CE_WARN, "iwh_load_init_firmware(): "
2208 "timeout waiting for init_data uCode load.\n");
2209 return (IWH_FAIL);
2210 }
2211
2212 atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2213
2214 return (err);
2215 }
2216
2217 static int
2218 iwh_load_run_firmware(iwh_sc_t *sc)
2219 {
2220 int err = IWH_FAIL;
2221 clock_t clk;
2222
2223 atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2224
2225 /*
 * load run_text section of uCode to hardware
2227 */
2228 err = iwh_put_seg_fw(sc, sc->sc_dma_fw_text.cookie.dmac_address,
2229 RTC_INST_LOWER_BOUND, sc->sc_dma_fw_text.cookie.dmac_size);
2230 if (err != IWH_SUCCESS) {
2231 cmn_err(CE_WARN, "iwh_load_run_firmware(): "
2232 "failed to write run uCode.\n");
2233 return (err);
2234 }
2235
2236 clk = ddi_get_lbolt() + drv_usectohz(1000000);
2237
2238 /*
 * wait until the run_text load completes or times out
2240 */
2241 while (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2242 if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2243 break;
2244 }
2245 }
2246
2247 if (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2248 cmn_err(CE_WARN, "iwh_load_run_firmware(): "
2249 "timeout waiting for run uCode load.\n");
2250 return (IWH_FAIL);
2251 }
2252
2253 atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2254
2255 /*
2256 * load run_data section of uCode to hardware
2257 */
2258 err = iwh_put_seg_fw(sc, sc->sc_dma_fw_data_bak.cookie.dmac_address,
2259 RTC_DATA_LOWER_BOUND, sc->sc_dma_fw_data.cookie.dmac_size);
2260 if (err != IWH_SUCCESS) {
2261 cmn_err(CE_WARN, "iwh_load_run_firmware(): "
2262 "failed to write run_data uCode.\n");
2263 return (err);
2264 }
2265
2266 clk = ddi_get_lbolt() + drv_usectohz(1000000);
2267
2268 /*
 * wait until the run_data load completes or times out
2270 */
2271 while (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2272 if (cv_timedwait(&sc->sc_put_seg_cv, &sc->sc_glock, clk) < 0) {
2273 break;
2274 }
2275 }
2276
2277 if (!(sc->sc_flags & IWH_F_PUT_SEG)) {
2278 cmn_err(CE_WARN, "iwh_load_run_firmware(): "
2279 "timeout waiting for run_data uCode load.\n");
2280 return (IWH_FAIL);
2281 }
2282
2283 atomic_and_32(&sc->sc_flags, ~IWH_F_PUT_SEG);
2284
2285 return (err);
2286 }
2287
2288 /*
2289 * this function will be invoked to receive phy information
2290 * when a frame is received.
2291 */
2292 static void
2293 iwh_rx_phy_intr(iwh_sc_t *sc, iwh_rx_desc_t *desc)
2294 {
2295
2296 sc->sc_rx_phy_res.flag = 1;
2297
2298 bcopy((uint8_t *)(desc + 1), sc->sc_rx_phy_res.buf,
2299 sizeof (iwh_rx_phy_res_t));
2300 }
2301
2302 /*
 * this function will be invoked to receive the body of a frame when
2304 * a frame is received.
2305 */
2306 static void
2307 iwh_rx_mpdu_intr(iwh_sc_t *sc, iwh_rx_desc_t *desc)
2308 {
2309 ieee80211com_t *ic = &sc->sc_ic;
2310 #ifdef DEBUG
2311 iwh_rx_ring_t *ring = &sc->sc_rxq;
2312 #endif
2313 struct ieee80211_frame *wh;
2314 struct iwh_rx_non_cfg_phy *phyinfo;
2315 struct iwh_rx_mpdu_body_size *mpdu_size;
2316 mblk_t *mp;
2317 int16_t t;
2318 uint16_t len, rssi, agc;
2319 uint32_t temp, crc, *tail;
2320 uint32_t arssi, brssi, crssi, mrssi;
2321 iwh_rx_phy_res_t *stat;
2322 ieee80211_node_t *in;
2323
2324 /*
 * assuming no 11n here; 11n will be handled in phase-II
2326 */
2327 mpdu_size = (struct iwh_rx_mpdu_body_size *)(desc + 1);
2328 stat = (iwh_rx_phy_res_t *)sc->sc_rx_phy_res.buf;
2329 if (stat->cfg_phy_cnt > 20) {
2330 return;
2331 }
2332
2333 phyinfo = (struct iwh_rx_non_cfg_phy *)stat->non_cfg_phy;
2334 temp = LE_32(phyinfo->non_cfg_phy[IWH_RX_RES_AGC_IDX]);
2335 agc = (temp & IWH_OFDM_AGC_MSK) >> IWH_OFDM_AGC_BIT_POS;
2336
2337 temp = LE_32(phyinfo->non_cfg_phy[IWH_RX_RES_RSSI_AB_IDX]);
2338 arssi = (temp & IWH_OFDM_RSSI_A_MSK) >> IWH_OFDM_RSSI_A_BIT_POS;
2339 brssi = (temp & IWH_OFDM_RSSI_B_MSK) >> IWH_OFDM_RSSI_B_BIT_POS;
2340
2341 temp = LE_32(phyinfo->non_cfg_phy[IWH_RX_RES_RSSI_C_IDX]);
2342 crssi = (temp & IWH_OFDM_RSSI_C_MSK) >> IWH_OFDM_RSSI_C_BIT_POS;
2343
2344 mrssi = MAX(arssi, brssi);
2345 mrssi = MAX(mrssi, crssi);
2346
2347 t = mrssi - agc - IWH_RSSI_OFFSET;
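	/*
	 * t is now (roughly) the received signal level in dBm: the strongest
	 * per-antenna RSSI minus the AGC gain and a fixed calibration offset.
	 */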
2348 /*
2349 * convert dBm to percentage
2350 */
2351 rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t)))
2352 / (75 * 75);
2353 if (rssi > 100) {
2354 rssi = 100;
2355 }
2356 if (rssi < 1) {
2357 rssi = 1;
2358 }
2359
2360 /*
 * size of frame, not including FCS
2362 */
2363 len = LE_16(mpdu_size->byte_count);
2364 tail = (uint32_t *)((uint8_t *)(desc + 1) +
2365 sizeof (struct iwh_rx_mpdu_body_size) + len);
2366 bcopy(tail, &crc, 4);
2367
2368 IWH_DBG((IWH_DEBUG_RX, "iwh_rx_mpdu_intr(): "
2369 "rx intr: idx=%d phy_len=%x len=%d "
2370 "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
2371 "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
2372 len, stat->rate.r.s.rate, stat->channel,
2373 LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
2374 stat->cfg_phy_cnt, LE_32(crc)));
2375
2376 if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
2377 IWH_DBG((IWH_DEBUG_RX, "iwh_rx_mpdu_intr(): "
2378 "rx frame oversize\n"));
2379 return;
2380 }
2381
2382 /*
2383 * discard Rx frames with bad CRC
2384 */
2385 if ((LE_32(crc) &
2386 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
2387 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
2388 IWH_DBG((IWH_DEBUG_RX, "iwh_rx_mpdu_intr(): "
2389 "rx crc error tail: %x\n",
2390 LE_32(crc)));
2391 sc->sc_rx_err++;
2392 return;
2393 }
2394
2395 wh = (struct ieee80211_frame *)
2396 ((uint8_t *)(desc + 1)+ sizeof (struct iwh_rx_mpdu_body_size));
2397
2398 if (IEEE80211_FC0_SUBTYPE_ASSOC_RESP == *(uint8_t *)wh) {
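		/*
		 * the fixed fields of an association response are capability
		 * info (2 bytes), status code (2 bytes) and then the
		 * association ID, hence the offset of two uint16_t's past
		 * the 802.11 header.
		 */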
2399 sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
2400 IWH_DBG((IWH_DEBUG_RX, "iwh_rx_mpdu_intr(): "
2401 "rx : association id = %x\n",
2402 sc->sc_assoc_id));
2403 }
2404
2405 #ifdef DEBUG
2406 if (iwh_dbg_flags & IWH_DEBUG_RX) {
2407 ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
2408 }
2409 #endif
2410
2411 in = ieee80211_find_rxnode(ic, wh);
2412 mp = allocb(len, BPRI_MED);
2413 if (mp) {
2414 bcopy(wh, mp->b_wptr, len);
2415 mp->b_wptr += len;
2416
2417 /*
2418 * send the frame to the 802.11 layer
2419 */
2420 (void) ieee80211_input(ic, mp, in, rssi, 0);
2421 } else {
2422 sc->sc_rx_nobuf++;
2423 IWH_DBG((IWH_DEBUG_RX, "iwh_rx_mpdu_intr(): "
2424 "alloc rx buf failed\n"));
2425 }
2426
2427 /*
2428 * release node reference
2429 */
2430 ieee80211_free_node(in);
2431 }
2432
2433 /*
 * handle housekeeping after a frame has been transmitted.
2435 */
2436 static void
2437 iwh_tx_intr(iwh_sc_t *sc, iwh_rx_desc_t *desc)
2438 {
2439 ieee80211com_t *ic = &sc->sc_ic;
2440 iwh_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
2441 iwh_tx_stat_t *stat = (iwh_tx_stat_t *)(desc + 1);
2442 iwh_amrr_t *amrr;
2443
2444 if (NULL == ic->ic_bss) {
2445 return;
2446 }
2447
2448 amrr = (iwh_amrr_t *)ic->ic_bss;
2449
2450 amrr->txcnt++;
2451 IWH_DBG((IWH_DEBUG_RATECTL, "iwh_tx_intr(): "
2452 "tx: %d cnt\n", amrr->txcnt));
2453
2454 if (stat->ntries > 0) {
2455 amrr->retrycnt++;
2456 sc->sc_tx_retries++;
2457 IWH_DBG((IWH_DEBUG_TX, "iwh_tx_intr(): "
2458 "tx: %d retries\n",
2459 sc->sc_tx_retries));
2460 }
2461
2462 mutex_enter(&sc->sc_mt_lock);
2463 sc->sc_tx_timer = 0;
2464 mutex_exit(&sc->sc_mt_lock);
2465
2466 mutex_enter(&sc->sc_tx_lock);
2467
2468 ring->queued--;
2469 if (ring->queued < 0) {
2470 ring->queued = 0;
2471 }
2472
2473 if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count >> 3))) {
2474 sc->sc_need_reschedule = 0;
2475 mutex_exit(&sc->sc_tx_lock);
2476 mac_tx_update(ic->ic_mach);
2477 mutex_enter(&sc->sc_tx_lock);
2478 }
2479
2480 mutex_exit(&sc->sc_tx_lock);
2481 }
2482
2483 /*
 * notify that a given command has been executed
2485 */
2486 static void
2487 iwh_cmd_intr(iwh_sc_t *sc, iwh_rx_desc_t *desc)
2488 {
2489 if ((desc->hdr.qid & 7) != 4) {
2490 return;
2491 }
2492
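	/*
	 * asynchronous commands bump sc_cmd_accum when they are queued
	 * (see iwh_cmd()); swallow their completions here so that only a
	 * synchronous caller waiting on sc_cmd_cv is woken up.
	 */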
2493 if (sc->sc_cmd_accum > 0) {
2494 sc->sc_cmd_accum--;
2495 return;
2496 }
2497
2498 mutex_enter(&sc->sc_glock);
2499
2500 sc->sc_cmd_flag = SC_CMD_FLG_DONE;
2501
2502 cv_signal(&sc->sc_cmd_cv);
2503
2504 mutex_exit(&sc->sc_glock);
2505
2506 IWH_DBG((IWH_DEBUG_CMD, "iwh_cmd_intr(): "
2507 "qid=%x idx=%d flags=%x type=0x%x\n",
2508 desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
2509 desc->hdr.type));
2510 }
2511
2512 /*
 * this function will be invoked when an alive notification occurs.
2514 */
2515 static void
2516 iwh_ucode_alive(iwh_sc_t *sc, iwh_rx_desc_t *desc)
2517 {
2518 uint32_t rv;
2519 struct iwh_calib_cfg_cmd cmd;
2520 struct iwh_alive_resp *ar =
2521 (struct iwh_alive_resp *)(desc + 1);
2522 struct iwh_calib_results *res_p = &sc->sc_calib_results;
2523
2524 /*
2525 * the microcontroller is ready
2526 */
2527 IWH_DBG((IWH_DEBUG_FW, "iwh_ucode_alive(): "
2528 "microcode alive notification minor: %x major: %x type: "
2529 "%x subtype: %x\n",
	    ar->ucode_minor, ar->ucode_major, ar->ver_type, ar->ver_subtype));
2531
2532 #ifdef DEBUG
2533 if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
2534 IWH_DBG((IWH_DEBUG_FW, "iwh_ucode_alive(): "
2535 "microcontroller initialization failed\n"));
2536 }
2537 #endif
2538
2539 /*
2540 * determine if init alive or runtime alive.
2541 */
2542 if (INITIALIZE_SUBTYPE == ar->ver_subtype) {
2543 IWH_DBG((IWH_DEBUG_FW, "iwh_ucode_alive(): "
2544 "initialization alive received.\n"));
2545
2546 bcopy(ar, &sc->sc_card_alive_init,
2547 sizeof (struct iwh_init_alive_resp));
2548
2549 /*
2550 * necessary configuration to NIC
2551 */
2552 mutex_enter(&sc->sc_glock);
2553
2554 rv = iwh_alive_common(sc);
2555 if (rv != IWH_SUCCESS) {
2556 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2557 "common alive process failed in init alive.\n");
2558 mutex_exit(&sc->sc_glock);
2559 return;
2560 }
2561
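		/*
		 * ask the init uCode to run all calibrations and to send
		 * each result back; the results arrive later as
		 * CALIBRATION_RES_NOTIFICATION and are saved by
		 * iwh_save_calib_result().
		 */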
2562 (void) memset(&cmd, 0, sizeof (cmd));
2563
2564 cmd.ucd_calib_cfg.once.is_enable = IWH_CALIB_INIT_CFG_ALL;
2565 cmd.ucd_calib_cfg.once.start = IWH_CALIB_INIT_CFG_ALL;
2566 cmd.ucd_calib_cfg.once.send_res = IWH_CALIB_INIT_CFG_ALL;
2567 cmd.ucd_calib_cfg.flags = IWH_CALIB_INIT_CFG_ALL;
2568
2569 /*
 * request that the uCode execute calibration
2571 */
2572 rv = iwh_cmd(sc, CALIBRATION_CFG_CMD, &cmd, sizeof (cmd), 1);
2573 if (rv != IWH_SUCCESS) {
2574 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2575 "failed to send calibration configure command.\n");
2576 mutex_exit(&sc->sc_glock);
2577 return;
2578 }
2579
2580 mutex_exit(&sc->sc_glock);
2581
2582 } else { /* runtime alive */
2583
2584 IWH_DBG((IWH_DEBUG_FW, "iwh_ucode_alive(): "
2585 "runtime alive received.\n"));
2586
2587 bcopy(ar, &sc->sc_card_alive_run,
2588 sizeof (struct iwh_alive_resp));
2589
2590 mutex_enter(&sc->sc_glock);
2591
2592 /*
2593 * necessary configuration to NIC
2594 */
2595 rv = iwh_alive_common(sc);
2596 if (rv != IWH_SUCCESS) {
2597 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2598 "common alive process failed in run alive.\n");
2599 mutex_exit(&sc->sc_glock);
2600 return;
2601 }
2602
2603 /*
 * send the result of local oscillator calibration to uCode.
2605 */
2606 if (res_p->lo_res != NULL) {
2607 rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2608 res_p->lo_res, res_p->lo_res_len, 1);
2609 if (rv != IWH_SUCCESS) {
2610 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2611 "failed to send local"
2612 "oscilator calibration command.\n");
2613 mutex_exit(&sc->sc_glock);
2614 return;
2615 }
2616
2617 DELAY(1000);
2618 }
2619
2620 /*
2621 * send the result of TX IQ calibration to uCode.
2622 */
2623 if (res_p->tx_iq_res != NULL) {
2624 rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2625 res_p->tx_iq_res, res_p->tx_iq_res_len, 1);
2626 if (rv != IWH_SUCCESS) {
2627 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2628 "failed to send TX IQ"
2629 "calibration command.\n");
2630 mutex_exit(&sc->sc_glock);
2631 return;
2632 }
2633
2634 DELAY(1000);
2635 }
2636
2637 /*
 * send the result of TX IQ perd calibration to uCode.
2639 */
2640 if (res_p->tx_iq_perd_res != NULL) {
2641 rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2642 res_p->tx_iq_perd_res,
2643 res_p->tx_iq_perd_res_len, 1);
2644 if (rv != IWH_SUCCESS) {
2645 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2646 "failed to send TX IQ perd"
2647 "calibration command.\n");
2648 mutex_exit(&sc->sc_glock);
2649 return;
2650 }
2651
2652 DELAY(1000);
2653 }
2654
2655 /*
2656 * send the result of DC calibration to uCode.
2657 */
2658 if (res_p->dc_res != NULL) {
2659 rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2660 res_p->dc_res,
2661 res_p->dc_res_len, 1);
2662 if (rv != IWH_SUCCESS) {
2663 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2664 "failed to send DC"
2665 "calibration command.\n");
2666 mutex_exit(&sc->sc_glock);
2667 return;
2668 }
2669
2670 DELAY(1000);
2671 }
2672
2673 /*
2674 * send the result of BASE BAND calibration to uCode.
2675 */
2676 if (res_p->base_band_res != NULL) {
2677 rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
2678 res_p->base_band_res,
2679 res_p->base_band_res_len, 1);
2680 if (rv != IWH_SUCCESS) {
2681 cmn_err(CE_WARN, "iwh_ucode_alive(): "
2682 "failed to send BASE BAND"
2683 "calibration command.\n");
2684 mutex_exit(&sc->sc_glock);
2685 return;
2686 }
2687
2688 DELAY(1000);
2689 }
2690
2691 atomic_or_32(&sc->sc_flags, IWH_F_FW_INIT);
2692 cv_signal(&sc->sc_ucode_cv);
2693
2694 mutex_exit(&sc->sc_glock);
2695 }
2696
2697 }
2698
2699 /*
 * handle received frames, command responses,
 * and all notifications from the uCode.
2702 */
2703 /* ARGSUSED */
2704 static uint_t
2705 iwh_rx_softintr(caddr_t arg, caddr_t unused)
2706 {
2707 iwh_sc_t *sc;
2708 ieee80211com_t *ic;
2709 iwh_rx_desc_t *desc;
2710 iwh_rx_data_t *data;
2711 uint32_t index;
2712
2713 if (NULL == arg) {
2714 return (DDI_INTR_UNCLAIMED);
2715 }
2716 sc = (iwh_sc_t *)arg;
2717 ic = &sc->sc_ic;
2718
2719 /*
 * the firmware has advanced the rx queue write index (the low 12 bits
 * of val0 in the shared page); read it and process every entry up to it.
2722 */
2723 index = (sc->sc_shared->val0) & 0xfff;
2724
2725 while (sc->sc_rxq.cur != index) {
2726 data = &sc->sc_rxq.data[sc->sc_rxq.cur];
2727 desc = (iwh_rx_desc_t *)data->dma_data.mem_va;
2728
2729 IWH_DBG((IWH_DEBUG_INTR, "iwh_rx_softintr(): "
2730 "rx notification index = %d"
2731 " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
2732 index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
2733 desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));
2734
2735 /*
 * responses to commands (other than scan and tx replies) need to be
 * acknowledged here
2737 */
2738 if (!(desc->hdr.qid & 0x80) &&
2739 (desc->hdr.type != REPLY_SCAN_CMD) &&
2740 (desc->hdr.type != REPLY_TX)) {
2741 iwh_cmd_intr(sc, desc);
2742 }
2743
2744 switch (desc->hdr.type) {
2745 case REPLY_RX_PHY_CMD:
2746 iwh_rx_phy_intr(sc, desc);
2747 break;
2748
2749 case REPLY_RX_MPDU_CMD:
2750 iwh_rx_mpdu_intr(sc, desc);
2751 break;
2752
2753 case REPLY_TX:
2754 iwh_tx_intr(sc, desc);
2755 break;
2756
2757 case REPLY_ALIVE:
2758 iwh_ucode_alive(sc, desc);
2759 break;
2760
2761 case CARD_STATE_NOTIFICATION:
2762 {
2763 uint32_t *status = (uint32_t *)(desc + 1);
2764
2765 IWH_DBG((IWH_DEBUG_RADIO, "iwh_rx_softintr(): "
2766 "state changed to %x\n",
2767 LE_32(*status)));
2768
2769 if (LE_32(*status) & 1) {
2770 /*
			 * the radio (RF kill) switch has been turned OFF.
			 * This is treated as a hw error; iwh_thread()
			 * recovers once the switch is turned back ON.
2775 */
2776 cmn_err(CE_NOTE, "iwh_rx_softintr(): "
2777 "radio transmitter is off\n");
2778 sc->sc_ostate = sc->sc_ic.ic_state;
2779 ieee80211_new_state(&sc->sc_ic,
2780 IEEE80211_S_INIT, -1);
2781 atomic_or_32(&sc->sc_flags,
2782 (IWH_F_HW_ERR_RECOVER | IWH_F_RADIO_OFF));
2783 }
2784
2785 break;
2786 }
2787
2788 case SCAN_START_NOTIFICATION:
2789 {
2790 iwh_start_scan_t *scan =
2791 (iwh_start_scan_t *)(desc + 1);
2792
2793 IWH_DBG((IWH_DEBUG_SCAN, "iwh_rx_softintr(): "
2794 "scanning channel %d status %x\n",
2795 scan->chan, LE_32(scan->status)));
2796
2797 ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
2798 break;
2799 }
2800
2801 case SCAN_COMPLETE_NOTIFICATION:
2802 {
2803 #ifdef DEBUG
2804 iwh_stop_scan_t *scan =
2805 (iwh_stop_scan_t *)(desc + 1);
2806
2807 IWH_DBG((IWH_DEBUG_SCAN, "iwh_rx_softintr(): "
2808 "completed channel %d (burst of %d) status %02x\n",
2809 scan->chan, scan->nchan, scan->status));
2810 #endif
2811
2812 sc->sc_scan_pending++;
2813 break;
2814 }
2815
2816 case STATISTICS_NOTIFICATION:
2817 {
2818 /*
2819 * handle statistics notification
2820 */
2821 break;
2822 }
2823
2824 case CALIBRATION_RES_NOTIFICATION:
2825 iwh_save_calib_result(sc, desc);
2826 break;
2827
2828 case CALIBRATION_COMPLETE_NOTIFICATION:
2829 mutex_enter(&sc->sc_glock);
2830 atomic_or_32(&sc->sc_flags, IWH_F_FW_INIT);
2831 cv_signal(&sc->sc_ucode_cv);
2832 mutex_exit(&sc->sc_glock);
2833 break;
2834
2835 case MISSED_BEACONS_NOTIFICATION:
2836 /* handle beacon miss by software mechanism */
2837 break;
2838 }
2839
2840 sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
2841 }
2842
2843 /*
 * tell the firmware how far the driver has processed the rx queue.
2846 */
2847 index = (0 == index) ? RX_QUEUE_SIZE - 1 : index - 1;
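	/*
	 * the write pointer handed back to the firmware is kept 8-aligned
	 * (hence the ~7 mask), which is what the hardware expects.
	 */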
2848 IWH_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));
2849
2850 /*
2851 * re-enable interrupts
2852 */
2853 IWH_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2854
2855 return (DDI_INTR_CLAIMED);
2856 }
2857
2858 /*
 * interrupt handler
2860 */
2861 /* ARGSUSED */
2862 static uint_t
2863 iwh_intr(caddr_t arg, caddr_t unused)
2864 {
2865 iwh_sc_t *sc;
2866 uint32_t r, rfh;
2867
2868 if (NULL == arg) {
2869 return (DDI_INTR_UNCLAIMED);
2870 }
2871 sc = (iwh_sc_t *)arg;
2872
2873 r = IWH_READ(sc, CSR_INT);
2874 if (0 == r || 0xffffffff == r) {
2875 return (DDI_INTR_UNCLAIMED);
2876 }
2877
2878 IWH_DBG((IWH_DEBUG_INTR, "iwh_intr(): "
2879 "interrupt reg %x\n", r));
2880
2881 rfh = IWH_READ(sc, CSR_FH_INT_STATUS);
2882
2883 IWH_DBG((IWH_DEBUG_INTR, "iwh_intr(): "
2884 "FH interrupt reg %x\n", rfh));
2885
2886 /*
2887 * disable interrupts
2888 */
2889 IWH_WRITE(sc, CSR_INT_MASK, 0);
2890
2891 /*
2892 * ack interrupts
2893 */
2894 IWH_WRITE(sc, CSR_INT, r);
2895 IWH_WRITE(sc, CSR_FH_INT_STATUS, rfh);
2896
2897 if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
2898 IWH_DBG((IWH_DEBUG_FW, "iwh_intr(): "
2899 "fatal firmware error\n"));
2900 iwh_stop(sc);
2901 sc->sc_ostate = sc->sc_ic.ic_state;
2902
2903 /*
2904 * notify upper layer
2905 */
2906 if (!IWH_CHK_FAST_RECOVER(sc)) {
2907 ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
2908 }
2909
2910 atomic_or_32(&sc->sc_flags, IWH_F_HW_ERR_RECOVER);
2911 return (DDI_INTR_CLAIMED);
2912 }
2913
2914 if (r & BIT_INT_RF_KILL) {
2915 uint32_t tmp = IWH_READ(sc, CSR_GP_CNTRL);
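		/*
		 * bit 27 of CSR_GP_CNTRL reflects the state of the hardware
		 * RF-kill switch; when set the radio is enabled.
		 */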
2916 if (tmp & (1 << 27)) {
2917 cmn_err(CE_NOTE, "RF switch: radio on\n");
2918 }
2919 }
2920
2921 if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
2922 (rfh & FH_INT_RX_MASK)) {
2923 (void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
2924 return (DDI_INTR_CLAIMED);
2925 }
2926
2927 if (r & BIT_INT_FH_TX) {
2928 mutex_enter(&sc->sc_glock);
2929 atomic_or_32(&sc->sc_flags, IWH_F_PUT_SEG);
2930 cv_signal(&sc->sc_put_seg_cv);
2931 mutex_exit(&sc->sc_glock);
2932 }
2933
2934 #ifdef DEBUG
2935 if (r & BIT_INT_ALIVE) {
2936 IWH_DBG((IWH_DEBUG_FW, "iwh_intr(): "
2937 "firmware initialized.\n"));
2938 }
2939 #endif
2940
2941 /*
2942 * re-enable interrupts
2943 */
2944 IWH_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2945
2946 return (DDI_INTR_CLAIMED);
2947 }
2948
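/*
 * map a net80211 rate (in 500 kbps units, e.g. 2 == 1 Mbps,
 * 108 == 54 Mbps) to the PLCP signal code expected by the hardware.
 */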
2949 static uint8_t
2950 iwh_rate_to_plcp(int rate)
2951 {
2952 uint8_t ret;
2953
2954 switch (rate) {
2955 /*
2956 * CCK rates
2957 */
2958 case 2:
2959 ret = 0xa;
2960 break;
2961
2962 case 4:
2963 ret = 0x14;
2964 break;
2965
2966 case 11:
2967 ret = 0x37;
2968 break;
2969
2970 case 22:
2971 ret = 0x6e;
2972 break;
2973
2974 /*
2975 * OFDM rates
2976 */
2977 case 12:
2978 ret = 0xd;
2979 break;
2980
2981 case 18:
2982 ret = 0xf;
2983 break;
2984
2985 case 24:
2986 ret = 0x5;
2987 break;
2988
2989 case 36:
2990 ret = 0x7;
2991 break;
2992
2993 case 48:
2994 ret = 0x9;
2995 break;
2996
2997 case 72:
2998 ret = 0xb;
2999 break;
3000
3001 case 96:
3002 ret = 0x1;
3003 break;
3004
3005 case 108:
3006 ret = 0x3;
3007 break;
3008
3009 default:
3010 ret = 0;
3011 break;
3012 }
3013
3014 return (ret);
3015 }
3016
3017 /*
 * invoked by GLD to send frames
3019 */
3020 static mblk_t *
3021 iwh_m_tx(void *arg, mblk_t *mp)
3022 {
3023 iwh_sc_t *sc;
3024 ieee80211com_t *ic;
3025 mblk_t *next;
3026
3027 if (NULL == arg) {
3028 return (NULL);
3029 }
3030 sc = (iwh_sc_t *)arg;
3031 ic = &sc->sc_ic;
3032
3033 if (sc->sc_flags & IWH_F_SUSPEND) {
3034 freemsgchain(mp);
3035 return (NULL);
3036 }
3037
3038 if (ic->ic_state != IEEE80211_S_RUN) {
3039 freemsgchain(mp);
3040 return (NULL);
3041 }
3042
3043 if ((sc->sc_flags & IWH_F_HW_ERR_RECOVER) &&
3044 IWH_CHK_FAST_RECOVER(sc)) {
3045 IWH_DBG((IWH_DEBUG_FW, "iwh_m_tx(): "
3046 "hold queue\n"));
3047 return (mp);
3048 }
3049
3050 while (mp != NULL) {
3051 next = mp->b_next;
3052 mp->b_next = NULL;
3053 if (iwh_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
3054 mp->b_next = next;
3055 break;
3056 }
3057 mp = next;
3058 }
3059
3060 return (mp);
3061 }
3062
3063 /*
3064 * send frames
3065 */
3066 static int
3067 iwh_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
3068 {
3069 iwh_sc_t *sc;
3070 iwh_tx_ring_t *ring;
3071 iwh_tx_desc_t *desc;
3072 iwh_tx_data_t *data;
3073 iwh_tx_data_t *desc_data;
3074 iwh_cmd_t *cmd;
3075 iwh_tx_cmd_t *tx;
3076 ieee80211_node_t *in;
3077 struct ieee80211_frame *wh, *mp_wh;
3078 struct ieee80211_key *k = NULL;
3079 mblk_t *m, *m0;
3080 int hdrlen, len, len0, mblen, off, err = IWH_SUCCESS;
3081 uint16_t masks = 0;
3082 uint32_t rate, s_id = 0;
3083 int txq_id = NON_QOS_TXQ;
3084 struct ieee80211_qosframe *qwh = NULL;
3085 int tid = WME_TID_INVALID;
3086
3087 if (NULL == ic) {
3088 return (IWH_FAIL);
3089 }
3090 sc = (iwh_sc_t *)ic;
3091
3092 if (sc->sc_flags & IWH_F_SUSPEND) {
3093 if ((type & IEEE80211_FC0_TYPE_MASK) !=
3094 IEEE80211_FC0_TYPE_DATA) {
3095 freemsg(mp);
3096 }
3097 err = IWH_FAIL;
3098 goto exit;
3099 }
3100
3101 if ((NULL == mp) || (MBLKL(mp) <= 0)) {
3102 return (IWH_FAIL);
3103 }
3104
3105 mp_wh = (struct ieee80211_frame *)mp->b_rptr;
3106
3107 /*
 * Determine the destination: the AP, or a station when in IBSS mode
3109 */
3110 in = ieee80211_find_txnode(ic, mp_wh->i_addr1);
3111 if (NULL == in) {
3112 cmn_err(CE_WARN, "iwh_send(): "
3113 "failed to find tx node\n");
3114 freemsg(mp);
3115 sc->sc_tx_err++;
3116 err = IWH_SUCCESS;
3117 goto exit;
3118 }
3119
3120 /*
3121 * Determine TX queue according to traffic ID in frame
3122 * if working in QoS mode.
3123 */
3124 if (in->in_flags & IEEE80211_NODE_QOS) {
3125
3126 if ((type & IEEE80211_FC0_TYPE_MASK) ==
3127 IEEE80211_FC0_TYPE_DATA) {
3128
3129 if (mp_wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
3130 qwh = (struct ieee80211_qosframe *)mp_wh;
3131
3132 tid = qwh->i_qos[0] & IEEE80211_QOS_TID;
3133 txq_id = iwh_wme_tid_to_txq(tid);
3134
3135 if (txq_id < TXQ_FOR_AC_MIN ||
3136 (txq_id > TXQ_FOR_AC_MAX)) {
3137 freemsg(mp);
3138 sc->sc_tx_err++;
3139 err = IWH_SUCCESS;
3140 goto exit;
3141 }
3142
3143 } else {
3144 txq_id = NON_QOS_TXQ;
3145 }
3146
3147 } else if ((type & IEEE80211_FC0_TYPE_MASK) ==
3148 IEEE80211_FC0_TYPE_MGT) {
3149 txq_id = QOS_TXQ_FOR_MGT;
3150 } else {
3151 txq_id = NON_QOS_TXQ;
3152 }
3153
3154 } else {
3155 txq_id = NON_QOS_TXQ;
3156 }
3157
3158 mutex_enter(&sc->sc_tx_lock);
3159 ring = &sc->sc_txq[txq_id];
3160 data = &ring->data[ring->cur];
3161 cmd = data->cmd;
3162 bzero(cmd, sizeof (*cmd));
3163
3164 ring->cur = (ring->cur + 1) % ring->count;
3165
3166 /*
 * Need to reschedule TX if the TX ring is full.
3168 */
3169 if (ring->queued > ring->count - IWH_MAX_WIN_SIZE) {
3170 IWH_DBG((IWH_DEBUG_TX, "iwh_send(): "
3171 "no txbuf\n"));
3172
3173 sc->sc_need_reschedule = 1;
3174 mutex_exit(&sc->sc_tx_lock);
3175
3176 if ((type & IEEE80211_FC0_TYPE_MASK) !=
3177 IEEE80211_FC0_TYPE_DATA) {
3178 freemsg(mp);
3179 }
3180 sc->sc_tx_nobuf++;
3181 err = IWH_FAIL;
3182 goto exit;
3183 }
3184
3185 ring->queued++;
3186
3187 mutex_exit(&sc->sc_tx_lock);
3188
3189 hdrlen = ieee80211_hdrspace(ic, mp->b_rptr);
3190
3191 m = allocb(msgdsize(mp) + 32, BPRI_MED);
	if (NULL == m) { /* cannot alloc buf, drop this packet */
3193 cmn_err(CE_WARN, "iwh_send(): "
3194 "failed to allocate msgbuf\n");
3195 freemsg(mp);
3196
3197 mutex_enter(&sc->sc_tx_lock);
3198 ring->queued--;
3199 if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3200 sc->sc_need_reschedule = 0;
3201 mutex_exit(&sc->sc_tx_lock);
3202 mac_tx_update(ic->ic_mach);
3203 mutex_enter(&sc->sc_tx_lock);
3204 }
3205 mutex_exit(&sc->sc_tx_lock);
3206
3207 err = IWH_SUCCESS;
3208 goto exit;
3209 }
3210
3211 for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
3212 mblen = MBLKL(m0);
3213 bcopy(m0->b_rptr, m->b_rptr + off, mblen);
3214 off += mblen;
3215 }
3216
3217 m->b_wptr += off;
3218
3219 wh = (struct ieee80211_frame *)m->b_rptr;
3220
3221 /*
 * The net80211 module encapsulates outbound data frames,
 * adding some fields of the 802.11 frame.
3224 */
3225 if ((type & IEEE80211_FC0_TYPE_MASK) ==
3226 IEEE80211_FC0_TYPE_DATA) {
3227 (void) ieee80211_encap(ic, m, in);
3228 }
3229
3230 freemsg(mp);
3231
3232 cmd->hdr.type = REPLY_TX;
3233 cmd->hdr.flags = 0;
3234 cmd->hdr.qid = ring->qid;
3235
3236 tx = (iwh_tx_cmd_t *)cmd->data;
3237 tx->tx_flags = 0;
3238
3239 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3240 tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
3241 } else {
3242 tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
3243 }
3244
3245 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
3246 k = ieee80211_crypto_encap(ic, m);
3247 if (NULL == k) {
3248 freemsg(m);
3249 sc->sc_tx_err++;
3250
3251 mutex_enter(&sc->sc_tx_lock);
3252 ring->queued--;
3253 if ((sc->sc_need_reschedule) && (ring->queued <= 0)) {
3254 sc->sc_need_reschedule = 0;
3255 mutex_exit(&sc->sc_tx_lock);
3256 mac_tx_update(ic->ic_mach);
3257 mutex_enter(&sc->sc_tx_lock);
3258 }
3259 mutex_exit(&sc->sc_tx_lock);
3260
3261 err = IWH_SUCCESS;
3262 goto exit;
3263 }
3264
3265 /*
3266 * packet header may have moved, reset our local pointer
3267 */
3268 wh = (struct ieee80211_frame *)m->b_rptr;
3269 }
3270
3271 len = msgdsize(m);
3272
3273 #ifdef DEBUG
3274 if (iwh_dbg_flags & IWH_DEBUG_TX) {
3275 ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
3276 }
3277 #endif
3278
3279 tx->rts_retry_limit = IWH_TX_RTS_RETRY_LIMIT;
3280 tx->data_retry_limit = IWH_TX_DATA_RETRY_LIMIT;
3281
3282 /*
3283 * specific TX parameters for management frames
3284 */
3285 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
3286 IEEE80211_FC0_TYPE_MGT) {
3287 /*
3288 * mgmt frames are sent at 1M
3289 */
3290 if ((in->in_rates.ir_rates[0] &
3291 IEEE80211_RATE_VAL) != 0) {
3292 rate = in->in_rates.ir_rates[0] & IEEE80211_RATE_VAL;
3293 } else {
3294 rate = 2;
3295 }
3296
3297 tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3298
3299 /*
3300 * tell h/w to set timestamp in probe responses
3301 */
3302 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3303 IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
3304 tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);
3305
3306 tx->data_retry_limit = 3;
3307 if (tx->data_retry_limit < tx->rts_retry_limit) {
3308 tx->rts_retry_limit = tx->data_retry_limit;
3309 }
3310 }
3311
3312 if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3313 IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
3314 ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
3315 IEEE80211_FC0_SUBTYPE_REASSOC_REQ)) {
3316 tx->timeout.pm_frame_timeout = LE_16(3);
3317 } else {
3318 tx->timeout.pm_frame_timeout = LE_16(2);
3319 }
3320
3321 } else {
3322 /*
		 * rate scaling is done here in software; hardware rate
		 * scaling may be used later.
		 *
		 * the tx rate is carried in the tx cmd flags; originally it
		 * was set to the maximum (54M for 11g, 11M for 11b and 96M
		 * for 11n).
3329 */
3330 if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
3331 rate = ic->ic_fixed_rate;
3332 } else {
3333 if ((in->in_flags & IEEE80211_NODE_HT) &&
3334 (sc->sc_ht_conf.ht_support)) {
3335 iwh_amrr_t *amrr = (iwh_amrr_t *)in;
3336 rate = amrr->ht_mcs_idx;
3337 } else {
3338 if ((in->in_rates.ir_rates[in->in_txrate] &
3339 IEEE80211_RATE_VAL) != 0) {
3340 rate = in->in_rates.
3341 ir_rates[in->in_txrate] &
3342 IEEE80211_RATE_VAL;
3343 }
3344 }
3345 }
3346
3347 if (tid != WME_TID_INVALID) {
3348 tx->tid_tspec = (uint8_t)tid;
3349 tx->tx_flags &= LE_32(~TX_CMD_FLG_SEQ_CTL_MSK);
3350 } else {
3351 tx->tx_flags |= LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3352 }
3353
3354 tx->timeout.pm_frame_timeout = 0;
3355 }
3356
3357 IWH_DBG((IWH_DEBUG_TX, "iwh_send(): "
3358 "tx rate[%d of %d] = %x",
3359 in->in_txrate, in->in_rates.ir_nrates, rate));
3360
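	/*
	 * the tx command plus the 802.11 header must be padded to a 4-byte
	 * boundary; when padding is added, the MH_PAD flag presumably tells
	 * the uCode that the header was padded.
	 */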
3361 len0 = roundup(4 + sizeof (iwh_tx_cmd_t) + hdrlen, 4);
3362 if (len0 != (4 + sizeof (iwh_tx_cmd_t) + hdrlen)) {
3363 tx->tx_flags |= LE_32(TX_CMD_FLG_MH_PAD_MSK);
3364 }
3365
3366 /*
3367 * retrieve destination node's id
3368 */
3369 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3370 tx->sta_id = IWH_BROADCAST_ID;
3371 } else {
3372 tx->sta_id = IWH_AP_ID;
3373 }
3374
3375 if ((in->in_flags & IEEE80211_NODE_HT) &&
3376 (sc->sc_ht_conf.ht_support) &&
3377 ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
3378 IEEE80211_FC0_TYPE_DATA)) {
3379 if (rate >= HT_2CHAIN_RATE_MIN_IDX) {
3380 rate |= LE_32(RATE_MCS_ANT_AB_MSK);
3381 } else {
3382 rate |= LE_32(RATE_MCS_ANT_B_MSK);
3383 }
3384
3385 rate |= LE_32((1 << RATE_MCS_HT_POS));
3386
3387 tx->rate.r.rate_n_flags = rate;
3388
3389 } else {
3390 if (2 == rate || 4 == rate || 11 == rate || 22 == rate) {
3391 masks |= RATE_MCS_CCK_MSK;
3392 }
3393
3394 masks |= RATE_MCS_ANT_B_MSK;
3395 tx->rate.r.rate_n_flags = LE_32(iwh_rate_to_plcp(rate) | masks);
3396 }
3397
3398 IWH_DBG((IWH_DEBUG_TX, "iwh_send(): "
3399 "tx flag = %x",
3400 tx->tx_flags));
3401
3402 tx->stop_time.life_time = LE_32(0xffffffff);
3403
3404 tx->len = LE_16(len);
3405
3406 tx->dram_lsb_ptr =
3407 LE_32(data->paddr_cmd + 4 + offsetof(iwh_tx_cmd_t, scratch));
3408 tx->dram_msb_ptr = 0;
3409 tx->driver_txop = 0;
3410 tx->next_frame_len = 0;
3411
3412 bcopy(m->b_rptr, tx + 1, hdrlen);
3413 m->b_rptr += hdrlen;
3414 bcopy(m->b_rptr, data->dma_data.mem_va, (len - hdrlen));
3415
3416 IWH_DBG((IWH_DEBUG_TX, "iwh_send(): "
3417 "sending data: qid=%d idx=%d len=%d",
3418 ring->qid, ring->cur, len));
3419
3420 /*
3421 * first segment includes the tx cmd plus the 802.11 header,
 * the second includes the remainder of the 802.11 frame.
3423 */
3424
3425 mutex_enter(&sc->sc_tx_lock);
3426 cmd->hdr.idx = ring->desc_cur;
3427 desc_data = &ring->data[ring->desc_cur];
3428 desc = desc_data->desc;
3429 bzero(desc, sizeof (*desc));
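	/*
	 * the high byte of val0 carries the number of TB (transfer buffer)
	 * segments: two here (tx cmd + 802.11 header, then the frame
	 * payload), while command descriptors use a single one.
	 */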
3430 desc->val0 = 2 << 24;
3431 desc->pa[0].tb1_addr = data->paddr_cmd;
3432 desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
3433 ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
3434 desc->pa[0].val2 =
3435 ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
3436 ((len - hdrlen) << 20);
3437 IWH_DBG((IWH_DEBUG_TX, "iwh_send(): "
3438 "phy addr1 = 0x%x phy addr2 = 0x%x "
3439 "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
3440 data->paddr_cmd, data->dma_data.cookie.dmac_address,
3441 len0, len - hdrlen, desc->pa[0].val1, desc->pa[0].val2));
3442
3443 /*
3444 * kick ring
3445 */
3446 s_id = tx->sta_id;
3447
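	/*
	 * update the byte count table for the scheduler; entries in the
	 * first IWH_MAX_WIN_SIZE slots are mirrored past IWH_QUEUE_SIZE so
	 * that the scheduler can read a wrapped window of entries.
	 */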
3448 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3449 tfd_offset[ring->desc_cur].val =
3450 (8 + len) | (s_id << 12);
3451 if (ring->desc_cur < IWH_MAX_WIN_SIZE) {
3452 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3453 tfd_offset[IWH_QUEUE_SIZE + ring->desc_cur].val =
3454 (8 + len) | (s_id << 12);
3455 }
3456
3457 IWH_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
3458 IWH_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
3459
3460 ring->desc_cur = (ring->desc_cur + 1) % ring->count;
3461 IWH_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->desc_cur);
3462
3463 mutex_exit(&sc->sc_tx_lock);
3464 freemsg(m);
3465
3466 /*
3467 * release node reference
3468 */
3469 ieee80211_free_node(in);
3470
3471 ic->ic_stats.is_tx_bytes += len;
3472 ic->ic_stats.is_tx_frags++;
3473
3474 mutex_enter(&sc->sc_mt_lock);
3475 if (0 == sc->sc_tx_timer) {
3476 sc->sc_tx_timer = 4;
3477 }
3478 mutex_exit(&sc->sc_mt_lock);
3479
3480 exit:
3481 return (err);
3482 }
3483
3484 /*
 * invoked by GLD to handle IOCTL requests
3486 */
3487 static void
3488 iwh_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
3489 {
3490 iwh_sc_t *sc;
3491 ieee80211com_t *ic;
3492 int err = EINVAL;
3493
3494 if (NULL == arg) {
3495 return;
3496 }
3497 sc = (iwh_sc_t *)arg;
3498 ic = &sc->sc_ic;
3499
3500 err = ieee80211_ioctl(ic, wq, mp);
3501 if (ENETRESET == err) {
3502 /*
		 * This is special for hidden AP connections.
		 * In any case, we should make sure there is only one
		 * 'scan' in the driver for each 'connect' CLI command.
		 * So when connecting to a hidden AP, the scan is only
		 * sent out to the air once we know the desired ESSID
		 * of the AP we want to connect to.
3509 */
3510 if (ic->ic_des_esslen) {
3511 if (sc->sc_flags & IWH_F_RUNNING) {
3512 iwh_m_stop(sc);
3513 (void) iwh_m_start(sc);
3514 (void) ieee80211_new_state(ic,
3515 IEEE80211_S_SCAN, -1);
3516 }
3517 }
3518 }
3519 }
3520
3521 /*
 * Callback functions for getting/setting properties
3523 */
3524 static int
3525 iwh_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3526 uint_t wldp_length, void *wldp_buf)
3527 {
3528 iwh_sc_t *sc;
3529 int err = EINVAL;
3530
3531 if (NULL == arg) {
3532 return (EINVAL);
3533 }
3534 sc = (iwh_sc_t *)arg;
3535
3536 err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
3537 wldp_length, wldp_buf);
3538
3539 return (err);
3540 }
3541
3542 static void
3543 iwh_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3544 mac_prop_info_handle_t mph)
3545 {
3546 iwh_sc_t *sc = (iwh_sc_t *)arg;
3547
3548 ieee80211_propinfo(&sc->sc_ic, pr_name, wldp_pr_num, mph);
3549 }
3550
3551 static int
3552 iwh_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3553 uint_t wldp_length, const void *wldp_buf)
3554 {
3555 iwh_sc_t *sc;
3556 ieee80211com_t *ic;
3557 int err = EINVAL;
3558
3559 if (NULL == arg) {
3560 return (EINVAL);
3561 }
3562 sc = (iwh_sc_t *)arg;
3563 ic = &sc->sc_ic;
3564
3565 err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
3566 wldp_buf);
3567
3568 if (err == ENETRESET) {
3569 if (ic->ic_des_esslen) {
3570 if (sc->sc_flags & IWH_F_RUNNING) {
3571 iwh_m_stop(sc);
3572 (void) iwh_m_start(sc);
3573 (void) ieee80211_new_state(ic,
3574 IEEE80211_S_SCAN, -1);
3575 }
3576 }
3577 err = 0;
3578 }
3579 return (err);
3580 }
3581
3582 /*
 * invoked by GLD to supply NIC and driver statistics
3584 */
3585 static int
3586 iwh_m_stat(void *arg, uint_t stat, uint64_t *val)
3587 {
3588 iwh_sc_t *sc;
3589 ieee80211com_t *ic;
3590 ieee80211_node_t *in;
3591
3592 if (NULL == arg) {
3593 return (EINVAL);
3594 }
3595 sc = (iwh_sc_t *)arg;
3596 ic = &sc->sc_ic;
3597
3598 mutex_enter(&sc->sc_glock);
3599
3600 switch (stat) {
3601 case MAC_STAT_IFSPEED:
3602 in = ic->ic_bss;
3603 *val = ((IEEE80211_FIXED_RATE_NONE == ic->ic_fixed_rate) ?
3604 IEEE80211_RATE(in->in_txrate) :
3605 ic->ic_fixed_rate) / 2 * 1000000;
3606 break;
3607
3608 case MAC_STAT_NOXMTBUF:
3609 *val = sc->sc_tx_nobuf;
3610 break;
3611
3612 case MAC_STAT_NORCVBUF:
3613 *val = sc->sc_rx_nobuf;
3614 break;
3615
3616 case MAC_STAT_IERRORS:
3617 *val = sc->sc_rx_err;
3618 break;
3619
3620 case MAC_STAT_RBYTES:
3621 *val = ic->ic_stats.is_rx_bytes;
3622 break;
3623
3624 case MAC_STAT_IPACKETS:
3625 *val = ic->ic_stats.is_rx_frags;
3626 break;
3627
3628 case MAC_STAT_OBYTES:
3629 *val = ic->ic_stats.is_tx_bytes;
3630 break;
3631
3632 case MAC_STAT_OPACKETS:
3633 *val = ic->ic_stats.is_tx_frags;
3634 break;
3635
3636 case MAC_STAT_OERRORS:
3637 case WIFI_STAT_TX_FAILED:
3638 *val = sc->sc_tx_err;
3639 break;
3640
3641 case WIFI_STAT_TX_RETRANS:
3642 *val = sc->sc_tx_retries;
3643 break;
3644
3645 case WIFI_STAT_FCS_ERRORS:
3646 case WIFI_STAT_WEP_ERRORS:
3647 case WIFI_STAT_TX_FRAGS:
3648 case WIFI_STAT_MCAST_TX:
3649 case WIFI_STAT_RTS_SUCCESS:
3650 case WIFI_STAT_RTS_FAILURE:
3651 case WIFI_STAT_ACK_FAILURE:
3652 case WIFI_STAT_RX_FRAGS:
3653 case WIFI_STAT_MCAST_RX:
3654 case WIFI_STAT_RX_DUPS:
3655 mutex_exit(&sc->sc_glock);
3656 return (ieee80211_stat(ic, stat, val));
3657
3658 default:
3659 mutex_exit(&sc->sc_glock);
3660 return (ENOTSUP);
3661 }
3662
3663 mutex_exit(&sc->sc_glock);
3664
3665 return (IWH_SUCCESS);
3666 }
3667
3668 /*
3669 * invoked by GLD to start or open NIC
3670 */
3671 static int
3672 iwh_m_start(void *arg)
3673 {
3674 iwh_sc_t *sc;
3675 ieee80211com_t *ic;
3676 int err = IWH_FAIL;
3677
3678 if (NULL == arg) {
3679 return (EINVAL);
3680 }
3681 sc = (iwh_sc_t *)arg;
3682 ic = &sc->sc_ic;
3683
3684 err = iwh_init(sc);
3685 if (err != IWH_SUCCESS) {
3686 /*
		 * The hw init failed (e.g. the RF switch is OFF). Return
		 * success so that 'plumb' succeeds; iwh_thread() retries
		 * the initialization in the background.
3690 */
3691 atomic_or_32(&sc->sc_flags, IWH_F_HW_ERR_RECOVER);
3692 return (IWH_SUCCESS);
3693 }
3694
3695 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3696
3697 atomic_or_32(&sc->sc_flags, IWH_F_RUNNING);
3698
3699 return (IWH_SUCCESS);
3700 }
3701
3702 /*
3703 * invoked by GLD to stop or down NIC
3704 */
3705 static void
3706 iwh_m_stop(void *arg)
3707 {
3708 iwh_sc_t *sc;
3709 ieee80211com_t *ic;
3710
3711 if (NULL == arg) {
3712 return;
3713 }
3714 sc = (iwh_sc_t *)arg;
3715 ic = &sc->sc_ic;
3716
3717 iwh_stop(sc);
3718
3719 /*
3720 * release buffer for calibration
3721 */
3722 iwh_release_calib_buffer(sc);
3723
3724 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3725
3726 atomic_and_32(&sc->sc_flags, ~IWH_F_HW_ERR_RECOVER);
3727 atomic_and_32(&sc->sc_flags, ~IWH_F_RATE_AUTO_CTL);
3728
3729 atomic_and_32(&sc->sc_flags, ~IWH_F_RUNNING);
3730 atomic_and_32(&sc->sc_flags, ~IWH_F_SCANNING);
3731 }
3732
3733 /*
 * invoked by GLD to set the unicast (MAC) address and reconfigure the NIC
3735 */
3736 static int
3737 iwh_m_unicst(void *arg, const uint8_t *macaddr)
3738 {
3739 iwh_sc_t *sc;
3740 ieee80211com_t *ic;
3741 int err = IWH_SUCCESS;
3742
3743 if (NULL == arg) {
3744 return (EINVAL);
3745 }
3746 sc = (iwh_sc_t *)arg;
3747 ic = &sc->sc_ic;
3748
3749 if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
3750 IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
3751 mutex_enter(&sc->sc_glock);
3752 err = iwh_config(sc);
3753 mutex_exit(&sc->sc_glock);
3754 if (err != IWH_SUCCESS) {
3755 cmn_err(CE_WARN, "iwh_m_unicst(): "
3756 "failed to configure device\n");
3757 goto fail;
3758 }
3759 }
3760
3761 fail:
3762 return (err);
3763 }
3764
3765 /* ARGSUSED */
3766 static int
3767 iwh_m_multicst(void *arg, boolean_t add, const uint8_t *m)
3768 {
3769 return (IWH_SUCCESS);
3770 }
3771
3772 /* ARGSUSED */
3773 static int
3774 iwh_m_promisc(void *arg, boolean_t on)
3775 {
3776 return (IWH_SUCCESS);
3777 }
3778
3779 /*
 * kernel thread to deal with exceptional situations
3781 */
3782 static void
3783 iwh_thread(iwh_sc_t *sc)
3784 {
3785 ieee80211com_t *ic = &sc->sc_ic;
3786 clock_t clk;
3787 int err, n = 0, timeout = 0;
3788 uint32_t tmp;
3789 #ifdef DEBUG
3790 int times = 0;
3791 #endif
3792
3793 while (sc->sc_mf_thread_switch) {
3794 tmp = IWH_READ(sc, CSR_GP_CNTRL);
3795 if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
3796 atomic_and_32(&sc->sc_flags, ~IWH_F_RADIO_OFF);
3797 } else {
3798 atomic_or_32(&sc->sc_flags, IWH_F_RADIO_OFF);
3799 }
3800
3801 /*
3802 * If in SUSPEND or the RF is OFF, do nothing.
3803 */
3804 if (sc->sc_flags & IWH_F_RADIO_OFF) {
3805 delay(drv_usectohz(100000));
3806 continue;
3807 }
3808
3809 /*
 * recover from fatal hw errors
3811 */
3812 if (ic->ic_mach &&
3813 (sc->sc_flags & IWH_F_HW_ERR_RECOVER)) {
3814
3815 IWH_DBG((IWH_DEBUG_FW, "iwh_thread(): "
3816 "try to recover fatal hw error: %d\n", times++));
3817
3818 iwh_stop(sc);
3819
3820 if (IWH_CHK_FAST_RECOVER(sc)) {
3821 /*
3822 * save runtime configuration
3823 */
3824 bcopy(&sc->sc_config, &sc->sc_config_save,
3825 sizeof (sc->sc_config));
3826 } else {
3827 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3828 delay(drv_usectohz(2000000 + n*500000));
3829 }
3830
3831 err = iwh_init(sc);
3832 if (err != IWH_SUCCESS) {
3833 n++;
3834 if (n < 20) {
3835 continue;
3836 }
3837 }
3838
3839 n = 0;
3840 if (!err) {
3841 atomic_or_32(&sc->sc_flags, IWH_F_RUNNING);
3842 }
3843
3844
3845 if (!IWH_CHK_FAST_RECOVER(sc) ||
3846 iwh_fast_recover(sc) != IWH_SUCCESS) {
3847 atomic_and_32(&sc->sc_flags,
3848 ~IWH_F_HW_ERR_RECOVER);
3849
3850 delay(drv_usectohz(2000000));
3851 if (sc->sc_ostate != IEEE80211_S_INIT) {
3852 ieee80211_new_state(ic,
3853 IEEE80211_S_SCAN, 0);
3854 }
3855 }
3856 }
3857
3858 if (ic->ic_mach &&
3859 (sc->sc_flags & IWH_F_SCANNING) && sc->sc_scan_pending) {
3860 IWH_DBG((IWH_DEBUG_SCAN, "iwh_thread(): "
3861 "wait for probe response\n"));
3862
3863 sc->sc_scan_pending--;
3864 delay(drv_usectohz(200000));
3865 ieee80211_next_scan(ic);
3866 }
3867
3868 /*
 * rate control: run AMRR adaptation roughly once per second
3870 */
3871 if (ic->ic_mach &&
3872 (sc->sc_flags & IWH_F_RATE_AUTO_CTL)) {
3873 clk = ddi_get_lbolt();
3874 if (clk > sc->sc_clk + drv_usectohz(1000000)) {
3875 iwh_amrr_timeout(sc);
3876 }
3877 }
3878
3879 if ((ic->ic_state == IEEE80211_S_RUN) &&
3880 (ic->ic_beaconmiss++ > 100)) { /* 10 seconds */
3881 cmn_err(CE_WARN, "iwh: beacon missed for 10 seconds\n");
3882 (void) ieee80211_new_state(ic,
3883 IEEE80211_S_INIT, -1);
3884 }
3885
3886 delay(drv_usectohz(100000));
3887
3888 mutex_enter(&sc->sc_mt_lock);
3889 if (sc->sc_tx_timer) {
3890 timeout++;
3891 if (10 == timeout) {
3892 sc->sc_tx_timer--;
3893 if (0 == sc->sc_tx_timer) {
3894 atomic_or_32(&sc->sc_flags,
3895 IWH_F_HW_ERR_RECOVER);
3896 sc->sc_ostate = IEEE80211_S_RUN;
3897 IWH_DBG((IWH_DEBUG_FW, "iwh_thread(): "
3898 "try to recover from "
3899 "send fail\n"));
3900 }
3901 timeout = 0;
3902 }
3903 }
3904 mutex_exit(&sc->sc_mt_lock);
3905 }
3906
3907 mutex_enter(&sc->sc_mt_lock);
3908 sc->sc_mf_thread = NULL;
3909 cv_signal(&sc->sc_mt_cv);
3910 mutex_exit(&sc->sc_mt_lock);
3911 }
3912
3913 /*
3914 * Send a command to the ucode.
3915 */
3916 static int
3917 iwh_cmd(iwh_sc_t *sc, int code, const void *buf, int size, int async)
3918 {
3919 iwh_tx_ring_t *ring = &sc->sc_txq[IWH_CMD_QUEUE_NUM];
3920 iwh_tx_desc_t *desc;
3921 iwh_cmd_t *cmd;
3922
3923 ASSERT(size <= sizeof (cmd->data));
3924 ASSERT(mutex_owned(&sc->sc_glock));
3925
3926 IWH_DBG((IWH_DEBUG_CMD, "iwh_cmd() "
3927 "code[%d]", code));
3928 desc = ring->data[ring->cur].desc;
3929 cmd = ring->data[ring->cur].cmd;
3930
3931 cmd->hdr.type = (uint8_t)code;
3932 cmd->hdr.flags = 0;
3933 cmd->hdr.qid = ring->qid;
3934 cmd->hdr.idx = ring->cur;
3935 bcopy(buf, cmd->data, size);
3936 (void) memset(desc, 0, sizeof (*desc));
3937
3938 desc->val0 = 1 << 24;
3939 desc->pa[0].tb1_addr =
3940 (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
3941 desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;
3942
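	/*
	 * for asynchronous commands, record that one more completion
	 * should be consumed silently by iwh_cmd_intr().
	 */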
3943 if (async) {
3944 sc->sc_cmd_accum++;
3945 }
3946
3947 /*
3948 * kick cmd ring XXX
3949 */
3950 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3951 tfd_offset[ring->cur].val = 8;
3952 if (ring->cur < IWH_MAX_WIN_SIZE) {
3953 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3954 tfd_offset[IWH_QUEUE_SIZE + ring->cur].val = 8;
3955 }
3956 ring->cur = (ring->cur + 1) % ring->count;
3957 IWH_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3958
3959 if (async) {
3960 return (IWH_SUCCESS);
3961 } else {
3962 clock_t clk;
3963
3964 clk = ddi_get_lbolt() + drv_usectohz(2000000);
3965 while (sc->sc_cmd_flag != SC_CMD_FLG_DONE) {
3966 if (cv_timedwait(&sc->sc_cmd_cv,
3967 &sc->sc_glock, clk) < 0) {
3968 break;
3969 }
3970 }
3971
3972 if (SC_CMD_FLG_DONE == sc->sc_cmd_flag) {
3973 sc->sc_cmd_flag = SC_CMD_FLG_NONE;
3974 return (IWH_SUCCESS);
3975 } else {
3976 sc->sc_cmd_flag = SC_CMD_FLG_NONE;
3977 return (IWH_FAIL);
3978 }
3979 }
3980 }
3981
3982 /*
 * ask the uCode to set the LED of the NIC
3984 */
3985 static void
3986 iwh_set_led(iwh_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
3987 {
3988 iwh_led_cmd_t led;
3989
3990 led.interval = LE_32(100000); /* unit: 100ms */
3991 led.id = id;
3992 led.off = off;
3993 led.on = on;
3994
3995 (void) iwh_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
3996 }
3997
3998 /*
 * necessary NIC settings before authentication
4000 */
4001 static int
4002 iwh_hw_set_before_auth(iwh_sc_t *sc)
4003 {
4004 ieee80211com_t *ic = &sc->sc_ic;
4005 ieee80211_node_t *in = ic->ic_bss;
4006 int err = IWH_FAIL;
4007
4008 /*
	 * update the adapter's configuration according
	 * to the info of the target AP
4011 */
4012 IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
4013 sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, in->in_chan));
4014
4015 if (ic->ic_curmode != IEEE80211_MODE_11NG) {
4016
4017 sc->sc_config.ofdm_ht_triple_stream_basic_rates = 0;
4018 sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0;
4019 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0;
4020
4021 if (IEEE80211_MODE_11B == ic->ic_curmode) {
4022 sc->sc_config.cck_basic_rates = 0x03;
4023 sc->sc_config.ofdm_basic_rates = 0;
4024 } else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
4025 (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
4026 sc->sc_config.cck_basic_rates = 0;
4027 sc->sc_config.ofdm_basic_rates = 0x15;
4028 } else { /* assume 802.11b/g */
4029 sc->sc_config.cck_basic_rates = 0x0f;
4030 sc->sc_config.ofdm_basic_rates = 0xff;
4031 }
4032 }
4033
4034 sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
4035 RXON_FLG_SHORT_SLOT_MSK);
4036
4037 if (ic->ic_flags & IEEE80211_F_SHSLOT) {
4038 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
4039 } else {
4040 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
4041 }
4042
4043 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
4044 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
4045 } else {
4046 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
4047 }
4048
4049 IWH_DBG((IWH_DEBUG_80211, "iwh_hw_set_before_auth(): "
4050 "config chan %d flags %x "
4051 "filter_flags %x cck %x ofdm %x"
4052 " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n",
4053 LE_16(sc->sc_config.chan), LE_32(sc->sc_config.flags),
4054 LE_32(sc->sc_config.filter_flags),
4055 sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
4056 sc->sc_config.bssid[0], sc->sc_config.bssid[1],
4057 sc->sc_config.bssid[2], sc->sc_config.bssid[3],
4058 sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
4059
4060 err = iwh_cmd(sc, REPLY_RXON, &sc->sc_config,
4061 sizeof (iwh_rxon_cmd_t), 1);
4062 if (err != IWH_SUCCESS) {
4063 cmn_err(CE_WARN, "iwh_hw_set_before_auth(): "
4064 "failed to config chan%d\n", sc->sc_config.chan);
4065 return (err);
4066 }
4067
4068 if ((sc->sc_dev_id != 0x423c) &&
4069 (sc->sc_dev_id != 0x423d)) {
4070 err = iwh_tx_power_table(sc, 1);
4071 if (err != IWH_SUCCESS) {
4072 return (err);
4073 }
4074 }
4075
4076 /*
4077 * add default AP node
4078 */
4079 err = iwh_add_ap_sta(sc);
4080 if (err != IWH_SUCCESS) {
4081 return (err);
4082 }
4083
4084 if ((sc->sc_dev_id != 0x423c) &&
4085 (sc->sc_dev_id != 0x423d)) {
4086 /*
4087 * set up retry rate table for AP node
4088 */
4089 err = iwh_ap_lq(sc);
4090 if (err != IWH_SUCCESS) {
4091 return (err);
4092 }
4093 }
4094
4095 return (err);
4096 }
4097
4098 /*
 * Send a scan request (assemble the scan cmd) to the firmware.
4100 */
4101 static int
4102 iwh_scan(iwh_sc_t *sc)
4103 {
4104 ieee80211com_t *ic = &sc->sc_ic;
4105 iwh_tx_ring_t *ring = &sc->sc_txq[IWH_CMD_QUEUE_NUM];
4106 iwh_tx_desc_t *desc;
4107 iwh_tx_data_t *data;
4108 iwh_cmd_t *cmd;
4109 iwh_scan_hdr_t *hdr;
4110 iwh_scan_chan_t chan;
4111 struct ieee80211_frame *wh;
4112 ieee80211_node_t *in = ic->ic_bss;
4113 uint8_t essid[IEEE80211_NWID_LEN+1];
4114 struct ieee80211_rateset *rs;
4115 enum ieee80211_phymode mode;
4116 uint8_t *frm;
4117 int i, pktlen, nrates;
4118
4119 data = &ring->data[ring->cur];
4120 desc = data->desc;
4121 cmd = (iwh_cmd_t *)data->dma_data.mem_va;
4122
4123 cmd->hdr.type = REPLY_SCAN_CMD;
4124 cmd->hdr.flags = 0;
4125 cmd->hdr.qid = ring->qid;
4126 cmd->hdr.idx = ring->cur | 0x40;
4127
4128 hdr = (iwh_scan_hdr_t *)cmd->data;
4129 (void) memset(hdr, 0, sizeof (iwh_scan_hdr_t));
4130 hdr->nchan = 1;
4131 hdr->quiet_time = LE_16(50);
4132 hdr->quiet_plcp_th = LE_16(1);
4133
4134 hdr->flags = LE_32(RXON_FLG_BAND_24G_MSK);
4135 hdr->rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
4136 (0x7 << RXON_RX_CHAIN_VALID_POS) |
4137 (0x2 << RXON_RX_CHAIN_FORCE_SEL_POS) |
4138 (0x2 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
4139
4140 hdr->tx_cmd.tx_flags = LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
4141 hdr->tx_cmd.sta_id = IWH_BROADCAST_ID;
4142 hdr->tx_cmd.stop_time.life_time = LE_32(0xffffffff);
4143 hdr->tx_cmd.rate.r.rate_n_flags = LE_32(iwh_rate_to_plcp(2));
4144 hdr->tx_cmd.rate.r.rate_n_flags |=
4145 LE_32(RATE_MCS_ANT_B_MSK |RATE_MCS_CCK_MSK);
4146 hdr->direct_scan[0].len = ic->ic_des_esslen;
4147 hdr->direct_scan[0].id = IEEE80211_ELEMID_SSID;
4148
4149 hdr->filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4150 RXON_FILTER_BCON_AWARE_MSK);
4151
4152 if (ic->ic_des_esslen) {
4153 bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen);
4154 essid[ic->ic_des_esslen] = '\0';
4155 IWH_DBG((IWH_DEBUG_SCAN, "iwh_scan(): "
4156 "directed scan %s\n", essid));
4157
4158 bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
4159 ic->ic_des_esslen);
4160 } else {
4161 bzero(hdr->direct_scan[0].ssid,
4162 sizeof (hdr->direct_scan[0].ssid));
4163 }
4164
4165 /*
4166 * a probe request frame is required after the REPLY_SCAN_CMD
4167 */
4168 wh = (struct ieee80211_frame *)(hdr + 1);
4169 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
4170 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
4171 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
4172 (void) memset(wh->i_addr1, 0xff, 6);
4173 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
4174 (void) memset(wh->i_addr3, 0xff, 6);
4175 *(uint16_t *)&wh->i_dur[0] = 0;
4176 *(uint16_t *)&wh->i_seq[0] = 0;
4177
4178 frm = (uint8_t *)(wh + 1);
4179
4180 /*
4181 * essid IE
4182 */
4183 if (in->in_esslen) {
4184 bcopy(in->in_essid, essid, in->in_esslen);
4185 essid[in->in_esslen] = '\0';
4186 IWH_DBG((IWH_DEBUG_SCAN, "iwh_scan(): "
4187 "probe with ESSID %s\n",
4188 essid));
4189 }
4190 *frm++ = IEEE80211_ELEMID_SSID;
4191 *frm++ = in->in_esslen;
4192 bcopy(in->in_essid, frm, in->in_esslen);
4193 frm += in->in_esslen;
4194
4195 mode = ieee80211_chan2mode(ic, ic->ic_curchan);
4196 rs = &ic->ic_sup_rates[mode];
4197
4198 /*
4199 * supported rates IE
4200 */
4201 *frm++ = IEEE80211_ELEMID_RATES;
4202 nrates = rs->ir_nrates;
4203 if (nrates > IEEE80211_RATE_SIZE) {
4204 nrates = IEEE80211_RATE_SIZE;
4205 }
4206
4207 *frm++ = (uint8_t)nrates;
4208 bcopy(rs->ir_rates, frm, nrates);
4209 frm += nrates;
4210
4211 /*
4212 * supported xrates IE
4213 */
4214 if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
4215 nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
4216 *frm++ = IEEE80211_ELEMID_XRATES;
4217 *frm++ = (uint8_t)nrates;
4218 bcopy(rs->ir_rates + IEEE80211_RATE_SIZE, frm, nrates);
4219 frm += nrates;
4220 }
4221
4222 /*
 * optional IE (usually for WPA)
4224 */
4225 if (ic->ic_opt_ie != NULL) {
4226 bcopy(ic->ic_opt_ie, frm, ic->ic_opt_ie_len);
4227 frm += ic->ic_opt_ie_len;
4228 }
4229
4230 /* setup length of probe request */
4231 hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
4232 hdr->len = LE_16(hdr->nchan * sizeof (iwh_scan_chan_t) +
4233 LE_16(hdr->tx_cmd.len) + sizeof (iwh_scan_hdr_t));
4234
4235 /*
 * the attributes of the scan channels are required after the probe
4237 * request frame.
4238 */
4239 for (i = 1; i <= hdr->nchan; i++) {
4240 if (ic->ic_des_esslen) {
4241 chan.type = LE_32(3);
4242 } else {
4243 chan.type = LE_32(1);
4244 }
4245
4246 chan.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
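		/*
		 * fixed transmit power settings for the probe request; these
		 * appear to be conservative defaults rather than values
		 * derived from EEPROM calibration.
		 */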
4247 chan.tpc.tx_gain = 0x28;
4248 chan.tpc.dsp_atten = 110;
4249 chan.active_dwell = LE_16(50);
4250 chan.passive_dwell = LE_16(120);
4251
4252 bcopy(&chan, frm, sizeof (iwh_scan_chan_t));
4253 frm += sizeof (iwh_scan_chan_t);
4254 }
4255
4256 pktlen = _PTRDIFF(frm, cmd);
4257
4258 (void) memset(desc, 0, sizeof (*desc));
4259 desc->val0 = 1 << 24;
4260 desc->pa[0].tb1_addr =
4261 (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
4262 desc->pa[0].val1 = (pktlen << 4) & 0xfff0;
4263
4264 /*
 * filling the byte count table may not be necessary for commands,
 * but we fill it here anyway.
4267 */
4268 sc->sc_shared->queues_byte_cnt_tbls[ring->qid]
4269 .tfd_offset[ring->cur].val = 8;
4270 if (ring->cur < IWH_MAX_WIN_SIZE) {
4271 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
4272 tfd_offset[IWH_QUEUE_SIZE + ring->cur].val = 8;
4273 }
4274
4275 /*
4276 * kick cmd ring
4277 */
4278 ring->cur = (ring->cur + 1) % ring->count;
4279 IWH_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4280
4281 return (IWH_SUCCESS);
4282 }
4283
4284 /*
4285 * configure NIC by using ucode commands after loading ucode.
4286 */
4287 static int
4288 iwh_config(iwh_sc_t *sc)
4289 {
4290 ieee80211com_t *ic = &sc->sc_ic;
4291 iwh_powertable_cmd_t powertable;
4292 iwh_bt_cmd_t bt;
4293 iwh_add_sta_t node;
4294 iwh_rem_sta_t rm_sta;
4295 const uint8_t bcast[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
4296 iwh_link_quality_cmd_t link_quality;
4297 int i, err = IWH_FAIL;
4298 uint16_t masks = 0;
4299
4300 /*
 * set power mode. Power management is disabled for now; enable it later.
4302 */
4303 (void) memset(&powertable, 0, sizeof (powertable));
4304 powertable.flags = LE_16(0x8);
4305 err = iwh_cmd(sc, POWER_TABLE_CMD, &powertable,
4306 sizeof (powertable), 0);
4307 if (err != IWH_SUCCESS) {
4308 cmn_err(CE_WARN, "iwh_config(): "
4309 "failed to set power mode\n");
4310 return (err);
4311 }
4312
4313 /*
4314 * configure bt coexistence
4315 */
4316 (void) memset(&bt, 0, sizeof (bt));
4317 bt.flags = 3;
4318 bt.lead_time = 0xaa;
4319 bt.max_kill = 1;
4320 err = iwh_cmd(sc, REPLY_BT_CONFIG, &bt,
4321 sizeof (bt), 0);
4322 if (err != IWH_SUCCESS) {
4323 cmn_err(CE_WARN, "iwh_config(): "
4324 "failed to configurate bt coexistence\n");
4325 return (err);
4326 }
4327
4328 /*
4329 * configure rxon
4330 */
4331 (void) memset(&sc->sc_config, 0, sizeof (iwh_rxon_cmd_t));
4332 IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
4333 IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
4334 sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
4335 sc->sc_config.flags = LE_32(RXON_FLG_BAND_24G_MSK);
4336 sc->sc_config.flags &= LE_32(~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
4337 RXON_FLG_CHANNEL_MODE_PURE_40_MSK));
4338
4339 switch (ic->ic_opmode) {
4340 case IEEE80211_M_STA:
4341 sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
4342 sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4343 RXON_FILTER_DIS_DECRYPT_MSK |
4344 RXON_FILTER_DIS_GRP_DECRYPT_MSK);
4345 break;
4346
4347 case IEEE80211_M_IBSS:
4348 case IEEE80211_M_AHDEMO:
4349 sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
4350
4351 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
4352 sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4353 RXON_FILTER_DIS_DECRYPT_MSK |
4354 RXON_FILTER_DIS_GRP_DECRYPT_MSK);
4355 break;
4356
4357 case IEEE80211_M_HOSTAP:
4358 sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
4359 break;
4360
4361 case IEEE80211_M_MONITOR:
4362 sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
4363 sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
4364 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
4365 break;
4366 }
4367
4368 /*
4369 * Support all CCK rates.
4370 */
4371 sc->sc_config.cck_basic_rates = 0x0f;
4372
4373 /*
4374 * Support all OFDM rates.
4375 */
4376 sc->sc_config.ofdm_basic_rates = 0xff;
4377
4378 /*
4379 * Determine HT supported rates.
4380 */
4381 switch (sc->sc_ht_conf.rx_stream_count) {
4382 case 3:
4383 sc->sc_config.ofdm_ht_triple_stream_basic_rates = 0xff;
4384 sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
4385 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
4386 break;
4387 case 2:
4388 sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
4389 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
4390 break;
4391 case 1:
4392 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
4393 break;
4394 default:
4395 cmn_err(CE_WARN, "iwh_config(): "
4396 "RX stream count %d is not in suitable range\n",
4397 sc->sc_ht_conf.rx_stream_count);
4398 return (IWH_FAIL);
4399 }
4400
4401 /*
4402 * set RX chains/antennas.
4403 */
4404 iwh_config_rxon_chain(sc);
4405
4406 err = iwh_cmd(sc, REPLY_RXON, &sc->sc_config,
4407 sizeof (iwh_rxon_cmd_t), 0);
4408 if (err != IWH_SUCCESS) {
4409 cmn_err(CE_WARN, "iwh_config(): "
4410 "failed to set configure command\n");
4411 return (err);
4412 }
4413
4414 /*
4415 * remove all nodes in NIC
4416 */
4417 (void) memset(&rm_sta, 0, sizeof (rm_sta));
4418 rm_sta.num_sta = 1;
4419 bcopy(bcast, rm_sta.addr, 6);
4420
4421 err = iwh_cmd(sc, REPLY_REMOVE_STA, &rm_sta, sizeof (iwh_rem_sta_t), 0);
4422 if (err != IWH_SUCCESS) {
4423 cmn_err(CE_WARN, "iwh_config(): "
4424 "failed to remove broadcast node in hardware.\n");
4425 return (err);
4426 }
4427
4428 if ((sc->sc_dev_id != 0x423c) &&
4429 (sc->sc_dev_id != 0x423d)) {
4430 /*
4431 * configure TX power table
4432 */
4433 err = iwh_tx_power_table(sc, 0);
4434 if (err != IWH_SUCCESS) {
4435 return (err);
4436 }
4437 }
4438
4439 /*
4440 * add broadcast node so that we can send broadcast frames
4441 */
4442 (void) memset(&node, 0, sizeof (node));
4443 (void) memset(node.sta.addr, 0xff, 6);
4444 node.mode = 0;
4445 node.sta.sta_id = IWH_BROADCAST_ID;
4446 node.station_flags = 0;
4447
4448 err = iwh_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
4449 if (err != IWH_SUCCESS) {
4450 cmn_err(CE_WARN, "iwh_config(): "
4451 "failed to add broadcast node\n");
4452 return (err);
4453 }
4454
4455 if ((sc->sc_dev_id != 0x423c) &&
4456 (sc->sc_dev_id != 0x423d)) {
4457 /*
4458 * TX_LINK_QUALITY cmd
4459 */
4460 (void) memset(&link_quality, 0, sizeof (link_quality));
4461 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
4462 masks |= RATE_MCS_CCK_MSK;
4463 masks |= RATE_MCS_ANT_B_MSK;
4464 masks &= ~RATE_MCS_ANT_A_MSK;
4465 link_quality.rate_n_flags[i] =
4466 LE_32(iwh_rate_to_plcp(2) | masks);
4467 }
4468
4469 link_quality.general_params.single_stream_ant_msk = 2;
4470 link_quality.general_params.dual_stream_ant_msk = 3;
4471 link_quality.agg_params.agg_dis_start_th = 3;
4472 link_quality.agg_params.agg_time_limit = LE_16(4000);
4473 link_quality.sta_id = IWH_BROADCAST_ID;
4474 err = iwh_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
4475 sizeof (link_quality), 0);
4476 if (err != IWH_SUCCESS) {
4477 cmn_err(CE_WARN, "iwh_config(): "
4478 "failed to config link quality table\n");
4479 return (err);
4480 }
4481 }
4482
4483 return (err);
4484 }
4485
4486 /*
4487 * quiesce(9E) entry point.
4488 * This function is called when the system is single-threaded at high
4489 * PIL with preemption disabled. Therefore, this function must not
4490 * block.
4491 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
4492 * DDI_FAILURE indicates an error condition and should almost never happen.
4493 */
4494 static int
4495 iwh_quiesce(dev_info_t *dip)
4496 {
4497 iwh_sc_t *sc;
4498
4499 sc = ddi_get_soft_state(iwh_soft_state_p, ddi_get_instance(dip));
4500 if (sc == NULL) {
4501 return (DDI_FAILURE);
4502 }
4503
4504 #ifdef DEBUG
4505 /*
4506 * bypass any debug messages while quiescing
4507 */
4508 iwh_dbg_flags = 0;
4509 #endif
4510
4511 /*
4512 * No more blocking is allowed while we are in the
4513 * quiesce(9E) entry point.
4514 */
4515 atomic_or_32(&sc->sc_flags, IWH_F_QUIESCED);
4516
4517 /*
4518 * Disable and mask all interrupts.
4519 */
4520 iwh_stop(sc);
4521
4522 return (DDI_SUCCESS);
4523 }
4524
4525 static void
4526 iwh_stop_master(iwh_sc_t *sc)
4527 {
4528 uint32_t tmp;
4529 int n;
4530
4531 tmp = IWH_READ(sc, CSR_RESET);
4532 IWH_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);
4533
4534 tmp = IWH_READ(sc, CSR_GP_CNTRL);
4535 if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
4536 CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE) {
4537 return;
4538 }
4539
4540 for (n = 0; n < 2000; n++) {
4541 if (IWH_READ(sc, CSR_RESET) &
4542 CSR_RESET_REG_FLAG_MASTER_DISABLED) {
4543 break;
4544 }
4545 DELAY(1000);
4546 }
4547
4548 #ifdef DEBUG
4549 if (2000 == n) {
4550 IWH_DBG((IWH_DEBUG_HW, "iwh_stop_master(): "
4551 "timeout waiting for master stop\n"));
4552 }
4553 #endif
4554 }
4555
4556 static int
4557 iwh_power_up(iwh_sc_t *sc)
4558 {
4559 uint32_t tmp;
4560
4561 iwh_mac_access_enter(sc);
4562 tmp = iwh_reg_read(sc, ALM_APMG_PS_CTL);
4563 tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
4564 tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
4565 iwh_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4566 iwh_mac_access_exit(sc);
4567
4568 DELAY(5000);
4569 return (IWH_SUCCESS);
4570 }
4571
4572 /*
4573 * hardware initialization
4574 */
4575 static int
4576 iwh_preinit(iwh_sc_t *sc)
4577 {
4578 int n;
4579 uint8_t vlink;
4580 uint16_t radio_cfg;
4581 uint32_t tmp;
4582
4583 /*
4584 * clear any pending interrupts
4585 */
4586 IWH_WRITE(sc, CSR_INT, 0xffffffff);
4587
4588 tmp = IWH_READ(sc, CSR_GIO_CHICKEN_BITS);
4589 IWH_WRITE(sc, CSR_GIO_CHICKEN_BITS,
4590 tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
4591
4592 tmp = IWH_READ(sc, CSR_ANA_PLL_CFG);
4593 IWH_WRITE(sc, CSR_ANA_PLL_CFG, tmp | IWH_CSR_ANA_PLL_CFG);
4594
4595 tmp = IWH_READ(sc, CSR_GP_CNTRL);
4596 IWH_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
4597
4598 /*
4599 * wait for clock ready
4600 */
4601 for (n = 0; n < 1000; n++) {
4602 if (IWH_READ(sc, CSR_GP_CNTRL) &
4603 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
4604 break;
4605 }
4606 DELAY(10);
4607 }
4608
4609 if (1000 == n) {
4610 return (ETIMEDOUT);
4611 }
4612
4613 iwh_mac_access_enter(sc);
4614
4615 iwh_reg_write(sc, ALM_APMG_CLK_EN, APMG_CLK_REG_VAL_DMA_CLK_RQT);
4616
4617 DELAY(20);
4618 tmp = iwh_reg_read(sc, ALM_APMG_PCIDEV_STT);
4619 iwh_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
4620 APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
4621 iwh_mac_access_exit(sc);
4622
4623 radio_cfg = IWH_READ_EEP_SHORT(sc, EEP_SP_RADIO_CONFIGURATION);
4624 if (SP_RADIO_TYPE_MSK(radio_cfg) < SP_RADIO_TYPE_MAX) {
4625 tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
4626 IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4627 tmp | SP_RADIO_TYPE_MSK(radio_cfg) |
4628 SP_RADIO_STEP_MSK(radio_cfg) |
4629 SP_RADIO_DASH_MSK(radio_cfg));
4630 } else {
4631 cmn_err(CE_WARN, "iwh_preinit(): "
4632 "radio configuration information in eeprom is wrong\n");
4633 return (IWH_FAIL);
4634 }
4635
4636
4637 IWH_WRITE(sc, CSR_INT_COALESCING, 512 / 32);
4638
4639 (void) iwh_power_up(sc);
4640
4641 if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
4642 tmp = ddi_get32(sc->sc_cfg_handle,
4643 (uint32_t *)(sc->sc_cfg_base + 0xe8));
4644 ddi_put32(sc->sc_cfg_handle,
4645 (uint32_t *)(sc->sc_cfg_base + 0xe8),
4646 tmp & ~(1 << 11));
4647 }
4648
4649 vlink = ddi_get8(sc->sc_cfg_handle,
4650 (uint8_t *)(sc->sc_cfg_base + 0xf0));
4651 ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
4652 vlink & ~2);
4653
4654 tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
4655 tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
4656 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
4657 IWH_WRITE(sc, CSR_SW_VER, tmp);
4658
4659 /*
4660 * make sure the power supply is on for each part of the hardware
4661 */
4662 iwh_mac_access_enter(sc);
4663 tmp = iwh_reg_read(sc, ALM_APMG_PS_CTL);
4664 tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
4665 iwh_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4666 DELAY(5);
4667
4668 tmp = iwh_reg_read(sc, ALM_APMG_PS_CTL);
4669 tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
4670 iwh_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4671 iwh_mac_access_exit(sc);
4672
4673 return (IWH_SUCCESS);
4674 }
4675
4676 /*
4677 * set up semaphore flag to own the EEPROM
4678 */
4679 static int
4680 iwh_eep_sem_down(iwh_sc_t *sc)
4681 {
4682 int count1, count2;
4683 uint32_t tmp;
4684
4685 for (count1 = 0; count1 < 1000; count1++) {
4686 tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
4687 IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4688 tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
4689
4690 for (count2 = 0; count2 < 2; count2++) {
4691 if (IWH_READ(sc, CSR_HW_IF_CONFIG_REG) &
4692 CSR_HW_IF_CONFIG_REG_EEP_SEM) {
4693 return (IWH_SUCCESS);
4694 }
4695 DELAY(10000);
4696 }
4697 }
4698
4699 return (IWH_FAIL);
4700 }
4701
4702 /*
4703 * reset semaphore flag to release the EEPROM
4704 */
4705 static void
4706 iwh_eep_sem_up(iwh_sc_t *sc)
4707 {
4708 uint32_t tmp;
4709
4710 tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
4711 IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4712 tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
4713 }
4714
4715 /*
4716 * This function reads all information from the EEPROM
4717 */
4718 static int
4719 iwh_eep_load(iwh_sc_t *sc)
4720 {
4721 int i, rr;
4722 uint32_t rv, tmp, eep_gp;
4723 uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
4724 uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;
4725
4726 /*
4727 * read eeprom gp register in CSR
4728 */
4729 eep_gp = IWH_READ(sc, CSR_EEPROM_GP);
4730 if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
4731 CSR_EEPROM_GP_BAD_SIGNATURE) {
4732 IWH_DBG((IWH_DEBUG_EEPROM, "iwh_eep_load(): "
4733 "not find eeprom\n"));
4734 return (IWH_FAIL);
4735 }
4736
4737 rr = iwh_eep_sem_down(sc);
4738 if (rr != 0) {
4739 IWH_DBG((IWH_DEBUG_EEPROM, "iwh_eep_load(): "
4740 "driver failed to own EEPROM\n"));
4741 return (IWH_FAIL);
4742 }
4743
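/*
 * read the EEPROM one 16-bit word at a time: write the byte address
 * (word address << 1) to the EEPROM register, then poll the valid bit;
 * the data is returned in the upper 16 bits of the register.
 */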
4744 for (addr = 0; addr < eep_sz; addr += 2) {
4745 IWH_WRITE(sc, CSR_EEPROM_REG, addr<<1);
4746 tmp = IWH_READ(sc, CSR_EEPROM_REG);
4747 IWH_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));
4748
4749 for (i = 0; i < 10; i++) {
4750 rv = IWH_READ(sc, CSR_EEPROM_REG);
4751 if (rv & 1) {
4752 break;
4753 }
4754 DELAY(10);
4755 }
4756
4757 if (!(rv & 1)) {
4758 IWH_DBG((IWH_DEBUG_EEPROM, "iwh_eep_load(): "
4759 "time out when read eeprome\n"));
4760 iwh_eep_sem_up(sc);
4761 return (IWH_FAIL);
4762 }
4763
4764 eep_p[addr/2] = LE_16(rv >> 16);
4765 }
4766
4767 iwh_eep_sem_up(sc);
4768 return (IWH_SUCCESS);
4769 }
4770
4771 /*
4772 * initialize mac address in ieee80211com_t struct
4773 */
4774 static void
4775 iwh_get_mac_from_eep(iwh_sc_t *sc)
4776 {
4777 ieee80211com_t *ic = &sc->sc_ic;
4778
4779 IEEE80211_ADDR_COPY(ic->ic_macaddr, &sc->sc_eep_map[EEP_MAC_ADDRESS]);
4780
4781 IWH_DBG((IWH_DEBUG_EEPROM, "iwh_get_mac_from_eep(): "
4782 "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
4783 ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
4784 ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
4785 }
4786
4787 /*
4788 * main initialization function
4789 */
4790 static int
4791 iwh_init(iwh_sc_t *sc)
4792 {
4793 int err = IWH_FAIL;
4794 clock_t clk;
4795
4796 /*
4797 * release buffer for calibration
4798 */
4799 iwh_release_calib_buffer(sc);
4800
4801 mutex_enter(&sc->sc_glock);
4802 atomic_and_32(&sc->sc_flags, ~IWH_F_FW_INIT);
4803
4804 err = iwh_init_common(sc);
4805 if (err != IWH_SUCCESS) {
4806 mutex_exit(&sc->sc_glock);
4807 return (IWH_FAIL);
4808 }
4809
4810 /*
4811 * backup ucode data part for future use.
4812 */
4813 bcopy(sc->sc_dma_fw_data.mem_va,
4814 sc->sc_dma_fw_data_bak.mem_va,
4815 sc->sc_dma_fw_data.alength);
4816
4817 /* load firmware init segment into NIC */
4818 err = iwh_load_init_firmware(sc);
4819 if (err != IWH_SUCCESS) {
4820 cmn_err(CE_WARN, "iwh_init(): "
4821 "failed to setup init firmware\n");
4822 mutex_exit(&sc->sc_glock);
4823 return (IWH_FAIL);
4824 }
4825
4826 /*
4827 * now press "execute" to start running
4828 */
4829 IWH_WRITE(sc, CSR_RESET, 0);
4830
4831 clk = ddi_get_lbolt() + drv_usectohz(1000000);
4832 while (!(sc->sc_flags & IWH_F_FW_INIT)) {
4833 if (cv_timedwait(&sc->sc_ucode_cv,
4834 &sc->sc_glock, clk) < 0) {
4835 break;
4836 }
4837 }
4838
4839 if (!(sc->sc_flags & IWH_F_FW_INIT)) {
4840 cmn_err(CE_WARN, "iwh_init(): "
4841 "failed to process init alive.\n");
4842 mutex_exit(&sc->sc_glock);
4843 return (IWH_FAIL);
4844 }
4845
4846 mutex_exit(&sc->sc_glock);
4847
4848 /*
4849 * stop the chipset before initializing it again
4850 */
4851 iwh_stop(sc);
4852
4853 mutex_enter(&sc->sc_glock);
4854 atomic_and_32(&sc->sc_flags, ~IWH_F_FW_INIT);
4855
4856 err = iwh_init_common(sc);
4857 if (err != IWH_SUCCESS) {
4858 mutex_exit(&sc->sc_glock);
4859 return (IWH_FAIL);
4860 }
4861
4862 /*
4863 * load firmware run segment into NIC
4864 */
4865 err = iwh_load_run_firmware(sc);
4866 if (err != IWH_SUCCESS) {
4867 cmn_err(CE_WARN, "iwh_init(): "
4868 "failed to setup run firmware\n");
4869 mutex_exit(&sc->sc_glock);
4870 return (IWH_FAIL);
4871 }
4872
4873 /*
4874 * now press "execute" to start running
4875 */
4876 IWH_WRITE(sc, CSR_RESET, 0);
4877
4878 clk = ddi_get_lbolt() + drv_usectohz(1000000);
4879 while (!(sc->sc_flags & IWH_F_FW_INIT)) {
4880 if (cv_timedwait(&sc->sc_ucode_cv,
4881 &sc->sc_glock, clk) < 0) {
4882 break;
4883 }
4884 }
4885
4886 if (!(sc->sc_flags & IWH_F_FW_INIT)) {
4887 cmn_err(CE_WARN, "iwh_init(): "
4888 "failed to process runtime alive.\n");
4889 mutex_exit(&sc->sc_glock);
4890 return (IWH_FAIL);
4891 }
4892
4893 mutex_exit(&sc->sc_glock);
4894
4895 DELAY(1000);
4896
4897 mutex_enter(&sc->sc_glock);
4898 atomic_and_32(&sc->sc_flags, ~IWH_F_FW_INIT);
4899
4900 /*
4901 * at this point, the firmware is loaded OK, then config the hardware
4902 * with the ucode API, including rxon, txpower, etc.
4903 */
4904 err = iwh_config(sc);
4905 if (err) {
4906 cmn_err(CE_WARN, "iwh_init(): "
4907 "failed to configure device\n");
4908 mutex_exit(&sc->sc_glock);
4909 return (IWH_FAIL);
4910 }
4911
4912 /*
4913 * at this point, hardware may receive beacons :)
4914 */
4915 mutex_exit(&sc->sc_glock);
4916 return (IWH_SUCCESS);
4917 }
4918
4919 /*
4920 * stop or disable NIC
4921 */
4922 static void
4923 iwh_stop(iwh_sc_t *sc)
4924 {
4925 uint32_t tmp;
4926 int i;
4927
4928 /*
4929 * bypass the lock if quiesced
4930 */
4931 if (!(sc->sc_flags & IWH_F_QUIESCED)) {
4932 mutex_enter(&sc->sc_glock);
4933 }
4934
4935 IWH_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
4936 /*
4937 * disable interrupts
4938 */
4939 IWH_WRITE(sc, CSR_INT_MASK, 0);
4940 IWH_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
4941 IWH_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);
4942
4943 /*
4944 * reset all Tx rings
4945 */
4946 for (i = 0; i < IWH_NUM_QUEUES; i++) {
4947 iwh_reset_tx_ring(sc, &sc->sc_txq[i]);
4948 }
4949
4950 /*
4951 * reset Rx ring
4952 */
4953 iwh_reset_rx_ring(sc);
4954
4955 iwh_mac_access_enter(sc);
4956 iwh_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
4957 iwh_mac_access_exit(sc);
4958
4959 DELAY(5);
4960
4961 iwh_stop_master(sc);
4962
4963 mutex_enter(&sc->sc_mt_lock);
4964 sc->sc_tx_timer = 0;
4965 mutex_exit(&sc->sc_mt_lock);
4966
4967 tmp = IWH_READ(sc, CSR_RESET);
4968 IWH_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);
4969
4970 /*
4971 * bypass the lock if quiesced
4972 */
4973 if (!(sc->sc_flags & IWH_F_QUIESCED)) {
4974 mutex_exit(&sc->sc_glock);
4975 }
4976 }
4977
4978 /*
4979 * Naive implementation of the Adaptive Multi Rate Retry algorithm:
4980 * "IEEE 802.11 Rate Adaptation: A Practical Approach"
4981 * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
4982 * INRIA Sophia - Projet Planete
4983 * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
4984 */
4985 #define is_success(amrr) \
4986 ((amrr)->retrycnt < (amrr)->txcnt / 10)
4987 #define is_failure(amrr) \
4988 ((amrr)->retrycnt > (amrr)->txcnt / 3)
4989 #define is_enough(amrr) \
4990 ((amrr)->txcnt > 200)
4991 #define not_very_few(amrr) \
4992 ((amrr)->txcnt > 40)
4993 #define is_min_rate(in) \
4994 (0 == (in)->in_txrate)
4995 #define is_max_rate(in) \
4996 ((in)->in_rates.ir_nrates - 1 == (in)->in_txrate)
4997 #define increase_rate(in) \
4998 ((in)->in_txrate++)
4999 #define decrease_rate(in) \
5000 ((in)->in_txrate--)
5001 #define reset_cnt(amrr) \
5002 { (amrr)->txcnt = (amrr)->retrycnt = 0; }
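/*
 * with the thresholds above, a sampling period counts as a success when
 * fewer than 10% of the transmitted frames needed a retry, and as a
 * failure when more than a third did; the rate is only adjusted once
 * enough frames have been observed.
 */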
5003
5004 #define IWH_AMRR_MIN_SUCCESS_THRESHOLD 1
5005 #define IWH_AMRR_MAX_SUCCESS_THRESHOLD 15
5006
5007 static void
5008 iwh_amrr_init(iwh_amrr_t *amrr)
5009 {
5010 amrr->success = 0;
5011 amrr->recovery = 0;
5012 amrr->txcnt = amrr->retrycnt = 0;
5013 amrr->success_threshold = IWH_AMRR_MIN_SUCCESS_THRESHOLD;
5014 amrr->ht_mcs_idx = 0; /* 6Mbps */
5015 }
5016
5017 static void
5018 iwh_amrr_timeout(iwh_sc_t *sc)
5019 {
5020 ieee80211com_t *ic = &sc->sc_ic;
5021
5022 IWH_DBG((IWH_DEBUG_RATECTL, "iwh_amrr_timeout(): "
5023 "enter\n"));
5024
5025 if (IEEE80211_M_STA == ic->ic_opmode) {
5026 iwh_amrr_ratectl(NULL, ic->ic_bss);
5027 } else {
5028 ieee80211_iterate_nodes(&ic->ic_sta, iwh_amrr_ratectl, NULL);
5029 }
5030
5031 sc->sc_clk = ddi_get_lbolt();
5032 }
5033
5034 static int
5035 iwh_is_max_rate(ieee80211_node_t *in)
5036 {
5037 int i;
5038 iwh_amrr_t *amrr = (iwh_amrr_t *)in;
5039 uint8_t r = (uint8_t)amrr->ht_mcs_idx;
5040 ieee80211com_t *ic = in->in_ic;
5041 iwh_sc_t *sc = (iwh_sc_t *)ic;
5042
5043 if (in->in_flags & IEEE80211_NODE_HT) {
5044 for (i = in->in_htrates.rs_nrates - 1; i >= 0; i--) {
5045 r = in->in_htrates.rs_rates[i] &
5046 IEEE80211_RATE_VAL;
5047 if (sc->sc_ht_conf.tx_support_mcs[r/8] &
5048 (1 << (r%8))) {
5049 break;
5050 }
5051 }
5052
5053 return (r == (uint8_t)amrr->ht_mcs_idx);
5054 } else {
5055 return (is_max_rate(in));
5056 }
5057 }
5058
5059 static int
5060 iwh_is_min_rate(ieee80211_node_t *in)
5061 {
5062 int i;
5063 uint8_t r = 0;
5064 iwh_amrr_t *amrr = (iwh_amrr_t *)in;
5065 ieee80211com_t *ic = in->in_ic;
5066 iwh_sc_t *sc = (iwh_sc_t *)ic;
5067
5068 if (in->in_flags & IEEE80211_NODE_HT) {
5069 for (i = 0; i < in->in_htrates.rs_nrates; i++) {
5070 r = in->in_htrates.rs_rates[i] &
5071 IEEE80211_RATE_VAL;
5072 if (sc->sc_ht_conf.tx_support_mcs[r/8] &
5073 (1 << (r%8))) {
5074 break;
5075 }
5076 }
5077
5078 return (r == (uint8_t)amrr->ht_mcs_idx);
5079 } else {
5080 return (is_min_rate(in));
5081 }
5082 }
5083
5084 static void
5085 iwh_increase_rate(ieee80211_node_t *in)
5086 {
5087 int i;
5088 uint8_t r;
5089 iwh_amrr_t *amrr = (iwh_amrr_t *)in;
5090 ieee80211com_t *ic = in->in_ic;
5091 iwh_sc_t *sc = (iwh_sc_t *)ic;
5092
5093 if (in->in_flags & IEEE80211_NODE_HT) {
5094 again:
5095 amrr->ht_mcs_idx++;
5096
5097 for (i = 0; i < in->in_htrates.rs_nrates; i++) {
5098 r = in->in_htrates.rs_rates[i] &
5099 IEEE80211_RATE_VAL;
5100 if ((r == (uint8_t)amrr->ht_mcs_idx) &&
5101 (sc->sc_ht_conf.tx_support_mcs[r/8] &
5102 (1 << (r%8)))) {
5103 break;
5104 }
5105 }
5106
5107 if (i >= in->in_htrates.rs_nrates) {
5108 goto again;
5109 }
5110 } else {
5111 increase_rate(in);
5112 }
5113 }
5114
5115 static void
5116 iwh_decrease_rate(ieee80211_node_t *in)
5117 {
5118 int i;
5119 uint8_t r;
5120 iwh_amrr_t *amrr = (iwh_amrr_t *)in;
5121 ieee80211com_t *ic = in->in_ic;
5122 iwh_sc_t *sc = (iwh_sc_t *)ic;
5123
5124 if (in->in_flags & IEEE80211_NODE_HT) {
5125 again:
5126 amrr->ht_mcs_idx--;
5127
5128 for (i = 0; i < in->in_htrates.rs_nrates; i++) {
5129 r = in->in_htrates.rs_rates[i] &
5130 IEEE80211_RATE_VAL;
5131 if ((r == (uint8_t)amrr->ht_mcs_idx) &&
5132 (sc->sc_ht_conf.tx_support_mcs[r/8] &
5133 (1 << (r%8)))) {
5134 break;
5135 }
5136 }
5137
5138 if (i >= in->in_htrates.rs_nrates) {
5139 goto again;
5140 }
5141 } else {
5142 decrease_rate(in);
5143 }
5144 }
5145
5146 /* ARGSUSED */
5147 static void
5148 iwh_amrr_ratectl(void *arg, ieee80211_node_t *in)
5149 {
5150 iwh_amrr_t *amrr = (iwh_amrr_t *)in;
5151 int need_change = 0;
5152
5153 if (is_success(amrr) && is_enough(amrr)) {
5154 amrr->success++;
5155 if (amrr->success >= amrr->success_threshold &&
5156 !iwh_is_max_rate(in)) {
5157 amrr->recovery = 1;
5158 amrr->success = 0;
5159 iwh_increase_rate(in);
5160 IWH_DBG((IWH_DEBUG_RATECTL, "iwh_amrr_ratectl(): "
5161 "AMRR increasing rate %d "
5162 "(txcnt=%d retrycnt=%d), mcs_idx=%d\n",
5163 in->in_txrate, amrr->txcnt,
5164 amrr->retrycnt, amrr->ht_mcs_idx));
5165 need_change = 1;
5166 } else {
5167 amrr->recovery = 0;
5168 }
5169 } else if (not_very_few(amrr) && is_failure(amrr)) {
5170 amrr->success = 0;
5171 if (!iwh_is_min_rate(in)) {
5172 if (amrr->recovery) {
5173 amrr->success_threshold++;
5174 if (amrr->success_threshold >
5175 IWH_AMRR_MAX_SUCCESS_THRESHOLD) {
5176 amrr->success_threshold =
5177 IWH_AMRR_MAX_SUCCESS_THRESHOLD;
5178 }
5179 } else {
5180 amrr->success_threshold =
5181 IWH_AMRR_MIN_SUCCESS_THRESHOLD;
5182 }
5183 iwh_decrease_rate(in);
5184 IWH_DBG((IWH_DEBUG_RATECTL, "iwh_amrr_ratectl(): "
5185 "AMRR decreasing rate %d "
5186 "(txcnt=%d retrycnt=%d), mcs_idx=%d\n",
5187 in->in_txrate, amrr->txcnt,
5188 amrr->retrycnt, amrr->ht_mcs_idx));
5189 need_change = 1;
5190 }
5191 amrr->recovery = 0; /* paper is incorrect */
5192 }
5193
5194 if (is_enough(amrr) || need_change) {
5195 reset_cnt(amrr);
5196 }
5197 }
5198
5199 /*
5200 * translate indirect address in eeprom to direct address
5201 * in eeprom and return the address of the entry whose indirect address
5202 * is indi_addr
5203 */
5204 static uint8_t *
5205 iwh_eep_addr_trans(iwh_sc_t *sc, uint32_t indi_addr)
5206 {
5207 uint32_t di_addr;
5208 uint16_t temp;
5209
5210 if (!(indi_addr & INDIRECT_ADDRESS)) {
5211 di_addr = indi_addr;
5212 return (&sc->sc_eep_map[di_addr]);
5213 }
5214
5215 switch (indi_addr & INDIRECT_TYPE_MSK) {
5216 case INDIRECT_GENERAL:
5217 temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_GENERAL);
5218 break;
5219
5220 case INDIRECT_HOST:
5221 temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_HOST);
5222 break;
5223
5224 case INDIRECT_REGULATORY:
5225 temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_REGULATORY);
5226 break;
5227
5228 case INDIRECT_CALIBRATION:
5229 temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_CALIBRATION);
5230 break;
5231
5232 case INDIRECT_PROCESS_ADJST:
5233 temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_PROCESS_ADJST);
5234 break;
5235
5236 case INDIRECT_OTHERS:
5237 temp = IWH_READ_EEP_SHORT(sc, EEP_LINK_OTHERS);
5238 break;
5239
5240 default:
5241 temp = 0;
5242 cmn_err(CE_WARN, "iwh_eep_addr_trans(): "
5243 "incorrect indirect eeprom address.\n");
5244 break;
5245 }
5246
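/*
 * the link value read from the EEPROM is a 16-bit word index, so it is
 * shifted left by one to form a byte offset before adding the masked
 * indirect offset.
 */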
5247 di_addr = (indi_addr & ADDRESS_MSK) + (temp << 1);
5248
5249 return (&sc->sc_eep_map[di_addr]);
5250 }
5251
5252 /*
5253 * load a section of ucode into the NIC
5254 */
5255 static int
5256 iwh_put_seg_fw(iwh_sc_t *sc, uint32_t addr_s, uint32_t addr_d, uint32_t len)
5257 {
5258
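/*
 * load the segment over the service DMA channel: pause the channel,
 * program the SRAM destination, the DRAM source address and length,
 * mark the transfer buffer valid, then re-enable the channel.
 */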
5259 iwh_mac_access_enter(sc);
5260
5261 IWH_WRITE(sc, IWH_FH_TCSR_CHNL_TX_CONFIG_REG(IWH_FH_SRVC_CHNL),
5262 IWH_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
5263
5264 IWH_WRITE(sc, IWH_FH_SRVC_CHNL_SRAM_ADDR_REG(IWH_FH_SRVC_CHNL), addr_d);
5265
5266 IWH_WRITE(sc, IWH_FH_TFDIB_CTRL0_REG(IWH_FH_SRVC_CHNL),
5267 (addr_s & FH_MEM_TFDIB_DRAM_ADDR_LSB_MASK));
5268
5269 IWH_WRITE(sc, IWH_FH_TFDIB_CTRL1_REG(IWH_FH_SRVC_CHNL), len);
5270
5271 IWH_WRITE(sc, IWH_FH_TCSR_CHNL_TX_BUF_STS_REG(IWH_FH_SRVC_CHNL),
5272 (1 << IWH_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
5273 (1 << IWH_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
5274 IWH_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
5275
5276 IWH_WRITE(sc, IWH_FH_TCSR_CHNL_TX_CONFIG_REG(IWH_FH_SRVC_CHNL),
5277 IWH_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
5278 IWH_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL |
5279 IWH_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
5280
5281 iwh_mac_access_exit(sc);
5282
5283 return (IWH_SUCCESS);
5284 }
5285
5286 /*
5287 * necessary settings during alive notification
5288 */
5289 static int
5290 iwh_alive_common(iwh_sc_t *sc)
5291 {
5292 uint32_t base;
5293 uint32_t i;
5294 iwh_wimax_coex_cmd_t w_cmd;
5295 iwh_calibration_crystal_cmd_t c_cmd;
5296 uint32_t rv = IWH_FAIL;
5297
5298 /*
5299 * initialize SCD related registers to make TX work.
5300 */
5301 iwh_mac_access_enter(sc);
5302
5303 /*
5304 * read the scheduler's SRAM base address.
5305 */
5306 sc->sc_scd_base = iwh_reg_read(sc, IWH_SCD_SRAM_BASE_ADDR);
5307
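/*
 * zero the scheduler's context data, TX status bitmap and translation
 * table areas in SRAM before setting up the queues.
 */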
5308 for (base = sc->sc_scd_base + IWH_SCD_CONTEXT_DATA_OFFSET;
5309 base < sc->sc_scd_base + IWH_SCD_TX_STTS_BITMAP_OFFSET;
5310 base += 4) {
5311 iwh_mem_write(sc, base, 0);
5312 }
5313
5314 for (; base < sc->sc_scd_base + IWH_SCD_TRANSLATE_TBL_OFFSET;
5315 base += 4) {
5316 iwh_mem_write(sc, base, 0);
5317 }
5318
5319 for (i = 0; i < sizeof (uint16_t) * IWH_NUM_QUEUES; i += 4) {
5320 iwh_mem_write(sc, base + i, 0);
5321 }
5322
5323 iwh_reg_write(sc, IWH_SCD_DRAM_BASE_ADDR,
5324 sc->sc_dma_sh.cookie.dmac_address >> 10);
5325
5326 iwh_reg_write(sc, IWH_SCD_QUEUECHAIN_SEL,
5327 IWH_SCD_QUEUECHAIN_SEL_ALL(IWH_NUM_QUEUES));
5328
5329 iwh_reg_write(sc, IWH_SCD_AGGR_SEL, 0);
5330
5331 for (i = 0; i < IWH_NUM_QUEUES; i++) {
5332 iwh_reg_write(sc, IWH_SCD_QUEUE_RDPTR(i), 0);
5333 IWH_WRITE(sc, HBUS_TARG_WRPTR, 0 | (i << 8));
5334 iwh_mem_write(sc, sc->sc_scd_base +
5335 IWH_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
5336 iwh_mem_write(sc, sc->sc_scd_base +
5337 IWH_SCD_CONTEXT_QUEUE_OFFSET(i) +
5338 sizeof (uint32_t),
5339 ((SCD_WIN_SIZE << IWH_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
5340 IWH_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
5341 ((SCD_FRAME_LIMIT <<
5342 IWH_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
5343 IWH_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
5344 }
5345
5346 iwh_reg_write(sc, IWH_SCD_INTERRUPT_MASK, (1 << IWH_NUM_QUEUES) - 1);
5347
5348 iwh_reg_write(sc, (IWH_SCD_BASE + 0x10),
5349 SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
5350
5351 IWH_WRITE(sc, HBUS_TARG_WRPTR, (IWH_CMD_QUEUE_NUM << 8));
5352 iwh_reg_write(sc, IWH_SCD_QUEUE_RDPTR(IWH_CMD_QUEUE_NUM), 0);
5353
5354 /*
5355 * queues 0-7 map to FIFOs 0-7 and
5356 * all queues work in FIFO mode (non-scheduler-ack)
5357 */
5358 for (i = 0; i < 4; i++) {
5359 iwh_reg_write(sc, IWH_SCD_QUEUE_STATUS_BITS(i),
5360 (1 << IWH_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
5361 ((3-i) << IWH_SCD_QUEUE_STTS_REG_POS_TXF) |
5362 (1 << IWH_SCD_QUEUE_STTS_REG_POS_WSL) |
5363 IWH_SCD_QUEUE_STTS_REG_MSK);
5364 }
5365
5366 iwh_reg_write(sc, IWH_SCD_QUEUE_STATUS_BITS(IWH_CMD_QUEUE_NUM),
5367 (1 << IWH_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
5368 (IWH_CMD_FIFO_NUM << IWH_SCD_QUEUE_STTS_REG_POS_TXF) |
5369 (1 << IWH_SCD_QUEUE_STTS_REG_POS_WSL) |
5370 IWH_SCD_QUEUE_STTS_REG_MSK);
5371
5372 for (i = 5; i < 7; i++) {
5373 iwh_reg_write(sc, IWH_SCD_QUEUE_STATUS_BITS(i),
5374 (1 << IWH_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
5375 (i << IWH_SCD_QUEUE_STTS_REG_POS_TXF) |
5376 (1 << IWH_SCD_QUEUE_STTS_REG_POS_WSL) |
5377 IWH_SCD_QUEUE_STTS_REG_MSK);
5378 }
5379
5380 iwh_mac_access_exit(sc);
5381
5382 (void) memset(&w_cmd, 0, sizeof (w_cmd));
5383
5384 rv = iwh_cmd(sc, COEX_PRIORITY_TABLE_CMD, &w_cmd, sizeof (w_cmd), 1);
5385 if (rv != IWH_SUCCESS) {
5386 cmn_err(CE_WARN, "iwh_alive_common(): "
5387 "failed to send wimax coexist command.\n");
5388 return (rv);
5389 }
5390
5391 if ((sc->sc_dev_id != 0x423c) &&
5392 (sc->sc_dev_id != 0x423d)) {
5393 (void) memset(&c_cmd, 0, sizeof (c_cmd));
5394
5395 c_cmd.opCode = PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
5396 c_cmd.data.cap_pin1 = LE_16(sc->sc_eep_calib->xtal_calib[0]);
5397 c_cmd.data.cap_pin2 = LE_16(sc->sc_eep_calib->xtal_calib[1]);
5398
5399 rv = iwh_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
5400 &c_cmd, sizeof (c_cmd), 1);
5401 if (rv != IWH_SUCCESS) {
5402 cmn_err(CE_WARN, "iwh_alive_common(): "
5403 "failed to send crystal"
5404 "frq calibration command.\n");
5405 return (rv);
5406 }
5407
5408 /*
5409 * make sure crystal frequency calibration is ready
5410 * before the next operations.
5411 */
5412 DELAY(1000);
5413 }
5414
5415 return (IWH_SUCCESS);
5416 }
5417
5418 /*
5419 * save results of calibration from ucode
5420 */
5421 static void
5422 iwh_save_calib_result(iwh_sc_t *sc, iwh_rx_desc_t *desc)
5423 {
5424 struct iwh_calib_results *res_p = &sc->sc_calib_results;
5425 struct iwh_calib_hdr *calib_hdr = (struct iwh_calib_hdr *)(desc + 1);
5426 int len = LE_32(desc->len);
5427
5428 /*
5429 * ensure the size of the buffer is not too big
5430 */
5431 len = (len & FH_RSCSR_FRAME_SIZE_MASK) - 4;
5432
5433 switch (calib_hdr->op_code) {
5434 case PHY_CALIBRATE_LO_CMD:
5435 if (NULL == res_p->lo_res) {
5436 res_p->lo_res = kmem_alloc(len, KM_NOSLEEP);
5437 }
5438
5439 if (NULL == res_p->lo_res) {
5440 cmn_err(CE_WARN, "iwh_save_calib_result(): "
5441 "failed to allocate memory.\n");
5442 return;
5443 }
5444
5445 res_p->lo_res_len = len;
5446 bcopy(calib_hdr, res_p->lo_res, len);
5447 break;
5448
5449 case PHY_CALIBRATE_TX_IQ_CMD:
5450 if (NULL == res_p->tx_iq_res) {
5451 res_p->tx_iq_res = kmem_alloc(len, KM_NOSLEEP);
5452 }
5453
5454 if (NULL == res_p->tx_iq_res) {
5455 cmn_err(CE_WARN, "iwh_save_calib_result(): "
5456 "failed to allocate memory.\n");
5457 return;
5458 }
5459
5460 res_p->tx_iq_res_len = len;
5461 bcopy(calib_hdr, res_p->tx_iq_res, len);
5462 break;
5463
5464 case PHY_CALIBRATE_TX_IQ_PERD_CMD:
5465 if (NULL == res_p->tx_iq_perd_res) {
5466 res_p->tx_iq_perd_res = kmem_alloc(len, KM_NOSLEEP);
5467 }
5468
5469 if (NULL == res_p->tx_iq_perd_res) {
5470 cmn_err(CE_WARN, "iwh_save_calib_result(): "
5471 "failed to allocate memory.\n");
5472 return;
5473 }
5474
5475 res_p->tx_iq_perd_res_len = len;
5476 bcopy(calib_hdr, res_p->tx_iq_perd_res, len);
5477 break;
5478
5479 case PHY_CALIBRATE_DC_CMD:
5480 if (NULL == res_p->dc_res) {
5481 res_p->dc_res = kmem_alloc(len, KM_NOSLEEP);
5482 }
5483
5484 if (NULL == res_p->dc_res) {
5485 cmn_err(CE_WARN, "iwh_save_calib_result(): "
5486 "failed to allocate memory.\n");
5487 return;
5488 }
5489
5490 res_p->dc_res_len = len;
5491 bcopy(calib_hdr, res_p->dc_res, len);
5492 break;
5493
5494 case PHY_CALIBRATE_BASE_BAND_CMD:
5495 if (NULL == res_p->base_band_res) {
5496 res_p->base_band_res = kmem_alloc(len, KM_NOSLEEP);
5497 }
5498
5499 if (NULL == res_p->base_band_res) {
5500 cmn_err(CE_WARN, "iwh_save_calib_result(): "
5501 "failed to allocate memory.\n");
5502 return;
5503 }
5504
5505 res_p->base_band_res_len = len;
5506 bcopy(calib_hdr, res_p->base_band_res, len);
5507 break;
5508
5509 default:
5510 cmn_err(CE_WARN, "iwh_save_calib_result(): "
5511 "incorrect calibration type(%d).\n", calib_hdr->op_code);
5512 break;
5513 }
5514
5515 }
5516
5517 /*
5518 * configure TX power table
5519 */
5520 static int
5521 iwh_tx_power_table(iwh_sc_t *sc, int async)
5522 {
5523 iwh_tx_power_table_cmd_t txpower;
5524 int i, err = IWH_FAIL;
5525
5526 (void) memset(&txpower, 0, sizeof (txpower));
5527
5528 txpower.band = 1; /* for 2.4G */
5529 txpower.channel = (uint8_t)LE_16(sc->sc_config.chan);
5530 txpower.pa_measurements = 1;
5531 txpower.max_mcs = 23;
5532
5533 for (i = 0; i < 24; i++) {
5534 txpower.db.ht_ofdm_power[i].s.radio_tx_gain[0] = 0x16;
5535 txpower.db.ht_ofdm_power[i].s.radio_tx_gain[1] = 0x16;
5536 txpower.db.ht_ofdm_power[i].s.radio_tx_gain[2] = 0x16;
5537 txpower.db.ht_ofdm_power[i].s.dsp_predis_atten[0] = 0x6E;
5538 txpower.db.ht_ofdm_power[i].s.dsp_predis_atten[1] = 0x6E;
5539 txpower.db.ht_ofdm_power[i].s.dsp_predis_atten[2] = 0x6E;
5540 }
5541
5542 for (i = 0; i < 2; i++) {
5543 txpower.db.cck_power[i].s.radio_tx_gain[0] = 0x16;
5544 txpower.db.cck_power[i].s.radio_tx_gain[1] = 0x16;
5545 txpower.db.cck_power[i].s.radio_tx_gain[2] = 0x16;
5546 txpower.db.cck_power[i].s.dsp_predis_atten[0] = 0x6E;
5547 txpower.db.cck_power[i].s.dsp_predis_atten[1] = 0x6E;
5548 txpower.db.cck_power[i].s.dsp_predis_atten[2] = 0x6E;
5549 }
5550
5551 err = iwh_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &txpower,
5552 sizeof (txpower), async);
5553 if (err != IWH_SUCCESS) {
5554 cmn_err(CE_WARN, "iwh_tx_power_table(): "
5555 "failed to set tx power table.\n");
5556 return (err);
5557 }
5558
5559 return (err);
5560 }
5561
5562 static void
5563 iwh_release_calib_buffer(iwh_sc_t *sc)
5564 {
5565 if (sc->sc_calib_results.lo_res != NULL) {
5566 kmem_free(sc->sc_calib_results.lo_res,
5567 sc->sc_calib_results.lo_res_len);
5568 sc->sc_calib_results.lo_res = NULL;
5569 }
5570
5571 if (sc->sc_calib_results.tx_iq_res != NULL) {
5572 kmem_free(sc->sc_calib_results.tx_iq_res,
5573 sc->sc_calib_results.tx_iq_res_len);
5574 sc->sc_calib_results.tx_iq_res = NULL;
5575 }
5576
5577 if (sc->sc_calib_results.tx_iq_perd_res != NULL) {
5578 kmem_free(sc->sc_calib_results.tx_iq_perd_res,
5579 sc->sc_calib_results.tx_iq_perd_res_len);
5580 sc->sc_calib_results.tx_iq_perd_res = NULL;
5581 }
5582
5583 if (sc->sc_calib_results.dc_res != NULL) {
5584 kmem_free(sc->sc_calib_results.dc_res,
5585 sc->sc_calib_results.dc_res_len);
5586 sc->sc_calib_results.dc_res = NULL;
5587 }
5588
5589 if (sc->sc_calib_results.base_band_res != NULL) {
5590 kmem_free(sc->sc_calib_results.base_band_res,
5591 sc->sc_calib_results.base_band_res_len);
5592 sc->sc_calib_results.base_band_res = NULL;
5593 }
5594 }
5595
5596 /*
5597 * common section of initialization
5598 */
5599 static int
5600 iwh_init_common(iwh_sc_t *sc)
5601 {
5602 int32_t qid;
5603 uint32_t tmp;
5604
5605 if (iwh_reset_hw(sc) != IWH_SUCCESS) {
5606 cmn_err(CE_WARN, "iwh_init_common(): "
5607 "failed to reset hardware\n");
5608 return (IWH_FAIL);
5609 }
5610
5611 (void) iwh_preinit(sc);
5612
5613 tmp = IWH_READ(sc, CSR_GP_CNTRL);
5614 if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
5615 cmn_err(CE_NOTE, "iwh_init_common(): "
5616 "radio transmitter is off\n");
5617 return (IWH_FAIL);
5618 }
5619
5620 /*
5621 * init Rx ring
5622 */
5623 iwh_mac_access_enter(sc);
5624 IWH_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
5625
5626 IWH_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
5627 IWH_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
5628 sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);
5629
5630 IWH_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
5631 ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
5632 offsetof(struct iwh_shared, val0)) >> 4));
5633
5634 IWH_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
5635 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
5636 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
5637 IWH_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K |
5638 (RX_QUEUE_SIZE_LOG <<
5639 FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
5640 iwh_mac_access_exit(sc);
5641 IWH_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
5642 (RX_QUEUE_SIZE - 1) & ~0x7);
5643
5644 /*
5645 * init Tx rings
5646 */
5647 iwh_mac_access_enter(sc);
5648 iwh_reg_write(sc, IWH_SCD_TXFACT, 0);
5649
5650 /*
5651 * keep warm page
5652 */
5653 IWH_WRITE(sc, IWH_FH_KW_MEM_ADDR_REG,
5654 sc->sc_dma_kw.cookie.dmac_address >> 4);
5655
5656 for (qid = 0; qid < IWH_NUM_QUEUES; qid++) {
5657 IWH_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
5658 sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
5659 IWH_WRITE(sc, IWH_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
5660 IWH_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
5661 IWH_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
5662 }
5663
5664 iwh_mac_access_exit(sc);
5665
5666 /*
5667 * clear "radio off" and "disable command" bits
5668 */
5669 IWH_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5670 IWH_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
5671 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
5672
5673 /*
5674 * clear any pending interrupts
5675 */
5676 IWH_WRITE(sc, CSR_INT, 0xffffffff);
5677
5678 /*
5679 * enable interrupts
5680 */
5681 IWH_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
5682
5683 IWH_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5684 IWH_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5685
5686 return (IWH_SUCCESS);
5687 }
5688
5689 static int
5690 iwh_fast_recover(iwh_sc_t *sc)
5691 {
5692 ieee80211com_t *ic = &sc->sc_ic;
5693 int err = IWH_FAIL;
5694
5695 mutex_enter(&sc->sc_glock);
5696
5697 /*
5698 * restore runtime configuration
5699 */
5700 bcopy(&sc->sc_config_save, &sc->sc_config,
5701 sizeof (sc->sc_config));
5702
5703 sc->sc_config.assoc_id = 0;
5704 sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
5705
5706 if ((err = iwh_hw_set_before_auth(sc)) != IWH_SUCCESS) {
5707 cmn_err(CE_WARN, "iwh_fast_recover(): "
5708 "could not setup authentication\n");
5709 mutex_exit(&sc->sc_glock);
5710 return (err);
5711 }
5712
5713 bcopy(&sc->sc_config_save, &sc->sc_config,
5714 sizeof (sc->sc_config));
5715
5716 /*
5717 * update adapter's configuration
5718 */
5719 err = iwh_run_state_config(sc);
5720 if (err != IWH_SUCCESS) {
5721 cmn_err(CE_WARN, "iwh_fast_recover(): "
5722 "failed to setup association\n");
5723 mutex_exit(&sc->sc_glock);
5724 return (err);
5725 }
5726
5727 /*
5728 * set LED on
5729 */
5730 iwh_set_led(sc, 2, 0, 1);
5731
5732 mutex_exit(&sc->sc_glock);
5733
5734 atomic_and_32(&sc->sc_flags, ~IWH_F_HW_ERR_RECOVER);
5735
5736 /*
5737 * start queue
5738 */
5739 IWH_DBG((IWH_DEBUG_FW, "iwh_fast_recover(): "
5740 "resume xmit\n"));
5741 mac_tx_update(ic->ic_mach);
5742
5743 return (IWH_SUCCESS);
5744 }
5745
5746 static int
5747 iwh_run_state_config(iwh_sc_t *sc)
5748 {
5749 struct ieee80211com *ic = &sc->sc_ic;
5750 ieee80211_node_t *in = ic->ic_bss;
5751 uint32_t ht_protec = (uint32_t)(-1);
5752 int err = IWH_FAIL;
5753
5754 /*
5755 * update adapter's configuration
5756 */
5757 sc->sc_config.assoc_id = in->in_associd & 0x3fff;
5758
5759 /*
5760 * short preamble/slot time are
5761 * negotiated when associating
5762 */
5763 sc->sc_config.flags &=
5764 ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
5765 RXON_FLG_SHORT_SLOT_MSK);
5766
5767 if (ic->ic_flags & IEEE80211_F_SHSLOT) {
5768 sc->sc_config.flags |=
5769 LE_32(RXON_FLG_SHORT_SLOT_MSK);
5770 }
5771
5772 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
5773 sc->sc_config.flags |=
5774 LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
5775 }
5776
5777 if (in->in_flags & IEEE80211_NODE_HT) {
5778 ht_protec = in->in_htopmode;
5779 if (ht_protec > 3) {
5780 cmn_err(CE_WARN, "iwh_run_state_config(): "
5781 "HT protection mode is not correct.\n");
5782 return (IWH_FAIL);
5783 } else if (NO_HT_PROT == ht_protec) {
5784 ht_protec = sc->sc_ht_conf.ht_protection;
5785 }
5786
5787 sc->sc_config.flags |=
5788 LE_32(ht_protec << RXON_FLG_HT_OPERATING_MODE_POS);
5789 }
5790
5791 /*
5792 * set RX chains/antennas.
5793 */
5794 iwh_config_rxon_chain(sc);
5795
5796 sc->sc_config.filter_flags |=
5797 LE_32(RXON_FILTER_ASSOC_MSK);
5798
5799 if (ic->ic_opmode != IEEE80211_M_STA) {
5800 sc->sc_config.filter_flags |=
5801 LE_32(RXON_FILTER_BCON_AWARE_MSK);
5802 }
5803
5804 IWH_DBG((IWH_DEBUG_80211, "iwh_run_state_config(): "
5805 "config chan %d flags %x"
5806 " filter_flags %x\n",
5807 sc->sc_config.chan, sc->sc_config.flags,
5808 sc->sc_config.filter_flags));
5809
5810 err = iwh_cmd(sc, REPLY_RXON, &sc->sc_config,
5811 sizeof (iwh_rxon_cmd_t), 1);
5812 if (err != IWH_SUCCESS) {
5813 cmn_err(CE_WARN, "iwh_run_state_config(): "
5814 "could not update configuration\n");
5815 return (err);
5816 }
5817
5818 if ((sc->sc_dev_id != 0x423c) &&
5819 (sc->sc_dev_id != 0x423d)) {
5820 /*
5821 * send tx power table command
5822 */
5823 err = iwh_tx_power_table(sc, 1);
5824 if (err != IWH_SUCCESS) {
5825 return (err);
5826 }
5827 }
5828
5829 /*
5830 * No need to update the retry rate table for the AP node
5831 */
5832 err = iwh_qosparam_to_hw(sc, 1);
5833 if (err != IWH_SUCCESS) {
5834 return (err);
5835 }
5836
5837 return (err);
5838 }
5839
5840 /*
5841 * This function is only for compatibility with Net80211 module.
5842 * iwh_qosparam_to_hw() is the actual function updating EDCA
5843 * parameters to hardware.
5844 */
5845 /* ARGSUSED */
5846 static int
5847 iwh_wme_update(ieee80211com_t *ic)
5848 {
5849 return (0);
5850 }
5851
5852 static int
5853 iwh_wme_to_qos_ac(int wme_ac)
5854 {
5855 int qos_ac = QOS_AC_INVALID;
5856
5857 if (wme_ac < WME_AC_BE || wme_ac > WME_AC_VO) {
5858 cmn_err(CE_WARN, "iwh_wme_to_qos_ac(): "
5859 "WME AC index is not in suitable range.\n");
5860 return (qos_ac);
5861 }
5862
5863 switch (wme_ac) {
5864 case WME_AC_BE:
5865 qos_ac = QOS_AC_BK;
5866 break;
5867 case WME_AC_BK:
5868 qos_ac = QOS_AC_BE;
5869 break;
5870 case WME_AC_VI:
5871 qos_ac = QOS_AC_VI;
5872 break;
5873 case WME_AC_VO:
5874 qos_ac = QOS_AC_VO;
5875 break;
5876 }
5877
5878 return (qos_ac);
5879 }
5880
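/*
 * convert a contention window exponent (as carried in WME parameters)
 * into the actual window size, i.e. (2 ^ cw_e) - 1.
 */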
5881 static uint16_t
5882 iwh_cw_e_to_cw(uint8_t cw_e)
5883 {
5884 uint16_t cw = 1;
5885
5886 while (cw_e > 0) {
5887 cw <<= 1;
5888 cw_e--;
5889 }
5890
5891 cw -= 1;
5892 return (cw);
5893 }
5894
5895 static int
5896 iwh_wmeparam_check(struct wmeParams *wmeparam)
5897 {
5898 int i;
5899
5900 for (i = 0; i < WME_NUM_AC; i++) {
5901
5902 if ((wmeparam[i].wmep_logcwmax > QOS_CW_RANGE_MAX) ||
5903 (wmeparam[i].wmep_logcwmin >= wmeparam[i].wmep_logcwmax)) {
5904 cmn_err(CE_WARN, "iwh_wmeparam_check(): "
5905 "Contention window is not in suitable range.\n");
5906 return (IWH_FAIL);
5907 }
5908
5909 if ((wmeparam[i].wmep_aifsn < QOS_AIFSN_MIN) ||
5910 (wmeparam[i].wmep_aifsn > QOS_AIFSN_MAX)) {
5911 cmn_err(CE_WARN, "iwh_wmeparam_check(): "
5912 "Arbitration interframe space number"
5913 "is not in suitable range.\n");
5914 return (IWH_FAIL);
5915 }
5916 }
5917
5918 return (IWH_SUCCESS);
5919 }
5920
5921 /*
5922 * This function updates EDCA parameters in the hardware.
5923 * FIFO0-background, FIFO1-best effort, FIFO2-video, FIFO3-voice.
5924 */
5925 static int
5926 iwh_qosparam_to_hw(iwh_sc_t *sc, int async)
5927 {
5928 ieee80211com_t *ic = &sc->sc_ic;
5929 ieee80211_node_t *in = ic->ic_bss;
5930 struct wmeParams *wmeparam;
5931 iwh_qos_param_cmd_t qosparam_cmd;
5932 int i, j;
5933 int err = IWH_FAIL;
5934
5935 if ((in->in_flags & IEEE80211_NODE_QOS) &&
5936 (IEEE80211_M_STA == ic->ic_opmode)) {
5937 wmeparam = ic->ic_wme.wme_chanParams.cap_wmeParams;
5938 } else {
5939 return (IWH_SUCCESS);
5940 }
5941
5942 (void) memset(&qosparam_cmd, 0, sizeof (qosparam_cmd));
5943
5944 err = iwh_wmeparam_check(wmeparam);
5945 if (err != IWH_SUCCESS) {
5946 return (err);
5947 }
5948
5949 if (in->in_flags & IEEE80211_NODE_QOS) {
5950 qosparam_cmd.flags |= QOS_PARAM_FLG_UPDATE_EDCA;
5951 }
5952
5953 if (in->in_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)) {
5954 qosparam_cmd.flags |= QOS_PARAM_FLG_TGN;
5955 }
5956
5957 for (i = 0; i < WME_NUM_AC; i++) {
5958
5959 j = iwh_wme_to_qos_ac(i);
5960 if (j < QOS_AC_BK || j > QOS_AC_VO) {
5961 return (IWH_FAIL);
5962 }
5963
5964 qosparam_cmd.ac[j].cw_min =
5965 iwh_cw_e_to_cw(wmeparam[i].wmep_logcwmin);
5966 qosparam_cmd.ac[j].cw_max =
5967 iwh_cw_e_to_cw(wmeparam[i].wmep_logcwmax);
5968 qosparam_cmd.ac[j].aifsn =
5969 wmeparam[i].wmep_aifsn;
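/*
 * wmep_txopLimit is expressed in units of 32 microseconds;
 * convert it to microseconds here.
 */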
5970 qosparam_cmd.ac[j].txop =
5971 (uint16_t)(wmeparam[i].wmep_txopLimit * 32);
5972 }
5973
5974 err = iwh_cmd(sc, REPLY_QOS_PARAM, &qosparam_cmd,
5975 sizeof (qosparam_cmd), async);
5976 if (err != IWH_SUCCESS) {
5977 cmn_err(CE_WARN, "iwh_qosparam_to_hw(): "
5978 "failed to update QoS parameters into hardware.\n");
5979 return (err);
5980 }
5981
5982 #ifdef DEBUG
5983 IWH_DBG((IWH_DEBUG_QOS, "iwh_qosparam_to_hw(): "
5984 "EDCA parameters are as follows:\n"));
5985
5986 IWH_DBG((IWH_DEBUG_QOS, "BK parameters are: "
5987 "cw_min = %d, cw_max = %d, aifsn = %d, txop = %d\n",
5988 qosparam_cmd.ac[0].cw_min, qosparam_cmd.ac[0].cw_max,
5989 qosparam_cmd.ac[0].aifsn, qosparam_cmd.ac[0].txop));
5990
5991 IWH_DBG((IWH_DEBUG_QOS, "BE parameters are: "
5992 "cw_min = %d, cw_max = %d, aifsn = %d, txop = %d\n",
5993 qosparam_cmd.ac[1].cw_min, qosparam_cmd.ac[1].cw_max,
5994 qosparam_cmd.ac[1].aifsn, qosparam_cmd.ac[1].txop));
5995
5996 IWH_DBG((IWH_DEBUG_QOS, "VI parameters are: "
5997 "cw_min = %d, cw_max = %d, aifsn = %d, txop = %d\n",
5998 qosparam_cmd.ac[2].cw_min, qosparam_cmd.ac[2].cw_max,
5999 qosparam_cmd.ac[2].aifsn, qosparam_cmd.ac[2].txop));
6000
6001 IWH_DBG((IWH_DEBUG_QOS, "VO parameters are: "
6002 "cw_min = %d, cw_max = %d, aifsn = %d, txop = %d\n",
6003 qosparam_cmd.ac[3].cw_min, qosparam_cmd.ac[3].cw_max,
6004 qosparam_cmd.ac[3].aifsn, qosparam_cmd.ac[3].txop));
6005 #endif
6006 return (err);
6007 }
6008
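/*
 * map a WME TID (802.11e user priority) to its access category using
 * the standard UP-to-AC mapping: 1,2->BK, 0,3->BE, 4,5->VI, 6,7->VO.
 */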
6009 static inline int
6010 iwh_wme_tid_qos_ac(int tid)
6011 {
6012 switch (tid) {
6013 case 1:
6014 case 2:
6015 return (QOS_AC_BK);
6016 case 0:
6017 case 3:
6018 return (QOS_AC_BE);
6019 case 4:
6020 case 5:
6021 return (QOS_AC_VI);
6022 case 6:
6023 case 7:
6024 return (QOS_AC_VO);
6025 }
6026
6027 return (QOS_AC_BE);
6028 }
6029
6030 static inline int
6031 iwh_qos_ac_to_txq(int qos_ac)
6032 {
6033 switch (qos_ac) {
6034 case QOS_AC_BK:
6035 return (QOS_AC_BK_TO_TXQ);
6036 case QOS_AC_BE:
6037 return (QOS_AC_BE_TO_TXQ);
6038 case QOS_AC_VI:
6039 return (QOS_AC_VI_TO_TXQ);
6040 case QOS_AC_VO:
6041 return (QOS_AC_VO_TO_TXQ);
6042 }
6043
6044 return (QOS_AC_BE_TO_TXQ);
6045 }
6046
6047 static int
6048 iwh_wme_tid_to_txq(int tid)
6049 {
6050 int queue_n = TXQ_FOR_AC_INVALID;
6051 int qos_ac;
6052
6053 if (tid < WME_TID_MIN ||
6054 tid > WME_TID_MAX) {
6055 cmn_err(CE_WARN, "wme_tid_to_txq(): "
6056 "TID is not in suitable range.\n");
6057 return (queue_n);
6058 }
6059
6060 qos_ac = iwh_wme_tid_qos_ac(tid);
6061 queue_n = iwh_qos_ac_to_txq(qos_ac);
6062
6063 return (queue_n);
6064 }
6065
6066 /*
6067 * This function is used for initializing HT-relevant configurations.
6068 */
6069 static void
6070 iwh_init_ht_conf(iwh_sc_t *sc)
6071 {
6072 (void) memset(&sc->sc_ht_conf, 0, sizeof (iwh_ht_conf_t));
6073
6074 if ((0x4235 == sc->sc_dev_id) ||
6075 (0x4236 == sc->sc_dev_id) ||
6076 (0x423a == sc->sc_dev_id)) {
6077 sc->sc_ht_conf.ht_support = 1;
6078
6079 sc->sc_ht_conf.valid_chains = 3;
6080 sc->sc_ht_conf.tx_stream_count = 2;
6081 sc->sc_ht_conf.rx_stream_count = 2;
6082
6083 sc->sc_ht_conf.tx_support_mcs[0] = 0xff;
6084 sc->sc_ht_conf.tx_support_mcs[1] = 0xff;
6085 sc->sc_ht_conf.rx_support_mcs[0] = 0xff;
6086 sc->sc_ht_conf.rx_support_mcs[1] = 0xff;
6087 } else {
6088 sc->sc_ht_conf.ht_support = 1;
6089
6090 sc->sc_ht_conf.valid_chains = 2;
6091 sc->sc_ht_conf.tx_stream_count = 1;
6092 sc->sc_ht_conf.rx_stream_count = 2;
6093
6094 sc->sc_ht_conf.tx_support_mcs[0] = 0xff;
6095 sc->sc_ht_conf.rx_support_mcs[0] = 0xff;
6096 sc->sc_ht_conf.rx_support_mcs[1] = 0xff;
6097 }
6098
6099 if (sc->sc_ht_conf.ht_support) {
6100 sc->sc_ht_conf.cap |= HT_CAP_GRN_FLD;
6101 sc->sc_ht_conf.cap |= HT_CAP_SGI_20;
6102 sc->sc_ht_conf.cap |= HT_CAP_MAX_AMSDU;
6103 /* should disable MIMO */
6104 sc->sc_ht_conf.cap |= HT_CAP_MIMO_PS;
6105
6106 sc->sc_ht_conf.ampdu_p.factor = HT_RX_AMPDU_FACTOR;
6107 sc->sc_ht_conf.ampdu_p.density = HT_MPDU_DENSITY;
6108
6109 sc->sc_ht_conf.ht_protection = HT_PROT_CHAN_NON_HT;
6110 }
6111 }
6112
6113 /*
6114 * This function overwrites the default ieee80211_rateset_11n struct.
6115 */
6116 static void
6117 iwh_overwrite_11n_rateset(iwh_sc_t *sc)
6118 {
6119 uint8_t *ht_rs = sc->sc_ht_conf.rx_support_mcs;
6120 int mcs_idx, mcs_count = 0;
6121 int i, j;
6122
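/*
 * walk the RX MCS bitmap; every set bit corresponds to one supported
 * MCS index, which is recorded in the 11n rateset.
 */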
6123 for (i = 0; i < HT_RATESET_NUM; i++) {
6124 for (j = 0; j < 8; j++) {
6125 if (ht_rs[i] & (1 << j)) {
6126 mcs_idx = i * 8 + j;
6127 if (mcs_idx >= IEEE80211_HTRATE_MAXSIZE) {
6128 break;
6129 }
6130
6131 ieee80211_rateset_11n.rs_rates[mcs_idx] =
6132 (uint8_t)mcs_idx;
6133 mcs_count++;
6134 }
6135 }
6136 }
6137
6138 ieee80211_rateset_11n.rs_nrates = (uint8_t)mcs_count;
6139
6140 #ifdef DEBUG
6141 IWH_DBG((IWH_DEBUG_HTRATE, "iwh_overwrite_11n_rateset(): "
6142 "HT rates supported by this station is as follows:\n"));
6143
6144 for (i = 0; i < ieee80211_rateset_11n.rs_nrates; i++) {
6145 IWH_DBG((IWH_DEBUG_HTRATE, "Rate %d is %d\n",
6146 i, ieee80211_rateset_11n.rs_rates[i]));
6147 }
6148 #endif
6149 }
6150
6151 /*
6152 * This function overwrites default configurations of
6153 * ieee80211com structure in Net80211 module.
6154 */
6155 static void
6156 iwh_overwrite_ic_default(iwh_sc_t *sc)
6157 {
6158 ieee80211com_t *ic = &sc->sc_ic;
6159
6160 sc->sc_newstate = ic->ic_newstate;
6161 ic->ic_newstate = iwh_newstate;
6162 ic->ic_node_alloc = iwh_node_alloc;
6163 ic->ic_node_free = iwh_node_free;
6164
6165 if (sc->sc_ht_conf.ht_support) {
6166 sc->sc_recv_action = ic->ic_recv_action;
6167 ic->ic_recv_action = iwh_recv_action;
6168 sc->sc_send_action = ic->ic_send_action;
6169 ic->ic_send_action = iwh_send_action;
6170
6171 ic->ic_ampdu_rxmax = sc->sc_ht_conf.ampdu_p.factor;
6172 ic->ic_ampdu_density = sc->sc_ht_conf.ampdu_p.density;
6173 ic->ic_ampdu_limit = ic->ic_ampdu_rxmax;
6174 }
6175 }
6176
6177 /*
6178 * This function sets the "RX chain selection" field
6179 * in the RXON command when the driver is plumbed.
6180 */
6181 static void
6182 iwh_config_rxon_chain(iwh_sc_t *sc)
6183 {
6184 ieee80211com_t *ic = &sc->sc_ic;
6185 ieee80211_node_t *in = ic->ic_bss;
6186
6187 if (3 == sc->sc_ht_conf.valid_chains) {
6188 sc->sc_config.rx_chain = LE_16((RXON_RX_CHAIN_A_MSK |
6189 RXON_RX_CHAIN_B_MSK | RXON_RX_CHAIN_C_MSK) <<
6190 RXON_RX_CHAIN_VALID_POS);
6191
6192 sc->sc_config.rx_chain |= LE_16((RXON_RX_CHAIN_A_MSK |
6193 RXON_RX_CHAIN_B_MSK | RXON_RX_CHAIN_C_MSK) <<
6194 RXON_RX_CHAIN_FORCE_SEL_POS);
6195
6196 sc->sc_config.rx_chain |= LE_16((RXON_RX_CHAIN_A_MSK |
6197 RXON_RX_CHAIN_B_MSK | RXON_RX_CHAIN_C_MSK) <<
6198 RXON_RX_CHAIN_FORCE_MIMO_SEL_POS);
6199 } else {
6200 sc->sc_config.rx_chain = LE_16((RXON_RX_CHAIN_A_MSK |
6201 RXON_RX_CHAIN_B_MSK) << RXON_RX_CHAIN_VALID_POS);
6202
6203 sc->sc_config.rx_chain |= LE_16((RXON_RX_CHAIN_A_MSK |
6204 RXON_RX_CHAIN_B_MSK) << RXON_RX_CHAIN_FORCE_SEL_POS);
6205
6206 sc->sc_config.rx_chain |= LE_16((RXON_RX_CHAIN_A_MSK |
6207 RXON_RX_CHAIN_B_MSK) <<
6208 RXON_RX_CHAIN_FORCE_MIMO_SEL_POS);
6209 }
6210
6211 sc->sc_config.rx_chain |= LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK);
6212
6213 if ((in != NULL) &&
6214 (in->in_flags & IEEE80211_NODE_HT) &&
6215 sc->sc_ht_conf.ht_support) {
6216 if (3 == sc->sc_ht_conf.valid_chains) {
6217 sc->sc_config.rx_chain |= LE_16(3 <<
6218 RXON_RX_CHAIN_CNT_POS);
6219 sc->sc_config.rx_chain |= LE_16(3 <<
6220 RXON_RX_CHAIN_MIMO_CNT_POS);
6221 } else {
6222 sc->sc_config.rx_chain |= LE_16(2 <<
6223 RXON_RX_CHAIN_CNT_POS);
6224 sc->sc_config.rx_chain |= LE_16(2 <<
6225 RXON_RX_CHAIN_MIMO_CNT_POS);
6226 }
6227
6228 sc->sc_config.rx_chain |= LE_16(1 <<
6229 RXON_RX_CHAIN_MIMO_FORCE_POS);
6230 }
6231
6232 IWH_DBG((IWH_DEBUG_RXON, "iwh_config_rxon_chain(): "
6233 "rxon->rx_chain = %x\n", sc->sc_config.rx_chain));
6234 }
6235
6236 /*
6237 * This function adds AP station into hardware.
6238 */
6239 static int
6240 iwh_add_ap_sta(iwh_sc_t *sc)
6241 {
6242 ieee80211com_t *ic = &sc->sc_ic;
6243 ieee80211_node_t *in = ic->ic_bss;
6244 iwh_add_sta_t node;
6245 uint32_t ampdu_factor, ampdu_density;
6246 int err = IWH_FAIL;
6247
6248 /*
6249 * Add AP node into hardware.
6250 */
6251 (void) memset(&node, 0, sizeof (node));
6252 IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
6253 node.mode = STA_MODE_ADD_MSK;
6254 node.sta.sta_id = IWH_AP_ID;
6255
6256 if (sc->sc_ht_conf.ht_support &&
6257 (in->in_htcap_ie != NULL) &&
6258 (in->in_htcap != 0) &&
6259 (in->in_htparam != 0)) {
6260
6261 if (((in->in_htcap & HT_CAP_MIMO_PS) >> 2)
6262 == HT_CAP_MIMO_PS_DYNAMIC) {
6263 node.station_flags |= LE_32(STA_FLG_RTS_MIMO_PROT);
6264 }
6265
6266 ampdu_factor = in->in_htparam & HT_RX_AMPDU_FACTOR_MSK;
6267 node.station_flags |=
6268 LE_32(ampdu_factor << STA_FLG_MAX_AMPDU_POS);
6269
6270 ampdu_density = (in->in_htparam & HT_MPDU_DENSITY_MSK) >>
6271 HT_MPDU_DENSITY_POS;
6272 node.station_flags |=
6273 LE_32(ampdu_density << STA_FLG_AMPDU_DENSITY_POS);
6274
6275 if (in->in_htcap & LE_16(HT_CAP_SUP_WIDTH)) {
6276 node.station_flags |=
6277 LE_32(STA_FLG_FAT_EN);
6278 }
6279 }
6280
6281 err = iwh_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
6282 if (err != IWH_SUCCESS) {
6283 cmn_err(CE_WARN, "iwh_add_ap_lq(): "
6284 "failed to add AP node\n");
6285 return (err);
6286 }
6287
6288 return (err);
6289 }
6290
6291 /*
6292 * Each station in the Shirley Peak's internal station table has
6293 * its own table of 16 TX rates and modulation modes for retrying
6294 * TX when an ACK is not received. This function replaces the entire
6295 * table for one station. The station must already be in Shirley Peak's
6296 * station table.
6297 */
6298 static int
6299 iwh_ap_lq(iwh_sc_t *sc)
6300 {
6301 ieee80211com_t *ic = &sc->sc_ic;
6302 ieee80211_node_t *in = ic->ic_bss;
6303 iwh_link_quality_cmd_t link_quality;
6304 const struct ieee80211_rateset *rs_sup = NULL;
6305 uint32_t masks = 0, rate;
6306 int i, err = IWH_FAIL;
6307
6308 /*
6309 * TX_LINK_QUALITY cmd
6310 */
6311 (void) memset(&link_quality, 0, sizeof (link_quality));
6312 if (in->in_chan == IEEE80211_CHAN_ANYC) /* skip null node */
6313 return (err);
6314 rs_sup = ieee80211_get_suprates(ic, in->in_chan);
6315
6316 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
6317 if (i < rs_sup->ir_nrates) {
6318 rate = rs_sup->ir_rates[rs_sup->ir_nrates - 1 - i] &
6319 IEEE80211_RATE_VAL;
6320 } else {
6321 rate = 2;
6322 }
6323
6324 if (2 == rate || 4 == rate ||
6325 11 == rate || 22 == rate) {
6326 masks |= LE_32(RATE_MCS_CCK_MSK);
6327 }
6328
6329 masks |= LE_32(RATE_MCS_ANT_B_MSK);
6330
6331 link_quality.rate_n_flags[i] =
6332 LE_32(iwh_rate_to_plcp(rate) | masks);
6333 }
6334
6335 link_quality.general_params.single_stream_ant_msk = LINK_QUAL_ANT_B_MSK;
6336 link_quality.general_params.dual_stream_ant_msk = LINK_QUAL_ANT_MSK;
6337 link_quality.agg_params.agg_dis_start_th = 3;
6338 link_quality.agg_params.agg_time_limit = LE_16(4000);
6339 link_quality.sta_id = IWH_AP_ID;
6340 err = iwh_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
6341 sizeof (link_quality), 1);
6342 if (err != IWH_SUCCESS) {
6343 cmn_err(CE_WARN, "iwh_ap_lq(): "
6344 "failed to config link quality table\n");
6345 return (err);
6346 }
6347
6348 #ifdef DEBUG
6349 IWH_DBG((IWH_DEBUG_HWRATE, "iwh_ap_lq(): "
6350 "Rates in HW are as follows:\n"));
6351
6352 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
6353 IWH_DBG((IWH_DEBUG_HWRATE,
6354 "Rate %d in HW is %x\n", i, link_quality.rate_n_flags[i]));
6355 }
6356 #endif
6357
6358 return (err);
6359 }
6360
6361 /*
6362 * When a block ACK agreement has been set up between the station and AP,
6363 * the Net80211 module will call this function to inform the hardware
6364 * about this BA agreement.
6365 * When the AP wants to delete a BA agreement that it originated,
6366 * the Net80211 module will call this function to clean up the relevant
6367 * information in the hardware.
6368 */
6369 static void
6370 iwh_recv_action(struct ieee80211_node *in,
6371 const uint8_t *frm, const uint8_t *efrm)
6372 {
6373 struct ieee80211com *ic;
6374 iwh_sc_t *sc;
6375 const struct ieee80211_action *ia;
6376 uint16_t baparamset, baseqctl;
6377 uint32_t tid, ssn;
6378 iwh_add_sta_t node;
6379 int err = IWH_FAIL;
6380
6381 if ((NULL == in) || (NULL == frm)) {
6382 return;
6383 }
6384
6385 ic = in->in_ic;
6386 if (NULL == ic) {
6387 return;
6388 }
6389
6390 sc = (iwh_sc_t *)ic;
6391
6392 sc->sc_recv_action(in, frm, efrm);
6393
6394 ia = (const struct ieee80211_action *)frm;
6395 if (ia->ia_category != IEEE80211_ACTION_CAT_BA) {
6396 return;
6397 }
6398
6399 switch (ia->ia_action) {
6400 case IEEE80211_ACTION_BA_ADDBA_REQUEST:
6401 baparamset = *(uint16_t *)(frm + 3);
6402 baseqctl = *(uint16_t *)(frm + 7);
6403
6404 tid = MS(baparamset, IEEE80211_BAPS_TID);
6405 ssn = MS(baseqctl, IEEE80211_BASEQ_START);
6406
6407 (void) memset(&node, 0, sizeof (node));
6408 IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
6409 node.mode = STA_MODE_MODIFY_MSK;
6410 node.sta.sta_id = IWH_AP_ID;
6411
6412 node.station_flags_msk = 0;
6413 node.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
6414 node.add_immediate_ba_tid = (uint8_t)tid;
6415 node.add_immediate_ba_ssn = LE_16(ssn);
6416
6417 mutex_enter(&sc->sc_glock);
6418 err = iwh_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
6419 if (err != IWH_SUCCESS) {
6420 cmn_err(CE_WARN, "iwh_recv_action(): "
6421 "failed to setup RX block ACK\n");
6422 mutex_exit(&sc->sc_glock);
6423 return;
6424 }
6425 mutex_exit(&sc->sc_glock);
6426
6427 IWH_DBG((IWH_DEBUG_BA, "iwh_recv_action(): "
6428 "RX block ACK "
6429 "was setup on TID %d and SSN is %d.\n", tid, ssn));
6430
6431 return;
6432
6433 case IEEE80211_ACTION_BA_DELBA:
6434 baparamset = *(uint16_t *)(frm + 2);
6435
6436 if ((baparamset & IEEE80211_DELBAPS_INIT) == 0) {
6437 return;
6438 }
6439
6440 tid = MS(baparamset, IEEE80211_DELBAPS_TID);
6441
6442 (void) memset(&node, 0, sizeof (node));
6443 IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
6444 node.mode = STA_MODE_MODIFY_MSK;
6445 node.sta.sta_id = IWH_AP_ID;
6446
6447 node.station_flags_msk = 0;
6448 node.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
6449 node.add_immediate_ba_tid = (uint8_t)tid;
6450
6451 mutex_enter(&sc->sc_glock);
6452 err = iwh_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
6453 if (err != IWH_SUCCESS) {
6454 cmn_err(CE_WARN, "iwh_recv_action(): "
6455 "failed to delete RX block ACK\n");
6456 mutex_exit(&sc->sc_glock);
6457 return;
6458 }
6459 mutex_exit(&sc->sc_glock);
6460
6461 IWH_DBG((IWH_DEBUG_BA, "iwh_recv_action(): "
6462 "RX block ACK "
6463 "was deleted on TID %d.\n", tid));
6464
6465 return;
6466 }
6467 }
6468
6469 /*
6470 * When the local station wants to delete a BA agreement that was originated
6471 * by the AP, the Net80211 module will call this function to clean up the
6472 * relevant information in the hardware.
6473 */
6474 static int
6475 iwh_send_action(struct ieee80211_node *in,
6476 int category, int action, uint16_t args[4])
6477 {
6478 struct ieee80211com *ic;
6479 iwh_sc_t *sc;
6480 uint32_t tid;
6481 iwh_add_sta_t node;
6482 int ret = EIO;
6483 int err = IWH_FAIL;
6484
6485
6486 if (NULL == in) {
6487 return (ret);
6488 }
6489
6490 ic = in->in_ic;
6491 if (NULL == ic) {
6492 return (ret);
6493 }
6494
6495 sc = (iwh_sc_t *)ic;
6496
6497 ret = sc->sc_send_action(in, category, action, args);
6498
6499 if (category != IEEE80211_ACTION_CAT_BA) {
6500 return (ret);
6501 }
6502
6503 switch (action) {
6504 case IEEE80211_ACTION_BA_DELBA:
6505 if (IEEE80211_DELBAPS_INIT == args[1]) {
6506 return (ret);
6507 }
6508
6509 tid = args[0];
6510
6511 (void) memset(&node, 0, sizeof (node));
6512 IEEE80211_ADDR_COPY(node.sta.addr, in->in_bssid);
6513 node.mode = STA_MODE_MODIFY_MSK;
6514 node.sta.sta_id = IWH_AP_ID;
6515
6516 node.station_flags_msk = 0;
6517 node.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
6518 node.add_immediate_ba_tid = (uint8_t)tid;
6519
6520 mutex_enter(&sc->sc_glock);
6521 err = iwh_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
6522 if (err != IWH_SUCCESS) {
6523 cmn_err(CE_WARN, "iwh_send_action(): "
6524 "failed to delete RX balock ACK\n");
6525 mutex_exit(&sc->sc_glock);
6526 return (EIO);
6527 }
6528 mutex_exit(&sc->sc_glock);
6529
6530 IWH_DBG((IWH_DEBUG_BA, "iwh_send_action(): "
6531 "RX block ACK "
6532 "was deleted on TID %d.\n", tid));
6533
6534 break;
6535 }
6536
6537 return (ret);
6538 }
6539
6540 static int
6541 iwh_reset_hw(iwh_sc_t *sc)
6542 {
6543 uint32_t tmp;
6544 int n;
6545
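/*
 * handshake with the hardware: request NIC_READY and poll briefly;
 * if that fails, assert PREPARE, wait for PREPARE_DONE to clear,
 * then request NIC_READY again.
 */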
6546 tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
6547 IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
6548 tmp | CSR_HW_IF_CONFIG_REG_BITS_NIC_READY);
6549
6550 /*
6551 * wait for HW ready
6552 */
6553 for (n = 0; n < 5; n++) {
6554 if (IWH_READ(sc, CSR_HW_IF_CONFIG_REG) &
6555 CSR_HW_IF_CONFIG_REG_BITS_NIC_READY) {
6556 break;
6557 }
6558 DELAY(10);
6559 }
6560
6561 if (n != 5) {
6562 return (IWH_SUCCESS);
6563 }
6564
6565 tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
6566 IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
6567 tmp | CSR_HW_IF_CONFIG_REG_BITS_PREPARE);
6568
6569 for (n = 0; n < 15000; n++) {
6570 if (0 == (IWH_READ(sc, CSR_HW_IF_CONFIG_REG) &
6571 CSR_HW_IF_CONFIG_REG_BITS_NIC_PREPARE_DONE)) {
6572 break;
6573 }
6574 DELAY(10);
6575 }
6576
6577 if (15000 == n) {
6578 return (ETIMEDOUT);
6579 }
6580
6581 tmp = IWH_READ(sc, CSR_HW_IF_CONFIG_REG);
6582 IWH_WRITE(sc, CSR_HW_IF_CONFIG_REG,
6583 tmp | CSR_HW_IF_CONFIG_REG_BITS_NIC_READY);
6584
6585 /*
6586 * wait for HW ready
6587 */
6588 for (n = 0; n < 5; n++) {
6589 if (IWH_READ(sc, CSR_HW_IF_CONFIG_REG) &
6590 CSR_HW_IF_CONFIG_REG_BITS_NIC_READY) {
6591 break;
6592 }
6593 DELAY(10);
6594 }
6595
6596 if (n != 5) {
6597 return (IWH_SUCCESS);
6598 } else {
6599 return (ETIMEDOUT);
6600 }
6601 }