1 /*
2 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
3 * Use is subject to license terms.
4 */
5
6 /*
7 * Copyright (c) 2007, Intel Corporation
8 * All rights reserved.
9 */
10
11 /*
12 * Copyright (c) 2006
13 * Copyright (c) 2007
14 * Damien Bergamini <damien.bergamini@free.fr>
15 *
16 * Permission to use, copy, modify, and distribute this software for any
17 * purpose with or without fee is hereby granted, provided that the above
18 * copyright notice and this permission notice appear in all copies.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
21 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
22 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
24 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
25 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
26 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27 */
28
29 /*
30 * Driver for Intel PRO/Wireless 4965AGN(kedron) 802.11 network adapters.
31 */
32
33 #include <sys/types.h>
34 #include <sys/byteorder.h>
35 #include <sys/conf.h>
36 #include <sys/cmn_err.h>
37 #include <sys/stat.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/strsubr.h>
41 #include <sys/ethernet.h>
42 #include <inet/common.h>
43 #include <inet/nd.h>
44 #include <inet/mi.h>
45 #include <sys/note.h>
46 #include <sys/stream.h>
47 #include <sys/strsun.h>
48 #include <sys/modctl.h>
49 #include <sys/devops.h>
50 #include <sys/dlpi.h>
51 #include <sys/mac_provider.h>
52 #include <sys/mac_wifi.h>
53 #include <sys/net80211.h>
54 #include <sys/net80211_proto.h>
55 #include <sys/varargs.h>
56 #include <sys/policy.h>
57 #include <sys/pci.h>
58
59 #include "iwk_calibration.h"
60 #include "iwk_hw.h"
61 #include "iwk_eeprom.h"
62 #include "iwk2_var.h"
63 #include <inet/wifi_ioctl.h>
64
65 #ifdef DEBUG
66 #define IWK_DEBUG_80211 (1 << 0)
67 #define IWK_DEBUG_CMD (1 << 1)
68 #define IWK_DEBUG_DMA (1 << 2)
69 #define IWK_DEBUG_EEPROM (1 << 3)
70 #define IWK_DEBUG_FW (1 << 4)
71 #define IWK_DEBUG_HW (1 << 5)
72 #define IWK_DEBUG_INTR (1 << 6)
73 #define IWK_DEBUG_MRR (1 << 7)
74 #define IWK_DEBUG_PIO (1 << 8)
75 #define IWK_DEBUG_RX (1 << 9)
76 #define IWK_DEBUG_SCAN (1 << 10)
77 #define IWK_DEBUG_TX (1 << 11)
78 #define IWK_DEBUG_RATECTL (1 << 12)
79 #define IWK_DEBUG_RADIO (1 << 13)
80 #define IWK_DEBUG_RESUME (1 << 14)
81 #define IWK_DEBUG_CALIBRATION (1 << 15)
82 uint32_t iwk_dbg_flags = 0;
83 #define IWK_DBG(x) \
84 iwk_dbg x
85 #else
86 #define IWK_DBG(x)
87 #endif
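/*
 * IWK_DBG() takes a doubly-parenthesized argument list so that the whole
 * call expands to nothing when DEBUG is not defined, e.g.
 *
 *	IWK_DBG((IWK_DEBUG_DMA, "tx bd[ncookies:%d addr:%lx size:%lx]\n",
 *	    dma_p->ncookies, dma_p->cookie.dmac_address,
 *	    dma_p->cookie.dmac_size));
 *
 * A message is emitted only when its flag bit is set in iwk_dbg_flags.
 */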
88
89 static void *iwk_soft_state_p = NULL;
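/*
 * Firmware image linked into the driver at build time.  It begins with an
 * iwk_firmware_hdr_t header, followed by the text, data, init text,
 * init data and boot sections (see the layout comment in
 * iwk_alloc_fw_dma() below).
 */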
90 static uint8_t iwk_fw_bin [] = {
91 #include "fw-iw/iw4965.ucode.hex"
92 };
93
94 /* DMA attributes for a shared page */
95 static ddi_dma_attr_t sh_dma_attr = {
96 DMA_ATTR_V0, /* version of this structure */
97 0, /* lowest usable address */
98 0xffffffffU, /* highest usable address */
99 0xffffffffU, /* maximum DMAable byte count */
100 0x1000, /* alignment in bytes */
101 0x1000, /* burst sizes (any?) */
102 1, /* minimum transfer */
103 0xffffffffU, /* maximum transfer */
104 0xffffffffU, /* maximum segment length */
105 1, /* maximum number of segments */
106 1, /* granularity */
107 0, /* flags (reserved) */
108 };
109
110 /* DMA attributes for a keep warm DRAM descriptor */
111 static ddi_dma_attr_t kw_dma_attr = {
112 DMA_ATTR_V0, /* version of this structure */
113 0, /* lowest usable address */
114 0xffffffffU, /* highest usable address */
115 0xffffffffU, /* maximum DMAable byte count */
116 0x1000, /* alignment in bytes */
117 0x1000, /* burst sizes (any?) */
118 1, /* minimum transfer */
119 0xffffffffU, /* maximum transfer */
120 0xffffffffU, /* maximum segment length */
121 1, /* maximum number of segments */
122 1, /* granularity */
123 0, /* flags (reserved) */
124 };
125
126 /* DMA attributes for a ring descriptor */
127 static ddi_dma_attr_t ring_desc_dma_attr = {
128 DMA_ATTR_V0, /* version of this structure */
129 0, /* lowest usable address */
130 0xffffffffU, /* highest usable address */
131 0xffffffffU, /* maximum DMAable byte count */
132 0x100, /* alignment in bytes */
133 0x100, /* burst sizes (any?) */
134 1, /* minimum transfer */
135 0xffffffffU, /* maximum transfer */
136 0xffffffffU, /* maximum segment length */
137 1, /* maximum number of segments */
138 1, /* granularity */
139 0, /* flags (reserved) */
140 };
141
142 /* DMA attributes for a cmd */
143 static ddi_dma_attr_t cmd_dma_attr = {
144 DMA_ATTR_V0, /* version of this structure */
145 0, /* lowest usable address */
146 0xffffffffU, /* highest usable address */
147 0xffffffffU, /* maximum DMAable byte count */
148 4, /* alignment in bytes */
149 0x100, /* burst sizes (any?) */
150 1, /* minimum transfer */
151 0xffffffffU, /* maximum transfer */
152 0xffffffffU, /* maximum segment length */
153 1, /* maximum number of segments */
154 1, /* granularity */
155 0, /* flags (reserved) */
156 };
157
158 /* DMA attributes for a rx buffer */
159 static ddi_dma_attr_t rx_buffer_dma_attr = {
160 DMA_ATTR_V0, /* version of this structure */
161 0, /* lowest usable address */
162 0xffffffffU, /* highest usable address */
163 0xffffffffU, /* maximum DMAable byte count */
164 0x100, /* alignment in bytes */
165 0x100, /* burst sizes (any?) */
166 1, /* minimum transfer */
167 0xffffffffU, /* maximum transfer */
168 0xffffffffU, /* maximum segment length */
169 1, /* maximum number of segments */
170 1, /* granularity */
171 0, /* flags (reserved) */
172 };
173
174 /*
175 * DMA attributes for a tx buffer.
176 * The hardware supports up to 4 segments per descriptor.
177 * For now all the wifi drivers put the whole frame in a single
178 * descriptor, so we set the maximum number of segments to 1,
179 * just the same as the rx buffer. We may leverage this hardware
180 * ability in the future, which is why rx and tx do not share
181 * the same buffer_dma_attr.
182 */
183 static ddi_dma_attr_t tx_buffer_dma_attr = {
184 DMA_ATTR_V0, /* version of this structure */
185 0, /* lowest usable address */
186 0xffffffffU, /* highest usable address */
187 0xffffffffU, /* maximum DMAable byte count */
188 4, /* alignment in bytes */
189 0x100, /* burst sizes (any?) */
190 1, /* minimum transfer */
191 0xffffffffU, /* maximum transfer */
192 0xffffffffU, /* maximum segment length */
193 1, /* maximum number of segments */
194 1, /* granularity */
195 0, /* flags (reserved) */
196 };
197
198 /* DMA attributes for text and data part in the firmware */
199 static ddi_dma_attr_t fw_dma_attr = {
200 DMA_ATTR_V0, /* version of this structure */
201 0, /* lowest usable address */
202 0xffffffffU, /* highest usable address */
203 0x7fffffff, /* maximum DMAable byte count */
204 0x10, /* alignment in bytes */
205 0x100, /* burst sizes (any?) */
206 1, /* minimum transfer */
207 0xffffffffU, /* maximum transfer */
208 0xffffffffU, /* maximum segment length */
209 1, /* maximum number of segments */
210 1, /* granularity */
211 0, /* flags (reserved) */
212 };
213
214
215 /* regs access attributes */
216 static ddi_device_acc_attr_t iwk_reg_accattr = {
217 DDI_DEVICE_ATTR_V0,
218 DDI_STRUCTURE_LE_ACC,
219 DDI_STRICTORDER_ACC,
220 DDI_DEFAULT_ACC
221 };
222
223 /* DMA access attributes for Descriptor */
224 static ddi_device_acc_attr_t iwk_dma_descattr = {
225 DDI_DEVICE_ATTR_V0,
226 DDI_STRUCTURE_LE_ACC,
227 DDI_STRICTORDER_ACC,
228 DDI_DEFAULT_ACC
229 };
230
231 /* DMA access attributes */
232 static ddi_device_acc_attr_t iwk_dma_accattr = {
233 DDI_DEVICE_ATTR_V0,
234 DDI_NEVERSWAP_ACC,
235 DDI_STRICTORDER_ACC,
236 DDI_DEFAULT_ACC
237 };
238
239 static int iwk_ring_init(iwk_sc_t *);
240 static void iwk_ring_free(iwk_sc_t *);
241 static int iwk_alloc_shared(iwk_sc_t *);
242 static void iwk_free_shared(iwk_sc_t *);
243 static int iwk_alloc_kw(iwk_sc_t *);
244 static void iwk_free_kw(iwk_sc_t *);
245 static int iwk_alloc_fw_dma(iwk_sc_t *);
246 static void iwk_free_fw_dma(iwk_sc_t *);
247 static int iwk_alloc_rx_ring(iwk_sc_t *);
248 static void iwk_reset_rx_ring(iwk_sc_t *);
249 static void iwk_free_rx_ring(iwk_sc_t *);
250 static int iwk_alloc_tx_ring(iwk_sc_t *, iwk_tx_ring_t *,
251 int, int);
252 static void iwk_reset_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
253 static void iwk_free_tx_ring(iwk_sc_t *, iwk_tx_ring_t *);
254
255 static ieee80211_node_t *iwk_node_alloc(ieee80211com_t *);
256 static void iwk_node_free(ieee80211_node_t *);
257 static int iwk_newstate(ieee80211com_t *, enum ieee80211_state, int);
258 static int iwk_key_set(ieee80211com_t *, const struct ieee80211_key *,
259 const uint8_t mac[IEEE80211_ADDR_LEN]);
260 static void iwk_mac_access_enter(iwk_sc_t *);
261 static void iwk_mac_access_exit(iwk_sc_t *);
262 static uint32_t iwk_reg_read(iwk_sc_t *, uint32_t);
263 static void iwk_reg_write(iwk_sc_t *, uint32_t, uint32_t);
264 static void iwk_reg_write_region_4(iwk_sc_t *, uint32_t,
265 uint32_t *, int);
266 static int iwk_load_firmware(iwk_sc_t *);
267 static void iwk_rx_intr(iwk_sc_t *, iwk_rx_desc_t *,
268 iwk_rx_data_t *);
269 static void iwk_tx_intr(iwk_sc_t *, iwk_rx_desc_t *,
270 iwk_rx_data_t *);
271 static void iwk_cmd_intr(iwk_sc_t *, iwk_rx_desc_t *);
272 static uint_t iwk_intr(caddr_t, caddr_t);
273 static int iwk_eep_load(iwk_sc_t *sc);
274 static void iwk_get_mac_from_eep(iwk_sc_t *sc);
275 static int iwk_eep_sem_down(iwk_sc_t *sc);
276 static void iwk_eep_sem_up(iwk_sc_t *sc);
277 static uint_t iwk_rx_softintr(caddr_t, caddr_t);
278 static uint8_t iwk_rate_to_plcp(int);
279 static int iwk_cmd(iwk_sc_t *, int, const void *, int, int);
280 static void iwk_set_led(iwk_sc_t *, uint8_t, uint8_t, uint8_t);
281 static int iwk_hw_set_before_auth(iwk_sc_t *);
282 static int iwk_scan(iwk_sc_t *);
283 static int iwk_config(iwk_sc_t *);
284 static void iwk_stop_master(iwk_sc_t *);
285 static int iwk_power_up(iwk_sc_t *);
286 static int iwk_preinit(iwk_sc_t *);
287 static int iwk_init(iwk_sc_t *);
288 static void iwk_stop(iwk_sc_t *);
289 static void iwk_amrr_init(iwk_amrr_t *);
290 static void iwk_amrr_timeout(iwk_sc_t *);
291 static void iwk_amrr_ratectl(void *, ieee80211_node_t *);
292 static int32_t iwk_curr_tempera(iwk_sc_t *sc);
293 static int iwk_tx_power_calibration(iwk_sc_t *sc);
294 static inline int iwk_is_24G_band(iwk_sc_t *sc);
295 static inline int iwk_is_fat_channel(iwk_sc_t *sc);
296 static int iwk_txpower_grp(uint16_t channel);
297 static struct iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
298 uint16_t channel,
299 int is_24G, int is_fat, int is_hi_chan);
300 static int32_t iwk_band_number(iwk_sc_t *sc, uint16_t channel);
301 static int iwk_division(int32_t num, int32_t denom, int32_t *res);
302 static int32_t iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
303 int32_t x2, int32_t y2);
304 static int iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
305 struct iwk_eep_calib_channel_info *chan_info);
306 static int32_t iwk_voltage_compensation(int32_t eep_voltage,
307 int32_t curr_voltage);
308 static int32_t iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G);
309 static int iwk_txpower_table_cmd_init(iwk_sc_t *sc,
310 struct iwk_tx_power_db *tp_db);
311 static void iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc);
312 static int iwk_is_associated(iwk_sc_t *sc);
313 static int iwk_rxgain_diff_init(iwk_sc_t *sc);
314 static int iwk_rxgain_diff(iwk_sc_t *sc);
315 static int iwk_rx_sens_init(iwk_sc_t *sc);
316 static int iwk_rx_sens(iwk_sc_t *sc);
317 static int iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
318 static int iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time);
319 static void iwk_recv_mgmt(struct ieee80211com *ic, mblk_t *mp,
320 struct ieee80211_node *in, int subtype, int rssi, uint32_t rstamp);
321
322 static void iwk_write_event_log(iwk_sc_t *);
323 static void iwk_write_error_log(iwk_sc_t *);
324
325 static int iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
326 static int iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
327 static int iwk_quiesce(dev_info_t *dip);
328
329 /*
330 * GLD specific operations
331 */
332 static int iwk_m_stat(void *arg, uint_t stat, uint64_t *val);
333 static int iwk_m_start(void *arg);
334 static void iwk_m_stop(void *arg);
335 static int iwk_m_unicst(void *arg, const uint8_t *macaddr);
336 static int iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m);
337 static int iwk_m_promisc(void *arg, boolean_t on);
338 static mblk_t *iwk_m_tx(void *arg, mblk_t *mp);
339 static void iwk_m_ioctl(void *arg, queue_t *wq, mblk_t *mp);
340 static int iwk_m_setprop(void *arg, const char *pr_name,
341 mac_prop_id_t wldp_pr_name, uint_t wldp_length, const void *wldp_buf);
342 static int iwk_m_getprop(void *arg, const char *pr_name,
343 mac_prop_id_t wldp_pr_name, uint_t wldp_length, void *wldp_buf);
344 static void iwk_m_propinfo(void *arg, const char *pr_name,
345 mac_prop_id_t wldp_pr_num, mac_prop_info_handle_t mph);
346 static void iwk_destroy_locks(iwk_sc_t *sc);
347 static int iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type);
348 static void iwk_thread(iwk_sc_t *sc);
349 static void iwk_watchdog(void *arg);
350 static int iwk_run_state_config_ibss(ieee80211com_t *ic);
351 static int iwk_run_state_config_sta(ieee80211com_t *ic);
352 static int iwk_fast_recover(iwk_sc_t *sc);
353 static int iwk_start_tx_beacon(ieee80211com_t *ic);
354 static int iwk_clean_add_node_ibss(struct ieee80211com *ic,
355 uint8_t addr[IEEE80211_ADDR_LEN], uint8_t *index2);
356
357 /*
358 * Supported rates for 802.11b/g modes (in units of 500 kbps).
359 * 11a and 11n support will be added later.
360 */
361 static const struct ieee80211_rateset iwk_rateset_11b =
362 { 4, { 2, 4, 11, 22 } };
363
364 static const struct ieee80211_rateset iwk_rateset_11g =
365 { 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
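/*
 * For example, the 11g entries above correspond to 1, 2, 5.5, 11, 6, 9,
 * 12, 18, 24, 36, 48 and 54 Mbps respectively (each entry times 500 kbps).
 */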
366
367 /*
368 * For mfthread only
369 */
370 extern pri_t minclsyspri;
371
372 #define DRV_NAME_4965 "iwk"
373
374 /*
375 * Module Loading Data & Entry Points
376 */
377 DDI_DEFINE_STREAM_OPS(iwk_devops, nulldev, nulldev, iwk_attach,
378 iwk_detach, nodev, NULL, D_MP, NULL, iwk_quiesce);
379
380 static struct modldrv iwk_modldrv = {
381 &mod_driverops,
382 "Intel(R) 4965AGN driver(N)",
383 &iwk_devops
384 };
385
386 static struct modlinkage iwk_modlinkage = {
387 MODREV_1,
388 { &iwk_modldrv, NULL }
389 };
390
391 int
392 _init(void)
393 {
394 int status;
395
396 status = ddi_soft_state_init(&iwk_soft_state_p,
397 sizeof (iwk_sc_t), 1);
398 if (status != DDI_SUCCESS)
399 return (status);
400
401 mac_init_ops(&iwk_devops, DRV_NAME_4965);
402 status = mod_install(&iwk_modlinkage);
403 if (status != DDI_SUCCESS) {
404 mac_fini_ops(&iwk_devops);
405 ddi_soft_state_fini(&iwk_soft_state_p);
406 }
407
408 return (status);
409 }
410
411 int
412 _fini(void)
413 {
414 int status;
415
416 status = mod_remove(&iwk_modlinkage);
417 if (status == DDI_SUCCESS) {
418 mac_fini_ops(&iwk_devops);
419 ddi_soft_state_fini(&iwk_soft_state_p);
420 }
421
422 return (status);
423 }
424
425 int
426 _info(struct modinfo *mip)
427 {
428 return (mod_info(&iwk_modlinkage, mip));
429 }
430
431 /*
432 * Mac Call Back entries
433 */
434 mac_callbacks_t iwk_m_callbacks = {
435 MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
436 iwk_m_stat,
437 iwk_m_start,
438 iwk_m_stop,
439 iwk_m_promisc,
440 iwk_m_multicst,
441 iwk_m_unicst,
442 iwk_m_tx,
443 NULL,
444 iwk_m_ioctl,
445 NULL,
446 NULL,
447 NULL,
448 iwk_m_setprop,
449 iwk_m_getprop,
450 iwk_m_propinfo
451 };
452
453 #ifdef DEBUG
454 void
455 iwk_dbg(uint32_t flags, const char *fmt, ...)
456 {
457 va_list ap;
458
459 if (flags & iwk_dbg_flags) {
460 va_start(ap, fmt);
461 vcmn_err(CE_NOTE, fmt, ap);
462 va_end(ap);
463 }
464 }
465 #endif
466
467 /*
468 * device operations
469 */
470 int
471 iwk_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
472 {
473 iwk_sc_t *sc;
474 ieee80211com_t *ic;
475 int instance, err, i;
476 char strbuf[32];
477 wifi_data_t wd = { 0 };
478 mac_register_t *macp;
479
480 int intr_type;
481 int intr_count;
482 int intr_actual;
483
484 switch (cmd) {
485 case DDI_ATTACH:
486 break;
487 case DDI_RESUME:
488 sc = ddi_get_soft_state(iwk_soft_state_p,
489 ddi_get_instance(dip));
490 ASSERT(sc != NULL);
491
492 mutex_enter(&sc->sc_glock);
493 sc->sc_flags &= ~IWK_F_SUSPEND;
494 mutex_exit(&sc->sc_glock);
495
496 if (sc->sc_flags & IWK_F_RUNNING)
497 (void) iwk_init(sc);
498
499 mutex_enter(&sc->sc_glock);
500 sc->sc_flags |= IWK_F_LAZY_RESUME;
501 mutex_exit(&sc->sc_glock);
502
503 IWK_DBG((IWK_DEBUG_RESUME, "iwk: resume\n"));
504 return (DDI_SUCCESS);
505 default:
506 err = DDI_FAILURE;
507 goto attach_fail1;
508 }
509
510 instance = ddi_get_instance(dip);
511 err = ddi_soft_state_zalloc(iwk_soft_state_p, instance);
512 if (err != DDI_SUCCESS) {
513 cmn_err(CE_WARN,
514 "iwk_attach(): failed to allocate soft state\n");
515 goto attach_fail1;
516 }
517 sc = ddi_get_soft_state(iwk_soft_state_p, instance);
518 sc->sc_dip = dip;
519
520 err = ddi_regs_map_setup(dip, 0, &sc->sc_cfg_base, 0, 0,
521 &iwk_reg_accattr, &sc->sc_cfg_handle);
522 if (err != DDI_SUCCESS) {
523 cmn_err(CE_WARN,
524 "iwk_attach(): failed to map config spaces regs\n");
525 goto attach_fail2;
526 }
527 sc->sc_rev = ddi_get8(sc->sc_cfg_handle,
528 (uint8_t *)(sc->sc_cfg_base + PCI_CONF_REVID));
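/*
 * Clear PCI configuration register 0x41.  On other Intel wireless
 * drivers this offset is the PCI retry timeout register, and it is
 * cleared so that PCI Tx retries do not interfere with power
 * management; the same purpose is assumed here.
 */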
529 ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0x41), 0);
530 sc->sc_clsz = ddi_get16(sc->sc_cfg_handle,
531 (uint16_t *)(sc->sc_cfg_base + PCI_CONF_CACHE_LINESZ));
532 if (!sc->sc_clsz)
533 sc->sc_clsz = 16;
534 sc->sc_clsz = (sc->sc_clsz << 2);
535 sc->sc_dmabuf_sz = roundup(0x1000 + sizeof (struct ieee80211_frame) +
536 IEEE80211_MTU + IEEE80211_CRC_LEN +
537 (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN +
538 IEEE80211_WEP_CRCLEN), sc->sc_clsz);
539 /*
540 * Map operating registers
541 */
542 err = ddi_regs_map_setup(dip, 1, &sc->sc_base,
543 0, 0, &iwk_reg_accattr, &sc->sc_handle);
544 if (err != DDI_SUCCESS) {
545 cmn_err(CE_WARN,
546 "iwk_attach(): failed to map device regs\n");
547 goto attach_fail2a;
548 }
549
550 err = ddi_intr_get_supported_types(dip, &intr_type);
551 if ((err != DDI_SUCCESS) || (!(intr_type & DDI_INTR_TYPE_FIXED))) {
552 cmn_err(CE_WARN, "iwk_attach(): "
553 "Fixed type interrupt is not supported\n");
554 goto attach_fail_intr_a;
555 }
556
557 err = ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &intr_count);
558 if ((err != DDI_SUCCESS) || (intr_count != 1)) {
559 cmn_err(CE_WARN, "iwk_attach(): "
560 "No fixed interrupts\n");
561 goto attach_fail_intr_a;
562 }
563
564 sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
565
566 err = ddi_intr_alloc(dip, sc->sc_intr_htable, DDI_INTR_TYPE_FIXED, 0,
567 intr_count, &intr_actual, 0);
568 if ((err != DDI_SUCCESS) || (intr_actual != 1)) {
569 cmn_err(CE_WARN, "iwk_attach(): "
570 "ddi_intr_alloc() failed 0x%x\n", err);
571 goto attach_fail_intr_b;
572 }
573
574 err = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_pri);
575 if (err != DDI_SUCCESS) {
576 cmn_err(CE_WARN, "iwk_attach(): "
577 "ddi_intr_get_pri() failed 0x%x\n", err);
578 goto attach_fail_intr_c;
579 }
580
581 mutex_init(&sc->sc_glock, NULL, MUTEX_DRIVER,
582 DDI_INTR_PRI(sc->sc_intr_pri));
583 mutex_init(&sc->sc_tx_lock, NULL, MUTEX_DRIVER,
584 DDI_INTR_PRI(sc->sc_intr_pri));
585 mutex_init(&sc->sc_mt_lock, NULL, MUTEX_DRIVER,
586 DDI_INTR_PRI(sc->sc_intr_pri));
587 mutex_init(&sc->sc_ibss.node_tb_lock, NULL, MUTEX_DRIVER,
588 DDI_INTR_PRI(sc->sc_intr_pri));
589
590 cv_init(&sc->sc_fw_cv, NULL, CV_DRIVER, NULL);
591 cv_init(&sc->sc_cmd_cv, NULL, CV_DRIVER, NULL);
592 cv_init(&sc->sc_tx_cv, "tx-ring", CV_DRIVER, NULL);
593 /*
594 * initialize the mfthread
595 */
596 cv_init(&sc->sc_mt_cv, NULL, CV_DRIVER, NULL);
597 sc->sc_mf_thread = NULL;
598 sc->sc_mf_thread_switch = 0;
599
600 /*
601 * Allocate shared page.
602 */
603 err = iwk_alloc_shared(sc);
604 if (err != DDI_SUCCESS) {
605 cmn_err(CE_WARN, "iwk_attach(): "
606 "failed to allocate shared page\n");
607 goto attach_fail3;
608 }
609
610 /*
611 * Allocate keep warm page.
612 */
613 err = iwk_alloc_kw(sc);
614 if (err != DDI_SUCCESS) {
615 cmn_err(CE_WARN, "iwk_attach(): "
616 "failed to allocate keep warm page\n");
617 goto attach_fail3a;
618 }
619
620 /*
621 * Do some necessary hardware initializations.
622 */
623 err = iwk_preinit(sc);
624 if (err != DDI_SUCCESS) {
625 cmn_err(CE_WARN, "iwk_attach(): "
626 "failed to init hardware\n");
627 goto attach_fail4;
628 }
629
630 /* initialize EEPROM */
631 err = iwk_eep_load(sc); /* get hardware configurations from eeprom */
632 if (err != 0) {
633 cmn_err(CE_WARN, "iwk_attach(): failed to load eeprom\n");
634 goto attach_fail4;
635 }
636
637 if (LE_16(sc->sc_eep_map.calib_version) < EEP_TX_POWER_VERSION_NEW) {
638 cmn_err(CE_WARN, "older EEPROM detected\n");
639 goto attach_fail4;
640 }
641
642 iwk_get_mac_from_eep(sc);
643
644 err = iwk_ring_init(sc);
645 if (err != DDI_SUCCESS) {
646 cmn_err(CE_WARN, "iwk_attach(): "
647 "failed to allocate and initialize ring\n");
648 goto attach_fail4;
649 }
650
651 sc->sc_hdr = (iwk_firmware_hdr_t *)iwk_fw_bin;
652
653 err = iwk_alloc_fw_dma(sc);
654 if (err != DDI_SUCCESS) {
655 cmn_err(CE_WARN, "iwk_attach(): "
656 "failed to allocate firmware dma\n");
657 goto attach_fail5;
658 }
659
660 /*
661 * Initialize the wifi part, which will be used by
662 * generic layer
663 */
664 ic = &sc->sc_ic;
665 ic->ic_phytype = IEEE80211_T_OFDM;
666 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
667 ic->ic_state = IEEE80211_S_INIT;
668 ic->ic_maxrssi = 100; /* experimental number */
669 ic->ic_caps = IEEE80211_C_SHPREAMBLE | IEEE80211_C_TXPMGT |
670 IEEE80211_C_PMGT | IEEE80211_C_SHSLOT;
671 /*
672 * use software WEP and TKIP, hardware CCMP;
673 */
674 ic->ic_caps |= IEEE80211_C_AES_CCM;
675 /*
676 * Support WPA/WPA2
677 */
678 ic->ic_caps |= IEEE80211_C_WPA;
679 /*
680 * support Adhoc mode
681 */
682 ic->ic_caps |= IEEE80211_C_IBSS;
683
684 /* set supported .11b and .11g rates */
685 ic->ic_sup_rates[IEEE80211_MODE_11B] = iwk_rateset_11b;
686 ic->ic_sup_rates[IEEE80211_MODE_11G] = iwk_rateset_11g;
687
688 /* set supported .11b and .11g channels (1 through 11) */
689 for (i = 1; i <= 11; i++) {
690 ic->ic_sup_channels[i].ich_freq =
691 ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
692 ic->ic_sup_channels[i].ich_flags =
693 IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
694 IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ |
695 IEEE80211_CHAN_PASSIVE;
696 }
697 ic->ic_ibss_chan = &ic->ic_sup_channels[0];
698
699 ic->ic_xmit = iwk_send;
700 /*
701 * init Wifi layer
702 */
703 ieee80211_attach(ic);
704
705 /*
706 * each instance has a different WPA door
707 */
708 (void) snprintf(ic->ic_wpadoor, MAX_IEEE80211STR, "%s_%s%d", WPA_DOOR,
709 ddi_driver_name(dip),
710 ddi_get_instance(dip));
711
712 /*
713 * Override 80211 default routines
714 */
715 sc->sc_newstate = ic->ic_newstate;
716 ic->ic_newstate = iwk_newstate;
717 ic->ic_watchdog = iwk_watchdog;
718 sc->sc_recv_mgmt = ic->ic_recv_mgmt;
719 ic->ic_recv_mgmt = iwk_recv_mgmt;
720 ic->ic_node_alloc = iwk_node_alloc;
721 ic->ic_node_free = iwk_node_free;
722 ic->ic_crypto.cs_key_set = iwk_key_set;
723 ieee80211_media_init(ic);
724 /*
725 * initialize default tx key
726 */
727 ic->ic_def_txkey = 0;
728 err = ddi_intr_add_softint(dip, &sc->sc_soft_hdl, DDI_INTR_SOFTPRI_MAX,
729 iwk_rx_softintr, (caddr_t)sc);
730 if (err != DDI_SUCCESS) {
731 cmn_err(CE_WARN, "iwk_attach(): "
732 "add soft interrupt failed\n");
733 goto attach_fail7;
734 }
735
736 /*
737 * Add the interrupt handler
738 */
739 err = ddi_intr_add_handler(sc->sc_intr_htable[0], iwk_intr,
740 (caddr_t)sc, NULL);
741 if (err != DDI_SUCCESS) {
742 cmn_err(CE_WARN, "iwk_attach(): "
743 "ddi_intr_add_handler() failed\n");
744 goto attach_fail8;
745 }
746
747 err = ddi_intr_enable(sc->sc_intr_htable[0]);
748 if (err != DDI_SUCCESS) {
749 cmn_err(CE_WARN, "iwk_attach(): "
750 "ddi_intr_enable() failed\n");
751 goto attach_fail_intr_d;
752 }
753
754 /*
755 * Initialize pointer to device specific functions
756 */
757 wd.wd_secalloc = WIFI_SEC_NONE;
758 wd.wd_opmode = ic->ic_opmode;
759 IEEE80211_ADDR_COPY(wd.wd_bssid, ic->ic_macaddr);
760
761 macp = mac_alloc(MAC_VERSION);
762 if (macp == NULL) {
763 cmn_err(CE_WARN,
764 "iwk_attach(): failed to do mac_alloc()\n");
765 goto attach_fail9;
766 }
767
768 macp->m_type_ident = MAC_PLUGIN_IDENT_WIFI;
769 macp->m_driver = sc;
770 macp->m_dip = dip;
771 macp->m_src_addr = ic->ic_macaddr;
772 macp->m_callbacks = &iwk_m_callbacks;
773 macp->m_min_sdu = 0;
774 macp->m_max_sdu = IEEE80211_MTU;
775 macp->m_pdata = &wd;
776 macp->m_pdata_size = sizeof (wd);
777
778 /*
779 * Register the macp to mac
780 */
781 err = mac_register(macp, &ic->ic_mach);
782 mac_free(macp);
783 if (err != DDI_SUCCESS) {
784 cmn_err(CE_WARN,
785 "iwk_attach(): failed to do mac_register()\n");
786 goto attach_fail9;
787 }
788
789 /*
790 * Create minor node of type DDI_NT_NET_WIFI
791 */
792 (void) snprintf(strbuf, sizeof (strbuf), DRV_NAME_4965"%d", instance);
793 err = ddi_create_minor_node(dip, strbuf, S_IFCHR,
794 instance + 1, DDI_NT_NET_WIFI, 0);
795 if (err != DDI_SUCCESS)
796 cmn_err(CE_WARN,
797 "iwk_attach(): failed to do ddi_create_minor_node()\n");
798
799 /*
800 * Notify link is down now
801 */
802 mac_link_update(ic->ic_mach, LINK_STATE_DOWN);
803
804 /*
805 * create the mf thread to handle link status updates,
806 * fatal error recovery, etc.
807 */
808 sc->sc_mf_thread_switch = 1;
809 if (sc->sc_mf_thread == NULL)
810 sc->sc_mf_thread = thread_create((caddr_t)NULL, 0,
811 iwk_thread, sc, 0, &p0, TS_RUN, minclsyspri);
812
813 sc->sc_flags |= IWK_F_ATTACHED;
814
815 return (DDI_SUCCESS);
816 attach_fail9:
817 (void) ddi_intr_disable(sc->sc_intr_htable[0]);
818 attach_fail_intr_d:
819 (void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
820
821 attach_fail8:
822 (void) ddi_intr_remove_softint(sc->sc_soft_hdl);
823 sc->sc_soft_hdl = NULL;
824 attach_fail7:
825 ieee80211_detach(ic);
826 attach_fail6:
827 iwk_free_fw_dma(sc);
828 attach_fail5:
829 iwk_ring_free(sc);
830 attach_fail4:
831 iwk_free_kw(sc);
832 attach_fail3a:
833 iwk_free_shared(sc);
834 attach_fail3:
835 iwk_destroy_locks(sc);
836 attach_fail_intr_c:
837 (void) ddi_intr_free(sc->sc_intr_htable[0]);
838 attach_fail_intr_b:
839 kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
840 attach_fail_intr_a:
841 ddi_regs_map_free(&sc->sc_handle);
842 attach_fail2a:
843 ddi_regs_map_free(&sc->sc_cfg_handle);
844 attach_fail2:
845 ddi_soft_state_free(iwk_soft_state_p, instance);
846 attach_fail1:
847 return (err);
848 }
849
850 int
851 iwk_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
852 {
853 iwk_sc_t *sc;
854 int err;
855
856 sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
857 ASSERT(sc != NULL);
858
859 switch (cmd) {
860 case DDI_DETACH:
861 break;
862 case DDI_SUSPEND:
863 mutex_enter(&sc->sc_glock);
864 sc->sc_flags |= IWK_F_SUSPEND;
865 mutex_exit(&sc->sc_glock);
866 if (sc->sc_flags & IWK_F_RUNNING) {
867 iwk_stop(sc);
868 }
869
870 IWK_DBG((IWK_DEBUG_RESUME, "iwk: suspend\n"));
871 return (DDI_SUCCESS);
872 default:
873 return (DDI_FAILURE);
874 }
875
876 if (!(sc->sc_flags & IWK_F_ATTACHED))
877 return (DDI_FAILURE);
878
879 err = mac_disable(sc->sc_ic.ic_mach);
880 if (err != DDI_SUCCESS)
881 return (err);
882
883 /*
884 * Destroy the mf_thread
885 */
886 mutex_enter(&sc->sc_mt_lock);
887 sc->sc_mf_thread_switch = 0;
888 while (sc->sc_mf_thread != NULL) {
889 if (cv_wait_sig(&sc->sc_mt_cv, &sc->sc_mt_lock) == 0)
890 break;
891 }
892 mutex_exit(&sc->sc_mt_lock);
893
894 iwk_stop(sc);
895 DELAY(500000);
896
897 /*
898 * Unregister from the MAC layer subsystem
899 */
900 (void) mac_unregister(sc->sc_ic.ic_mach);
901
902 mutex_enter(&sc->sc_glock);
903 iwk_free_fw_dma(sc);
904 iwk_ring_free(sc);
905 iwk_free_kw(sc);
906 iwk_free_shared(sc);
907 mutex_exit(&sc->sc_glock);
908
909 (void) ddi_intr_disable(sc->sc_intr_htable[0]);
910 (void) ddi_intr_remove_handler(sc->sc_intr_htable[0]);
911 (void) ddi_intr_free(sc->sc_intr_htable[0]);
912 kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
913
914 (void) ddi_intr_remove_softint(sc->sc_soft_hdl);
915 sc->sc_soft_hdl = NULL;
916
917 /*
918 * detach ieee80211
919 */
920 ieee80211_detach(&sc->sc_ic);
921
922 iwk_destroy_locks(sc);
923
924 ddi_regs_map_free(&sc->sc_handle);
925 ddi_regs_map_free(&sc->sc_cfg_handle);
926 ddi_remove_minor_node(dip, NULL);
927 ddi_soft_state_free(iwk_soft_state_p, ddi_get_instance(dip));
928
929 return (DDI_SUCCESS);
930 }
931
932 /*
933 * quiesce(9E) entry point.
934 *
935 * This function is called when the system is single-threaded at high
936 * PIL with preemption disabled. Therefore, this function must
937 * not block.
938 *
939 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
940 * DDI_FAILURE indicates an error condition and should almost never happen.
941 */
942 int
943 iwk_quiesce(dev_info_t *dip)
944 {
945 iwk_sc_t *sc;
946
947 sc = ddi_get_soft_state(iwk_soft_state_p, ddi_get_instance(dip));
948 ASSERT(sc != NULL);
949
950 /* no message prints and no lock acquisition */
951 #ifdef DEBUG
952 iwk_dbg_flags = 0;
953 #endif
954 sc->sc_flags |= IWK_F_QUIESCED;
955
956 iwk_stop(sc);
957
958 return (DDI_SUCCESS);
959 }
960
961 static void
962 iwk_destroy_locks(iwk_sc_t *sc)
963 {
964 cv_destroy(&sc->sc_mt_cv);
965 mutex_destroy(&sc->sc_mt_lock);
966 cv_destroy(&sc->sc_tx_cv);
967 cv_destroy(&sc->sc_cmd_cv);
968 cv_destroy(&sc->sc_fw_cv);
969 mutex_destroy(&sc->sc_tx_lock);
970 mutex_destroy(&sc->sc_glock);
971 }
972
973 /*
974 * Allocate an area of memory and a DMA handle for accessing it
975 */
976 static int
977 iwk_alloc_dma_mem(iwk_sc_t *sc, size_t memsize,
978 ddi_dma_attr_t *dma_attr_p, ddi_device_acc_attr_t *acc_attr_p,
979 uint_t dma_flags, iwk_dma_t *dma_p)
980 {
981 caddr_t vaddr;
982 int err;
983
984 /*
985 * Allocate handle
986 */
987 err = ddi_dma_alloc_handle(sc->sc_dip, dma_attr_p,
988 DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
989 if (err != DDI_SUCCESS) {
990 dma_p->dma_hdl = NULL;
991 return (DDI_FAILURE);
992 }
993
994 /*
995 * Allocate memory
996 */
997 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, acc_attr_p,
998 dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
999 DDI_DMA_SLEEP, NULL, &vaddr, &dma_p->alength, &dma_p->acc_hdl);
1000 if (err != DDI_SUCCESS) {
1001 ddi_dma_free_handle(&dma_p->dma_hdl);
1002 dma_p->dma_hdl = NULL;
1003 dma_p->acc_hdl = NULL;
1004 return (DDI_FAILURE);
1005 }
1006
1007 /*
1008 * Bind the two together
1009 */
1010 dma_p->mem_va = vaddr;
1011 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
1012 vaddr, dma_p->alength, dma_flags, DDI_DMA_SLEEP, NULL,
1013 &dma_p->cookie, &dma_p->ncookies);
1014 if (err != DDI_DMA_MAPPED) {
1015 ddi_dma_mem_free(&dma_p->acc_hdl);
1016 ddi_dma_free_handle(&dma_p->dma_hdl);
1017 dma_p->acc_hdl = NULL;
1018 dma_p->dma_hdl = NULL;
1019 return (DDI_FAILURE);
1020 }
1021
1022 dma_p->nslots = ~0U;
1023 dma_p->size = ~0U;
1024 dma_p->token = ~0U;
1025 dma_p->offset = 0;
1026 return (DDI_SUCCESS);
1027 }
1028
1029 /*
1030 * Free one allocated area of DMAable memory
1031 */
1032 static void
1033 iwk_free_dma_mem(iwk_dma_t *dma_p)
1034 {
1035 if (dma_p->dma_hdl != NULL) {
1036 if (dma_p->ncookies) {
1037 (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
1038 dma_p->ncookies = 0;
1039 }
1040 ddi_dma_free_handle(&dma_p->dma_hdl);
1041 dma_p->dma_hdl = NULL;
1042 }
1043
1044 if (dma_p->acc_hdl != NULL) {
1045 ddi_dma_mem_free(&dma_p->acc_hdl);
1046 dma_p->acc_hdl = NULL;
1047 }
1048 }
1049
1050 /*
1051 * Allocate DMA-able memory for each section of the firmware image.
1052 */
1053 static int
1054 iwk_alloc_fw_dma(iwk_sc_t *sc)
1055 {
1056 int err = DDI_SUCCESS;
1057 iwk_dma_t *dma_p;
1058 char *t;
1059
1060 /*
1061 * firmware image layout:
1062 * |HDR|<-TEXT->|<-DATA->|<-INIT_TEXT->|<-INIT_DATA->|<-BOOT->|
1063 */
1064 t = (char *)(sc->sc_hdr + 1);
1065 err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->textsz),
1066 &fw_dma_attr, &iwk_dma_accattr,
1067 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1068 &sc->sc_dma_fw_text);
1069 dma_p = &sc->sc_dma_fw_text;
1070 IWK_DBG((IWK_DEBUG_DMA, "text[ncookies:%d addr:%lx size:%lx]\n",
1071 dma_p->ncookies, dma_p->cookie.dmac_address,
1072 dma_p->cookie.dmac_size));
1073 if (err != DDI_SUCCESS) {
1074 cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1075 " text dma memory");
1076 goto fail;
1077 }
1078 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->textsz));
1079
1080 t += LE_32(sc->sc_hdr->textsz);
1081 err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1082 &fw_dma_attr, &iwk_dma_accattr,
1083 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1084 &sc->sc_dma_fw_data);
1085 dma_p = &sc->sc_dma_fw_data;
1086 IWK_DBG((IWK_DEBUG_DMA, "data[ncookies:%d addr:%lx size:%lx]\n",
1087 dma_p->ncookies, dma_p->cookie.dmac_address,
1088 dma_p->cookie.dmac_size));
1089 if (err != DDI_SUCCESS) {
1090 cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1091 " data dma memory");
1092 goto fail;
1093 }
1094 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1095
1096 err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->datasz),
1097 &fw_dma_attr, &iwk_dma_accattr,
1098 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1099 &sc->sc_dma_fw_data_bak);
1100 dma_p = &sc->sc_dma_fw_data_bak;
1101 IWK_DBG((IWK_DEBUG_DMA, "data_bak[ncookies:%d addr:%lx "
1102 "size:%lx]\n",
1103 dma_p->ncookies, dma_p->cookie.dmac_address,
1104 dma_p->cookie.dmac_size));
1105 if (err != DDI_SUCCESS) {
1106 cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1107 " data backup dma memory");
1108 goto fail;
1109 }
1110 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->datasz));
1111
1112 t += LE_32(sc->sc_hdr->datasz);
1113 err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_textsz),
1114 &fw_dma_attr, &iwk_dma_accattr,
1115 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1116 &sc->sc_dma_fw_init_text);
1117 dma_p = &sc->sc_dma_fw_init_text;
1118 IWK_DBG((IWK_DEBUG_DMA, "init_text[ncookies:%d addr:%lx "
1119 "size:%lx]\n",
1120 dma_p->ncookies, dma_p->cookie.dmac_address,
1121 dma_p->cookie.dmac_size));
1122 if (err != DDI_SUCCESS) {
1123 cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1124 " init text dma memory");
1125 goto fail;
1126 }
1127 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_textsz));
1128
1129 t += LE_32(sc->sc_hdr->init_textsz);
1130 err = iwk_alloc_dma_mem(sc, LE_32(sc->sc_hdr->init_datasz),
1131 &fw_dma_attr, &iwk_dma_accattr,
1132 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1133 &sc->sc_dma_fw_init_data);
1134 dma_p = &sc->sc_dma_fw_init_data;
1135 IWK_DBG((IWK_DEBUG_DMA, "init_data[ncookies:%d addr:%lx "
1136 "size:%lx]\n",
1137 dma_p->ncookies, dma_p->cookie.dmac_address,
1138 dma_p->cookie.dmac_size));
1139 if (err != DDI_SUCCESS) {
1140 cmn_err(CE_WARN, "iwk_alloc_fw_dma(): failed to alloc"
1141 " init data dma memory");
1142 goto fail;
1143 }
1144 (void) memcpy(dma_p->mem_va, t, LE_32(sc->sc_hdr->init_datasz));
1145
1146 sc->sc_boot = t + LE_32(sc->sc_hdr->init_datasz);
1147 fail:
1148 return (err);
1149 }
1150
1151 static void
1152 iwk_free_fw_dma(iwk_sc_t *sc)
1153 {
1154 iwk_free_dma_mem(&sc->sc_dma_fw_text);
1155 iwk_free_dma_mem(&sc->sc_dma_fw_data);
1156 iwk_free_dma_mem(&sc->sc_dma_fw_data_bak);
1157 iwk_free_dma_mem(&sc->sc_dma_fw_init_text);
1158 iwk_free_dma_mem(&sc->sc_dma_fw_init_data);
1159 }
1160
1161 /*
1162 * Allocate a shared page between host and NIC.
1163 */
1164 static int
1165 iwk_alloc_shared(iwk_sc_t *sc)
1166 {
1167 iwk_dma_t *dma_p;
1168 int err = DDI_SUCCESS;
1169
1170 /* must be aligned on a 4K-page boundary */
1171 err = iwk_alloc_dma_mem(sc, sizeof (iwk_shared_t),
1172 &sh_dma_attr, &iwk_dma_descattr,
1173 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1174 &sc->sc_dma_sh);
1175 if (err != DDI_SUCCESS)
1176 goto fail;
1177 sc->sc_shared = (iwk_shared_t *)sc->sc_dma_sh.mem_va;
1178
1179 dma_p = &sc->sc_dma_sh;
1180 IWK_DBG((IWK_DEBUG_DMA, "sh[ncookies:%d addr:%lx size:%lx]\n",
1181 dma_p->ncookies, dma_p->cookie.dmac_address,
1182 dma_p->cookie.dmac_size));
1183
1184 return (err);
1185 fail:
1186 iwk_free_shared(sc);
1187 return (err);
1188 }
1189
1190 static void
1191 iwk_free_shared(iwk_sc_t *sc)
1192 {
1193 iwk_free_dma_mem(&sc->sc_dma_sh);
1194 }
1195
1196 /*
1197 * Allocate a keep warm page.
1198 */
1199 static int
1200 iwk_alloc_kw(iwk_sc_t *sc)
1201 {
1202 iwk_dma_t *dma_p;
1203 int err = DDI_SUCCESS;
1204
1205 /* must be aligned on a 4K-page boundary */
1206 err = iwk_alloc_dma_mem(sc, IWK_KW_SIZE,
1207 &kw_dma_attr, &iwk_dma_accattr,
1208 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1209 &sc->sc_dma_kw);
1210 if (err != DDI_SUCCESS)
1211 goto fail;
1212
1213 dma_p = &sc->sc_dma_kw;
1214 IWK_DBG((IWK_DEBUG_DMA, "kw[ncookies:%d addr:%lx size:%lx]\n",
1215 dma_p->ncookies, dma_p->cookie.dmac_address,
1216 dma_p->cookie.dmac_size));
1217
1218 return (err);
1219 fail:
1220 iwk_free_kw(sc);
1221 return (err);
1222 }
1223
1224 static void
1225 iwk_free_kw(iwk_sc_t *sc)
1226 {
1227 iwk_free_dma_mem(&sc->sc_dma_kw);
1228 }
1229
1230 static int
1231 iwk_alloc_rx_ring(iwk_sc_t *sc)
1232 {
1233 iwk_rx_ring_t *ring;
1234 iwk_rx_data_t *data;
1235 iwk_dma_t *dma_p;
1236 int i, err = DDI_SUCCESS;
1237
1238 ring = &sc->sc_rxq;
1239 ring->cur = 0;
1240
1241 err = iwk_alloc_dma_mem(sc, RX_QUEUE_SIZE * sizeof (uint32_t),
1242 &ring_desc_dma_attr, &iwk_dma_descattr,
1243 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1244 &ring->dma_desc);
1245 if (err != DDI_SUCCESS) {
1246 cmn_err(CE_WARN, "dma alloc rx ring desc failed\n");
1247 goto fail;
1248 }
1249 ring->desc = (uint32_t *)ring->dma_desc.mem_va;
1250 dma_p = &ring->dma_desc;
1251 IWK_DBG((IWK_DEBUG_DMA, "rx bd[ncookies:%d addr:%lx size:%lx]\n",
1252 dma_p->ncookies, dma_p->cookie.dmac_address,
1253 dma_p->cookie.dmac_size));
1254
1255 /*
1256 * Allocate Rx buffers.
1257 */
1258 for (i = 0; i < RX_QUEUE_SIZE; i++) {
1259 data = &ring->data[i];
1260 err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1261 &rx_buffer_dma_attr, &iwk_dma_accattr,
1262 DDI_DMA_READ | DDI_DMA_STREAMING,
1263 &data->dma_data);
1264 if (err != DDI_SUCCESS) {
1265 cmn_err(CE_WARN, "dma alloc rx ring buf[%d] "
1266 "failed\n", i);
1267 goto fail;
1268 }
1269 /*
1270 * the physical address bits [8-36] are used,
1271 * instead of bits [0-31] as in the 3945.
1272 */
1273 ring->desc[i] = (uint32_t)
1274 (data->dma_data.cookie.dmac_address >> 8);
1275 }
1276 dma_p = &ring->data[0].dma_data;
1277 IWK_DBG((IWK_DEBUG_DMA, "rx buffer[0][ncookies:%d addr:%lx "
1278 "size:%lx]\n",
1279 dma_p->ncookies, dma_p->cookie.dmac_address,
1280 dma_p->cookie.dmac_size));
1281
1282 IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1283
1284 return (err);
1285
1286 fail:
1287 iwk_free_rx_ring(sc);
1288 return (err);
1289 }
1290
1291 static void
1292 iwk_reset_rx_ring(iwk_sc_t *sc)
1293 {
1294 int n;
1295
1296 iwk_mac_access_enter(sc);
1297 IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1298 for (n = 0; n < 2000; n++) {
1299 if (IWK_READ(sc, FH_MEM_RSSR_RX_STATUS_REG) & (1 << 24))
1300 break;
1301 DELAY(1000);
1302 }
1303
1304 if (n == 2000)
1305 IWK_DBG((IWK_DEBUG_DMA, "timeout resetting Rx ring\n"));
1306
1307 iwk_mac_access_exit(sc);
1308
1309 sc->sc_rxq.cur = 0;
1310 }
1311
1312 static void
1313 iwk_free_rx_ring(iwk_sc_t *sc)
1314 {
1315 int i;
1316
1317 for (i = 0; i < RX_QUEUE_SIZE; i++) {
1318 if (sc->sc_rxq.data[i].dma_data.dma_hdl)
1319 IWK_DMA_SYNC(sc->sc_rxq.data[i].dma_data,
1320 DDI_DMA_SYNC_FORCPU);
1321 iwk_free_dma_mem(&sc->sc_rxq.data[i].dma_data);
1322 }
1323
1324 if (sc->sc_rxq.dma_desc.dma_hdl)
1325 IWK_DMA_SYNC(sc->sc_rxq.dma_desc, DDI_DMA_SYNC_FORDEV);
1326 iwk_free_dma_mem(&sc->sc_rxq.dma_desc);
1327 }
1328
1329 static int
1330 iwk_alloc_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring,
1331 int slots, int qid)
1332 {
1333 iwk_tx_data_t *data;
1334 iwk_tx_desc_t *desc_h;
1335 uint32_t paddr_desc_h;
1336 iwk_cmd_t *cmd_h;
1337 uint32_t paddr_cmd_h;
1338 iwk_dma_t *dma_p;
1339 int i, err = DDI_SUCCESS;
1340
1341 ring->qid = qid;
1342 ring->count = TFD_QUEUE_SIZE_MAX;
1343 ring->window = slots;
1344 ring->queued = 0;
1345 ring->cur = 0;
1346
1347 err = iwk_alloc_dma_mem(sc,
1348 TFD_QUEUE_SIZE_MAX * sizeof (iwk_tx_desc_t),
1349 &ring_desc_dma_attr, &iwk_dma_descattr,
1350 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1351 &ring->dma_desc);
1352 if (err != DDI_SUCCESS) {
1353 cmn_err(CE_WARN, "dma alloc tx ring desc[%d] "
1354 "failed\n", qid);
1355 goto fail;
1356 }
1357 dma_p = &ring->dma_desc;
1358 IWK_DBG((IWK_DEBUG_DMA, "tx bd[ncookies:%d addr:%lx size:%lx]\n",
1359 dma_p->ncookies, dma_p->cookie.dmac_address,
1360 dma_p->cookie.dmac_size));
1361
1362 desc_h = (iwk_tx_desc_t *)ring->dma_desc.mem_va;
1363 paddr_desc_h = ring->dma_desc.cookie.dmac_address;
1364
1365 err = iwk_alloc_dma_mem(sc,
1366 TFD_QUEUE_SIZE_MAX * sizeof (iwk_cmd_t),
1367 &cmd_dma_attr, &iwk_dma_accattr,
1368 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1369 &ring->dma_cmd);
1370 if (err != DDI_SUCCESS) {
1371 cmn_err(CE_WARN, "dma alloc tx ring cmd[%d] "
1372 "failed\n", qid);
1373 goto fail;
1374 }
1375 dma_p = &ring->dma_cmd;
1376 IWK_DBG((IWK_DEBUG_DMA, "tx cmd[ncookies:%d addr:%lx size:%lx]\n",
1377 dma_p->ncookies, dma_p->cookie.dmac_address,
1378 dma_p->cookie.dmac_size));
1379
1380 cmd_h = (iwk_cmd_t *)ring->dma_cmd.mem_va;
1381 paddr_cmd_h = ring->dma_cmd.cookie.dmac_address;
1382
1383 /*
1384 * Allocate Tx buffers.
1385 */
1386 ring->data = kmem_zalloc(sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX,
1387 KM_NOSLEEP);
1388 if (ring->data == NULL) {
1389 cmn_err(CE_WARN, "could not allocate tx data slots\n");
1390 goto fail;
1391 }
1392
1393 for (i = 0; i < TFD_QUEUE_SIZE_MAX; i++) {
1394 data = &ring->data[i];
1395 err = iwk_alloc_dma_mem(sc, sc->sc_dmabuf_sz,
1396 &tx_buffer_dma_attr, &iwk_dma_accattr,
1397 DDI_DMA_WRITE | DDI_DMA_STREAMING,
1398 &data->dma_data);
1399 if (err != DDI_SUCCESS) {
1400 cmn_err(CE_WARN, "dma alloc tx ring "
1401 "buf[%d] failed\n", i);
1402 goto fail;
1403 }
1404
1405 data->desc = desc_h + i;
1406 data->paddr_desc = paddr_desc_h +
1407 _PTRDIFF(data->desc, desc_h);
1408 data->cmd = cmd_h + i; /* (i % slots); */
1409 /* ((i % slots) * sizeof (iwk_cmd_t)); */
1410 data->paddr_cmd = paddr_cmd_h +
1411 _PTRDIFF(data->cmd, cmd_h);
1412 }
1413 dma_p = &ring->data[0].dma_data;
1414 IWK_DBG((IWK_DEBUG_DMA, "tx buffer[0][ncookies:%d addr:%lx "
1415 "size:%lx]\n",
1416 dma_p->ncookies, dma_p->cookie.dmac_address,
1417 dma_p->cookie.dmac_size));
1418
1419 return (err);
1420
1421 fail:
1422 if (ring->data)
1423 kmem_free(ring->data,
1424 sizeof (iwk_tx_data_t) * TFD_QUEUE_SIZE_MAX);
1425 iwk_free_tx_ring(sc, ring);
1426 return (err);
1427 }
1428
1429 static void
1430 iwk_reset_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
1431 {
1432 iwk_tx_data_t *data;
1433 int i, n;
1434
1435 iwk_mac_access_enter(sc);
1436
1437 IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(ring->qid), 0);
1438 for (n = 0; n < 200; n++) {
1439 if (IWK_READ(sc, IWK_FH_TSSR_TX_STATUS_REG) &
1440 IWK_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ring->qid))
1441 break;
1442 DELAY(10);
1443 }
1444 if (n == 200) {
1445 IWK_DBG((IWK_DEBUG_DMA, "timeout reset tx ring %d\n",
1446 ring->qid));
1447 }
1448 iwk_mac_access_exit(sc);
1449
1450 for (i = 0; i < ring->count; i++) {
1451 data = &ring->data[i];
1452 IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
1453 }
1454
1455 ring->queued = 0;
1456 ring->cur = 0;
1457 }
1458
1459 /*ARGSUSED*/
1460 static void
1461 iwk_free_tx_ring(iwk_sc_t *sc, iwk_tx_ring_t *ring)
1462 {
1463 int i;
1464
1465 if (ring->dma_desc.dma_hdl != NULL)
1466 IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
1467 iwk_free_dma_mem(&ring->dma_desc);
1468
1469 if (ring->dma_cmd.dma_hdl != NULL)
1470 IWK_DMA_SYNC(ring->dma_cmd, DDI_DMA_SYNC_FORDEV);
1471 iwk_free_dma_mem(&ring->dma_cmd);
1472
1473 if (ring->data != NULL) {
1474 for (i = 0; i < ring->count; i++) {
1475 if (ring->data[i].dma_data.dma_hdl)
1476 IWK_DMA_SYNC(ring->data[i].dma_data,
1477 DDI_DMA_SYNC_FORDEV);
1478 iwk_free_dma_mem(&ring->data[i].dma_data);
1479 }
1480 kmem_free(ring->data, ring->count * sizeof (iwk_tx_data_t));
1481 }
1482 }
1483
1484 static int
1485 iwk_ring_init(iwk_sc_t *sc)
1486 {
1487 int i, err = DDI_SUCCESS;
1488
1489 for (i = 0; i < IWK_NUM_QUEUES; i++) {
1490 if (i == IWK_CMD_QUEUE_NUM)
1491 continue;
1492 err = iwk_alloc_tx_ring(sc, &sc->sc_txq[i], TFD_TX_CMD_SLOTS,
1493 i);
1494 if (err != DDI_SUCCESS)
1495 goto fail;
1496 }
1497 err = iwk_alloc_tx_ring(sc, &sc->sc_txq[IWK_CMD_QUEUE_NUM],
1498 TFD_CMD_SLOTS, IWK_CMD_QUEUE_NUM);
1499 if (err != DDI_SUCCESS)
1500 goto fail;
1501 err = iwk_alloc_rx_ring(sc);
1502 if (err != DDI_SUCCESS)
1503 goto fail;
1504 return (err);
1505
1506 fail:
1507 return (err);
1508 }
1509
1510 static void
1511 iwk_ring_free(iwk_sc_t *sc)
1512 {
1513 int i = IWK_NUM_QUEUES;
1514
1515 iwk_free_rx_ring(sc);
1516 while (--i >= 0) {
1517 iwk_free_tx_ring(sc, &sc->sc_txq[i]);
1518 }
1519 }
1520
1521 /* ARGSUSED */
1522 static ieee80211_node_t *
1523 iwk_node_alloc(ieee80211com_t *ic)
1524 {
1525 iwk_amrr_t *amrr;
1526
1527 amrr = kmem_zalloc(sizeof (iwk_amrr_t), KM_SLEEP);
1528 if (amrr != NULL)
1529 iwk_amrr_init(amrr);
1530 return (&amrr->in);
1531 }
1532
1533 static void
1534 iwk_node_free(ieee80211_node_t *in)
1535 {
1536 ieee80211com_t *ic = in->in_ic;
1537
1538 ic->ic_node_cleanup(in);
1539 if (in->in_wpa_ie != NULL)
1540 ieee80211_free(in->in_wpa_ie);
1541 kmem_free(in, sizeof (iwk_amrr_t));
1542 }
1543
1544 /*ARGSUSED*/
1545 static int
1546 iwk_newstate(ieee80211com_t *ic, enum ieee80211_state nstate, int arg)
1547 {
1548 iwk_sc_t *sc = (iwk_sc_t *)ic;
1549 ieee80211_node_t *in = ic->ic_bss;
1550 enum ieee80211_state ostate = ic->ic_state;
1551 int i, err = IWK_SUCCESS;
1552
1553 mutex_enter(&sc->sc_glock);
1554 switch (nstate) {
1555 case IEEE80211_S_SCAN:
1556 switch (ostate) {
1557 case IEEE80211_S_INIT:
1558 {
1559 iwk_add_sta_t node;
1560
1561 sc->sc_flags |= IWK_F_SCANNING;
1562 sc->sc_scan_pending = 0;
1563 iwk_set_led(sc, 2, 10, 2);
1564
1565 /*
1566 * clear association to receive beacons from
1567 * all BSS'es
1568 */
1569 sc->sc_config.assoc_id = 0;
1570 sc->sc_config.filter_flags &=
1571 ~LE_32(RXON_FILTER_ASSOC_MSK);
1572
1573 IWK_DBG((IWK_DEBUG_80211, "config chan %d "
1574 "flags %x filter_flags %x\n", sc->sc_config.chan,
1575 sc->sc_config.flags, sc->sc_config.filter_flags));
1576
1577 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
1578 sizeof (iwk_rxon_cmd_t), 1);
1579 if (err != IWK_SUCCESS) {
1580 cmn_err(CE_WARN,
1581 "could not clear association\n");
1582 sc->sc_flags &= ~IWK_F_SCANNING;
1583 mutex_exit(&sc->sc_glock);
1584 return (err);
1585 }
1586
1587 /* add broadcast node to send probe request */
1588 (void) memset(&node, 0, sizeof (node));
1589 (void) memset(&node.bssid, 0xff, IEEE80211_ADDR_LEN);
1590 node.id = IWK_BROADCAST_ID;
1591 err = iwk_cmd(sc, REPLY_ADD_STA, &node,
1592 sizeof (node), 1);
1593 if (err != IWK_SUCCESS) {
1594 cmn_err(CE_WARN, "could not add "
1595 "broadcast node\n");
1596 sc->sc_flags &= ~IWK_F_SCANNING;
1597 mutex_exit(&sc->sc_glock);
1598 return (err);
1599 }
1600 break;
1601 }
1602
1603 case IEEE80211_S_AUTH:
1604 case IEEE80211_S_ASSOC:
1605 case IEEE80211_S_RUN:
1606 sc->sc_flags |= IWK_F_SCANNING;
1607 sc->sc_scan_pending = 0;
1608
1609 iwk_set_led(sc, 2, 10, 2);
1610 /* FALLTHRU */
1611 case IEEE80211_S_SCAN:
1612 mutex_exit(&sc->sc_glock);
1613 /* step to next channel before actual FW scan */
1614 err = sc->sc_newstate(ic, nstate, arg);
1615 mutex_enter(&sc->sc_glock);
1616 if ((err != 0) || ((err = iwk_scan(sc)) != 0)) {
1617 cmn_err(CE_WARN,
1618 "could not initiate scan\n");
1619 sc->sc_flags &= ~IWK_F_SCANNING;
1620 ieee80211_cancel_scan(ic);
1621 }
1622 mutex_exit(&sc->sc_glock);
1623 return (err);
1624 default:
1625 break;
1626
1627 }
1628 sc->sc_clk = 0;
1629 break;
1630
1631 case IEEE80211_S_AUTH:
1632 if (ostate == IEEE80211_S_SCAN) {
1633 sc->sc_flags &= ~IWK_F_SCANNING;
1634 }
1635
1636 /* reset state to handle reassociations correctly */
1637 sc->sc_config.assoc_id = 0;
1638 sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_ASSOC_MSK);
1639
1640 /*
1641 * before sending authentication and association request frames,
1642 * we need to do some setup in the hardware, such as setting the
1643 * channel to match that of the target AP...
1644 */
1645 if ((err = iwk_hw_set_before_auth(sc)) != 0) {
1646 cmn_err(CE_WARN, "could not setup firmware for "
1647 "authentication\n");
1648 mutex_exit(&sc->sc_glock);
1649 return (err);
1650 }
1651 break;
1652
1653 case IEEE80211_S_RUN:
1654 if (ostate == IEEE80211_S_SCAN) {
1655 sc->sc_flags &= ~IWK_F_SCANNING;
1656 }
1657
1658 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
1659 /* let LED blink when monitoring */
1660 iwk_set_led(sc, 2, 10, 10);
1661 break;
1662 }
1663 IWK_DBG((IWK_DEBUG_80211, "iwk: associated."));
1664
1665 /* IBSS mode */
1666 if (ic->ic_opmode == IEEE80211_M_IBSS) {
1667 /*
1668 * clean all nodes in ibss node table
1669 * in order to be consistent with hardware
1670 */
1671 err = iwk_run_state_config_ibss(ic);
1672 if (err != IWK_SUCCESS) {
1673 cmn_err(CE_WARN, "iwk_newstate(): "
1674 "failed to update configuration "
1675 "in IBSS mode\n");
1676 mutex_exit(&sc->sc_glock);
1677 return (err);
1678 }
1679 }
1680
1681 /* non-IBSS mode */
1682 if (ic->ic_opmode != IEEE80211_M_IBSS) {
1683 /* update adapter's configuration */
1684 err = iwk_run_state_config_sta(ic);
1685 if (err != IWK_SUCCESS) {
1686 cmn_err(CE_WARN, "iwk_newstate(): "
1687 "failed to update configuration "
1688 "in non-IBSS mode\n");
1689 mutex_exit(&sc->sc_glock);
1690 return (err);
1691 }
1692 }
1693
1694 /* obtain current temperature of chipset */
1695 sc->sc_tempera = iwk_curr_tempera(sc);
1696
1697 /*
1698 * perform Tx power calibration to determine
1699 * the DSP and radio gains
1700 */
1701 err = iwk_tx_power_calibration(sc);
1702 if (err) {
1703 cmn_err(CE_WARN, "iwk_newstate(): "
1704 "failed to set tx power table\n");
1705 mutex_exit(&sc->sc_glock);
1706 return (err);
1707 }
1708
1709 if (ic->ic_opmode == IEEE80211_M_IBSS) {
1710
1711 /*
1712 * allocate and transmit beacon frames
1713 */
1714 err = iwk_start_tx_beacon(ic);
1715 if (err != IWK_SUCCESS) {
1716 cmn_err(CE_WARN, "iwk_newstate(): "
1717 "can't transmit beacon frames\n");
1718 mutex_exit(&sc->sc_glock);
1719 return (err);
1720 }
1721 }
1722
1723 /* start automatic rate control */
1724 mutex_enter(&sc->sc_mt_lock);
1725 if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) {
1726 sc->sc_flags |= IWK_F_RATE_AUTO_CTL;
1727 /* set rate to some reasonable initial value */
1728 i = in->in_rates.ir_nrates - 1;
1729 while (i > 0 && IEEE80211_RATE(i) > 72)
1730 i--;
1731 in->in_txrate = i;
1732 } else {
1733 sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
1734 }
1735 mutex_exit(&sc->sc_mt_lock);
1736
1737 /* set LED on after associated */
1738 iwk_set_led(sc, 2, 0, 1);
1739 break;
1740
1741 case IEEE80211_S_INIT:
1742 if (ostate == IEEE80211_S_SCAN) {
1743 sc->sc_flags &= ~IWK_F_SCANNING;
1744 }
1745
1746 /* set LED off after init */
1747 iwk_set_led(sc, 2, 1, 0);
1748 break;
1749 case IEEE80211_S_ASSOC:
1750 if (ostate == IEEE80211_S_SCAN) {
1751 sc->sc_flags &= ~IWK_F_SCANNING;
1752 }
1753
1754 break;
1755 }
1756
1757 mutex_exit(&sc->sc_glock);
1758
1759 err = sc->sc_newstate(ic, nstate, arg);
1760
1761 if (nstate == IEEE80211_S_RUN) {
1762
1763 mutex_enter(&sc->sc_glock);
1764
1765 /*
1766 * initialize receiver sensitivity
1767 * calibration
1768 */
1769 err = iwk_rx_sens_init(sc);
1770 if (err) {
1771 cmn_err(CE_WARN, "iwk_newstate(): "
1772 "failed to init RX sensitivity\n");
1773 mutex_exit(&sc->sc_glock);
1774 return (err);
1775 }
1776
1777 /* initialize receiver gain balance calibration */
1778 err = iwk_rxgain_diff_init(sc);
1779 if (err) {
1780 cmn_err(CE_WARN, "iwk_newstate(): "
1781 "failed to init phy calibration\n");
1782 mutex_exit(&sc->sc_glock);
1783 return (err);
1784 }
1785
1786 mutex_exit(&sc->sc_glock);
1787
1788 }
1789
1790 return (err);
1791 }
1792
1793 static void
1794 iwk_watchdog(void *arg)
1795 {
1796 iwk_sc_t *sc = arg;
1797 struct ieee80211com *ic = &sc->sc_ic;
1798 #ifdef DEBUG
1799 timeout_id_t timeout_id = ic->ic_watchdog_timer;
1800 #endif
1801
1802 ieee80211_stop_watchdog(ic);
1803
1804 if ((ic->ic_state != IEEE80211_S_AUTH) &&
1805 (ic->ic_state != IEEE80211_S_ASSOC))
1806 return;
1807
1808 if (ic->ic_bss->in_fails > 0) {
1809 IWK_DBG((IWK_DEBUG_80211, "watchdog (0x%x) reset: "
1810 "node (0x%x)\n", timeout_id, &ic->ic_bss));
1811 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
1812 } else {
1813 IWK_DBG((IWK_DEBUG_80211, "watchdog (0x%x) timeout: "
1814 "node (0x%x), retry (%d)\n",
1815 timeout_id, &ic->ic_bss, ic->ic_bss->in_fails + 1));
1816 ieee80211_watchdog(ic);
1817 }
1818 }
1819
1820 /*ARGSUSED*/
1821 static int iwk_key_set(ieee80211com_t *ic, const struct ieee80211_key *k,
1822 const uint8_t mac[IEEE80211_ADDR_LEN])
1823 {
1824 iwk_sc_t *sc = (iwk_sc_t *)ic;
1825 iwk_add_sta_t node;
1826 int err;
1827 uint8_t index1;
1828
1829 switch (k->wk_cipher->ic_cipher) {
1830 case IEEE80211_CIPHER_WEP:
1831 case IEEE80211_CIPHER_TKIP:
1832 return (1); /* software does it. */
1833 case IEEE80211_CIPHER_AES_CCM:
1834 break;
1835 default:
1836 return (0);
1837 }
1838 sc->sc_config.filter_flags &= ~LE_32(RXON_FILTER_DIS_DECRYPT_MSK |
1839 RXON_FILTER_DIS_GRP_DECRYPT_MSK);
1840
1841 mutex_enter(&sc->sc_glock);
1842
1843 /* update ap/multicast node */
1844 (void) memset(&node, 0, sizeof (node));
1845 if (IEEE80211_IS_MULTICAST(mac)) {
1846 (void) memset(node.bssid, 0xff, IEEE80211_ADDR_LEN);
1847 node.id = IWK_BROADCAST_ID;
1848 } else if (ic->ic_opmode == IEEE80211_M_IBSS) {
1849 mutex_exit(&sc->sc_glock);
1850 mutex_enter(&sc->sc_ibss.node_tb_lock);
1851
1852 /*
1853 * search for node in ibss node table
1854 */
1855 for (index1 = IWK_STA_ID; index1 < IWK_STATION_COUNT;
1856 index1++) {
1857 if (sc->sc_ibss.ibss_node_tb[index1].used &&
1858 IEEE80211_ADDR_EQ(sc->sc_ibss.
1859 ibss_node_tb[index1].node.bssid,
1860 mac)) {
1861 break;
1862 }
1863 }
1864 if (index1 >= IWK_BROADCAST_ID) {
1865 cmn_err(CE_WARN, "iwk_key_set(): "
1866 "node not found in hardware node table\n");
1867 mutex_exit(&sc->sc_ibss.node_tb_lock);
1868 return (0);
1869 } else {
1870 /*
1871 * configure key for given node in hardware
1872 */
1873 if (k->wk_flags & IEEE80211_KEY_XMIT) {
1874 sc->sc_ibss.ibss_node_tb[index1].
1875 node.key_flags = 0;
1876 sc->sc_ibss.ibss_node_tb[index1].
1877 node.keyp = k->wk_keyix;
1878 } else {
1879 sc->sc_ibss.ibss_node_tb[index1].
1880 node.key_flags = (1 << 14);
1881 sc->sc_ibss.ibss_node_tb[index1].
1882 node.keyp = k->wk_keyix + 4;
1883 }
1884
1885 (void) memcpy(sc->sc_ibss.ibss_node_tb[index1].node.key,
1886 k->wk_key, k->wk_keylen);
1887 sc->sc_ibss.ibss_node_tb[index1].node.key_flags |=
1888 (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
1889 sc->sc_ibss.ibss_node_tb[index1].node.key_flags =
1890 LE_16(sc->sc_ibss.ibss_node_tb[index1].
1891 node.key_flags);
1892 sc->sc_ibss.ibss_node_tb[index1].node.sta_mask =
1893 STA_MODIFY_KEY_MASK;
1894 sc->sc_ibss.ibss_node_tb[index1].node.control = 1;
1895
1896 mutex_enter(&sc->sc_glock);
1897 err = iwk_cmd(sc, REPLY_ADD_STA,
1898 &sc->sc_ibss.ibss_node_tb[index1].node,
1899 sizeof (iwk_add_sta_t), 1);
1900 if (err != IWK_SUCCESS) {
1901 cmn_err(CE_WARN, "iwk_key_set(): "
1902 "failed to update IBSS node in hardware\n");
1903 mutex_exit(&sc->sc_glock);
1904 mutex_exit(&sc->sc_ibss.node_tb_lock);
1905 return (0);
1906 }
1907 mutex_exit(&sc->sc_glock);
1908 }
1909 mutex_exit(&sc->sc_ibss.node_tb_lock);
1910 return (1);
1911 } else {
1912 IEEE80211_ADDR_COPY(node.bssid, ic->ic_bss->in_bssid);
1913 node.id = IWK_AP_ID;
1914 }
1915 if (k->wk_flags & IEEE80211_KEY_XMIT) {
1916 node.key_flags = 0;
1917 node.keyp = k->wk_keyix;
1918 } else {
1919 node.key_flags = (1 << 14);
1920 node.keyp = k->wk_keyix + 4;
1921 }
1922 (void) memcpy(node.key, k->wk_key, k->wk_keylen);
1923 node.key_flags |= (STA_KEY_FLG_CCMP | (1 << 3) | (k->wk_keyix << 8));
1924 node.key_flags = LE_16(node.key_flags);
1925 node.sta_mask = STA_MODIFY_KEY_MASK;
1926 node.control = 1;
1927 err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
1928 if (err != IWK_SUCCESS) {
1929 cmn_err(CE_WARN, "iwk_key_set():"
1930 "failed to update ap node\n");
1931 mutex_exit(&sc->sc_glock);
1932 return (0);
1933 }
1934 mutex_exit(&sc->sc_glock);
1935 return (1);
1936 }
1937
1938 /*
1939 * Begin exclusive access to the MAC.
1940 */
1941 static void
1942 iwk_mac_access_enter(iwk_sc_t *sc)
1943 {
1944 uint32_t tmp;
1945 int n;
1946
1947 tmp = IWK_READ(sc, CSR_GP_CNTRL);
1948 IWK_WRITE(sc, CSR_GP_CNTRL,
1949 tmp | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1950
1951 /* wait until we succeed */
1952 for (n = 0; n < 1000; n++) {
1953 if ((IWK_READ(sc, CSR_GP_CNTRL) &
1954 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1955 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP)) ==
1956 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN)
1957 break;
1958 DELAY(10);
1959 }
1960 if (n == 1000)
1961 IWK_DBG((IWK_DEBUG_PIO, "could not lock memory\n"));
1962 }
1963
/*
* End exclusive access to the MAC.
*/
1967 static void
1968 iwk_mac_access_exit(iwk_sc_t *sc)
1969 {
1970 uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL);
1971 IWK_WRITE(sc, CSR_GP_CNTRL,
1972 tmp & ~CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1973 }
1974
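/*
* Indirect access to the NIC's internal SRAM through the HBUS target
* memory registers: the address is written to HBUS_TARG_MEM_RADDR/WADDR
* and the data is transferred via HBUS_TARG_MEM_RDAT/WDAT. These helpers
* are typically called with MAC access held (iwk_mac_access_enter()).
*/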
1975 static uint32_t
1976 iwk_mem_read(iwk_sc_t *sc, uint32_t addr)
1977 {
1978 IWK_WRITE(sc, HBUS_TARG_MEM_RADDR, addr);
1979 return (IWK_READ(sc, HBUS_TARG_MEM_RDAT));
1980 }
1981
1982 static void
1983 iwk_mem_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
1984 {
1985 IWK_WRITE(sc, HBUS_TARG_MEM_WADDR, addr);
1986 IWK_WRITE(sc, HBUS_TARG_MEM_WDAT, data);
1987 }
1988
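/*
* Indirect access to the NIC's peripheral ("PRPH") registers through the
* HBUS target registers; the (3 << 24) field in the address appears to
* select a full 4-byte access.
*/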
1989 static uint32_t
1990 iwk_reg_read(iwk_sc_t *sc, uint32_t addr)
1991 {
1992 IWK_WRITE(sc, HBUS_TARG_PRPH_RADDR, addr | (3 << 24));
1993 return (IWK_READ(sc, HBUS_TARG_PRPH_RDAT));
1994 }
1995
1996 static void
1997 iwk_reg_write(iwk_sc_t *sc, uint32_t addr, uint32_t data)
1998 {
1999 IWK_WRITE(sc, HBUS_TARG_PRPH_WADDR, addr | (3 << 24));
2000 IWK_WRITE(sc, HBUS_TARG_PRPH_WDAT, data);
2001 }
2002
2003 static void
2004 iwk_reg_write_region_4(iwk_sc_t *sc, uint32_t addr,
2005 uint32_t *data, int wlen)
2006 {
2007 for (; wlen > 0; wlen--, data++, addr += 4)
2008 iwk_reg_write(sc, addr, LE_32(*data));
2009 }
2010
2011
/*
* ucode load/initialization steps:
* 1) Load the Bootstrap State Machine (BSM) with the "bootstrap" uCode image.
*    The BSM contains a small memory that *always* stays powered up, so it
*    can retain the bootstrap program even when the card is in a power-saving
*    power-down state. The BSM loads the small program into the ARC
*    processor's instruction memory when triggered by power-up.
* 2) Load the Initialize image via the bootstrap program.
*    The Initialize image sets up regulatory and calibration data for the
*    Runtime/Protocol uCode and sends a REPLY_ALIVE notification when done.
*    On the 4965 the reply contains calibration data for temperature, voltage
*    and tx gain correction.
*/
2025 static int
2026 iwk_load_firmware(iwk_sc_t *sc)
2027 {
2028 uint32_t *boot_fw = (uint32_t *)sc->sc_boot;
2029 uint32_t size = LE_32(sc->sc_hdr->bootsz);
2030 int n, err = IWK_SUCCESS;
2031
/*
* Program the BSM with the physical address bits [35:4] of the
* initialize uCode. The physical address of the runtime uCode is set
* for loading later, in the initialize alive notification handler.
*/
2037 iwk_mac_access_enter(sc);
2038
2039 iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
2040 sc->sc_dma_fw_init_text.cookie.dmac_address >> 4);
2041 iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
2042 sc->sc_dma_fw_init_data.cookie.dmac_address >> 4);
2043 iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
2044 sc->sc_dma_fw_init_text.cookie.dmac_size);
2045 iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
2046 sc->sc_dma_fw_init_data.cookie.dmac_size);
2047
2048 /* load bootstrap code into BSM memory */
2049 iwk_reg_write_region_4(sc, BSM_SRAM_LOWER_BOUND, boot_fw,
2050 size / sizeof (uint32_t));
2051
2052 iwk_reg_write(sc, BSM_WR_MEM_SRC_REG, 0);
2053 iwk_reg_write(sc, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
2054 iwk_reg_write(sc, BSM_WR_DWCOUNT_REG, size / sizeof (uint32_t));
2055
2056 /*
2057 * prepare to load initialize uCode
2058 */
2059 iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
2060
2061 /* wait while the adapter is busy loading the firmware */
2062 for (n = 0; n < 1000; n++) {
2063 if (!(iwk_reg_read(sc, BSM_WR_CTRL_REG) &
2064 BSM_WR_CTRL_REG_BIT_START))
2065 break;
2066 DELAY(10);
2067 }
2068 if (n == 1000) {
2069 cmn_err(CE_WARN, "timeout transferring firmware\n");
2070 err = ETIMEDOUT;
2071 return (err);
2072 }
2073
2074 /* for future power-save mode use */
2075 iwk_reg_write(sc, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
2076
2077 iwk_mac_access_exit(sc);
2078
2079 return (err);
2080 }
2081
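/*
* Rx notification (REPLY_4965_RX): parse the PHY result that precedes the
* frame, derive an RSSI percentage from the per-antenna measurements,
* check the status word appended after the payload (CRC/overflow), and
* pass the frame up to the net80211 layer.
*/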
2082 /*ARGSUSED*/
2083 static void
2084 iwk_rx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
2085 {
2086 ieee80211com_t *ic = &sc->sc_ic;
2087 iwk_rx_ring_t *ring = &sc->sc_rxq;
2088 iwk_rx_phy_res_t *stat;
2089 ieee80211_node_t *in;
2090 uint32_t *tail;
2091 struct ieee80211_frame *wh;
2092 mblk_t *mp;
2093 uint16_t len, rssi, mrssi, agc;
2094 int16_t t;
2095 uint32_t ants, i;
2096 struct iwk_rx_non_cfg_phy *phyinfo;
2097 uint32_t crc;
2098
/* assume non-11n frames here; 11n handling is deferred to phase II */
2100 stat = (iwk_rx_phy_res_t *)(desc + 1);
2101 if (stat->cfg_phy_cnt > 20) {
2102 return;
2103 }
2104
2105 for (i = 0; i < RX_RES_PHY_CNT; i++)
2106 stat->non_cfg_phy[i] = LE_16(stat->non_cfg_phy[i]);
2107
2108 phyinfo = (struct iwk_rx_non_cfg_phy *)stat->non_cfg_phy;
2109 agc = (phyinfo->agc_info & IWK_AGC_DB_MASK) >> IWK_AGC_DB_POS;
2110 mrssi = 0;
2111 ants = (LE_16(stat->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK) >>
2112 RX_PHY_FLAGS_ANTENNAE_OFFSET;
2113 for (i = 0; i < 3; i++) {
2114 if (ants & (1 << i))
2115 mrssi = MAX(mrssi, phyinfo->rssi_info[i << 1]);
2116 }
2117 t = mrssi - agc - 44; /* t is the dBM value */
/*
* convert the dBm value to a percentage (empirical formula)
*/
2121 rssi = (100 * 75 * 75 - (-20 - t) * (15 * 75 + 62 * (-20 - t))) /
2122 (75 * 75);
2123 if (rssi > 100)
2124 rssi = 100;
2125 if (rssi < 1)
2126 rssi = 1;
2127 len = LE_16(stat->byte_count);
2128 tail = (uint32_t *)((caddr_t)(stat + 1) + stat->cfg_phy_cnt + len);
2129 bcopy(tail, &crc, 4);
2130
2131 IWK_DBG((IWK_DEBUG_RX, "rx intr: idx=%d phy_len=%x len=%d "
2132 "rate=%x chan=%d tstamp=%x non_cfg_phy_count=%x "
2133 "cfg_phy_count=%x tail=%x", ring->cur, sizeof (*stat),
2134 len, stat->rate.r.s.rate, LE_16(stat->channel),
2135 LE_32(stat->timestampl), stat->non_cfg_phy_cnt,
2136 stat->cfg_phy_cnt, LE_32(crc)));
2137
2138 if ((len < 16) || (len > sc->sc_dmabuf_sz)) {
IWK_DBG((IWK_DEBUG_RX, "rx frame with invalid length\n"));
2140 return;
2141 }
2142
2143 /*
2144 * discard Rx frames with bad CRC
2145 */
2146 if ((LE_32(crc) &
2147 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) !=
2148 (RX_RES_STATUS_NO_CRC32_ERROR | RX_RES_STATUS_NO_RXE_OVERFLOW)) {
2149 IWK_DBG((IWK_DEBUG_RX, "rx crc error tail: %x\n",
2150 LE_32(crc)));
2151 sc->sc_rx_err++;
2152 return;
2153 }
2154
2155 wh = (struct ieee80211_frame *)
2156 ((uint8_t *)(stat + 1)+ stat->cfg_phy_cnt);
2157 if (*(uint8_t *)wh == IEEE80211_FC0_SUBTYPE_ASSOC_RESP) {
2158 sc->sc_assoc_id = *((uint16_t *)(wh + 1) + 2);
2159 IWK_DBG((IWK_DEBUG_RX, "rx : association id = %x\n",
2160 sc->sc_assoc_id));
2161 }
2162 #ifdef DEBUG
2163 if (iwk_dbg_flags & IWK_DEBUG_RX)
2164 ieee80211_dump_pkt((uint8_t *)wh, len, 0, 0);
2165 #endif
2166 in = ieee80211_find_rxnode(ic, wh);
2167 mp = allocb(len, BPRI_MED);
2168 if (mp) {
2169 (void) memcpy(mp->b_wptr, wh, len);
2170 mp->b_wptr += len;
2171
2172 /* send the frame to the 802.11 layer */
2173 (void) ieee80211_input(ic, mp, in, rssi, 0);
2174 } else {
2175 sc->sc_rx_nobuf++;
2176 IWK_DBG((IWK_DEBUG_RX,
2177 "iwk_rx_intr(): alloc rx buf failed\n"));
2178 }
2179 /* release node reference */
2180 ieee80211_free_node(in);
2181 }
2182
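/*
* Tx completion notification: update the AMRR rate-control counters,
* release the tx ring slot and, if the mac layer was flow-controlled
* because the ring was full, ask it to resume transmission.
*/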
2183 /*ARGSUSED*/
2184 static void
2185 iwk_tx_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc, iwk_rx_data_t *data)
2186 {
2187 ieee80211com_t *ic = &sc->sc_ic;
2188 iwk_tx_ring_t *ring = &sc->sc_txq[desc->hdr.qid & 0x3];
2189 iwk_tx_stat_t *stat = (iwk_tx_stat_t *)(desc + 1);
2190 iwk_amrr_t *amrr = (iwk_amrr_t *)ic->ic_bss;
2191
2192 IWK_DBG((IWK_DEBUG_TX, "tx done: qid=%d idx=%d"
2193 " retries=%d frame_count=%x nkill=%d "
2194 "rate=%x duration=%d status=%x\n",
2195 desc->hdr.qid, desc->hdr.idx, stat->ntries, stat->frame_count,
2196 stat->bt_kill_count, stat->rate.r.s.rate,
2197 LE_16(stat->duration), LE_32(stat->status)));
2198
2199 amrr->txcnt++;
2200 IWK_DBG((IWK_DEBUG_RATECTL, "tx: %d cnt\n", amrr->txcnt));
2201 if (stat->ntries > 0) {
2202 amrr->retrycnt++;
2203 sc->sc_tx_retries++;
2204 IWK_DBG((IWK_DEBUG_TX, "tx: %d retries\n",
2205 sc->sc_tx_retries));
2206 }
2207
2208 sc->sc_tx_timer = 0;
2209
2210 mutex_enter(&sc->sc_tx_lock);
2211 ring->queued--;
2212 if (ring->queued < 0)
2213 ring->queued = 0;
2214 if ((sc->sc_need_reschedule) && (ring->queued <= (ring->count << 3))) {
2215 sc->sc_need_reschedule = 0;
2216 mutex_exit(&sc->sc_tx_lock);
2217 mac_tx_update(ic->ic_mach);
2218 mutex_enter(&sc->sc_tx_lock);
2219 }
2220 mutex_exit(&sc->sc_tx_lock);
2221 }
2222
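/*
* A host command has completed on the command queue (queue 4); mark it
* done and wake up the thread waiting in iwk_cmd().
*/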
2223 static void
2224 iwk_cmd_intr(iwk_sc_t *sc, iwk_rx_desc_t *desc)
2225 {
2226 if ((desc->hdr.qid & 7) != 4) {
2227 return;
2228 }
2229 mutex_enter(&sc->sc_glock);
2230 sc->sc_flags |= IWK_F_CMD_DONE;
2231 cv_signal(&sc->sc_cmd_cv);
2232 mutex_exit(&sc->sc_glock);
2233 IWK_DBG((IWK_DEBUG_CMD, "rx cmd: "
2234 "qid=%x idx=%d flags=%x type=0x%x\n",
2235 desc->hdr.qid, desc->hdr.idx, desc->hdr.flags,
2236 desc->hdr.type));
2237 }
2238
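/*
* REPLY_ALIVE notification from the microcode. For the initialize image
* this saves the calibration results and programs the BSM with the
* runtime image; for the runtime image it initializes the scheduler (SCD)
* registers and signals iwk_init() that the firmware is up.
*/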
2239 static void
2240 iwk_ucode_alive(iwk_sc_t *sc, iwk_rx_desc_t *desc)
2241 {
2242 uint32_t base, i;
2243 struct iwk_alive_resp *ar =
2244 (struct iwk_alive_resp *)(desc + 1);
2245
2246 /* the microcontroller is ready */
2247 IWK_DBG((IWK_DEBUG_FW,
2248 "microcode alive notification minor: %x major: %x type:"
2249 " %x subtype: %x\n",
ar->ucode_minor, ar->ucode_major, ar->ver_type, ar->ver_subtype));
2251
2252 if (LE_32(ar->is_valid) != UCODE_VALID_OK) {
2253 IWK_DBG((IWK_DEBUG_FW,
2254 "microcontroller initialization failed\n"));
2255 }
2256 if (ar->ver_subtype == INITIALIZE_SUBTYPE) {
2257 IWK_DBG((IWK_DEBUG_FW,
2258 "initialization alive received.\n"));
2259 (void) memcpy(&sc->sc_card_alive_init, ar,
2260 sizeof (struct iwk_init_alive_resp));
2261 /* XXX get temperature */
2262 iwk_mac_access_enter(sc);
2263 iwk_reg_write(sc, BSM_DRAM_INST_PTR_REG,
2264 sc->sc_dma_fw_text.cookie.dmac_address >> 4);
2265 iwk_reg_write(sc, BSM_DRAM_DATA_PTR_REG,
2266 sc->sc_dma_fw_data_bak.cookie.dmac_address >> 4);
2267 iwk_reg_write(sc, BSM_DRAM_DATA_BYTECOUNT_REG,
2268 sc->sc_dma_fw_data.cookie.dmac_size);
2269 iwk_reg_write(sc, BSM_DRAM_INST_BYTECOUNT_REG,
2270 sc->sc_dma_fw_text.cookie.dmac_size | 0x80000000);
2271 iwk_mac_access_exit(sc);
2272 } else {
2273 IWK_DBG((IWK_DEBUG_FW, "runtime alive received.\n"));
2274 (void) memcpy(&sc->sc_card_alive_run, ar,
2275 sizeof (struct iwk_alive_resp));
2276
2277 /*
2278 * Init SCD related registers to make Tx work. XXX
2279 */
2280 iwk_mac_access_enter(sc);
2281
2282 /* read sram address of data base */
2283 sc->sc_scd_base = iwk_reg_read(sc, SCD_SRAM_BASE_ADDR);
2284
2285 /* clear and init SCD_CONTEXT_DATA_OFFSET area. 128 bytes */
2286 for (base = sc->sc_scd_base + SCD_CONTEXT_DATA_OFFSET, i = 0;
2287 i < 128; i += 4)
2288 iwk_mem_write(sc, base + i, 0);
2289
2290 /* clear and init SCD_TX_STTS_BITMAP_OFFSET area. 256 bytes */
2291 for (base = sc->sc_scd_base + SCD_TX_STTS_BITMAP_OFFSET;
2292 i < 256; i += 4)
2293 iwk_mem_write(sc, base + i, 0);
2294
2295 /* clear and init SCD_TRANSLATE_TBL_OFFSET area. 32 bytes */
2296 for (base = sc->sc_scd_base + SCD_TRANSLATE_TBL_OFFSET;
2297 i < sizeof (uint16_t) * IWK_NUM_QUEUES; i += 4)
2298 iwk_mem_write(sc, base + i, 0);
2299
2300 iwk_reg_write(sc, SCD_DRAM_BASE_ADDR,
2301 sc->sc_dma_sh.cookie.dmac_address >> 10);
2302 iwk_reg_write(sc, SCD_QUEUECHAIN_SEL, 0);
2303
2304 /* initiate the tx queues */
2305 for (i = 0; i < IWK_NUM_QUEUES; i++) {
2306 iwk_reg_write(sc, SCD_QUEUE_RDPTR(i), 0);
2307 IWK_WRITE(sc, HBUS_TARG_WRPTR, (i << 8));
2308 iwk_mem_write(sc, sc->sc_scd_base +
2309 SCD_CONTEXT_QUEUE_OFFSET(i),
2310 (SCD_WIN_SIZE & 0x7f));
2311 iwk_mem_write(sc, sc->sc_scd_base +
2312 SCD_CONTEXT_QUEUE_OFFSET(i) + sizeof (uint32_t),
2313 (SCD_FRAME_LIMIT & 0x7f) << 16);
2314 }
/* enable interrupts for queues 0-7 */
2316 iwk_reg_write(sc, SCD_INTERRUPT_MASK,
2317 (1 << IWK_NUM_QUEUES) - 1);
2318 /* enable each channel 0-7 */
2319 iwk_reg_write(sc, SCD_TXFACT,
2320 SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
/*
* queues 0-7 map to FIFOs 0-7 and all queues work in
* FIFO mode (non-scheduler-ack)
*/
2325 for (i = 0; i < 7; i++) {
2326 iwk_reg_write(sc,
2327 SCD_QUEUE_STATUS_BITS(i),
2328 (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
2329 (i << SCD_QUEUE_STTS_REG_POS_TXF)|
2330 SCD_QUEUE_STTS_REG_MSK);
2331 }
2332 iwk_mac_access_exit(sc);
2333
2334 sc->sc_flags |= IWK_F_FW_INIT;
2335 cv_signal(&sc->sc_fw_cv);
2336 }
2337
2338 }
2339
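/*
* Soft interrupt handler: drain the rx queue up to the index written by
* the firmware in the shared page, dispatch each notification to the
* appropriate handler, then return the processed entries to the firmware.
*/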
2340 static uint_t
2341 /* LINTED: argument unused in function: unused */
2342 iwk_rx_softintr(caddr_t arg, caddr_t unused)
2343 {
2344 iwk_sc_t *sc = (iwk_sc_t *)arg;
2345 ieee80211com_t *ic = &sc->sc_ic;
2346 iwk_rx_desc_t *desc;
2347 iwk_rx_data_t *data;
2348 uint32_t index;
2349
2350 mutex_enter(&sc->sc_glock);
2351 if (sc->sc_rx_softint_pending != 1) {
2352 mutex_exit(&sc->sc_glock);
2353 return (DDI_INTR_UNCLAIMED);
2354 }
2355 /* disable interrupts */
2356 IWK_WRITE(sc, CSR_INT_MASK, 0);
2357 mutex_exit(&sc->sc_glock);
2358
/*
* the firmware has advanced the rx queue write index; fetch it
* and process the queued entries.
*/
2363 index = sc->sc_shared->val0 & 0xfff;
2364
2365 while (sc->sc_rxq.cur != index) {
2366 data = &sc->sc_rxq.data[sc->sc_rxq.cur];
2367 desc = (iwk_rx_desc_t *)data->dma_data.mem_va;
2368
2369 IWK_DBG((IWK_DEBUG_INTR, "rx notification index = %d"
2370 " cur = %d qid=%x idx=%d flags=%x type=%x len=%d\n",
2371 index, sc->sc_rxq.cur, desc->hdr.qid, desc->hdr.idx,
2372 desc->hdr.flags, desc->hdr.type, LE_32(desc->len)));
2373
/*
* replies other than the notification types below complete a
* pending host command; signal the thread waiting in iwk_cmd()
*/
2375 if (!(desc->hdr.qid & 0x80) &&
2376 (desc->hdr.type != REPLY_RX_PHY_CMD) &&
2377 (desc->hdr.type != REPLY_TX) &&
2378 (desc->hdr.type != REPLY_TX_PWR_TABLE_CMD) &&
2379 (desc->hdr.type != REPLY_PHY_CALIBRATION_CMD) &&
2380 (desc->hdr.type != SENSITIVITY_CMD))
2381 iwk_cmd_intr(sc, desc);
2382
2383 switch (desc->hdr.type) {
2384 case REPLY_4965_RX:
2385 iwk_rx_intr(sc, desc, data);
2386 break;
2387
2388 case REPLY_TX:
2389 iwk_tx_intr(sc, desc, data);
2390 break;
2391
2392 case REPLY_ALIVE:
2393 iwk_ucode_alive(sc, desc);
2394 break;
2395
2396 case CARD_STATE_NOTIFICATION:
2397 {
2398 uint32_t *status = (uint32_t *)(desc + 1);
2399
2400 IWK_DBG((IWK_DEBUG_RADIO, "state changed to %x\n",
2401 LE_32(*status)));
2402
2403 if (LE_32(*status) & 1) {
/*
* the RF-kill switch has been toggled OFF. Treat it as a
* hardware error; iwk_thread() recovers once the switch is
* toggled back ON.
*/
2410 cmn_err(CE_NOTE,
2411 "iwk_rx_softintr(): "
2412 "Radio transmitter is off\n");
2413 sc->sc_ostate = sc->sc_ic.ic_state;
2414 ieee80211_new_state(&sc->sc_ic,
2415 IEEE80211_S_INIT, -1);
2416 sc->sc_flags |=
2417 (IWK_F_HW_ERR_RECOVER | IWK_F_RADIO_OFF);
2418 }
2419 break;
2420 }
2421 case SCAN_START_NOTIFICATION:
2422 {
2423 iwk_start_scan_t *scan =
2424 (iwk_start_scan_t *)(desc + 1);
2425
2426 IWK_DBG((IWK_DEBUG_SCAN,
2427 "scanning channel %d status %x\n",
2428 scan->chan, LE_32(scan->status)));
2429
2430 ic->ic_curchan = &ic->ic_sup_channels[scan->chan];
2431 break;
2432 }
2433 case SCAN_COMPLETE_NOTIFICATION:
2434 {
2435 iwk_stop_scan_t *scan =
2436 (iwk_stop_scan_t *)(desc + 1);
2437
2438 IWK_DBG((IWK_DEBUG_SCAN,
2439 "completed channel %d (burst of %d) status %02x\n",
2440 scan->chan, scan->nchan, scan->status));
2441
2442 sc->sc_scan_pending++;
2443 break;
2444 }
2445 case STATISTICS_NOTIFICATION:
2446 /* handle statistics notification */
2447 iwk_statistics_notify(sc, desc);
2448 break;
2449 }
2450
2451 sc->sc_rxq.cur = (sc->sc_rxq.cur + 1) % RX_QUEUE_SIZE;
2452 }
2453
/*
* tell the firmware how far the driver has processed the rx queue
* by updating the write pointer.
*/
2458 index = (index == 0) ? RX_QUEUE_SIZE - 1 : index - 1;
2459 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, index & (~7));
2460
2461 mutex_enter(&sc->sc_glock);
2462 /* re-enable interrupts */
2463 IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2464 sc->sc_rx_softint_pending = 0;
2465 mutex_exit(&sc->sc_glock);
2466
2467 return (DDI_INTR_CLAIMED);
2468 }
2469
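/*
* Primary interrupt handler: acknowledge the interrupt causes, handle
* fatal firmware errors and RF-kill transitions directly, and trigger
* the soft interrupt for rx/tx notifications.
*/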
2470 static uint_t
2471 /* LINTED: argument unused in function: unused */
2472 iwk_intr(caddr_t arg, caddr_t unused)
2473 {
2474 iwk_sc_t *sc = (iwk_sc_t *)arg;
2475 uint32_t r, rfh;
2476
2477 mutex_enter(&sc->sc_glock);
2478
2479 if (sc->sc_flags & IWK_F_SUSPEND) {
2480 mutex_exit(&sc->sc_glock);
2481 return (DDI_INTR_UNCLAIMED);
2482 }
2483
2484 r = IWK_READ(sc, CSR_INT);
2485 if (r == 0 || r == 0xffffffff) {
2486 mutex_exit(&sc->sc_glock);
2487 return (DDI_INTR_UNCLAIMED);
2488 }
2489
2490 IWK_DBG((IWK_DEBUG_INTR, "interrupt reg %x\n", r));
2491
2492 rfh = IWK_READ(sc, CSR_FH_INT_STATUS);
2493 IWK_DBG((IWK_DEBUG_INTR, "FH interrupt reg %x\n", rfh));
2494 /* disable interrupts */
2495 IWK_WRITE(sc, CSR_INT_MASK, 0);
2496 /* ack interrupts */
2497 IWK_WRITE(sc, CSR_INT, r);
2498 IWK_WRITE(sc, CSR_FH_INT_STATUS, rfh);
2499
2500 if (sc->sc_soft_hdl == NULL) {
2501 mutex_exit(&sc->sc_glock);
2502 return (DDI_INTR_CLAIMED);
2503 }
2504 if (r & (BIT_INT_SWERROR | BIT_INT_ERR)) {
2505 cmn_err(CE_WARN, "fatal firmware error\n");
2506 mutex_exit(&sc->sc_glock);
2507 #ifdef DEBUG
2508 /* dump event and error logs to dmesg */
2509 iwk_write_error_log(sc);
2510 iwk_write_event_log(sc);
2511 #endif /* DEBUG */
2512 iwk_stop(sc);
2513 sc->sc_ostate = sc->sc_ic.ic_state;
2514
2515 /* not capable of fast recovery */
2516 if (!IWK_CHK_FAST_RECOVER(sc))
2517 ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
2518
2519 sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
2520 return (DDI_INTR_CLAIMED);
2521 }
2522
2523 if (r & BIT_INT_RF_KILL) {
2524 uint32_t tmp = IWK_READ(sc, CSR_GP_CNTRL);
2525 if (tmp & (1 << 27))
2526 cmn_err(CE_NOTE, "RF switch: radio on\n");
2527 }
2528
2529 if ((r & (BIT_INT_FH_RX | BIT_INT_SW_RX)) ||
2530 (rfh & FH_INT_RX_MASK)) {
2531 sc->sc_rx_softint_pending = 1;
2532 (void) ddi_intr_trigger_softint(sc->sc_soft_hdl, NULL);
2533 }
2534
2535 if (r & BIT_INT_ALIVE) {
2536 IWK_DBG((IWK_DEBUG_FW, "firmware initialized.\n"));
2537 }
2538
2539 /* re-enable interrupts */
2540 IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
2541 mutex_exit(&sc->sc_glock);
2542
2543 return (DDI_INTR_CLAIMED);
2544 }
2545
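/*
* Translate an 802.11 rate (in units of 500kbps) into the PLCP rate code
* expected by the 4965 firmware. Unknown rates map to 0.
*/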
2546 static uint8_t
2547 iwk_rate_to_plcp(int rate)
2548 {
2549 uint8_t ret;
2550
2551 switch (rate) {
2552 /* CCK rates */
2553 case 2:
2554 ret = 0xa;
2555 break;
2556 case 4:
2557 ret = 0x14;
2558 break;
2559 case 11:
2560 ret = 0x37;
2561 break;
2562 case 22:
2563 ret = 0x6e;
2564 break;
2565 /* OFDM rates */
2566 case 12:
2567 ret = 0xd;
2568 break;
2569 case 18:
2570 ret = 0xf;
2571 break;
2572 case 24:
2573 ret = 0x5;
2574 break;
2575 case 36:
2576 ret = 0x7;
2577 break;
2578 case 48:
2579 ret = 0x9;
2580 break;
2581 case 72:
2582 ret = 0xb;
2583 break;
2584 case 96:
2585 ret = 0x1;
2586 break;
2587 case 108:
2588 ret = 0x3;
2589 break;
2590 default:
2591 ret = 0;
2592 break;
2593 }
2594 return (ret);
2595 }
2596
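/*
* GLDv3 transmit entry point: walk the message chain and hand each frame
* to iwk_send(). Frames are dropped while the interface is not running;
* frames that cannot be queued are handed back to the mac layer for a
* later retry.
*/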
2597 static mblk_t *
2598 iwk_m_tx(void *arg, mblk_t *mp)
2599 {
2600 iwk_sc_t *sc = (iwk_sc_t *)arg;
2601 ieee80211com_t *ic = &sc->sc_ic;
2602 mblk_t *next;
2603
2604 if (sc->sc_flags & IWK_F_SUSPEND) {
2605 freemsgchain(mp);
2606 return (NULL);
2607 }
2608
2609 if (ic->ic_state != IEEE80211_S_RUN) {
2610 freemsgchain(mp);
2611 return (NULL);
2612 }
2613
2614 if ((sc->sc_flags & IWK_F_HW_ERR_RECOVER) &&
2615 IWK_CHK_FAST_RECOVER(sc)) {
2616 IWK_DBG((IWK_DEBUG_FW, "iwk_m_tx(): hold queue\n"));
2617 return (mp);
2618 }
2619
2620 while (mp != NULL) {
2621 next = mp->b_next;
2622 mp->b_next = NULL;
2623 if (iwk_send(ic, mp, IEEE80211_FC0_TYPE_DATA) != 0) {
2624 mp->b_next = next;
2625 break;
2626 }
2627 mp = next;
2628 }
2629 return (mp);
2630 }
2631
2632 /* ARGSUSED */
2633 static int
2634 iwk_send(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
2635 {
2636 iwk_sc_t *sc = (iwk_sc_t *)ic;
2637 iwk_tx_ring_t *ring;
2638 iwk_tx_desc_t *desc;
2639 iwk_tx_data_t *data;
2640 iwk_cmd_t *cmd;
2641 iwk_tx_cmd_t *tx;
2642 ieee80211_node_t *in;
2643 struct ieee80211_frame *wh;
2644 struct ieee80211_key *k = NULL;
2645 mblk_t *m, *m0;
2646 int rate, hdrlen, len, len0, mblen, off, err = IWK_SUCCESS;
2647 uint16_t masks = 0;
2648 uint8_t index, index1, index2;
2649
2650 ring = &sc->sc_txq[0];
2651 data = &ring->data[ring->cur];
2652 desc = data->desc;
2653 cmd = data->cmd;
2654 bzero(desc, sizeof (*desc));
2655 bzero(cmd, sizeof (*cmd));
2656
2657 mutex_enter(&sc->sc_tx_lock);
2658 if (sc->sc_flags & IWK_F_SUSPEND) {
2659 mutex_exit(&sc->sc_tx_lock);
2660 if ((type & IEEE80211_FC0_TYPE_MASK) !=
2661 IEEE80211_FC0_TYPE_DATA) {
2662 freemsg(mp);
2663 }
2664 err = IWK_FAIL;
2665 goto exit;
2666 }
2667
2668 if (ring->queued > ring->count - 64) {
2669 IWK_DBG((IWK_DEBUG_TX, "iwk_send(): no txbuf\n"));
2670 sc->sc_need_reschedule = 1;
2671 mutex_exit(&sc->sc_tx_lock);
2672 if ((type & IEEE80211_FC0_TYPE_MASK) !=
2673 IEEE80211_FC0_TYPE_DATA) {
2674 freemsg(mp);
2675 }
2676 sc->sc_tx_nobuf++;
2677 err = IWK_FAIL;
2678 goto exit;
2679 }
2680 mutex_exit(&sc->sc_tx_lock);
2681
2682 hdrlen = sizeof (struct ieee80211_frame);
2683
2684 m = allocb(msgdsize(mp) + 32, BPRI_MED);
if (m == NULL) { /* cannot allocate a buffer, drop this packet */
2686 cmn_err(CE_WARN,
2687 "iwk_send(): failed to allocate msgbuf\n");
2688 freemsg(mp);
2689 err = IWK_SUCCESS;
2690 goto exit;
2691 }
2692 for (off = 0, m0 = mp; m0 != NULL; m0 = m0->b_cont) {
2693 mblen = MBLKL(m0);
2694 (void) memcpy(m->b_rptr + off, m0->b_rptr, mblen);
2695 off += mblen;
2696 }
2697 m->b_wptr += off;
2698 freemsg(mp);
2699
2700 wh = (struct ieee80211_frame *)m->b_rptr;
2701
2702 if (ic->ic_opmode == IEEE80211_M_IBSS &&
2703 (!(IEEE80211_IS_MULTICAST(wh->i_addr1)))) {
2704 mutex_enter(&sc->sc_glock);
2705 mutex_enter(&sc->sc_ibss.node_tb_lock);
2706
2707 /*
2708 * search for node in ibss node table
2709 */
2710 for (index1 = IWK_STA_ID;
2711 index1 < IWK_STATION_COUNT; index1++) {
2712 if (sc->sc_ibss.ibss_node_tb[index1].used &&
2713 IEEE80211_ADDR_EQ(sc->sc_ibss.
2714 ibss_node_tb[index1].node.bssid,
2715 wh->i_addr1)) {
2716 break;
2717 }
2718 }
2719
/*
* the node was not found in the IBSS node table
*/
2723 if (index1 >= IWK_BROADCAST_ID) {
2724 err = iwk_clean_add_node_ibss(ic,
2725 wh->i_addr1, &index2);
2726 if (err != IWK_SUCCESS) {
2727 cmn_err(CE_WARN, "iwk_send(): "
2728 "failed to clean all nodes "
2729 "and add one node\n");
2730 mutex_exit(&sc->sc_ibss.node_tb_lock);
2731 mutex_exit(&sc->sc_glock);
2732 freemsg(m);
2733 sc->sc_tx_err++;
2734 err = IWK_SUCCESS;
2735 goto exit;
2736 }
2737 index = index2;
2738 } else {
2739 index = index1;
2740 }
2741 mutex_exit(&sc->sc_ibss.node_tb_lock);
2742 mutex_exit(&sc->sc_glock);
2743 }
2744
2745 in = ieee80211_find_txnode(ic, wh->i_addr1);
2746 if (in == NULL) {
2747 cmn_err(CE_WARN, "iwk_send(): failed to find tx node\n");
2748 freemsg(m);
2749 sc->sc_tx_err++;
2750 err = IWK_SUCCESS;
2751 goto exit;
2752 }
2753 (void) ieee80211_encap(ic, m, in);
2754
2755 cmd->hdr.type = REPLY_TX;
2756 cmd->hdr.flags = 0;
2757 cmd->hdr.qid = ring->qid;
2758 cmd->hdr.idx = ring->cur;
2759
2760 tx = (iwk_tx_cmd_t *)cmd->data;
2761 tx->tx_flags = 0;
2762
2763 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2764 tx->tx_flags &= ~(LE_32(TX_CMD_FLG_ACK_MSK));
2765 } else {
2766 tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2767 }
2768
2769 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
2770 k = ieee80211_crypto_encap(ic, m);
2771 if (k == NULL) {
2772 freemsg(m);
2773 sc->sc_tx_err++;
2774 err = IWK_SUCCESS;
2775 goto exit;
2776 }
2777
2778 if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
2779 tx->sec_ctl = 2; /* for CCMP */
2780 tx->tx_flags |= LE_32(TX_CMD_FLG_ACK_MSK);
2781 (void) memcpy(&tx->key, k->wk_key, k->wk_keylen);
2782 }
2783
2784 /* packet header may have moved, reset our local pointer */
2785 wh = (struct ieee80211_frame *)m->b_rptr;
2786 }
2787
2788 len = msgdsize(m);
2789
2790 #ifdef DEBUG
2791 if (iwk_dbg_flags & IWK_DEBUG_TX)
2792 ieee80211_dump_pkt((uint8_t *)wh, hdrlen, 0, 0);
2793 #endif
2794
/* pick a transmit rate */
2796 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2797 IEEE80211_FC0_TYPE_MGT) {
2798 /* mgmt frames are sent at 1M */
2799 rate = in->in_rates.ir_rates[0];
2800 } else {
/*
* Rate control is done here in software for now; hardware rate
* scaling may be used later, roughly as follows.
* For a management frame:
* tx->initial_rate_index = LINK_QUAL_MAX_RETRY_NUM - 1;
* For a data frame:
* tx->tx_flags |= (LE_32(TX_CMD_FLG_STA_RATE_MSK));
* rate = in->in_rates.ir_rates[in->in_txrate];
* tx->initial_rate_index = 1;
*
* Currently the tx rate is set in the tx command flags to the
* maximum value: 54Mbps for 11g and 11Mbps for 11b.
*/
2814
2815 if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) {
2816 rate = ic->ic_fixed_rate;
2817 } else {
2818 rate = in->in_rates.ir_rates[in->in_txrate];
2819 }
2820 }
2821 rate &= IEEE80211_RATE_VAL;
2822 IWK_DBG((IWK_DEBUG_TX, "tx rate[%d of %d] = %x",
2823 in->in_txrate, in->in_rates.ir_nrates, rate));
2824
2825 tx->tx_flags |= (LE_32(TX_CMD_FLG_SEQ_CTL_MSK));
2826
2827 len0 = roundup(4 + sizeof (iwk_tx_cmd_t) + hdrlen, 4);
2828 if (len0 != (4 + sizeof (iwk_tx_cmd_t) + hdrlen))
2829 tx->tx_flags |= LE_32(TX_CMD_FLG_MH_PAD_MSK);
2830
2831 /* retrieve destination node's id */
2832 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2833 tx->sta_id = IWK_BROADCAST_ID;
2834 } else {
2835 if (ic->ic_opmode == IEEE80211_M_IBSS)
2836 tx->sta_id = index;
2837 else
2838 tx->sta_id = IWK_AP_ID;
2839 }
2840
2841 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
2842 IEEE80211_FC0_TYPE_MGT) {
2843 /* tell h/w to set timestamp in probe responses */
2844 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2845 IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2846 tx->tx_flags |= LE_32(TX_CMD_FLG_TSF_MSK);
2847
2848 if (((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2849 IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
2850 ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2851 IEEE80211_FC0_SUBTYPE_REASSOC_REQ))
2852 tx->timeout.pm_frame_timeout = LE_16(3);
2853 else
2854 tx->timeout.pm_frame_timeout = LE_16(2);
2855 } else
2856 tx->timeout.pm_frame_timeout = 0;
2857 if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
2858 masks |= RATE_MCS_CCK_MSK;
2859
2860 masks |= RATE_MCS_ANT_B_MSK;
2861 tx->rate.r.rate_n_flags = LE_32(iwk_rate_to_plcp(rate) | masks);
2862
2863 IWK_DBG((IWK_DEBUG_TX, "tx flag = %x",
2864 LE_32(tx->tx_flags)));
2865
2866 tx->rts_retry_limit = 60;
2867 tx->data_retry_limit = 15;
2868
2869 tx->stop_time.life_time = LE_32(0xffffffff);
2870
2871 tx->len = LE_16(len);
2872
2873 tx->dram_lsb_ptr =
2874 LE_32(data->paddr_cmd + 4 + offsetof(iwk_tx_cmd_t, scratch));
2875 tx->dram_msb_ptr = 0;
2876 tx->driver_txop = 0;
2877 tx->next_frame_len = 0;
2878
2879 (void) memcpy(tx + 1, m->b_rptr, hdrlen);
2880 m->b_rptr += hdrlen;
2881 (void) memcpy(data->dma_data.mem_va, m->b_rptr, len - hdrlen);
2882
2883 IWK_DBG((IWK_DEBUG_TX, "sending data: qid=%d idx=%d len=%d",
2884 ring->qid, ring->cur, len));
2885
/*
* the first segment includes the tx command plus the 802.11 header;
* the second includes the remainder of the 802.11 frame.
*/
2890 desc->val0 = 2 << 24;
2891 desc->pa[0].tb1_addr = data->paddr_cmd;
2892 desc->pa[0].val1 = ((len0 << 4) & 0xfff0) |
2893 ((data->dma_data.cookie.dmac_address & 0xffff) << 16);
2894 desc->pa[0].val2 =
2895 ((data->dma_data.cookie.dmac_address & 0xffff0000) >> 16) |
2896 ((len - hdrlen) << 20);
2897 IWK_DBG((IWK_DEBUG_TX, "phy addr1 = 0x%x phy addr2 = 0x%x "
2898 "len1 = 0x%x, len2 = 0x%x val1 = 0x%x val2 = 0x%x",
2899 data->paddr_cmd, data->dma_data.cookie.dmac_address,
2900 len0, len - hdrlen, LE_32(desc->pa[0].val1),
2901 LE_32(desc->pa[0].val2)));
2902
2903 mutex_enter(&sc->sc_tx_lock);
2904 ring->queued++;
2905 mutex_exit(&sc->sc_tx_lock);
2906
2907 /* kick ring */
2908 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2909 tfd_offset[ring->cur].val = 8 + len;
2910 if (ring->cur < IWK_MAX_WIN_SIZE) {
2911 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
2912 tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8 + len;
2913 }
2914
2915 IWK_DMA_SYNC(data->dma_data, DDI_DMA_SYNC_FORDEV);
2916 IWK_DMA_SYNC(ring->dma_desc, DDI_DMA_SYNC_FORDEV);
2917
2918 ring->cur = (ring->cur + 1) % ring->count;
2919 IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
2920 freemsg(m);
2921 /* release node reference */
2922 ieee80211_free_node(in);
2923
2924 ic->ic_stats.is_tx_bytes += len;
2925 ic->ic_stats.is_tx_frags++;
2926
2927 if (sc->sc_tx_timer == 0)
2928 sc->sc_tx_timer = 4;
2929
2930 exit:
2931 return (err);
2932 }
2933
2934 static void
2935 iwk_m_ioctl(void* arg, queue_t *wq, mblk_t *mp)
2936 {
2937 iwk_sc_t *sc = (iwk_sc_t *)arg;
2938 ieee80211com_t *ic = &sc->sc_ic;
2939
2940 enum ieee80211_opmode oldmod;
2941 iwk_tx_power_table_cmd_t txpower;
2942 iwk_add_sta_t node;
2943 iwk_link_quality_cmd_t link_quality;
2944 uint16_t masks = 0;
2945 int i, err, err1;
2946
2947 oldmod = ic->ic_opmode;
2948
2949 err = ieee80211_ioctl(ic, wq, mp);
2950
2951 /*
2952 * return to STA mode
2953 */
2954 if ((0 == err || ENETRESET == err) && (oldmod != ic->ic_opmode) &&
2955 (ic->ic_opmode == IEEE80211_M_STA)) {
2956 /* configure rxon */
2957 (void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t));
2958 IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
2959 IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
2960 sc->sc_config.chan =
2961 LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
2962 sc->sc_config.flags = LE_32(RXON_FLG_TSF2HOST_MSK |
2963 RXON_FLG_AUTO_DETECT_MSK |
2964 RXON_FLG_BAND_24G_MSK);
2965 sc->sc_config.flags &= LE_32(~RXON_FLG_CCK_MSK);
2966 switch (ic->ic_opmode) {
2967 case IEEE80211_M_STA:
2968 sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
2969 sc->sc_config.filter_flags |=
2970 LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
2971 RXON_FILTER_DIS_DECRYPT_MSK |
2972 RXON_FILTER_DIS_GRP_DECRYPT_MSK);
2973 break;
2974 case IEEE80211_M_IBSS:
2975 case IEEE80211_M_AHDEMO:
2976 sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
2977 sc->sc_config.flags |=
2978 LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
2979 sc->sc_config.filter_flags =
2980 LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
2981 RXON_FILTER_DIS_DECRYPT_MSK |
2982 RXON_FILTER_DIS_GRP_DECRYPT_MSK);
2983 break;
2984 case IEEE80211_M_HOSTAP:
2985 sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
2986 break;
2987 case IEEE80211_M_MONITOR:
2988 sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
2989 sc->sc_config.filter_flags |=
2990 LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
2991 RXON_FILTER_CTL2HOST_MSK |
2992 RXON_FILTER_PROMISC_MSK);
2993 break;
2994 }
2995 sc->sc_config.cck_basic_rates = 0x0f;
2996 sc->sc_config.ofdm_basic_rates = 0xff;
2997 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
2998 sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
2999 /* set antenna */
3000 mutex_enter(&sc->sc_glock);
3001 sc->sc_config.rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3002 (0x7 << RXON_RX_CHAIN_VALID_POS) |
3003 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3004 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3005 err1 = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3006 sizeof (iwk_rxon_cmd_t), 1);
3007 if (err1 != IWK_SUCCESS) {
3008 cmn_err(CE_WARN, "iwk_m_ioctl(): "
3009 "failed to set configure command"
3010 " please run (ifconfig unplumb and"
3011 " ifconfig plumb)\n");
3012 }
/*
* set Tx power for 2.4GHz channels
* (needs further investigation; Tx power is fixed for now)
*/
3017 (void) memset(&txpower, 0, sizeof (txpower));
3018 txpower.band = 1; /* for 2.4G */
3019 txpower.channel = sc->sc_config.chan;
3020 txpower.channel_normal_width = 0;
3021 for (i = 0; i < POWER_TABLE_NUM_HT_OFDM_ENTRIES; i++) {
3022 txpower.tx_power.ht_ofdm_power[i].
3023 s.ramon_tx_gain = LE_16(0x3f3f);
3024 txpower.tx_power.ht_ofdm_power[i].
3025 s.dsp_predis_atten = LE_16(110 | (110 << 8));
3026 }
3027 txpower.tx_power.legacy_cck_power.s.
3028 ramon_tx_gain = LE_16(0x3f3f);
3029 txpower.tx_power.legacy_cck_power.s.
3030 dsp_predis_atten = LE_16(110 | (110 << 8));
3031 err1 = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &txpower,
3032 sizeof (txpower), 1);
3033 if (err1 != IWK_SUCCESS) {
cmn_err(CE_WARN, "iwk_m_ioctl(): failed to set tx power;"
" please run 'ifconfig unplumb'"
" and then 'ifconfig plumb'\n");
3037 }
3038 /* add broadcast node so that we can send broadcast frame */
3039 (void) memset(&node, 0, sizeof (node));
3040 (void) memset(node.bssid, 0xff, 6);
3041 node.id = IWK_BROADCAST_ID;
3042 err1 = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
3043 if (err1 != IWK_SUCCESS) {
3044 cmn_err(CE_WARN, "iwk_m_ioctl(): "
3045 "failed to add broadcast node\n");
3046 }
3047
3048 /* TX_LINK_QUALITY cmd */
3049 (void) memset(&link_quality, 0, sizeof (link_quality));
3050 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3051 masks |= RATE_MCS_CCK_MSK;
3052 masks |= RATE_MCS_ANT_B_MSK;
3053 masks &= ~RATE_MCS_ANT_A_MSK;
3054 link_quality.rate_n_flags[i] =
3055 LE_32(iwk_rate_to_plcp(2) | masks);
3056 }
3057 link_quality.general_params.single_stream_ant_msk = 2;
3058 link_quality.general_params.dual_stream_ant_msk = 3;
3059 link_quality.agg_params.agg_dis_start_th = 3;
3060 link_quality.agg_params.agg_time_limit = LE_16(4000);
3061 link_quality.sta_id = IWK_BROADCAST_ID;
3062 err1 = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3063 sizeof (link_quality), 1);
3064 if (err1 != IWK_SUCCESS) {
3065 cmn_err(CE_WARN, "iwk_m_ioctl(): "
3066 "failed to config link quality table\n");
3067 }
3068 mutex_exit(&sc->sc_glock);
3069 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3070 }
3071
3072 if (err == ENETRESET) {
/*
* Special handling for hidden-AP connections. The driver
* must issue only one scan per 'connect' CLI command, so
* when connecting to a hidden AP the scan is only sent out
* over the air once the desired ESSID of the AP is known.
*/
3081 if (ic->ic_des_esslen) {
3082 if (sc->sc_flags & IWK_F_RUNNING) {
3083 iwk_m_stop(sc);
3084 (void) iwk_m_start(sc);
3085 (void) ieee80211_new_state(ic,
3086 IEEE80211_S_SCAN, -1);
3087 }
3088 }
3089 }
3090 }
3091
3092 /*
3093 * callback functions for set/get properties
3094 */
3095
3096 static int
3097 iwk_m_getprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3098 uint_t wldp_length, void *wldp_buf)
3099 {
3100 int err = 0;
3101 iwk_sc_t *sc = (iwk_sc_t *)arg;
3102
3103 err = ieee80211_getprop(&sc->sc_ic, pr_name, wldp_pr_num,
3104 wldp_length, wldp_buf);
3105
3106 return (err);
3107 }
3108
3109 static int
3110 iwk_m_setprop(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3111 uint_t wldp_length, const void *wldp_buf)
3112 {
3113 int err;
3114 iwk_sc_t *sc = (iwk_sc_t *)arg;
3115 ieee80211com_t *ic = &sc->sc_ic;
3116
3117 err = ieee80211_setprop(ic, pr_name, wldp_pr_num, wldp_length,
3118 wldp_buf);
3119
3120 if (err == ENETRESET) {
3121 if (ic->ic_des_esslen) {
3122 if (sc->sc_flags & IWK_F_RUNNING) {
3123 iwk_m_stop(sc);
3124 (void) iwk_m_start(sc);
3125 (void) ieee80211_new_state(ic,
3126 IEEE80211_S_SCAN, -1);
3127 }
3128 }
3129 err = 0;
3130 }
3131
3132 return (err);
3133 }
3134
3135 static void
3136 iwk_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t wldp_pr_num,
3137 mac_prop_info_handle_t mph)
3138 {
3139 iwk_sc_t *sc = (iwk_sc_t *)arg;
3140 ieee80211com_t *ic = &sc->sc_ic;
3141
3142 ieee80211_propinfo(ic, pr_name, wldp_pr_num, mph);
3143 }
3144
3145 /*ARGSUSED*/
3146 static int
3147 iwk_m_stat(void *arg, uint_t stat, uint64_t *val)
3148 {
3149 iwk_sc_t *sc = (iwk_sc_t *)arg;
3150 ieee80211com_t *ic = &sc->sc_ic;
3151 ieee80211_node_t *in;
3152
3153 mutex_enter(&sc->sc_glock);
3154 switch (stat) {
3155 case MAC_STAT_IFSPEED:
3156 in = ic->ic_bss;
3157 *val = ((ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) ?
3158 IEEE80211_RATE(in->in_txrate) :
3159 ic->ic_fixed_rate) / 2 * 1000000;
3160 break;
3161 case MAC_STAT_NOXMTBUF:
3162 *val = sc->sc_tx_nobuf;
3163 break;
3164 case MAC_STAT_NORCVBUF:
3165 *val = sc->sc_rx_nobuf;
3166 break;
3167 case MAC_STAT_IERRORS:
3168 *val = sc->sc_rx_err;
3169 break;
3170 case MAC_STAT_RBYTES:
3171 *val = ic->ic_stats.is_rx_bytes;
3172 break;
3173 case MAC_STAT_IPACKETS:
3174 *val = ic->ic_stats.is_rx_frags;
3175 break;
3176 case MAC_STAT_OBYTES:
3177 *val = ic->ic_stats.is_tx_bytes;
3178 break;
3179 case MAC_STAT_OPACKETS:
3180 *val = ic->ic_stats.is_tx_frags;
3181 break;
3182 case MAC_STAT_OERRORS:
3183 case WIFI_STAT_TX_FAILED:
3184 *val = sc->sc_tx_err;
3185 break;
3186 case WIFI_STAT_TX_RETRANS:
3187 *val = sc->sc_tx_retries;
3188 break;
3189 case WIFI_STAT_FCS_ERRORS:
3190 case WIFI_STAT_WEP_ERRORS:
3191 case WIFI_STAT_TX_FRAGS:
3192 case WIFI_STAT_MCAST_TX:
3193 case WIFI_STAT_RTS_SUCCESS:
3194 case WIFI_STAT_RTS_FAILURE:
3195 case WIFI_STAT_ACK_FAILURE:
3196 case WIFI_STAT_RX_FRAGS:
3197 case WIFI_STAT_MCAST_RX:
3198 case WIFI_STAT_RX_DUPS:
3199 mutex_exit(&sc->sc_glock);
3200 return (ieee80211_stat(ic, stat, val));
3201 default:
3202 mutex_exit(&sc->sc_glock);
3203 return (ENOTSUP);
3204 }
3205 mutex_exit(&sc->sc_glock);
3206
3207 return (IWK_SUCCESS);
3208
3209 }
3210
3211 static int
3212 iwk_m_start(void *arg)
3213 {
3214 iwk_sc_t *sc = (iwk_sc_t *)arg;
3215 ieee80211com_t *ic = &sc->sc_ic;
3216 int err;
3217
3218 err = iwk_init(sc);
3219
3220 if (err != IWK_SUCCESS) {
/*
* The hardware failed to initialize (e.g. the RF switch is
* OFF). Return success so that 'plumb' succeeds; iwk_thread()
* retries the initialization in the background.
*/
3226 mutex_enter(&sc->sc_glock);
3227 sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
3228 mutex_exit(&sc->sc_glock);
3229 return (IWK_SUCCESS);
3230 }
3231
3232 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3233
3234 mutex_enter(&sc->sc_glock);
3235 sc->sc_flags |= IWK_F_RUNNING;
3236 mutex_exit(&sc->sc_glock);
3237
3238 return (IWK_SUCCESS);
3239 }
3240
3241 static void
3242 iwk_m_stop(void *arg)
3243 {
3244 iwk_sc_t *sc = (iwk_sc_t *)arg;
3245 ieee80211com_t *ic = &sc->sc_ic;
3246
3247 iwk_stop(sc);
3248 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3249 ieee80211_stop_watchdog(ic);
3250 mutex_enter(&sc->sc_mt_lock);
3251 sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
3252 sc->sc_flags &= ~IWK_F_RATE_AUTO_CTL;
3253 mutex_exit(&sc->sc_mt_lock);
3254 mutex_enter(&sc->sc_glock);
3255 sc->sc_flags &= ~IWK_F_RUNNING;
3256 mutex_exit(&sc->sc_glock);
3257 }
3258
3259 /*ARGSUSED*/
3260 static int
3261 iwk_m_unicst(void *arg, const uint8_t *macaddr)
3262 {
3263 iwk_sc_t *sc = (iwk_sc_t *)arg;
3264 ieee80211com_t *ic = &sc->sc_ic;
3265 int err;
3266
3267 if (!IEEE80211_ADDR_EQ(ic->ic_macaddr, macaddr)) {
3268 IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
3269 mutex_enter(&sc->sc_glock);
3270 err = iwk_config(sc);
3271 mutex_exit(&sc->sc_glock);
3272 if (err != IWK_SUCCESS) {
3273 cmn_err(CE_WARN,
3274 "iwk_m_unicst(): "
3275 "failed to configure device\n");
3276 goto fail;
3277 }
3278 }
3279 return (IWK_SUCCESS);
3280 fail:
3281 return (err);
3282 }
3283
3284 /*ARGSUSED*/
3285 static int
3286 iwk_m_multicst(void *arg, boolean_t add, const uint8_t *m)
3287 {
3288 return (IWK_SUCCESS);
3289 }
3290
3291 /*ARGSUSED*/
3292 static int
3293 iwk_m_promisc(void *arg, boolean_t on)
3294 {
3295 return (IWK_SUCCESS);
3296 }
3297
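/*
* Per-instance worker thread: polls the RF-kill switch, drives recovery
* from fatal hardware errors, handles lazy resume, paces active scans,
* runs the AMRR rate-control timer and implements the tx watchdog.
*/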
3298 static void
3299 iwk_thread(iwk_sc_t *sc)
3300 {
3301 ieee80211com_t *ic = &sc->sc_ic;
3302 clock_t clk;
3303 int times = 0, err, n = 0, timeout = 0;
3304 uint32_t tmp;
3305
3306 mutex_enter(&sc->sc_mt_lock);
3307 while (sc->sc_mf_thread_switch) {
3308 tmp = IWK_READ(sc, CSR_GP_CNTRL);
3309 if (tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) {
3310 sc->sc_flags &= ~IWK_F_RADIO_OFF;
3311 } else {
3312 sc->sc_flags |= IWK_F_RADIO_OFF;
3313 }
3314 /*
3315 * If in SUSPEND or the RF is OFF, do nothing
3316 */
3317 if ((sc->sc_flags & IWK_F_SUSPEND) ||
3318 (sc->sc_flags & IWK_F_RADIO_OFF)) {
3319 mutex_exit(&sc->sc_mt_lock);
3320 delay(drv_usectohz(100000));
3321 mutex_enter(&sc->sc_mt_lock);
3322 continue;
3323 }
3324
/*
* recover from a fatal hardware error
*/
3328 if (ic->ic_mach &&
3329 (sc->sc_flags & IWK_F_HW_ERR_RECOVER)) {
3330
3331 IWK_DBG((IWK_DEBUG_FW,
3332 "iwk_thread(): "
3333 "try to recover fatal hw error: %d\n", times++));
3334
3335 iwk_stop(sc);
3336
3337 if (IWK_CHK_FAST_RECOVER(sc)) {
3338 /* save runtime configuration */
3339 bcopy(&sc->sc_config, &sc->sc_config_save,
3340 sizeof (sc->sc_config));
3341 } else {
3342 mutex_exit(&sc->sc_mt_lock);
3343 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
3344 delay(drv_usectohz(2000000 + n*500000));
3345 mutex_enter(&sc->sc_mt_lock);
3346 }
3347
3348 err = iwk_init(sc);
3349 if (err != IWK_SUCCESS) {
3350 n++;
3351 if (n < 20)
3352 continue;
3353 }
3354 n = 0;
3355 if (!err)
3356 sc->sc_flags |= IWK_F_RUNNING;
3357
3358 if (!IWK_CHK_FAST_RECOVER(sc) ||
3359 iwk_fast_recover(sc) != IWK_SUCCESS) {
3360 sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
3361
3362 mutex_exit(&sc->sc_mt_lock);
3363 delay(drv_usectohz(2000000));
3364 if (sc->sc_ostate != IEEE80211_S_INIT)
3365 ieee80211_new_state(ic,
3366 IEEE80211_S_SCAN, 0);
3367 mutex_enter(&sc->sc_mt_lock);
3368 }
3369 }
3370
3371 if (ic->ic_mach && (sc->sc_flags & IWK_F_LAZY_RESUME)) {
3372 IWK_DBG((IWK_DEBUG_RESUME,
3373 "iwk_thread(): lazy resume\n"));
3374
3375 sc->sc_flags &= ~IWK_F_LAZY_RESUME;
3376 mutex_exit(&sc->sc_mt_lock);
3377 /*
3378 * NB: under WPA mode, this call hangs (door problem?)
3379 * when called in iwk_attach() and iwk_detach() while
3380 * system is in the procedure of CPR. To be safe, let
3381 * the thread do this.
3382 */
3383 ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);
3384 mutex_enter(&sc->sc_mt_lock);
3385 }
3386
3387 if (ic->ic_mach &&
3388 (sc->sc_flags & IWK_F_SCANNING) && sc->sc_scan_pending) {
3389 IWK_DBG((IWK_DEBUG_SCAN,
3390 "iwk_thread(): "
3391 "wait for probe response\n"));
3392 sc->sc_scan_pending--;
3393 mutex_exit(&sc->sc_mt_lock);
3394 delay(drv_usectohz(200000));
3395 if (sc->sc_flags & IWK_F_SCANNING)
3396 ieee80211_next_scan(ic);
3397 mutex_enter(&sc->sc_mt_lock);
3398 }
3399
3400 /*
3401 * rate ctl
3402 */
3403 if (ic->ic_mach &&
3404 (sc->sc_flags & IWK_F_RATE_AUTO_CTL)) {
3405 clk = ddi_get_lbolt();
3406 if (clk > sc->sc_clk + drv_usectohz(500000)) {
3407 iwk_amrr_timeout(sc);
3408 }
3409 }
3410
3411 if ((ic->ic_state == IEEE80211_S_RUN) &&
3412 (ic->ic_beaconmiss++ > 50)) { /* 5 seconds */
3413 cmn_err(CE_WARN, "iwk: beacon missed for 5 seconds\n");
3414 (void) ieee80211_new_state(ic,
3415 IEEE80211_S_INIT, -1);
3416 }
3417
3418 mutex_exit(&sc->sc_mt_lock);
3419 delay(drv_usectohz(100000));
3420 mutex_enter(&sc->sc_mt_lock);
3421
3422 if (sc->sc_tx_timer) {
3423 timeout++;
3424 if (timeout == 10) {
3425 sc->sc_tx_timer--;
3426 if (sc->sc_tx_timer == 0) {
3427 sc->sc_flags |= IWK_F_HW_ERR_RECOVER;
3428 sc->sc_ostate = IEEE80211_S_RUN;
3429 IWK_DBG((IWK_DEBUG_FW,
3430 "iwk_thread(): try to recover from"
3431 " 'send fail\n"));
3432 }
3433 timeout = 0;
3434 }
3435 }
3436
3437 }
3438 sc->sc_mf_thread = NULL;
3439 cv_signal(&sc->sc_mt_cv);
3440 mutex_exit(&sc->sc_mt_lock);
3441 }
3442
3443
3444 /*
3445 * Send a command to the firmware.
3446 */
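/*
* The caller must hold sc_glock. In synchronous mode (async == 0) the
* routine waits up to two seconds for the completion signalled by
* iwk_cmd_intr().
*/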
3447 static int
3448 iwk_cmd(iwk_sc_t *sc, int code, const void *buf, int size, int async)
3449 {
3450 iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
3451 iwk_tx_desc_t *desc;
3452 iwk_cmd_t *cmd;
3453 clock_t clk;
3454
3455 ASSERT(size <= sizeof (cmd->data));
3456 ASSERT(mutex_owned(&sc->sc_glock));
3457
3458 IWK_DBG((IWK_DEBUG_CMD, "iwk_cmd() code[%d]", code));
3459 desc = ring->data[ring->cur].desc;
3460 cmd = ring->data[ring->cur].cmd;
3461
3462 cmd->hdr.type = (uint8_t)code;
3463 cmd->hdr.flags = 0;
3464 cmd->hdr.qid = ring->qid;
3465 cmd->hdr.idx = ring->cur;
3466 (void) memcpy(cmd->data, buf, size);
3467 (void) memset(desc, 0, sizeof (*desc));
3468
3469 desc->val0 = 1 << 24;
3470 desc->pa[0].tb1_addr =
3471 (uint32_t)(ring->data[ring->cur].paddr_cmd & 0xffffffff);
3472 desc->pa[0].val1 = ((4 + size) << 4) & 0xfff0;
3473
3474 /* kick cmd ring XXX */
3475 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3476 tfd_offset[ring->cur].val = 8;
3477 if (ring->cur < IWK_MAX_WIN_SIZE) {
3478 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3479 tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
3480 }
3481 ring->cur = (ring->cur + 1) % ring->count;
3482 IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3483
3484 if (async)
3485 return (IWK_SUCCESS);
3486 else {
3487 sc->sc_flags &= ~IWK_F_CMD_DONE;
3488 clk = ddi_get_lbolt() + drv_usectohz(2000000);
3489 while (!(sc->sc_flags & IWK_F_CMD_DONE)) {
3490 if (cv_timedwait(&sc->sc_cmd_cv, &sc->sc_glock, clk) <
3491 0)
3492 break;
3493 }
3494 if (sc->sc_flags & IWK_F_CMD_DONE)
3495 return (IWK_SUCCESS);
3496 else
3497 return (IWK_FAIL);
3498 }
3499 }
3500
3501 static void
3502 iwk_set_led(iwk_sc_t *sc, uint8_t id, uint8_t off, uint8_t on)
3503 {
3504 iwk_led_cmd_t led;
3505
3506 led.interval = LE_32(100000); /* unit: 100ms */
3507 led.id = id;
3508 led.off = off;
3509 led.on = on;
3510
3511 (void) iwk_cmd(sc, REPLY_LEDS_CMD, &led, sizeof (led), 1);
3512 }
3513
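/*
* Prepare the hardware for authentication/association: update the RXON
* configuration with the target AP's parameters, recalibrate tx power,
* add the AP node and program its link quality (rate scaling) table.
*/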
3514 static int
3515 iwk_hw_set_before_auth(iwk_sc_t *sc)
3516 {
3517 ieee80211com_t *ic = &sc->sc_ic;
3518 ieee80211_node_t *in = ic->ic_bss;
3519 iwk_add_sta_t node;
3520 iwk_link_quality_cmd_t link_quality;
3521 struct ieee80211_rateset rs;
3522 uint16_t masks = 0, rate;
3523 int i, err;
3524
3525 if (in->in_chan == IEEE80211_CHAN_ANYC) {
3526 cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3527 "channel (%d) isn't in proper range\n",
3528 LE_16(ieee80211_chan2ieee(ic, in->in_chan)));
3529 return (IWK_FAIL);
3530 }
3531
3532 /* update adapter's configuration according the info of target AP */
3533 IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
3534 sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, in->in_chan));
3535 if (ic->ic_curmode == IEEE80211_MODE_11B) {
3536 sc->sc_config.cck_basic_rates = 0x03;
3537 sc->sc_config.ofdm_basic_rates = 0;
3538 } else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
3539 (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
3540 sc->sc_config.cck_basic_rates = 0;
3541 sc->sc_config.ofdm_basic_rates = 0x15;
3542 } else { /* assume 802.11b/g */
3543 sc->sc_config.cck_basic_rates = 0x0f;
3544 sc->sc_config.ofdm_basic_rates = 0xff;
3545 }
3546
3547 sc->sc_config.flags &= ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
3548 RXON_FLG_SHORT_SLOT_MSK);
3549
3550 if (ic->ic_flags & IEEE80211_F_SHSLOT)
3551 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_SLOT_MSK);
3552 else
3553 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_SLOT_MSK);
3554
3555 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
3556 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
3557 else
3558 sc->sc_config.flags &= LE_32(~RXON_FLG_SHORT_PREAMBLE_MSK);
3559
3560 IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x "
3561 "filter_flags %x cck %x ofdm %x"
3562 " bssid:%02x:%02x:%02x:%02x:%02x:%2x\n",
3563 LE_16(sc->sc_config.chan), LE_32(sc->sc_config.flags),
3564 LE_32(sc->sc_config.filter_flags),
3565 sc->sc_config.cck_basic_rates, sc->sc_config.ofdm_basic_rates,
3566 sc->sc_config.bssid[0], sc->sc_config.bssid[1],
3567 sc->sc_config.bssid[2], sc->sc_config.bssid[3],
3568 sc->sc_config.bssid[4], sc->sc_config.bssid[5]));
3569 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3570 sizeof (iwk_rxon_cmd_t), 1);
3571 if (err != IWK_SUCCESS) {
3572 cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3573 " failed to config chan%d\n",
3574 sc->sc_config.chan);
3575 return (err);
3576 }
3577
3578 /* obtain current temperature of chipset */
3579 sc->sc_tempera = iwk_curr_tempera(sc);
3580
3581 /* make Tx power calibration to determine the gains of DSP and radio */
3582 err = iwk_tx_power_calibration(sc);
3583 if (err) {
3584 cmn_err(CE_WARN, "iwk_hw_set_before_auth():"
3585 "failed to set tx power table\n");
3586 return (err);
3587 }
3588
3589 /* add default AP node */
3590 (void) memset(&node, 0, sizeof (node));
3591 IEEE80211_ADDR_COPY(node.bssid, in->in_bssid);
3592 node.id = IWK_AP_ID;
3593 err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 1);
3594 if (err != IWK_SUCCESS) {
3595 cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
3596 "failed to add BSS node\n");
3597 return (err);
3598 }
3599
3600 /* TX_LINK_QUALITY cmd */
3601 (void) memset(&link_quality, 0, sizeof (link_quality));
3602 rs = ic->ic_sup_rates[ieee80211_chan2mode(ic, ic->ic_curchan)];
3603 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3604 if (i < rs.ir_nrates)
rate = rs.ir_rates[rs.ir_nrates - i - 1]; /* highest rates first */
3606 else
3607 rate = 2;
3608 if (rate == 2 || rate == 4 || rate == 11 || rate == 22)
3609 masks |= RATE_MCS_CCK_MSK;
3610 masks |= RATE_MCS_ANT_B_MSK;
3611 masks &= ~RATE_MCS_ANT_A_MSK;
3612 link_quality.rate_n_flags[i] =
3613 LE_32(iwk_rate_to_plcp(rate) | masks);
3614 }
3615
3616 link_quality.general_params.single_stream_ant_msk = 2;
3617 link_quality.general_params.dual_stream_ant_msk = 3;
3618 link_quality.agg_params.agg_dis_start_th = 3;
3619 link_quality.agg_params.agg_time_limit = LE_16(4000);
3620 link_quality.sta_id = IWK_AP_ID;
3621 err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3622 sizeof (link_quality), 1);
3623 if (err != IWK_SUCCESS) {
3624 cmn_err(CE_WARN, "iwk_hw_set_before_auth(): "
3625 "failed to config link quality table\n");
3626 return (err);
3627 }
3628
3629 return (IWK_SUCCESS);
3630 }
3631
3632 /*
* Send a scan request (assemble the scan command) to the firmware.
3634 */
3635 static int
3636 iwk_scan(iwk_sc_t *sc)
3637 {
3638 ieee80211com_t *ic = &sc->sc_ic;
3639 iwk_tx_ring_t *ring = &sc->sc_txq[IWK_CMD_QUEUE_NUM];
3640 iwk_tx_desc_t *desc;
3641 iwk_tx_data_t *data;
3642 iwk_cmd_t *cmd;
3643 iwk_scan_hdr_t *hdr;
3644 iwk_scan_chan_t *chan;
3645 struct ieee80211_frame *wh;
3646 ieee80211_node_t *in = ic->ic_bss;
3647 uint8_t essid[IEEE80211_NWID_LEN+1];
3648 struct ieee80211_rateset *rs;
3649 enum ieee80211_phymode mode;
3650 uint8_t *frm;
3651 int i, pktlen, nrates;
3652
3653 data = &ring->data[ring->cur];
3654 desc = data->desc;
3655 cmd = (iwk_cmd_t *)data->dma_data.mem_va;
3656
3657 cmd->hdr.type = REPLY_SCAN_CMD;
3658 cmd->hdr.flags = 0;
3659 cmd->hdr.qid = ring->qid;
3660 cmd->hdr.idx = ring->cur | 0x40;
3661
3662 hdr = (iwk_scan_hdr_t *)cmd->data;
3663 (void) memset(hdr, 0, sizeof (iwk_scan_hdr_t));
3664 hdr->nchan = 1;
3665 hdr->quiet_time = LE_16(50);
3666 hdr->quiet_plcp_th = LE_16(1);
3667
3668 hdr->flags = LE_32(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK);
3669 hdr->rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3670 (0x7 << RXON_RX_CHAIN_VALID_POS) |
3671 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3672 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3673
3674 hdr->tx_cmd.tx_flags = LE_32(TX_CMD_FLG_SEQ_CTL_MSK);
3675 hdr->tx_cmd.sta_id = IWK_BROADCAST_ID;
3676 hdr->tx_cmd.stop_time.life_time = LE_32(0xffffffff);
3677 hdr->tx_cmd.tx_flags |= LE_32(0x200);
3678 hdr->tx_cmd.rate.r.rate_n_flags = LE_32(iwk_rate_to_plcp(2));
3679 hdr->tx_cmd.rate.r.rate_n_flags |=
3680 LE_32(RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
3681 hdr->direct_scan[0].len = ic->ic_des_esslen;
3682 hdr->direct_scan[0].id = IEEE80211_ELEMID_SSID;
3683
3684 if (ic->ic_des_esslen) {
3685 bcopy(ic->ic_des_essid, essid, ic->ic_des_esslen);
3686 essid[ic->ic_des_esslen] = '\0';
3687 IWK_DBG((IWK_DEBUG_SCAN, "directed scan %s\n", essid));
3688
3689 bcopy(ic->ic_des_essid, hdr->direct_scan[0].ssid,
3690 ic->ic_des_esslen);
3691 } else {
3692 bzero(hdr->direct_scan[0].ssid,
3693 sizeof (hdr->direct_scan[0].ssid));
3694 }
3695 /*
3696 * a probe request frame is required after the REPLY_SCAN_CMD
3697 */
3698 wh = (struct ieee80211_frame *)(hdr + 1);
3699 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
3700 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
3701 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
3702 (void) memset(wh->i_addr1, 0xff, 6);
3703 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_macaddr);
3704 (void) memset(wh->i_addr3, 0xff, 6);
3705 *(uint16_t *)&wh->i_dur[0] = 0;
3706 *(uint16_t *)&wh->i_seq[0] = 0;
3707
3708 frm = (uint8_t *)(wh + 1);
3709
3710 /* essid IE */
3711 if (in->in_esslen) {
3712 bcopy(in->in_essid, essid, in->in_esslen);
3713 essid[in->in_esslen] = '\0';
3714 IWK_DBG((IWK_DEBUG_SCAN, "probe with ESSID %s\n",
3715 essid));
3716 }
3717 *frm++ = IEEE80211_ELEMID_SSID;
3718 *frm++ = in->in_esslen;
3719 (void) memcpy(frm, in->in_essid, in->in_esslen);
3720 frm += in->in_esslen;
3721
3722 mode = ieee80211_chan2mode(ic, ic->ic_curchan);
3723 rs = &ic->ic_sup_rates[mode];
3724
3725 /* supported rates IE */
3726 *frm++ = IEEE80211_ELEMID_RATES;
3727 nrates = rs->ir_nrates;
3728 if (nrates > IEEE80211_RATE_SIZE)
3729 nrates = IEEE80211_RATE_SIZE;
3730 *frm++ = (uint8_t)nrates;
3731 (void) memcpy(frm, rs->ir_rates, nrates);
3732 frm += nrates;
3733
3734 /* supported xrates IE */
3735 if (rs->ir_nrates > IEEE80211_RATE_SIZE) {
3736 nrates = rs->ir_nrates - IEEE80211_RATE_SIZE;
3737 *frm++ = IEEE80211_ELEMID_XRATES;
3738 *frm++ = (uint8_t)nrates;
3739 (void) memcpy(frm, rs->ir_rates + IEEE80211_RATE_SIZE, nrates);
3740 frm += nrates;
3741 }
3742
/* optional IE (usually for WPA) */
3744 if (ic->ic_opt_ie != NULL) {
3745 (void) memcpy(frm, ic->ic_opt_ie, ic->ic_opt_ie_len);
3746 frm += ic->ic_opt_ie_len;
3747 }
3748
3749 /* setup length of probe request */
3750 hdr->tx_cmd.len = LE_16(_PTRDIFF(frm, wh));
3751 hdr->len = LE_16(hdr->nchan * sizeof (iwk_scan_chan_t) +
3752 LE_16(hdr->tx_cmd.len) + sizeof (iwk_scan_hdr_t));
3753
/*
* the attributes of the scan channels are required after the probe
* request frame.
*/
3758 chan = (iwk_scan_chan_t *)frm;
3759 for (i = 1; i <= hdr->nchan; i++, chan++) {
3760 if (ic->ic_des_esslen) {
3761 chan->type = 3;
3762 } else {
3763 chan->type = 1;
3764 }
3765
3766 chan->chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
3767 chan->tpc.tx_gain = 0x3f;
3768 chan->tpc.dsp_atten = 110;
3769 chan->active_dwell = LE_16(50);
3770 chan->passive_dwell = LE_16(120);
3771
3772 frm += sizeof (iwk_scan_chan_t);
3773 }
3774
3775 pktlen = _PTRDIFF(frm, cmd);
3776
3777 (void) memset(desc, 0, sizeof (*desc));
3778 desc->val0 = 1 << 24;
3779 desc->pa[0].tb1_addr =
3780 (uint32_t)(data->dma_data.cookie.dmac_address & 0xffffffff);
3781 desc->pa[0].val1 = (pktlen << 4) & 0xfff0;
3782
/*
* filling the byte count table may not be necessary for a command,
* but fill it here anyway.
*/
3787 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3788 tfd_offset[ring->cur].val = 8;
3789 if (ring->cur < IWK_MAX_WIN_SIZE) {
3790 sc->sc_shared->queues_byte_cnt_tbls[ring->qid].
3791 tfd_offset[IWK_QUEUE_SIZE + ring->cur].val = 8;
3792 }
3793
3794 /* kick cmd ring */
3795 ring->cur = (ring->cur + 1) % ring->count;
3796 IWK_WRITE(sc, HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3797
3798 return (IWK_SUCCESS);
3799 }
3800
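/*
* Runtime configuration after the firmware is loaded: power mode,
* bluetooth coexistence, RXON, tx power calibration, the broadcast node
* and its link quality table.
*/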
3801 static int
3802 iwk_config(iwk_sc_t *sc)
3803 {
3804 ieee80211com_t *ic = &sc->sc_ic;
3805 iwk_powertable_cmd_t powertable;
3806 iwk_bt_cmd_t bt;
3807 iwk_add_sta_t node;
3808 iwk_link_quality_cmd_t link_quality;
3809 int i, err;
3810 uint16_t masks = 0;
3811
3812 /*
* set power mode; power management is disabled for now and will be enabled later
3814 */
3815 (void) memset(&powertable, 0, sizeof (powertable));
3816 powertable.flags = LE_16(0x8);
3817 err = iwk_cmd(sc, POWER_TABLE_CMD, &powertable,
3818 sizeof (powertable), 0);
3819 if (err != IWK_SUCCESS) {
3820 cmn_err(CE_WARN, "iwk_config(): failed to set power mode\n");
3821 return (err);
3822 }
3823
3824 /* configure bt coexistence */
3825 (void) memset(&bt, 0, sizeof (bt));
3826 bt.flags = 3;
3827 bt.lead_time = 0xaa;
3828 bt.max_kill = 1;
3829 err = iwk_cmd(sc, REPLY_BT_CONFIG, &bt,
3830 sizeof (bt), 0);
3831 if (err != IWK_SUCCESS) {
3832 cmn_err(CE_WARN,
3833 "iwk_config(): "
3834 "failed to configurate bt coexistence\n");
3835 return (err);
3836 }
3837
3838 /* configure rxon */
3839 (void) memset(&sc->sc_config, 0, sizeof (iwk_rxon_cmd_t));
3840 IEEE80211_ADDR_COPY(sc->sc_config.node_addr, ic->ic_macaddr);
3841 IEEE80211_ADDR_COPY(sc->sc_config.wlap_bssid, ic->ic_macaddr);
3842 sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic, ic->ic_curchan));
3843 sc->sc_config.flags = LE_32(RXON_FLG_TSF2HOST_MSK |
3844 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_BAND_24G_MSK);
3845 sc->sc_config.flags &= LE_32(~RXON_FLG_CCK_MSK);
3846 switch (ic->ic_opmode) {
3847 case IEEE80211_M_STA:
3848 sc->sc_config.dev_type = RXON_DEV_TYPE_ESS;
3849 sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3850 RXON_FILTER_DIS_DECRYPT_MSK |
3851 RXON_FILTER_DIS_GRP_DECRYPT_MSK);
3852 break;
3853 case IEEE80211_M_IBSS:
3854 case IEEE80211_M_AHDEMO:
3855 sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
3856 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
3857 sc->sc_config.filter_flags = LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3858 RXON_FILTER_DIS_DECRYPT_MSK |
3859 RXON_FILTER_DIS_GRP_DECRYPT_MSK);
3860 break;
3861 case IEEE80211_M_HOSTAP:
3862 sc->sc_config.dev_type = RXON_DEV_TYPE_AP;
3863 break;
3864 case IEEE80211_M_MONITOR:
3865 sc->sc_config.dev_type = RXON_DEV_TYPE_SNIFFER;
3866 sc->sc_config.filter_flags |= LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
3867 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
3868 break;
3869 }
3870 sc->sc_config.cck_basic_rates = 0x0f;
3871 sc->sc_config.ofdm_basic_rates = 0xff;
3872
3873 sc->sc_config.ofdm_ht_single_stream_basic_rates = 0xff;
3874 sc->sc_config.ofdm_ht_dual_stream_basic_rates = 0xff;
3875
3876 /* set antenna */
3877
3878 sc->sc_config.rx_chain = LE_16(RXON_RX_CHAIN_DRIVER_FORCE_MSK |
3879 (0x7 << RXON_RX_CHAIN_VALID_POS) |
3880 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
3881 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
3882
3883 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
3884 sizeof (iwk_rxon_cmd_t), 0);
3885 if (err != IWK_SUCCESS) {
3886 cmn_err(CE_WARN, "iwk_config(): "
3887 "failed to set configure command\n");
3888 return (err);
3889 }
3890 /* obtain current temperature of chipset */
3891 sc->sc_tempera = iwk_curr_tempera(sc);
3892
/* perform Tx power calibration to determine the DSP and radio gains */
3894 err = iwk_tx_power_calibration(sc);
3895 if (err) {
3896 cmn_err(CE_WARN, "iwk_config(): "
3897 "failed to set tx power table\n");
3898 return (err);
3899 }
3900
3901 /* add broadcast node so that we can send broadcast frame */
3902 (void) memset(&node, 0, sizeof (node));
3903 (void) memset(node.bssid, 0xff, 6);
3904 node.id = IWK_BROADCAST_ID;
3905 err = iwk_cmd(sc, REPLY_ADD_STA, &node, sizeof (node), 0);
3906 if (err != IWK_SUCCESS) {
3907 cmn_err(CE_WARN, "iwk_config(): "
3908 "failed to add broadcast node\n");
3909 return (err);
3910 }
3911
/* configure TX link quality for the broadcast node */
3913 (void) memset(&link_quality, 0, sizeof (link_quality));
3914 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3915 masks |= RATE_MCS_CCK_MSK;
3916 masks |= RATE_MCS_ANT_B_MSK;
3917 masks &= ~RATE_MCS_ANT_A_MSK;
3918 link_quality.rate_n_flags[i] =
3919 LE_32(iwk_rate_to_plcp(2) | masks);
3920 }
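	/*
	 * Every retry stage above is filled with the same entry: the PLCP
	 * code for rate 2 (1 Mbit/s CCK, rates being in 500 kbit/s units)
	 * with antenna B selected and antenna A masked off, so all
	 * retransmissions for the broadcast node fall back to the most
	 * robust rate.
	 */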
3921
3922 link_quality.general_params.single_stream_ant_msk = 2;
3923 link_quality.general_params.dual_stream_ant_msk = 3;
3924 link_quality.agg_params.agg_dis_start_th = 3;
3925 link_quality.agg_params.agg_time_limit = LE_16(4000);
3926 link_quality.sta_id = IWK_BROADCAST_ID;
3927 err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD, &link_quality,
3928 sizeof (link_quality), 0);
3929 if (err != IWK_SUCCESS) {
3930 cmn_err(CE_WARN, "iwk_config(): "
3931 "failed to config link quality table\n");
3932 return (err);
3933 }
3934
3935 return (IWK_SUCCESS);
3936 }
3937
3938 static void
3939 iwk_stop_master(iwk_sc_t *sc)
3940 {
3941 uint32_t tmp;
3942 int n;
3943
3944 tmp = IWK_READ(sc, CSR_RESET);
3945 IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_STOP_MASTER);
3946
3947 tmp = IWK_READ(sc, CSR_GP_CNTRL);
3948 if ((tmp & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE) ==
3949 CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE)
3950 return;
3951
3952 for (n = 0; n < 2000; n++) {
3953 if (IWK_READ(sc, CSR_RESET) &
3954 CSR_RESET_REG_FLAG_MASTER_DISABLED)
3955 break;
3956 DELAY(1000);
3957 }
3958 if (n == 2000)
3959 IWK_DBG((IWK_DEBUG_HW,
3960 "timeout waiting for master stop\n"));
3961 }
3962
3963 static int
3964 iwk_power_up(iwk_sc_t *sc)
3965 {
3966 uint32_t tmp;
3967
3968 iwk_mac_access_enter(sc);
3969 tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
3970 tmp &= ~APMG_PS_CTRL_REG_MSK_POWER_SRC;
3971 tmp |= APMG_PS_CTRL_REG_VAL_POWER_SRC_VMAIN;
3972 iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
3973 iwk_mac_access_exit(sc);
3974
3975 DELAY(5000);
3976 return (IWK_SUCCESS);
3977 }
3978
3979 static int
3980 iwk_preinit(iwk_sc_t *sc)
3981 {
3982 uint32_t tmp;
3983 int n;
3984 uint8_t vlink;
3985
3986 /* clear any pending interrupts */
3987 IWK_WRITE(sc, CSR_INT, 0xffffffff);
3988
3989 tmp = IWK_READ(sc, CSR_GIO_CHICKEN_BITS);
3990 IWK_WRITE(sc, CSR_GIO_CHICKEN_BITS,
3991 tmp | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
3992
3993 tmp = IWK_READ(sc, CSR_GP_CNTRL);
3994 IWK_WRITE(sc, CSR_GP_CNTRL, tmp | CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
3995
3996 /* wait for clock ready */
3997 for (n = 0; n < 1000; n++) {
3998 if (IWK_READ(sc, CSR_GP_CNTRL) &
3999 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY)
4000 break;
4001 DELAY(10);
4002 }
4003 if (n == 1000) {
4004 cmn_err(CE_WARN,
4005 "iwk_preinit(): timeout waiting for clock ready\n");
4006 return (ETIMEDOUT);
4007 }
4008 iwk_mac_access_enter(sc);
4009 tmp = iwk_reg_read(sc, APMG_CLK_CTRL_REG);
4010 iwk_reg_write(sc, APMG_CLK_CTRL_REG, tmp |
4011 APMG_CLK_REG_VAL_DMA_CLK_RQT | APMG_CLK_REG_VAL_BSM_CLK_RQT);
4012
4013 DELAY(20);
4014 tmp = iwk_reg_read(sc, ALM_APMG_PCIDEV_STT);
4015 iwk_reg_write(sc, ALM_APMG_PCIDEV_STT, tmp |
4016 APMG_DEV_STATE_REG_VAL_L1_ACTIVE_DISABLE);
4017 iwk_mac_access_exit(sc);
4018
4019 IWK_WRITE(sc, CSR_INT_COALESCING, 512 / 32); /* ??? */
4020
4021 (void) iwk_power_up(sc);
4022
4023 if ((sc->sc_rev & 0x80) == 0x80 && (sc->sc_rev & 0x7f) < 8) {
4024 tmp = ddi_get32(sc->sc_cfg_handle,
4025 (uint32_t *)(sc->sc_cfg_base + 0xe8));
4026 ddi_put32(sc->sc_cfg_handle,
4027 (uint32_t *)(sc->sc_cfg_base + 0xe8),
4028 tmp & ~(1 << 11));
4029 }
4030
4031
4032 vlink = ddi_get8(sc->sc_cfg_handle,
4033 (uint8_t *)(sc->sc_cfg_base + 0xf0));
4034 ddi_put8(sc->sc_cfg_handle, (uint8_t *)(sc->sc_cfg_base + 0xf0),
4035 vlink & ~2);
4036
4037 tmp = IWK_READ(sc, CSR_SW_VER);
4038 tmp |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
4039 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
4040 CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R;
4041 IWK_WRITE(sc, CSR_SW_VER, tmp);
4042
/* make sure each part of the hardware is powered */
4044 iwk_mac_access_enter(sc);
4045 tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
4046 tmp |= APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
4047 iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4048 DELAY(5);
4049 tmp = iwk_reg_read(sc, ALM_APMG_PS_CTL);
4050 tmp &= ~APMG_PS_CTRL_REG_VAL_ALM_R_RESET_REQ;
4051 iwk_reg_write(sc, ALM_APMG_PS_CTL, tmp);
4052 iwk_mac_access_exit(sc);
4053 return (IWK_SUCCESS);
4054 }
4055
4056 /*
 * set up semaphore flag to own the EEPROM
4058 */
4059 static int iwk_eep_sem_down(iwk_sc_t *sc)
4060 {
4061 int count1, count2;
4062 uint32_t tmp;
4063
4064 for (count1 = 0; count1 < 1000; count1++) {
4065 tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
4066 IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4067 tmp | CSR_HW_IF_CONFIG_REG_EEP_SEM);
4068
4069 for (count2 = 0; count2 < 2; count2++) {
4070 if (IWK_READ(sc, CSR_HW_IF_CONFIG_REG) &
4071 CSR_HW_IF_CONFIG_REG_EEP_SEM)
4072 return (IWK_SUCCESS);
4073 DELAY(10000);
4074 }
4075 }
4076 return (IWK_FAIL);
4077 }
4078
4079 /*
 * reset semaphore flag to release the EEPROM
4081 */
4082 static void iwk_eep_sem_up(iwk_sc_t *sc)
4083 {
4084 uint32_t tmp;
4085
4086 tmp = IWK_READ(sc, CSR_HW_IF_CONFIG_REG);
4087 IWK_WRITE(sc, CSR_HW_IF_CONFIG_REG,
4088 tmp & (~CSR_HW_IF_CONFIG_REG_EEP_SEM));
4089 }
4090
4091 /*
 * This function loads all information from the EEPROM into the iwk_eep
 * structure embedded in the iwk_sc_t structure
4094 */
4095 static int iwk_eep_load(iwk_sc_t *sc)
4096 {
4097 int i, rr;
4098 uint32_t rv, tmp, eep_gp;
4099 uint16_t addr, eep_sz = sizeof (sc->sc_eep_map);
4100 uint16_t *eep_p = (uint16_t *)&sc->sc_eep_map;
4101
4102 /* read eeprom gp register in CSR */
4103 eep_gp = IWK_READ(sc, CSR_EEPROM_GP);
4104 if ((eep_gp & CSR_EEPROM_GP_VALID_MSK) ==
4105 CSR_EEPROM_GP_BAD_SIGNATURE) {
4106 cmn_err(CE_WARN, "EEPROM not found\n");
4107 return (IWK_FAIL);
4108 }
4109
4110 rr = iwk_eep_sem_down(sc);
4111 if (rr != 0) {
4112 cmn_err(CE_WARN, "failed to own EEPROM\n");
4113 return (IWK_FAIL);
4114 }
4115
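	/*
	 * Read the EEPROM one 16-bit word at a time: write the word address
	 * (shifted left by one) to CSR_EEPROM_REG, clear bit 1 of the
	 * register, then poll bit 0 until the read completes; the 16-bit
	 * word is returned in bits 31:16 of the register.
	 */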
4116 for (addr = 0; addr < eep_sz; addr += 2) {
4117 IWK_WRITE(sc, CSR_EEPROM_REG, addr<<1);
4118 tmp = IWK_READ(sc, CSR_EEPROM_REG);
4119 IWK_WRITE(sc, CSR_EEPROM_REG, tmp & ~(0x2));
4120
4121 for (i = 0; i < 10; i++) {
4122 rv = IWK_READ(sc, CSR_EEPROM_REG);
4123 if (rv & 1)
4124 break;
4125 DELAY(10);
4126 }
4127
4128 if (!(rv & 1)) {
4129 cmn_err(CE_WARN, "time out when read EEPROM\n");
4130 iwk_eep_sem_up(sc);
4131 return (IWK_FAIL);
4132 }
4133
4134 eep_p[addr/2] = LE_16(rv >> 16);
4135 }
4136
4137 iwk_eep_sem_up(sc);
4138 return (IWK_SUCCESS);
4139 }
4140
4141 /*
4142 * init mac address in ieee80211com_t struct
4143 */
4144 static void iwk_get_mac_from_eep(iwk_sc_t *sc)
4145 {
4146 ieee80211com_t *ic = &sc->sc_ic;
4147 struct iwk_eep *ep = &sc->sc_eep_map;
4148
4149 IEEE80211_ADDR_COPY(ic->ic_macaddr, ep->mac_address);
4150
4151 IWK_DBG((IWK_DEBUG_EEPROM, "mac:%2x:%2x:%2x:%2x:%2x:%2x\n",
4152 ic->ic_macaddr[0], ic->ic_macaddr[1], ic->ic_macaddr[2],
4153 ic->ic_macaddr[3], ic->ic_macaddr[4], ic->ic_macaddr[5]));
4154 }
4155
4156 static int
4157 iwk_init(iwk_sc_t *sc)
4158 {
4159 int qid, n, err;
4160 clock_t clk;
4161 uint32_t tmp;
4162
4163 mutex_enter(&sc->sc_glock);
4164 sc->sc_flags &= ~IWK_F_FW_INIT;
4165
4166 (void) iwk_preinit(sc);
4167
4168 tmp = IWK_READ(sc, CSR_GP_CNTRL);
4169 if (!(tmp & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) {
4170 cmn_err(CE_NOTE, "iwk_init(): Radio transmitter is off\n");
4171 goto fail1;
4172 }
4173
4174 /* init Rx ring */
4175 iwk_mac_access_enter(sc);
4176 IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
4177
4178 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
4179 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
4180 sc->sc_rxq.dma_desc.cookie.dmac_address >> 8);
4181
4182 IWK_WRITE(sc, FH_RSCSR_CHNL0_STTS_WPTR_REG,
4183 ((uint32_t)(sc->sc_dma_sh.cookie.dmac_address +
4184 offsetof(struct iwk_shared, val0)) >> 4));
4185
4186 IWK_WRITE(sc, FH_MEM_RCSR_CHNL0_CONFIG_REG,
4187 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
4188 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
4189 IWK_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
4190 (RX_QUEUE_SIZE_LOG <<
4191 FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
4192 iwk_mac_access_exit(sc);
4193 IWK_WRITE(sc, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
4194 (RX_QUEUE_SIZE - 1) & ~0x7);
4195
4196 /* init Tx rings */
4197 iwk_mac_access_enter(sc);
4198 iwk_reg_write(sc, SCD_TXFACT, 0);
4199
4200 /* keep warm page */
4201 iwk_reg_write(sc, IWK_FH_KW_MEM_ADDR_REG,
4202 sc->sc_dma_kw.cookie.dmac_address >> 4);
4203
4204 for (qid = 0; qid < IWK_NUM_QUEUES; qid++) {
4205 IWK_WRITE(sc, FH_MEM_CBBC_QUEUE(qid),
4206 sc->sc_txq[qid].dma_desc.cookie.dmac_address >> 8);
4207 IWK_WRITE(sc, IWK_FH_TCSR_CHNL_TX_CONFIG_REG(qid),
4208 IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
4209 IWK_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
4210 }
4211 iwk_mac_access_exit(sc);
4212
4213 /* clear "radio off" and "disable command" bits */
4214 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4215 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR,
4216 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4217
4218 /* clear any pending interrupts */
4219 IWK_WRITE(sc, CSR_INT, 0xffffffff);
4220
4221 /* enable interrupts */
4222 IWK_WRITE(sc, CSR_INT_MASK, CSR_INI_SET_MASK);
4223
4224 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4225 IWK_WRITE(sc, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
4226
4227 /*
 * back up the ucode data section for future use.
4229 */
4230 (void) memcpy(sc->sc_dma_fw_data_bak.mem_va,
4231 sc->sc_dma_fw_data.mem_va,
4232 sc->sc_dma_fw_data.alength);
4233
4234 for (n = 0; n < 2; n++) {
4235 /* load firmware init segment into NIC */
4236 err = iwk_load_firmware(sc);
4237 if (err != IWK_SUCCESS) {
4238 cmn_err(CE_WARN, "iwk_init(): "
4239 "failed to setup boot firmware\n");
4240 continue;
4241 }
4242
/* now press "execute" to start running */
4244 IWK_WRITE(sc, CSR_RESET, 0);
4245 break;
4246 }
4247 if (n == 2) {
4248 cmn_err(CE_WARN, "iwk_init(): failed to load firmware\n");
4249 goto fail1;
4250 }
/* ...and wait at most two seconds for the adapter to initialize */
4252 clk = ddi_get_lbolt() + drv_usectohz(2000000);
4253 while (!(sc->sc_flags & IWK_F_FW_INIT)) {
4254 if (cv_timedwait(&sc->sc_fw_cv, &sc->sc_glock, clk) < 0)
4255 break;
4256 }
4257 if (!(sc->sc_flags & IWK_F_FW_INIT)) {
4258 cmn_err(CE_WARN,
4259 "iwk_init(): timeout waiting for firmware init\n");
4260 goto fail1;
4261 }
4262
4263 /*
 * at this point the firmware is loaded; configure the hardware
 * through the uCode API, including rxon, txpower, etc.
4266 */
4267 err = iwk_config(sc);
4268 if (err) {
4269 cmn_err(CE_WARN, "iwk_init(): failed to configure device\n");
4270 goto fail1;
4271 }
4272
4273 /* at this point, hardware may receive beacons :) */
4274 mutex_exit(&sc->sc_glock);
4275 return (IWK_SUCCESS);
4276
4277 fail1:
4278 err = IWK_FAIL;
4279 mutex_exit(&sc->sc_glock);
4280 return (err);
4281 }
4282
4283 static void
4284 iwk_stop(iwk_sc_t *sc)
4285 {
4286 uint32_t tmp;
4287 int i;
4288
4289 if (!(sc->sc_flags & IWK_F_QUIESCED))
4290 mutex_enter(&sc->sc_glock);
4291
4292 IWK_WRITE(sc, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
4293 /* disable interrupts */
4294 IWK_WRITE(sc, CSR_INT_MASK, 0);
4295 IWK_WRITE(sc, CSR_INT, CSR_INI_SET_MASK);
4296 IWK_WRITE(sc, CSR_FH_INT_STATUS, 0xffffffff);
4297
4298 /* reset all Tx rings */
4299 for (i = 0; i < IWK_NUM_QUEUES; i++)
4300 iwk_reset_tx_ring(sc, &sc->sc_txq[i]);
4301
4302 /* reset Rx ring */
4303 iwk_reset_rx_ring(sc);
4304
4305 iwk_mac_access_enter(sc);
4306 iwk_reg_write(sc, ALM_APMG_CLK_DIS, APMG_CLK_REG_VAL_DMA_CLK_RQT);
4307 iwk_mac_access_exit(sc);
4308
4309 DELAY(5);
4310
4311 iwk_stop_master(sc);
4312
4313 sc->sc_tx_timer = 0;
4314 sc->sc_flags &= ~IWK_F_SCANNING;
4315 sc->sc_scan_pending = 0;
4316
4317 tmp = IWK_READ(sc, CSR_RESET);
4318 IWK_WRITE(sc, CSR_RESET, tmp | CSR_RESET_REG_FLAG_SW_RESET);
4319
4320 if (!(sc->sc_flags & IWK_F_QUIESCED))
4321 mutex_exit(&sc->sc_glock);
4322 }
4323
4324 /*
4325 * Naive implementation of the Adaptive Multi Rate Retry algorithm:
4326 * "IEEE 802.11 Rate Adaptation: A Practical Approach"
4327 * Mathieu Lacage, Hossein Manshaei, Thierry Turletti
4328 * INRIA Sophia - Projet Planete
4329 * http://www-sop.inria.fr/rapports/sophia/RR-5208.html
4330 */
4331 #define is_success(amrr) \
4332 ((amrr)->retrycnt < (amrr)->txcnt / 10)
4333 #define is_failure(amrr) \
4334 ((amrr)->retrycnt > (amrr)->txcnt / 3)
4335 #define is_enough(amrr) \
4336 ((amrr)->txcnt > 100)
4337 #define is_min_rate(in) \
4338 ((in)->in_txrate == 0)
4339 #define is_max_rate(in) \
4340 ((in)->in_txrate == (in)->in_rates.ir_nrates - 1)
4341 #define increase_rate(in) \
4342 ((in)->in_txrate++)
4343 #define decrease_rate(in) \
4344 ((in)->in_txrate--)
4345 #define reset_cnt(amrr) \
4346 { (amrr)->txcnt = (amrr)->retrycnt = 0; }
4347
4348 #define IWK_AMRR_MIN_SUCCESS_THRESHOLD 1
4349 #define IWK_AMRR_MAX_SUCCESS_THRESHOLD 15
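
/*
 * With the macros above, a sampling period is "successful" when fewer than
 * 10% of the transmitted frames needed a retry, a "failure" when more than
 * a third did, and a decision is only taken once more than 100 frames have
 * been counted.  success_threshold is the number of consecutive successful
 * periods required before the rate is raised.
 */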
4350
4351 static void
4352 iwk_amrr_init(iwk_amrr_t *amrr)
4353 {
4354 amrr->success = 0;
4355 amrr->recovery = 0;
4356 amrr->txcnt = amrr->retrycnt = 0;
4357 amrr->success_threshold = IWK_AMRR_MIN_SUCCESS_THRESHOLD;
4358 }
4359
4360 static void
4361 iwk_amrr_timeout(iwk_sc_t *sc)
4362 {
4363 ieee80211com_t *ic = &sc->sc_ic;
4364
4365 IWK_DBG((IWK_DEBUG_RATECTL, "iwk_amrr_timeout() enter\n"));
4366 if (ic->ic_opmode == IEEE80211_M_STA)
4367 iwk_amrr_ratectl(NULL, ic->ic_bss);
4368 else
4369 ieee80211_iterate_nodes(&ic->ic_sta, iwk_amrr_ratectl, NULL);
4370 sc->sc_clk = ddi_get_lbolt();
4371 }
4372
4373 /* ARGSUSED */
4374 static void
4375 iwk_amrr_ratectl(void *arg, ieee80211_node_t *in)
4376 {
4377 iwk_amrr_t *amrr = (iwk_amrr_t *)in;
4378 int need_change = 0;
4379
4380 if (is_success(amrr) && is_enough(amrr)) {
4381 amrr->success++;
4382 if (amrr->success >= amrr->success_threshold &&
4383 !is_max_rate(in)) {
4384 amrr->recovery = 1;
4385 amrr->success = 0;
4386 increase_rate(in);
4387 IWK_DBG((IWK_DEBUG_RATECTL,
4388 "AMRR increasing rate %d (txcnt=%d retrycnt=%d)\n",
4389 in->in_txrate, amrr->txcnt, amrr->retrycnt));
4390 need_change = 1;
4391 } else {
4392 amrr->recovery = 0;
4393 }
4394 } else if (is_failure(amrr)) {
4395 amrr->success = 0;
4396 if (!is_min_rate(in)) {
4397 if (amrr->recovery) {
4398 amrr->success_threshold++;
4399 if (amrr->success_threshold >
4400 IWK_AMRR_MAX_SUCCESS_THRESHOLD)
4401 amrr->success_threshold =
4402 IWK_AMRR_MAX_SUCCESS_THRESHOLD;
4403 } else {
4404 amrr->success_threshold =
4405 IWK_AMRR_MIN_SUCCESS_THRESHOLD;
4406 }
4407 decrease_rate(in);
4408 IWK_DBG((IWK_DEBUG_RATECTL,
4409 "AMRR decreasing rate %d (txcnt=%d retrycnt=%d)\n",
4410 in->in_txrate, amrr->txcnt, amrr->retrycnt));
4411 need_change = 1;
4412 }
4413 amrr->recovery = 0; /* paper is incorrect */
4414 }
4415
4416 if (is_enough(amrr) || need_change)
4417 reset_cnt(amrr);
4418 }
4419
4420 /*
 * calculate the 4965 chipset's temperature in Kelvin from the data in
 * the init alive and statistics notifications.
 * The details are described in the iwk_calibration.h file
4424 */
4425 static int32_t iwk_curr_tempera(iwk_sc_t *sc)
4426 {
4427 int32_t tempera;
4428 int32_t r1, r2, r3;
4429 uint32_t r4_u;
4430 int32_t r4_s;
4431
4432 if (iwk_is_fat_channel(sc)) {
4433 r1 = (int32_t)LE_32(sc->sc_card_alive_init.therm_r1[1]);
4434 r2 = (int32_t)LE_32(sc->sc_card_alive_init.therm_r2[1]);
4435 r3 = (int32_t)LE_32(sc->sc_card_alive_init.therm_r3[1]);
4436 r4_u = LE_32(sc->sc_card_alive_init.therm_r4[1]);
4437 } else {
4438 r1 = (int32_t)LE_32(sc->sc_card_alive_init.therm_r1[0]);
4439 r2 = (int32_t)LE_32(sc->sc_card_alive_init.therm_r2[0]);
4440 r3 = (int32_t)LE_32(sc->sc_card_alive_init.therm_r3[0]);
4441 r4_u = LE_32(sc->sc_card_alive_init.therm_r4[0]);
4442 }
4443
4444 if (sc->sc_flags & IWK_F_STATISTICS) {
4445 r4_s = (int32_t)(LE_32(sc->sc_statistics.general.temperature) <<
4446 (31-23)) >> (31-23);
4447 } else {
4448 r4_s = (int32_t)(r4_u << (31-23)) >> (31-23);
4449 }
4450
4451 IWK_DBG((IWK_DEBUG_CALIBRATION, "temperature R[1-4]: %d %d %d %d\n",
4452 r1, r2, r3, r4_s));
4453
4454 if (r3 == r1) {
4455 cmn_err(CE_WARN, "iwk_curr_tempera(): "
4456 "failed to calculate temperature"
4457 "because r3 = r1\n");
4458 return (DDI_FAILURE);
4459 }
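	/*
	 * The temperature in Kelvin is computed as
	 *   97% * TEMPERATURE_CALIB_A_VAL * (R4 - R2) / (R3 - R1)
	 *     + TEMPERATURE_CALIB_KELVIN_OFFSET
	 * where R1..R3 come from the init alive notification and R4 is the
	 * sign-extended temperature from the latest statistics notification
	 * when available.
	 */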
4460
4461 tempera = TEMPERATURE_CALIB_A_VAL * (r4_s - r2);
4462 tempera /= (r3 - r1);
4463 tempera = (tempera*97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
4464
4465 IWK_DBG((IWK_DEBUG_CALIBRATION, "calculated temperature: %dK, %dC\n",
4466 tempera, KELVIN_TO_CELSIUS(tempera)));
4467
4468 return (tempera);
4469 }
4470
4471 /* Determine whether 4965 is using 2.4 GHz band */
4472 static inline int iwk_is_24G_band(iwk_sc_t *sc)
4473 {
4474 return (LE_32(sc->sc_config.flags) & RXON_FLG_BAND_24G_MSK);
4475 }
4476
4477 /* Determine whether 4965 is using fat channel */
4478 static inline int iwk_is_fat_channel(iwk_sc_t *sc)
4479 {
4480 return ((LE_32(sc->sc_config.flags) &
4481 RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
4482 (LE_32(sc->sc_config.flags) & RXON_FLG_CHANNEL_MODE_MIXED_MSK));
4483 }
4484
4485 /*
 * In MIMO mode, determine which group the 4965's current channel belongs to.
 * For more information about "channel group",
 * please refer to the iwk_calibration.h file
4489 */
4490 static int iwk_txpower_grp(uint16_t channel)
4491 {
4492 if (channel >= CALIB_IWK_TX_ATTEN_GR5_FCH &&
4493 channel <= CALIB_IWK_TX_ATTEN_GR5_LCH) {
4494 return (CALIB_CH_GROUP_5);
4495 }
4496
4497 if (channel >= CALIB_IWK_TX_ATTEN_GR1_FCH &&
4498 channel <= CALIB_IWK_TX_ATTEN_GR1_LCH) {
4499 return (CALIB_CH_GROUP_1);
4500 }
4501
4502 if (channel >= CALIB_IWK_TX_ATTEN_GR2_FCH &&
4503 channel <= CALIB_IWK_TX_ATTEN_GR2_LCH) {
4504 return (CALIB_CH_GROUP_2);
4505 }
4506
4507 if (channel >= CALIB_IWK_TX_ATTEN_GR3_FCH &&
4508 channel <= CALIB_IWK_TX_ATTEN_GR3_LCH) {
4509 return (CALIB_CH_GROUP_3);
4510 }
4511
4512 if (channel >= CALIB_IWK_TX_ATTEN_GR4_FCH &&
4513 channel <= CALIB_IWK_TX_ATTEN_GR4_LCH) {
4514 return (CALIB_CH_GROUP_4);
4515 }
4516
4517 cmn_err(CE_WARN, "iwk_txpower_grp(): "
4518 "can't find txpower group for channel %d.\n", channel);
4519
4520 return (DDI_FAILURE);
4521 }
4522
4523 /* 2.4 GHz */
4524 static uint16_t iwk_eep_band_1[14] = {
4525 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
4526 };
4527
4528 /* 5.2 GHz bands */
4529 static uint16_t iwk_eep_band_2[13] = {
4530 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
4531 };
4532
4533 static uint16_t iwk_eep_band_3[12] = {
4534 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
4535 };
4536
4537 static uint16_t iwk_eep_band_4[11] = {
4538 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
4539 };
4540
4541 static uint16_t iwk_eep_band_5[6] = {
4542 145, 149, 153, 157, 161, 165
4543 };
4544
4545 static uint16_t iwk_eep_band_6[7] = {
4546 1, 2, 3, 4, 5, 6, 7
4547 };
4548
4549 static uint16_t iwk_eep_band_7[11] = {
4550 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
4551 };
4552
4553 /* Get regulatory data from eeprom for a given channel */
4554 static struct iwk_eep_channel *iwk_get_eep_channel(iwk_sc_t *sc,
4555 uint16_t channel,
4556 int is_24G, int is_fat, int is_hi_chan)
4557 {
4558 int32_t i;
4559 uint16_t chan;
4560
4561 if (is_fat) { /* 11n mode */
4562
4563 if (is_hi_chan) {
4564 chan = channel - 4;
4565 } else {
4566 chan = channel;
4567 }
4568
4569 for (i = 0; i < 7; i++) {
4570 if (iwk_eep_band_6[i] == chan) {
4571 return (&sc->sc_eep_map.band_24_channels[i]);
4572 }
4573 }
4574 for (i = 0; i < 11; i++) {
4575 if (iwk_eep_band_7[i] == chan) {
4576 return (&sc->sc_eep_map.band_52_channels[i]);
4577 }
4578 }
4579 } else if (is_24G) { /* 2.4 GHz band */
4580 for (i = 0; i < 14; i++) {
4581 if (iwk_eep_band_1[i] == channel) {
4582 return (&sc->sc_eep_map.band_1_channels[i]);
4583 }
4584 }
4585 } else { /* 5 GHz band */
4586 for (i = 0; i < 13; i++) {
4587 if (iwk_eep_band_2[i] == channel) {
4588 return (&sc->sc_eep_map.band_2_channels[i]);
4589 }
4590 }
4591 for (i = 0; i < 12; i++) {
4592 if (iwk_eep_band_3[i] == channel) {
4593 return (&sc->sc_eep_map.band_3_channels[i]);
4594 }
4595 }
4596 for (i = 0; i < 11; i++) {
4597 if (iwk_eep_band_4[i] == channel) {
4598 return (&sc->sc_eep_map.band_4_channels[i]);
4599 }
4600 }
4601 for (i = 0; i < 6; i++) {
4602 if (iwk_eep_band_5[i] == channel) {
4603 return (&sc->sc_eep_map.band_5_channels[i]);
4604 }
4605 }
4606 }
4607
4608 return (NULL);
4609 }
4610
4611 /*
4612 * Determine which subband a given channel belongs
4613 * to in 2.4 GHz or 5 GHz band
4614 */
4615 static int32_t iwk_band_number(iwk_sc_t *sc, uint16_t channel)
4616 {
4617 int32_t b_n = -1;
4618
4619 for (b_n = 0; b_n < EEP_TX_POWER_BANDS; b_n++) {
4620 if (0 == sc->sc_eep_map.calib_info.band_info_tbl[b_n].ch_from) {
4621 continue;
4622 }
4623
4624 if ((channel >=
4625 (uint16_t)sc->sc_eep_map.calib_info.
4626 band_info_tbl[b_n].ch_from) &&
4627 (channel <=
4628 (uint16_t)sc->sc_eep_map.calib_info.
4629 band_info_tbl[b_n].ch_to)) {
4630 break;
4631 }
4632 }
4633
4634 return (b_n);
4635 }
4636
/* Signed division rounded to the nearest integer, used for interpolation */
4638 static int iwk_division(int32_t num, int32_t denom, int32_t *res)
4639 {
4640 int32_t sign = 1;
4641
4642 if (num < 0) {
4643 sign = -sign;
4644 num = -num;
4645 }
4646
4647 if (denom < 0) {
4648 sign = -sign;
4649 denom = -denom;
4650 }
4651
4652 *res = ((num*2 + denom) / (denom*2)) * sign;
4653
4654 return (IWK_SUCCESS);
4655 }
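
/*
 * iwk_division() rounds the magnitude of num/denom to the nearest integer
 * and then re-applies the sign.  For example, iwk_division(7, 2, &res)
 * yields res == 4, and iwk_division(-7, 2, &res) yields res == -4.
 */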
4656
/* Perform linear interpolation between two calibration points */
4658 static int32_t iwk_interpolate_value(int32_t x, int32_t x1, int32_t y1,
4659 int32_t x2, int32_t y2)
4660 {
4661 int32_t val;
4662
4663 if (x2 == x1) {
4664 return (y1);
4665 } else {
4666 (void) iwk_division((x2-x)*(y1-y2), (x2-x1), &val);
4667 return (val + y2);
4668 }
4669 }
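
/*
 * The function above is ordinary linear interpolation,
 *   y = y2 + (x2 - x) * (y1 - y2) / (x2 - x1),
 * with the division rounded by iwk_division().  For example, interpolating
 * between (x1 = 1, y1 = 10) and (x2 = 5, y2 = 20) at x = 3 yields 15.
 */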
4670
4671 /* Get interpolation measurement data of a given channel for all chains. */
4672 static int iwk_channel_interpolate(iwk_sc_t *sc, uint16_t channel,
4673 struct iwk_eep_calib_channel_info *chan_info)
4674 {
4675 int32_t ban_n;
4676 uint32_t ch1_n, ch2_n;
4677 int32_t c, m;
4678 struct iwk_eep_calib_measure *m1_p, *m2_p, *m_p;
4679
4680 /* determine subband number */
4681 ban_n = iwk_band_number(sc, channel);
4682 if (ban_n >= EEP_TX_POWER_BANDS) {
4683 return (DDI_FAILURE);
4684 }
4685
4686 ch1_n =
4687 (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch1.ch_num;
4688 ch2_n =
4689 (uint32_t)sc->sc_eep_map.calib_info.band_info_tbl[ban_n].ch2.ch_num;
4690
4691 chan_info->ch_num = (uint8_t)channel; /* given channel number */
4692
4693 /*
4694 * go through all chains on chipset
4695 */
4696 for (c = 0; c < EEP_TX_POWER_TX_CHAINS; c++) {
4697 /*
4698 * go through all factory measurements
4699 */
4700 for (m = 0; m < EEP_TX_POWER_MEASUREMENTS; m++) {
4701 m1_p =
4702 &(sc->sc_eep_map.calib_info.
4703 band_info_tbl[ban_n].ch1.measure[c][m]);
4704 m2_p =
4705 &(sc->sc_eep_map.calib_info.band_info_tbl[ban_n].
4706 ch2.measure[c][m]);
4707 m_p = &(chan_info->measure[c][m]);
4708
4709 /*
4710 * make interpolation to get actual
4711 * Tx power for given channel
4712 */
4713 m_p->actual_pow = iwk_interpolate_value(channel,
4714 ch1_n, m1_p->actual_pow,
4715 ch2_n, m2_p->actual_pow);
4716
4717 /* make interpolation to get index into gain table */
4718 m_p->gain_idx = iwk_interpolate_value(channel,
4719 ch1_n, m1_p->gain_idx,
4720 ch2_n, m2_p->gain_idx);
4721
4722 /* make interpolation to get chipset temperature */
4723 m_p->temperature = iwk_interpolate_value(channel,
4724 ch1_n, m1_p->temperature,
4725 ch2_n, m2_p->temperature);
4726
4727 /*
4728 * make interpolation to get power
4729 * amp detector level
4730 */
4731 m_p->pa_det = iwk_interpolate_value(channel, ch1_n,
4732 m1_p->pa_det,
4733 ch2_n, m2_p->pa_det);
4734 }
4735 }
4736
4737 return (IWK_SUCCESS);
4738 }
4739
4740 /*
 * Calculate voltage compensation for Tx power. For more information,
4742 * please refer to iwk_calibration.h file
4743 */
4744 static int32_t iwk_voltage_compensation(int32_t eep_voltage,
4745 int32_t curr_voltage)
4746 {
4747 int32_t vol_comp = 0;
4748
4749 if ((TX_POWER_IWK_ILLEGAL_VOLTAGE == eep_voltage) ||
4750 (TX_POWER_IWK_ILLEGAL_VOLTAGE == curr_voltage)) {
4751 return (vol_comp);
4752 }
4753
4754 (void) iwk_division(curr_voltage-eep_voltage,
4755 TX_POWER_IWK_VOLTAGE_CODES_PER_03V, &vol_comp);
4756
4757 if (curr_voltage > eep_voltage) {
4758 vol_comp *= 2;
4759 }
4760 if ((vol_comp < -2) || (vol_comp > 2)) {
4761 vol_comp = 0;
4762 }
4763
4764 return (vol_comp);
4765 }
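
/*
 * The compensation above is the voltage difference expressed in units of
 * TX_POWER_IWK_VOLTAGE_CODES_PER_03V, doubled when the current voltage
 * exceeds the EEPROM reference, and discarded (forced back to 0) if it
 * falls outside the range [-2, 2].
 */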
4766
4767 /*
4768 * Thermal compensation values for txpower for various frequency ranges ...
4769 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust
4770 */
4771 static struct iwk_txpower_tempera_comp {
4772 int32_t degrees_per_05db_a;
4773 int32_t degrees_per_05db_a_denom;
4774 } txpower_tempera_comp_table[CALIB_CH_GROUP_MAX] = {
4775 {9, 2}, /* group 0 5.2, ch 34-43 */
4776 {4, 1}, /* group 1 5.2, ch 44-70 */
4777 {4, 1}, /* group 2 5.2, ch 71-124 */
4778 {4, 1}, /* group 3 5.2, ch 125-200 */
4779 {3, 1} /* group 4 2.4, ch all */
4780 };
4781
4782 /*
4783 * bit-rate-dependent table to prevent Tx distortion, in half-dB units,
4784 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates.
4785 */
4786 static int32_t back_off_table[] = {
4787 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
4788 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
4789 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
4790 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
4791 10 /* CCK */
4792 };
4793
4794 /* determine minimum Tx power index in gain table */
4795 static int32_t iwk_min_power_index(int32_t rate_pow_idx, int32_t is_24G)
4796 {
4797 if ((!is_24G) && ((rate_pow_idx & 7) <= 4)) {
4798 return (MIN_TX_GAIN_INDEX_52GHZ_EXT);
4799 }
4800
4801 return (MIN_TX_GAIN_INDEX);
4802 }
4803
4804 /*
4805 * Determine DSP and radio gain according to temperature and other factors.
 * This function does the bulk of the Tx power calibration
4807 */
4808 static int iwk_txpower_table_cmd_init(iwk_sc_t *sc,
4809 struct iwk_tx_power_db *tp_db)
4810 {
int is_24G, is_fat, is_high_chan = 0, is_mimo;
4812 int c, r;
4813 int32_t target_power;
4814 int32_t tx_grp = CALIB_CH_GROUP_MAX;
4815 uint16_t channel;
4816 uint8_t saturation_power;
4817 int32_t regu_power;
4818 int32_t curr_regu_power;
4819 struct iwk_eep_channel *eep_chan_p;
4820 struct iwk_eep_calib_channel_info eep_chan_calib;
4821 int32_t eep_voltage, init_voltage;
4822 int32_t voltage_compensation;
4823 int32_t temperature;
4824 int32_t degrees_per_05db_num;
4825 int32_t degrees_per_05db_denom;
4826 struct iwk_eep_calib_measure *measure_p;
4827 int32_t interpo_temp;
4828 int32_t power_limit;
4829 int32_t atten_value;
4830 int32_t tempera_comp[2];
4831 int32_t interpo_gain_idx[2];
4832 int32_t interpo_actual_pow[2];
4833 union iwk_tx_power_dual_stream txpower_gains;
4834 int32_t txpower_gains_idx;
4835
4836 channel = LE_16(sc->sc_config.chan);
4837
4838 /* 2.4 GHz or 5 GHz band */
4839 is_24G = iwk_is_24G_band(sc);
4840
4841 /* fat channel or not */
4842 is_fat = iwk_is_fat_channel(sc);
4843
4844 /*
 * determine whether the fat channel is identified by the low half
 * or the high half channel number
4847 */
4848 if (is_fat && (LE_32(sc->sc_config.flags) &
4849 RXON_FLG_CONTROL_CHANNEL_LOC_HIGH_MSK)) {
4850 is_high_chan = 1;
4851 }
4852
4853 if ((channel > 0) && (channel < 200)) {
4854 /* get regulatory channel data from eeprom */
4855 eep_chan_p = iwk_get_eep_channel(sc, channel, is_24G,
4856 is_fat, is_high_chan);
4857 if (NULL == eep_chan_p) {
4858 cmn_err(CE_WARN,
4859 "iwk_txpower_table_cmd_init(): "
4860 "can't get channel infomation\n");
4861 return (DDI_FAILURE);
4862 }
4863 } else {
4864 cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4865 "channel(%d) isn't in proper range\n",
4866 channel);
4867 return (DDI_FAILURE);
4868 }
4869
4870 /* initial value of Tx power */
4871 sc->sc_user_txpower = (int32_t)eep_chan_p->max_power_avg;
4872 if (sc->sc_user_txpower < IWK_TX_POWER_TARGET_POWER_MIN) {
4873 cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4874 "user TX power is too weak\n");
4875 return (DDI_FAILURE);
4876 } else if (sc->sc_user_txpower > IWK_TX_POWER_TARGET_POWER_MAX) {
4877 cmn_err(CE_WARN, "iwk_txpower_table_cmd_init(): "
4878 "user TX power is too strong\n");
4879 return (DDI_FAILURE);
4880 }
4881
4882 target_power = 2 * sc->sc_user_txpower;
4883
4884 /* determine which group current channel belongs to */
4885 tx_grp = iwk_txpower_grp(channel);
4886 if (tx_grp < 0) {
4887 return (tx_grp);
4888 }
4889
4890
4891 if (is_fat) {
4892 if (is_high_chan) {
4893 channel -= 2;
4894 } else {
4895 channel += 2;
4896 }
4897 }
4898
4899 /* determine saturation power */
4900 if (is_24G) {
4901 saturation_power =
4902 sc->sc_eep_map.calib_info.saturation_power24;
4903 } else {
4904 saturation_power =
4905 sc->sc_eep_map.calib_info.saturation_power52;
4906 }
4907
4908 if (saturation_power < IWK_TX_POWER_SATURATION_MIN ||
4909 saturation_power > IWK_TX_POWER_SATURATION_MAX) {
4910 if (is_24G) {
4911 saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_24;
4912 } else {
4913 saturation_power = IWK_TX_POWER_DEFAULT_SATURATION_52;
4914 }
4915 }
4916
4917 /* determine regulatory power */
4918 regu_power = (int32_t)eep_chan_p->max_power_avg * 2;
4919 if ((regu_power < IWK_TX_POWER_REGULATORY_MIN) ||
4920 (regu_power > IWK_TX_POWER_REGULATORY_MAX)) {
4921 if (is_24G) {
4922 regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_24;
4923 } else {
4924 regu_power = IWK_TX_POWER_DEFAULT_REGULATORY_52;
4925 }
4926 }
4927
4928 /*
 * get measurement data for the current channel,
 * such as temperature, index into the gain table, and actual Tx power
4931 */
4932 (void) iwk_channel_interpolate(sc, channel, &eep_chan_calib);
4933
4934 eep_voltage = (int32_t)LE_16(sc->sc_eep_map.calib_info.voltage);
4935 init_voltage = (int32_t)LE_32(sc->sc_card_alive_init.voltage);
4936
4937 /* calculate voltage compensation to Tx power */
4938 voltage_compensation =
4939 iwk_voltage_compensation(eep_voltage, init_voltage);
4940
4941 if (sc->sc_tempera >= IWK_TX_POWER_TEMPERATURE_MIN) {
4942 temperature = sc->sc_tempera;
4943 } else {
4944 temperature = IWK_TX_POWER_TEMPERATURE_MIN;
4945 }
4946 if (sc->sc_tempera <= IWK_TX_POWER_TEMPERATURE_MAX) {
4947 temperature = sc->sc_tempera;
4948 } else {
4949 temperature = IWK_TX_POWER_TEMPERATURE_MAX;
4950 }
4951 temperature = KELVIN_TO_CELSIUS(temperature);
4952
4953 degrees_per_05db_num =
4954 txpower_tempera_comp_table[tx_grp].degrees_per_05db_a;
4955 degrees_per_05db_denom =
4956 txpower_tempera_comp_table[tx_grp].degrees_per_05db_a_denom;
4957
4958 for (c = 0; c < 2; c++) { /* go through all chains */
4959 measure_p = &eep_chan_calib.measure[c][1];
4960 interpo_temp = measure_p->temperature;
4961
4962 /* determine temperature compensation to Tx power */
4963 (void) iwk_division(
4964 (temperature-interpo_temp)*degrees_per_05db_denom,
4965 degrees_per_05db_num, &tempera_comp[c]);
4966
4967 interpo_gain_idx[c] = measure_p->gain_idx;
4968 interpo_actual_pow[c] = measure_p->actual_pow;
4969 }
4970
4971 /*
4972 * go through all rate entries in Tx power table
4973 */
4974 for (r = 0; r < POWER_TABLE_NUM_ENTRIES; r++) {
4975 if (r & 0x8) {
4976 /* need to lower regulatory power for MIMO mode */
4977 curr_regu_power = regu_power -
4978 IWK_TX_POWER_MIMO_REGULATORY_COMPENSATION;
4979 is_mimo = 1;
4980 } else {
4981 curr_regu_power = regu_power;
4982 is_mimo = 0;
4983 }
4984
4985 power_limit = saturation_power - back_off_table[r];
4986 if (power_limit > curr_regu_power) {
4987 /* final Tx power limit */
4988 power_limit = curr_regu_power;
4989 }
4990
4991 if (target_power > power_limit) {
4992 target_power = power_limit; /* final target Tx power */
4993 }
4994
4995 for (c = 0; c < 2; c++) { /* go through all Tx chains */
4996 if (is_mimo) {
4997 atten_value =
4998 LE_32(sc->sc_card_alive_init.
4999 tx_atten[tx_grp][c]);
5000 } else {
5001 atten_value = 0;
5002 }
5003
5004 /*
 * calculate the index into the gain table from the factory-calibrated
 * gain index, adjusted by the target vs. measured power difference and
 * the temperature, voltage and (in MIMO mode) attenuation compensation
5007 */
5008 txpower_gains_idx = interpo_gain_idx[c] -
5009 (target_power - interpo_actual_pow[c]) -
5010 tempera_comp[c] - voltage_compensation +
5011 atten_value;
5012
5013 if (txpower_gains_idx <
5014 iwk_min_power_index(r, is_24G)) {
5015 txpower_gains_idx =
5016 iwk_min_power_index(r, is_24G);
5017 }
5018
5019 if (!is_24G) {
5020 /*
5021 * support negative index for 5 GHz
5022 * band
5023 */
5024 txpower_gains_idx += 9;
5025 }
5026
5027 if (POWER_TABLE_CCK_ENTRY == r) {
/* for CCK mode, apply the necessary attenuation */
5029 txpower_gains_idx +=
5030 IWK_TX_POWER_CCK_COMPENSATION_C_STEP;
5031 }
5032
5033 if (txpower_gains_idx > 107) {
5034 txpower_gains_idx = 107;
5035 } else if (txpower_gains_idx < 0) {
5036 txpower_gains_idx = 0;
5037 }
5038
5039 /* search DSP and radio gains in gain table */
5040 txpower_gains.s.radio_tx_gain[c] =
5041 gains_table[is_24G][txpower_gains_idx].radio;
5042 txpower_gains.s.dsp_predis_atten[c] =
5043 gains_table[is_24G][txpower_gains_idx].dsp;
5044
5045 IWK_DBG((IWK_DEBUG_CALIBRATION,
5046 "rate_index: %d, "
5047 "gain_index %d, c: %d,is_mimo: %d\n",
5048 r, txpower_gains_idx, c, is_mimo));
5049 }
5050
5051 /* initialize Tx power table */
5052 if (r < POWER_TABLE_NUM_HT_OFDM_ENTRIES) {
5053 tp_db->ht_ofdm_power[r].dw = LE_32(txpower_gains.dw);
5054 } else {
5055 tp_db->legacy_cck_power.dw = LE_32(txpower_gains.dw);
5056 }
5057 }
5058
5059 return (IWK_SUCCESS);
5060 }
5061
5062 /*
 * Perform Tx power calibration to adjust the Tx power.
 * This is done by sending the Tx power table command.
5065 */
5066 static int iwk_tx_power_calibration(iwk_sc_t *sc)
5067 {
5068 iwk_tx_power_table_cmd_t cmd;
5069 int rv;
5070
5071 if (sc->sc_flags & IWK_F_SCANNING) {
5072 return (IWK_SUCCESS);
5073 }
5074
/* initialize the Tx power table command */
5076 cmd.band = (uint8_t)iwk_is_24G_band(sc);
5077 cmd.channel = sc->sc_config.chan;
5078 cmd.channel_normal_width = 0;
5079
5080 /* initialize Tx power table */
5081 rv = iwk_txpower_table_cmd_init(sc, &cmd.tx_power);
5082 if (rv) {
5083 cmn_err(CE_NOTE, "rv= %d\n", rv);
5084 return (rv);
5085 }
5086
5087 /* send out Tx power table command */
5088 rv = iwk_cmd(sc, REPLY_TX_PWR_TABLE_CMD, &cmd, sizeof (cmd), 1);
5089 if (rv) {
5090 return (rv);
5091 }
5092
5093 /* record current temperature */
5094 sc->sc_last_tempera = sc->sc_tempera;
5095
5096 return (IWK_SUCCESS);
5097 }
5098
/* Handler for statistics notifications from the uCode */
5100 static void iwk_statistics_notify(iwk_sc_t *sc, iwk_rx_desc_t *desc)
5101 {
5102 int is_diff;
5103 struct iwk_notif_statistics *statistics_p =
5104 (struct iwk_notif_statistics *)(desc + 1);
5105
5106 mutex_enter(&sc->sc_glock);
5107
5108 is_diff = (sc->sc_statistics.general.temperature !=
5109 statistics_p->general.temperature) ||
5110 (LE_32(sc->sc_statistics.flag) &
5111 STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
5112 (LE_32(statistics_p->flag) & STATISTICS_REPLY_FLG_FAT_MODE_MSK);
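	/*
	 * A new calibration pass is needed when the reported temperature
	 * has changed or the FAT-mode flag has toggled since the last
	 * notification; is_diff records that condition before the saved
	 * statistics are overwritten below.
	 */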
5113
5114 /* update statistics data */
5115 (void) memcpy(&sc->sc_statistics, statistics_p,
5116 sizeof (struct iwk_notif_statistics));
5117
5118 sc->sc_flags |= IWK_F_STATISTICS;
5119
5120 if (!(sc->sc_flags & IWK_F_SCANNING)) {
5121 /* make Receiver gain balance calibration */
5122 (void) iwk_rxgain_diff(sc);
5123
5124 /* make Receiver sensitivity calibration */
5125 (void) iwk_rx_sens(sc);
5126 }
5127
5128
5129 if (!is_diff) {
5130 mutex_exit(&sc->sc_glock);
5131 return;
5132 }
5133
/* calculate the current temperature of the 4965 chipset */
5135 sc->sc_tempera = iwk_curr_tempera(sc);
5136
/* a distinct temperature change triggers Tx power calibration */
5138 if (((sc->sc_tempera - sc->sc_last_tempera) >= 3) ||
5139 ((sc->sc_last_tempera - sc->sc_tempera) >= 3)) {
5140 /* make Tx power calibration */
5141 (void) iwk_tx_power_calibration(sc);
5142 }
5143
5144 mutex_exit(&sc->sc_glock);
5145 }
5146
/* Determine whether this station is in the associated state */
5148 static int iwk_is_associated(iwk_sc_t *sc)
5149 {
5150 return (LE_32(sc->sc_config.filter_flags) & RXON_FILTER_ASSOC_MSK);
5151 }
5152
5153 /* Make necessary preparation for Receiver gain balance calibration */
5154 static int iwk_rxgain_diff_init(iwk_sc_t *sc)
5155 {
5156 int i, rv;
5157 struct iwk_calibration_cmd cmd;
5158 struct iwk_rx_gain_diff *gain_diff_p;
5159
5160 gain_diff_p = &sc->sc_rxgain_diff;
5161
5162 (void) memset(gain_diff_p, 0, sizeof (struct iwk_rx_gain_diff));
5163 (void) memset(&cmd, 0, sizeof (struct iwk_calibration_cmd));
5164
5165 for (i = 0; i < RX_CHAINS_NUM; i++) {
5166 gain_diff_p->gain_diff_chain[i] = CHAIN_GAIN_DIFF_INIT_VAL;
5167 }
5168
5169 if (iwk_is_associated(sc)) {
5170 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
5171 cmd.diff_gain_a = 0;
5172 cmd.diff_gain_b = 0;
5173 cmd.diff_gain_c = 0;
5174
/* assume the gains of all Rx chains are balanced */
5176 rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD, &cmd,
5177 sizeof (cmd), 1);
5178 if (rv) {
5179 return (rv);
5180 }
5181
5182 gain_diff_p->state = IWK_GAIN_DIFF_ACCUMULATE;
5183 }
5184
5185 return (IWK_SUCCESS);
5186 }
5187
5188 /*
 * perform receiver gain balance calibration to balance the Rx gain
 * between Rx chains and to determine which chain is disconnected
5191 */
5192 static int iwk_rxgain_diff(iwk_sc_t *sc)
5193 {
5194 int i, is_24G, rv;
5195 int max_beacon_chain_n;
5196 int min_noise_chain_n;
5197 uint16_t channel_n;
5198 int32_t beacon_diff;
5199 int32_t noise_diff;
5200 uint32_t noise_chain_a, noise_chain_b, noise_chain_c;
5201 uint32_t beacon_chain_a, beacon_chain_b, beacon_chain_c;
5202 struct iwk_calibration_cmd cmd;
5203 uint32_t beacon_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
5204 uint32_t noise_aver[RX_CHAINS_NUM] = {0xFFFFFFFF};
5205 struct statistics_rx_non_phy *rx_general_p =
5206 &sc->sc_statistics.rx.general;
5207 struct iwk_rx_gain_diff *gain_diff_p = &sc->sc_rxgain_diff;
5208
5209 if (INTERFERENCE_DATA_AVAILABLE !=
5210 LE_32(rx_general_p->interference_data_flag)) {
5211 return (IWK_SUCCESS);
5212 }
5213
5214 if (IWK_GAIN_DIFF_ACCUMULATE != gain_diff_p->state) {
5215 return (IWK_SUCCESS);
5216 }
5217
5218 is_24G = iwk_is_24G_band(sc);
5219 channel_n = sc->sc_config.chan; /* channel number */
5220
5221 if ((channel_n != (LE_32(sc->sc_statistics.flag) >> 16)) ||
5222 ((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
5223 (LE_32(sc->sc_statistics.flag) &
5224 STATISTICS_REPLY_FLG_BAND_24G_MSK)) &&
5225 !is_24G)) {
5226 return (IWK_SUCCESS);
5227 }
5228
5229 /* Rx chain's noise strength from statistics notification */
5230 noise_chain_a = LE_32(rx_general_p->beacon_silence_rssi_a) & 0xFF;
5231 noise_chain_b = LE_32(rx_general_p->beacon_silence_rssi_b) & 0xFF;
5232 noise_chain_c = LE_32(rx_general_p->beacon_silence_rssi_c) & 0xFF;
5233
5234 /* Rx chain's beacon strength from statistics notification */
5235 beacon_chain_a = LE_32(rx_general_p->beacon_rssi_a) & 0xFF;
5236 beacon_chain_b = LE_32(rx_general_p->beacon_rssi_b) & 0xFF;
5237 beacon_chain_c = LE_32(rx_general_p->beacon_rssi_c) & 0xFF;
5238
5239 gain_diff_p->beacon_count++;
5240
5241 /* accumulate chain's noise strength */
5242 gain_diff_p->noise_stren_a += noise_chain_a;
5243 gain_diff_p->noise_stren_b += noise_chain_b;
5244 gain_diff_p->noise_stren_c += noise_chain_c;
5245
5246 /* accumulate chain's beacon strength */
5247 gain_diff_p->beacon_stren_a += beacon_chain_a;
5248 gain_diff_p->beacon_stren_b += beacon_chain_b;
5249 gain_diff_p->beacon_stren_c += beacon_chain_c;
5250
5251 if (BEACON_NUM_20 == gain_diff_p->beacon_count) {
5252 /* calculate average beacon strength */
5253 beacon_aver[0] = (gain_diff_p->beacon_stren_a) / BEACON_NUM_20;
5254 beacon_aver[1] = (gain_diff_p->beacon_stren_b) / BEACON_NUM_20;
5255 beacon_aver[2] = (gain_diff_p->beacon_stren_c) / BEACON_NUM_20;
5256
5257 /* calculate average noise strength */
5258 noise_aver[0] = (gain_diff_p->noise_stren_a) / BEACON_NUM_20;
5259 noise_aver[1] = (gain_diff_p->noise_stren_b) / BEACON_NUM_20;
noise_aver[2] = (gain_diff_p->noise_stren_c) / BEACON_NUM_20;
5261
5262 /* determine maximum beacon strength among 3 chains */
5263 if ((beacon_aver[0] >= beacon_aver[1]) &&
5264 (beacon_aver[0] >= beacon_aver[2])) {
5265 max_beacon_chain_n = 0;
5266 gain_diff_p->connected_chains = 1 << 0;
5267 } else if (beacon_aver[1] >= beacon_aver[2]) {
5268 max_beacon_chain_n = 1;
5269 gain_diff_p->connected_chains = 1 << 1;
5270 } else {
5271 max_beacon_chain_n = 2;
5272 gain_diff_p->connected_chains = 1 << 2;
5273 }
5274
5275 /* determine which chain is disconnected */
5276 for (i = 0; i < RX_CHAINS_NUM; i++) {
5277 if (i != max_beacon_chain_n) {
5278 beacon_diff = beacon_aver[max_beacon_chain_n] -
5279 beacon_aver[i];
5280 if (beacon_diff > MAX_ALLOWED_DIFF) {
5281 gain_diff_p->disconnect_chain[i] = 1;
5282 } else {
5283 gain_diff_p->connected_chains |=
5284 (1 << i);
5285 }
5286 }
5287 }
5288
5289 /*
5290 * if chain A and B are both disconnected,
 * assume the one with the stronger beacon strength is connected
5292 */
5293 if (gain_diff_p->disconnect_chain[0] &&
5294 gain_diff_p->disconnect_chain[1]) {
5295 if (beacon_aver[0] >= beacon_aver[1]) {
5296 gain_diff_p->disconnect_chain[0] = 0;
5297 gain_diff_p->connected_chains |= (1 << 0);
5298 } else {
5299 gain_diff_p->disconnect_chain[1] = 0;
5300 gain_diff_p->connected_chains |= (1 << 1);
5301 }
5302 }
5303
5304 /* determine minimum noise strength among 3 chains */
5305 if (!gain_diff_p->disconnect_chain[0]) {
5306 min_noise_chain_n = 0;
5307
5308 for (i = 0; i < RX_CHAINS_NUM; i++) {
5309 if (!gain_diff_p->disconnect_chain[i] &&
5310 (noise_aver[i] <=
5311 noise_aver[min_noise_chain_n])) {
5312 min_noise_chain_n = i;
5313 }
5314
5315 }
5316 } else {
5317 min_noise_chain_n = 1;
5318
5319 for (i = 0; i < RX_CHAINS_NUM; i++) {
5320 if (!gain_diff_p->disconnect_chain[i] &&
5321 (noise_aver[i] <=
5322 noise_aver[min_noise_chain_n])) {
5323 min_noise_chain_n = i;
5324 }
5325 }
5326 }
5327
5328 gain_diff_p->gain_diff_chain[min_noise_chain_n] = 0;
5329
5330 /* determine gain difference between chains */
5331 for (i = 0; i < RX_CHAINS_NUM; i++) {
5332 if (!gain_diff_p->disconnect_chain[i] &&
5333 (CHAIN_GAIN_DIFF_INIT_VAL ==
5334 gain_diff_p->gain_diff_chain[i])) {
5335
5336 noise_diff = noise_aver[i] -
5337 noise_aver[min_noise_chain_n];
5338 gain_diff_p->gain_diff_chain[i] =
5339 (uint8_t)((noise_diff * 10) / 15);
5340
5341 if (gain_diff_p->gain_diff_chain[i] > 3) {
5342 gain_diff_p->gain_diff_chain[i] = 3;
5343 }
5344
5345 gain_diff_p->gain_diff_chain[i] |= (1 << 2);
5346 } else {
5347 gain_diff_p->gain_diff_chain[i] = 0;
5348 }
5349 }
5350
5351 if (!gain_diff_p->gain_diff_send) {
5352 gain_diff_p->gain_diff_send = 1;
5353
5354 (void) memset(&cmd, 0, sizeof (cmd));
5355
5356 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
5357 cmd.diff_gain_a = gain_diff_p->gain_diff_chain[0];
5358 cmd.diff_gain_b = gain_diff_p->gain_diff_chain[1];
5359 cmd.diff_gain_c = gain_diff_p->gain_diff_chain[2];
5360
5361 /*
5362 * send out PHY calibration command to
5363 * adjust every chain's Rx gain
5364 */
5365 rv = iwk_cmd(sc, REPLY_PHY_CALIBRATION_CMD,
5366 &cmd, sizeof (cmd), 1);
5367 if (rv) {
5368 return (rv);
5369 }
5370
5371 gain_diff_p->state = IWK_GAIN_DIFF_CALIBRATED;
5372 }
5373
5374 gain_diff_p->beacon_stren_a = 0;
5375 gain_diff_p->beacon_stren_b = 0;
5376 gain_diff_p->beacon_stren_c = 0;
5377
5378 gain_diff_p->noise_stren_a = 0;
5379 gain_diff_p->noise_stren_b = 0;
5380 gain_diff_p->noise_stren_c = 0;
5381 }
5382
5383 return (IWK_SUCCESS);
5384 }
5385
5386 /* Make necessary preparation for Receiver sensitivity calibration */
5387 static int iwk_rx_sens_init(iwk_sc_t *sc)
5388 {
5389 int i, rv;
5390 struct iwk_rx_sensitivity_cmd cmd;
5391 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5392
5393 (void) memset(&cmd, 0, sizeof (struct iwk_rx_sensitivity_cmd));
5394 (void) memset(rx_sens_p, 0, sizeof (struct iwk_rx_sensitivity));
5395
5396 rx_sens_p->auto_corr_ofdm_x4 = 90;
5397 rx_sens_p->auto_corr_mrc_ofdm_x4 = 170;
5398 rx_sens_p->auto_corr_ofdm_x1 = 105;
5399 rx_sens_p->auto_corr_mrc_ofdm_x1 = 220;
5400
5401 rx_sens_p->auto_corr_cck_x4 = 125;
5402 rx_sens_p->auto_corr_mrc_cck_x4 = 200;
5403 rx_sens_p->min_energy_det_cck = 100;
5404
5405 rx_sens_p->flags &= (~IWK_SENSITIVITY_CALIB_ALLOW_MSK);
5406 rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK);
5407 rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5408
5409 rx_sens_p->last_bad_plcp_cnt_ofdm = 0;
5410 rx_sens_p->last_false_alarm_cnt_ofdm = 0;
5411 rx_sens_p->last_bad_plcp_cnt_cck = 0;
5412 rx_sens_p->last_false_alarm_cnt_cck = 0;
5413
5414 rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM;
5415 rx_sens_p->cck_prev_state = IWK_TOO_MANY_FALSE_ALARM;
5416 rx_sens_p->cck_no_false_alarm_num = 0;
5417 rx_sens_p->cck_beacon_idx = 0;
5418
5419 for (i = 0; i < 10; i++) {
5420 rx_sens_p->cck_beacon_min[i] = 0;
5421 }
5422
5423 rx_sens_p->cck_noise_idx = 0;
5424 rx_sens_p->cck_noise_ref = 0;
5425
5426 for (i = 0; i < 20; i++) {
5427 rx_sens_p->cck_noise_max[i] = 0;
5428 }
5429
5430 rx_sens_p->cck_noise_diff = 0;
5431 rx_sens_p->cck_no_false_alarm_num = 0;
5432
5433 cmd.control = LE_16(IWK_SENSITIVITY_CONTROL_WORK_TABLE);
5434
5435 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
5436 LE_16(rx_sens_p->auto_corr_ofdm_x4);
5437 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
5438 LE_16(rx_sens_p->auto_corr_mrc_ofdm_x4);
5439 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
5440 LE_16(rx_sens_p->auto_corr_ofdm_x1);
5441 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
5442 LE_16(rx_sens_p->auto_corr_mrc_ofdm_x1);
5443
5444 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
5445 LE_16(rx_sens_p->auto_corr_cck_x4);
5446 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
5447 LE_16(rx_sens_p->auto_corr_mrc_cck_x4);
5448 cmd.table[MIN_ENERGY_CCK_DET_IDX] =
5449 LE_16(rx_sens_p->min_energy_det_cck);
5450
5451 cmd.table[MIN_ENERGY_OFDM_DET_IDX] = LE_16(100);
5452 cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = LE_16(190);
5453 cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = LE_16(390);
5454 cmd.table[PTAM_ENERGY_TH_IDX] = LE_16(62);
5455
5456 /* at first, set up Rx to maximum sensitivity */
5457 rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1);
5458 if (rv) {
5459 cmn_err(CE_WARN, "iwk_rx_sens_init(): "
5460 "in the process of initialization, "
5461 "failed to send rx sensitivity command\n");
5462 return (rv);
5463 }
5464
5465 rx_sens_p->flags |= IWK_SENSITIVITY_CALIB_ALLOW_MSK;
5466
5467 return (IWK_SUCCESS);
5468 }
5469
5470 /*
 * perform receiver sensitivity calibration to adjust every chain's Rx
 * sensitivity. For more information, please refer to iwk_calibration.h
5473 */
5474 static int iwk_rx_sens(iwk_sc_t *sc)
5475 {
5476 int rv;
5477 uint32_t actual_rx_time;
5478 struct statistics_rx_non_phy *rx_general_p =
5479 &sc->sc_statistics.rx.general;
5480 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5481 struct iwk_rx_sensitivity_cmd cmd;
5482
5483 if (!(rx_sens_p->flags & IWK_SENSITIVITY_CALIB_ALLOW_MSK)) {
5484 cmn_err(CE_WARN, "iwk_rx_sens(): "
5485 "sensitivity initialization has not finished.\n");
5486 return (DDI_FAILURE);
5487 }
5488
5489 if (INTERFERENCE_DATA_AVAILABLE !=
5490 LE_32(rx_general_p->interference_data_flag)) {
5491 cmn_err(CE_WARN, "iwk_rx_sens(): "
5492 "can't make rx sensitivity calibration,"
5493 "because of invalid statistics\n");
5494 return (DDI_FAILURE);
5495 }
5496
5497 actual_rx_time = LE_32(rx_general_p->channel_load);
5498 if (!actual_rx_time) {
5499 IWK_DBG((IWK_DEBUG_CALIBRATION, "iwk_rx_sens(): "
5500 "can't make rx sensitivity calibration,"
5501 "because has not enough rx time\n"));
5502 return (DDI_FAILURE);
5503 }
5504
5505 /* make Rx sensitivity calibration for OFDM mode */
5506 rv = iwk_ofdm_sens(sc, actual_rx_time);
5507 if (rv) {
5508 return (rv);
5509 }
5510
5511 /* make Rx sensitivity calibration for CCK mode */
5512 rv = iwk_cck_sens(sc, actual_rx_time);
5513 if (rv) {
5514 return (rv);
5515 }
5516
5517 /*
 * if the false alarm counts have not changed, nothing needs to be done
5519 */
5520 if ((!(rx_sens_p->flags & IWK_SENSITIVITY_OFDM_UPDATE_MSK)) &&
5521 (!(rx_sens_p->flags & IWK_SENSITIVITY_CCK_UPDATE_MSK))) {
5522 return (IWK_SUCCESS);
5523 }
5524
5525 cmd.control = IWK_SENSITIVITY_CONTROL_WORK_TABLE;
5526
5527 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
5528 rx_sens_p->auto_corr_ofdm_x4;
5529 cmd.table[AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
5530 rx_sens_p->auto_corr_mrc_ofdm_x4;
5531 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
5532 rx_sens_p->auto_corr_ofdm_x1;
5533 cmd.table[AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
5534 rx_sens_p->auto_corr_mrc_ofdm_x1;
5535
5536 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
5537 rx_sens_p->auto_corr_cck_x4;
5538 cmd.table[AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
5539 rx_sens_p->auto_corr_mrc_cck_x4;
5540 cmd.table[MIN_ENERGY_CCK_DET_IDX] =
5541 rx_sens_p->min_energy_det_cck;
5542
5543 cmd.table[MIN_ENERGY_OFDM_DET_IDX] = 100;
5544 cmd.table[BARKER_CORR_TH_ADD_MIN_IDX] = 190;
5545 cmd.table[BARKER_CORR_TH_ADD_MIN_MRC_IDX] = 390;
5546 cmd.table[PTAM_ENERGY_TH_IDX] = 62;
5547
5548 /*
5549 * send sensitivity command to complete actual sensitivity calibration
5550 */
5551 rv = iwk_cmd(sc, SENSITIVITY_CMD, &cmd, sizeof (cmd), 1);
5552 if (rv) {
5553 cmn_err(CE_WARN, "iwk_rx_sens(): "
5554 "fail to send rx sensitivity command\n");
5555 return (rv);
5556 }
5557
5558 return (IWK_SUCCESS);
5559
5560 }
5561
5562 /*
 * perform Rx sensitivity calibration for CCK mode.
 * This prepares the parameters for the sensitivity command
5565 */
5566 static int iwk_cck_sens(iwk_sc_t *sc, uint32_t actual_rx_time)
5567 {
5568 int i;
5569 uint8_t noise_a, noise_b, noise_c;
5570 uint8_t max_noise_abc, max_noise_20;
5571 uint32_t beacon_a, beacon_b, beacon_c;
5572 uint32_t min_beacon_abc, max_beacon_10;
5573 uint32_t cck_fa, cck_bp;
5574 uint32_t cck_sum_fa_bp;
5575 uint32_t temp;
5576 struct statistics_rx_non_phy *rx_general_p =
5577 &sc->sc_statistics.rx.general;
5578 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5579
5580 cck_fa = LE_32(sc->sc_statistics.rx.cck.false_alarm_cnt);
5581 cck_bp = LE_32(sc->sc_statistics.rx.cck.plcp_err);
5582
5583 /* accumulate false alarm */
5584 if (rx_sens_p->last_false_alarm_cnt_cck > cck_fa) {
5585 temp = rx_sens_p->last_false_alarm_cnt_cck;
5586 rx_sens_p->last_false_alarm_cnt_cck = cck_fa;
5587 cck_fa += (0xFFFFFFFF - temp);
5588 } else {
5589 cck_fa -= rx_sens_p->last_false_alarm_cnt_cck;
5590 rx_sens_p->last_false_alarm_cnt_cck += cck_fa;
5591 }
5592
5593 /* accumulate bad plcp */
5594 if (rx_sens_p->last_bad_plcp_cnt_cck > cck_bp) {
5595 temp = rx_sens_p->last_bad_plcp_cnt_cck;
5596 rx_sens_p->last_bad_plcp_cnt_cck = cck_bp;
5597 cck_bp += (0xFFFFFFFF - temp);
5598 } else {
5599 cck_bp -= rx_sens_p->last_bad_plcp_cnt_cck;
5600 rx_sens_p->last_bad_plcp_cnt_cck += cck_bp;
5601 }
5602
5603 /*
5604 * calculate relative value
5605 */
5606 cck_sum_fa_bp = (cck_fa + cck_bp) * 200 * 1024;
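	/*
	 * The accumulated false-alarm and bad-PLCP counts are scaled by
	 * 200 * 1024 so that they can be compared directly against
	 * multiples of actual_rx_time (the channel load) further below:
	 * more than 50 * actual_rx_time means too many false alarms,
	 * fewer than 5 * actual_rx_time means too few.
	 */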
5607 rx_sens_p->cck_noise_diff = 0;
5608
5609 noise_a =
5610 (uint8_t)((LE_32(rx_general_p->beacon_silence_rssi_a) & 0xFF00) >>
5611 8);
5612 noise_b =
5613 (uint8_t)((LE_32(rx_general_p->beacon_silence_rssi_b) & 0xFF00) >>
5614 8);
5615 noise_c =
5616 (uint8_t)((LE_32(rx_general_p->beacon_silence_rssi_c) & 0xFF00) >>
5617 8);
5618
5619 beacon_a = LE_32(rx_general_p->beacon_energy_a);
5620 beacon_b = LE_32(rx_general_p->beacon_energy_b);
5621 beacon_c = LE_32(rx_general_p->beacon_energy_c);
5622
5623 /* determine maximum noise among 3 chains */
5624 if ((noise_a >= noise_b) && (noise_a >= noise_c)) {
5625 max_noise_abc = noise_a;
5626 } else if (noise_b >= noise_c) {
5627 max_noise_abc = noise_b;
5628 } else {
5629 max_noise_abc = noise_c;
5630 }
5631
5632 /* record maximum noise among 3 chains */
5633 rx_sens_p->cck_noise_max[rx_sens_p->cck_noise_idx] = max_noise_abc;
5634 rx_sens_p->cck_noise_idx++;
5635 if (rx_sens_p->cck_noise_idx >= 20) {
5636 rx_sens_p->cck_noise_idx = 0;
5637 }
5638
5639 /* determine maximum noise among 20 max noise */
5640 max_noise_20 = rx_sens_p->cck_noise_max[0];
5641 for (i = 0; i < 20; i++) {
5642 if (rx_sens_p->cck_noise_max[i] >= max_noise_20) {
5643 max_noise_20 = rx_sens_p->cck_noise_max[i];
5644 }
5645 }
5646
5647 /* determine minimum beacon among 3 chains */
5648 if ((beacon_a <= beacon_b) && (beacon_a <= beacon_c)) {
5649 min_beacon_abc = beacon_a;
5650 } else if (beacon_b <= beacon_c) {
5651 min_beacon_abc = beacon_b;
5652 } else {
5653 min_beacon_abc = beacon_c;
5654 }
5655
/* record minimum beacon among 3 chains */
5657 rx_sens_p->cck_beacon_min[rx_sens_p->cck_beacon_idx] = min_beacon_abc;
5658 rx_sens_p->cck_beacon_idx++;
5659 if (rx_sens_p->cck_beacon_idx >= 10) {
5660 rx_sens_p->cck_beacon_idx = 0;
5661 }
5662
/* determine the maximum among the 10 recorded minimum beacons */
5664 max_beacon_10 = rx_sens_p->cck_beacon_min[0];
5665 for (i = 0; i < 10; i++) {
5666 if (rx_sens_p->cck_beacon_min[i] >= max_beacon_10) {
5667 max_beacon_10 = rx_sens_p->cck_beacon_min[i];
5668 }
5669 }
5670
5671 /* add a little margin */
5672 max_beacon_10 += 6;
5673
/* count consecutive statistics intervals without false alarms */
5675 if (cck_sum_fa_bp < (5 * actual_rx_time)) {
5676 rx_sens_p->cck_no_false_alarm_num++;
5677 } else {
5678 rx_sens_p->cck_no_false_alarm_num = 0;
5679 }
5680
5681 /*
 * adjust the parameters in the sensitivity command
 * according to the current state.
 * For more information, please refer to the iwk_calibration.h file
5685 */
5686 if (cck_sum_fa_bp > (50 * actual_rx_time)) {
5687 rx_sens_p->cck_curr_state = IWK_TOO_MANY_FALSE_ALARM;
5688
5689 if (rx_sens_p->auto_corr_cck_x4 > 160) {
5690 rx_sens_p->cck_noise_ref = max_noise_20;
5691
5692 if (rx_sens_p->min_energy_det_cck > 2) {
5693 rx_sens_p->min_energy_det_cck -= 2;
5694 }
5695 }
5696
5697 if (rx_sens_p->auto_corr_cck_x4 < 160) {
5698 rx_sens_p->auto_corr_cck_x4 = 160 + 1;
5699 } else {
5700 if ((rx_sens_p->auto_corr_cck_x4 + 3) < 200) {
5701 rx_sens_p->auto_corr_cck_x4 += 3;
5702 } else {
5703 rx_sens_p->auto_corr_cck_x4 = 200;
5704 }
5705 }
5706
5707 if ((rx_sens_p->auto_corr_mrc_cck_x4 + 3) < 400) {
5708 rx_sens_p->auto_corr_mrc_cck_x4 += 3;
5709 } else {
5710 rx_sens_p->auto_corr_mrc_cck_x4 = 400;
5711 }
5712
5713 rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK;
5714
5715 } else if (cck_sum_fa_bp < (5 * actual_rx_time)) {
5716 rx_sens_p->cck_curr_state = IWK_TOO_FEW_FALSE_ALARM;
5717
5718 rx_sens_p->cck_noise_diff = (int32_t)rx_sens_p->cck_noise_ref -
5719 (int32_t)max_noise_20;
5720
5721 if ((rx_sens_p->cck_prev_state != IWK_TOO_MANY_FALSE_ALARM) &&
5722 ((rx_sens_p->cck_noise_diff > 2) ||
5723 (rx_sens_p->cck_no_false_alarm_num > 100))) {
5724 if ((rx_sens_p->min_energy_det_cck + 2) < 97) {
5725 rx_sens_p->min_energy_det_cck += 2;
5726 } else {
5727 rx_sens_p->min_energy_det_cck = 97;
5728 }
5729
5730 if ((rx_sens_p->auto_corr_cck_x4 - 3) > 125) {
5731 rx_sens_p->auto_corr_cck_x4 -= 3;
5732 } else {
5733 rx_sens_p->auto_corr_cck_x4 = 125;
5734 }
5735
5736 if ((rx_sens_p->auto_corr_mrc_cck_x4 -3) > 200) {
5737 rx_sens_p->auto_corr_mrc_cck_x4 -= 3;
5738 } else {
5739 rx_sens_p->auto_corr_mrc_cck_x4 = 200;
5740 }
5741
5742 rx_sens_p->flags |= IWK_SENSITIVITY_CCK_UPDATE_MSK;
5743 } else {
5744 rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5745 }
5746 } else {
5747 rx_sens_p->cck_curr_state = IWK_GOOD_RANGE_FALSE_ALARM;
5748
5749 rx_sens_p->cck_noise_ref = max_noise_20;
5750
5751 if (IWK_TOO_MANY_FALSE_ALARM == rx_sens_p->cck_prev_state) {
5752 rx_sens_p->min_energy_det_cck -= 8;
5753 }
5754
5755 rx_sens_p->flags &= (~IWK_SENSITIVITY_CCK_UPDATE_MSK);
5756 }
5757
5758 if (rx_sens_p->min_energy_det_cck < max_beacon_10) {
5759 rx_sens_p->min_energy_det_cck = (uint16_t)max_beacon_10;
5760 }
5761
5762 rx_sens_p->cck_prev_state = rx_sens_p->cck_curr_state;
5763
5764 return (IWK_SUCCESS);
5765 }
5766
5767 /*
5768 * make Rx sensitivity calibration for OFDM mode.
5769 * This is preparing parameters for Sensitivity command
5770 */
5771 static int iwk_ofdm_sens(iwk_sc_t *sc, uint32_t actual_rx_time)
5772 {
5773 uint32_t temp;
5774 uint16_t temp1;
5775 uint32_t ofdm_fa, ofdm_bp;
5776 uint32_t ofdm_sum_fa_bp;
5777 struct iwk_rx_sensitivity *rx_sens_p = &sc->sc_rx_sens;
5778
5779 ofdm_fa = LE_32(sc->sc_statistics.rx.ofdm.false_alarm_cnt);
5780 ofdm_bp = LE_32(sc->sc_statistics.rx.ofdm.plcp_err);
5781
5782 /* accumulate false alarm */
5783 if (rx_sens_p->last_false_alarm_cnt_ofdm > ofdm_fa) {
5784 temp = rx_sens_p->last_false_alarm_cnt_ofdm;
5785 rx_sens_p->last_false_alarm_cnt_ofdm = ofdm_fa;
5786 ofdm_fa += (0xFFFFFFFF - temp);
5787 } else {
5788 ofdm_fa -= rx_sens_p->last_false_alarm_cnt_ofdm;
5789 rx_sens_p->last_false_alarm_cnt_ofdm += ofdm_fa;
5790 }
5791
5792 /* accumulate bad plcp */
5793 if (rx_sens_p->last_bad_plcp_cnt_ofdm > ofdm_bp) {
5794 temp = rx_sens_p->last_bad_plcp_cnt_ofdm;
5795 rx_sens_p->last_bad_plcp_cnt_ofdm = ofdm_bp;
5796 ofdm_bp += (0xFFFFFFFF - temp);
5797 } else {
5798 ofdm_bp -= rx_sens_p->last_bad_plcp_cnt_ofdm;
5799 rx_sens_p->last_bad_plcp_cnt_ofdm += ofdm_bp;
5800 }
5801
5802 ofdm_sum_fa_bp = (ofdm_fa + ofdm_bp) * 200 * 1024; /* relative value */
5803
5804 /*
5805 * adjust sensitivity command parameters according to false alarm status
5806 */
5807 if (ofdm_sum_fa_bp > (50 * actual_rx_time)) {
5808 temp1 = rx_sens_p->auto_corr_ofdm_x4 + 1;
5809 rx_sens_p->auto_corr_ofdm_x4 = (temp1 <= 120) ? temp1 : 120;
5810
5811 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 + 1;
5812 rx_sens_p->auto_corr_mrc_ofdm_x4 =
5813 (temp1 <= 210) ? temp1 : 210;
5814
5815 temp1 = rx_sens_p->auto_corr_ofdm_x1 + 1;
5816 rx_sens_p->auto_corr_ofdm_x1 = (temp1 <= 140) ? temp1 : 140;
5817
5818 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 + 1;
5819 rx_sens_p->auto_corr_mrc_ofdm_x1 =
5820 (temp1 <= 270) ? temp1 : 270;
5821
5822 rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK;
5823
5824 } else if (ofdm_sum_fa_bp < (5 * actual_rx_time)) {
5825 temp1 = rx_sens_p->auto_corr_ofdm_x4 - 1;
5826 rx_sens_p->auto_corr_ofdm_x4 = (temp1 >= 85) ? temp1 : 85;
5827
5828 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x4 - 1;
5829 rx_sens_p->auto_corr_mrc_ofdm_x4 =
5830 (temp1 >= 170) ? temp1 : 170;
5831
5832 temp1 = rx_sens_p->auto_corr_ofdm_x1 - 1;
5833 rx_sens_p->auto_corr_ofdm_x1 = (temp1 >= 105) ? temp1 : 105;
5834
5835 temp1 = rx_sens_p->auto_corr_mrc_ofdm_x1 - 1;
5836 rx_sens_p->auto_corr_mrc_ofdm_x1 =
5837 (temp1 >= 220) ? temp1 : 220;
5838
5839 rx_sens_p->flags |= IWK_SENSITIVITY_OFDM_UPDATE_MSK;
5840
5841 } else {
5842 rx_sens_p->flags &= (~IWK_SENSITIVITY_OFDM_UPDATE_MSK);
5843 }
5844
5845 return (IWK_SUCCESS);
5846 }
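
/*
 * Note on the statistics handling above: the false-alarm and bad-PLCP
 * counters reported by the uCode are free-running 32-bit values, so the
 * accumulation code computes the per-interval delta in a wrap-safe way
 * before weighing it against actual_rx_time.  A minimal sketch of that
 * pattern (illustrative only; the helper name is hypothetical and not
 * part of this driver):
 *
 *	static uint32_t
 *	iwk_delta_wrap32(uint32_t cur, uint32_t *last)
 *	{
 *		uint32_t delta;
 *
 *		if (*last > cur)
 *			delta = cur + (0xFFFFFFFF - *last);
 *		else
 *			delta = cur - *last;
 *		*last = cur;
 *		return (delta);
 *	}
 *
 * The first branch mirrors the "+= (0xFFFFFFFF - temp)" arithmetic above,
 * and *last always ends up holding the raw hardware counter value, just as
 * last_false_alarm_cnt_ofdm and last_bad_plcp_cnt_ofdm do.
 */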
5847
5848 /*
5849 * additional process to management frames
5850 */
5851 static void iwk_recv_mgmt(struct ieee80211com *ic, mblk_t *mp,
5852 struct ieee80211_node *in,
5853 int subtype, int rssi, uint32_t rstamp)
5854 {
5855 iwk_sc_t *sc = (iwk_sc_t *)ic;
5856 struct ieee80211_frame *wh;
5857 uint8_t index1, index2;
5858 int err;
5859
5860 sc->sc_recv_mgmt(ic, mp, in, subtype, rssi, rstamp);
5861
5862 mutex_enter(&sc->sc_glock);
5863 switch (subtype) {
5864 case IEEE80211_FC0_SUBTYPE_BEACON:
5865 if (sc->sc_ibss.ibss_beacon.syncbeacon && in == ic->ic_bss &&
5866 ic->ic_state == IEEE80211_S_RUN) {
5867 if (ieee80211_beacon_update(ic, in,
5868 &sc->sc_ibss.ibss_beacon.iwk_boff,
5869 sc->sc_ibss.ibss_beacon.mp, 0)) {
5870 bcopy(sc->sc_ibss.ibss_beacon.mp->b_rptr,
5871 sc->sc_ibss.ibss_beacon.beacon_cmd.
5872 bcon_frame,
5873 MBLKL(sc->sc_ibss.ibss_beacon.mp));
5874 }
5875 err = iwk_cmd(sc, REPLY_TX_BEACON,
5876 &sc->sc_ibss.ibss_beacon.beacon_cmd,
5877 sc->sc_ibss.ibss_beacon.beacon_cmd_len, 1);
5878 if (err != IWK_SUCCESS) {
5879 cmn_err(CE_WARN, "iwk_recv_mgmt(): "
5880 "failed to TX beacon.\n");
5881 }
5882 sc->sc_ibss.ibss_beacon.syncbeacon = 0;
5883 }
5884 if (ic->ic_opmode == IEEE80211_M_IBSS &&
5885 ic->ic_state == IEEE80211_S_RUN) {
5886 wh = (struct ieee80211_frame *)mp->b_rptr;
5887 mutex_enter(&sc->sc_ibss.node_tb_lock);
5888 /*
5889 * search for node in ibss node table
5890 */
5891 for (index1 = IWK_STA_ID; index1 < IWK_STATION_COUNT;
5892 index1++) {
5893 if (sc->sc_ibss.ibss_node_tb[index1].used &&
5894 IEEE80211_ADDR_EQ(sc->sc_ibss.
5895 ibss_node_tb[index1].node.bssid,
5896 wh->i_addr2)) {
5897 break;
5898 }
5899 }
5900 /*
5901 * if the node is not found in the ibss node table
5902 */
5903 if (index1 >= IWK_BROADCAST_ID) {
5904 err = iwk_clean_add_node_ibss(ic,
5905 wh->i_addr2, &index2);
5906 if (err != IWK_SUCCESS) {
5907 cmn_err(CE_WARN, "iwk_recv_mgmt(): "
5908 "failed to clean all nodes "
5909 "and add one node\n");
5910 }
5911 }
5912 mutex_exit(&sc->sc_ibss.node_tb_lock);
5913 }
5914 break;
5915 case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
5916 break;
5917 }
5918 mutex_exit(&sc->sc_glock);
5919 }
5920
5921 /*
5922 * 1) log_event_table_ptr indicates base of the event log. This traces
5923 * a 256-entry history of uCode execution within a circular buffer.
5924 * Its header format is:
5925 *
5926 * uint32_t log_size; log capacity (in number of entries)
5927 * uint32_t type; (1) timestamp with each entry, (0) no timestamp
5928 * uint32_t wraps; # times uCode has wrapped to top of circular buffer
5929 * uint32_t write_index; next circular buffer entry that uCode would fill
5930 *
5931 * The header is followed by the circular buffer of log entries. Entries
5932 * with timestamps have the following format:
5933 *
5934 * uint32_t event_id; range 0 - 1500
5935 * uint32_t timestamp; low 32 bits of TSF (of network, if associated)
5936 * uint32_t data; event_id-specific data value
5937 *
5938 * Entries without timestamps contain only event_id and data.
5939 */
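
/*
 * For illustration, the header and a timestamped entry described above can
 * be pictured as the structures below.  This is a sketch only; these types
 * are not defined by the driver, which reads each field individually with
 * iwk_mem_read() in iwk_write_event_log():
 *
 *	typedef struct event_log_header {
 *		uint32_t log_size;	log capacity, in entries
 *		uint32_t type;		1: timestamped entries, 0: not
 *		uint32_t wraps;		wrap count
 *		uint32_t write_index;	next entry uCode will fill
 *	} event_log_header_t;
 *
 *	typedef struct event_log_entry {
 *		uint32_t event_id;	range 0 - 1500
 *		uint32_t timestamp;	omitted when type == 0
 *		uint32_t data;		event_id-specific value
 *	} event_log_entry_t;
 */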
5940
5941 /*
5942 * iwk_write_event_log - Write event log to dmesg
5943 */
5944 static void iwk_write_event_log(iwk_sc_t *sc)
5945 {
5946 uint32_t log_event_table_ptr; /* Start address of event table */
5947 uint32_t startptr; /* Start address of log data */
5948 uint32_t logptr; /* address of log data entry */
5949 uint32_t i, n, num_events;
5950 uint32_t event_id, data1, data2; /* log data */
5951
5952 uint32_t log_size; /* log capacity (in number of entries) */
5953 uint32_t type; /* (1)timestamp with each entry,(0) no timestamp */
5954 uint32_t wraps; /* # times uCode has wrapped to */
5955 /* the top of circular buffer */
5956 uint32_t idx; /* index of entry to be filled in next */
5957
5958 log_event_table_ptr = LE_32(sc->sc_card_alive_run.log_event_table_ptr);
5959 if (!(log_event_table_ptr)) {
5960 IWK_DBG((IWK_DEBUG_EEPROM, "NULL event table pointer\n"));
5961 return;
5962 }
5963
5964 iwk_mac_access_enter(sc);
5965
5966 /* Read log header */
5967 log_size = iwk_mem_read(sc, log_event_table_ptr);
5968 log_event_table_ptr += sizeof (uint32_t); /* addr of "type" */
5969 type = iwk_mem_read(sc, log_event_table_ptr);
5970 log_event_table_ptr += sizeof (uint32_t); /* addr of "wraps" */
5971 wraps = iwk_mem_read(sc, log_event_table_ptr);
5972 log_event_table_ptr += sizeof (uint32_t); /* addr of "idx" */
5973 idx = iwk_mem_read(sc, log_event_table_ptr);
5974 startptr = log_event_table_ptr +
5975 sizeof (uint32_t); /* addr of start of log data */
5976 if (!log_size && !wraps) {
5977 IWK_DBG((IWK_DEBUG_EEPROM, "Empty log\n"));
5978 iwk_mac_access_exit(sc);
5979 return;
5980 }
5981
5982 if (!wraps) {
5983 num_events = idx;
5984 logptr = startptr;
5985 } else {
5986 num_events = log_size - idx;
5987 n = type ? 3 : 2; /* 3 words per entry w/ timestamp, else 2 */
5988 logptr = startptr + (idx * n * sizeof (uint32_t));
5989 }
5990
5991 for (i = 0; i < num_events; i++) {
5992 event_id = iwk_mem_read(sc, logptr);
5993 logptr += sizeof (uint32_t);
5994 data1 = iwk_mem_read(sc, logptr);
5995 logptr += sizeof (uint32_t);
5996 if (type == 0) { /* no timestamp */
5997 IWK_DBG((IWK_DEBUG_EEPROM, "Event ID=%d, Data=0x%x\n",
5998 event_id, data1));
5999 } else { /* timestamp */
6000 data2 = iwk_mem_read(sc, logptr);
6001 IWK_DBG((IWK_DEBUG_EEPROM,
6002 "Time=%d, Event ID=%d, Data=0x%x\n",
6003 data1, event_id, data2));
6004 logptr += sizeof (uint32_t);
6005 }
6006 }
6007
6008 /*
6009 * Print the wrapped around entries, if any
6010 */
6011 if (wraps) {
6012 logptr = startptr;
6013 for (i = 0; i < idx; i++) {
6014 event_id = iwk_mem_read(sc, logptr);
6015 logptr += sizeof (uint32_t);
6016 data1 = iwk_mem_read(sc, logptr);
6017 logptr += sizeof (uint32_t);
6018 if (type == 0) { /* no timestamp */
6019 IWK_DBG((IWK_DEBUG_EEPROM,
6020 "Event ID=%d, Data=0x%x\n", event_id, data1));
6021 } else { /* timestamp */
6022 data2 = iwk_mem_read(sc, logptr);
6023 IWK_DBG((IWK_DEBUG_EEPROM,
6024 "Time = %d, Event ID=%d, Data=0x%x\n",
6025 data1, event_id, data2));
6026 logptr += sizeof (uint32_t);
6027 }
6028 }
6029 }
6030
6031 iwk_mac_access_exit(sc);
6032 }
6033
6034 /*
6035 * error_event_table_ptr indicates base of the error log. This contains
6036 * information about any uCode error that occurs. For 4965, the format is:
6037 *
6038 * uint32_t valid; (nonzero) valid, (0) log is empty
6039 * uint32_t error_id; type of error
6040 * uint32_t pc; program counter
6041 * uint32_t blink1; branch link
6042 * uint32_t blink2; branch link
6043 * uint32_t ilink1; interrupt link
6044 * uint32_t ilink2; interrupt link
6045 * uint32_t data1; error-specific data
6046 * uint32_t data2; error-specific data
6047 * uint32_t line; source code line of error
6048 * uint32_t bcon_time; beacon timer
6049 * uint32_t tsf_low; timing synchronization function (TSF) timer, low word
6050 * uint32_t tsf_hi; timing synchronization function (TSF) timer, high word
6051 */
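
/*
 * Laid out as a structure, the error table reads as below.  This is
 * illustrative only; iwk_write_error_log() walks the table word by word
 * with iwk_mem_read() rather than defining such a type:
 *
 *	typedef struct error_event_table {
 *		uint32_t valid;
 *		uint32_t error_id;
 *		uint32_t pc;
 *		uint32_t blink1, blink2;
 *		uint32_t ilink1, ilink2;
 *		uint32_t data1, data2;
 *		uint32_t line;
 *		uint32_t bcon_time;
 *		uint32_t tsf_low, tsf_hi;
 *	} error_event_table_t;
 */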
6052 /*
6053 * iwk_write_error_log - Write error log to dmesg
6054 */
6055 static void iwk_write_error_log(iwk_sc_t *sc)
6056 {
6057 uint32_t err_ptr; /* Start address of error log */
6058 uint32_t valid; /* is error log valid */
6059
6060 err_ptr = LE_32(sc->sc_card_alive_run.error_event_table_ptr);
6061 if (!(err_ptr)) {
6062 IWK_DBG((IWK_DEBUG_EEPROM, "NULL error table pointer\n"));
6063 return;
6064 }
6065
6066 iwk_mac_access_enter(sc);
6067
6068 valid = iwk_mem_read(sc, err_ptr);
6069 if (!(valid)) {
6070 IWK_DBG((IWK_DEBUG_EEPROM, "Error data not valid\n"));
6071 iwk_mac_access_exit(sc);
6072 return;
6073 }
6074 err_ptr += sizeof (uint32_t);
6075 IWK_DBG((IWK_DEBUG_EEPROM, "err=%d ", iwk_mem_read(sc, err_ptr)));
6076 err_ptr += sizeof (uint32_t);
6077 IWK_DBG((IWK_DEBUG_EEPROM, "pc=0x%X ", iwk_mem_read(sc, err_ptr)));
6078 err_ptr += sizeof (uint32_t);
6079 IWK_DBG((IWK_DEBUG_EEPROM,
6080 "branch link1=0x%X ", iwk_mem_read(sc, err_ptr)));
6081 err_ptr += sizeof (uint32_t);
6082 IWK_DBG((IWK_DEBUG_EEPROM,
6083 "branch link2=0x%X ", iwk_mem_read(sc, err_ptr)));
6084 err_ptr += sizeof (uint32_t);
6085 IWK_DBG((IWK_DEBUG_EEPROM,
6086 "interrupt link1=0x%X ", iwk_mem_read(sc, err_ptr)));
6087 err_ptr += sizeof (uint32_t);
6088 IWK_DBG((IWK_DEBUG_EEPROM,
6089 "interrupt link2=0x%X ", iwk_mem_read(sc, err_ptr)));
6090 err_ptr += sizeof (uint32_t);
6091 IWK_DBG((IWK_DEBUG_EEPROM, "data1=0x%X ", iwk_mem_read(sc, err_ptr)));
6092 err_ptr += sizeof (uint32_t);
6093 IWK_DBG((IWK_DEBUG_EEPROM, "data2=0x%X ", iwk_mem_read(sc, err_ptr)));
6094 err_ptr += sizeof (uint32_t);
6095 IWK_DBG((IWK_DEBUG_EEPROM, "line=%d ", iwk_mem_read(sc, err_ptr)));
6096 err_ptr += sizeof (uint32_t);
6097 IWK_DBG((IWK_DEBUG_EEPROM, "bcon_time=%d ", iwk_mem_read(sc, err_ptr)));
6098 err_ptr += sizeof (uint32_t);
6099 IWK_DBG((IWK_DEBUG_EEPROM, "tsf_low=%d ", iwk_mem_read(sc, err_ptr)));
6100 err_ptr += sizeof (uint32_t);
6101 IWK_DBG((IWK_DEBUG_EEPROM, "tsf_hi=%d\n", iwk_mem_read(sc, err_ptr)));
6102
6103 iwk_mac_access_exit(sc);
6104 }
6105
6106 static int
6107 iwk_run_state_config_ibss(ieee80211com_t *ic)
6108 {
6109 iwk_sc_t *sc = (iwk_sc_t *)ic;
6110 ieee80211_node_t *in = ic->ic_bss;
6111 int i, err = IWK_SUCCESS;
6112
6113 mutex_enter(&sc->sc_ibss.node_tb_lock);
6114
6115 /*
6116 * clean all nodes in the ibss node table to keep it
6117 * consistent with the hardware
6118 */
6119 for (i = IWK_STA_ID; i < IWK_STATION_COUNT; i++) {
6120 sc->sc_ibss.ibss_node_tb[i].used = 0;
6121 (void) memset(&sc->sc_ibss.ibss_node_tb[i].node,
6122 0,
6123 sizeof (iwk_add_sta_t));
6124 }
6125
6126 sc->sc_ibss.node_number = 0;
6127
6128 mutex_exit(&sc->sc_ibss.node_tb_lock);
6129
6130 /*
6131 * configure RX and TX
6132 */
6133 sc->sc_config.dev_type = RXON_DEV_TYPE_IBSS;
6134
6135 sc->sc_config.flags |= LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
6136 sc->sc_config.filter_flags =
6137 LE_32(RXON_FILTER_ACCEPT_GRP_MSK |
6138 RXON_FILTER_DIS_DECRYPT_MSK |
6139 RXON_FILTER_DIS_GRP_DECRYPT_MSK);
6140
6141 sc->sc_config.assoc_id = 0;
6142
6143 IEEE80211_ADDR_COPY(sc->sc_config.bssid, in->in_bssid);
6144 sc->sc_config.chan = LE_16(ieee80211_chan2ieee(ic,
6145 in->in_chan));
6146
6147 if (ic->ic_curmode == IEEE80211_MODE_11B) {
6148 sc->sc_config.cck_basic_rates = 0x03;
6149 sc->sc_config.ofdm_basic_rates = 0;
6150 } else if ((in->in_chan != IEEE80211_CHAN_ANYC) &&
6151 (IEEE80211_IS_CHAN_5GHZ(in->in_chan))) {
6152 sc->sc_config.cck_basic_rates = 0;
6153 sc->sc_config.ofdm_basic_rates = 0x15;
6154
6155 } else {
6156 sc->sc_config.cck_basic_rates = 0x0f;
6157 sc->sc_config.ofdm_basic_rates = 0xff;
6158 }
6159
6160 sc->sc_config.flags &=
6161 ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
6162 RXON_FLG_SHORT_SLOT_MSK);
6163
6164 if (ic->ic_flags & IEEE80211_F_SHSLOT) {
6165 sc->sc_config.flags |=
6166 LE_32(RXON_FLG_SHORT_SLOT_MSK);
6167 }
6168
6169 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) {
6170 sc->sc_config.flags |=
6171 LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
6172 }
6173
6174 sc->sc_config.filter_flags |=
6175 LE_32(RXON_FILTER_ASSOC_MSK);
6176
6177 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
6178 sizeof (iwk_rxon_cmd_t), 1);
6179 if (err != IWK_SUCCESS) {
6180 cmn_err(CE_WARN, "iwk_run_state_config_ibss(): "
6181 "failed to update configuration.\n");
6182 return (err);
6183 }
6184
6185 return (err);
6187 }
6188
6189 static int
6190 iwk_run_state_config_sta(ieee80211com_t *ic)
6191 {
6192 iwk_sc_t *sc = (iwk_sc_t *)ic;
6193 ieee80211_node_t *in = ic->ic_bss;
6194 int err = IWK_SUCCESS;
6195
6196 /* update adapter's configuration */
6197 if (sc->sc_assoc_id != in->in_associd) {
6198 cmn_err(CE_WARN, "iwk_run_state_config_sta(): "
6199 "association ID mismatch: expected %d, "
6200 "got %d\n",
6201 in->in_associd, sc->sc_assoc_id);
6202 }
6203 sc->sc_config.assoc_id = LE_16(in->in_associd & 0x3fff);
6204
6205 /*
6206 * short preamble/slot time are
6207 * negotiated when associating
6208 */
6209 sc->sc_config.flags &=
6210 ~LE_32(RXON_FLG_SHORT_PREAMBLE_MSK |
6211 RXON_FLG_SHORT_SLOT_MSK);
6212
6213 if (ic->ic_flags & IEEE80211_F_SHSLOT)
6214 sc->sc_config.flags |=
6215 LE_32(RXON_FLG_SHORT_SLOT_MSK);
6216
6217 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6218 sc->sc_config.flags |=
6219 LE_32(RXON_FLG_SHORT_PREAMBLE_MSK);
6220
6221 sc->sc_config.filter_flags |=
6222 LE_32(RXON_FILTER_ASSOC_MSK);
6223
6224 if (ic->ic_opmode != IEEE80211_M_STA)
6225 sc->sc_config.filter_flags |=
6226 LE_32(RXON_FILTER_BCON_AWARE_MSK);
6227
6228 IWK_DBG((IWK_DEBUG_80211, "config chan %d flags %x"
6229 " filter_flags %x\n",
6230 sc->sc_config.chan, sc->sc_config.flags,
6231 sc->sc_config.filter_flags));
6232
6233 err = iwk_cmd(sc, REPLY_RXON, &sc->sc_config,
6234 sizeof (iwk_rxon_cmd_t), 1);
6235 if (err != IWK_SUCCESS) {
6236 cmn_err(CE_WARN, "iwk_run_state_config_sta(): "
6237 "failed to update configuration\n");
6238 return (err);
6239 }
6240
6241 return (err);
6242 }
6243
6244 static int
6245 iwk_fast_recover(iwk_sc_t *sc)
6246 {
6247 ieee80211com_t *ic = &sc->sc_ic;
6248 int err;
6249
6250 mutex_enter(&sc->sc_glock);
6251
6252 /* restore runtime configuration */
6253 bcopy(&sc->sc_config_save, &sc->sc_config,
6254 sizeof (sc->sc_config));
6255
6256 /* reset state to handle reassociations correctly */
6257 sc->sc_config.assoc_id = 0;
6258 sc->sc_config.filter_flags &=
6259 ~LE_32(RXON_FILTER_ASSOC_MSK);
6260
6261 if ((err = iwk_hw_set_before_auth(sc)) != 0) {
6262 cmn_err(CE_WARN, "iwk_fast_recover(): "
6263 "failed to setup authentication\n");
6264 mutex_exit(&sc->sc_glock);
6265 return (err);
6266 }
6267
6268 bcopy(&sc->sc_config_save, &sc->sc_config,
6269 sizeof (sc->sc_config));
6270
6271 /* update adapter's configuration */
6272 err = iwk_run_state_config_sta(ic);
6273 if (err != IWK_SUCCESS) {
6274 cmn_err(CE_WARN, "iwk_fast_recover(): "
6275 "failed to setup association\n");
6276 mutex_exit(&sc->sc_glock);
6277 return (err);
6278 }
6279
6280 /* obtain current temperature of chipset */
6281 sc->sc_tempera = iwk_curr_tempera(sc);
6282
6283 /*
6284 * make Tx power calibration to determine
6285 * the gains of DSP and radio
6286 */
6287 err = iwk_tx_power_calibration(sc);
6288 if (err) {
6289 cmn_err(CE_WARN, "iwk_fast_recover(): "
6290 "failed to set tx power table\n");
6291 mutex_exit(&sc->sc_glock);
6292 return (err);
6293 }
6294
6295 /*
6296 * make initialization for Receiver
6297 * sensitivity calibration
6298 */
6299 err = iwk_rx_sens_init(sc);
6300 if (err) {
6301 cmn_err(CE_WARN, "iwk_fast_recover(): "
6302 "failed to init RX sensitivity\n");
6303 mutex_exit(&sc->sc_glock);
6304 return (err);
6305 }
6306
6307 /* make initialization for Receiver gain balance */
6308 err = iwk_rxgain_diff_init(sc);
6309 if (err) {
6310 cmn_err(CE_WARN, "iwk_fast_recover(): "
6311 "failed to init phy calibration\n");
6312 mutex_exit(&sc->sc_glock);
6313 return (err);
6315 }
6316 /* set LED on */
6317 iwk_set_led(sc, 2, 0, 1);
6318
6319 mutex_exit(&sc->sc_glock);
6320
6321 /* update keys */
6322 if (ic->ic_flags & IEEE80211_F_PRIVACY) {
6323 for (int i = 0; i < IEEE80211_KEY_MAX; i++) {
6324 if (ic->ic_nw_keys[i].wk_keyix == IEEE80211_KEYIX_NONE)
6325 continue;
6326 err = iwk_key_set(ic, &ic->ic_nw_keys[i],
6327 ic->ic_bss->in_macaddr);
6328 /* failure */
6329 if (err == 0) {
6330 cmn_err(CE_WARN, "iwk_fast_recover(): "
6331 "failed to setup hardware keys\n");
6332 return (IWK_FAIL);
6333 }
6334 }
6335 }
6336
6337 sc->sc_flags &= ~IWK_F_HW_ERR_RECOVER;
6338
6339 /* start queue */
6340 IWK_DBG((IWK_DEBUG_FW, "iwk_fast_recover(): resume xmit\n"));
6341 mac_tx_update(ic->ic_mach);
6342
6344 return (IWK_SUCCESS);
6345 }
6346
6347 static int
6348 iwk_start_tx_beacon(ieee80211com_t *ic)
6349 {
6350 iwk_sc_t *sc = (iwk_sc_t *)ic;
6351 ieee80211_node_t *in = ic->ic_bss;
6352 int err = IWK_SUCCESS;
6353 iwk_tx_beacon_cmd_t *tx_beacon_p;
6354 uint16_t masks = 0;
6355 mblk_t *mp;
6356 int rate;
6357
6358 /*
6359 * allocate and transmit beacon frames
6360 */
6361 tx_beacon_p = &sc->sc_ibss.ibss_beacon.beacon_cmd;
6362
6363 (void) memset(tx_beacon_p, 0,
6364 sizeof (iwk_tx_beacon_cmd_t));
6365 rate = 0;
6366 masks = 0;
6367
6368 tx_beacon_p->config.sta_id = IWK_BROADCAST_ID;
6369 tx_beacon_p->config.stop_time.life_time =
6370 LE_32(0xffffffff);
6371
6372 if (sc->sc_ibss.ibss_beacon.mp != NULL) {
6373 freemsg(sc->sc_ibss.ibss_beacon.mp);
6374 sc->sc_ibss.ibss_beacon.mp = NULL;
6375 }
6376
6377 sc->sc_ibss.ibss_beacon.mp =
6378 ieee80211_beacon_alloc(ic, in,
6379 &sc->sc_ibss.ibss_beacon.iwk_boff);
6380 if (sc->sc_ibss.ibss_beacon.mp == NULL) {
6381 cmn_err(CE_WARN, "iwk_start_tx_beacon(): "
6382 "failed to get beacon frame.\n");
6383 return (IWK_FAIL);
6384 }
6385
6386 mp = sc->sc_ibss.ibss_beacon.mp;
6387
6388 ASSERT(mp->b_cont == NULL);
6389
6390 bcopy(mp->b_rptr, tx_beacon_p->bcon_frame, MBLKL(mp));
6391
6392 tx_beacon_p->config.len = LE_16((uint16_t)(MBLKL(mp)));
6393 sc->sc_ibss.ibss_beacon.beacon_cmd_len =
6394 sizeof (iwk_tx_cmd_t) +
6395 4 + LE_16(tx_beacon_p->config.len);
6396
6397 /*
6398 * beacons are sent at the lowest rate in the node's rate set
6399 */
6400 rate = in->in_rates.ir_rates[0];
6401 rate &= IEEE80211_RATE_VAL;
6402
6403 if (2 == rate || 4 == rate || 11 == rate ||
6404 22 == rate) {
6405 masks |= RATE_MCS_CCK_MSK;
6406 }
6407
6408 masks |= RATE_MCS_ANT_B_MSK;
6409
6410 tx_beacon_p->config.rate.r.rate_n_flags =
6411 LE_32(iwk_rate_to_plcp(rate) | masks);
6412
6414 tx_beacon_p->config.tx_flags =
6415 LE_32(TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK);
6416
6417 if (ic->ic_bss->in_tstamp.tsf != 0) {
6418 sc->sc_ibss.ibss_beacon.syncbeacon = 1;
6419 } else {
6420 if (ieee80211_beacon_update(ic, in,
6421 &sc->sc_ibss.ibss_beacon.iwk_boff,
6422 mp, 0)) {
6423 bcopy(mp->b_rptr,
6424 tx_beacon_p->bcon_frame,
6425 MBLKL(mp));
6426 }
6427
6428 err = iwk_cmd(sc, REPLY_TX_BEACON,
6429 tx_beacon_p,
6430 sc->sc_ibss.ibss_beacon.beacon_cmd_len,
6431 1);
6432 if (err != IWK_SUCCESS) {
6433 cmn_err(CE_WARN, "iwk_start_tx_beacon(): "
6434 "failed to TX beacon.\n");
6435 return (err);
6436 }
6437
6438 sc->sc_ibss.ibss_beacon.syncbeacon = 0;
6439 }
6440
6441 return (err);
6442 }
6443
6444 static int
6445 iwk_clean_add_node_ibss(struct ieee80211com *ic,
6446 uint8_t addr[IEEE80211_ADDR_LEN], uint8_t *index2)
6447 {
6448 iwk_sc_t *sc = (iwk_sc_t *)ic;
6449 uint8_t index;
6450 iwk_add_sta_t bc_node;
6451 iwk_link_quality_cmd_t bc_link_quality;
6452 iwk_link_quality_cmd_t link_quality;
6453 uint16_t bc_masks = 0;
6454 uint16_t masks = 0;
6455 int i, rate;
6456 struct ieee80211_rateset rs;
6457 iwk_ibss_node_t *ibss_node_p;
6458 int err = IWK_SUCCESS;
6459
6460 /*
6461 * find a location that is not
6462 * used in ibss node table
6463 */
6464 for (index = IWK_STA_ID;
6465 index < IWK_STATION_COUNT; index++) {
6466 if (!sc->sc_ibss.ibss_node_tb[index].used) {
6467 break;
6468 }
6469 }
6470
6471 /*
6472 * if there are too many nodes in hardware, clean them up
6473 */
6474 if (index < IWK_BROADCAST_ID &&
6475 sc->sc_ibss.node_number >= 25) {
6476 if (iwk_cmd(sc, REPLY_REMOVE_ALL_STA,
6477 NULL, 0, 1) != IWK_SUCCESS) {
6478 cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6479 "failed to remove all nodes in hardware\n");
6480 return (IWK_FAIL);
6481 }
6482
6483 for (i = IWK_STA_ID; i < IWK_STATION_COUNT; i++) {
6484 sc->sc_ibss.ibss_node_tb[i].used = 0;
6485 (void) memset(&sc->sc_ibss.ibss_node_tb[i].node,
6486 0, sizeof (iwk_add_sta_t));
6487 }
6488
6489 sc->sc_ibss.node_number = 0;
6490
6491 /*
6492 * add the broadcast node so that we
6493 * can send broadcast frames
6494 */
6495 (void) memset(&bc_node, 0, sizeof (bc_node));
6496 (void) memset(bc_node.bssid, 0xff, 6);
6497 bc_node.id = IWK_BROADCAST_ID;
6498
6499 err = iwk_cmd(sc, REPLY_ADD_STA, &bc_node, sizeof (bc_node), 1);
6500 if (err != IWK_SUCCESS) {
6501 cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6502 "failed to add broadcast node\n");
6503 return (err);
6504 }
6505
6506 /* TX_LINK_QUALITY cmd */
6507 (void) memset(&bc_link_quality, 0, sizeof (bc_link_quality));
6508 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
6509 bc_masks |= RATE_MCS_CCK_MSK;
6510 bc_masks |= RATE_MCS_ANT_B_MSK;
6511 bc_masks &= ~RATE_MCS_ANT_A_MSK;
6512 bc_link_quality.rate_n_flags[i] =
6513 LE_32(iwk_rate_to_plcp(2) | bc_masks);
6514 }
6515
6516 bc_link_quality.general_params.single_stream_ant_msk = 2;
6517 bc_link_quality.general_params.dual_stream_ant_msk = 3;
6518 bc_link_quality.agg_params.agg_dis_start_th = 3;
6519 bc_link_quality.agg_params.agg_time_limit = LE_16(4000);
6520 bc_link_quality.sta_id = IWK_BROADCAST_ID;
6521
6522 err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD,
6523 &bc_link_quality, sizeof (bc_link_quality), 1);
6524 if (err != IWK_SUCCESS) {
6525 cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6526 "failed to config link quality table\n");
6527 return (err);
6528 }
6529 }
6530
6531 if (index >= IWK_BROADCAST_ID) {
6532 cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6533 "too many nodes in hardware\n");
6534 return (IWK_FAIL);
6535 }
6536
6537 /*
6538 * add a node into hardware
6539 */
6540 ibss_node_p = &sc->sc_ibss.ibss_node_tb[index];
6541
6542 ibss_node_p->used = 1;
6543
6544 (void) memset(&ibss_node_p->node, 0,
6545 sizeof (iwk_add_sta_t));
6546
6547 IEEE80211_ADDR_COPY(ibss_node_p->node.bssid, addr);
6548 ibss_node_p->node.id = index;
6549 ibss_node_p->node.control = 0;
6550 ibss_node_p->node.flags = 0;
6551
6552 err = iwk_cmd(sc, REPLY_ADD_STA, &ibss_node_p->node,
6553 sizeof (iwk_add_sta_t), 1);
6554 if (err != IWK_SUCCESS) {
6555 cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6556 "failed to add IBSS node\n");
6557 ibss_node_p->used = 0;
6558 (void) memset(&ibss_node_p->node, 0,
6559 sizeof (iwk_add_sta_t));
6560 return (err);
6561 }
6562
6563 sc->sc_ibss.node_number++;
6564
6565 (void) memset(&link_quality, 0, sizeof (link_quality));
6566
6567 rs = ic->ic_sup_rates[ieee80211_chan2mode(ic,
6568 ic->ic_curchan)];
6569
6570 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
6571 if (i < rs.ir_nrates) {
6572 rate = rs.
6573 ir_rates[rs.ir_nrates - 1 - i];
6574 } else {
6575 rate = 2;
6576 }
6577
6578 if (2 == rate || 4 == rate ||
6579 11 == rate || 22 == rate) {
6580 masks |= RATE_MCS_CCK_MSK;
6581 }
6582
6583 masks |= RATE_MCS_ANT_B_MSK;
6584 masks &= ~RATE_MCS_ANT_A_MSK;
6585
6586 link_quality.rate_n_flags[i] =
6587 LE_32(iwk_rate_to_plcp(rate) | masks);
6588 }
6589
6590 link_quality.general_params.single_stream_ant_msk = 2;
6591 link_quality.general_params.dual_stream_ant_msk = 3;
6592 link_quality.agg_params.agg_dis_start_th = 3;
6593 link_quality.agg_params.agg_time_limit = LE_16(4000);
6594 link_quality.sta_id = ibss_node_p->node.id;
6595
6596 err = iwk_cmd(sc, REPLY_TX_LINK_QUALITY_CMD,
6597 &link_quality, sizeof (link_quality), 1);
6598 if (err != IWK_SUCCESS) {
6599 cmn_err(CE_WARN, "iwk_clean_add_node_ibss(): "
6600 "failed to set up TX link quality\n");
6601 ibss_node_p->used = 0;
6602 (void) memset(ibss_node_p->node.bssid, 0, 6);
6603 return (err);
6604 }
6605
6606 *index2 = index;
6607
6608 return (err);
6609 }