1 /******************************************************************************
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *****************************************************************************/
27 #include <linux/kernel.h>
28 #include <linux/module.h>
29 #include <linux/version.h>
30 #include <linux/init.h>
31 #include <linux/pci.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/delay.h>
34 #include <linux/skbuff.h>
35 #include <linux/netdevice.h>
36 #include <linux/wireless.h>
37 #include <net/mac80211.h>
38 #include <linux/etherdevice.h>
39 #include <asm/unaligned.h>
41 #include "iwl-eeprom.h"
45 #include "iwl-helpers.h"
46 #include "iwl-calib.h"
48 /* module parameters */
/* Default module parameters for the 4965; only the number of Tx queues is
 * set explicitly here — all other fields rely on static zero-initialization. */
49 static struct iwl_mod_params iwl4965_mod_params = {
50 .num_of_queues = IWL49_NUM_QUEUES,
53 /* the rest are 0 by default */
56 static void iwl4965_hw_card_show_info(struct iwl_priv *priv);
58 #ifdef CONFIG_IWL4965_HT
60 static const u16 default_tid_to_tx_fifo[] = {
80 #endif /*CONFIG_IWL4965_HT */
82 /* check contents of special bootstrap uCode SRAM */
/* check contents of special bootstrap uCode SRAM
 *
 * Reads back every 32-bit word of BSM SRAM and compares it against the
 * little-endian bootstrap image previously written from priv->ucode_boot.
 * Logs an error on the first mismatching word. */
83 static int iwl4965_verify_bsm(struct iwl_priv *priv)
85 __le32 *image = priv->ucode_boot.v_addr;
86 u32 len = priv->ucode_boot.len;
90 IWL_DEBUG_INFO("Begin verify bsm\n");
92 /* verify BSM SRAM contents */
93 val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
94 for (reg = BSM_SRAM_LOWER_BOUND;
95 reg < BSM_SRAM_LOWER_BOUND + len;
96 reg += sizeof(u32), image++) {
97 val = iwl_read_prph(priv, reg);
/* Image in host memory is __le32; convert before comparing with the
 * CPU-order value read back from the device. */
98 if (val != le32_to_cpu(*image)) {
99 IWL_ERROR("BSM uCode verification failed at "
100 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
101 BSM_SRAM_LOWER_BOUND,
102 reg - BSM_SRAM_LOWER_BOUND, len,
103 val, le32_to_cpu(*image));
108 IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");
114 * iwl4965_load_bsm - Load bootstrap instructions
118 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
119 * in special SRAM that does not power down during RFKILL. When powering back
120 * up after power-saving sleeps (or during initial uCode load), the BSM loads
121 * the bootstrap program into the on-board processor, and starts it.
123 * The bootstrap program loads (via DMA) instructions and data for a new
124 * program from host DRAM locations indicated by the host driver in the
125 * BSM_DRAM_* registers. Once the new program is loaded, it starts
128 * When initializing the NIC, the host driver points the BSM to the
129 * "initialize" uCode image. This uCode sets up some internal data, then
130 * notifies host via "initialize alive" that it is complete.
132 * The host then replaces the BSM_DRAM_* pointer values to point to the
133 * normal runtime uCode instructions and a backup uCode data cache buffer
134 * (filled initially with starting data values for the on-board processor),
135 * then triggers the "initialize" uCode to load and launch the runtime uCode,
136 * which begins normal operation.
138 * When doing a power-save shutdown, runtime uCode saves data SRAM into
139 * the backup data cache in DRAM before SRAM is powered down.
141 * When powering back up, the BSM loads the bootstrap program. This reloads
142 * the runtime uCode instructions and the backup data cache into SRAM,
143 * and re-launches the runtime uCode from where it left off.
/* Load the bootstrap program into BSM SRAM, point the BSM at the
 * "initialize" uCode in host DRAM, trigger an initial load into
 * instruction SRAM, and arm the BSM for future power-management-driven
 * reloads.  See the block comment above for the overall BSM flow. */
145 static int iwl4965_load_bsm(struct iwl_priv *priv)
147 __le32 *image = priv->ucode_boot.v_addr;
148 u32 len = priv->ucode_boot.len;
158 IWL_DEBUG_INFO("Begin load bsm\n");
160 /* make sure bootstrap program is no larger than BSM's SRAM size */
161 if (len > IWL_MAX_BSM_SIZE)
164 /* Tell bootstrap uCode where to find the "Initialize" uCode
165 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
166 * NOTE: iwl4965_initialize_alive_start() will replace these values,
167 * after the "initialize" uCode has run, to point to
168 * runtime/protocol instructions and backup data cache. */
/* DRAM physical addresses are programmed shifted right 4: the BSM
 * registers hold address bits 35:4. */
169 pinst = priv->ucode_init.p_addr >> 4;
170 pdata = priv->ucode_init_data.p_addr >> 4;
171 inst_len = priv->ucode_init.len;
172 data_len = priv->ucode_init_data.len;
174 ret = iwl_grab_nic_access(priv);
178 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
179 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
180 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
181 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
183 /* Fill BSM memory with bootstrap instructions */
184 for (reg_offset = BSM_SRAM_LOWER_BOUND;
185 reg_offset < BSM_SRAM_LOWER_BOUND + len;
186 reg_offset += sizeof(u32), image++)
187 _iwl_write_prph(priv, reg_offset, le32_to_cpu(*image));
/* Read back and verify what was just written before starting the BSM. */
189 ret = iwl4965_verify_bsm(priv);
191 iwl_release_nic_access(priv);
195 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
196 iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
197 iwl_write_prph(priv, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
198 iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
200 /* Load bootstrap code into instruction SRAM now,
201 * to prepare to load "initialize" uCode */
202 iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
204 /* Wait for load of bootstrap uCode to finish */
/* Poll up to 100 times; the START bit self-clears when the copy is done. */
205 for (i = 0; i < 100; i++) {
206 done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
207 if (!(done & BSM_WR_CTRL_REG_BIT_START))
212 IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
214 IWL_ERROR("BSM write did not complete!\n");
218 /* Enable future boot loads whenever power management unit triggers it
219 * (e.g. when powering back up after power-save shutdown) */
220 iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
222 iwl_release_nic_access(priv);
/* Return non-zero if the RXON flags select a 40 MHz ("fat") channel mode,
 * i.e. either pure-40MHz or mixed 20/40MHz operation. */
227 static int is_fat_channel(__le32 rxon_flags)
229 return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
230 (rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK);
/* Translate a uCode hardware rate/flags word into a driver rate-table index.
 *
 * HT rates are decoded arithmetically from the MCS field; legacy rates are
 * looked up by matching the PLCP value in iwl4965_rates[]. */
233 int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
237 /* 4965 HT rate format */
238 if (rate_n_flags & RATE_MCS_HT_MSK) {
/* Low byte holds the MCS / PLCP value. */
239 idx = (rate_n_flags & 0xff);
/* Fold MIMO2 rates back onto the single-stream range. */
241 if (idx >= IWL_RATE_MIMO2_6M_PLCP)
242 idx = idx - IWL_RATE_MIMO2_6M_PLCP;
244 idx += IWL_FIRST_OFDM_RATE;
245 /* skip 9M not supported in ht*/
246 if (idx >= IWL_RATE_9M_INDEX)
248 if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
251 /* 4965 legacy rate format, search for match in table */
253 for (idx = 0; idx < ARRAY_SIZE(iwl4965_rates); idx++)
254 if (iwl4965_rates[idx].plcp == (rate_n_flags & 0xFF))
262 * translate ucode response to mac80211 tx status control values
/* Translate a uCode rate/flags word into mac80211 tx-control fields:
 * antenna selection, HT/greenfield/40MHz/duplicate/short-GI flags, and the
 * bitrate table entry matching the hardware rate. */
264 void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
265 struct ieee80211_tx_control *control)
/* Extract the antenna mask bits into mac80211's antenna selector. */
269 control->antenna_sel_tx =
270 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
271 if (rate_n_flags & RATE_MCS_HT_MSK)
272 control->flags |= IEEE80211_TXCTL_OFDM_HT;
273 if (rate_n_flags & RATE_MCS_GF_MSK)
274 control->flags |= IEEE80211_TXCTL_GREEN_FIELD;
275 if (rate_n_flags & RATE_MCS_FAT_MSK)
276 control->flags |= IEEE80211_TXCTL_40_MHZ_WIDTH;
277 if (rate_n_flags & RATE_MCS_DUP_MSK)
278 control->flags |= IEEE80211_TXCTL_DUP_DATA;
279 if (rate_n_flags & RATE_MCS_SGI_MSK)
280 control->flags |= IEEE80211_TXCTL_SHORT_GI;
281 /* since iwl4965_hwrate_to_plcp_idx is band indifferent, we always use
282 * IEEE80211_BAND_2GHZ band as it contains all the rates */
283 rate_index = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
/* -1 means no table entry matched; report no rate rather than garbage. */
284 if (rate_index == -1)
285 control->tx_rate = NULL;
288 &priv->bands[IEEE80211_BAND_2GHZ].bitrates[rate_index];
/* Stop Rx DMA: zero the channel-0 Rx config register and poll the Rx
 * status register until the DMA engine reports idle.  Runs under
 * priv->lock with NIC access held. */
291 int iwl4965_hw_rxq_stop(struct iwl_priv *priv)
296 spin_lock_irqsave(&priv->lock, flags);
297 rc = iwl_grab_nic_access(priv);
/* Could not wake the NIC; bail out with the lock released. */
299 spin_unlock_irqrestore(&priv->lock, flags);
304 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
305 rc = iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
308 IWL_ERROR("Can't stop Rx DMA.\n");
310 iwl_release_nic_access(priv);
311 spin_unlock_irqrestore(&priv->lock, flags);
/* Validate the EEPROM image version and the TX-power calibration version
 * against the minimum this driver supports; logs an error when either is
 * too old.  (Fix: error message previously misspelled "Unsuported".) */
320 static int iwl4965_eeprom_check_version(struct iwl_priv *priv)
325 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
327 calib_ver = iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);
/* Both versions must meet the driver's minimum requirement. */
329 if (eeprom_ver < EEPROM_4965_EEPROM_VERSION ||
330 calib_ver < EEPROM_4965_TX_POWER_VERSION)
335 IWL_ERROR("Unsupported EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
336 eeprom_ver, EEPROM_4965_EEPROM_VERSION,
337 calib_ver, EEPROM_4965_TX_POWER_VERSION);
/* Select the card's power source (VAUX vs VMAIN) via the APMG power-control
 * register.  VAUX is only selected when PCI config space reports that the
 * device can signal PME from D3cold. */
341 int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
346 spin_lock_irqsave(&priv->lock, flags);
347 ret = iwl_grab_nic_access(priv);
349 spin_unlock_irqrestore(&priv->lock, flags);
353 if (src == IWL_PWR_SRC_VAUX) {
355 ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
/* Only switch to VAUX if the device supports PME from D3cold. */
358 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) {
359 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
360 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
361 ~APMG_PS_CTRL_MSK_PWR_SRC);
/* Default / fallback: main power. */
364 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
365 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
366 ~APMG_PS_CTRL_MSK_PWR_SRC);
369 iwl_release_nic_access(priv);
370 spin_unlock_irqrestore(&priv->lock, flags);
/* Program the Rx DMA (FH) registers: stop the channel, reset the write
 * pointer, point the device at the RBD circular buffer and the shared Rx
 * status area in DRAM, then re-enable Rx DMA with the configured buffer
 * size (4K or 8K depending on the amsdu_size_8K module parameter). */
375 static int iwl4965_rx_init(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
379 unsigned int rb_size;
381 spin_lock_irqsave(&priv->lock, flags);
382 ret = iwl_grab_nic_access(priv);
384 spin_unlock_irqrestore(&priv->lock, flags);
388 if (priv->cfg->mod_params->amsdu_size_8K)
389 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
391 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
/* Stop Rx DMA before reprogramming the channel. */
394 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
396 /* Reset driver's Rx queue write index */
397 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
399 /* Tell device where to find RBD circular buffer in DRAM */
400 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
403 /* Tell device where in DRAM to update its Rx status */
404 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
406 offsetof(struct iwl4965_shared, rb_closed)) >> 4);
408 /* Enable Rx DMA, enable host interrupt, Rx buffer size 4k, 256 RBDs */
409 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
410 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
411 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
414 (RX_QUEUE_SIZE_LOG <<
415 FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
418 * iwl_write32(priv,CSR_INT_COAL_REG,0);
421 iwl_release_nic_access(priv);
422 spin_unlock_irqrestore(&priv->lock, flags);
427 /* Tell 4965 where to find the "keep warm" buffer */
/* Tell 4965 where to find the "keep warm" buffer: program its DMA address
 * (bits 35:4) into the FH keep-warm address register. */
428 static int iwl4965_kw_init(struct iwl_priv *priv)
433 spin_lock_irqsave(&priv->lock, flags);
434 rc = iwl_grab_nic_access(priv);
438 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG,
439 priv->kw.dma_addr >> 4);
440 iwl_release_nic_access(priv);
442 spin_unlock_irqrestore(&priv->lock, flags);
/* Allocate the DMA-coherent "keep warm" buffer used by the device while
 * host DRAM is otherwise idle.  Fills priv->kw with size/addresses. */
448 static int iwl4965_kw_alloc(struct iwl_priv *priv)
450 struct pci_dev *dev = priv->pci_dev;
451 struct iwl4965_kw *kw = &priv->kw;
453 kw->size = IWL4965_KW_SIZE; /* TBW need set somewhere else */
454 kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr);
460 * iwl4965_kw_free - Free the "keep warm" buffer
/* Free the "keep warm" buffer and clear priv->kw so a later free or
 * re-allocation sees a clean (zeroed) descriptor. */
464 static void iwl4965_kw_free(struct iwl_priv *priv)
466 struct pci_dev *dev = priv->pci_dev;
467 struct iwl4965_kw *kw = &priv->kw;
470 pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr);
471 memset(kw, 0, sizeof(*kw));
474 * iwl4965_txq_ctx_reset - Reset TX queue context
475 * Destroys all DMA structures and initialise them again
/* Reset TX queue context: free all Tx/cmd queues and the keep-warm buffer,
 * re-allocate the keep-warm buffer, stop all Tx DMA channels, point the
 * device at the new keep-warm buffer, then allocate and initialize every
 * Tx queue (including command queue #4).  On failure, all partially
 * allocated resources are released again. */
480 static int iwl4965_txq_ctx_reset(struct iwl_priv *priv)
483 int txq_id, slots_num;
486 iwl4965_kw_free(priv);
488 /* Free all tx/cmd queues and keep-warm buffer */
489 iwl4965_hw_txq_ctx_free(priv);
491 /* Alloc keep-warm buffer */
492 rc = iwl4965_kw_alloc(priv);
494 IWL_ERROR("Keep Warm allocation failed");
498 spin_lock_irqsave(&priv->lock, flags);
500 rc = iwl_grab_nic_access(priv);
502 IWL_ERROR("TX reset failed");
503 spin_unlock_irqrestore(&priv->lock, flags);
507 /* Turn off all Tx DMA channels */
508 iwl_write_prph(priv, IWL49_SCD_TXFACT, 0);
509 iwl_release_nic_access(priv);
510 spin_unlock_irqrestore(&priv->lock, flags);
512 /* Tell 4965 where to find the keep-warm buffer */
513 rc = iwl4965_kw_init(priv);
515 IWL_ERROR("kw_init failed\n");
519 /* Alloc and init all (default 16) Tx queues,
520 * including the command queue (#4) */
521 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
/* Command queue gets a different slot count from data queues. */
522 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
523 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
524 rc = iwl4965_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
527 IWL_ERROR("Tx %d queue init failed\n", txq_id);
/* Error unwinding: release queues and keep-warm buffer. */
535 iwl4965_hw_txq_ctx_free(priv);
537 iwl4965_kw_free(priv);
/* Bring the adapter's power-management (APM) block up: disable the L0s
 * exit timer workaround bit, request init-done (D0U* -> D0A*), wait for
 * the MAC clock to become ready, enable DMA/BSM clocks, and disable the
 * L1-Active power saving that interferes with the device. */
546 static int iwl4965_apm_init(struct iwl_priv *priv)
551 spin_lock_irqsave(&priv->lock, flags);
552 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
553 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
555 /* set "initialization complete" bit to move adapter
556 * D0U* --> D0A* state */
557 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
559 /* wait for clock stabilization */
/* Up to 25 ms for the MAC clock-ready bit. */
560 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
561 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
562 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
564 IWL_DEBUG_INFO("Failed to init the card\n");
568 ret = iwl_grab_nic_access(priv);
/* Request DMA and BSM clocks from the power-management unit. */
573 iwl_write_prph(priv, APMG_CLK_CTRL_REG,
574 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
578 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
579 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
581 iwl_release_nic_access(priv);
583 spin_unlock_irqrestore(&priv->lock, flags);
/* One-time NIC configuration: apply PCI workarounds for early hardware
 * revisions (enable No-Snoop, disable ASPM L1 for pre-B1 silicon), copy
 * the radio configuration from EEPROM into CSR_HW_IF_CONFIG_REG, and
 * cache a pointer to the EEPROM txpower calibration block. */
590 static void iwl4965_nic_config(struct iwl_priv *priv)
597 spin_lock_irqsave(&priv->lock, flags);
/* Revision check: 0x80 bit set with low 7 bits < 8 identifies the
 * early silicon that needs the No-Snoop enable. */
599 if ((priv->rev_id & 0x80) == 0x80 && (priv->rev_id & 0x7f) < 8) {
600 pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val);
601 /* Enable No Snoop field */
602 pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8,
606 pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);
608 /* disable L1 entry -- workaround for pre-B1 */
609 pci_write_config_byte(priv->pci_dev, PCI_LINK_CTRL, val_link & ~0x02);
611 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
613 /* write radio config values to register */
614 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
615 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
616 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
617 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
618 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
620 /* set CSR_HW_CONFIG_REG for uCode use */
621 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
622 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
623 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
625 priv->calib_info = (struct iwl_eeprom_calib_info *)
626 iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET);
628 spin_unlock_irqrestore(&priv->lock, flags);
/* Full NIC bring-up: APM init, interrupt coalescing, power source
 * selection, NIC config, then Rx queue allocation/reset/replenish and Tx
 * queue context reset.  Sets STATUS_INIT on success. */
625 int iwl4965_hw_nic_init(struct iwl_priv *priv)
628 struct iwl4965_rx_queue *rxq = &priv->rxq;
632 priv->cfg->ops->lib->apm_ops.init(priv);
634 spin_lock_irqsave(&priv->lock, flags);
/* 512/32: interrupt coalescing timeout in 32-usec units. */
635 iwl_write32(priv, CSR_INT_COALESCING, 512 / 32);
636 spin_unlock_irqrestore(&priv->lock, flags);
638 ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
640 priv->cfg->ops->lib->apm_ops.config(priv);
642 iwl4965_hw_card_show_info(priv);
646 /* Allocate the RX queue, or reset if it is already allocated */
648 ret = iwl4965_rx_queue_alloc(priv);
650 IWL_ERROR("Unable to initialize Rx queue\n");
654 iwl4965_rx_queue_reset(priv, rxq);
656 iwl4965_rx_replenish(priv);
658 iwl4965_rx_init(priv, rxq);
660 spin_lock_irqsave(&priv->lock, flags);
/* Push the (replenished) write pointer to the hardware. */
662 rxq->need_update = 1;
663 iwl4965_rx_queue_update_write_ptr(priv, rxq);
665 spin_unlock_irqrestore(&priv->lock, flags);
667 /* Allocate and init all Tx and Command queues */
668 ret = iwl4965_txq_ctx_reset(priv);
672 set_bit(STATUS_INIT, &priv->status);
/* Stop the device's bus-master DMA: set the stop-master bit, then — unless
 * the card is in a power-save state where the master is already inactive —
 * poll for the master-disabled acknowledgement. */
677 int iwl4965_hw_nic_stop_master(struct iwl_priv *priv)
683 spin_lock_irqsave(&priv->lock, flags);
685 /* set stop master bit */
686 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
688 reg_val = iwl_read32(priv, CSR_GP_CNTRL);
/* In MAC power-save the master is already stopped; skip the poll. */
690 if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE ==
691 (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE))
692 IWL_DEBUG_INFO("Card in power save, master is already "
695 rc = iwl_poll_bit(priv, CSR_RESET,
696 CSR_RESET_REG_FLAG_MASTER_DISABLED,
697 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
699 spin_unlock_irqrestore(&priv->lock, flags);
704 spin_unlock_irqrestore(&priv->lock, flags);
705 IWL_DEBUG_INFO("stop master\n");
711 * iwl4965_hw_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
/* Stop all Tx DMA channels, free Tx queue memory.
 *
 * For each queue: under priv->lock with NIC access, zero the channel's Tx
 * config register and poll until the channel reports idle; finally free
 * all Tx queue structures. */
713 void iwl4965_hw_txq_ctx_stop(struct iwl_priv *priv)
719 /* Stop each Tx DMA channel, and wait for it to be idle */
720 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
721 spin_lock_irqsave(&priv->lock, flags);
722 if (iwl_grab_nic_access(priv)) {
723 spin_unlock_irqrestore(&priv->lock, flags);
727 iwl_write_direct32(priv,
728 FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0);
729 iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
730 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
732 iwl_release_nic_access(priv);
733 spin_unlock_irqrestore(&priv->lock, flags);
736 /* Deallocate memory for all Tx queues */
737 iwl4965_hw_txq_ctx_free(priv);
/* Soft-reset the NIC: stop bus mastering, assert SW_RESET, wait for the
 * MAC clock to come back, re-enable DMA/BSM clocks and re-apply the
 * L1-Active disable, then wake anyone waiting on the command queue since
 * any in-flight host command is now void. */
740 int iwl4965_hw_nic_reset(struct iwl_priv *priv)
745 iwl4965_hw_nic_stop_master(priv);
747 spin_lock_irqsave(&priv->lock, flags);
749 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
753 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
/* NOTE(review): polls CSR_RESET for a CSR_GP_CNTRL flag value — looks
 * suspicious; confirm the intended register against the datasheet. */
754 rc = iwl_poll_bit(priv, CSR_RESET,
755 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
756 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25);
760 rc = iwl_grab_nic_access(priv);
762 iwl_write_prph(priv, APMG_CLK_EN_REG,
763 APMG_CLK_VAL_DMA_CLK_RQT |
764 APMG_CLK_VAL_BSM_CLK_RQT);
768 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
769 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
771 iwl_release_nic_access(priv);
/* Any outstanding host command is dead after reset; release waiters. */
774 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
775 wake_up_interruptible(&priv->wait_command_queue);
777 spin_unlock_irqrestore(&priv->lock, flags);
783 #define REG_RECALIB_PERIOD (60)
786 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
788 * This callback is provided in order to send a statistics request.
790 * This timer function is continually reset to execute within
791 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
792 * was received. We need to ensure we receive the statistics in order
793 * to update the temperature used for calibrating the TXPOWER.
/* Timer callback (see comment above): request fresh statistics from the
 * uCode so temperature-based txpower calibration stays current.  Skips
 * the request when the driver is shutting down. */
797 static void iwl4965_bg_statistics_periodic(unsigned long data)
799 struct iwl_priv *priv = (struct iwl_priv *)data;
801 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
804 iwl_send_statistics_request(priv, CMD_ASYNC);
/* Configure critical-temperature (CT) kill: clear any previous CT-kill
 * exit flag, then send the uCode the temperature threshold above which it
 * must shut the radio down. */
807 void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
809 struct iwl4965_ct_kill_config cmd;
813 spin_lock_irqsave(&priv->lock, flags);
814 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
815 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
816 spin_unlock_irqrestore(&priv->lock, flags);
/* Threshold is kept in Kelvin in hw_params; uCode expects __le32. */
818 cmd.critical_temperature_R =
819 cpu_to_le32(priv->hw_params.ct_kill_threshold);
821 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
824 IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n");
826 IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded, "
827 "critical temperature is %d\n",
828 cmd.critical_temperature_R);
829 #ifdef CONFIG_IWL4965_RUN_TIME_CALIB
831 /* Reset differential Rx gains in NIC to prepare for chain noise calibration.
832 * Called after every association, but this runs only once!
833 * ... once chain noise is calibrated the first time, it's good forever. */
/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
 * Called after every association, but this runs only once!
 * ... once chain noise is calibrated the first time, it's good forever.
 * Sends a zeroed diff-gain calibration command and moves the state machine
 * to ACCUMULATE so statistics collection can begin. */
834 static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
836 struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
/* Only start when fresh (ALIVE) and actually associated. */
838 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
839 struct iwl4965_calibration_cmd cmd;
841 memset(&cmd, 0, sizeof(cmd));
842 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
846 if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
848 IWL_ERROR("Could not send REPLY_PHY_CALIBRATION_CMD\n");
849 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
850 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
/* Compute per-chain differential gain codes from the measured average
 * noise, relative to the quietest antenna (which gets code 0), clamp them,
 * and send them to the uCode exactly once.  Afterwards the accumulated
 * noise/signal statistics are cleared and the state machine is marked
 * CALIBRATED so this never runs again. */
854 static void iwl4965_gain_computation(struct iwl_priv *priv,
856 u16 min_average_noise_antenna_i,
857 u32 min_average_noise)
860 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
/* The quietest chain is the reference: zero delta gain. */
862 data->delta_gain_code[min_average_noise_antenna_i] = 0;
864 for (i = 0; i < NUM_RX_CHAINS; i++) {
/* Only compute for connected chains still at their init value. */
867 if (!(data->disconn_array[i]) &&
868 (data->delta_gain_code[i] ==
869 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
870 delta_g = average_noise[i] - min_average_noise;
/* Scale noise delta to gain-code units (x10/15) and clamp. */
871 data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
872 data->delta_gain_code[i] =
873 min(data->delta_gain_code[i],
874 (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
/* Bit 2 marks the code as a reduction (negative gain). */
876 data->delta_gain_code[i] =
877 (data->delta_gain_code[i] | (1 << 2));
879 data->delta_gain_code[i] = 0;
882 IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n",
883 data->delta_gain_code[0],
884 data->delta_gain_code[1],
885 data->delta_gain_code[2]);
887 /* Differential gain gets sent to uCode only once */
888 if (!data->radio_write) {
889 struct iwl4965_calibration_cmd cmd;
890 data->radio_write = 1;
892 memset(&cmd, 0, sizeof(cmd));
893 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
894 cmd.diff_gain_a = data->delta_gain_code[0];
895 cmd.diff_gain_b = data->delta_gain_code[1];
896 cmd.diff_gain_c = data->delta_gain_code[2];
897 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
900 IWL_DEBUG_CALIB("fail sending cmd "
901 "REPLY_PHY_CALIBRATION_CMD \n");
903 /* TODO we might want recalculate
904 * rx_chain in rxon cmd */
906 /* Mark so we run this algo only once! */
907 data->state = IWL_CHAIN_NOISE_CALIBRATED;
/* Clear accumulators now that calibration is final. */
909 data->chain_noise_a = 0;
910 data->chain_noise_b = 0;
911 data->chain_noise_c = 0;
912 data->chain_signal_a = 0;
913 data->chain_signal_b = 0;
914 data->chain_signal_c = 0;
915 data->beacon_count = 0;
/* Workqueue handler: run sensitivity or chain-noise calibration against
 * the latest statistics, unless the driver is exiting or a scan is in
 * progress.  Serialized by priv->mutex. */
920 static void iwl4965_bg_sensitivity_work(struct work_struct *work)
922 struct iwl_priv *priv = container_of(work, struct iwl_priv,
925 mutex_lock(&priv->mutex);
927 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
928 test_bit(STATUS_SCANNING, &priv->status)) {
929 mutex_unlock(&priv->mutex);
/* start_calib selects which calibration phase runs. */
933 if (priv->start_calib) {
934 iwl_chain_noise_calibration(priv, &priv->statistics);
936 iwl_sensitivity_calibration(priv, &priv->statistics);
939 mutex_unlock(&priv->mutex);
940 #endif /*CONFIG_IWL4965_RUN_TIME_CALIB*/
/* Workqueue handler: reconfigure TX power after a temperature change.
 * Skipped while scanning or exiting; the next statistics notification
 * will reschedule it. */
944 static void iwl4965_bg_txpower_work(struct work_struct *work)
946 struct iwl_priv *priv = container_of(work, struct iwl_priv,
949 /* If a scan happened to start before we got here
950 * then just return; the statistics notification will
951 * kick off another scheduled work to compensate for
952 * any temperature delta we missed here. */
953 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
954 test_bit(STATUS_SCANNING, &priv->status))
957 mutex_lock(&priv->mutex);
959 /* Regardless of if we are associated, we must reconfigure the
960 * TX power since frames can be sent on non-radar channels while
962 iwl4965_hw_reg_send_txpower(priv);
964 /* Update last_temperature to keep is_calib_needed from running
965 * when it isn't needed... */
966 priv->last_temperature = priv->temperature;
968 mutex_unlock(&priv->mutex);
970 * Acquire priv->lock before calling this function !
/* Set both the hardware write pointer and the scheduler read pointer for
 * a Tx queue to 'index'.
 * Acquire priv->lock before calling this function ! */
972 static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
/* HBUS_TARG_WRPTR encodes queue id in bits 8+ and index in the low byte. */
974 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
975 (index & 0xff) | (txq_id << 8));
976 iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
980 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
981 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
982 * @scd_retry: (1) Indicates queue will be used in aggregation mode
984 * NOTE: Acquire priv->lock before calling this function !
/* (Optionally) start a Tx/Cmd queue: program the scheduler status bits
 * with the queue's active state, the Tx FIFO it feeds, and whether it
 * runs in scheduler-retry (aggregation) mode.
 * NOTE: Acquire priv->lock before calling this function ! */
986 static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
987 struct iwl4965_tx_queue *txq,
988 int tx_fifo_id, int scd_retry)
990 int txq_id = txq->q.id;
992 /* Find out whether to activate Tx queue */
993 int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0;
995 /* Set up and activate */
996 iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
997 (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
998 (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
999 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
1000 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
1001 IWL49_SCD_QUEUE_STTS_REG_MSK);
/* Remember the mode so the Tx path knows how completions are handled. */
1003 txq->sched_retry = scd_retry;
1005 IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n",
1006 active ? "Activate" : "Deactivate",
1007 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
1010 static const u16 default_queue_to_tx_fifo[] = {
/* Mark a Tx queue as active in the driver's queue-context bitmask. */
1020 static inline void iwl4965_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
1022 set_bit(txq_id, &priv->txq_ctx_active_msk);
/* Mark a Tx queue as inactive in the driver's queue-context bitmask. */
1025 static inline void iwl4965_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
1027 clear_bit(txq_id, &priv->txq_ctx_active_msk);
/* Called when the runtime uCode reports "alive": reset calibration state,
 * clear the Tx scheduler's SRAM context, program byte-count table and
 * per-queue window/frame-limit settings, activate all Tx DMA channels,
 * map each queue to its FIFO, and kick off an initial statistics request. */
1030 int iwl4965_alive_notify(struct iwl_priv *priv)
1034 unsigned long flags;
1037 spin_lock_irqsave(&priv->lock, flags);
1039 #ifdef CONFIG_IWL4965_RUN_TIME_CALIB
/* Restart run-time calibration from scratch for this uCode session. */
1040 memset(&(priv->sensitivity_data), 0,
1041 sizeof(struct iwl_sensitivity_data));
1042 memset(&(priv->chain_noise_data), 0,
1043 sizeof(struct iwl_chain_noise_data));
1044 for (i = 0; i < NUM_RX_CHAINS; i++)
1045 priv->chain_noise_data.delta_gain_code[i] =
1046 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
1047 #endif /* CONFIG_IWL4965_RUN_TIME_CALIB*/
1048 ret = iwl_grab_nic_access(priv);
1050 spin_unlock_irqrestore(&priv->lock, flags);
1054 /* Clear 4965's internal Tx Scheduler data base */
1055 priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
1056 a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
1057 for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
1058 iwl_write_targ_mem(priv, a, 0);
1059 for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
1060 iwl_write_targ_mem(priv, a, 0);
/* NOTE(review): this bound compares the SRAM address 'a' against a small
 * byte count (sizeof(u16) * max_txq_num), so the loop likely never runs;
 * later kernels bound it by scd_base_addr + translate-table size — confirm. */
1061 for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4)
1062 iwl_write_targ_mem(priv, a, 0);
1064 /* Tell 4965 where to find Tx byte count tables */
1065 iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
1066 (priv->shared_phys +
1067 offsetof(struct iwl4965_shared, queues_byte_cnt_tbls)) >> 10);
1069 /* Disable chain mode for all queues */
1070 iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
1072 /* Initialize each Tx queue (including the command queue) */
1073 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
1075 /* TFD circular buffer read/write indexes */
1076 iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
1077 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
1079 /* Max Tx Window size for Scheduler-ACK mode */
1080 iwl_write_targ_mem(priv, priv->scd_base_addr +
1081 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
1083 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1084 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
/* Frame limit for this queue's scheduler context. */
1087 iwl_write_targ_mem(priv, priv->scd_base_addr +
1088 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
1091 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1092 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
/* Enable scheduler interrupts for every configured queue. */
1095 iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
1096 (1 << priv->hw_params.max_txq_num) - 1);
1098 /* Activate all Tx DMA/FIFO channels */
1099 iwl_write_prph(priv, IWL49_SCD_TXFACT,
1100 SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
1102 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
1104 /* Map each Tx/cmd queue to its corresponding fifo */
1105 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
1106 int ac = default_queue_to_tx_fifo[i];
1107 iwl4965_txq_ctx_activate(priv, i);
1108 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
1111 iwl_release_nic_access(priv);
1112 spin_unlock_irqrestore(&priv->lock, flags);
1114 /* Ask for statistics now, the uCode will send statistics notification
1115 * periodically after association */
1116 iwl_send_statistics_request(priv, CMD_ASYNC);
1120 #ifdef CONFIG_IWL4965_RUN_TIME_CALIB
/* 4965-specific sensitivity calibration bounds: min/max auto-correlation
 * thresholds for OFDM and CCK (with and without MRC), consumed by the
 * shared iwl-calib sensitivity algorithm via hw_params.sens. */
1121 static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
1125 .auto_corr_min_ofdm = 85,
1126 .auto_corr_min_ofdm_mrc = 170,
1127 .auto_corr_min_ofdm_x1 = 105,
1128 .auto_corr_min_ofdm_mrc_x1 = 220,
1130 .auto_corr_max_ofdm = 120,
1131 .auto_corr_max_ofdm_mrc = 210,
1132 .auto_corr_max_ofdm_x1 = 140,
1133 .auto_corr_max_ofdm_mrc_x1 = 270,
1135 .auto_corr_min_cck = 125,
1136 .auto_corr_max_cck = 200,
1137 .auto_corr_min_cck_mrc = 200,
1138 .auto_corr_max_cck_mrc = 400,
1146 * iwl4965_hw_set_hw_params
1148 * Called when initializing driver
/* Called when initializing driver: validate the num_of_queues module
 * parameter and fill priv->hw_params with 4965-specific limits (queue
 * and station counts, Rx buffer sizes, uCode size limits, antenna
 * configuration, and the CT-kill threshold). */
1150 int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
1153 if ((priv->cfg->mod_params->num_of_queues > IWL49_NUM_QUEUES) ||
1154 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
1155 IWL_ERROR("invalid queues_num, should be between %d and %d\n",
1156 IWL_MIN_NUM_QUEUES, IWL49_NUM_QUEUES);
1160 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
1161 priv->hw_params.sw_crypto = priv->cfg->mod_params->sw_crypto;
1162 priv->hw_params.tx_cmd_len = sizeof(struct iwl4965_tx_cmd);
1163 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
1164 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
/* Rx buffer size follows the amsdu_size_8K module parameter. */
1165 if (priv->cfg->mod_params->amsdu_size_8K)
1166 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_8K;
1168 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_4K;
/* Reserve 256 bytes of the Rx buffer for headers/status. */
1169 priv->hw_params.max_pkt_size = priv->hw_params.rx_buf_size - 256;
1170 priv->hw_params.max_stations = IWL4965_STATION_COUNT;
1171 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID;
1173 priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
1174 priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
1175 priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
1176 priv->hw_params.fat_channel = BIT(IEEE80211_BAND_5GHZ);
/* 4965 is a 2x2 device: antennas A and B for both Tx and Rx. */
1178 priv->hw_params.tx_chains_num = 2;
1179 priv->hw_params.rx_chains_num = 2;
1180 priv->hw_params.valid_tx_ant = ANT_A | ANT_B;
1181 priv->hw_params.valid_rx_ant = ANT_A | ANT_B;
1182 priv->hw_params.ct_kill_threshold = CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD);
1184 #ifdef CONFIG_IWL4965_RUN_TIME_CALIB
1185 priv->hw_params.sens = &iwl4965_sensitivity;
1192 * iwl4965_hw_txq_ctx_free - Free TXQ Context
1194 * Destroy all TX DMA queues and structures
/* Free TXQ Context: release every Tx DMA queue and the keep-warm buffer. */
1196 void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
1201 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
1202 iwl4965_tx_queue_free(priv, &priv->txq[txq_id]);
1204 /* Keep-warm buffer */
1205 iwl4965_kw_free(priv);
1209 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
1211 * Does NOT advance any TFD circular buffer read/write indexes
1212 * Does NOT free the TFD itself (which is within circular buffer)
/* Free all chunks referenced by TFD [txq->q.read_ptr]: unmap each DMA
 * chunk (odd/"tb2" chunks use a split lo16/hi20 address encoding, even
 * /"tb1" chunks a plain __le32 address) and free any attached SKB.
 * Does NOT advance any TFD circular buffer read/write indexes and does
 * NOT free the TFD itself (which is within circular buffer). */
1214 int iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
1216 struct iwl4965_tfd_frame *bd_tmp = (struct iwl4965_tfd_frame *)&txq->bd[0];
1217 struct iwl4965_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
1218 struct pci_dev *dev = priv->pci_dev;
1223 /* Host command buffers stay mapped in memory, nothing to clean */
1224 if (txq->q.id == IWL_CMD_QUEUE_NUM)
1227 /* Sanity check on number of chunks */
1228 counter = IWL_GET_BITS(*bd, num_tbs);
1229 if (counter > MAX_NUM_OF_TBS) {
1230 IWL_ERROR("Too many chunks: %i\n", counter);
1231 /* @todo issue fatal error, it is quite serious situation */
1235 /* Unmap chunks, if any.
1236 * TFD info for odd chunks is different format than for even chunks. */
1237 for (i = 0; i < counter; i++) {
/* Odd chunk: address split across lo16/hi20 bit fields. */
1244 IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
1245 (IWL_GET_BITS(bd->pa[index],
1246 tb2_addr_hi20) << 16),
1247 IWL_GET_BITS(bd->pa[index], tb2_len),
/* Even chunk: plain little-endian address plus bit-field length. */
1251 pci_unmap_single(dev,
1252 le32_to_cpu(bd->pa[index].tb1_addr),
1253 IWL_GET_BITS(bd->pa[index], tb1_len),
1256 /* Free SKB, if any, for this chunk */
1257 if (txq->txb[txq->q.read_ptr].skb[i]) {
1258 struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];
1261 txq->txb[txq->q.read_ptr].skb[i] = NULL;
1267 /* set card power command */
/* Sends POWER_TABLE_CMD asynchronously; the command payload argument and
 * the return path are in lines elided from this extract. */
1268 static int iwl4965_set_power(struct iwl_priv *priv,
1273 ret = iwl_send_cmd_pdu_async(priv, POWER_TABLE_CMD,
1274 sizeof(struct iwl4965_powertable_cmd),
/* Stub: logs that per-call txpower setting is unimplemented.  The return
 * value is in a line elided from this extract. */
1278 int iwl4965_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
1280 IWL_ERROR("TODO: Implement iwl4965_hw_reg_set_txpower!\n");
/* Integer division with round-to-nearest: *res = round(num/denom).
 * NOTE(review): `sign` is set up in lines elided from this extract --
 * presumably it normalizes num/denom to non-negative first; confirm. */
1284 static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
1297 *res = ((num * 2 + denom) / (denom * 2)) * sign;
1303 * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
1305 * Determines power supply voltage compensation for txpower calculations.
1306 * Returns number of 1/2-dB steps to subtract from gain table index,
1307 * to compensate for difference between power supply voltage during
1308 * factory measurements, vs. current power supply voltage.
1310 * Voltage indication is higher for lower voltage.
1311 * Lower voltage requires more gain (lower gain table index).
1313 static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
1314 s32 current_voltage)
/* Bail out if either reading is the EEPROM "illegal" sentinel */
1318 if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
1319 (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
/* Scale the voltage delta into 1/2-dB steps (rounded division) */
1322 iwl4965_math_div_round(current_voltage - eeprom_voltage,
1323 TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
/* NOTE(review): the adjustments taken in these two branches, and the
 * final return of `comp`, are in lines elided from this extract. */
1325 if (current_voltage > eeprom_voltage)
1327 if ((comp < -2) || (comp > 2))
/* Look up the channel-info entry for (band, channel); the return paths
 * (invalid -> presumably NULL, valid -> ch_info) are elided here. */
1333 static const struct iwl_channel_info *
1334 iwl4965_get_channel_txpower_info(struct iwl_priv *priv,
1335 enum ieee80211_band band, u16 channel)
1337 const struct iwl_channel_info *ch_info;
1339 ch_info = iwl_get_channel_info(priv, band, channel);
1341 if (!is_channel_valid(ch_info))
1347 static s32 iwl4965_get_tx_atten_grp(u16 channel)
1349 if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
1350 channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
1351 return CALIB_CH_GROUP_5;
1353 if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
1354 channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
1355 return CALIB_CH_GROUP_1;
1357 if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
1358 channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
1359 return CALIB_CH_GROUP_2;
1361 if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
1362 channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
1363 return CALIB_CH_GROUP_3;
1365 if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
1366 channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
1367 return CALIB_CH_GROUP_4;
1369 IWL_ERROR("Can't find txatten group for channel %d.\n", channel);
/* Find the EEPROM calibration sub-band whose [ch_from, ch_to] range
 * contains `channel`.  A ch_from of 0 marks an unused table entry.
 * The loop-exit handling and return are elided from this extract. */
1373 static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
1377 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
1378 if (priv->calib_info->band_info[b].ch_from == 0)
1381 if ((channel >= priv->calib_info->band_info[b].ch_from)
1382 && (channel <= priv->calib_info->band_info[b].ch_to))
/* Linear interpolation at x between points (x1,y1) and (x2,y2), using
 * rounded integer division; the x1==x2 guard and return are elided. */
1389 static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
1396 iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
1402 * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
1404 * Interpolates factory measurements from the two sample channels within a
1405 * sub-band, to apply to channel of interest. Interpolation is proportional to
1406 * differences in channel frequencies, which is proportional to differences
1407 * in channel number.
/* NOTE(review): extract is lossy; several statement fragments below are
 * missing their leading lvalues (elided lines). */
1409 static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
1410 struct iwl_eeprom_calib_ch_info *chan_info)
1415 const struct iwl_eeprom_calib_measure *m1;
1416 const struct iwl_eeprom_calib_measure *m2;
1417 struct iwl_eeprom_calib_measure *omeas;
1421 s = iwl4965_get_sub_band(priv, channel);
1422 if (s >= EEPROM_TX_POWER_BANDS) {
1423 IWL_ERROR("Tx Power can not find channel %d ", channel);
/* Sample channel numbers bounding this sub-band */
1427 ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
1428 ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
1429 chan_info->ch_num = (u8) channel;
1431 IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n",
1432 channel, s, ch_i1, ch_i2);
/* Interpolate each (chain, measurement) pair between the two samples */
1434 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
1435 for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
1436 m1 = &(priv->calib_info->band_info[s].ch1.
1437 measurements[c][m]);
1438 m2 = &(priv->calib_info->band_info[s].ch2.
1439 measurements[c][m]);
1440 omeas = &(chan_info->measurements[c][m]);
1443 (u8) iwl4965_interpolate_value(channel, ch_i1,
1448 (u8) iwl4965_interpolate_value(channel, ch_i1,
1449 m1->gain_idx, ch_i2,
1451 omeas->temperature =
1452 (u8) iwl4965_interpolate_value(channel, ch_i1,
1457 (s8) iwl4965_interpolate_value(channel, ch_i1,
1462 ("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
1463 m1->actual_pow, m2->actual_pow, omeas->actual_pow);
1465 ("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
1466 m1->gain_idx, m2->gain_idx, omeas->gain_idx);
1468 ("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
1469 m1->pa_det, m2->pa_det, omeas->pa_det);
1471 ("chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
1472 m1->temperature, m2->temperature,
1473 omeas->temperature);
1480 /* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
1481 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
/* NOTE(review): the final CCK entry and closing brace are elided here. */
1482 static s32 back_off_table[] = {
1483 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
1484 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
1485 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
1486 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
1490 /* Thermal compensation values for txpower for various frequency ranges ...
1491 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
/* Indexed by CALIB_CH_GROUP_* (see iwl4965_get_tx_atten_grp); each entry
 * is a numerator/denominator pair: degrees-C per half-dB of gain. */
1492 static struct iwl4965_txpower_comp_entry {
1493 s32 degrees_per_05db_a;
1494 s32 degrees_per_05db_a_denom;
1495 } tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
1496 {9, 2}, /* group 0 5.2, ch 34-43 */
1497 {4, 1}, /* group 1 5.2, ch 44-70 */
1498 {4, 1}, /* group 2 5.2, ch 71-124 */
1499 {4, 1}, /* group 3 5.2, ch 125-200 */
1500 {3, 1} /* group 4 2.4, ch all */
/* Lowest legal gain-table index for a rate/band combination.
 * NOTE(review): the condition selecting between the two returns is partly
 * elided -- presumably the extended-range minimum applies only for one
 * band; confirm against the full source. */
1503 static s32 get_min_power_index(s32 rate_power_index, u32 band)
1506 if ((rate_power_index & 7) <= 4)
1507 return MIN_TX_GAIN_INDEX_52GHZ_EXT;
1509 return MIN_TX_GAIN_INDEX;
/* Per-band radio/DSP gain pairs, indexed by txpower index (0 = hottest).
 * NOTE(review): all but the first entry of each 108-entry band table are
 * elided from this extract. */
1517 static const struct gain_entry gain_table[2][108] = {
1518 /* 5.2GHz power gain index table */
1520 {123, 0x3F}, /* highest txpower */
1629 /* 2.4GHz power gain index table */
1631 {110, 0x3f}, /* highest txpower */
/* Build the per-rate, per-chain txpower table for one channel.
 * NOTE(review): extract is lossy throughout this function; error returns
 * and several lvalues are in elided lines. */
1742 static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
1743 u8 is_fat, u8 ctrl_chan_high,
1744 struct iwl4965_tx_power_db *tx_power_tbl)
1746 u8 saturation_power;
1748 s32 user_target_power;
1752 s32 current_regulatory;
1753 s32 txatten_grp = CALIB_CH_GROUP_MAX;
1756 const struct iwl_channel_info *ch_info = NULL;
1757 struct iwl_eeprom_calib_ch_info ch_eeprom_info;
1758 const struct iwl_eeprom_calib_measure *measurement;
1761 s32 voltage_compensation;
1762 s32 degrees_per_05db_num;
1763 s32 degrees_per_05db_denom;
1765 s32 temperature_comp[2];
1766 s32 factory_gain_index[2];
1767 s32 factory_actual_pwr[2];
1770 /* Sanity check requested level (dBm) */
1771 if (priv->user_txpower_limit < IWL_TX_POWER_TARGET_POWER_MIN) {
1772 IWL_WARNING("Requested user TXPOWER %d below limit.\n",
1773 priv->user_txpower_limit);
1776 if (priv->user_txpower_limit > IWL_TX_POWER_TARGET_POWER_MAX) {
1777 IWL_WARNING("Requested user TXPOWER %d above limit.\n",
1778 priv->user_txpower_limit);
1782 /* user_txpower_limit is in dBm, convert to half-dBm (half-dB units
1783 * are used for indexing into txpower table) */
1784 user_target_power = 2 * priv->user_txpower_limit;
1786 /* Get current (RXON) channel, band, width */
1788 iwl4965_get_channel_txpower_info(priv, priv->band, channel);
1790 IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band,
1796 /* get txatten group, used to select 1) thermal txpower adjustment
1797 * and 2) mimo txpower balance between Tx chains. */
1798 txatten_grp = iwl4965_get_tx_atten_grp(channel);
1799 if (txatten_grp < 0)
1802 IWL_DEBUG_TXPOWER("channel %d belongs to txatten group %d\n",
1803 channel, txatten_grp);
1812 /* hardware txpower limits ...
1813 * saturation (clipping distortion) txpowers are in half-dBm */
1815 saturation_power = priv->calib_info->saturation_power24;
1817 saturation_power = priv->calib_info->saturation_power52;
/* Fall back to per-band defaults if EEPROM value is out of range */
1819 if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
1820 saturation_power > IWL_TX_POWER_SATURATION_MAX) {
1822 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
1824 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
1827 /* regulatory txpower limits ... reg_limit values are in half-dBm,
1828 * max_power_avg values are in dBm, convert * 2 */
1830 reg_limit = ch_info->fat_max_power_avg * 2;
1832 reg_limit = ch_info->max_power_avg * 2;
1834 if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
1835 (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
1837 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
1839 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
1842 /* Interpolate txpower calibration values for this channel,
1843 * based on factory calibration tests on spaced channels. */
1844 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
1846 /* calculate tx gain adjustment based on power supply voltage */
1847 voltage = priv->calib_info->voltage;
1848 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
1849 voltage_compensation =
1850 iwl4965_get_voltage_compensation(voltage, init_voltage);
1852 IWL_DEBUG_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n",
1854 voltage, voltage_compensation);
1856 /* get current temperature (Celsius) */
1857 current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
1858 current_temp = min(priv->temperature, IWL_TX_POWER_TEMPERATURE_MAX);
/* NOTE(review): continuation of iwl4965_fill_txpower_tbl; extract remains
 * lossy (conditionals guarding several branches below are elided). */
1859 current_temp = KELVIN_TO_CELSIUS(current_temp);
1861 /* select thermal txpower adjustment params, based on channel group
1862 * (same frequency group used for mimo txatten adjustment) */
1863 degrees_per_05db_num =
1864 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
1865 degrees_per_05db_denom =
1866 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
1868 /* get per-chain txpower values from factory measurements */
1869 for (c = 0; c < 2; c++) {
1870 measurement = &ch_eeprom_info.measurements[c][1];
1872 /* txgain adjustment (in half-dB steps) based on difference
1873 * between factory and current temperature */
1874 factory_temp = measurement->temperature;
1875 iwl4965_math_div_round((current_temp - factory_temp) *
1876 degrees_per_05db_denom,
1877 degrees_per_05db_num,
1878 &temperature_comp[c]);
1880 factory_gain_index[c] = measurement->gain_idx;
1881 factory_actual_pwr[c] = measurement->actual_pow;
1883 IWL_DEBUG_TXPOWER("chain = %d\n", c);
1884 IWL_DEBUG_TXPOWER("fctry tmp %d, "
1885 "curr tmp %d, comp %d steps\n",
1886 factory_temp, current_temp,
1887 temperature_comp[c]);
1889 IWL_DEBUG_TXPOWER("fctry idx %d, fctry pwr %d\n",
1890 factory_gain_index[c],
1891 factory_actual_pwr[c]);
1894 /* for each of 33 bit-rates (including 1 for CCK) */
1895 for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
1897 union iwl4965_tx_power_dual_stream tx_power;
1899 /* for mimo, reduce each chain's txpower by half
1900 * (3dB, 6 steps), so total output power is regulatory
1903 current_regulatory = reg_limit -
1904 IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
1907 current_regulatory = reg_limit;
1911 /* find txpower limit, either hardware or regulatory */
1912 power_limit = saturation_power - back_off_table[i];
1913 if (power_limit > current_regulatory)
1914 power_limit = current_regulatory;
1916 /* reduce user's txpower request if necessary
1917 * for this rate on this channel */
1918 target_power = user_target_power;
1919 if (target_power > power_limit)
1920 target_power = power_limit;
1922 IWL_DEBUG_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n",
1923 i, saturation_power - back_off_table[i],
1924 current_regulatory, user_target_power,
1927 /* for each of 2 Tx chains (radio transmitters) */
1928 for (c = 0; c < 2; c++) {
1933 (s32)le32_to_cpu(priv->card_alive_init.
1934 tx_atten[txatten_grp][c]);
1938 /* calculate index; higher index means lower txpower */
1939 power_index = (u8) (factory_gain_index[c] -
1941 factory_actual_pwr[c]) -
1942 temperature_comp[c] -
1943 voltage_compensation +
1946 /* IWL_DEBUG_TXPOWER("calculated txpower index %d\n",
1949 if (power_index < get_min_power_index(i, band))
1950 power_index = get_min_power_index(i, band);
1952 /* adjust 5 GHz index to support negative indexes */
1956 /* CCK, rate 32, reduce txpower for CCK */
1957 if (i == POWER_TABLE_CCK_ENTRY)
1959 IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
1961 /* stay within the table! */
1962 if (power_index > 107) {
1963 IWL_WARNING("txpower index %d > 107\n",
1967 if (power_index < 0) {
1968 IWL_WARNING("txpower index %d < 0\n",
1973 /* fill txpower command for this rate/chain */
1974 tx_power.s.radio_tx_gain[c] =
1975 gain_table[band][power_index].radio;
1976 tx_power.s.dsp_predis_atten[c] =
1977 gain_table[band][power_index].dsp;
1979 IWL_DEBUG_TXPOWER("chain %d mimo %d index %d "
1980 "gain 0x%02x dsp %d\n",
1981 c, atten_value, power_index,
1982 tx_power.s.radio_tx_gain[c],
1983 tx_power.s.dsp_predis_atten[c]);
1984 }/* for each chain */
1986 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
1988 }/* for each rate */
1994 * iwl4965_hw_reg_send_txpower - Configure the TXPOWER level user limit
1996 * Uses the active RXON for channel, band, and characteristics (fat, high)
1997 * The power limit is taken from priv->user_txpower_limit.
/* NOTE(review): early return while scanning and the error-check after
 * fill_txpower_tbl are in lines elided from this extract. */
1999 int iwl4965_hw_reg_send_txpower(struct iwl_priv *priv)
2001 struct iwl4965_txpowertable_cmd cmd = { 0 };
2005 u8 ctrl_chan_high = 0;
2007 if (test_bit(STATUS_SCANNING, &priv->status)) {
2008 /* If this gets hit a lot, switch it to a BUG() and catch
2009 * the stack trace to find out who is calling this during
2011 IWL_WARNING("TX Power requested while scanning!\n");
/* band flag: nonzero for 2.4 GHz */
2015 band = priv->band == IEEE80211_BAND_2GHZ;
2017 is_fat = is_fat_channel(priv->active_rxon.flags);
2020 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
2024 cmd.channel = priv->active_rxon.channel;
2026 ret = iwl4965_fill_txpower_tbl(priv, band,
2027 le16_to_cpu(priv->active_rxon.channel),
2028 is_fat, ctrl_chan_high, &cmd.tx_power);
2032 ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
/* Send REPLY_RXON_ASSOC only if any association-relevant RXON field
 * differs between the staging and active configurations. */
2038 static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
2041 struct iwl4965_rxon_assoc_cmd rxon_assoc;
2042 const struct iwl4965_rxon_cmd *rxon1 = &priv->staging_rxon;
2043 const struct iwl4965_rxon_cmd *rxon2 = &priv->active_rxon;
/* All compared fields equal -> nothing to resend */
2045 if ((rxon1->flags == rxon2->flags) &&
2046 (rxon1->filter_flags == rxon2->filter_flags) &&
2047 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
2048 (rxon1->ofdm_ht_single_stream_basic_rates ==
2049 rxon2->ofdm_ht_single_stream_basic_rates) &&
2050 (rxon1->ofdm_ht_dual_stream_basic_rates ==
2051 rxon2->ofdm_ht_dual_stream_basic_rates) &&
2052 (rxon1->rx_chain == rxon2->rx_chain) &&
2053 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
2054 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
/* Populate the command from the staging RXON */
2058 rxon_assoc.flags = priv->staging_rxon.flags;
2059 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
2060 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
2061 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
2062 rxon_assoc.reserved = 0;
2063 rxon_assoc.ofdm_ht_single_stream_basic_rates =
2064 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
2065 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
2066 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
2067 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
2069 ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
2070 sizeof(rxon_assoc), &rxon_assoc, NULL);
/* Build and send a REPLY_CHANNEL_SWITCH command for `channel`.
 * NOTE(review): the conditionals selecting between the two
 * cmd.expect_beacon assignments are in lines elided from this extract. */
2078 int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
2083 u8 ctrl_chan_high = 0;
2084 struct iwl4965_channel_switch_cmd cmd = { 0 };
2085 const struct iwl_channel_info *ch_info;
2087 band = priv->band == IEEE80211_BAND_2GHZ;
2089 ch_info = iwl_get_channel_info(priv, priv->band, channel);
2091 is_fat = is_fat_channel(priv->staging_rxon.flags);
2094 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
2098 cmd.expect_beacon = 0;
2099 cmd.channel = cpu_to_le16(channel);
2100 cmd.rxon_flags = priv->active_rxon.flags;
2101 cmd.rxon_filter_flags = priv->active_rxon.filter_flags;
2102 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
2104 cmd.expect_beacon = is_channel_radar(ch_info);
2106 cmd.expect_beacon = 1;
2108 rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_fat,
2109 ctrl_chan_high, &cmd.tx_power);
2111 IWL_DEBUG_11H("error:%d fill txpower_tbl\n", rc);
2115 rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
2119 #define RTS_HCCA_RETRY_LIMIT 3
2120 #define RTS_DFAULT_RETRY_LIMIT 60
/* Fill rate and retry-limit fields of a Tx command, based on frame type.
 * NOTE(review): extract is lossy; some branch conditions are elided. */
2122 void iwl4965_hw_build_tx_cmd_rate(struct iwl_priv *priv,
2123 struct iwl_cmd *cmd,
2124 struct ieee80211_tx_control *ctrl,
2125 struct ieee80211_hdr *hdr, int sta_id,
2128 struct iwl4965_tx_cmd *tx = &cmd->cmd.tx;
2129 u8 rts_retry_limit = 0;
2130 u8 data_retry_limit = 0;
2131 u16 fc = le16_to_cpu(hdr->frame_control);
/* Clamp mac80211's hw_value into the driver rate table */
2134 int rate_idx = min(ctrl->tx_rate->hw_value & 0xffff, IWL_RATE_COUNT - 1);
2136 rate_plcp = iwl4965_rates[rate_idx].plcp;
2138 rts_retry_limit = (is_hcca) ?
2139 RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;
2141 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
2142 rate_flags |= RATE_MCS_CCK_MSK;
2145 if (ieee80211_is_probe_response(fc)) {
2146 data_retry_limit = 3;
2147 if (data_retry_limit < rts_retry_limit)
2148 rts_retry_limit = data_retry_limit;
2150 data_retry_limit = IWL_DEFAULT_TX_RETRY;
/* -1 means "no override from debugfs/module param" */
2152 if (priv->data_retry_limit != -1)
2153 data_retry_limit = priv->data_retry_limit;
/* Data frames: let the uCode rate-scaling table pick the rate */
2156 if (ieee80211_is_data(fc)) {
2157 tx->initial_rate_index = 0;
2158 tx->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
2160 switch (fc & IEEE80211_FCTL_STYPE) {
2161 case IEEE80211_STYPE_AUTH:
2162 case IEEE80211_STYPE_DEAUTH:
2163 case IEEE80211_STYPE_ASSOC_REQ:
2164 case IEEE80211_STYPE_REASSOC_REQ:
/* For these management frames, prefer CTS-to-self over RTS */
2165 if (tx->tx_flags & TX_CMD_FLG_RTS_MSK) {
2166 tx->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2167 tx->tx_flags |= TX_CMD_FLG_CTS_MSK;
2174 /* Alternate between antenna A and B for successive frames */
2175 if (priv->use_ant_b_for_management_frame) {
2176 priv->use_ant_b_for_management_frame = 0;
2177 rate_flags |= RATE_MCS_ANT_B_MSK;
2179 priv->use_ant_b_for_management_frame = 1;
2180 rate_flags |= RATE_MCS_ANT_A_MSK;
2184 tx->rts_retry_limit = rts_retry_limit;
2185 tx->data_retry_limit = data_retry_limit;
2186 tx->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
2189 int iwl4965_hw_get_rx_read(struct iwl_priv *priv)
2191 struct iwl4965_shared *s = priv->shared_virt;
2192 return le32_to_cpu(s->rb_closed) & 0xFFF;
2195 int iwl4965_hw_get_temperature(struct iwl_priv *priv)
2197 return priv->temperature;
/* Build a beacon Tx command inside `frame`; returns the total command
 * size (header + beacon payload). */
2200 unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
2201 struct iwl4965_frame *frame, u8 rate)
2203 struct iwl4965_tx_beacon_cmd *tx_beacon_cmd;
2204 unsigned int frame_size;
2206 tx_beacon_cmd = &frame->u.beacon;
2207 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
2209 tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id;
2210 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2212 frame_size = iwl4965_fill_beacon_frame(priv,
2213 tx_beacon_cmd->frame,
2214 iwl4965_broadcast_addr,
2215 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
2217 BUG_ON(frame_size > MAX_MPDU_SIZE);
2218 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
/* NOTE(review): this condition matches 1M OR >= 2M PLCP, i.e. all CCK
 * rates get the CCK flag; confirm intent against the full source. */
2220 if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP))
2221 tx_beacon_cmd->tx.rate_n_flags =
2222 iwl4965_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK);
2224 tx_beacon_cmd->tx.rate_n_flags =
2225 iwl4965_hw_set_rate_n_flags(rate, 0);
2227 tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
2228 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK);
2229 return (sizeof(*tx_beacon_cmd) + frame_size);
2233 * Tell 4965 where to find circular buffer of Tx Frame Descriptors for
2234 * given Tx queue, and enable the DMA channel used for that queue.
2236 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
2237 * channels supported in hardware.
2239 int iwl4965_hw_tx_queue_init(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
2242 unsigned long flags;
2243 int txq_id = txq->q.id;
/* NIC access must be grabbed under priv->lock before register writes */
2245 spin_lock_irqsave(&priv->lock, flags);
2246 rc = iwl_grab_nic_access(priv);
2248 spin_unlock_irqrestore(&priv->lock, flags);
2252 /* Circular buffer (TFD queue in DRAM) physical base address */
2253 iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
2254 txq->q.dma_addr >> 8);
2256 /* Enable DMA channel, using same id as for TFD queue */
2258 priv, FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
2259 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2260 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
2261 iwl_release_nic_access(priv);
2262 spin_unlock_irqrestore(&priv->lock, flags);
/* Append one Tx buffer (addr/len) to the TFD at `ptr`; even- and
 * odd-numbered buffers use different bitfield layouts within pa[index]. */
2267 int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
2268 dma_addr_t addr, u16 len)
2271 struct iwl4965_tfd_frame *tfd = ptr;
2272 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);
2274 /* Each TFD can point to a maximum 20 Tx buffers */
/* NOTE(review): num_tbs is u32, so the (num_tbs < 0) test is always
 * false (dead code) -- only the upper-bound check is effective. */
2275 if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) {
2276 IWL_ERROR("Error can not send more than %d chunks\n",
/* Two buffers share each pa[] slot: index = pair, is_odd = which half */
2281 index = num_tbs / 2;
2282 is_odd = num_tbs & 0x1;
2285 tfd->pa[index].tb1_addr = cpu_to_le32(addr);
2286 IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
2287 iwl_get_dma_hi_address(addr));
2288 IWL_SET_BITS(tfd->pa[index], tb1_len, len);
2290 IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
2291 (u32) (addr & 0xffff));
2292 IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
2293 IWL_SET_BITS(tfd->pa[index], tb2_len, len);
2296 IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1);
2301 static void iwl4965_hw_card_show_info(struct iwl_priv *priv)
2303 u16 hw_version = iwl_eeprom_query16(priv, EEPROM_4965_BOARD_REVISION);
2305 IWL_DEBUG_INFO("4965ABGN HW Version %u.%u.%u\n",
2306 ((hw_version >> 8) & 0x0F),
2307 ((hw_version >> 8) >> 4), (hw_version & 0x00FF));
2309 IWL_DEBUG_INFO("4965ABGN PBA Number %.16s\n",
2310 &priv->eeprom[EEPROM_4965_BOARD_PBA]);
/* Allocate the coherent DMA area shared with the device and zero it.
 * The failure return and success return are in elided lines. */
2313 static int iwl4965_alloc_shared_mem(struct iwl_priv *priv)
2315 priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
2316 sizeof(struct iwl4965_shared),
2317 &priv->shared_phys);
2318 if (!priv->shared_virt)
2321 memset(priv->shared_virt, 0, sizeof(struct iwl4965_shared));
/* Release the device-shared DMA area if it was allocated; the trailing
 * arguments of pci_free_consistent are in elided lines. */
2326 static void iwl4965_free_shared_mem(struct iwl_priv *priv)
2328 if (priv->shared_virt)
2329 pci_free_consistent(priv->pci_dev,
2330 sizeof(struct iwl4965_shared),
2336 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
2338 static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
2339 struct iwl4965_tx_queue *txq,
2343 int txq_id = txq->q.id;
2344 struct iwl4965_shared *shared_data = priv->shared_virt;
/* Account for the CRC and delimiter bytes the hardware appends */
2346 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
2348 /* Set up byte count within first 256 entries */
2349 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
2350 tfd_offset[txq->q.write_ptr], byte_cnt, len);
2352 /* If within first 64 entries, duplicate at end */
2353 if (txq->q.write_ptr < IWL49_MAX_WIN_SIZE)
2354 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
2355 tfd_offset[IWL49_QUEUE_SIZE + txq->q.write_ptr],
2360 * sign_extend - Sign extend a value using specified bit as sign-bit
2362 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
2363 * and bit0..2 is 001b which when sign extended to 1111111111111001b is -7.
2365 * @param oper value to sign extend
2366 * @param index 0 based bit index (0<=index<32) to sign bit
2368 static s32 sign_extend(u32 oper, int index)
2370 u8 shift = 31 - index;
2372 return (s32)(oper << shift) >> shift;
2376 * iwl4965_get_temperature - return the calibrated temperature (in Kelvin)
2377 * @statistics: Provides the temperature reading from the uCode
2379 * A return of <0 indicates bogus data in the statistics
2381 int iwl4965_get_temperature(const struct iwl_priv *priv)
/* Pick FAT (wide-channel) or normal calibration constants, depending on
 * the mode flagged in the last statistics notification */
2388 if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
2389 (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)) {
2390 IWL_DEBUG_TEMP("Running FAT temperature calibration\n");
2391 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
2392 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
2393 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
2394 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
2396 IWL_DEBUG_TEMP("Running temperature calibration\n");
2397 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
2398 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
2399 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
2400 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
2404 * Temperature is only 23 bits, so sign extend out to 32.
2406 * NOTE If we haven't received a statistics notification yet
2407 * with an updated temperature, use R4 provided to us in the
2408 * "initialize" ALIVE response.
2410 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
2411 vt = sign_extend(R4, 23);
2414 le32_to_cpu(priv->statistics.general.temperature), 23);
2416 IWL_DEBUG_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n",
/* R1 == R3 would divide by zero below; treated as bogus calibration */
2420 IWL_ERROR("Calibration conflict R1 == R3\n");
2424 /* Calculate temperature in degrees Kelvin, adjust by 97%.
2425 * Add offset to center the adjustment around 0 degrees Centigrade. */
2426 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
2427 temperature /= (R3 - R1);
2428 temperature = (temperature * 97) / 100 +
2429 TEMPERATURE_CALIB_KELVIN_OFFSET;
2431 IWL_DEBUG_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
2432 KELVIN_TO_CELSIUS(temperature));
2437 /* Adjust Txpower only if temperature variance is greater than threshold. */
2438 #define IWL_TEMPERATURE_THRESHOLD 3
2441 * iwl4965_is_temp_calib_needed - determines if new calibration is needed
2443 * If the temperature changed has changed sufficiently, then a recalibration
2446 * Assumes caller will replace priv->last_temperature once calibration
2449 static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
2453 if (!test_bit(STATUS_STATISTICS, &priv->status)) {
2454 IWL_DEBUG_TEMP("Temperature not updated -- no statistics.\n")
2458 temp_diff = priv->temperature - priv->last_temperature;
2460 /* get absolute value */
2461 if (temp_diff < 0) {
2462 IWL_DEBUG_POWER("Getting cooler, delta %d, \n", temp_diff);
2463 temp_diff = -temp_diff;
2464 } else if (temp_diff == 0)
2465 IWL_DEBUG_POWER("Same temp, \n");
2467 IWL_DEBUG_POWER("Getting warmer, delta %d, \n", temp_diff);
/* Below threshold: no recalibration (return values are elided here) */
2469 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
2470 IWL_DEBUG_POWER("Thermal txpower calib not needed\n");
2474 IWL_DEBUG_POWER("Thermal txpower calib needed\n");
2479 /* Calculate noise level, based on measurements during network silence just
2480 * before arriving beacon. This measurement can be done only if we know
2481 * exactly when to expect beacons, therefore only when we're associated. */
2482 static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
2484 struct statistics_rx_non_phy *rx_info
2485 = &(priv->statistics.rx.general);
2486 int num_active_rx = 0;
2487 int total_silence = 0;
/* Per-antenna silence RSSI, masked to the in-band portion */
2489 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
2491 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
2493 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
/* Only antennas reporting nonzero silence count toward the average
 * (the num_active_rx increments are in elided lines) */
2495 if (bcn_silence_a) {
2496 total_silence += bcn_silence_a;
2499 if (bcn_silence_b) {
2500 total_silence += bcn_silence_b;
2503 if (bcn_silence_c) {
2504 total_silence += bcn_silence_c;
2508 /* Average among active antennas */
2510 priv->last_rx_noise = (total_silence / num_active_rx) - 107;
2512 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
2514 IWL_DEBUG_CALIB("inband silence a %u, b %u, c %u, dBm %d\n",
2515 bcn_silence_a, bcn_silence_b, bcn_silence_c,
2516 priv->last_rx_noise);
/* Handle a STATISTICS_NOTIFICATION from the uCode: cache the statistics,
 * rearm the periodic timer, recompute noise, and update temperature. */
2519 void iwl4965_hw_rx_statistics(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer *rxb)
2521 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
2525 IWL_DEBUG_RX("Statistics notification received (%d vs %d).\n",
2526 (int)sizeof(priv->statistics), pkt->len);
/* Did the temperature reading or the FAT-mode flag change? */
2528 change = ((priv->statistics.general.temperature !=
2529 pkt->u.stats.general.temperature) ||
2530 ((priv->statistics.flag &
2531 STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
2532 (pkt->u.stats.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)));
2534 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
2536 set_bit(STATUS_STATISTICS, &priv->status);
2538 /* Reschedule the statistics timer to occur in
2539 * REG_RECALIB_PERIOD seconds to ensure we get a
2540 * thermal update even if the uCode doesn't give
2542 mod_timer(&priv->statistics_periodic, jiffies +
2543 msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
2545 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
2546 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
2547 iwl4965_rx_calc_noise(priv);
2548 #ifdef CONFIG_IWL4965_RUN_TIME_CALIB
2549 queue_work(priv->workqueue, &priv->sensitivity_work);
2553 iwl_leds_background(priv);
2555 /* If the hardware hasn't reported a change in
2556 * temperature then don't bother computing a
2557 * calibrated temperature value */
2561 temp = iwl4965_get_temperature(priv);
2565 if (priv->temperature != temp) {
2566 if (priv->temperature)
2567 IWL_DEBUG_TEMP("Temperature changed "
2568 "from %dC to %dC\n",
2569 KELVIN_TO_CELSIUS(priv->temperature),
2570 KELVIN_TO_CELSIUS(temp));
2572 IWL_DEBUG_TEMP("Temperature "
2573 "initialized to %dC\n",
2574 KELVIN_TO_CELSIUS(temp));
2577 priv->temperature = temp;
2578 set_bit(STATUS_TEMPERATURE, &priv->status);
/* Kick off thermal txpower recalibration when the delta is big enough */
2580 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
2581 iwl4965_is_temp_calib_needed(priv))
2582 queue_work(priv->workqueue, &priv->txpower_work);
/* Prepend a radiotap header (TSF, flags, rate, channel, signal, noise,
 * antenna) to a received frame and mark the rx status accordingly. */
2585 static void iwl4965_add_radiotap(struct iwl_priv *priv,
2586 struct sk_buff *skb,
2587 struct iwl4965_rx_phy_res *rx_start,
2588 struct ieee80211_rx_status *stats,
2591 s8 signal = stats->ssi;
2593 int rate = stats->rate_idx;
2594 u64 tsf = stats->mactime;
2596 __le16 phy_flags_hw = rx_start->phy_flags;
2597 struct iwl4965_rt_rx_hdr {
2598 struct ieee80211_radiotap_header rt_hdr;
2599 __le64 rt_tsf; /* TSF */
2600 u8 rt_flags; /* radiotap packet flags */
2601 u8 rt_rate; /* rate in 500kb/s */
2602 __le16 rt_channelMHz; /* channel in MHz */
2603 __le16 rt_chbitmask; /* channel bitfield */
2604 s8 rt_dbmsignal; /* signal in dBm, kluged to signed */
2606 u8 rt_antenna; /* antenna number */
2607 } __attribute__ ((packed)) *iwl4965_rt;
2609 /* TODO: We won't have enough headroom for HT frames. Fix it later. */
2610 if (skb_headroom(skb) < sizeof(*iwl4965_rt)) {
2611 if (net_ratelimit())
2612 printk(KERN_ERR "not enough headroom [%d] for "
2613 "radiotap head [%zd]\n",
2614 skb_headroom(skb), sizeof(*iwl4965_rt));
2618 /* put radiotap header in front of 802.11 header and data */
2619 iwl4965_rt = (void *)skb_push(skb, sizeof(*iwl4965_rt));
2621 /* initialise radiotap header */
2622 iwl4965_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
2623 iwl4965_rt->rt_hdr.it_pad = 0;
2625 /* total header + data */
/* put_unaligned: the pushed header need not be naturally aligned */
2626 put_unaligned(cpu_to_le16(sizeof(*iwl4965_rt)),
2627 &iwl4965_rt->rt_hdr.it_len);
2629 /* Indicate all the fields we add to the radiotap header */
2630 put_unaligned(cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) |
2631 (1 << IEEE80211_RADIOTAP_FLAGS) |
2632 (1 << IEEE80211_RADIOTAP_RATE) |
2633 (1 << IEEE80211_RADIOTAP_CHANNEL) |
2634 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
2635 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
2636 (1 << IEEE80211_RADIOTAP_ANTENNA)),
2637 &iwl4965_rt->rt_hdr.it_present);
2639 /* Zero the flags, we'll add to them as we go */
2640 iwl4965_rt->rt_flags = 0;
2642 put_unaligned(cpu_to_le64(tsf), &iwl4965_rt->rt_tsf);
2644 iwl4965_rt->rt_dbmsignal = signal;
2645 iwl4965_rt->rt_dbmnoise = noise;
2647 /* Convert the channel frequency and set the flags */
2648 put_unaligned(cpu_to_le16(stats->freq), &iwl4965_rt->rt_channelMHz);
2649 if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
2650 put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
2651 IEEE80211_CHAN_5GHZ),
2652 &iwl4965_rt->rt_chbitmask);
2653 else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
2654 put_unaligned(cpu_to_le16(IEEE80211_CHAN_CCK |
2655 IEEE80211_CHAN_2GHZ),
2656 &iwl4965_rt->rt_chbitmask);
2658 put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
2659 IEEE80211_CHAN_2GHZ),
2660 &iwl4965_rt->rt_chbitmask);
2663 iwl4965_rt->rt_rate = 0;
2665 iwl4965_rt->rt_rate = iwl4965_rates[rate].ieee;
2670 * It seems that the antenna field in the phy flags value
2671 * is actually a bitfield. This is undefined by radiotap,
2672 * it wants an actual antenna number but I always get "7"
2673 * for most legacy frames I receive indicating that the
2674 * same frame was received on all three RX chains.
2676 * I think this field should be removed in favour of a
2677 * new 802.11n radiotap field "RX chains" that is defined
2680 antenna = phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK;
2681 iwl4965_rt->rt_antenna = le16_to_cpu(antenna) >> 4;
2683 /* set the preamble flag if appropriate */
2684 if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
2685 iwl4965_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2687 stats->flag |= RX_FLAG_RADIOTAP;
2690 static void iwl_update_rx_stats(struct iwl_priv *priv, u16 fc, u16 len)
2692 /* 0 - mgmt, 1 - cnt, 2 - data */
2693 int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
2694 priv->rx_stats[idx].cnt++;
2695 priv->rx_stats[idx].bytes += len;
/*
 * iwl4965_set_decrypted_flag - interpret uCode decryption status
 *
 * Inspects the security-type and decryption-status bits of decrypt_res
 * and either marks @stats with RX_FLAG_DECRYPTED (successful HW
 * WEP/CCMP/TKIP decryption) or signals that the frame must be dropped
 * (bad ICV/MIC, where in-place decryption already destroyed the
 * payload).  A TKIP frame with a bad phase-1 key (TTAK) is passed up
 * for software decryption instead.  Frames that are not protected, or
 * received while RXON_FILTER_DIS_DECRYPT_MSK is set, pass through.
 */
2699 * returns non-zero if packet should be dropped
2701 static int iwl4965_set_decrypted_flag(struct iwl_priv *priv,
2702 struct ieee80211_hdr *hdr,
2704 struct ieee80211_rx_status *stats)
2706 u16 fc = le16_to_cpu(hdr->frame_control);
2708 if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
2711 if (!(fc & IEEE80211_FCTL_PROTECTED))
2714 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
2715 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
2716 case RX_RES_STATUS_SEC_TYPE_TKIP:
2717 /* The uCode has got a bad phase 1 Key, pushes the packet.
2718 * Decryption will be done in SW. */
2719 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2720 RX_RES_STATUS_BAD_KEY_TTAK)
2723 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2724 RX_RES_STATUS_BAD_ICV_MIC) {
2725 /* bad ICV, the packet is destroyed since the
2726 * decryption is inplace, drop it */
2727 IWL_DEBUG_RX("Packet destroyed\n");
2730 case RX_RES_STATUS_SEC_TYPE_WEP:
2731 case RX_RES_STATUS_SEC_TYPE_CCMP:
2732 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2733 RX_RES_STATUS_DECRYPT_OK) {
2734 IWL_DEBUG_RX("hw decrypt successfully!!!\n");
2735 stats->flag |= RX_FLAG_DECRYPTED;
/*
 * iwl4965_translate_rx_status - convert new-scheme MPDU decrypt status
 *
 * Maps the RX_MPDU_RES_STATUS_* bit layout reported by newer uCode into
 * the legacy RX_RES_STATUS_* layout consumed by
 * iwl4965_set_decrypted_flag(): carries over the security type, then
 * derives DECRYPT_OK / BAD_ICV_MIC / BAD_KEY_TTAK from the MIC, TTAK
 * and ICV bits (CCMP checks MIC only; TKIP checks TTAK first, then
 * ICV).  Frames that were unencrypted, of unknown algorithm, or not
 * decrypted in HW are returned without a decrypt verdict.
 */
2745 static u32 iwl4965_translate_rx_status(u32 decrypt_in)
2747 u32 decrypt_out = 0;
2749 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
2750 RX_RES_STATUS_STATION_FOUND)
2751 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
2752 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
2754 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
2756 /* packet was not encrypted */
2757 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
2758 RX_RES_STATUS_SEC_TYPE_NONE)
2761 /* packet was encrypted with unknown alg */
2762 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
2763 RX_RES_STATUS_SEC_TYPE_ERR)
2766 /* decryption was not done in HW */
2767 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
2768 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
2771 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
2773 case RX_RES_STATUS_SEC_TYPE_CCMP:
2774 /* alg is CCM: check MIC only */
2775 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
2777 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
2779 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
2783 case RX_RES_STATUS_SEC_TYPE_TKIP:
2784 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
2786 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
2789 /* fall through if TTAK OK */
2791 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
2792 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
2794 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
2798 IWL_DEBUG_RX("decrypt_in:0x%x decrypt_out = 0x%x\n",
2799 decrypt_in, decrypt_out);
/*
 * iwl4965_handle_data_packet - hand a received 802.11 frame up to mac80211
 *
 * Locates the 802.11 header and payload inside the rx buffer: for legacy
 * frames (@include_phy) the PHY result precedes the header within this
 * same packet; for MPDU (HT) frames the PHY data was cached earlier in
 * priv->last_phy_res by iwl4965_rx_reply_rx_phy().  Validates the byte
 * count, reads the status word that follows the payload (translating it
 * to the legacy layout when the new decrypt scheme is in use), trims the
 * skb to the frame proper, drops the frame when the interface is closed
 * or HW decryption failed, optionally prepends a radiotap header,
 * updates RX counters and finally passes the skb to
 * ieee80211_rx_irqsafe() (which consumes the skb; alloc_rxb_skb is
 * decremented to reflect the handoff).
 */
2804 static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data,
2806 struct iwl4965_rx_mem_buffer *rxb,
2807 struct ieee80211_rx_status *stats)
2809 struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
2810 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
2811 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL;
2812 struct ieee80211_hdr *hdr;
2815 unsigned int skblen;
2817 u32 ampdu_status_legacy;
2819 if (!include_phy && priv->last_phy_res[0])
2820 rx_start = (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
2823 IWL_ERROR("MPDU frame without a PHY data\n");
2827 hdr = (struct ieee80211_hdr *)((u8 *) & rx_start[1] +
2828 rx_start->cfg_phy_cnt);
2830 len = le16_to_cpu(rx_start->byte_count);
2832 rx_end = (__le32 *) ((u8 *) & pkt->u.raw[0] +
2833 sizeof(struct iwl4965_rx_phy_res) +
2834 rx_start->cfg_phy_cnt + len);
2837 struct iwl4965_rx_mpdu_res_start *amsdu =
2838 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
2840 hdr = (struct ieee80211_hdr *)(pkt->u.raw +
2841 sizeof(struct iwl4965_rx_mpdu_res_start));
2842 len = le16_to_cpu(amsdu->byte_count);
2843 rx_start->byte_count = amsdu->byte_count;
2844 rx_end = (__le32 *) (((u8 *) hdr) + len);
2846 if (len > priv->hw_params.max_pkt_size || len < 16) {
2847 IWL_WARNING("byte count out of range [16,4K] : %d\n", len);
2851 ampdu_status = le32_to_cpu(*rx_end);
2852 skblen = ((u8 *) rx_end - (u8 *) & pkt->u.raw[0]) + sizeof(u32);
2855 /* New status scheme, need to translate */
2856 ampdu_status_legacy = ampdu_status;
2857 ampdu_status = iwl4965_translate_rx_status(ampdu_status);
2860 /* start from MAC */
2861 skb_reserve(rxb->skb, (void *)hdr - (void *)pkt);
2862 skb_put(rxb->skb, len); /* end where data ends */
2864 /* We only process data packets if the interface is open */
2865 if (unlikely(!priv->is_open)) {
2866 IWL_DEBUG_DROP_LIMIT
2867 ("Dropping packet while interface is not open.\n");
2872 hdr = (struct ieee80211_hdr *)rxb->skb->data;
2874 /* in case of HW accelerated crypto and bad decryption, drop */
2875 if (!priv->hw_params.sw_crypto &&
2876 iwl4965_set_decrypted_flag(priv, hdr, ampdu_status, stats))
2879 if (priv->add_radiotap)
2880 iwl4965_add_radiotap(priv, rxb->skb, rx_start, stats, ampdu_status);
2882 iwl_update_rx_stats(priv, le16_to_cpu(hdr->frame_control), len);
2883 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
2884 priv->alloc_rxb_skb--;
2888 /* Calc max signal level (dBm) among 3 possible receivers */
2889 static int iwl4965_calc_rssi(struct iwl4965_rx_phy_res *rx_resp)
2891 /* data from PHY/DSP regarding signal strength, etc.,
2892 * contents are always there, not configurable by host. */
2893 struct iwl4965_rx_non_cfg_phy *ncphy =
2894 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy;
2895 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL_AGC_DB_MASK)
2898 u32 valid_antennae =
2899 (le16_to_cpu(rx_resp->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK)
2900 >> RX_PHY_FLAGS_ANTENNAE_OFFSET;
2904 /* Find max rssi among 3 possible receivers.
2905 * These values are measured by the digital signal processor (DSP).
2906 * They should stay fairly constant even as the signal strength varies,
2907 * if the radio's automatic gain control (AGC) is working right.
2908 * AGC value (see below) will provide the "interesting" info. */
	/* The per-chain RSSI values occupy the even indices of
	 * rssi_info[] (0/2/4 for chains A/B/C), hence the i << 1. */
2909 for (i = 0; i < 3; i++)
2910 if (valid_antennae & (1 << i))
2911 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
2913 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
2914 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
2917 /* dBm = max_rssi dB - agc dB - constant.
2918 * Higher AGC (higher radio gain) means lower signal. */
2919 return (max_rssi - agc - IWL_RSSI_OFFSET);
/*
 * iwl4965_sta_modify_ps_wake - clear a station's power-save flag in uCode
 *
 * Under sta_lock, clears STA_FLG_PWR_SAVE_MSK in the driver's copy of
 * the station entry and sets up a MODIFY-mode ADD_STA command, then
 * sends it to the uCode asynchronously (CMD_ASYNC — no wait for reply).
 */
2922 static void iwl4965_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
2924 unsigned long flags;
2926 spin_lock_irqsave(&priv->sta_lock, flags);
2927 priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
2928 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
2929 priv->stations[sta_id].sta.sta.modify_mask = 0;
2930 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
2931 spin_unlock_irqrestore(&priv->sta_lock, flags);
2933 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
/*
 * iwl4965_update_ps_mode - track a peer station's power-save transitions
 *
 * Called (in AP mode) with the PM bit of a received frame.  Awake->sleep
 * only records the new state; sleep->awake additionally pushes the
 * cleared power-save flag to the uCode via iwl4965_sta_modify_ps_wake().
 * Frames from unknown stations are ignored.
 */
2936 static void iwl4965_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr)
2938 /* FIXME: need locking over ps_status ??? */
2939 u8 sta_id = iwl_find_station(priv, addr);
2941 if (sta_id != IWL_INVALID_STATION) {
2942 u8 sta_awake = priv->stations[sta_id].
2943 ps_status == STA_PS_STATUS_WAKE;
2945 if (sta_awake && ps_bit)
2946 priv->stations[sta_id].ps_status = STA_PS_STATUS_SLEEP;
2947 else if (!sta_awake && !ps_bit) {
2948 iwl4965_sta_modify_ps_wake(priv, sta_id);
2949 priv->stations[sta_id].ps_status = STA_PS_STATUS_WAKE;
2953 #ifdef CONFIG_IWLWIFI_DEBUG
2956 * iwl4965_dbg_report_frame - dump frame to syslog during debug sessions
2958 * You may hack this function to show different aspects of received frames,
2959 * including selective frame dumps.
2960 * group100 parameter selects whether to show 1 out of 100 good frames.
2962 * TODO: This was originally written for 3945, need to audit for
2963 * proper operation with 4965.
/* Debug-only frame dumper (CONFIG_IWLWIFI_DEBUG build); does nothing
 * unless IWL_DL_RX is set in iwl_debug_level.  For good to-us data
 * frames, @group100 throttles output to one summary per 100 frames;
 * probe responses and beacons additionally get a hex dump. */
2965 static void iwl4965_dbg_report_frame(struct iwl_priv *priv,
2966 struct iwl4965_rx_packet *pkt,
2967 struct ieee80211_hdr *header, int group100)
2970 u32 print_summary = 0;
2971 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
2988 struct iwl4965_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
2989 struct iwl4965_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
2990 struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
2991 u8 *data = IWL_RX_DATA(pkt);
2993 if (likely(!(iwl_debug_level & IWL_DL_RX)))
2997 fc = le16_to_cpu(header->frame_control);
2998 seq_ctl = le16_to_cpu(header->seq_ctrl);
3001 channel = le16_to_cpu(rx_hdr->channel);
3002 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
3003 rate_sym = rx_hdr->rate;
3004 length = le16_to_cpu(rx_hdr->len);
3006 /* end-of-frame status and timestamp */
3007 status = le32_to_cpu(rx_end->status);
3008 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
3009 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
3010 tsf = le64_to_cpu(rx_end->timestamp);
3012 /* signal statistics */
3013 rssi = rx_stats->rssi;
3014 agc = rx_stats->agc;
3015 sig_avg = le16_to_cpu(rx_stats->sig_avg);
3016 noise_diff = le16_to_cpu(rx_stats->noise_diff);
3018 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
3020 /* if data frame is to us and all is good,
3021 * (optionally) print summary for only 1 out of every 100 */
3022 if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
3023 (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
3026 print_summary = 1; /* print each frame */
3027 else if (priv->framecnt_to_us < 100) {
3028 priv->framecnt_to_us++;
3031 priv->framecnt_to_us = 0;
3036 /* print summary for all other frames */
3040 if (print_summary) {
3046 title = "100Frames";
3047 else if (fc & IEEE80211_FCTL_RETRY)
3049 else if (ieee80211_is_assoc_response(fc))
3051 else if (ieee80211_is_reassoc_response(fc))
3053 else if (ieee80211_is_probe_response(fc)) {
3055 print_dump = 1; /* dump frame contents */
3056 } else if (ieee80211_is_beacon(fc)) {
3058 print_dump = 1; /* dump frame contents */
3059 } else if (ieee80211_is_atim(fc))
3061 else if (ieee80211_is_auth(fc))
3063 else if (ieee80211_is_deauth(fc))
3065 else if (ieee80211_is_disassoc(fc))
	/* map the hw rate symbol to a rate-table index; -1 = unknown */
3070 rate_idx = iwl4965_hwrate_to_plcp_idx(rate_sym);
3071 if (unlikely(rate_idx == -1))
3074 bitrate = iwl4965_rates[rate_idx].ieee / 2;
3076 /* print frame summary.
3077 * MAC addresses show just the last byte (for brevity),
3078 * but you can hack it to show more, if you'd like to. */
3080 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
3081 "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
3082 title, fc, header->addr1[5],
3083 length, rssi, channel, bitrate);
3085 /* src/dst addresses assume managed mode */
3086 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
3087 "src=0x%02x, rssi=%u, tim=%lu usec, "
3088 "phy=0x%02x, chnl=%d\n",
3089 title, fc, header->addr1[5],
3090 header->addr3[5], rssi,
3091 tsf_low - priv->scan_start_tsf,
3092 phy_flags, channel);
3096 iwl_print_hex_dump(IWL_DL_RX, data, length);
3099 static inline void iwl4965_dbg_report_frame(struct iwl_priv *priv,
3100 struct iwl4965_rx_packet *pkt,
3101 struct ieee80211_hdr *header,
3109 /* Called for REPLY_RX (legacy ABG frames), or
3110 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
/*
 * Fills an ieee80211_rx_status from the PHY result (timestamp, channel,
 * band, rate index, RSSI, noise/quality), validates cfg_phy_cnt and the
 * CRC/overflow status bits at the end of the frame, then dispatches by
 * frame type: management, HT BACK_REQ control frames and data frames
 * all funnel into iwl4965_handle_data_packet(); AP mode additionally
 * tracks each sender's PM bit via iwl4965_update_ps_mode().
 */
3111 static void iwl4965_rx_reply_rx(struct iwl_priv *priv,
3112 struct iwl4965_rx_mem_buffer *rxb)
3114 struct ieee80211_hdr *header;
3115 struct ieee80211_rx_status rx_status;
3116 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3117 /* Use phy data (Rx signal strength, etc.) contained within
3118 * this rx packet for legacy frames,
3119 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */
3120 int include_phy = (pkt->hdr.cmd == REPLY_RX);
3121 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3122 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) :
3123 (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
3125 unsigned int len = 0;
3129 rx_status.mactime = le64_to_cpu(rx_start->timestamp);
3131 ieee80211_frequency_to_channel(le16_to_cpu(rx_start->channel));
3132 rx_status.band = (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
3133 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
3134 rx_status.rate_idx =
3135 iwl4965_hwrate_to_plcp_idx(le32_to_cpu(rx_start->rate_n_flags));
	/* 5 GHz band has no CCK rates; rebase the index past them */
3136 if (rx_status.band == IEEE80211_BAND_5GHZ)
3137 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
3139 rx_status.antenna = 0;
3142 if ((unlikely(rx_start->cfg_phy_cnt > 20))) {
3143 IWL_DEBUG_DROP("dsp size out of range [0,20]: %d/n",
3144 rx_start->cfg_phy_cnt);
3149 if (priv->last_phy_res[0])
3150 rx_start = (struct iwl4965_rx_phy_res *)
3151 &priv->last_phy_res[1];
3157 IWL_ERROR("MPDU frame without a PHY data\n");
3162 header = (struct ieee80211_hdr *)((u8 *) & rx_start[1]
3163 + rx_start->cfg_phy_cnt);
3165 len = le16_to_cpu(rx_start->byte_count);
3166 rx_end = (__le32 *)(pkt->u.raw + rx_start->cfg_phy_cnt +
3167 sizeof(struct iwl4965_rx_phy_res) + len);
3169 struct iwl4965_rx_mpdu_res_start *amsdu =
3170 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
3172 header = (void *)(pkt->u.raw +
3173 sizeof(struct iwl4965_rx_mpdu_res_start));
3174 len = le16_to_cpu(amsdu->byte_count);
3175 rx_end = (__le32 *) (pkt->u.raw +
3176 sizeof(struct iwl4965_rx_mpdu_res_start) + len);
3179 if (!(*rx_end & RX_RES_STATUS_NO_CRC32_ERROR) ||
3180 !(*rx_end & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
3181 IWL_DEBUG_RX("Bad CRC or FIFO: 0x%08X.\n",
3182 le32_to_cpu(*rx_end));
3186 priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp);
3188 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
3189 rx_status.ssi = iwl4965_calc_rssi(rx_start);
3191 /* Meaningful noise values are available only from beacon statistics,
3192 * which are gathered only when associated, and indicate noise
3193 * only for the associated network channel ...
3194 * Ignore these noise values while scanning (other channels) */
3195 if (iwl_is_associated(priv) &&
3196 !test_bit(STATUS_SCANNING, &priv->status)) {
3197 rx_status.noise = priv->last_rx_noise;
3198 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi,
3201 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3202 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi, 0);
3205 /* Reset beacon noise level if not associated. */
3206 if (!iwl_is_associated(priv))
3207 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3209 /* Set "1" to report good data frames in groups of 100 */
3210 /* FIXME: need to optimze the call: */
3211 iwl4965_dbg_report_frame(priv, pkt, header, 1);
3213 IWL_DEBUG_STATS_LIMIT("Rssi %d, noise %d, qual %d, TSF %llu\n",
3214 rx_status.ssi, rx_status.noise, rx_status.signal,
3215 (unsigned long long)rx_status.mactime);
3217 network_packet = iwl4965_is_network_packet(priv, header);
3218 if (network_packet) {
3219 priv->last_rx_rssi = rx_status.ssi;
3220 priv->last_beacon_time = priv->ucode_beacon_time;
3221 priv->last_tsf = le64_to_cpu(rx_start->timestamp);
3224 fc = le16_to_cpu(header->frame_control);
3225 switch (fc & IEEE80211_FCTL_FTYPE) {
3226 case IEEE80211_FTYPE_MGMT:
3227 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
3228 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
3230 iwl4965_handle_data_packet(priv, 0, include_phy, rxb, &rx_status);
3233 case IEEE80211_FTYPE_CTL:
3234 #ifdef CONFIG_IWL4965_HT
3235 switch (fc & IEEE80211_FCTL_STYPE) {
3236 case IEEE80211_STYPE_BACK_REQ:
3237 IWL_DEBUG_HT("IEEE80211_STYPE_BACK_REQ arrived\n");
3238 iwl4965_handle_data_packet(priv, 0, include_phy,
3247 case IEEE80211_FTYPE_DATA: {
3248 DECLARE_MAC_BUF(mac1);
3249 DECLARE_MAC_BUF(mac2);
3250 DECLARE_MAC_BUF(mac3);
3252 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
3253 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
3256 if (unlikely(!network_packet))
3257 IWL_DEBUG_DROP("Dropping (non network): "
3259 print_mac(mac1, header->addr1),
3260 print_mac(mac2, header->addr2),
3261 print_mac(mac3, header->addr3));
3262 else if (unlikely(iwl4965_is_duplicate_packet(priv, header)))
3263 IWL_DEBUG_DROP("Dropping (dup): %s, %s, %s\n",
3264 print_mac(mac1, header->addr1),
3265 print_mac(mac2, header->addr2),
3266 print_mac(mac3, header->addr3));
3268 iwl4965_handle_data_packet(priv, 1, include_phy, rxb,
3278 /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
3279 * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
3280 static void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
3281 struct iwl4965_rx_mem_buffer *rxb)
3283 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
	/* last_phy_res[0] is a validity flag; the cached PHY result
	 * payload starts at last_phy_res[1]. */
3284 priv->last_phy_res[0] = 1;
3285 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
3286 sizeof(struct iwl4965_rx_phy_res));
/*
 * iwl4965_rx_missed_beacon_notif - handle a missed-beacon notification
 *
 * When more than 5 consecutive beacons were missed and no scan is in
 * progress, re-initialise the sensitivity calibration.  The body is
 * compiled out unless CONFIG_IWL4965_RUN_TIME_CALIB is set.
 */
3288 static void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
3289 struct iwl4965_rx_mem_buffer *rxb)
3292 #ifdef CONFIG_IWL4965_RUN_TIME_CALIB
3293 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3294 struct iwl4965_missed_beacon_notif *missed_beacon;
3296 missed_beacon = &pkt->u.missed_beacon;
3297 if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
3298 IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
3299 le32_to_cpu(missed_beacon->consequtive_missed_beacons),
3300 le32_to_cpu(missed_beacon->total_missed_becons),
3301 le32_to_cpu(missed_beacon->num_recvd_beacons),
3302 le32_to_cpu(missed_beacon->num_expected_beacons),
3303 if (!test_bit(STATUS_SCANNING, &priv->status))
3304 iwl_init_sensitivity(priv);
3306 #endif /*CONFIG_IWL4965_RUN_TIME_CALIB*/
3308 #ifdef CONFIG_IWL4965_HT
3311 * iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
3313 static void iwl4965_sta_modify_enable_tid_tx(struct iwl_priv *priv,
3314 int sta_id, int tid)
3316 unsigned long flags;
3318 /* Remove "disable" flag, to enable Tx for this TID */
3319 spin_lock_irqsave(&priv->sta_lock, flags);
3320 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
3321 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
3322 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3323 spin_unlock_irqrestore(&priv->sta_lock, flags);
	/* CMD_ASYNC: fire-and-forget update of the uCode station table */
3325 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
3329 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
3331 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
3332 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
3334 static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
3335 struct iwl4965_ht_agg *agg,
3336 struct iwl4965_compressed_ba_resp*
3341 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
3342 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
3345 struct ieee80211_tx_status *tx_status;
3347 if (unlikely(!agg->wait_for_ba)) {
3348 IWL_ERROR("Received BA when not expected\n");
3352 /* Mark that the expected block-ack response arrived */
3353 agg->wait_for_ba = 0;
3354 IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
3356 /* Calculate shift to align block-ack bits with our Tx window bits */
3357 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl>>4);
3358 if (sh < 0) /* tbw something is wrong with indices */
3361 /* don't use 64-bit values for now */
3362 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
3364 if (agg->frame_count > (64 - sh)) {
3365 IWL_DEBUG_TX_REPLY("more frames than bitmap size");
3369 /* check for success or failure according to the
3370 * transmitted bitmap and block-ack bitmap */
3371 bitmap &= agg->bitmap;
3373 /* For each frame attempted in aggregation,
3374 * update driver's record of tx frame's status. */
3375 for (i = 0; i < agg->frame_count ; i++) {
3376 ack = bitmap & (1 << i);
3378 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
3379 ack? "ACK":"NACK", i, (agg->start_idx + i) & 0xff,
3380 agg->start_idx + i);
	/* Aggregate tx status is reported once, on the first frame of
	 * the window (txb[agg->start_idx]), flagged as an A-MPDU. */
3383 tx_status = &priv->txq[scd_flow].txb[agg->start_idx].status;
3384 tx_status->flags = IEEE80211_TX_STATUS_ACK;
3385 tx_status->flags |= IEEE80211_TX_STATUS_AMPDU;
3386 tx_status->ampdu_ack_map = successes;
3387 tx_status->ampdu_ack_len = agg->frame_count;
3388 iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags,
3389 &tx_status->control);
3391 IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
3397 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
3399 static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
3402 /* Simply stop the queue, but don't change any configuration;
3403 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	/* Writing (0 << ACTIVE) together with (1 << SCD_ACT_EN) clears
	 * only the ACTIVE bit of this queue's status register. */
3404 iwl_write_prph(priv,
3405 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
3406 (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
3407 (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
3411 * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID
3412 * priv->lock must be held by the caller
/*
 * iwl4965_tx_queue_agg_disable - tear down an aggregation Tx queue
 *
 * Stops the scheduler for @txq_id, removes the queue from the
 * chain-building set, resets read/write pointers to @ssn_idx, masks the
 * queue's scheduler interrupt, deactivates its context and re-binds it
 * to @tx_fifo as a non-aggregation queue.  Needs NIC access; see the
 * locking note in the comment above.
 */
3414 static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id,
3415 u16 ssn_idx, u8 tx_fifo)
3419 if (IWL_BACK_QUEUE_FIRST_ID > txq_id) {
3420 IWL_WARNING("queue number too small: %d, must be > %d\n",
3421 txq_id, IWL_BACK_QUEUE_FIRST_ID);
3425 ret = iwl_grab_nic_access(priv);
3429 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
3431 iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
3433 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
3434 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
3435 /* supposes that ssn_idx is valid (!= 0xFFF) */
3436 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
3438 iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
3439 iwl4965_txq_ctx_deactivate(priv, txq_id);
3440 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
3442 iwl_release_nic_access(priv);
/*
 * iwl4965_check_empty_hw_queue - advance ADDBA/DELBA flow once queue drains
 *
 * DELBA: when the aggregation HW queue has fully drained (read == write
 * on the agg txq), disable the agg queue and tell mac80211 the BA
 * session stop has completed.
 * ADDBA: when all pending TFDs have been reclaimed, mark the session ON
 * and tell mac80211 the BA session start may proceed.
 */
3447 int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id,
3450 struct iwl4965_queue *q = &priv->txq[txq_id].q;
3451 u8 *addr = priv->stations[sta_id].sta.sta.addr;
3452 struct iwl4965_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
3454 switch (priv->stations[sta_id].tid[tid].agg.state) {
3455 case IWL_EMPTYING_HW_QUEUE_DELBA:
3456 /* We are reclaiming the last packet of the */
3457 /* aggregated HW queue */
3458 if (txq_id == tid_data->agg.txq_id &&
3459 q->read_ptr == q->write_ptr) {
3460 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
3461 int tx_fifo = default_tid_to_tx_fifo[tid];
3462 IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
3463 iwl4965_tx_queue_agg_disable(priv, txq_id,
3465 tid_data->agg.state = IWL_AGG_OFF;
3466 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
3469 case IWL_EMPTYING_HW_QUEUE_ADDBA:
3470 /* We are reclaiming the last packet of the queue */
3471 if (tid_data->tfds_in_queue == 0) {
3472 IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
3473 tid_data->agg.state = IWL_AGG_ON;
3474 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
3482 * iwl4965_queue_dec_wrap - Decrement queue index, wrap back to end if needed
3483 * @index -- current index
3484 * @n_bd -- total number of entries in queue (s/b power of 2)
static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
{
	/* Step one entry backwards, wrapping from slot 0 to the last slot. */
	if (index != 0)
		return index - 1;
	return n_bd - 1;
}
3492 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
3494 * Handles block-acknowledge notification from device, which reports success
3495 * of frames sent via aggregation.
3497 static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
3498 struct iwl4965_rx_mem_buffer *rxb)
3500 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3501 struct iwl4965_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
3503 struct iwl4965_tx_queue *txq = NULL;
3504 struct iwl4965_ht_agg *agg;
3505 DECLARE_MAC_BUF(mac);
3507 /* "flow" corresponds to Tx queue */
3508 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
3510 /* "ssn" is start of block-ack Tx window, corresponds to index
3511 * (in Tx queue's circular buffer) of first TFD/frame in window */
3512 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
	/* Range-check the device-reported queue before indexing txq[] */
3514 if (scd_flow >= priv->hw_params.max_txq_num) {
3515 IWL_ERROR("BUG_ON scd_flow is bigger than number of queues");
3519 txq = &priv->txq[scd_flow];
3520 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg;
3522 /* Find index just before block-ack window */
3523 index = iwl4965_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
3525 /* TODO: Need to get this copy more safely - now good for debug */
3527 IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, "
3530 print_mac(mac, (u8*) &ba_resp->sta_addr_lo32),
3532 IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
3533 "%d, scd_ssn = %d\n",
3536 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
3539 IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx \n",
3541 (unsigned long long)agg->bitmap);
3543 /* Update driver's record of ACK vs. not for each frame in window */
3544 iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
3546 /* Release all TFDs before the SSN, i.e. all TFDs in front of
3547 * block-ack window (we assume that they've been successfully
3548 * transmitted ... if not, it's too late anyway). */
3549 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
3550 int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
3551 priv->stations[ba_resp->sta_id].
3552 tid[ba_resp->tid].tfds_in_queue -= freed;
3553 if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
3554 priv->mac80211_registered &&
3555 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
3556 ieee80211_wake_queue(priv->hw, scd_flow);
3557 iwl4965_check_empty_hw_queue(priv, ba_resp->sta_id,
3558 ba_resp->tid, scd_flow);
3563 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
/*
 * Writes the RA/TID mapping for @txq_id into the scheduler's translate
 * table in SRAM.  Each 32-bit table word holds two queue entries, so
 * the existing word is read, one 16-bit half is replaced with the
 * masked RA/TID value, and the word is written back.
 */
3565 static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
3572 scd_q2ratid = ra_tid & IWL49_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
3574 tbl_dw_addr = priv->scd_base_addr +
3575 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
3577 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
3580 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
3582 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
3584 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
3591 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
3593 * NOTE: txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID,
3594 * i.e. it must be one of the higher queues used for aggregation
3596 static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id,
3597 int tx_fifo, int sta_id, int tid,
3600 unsigned long flags;
3604 if (IWL_BACK_QUEUE_FIRST_ID > txq_id)
3605 IWL_WARNING("queue number too small: %d, must be > %d\n",
3606 txq_id, IWL_BACK_QUEUE_FIRST_ID);
3608 ra_tid = BUILD_RAxTID(sta_id, tid);
3610 /* Modify device's station table to Tx this TID */
3611 iwl4965_sta_modify_enable_tid_tx(priv, sta_id, tid);
	/* Scheduler programming is done under priv->lock with NIC access
	 * grabbed; bail out with the grab error code on failure. */
3613 spin_lock_irqsave(&priv->lock, flags);
3614 rc = iwl_grab_nic_access(priv);
3616 spin_unlock_irqrestore(&priv->lock, flags);
3620 /* Stop this Tx queue before configuring it */
3621 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
3623 /* Map receiver-address / traffic-ID to this queue */
3624 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
3626 /* Set this queue as a chain-building queue */
3627 iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
3629 /* Place first TFD at index corresponding to start sequence number.
3630 * Assumes that ssn_idx is valid (!= 0xFFF) */
3631 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
3632 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
3633 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
3635 /* Set up Tx window size and frame limit for this queue */
3636 iwl_write_targ_mem(priv,
3637 priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
3638 (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
3639 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
3641 iwl_write_targ_mem(priv, priv->scd_base_addr +
3642 IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
3643 (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
3644 & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
3646 iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
3648 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
3649 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
3651 iwl_release_nic_access(priv);
3652 spin_unlock_irqrestore(&priv->lock, flags);
3657 #endif /* CONFIG_IWL4965_HT */
3660 * iwl4965_add_station - Initialize a station's hardware rate table
3662 * The uCode's station table contains a table of fallback rates
3663 * for automatic fallback during transmission.
3665 * NOTE: This sets up a default set of values. These will be replaced later
3666 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
3669 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
3670 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
3671 * which requires station table entry to exist).
3673 void iwl4965_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
3676 struct iwl_link_quality_cmd link_cmd = {
3681 /* Set up the rate scaling to start at selected rate, fall back
3682 * all the way down to 1M in IEEE order, and then spin on 1M */
3684 r = IWL_RATE_54M_INDEX;
3685 else if (priv->band == IEEE80211_BAND_5GHZ)
3686 r = IWL_RATE_6M_INDEX;
3688 r = IWL_RATE_1M_INDEX;
3690 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3692 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
3693 rate_flags |= RATE_MCS_CCK_MSK;
3695 /* Use Tx antenna B only */
3696 rate_flags |= RATE_MCS_ANT_B_MSK; /*FIXME:RS*/
3698 link_cmd.rs_table[i].rate_n_flags =
3699 iwl4965_hw_set_rate_n_flags(iwl4965_rates[r].plcp, rate_flags);
3700 r = iwl4965_get_prev_ieee_rate(r);
3703 link_cmd.general_params.single_stream_ant_msk = 2;
3704 link_cmd.general_params.dual_stream_ant_msk = 3;
3705 link_cmd.agg_params.agg_dis_start_th = 3;
	/* NOTE(review): 4000 is presumably in usec — confirm against the
	 * uCode link-quality command definition. */
3706 link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000);
3708 /* Update the rate scaling for control frame Tx to AP */
3709 link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id;
3711 iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
3712 sizeof(link_cmd), &link_cmd, NULL);
3715 #ifdef CONFIG_IWL4965_HT
/*
 * iwl4965_is_channel_extension - can @channel host a FAT extension?
 *
 * Looks up @channel in the channel-info table and checks that its
 * fat_extension_channel matches the requested offset (or is
 * HT_IE_EXT_CHANNEL_MAX, meaning either side is acceptable).  A request
 * with no extension offset (IWL_EXT_CHANNEL_OFFSET_NONE) is handled as
 * a special case before the match.
 */
3717 static u8 iwl4965_is_channel_extension(struct iwl_priv *priv,
3718 enum ieee80211_band band,
3719 u16 channel, u8 extension_chan_offset)
3721 const struct iwl_channel_info *ch_info;
3723 ch_info = iwl_get_channel_info(priv, band, channel);
3724 if (!is_channel_valid(ch_info))
3727 if (extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE)
3730 if ((ch_info->fat_extension_channel == extension_chan_offset) ||
3731 (ch_info->fat_extension_channel == HT_IE_EXT_CHANNEL_MAX))
/* Decide whether 40 MHz ("FAT") Tx is currently permitted.
 * Requires: our own HT config is HT-enabled, 40 MHz wide, with a real
 * extension-channel offset; and, when a peer's HT info is supplied
 * (@sta_ht_inf may be NULL — see iwl4965_set_rxon_ht), the peer must
 * support HT and the 40 MHz cap. Final word goes to the regulatory
 * check in iwl4965_is_channel_extension(). */
3737 static u8 iwl4965_is_fat_tx_allowed(struct iwl_priv *priv,
3738 struct ieee80211_ht_info *sta_ht_inf)
3740 struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config;
3742 if ((!iwl_ht_conf->is_ht) ||
3743 (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) ||
3744 (iwl_ht_conf->extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE))
/* NOTE(review): the NULL-check guarding this peer-capability block is
 * elided in this listing — confirm against the full source. */
3748 if ((!sta_ht_inf->ht_supported) ||
3749 (!(sta_ht_inf->cap & IEEE80211_HT_CAP_SUP_WIDTH)))
3753 return (iwl4965_is_channel_extension(priv, priv->band,
3754 iwl_ht_conf->control_channel,
3755 iwl_ht_conf->extension_chan_offset));
/* Fold the current HT configuration into the staging RXON command:
 * channel-mode flags (20 vs 20/40 mixed), control channel, control-
 * channel location relative to the extension channel, and the HT
 * protection mode bits. Does not send the RXON — only edits staging. */
3758 void iwl4965_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
3760 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon;
3763 if (!ht_info->is_ht)
3766 /* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */
/* NULL peer info: only our own config + regulatory gate FAT here. */
3767 if (iwl4965_is_fat_tx_allowed(priv, NULL))
3768 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK;
3770 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
3771 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
3773 if (le16_to_cpu(rxon->channel) != ht_info->control_channel) {
3774 IWL_DEBUG_ASSOC("control diff than current %d %d\n",
3775 le16_to_cpu(rxon->channel),
3776 ht_info->control_channel);
3777 rxon->channel = cpu_to_le16(ht_info->control_channel);
3781 /* Note: control channel is opposite of extension channel */
/* NOTE(review): `break` lines between cases are elided in this listing;
 * the original switch presumably breaks after each case — confirm. */
3782 switch (ht_info->extension_chan_offset) {
3783 case IWL_EXT_CHANNEL_OFFSET_ABOVE:
3784 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
3786 case IWL_EXT_CHANNEL_OFFSET_BELOW:
3787 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
3789 case IWL_EXT_CHANNEL_OFFSET_NONE:
/* No extension channel: force plain 20 MHz mode. */
3791 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
/* HT protection mode from the IE, shifted into the RXON flags field. */
3795 val = ht_info->ht_protection;
3797 rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS);
3799 iwl_set_rxon_chain(priv);
3801 IWL_DEBUG_ASSOC("supported HT rate 0x%X 0x%X 0x%X "
3802 "rxon flags 0x%X operation mode :0x%X "
3803 "extension channel offset 0x%x "
3804 "control chan %d\n",
3805 ht_info->supp_mcs_set[0],
3806 ht_info->supp_mcs_set[1],
3807 ht_info->supp_mcs_set[2],
3808 le32_to_cpu(rxon->flags), ht_info->ht_protection,
3809 ht_info->extension_chan_offset,
3810 ht_info->control_channel);
/* Translate a peer's HT capabilities into driver station flags for the
 * station table entry at @index: MIMO power-save mode, max A-MPDU
 * aggregation size/density, and whether FAT (40 MHz) is enabled. */
3814 void iwl4965_set_ht_add_station(struct iwl_priv *priv, u8 index,
3815 struct ieee80211_ht_info *sta_ht_inf)
3820 if (!sta_ht_inf || !sta_ht_inf->ht_supported)
/* Extract the 2-bit MIMO power-save field from the HT cap word. */
3823 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2;
3825 sta_flags = priv->stations[index].sta.station_flags;
3827 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
/* NOTE(review): `break` lines between cases are elided in this listing;
 * the original switch presumably breaks after each case — confirm. */
3829 switch (mimo_ps_mode) {
3830 case WLAN_HT_CAP_MIMO_PS_STATIC:
3831 sta_flags |= STA_FLG_MIMO_DIS_MSK;
3833 case WLAN_HT_CAP_MIMO_PS_DYNAMIC:
3834 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
3836 case WLAN_HT_CAP_MIMO_PS_DISABLED:
3839 IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode);
/* Peer's advertised A-MPDU limits, shifted into the flags word. */
3843 sta_flags |= cpu_to_le32(
3844 (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
3846 sta_flags |= cpu_to_le32(
3847 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
/* FAT allowed only if both our config and this peer support 40 MHz. */
3849 if (iwl4965_is_fat_tx_allowed(priv, sta_ht_inf))
3850 sta_flags |= STA_FLG_FAT_EN_MSK;
3852 sta_flags &= ~STA_FLG_FAT_EN_MSK;
3854 priv->stations[index].sta.station_flags = sta_flags;
/* Start an Rx aggregation (BA) session for @tid with the station @addr:
 * mark the station entry with an "add block-ack" modify request carrying
 * the tid and starting sequence number, then push it to uCode via the
 * ADD_STA command. Station-table edits are done under sta_lock. */
3859 static int iwl4965_rx_agg_start(struct iwl_priv *priv,
3860 const u8 *addr, int tid, u16 ssn)
3862 unsigned long flags;
3865 sta_id = iwl_find_station(priv, addr);
3866 if (sta_id == IWL_INVALID_STATION)
3869 spin_lock_irqsave(&priv->sta_lock, flags);
3870 priv->stations[sta_id].sta.station_flags_msk = 0;
3871 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
3872 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
3873 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
3874 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3875 spin_unlock_irqrestore(&priv->sta_lock, flags);
3877 return iwl4965_send_add_station(priv, &priv->stations[sta_id].sta,
/* Stop the Rx aggregation (BA) session for @tid with station @addr:
 * mirror image of iwl4965_rx_agg_start() using the "delete block-ack"
 * modify mask, again sent to uCode via ADD_STA. */
3881 static int iwl4965_rx_agg_stop(struct iwl_priv *priv,
3882 const u8 *addr, int tid)
3884 unsigned long flags;
3887 sta_id = iwl_find_station(priv, addr);
3888 if (sta_id == IWL_INVALID_STATION)
3891 spin_lock_irqsave(&priv->sta_lock, flags);
3892 priv->stations[sta_id].sta.station_flags_msk = 0;
3893 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
3894 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
3895 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3896 spin_unlock_irqrestore(&priv->sta_lock, flags);
3898 return iwl4965_send_add_station(priv, &priv->stations[sta_id].sta,
3903 * Find first available (lowest unused) Tx Queue, mark it "active".
3904 * Called only when finding queue for aggregation.
3905 * Should never return anything < 7, because they should already
3906 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
3908 static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
/* Atomically claim the first clear bit in the active-queue mask;
 * the return statements (queue id / failure) are elided in this listing. */
3912 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
3913 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
/* mac80211 Tx-aggregation start for (@ra, @tid): map the tid to a Tx
 * FIFO, claim a free aggregation Tx queue, record the starting sequence
 * number, and enable the queue for aggregation. If the HW queue already
 * holds frames, defer the state change (IWL_EMPTYING_HW_QUEUE_ADDBA)
 * until it drains; otherwise go straight to IWL_AGG_ON and notify
 * mac80211. Returns 0 on success (error-path lines elided here). */
3918 static int iwl4965_tx_agg_start(struct ieee80211_hw *hw, const u8 *ra,
3919 u16 tid, u16 *start_seq_num)
3921 struct iwl_priv *priv = hw->priv;
3927 unsigned long flags;
3928 struct iwl4965_tid_data *tid_data;
3929 DECLARE_MAC_BUF(mac);
/* Bounds-check tid before indexing the tid->fifo table. */
3931 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
3932 tx_fifo = default_tid_to_tx_fifo[tid];
3936 IWL_WARNING("%s on ra = %s tid = %d\n",
3937 __func__, print_mac(mac, ra), tid);
3939 sta_id = iwl_find_station(priv, ra);
3940 if (sta_id == IWL_INVALID_STATION)
3943 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
3944 IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n");
3948 txq_id = iwl4965_txq_ctx_activate_free(priv);
3952 spin_lock_irqsave(&priv->sta_lock, flags);
3953 tid_data = &priv->stations[sta_id].tid[tid];
3954 ssn = SEQ_TO_SN(tid_data->seq_number);
3955 tid_data->agg.txq_id = txq_id;
3956 spin_unlock_irqrestore(&priv->sta_lock, flags);
3958 *start_seq_num = ssn;
3959 ret = iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo,
/* NOTE(review): tid_data is read below outside sta_lock — matches the
 * original upstream code, but worth confirming the locking intent. */
3965 if (tid_data->tfds_in_queue == 0) {
3966 printk(KERN_ERR "HW queue is empty\n");
3967 tid_data->agg.state = IWL_AGG_ON;
3968 ieee80211_start_tx_ba_cb_irqsafe(hw, ra, tid);
3970 IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
3971 tid_data->tfds_in_queue);
3972 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
/* mac80211 Tx-aggregation stop for (@ra, @tid). If the aggregation HW
 * queue still holds frames (write_ptr != read_ptr), only flag the tid as
 * IWL_EMPTYING_HW_QUEUE_DELBA and let the Tx-reclaim path finish the
 * teardown; otherwise disable the aggregation queue immediately (under
 * priv->lock) and tell mac80211 the BA session is stopped. */
3977 static int iwl4965_tx_agg_stop(struct ieee80211_hw *hw, const u8 *ra, u16 tid)
3979 struct iwl_priv *priv = hw->priv;
3980 int tx_fifo_id, txq_id, sta_id, ssn = -1;
3981 struct iwl4965_tid_data *tid_data;
3982 int ret, write_ptr, read_ptr;
3983 unsigned long flags;
3984 DECLARE_MAC_BUF(mac);
3987 IWL_ERROR("ra = NULL\n");
/* Bounds-check tid before indexing the tid->fifo table. */
3991 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
3992 tx_fifo_id = default_tid_to_tx_fifo[tid];
3996 sta_id = iwl_find_station(priv, ra);
3998 if (sta_id == IWL_INVALID_STATION)
/* Warn but keep going: teardown is still wanted in a bad state. */
4001 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
4002 IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");
4004 tid_data = &priv->stations[sta_id].tid[tid];
/* Sequence number for queue-disable: SCTL_SEQ field >> 4 == SN. */
4005 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
4006 txq_id = tid_data->agg.txq_id;
4007 write_ptr = priv->txq[txq_id].q.write_ptr;
4008 read_ptr = priv->txq[txq_id].q.read_ptr;
4010 /* The queue is not empty */
4011 if (write_ptr != read_ptr) {
4012 IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n");
4013 priv->stations[sta_id].tid[tid].agg.state =
4014 IWL_EMPTYING_HW_QUEUE_DELBA;
4018 IWL_DEBUG_HT("HW queue is empty\n");
4019 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
4021 spin_lock_irqsave(&priv->lock, flags);
4022 ret = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id);
4023 spin_unlock_irqrestore(&priv->lock, flags);
4028 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);
/* mac80211 ampdu_action callback: dispatch A-MPDU (aggregation) start/
 * stop requests for Rx and Tx to the corresponding agg helpers above.
 * @ssn is in/out for TX_START (filled with the starting sequence number). */
4033 int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
4034 enum ieee80211_ampdu_mlme_action action,
4035 const u8 *addr, u16 tid, u16 *ssn)
4037 struct iwl_priv *priv = hw->priv;
4038 DECLARE_MAC_BUF(mac);
4040 IWL_DEBUG_HT("A-MPDU action on addr %s tid %d\n",
4041 print_mac(mac, addr), tid);
/* NOTE(review): the `switch (action)` line itself is elided in this
 * listing; each case returns directly, so no breaks are needed. */
4044 case IEEE80211_AMPDU_RX_START:
4045 IWL_DEBUG_HT("start Rx\n");
4046 return iwl4965_rx_agg_start(priv, addr, tid, *ssn);
4047 case IEEE80211_AMPDU_RX_STOP:
4048 IWL_DEBUG_HT("stop Rx\n");
4049 return iwl4965_rx_agg_stop(priv, addr, tid);
4050 case IEEE80211_AMPDU_TX_START:
4051 IWL_DEBUG_HT("start Tx\n");
4052 return iwl4965_tx_agg_start(hw, addr, tid, ssn);
4053 case IEEE80211_AMPDU_TX_STOP:
4054 IWL_DEBUG_HT("stop Tx\n");
4055 return iwl4965_tx_agg_stop(hw, addr, tid);
4057 IWL_DEBUG_HT("unknown\n");
4064 #endif /* CONFIG_IWL4965_HT */
4066 /* Set up 4965-specific Rx frame reply handlers */
4067 static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
4069 /* Legacy Rx frames */
4070 priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
4072 /* High-throughput (HT) Rx frames */
4073 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
4074 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
/* Missed-beacon notifications feed the sensitivity calibration path. */
4076 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
4077 iwl4965_rx_missed_beacon_notif;
4079 #ifdef CONFIG_IWL4965_HT
/* Compressed block-ack replies only exist when HT support is built in. */
4080 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
4081 #endif /* CONFIG_IWL4965_HT */
/* Initialize 4965-specific deferred work: txpower work item, the
 * optional sensitivity-calibration work item, and the periodic
 * statistics timer (old-style init_timer API; data carries priv). */
4084 void iwl4965_hw_setup_deferred_work(struct iwl_priv *priv)
4086 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
4087 #ifdef CONFIG_IWL4965_RUN_TIME_CALIB
4088 INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work);
4090 init_timer(&priv->statistics_periodic);
4091 priv->statistics_periodic.data = (unsigned long)priv;
4092 priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
/* Tear down the deferred work set up above: synchronously stop the
 * statistics timer and cancel the pending init-alive-start work. */
4095 void iwl4965_hw_cancel_deferred_work(struct iwl_priv *priv)
4097 del_timer_sync(&priv->statistics_periodic);
4099 cancel_delayed_work(&priv->init_alive_start);
/* 4965 host-command ops: only rxon_assoc is device-specific here. */
4103 static struct iwl_hcmd_ops iwl4965_hcmd = {
4104 .rxon_assoc = iwl4965_send_rxon_assoc,
/* Host-command utility ops; calibration hooks are compile-time optional. */
4107 static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
4108 .enqueue_hcmd = iwl4965_enqueue_hcmd,
4109 #ifdef CONFIG_IWL4965_RUN_TIME_CALIB
4110 .chain_noise_reset = iwl4965_chain_noise_reset,
4111 .gain_computation = iwl4965_gain_computation,
/* 4965 core-library ops table: HW setup, shared-memory management,
 * uCode load, APM/NIC config, EEPROM access (per-band regulatory
 * channel table offsets), and power/RF-kill handling. */
4115 static struct iwl_lib_ops iwl4965_lib = {
4116 .set_hw_params = iwl4965_hw_set_hw_params,
4117 .alloc_shared_mem = iwl4965_alloc_shared_mem,
4118 .free_shared_mem = iwl4965_free_shared_mem,
4119 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
4120 .hw_nic_init = iwl4965_hw_nic_init,
4121 .rx_handler_setup = iwl4965_rx_handler_setup,
4122 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
4123 .alive_notify = iwl4965_alive_notify,
/* Bootstrap state machine (BSM) loads the runtime uCode. */
4124 .load_ucode = iwl4965_load_bsm,
4126 .init = iwl4965_apm_init,
4127 .config = iwl4965_nic_config,
4128 .set_pwr_src = iwl4965_set_pwr_src,
/* EEPROM regulatory band tables: 5 regular bands + two FAT tables. */
4131 .regulatory_bands = {
4132 EEPROM_REGULATORY_BAND_1_CHANNELS,
4133 EEPROM_REGULATORY_BAND_2_CHANNELS,
4134 EEPROM_REGULATORY_BAND_3_CHANNELS,
4135 EEPROM_REGULATORY_BAND_4_CHANNELS,
4136 EEPROM_REGULATORY_BAND_5_CHANNELS,
4137 EEPROM_4965_REGULATORY_BAND_24_FAT_CHANNELS,
4138 EEPROM_4965_REGULATORY_BAND_52_FAT_CHANNELS
4140 .verify_signature = iwlcore_eeprom_verify_signature,
4141 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
4142 .release_semaphore = iwlcore_eeprom_release_semaphore,
4143 .check_version = iwl4965_eeprom_check_version,
4144 .query_addr = iwlcore_eeprom_query_addr,
4146 .radio_kill_sw = iwl4965_radio_kill_sw,
4147 .set_power = iwl4965_set_power,
4148 .update_chain_flags = iwl4965_update_chain_flags,
/* Aggregate ops bundle handed to iwl-core via the cfg below. */
4151 static struct iwl_ops iwl4965_ops = {
4152 .lib = &iwl4965_lib,
4153 .hcmd = &iwl4965_hcmd,
4154 .utils = &iwl4965_hcmd_utils,
/* Device configuration for 4965 AGN: firmware name (API-versioned),
 * supported SKUs (A/G/N), EEPROM image size, ops and module params. */
4157 struct iwl_cfg iwl4965_agn_cfg = {
4159 .fw_name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode",
4160 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
4161 .eeprom_size = IWL4965_EEPROM_IMG_SIZE,
4162 .ops = &iwl4965_ops,
4163 .mod_params = &iwl4965_mod_params,
/* Module parameters — all readable via sysfs (mode 0444), wired into
 * the iwl4965_mod_params struct consumed through iwl4965_agn_cfg. */
4166 module_param_named(antenna, iwl4965_mod_params.antenna, int, 0444);
4167 MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
4168 module_param_named(disable, iwl4965_mod_params.disable, int, 0444);
4169 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
4170 module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, 0444);
4171 MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])\n");
4172 module_param_named(debug, iwl4965_mod_params.debug, int, 0444);
4173 MODULE_PARM_DESC(debug, "debug output mask");
/* NOTE(review): the `module_param_named(` opener for disable_hw_scan is
 * elided in this listing — the line below is its argument list. */
4175 disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, 0444);
4176 MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
4178 module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, 0444);
4179 MODULE_PARM_DESC(queues_num, "number of hw queues.");
4182 module_param_named(qos_enable, iwl4965_mod_params.enable_qos, int, 0444);
4183 MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
4184 module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444);
4185 MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");