#define RX_FREE_BUFFERS 64
#define RX_LOW_WATERMARK 8
-/* Size of one Rx buffer in host DRAM */
-#define IWL_RX_BUF_SIZE 3000
-
/* Sizes and addresses for instruction and data memory (SRAM) in
* 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
#define RTC_INST_LOWER_BOUND (0x000000)
}
priv->hw_setting.ac_queue_count = AC_NUM;
- priv->hw_setting.rx_buffer_size = IWL_RX_BUF_SIZE;
+ priv->hw_setting.rx_buf_size = IWL_RX_BUF_SIZE;
+ priv->hw_setting.max_pkt_size = 2342;
priv->hw_setting.tx_cmd_len = sizeof(struct iwl3945_tx_cmd);
priv->hw_setting.max_rxq_size = RX_QUEUE_SIZE;
priv->hw_setting.max_rxq_log = RX_QUEUE_SIZE_LOG;
* else RTS for data/management frames where MPDU is larger
* than RTS value.
*/
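+/* Size of one Rx buffer in host DRAM */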
+#define IWL_RX_BUF_SIZE 3000U
#define DEFAULT_RTS_THRESHOLD 2347U
#define MIN_RTS_THRESHOLD 0U
#define MAX_RTS_THRESHOLD 2347U
};
};
+#ifdef CONFIG_IWL3945_HT
+#define CFG_HT_RX_AMPDU_FACTOR_DEF (0x3)
+#define CFG_HT_MPDU_DENSITY_2USEC (0x5)
+#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_2USEC
+
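+/* Per-station HT capabilities and operating parameters tracked by the driver */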
+struct sta_ht_info {
+ u8 is_ht;
+ u16 rx_mimo_ps_mode;
+ u16 tx_mimo_ps_mode;
+ u16 control_channel;
+ u8 max_amsdu_size;
+ u8 ampdu_factor;
+ u8 mpdu_density;
+ u8 operating_mode;
+ u8 supported_chan_width;
+ u8 extension_chan_offset;
+ u8 is_green_field;
+ u8 sgf;
+ u8 supp_rates[16];
+ u8 tx_chan_width;
+ u8 chan_width_cap;
+};
+#endif /* CONFIG_IWL3945_HT */
+
#ifdef CONFIG_IWL3945_QOS
union iwl3945_qos_capabity {
* @ac_queue_count: # Tx queues for EDCA Access Categories (AC)
* @tx_cmd_len: Size of Tx command (but not including frame itself)
* @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
- * @rx_buffer_size:
+ * @rx_buf_size: Size of one Rx buffer, in bytes, in host DRAM
+ * @max_pkt_size: Largest received frame size (in bytes) the driver will accept
* @max_rxq_log: Log-base-2 of max_rxq_size
* @max_stations:
* @bcast_sta_id:
u16 ac_queue_count;
u16 tx_cmd_len;
u16 max_rxq_size;
- u32 rx_buffer_size;
+ u32 rx_buf_size;
+ u32 max_pkt_size;
u16 max_rxq_log;
u8 max_stations;
u8 bcast_sta_id;
#define RX_LOW_WATERMARK 8
-/* Size of one Rx buffer in host DRAM */
-#define IWL_RX_BUF_SIZE (4 * 1024)
+/* Sizes of Rx buffers in host DRAM: 4K by default, 8K when A-MSDU Rx is enabled */
+#define IWL_RX_BUF_SIZE_4K (4 * 1024)
+#define IWL_RX_BUF_SIZE_8K (8 * 1024)
/* Sizes and addresses for instruction and data memory (SRAM) in
* 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MASK (0xC0000000) /* bits 30-31 */
#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT (20)
-#define FH_RCSR_RX_CONFIG_RB_SIZE_BITSHIFT (16)
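+/* Rx config register RB (receive buffer) timeout field: bit position and default value */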
+#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_BITSHIFT (4)
+#define RX_RB_TIMEOUT (0x10)
#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
-#define IWL_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
{
int rc;
unsigned long flags;
+ unsigned int rb_size;
spin_lock_irqsave(&priv->lock, flags);
rc = iwl4965_grab_nic_access(priv);
return rc;
}
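+ /* Program the flow handler with 8K receive buffers when the
+  * amsdu_size_8K module parameter is set, 4K otherwise. */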
+ if (iwl4965_param_amsdu_size_8K)
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+ else
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
/* Stop Rx DMA */
iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
- IWL_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
+ rb_size |
/*0x10 << 4 | */
(RX_QUEUE_SIZE_LOG <<
FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
priv->hw_setting.tx_cmd_len = sizeof(struct iwl4965_tx_cmd);
priv->hw_setting.max_rxq_size = RX_QUEUE_SIZE;
priv->hw_setting.max_rxq_log = RX_QUEUE_SIZE_LOG;
-
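+ /* Size host Rx buffers (and the largest frame the driver will accept)
+  * according to the amsdu_size_8K module parameter. */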
+ if (iwl4965_param_amsdu_size_8K)
+ priv->hw_setting.rx_buf_size = IWL_RX_BUF_SIZE_8K;
+ else
+ priv->hw_setting.rx_buf_size = IWL_RX_BUF_SIZE_4K;
+ priv->hw_setting.max_pkt_size = priv->hw_setting.rx_buf_size - 256;
priv->hw_setting.max_stations = IWL4965_STATION_COUNT;
priv->hw_setting.bcast_sta_id = IWL4965_BROADCAST_ID;
return 0;
rx_start->byte_count = amsdu->byte_count;
rx_end = (__le32 *) (((u8 *) hdr) + len);
}
- if (len > IWL_RX_BUF_SIZE || len < 16) {
+ if (len > priv->hw_setting.max_pkt_size || len < 16) {
IWL_WARNING("byte count out of range [16,4K]"
" : %d\n", len);
return;
ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20;
ht_info->cap |= (u16)(IEEE80211_HT_CAP_MIMO_PS &
(IWL_MIMO_PS_NONE << 2));
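+ /* With 8K Rx buffers the HT capability can advertise the maximum (7935-byte) A-MSDU size */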
+ if (iwl4965_param_amsdu_size_8K) {
+ printk(KERN_DEBUG "iwl4965 in A-MSDU 8K support mode\n");
+ ht_info->cap |= (u16)IEEE80211_HT_CAP_MAX_AMSDU;
+ }
ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
/* Module parameters accessible from iwl-*.c */
extern int iwl4965_param_hwcrypto;
extern int iwl4965_param_queues_num;
+extern int iwl4965_param_amsdu_size_8K;
enum iwl4965_antenna {
IWL_ANTENNA_DIVERSITY,
#ifdef CONFIG_IWL4965_HT
#define CFG_HT_RX_AMPDU_FACTOR_DEF (0x3)
-#define HT_IE_MAX_AMSDU_SIZE_4K (0)
#define CFG_HT_MPDU_DENSITY_2USEC (0x5)
#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_2USEC
u16 ac_queue_count;
u16 tx_cmd_len;
u16 max_rxq_size;
- u32 rx_buffer_size;
+ u32 rx_buf_size;
+ u32 max_pkt_size;
u16 max_rxq_log;
u8 max_stations;
u8 bcast_sta_id;
int iwl4965_param_hwcrypto; /* def: using software encryption */
static int iwl4965_param_qos_enable = 1; /* def: 1 = use quality of service */
int iwl4965_param_queues_num = IWL_MAX_NUM_QUEUES; /* def: 16 Tx queues */
+int iwl4965_param_amsdu_size_8K; /* def: 0 = disable 8K A-MSDU size */
/*
* module name, copyright, version, etc.
__le16 phy_flags_hw = cpu_to_le16(phy_flags);
/* We received data from the HW, so stop the watchdog */
- if (len > IWL_RX_BUF_SIZE - sizeof(*iwl4965_rt)) {
+ if (len > priv->hw_setting.rx_buf_size - sizeof(*iwl4965_rt)) {
IWL_DEBUG_DROP("Dropping too large packet in monitor\n");
return;
}
/* Alloc a new receive buffer */
rxb->skb =
- alloc_skb(IWL_RX_BUF_SIZE, __GFP_NOWARN | GFP_ATOMIC);
+ alloc_skb(priv->hw_setting.rx_buf_size,
+ __GFP_NOWARN | GFP_ATOMIC);
if (!rxb->skb) {
if (net_ratelimit())
printk(KERN_CRIT DRV_NAME
/* Get physical address of RB/SKB */
rxb->dma_addr =
pci_map_single(priv->pci_dev, rxb->skb->data,
- IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ priv->hw_setting.rx_buf_size, PCI_DMA_FROMDEVICE);
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
}
if (rxq->pool[i].skb != NULL) {
pci_unmap_single(priv->pci_dev,
rxq->pool[i].dma_addr,
- IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ priv->hw_setting.rx_buf_size,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb(rxq->pool[i].skb);
}
}
if (rxq->pool[i].skb != NULL) {
pci_unmap_single(priv->pci_dev,
rxq->pool[i].dma_addr,
- IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ priv->hw_setting.rx_buf_size,
+ PCI_DMA_FROMDEVICE);
priv->alloc_rxb_skb--;
dev_kfree_skb(rxq->pool[i].skb);
rxq->pool[i].skb = NULL;
rxq->queue[i] = NULL;
pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
- IWL_RX_BUF_SIZE,
+ priv->hw_setting.rx_buf_size,
PCI_DMA_FROMDEVICE);
pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
}
pci_unmap_single(priv->pci_dev, rxb->dma_addr,
- IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ priv->hw_setting.rx_buf_size,
+ PCI_DMA_FROMDEVICE);
spin_lock_irqsave(&rxq->lock, flags);
list_add_tail(&rxb->list, &priv->rxq.rx_used);
spin_unlock_irqrestore(&rxq->lock, flags);
/* QoS */
module_param_named(qos_enable, iwl4965_param_qos_enable, int, 0444);
MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
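+/* amsdu_size_8K: 0 (default) uses 4K Rx buffers; non-zero uses 8K buffers for A-MSDU Rx */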
+module_param_named(amsdu_size_8K, iwl4965_param_amsdu_size_8K, int, 0444);
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K A-MSDU Rx buffer size (default: disabled)");
module_exit(iwl4965_exit);
module_init(iwl4965_init);