This patch moves txq_ctx_stop into iwl-tx.c in the iwlcore module.
Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
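For orientation, a minimal sketch of the indirection this patch introduces. The member and call-site names are taken from the hunks below; the enclosing struct is assumed to be iwl_lib_ops from iwl-core.h, and the rest of the ops wiring is not shown:

	/* Per-device lib ops: shared iwlcore code toggles Tx scheduling
	 * through a callback instead of writing the 4965/5000-specific
	 * SCD_TXFACT register directly. */
	struct iwl_lib_ops {
		void (*txq_set_sched)(struct iwl_priv *priv, u32 mask);
		/* ... other ops elided ... */
	};

	/* Generic call site in iwl-tx.c, e.g. turning all channels off: */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);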
return ret;
}
-static int iwl4965_disable_tx_fifo(struct iwl_priv *priv)
+/*
+ * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask;
+ * must be called with priv->lock held and NIC access grabbed
+ */
+static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- ret = iwl_grab_nic_access(priv);
- if (unlikely(ret)) {
- IWL_ERROR("Tx fifo reset failed");
- spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
- }
-
- iwl_write_prph(priv, IWL49_SCD_TXFACT, 0);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
+ iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
}
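Because the helper no longer takes the lock or NIC access itself, every caller now follows the pattern below (a sketch assembled from the call sites later in this patch; iwl_grab_nic_access() returns 0 on success):

	spin_lock_irqsave(&priv->lock, flags);
	if (!iwl_grab_nic_access(priv)) {
		/* safe: priv->lock held and NIC access grabbed */
		priv->cfg->ops->lib->txq_set_sched(priv, mask);
		iwl_release_nic_access(priv);
	}
	spin_unlock_irqrestore(&priv->lock, flags);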
static int iwl4965_apm_init(struct iwl_priv *priv)
spin_unlock_irqrestore(&priv->lock, flags);
}
-/**
- * iwl4965_hw_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
- */
-void iwl4965_hw_txq_ctx_stop(struct iwl_priv *priv)
-{
-
- int txq_id;
- unsigned long flags;
-
- /* Stop each Tx DMA channel, and wait for it to be idle */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- spin_lock_irqsave(&priv->lock, flags);
- if (iwl_grab_nic_access(priv)) {
- spin_unlock_irqrestore(&priv->lock, flags);
- continue;
- }
-
- iwl_write_direct32(priv,
- FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0);
- iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
- (txq_id), 200);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- }
-
- /* Deallocate memory for all Tx queues */
- iwl_hw_txq_ctx_free(priv);
-}
-
static int iwl4965_apm_stop_master(struct iwl_priv *priv)
{
int ret = 0;
(1 << priv->hw_params.max_txq_num) - 1);
/* Activate all Tx DMA/FIFO channels */
- iwl_write_prph(priv, IWL49_SCD_TXFACT,
- SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
+ priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
.free_shared_mem = iwl4965_free_shared_mem,
.shared_mem_rx_idx = iwl4965_shared_mem_rx_idx,
.txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
- .disable_tx_fifo = iwl4965_disable_tx_fifo,
+ .txq_set_sched = iwl4965_txq_set_sched,
.rx_handler_setup = iwl4965_rx_handler_setup,
.is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
.alive_notify = iwl4965_alive_notify,
}
iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK,
- (1 << priv->hw_params.max_txq_num) - 1);
+ IWL_MASK(0, priv->hw_params.max_txq_num));
- iwl_write_prph(priv, IWL50_SCD_TXFACT,
- SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
+ /* Activate all Tx DMA/FIFO channels */
+ priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
/* map qos queues to fifos one-to-one */
}
-static int iwl5000_disable_tx_fifo(struct iwl_priv *priv)
+/*
+ * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask;
+ * must be called with priv->lock held and NIC access grabbed
+ */
+static void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- ret = iwl_grab_nic_access(priv);
- if (unlikely(ret)) {
- IWL_ERROR("Tx fifo reset failed");
- spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
- }
-
- iwl_write_prph(priv, IWL50_SCD_TXFACT, 0);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
+ iwl_write_prph(priv, IWL50_SCD_TXFACT, mask);
}
/* Currently 5000 is the superset of everything */
.free_shared_mem = iwl5000_free_shared_mem,
.shared_mem_rx_idx = iwl5000_shared_mem_rx_idx,
.txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
- .disable_tx_fifo = iwl5000_disable_tx_fifo,
+ .txq_set_sched = iwl5000_txq_set_sched,
.rx_handler_setup = iwl5000_rx_handler_setup,
.is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
.load_ucode = iwl5000_load_ucode,
/* setup Rx handler */
void (*rx_handler_setup)(struct iwl_priv *priv);
/* nic Tx fifo handling */
- int (*disable_tx_fifo)(struct iwl_priv *priv);
+ void (*txq_set_sched)(struct iwl_priv *priv, u32 mask);
/* alive notification after init uCode load */
void (*init_alive_start)(struct iwl_priv *priv);
/* alive notification */
extern void iwl4965_hw_cancel_deferred_work(struct iwl_priv *priv);
extern int iwl4965_hw_rxq_stop(struct iwl_priv *priv);
extern int iwl4965_hw_set_hw_params(struct iwl_priv *priv);
-extern void iwl4965_hw_txq_ctx_stop(struct iwl_priv *priv);
+extern void iwl_txq_ctx_stop(struct iwl_priv *priv);
extern int iwl4965_hw_get_temperature(struct iwl_priv *priv);
extern unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
struct iwl_frame *frame, u8 rate);
#define KELVIN_TO_CELSIUS(x) ((x)-273)
#define CELSIUS_TO_KELVIN(x) ((x)+273)
+#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
+
#define IEEE80211_CHAN_W_RADAR_DETECT 0x00000010
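The mask arithmetic is easy to verify by hand; the values below follow directly from the macro definition above:

	/* IWL_MASK(0, 7) = (1 << 7) | ((1 << 7) - (1 << 0))
	 *               = 0x80 | (0x80 - 0x01) = 0x80 | 0x7f = 0xff
	 * i.e. channels 0..7 all enabled, matching the old
	 * SCD_TXFACT_REG_TXFIFO_MASK(0, 7) it replaces.
	 * IWL_MASK(4, 7) = 0x80 | (0x80 - 0x10) = 0xf0, channels 4..7. */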
* 7- 0: Enable (1), disable (0), one bit for each channel 0-7
*/
#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c)
-
-/* Mask to enable contiguous Tx DMA/FIFO channels between "lo" and "hi". */
-#define SCD_TXFACT_REG_TXFIFO_MASK(lo, hi) \
- ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
-
/*
* Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
* Initialized and updated by driver as new TFDs are added to queue.
EXPORT_SYMBOL(iwl_queue_space);
-/**
- * iwl_hw_txq_ctx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- /* Tx queues */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- iwl_tx_queue_free(priv, &priv->txq[txq_id]);
-
- /* Keep-warm buffer */
- iwl_kw_free(priv);
-}
-EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
-
/**
* iwl_queue_init - Initialize queue's high/low-water and read/write indexes
*/
return 0;
}
+/**
+ * iwl_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
+{
+ int txq_id;
+
+ /* Tx queues */
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+ iwl_tx_queue_free(priv, &priv->txq[txq_id]);
+
+ /* Keep-warm buffer */
+ iwl_kw_free(priv);
+}
+EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
+
/**
* iwl_txq_ctx_reset - Reset TX queue context
{
int ret = 0;
int txq_id, slots_num;
+ unsigned long flags;
iwl_kw_free(priv);
IWL_ERROR("Keep Warm allocation failed");
goto error_kw;
}
+ spin_lock_irqsave(&priv->lock, flags);
+ ret = iwl_grab_nic_access(priv);
+ if (unlikely(ret)) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ goto error_reset;
+ }
/* Turn off all Tx DMA fifos */
- ret = priv->cfg->ops->lib->disable_tx_fifo(priv);
- if (unlikely(ret))
- goto error_reset;
+ priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
/* Tell nic where to find the keep-warm buffer */
ret = iwl_kw_init(priv);
goto error_reset;
}
- /* Alloc and init all (default 16) Tx queues,
- * including the command queue (#4) */
+ /* Alloc and init all Tx queues, including the command queue (#4) */
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
error_kw:
return ret;
}
+/**
+ * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
+ */
+void iwl_txq_ctx_stop(struct iwl_priv *priv)
+{
+ int txq_id;
+ unsigned long flags;
+
+ /* Turn off all Tx DMA fifos */
+ spin_lock_irqsave(&priv->lock, flags);
+ if (iwl_grab_nic_access(priv)) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return;
+ }
+
+ priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+ /* Stop each Tx DMA channel, and wait for it to be idle */
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+ iwl_write_direct32(priv,
+ FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0);
+ iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
+ (txq_id), 200);
+ }
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Deallocate memory for all Tx queues */
+ iwl_hw_txq_ctx_free(priv);
+}
+EXPORT_SYMBOL(iwl_txq_ctx_stop);
/*
* handle build REPLY_TX command notification.
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
spin_unlock_irqrestore(&priv->lock, flags);
- iwl4965_hw_txq_ctx_stop(priv);
+ iwl_txq_ctx_stop(priv);
iwl4965_hw_rxq_stop(priv);
spin_lock_irqsave(&priv->lock, flags);