[linux-2.6] drivers/net/wireless/iwlwifi/iwl-core.c (iwlwifi: refactor setting tx power)
1 /******************************************************************************
2  *
3  * GPL LICENSE SUMMARY
4  *
5  * Copyright(c) 2008 Intel Corporation. All rights reserved.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of version 2 of the GNU General Public License as
9  * published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful, but
12  * WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19  * USA
20  *
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * Contact Information:
25  * Tomas Winkler <tomas.winkler@intel.com>
26  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27  *****************************************************************************/
28
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/version.h>
32 #include <net/mac80211.h>
33
34 struct iwl_priv; /* FIXME: remove */
35 #include "iwl-debug.h"
36 #include "iwl-eeprom.h"
37 #include "iwl-dev.h" /* FIXME: remove */
38 #include "iwl-core.h"
39 #include "iwl-io.h"
40 #include "iwl-rfkill.h"
41 #include "iwl-power.h"
42
43
44 MODULE_DESCRIPTION("iwl core");
45 MODULE_VERSION(IWLWIFI_VERSION);
46 MODULE_AUTHOR(DRV_COPYRIGHT);
47 MODULE_LICENSE("GPL");
48
49 #define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
50         [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
51                                     IWL_RATE_SISO_##s##M_PLCP, \
52                                     IWL_RATE_MIMO2_##s##M_PLCP,\
53                                     IWL_RATE_MIMO3_##s##M_PLCP,\
54                                     IWL_RATE_##r##M_IEEE,      \
55                                     IWL_RATE_##ip##M_INDEX,    \
56                                     IWL_RATE_##in##M_INDEX,    \
57                                     IWL_RATE_##rp##M_INDEX,    \
58                                     IWL_RATE_##rn##M_INDEX,    \
59                                     IWL_RATE_##pp##M_INDEX,    \
60                                     IWL_RATE_##np##M_INDEX }
61
62 /*
63  * Parameter order:
64  *   rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
65  *
66  * If there isn't a valid next or previous rate then INV is used which
67  * maps to IWL_RATE_INVALID
68  *
69  */
70 const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
71         IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),    /*  1mbps */
72         IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),          /*  2mbps */
73         IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
74         IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),      /* 11mbps */
75         IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),        /*  6mbps */
76         IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),       /*  9mbps */
77         IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
78         IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
79         IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
80         IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
81         IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
82         IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
83         IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
84         /* FIXME:RS:          ^^    should be INV (legacy) */
85 };
86 EXPORT_SYMBOL(iwl_rates);
87
88
89 const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
90 EXPORT_SYMBOL(iwl_bcast_addr);
91
92
93 /* This function both allocates and initializes hw and priv. */
94 struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
95                 struct ieee80211_ops *hw_ops)
96 {
97         struct iwl_priv *priv;
98
99         /* mac80211 allocates memory for this device instance, including
100          *   space for this driver's private structure */
101         struct ieee80211_hw *hw =
102                 ieee80211_alloc_hw(sizeof(struct iwl_priv), hw_ops);
103         if (hw == NULL) {
104                 IWL_ERROR("Can not allocate network device\n");
105                 goto out;
106         }
107
108         priv = hw->priv;
109         priv->hw = hw;
110
111 out:
112         return hw;
113 }
114 EXPORT_SYMBOL(iwl_alloc_all);
115
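/**
 * iwl_hw_detect - Read the hardware revision registers and the PCI revision ID
 */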
116 void iwl_hw_detect(struct iwl_priv *priv)
117 {
118         priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
119         priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
120         pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
121 }
122 EXPORT_SYMBOL(iwl_hw_detect);
123
124 /* Tell nic where to find the "keep warm" buffer */
125 int iwl_kw_init(struct iwl_priv *priv)
126 {
127         unsigned long flags;
128         int ret;
129
130         spin_lock_irqsave(&priv->lock, flags);
131         ret = iwl_grab_nic_access(priv);
132         if (ret)
133                 goto out;
134
135         iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG,
136                              priv->kw.dma_addr >> 4);
137         iwl_release_nic_access(priv);
138 out:
139         spin_unlock_irqrestore(&priv->lock, flags);
140         return ret;
141 }
142
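/**
 * iwl_kw_alloc - Allocate the "keep warm" buffer
 */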
143 int iwl_kw_alloc(struct iwl_priv *priv)
144 {
145         struct pci_dev *dev = priv->pci_dev;
146         struct iwl_kw *kw = &priv->kw;
147
148         kw->size = IWL_KW_SIZE;
149         kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr);
150         if (!kw->v_addr)
151                 return -ENOMEM;
152
153         return 0;
154 }
155
156 /**
157  * iwl_kw_free - Free the "keep warm" buffer
158  */
159 void iwl_kw_free(struct iwl_priv *priv)
160 {
161         struct pci_dev *dev = priv->pci_dev;
162         struct iwl_kw *kw = &priv->kw;
163
164         if (kw->v_addr) {
165                 pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr);
166                 memset(kw, 0, sizeof(*kw));
167         }
168 }
169
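/**
 * iwl_hw_nic_init - Apply power/bus configuration, then set up Rx and Tx queues
 */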
170 int iwl_hw_nic_init(struct iwl_priv *priv)
171 {
172         unsigned long flags;
173         struct iwl_rx_queue *rxq = &priv->rxq;
174         int ret;
175
176         /* nic_init */
177         spin_lock_irqsave(&priv->lock, flags);
178         priv->cfg->ops->lib->apm_ops.init(priv);
179         iwl_write32(priv, CSR_INT_COALESCING, 512 / 32);
180         spin_unlock_irqrestore(&priv->lock, flags);
181
182         ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
183
184         priv->cfg->ops->lib->apm_ops.config(priv);
185
186         /* Allocate the RX queue, or reset if it is already allocated */
187         if (!rxq->bd) {
188                 ret = iwl_rx_queue_alloc(priv);
189                 if (ret) {
190                         IWL_ERROR("Unable to initialize Rx queue\n");
191                         return -ENOMEM;
192                 }
193         } else
194                 iwl_rx_queue_reset(priv, rxq);
195
196         iwl_rx_replenish(priv);
197
198         iwl_rx_init(priv, rxq);
199
200         spin_lock_irqsave(&priv->lock, flags);
201
202         rxq->need_update = 1;
203         iwl_rx_queue_update_write_ptr(priv, rxq);
204
205         spin_unlock_irqrestore(&priv->lock, flags);
206
207         /* Allocate and init all Tx and Command queues */
208         ret = iwl_txq_ctx_reset(priv);
209         if (ret)
210                 return ret;
211
212         set_bit(STATUS_INIT, &priv->status);
213
214         return 0;
215 }
216 EXPORT_SYMBOL(iwl_hw_nic_init);
217
218 /**
219  * iwlcore_clear_stations_table - Clear the driver's station table
220  *
221  * NOTE:  This does not clear or otherwise alter the device's station table.
222  */
223 void iwlcore_clear_stations_table(struct iwl_priv *priv)
224 {
225         unsigned long flags;
226
227         spin_lock_irqsave(&priv->sta_lock, flags);
228
229         priv->num_stations = 0;
230         memset(priv->stations, 0, sizeof(priv->stations));
231
232         spin_unlock_irqrestore(&priv->sta_lock, flags);
233 }
234 EXPORT_SYMBOL(iwlcore_clear_stations_table);
235
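/**
 * iwl_reset_qos - Reset the default EDCA QoS parameters
 *
 * Fills in default CWmin/CWmax, AIFS and TXOP values for each access
 * category, depending on the interface type and whether QoS is active.
 */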
236 void iwl_reset_qos(struct iwl_priv *priv)
237 {
238         u16 cw_min = 15;
239         u16 cw_max = 1023;
240         u8 aifs = 2;
241         u8 is_legacy = 0;
242         unsigned long flags;
243         int i;
244
245         spin_lock_irqsave(&priv->lock, flags);
246         priv->qos_data.qos_active = 0;
247
248         if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) {
249                 if (priv->qos_data.qos_enable)
250                         priv->qos_data.qos_active = 1;
251                 if (!(priv->active_rate & 0xfff0)) {
252                         cw_min = 31;
253                         is_legacy = 1;
254                 }
255         } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
256                 if (priv->qos_data.qos_enable)
257                         priv->qos_data.qos_active = 1;
258         } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
259                 cw_min = 31;
260                 is_legacy = 1;
261         }
262
263         if (priv->qos_data.qos_active)
264                 aifs = 3;
265
266         priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
267         priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
268         priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
269         priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
270         priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
271
272         if (priv->qos_data.qos_active) {
273                 i = 1;
274                 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
275                 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
276                 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
277                 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
278                 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
279
280                 i = 2;
281                 priv->qos_data.def_qos_parm.ac[i].cw_min =
282                         cpu_to_le16((cw_min + 1) / 2 - 1);
283                 priv->qos_data.def_qos_parm.ac[i].cw_max =
284                         cpu_to_le16(cw_max);
285                 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
286                 if (is_legacy)
287                         priv->qos_data.def_qos_parm.ac[i].edca_txop =
288                                 cpu_to_le16(6016);
289                 else
290                         priv->qos_data.def_qos_parm.ac[i].edca_txop =
291                                 cpu_to_le16(3008);
292                 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
293
294                 i = 3;
295                 priv->qos_data.def_qos_parm.ac[i].cw_min =
296                         cpu_to_le16((cw_min + 1) / 4 - 1);
297                 priv->qos_data.def_qos_parm.ac[i].cw_max =
298                         cpu_to_le16((cw_max + 1) / 2 - 1);
299                 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
300                 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
301                 if (is_legacy)
302                         priv->qos_data.def_qos_parm.ac[i].edca_txop =
303                                 cpu_to_le16(3264);
304                 else
305                         priv->qos_data.def_qos_parm.ac[i].edca_txop =
306                                 cpu_to_le16(1504);
307         } else {
308                 for (i = 1; i < 4; i++) {
309                         priv->qos_data.def_qos_parm.ac[i].cw_min =
310                                 cpu_to_le16(cw_min);
311                         priv->qos_data.def_qos_parm.ac[i].cw_max =
312                                 cpu_to_le16(cw_max);
313                         priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
314                         priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
315                         priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
316                 }
317         }
318         IWL_DEBUG_QOS("set QoS to default\n");
319
320         spin_unlock_irqrestore(&priv->lock, flags);
321 }
322 EXPORT_SYMBOL(iwl_reset_qos);
323
324 #define MAX_BIT_RATE_40_MHZ 0x96 /* 150 Mbps */
325 #define MAX_BIT_RATE_20_MHZ 0x48 /* 72 Mbps */
326 static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
327                               struct ieee80211_ht_info *ht_info,
328                               enum ieee80211_band band)
329 {
330         u16 max_bit_rate = 0;
331         u8 rx_chains_num = priv->hw_params.rx_chains_num;
332         u8 tx_chains_num = priv->hw_params.tx_chains_num;
333
334         ht_info->cap = 0;
335         memset(ht_info->supp_mcs_set, 0, 16);
336
337         ht_info->ht_supported = 1;
338
339         ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD;
340         ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20;
341         ht_info->cap |= (u16)(IEEE80211_HT_CAP_MIMO_PS &
342                              (IWL_MIMO_PS_NONE << 2));
343
344         max_bit_rate = MAX_BIT_RATE_20_MHZ;
345         if (priv->hw_params.fat_channel & BIT(band)) {
346                 ht_info->cap |= (u16)IEEE80211_HT_CAP_SUP_WIDTH;
347                 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_40;
348                 ht_info->supp_mcs_set[4] = 0x01;
349                 max_bit_rate = MAX_BIT_RATE_40_MHZ;
350         }
351
352         if (priv->cfg->mod_params->amsdu_size_8K)
353                 ht_info->cap |= (u16)IEEE80211_HT_CAP_MAX_AMSDU;
354
355         ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
356         ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
357
358         ht_info->supp_mcs_set[0] = 0xFF;
359         if (rx_chains_num >= 2)
360                 ht_info->supp_mcs_set[1] = 0xFF;
361         if (rx_chains_num >= 3)
362                 ht_info->supp_mcs_set[2] = 0xFF;
363
364         /* Highest supported Rx data rate */
365         max_bit_rate *= rx_chains_num;
366         ht_info->supp_mcs_set[10] = (u8)(max_bit_rate & 0x00FF);
367         ht_info->supp_mcs_set[11] = (u8)((max_bit_rate & 0xFF00) >> 8);
368
369         /* Tx MCS capabilities */
370         ht_info->supp_mcs_set[12] = IEEE80211_HT_CAP_MCS_TX_DEFINED;
371         if (tx_chains_num != rx_chains_num) {
372                 ht_info->supp_mcs_set[12] |= IEEE80211_HT_CAP_MCS_TX_RX_DIFF;
373                 ht_info->supp_mcs_set[12] |= ((tx_chains_num - 1) << 2);
374         }
375 }
376
377 static void iwlcore_init_hw_rates(struct iwl_priv *priv,
378                               struct ieee80211_rate *rates)
379 {
380         int i;
381
382         for (i = 0; i < IWL_RATE_COUNT; i++) {
383                 rates[i].bitrate = iwl_rates[i].ieee * 5;
384                 rates[i].hw_value = i; /* Rate scaling will work on indexes */
385                 rates[i].hw_value_short = i;
386                 rates[i].flags = 0;
387                 if ((i > IWL_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
388                         /*
389                          * If CCK != 1M then set short preamble rate flag.
390                          */
391                         rates[i].flags |=
392                                 (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
393                                         0 : IEEE80211_RATE_SHORT_PREAMBLE;
394                 }
395         }
396 }
397
398 /**
399  * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom
400  * iwlcore_init_geos - Initialize mac80211's geo/channel info based on EEPROM
401 static int iwlcore_init_geos(struct iwl_priv *priv)
402 {
403         struct iwl_channel_info *ch;
404         struct ieee80211_supported_band *sband;
405         struct ieee80211_channel *channels;
406         struct ieee80211_channel *geo_ch;
407         struct ieee80211_rate *rates;
408         int i = 0;
409
410         if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
411             priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
412                 IWL_DEBUG_INFO("Geography modes already initialized.\n");
413                 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
414                 return 0;
415         }
416
417         channels = kzalloc(sizeof(struct ieee80211_channel) *
418                            priv->channel_count, GFP_KERNEL);
419         if (!channels)
420                 return -ENOMEM;
421
422         rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_RATE_COUNT + 1)),
423                         GFP_KERNEL);
424         if (!rates) {
425                 kfree(channels);
426                 return -ENOMEM;
427         }
428
429         /* 5.2GHz channels start after the 2.4GHz channels */
430         sband = &priv->bands[IEEE80211_BAND_5GHZ];
431         sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
432         /* just OFDM */
433         sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
434         sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE;
435
436         iwlcore_init_ht_hw_capab(priv, &sband->ht_info, IEEE80211_BAND_5GHZ);
437
438         sband = &priv->bands[IEEE80211_BAND_2GHZ];
439         sband->channels = channels;
440         /* OFDM & CCK */
441         sband->bitrates = rates;
442         sband->n_bitrates = IWL_RATE_COUNT;
443
444         iwlcore_init_ht_hw_capab(priv, &sband->ht_info, IEEE80211_BAND_2GHZ);
445
446         priv->ieee_channels = channels;
447         priv->ieee_rates = rates;
448
449         iwlcore_init_hw_rates(priv, rates);
450
451         for (i = 0;  i < priv->channel_count; i++) {
452                 ch = &priv->channel_info[i];
453
454                 /* FIXME: might be removed if scan is OK */
455                 if (!is_channel_valid(ch))
456                         continue;
457
458                 if (is_channel_a_band(ch))
459                         sband =  &priv->bands[IEEE80211_BAND_5GHZ];
460                 else
461                         sband =  &priv->bands[IEEE80211_BAND_2GHZ];
462
463                 geo_ch = &sband->channels[sband->n_channels++];
464
465                 geo_ch->center_freq =
466                                 ieee80211_channel_to_frequency(ch->channel);
467                 geo_ch->max_power = ch->max_power_avg;
468                 geo_ch->max_antenna_gain = 0xff;
469                 geo_ch->hw_value = ch->channel;
470
471                 if (is_channel_valid(ch)) {
472                         if (!(ch->flags & EEPROM_CHANNEL_IBSS))
473                                 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
474
475                         if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
476                                 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
477
478                         if (ch->flags & EEPROM_CHANNEL_RADAR)
479                                 geo_ch->flags |= IEEE80211_CHAN_RADAR;
480
481                         geo_ch->flags |= ch->fat_extension_channel;
482
483                         if (ch->max_power_avg > priv->tx_power_channel_lmt)
484                                 priv->tx_power_channel_lmt = ch->max_power_avg;
485                 } else {
486                         geo_ch->flags |= IEEE80211_CHAN_DISABLED;
487                 }
488
489                 /* Save flags for reg domain usage */
490                 geo_ch->orig_flags = geo_ch->flags;
491
492                 IWL_DEBUG_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
493                                 ch->channel, geo_ch->center_freq,
494                                 is_channel_a_band(ch) ?  "5.2" : "2.4",
495                                 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
496                                 "restricted" : "valid",
497                                  geo_ch->flags);
498         }
499
500         if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
501              priv->cfg->sku & IWL_SKU_A) {
502                 printk(KERN_INFO DRV_NAME
503                        ": Incorrectly detected BG card as ABG.  Please send "
504                        "your PCI ID 0x%04X:0x%04X to maintainer.\n",
505                        priv->pci_dev->device, priv->pci_dev->subsystem_device);
506                 priv->cfg->sku &= ~IWL_SKU_A;
507         }
508
509         printk(KERN_INFO DRV_NAME
510                ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
511                priv->bands[IEEE80211_BAND_2GHZ].n_channels,
512                priv->bands[IEEE80211_BAND_5GHZ].n_channels);
513
514
515         set_bit(STATUS_GEO_CONFIGURED, &priv->status);
516
517         return 0;
518 }
519
520 /*
521  * iwlcore_free_geos - undo allocations in iwlcore_init_geos
522  */
523 static void iwlcore_free_geos(struct iwl_priv *priv)
524 {
525         kfree(priv->ieee_channels);
526         kfree(priv->ieee_rates);
527         clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
528 }
529
530 static u8 is_single_rx_stream(struct iwl_priv *priv)
531 {
532         return !priv->current_ht_config.is_ht ||
533                ((priv->current_ht_config.supp_mcs_set[1] == 0) &&
534                 (priv->current_ht_config.supp_mcs_set[2] == 0)) ||
535                priv->ps_mode == IWL_MIMO_PS_STATIC;
536 }
537
538 static u8 iwl_is_channel_extension(struct iwl_priv *priv,
539                                    enum ieee80211_band band,
540                                    u16 channel, u8 extension_chan_offset)
541 {
542         const struct iwl_channel_info *ch_info;
543
544         ch_info = iwl_get_channel_info(priv, band, channel);
545         if (!is_channel_valid(ch_info))
546                 return 0;
547
548         if (extension_chan_offset == IEEE80211_HT_IE_CHA_SEC_ABOVE)
549                 return !(ch_info->fat_extension_channel &
550                                         IEEE80211_CHAN_NO_FAT_ABOVE);
551         else if (extension_chan_offset == IEEE80211_HT_IE_CHA_SEC_BELOW)
552                 return !(ch_info->fat_extension_channel &
553                                         IEEE80211_CHAN_NO_FAT_BELOW);
554
555         return 0;
556 }
557
558 u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv,
559                              struct ieee80211_ht_info *sta_ht_inf)
560 {
561         struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config;
562
563         if ((!iwl_ht_conf->is_ht) ||
564            (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) ||
565            (iwl_ht_conf->extension_chan_offset == IEEE80211_HT_IE_CHA_SEC_NONE))
566                 return 0;
567
568         if (sta_ht_inf) {
569                 if ((!sta_ht_inf->ht_supported) ||
570                    (!(sta_ht_inf->cap & IEEE80211_HT_CAP_SUP_WIDTH)))
571                         return 0;
572         }
573
574         return iwl_is_channel_extension(priv, priv->band,
575                                          iwl_ht_conf->control_channel,
576                                          iwl_ht_conf->extension_chan_offset);
577 }
578 EXPORT_SYMBOL(iwl_is_fat_tx_allowed);
579
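/**
 * iwl_set_rxon_ht - Copy the current HT configuration into the staging RXON
 *
 * Sets the channel-mode and control-channel-location flags and the HT
 * protection mode, then updates the Rx chain selection.
 */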
580 void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
581 {
582         struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
583         u32 val;
584
585         if (!ht_info->is_ht)
586                 return;
587
588         /* Set up channel bandwidth:  20 MHz only, or 20/40 mixed if fat ok */
589         if (iwl_is_fat_tx_allowed(priv, NULL))
590                 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK;
591         else
592                 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
593                                  RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
594
595         if (le16_to_cpu(rxon->channel) != ht_info->control_channel) {
596                 IWL_DEBUG_ASSOC("RXON channel %d differs from HT control channel %d\n",
597                                 le16_to_cpu(rxon->channel),
598                                 ht_info->control_channel);
599                 return;
600         }
601
602         /* Note: control channel is opposite of extension channel */
603         switch (ht_info->extension_chan_offset) {
604         case IEEE80211_HT_IE_CHA_SEC_ABOVE:
605                 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
606                 break;
607         case IEEE80211_HT_IE_CHA_SEC_BELOW:
608                 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
609                 break;
610         case IEEE80211_HT_IE_CHA_SEC_NONE:
611         default:
612                 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
613                 break;
614         }
615
616         val = ht_info->ht_protection;
617
618         rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS);
619
620         iwl_set_rxon_chain(priv);
621
622         IWL_DEBUG_ASSOC("supported HT rate 0x%X 0x%X 0x%X "
623                         "rxon flags 0x%X operation mode :0x%X "
624                         "extension channel offset 0x%x "
625                         "control chan %d\n",
626                         ht_info->supp_mcs_set[0],
627                         ht_info->supp_mcs_set[1],
628                         ht_info->supp_mcs_set[2],
629                         le32_to_cpu(rxon->flags), ht_info->ht_protection,
630                         ht_info->extension_chan_offset,
631                         ht_info->control_channel);
632         return;
633 }
634 EXPORT_SYMBOL(iwl_set_rxon_ht);
635
636 /*
637  * Determine how many receiver/antenna chains to use.
638  * More provides better reception via diversity.  Fewer saves power.
639  * MIMO (dual stream) requires at least 2, but works better with 3.
640  * This does not determine *which* chains to use, just how many.
641  */
642 static int iwlcore_get_rx_chain_counter(struct iwl_priv *priv,
643                                         u8 *idle_state, u8 *rx_state)
644 {
645         u8 is_single = is_single_rx_stream(priv);
646         u8 is_cam = test_bit(STATUS_POWER_PMI, &priv->status) ? 0 : 1;
647
648         /* # of Rx chains to use when expecting MIMO. */
649         if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC)))
650                 *rx_state = 2;
651         else
652                 *rx_state = 3;
653
654         /* # Rx chains when idling and maybe trying to save power */
655         switch (priv->ps_mode) {
656         case IWL_MIMO_PS_STATIC:
657         case IWL_MIMO_PS_DYNAMIC:
658                 *idle_state = (is_cam) ? 2 : 1;
659                 break;
660         case IWL_MIMO_PS_NONE:
661                 *idle_state = (is_cam) ? *rx_state : 1;
662                 break;
663         default:
664                 *idle_state = 1;
665                 break;
666         }
667
668         return 0;
669 }
670
671 /**
672  * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
673  *
674  * Selects how many and which Rx receivers/antennas/chains to use.
675  * This should not be used for the scan command ... it puts the data in the wrong place.
676  */
677 void iwl_set_rxon_chain(struct iwl_priv *priv)
678 {
679         u8 is_single = is_single_rx_stream(priv);
680         u8 idle_state, rx_state;
681
682         priv->staging_rxon.rx_chain = 0;
683         rx_state = idle_state = 3;
684
685         /* Tell uCode which antennas are actually connected.
686          * Before first association, we assume all antennas are connected.
687          * Just after first association, iwl_chain_noise_calibration()
688          *    checks which antennas actually *are* connected. */
689         priv->staging_rxon.rx_chain |=
690                     cpu_to_le16(priv->hw_params.valid_rx_ant <<
691                                                  RXON_RX_CHAIN_VALID_POS);
692
693         /* How many receivers should we use? */
694         iwlcore_get_rx_chain_counter(priv, &idle_state, &rx_state);
695         priv->staging_rxon.rx_chain |=
696                 cpu_to_le16(rx_state << RXON_RX_CHAIN_MIMO_CNT_POS);
697         priv->staging_rxon.rx_chain |=
698                 cpu_to_le16(idle_state << RXON_RX_CHAIN_CNT_POS);
699
700         if (!is_single && (rx_state >= 2) &&
701             !test_bit(STATUS_POWER_PMI, &priv->status))
702                 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
703         else
704                 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
705
706         IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain);
707 }
708 EXPORT_SYMBOL(iwl_set_rxon_chain);
709
710 /**
711  * iwl_set_rxon_channel - Set the band and channel values in the staging RXON
712  * @band: IEEE80211_BAND_5GHZ selects 5.2 GHz; anything else selects 2.4 GHz
713  * @channel: Any channel valid for the requested band
714  *
715  * In addition to setting the staging RXON, priv->band is also set.
716  *
717  * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
718  * in the staging RXON flag structure based on the band
719  */
720 int iwl_set_rxon_channel(struct iwl_priv *priv,
721                                 enum ieee80211_band band,
722                                 u16 channel)
723 {
724         if (!iwl_get_channel_info(priv, band, channel)) {
725                 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
726                                channel, band);
727                 return -EINVAL;
728         }
729
730         if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
731             (priv->band == band))
732                 return 0;
733
734         priv->staging_rxon.channel = cpu_to_le16(channel);
735         if (band == IEEE80211_BAND_5GHZ)
736                 priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
737         else
738                 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
739
740         priv->band = band;
741
742         IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, band);
743
744         return 0;
745 }
746 EXPORT_SYMBOL(iwl_set_rxon_channel);
747
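/**
 * iwl_setup_mac - Describe driver capabilities to mac80211 and register the hw
 */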
748 int iwl_setup_mac(struct iwl_priv *priv)
749 {
750         int ret;
751         struct ieee80211_hw *hw = priv->hw;
752         hw->rate_control_algorithm = "iwl-4965-rs";
753
754         /* Tell mac80211 our characteristics */
755         hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE |
756                     IEEE80211_HW_SIGNAL_DBM |
757                     IEEE80211_HW_NOISE_DBM;
758         /* Default value; 4 EDCA QOS priorities */
759         hw->queues = 4;
760         /* Enhanced value; more queues, to support 11n aggregation */
761         hw->ampdu_queues = 12;
762
763         hw->conf.beacon_int = 100;
764
765         if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
766                 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
767                         &priv->bands[IEEE80211_BAND_2GHZ];
768         if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
769                 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
770                         &priv->bands[IEEE80211_BAND_5GHZ];
771
772         ret = ieee80211_register_hw(priv->hw);
773         if (ret) {
774                 IWL_ERROR("Failed to register hw (error %d)\n", ret);
775                 return ret;
776         }
777         priv->mac80211_registered = 1;
778
779         return 0;
780 }
781 EXPORT_SYMBOL(iwl_setup_mac);
782
783
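/**
 * iwl_init_drv - Initialize driver-private data, the channel map and geo/band info
 */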
784 int iwl_init_drv(struct iwl_priv *priv)
785 {
786         int ret;
787         int i;
788
789         priv->retry_rate = 1;
790         priv->ibss_beacon = NULL;
791
792         spin_lock_init(&priv->lock);
793         spin_lock_init(&priv->power_data.lock);
794         spin_lock_init(&priv->sta_lock);
795         spin_lock_init(&priv->hcmd_lock);
796         spin_lock_init(&priv->lq_mngr.lock);
797
798         for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++)
799                 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
800
801         INIT_LIST_HEAD(&priv->free_frames);
802
803         mutex_init(&priv->mutex);
804
805         /* Clear the driver's (not device's) station table */
806         iwlcore_clear_stations_table(priv);
807
808         priv->data_retry_limit = -1;
809         priv->ieee_channels = NULL;
810         priv->ieee_rates = NULL;
811         priv->band = IEEE80211_BAND_2GHZ;
812
813         priv->iw_mode = IEEE80211_IF_TYPE_STA;
814
815         priv->use_ant_b_for_management_frame = 1; /* start with ant B */
816         priv->ps_mode = IWL_MIMO_PS_NONE;
817
818         /* Choose which receivers/antennas to use */
819         iwl_set_rxon_chain(priv);
820
821         if (priv->cfg->mod_params->enable_qos)
822                 priv->qos_data.qos_enable = 1;
823
824         iwl_reset_qos(priv);
825
826         priv->qos_data.qos_active = 0;
827         priv->qos_data.qos_cap.val = 0;
828
829         iwl_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);
830
831         priv->rates_mask = IWL_RATES_MASK;
832         /* If power management is turned on, default to AC mode */
833         priv->power_mode = IWL_POWER_AC;
834         priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MAX;
835
836         ret = iwl_init_channel_map(priv);
837         if (ret) {
838                 IWL_ERROR("initializing regulatory failed: %d\n", ret);
839                 goto err;
840         }
841
842         ret = iwlcore_init_geos(priv);
843         if (ret) {
844                 IWL_ERROR("initializing geos failed: %d\n", ret);
845                 goto err_free_channel_map;
846         }
847
848         return 0;
849
850 err_free_channel_map:
851         iwl_free_channel_map(priv);
852 err:
853         return ret;
854 }
855 EXPORT_SYMBOL(iwl_init_drv);
856
857 void iwl_free_calib_results(struct iwl_priv *priv)
858 {
859         kfree(priv->calib_results.lo_res);
860         priv->calib_results.lo_res = NULL;
861         priv->calib_results.lo_res_len = 0;
862
863         kfree(priv->calib_results.tx_iq_res);
864         priv->calib_results.tx_iq_res = NULL;
865         priv->calib_results.tx_iq_res_len = 0;
866
867         kfree(priv->calib_results.tx_iq_perd_res);
868         priv->calib_results.tx_iq_perd_res = NULL;
869         priv->calib_results.tx_iq_perd_res_len = 0;
870 }
871 EXPORT_SYMBOL(iwl_free_calib_results);
872
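/**
 * iwl_set_tx_power - Set the user tx power limit
 *
 * The new limit is sent to the device when the value changes or when @force
 * is set, provided the .send_tx_power library operation is implemented.
 */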
873 int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
874 {
875         int ret = 0;
876         if (tx_power < IWL_TX_POWER_TARGET_POWER_MIN) {
877                 IWL_WARNING("Requested user TXPOWER %d below lower limit %d.\n",
878                             tx_power, IWL_TX_POWER_TARGET_POWER_MIN);
879                 return -EINVAL;
880         }
881
882         if (tx_power > IWL_TX_POWER_TARGET_POWER_MAX) {
883                 IWL_WARNING("Requested user TXPOWER %d above upper limit %d.\n",
884                             tx_power, IWL_TX_POWER_TARGET_POWER_MAX);
885                 return -EINVAL;
886         }
887
888         if (priv->tx_power_user_lmt != tx_power)
889                 force = true;
890
891         priv->tx_power_user_lmt = tx_power;
892
893         if (force && priv->cfg->ops->lib->send_tx_power)
894                 ret = priv->cfg->ops->lib->send_tx_power(priv);
895
896         return ret;
897 }
898 EXPORT_SYMBOL(iwl_set_tx_power);
899
900
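/**
 * iwl_uninit_drv - Free calibration results, geo/band info, the channel map
 *      and the scan command buffer
 */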
901 void iwl_uninit_drv(struct iwl_priv *priv)
902 {
903         iwl_free_calib_results(priv);
904         iwlcore_free_geos(priv);
905         iwl_free_channel_map(priv);
906         kfree(priv->scan);
907 }
908 EXPORT_SYMBOL(iwl_uninit_drv);
909
910
911
912 /* Low level drivers call this function to update iwlcore with the
913  * driver status.
914  */
915 int iwlcore_low_level_notify(struct iwl_priv *priv,
916                               enum iwlcore_card_notify notify)
917 {
918         int ret;
919         switch (notify) {
920         case IWLCORE_INIT_EVT:
921                 ret = iwl_rfkill_init(priv);
922                 if (ret)
923                         IWL_ERROR("Unable to initialize RFKILL system. "
924                                   "Ignoring error: %d\n", ret);
925                 iwl_power_initialize(priv);
926                 break;
927         case IWLCORE_START_EVT:
928                 iwl_power_update_mode(priv, 1);
929                 break;
930         case IWLCORE_STOP_EVT:
931                 break;
932         case IWLCORE_REMOVE_EVT:
933                 iwl_rfkill_unregister(priv);
934                 break;
935         }
936
937         return 0;
938 }
939 EXPORT_SYMBOL(iwlcore_low_level_notify);
940
941 int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags)
942 {
943         u32 stat_flags = 0;
944         struct iwl_host_cmd cmd = {
945                 .id = REPLY_STATISTICS_CMD,
946                 .meta.flags = flags,
947                 .len = sizeof(stat_flags),
948                 .data = (u8 *) &stat_flags,
949         };
950         return iwl_send_cmd(priv, &cmd);
951 }
952 EXPORT_SYMBOL(iwl_send_statistics_request);
953
954 /**
955  * iwlcore_verify_inst_sparse - verify runtime uCode image in card vs. host,
956  *   using sample data 100 bytes apart.  If these sample points are good,
957  *   it's a pretty good bet that everything between them is good, too.
958  */
959 static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
960 {
961         u32 val;
962         int ret = 0;
963         u32 errcnt = 0;
964         u32 i;
965
966         IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
967
968         ret = iwl_grab_nic_access(priv);
969         if (ret)
970                 return ret;
971
972         for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
973                 /* read data comes through single port, auto-incr addr */
974                 /* NOTE: Use the debugless read so we don't flood kernel log
975                  * if IWL_DL_IO is set */
976                 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
977                         i + RTC_INST_LOWER_BOUND);
978                 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
979                 if (val != le32_to_cpu(*image)) {
980                         ret = -EIO;
981                         errcnt++;
982                         if (errcnt >= 3)
983                                 break;
984                 }
985         }
986
987         iwl_release_nic_access(priv);
988
989         return ret;
990 }
991
992 /**
993  * iwl_verify_inst_full - verify runtime uCode image in card vs. host,
994  *     looking at all data.
995  */
996 static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
997                                  u32 len)
998 {
999         u32 val;
1000         u32 save_len = len;
1001         int ret = 0;
1002         u32 errcnt;
1003
1004         IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
1005
1006         ret = iwl_grab_nic_access(priv);
1007         if (ret)
1008                 return ret;
1009
1010         iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);
1011
1012         errcnt = 0;
1013         for (; len > 0; len -= sizeof(u32), image++) {
1014                 /* read data comes through single port, auto-incr addr */
1015                 /* NOTE: Use the debugless read so we don't flood kernel log
1016                  * if IWL_DL_IO is set */
1017                 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1018                 if (val != le32_to_cpu(*image)) {
1019                         IWL_ERROR("uCode INST section is invalid at "
1020                                   "offset 0x%x, is 0x%x, s/b 0x%x\n",
1021                                   save_len - len, val, le32_to_cpu(*image));
1022                         ret = -EIO;
1023                         errcnt++;
1024                         if (errcnt >= 20)
1025                                 break;
1026                 }
1027         }
1028
1029         iwl_release_nic_access(priv);
1030
1031         if (!errcnt)
1032                 IWL_DEBUG_INFO
1033                     ("ucode image in INSTRUCTION memory is good\n");
1034
1035         return ret;
1036 }
1037
1038 /**
1039  * iwl_verify_ucode - determine which instruction image is in SRAM,
1040  *    and verify its contents
1041  */
1042 int iwl_verify_ucode(struct iwl_priv *priv)
1043 {
1044         __le32 *image;
1045         u32 len;
1046         int ret;
1047
1048         /* Try bootstrap */
1049         image = (__le32 *)priv->ucode_boot.v_addr;
1050         len = priv->ucode_boot.len;
1051         ret = iwlcore_verify_inst_sparse(priv, image, len);
1052         if (!ret) {
1053                 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
1054                 return 0;
1055         }
1056
1057         /* Try initialize */
1058         image = (__le32 *)priv->ucode_init.v_addr;
1059         len = priv->ucode_init.len;
1060         ret = iwlcore_verify_inst_sparse(priv, image, len);
1061         if (!ret) {
1062                 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
1063                 return 0;
1064         }
1065
1066         /* Try runtime/protocol */
1067         image = (__le32 *)priv->ucode_code.v_addr;
1068         len = priv->ucode_code.len;
1069         ret = iwlcore_verify_inst_sparse(priv, image, len);
1070         if (!ret) {
1071                 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
1072                 return 0;
1073         }
1074
1075         IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
1076
1077         /* Since nothing seems to match, show first several data entries in
1078          * instruction SRAM, so maybe visual inspection will give a clue.
1079          * Selection of bootstrap image (vs. other images) is arbitrary. */
1080         image = (__le32 *)priv->ucode_boot.v_addr;
1081         len = priv->ucode_boot.len;
1082         ret = iwl_verify_inst_full(priv, image, len);
1083
1084         return ret;
1085 }
1086 EXPORT_SYMBOL(iwl_verify_ucode);
1087
1088
1089 static const char *desc_lookup(int i)
1090 {
1091         switch (i) {
1092         case 1:
1093                 return "FAIL";
1094         case 2:
1095                 return "BAD_PARAM";
1096         case 3:
1097                 return "BAD_CHECKSUM";
1098         case 4:
1099                 return "NMI_INTERRUPT";
1100         case 5:
1101                 return "SYSASSERT";
1102         case 6:
1103                 return "FATAL_ERROR";
1104         }
1105
1106         return "UNKNOWN";
1107 }
1108
1109 #define ERROR_START_OFFSET  (1 * sizeof(u32))
1110 #define ERROR_ELEM_SIZE     (7 * sizeof(u32))
1111
1112 void iwl_dump_nic_error_log(struct iwl_priv *priv)
1113 {
1114         u32 data2, line;
1115         u32 desc, time, count, base, data1;
1116         u32 blink1, blink2, ilink1, ilink2;
1117         int ret;
1118
1119         if (priv->ucode_type == UCODE_INIT)
1120                 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
1121         else
1122                 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
1123
1124         if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1125                 IWL_ERROR("Invalid error log pointer 0x%08X\n", base);
1126                 return;
1127         }
1128
1129         ret = iwl_grab_nic_access(priv);
1130         if (ret) {
1131                 IWL_WARNING("Can not read from adapter at this time.\n");
1132                 return;
1133         }
1134
1135         count = iwl_read_targ_mem(priv, base);
1136
1137         if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1138                 IWL_ERROR("Start IWL Error Log Dump:\n");
1139                 IWL_ERROR("Status: 0x%08lX, count: %d\n", priv->status, count);
1140         }
1141
1142         desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
1143         blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
1144         blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
1145         ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
1146         ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32));
1147         data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32));
1148         data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
1149         line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
1150         time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
1151
1152         IWL_ERROR("Desc        Time       "
1153                 "data1      data2      line\n");
1154         IWL_ERROR("%-13s (#%d) %010u 0x%08X 0x%08X %u\n",
1155                 desc_lookup(desc), desc, time, data1, data2, line);
1156         IWL_ERROR("blink1  blink2  ilink1  ilink2\n");
1157         IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
1158                 ilink1, ilink2);
1159
1160         iwl_release_nic_access(priv);
1161 }
1162 EXPORT_SYMBOL(iwl_dump_nic_error_log);
1163
1164 #define EVENT_START_OFFSET  (4 * sizeof(u32))
1165
1166 /**
1167  * iwl_print_event_log - Dump uCode event log entries to syslog
1168  *
1169  * NOTE: Must be called with iwl_grab_nic_access() already obtained!
1170  */
1171 void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1172                                 u32 num_events, u32 mode)
1173 {
1174         u32 i;
1175         u32 base;       /* SRAM byte address of event log header */
1176         u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1177         u32 ptr;        /* SRAM byte address of log data */
1178         u32 ev, time, data; /* event log data */
1179
1180         if (num_events == 0)
1181                 return;
1182         if (priv->ucode_type == UCODE_INIT)
1183                 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1184         else
1185                 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1186
1187         if (mode == 0)
1188                 event_size = 2 * sizeof(u32);
1189         else
1190                 event_size = 3 * sizeof(u32);
1191
1192         ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1193
1194         /* "time" is actually "data" for mode 0 (no timestamp).
1195         * place event id # at far right for easier visual parsing. */
1196         for (i = 0; i < num_events; i++) {
1197                 ev = iwl_read_targ_mem(priv, ptr);
1198                 ptr += sizeof(u32);
1199                 time = iwl_read_targ_mem(priv, ptr);
1200                 ptr += sizeof(u32);
1201                 if (mode == 0) {
1202                         /* data, ev */
1203                         IWL_ERROR("EVT_LOG:0x%08x:%04u\n", time, ev);
1204                 } else {
1205                         data = iwl_read_targ_mem(priv, ptr);
1206                         ptr += sizeof(u32);
1207                         IWL_ERROR("EVT_LOGT:%010u:0x%08x:%04u\n",
1208                                         time, data, ev);
1209                 }
1210         }
1211 }
1212 EXPORT_SYMBOL(iwl_print_event_log);
1213
1214
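/**
 * iwl_dump_nic_event_log - Read the uCode event log header and print its entries
 */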
1215 void iwl_dump_nic_event_log(struct iwl_priv *priv)
1216 {
1217         int ret;
1218         u32 base;       /* SRAM byte address of event log header */
1219         u32 capacity;   /* event log capacity in # entries */
1220         u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
1221         u32 num_wraps;  /* # times uCode wrapped to top of log */
1222         u32 next_entry; /* index of next entry to be written by uCode */
1223         u32 size;       /* # entries that we'll print */
1224
1225         if (priv->ucode_type == UCODE_INIT)
1226                 base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
1227         else
1228                 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
1229
1230         if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1231                 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
1232                 return;
1233         }
1234
1235         ret = iwl_grab_nic_access(priv);
1236         if (ret) {
1237                 IWL_WARNING("Can not read from adapter at this time.\n");
1238                 return;
1239         }
1240
1241         /* event log header */
1242         capacity = iwl_read_targ_mem(priv, base);
1243         mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
1244         num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
1245         next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
1246
1247         size = num_wraps ? capacity : next_entry;
1248
1249         /* bail out if nothing in log */
1250         if (size == 0) {
1251                 IWL_ERROR("Start IWL Event Log Dump: nothing in log\n");
1252                 iwl_release_nic_access(priv);
1253                 return;
1254         }
1255
1256         IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n",
1257                         size, num_wraps);
1258
1259         /* if uCode has wrapped back to top of log, start at the oldest entry,
1260          * i.e the next one that uCode would fill. */
1261         if (num_wraps)
1262                 iwl_print_event_log(priv, next_entry,
1263                                         capacity - next_entry, mode);
1264         /* (then/else) start at top of log */
1265         iwl_print_event_log(priv, 0, next_entry, mode);
1266
1267         iwl_release_nic_access(priv);
1268 }
1269 EXPORT_SYMBOL(iwl_dump_nic_event_log);
1270
1271