#define ROPE6_CTL 0x230
#define ROPE7_CTL 0x238
+#define IOC_ROPE0_CFG 0x500 /* pluto only */
+#define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */
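+/* Per-rope CFG registers follow at IOC_ROPE0_CFG + rope * sizeof(u64). */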
+
#define HF_ENABLE 0x40
**
**************************************************************************/
-static void __iomem *ioc_remap(struct sba_device *sba_dev, int offset)
+static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
{
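+	/* SBA registers are MMIO; map them uncached via ioremap_nocache(). */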
- return ioremap(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
+ return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
}
static void sba_hw_init(struct sba_device *sba_dev)
sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL;
sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1) ;
err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
- if (err < 0) {
- BUG();
- }
+ BUG_ON(err < 0);
} else if (IS_PLUTO(sba_dev->iodc)) {
int err;
sba_dev->num_ioc = num_ioc;
for (i = 0; i < num_ioc; i++) {
- /*
- ** Make sure the box crashes if we get any errors on a rope.
- */
- WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE0_CTL);
- WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE1_CTL);
- WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE2_CTL);
- WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE3_CTL);
- WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE4_CTL);
- WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE5_CTL);
- WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE6_CTL);
- WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
-
- /* flush out the writes */
+ void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa;
+ unsigned int j;
+
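+ /* Walk all ROPES_PER_IOC ropes; per-rope registers sit sizeof(u64) apart. */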
+ for (j = 0; j < sizeof(u64) * ROPES_PER_IOC; j += sizeof(u64)) {
+ /*
+ * Clear the ROPE(N)_CFG AO bit.
+ * Disables "NT Ordering" (roughly the inverse of
+ * "Relaxed Ordering") and overrides bit 1 in the
+ * DMA Hint Sets.
+ * Improves netperf UDP_STREAM by ~10% for bcm5701.
+ */
+ if (IS_PLUTO(sba_dev->iodc)) {
+ void __iomem *rope_cfg;
+ unsigned long cfg_val;
+
+ rope_cfg = ioc_hpa + IOC_ROPE0_CFG + j;
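+ /* Read-modify-write: clear only the AO bit, preserve the rest. */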
+ cfg_val = READ_REG(rope_cfg);
+ cfg_val &= ~IOC_ROPE_AO;
+ WRITE_REG(cfg_val, rope_cfg);
+ }
+
+ /*
+ ** Make sure the box crashes on rope errors.
+ */
+ WRITE_REG(HF_ENABLE, ioc_hpa + ROPE0_CTL + j);
+ }
+
+ /* flush out the last writes */
READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n",
u32 func_class;
int i;
char *version;
- void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE);
+ void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE);
struct proc_dir_entry *info_entry, *bitmap_entry, *root;
sba_dump_ranges(sba_addr);
int i;
int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */
- if ((t!=HPHW_IOA) && (t!=HPHW_BCPORT))
- BUG();
+ BUG_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));
r->start = r->end = 0;
int base, size;
int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */
- if ((t!=HPHW_IOA) && (t!=HPHW_BCPORT))
- BUG();
+ BUG_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));
r->start = r->end = 0;