/* Zero out the Multicast HASH table */
DEBUGOUT("Zeroing the MTA\n");
mta_size = E1000_MC_TBL_SIZE;
- for(i = 0; i < mta_size; i++)
+ for(i = 0; i < mta_size; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+ /* use write flush to prevent Memory Write Block (MWB) from
+ * occurring when accessing our register space */
+ E1000_WRITE_FLUSH(hw);
+ }
/* Set the PCI priority bit correctly in the CTRL register. This
* determines if the adapter gives priority to receives, or if it
DEBUGOUT("Clearing RAR[1-15]\n");
for(i = 1; i < rar_num; i++) {
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+ E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+ E1000_WRITE_FLUSH(hw);
}
}
for(i = rar_used_count; i < num_rar_entry; i++) {
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+ E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+ E1000_WRITE_FLUSH(hw);
}
/* Clear the MTA */
num_mta_entry = E1000_NUM_MTA_REGISTERS;
for(i = 0; i < num_mta_entry; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+ E1000_WRITE_FLUSH(hw);
}
/* Add the new addresses */
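+ /* 82544 quirk: when writing an odd offset in the MTA, save off the
+ * previous (even) entry before writing and restore its old value
+ * afterwards */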
if((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) {
temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1));
E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+ E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp);
+ E1000_WRITE_FLUSH(hw);
} else {
E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+ E1000_WRITE_FLUSH(hw);
}
}
}
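+ /* write the low dword first, then the high dword, which also carries
+ * the Address Valid (AV) bit */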
E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
+ E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
+ E1000_WRITE_FLUSH(hw);
}
/******************************************************************************
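+ /* same 82544 odd-offset handling as in the MTA path: preserve the
+ * preceding (even) VFTA entry around the write to an odd offset */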
if((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
+ E1000_WRITE_FLUSH(hw);
} else {
E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ E1000_WRITE_FLUSH(hw);
}
}
* manageability unit */
vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
+ E1000_WRITE_FLUSH(hw);
}
}
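+ /* length is in bytes; convert it to the number of 32-bit words for
+ * the HOST_IF dword-array writes below */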
length >>= 2;
/* The device driver writes the relevant command block into the ram area. */
- for (i = 0; i < length; i++)
+ for (i = 0; i < length; i++) {
E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((uint32_t *) hdr + i));
+ E1000_WRITE_FLUSH(hw);
+ }
return E1000_SUCCESS;
}
tdba = adapter->tx_ring[0].dma;
tdlen = adapter->tx_ring[0].count *
sizeof(struct e1000_tx_desc);
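+ /* write TDLEN and the base address (high dword before low) before
+ * zeroing the tail and head pointers; the ordering is assumed to be a
+ * hardware programming requirement (not spelled out in this hunk) */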
- E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
- E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
E1000_WRITE_REG(hw, TDLEN, tdlen);
- E1000_WRITE_REG(hw, TDH, 0);
+ E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
+ E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
E1000_WRITE_REG(hw, TDT, 0);
+ E1000_WRITE_REG(hw, TDH, 0);
adapter->tx_ring[0].tdh = E1000_TDH;
adapter->tx_ring[0].tdt = E1000_TDT;
break;
case 1:
default:
rdba = adapter->rx_ring[0].dma;
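+ /* mirror the TX ordering: RDLEN and the base address (high dword
+ * before low) before the tail and head pointers */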
- E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
- E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
E1000_WRITE_REG(hw, RDLEN, rdlen);
- E1000_WRITE_REG(hw, RDH, 0);
+ E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
+ E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
E1000_WRITE_REG(hw, RDT, 0);
+ E1000_WRITE_REG(hw, RDH, 0);
adapter->rx_ring[0].rdh = E1000_RDH;
adapter->rx_ring[0].rdt = E1000_RDT;
break;
mc_ptr = mc_ptr->next;
} else {
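+ /* no address left for this entry: clear both dwords of the RAR pair */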
E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
+ E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
+ E1000_WRITE_FLUSH(hw);
}
}
/* clear the old settings from the multicast hash table */
- for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
+ for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+ E1000_WRITE_FLUSH(hw);
+ }
/* load any remaining addresses into the hash table */