base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW |
(window->ioid & IOPTE_IOID_Mask);
#endif
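+	/* Clear the strong-ordering bit when the caller asked for a
+	 * weakly ordered mapping via DMA_ATTR_WEAK_ORDERING.
+	 */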
+ if (unlikely(dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)))
+ base_pte &= ~IOPTE_SO_RW;
io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
static unsigned long cell_dma_direct_offset;
static unsigned long dma_iommu_fixed_base;
-struct dma_mapping_ops dma_iommu_fixed_ops;
+
+/* iommu_fixed_is_weak is set if booted with iommu_fixed=weak */
+static int iommu_fixed_is_weak;
static struct iommu_table *cell_get_iommu_table(struct device *dev)
{
return &window->table;
}
+/* A coherent allocation implies strong ordering */
+
+static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
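+	/* If the fixed window is weakly ordered, allocate through the
+	 * dynamic window instead, whose IOPTEs keep IOPTE_SO_RW set.
+	 */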
+ if (iommu_fixed_is_weak)
+ return iommu_alloc_coherent(dev, cell_get_iommu_table(dev),
+ size, dma_handle,
+ device_to_mask(dev), flag,
+ dev->archdata.numa_node);
+ else
+ return dma_direct_ops.alloc_coherent(dev, size, dma_handle,
+ flag);
+}
+
+static void dma_fixed_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ if (iommu_fixed_is_weak)
+ iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
+ dma_handle);
+ else
+ dma_direct_ops.free_coherent(dev, size, vaddr, dma_handle);
+}
+
+static dma_addr_t dma_fixed_map_single(struct device *dev, void *ptr,
+ size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
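+	/* The fixed mapping can only be used when its ordering matches
+	 * the ordering requested for this buffer; otherwise map through
+	 * the dynamic window, which applies the attribute per IOPTE.
+	 */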
+ if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
+ return dma_direct_ops.map_single(dev, ptr, size, direction,
+ attrs);
+ else
+ return iommu_map_single(dev, cell_get_iommu_table(dev), ptr,
+ size, device_to_mask(dev), direction,
+ attrs);
+}
+
+static void dma_fixed_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
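+	/* Unmap through the same path dma_fixed_map_single() chose */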
+ if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
+ dma_direct_ops.unmap_single(dev, dma_addr, size, direction,
+ attrs);
+ else
+ iommu_unmap_single(cell_get_iommu_table(dev), dma_addr, size,
+ direction, attrs);
+}
+
+static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
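+	/* Same ordering-match dispatch as dma_fixed_map_single() */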
+ if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
+ return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs);
+ else
+ return iommu_map_sg(dev, cell_get_iommu_table(dev), sg, nents,
+ device_to_mask(dev), direction, attrs);
+}
+
+static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
+ dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs);
+ else
+ iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents, direction,
+ attrs);
+}
+
+static int dma_fixed_dma_supported(struct device *dev, u64 mask)
+{
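+	/* Only devices capable of full 64-bit addressing can use the
+	 * fixed mapping.
+	 */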
+ return mask == DMA_64BIT_MASK;
+}
+
+static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
+
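+/* DMA ops for devices that have been switched to the fixed mapping */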
+struct dma_mapping_ops dma_iommu_fixed_ops = {
+ .alloc_coherent = dma_fixed_alloc_coherent,
+ .free_coherent = dma_fixed_free_coherent,
+ .map_single = dma_fixed_map_single,
+ .unmap_single = dma_fixed_unmap_single,
+ .map_sg = dma_fixed_map_sg,
+ .unmap_sg = dma_fixed_unmap_sg,
+ .dma_supported = dma_fixed_dma_supported,
+ .set_dma_mask = dma_set_mask_and_switch,
+};
+
static void cell_dma_dev_setup_fixed(struct device *dev);
static void cell_dma_dev_setup(struct device *dev)
pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);
- base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW
+ base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M
| (cell_iommu_get_ioid(np) & IOPTE_IOID_Mask);
+	if (iommu_fixed_is_weak) {
+		pr_info("IOMMU: Using weak ordering for fixed mapping\n");
+	} else {
+		pr_info("IOMMU: Using strong ordering for fixed mapping\n");
+		base_pte |= IOPTE_SO_RW;
+	}
+
for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
/* Don't touch the dynamic region */
ioaddr = uaddr + fbase;
cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
}
- dma_iommu_fixed_ops = dma_direct_ops;
- dma_iommu_fixed_ops.set_dma_mask = dma_set_mask_and_switch;
-
dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch;
set_pci_dma_ops(&dma_iommu_ops);
if (strcmp(str, "off") == 0)
iommu_fixed_disabled = 1;
+ else if (strcmp(str, "weak") == 0)
+ iommu_fixed_is_weak = 1;
+
return 1;
}
__setup("iommu_fixed=", setup_iommu_fixed);