err.no Git - linux-2.6/blobdiff - arch/x86/kernel/amd_iommu.c
Merge branch 'generic-ipi' into generic-ipi-for-linus
[linux-2.6] / arch / x86 / kernel / amd_iommu.c
index bed5f820898ddfa68c3c9a8a9d3bbc76b7668b94..f2766d84c7a00c78c4f24951a7f2912f550ef475 100644 (file)
@@ -25,6 +25,7 @@
 #include <asm/proto.h>
 #include <asm/gart.h>
 #include <asm/amd_iommu_types.h>
+#include <asm/amd_iommu.h>
 
 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
 
@@ -139,16 +140,22 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
 static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
                u64 address, size_t size)
 {
-       int i;
+       int s = 0;
        unsigned pages = to_pages(address, size);
 
        address &= PAGE_MASK;
 
-       for (i = 0; i < pages; ++i) {
-               iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 0);
-               address += PAGE_SIZE;
+       if (pages > 1) {
+               /*
+                * If we have to flush more than one page, flush all
+                * TLB entries for this domain
+                */
+               address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+               s = 1;
        }
 
+       iommu_queue_inv_iommu_pages(iommu, address, domid, 0, s);
+
        return 0;
 }
 
@@ -300,7 +307,6 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,
                                0, boundary_size, 0);
 
        if (likely(address != -1)) {
-               set_bit_string(dom->bitmap, address, pages);
                dom->next_bit = address + pages;
                address <<= PAGE_SHIFT;
        } else
@@ -906,3 +912,51 @@ void prealloc_protection_domains(void)
        }
 }
 
+static struct dma_mapping_ops amd_iommu_dma_ops = {
+       .alloc_coherent = alloc_coherent,
+       .free_coherent = free_coherent,
+       .map_single = map_single,
+       .unmap_single = unmap_single,
+       .map_sg = map_sg,
+       .unmap_sg = unmap_sg,
+};
+
+int __init amd_iommu_init_dma_ops(void)
+{
+       struct amd_iommu *iommu;
+       int order = amd_iommu_aperture_order;
+       int ret;
+
+       list_for_each_entry(iommu, &amd_iommu_list, list) {
+               iommu->default_dom = dma_ops_domain_alloc(iommu, order);
+               if (iommu->default_dom == NULL)
+                       return -ENOMEM;
+               ret = iommu_init_unity_mappings(iommu);
+               if (ret)
+                       goto free_domains;
+       }
+
+       if (amd_iommu_isolate)
+               prealloc_protection_domains();
+
+       iommu_detected = 1;
+       force_iommu = 1;
+       bad_dma_address = 0;
+#ifdef CONFIG_GART_IOMMU
+       gart_iommu_aperture_disabled = 1;
+       gart_iommu_aperture = 0;
+#endif
+
+       dma_ops = &amd_iommu_dma_ops;
+
+       return 0;
+
+free_domains:
+
+       list_for_each_entry(iommu, &amd_iommu_list, list) {
+               if (iommu->default_dom)
+                       dma_ops_domain_free(iommu->default_dom);
+       }
+
+       return ret;
+}