Allocate IOMMU tables local to the relevant node.

On NUMA machines, place each IOMMU table and its allocation bitmap on the node of the PCI host bridge that owns it, instead of on whichever node the allocating CPU happens to be running. iommu_init_table() gains a nid argument, the bitmap allocation moves from __get_free_pages() to alloc_pages_node(), and callers allocate the iommu_table structure itself with kmalloc_node(). Callers with no node affinity information pass -1.
Signed-off-by: Anton Blanchard <anton@samba.org>
Acked-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
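
For reference, this is the allocation pattern the patch applies at each call site, condensed into one sketch. The alloc_table_on_node() helper is hypothetical and not part of the patch; kmalloc_node() and the two-argument iommu_init_table() are what the diff introduces, and a nid of -1 means no node preference:

	#include <linux/slab.h>
	#include <asm/iommu.h>

	/* Hypothetical helper, for illustration only: allocate the
	 * iommu_table on 'nid' so that iommu_init_table() can place
	 * the allocation bitmap on the same node.
	 */
	static struct iommu_table *alloc_table_on_node(int nid)
	{
		struct iommu_table *tbl;

		tbl = kmalloc_node(sizeof(*tbl), GFP_KERNEL, nid);
		if (!tbl)
			return NULL;

		/* ... fill in it_base, it_size, it_offset, it_busno,
		 * it_type for this bus before handing the table over ... */

		return iommu_init_table(tbl, nid); /* bitmap lands on nid too */
	}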
* Build an iommu_table structure. This contains a bitmap which
* is used to manage allocation of the TCE space.
*/
-struct iommu_table *iommu_init_table(struct iommu_table *tbl)
+struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
unsigned long sz;
static int welcomed = 0;
+ struct page *page;
/* Set aside 1/4 of the table for large allocations. */
tbl->it_halfpoint = tbl->it_size * 3 / 4;
/* number of bytes needed for the bitmap */
sz = (tbl->it_size + 7) >> 3;
- tbl->it_map = (unsigned long *)__get_free_pages(GFP_ATOMIC, get_order(sz));
- if (!tbl->it_map)
+ page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
+ if (!page)
panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
-
+ tbl->it_map = page_address(page);
memset(tbl->it_map, 0, sz);
tbl->it_hint = 0;
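
As a concrete sizing example for the bitmap math above (numbers assumed for illustration, not taken from the patch), a 256MB DMA window of 4KB pages gives:

	it_size         = 256MB / 4KB      = 65536 TCE entries
	it_halfpoint    = 65536 * 3 / 4    = 49152 (top quarter kept
	                                     for large allocations)
	sz              = (65536 + 7) >> 3 =  8192 bytes of bitmap
	get_order(8192) = 1                -> two 4KB pages taken from
	                                     the requested node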
vio_iommu_table = veth_iommu_table;
vio_iommu_table.it_offset += veth_iommu_table.it_size;
- if (!iommu_init_table(&veth_iommu_table))
+ if (!iommu_init_table(&veth_iommu_table, -1))
printk("Virtual Bus VETH TCE table failed.\n");
- if (!iommu_init_table(&vio_iommu_table))
+ if (!iommu_init_table(&vio_iommu_table, -1))
printk("Virtual Bus VIO TCE table failed.\n");
}
#endif
tbl->it_busno = 0;
tbl->it_type = TCE_VB;
- return iommu_init_table(tbl);
+ return iommu_init_table(tbl, -1);
}
}
/* Look for existing TCE table */
pdn->iommu_table = iommu_table_find(tbl);
if (pdn->iommu_table == NULL)
- pdn->iommu_table = iommu_init_table(tbl);
+ pdn->iommu_table = iommu_init_table(tbl, -1);
else
kfree(tbl);
}
pci->phb->dma_window_size = 0x8000000ul;
pci->phb->dma_window_base_cur = 0x8000000ul;
- tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
+ tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+ pci->phb->node);
iommu_table_setparms(pci->phb, dn, tbl);
- pci->iommu_table = iommu_init_table(tbl);
+ pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
/* Divide the rest (1.75GB) among the children */
pci->phb->dma_window_size = 0x80000000ul;
ppci->bussubno = bus->number;
- tbl = (struct iommu_table *)kmalloc(sizeof(struct iommu_table),
- GFP_KERNEL);
+ tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+ ppci->phb->node);
iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
- ppci->iommu_table = iommu_init_table(tbl);
+ ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
}
if (pdn != dn)
*/
if (!dev->bus->self) {
DBG(" --> first child, no bridge. Allocating iommu table.\n");
- tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
+ tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+ PCI_DN(dn)->phb->node);
iommu_table_setparms(PCI_DN(dn)->phb, dn, tbl);
- PCI_DN(mydn)->iommu_table = iommu_init_table(tbl);
+ PCI_DN(dn)->iommu_table = iommu_init_table(tbl,
+ PCI_DN(dn)->phb->node);
return;
}
/* iommu_table_setparms_lpar needs bussubno. */
pci->bussubno = pci->phb->bus->number;
- tbl = (struct iommu_table *)kmalloc(sizeof(struct iommu_table),
- GFP_KERNEL);
+ tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+ pci->phb->node);
iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
- pci->iommu_table = iommu_init_table(tbl);
+ pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
}
if (pdn != dn)
iommu_table_dart.it_base = (unsigned long)dart_vbase;
iommu_table_dart.it_index = 0;
iommu_table_dart.it_blocksize = 1;
- iommu_init_table(&iommu_table_dart);
+ iommu_init_table(&iommu_table_dart, -1);
/* Reserve the last page of the DART to avoid possible prefetch
* past the DART mapped area
/* Initializes an iommu_table based on values set in the passed-in
* structure
*/
-extern struct iommu_table *iommu_init_table(struct iommu_table * tbl);
+extern struct iommu_table *iommu_init_table(struct iommu_table *tbl,
+ int nid);
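
Usage under the new prototype, as already visible in the hunks above (condensed; both calls are lifted from the diff):

	/* node-aware caller: table memory follows the host bridge */
	pci->iommu_table = iommu_init_table(tbl, pci->phb->node);

	/* caller with no node affinity, e.g. the DART */
	iommu_init_table(&iommu_table_dart, -1);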
extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
struct scatterlist *sglist, int nelems, unsigned long mask,