 int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 		   struct mlx4_buf *buf)
 {
 	dma_addr_t t;
 
 	if (size <= max_direct) {
 		buf->nbufs = 1;
 		buf->npages = 1;
 		buf->page_shift = get_order(size) + PAGE_SHIFT;
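 		/*
 		 * The whole buffer fits in one dma_alloc_coherent()
 		 * allocation, so direct.buf/.map describe it completely.
 		 */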
-		buf->u.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
+		buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
 						       size, &t, GFP_KERNEL);
-		if (!buf->u.direct.buf)
+		if (!buf->direct.buf)
 			return -ENOMEM;
 
-		buf->u.direct.map = t;
+		buf->direct.map = t;
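 
 		/*
 		 * Shrink the reported page size (doubling npages each time)
 		 * until it matches the alignment of the DMA address.
 		 */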
 		while (t & ((1 << buf->page_shift) - 1)) {
 			--buf->page_shift;
 			buf->npages *= 2;
 		}
 
-		memset(buf->u.direct.buf, 0, size);
+		memset(buf->direct.buf, 0, size);
 	} else {
 		int i;
 
 		buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
 		buf->npages = buf->nbufs;
 		buf->page_shift = PAGE_SHIFT;
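 		/*
 		 * Too big for a single contiguous allocation: fall back to
 		 * one PAGE_SIZE coherent chunk per page_list entry.
 		 */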
-		buf->u.page_list = kzalloc(buf->nbufs * sizeof *buf->u.page_list,
+		buf->page_list = kzalloc(buf->nbufs * sizeof *buf->page_list,
 					   GFP_KERNEL);
-		if (!buf->u.page_list)
+		if (!buf->page_list)
 			return -ENOMEM;
 
 		for (i = 0; i < buf->nbufs; ++i) {
-			buf->u.page_list[i].buf =
+			buf->page_list[i].buf =
 				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
 						   &t, GFP_KERNEL);
-			if (!buf->u.page_list[i].buf)
+			if (!buf->page_list[i].buf)
 				goto err_free;
 
-			buf->u.page_list[i].map = t;
+			buf->page_list[i].map = t;
 
-			memset(buf->u.page_list[i].buf, 0, PAGE_SIZE);
+			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
 		}
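 
 		/*
 		 * On 64-bit kernels the pages are also vmap()ed into one
 		 * contiguous kernel virtual range, so direct.buf is usable
 		 * even in the page-list case (this is what lets
 		 * mlx4_buf_offset() take the fast path below).
 		 */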
 		if (BITS_PER_LONG == 64) {
 			struct page **pages;
 			pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
 			if (!pages)
 				goto err_free;
 			for (i = 0; i < buf->nbufs; ++i)
-				pages[i] = virt_to_page(buf->u.page_list[i].buf);
-			buf->u.direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
+				pages[i] = virt_to_page(buf->page_list[i].buf);
+			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
 			kfree(pages);
-			if (!buf->u.direct.buf)
+			if (!buf->direct.buf)
 				goto err_free;
 		}
 	}
 
 	return 0;
 
 err_free:
 	mlx4_buf_free(dev, size, buf);
 
 	return -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(mlx4_buf_alloc);
 
 void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
 {
 	int i;
 
 	if (buf->nbufs == 1)
-		dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf,
-				  buf->u.direct.map);
+		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
+				  buf->direct.map);
 	else {
 		if (BITS_PER_LONG == 64)
-			vunmap(buf->u.direct.buf);
+			vunmap(buf->direct.buf);
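 
 		/*
 		 * Free only the chunks that were actually allocated; the
 		 * error path can get here with a partially filled
 		 * (kzalloc()ed, so NULL-initialized) page_list.
 		 */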
 		for (i = 0; i < buf->nbufs; ++i)
-			if (buf->u.page_list[i].buf)
+			if (buf->page_list[i].buf)
 				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-						  buf->u.page_list[i].buf,
-						  buf->u.page_list[i].map);
-		kfree(buf->u.page_list);
+						  buf->page_list[i].buf,
+						  buf->page_list[i].map);
+		kfree(buf->page_list);
 	}
 }
 EXPORT_SYMBOL_GPL(mlx4_buf_free);
 
 struct mlx4_buf_list {
 	void *buf;
 	dma_addr_t map;
 };
 
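 /*
  * Both members can now be live at the same time (the 64-bit vmap() case
  * fills in direct.buf alongside page_list), so the old 'u' wrapper buys
  * nothing; keep direct and page_list as plain members.
  */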
 struct mlx4_buf {
-	struct {
-		struct mlx4_buf_list direct;
-		struct mlx4_buf_list *page_list;
-	} u;
+	struct mlx4_buf_list direct;
+	struct mlx4_buf_list *page_list;
 	int nbufs;
 	int npages;
 	int page_shift;
 };
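 
 /*
  * On 64-bit kernels (vmap()ed) and for single-chunk buffers, direct.buf
  * is a contiguous virtual mapping of the whole buffer, so an offset is
  * plain pointer arithmetic; otherwise index into the page list.
  */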
 static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
 {
 	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
-		return buf->u.direct.buf + offset;
+		return buf->direct.buf + offset;
 	else
-		return buf->u.page_list[offset >> PAGE_SHIFT].buf +
+		return buf->page_list[offset >> PAGE_SHIFT].buf +
 			(offset & (PAGE_SIZE - 1));
 }
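
/*
 * Illustrative sketch only, not part of the patch: one way a caller
 * might exercise the flattened fields.  The function name, the
 * 4 * PAGE_SIZE size and the PAGE_SIZE max_direct threshold are
 * hypothetical, chosen so that the page-list path is taken.
 */
static int mlx4_buf_example(struct mlx4_dev *dev)
{
	struct mlx4_buf buf;
	int err, i;

	err = mlx4_buf_alloc(dev, 4 * PAGE_SIZE, PAGE_SIZE, &buf);
	if (err)
		return err;

	/* Touch each page through the offset helper. */
	for (i = 0; i < 4; ++i)
		memset(mlx4_buf_offset(&buf, i * PAGE_SIZE), 0xab, PAGE_SIZE);

	mlx4_buf_free(dev, 4 * PAGE_SIZE, &buf);
	return 0;
}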