 	preempt_disable();
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
-		smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm);
+		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
 	} else {
 		cpumask_t mask = cpu_online_map;
 		unsigned int cpu;

 static void flush_tlb_range_ipi(void *info)
 {
-	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+	struct flush_tlb_data *fd = info;
 	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
 }

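The hunks above show both directions of the same C rule: a pointer to any object type converts to and from void * implicitly, so the casts being removed were pure noise (C++ would insist on the cast when converting from void *, but this is C). A standalone illustration, not kernel code:

#include <stdlib.h>

int main(void)
{
	int *p = malloc(sizeof(*p));	/* the void * result converts to int * without a cast */
	void *q = p;			/* and an int * converts back to void * just as silently */

	free(q);
	return 0;
}
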
 		fd.vma = vma;
 		fd.addr1 = start;
 		fd.addr2 = end;
-		smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd);
+		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
 	} else {
 		cpumask_t mask = cpu_online_map;
 		unsigned int cpu;

 static void flush_tlb_kernel_range_ipi(void *info)
 {
-	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+	struct flush_tlb_data *fd = info;
 	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
 }

 	fd.addr1 = start;
 	fd.addr2 = end;
-	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
+	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1, 1);
 }

 static void flush_tlb_page_ipi(void *info)
 {
-	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+	struct flush_tlb_data *fd = info;
 	local_flush_tlb_page(fd->vma, fd->addr1);
 }

 		fd.vma = vma;
 		fd.addr1 = page;
-		smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd);
+		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
 	} else {
 		cpumask_t mask = cpu_online_map;
 		unsigned int cpu;
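
Every call site touched above follows the same shape: pack the handler's arguments into a struct flush_tlb_data on the caller's stack, hand its address to the cross-call primitive through a void * parameter, and unpack it again in the IPI handler. A minimal self-contained sketch of that shape, with made-up names standing in for smp_on_other_tlbs()/on_each_cpu(), which here simply run the handler locally:

#include <stdio.h>

struct range_data {		/* plays the role of struct flush_tlb_data */
	unsigned long addr1;
	unsigned long addr2;
};

/* Stand-in for the cross-call primitive: just invoke the handler in place. */
static void run_on_other_cpus(void (*func)(void *), void *info)
{
	func(info);
}

static void flush_range_handler(void *info)
{
	struct range_data *rd = info;	/* implicit conversion from void * */

	printf("flush %#lx-%#lx\n", rd->addr1, rd->addr2);
}

static void flush_range(unsigned long start, unsigned long end)
{
	struct range_data rd;

	rd.addr1 = start;
	rd.addr2 = end;
	run_on_other_cpus(flush_range_handler, &rd);	/* &rd converts to void * implicitly */
}

int main(void)
{
	flush_range(0x1000, 0x2000);
	return 0;
}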