err.no Git - linux-2.6/commitdiff
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6
author    Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 29 Jul 2008 01:14:24 +0000 (18:14 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 29 Jul 2008 01:14:24 +0000 (18:14 -0700)
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6: (21 commits)
  x86/PCI: use dev_printk when possible
  PCI: add D3 power state avoidance quirk
  PCI: fix bogus "'device' may be used uninitialized" warning in pci_slot
  PCI: add an option to allow ASPM enabled forcibly
  PCI: disable ASPM on pre-1.1 PCIe devices
  PCI: disable ASPM per ACPI FADT setting
  PCI MSI: Don't disable MSIs if the mask bit isn't supported
  PCI: handle 64-bit resources better on 32-bit machines
  PCI: rewrite PCI BAR reading code
  PCI: document pci_target_state
  PCI hotplug: fix typo in pcie hotplug output
  x86 gart: replace to_pages macro with iommu_num_pages
  x86, AMD IOMMU: replace to_pages macro with iommu_num_pages
  iommu: add iommu_num_pages helper function
  dma-coherent: add documentation to new interfaces
  Cris: convert to using generic dma-coherent mem allocator
  Sh: use generic per-device coherent dma allocator
  ARM: support generic per-device coherent dma mem
  Generic dma-coherent: fix DMA_MEMORY_EXCLUSIVE
  x86: use generic per-device dma coherent allocator
  ...

61 files changed:
Documentation/kdump/kdump.txt
arch/arm/Kconfig
arch/x86/kernel/acpi/cstate.c
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/ldt.c
arch/x86/kernel/microcode.c
arch/x86/kernel/reboot.c
arch/x86/kernel/setup_percpu.c
arch/x86/kvm/Kconfig
arch/x86/mm/gup.c
drivers/acpi/processor_throttling.c
drivers/char/pcmcia/ipwireless/hardware.c
drivers/firmware/dcdbas.c
drivers/misc/sgi-xp/xpc_main.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/spi/mpc52xx_psc_spi.c
drivers/spi/spi_s3c24xx.c
drivers/video/sh7760fb.c
fs/bio-integrity.c
fs/buffer.c
fs/ecryptfs/crypto.c
fs/exec.c
fs/ext2/inode.c
fs/ext3/inode.c
fs/ext4/inode.c
include/asm-arm/arch-s3c2410/spi.h
include/asm-generic/gpio.h
include/asm-generic/pgtable-nopmd.h
include/linux/buffer_head.h
include/linux/cpumask.h
include/linux/fs.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/mmu_notifier.h [new file with mode: 0644]
include/linux/pagemap.h
include/linux/rculist.h
include/linux/rmap.h
kernel/cpu.c
kernel/fork.c
kernel/time/tick-common.c
kernel/trace/trace_sysprof.c
lib/ratelimit.c
lib/smp_processor_id.c
mm/Kconfig
mm/Makefile
mm/filemap.c
mm/filemap_xip.c
mm/fremap.c
mm/hugetlb.c
mm/memory.c
mm/mmap.c
mm/mmu_notifier.c [new file with mode: 0644]
mm/mprotect.c
mm/mremap.c
mm/rmap.c
mm/shmem.c
net/sunrpc/svc.c

Documentation/kdump/kdump.txt
index 9691c7f5166c1ea60acfbc383799dc5ec4dcbd21..0705040531a534c2c153c1289fe0febf92926a61 100644 (file)
@@ -65,26 +65,26 @@ Install kexec-tools
 
 2) Download the kexec-tools user-space package from the following URL:
 
-http://www.kernel.org/pub/linux/kernel/people/horms/kexec-tools/kexec-tools-testing.tar.gz
+http://www.kernel.org/pub/linux/kernel/people/horms/kexec-tools/kexec-tools.tar.gz
 
-This is a symlink to the latest version, which at the time of writing is
-20061214, the only release of kexec-tools-testing so far. As other versions
-are released, the older ones will remain available at
-http://www.kernel.org/pub/linux/kernel/people/horms/kexec-tools/
+This is a symlink to the latest version.
 
-Note: Latest kexec-tools-testing git tree is available at
+The latest kexec-tools git tree is available at:
 
-git://git.kernel.org/pub/scm/linux/kernel/git/horms/kexec-tools-testing.git
+git://git.kernel.org/pub/scm/linux/kernel/git/horms/kexec-tools.git
 or
-http://www.kernel.org/git/?p=linux/kernel/git/horms/kexec-tools-testing.git;a=summary
+http://www.kernel.org/git/?p=linux/kernel/git/horms/kexec-tools.git
+
+More information about kexec-tools can be found at
+http://www.kernel.org/pub/linux/kernel/people/horms/kexec-tools/README.html
 
 3) Unpack the tarball with the tar command, as follows:
 
-   tar xvpzf kexec-tools-testing.tar.gz
+   tar xvpzf kexec-tools.tar.gz
 
 4) Change to the kexec-tools directory, as follows:
 
-   cd kexec-tools-testing-VERSION
+   cd kexec-tools-VERSION
 
 5) Configure the package, as follows:
 
arch/arm/Kconfig
index 652cd32a09c57d33afea25ab83399b199a46b46e..257033c691f2618adf9c6103d9e1e9090b470fcb 100644 (file)
@@ -235,6 +235,7 @@ config ARCH_VERSATILE
 config ARCH_AT91
        bool "Atmel AT91"
        select GENERIC_GPIO
+       select HAVE_CLK
        help
          This enables support for systems based on the Atmel AT91RM9200,
          AT91SAM9 and AT91CAP9 processors.
@@ -268,7 +269,6 @@ config ARCH_EP93XX
        select ARM_VIC
        select GENERIC_GPIO
        select HAVE_CLK
-       select HAVE_CLK
        select ARCH_REQUIRE_GPIOLIB
        help
          This enables support for the Cirrus EP93xx series of CPUs.
arch/x86/kernel/acpi/cstate.c
index 9220cf46aa10645a50ceb65e8ca24c69c9841958..c2502eb9aa8355488a7057602bfdfa71134785a7 100644 (file)
@@ -73,7 +73,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
        struct cpuinfo_x86 *c = &cpu_data(cpu);
 
        cpumask_t saved_mask;
-       cpumask_of_cpu_ptr(new_mask, cpu);
        int retval;
        unsigned int eax, ebx, ecx, edx;
        unsigned int edx_part;
@@ -92,7 +91,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 
        /* Make sure we are running on right CPU */
        saved_mask = current->cpus_allowed;
-       retval = set_cpus_allowed_ptr(current, new_mask);
+       retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        if (retval)
                return -1;
 
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index ff2fff56f0a8f2f1340a98a3305da24cd1dd3da1..dd097b835839dd73d4139d807c23e8fca1648344 100644 (file)
@@ -200,12 +200,10 @@ static void drv_read(struct drv_cmd *cmd)
 static void drv_write(struct drv_cmd *cmd)
 {
        cpumask_t saved_mask = current->cpus_allowed;
-       cpumask_of_cpu_ptr_declare(cpu_mask);
        unsigned int i;
 
        for_each_cpu_mask_nr(i, cmd->mask) {
-               cpumask_of_cpu_ptr_next(cpu_mask, i);
-               set_cpus_allowed_ptr(current, cpu_mask);
+               set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
                do_drv_write(cmd);
        }
 
@@ -269,12 +267,11 @@ static unsigned int get_measured_perf(unsigned int cpu)
        } aperf_cur, mperf_cur;
 
        cpumask_t saved_mask;
-       cpumask_of_cpu_ptr(cpu_mask, cpu);
        unsigned int perf_percent;
        unsigned int retval;
 
        saved_mask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, cpu_mask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        if (get_cpu() != cpu) {
                /* We were not able to run on requested processor */
                put_cpu();
@@ -340,7 +337,6 @@ static unsigned int get_measured_perf(unsigned int cpu)
 
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
-       cpumask_of_cpu_ptr(cpu_mask, cpu);
        struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
        unsigned int freq;
        unsigned int cached_freq;
@@ -353,7 +349,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
        }
 
        cached_freq = data->freq_table[data->acpi_data->state].frequency;
-       freq = extract_freq(get_cur_val(cpu_mask), data);
+       freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
        if (freq != cached_freq) {
                /*
                 * The dreaded BIOS frequency change behind our back.
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 53c7b6936973c6da85137145ed0eea8eec18746f..c45ca6d4dce101fbe7e5044420410edab5ad8b42 100644 (file)
@@ -479,12 +479,11 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi
 static int check_supported_cpu(unsigned int cpu)
 {
        cpumask_t oldmask;
-       cpumask_of_cpu_ptr(cpu_mask, cpu);
        u32 eax, ebx, ecx, edx;
        unsigned int rc = 0;
 
        oldmask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, cpu_mask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 
        if (smp_processor_id() != cpu) {
                printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
@@ -1017,7 +1016,6 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
 static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
 {
        cpumask_t oldmask;
-       cpumask_of_cpu_ptr(cpu_mask, pol->cpu);
        struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
        u32 checkfid;
        u32 checkvid;
@@ -1032,7 +1030,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 
        /* only run on specific CPU from here on */
        oldmask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, cpu_mask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
 
        if (smp_processor_id() != pol->cpu) {
                printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1107,7 +1105,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
        struct powernow_k8_data *data;
        cpumask_t oldmask;
-       cpumask_of_cpu_ptr_declare(newmask);
        int rc;
 
        if (!cpu_online(pol->cpu))
@@ -1159,8 +1156,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 
        /* only run on specific CPU from here on */
        oldmask = current->cpus_allowed;
-       cpumask_of_cpu_ptr_next(newmask, pol->cpu);
-       set_cpus_allowed_ptr(current, newmask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
 
        if (smp_processor_id() != pol->cpu) {
                printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1182,7 +1178,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
        set_cpus_allowed_ptr(current, &oldmask);
 
        if (cpu_family == CPU_HW_PSTATE)
-               pol->cpus = *newmask;
+               pol->cpus = cpumask_of_cpu(pol->cpu);
        else
                pol->cpus = per_cpu(cpu_core_map, pol->cpu);
        data->available_cores = &(pol->cpus);
@@ -1248,7 +1244,6 @@ static unsigned int powernowk8_get (unsigned int cpu)
 {
        struct powernow_k8_data *data;
        cpumask_t oldmask = current->cpus_allowed;
-       cpumask_of_cpu_ptr(newmask, cpu);
        unsigned int khz = 0;
        unsigned int first;
 
@@ -1258,7 +1253,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
        if (!data)
                return -EINVAL;
 
-       set_cpus_allowed_ptr(current, newmask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        if (smp_processor_id() != cpu) {
                printk(KERN_ERR PFX
                        "limiting to CPU %d failed in powernowk8_get\n", cpu);
arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index ca2ac13b7af20b2221aaa1d883a7fb89821bf4d5..15e13c01cc3621ce3d4d610ab34225437eb8f413 100644 (file)
@@ -324,10 +324,9 @@ static unsigned int get_cur_freq(unsigned int cpu)
        unsigned l, h;
        unsigned clock_freq;
        cpumask_t saved_mask;
-       cpumask_of_cpu_ptr(new_mask, cpu);
 
        saved_mask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, new_mask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        if (smp_processor_id() != cpu)
                return 0;
 
@@ -585,15 +584,12 @@ static int centrino_target (struct cpufreq_policy *policy,
                 * Best effort undo..
                 */
 
-               if (!cpus_empty(*covered_cpus)) {
-                       cpumask_of_cpu_ptr_declare(new_mask);
-
+               if (!cpus_empty(*covered_cpus))
                        for_each_cpu_mask_nr(j, *covered_cpus) {
-                               cpumask_of_cpu_ptr_next(new_mask, j);
-                               set_cpus_allowed_ptr(current, new_mask);
+                               set_cpus_allowed_ptr(current,
+                                                    &cpumask_of_cpu(j));
                                wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
                        }
-               }
 
                tmp = freqs.new;
                freqs.new = freqs.old;
arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 2f3728dc24f60cc41db81e0066927a4ff15f409f..191f7263c61dce1b42864cd4709e249773339243 100644 (file)
@@ -244,8 +244,7 @@ static unsigned int _speedstep_get(const cpumask_t *cpus)
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-       cpumask_of_cpu_ptr(newmask, cpu);
-       return _speedstep_get(newmask);
+       return _speedstep_get(&cpumask_of_cpu(cpu));
 }
 
 /**
arch/x86/kernel/cpu/intel_cacheinfo.c
index 650d40f7912bea81023dc7f9698b8b10ced4d6fa..6b0a10b002f184406dac952f1370ac5c4eeb34e7 100644 (file)
@@ -516,7 +516,6 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
        unsigned long           j;
        int                     retval;
        cpumask_t               oldmask;
-       cpumask_of_cpu_ptr(newmask, cpu);
 
        if (num_cache_leaves == 0)
                return -ENOENT;
@@ -527,7 +526,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
                return -ENOMEM;
 
        oldmask = current->cpus_allowed;
-       retval = set_cpus_allowed_ptr(current, newmask);
+       retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        if (retval)
                goto out;
 
arch/x86/kernel/ldt.c
index 3fee2aa50f3f63d594304611090338d36f3fcc01..b68e21f06f4f84615baf2fbc47179fd437bea321 100644 (file)
@@ -62,12 +62,10 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 
        if (reload) {
 #ifdef CONFIG_SMP
-               cpumask_of_cpu_ptr_declare(mask);
-
                preempt_disable();
                load_LDT(pc);
-               cpumask_of_cpu_ptr_next(mask, smp_processor_id());
-               if (!cpus_equal(current->mm->cpu_vm_mask, *mask))
+               if (!cpus_equal(current->mm->cpu_vm_mask,
+                               cpumask_of_cpu(smp_processor_id())))
                        smp_call_function(flush_ldt, current->mm, 1);
                preempt_enable();
 #else
arch/x86/kernel/microcode.c
index 6994c751590ec14ea31b7418bfd4659722466b2e..652fa5c38ebe34c0846fc56f0a8e0eb89e5f3c6a 100644 (file)
@@ -388,7 +388,6 @@ static int do_microcode_update (void)
        void *new_mc = NULL;
        int cpu;
        cpumask_t old;
-       cpumask_of_cpu_ptr_declare(newmask);
 
        old = current->cpus_allowed;
 
@@ -405,8 +404,7 @@ static int do_microcode_update (void)
 
                        if (!uci->valid)
                                continue;
-                       cpumask_of_cpu_ptr_next(newmask, cpu);
-                       set_cpus_allowed_ptr(current, newmask);
+                       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
                        error = get_maching_microcode(new_mc, cpu);
                        if (error < 0)
                                goto out;
@@ -576,7 +574,6 @@ static int apply_microcode_check_cpu(int cpu)
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        cpumask_t old;
-       cpumask_of_cpu_ptr(newmask, cpu);
        unsigned int val[2];
        int err = 0;
 
@@ -585,7 +582,7 @@ static int apply_microcode_check_cpu(int cpu)
                return 0;
 
        old = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, newmask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 
        /* Check if the microcode we have in memory matches the CPU */
        if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
@@ -623,12 +620,11 @@ static int apply_microcode_check_cpu(int cpu)
 static void microcode_init_cpu(int cpu, int resume)
 {
        cpumask_t old;
-       cpumask_of_cpu_ptr(newmask, cpu);
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
        old = current->cpus_allowed;
 
-       set_cpus_allowed_ptr(current, newmask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        mutex_lock(&microcode_mutex);
        collect_cpu_info(cpu);
        if (uci->valid && system_state == SYSTEM_RUNNING && !resume)
@@ -661,13 +657,10 @@ static ssize_t reload_store(struct sys_device *dev,
        if (end == buf)
                return -EINVAL;
        if (val == 1) {
-               cpumask_t old;
-               cpumask_of_cpu_ptr(newmask, cpu);
-
-               old = current->cpus_allowed;
+               cpumask_t old = current->cpus_allowed;
 
                get_online_cpus();
-               set_cpus_allowed_ptr(current, newmask);
+               set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 
                mutex_lock(&microcode_mutex);
                if (uci->valid)
arch/x86/kernel/reboot.c
index 06a9f643817ee0310d1dce58da6a86f30b5c23cd..724adfc63cb9a7b60d6ee5c82efd919fe237f69c 100644 (file)
@@ -414,25 +414,20 @@ void native_machine_shutdown(void)
 
        /* The boot cpu is always logical cpu 0 */
        int reboot_cpu_id = 0;
-       cpumask_of_cpu_ptr(newmask, reboot_cpu_id);
 
 #ifdef CONFIG_X86_32
        /* See if there has been given a command line override */
        if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
-               cpu_online(reboot_cpu)) {
+               cpu_online(reboot_cpu))
                reboot_cpu_id = reboot_cpu;
-               cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
-       }
 #endif
 
        /* Make certain the cpu I'm about to reboot on is online */
-       if (!cpu_online(reboot_cpu_id)) {
+       if (!cpu_online(reboot_cpu_id))
                reboot_cpu_id = smp_processor_id();
-               cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
-       }
 
        /* Make certain I only run on the appropriate processor */
-       set_cpus_allowed_ptr(current, newmask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
 
        /* O.K Now that I'm on the appropriate processor,
         * stop all of the others.
arch/x86/kernel/setup_percpu.c
index f7745f94c0061236b8b224afd44d3a19eb63f3d1..76e305e064f96f9f7ece081665de18e7fb189e9a 100644 (file)
@@ -80,24 +80,6 @@ static void __init setup_per_cpu_maps(void)
 #endif
 }
 
-#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
-cpumask_t *cpumask_of_cpu_map __read_mostly;
-EXPORT_SYMBOL(cpumask_of_cpu_map);
-
-/* requires nr_cpu_ids to be initialized */
-static void __init setup_cpumask_of_cpu(void)
-{
-       int i;
-
-       /* alloc_bootmem zeroes memory */
-       cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
-       for (i = 0; i < nr_cpu_ids; i++)
-               cpu_set(i, cpumask_of_cpu_map[i]);
-}
-#else
-static inline void setup_cpumask_of_cpu(void) { }
-#endif
-
 #ifdef CONFIG_X86_32
 /*
  * Great future not-so-futuristic plan: make i386 and x86_64 do it
@@ -197,9 +179,6 @@ void __init setup_per_cpu_areas(void)
 
        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();
-
-       /* Setup cpumask_of_cpu map */
-       setup_cpumask_of_cpu();
 }
 
 #endif
arch/x86/kvm/Kconfig
index 8d45fabc5f3baf556b1da2558a42df183ab09e80..ce3251ce5504e478ff65f936608245be3df7312e 100644 (file)
@@ -21,6 +21,7 @@ config KVM
        tristate "Kernel-based Virtual Machine (KVM) support"
        depends on HAVE_KVM
        select PREEMPT_NOTIFIERS
+       select MMU_NOTIFIER
        select ANON_INODES
        ---help---
          Support hosting fully virtualized guest machines using hardware
arch/x86/mm/gup.c
index 3085f25b4355de827fee53d8851180a844777517..007bb06c750477b60c158f667f5d0563c2d77433 100644 (file)
@@ -223,14 +223,17 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages)
 {
        struct mm_struct *mm = current->mm;
-       unsigned long end = start + (nr_pages << PAGE_SHIFT);
-       unsigned long addr = start;
+       unsigned long addr, len, end;
        unsigned long next;
        pgd_t *pgdp;
        int nr = 0;
 
+       start &= PAGE_MASK;
+       addr = start;
+       len = (unsigned long) nr_pages << PAGE_SHIFT;
+       end = start + len;
        if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
-                                       start, nr_pages*PAGE_SIZE)))
+                                       start, len)))
                goto slow_irqon;
 
        /*
drivers/acpi/processor_throttling.c
index a2c3f9cfa5490fe5fef15c4c0e48676ad22f837a..a56fc6c4394bb1dab26cb06527d3f89239333683 100644 (file)
@@ -827,7 +827,6 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 static int acpi_processor_get_throttling(struct acpi_processor *pr)
 {
        cpumask_t saved_mask;
-       cpumask_of_cpu_ptr_declare(new_mask);
        int ret;
 
        if (!pr)
@@ -839,8 +838,7 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
         * Migrate task to the cpu pointed by pr.
         */
        saved_mask = current->cpus_allowed;
-       cpumask_of_cpu_ptr_next(new_mask, pr->id);
-       set_cpus_allowed_ptr(current, new_mask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
        ret = pr->throttling.acpi_processor_get_throttling(pr);
        /* restore the previous state */
        set_cpus_allowed_ptr(current, &saved_mask);
@@ -989,7 +987,6 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 {
        cpumask_t saved_mask;
-       cpumask_of_cpu_ptr_declare(new_mask);
        int ret = 0;
        unsigned int i;
        struct acpi_processor *match_pr;
@@ -1028,8 +1025,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
         * it can be called only for the cpu pointed by pr.
         */
        if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
-               cpumask_of_cpu_ptr_next(new_mask, pr->id);
-               set_cpus_allowed_ptr(current, new_mask);
+               set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
                ret = p_throttling->acpi_processor_set_throttling(pr,
                                                t_state.target_state);
        } else {
@@ -1060,8 +1056,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
                                continue;
                        }
                        t_state.cpu = i;
-                       cpumask_of_cpu_ptr_next(new_mask, i);
-                       set_cpus_allowed_ptr(current, new_mask);
+                       set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
                        ret = match_pr->throttling.
                                acpi_processor_set_throttling(
                                match_pr, t_state.target_state);
drivers/char/pcmcia/ipwireless/hardware.c
index 7d500f82195a08b22d1ab550ab95bb43e541dd7a..4c1820cad71230045624f002368c6c03efbd0061 100644 (file)
@@ -568,7 +568,7 @@ static struct ipw_rx_packet *pool_allocate(struct ipw_hardware *hw,
                        list_del(&packet->queue);
                } else {
                        const int min_capacity =
-                               ipwireless_ppp_mru(hw->network + 2);
+                               ipwireless_ppp_mru(hw->network) + 2;
                        int new_capacity;
 
                        spin_unlock_irqrestore(&hw->lock, flags);
drivers/firmware/dcdbas.c
index c66817e7717b8f6b42e364d85d687726983afc24..50a071f1c945c0c3d8079ffdf713c577315a8415 100644 (file)
@@ -245,7 +245,6 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
 static int smi_request(struct smi_cmd *smi_cmd)
 {
        cpumask_t old_mask;
-       cpumask_of_cpu_ptr(new_mask, 0);
        int ret = 0;
 
        if (smi_cmd->magic != SMI_CMD_MAGIC) {
@@ -256,7 +255,7 @@ static int smi_request(struct smi_cmd *smi_cmd)
 
        /* SMI requires CPU 0 */
        old_mask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, new_mask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
        if (smp_processor_id() != 0) {
                dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
                        __func__);
drivers/misc/sgi-xp/xpc_main.c
index 579b01ff82d4ed73580365334f1790b29e70805d..c3b4227f48a5bbd78e5c6f85d54c87a8d68d403e 100644 (file)
@@ -229,11 +229,10 @@ xpc_hb_checker(void *ignore)
        int last_IRQ_count = 0;
        int new_IRQ_count;
        int force_IRQ = 0;
-       cpumask_of_cpu_ptr(cpumask, XPC_HB_CHECK_CPU);
 
        /* this thread was marked active by xpc_hb_init() */
 
-       set_cpus_allowed_ptr(current, cpumask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU));
 
        /* set our heartbeating to other partitions into motion */
        xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
drivers/scsi/qla2xxx/qla_attr.c
index 7a4409ab30eaaab0b751226f4e0713b5469166d5..a319a20ed440748da301b5a268941d5e57d00294 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/kthread.h>
 #include <linux/vmalloc.h>
+#include <linux/delay.h>
 
 static int qla24xx_vport_disable(struct fc_vport *, bool);
 
drivers/spi/mpc52xx_psc_spi.c
index 604e5f0a2d951d5ae347a380733c342848198792..25eda71f4bf4c29c442633c5ff2914dad2839e5a 100644 (file)
@@ -148,7 +148,6 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi,
        unsigned rfalarm;
        unsigned send_at_once = MPC52xx_PSC_BUFSIZE;
        unsigned recv_at_once;
-       unsigned bpw = mps->bits_per_word / 8;
 
        if (!t->tx_buf && !t->rx_buf && t->len)
                return -EINVAL;
@@ -164,22 +163,15 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi,
                }
 
                dev_dbg(&spi->dev, "send %d bytes...\n", send_at_once);
-               if (tx_buf) {
-                       for (; send_at_once; sb++, send_at_once--) {
-                               /* set EOF flag */
-                               if (mps->bits_per_word
-                                               && (sb + 1) % bpw == 0)
-                                       out_8(&psc->ircr2, 0x01);
+               for (; send_at_once; sb++, send_at_once--) {
+                       /* set EOF flag before the last word is sent */
+                       if (send_at_once == 1)
+                               out_8(&psc->ircr2, 0x01);
+
+                       if (tx_buf)
                                out_8(&psc->mpc52xx_psc_buffer_8, tx_buf[sb]);
-                       }
-               } else {
-                       for (; send_at_once; sb++, send_at_once--) {
-                               /* set EOF flag */
-                               if (mps->bits_per_word
-                                               && ((sb + 1) % bpw) == 0)
-                                       out_8(&psc->ircr2, 0x01);
+                       else
                                out_8(&psc->mpc52xx_psc_buffer_8, 0);
-                       }
                }
 
 
drivers/spi/spi_s3c24xx.c
index 0885cc357a371552dc2190a6da682f98a652abdb..1c643c9e1f15ff793e5d7c140e9f93ee96320544 100644 (file)
@@ -270,6 +270,7 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev)
        /* setup the master state. */
 
        master->num_chipselect = hw->pdata->num_cs;
+       master->bus_num = pdata->bus_num;
 
        /* setup the state for the bitbang driver */
 
drivers/video/sh7760fb.c
index 4d0e28c5790b5eb35e34e439703a1ee019340a33..8d0212da4514eb3e98a614b7139f48391f00f779 100644 (file)
@@ -152,6 +152,7 @@ static int sh7760fb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
                col |= ((*g) & 0xff) << 8;
                col |= ((*b) & 0xff);
                col &= SH7760FB_PALETTE_MASK;
+               iowrite32(col, par->base + LDPR(s));
 
                if (s < 16)
                        ((u32 *) (info->pseudo_palette))[s] = s;
fs/bio-integrity.c
index 63e2ee63058de4edb3e5d5036ade3b9a79e92ffa..c3e174b35fe6a179baac8ef098b465f0ab51f515 100644 (file)
@@ -705,7 +705,6 @@ void __init bio_integrity_init_slab(void)
        bio_integrity_slab = KMEM_CACHE(bio_integrity_payload,
                                        SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 }
-EXPORT_SYMBOL(bio_integrity_init_slab);
 
 static int __init integrity_init(void)
 {
fs/buffer.c
index f95805019639a54f993b3271ed3c8b8a45c478ad..ca12a6bb82b10f6a45da86f3019f1e7d692dfc61 100644 (file)
@@ -2095,6 +2095,52 @@ int generic_write_end(struct file *file, struct address_space *mapping,
 }
 EXPORT_SYMBOL(generic_write_end);
 
+/*
+ * block_is_partially_uptodate checks whether buffers within a page are
+ * uptodate or not.
+ *
+ * Returns true if all buffers which correspond to a file portion
+ * we want to read are uptodate.
+ */
+int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
+                                       unsigned long from)
+{
+       struct inode *inode = page->mapping->host;
+       unsigned block_start, block_end, blocksize;
+       unsigned to;
+       struct buffer_head *bh, *head;
+       int ret = 1;
+
+       if (!page_has_buffers(page))
+               return 0;
+
+       blocksize = 1 << inode->i_blkbits;
+       to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
+       to = from + to;
+       if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
+               return 0;
+
+       head = page_buffers(page);
+       bh = head;
+       block_start = 0;
+       do {
+               block_end = block_start + blocksize;
+               if (block_end > from && block_start < to) {
+                       if (!buffer_uptodate(bh)) {
+                               ret = 0;
+                               break;
+                       }
+                       if (block_end >= to)
+                               break;
+               }
+               block_start = block_end;
+               bh = bh->b_this_page;
+       } while (bh != head);
+
+       return ret;
+}
+EXPORT_SYMBOL(block_is_partially_uptodate);
+
 /*
  * Generic "read page" function for block devices that have the normal
  * get_block functionality. This is most of the block device filesystems.
fs/ecryptfs/crypto.c
index 7b99917ffadc05e07dd480e54aaec385c378298c..06db79d05c1262a8de76fc9dcb820b910b464163 100644 (file)
@@ -475,8 +475,8 @@ int ecryptfs_encrypt_page(struct page *page)
 {
        struct inode *ecryptfs_inode;
        struct ecryptfs_crypt_stat *crypt_stat;
-       char *enc_extent_virt = NULL;
-       struct page *enc_extent_page;
+       char *enc_extent_virt;
+       struct page *enc_extent_page = NULL;
        loff_t extent_offset;
        int rc = 0;
 
@@ -492,14 +492,14 @@ int ecryptfs_encrypt_page(struct page *page)
                               page->index);
                goto out;
        }
-       enc_extent_virt = kmalloc(PAGE_CACHE_SIZE, GFP_USER);
-       if (!enc_extent_virt) {
+       enc_extent_page = alloc_page(GFP_USER);
+       if (!enc_extent_page) {
                rc = -ENOMEM;
                ecryptfs_printk(KERN_ERR, "Error allocating memory for "
                                "encrypted extent\n");
                goto out;
        }
-       enc_extent_page = virt_to_page(enc_extent_virt);
+       enc_extent_virt = kmap(enc_extent_page);
        for (extent_offset = 0;
             extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size);
             extent_offset++) {
@@ -527,7 +527,10 @@ int ecryptfs_encrypt_page(struct page *page)
                }
        }
 out:
-       kfree(enc_extent_virt);
+       if (enc_extent_page) {
+               kunmap(enc_extent_page);
+               __free_page(enc_extent_page);
+       }
        return rc;
 }
 
@@ -609,8 +612,8 @@ int ecryptfs_decrypt_page(struct page *page)
 {
        struct inode *ecryptfs_inode;
        struct ecryptfs_crypt_stat *crypt_stat;
-       char *enc_extent_virt = NULL;
-       struct page *enc_extent_page;
+       char *enc_extent_virt;
+       struct page *enc_extent_page = NULL;
        unsigned long extent_offset;
        int rc = 0;
 
@@ -627,14 +630,14 @@ int ecryptfs_decrypt_page(struct page *page)
                               page->index);
                goto out;
        }
-       enc_extent_virt = kmalloc(PAGE_CACHE_SIZE, GFP_USER);
-       if (!enc_extent_virt) {
+       enc_extent_page = alloc_page(GFP_USER);
+       if (!enc_extent_page) {
                rc = -ENOMEM;
                ecryptfs_printk(KERN_ERR, "Error allocating memory for "
                                "encrypted extent\n");
                goto out;
        }
-       enc_extent_page = virt_to_page(enc_extent_virt);
+       enc_extent_virt = kmap(enc_extent_page);
        for (extent_offset = 0;
             extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size);
             extent_offset++) {
@@ -662,7 +665,10 @@ int ecryptfs_decrypt_page(struct page *page)
                }
        }
 out:
-       kfree(enc_extent_virt);
+       if (enc_extent_page) {
+               kunmap(enc_extent_page);
+               __free_page(enc_extent_page);
+       }
        return rc;
 }
 
index 9696bbf0f0b1c1b1bd4578fdbfac331fa62ffc48..32993beecbe91a10670cc4f5a9a72a5faf813c56 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -32,6 +32,7 @@
 #include <linux/swap.h>
 #include <linux/string.h>
 #include <linux/init.h>
+#include <linux/pagemap.h>
 #include <linux/highmem.h>
 #include <linux/spinlock.h>
 #include <linux/key.h>
fs/ext2/inode.c
index 384fc0d1dd743b080c0fc1fa7f887663d8ed8d8e..991d6dfeb51f078bfe746e6dede400e7e2525817 100644 (file)
@@ -791,6 +791,7 @@ const struct address_space_operations ext2_aops = {
        .direct_IO              = ext2_direct_IO,
        .writepages             = ext2_writepages,
        .migratepage            = buffer_migrate_page,
+       .is_partially_uptodate  = block_is_partially_uptodate,
 };
 
 const struct address_space_operations ext2_aops_xip = {
fs/ext3/inode.c
index 3bf07d70b914063b043342710833f5745456915e..507d8689b111662b403c874e2c269417635f5555 100644 (file)
@@ -1767,44 +1767,47 @@ static int ext3_journalled_set_page_dirty(struct page *page)
 }
 
 static const struct address_space_operations ext3_ordered_aops = {
-       .readpage       = ext3_readpage,
-       .readpages      = ext3_readpages,
-       .writepage      = ext3_ordered_writepage,
-       .sync_page      = block_sync_page,
-       .write_begin    = ext3_write_begin,
-       .write_end      = ext3_ordered_write_end,
-       .bmap           = ext3_bmap,
-       .invalidatepage = ext3_invalidatepage,
-       .releasepage    = ext3_releasepage,
-       .direct_IO      = ext3_direct_IO,
-       .migratepage    = buffer_migrate_page,
+       .readpage               = ext3_readpage,
+       .readpages              = ext3_readpages,
+       .writepage              = ext3_ordered_writepage,
+       .sync_page              = block_sync_page,
+       .write_begin            = ext3_write_begin,
+       .write_end              = ext3_ordered_write_end,
+       .bmap                   = ext3_bmap,
+       .invalidatepage         = ext3_invalidatepage,
+       .releasepage            = ext3_releasepage,
+       .direct_IO              = ext3_direct_IO,
+       .migratepage            = buffer_migrate_page,
+       .is_partially_uptodate  = block_is_partially_uptodate,
 };
 
 static const struct address_space_operations ext3_writeback_aops = {
-       .readpage       = ext3_readpage,
-       .readpages      = ext3_readpages,
-       .writepage      = ext3_writeback_writepage,
-       .sync_page      = block_sync_page,
-       .write_begin    = ext3_write_begin,
-       .write_end      = ext3_writeback_write_end,
-       .bmap           = ext3_bmap,
-       .invalidatepage = ext3_invalidatepage,
-       .releasepage    = ext3_releasepage,
-       .direct_IO      = ext3_direct_IO,
-       .migratepage    = buffer_migrate_page,
+       .readpage               = ext3_readpage,
+       .readpages              = ext3_readpages,
+       .writepage              = ext3_writeback_writepage,
+       .sync_page              = block_sync_page,
+       .write_begin            = ext3_write_begin,
+       .write_end              = ext3_writeback_write_end,
+       .bmap                   = ext3_bmap,
+       .invalidatepage         = ext3_invalidatepage,
+       .releasepage            = ext3_releasepage,
+       .direct_IO              = ext3_direct_IO,
+       .migratepage            = buffer_migrate_page,
+       .is_partially_uptodate  = block_is_partially_uptodate,
 };
 
 static const struct address_space_operations ext3_journalled_aops = {
-       .readpage       = ext3_readpage,
-       .readpages      = ext3_readpages,
-       .writepage      = ext3_journalled_writepage,
-       .sync_page      = block_sync_page,
-       .write_begin    = ext3_write_begin,
-       .write_end      = ext3_journalled_write_end,
-       .set_page_dirty = ext3_journalled_set_page_dirty,
-       .bmap           = ext3_bmap,
-       .invalidatepage = ext3_invalidatepage,
-       .releasepage    = ext3_releasepage,
+       .readpage               = ext3_readpage,
+       .readpages              = ext3_readpages,
+       .writepage              = ext3_journalled_writepage,
+       .sync_page              = block_sync_page,
+       .write_begin            = ext3_write_begin,
+       .write_end              = ext3_journalled_write_end,
+       .set_page_dirty         = ext3_journalled_set_page_dirty,
+       .bmap                   = ext3_bmap,
+       .invalidatepage         = ext3_invalidatepage,
+       .releasepage            = ext3_releasepage,
+       .is_partially_uptodate  = block_is_partially_uptodate,
 };
 
 void ext3_set_aops(struct inode *inode)
fs/ext4/inode.c
index 8ca2763df091051fea3e02ae7bba35a1e82e21d9..9843b046c2358974bff4bea0449583c688c27358 100644 (file)
@@ -2806,59 +2806,63 @@ static int ext4_journalled_set_page_dirty(struct page *page)
 }
 
 static const struct address_space_operations ext4_ordered_aops = {
-       .readpage       = ext4_readpage,
-       .readpages      = ext4_readpages,
-       .writepage      = ext4_normal_writepage,
-       .sync_page      = block_sync_page,
-       .write_begin    = ext4_write_begin,
-       .write_end      = ext4_ordered_write_end,
-       .bmap           = ext4_bmap,
-       .invalidatepage = ext4_invalidatepage,
-       .releasepage    = ext4_releasepage,
-       .direct_IO      = ext4_direct_IO,
-       .migratepage    = buffer_migrate_page,
+       .readpage               = ext4_readpage,
+       .readpages              = ext4_readpages,
+       .writepage              = ext4_normal_writepage,
+       .sync_page              = block_sync_page,
+       .write_begin            = ext4_write_begin,
+       .write_end              = ext4_ordered_write_end,
+       .bmap                   = ext4_bmap,
+       .invalidatepage         = ext4_invalidatepage,
+       .releasepage            = ext4_releasepage,
+       .direct_IO              = ext4_direct_IO,
+       .migratepage            = buffer_migrate_page,
+       .is_partially_uptodate  = block_is_partially_uptodate,
 };
 
 static const struct address_space_operations ext4_writeback_aops = {
-       .readpage       = ext4_readpage,
-       .readpages      = ext4_readpages,
-       .writepage      = ext4_normal_writepage,
-       .sync_page      = block_sync_page,
-       .write_begin    = ext4_write_begin,
-       .write_end      = ext4_writeback_write_end,
-       .bmap           = ext4_bmap,
-       .invalidatepage = ext4_invalidatepage,
-       .releasepage    = ext4_releasepage,
-       .direct_IO      = ext4_direct_IO,
-       .migratepage    = buffer_migrate_page,
+       .readpage               = ext4_readpage,
+       .readpages              = ext4_readpages,
+       .writepage              = ext4_normal_writepage,
+       .sync_page              = block_sync_page,
+       .write_begin            = ext4_write_begin,
+       .write_end              = ext4_writeback_write_end,
+       .bmap                   = ext4_bmap,
+       .invalidatepage         = ext4_invalidatepage,
+       .releasepage            = ext4_releasepage,
+       .direct_IO              = ext4_direct_IO,
+       .migratepage            = buffer_migrate_page,
+       .is_partially_uptodate  = block_is_partially_uptodate,
 };
 
 static const struct address_space_operations ext4_journalled_aops = {
-       .readpage       = ext4_readpage,
-       .readpages      = ext4_readpages,
-       .writepage      = ext4_journalled_writepage,
-       .sync_page      = block_sync_page,
-       .write_begin    = ext4_write_begin,
-       .write_end      = ext4_journalled_write_end,
-       .set_page_dirty = ext4_journalled_set_page_dirty,
-       .bmap           = ext4_bmap,
-       .invalidatepage = ext4_invalidatepage,
-       .releasepage    = ext4_releasepage,
+       .readpage               = ext4_readpage,
+       .readpages              = ext4_readpages,
+       .writepage              = ext4_journalled_writepage,
+       .sync_page              = block_sync_page,
+       .write_begin            = ext4_write_begin,
+       .write_end              = ext4_journalled_write_end,
+       .set_page_dirty         = ext4_journalled_set_page_dirty,
+       .bmap                   = ext4_bmap,
+       .invalidatepage         = ext4_invalidatepage,
+       .releasepage            = ext4_releasepage,
+       .is_partially_uptodate  = block_is_partially_uptodate,
 };
 
 static const struct address_space_operations ext4_da_aops = {
-       .readpage       = ext4_readpage,
-       .readpages      = ext4_readpages,
-       .writepage      = ext4_da_writepage,
-       .writepages     = ext4_da_writepages,
-       .sync_page      = block_sync_page,
-       .write_begin    = ext4_da_write_begin,
-       .write_end      = ext4_da_write_end,
-       .bmap           = ext4_bmap,
-       .invalidatepage = ext4_da_invalidatepage,
-       .releasepage    = ext4_releasepage,
-       .direct_IO      = ext4_direct_IO,
-       .migratepage    = buffer_migrate_page,
+       .readpage               = ext4_readpage,
+       .readpages              = ext4_readpages,
+       .writepage              = ext4_da_writepage,
+       .writepages             = ext4_da_writepages,
+       .sync_page              = block_sync_page,
+       .write_begin            = ext4_da_write_begin,
+       .write_end              = ext4_da_write_end,
+       .bmap                   = ext4_bmap,
+       .invalidatepage         = ext4_da_invalidatepage,
+       .releasepage            = ext4_releasepage,
+       .direct_IO              = ext4_direct_IO,
+       .migratepage            = buffer_migrate_page,
+       .is_partially_uptodate  = block_is_partially_uptodate,
 };
 
 void ext4_set_aops(struct inode *inode)
include/asm-arm/arch-s3c2410/spi.h
index 352d33860b6383a6584011edb5239b23b3ebcd1b..442169887d3b0d4e810df46059b2c65ca76df64d 100644 (file)
@@ -16,6 +16,7 @@
 struct s3c2410_spi_info {
        unsigned long            pin_cs;        /* simple gpio cs */
        unsigned int             num_cs;        /* total chipselects */
+       int                      bus_num;       /* bus number to use. */
 
        void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol);
 };
include/asm-generic/gpio.h
index c764a8fcb058e33fb703e56af17eaa7fb966d7ea..0f99ad38b012f93ef10ccb5b0f9105b4e7f8daab 100644 (file)
@@ -2,6 +2,7 @@
 #define _ASM_GENERIC_GPIO_H
 
 #include <linux/types.h>
+#include <linux/errno.h>
 
 #ifdef CONFIG_GPIOLIB
 
include/asm-generic/pgtable-nopmd.h
index 087325ede76cf31e4a9798664d36913928f058b3..a7cdc48e8b78703156456f8eb2472251e66c8d78 100644 (file)
@@ -5,6 +5,8 @@
 
 #include <asm-generic/pgtable-nopud.h>
 
+struct mm_struct;
+
 #define __PAGETABLE_PMD_FOLDED
 
 /*
@@ -54,7 +56,9 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
  * inside the pud, so has no extra memory associated with it.
  */
 #define pmd_alloc_one(mm, address)             NULL
-#define pmd_free(mm, x)                                do { } while (0)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+}
 #define __pmd_free_tlb(tlb, x)                 do { } while (0)
 
 #undef  pmd_addr_end
include/linux/buffer_head.h
index 82aa36c53ea7a38e413a384a646f241ef1ebaf1c..50cfe8ceb478508c3fd587fbe8109f0ad146259e 100644 (file)
@@ -205,6 +205,8 @@ void block_invalidatepage(struct page *page, unsigned long offset);
 int block_write_full_page(struct page *page, get_block_t *get_block,
                                struct writeback_control *wbc);
 int block_read_full_page(struct page*, get_block_t*);
+int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
+                               unsigned long from);
 int block_write_begin(struct file *, struct address_space *,
                                loff_t, unsigned, unsigned,
                                struct page **, void **, get_block_t*);
include/linux/cpumask.h
index 1b5c98e7fef79fffd94805cb1d8cfd9fdadb39f5..96d0509fb8d85a6ce9ba52842080df475fc1aef4 100644 (file)
  * int next_cpu_nr(cpu, mask)          Next cpu past 'cpu', or nr_cpu_ids
  *
  * cpumask_t cpumask_of_cpu(cpu)       Return cpumask with bit 'cpu' set
- *ifdef CONFIG_HAS_CPUMASK_OF_CPU
- * cpumask_of_cpu_ptr_declare(v)       Declares cpumask_t *v
- * cpumask_of_cpu_ptr_next(v, cpu)     Sets v = &cpumask_of_cpu_map[cpu]
- * cpumask_of_cpu_ptr(v, cpu)          Combines above two operations
- *else
- * cpumask_of_cpu_ptr_declare(v)       Declares cpumask_t _v and *v = &_v
- * cpumask_of_cpu_ptr_next(v, cpu)     Sets _v = cpumask_of_cpu(cpu)
- * cpumask_of_cpu_ptr(v, cpu)          Combines above two operations
- *endif
+ *                                     (can be used as an lvalue)
  * CPU_MASK_ALL                                Initializer - all bits set
  * CPU_MASK_NONE                       Initializer - no bits set
  * unsigned long *cpus_addr(mask)      Array of unsigned long's in mask
@@ -273,37 +265,30 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
        bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
 }
 
+/*
+ * Special-case data structure for "single bit set only" constant CPU masks.
+ *
+ * We pre-generate all the 64 (or 32) possible bit positions, with enough
+ * padding to the left and the right, and return the constant pointer
+ * appropriately offset.
+ */
+extern const unsigned long
+       cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
+
+static inline const cpumask_t *get_cpu_mask(unsigned int cpu)
+{
+       const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
+       p -= cpu / BITS_PER_LONG;
+       return (const cpumask_t *)p;
+}
+
+/*
+ * In cases where we take the address of the cpumask immediately,
+ * gcc optimizes it out (it's a constant) and there's no huge stack
+ * variable created:
+ */
+#define cpumask_of_cpu(cpu) ({ *get_cpu_mask(cpu); })
 
-#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
-extern cpumask_t *cpumask_of_cpu_map;
-#define cpumask_of_cpu(cpu)    (cpumask_of_cpu_map[cpu])
-#define        cpumask_of_cpu_ptr(v, cpu)                                      \
-               const cpumask_t *v = &cpumask_of_cpu(cpu)
-#define        cpumask_of_cpu_ptr_declare(v)                                   \
-               const cpumask_t *v
-#define cpumask_of_cpu_ptr_next(v, cpu)                                        \
-                                       v = &cpumask_of_cpu(cpu)
-#else
-#define cpumask_of_cpu(cpu)                                            \
-({                                                                     \
-       typeof(_unused_cpumask_arg_) m;                                 \
-       if (sizeof(m) == sizeof(unsigned long)) {                       \
-               m.bits[0] = 1UL<<(cpu);                                 \
-       } else {                                                        \
-               cpus_clear(m);                                          \
-               cpu_set((cpu), m);                                      \
-       }                                                               \
-       m;                                                              \
-})
-#define        cpumask_of_cpu_ptr(v, cpu)                                      \
-               cpumask_t _##v = cpumask_of_cpu(cpu);                   \
-               const cpumask_t *v = &_##v
-#define        cpumask_of_cpu_ptr_declare(v)                                   \
-               cpumask_t _##v;                                         \
-               const cpumask_t *v = &_##v
-#define cpumask_of_cpu_ptr_next(v, cpu)                                        \
-                                       _##v = cpumask_of_cpu(cpu)
-#endif
 
 #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
 
include/linux/fs.h
index 8252b045e62489523452bdec777d8bd630e20cfc..580b513668feffbb32f9d161113e10b17718dfe5 100644 (file)
@@ -443,6 +443,27 @@ static inline size_t iov_iter_count(struct iov_iter *i)
        return i->count;
 }
 
+/*
+ * "descriptor" for what we're up to with a read.
+ * This allows us to use the same read code yet
+ * have multiple different users of the data that
+ * we read from a file.
+ *
+ * The simplest case just copies the data to user
+ * mode.
+ */
+typedef struct {
+       size_t written;
+       size_t count;
+       union {
+               char __user *buf;
+               void *data;
+       } arg;
+       int error;
+} read_descriptor_t;
+
+typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
+               unsigned long, unsigned long);
 
 struct address_space_operations {
        int (*writepage)(struct page *page, struct writeback_control *wbc);
@@ -484,6 +505,8 @@ struct address_space_operations {
        int (*migratepage) (struct address_space *,
                        struct page *, struct page *);
        int (*launder_page) (struct page *);
+       int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
+                                       unsigned long);
 };
 
 /*
@@ -1198,27 +1221,6 @@ struct block_device_operations {
        struct module *owner;
 };
 
-/*
- * "descriptor" for what we're up to with a read.
- * This allows us to use the same read code yet
- * have multiple different users of the data that
- * we read from a file.
- *
- * The simplest case just copies the data to user
- * mode.
- */
-typedef struct {
-       size_t written;
-       size_t count;
-       union {
-               char __user * buf;
-               void *data;
-       } arg;
-       int error;
-} read_descriptor_t;
-
-typedef int (*read_actor_t)(read_descriptor_t *, struct page *, unsigned long, unsigned long);
-
 /* These macros are for out of kernel modules to test that
  * the kernel supports the unlocked_ioctl and compat_ioctl
  * fields in struct file_operations. */
include/linux/mm.h
index 6e695eaab4ceebbf3af49563b3c3d3d587d535a6..866a3dbe5c75b98659ecdd7d2514f63fb9de87cc 100644 (file)
@@ -1104,6 +1104,9 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
        unsigned long addr, unsigned long len, pgoff_t pgoff);
 extern void exit_mmap(struct mm_struct *);
 
+extern int mm_take_all_locks(struct mm_struct *mm);
+extern void mm_drop_all_locks(struct mm_struct *mm);
+
 #ifdef CONFIG_PROC_FS
 /* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
 extern void added_exe_file_vma(struct mm_struct *mm);
include/linux/mm_types.h
index 746f975b58ef3a4843b00622dbfa1958f1000074..386edbe2cb4e98b1a01767e15978b02df896d895 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/rbtree.h>
 #include <linux/rwsem.h>
 #include <linux/completion.h>
+#include <linux/cpumask.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
 
@@ -253,6 +254,9 @@ struct mm_struct {
        struct file *exe_file;
        unsigned long num_exe_file_vmas;
 #endif
+#ifdef CONFIG_MMU_NOTIFIER
+       struct mmu_notifier_mm *mmu_notifier_mm;
+#endif
 };
 
 #endif /* _LINUX_MM_TYPES_H */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
new file mode 100644 (file)
index 0000000..b77486d
--- /dev/null
@@ -0,0 +1,279 @@
+#ifndef _LINUX_MMU_NOTIFIER_H
+#define _LINUX_MMU_NOTIFIER_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/mm_types.h>
+
+struct mmu_notifier;
+struct mmu_notifier_ops;
+
+#ifdef CONFIG_MMU_NOTIFIER
+
+/*
+ * The mmu notifier_mm structure is allocated and installed in
+ * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
+ * critical section and it's released only when mm_count reaches zero
+ * in mmdrop().
+ */
+struct mmu_notifier_mm {
+       /* all mmu notifiers registerd in this mm are queued in this list */
+       struct hlist_head list;
+       /* to serialize the list modifications and hlist_unhashed */
+       spinlock_t lock;
+};
+
+struct mmu_notifier_ops {
+       /*
+        * Called either by mmu_notifier_unregister or when the mm is
+        * being destroyed by exit_mmap, always before all pages are
+        * freed. This can run concurrently with other mmu notifier
+        * methods (the ones invoked outside the mm context) and it
+        * should tear down all secondary mmu mappings and freeze the
+        * secondary mmu. If this method isn't implemented you've to
+        * be sure that nothing could possibly write to the pages
+        * through the secondary mmu by the time the last thread with
+        * tsk->mm == mm exits.
+        *
+        * As side note: the pages freed after ->release returns could
+        * be immediately reallocated by the gart at an alias physical
+        * address with a different cache model, so if ->release isn't
+        * implemented because all _software_ driven memory accesses
+        * through the secondary mmu are terminated by the time the
+        * last thread of this mm quits, you've also to be sure that
+        * speculative _hardware_ operations can't allocate dirty
+        * cachelines in the cpu that could not be snooped and made
+        * coherent with the other read and write operations happening
+        * through the gart alias address, so leading to memory
+        * corruption.
+        */
+       void (*release)(struct mmu_notifier *mn,
+                       struct mm_struct *mm);
+
+       /*
+        * clear_flush_young is called after the VM is
+        * test-and-clearing the young/accessed bitflag in the
+        * pte. This way the VM will provide proper aging to the
+        * accesses to the page through the secondary MMUs and not
+        * only to the ones through the Linux pte.
+        */
+       int (*clear_flush_young)(struct mmu_notifier *mn,
+                                struct mm_struct *mm,
+                                unsigned long address);
+
+       /*
+        * Before this is invoked any secondary MMU is still ok to
+        * read/write to the page previously pointed to by the Linux
+        * pte because the page hasn't been freed yet and it won't be
+        * freed until this returns. If required set_page_dirty has to
+        * be called internally to this method.
+        */
+       void (*invalidate_page)(struct mmu_notifier *mn,
+                               struct mm_struct *mm,
+                               unsigned long address);
+
+       /*
+        * invalidate_range_start() and invalidate_range_end() must be
+        * paired and are called only when the mmap_sem and/or the
+        * locks protecting the reverse maps are held. The subsystem
+        * must guarantee that no additional references are taken to
+        * the pages in the range established between the call to
+        * invalidate_range_start() and the matching call to
+        * invalidate_range_end().
+        *
+        * Invalidation of multiple concurrent ranges may be
+        * optionally permitted by the driver. Either way the
+        * establishment of sptes is forbidden in the range passed to
+        * invalidate_range_begin/end for the whole duration of the
+        * invalidate_range_begin/end critical section.
+        *
+        * invalidate_range_start() is called when all pages in the
+        * range are still mapped and have at least a refcount of one.
+        *
+        * invalidate_range_end() is called when all pages in the
+        * range have been unmapped and the pages have been freed by
+        * the VM.
+        *
+        * The VM will remove the page table entries and potentially
+        * the page between invalidate_range_start() and
+        * invalidate_range_end(). If the page must not be freed
+        * because of pending I/O or other circumstances then the
+        * invalidate_range_start() callback (or the initial mapping
+        * by the driver) must make sure that the refcount is kept
+        * elevated.
+        *
+        * If the driver increases the refcount when the pages are
+        * initially mapped into an address space then either
+        * invalidate_range_start() or invalidate_range_end() may
+        * decrease the refcount. If the refcount is decreased on
+        * invalidate_range_start() then the VM can free pages as page
+        * table entries are removed.  If the refcount is only
+        * droppped on invalidate_range_end() then the driver itself
+        * will drop the last refcount but it must take care to flush
+        * any secondary tlb before doing the final free on the
+        * page. Pages will no longer be referenced by the linux
+        * address space but may still be referenced by sptes until
+        * the last refcount is dropped.
+        */
+       void (*invalidate_range_start)(struct mmu_notifier *mn,
+                                      struct mm_struct *mm,
+                                      unsigned long start, unsigned long end);
+       void (*invalidate_range_end)(struct mmu_notifier *mn,
+                                    struct mm_struct *mm,
+                                    unsigned long start, unsigned long end);
+};
+
+/*
+ * The notifier chains are protected by mmap_sem and/or the reverse map
+ * locks. Notifier chains are only changed when all reverse maps and
+ * the mmap_sem locks are taken.
+ *
+ * Therefore notifier chains can only be traversed when either
+ *
+ * 1. mmap_sem is held.
+ * 2. One of the reverse map locks is held (i_mmap_lock or anon_vma->lock).
+ * 3. No other concurrent thread can access the list (release)
+ */
+struct mmu_notifier {
+       struct hlist_node hlist;
+       const struct mmu_notifier_ops *ops;
+};
+
+static inline int mm_has_notifiers(struct mm_struct *mm)
+{
+       return unlikely(mm->mmu_notifier_mm);
+}
+
+extern int mmu_notifier_register(struct mmu_notifier *mn,
+                                struct mm_struct *mm);
+extern int __mmu_notifier_register(struct mmu_notifier *mn,
+                                  struct mm_struct *mm);
+extern void mmu_notifier_unregister(struct mmu_notifier *mn,
+                                   struct mm_struct *mm);
+extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
+extern void __mmu_notifier_release(struct mm_struct *mm);
+extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
+                                         unsigned long address);
+extern void __mmu_notifier_invalidate_page(struct mm_struct *mm,
+                                         unsigned long address);
+extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+                                 unsigned long start, unsigned long end);
+extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+                                 unsigned long start, unsigned long end);
+
+static inline void mmu_notifier_release(struct mm_struct *mm)
+{
+       if (mm_has_notifiers(mm))
+               __mmu_notifier_release(mm);
+}
+
+static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
+                                         unsigned long address)
+{
+       if (mm_has_notifiers(mm))
+               return __mmu_notifier_clear_flush_young(mm, address);
+       return 0;
+}
+
+static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
+                                         unsigned long address)
+{
+       if (mm_has_notifiers(mm))
+               __mmu_notifier_invalidate_page(mm, address);
+}
+
+static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+                                 unsigned long start, unsigned long end)
+{
+       if (mm_has_notifiers(mm))
+               __mmu_notifier_invalidate_range_start(mm, start, end);
+}
+
+static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+                                 unsigned long start, unsigned long end)
+{
+       if (mm_has_notifiers(mm))
+               __mmu_notifier_invalidate_range_end(mm, start, end);
+}
+
+static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+{
+       mm->mmu_notifier_mm = NULL;
+}
+
+static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+{
+       if (mm_has_notifiers(mm))
+               __mmu_notifier_mm_destroy(mm);
+}
+
+/*
+ * These two macros will some day replace ptep_clear_flush.
+ * ptep_clear_flush is implemented as a macro itself, so this also is
+ * implemented as a macro until ptep_clear_flush is converted to an
+ * inline function, to diminish the risk of compilation failure. The
+ * invalidate_page method over time can be moved outside the PT lock
+ * and these two macros can be later removed.
+ */
+#define ptep_clear_flush_notify(__vma, __address, __ptep)              \
+({                                                                     \
+       pte_t __pte;                                                    \
+       struct vm_area_struct *___vma = __vma;                          \
+       unsigned long ___address = __address;                           \
+       __pte = ptep_clear_flush(___vma, ___address, __ptep);           \
+       mmu_notifier_invalidate_page(___vma->vm_mm, ___address);        \
+       __pte;                                                          \
+})
+
+#define ptep_clear_flush_young_notify(__vma, __address, __ptep)                \
+({                                                                     \
+       int __young;                                                    \
+       struct vm_area_struct *___vma = __vma;                          \
+       unsigned long ___address = __address;                           \
+       __young = ptep_clear_flush_young(___vma, ___address, __ptep);   \
+       __young |= mmu_notifier_clear_flush_young(___vma->vm_mm,        \
+                                                 ___address);          \
+       __young;                                                        \
+})
+
+#else /* CONFIG_MMU_NOTIFIER */
+
+static inline void mmu_notifier_release(struct mm_struct *mm)
+{
+}
+
+static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
+                                         unsigned long address)
+{
+       return 0;
+}
+
+static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
+                                         unsigned long address)
+{
+}
+
+static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+                                 unsigned long start, unsigned long end)
+{
+}
+
+static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+                                 unsigned long start, unsigned long end)
+{
+}
+
+static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+{
+}
+
+static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+{
+}
+
+#define ptep_clear_flush_young_notify ptep_clear_flush_young
+#define ptep_clear_flush_notify ptep_clear_flush
+
+#endif /* CONFIG_MMU_NOTIFIER */
+
+#endif /* _LINUX_MMU_NOTIFIER_H */
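
To make the interface above concrete, here is a minimal, hypothetical consumer sketch. Only the mmu_notifier, mmu_notifier_ops and mmu_notifier_register declarations come from this header; the my_drv_* names and the spte-dropping helpers are invented for illustration and do not exist in the tree.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/* Hypothetical driver object embedding the notifier (illustration only). */
struct my_drv_mmu {
	struct mmu_notifier notifier;
	/* ... driver-private shadow ("spte") state ... */
};

static void my_drv_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct my_drv_mmu *drv = container_of(mn, struct my_drv_mmu, notifier);

	/* Tear down every spte and stop creating new ones: the mm is
	 * about to free all its pages. */
	my_drv_drop_all_sptes(drv);		/* invented helper */
}

static void my_drv_invalidate_range_start(struct mmu_notifier *mn,
					  struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	struct my_drv_mmu *drv = container_of(mn, struct my_drv_mmu, notifier);

	/* Drop sptes covering [start, end) and refuse to establish new
	 * ones until the matching invalidate_range_end(). */
	my_drv_drop_sptes(drv, start, end);	/* invented helper */
}

static const struct mmu_notifier_ops my_drv_mmu_ops = {
	.release		= my_drv_release,
	.invalidate_range_start	= my_drv_invalidate_range_start,
	/* .invalidate_range_end, .invalidate_page and .clear_flush_young
	 * are optional; NULL hooks are simply skipped by the callers. */
};

static int my_drv_attach(struct my_drv_mmu *drv, struct mm_struct *mm)
{
	drv->notifier.ops = &my_drv_mmu_ops;
	/* The caller must hold an mm_users pin, e.g. mm == current->mm. */
	return mmu_notifier_register(&drv->notifier, mm);
}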
index a81d818904221a30119aea1731e85512911aaf5e..a39b38ccdc976e920b077fcd484cabb8cc402ba8 100644 (file)
@@ -20,6 +20,7 @@
  */
 #define        AS_EIO          (__GFP_BITS_SHIFT + 0)  /* IO error on async write */
 #define AS_ENOSPC      (__GFP_BITS_SHIFT + 1)  /* ENOSPC on async write */
+#define AS_MM_ALL_LOCKS        (__GFP_BITS_SHIFT + 2)  /* under mm_take_all_locks() */
 
 static inline void mapping_set_error(struct address_space *mapping, int error)
 {
index b0f39be08b6c31a6d723f107e52cc31a7f3b1f78..eb4443c7e05be213f49596b81b50f209923ab4da 100644 (file)
@@ -97,6 +97,34 @@ static inline void list_del_rcu(struct list_head *entry)
        entry->prev = LIST_POISON2;
 }
 
+/**
+ * hlist_del_init_rcu - deletes entry from hash list with re-initialization
+ * @n: the element to delete from the hash list.
+ *
+ * Note: hlist_unhashed() on the node returns true after this. It is
+ * useful for RCU-based lock-free read traversal if the writer side
+ * must know whether the list entry is still hashed or already unhashed.
+ *
+ * In particular, it means that we cannot poison the forward pointers
+ * that may still be used for walking the hash list and we can only
+ * zero the pprev pointer so hlist_unhashed() will return true after
+ * this.
+ *
+ * The caller must take whatever precautions are necessary (such as
+ * holding appropriate locks) to avoid racing with another
+ * list-mutation primitive, such as hlist_add_head_rcu() or
+ * hlist_del_rcu(), running on this same list.  However, it is
+ * perfectly legal to run concurrently with the _rcu list-traversal
+ * primitives, such as hlist_for_each_entry_rcu().
+ */
+static inline void hlist_del_init_rcu(struct hlist_node *n)
+{
+       if (!hlist_unhashed(n)) {
+               __hlist_del(n);
+               n->pprev = NULL;
+       }
+}
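
As a side note, a hedged sketch of the pattern this helper enables (the item type, table lock and helper name below are illustrative, not from this hunk): the writer deletes under its own lock while RCU readers may still be walking the chain, and can later test hlist_unhashed() on the same node.

struct my_item {				/* invented example type */
	struct hlist_node node;
};

static void my_item_remove(struct my_item *it, spinlock_t *table_lock)
{
	spin_lock(table_lock);
	hlist_del_init_rcu(&it->node);	/* forward pointer left intact */
	spin_unlock(table_lock);

	/* hlist_unhashed(&it->node) is now true, so the writer can tell
	 * later that the entry is off the list; actual freeing must
	 * still wait for a grace period (synchronize_rcu/call_rcu). */
}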
+
 /**
  * list_replace_rcu - replace old entry by new one
  * @old : the element to be replaced
index 1383692ac5bd8c8dc5c06498a38795a256f56c93..69407f85e10b32daa4bd8ac82f88d43e36b82f78 100644 (file)
  */
 struct anon_vma {
        spinlock_t lock;        /* Serialize access to vma list */
+       /*
+        * NOTE: the LSB of the head.next is set by
+        * mm_take_all_locks() _after_ taking the above lock. So the
+        * head must only be read/written after taking the above lock
+        * to be sure to see a valid next pointer. The LSB itself
+        * is serialized by a system-wide lock only visible to
+        * mm_take_all_locks() (mm_all_locks_mutex).
+        */
        struct list_head head;  /* List of private "related" vmas */
 };
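
A hedged illustration of why bit 0 is available for this: list_head pointers are at least word-aligned, so the low bit of head.next is normally always zero and can carry a flag while anon_vma->lock is held. The real users are vm_lock_anon_vma()/vm_unlock_anon_vma() in the mm/mmap.c hunk further down; the helper names below are invented.

static inline void anon_vma_mark_all_locks(struct anon_vma *anon_vma)
{
	/* head.next is a pointer, so it is at least 4/8 byte aligned and
	 * bit 0 is free; non-atomic is fine under anon_vma->lock. */
	__set_bit(0, (unsigned long *) &anon_vma->head.next);
}

static inline int anon_vma_marked_all_locks(struct anon_vma *anon_vma)
{
	return test_bit(0, (unsigned long *) &anon_vma->head.next);
}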
 
index 29510d68338a3ef7bfd80def013f8f15f650155e..e202a68d1cc180cf62ade78b025ec2329c8aabcd 100644 (file)
@@ -455,3 +455,28 @@ out:
 #endif /* CONFIG_PM_SLEEP_SMP */
 
 #endif /* CONFIG_SMP */
+
+/*
+ * cpu_bit_bitmap[] is a special, "compressed" data structure that
+ * represents, for every nr < NR_CPUS, the single-bit value 1<<nr.
+ *
+ * It is used by cpumask_of_cpu() to get a constant address for a CPU
+ * mask value that has only a single bit set.
+ */
+
+/* cpu_bit_bitmap[0] is empty - so we can back into it */
+#define MASK_DECLARE_1(x)      [x+1][0] = 1UL << (x)
+#define MASK_DECLARE_2(x)      MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
+#define MASK_DECLARE_4(x)      MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
+#define MASK_DECLARE_8(x)      MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
+
+const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
+
+       MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
+       MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
+#if BITS_PER_LONG > 32
+       MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
+       MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
+#endif
+};
+EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
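
The lookup side lives in include/linux/cpumask.h (same series, not shown in this hunk). A hedged sketch of how it resolves to a constant single-bit mask: row 1 + cpu % BITS_PER_LONG has bit cpu % BITS_PER_LONG set in its first word, and backing the pointer up by cpu / BITS_PER_LONG words places that word at the right offset inside the returned cpumask; the all-zero row 0 exists so the pointer can safely "back into" it.

/* Hedged sketch of the consumer; it mirrors the helper assumed to live
 * in include/linux/cpumask.h rather than being copied from it. */
static inline const cpumask_t *sketch_get_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

	/* Step back so the non-zero word becomes word cpu / BITS_PER_LONG
	 * of the mask; the words in front of it are trailing zero words
	 * of the previous row. */
	p -= cpu / BITS_PER_LONG;
	return (const cpumask_t *)p;
}

/* Worked example (64-bit, NR_CPUS > 67): cpu == 67 selects row 1 + 3,
 * whose word 0 is 1UL << 3; backing up one word makes that word 1 of
 * the result, i.e. bit 64 + 3 == 67 of the cpumask, all others zero. */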
index 8214ba7c8bb1ad78e110687ade3459bff8b40073..7ce2ebe847964ecd0701c3c74c18994e3eebcf26 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/key.h>
 #include <linux/binfmts.h>
 #include <linux/mman.h>
+#include <linux/mmu_notifier.h>
 #include <linux/fs.h>
 #include <linux/nsproxy.h>
 #include <linux/capability.h>
@@ -414,6 +415,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
 
        if (likely(!mm_alloc_pgd(mm))) {
                mm->def_flags = 0;
+               mmu_notifier_mm_init(mm);
                return mm;
        }
 
@@ -446,6 +448,7 @@ void __mmdrop(struct mm_struct *mm)
        BUG_ON(mm == &init_mm);
        mm_free_pgd(mm);
        destroy_context(mm);
+       mmu_notifier_mm_destroy(mm);
        free_mm(mm);
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
index bf43284d6855ad083319d653d1b0fbc3235dde08..80c4336f4188409a077d3f4ae4dde727f856de55 100644 (file)
@@ -196,12 +196,10 @@ static int tick_check_new_device(struct clock_event_device *newdev)
        struct tick_device *td;
        int cpu, ret = NOTIFY_OK;
        unsigned long flags;
-       cpumask_of_cpu_ptr_declare(cpumask);
 
        spin_lock_irqsave(&tick_device_lock, flags);
 
        cpu = smp_processor_id();
-       cpumask_of_cpu_ptr_next(cpumask, cpu);
        if (!cpu_isset(cpu, newdev->cpumask))
                goto out_bc;
 
@@ -209,7 +207,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
        curdev = td->evtdev;
 
        /* cpu local device ? */
-       if (!cpus_equal(newdev->cpumask, *cpumask)) {
+       if (!cpus_equal(newdev->cpumask, cpumask_of_cpu(cpu))) {
 
                /*
                 * If the cpu affinity of the device interrupt can not
@@ -222,7 +220,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
                 * If we have a cpu local device already, do not replace it
                 * by a non cpu local device
                 */
-               if (curdev && cpus_equal(curdev->cpumask, *cpumask))
+               if (curdev && cpus_equal(curdev->cpumask, cpumask_of_cpu(cpu)))
                        goto out_bc;
        }
 
@@ -254,7 +252,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
                curdev = NULL;
        }
        clockevents_exchange_device(curdev, newdev);
-       tick_setup_device(td, newdev, cpu, cpumask);
+       tick_setup_device(td, newdev, cpu, &cpumask_of_cpu(cpu));
        if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
                tick_oneshot_notify();
 
index ce2d723c10e19d49495da09c585d122538bbbf89..bb948e52ce204bc1ec90b6bf6276a70203121de6 100644 (file)
@@ -213,9 +213,7 @@ static void start_stack_timers(void)
        int cpu;
 
        for_each_online_cpu(cpu) {
-               cpumask_of_cpu_ptr(new_mask, cpu);
-
-               set_cpus_allowed_ptr(current, new_mask);
+               set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
                start_stack_timer(cpu);
        }
        set_cpus_allowed_ptr(current, &saved_mask);
index 35136671b215f68222f498e086248fb8df51e64b..26187edcc7ead6c8c14bd69f1bb77cb3ca4640b4 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/module.h>
 
 static DEFINE_SPINLOCK(ratelimit_lock);
-static unsigned long flags;
 
 /*
  * __ratelimit - rate limiting
@@ -26,6 +25,8 @@ static unsigned long flags;
  */
 int __ratelimit(struct ratelimit_state *rs)
 {
+       unsigned long flags;
+
        if (!rs->interval)
                return 1;
 
index c4381d9516f658ff62c6a2fd767310c78d80b7bc..0f8fc22ed1030a200f1fd3c203ca6789c5646ac8 100644 (file)
@@ -11,7 +11,6 @@ notrace unsigned int debug_smp_processor_id(void)
 {
        unsigned long preempt_count = preempt_count();
        int this_cpu = raw_smp_processor_id();
-       cpumask_of_cpu_ptr_declare(this_mask);
 
        if (likely(preempt_count))
                goto out;
@@ -23,9 +22,7 @@ notrace unsigned int debug_smp_processor_id(void)
         * Kernel threads bound to a single CPU can safely use
         * smp_processor_id():
         */
-       cpumask_of_cpu_ptr_next(this_mask, this_cpu);
-
-       if (cpus_equal(current->cpus_allowed, *this_mask))
+       if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu)))
                goto out;
 
        /*
index efee5d379df4adb74afba4bbc83118a64f0a9767..446c6588c75398ab91e9413c20363dcd076432da 100644 (file)
@@ -208,3 +208,6 @@ config NR_QUICK
 config VIRT_TO_BUS
        def_bool y
        depends on !ARCH_NO_VIRT_TO_BUS
+
+config MMU_NOTIFIER
+       bool
index 06ca2381fef1bc561eceb16fa91751ca7c7ae79a..da4ccf015aea72ba4d18dc69cf47e7ac97481129 100644 (file)
@@ -25,6 +25,7 @@ obj-$(CONFIG_SHMEM) += shmem.o
 obj-$(CONFIG_TMPFS_POSIX_ACL) += shmem_acl.o
 obj-$(CONFIG_TINY_SHMEM) += tiny-shmem.o
 obj-$(CONFIG_SLOB) += slob.o
+obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
 obj-$(CONFIG_SLAB) += slab.o
 obj-$(CONFIG_SLUB) += slub.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
index 5de7633e1dbe5c4da6b749f9fbd5bfd9841a382b..42bbc6909ba4852ac41506b175584ba4d00e32eb 100644 (file)
@@ -1023,8 +1023,17 @@ find_page:
                                        ra, filp, page,
                                        index, last_index - index);
                }
-               if (!PageUptodate(page))
-                       goto page_not_up_to_date;
+               if (!PageUptodate(page)) {
+                       if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
+                                       !mapping->a_ops->is_partially_uptodate)
+                               goto page_not_up_to_date;
+                       if (TestSetPageLocked(page))
+                               goto page_not_up_to_date;
+                       if (!mapping->a_ops->is_partially_uptodate(page,
+                                                               desc, offset))
+                               goto page_not_up_to_date_locked;
+                       unlock_page(page);
+               }
 page_ok:
                /*
                 * i_size must be checked after we know the page is Uptodate.
@@ -1094,6 +1103,7 @@ page_not_up_to_date:
                if (lock_page_killable(page))
                        goto readpage_eio;
 
+page_not_up_to_date_locked:
                /* Did it get truncated before we got the lock? */
                if (!page->mapping) {
                        unlock_page(page);
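
The new branch above calls mapping->a_ops->is_partially_uptodate(page, desc, offset) so that, when the block size is smaller than the page size, a read can be satisfied from a page that is only partially uptodate. Below is a hedged sketch of such a callback for a buffer_head-based filesystem; the generic implementation added by this merge is block_is_partially_uptodate() in fs/buffer.c (not shown in this excerpt), and myfs_* is an invented name.

static int myfs_is_partially_uptodate(struct page *page,
				      read_descriptor_t *desc,
				      unsigned long from)
{
	struct inode *inode = page->mapping->host;
	unsigned int blocksize = 1 << inode->i_blkbits;
	unsigned int to = from + min_t(unsigned int,
				       PAGE_CACHE_SIZE - from, desc->count);
	unsigned int block_start = 0, block_end;
	struct buffer_head *bh, *head;

	if (!page_has_buffers(page))
		return 0;

	/* Only the buffers overlapping [from, to) need to be uptodate. */
	head = page_buffers(page);
	bh = head;
	do {
		block_end = block_start + blocksize;
		if (block_end > from && block_start < to &&
		    !buffer_uptodate(bh))
			return 0;
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head && block_start < to);

	return 1;
}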
index 98a3f31ccd6ab2070aed454351c22279efa338f5..380ab402d71160a05d82a00598e5e35efeb5f116 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/uio.h>
 #include <linux/rmap.h>
+#include <linux/mmu_notifier.h>
 #include <linux/sched.h>
 #include <asm/tlbflush.h>
 #include <asm/io.h>
@@ -188,7 +189,7 @@ __xip_unmap (struct address_space * mapping,
                if (pte) {
                        /* Nuke the page table entry. */
                        flush_cache_page(vma, address, pte_pfn(*pte));
-                       pteval = ptep_clear_flush(vma, address, pte);
+                       pteval = ptep_clear_flush_notify(vma, address, pte);
                        page_remove_rmap(page, vma);
                        dec_mm_counter(mm, file_rss);
                        BUG_ON(pte_dirty(pteval));
index 07a9c82ce1a3fa0e24da2f2da23e9416e3f1819c..7881638e4a12bd8c5489c8d08b5779b5ae5a0080 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/rmap.h>
 #include <linux/module.h>
 #include <linux/syscalls.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
@@ -214,7 +215,9 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
                spin_unlock(&mapping->i_mmap_lock);
        }
 
+       mmu_notifier_invalidate_range_start(mm, start, start + size);
        err = populate_range(mm, vma, start, size, pgoff);
+       mmu_notifier_invalidate_range_end(mm, start, start + size);
        if (!err && !(flags & MAP_NONBLOCK)) {
                if (unlikely(has_write_lock)) {
                        downgrade_write(&mm->mmap_sem);
index 3be79dc18c5c65a0c3b251d4849e3c895eadefd3..254ce2b901588f48a06f61da5692aa82b93ee0cd 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mm.h>
 #include <linux/sysctl.h>
 #include <linux/highmem.h>
+#include <linux/mmu_notifier.h>
 #include <linux/nodemask.h>
 #include <linux/pagemap.h>
 #include <linux/mempolicy.h>
@@ -19,6 +20,7 @@
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/io.h>
 
 #include <linux/hugetlb.h>
 #include "internal.h"
@@ -1672,6 +1674,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        BUG_ON(start & ~huge_page_mask(h));
        BUG_ON(end & ~huge_page_mask(h));
 
+       mmu_notifier_invalidate_range_start(mm, start, end);
        spin_lock(&mm->page_table_lock);
        for (address = start; address < end; address += sz) {
                ptep = huge_pte_offset(mm, address);
@@ -1713,6 +1716,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        }
        spin_unlock(&mm->page_table_lock);
        flush_tlb_range(vma, start, end);
+       mmu_notifier_invalidate_range_end(mm, start, end);
        list_for_each_entry_safe(page, tmp, &page_list, lru) {
                list_del(&page->lru);
                put_page(page);
index a8ca04faaea62c6d28e9dc908a0615e927d73ddb..67f0ab9077d9472f99387aa66013bd2665d58887 100644 (file)
@@ -51,6 +51,7 @@
 #include <linux/init.h>
 #include <linux/writeback.h>
 #include <linux/memcontrol.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
@@ -652,6 +653,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        unsigned long next;
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;
+       int ret;
 
        /*
         * Don't copy ptes where a page fault will fill them correctly.
@@ -667,17 +669,33 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        if (is_vm_hugetlb_page(vma))
                return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
+       /*
+        * We need to invalidate the secondary MMU mappings only when
+        * there could be a permission downgrade on the ptes of the
+        * parent mm. And a permission downgrade will only happen if
+        * is_cow_mapping() returns true.
+        */
+       if (is_cow_mapping(vma->vm_flags))
+               mmu_notifier_invalidate_range_start(src_mm, addr, end);
+
+       ret = 0;
        dst_pgd = pgd_offset(dst_mm, addr);
        src_pgd = pgd_offset(src_mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(src_pgd))
                        continue;
-               if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
-                                               vma, addr, next))
-                       return -ENOMEM;
+               if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
+                                           vma, addr, next))) {
+                       ret = -ENOMEM;
+                       break;
+               }
        } while (dst_pgd++, src_pgd++, addr = next, addr != end);
-       return 0;
+
+       if (is_cow_mapping(vma->vm_flags))
+               mmu_notifier_invalidate_range_end(src_mm,
+                                                 vma->vm_start, end);
+       return ret;
 }
 
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
@@ -881,7 +899,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
        unsigned long start = start_addr;
        spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
        int fullmm = (*tlbp)->fullmm;
+       struct mm_struct *mm = vma->vm_mm;
 
+       mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
        for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
                unsigned long end;
 
@@ -946,6 +966,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
                }
        }
 out:
+       mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
        return start;   /* which is now the end (or restart) address */
 }
 
@@ -1616,10 +1637,11 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 {
        pgd_t *pgd;
        unsigned long next;
-       unsigned long end = addr + size;
+       unsigned long start = addr, end = addr + size;
        int err;
 
        BUG_ON(addr >= end);
+       mmu_notifier_invalidate_range_start(mm, start, end);
        pgd = pgd_offset(mm, addr);
        do {
                next = pgd_addr_end(addr, end);
@@ -1627,6 +1649,7 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
+       mmu_notifier_invalidate_range_end(mm, start, end);
        return err;
 }
 EXPORT_SYMBOL_GPL(apply_to_page_range);
@@ -1839,7 +1862,7 @@ gotten:
                 * seen in the presence of one thread doing SMC and another
                 * thread doing COW.
                 */
-               ptep_clear_flush(vma, address, page_table);
+               ptep_clear_flush_notify(vma, address, page_table);
                set_pte_at(mm, address, page_table, entry);
                update_mmu_cache(vma, address, entry);
                lru_cache_add_active(new_page);
index 5e0cc99e9cd57704a1019dc2c6764d895ad3d756..245c3d69067b7bc4c0b83ba2fa6a08df28bc7f9b 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -26,6 +26,7 @@
 #include <linux/mount.h>
 #include <linux/mempolicy.h>
 #include <linux/rmap.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -2061,6 +2062,7 @@ void exit_mmap(struct mm_struct *mm)
 
        /* mm's last user has gone, and it's about to be pulled down */
        arch_exit_mmap(mm);
+       mmu_notifier_release(mm);
 
        lru_add_drain();
        flush_cache_mm(mm);
@@ -2268,3 +2270,161 @@ int install_special_mapping(struct mm_struct *mm,
 
        return 0;
 }
+
+static DEFINE_MUTEX(mm_all_locks_mutex);
+
+static void vm_lock_anon_vma(struct anon_vma *anon_vma)
+{
+       if (!test_bit(0, (unsigned long *) &anon_vma->head.next)) {
+               /*
+                * The LSB of head.next can't change from under us
+                * because we hold the mm_all_locks_mutex.
+                */
+               spin_lock(&anon_vma->lock);
+               /*
+                * We can safely modify head.next after taking the
+                * anon_vma->lock. If some other vma in this mm shares
+                * the same anon_vma we won't take it again.
+                *
+                * No need of atomic instructions here, head.next
+                * can't change from under us thanks to the
+                * anon_vma->lock.
+                */
+               if (__test_and_set_bit(0, (unsigned long *)
+                                      &anon_vma->head.next))
+                       BUG();
+       }
+}
+
+static void vm_lock_mapping(struct address_space *mapping)
+{
+       if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
+               /*
+                * AS_MM_ALL_LOCKS can't change from under us because
+                * we hold the mm_all_locks_mutex.
+                *
+                * Operations on ->flags have to be atomic because
+                * even if AS_MM_ALL_LOCKS is stable thanks to the
+                * mm_all_locks_mutex, there may be other cpus
+                * changing other bitflags in parallel to us.
+                */
+               if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
+                       BUG();
+               spin_lock(&mapping->i_mmap_lock);
+       }
+}
+
+/*
+ * This operation locks against the VM for all pte/vma/mm related
+ * operations that could ever happen on a certain mm. This includes
+ * vmtruncate, try_to_unmap, and all page faults.
+ *
+ * The caller must take the mmap_sem in write mode before calling
+ * mm_take_all_locks(). The caller isn't allowed to release the
+ * mmap_sem until mm_drop_all_locks() returns.
+ *
+ * mmap_sem in write mode is required in order to block all operations
+ * that could modify pagetables and free pages without need of
+ * altering the vma layout (for example populate_range() with
+ * nonlinear vmas). It's also needed in write mode to prevent new
+ * anon_vmas from being associated with existing vmas.
+ *
+ * A single task can't take more than one mm_take_all_locks() in a row
+ * or it would deadlock.
+ *
+ * The LSB in anon_vma->head.next and the AS_MM_ALL_LOCKS bitflag in
+ * mapping->flags avoid taking the same lock twice if more than one
+ * vma in this mm is backed by the same anon_vma or address_space.
+ *
+ * We can take all the locks in random order because the VM code
+ * taking i_mmap_lock or anon_vma->lock outside the mmap_sem never
+ * takes more than one of them in a row. Secondly we're protected
+ * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
+ *
+ * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
+ * that may have to take thousands of locks.
+ *
+ * mm_take_all_locks() can fail if it's interrupted by signals.
+ */
+int mm_take_all_locks(struct mm_struct *mm)
+{
+       struct vm_area_struct *vma;
+       int ret = -EINTR;
+
+       BUG_ON(down_read_trylock(&mm->mmap_sem));
+
+       mutex_lock(&mm_all_locks_mutex);
+
+       for (vma = mm->mmap; vma; vma = vma->vm_next) {
+               if (signal_pending(current))
+                       goto out_unlock;
+               if (vma->anon_vma)
+                       vm_lock_anon_vma(vma->anon_vma);
+               if (vma->vm_file && vma->vm_file->f_mapping)
+                       vm_lock_mapping(vma->vm_file->f_mapping);
+       }
+       ret = 0;
+
+out_unlock:
+       if (ret)
+               mm_drop_all_locks(mm);
+
+       return ret;
+}
+
+static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
+{
+       if (test_bit(0, (unsigned long *) &anon_vma->head.next)) {
+               /*
+                * The LSB of head.next can't change to 0 from under
+                * us because we hold the mm_all_locks_mutex.
+                *
+                * We must however clear the bitflag before unlocking
+                * the anon_vma, so that users of anon_vma->head will
+                * never see our bitflag.
+                *
+                * No need of atomic instructions here, head.next
+                * can't change from under us until we release the
+                * anon_vma->lock.
+                */
+               if (!__test_and_clear_bit(0, (unsigned long *)
+                                         &anon_vma->head.next))
+                       BUG();
+               spin_unlock(&anon_vma->lock);
+       }
+}
+
+static void vm_unlock_mapping(struct address_space *mapping)
+{
+       if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
+               /*
+                * AS_MM_ALL_LOCKS can't change to 0 from under us
+                * because we hold the mm_all_locks_mutex.
+                */
+               spin_unlock(&mapping->i_mmap_lock);
+               if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
+                                       &mapping->flags))
+                       BUG();
+       }
+}
+
+/*
+ * The mmap_sem cannot be released by the caller until
+ * mm_drop_all_locks() returns.
+ */
+void mm_drop_all_locks(struct mm_struct *mm)
+{
+       struct vm_area_struct *vma;
+
+       BUG_ON(down_read_trylock(&mm->mmap_sem));
+       BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
+
+       for (vma = mm->mmap; vma; vma = vma->vm_next) {
+               if (vma->anon_vma)
+                       vm_unlock_anon_vma(vma->anon_vma);
+               if (vma->vm_file && vma->vm_file->f_mapping)
+                       vm_unlock_mapping(vma->vm_file->f_mapping);
+       }
+
+       mutex_unlock(&mm_all_locks_mutex);
+}
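
Putting the comment above mm_take_all_locks() into a calling pattern (a hedged sketch; the only real caller added by this merge is do_mmu_notifier_register() in mm/mmu_notifier.c below, and prepare_secondary_mmu() is an invented placeholder):

static int example_freeze_mm(struct mm_struct *mm)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);	/* -EINTR if a signal arrives */
	if (ret)
		goto out;		/* locks already dropped on failure */

	/* Page faults, try_to_unmap, vmtruncate etc. are all excluded
	 * here, so per-mm state can be changed atomically w.r.t. them. */
	prepare_secondary_mmu(mm);	/* invented placeholder */

	mm_drop_all_locks(mm);
out:
	up_write(&mm->mmap_sem);
	return ret;
}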
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
new file mode 100644 (file)
index 0000000..5f4ef02
--- /dev/null
@@ -0,0 +1,277 @@
+/*
+ *  linux/mm/mmu_notifier.c
+ *
+ *  Copyright (C) 2008  Qumranet, Inc.
+ *  Copyright (C) 2008  SGI
+ *             Christoph Lameter <clameter@sgi.com>
+ *
+ *  This work is licensed under the terms of the GNU GPL, version 2. See
+ *  the COPYING file in the top-level directory.
+ */
+
+#include <linux/rculist.h>
+#include <linux/mmu_notifier.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/rcupdate.h>
+#include <linux/sched.h>
+
+/*
+ * This function can't run concurrently against mmu_notifier_register
+ * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
+ * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
+ * in parallel despite there being no task using this mm any more,
+ * through the vmas outside of the exit_mmap context, such as with
+ * vmtruncate. This serializes against mmu_notifier_unregister with
+ * the mmu_notifier_mm->lock in addition to RCU and it serializes
+ * against the other mmu notifiers with RCU. struct mmu_notifier_mm
+ * can't go away from under us as exit_mmap holds an mm_count pin
+ * itself.
+ */
+void __mmu_notifier_release(struct mm_struct *mm)
+{
+       struct mmu_notifier *mn;
+
+       spin_lock(&mm->mmu_notifier_mm->lock);
+       while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
+               mn = hlist_entry(mm->mmu_notifier_mm->list.first,
+                                struct mmu_notifier,
+                                hlist);
+               /*
+                * We arrived before mmu_notifier_unregister, so
+                * mmu_notifier_unregister will do nothing other than
+                * wait for ->release to finish and then return.
+                */
+               hlist_del_init_rcu(&mn->hlist);
+               /*
+                * RCU here will block mmu_notifier_unregister until
+                * ->release returns.
+                */
+               rcu_read_lock();
+               spin_unlock(&mm->mmu_notifier_mm->lock);
+               /*
+                * if ->release runs before mmu_notifier_unregister it
+                * must be handled as it's the only way for the driver
+                * to flush all existing sptes and stop the driver
+                * from establishing any more sptes before all the
+                * pages in the mm are freed.
+                */
+               if (mn->ops->release)
+                       mn->ops->release(mn, mm);
+               rcu_read_unlock();
+               spin_lock(&mm->mmu_notifier_mm->lock);
+       }
+       spin_unlock(&mm->mmu_notifier_mm->lock);
+
+       /*
+        * synchronize_rcu here prevents mmu_notifier_release from
+        * returning to exit_mmap (which would proceed to free all pages
+        * in the mm) until the ->release method returns, if it was
+        * invoked by mmu_notifier_unregister.
+        *
+        * The mmu_notifier_mm can't go away from under us because one
+        * mm_count is held by exit_mmap.
+        */
+       synchronize_rcu();
+}
+
+/*
+ * If no young bitflag is supported by the hardware, ->clear_flush_young can
+ * unmap the address and return 1 or 0 depending on whether the mapping
+ * previously existed or not.
+ */
+int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
+                                       unsigned long address)
+{
+       struct mmu_notifier *mn;
+       struct hlist_node *n;
+       int young = 0;
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+               if (mn->ops->clear_flush_young)
+                       young |= mn->ops->clear_flush_young(mn, mm, address);
+       }
+       rcu_read_unlock();
+
+       return young;
+}
+
+void __mmu_notifier_invalidate_page(struct mm_struct *mm,
+                                         unsigned long address)
+{
+       struct mmu_notifier *mn;
+       struct hlist_node *n;
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+               if (mn->ops->invalidate_page)
+                       mn->ops->invalidate_page(mn, mm, address);
+       }
+       rcu_read_unlock();
+}
+
+void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+                                 unsigned long start, unsigned long end)
+{
+       struct mmu_notifier *mn;
+       struct hlist_node *n;
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+               if (mn->ops->invalidate_range_start)
+                       mn->ops->invalidate_range_start(mn, mm, start, end);
+       }
+       rcu_read_unlock();
+}
+
+void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+                                 unsigned long start, unsigned long end)
+{
+       struct mmu_notifier *mn;
+       struct hlist_node *n;
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+               if (mn->ops->invalidate_range_end)
+                       mn->ops->invalidate_range_end(mn, mm, start, end);
+       }
+       rcu_read_unlock();
+}
+
+static int do_mmu_notifier_register(struct mmu_notifier *mn,
+                                   struct mm_struct *mm,
+                                   int take_mmap_sem)
+{
+       struct mmu_notifier_mm *mmu_notifier_mm;
+       int ret;
+
+       BUG_ON(atomic_read(&mm->mm_users) <= 0);
+
+       ret = -ENOMEM;
+       mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
+       if (unlikely(!mmu_notifier_mm))
+               goto out;
+
+       if (take_mmap_sem)
+               down_write(&mm->mmap_sem);
+       ret = mm_take_all_locks(mm);
+       if (unlikely(ret))
+               goto out_cleanup;
+
+       if (!mm_has_notifiers(mm)) {
+               INIT_HLIST_HEAD(&mmu_notifier_mm->list);
+               spin_lock_init(&mmu_notifier_mm->lock);
+               mm->mmu_notifier_mm = mmu_notifier_mm;
+               mmu_notifier_mm = NULL;
+       }
+       atomic_inc(&mm->mm_count);
+
+       /*
+        * Serialize the update against mmu_notifier_unregister. A
+        * side note: mmu_notifier_release can't run concurrently with
+        * us because we hold the mm_users pin (either implicitly as
+        * current->mm or explicitly with get_task_mm() or similar).
+        * We can't race against any other mmu notifier method either
+        * thanks to mm_take_all_locks().
+        */
+       spin_lock(&mm->mmu_notifier_mm->lock);
+       hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
+       spin_unlock(&mm->mmu_notifier_mm->lock);
+
+       mm_drop_all_locks(mm);
+out_cleanup:
+       if (take_mmap_sem)
+               up_write(&mm->mmap_sem);
+       /* kfree() does nothing if mmu_notifier_mm is NULL */
+       kfree(mmu_notifier_mm);
+out:
+       BUG_ON(atomic_read(&mm->mm_users) <= 0);
+       return ret;
+}
+
+/*
+ * Must not hold mmap_sem nor any other VM related lock when calling
+ * this registration function. Must also ensure mm_users can't go down
+ * to zero while this runs to avoid races with mmu_notifier_release,
+ * so mm has to be current->mm or the mm should be pinned safely such
+ * as with get_task_mm(). If the mm is not current->mm, the mm_users
+ * pin should be released by calling mmput after mmu_notifier_register
+ * returns. mmu_notifier_unregister must be always called to
+ * unregister the notifier. mm_count is automatically pinned to allow
+ * mmu_notifier_unregister to safely run at any time later, before or
+ * after exit_mmap. ->release will always be called before exit_mmap
+ * frees the pages.
+ */
+int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+       return do_mmu_notifier_register(mn, mm, 1);
+}
+EXPORT_SYMBOL_GPL(mmu_notifier_register);
+
+/*
+ * Same as mmu_notifier_register but here the caller must hold the
+ * mmap_sem in write mode.
+ */
+int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+       return do_mmu_notifier_register(mn, mm, 0);
+}
+EXPORT_SYMBOL_GPL(__mmu_notifier_register);
+
+/* this is called after the last mmu_notifier_unregister() returned */
+void __mmu_notifier_mm_destroy(struct mm_struct *mm)
+{
+       BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
+       kfree(mm->mmu_notifier_mm);
+       mm->mmu_notifier_mm = LIST_POISON1; /* debug */
+}
+
+/*
+ * This releases the mm_count pin automatically and frees the mm
+ * structure if it was the last user of it. It serializes against
+ * running mmu notifiers with RCU and against mmu_notifier_unregister
+ * with the unregister lock + RCU. All sptes must be dropped before
+ * calling mmu_notifier_unregister. ->release or any other notifier
+ * method may be invoked concurrently with mmu_notifier_unregister,
+ * and only after mmu_notifier_unregister has returned are we guaranteed
+ * that ->release or any other method can't run anymore.
+ */
+void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+       BUG_ON(atomic_read(&mm->mm_count) <= 0);
+
+       spin_lock(&mm->mmu_notifier_mm->lock);
+       if (!hlist_unhashed(&mn->hlist)) {
+               hlist_del_rcu(&mn->hlist);
+
+               /*
+                * RCU here will force exit_mmap to wait for ->release to
+                * finish before freeing the pages.
+                */
+               rcu_read_lock();
+               spin_unlock(&mm->mmu_notifier_mm->lock);
+               /*
+                * exit_mmap will block in mmu_notifier_release to
+                * guarantee ->release is called before freeing the
+                * pages.
+                */
+               if (mn->ops->release)
+                       mn->ops->release(mn, mm);
+               rcu_read_unlock();
+       } else
+               spin_unlock(&mm->mmu_notifier_mm->lock);
+
+       /*
+        * Wait for any running method to finish, of course including
+        * ->release if it was run by mmu_notifier_release instead of us.
+        */
+       synchronize_rcu();
+
+       BUG_ON(atomic_read(&mm->mm_count) <= 0);
+
+       mmdrop(mm);
+}
+EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
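
Taken together, the registration rules spelled out above translate into a lifecycle roughly like the following hedged sketch (everything except the mmu_notifier_* calls, get_task_mm() and mmput() is invented for illustration):

static int example_attach(struct mmu_notifier *mn, struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);	/* mm_users pin */
	int ret;

	if (!mm)
		return -ESRCH;
	ret = mmu_notifier_register(mn, mm);	/* pins mm_count on success */
	mmput(mm);	/* the mm_users pin is only needed across register */
	return ret;
}

static void example_detach(struct mmu_notifier *mn, struct mm_struct *mm)
{
	/* Legal before or after exit_mmap; waits for any running
	 * callback and drops the mm_count pin taken at register time. */
	mmu_notifier_unregister(mn, mm);
}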
index abd645a3b0a021bb34d5de49cff94efae109966c..fded06f923f4b939d75f745225b38b8e41e8ee08 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/syscalls.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
+#include <linux/mmu_notifier.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
@@ -203,10 +204,12 @@ success:
                dirty_accountable = 1;
        }
 
+       mmu_notifier_invalidate_range_start(mm, start, end);
        if (is_vm_hugetlb_page(vma))
                hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
        else
                change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
+       mmu_notifier_invalidate_range_end(mm, start, end);
        vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
        vm_stat_account(mm, newflags, vma->vm_file, nrpages);
        return 0;
index 08e3c7f2bd15a1b2e502aeb3e9f509148cb5dfa8..1a7743923c8c42c536d79ecb64f45457b2a3f994 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/highmem.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -74,7 +75,11 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
        struct mm_struct *mm = vma->vm_mm;
        pte_t *old_pte, *new_pte, pte;
        spinlock_t *old_ptl, *new_ptl;
+       unsigned long old_start;
 
+       old_start = old_addr;
+       mmu_notifier_invalidate_range_start(vma->vm_mm,
+                                           old_start, old_end);
        if (vma->vm_file) {
                /*
                 * Subtle point from Rajesh Venkatasubramanian: before
@@ -116,6 +121,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
        pte_unmap_unlock(old_pte - 1, old_ptl);
        if (mapping)
                spin_unlock(&mapping->i_mmap_lock);
+       mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end);
 }
 
 #define LATENCY_LIMIT  (64 * PAGE_SIZE)
index 39ae5a9bf382615120e14e3fbe4efd3bc7cda153..99bc3f9cd796f34a58ec1d61798c27c583a0eaf3 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -49,6 +49,7 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/memcontrol.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/tlbflush.h>
 
@@ -287,7 +288,7 @@ static int page_referenced_one(struct page *page,
        if (vma->vm_flags & VM_LOCKED) {
                referenced++;
                *mapcount = 1;  /* break early from loop */
-       } else if (ptep_clear_flush_young(vma, address, pte))
+       } else if (ptep_clear_flush_young_notify(vma, address, pte))
                referenced++;
 
        /* Pretend the page is referenced if the task has the
@@ -457,7 +458,7 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
                pte_t entry;
 
                flush_cache_page(vma, address, pte_pfn(*pte));
-               entry = ptep_clear_flush(vma, address, pte);
+               entry = ptep_clear_flush_notify(vma, address, pte);
                entry = pte_wrprotect(entry);
                entry = pte_mkclean(entry);
                set_pte_at(mm, address, pte, entry);
@@ -705,14 +706,14 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
         * skipped over this mm) then we should reactivate it.
         */
        if (!migration && ((vma->vm_flags & VM_LOCKED) ||
-                       (ptep_clear_flush_young(vma, address, pte)))) {
+                       (ptep_clear_flush_young_notify(vma, address, pte)))) {
                ret = SWAP_FAIL;
                goto out_unmap;
        }
 
        /* Nuke the page table entry. */
        flush_cache_page(vma, address, page_to_pfn(page));
-       pteval = ptep_clear_flush(vma, address, pte);
+       pteval = ptep_clear_flush_notify(vma, address, pte);
 
        /* Move the dirty bit to the physical page now the pte is gone. */
        if (pte_dirty(pteval))
@@ -837,12 +838,12 @@ static void try_to_unmap_cluster(unsigned long cursor,
                page = vm_normal_page(vma, address, *pte);
                BUG_ON(!page || PageAnon(page));
 
-               if (ptep_clear_flush_young(vma, address, pte))
+               if (ptep_clear_flush_young_notify(vma, address, pte))
                        continue;
 
                /* Nuke the page table entry. */
                flush_cache_page(vma, address, pte_pfn(*pte));
-               pteval = ptep_clear_flush(vma, address, pte);
+               pteval = ptep_clear_flush_notify(vma, address, pte);
 
                /* If nonlinear, store the file page offset in the pte. */
                if (page->index != linear_page_index(vma, address))
index 952d361774bb6d723477bd75f001a543dde04f92..c1e5a3b4f7583087d4f2558f96b1992bc2508696 100644 (file)
@@ -1513,7 +1513,6 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
                inode->i_uid = current->fsuid;
                inode->i_gid = current->fsgid;
                inode->i_blocks = 0;
-               inode->i_mapping->a_ops = &shmem_aops;
                inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                inode->i_generation = get_seconds();
@@ -1528,6 +1527,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
                        init_special_inode(inode, mode, dev);
                        break;
                case S_IFREG:
+                       inode->i_mapping->a_ops = &shmem_aops;
                        inode->i_op = &shmem_inode_operations;
                        inode->i_fop = &shmem_file_operations;
                        mpol_shared_policy_init(&info->policy,
@@ -1929,6 +1929,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
                        return error;
                }
                unlock_page(page);
+               inode->i_mapping->a_ops = &shmem_aops;
                inode->i_op = &shmem_symlink_inode_operations;
                kaddr = kmap_atomic(page, KM_USER0);
                memcpy(kaddr, symname, len);
index 835d274130838361212a8ffcbae3d4767af0cf57..5a32cb7c4bb486267a03d15892adc7ce5db93c93 100644 (file)
@@ -310,8 +310,7 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
        switch (m->mode) {
        case SVC_POOL_PERCPU:
        {
-               cpumask_of_cpu_ptr(cpumask, node);
-               set_cpus_allowed_ptr(task, cpumask);
+               set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
                break;
        }
        case SVC_POOL_PERNODE: