err.no Git - linux-2.6/commitdiff
Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6
author Linus Torvalds <torvalds@woody.linux-foundation.org>
Wed, 26 Sep 2007 16:08:04 +0000 (09:08 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
Wed, 26 Sep 2007 16:08:04 +0000 (09:08 -0700)
* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6:
  [SCSI] esp: fix instance numbering.

59 files changed:
Documentation/crypto/async-tx-api.txt [new file with mode: 0644]
Makefile
arch/arm/mach-ep93xx/core.c
arch/arm/mm/cache-l2x0.c
arch/mips/kernel/i8259.c
arch/mips/kernel/irq-msc01.c
arch/mips/kernel/irq.c
arch/mips/kernel/smtc.c
arch/powerpc/kernel/process.c
arch/powerpc/platforms/cell/spufs/file.c
crypto/async_tx/async_tx.c
drivers/acpi/processor_core.c
drivers/acpi/processor_idle.c
drivers/acpi/sleep/Makefile
drivers/acpi/sleep/main.c
drivers/acpi/sleep/poweroff.c [deleted file]
drivers/acpi/video.c
drivers/ata/pata_sis.c
drivers/ata/sata_sil24.c
drivers/cdrom/cdrom.c
drivers/char/hpet.c
drivers/char/mspec.c
drivers/infiniband/hw/mlx4/qp.c
drivers/input/mouse/appletouch.c
drivers/kvm/Kconfig
drivers/lguest/lguest_asm.S
drivers/md/raid5.c
drivers/net/pcmcia/3c589_cs.c
drivers/net/ppp_mppe.c
drivers/net/r8169.c
drivers/net/sky2.c
drivers/net/sky2.h
drivers/power/power_supply_sysfs.c
drivers/serial/sunsab.c
drivers/w1/w1.c
fs/compat_ioctl.c
fs/ufs/super.c
fs/xfs/xfs_filestream.c
fs/xfs/xfs_log_recover.c
include/acpi/acpi_drivers.h
include/acpi/processor.h
include/asm-mips/irq.h
include/net/sctp/sm.h
include/net/sctp/structs.h
kernel/time/tick-broadcast.c
lib/Kconfig.debug
net/ieee80211/softmac/ieee80211softmac_assoc.c
net/ieee80211/softmac/ieee80211softmac_wx.c
net/mac80211/ieee80211.c
net/mac80211/rc80211_simple.c
net/mac80211/wme.c
net/sctp/input.c
net/sctp/inqueue.c
net/sctp/sm_make_chunk.c
net/sctp/sm_statefuns.c
net/sctp/sm_statetable.c
net/wireless/core.c
net/wireless/sysfs.c
sound/core/memalloc.c

diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt
new file mode 100644 (file)
index 0000000..c1e9545
--- /dev/null
@@ -0,0 +1,219 @@
+                Asynchronous Transfers/Transforms API
+
+1 INTRODUCTION
+
+2 GENEALOGY
+
+3 USAGE
+3.1 General format of the API
+3.2 Supported operations
+3.3 Descriptor management
+3.4 When does the operation execute?
+3.5 When does the operation complete?
+3.6 Constraints
+3.7 Example
+
+4 DRIVER DEVELOPMENT NOTES
+4.1 Conformance points
+4.2 "My application needs finer control of hardware channels"
+
+5 SOURCE
+
+---
+
+1 INTRODUCTION
+
+The async_tx API provides methods for describing a chain of asynchronous
+bulk memory transfers/transforms with support for inter-transactional
+dependencies.  It is implemented as a dmaengine client that smooths over
+the details of different hardware offload engine implementations.  Code
+that is written to the API can optimize for asynchronous operation and
+the API will fit the chain of operations to the available offload
+resources.
+
+2 GENEALOGY
+
+The API was initially designed to offload the memory copy and
+xor-parity-calculations of the md-raid5 driver using the offload engines
+present in the Intel(R) XScale series of I/O processors.  It also built
+on the 'dmaengine' layer developed for offloading memory copies in the
+network stack using Intel(R) I/OAT engines.  The following design
+features surfaced as a result:
+1/ implicit synchronous path: users of the API do not need to know if
+   the platform they are running on has offload capabilities.  The
+   operation will be offloaded when an engine is available and carried out
+   in software otherwise.
+2/ cross channel dependency chains: the API allows a chain of dependent
+   operations to be submitted, like xor->copy->xor in the raid5 case.  The
+   API automatically handles cases where the transition from one operation
+   to another implies a hardware channel switch.
+3/ dmaengine extensions to support multiple clients and operation types
+   beyond 'memcpy'
+
+3 USAGE
+
+3.1 General format of the API:
+struct dma_async_tx_descriptor *
+async_<operation>(<op specific parameters>,
+                 enum async_tx_flags flags,
+                 struct dma_async_tx_descriptor *dependency,
+                 dma_async_tx_callback callback_routine,
+                 void *callback_parameter);
+
+3.2 Supported operations:
+memcpy       - memory copy between a source and a destination buffer
+memset       - fill a destination buffer with a byte value
+xor          - xor a series of source buffers and write the result to a
+              destination buffer
+xor_zero_sum - xor a series of source buffers and set a flag if the
+              result is zero.  The implementation attempts to prevent
+              writes to memory
+
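+As an illustration of the format above, a minimal sketch of a single
+offloaded copy ('dest', 'src' and 'len' are illustrative names; 'dest'
+and 'src' are struct page pointers, as in the example in section 3.7):
+
+       struct dma_async_tx_descriptor *tx;
+
+       /* copy len bytes from offset 0 of src to offset 0 of dest */
+       tx = async_memcpy(dest, src, 0, 0, len, ASYNC_TX_ACK,
+                         NULL, NULL, NULL);
+
+       /* force submission rather than waiting for the driver's
+        * batching threshold (see section 3.4)
+        */
+       async_tx_issue_pending_all();
+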
+3.3 Descriptor management:
+The return value is non-NULL and points to a 'descriptor' when the operation
+has been queued to execute asynchronously.  Descriptors are recycled
+resources, under control of the offload engine driver, to be reused as
+operations complete.  When an application needs to submit a chain of
+operations it must guarantee that the descriptor is not automatically recycled
+before the dependency is submitted.  This requires that all descriptors be
+acknowledged by the application before the offload engine driver is allowed to
+recycle (or free) the descriptor.  A descriptor can be acked by one of the
+following methods:
+1/ setting the ASYNC_TX_ACK flag if no child operations are to be submitted
+2/ setting the ASYNC_TX_DEP_ACK flag to acknowledge the parent
+   descriptor of a new operation.
+3/ calling async_tx_ack() on the descriptor.
+
+3.4 When does the operation execute?
+Operations do not immediately issue after return from the
+async_<operation> call.  Offload engine drivers batch operations to
+improve performance by reducing the number of mmio cycles needed to
+manage the channel.  Once a driver-specific threshold is met the driver
+automatically issues pending operations.  An application can force this
+event by calling async_tx_issue_pending_all().  This operates on all
+channels since the application has no knowledge of channel to operation
+mapping.
+
+3.5 When does the operation complete?
+There are two methods for an application to learn about the completion
+of an operation.
+1/ Call dma_wait_for_async_tx().  This call causes the CPU to spin while
+   it polls for the completion of the operation.  It handles dependency
+   chains and issuing pending operations.
+2/ Specify a completion callback.  The callback routine runs in tasklet
+   context if the offload engine driver supports interrupts, or it is
+   called in application context if the operation is carried out
+   synchronously in software.  The callback can be set in the call to
+   async_<operation>, or when the application needs to submit a chain of
+   unknown length it can use the async_trigger_callback() routine to set a
+   completion interrupt/callback at the end of the chain.
+
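+A minimal sketch of both methods ('tx' is a descriptor returned by a
+prior async_<operation> call; the callback names are illustrative):
+
+       /* method 1: spin until the chain rooted at tx completes */
+       if (dma_wait_for_async_tx(tx) != DMA_SUCCESS)
+               printk(KERN_ERR "async transaction failed\n");
+
+       /* method 2: terminate a chain of unknown length with a callback */
+       tx = async_trigger_callback(ASYNC_TX_ACK | ASYNC_TX_DEP_ACK, tx,
+                                   my_completion_cb, my_cb_param);
+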
+3.6 Constraints:
+1/ Calls to async_<operation> are not permitted in IRQ context.  Other
+   contexts are permitted provided constraint #2 is not violated.
+2/ Completion callback routines cannot submit new operations.  This
+   results in recursion in the synchronous case and spin_locks being
+   acquired twice in the asynchronous case.
+
+3.7 Example:
+Perform an xor->copy->xor operation where each operation depends on the
+result from the previous operation:
+
+void complete_xor_copy_xor(void *param)
+{
+       printk("complete\n");
+}
+
+void run_xor_copy_xor(struct page **xor_srcs,
+                     int xor_src_cnt,
+                     struct page *xor_dest,
+                     size_t xor_len,
+                     struct page *copy_src,
+                     struct page *copy_dest,
+                     size_t copy_len)
+{
+       struct dma_async_tx_descriptor *tx;
+
+       tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
+                      ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL);
+       tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len,
+                         ASYNC_TX_DEP_ACK, tx, NULL, NULL);
+       tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
+                      ASYNC_TX_XOR_DROP_DST | ASYNC_TX_DEP_ACK | ASYNC_TX_ACK,
+                      tx, complete_xor_copy_xor, NULL);
+
+       async_tx_issue_pending_all();
+}
+
+See include/linux/async_tx.h for more information on the flags.  See the
+ops_run_* and ops_complete_* routines in drivers/md/raid5.c for more
+implementation examples.
+
+4 DRIVER DEVELOPMENT NOTES
+4.1 Conformance points:
+There are a few conformance points required in dmaengine drivers to
+accommodate assumptions made by applications using the async_tx API:
+1/ Completion callbacks are expected to happen in tasklet context
+2/ dma_async_tx_descriptor fields are never manipulated in IRQ context
+3/ Use async_tx_run_dependencies() in the descriptor clean up path to
+   handle submission of dependent operations
+
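+A rough sketch of how a driver's descriptor clean up path might honor
+these points ('my_desc' and its fields are hypothetical, and the exact
+prototype of async_tx_run_dependencies() should be checked against
+include/linux/async_tx.h for the kernel version in use):
+
+       /* called from the driver's tasklet, per point 1 */
+       static void my_clean_descriptor(struct my_desc *desc)
+       {
+               struct dma_async_tx_descriptor *tx = &desc->async_tx;
+
+               if (tx->callback)
+                       tx->callback(tx->callback_param);
+
+               /* submit operations that were waiting on tx (point 3);
+                * some versions also pass the parent channel here
+                */
+               async_tx_run_dependencies(tx);
+       }
+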
+4.2 "My application needs finer control of hardware channels"
+This requirement seems to arise from cases where a DMA engine driver is
+trying to support device-to-memory DMA.  The dmaengine and async_tx
+implementations were designed for offloading memory-to-memory
+operations; however, there are some capabilities of the dmaengine layer
+that can be used for platform-specific channel management.
+Platform-specific constraints can be handled by registering the
+application as a 'dma_client' and implementing a 'dma_event_callback' to
+apply a filter to the available channels in the system.  Before showing
+how to implement a custom dma_event_callback, some background on
+dmaengine's client support is required.
+
+The following routines in dmaengine support multiple clients requesting
+use of a channel:
+- dma_async_client_register(struct dma_client *client)
+- dma_async_client_chan_request(struct dma_client *client)
+
+dma_async_client_register takes a pointer to an initialized dma_client
+structure.  It expects that the 'event_callback' and 'cap_mask' fields
+are already initialized.
+
+dma_async_client_chan_request triggers dmaengine to notify the client of
+all channels that satisfy the capability mask.  It is up to the client's
+event_callback routine to track how many channels the client needs and
+how many it is currently using.  The dma_event_callback routine returns a
+dma_state_client code to let dmaengine know the status of the
+allocation.
+
+Below is an example of how to extend this functionality for
+platform-specific filtering of the available channels beyond the
+standard capability mask:
+
+static enum dma_state_client
+my_dma_client_callback(struct dma_client *client,
+                       struct dma_chan *chan, enum dma_state state)
+{
+       struct dma_device *dma_dev;
+       struct my_platform_specific_dma *plat_dma_dev;
+       
+       dma_dev = chan->device;
+       plat_dma_dev = container_of(dma_dev,
+                                   struct my_platform_specific_dma,
+                                   dma_dev);
+
+       if (!plat_dma_dev->platform_specific_capability)
+               return DMA_DUP;
+
+       . . .
+}
+
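+With the callback in place, a client registration sketch (again, the
+names are illustrative; dma_cap_set() and the dma_client fields are
+declared in include/linux/dmaengine.h):
+
+       static struct dma_client my_client = {
+               .event_callback = my_dma_client_callback,
+       };
+
+       static int __init my_client_init(void)
+       {
+               /* ask only for channels that can perform memcpy */
+               dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
+
+               dma_async_client_register(&my_client);
+               dma_async_client_chan_request(&my_client);
+               return 0;
+       }
+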
+5 SOURCE
+include/linux/dmaengine.h: core header file for DMA drivers and clients
+drivers/dma/dmaengine.c: offload engine channel management routines
+drivers/dma/: location for offload engine drivers
+include/linux/async_tx.h: core header file for the async_tx api
+crypto/async_tx/async_tx.c: async_tx interface to dmaengine and common code
+crypto/async_tx/async_memcpy.c: copy offload
+crypto/async_tx/async_memset.c: memory fill offload
+crypto/async_tx/async_xor.c: xor and xor zero sum offload
index c265e41ec55a48b9966a5d65845b930b3185dcd0..4dac25301d5f068eab6a8ee6f674fb23e6e41aa9 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 23
-EXTRAVERSION =-rc7
+EXTRAVERSION =-rc8
 NAME = Arr Matey! A Hairy Bilge Rat!
 
 # *DOCUMENTATION*
index 851cc7158ca305bbaa1bc707979cd276a93807f7..70b2c78011102a7dea24b05f99bd97473942f1d0 100644 (file)
@@ -336,7 +336,7 @@ static int ep93xx_gpio_irq_type(unsigned int irq, unsigned int type)
        if (line >= 0 && line < 16) {
                gpio_line_config(line, GPIO_IN);
        } else {
-               gpio_line_config(EP93XX_GPIO_LINE_F(line), GPIO_IN);
+               gpio_line_config(EP93XX_GPIO_LINE_F(line-16), GPIO_IN);
        }
 
        port = line >> 3;
index b4e9b734e0bd939cddcdc392889368fe43f30846..76b800a951917d96b7aad433c344562f484743df 100644 (file)
@@ -57,7 +57,17 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
 {
        unsigned long addr;
 
-       start &= ~(CACHE_LINE_SIZE - 1);
+       if (start & (CACHE_LINE_SIZE - 1)) {
+               start &= ~(CACHE_LINE_SIZE - 1);
+               sync_writel(start, L2X0_CLEAN_INV_LINE_PA, 1);
+               start += CACHE_LINE_SIZE;
+       }
+
+       if (end & (CACHE_LINE_SIZE - 1)) {
+               end &= ~(CACHE_LINE_SIZE - 1);
+               sync_writel(end, L2X0_CLEAN_INV_LINE_PA, 1);
+       }
+
        for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
                sync_writel(addr, L2X0_INV_LINE_PA, 1);
        cache_sync();
index b6c30800c66774170f719bba39948613a7a118c3..3a2d255361bcc364031652eb02261a57994bd0db 100644 (file)
@@ -177,10 +177,7 @@ handle_real_irq:
                outb(cached_master_mask, PIC_MASTER_IMR);
                outb(0x60+irq,PIC_MASTER_CMD);  /* 'Specific EOI to master */
        }
-#ifdef CONFIG_MIPS_MT_SMTC
-       if (irq_hwmask[irq] & ST0_IM)
-               set_c0_status(irq_hwmask[irq] & ST0_IM);
-#endif /* CONFIG_MIPS_MT_SMTC */
+       smtc_im_ack_irq(irq);
        spin_unlock_irqrestore(&i8259A_lock, flags);
        return;
 
index 410868b5ea5f499df73f497f104edb33405cbb02..1ecdd50bfc60333a82d2b60fba9181967a1ae277 100644 (file)
@@ -52,11 +52,8 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
        mask_msc_irq(irq);
        if (!cpu_has_veic)
                MSCIC_WRITE(MSC01_IC_EOI, 0);
-#ifdef CONFIG_MIPS_MT_SMTC
        /* This actually needs to be a call into platform code */
-       if (irq_hwmask[irq] & ST0_IM)
-               set_c0_status(irq_hwmask[irq] & ST0_IM);
-#endif /* CONFIG_MIPS_MT_SMTC */
+       smtc_im_ack_irq(irq);
 }
 
 /*
@@ -73,10 +70,7 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
                MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
                MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
        }
-#ifdef CONFIG_MIPS_MT_SMTC
-       if (irq_hwmask[irq] & ST0_IM)
-               set_c0_status(irq_hwmask[irq] & ST0_IM);
-#endif /* CONFIG_MIPS_MT_SMTC */
+       smtc_im_ack_irq(irq);
 }
 
 /*
index aeded6c17de52af0ed308cacd953aa3936363f14..a990aad2f0492746375f06ee6f932a2643b4597a 100644 (file)
@@ -74,20 +74,12 @@ EXPORT_SYMBOL_GPL(free_irqno);
  */
 void ack_bad_irq(unsigned int irq)
 {
+       smtc_im_ack_irq(irq);
        printk("unexpected IRQ # %d\n", irq);
 }
 
 atomic_t irq_err_count;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-/*
- * SMTC Kernel needs to manipulate low-level CPU interrupt mask
- * in do_IRQ. These are passed in setup_irq_smtc() and stored
- * in this table.
- */
-unsigned long irq_hwmask[NR_IRQS];
-#endif /* CONFIG_MIPS_MT_SMTC */
-
 /*
  * Generic, controller-independent functions:
  */
index 43826c16101d15ee3f556c67e055749cfb7fcce4..f09404377ef18397840c6dba44f4cd00449b8493 100644 (file)
 #include <asm/smtc_proc.h>
 
 /*
- * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
+ * SMTC Kernel needs to manipulate low-level CPU interrupt mask
+ * in do_IRQ. These are passed in setup_irq_smtc() and stored
+ * in this table.
  */
+unsigned long irq_hwmask[NR_IRQS];
 
 #define LOCK_MT_PRA() \
        local_irq_save(flags); \
index e477c9d0498bed92efd0ebad78db4db4187afd9f..8a1b001d0b110fa5a31a3fb53ed215312680f04c 100644 (file)
@@ -605,6 +605,13 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
        regs->ccr = 0;
        regs->gpr[1] = sp;
 
+       /*
+        * We have just cleared all the nonvolatile GPRs, so make
+        * FULL_REGS(regs) return true.  This is necessary to allow
+        * ptrace to examine the thread immediately after exec.
+        */
+       regs->trap &= ~1UL;
+
 #ifdef CONFIG_PPC32
        regs->mq = 0;
        regs->nip = start;
index 4100ddc52f0227fb0fcb300fb50da78bffa9b9d3..7de4e919687b48b717bdb6001f30c088cfe49997 100644 (file)
@@ -2177,8 +2177,8 @@ struct tree_descr spufs_dir_contents[] = {
        { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
        { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
        { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
-       { "signal1", &spufs_signal1_nosched_fops, 0222, },
-       { "signal2", &spufs_signal2_nosched_fops, 0222, },
+       { "signal1", &spufs_signal1_fops, 0666, },
+       { "signal2", &spufs_signal2_fops, 0666, },
        { "signal1_type", &spufs_signal1_type, 0666, },
        { "signal2_type", &spufs_signal2_type, 0666, },
        { "cntl", &spufs_cntl_fops,  0666, },
index 035007145e780d2cedd3e38cbb1a34e39345d3c2..bc18cbb8ea79562ce05401a5734d8310cf2ea938 100644 (file)
@@ -80,6 +80,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
        enum dma_status status;
        struct dma_async_tx_descriptor *iter;
+       struct dma_async_tx_descriptor *parent;
 
        if (!tx)
                return DMA_SUCCESS;
@@ -87,8 +88,15 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
        /* poll through the dependency chain, return when tx is complete */
        do {
                iter = tx;
-               while (iter->cookie == -EBUSY)
-                       iter = iter->parent;
+
+               /* find the root of the unsubmitted dependency chain */
+               while (iter->cookie == -EBUSY) {
+                       parent = iter->parent;
+                       if (parent && parent->cookie == -EBUSY)
+                               iter = iter->parent;
+                       else
+                               break;
+               }
 
                status = dma_sync_wait(iter->chan, iter->cookie);
        } while (status == DMA_IN_PROGRESS || (iter != tx));
index 2afb3d2086b3d03b3e4858caffba20f6f456c90f..9f11dc296cdd7bc29f5545cd8524eb03ede594e3 100644 (file)
@@ -102,6 +102,8 @@ static struct acpi_driver acpi_processor_driver = {
                .add = acpi_processor_add,
                .remove = acpi_processor_remove,
                .start = acpi_processor_start,
+               .suspend = acpi_processor_suspend,
+               .resume = acpi_processor_resume,
                },
 };
 
index d9b8af763e1ec59e64671655fda51e0601b55da9..f18261368e76e87059606d8d8dfc4e753266b99a 100644 (file)
@@ -325,6 +325,23 @@ static void acpi_state_timer_broadcast(struct acpi_processor *pr,
 
 #endif
 
+/*
+ * Suspend / resume control
+ */
+static int acpi_idle_suspend;
+
+int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
+{
+       acpi_idle_suspend = 1;
+       return 0;
+}
+
+int acpi_processor_resume(struct acpi_device * device)
+{
+       acpi_idle_suspend = 0;
+       return 0;
+}
+
 static void acpi_processor_idle(void)
 {
        struct acpi_processor *pr = NULL;
@@ -355,7 +372,7 @@ static void acpi_processor_idle(void)
        }
 
        cx = pr->power.state;
-       if (!cx) {
+       if (!cx || acpi_idle_suspend) {
                if (pm_idle_save)
                        pm_idle_save();
                else
index 195a4f69c0f773d4ba73119d3efd0512c9d10454..f1fb888c2d293752448d7124e8308740afc7d534 100644 (file)
@@ -1,5 +1,5 @@
-obj-y                                  := poweroff.o wakeup.o
-obj-$(CONFIG_ACPI_SLEEP)               += main.o
+obj-y                                  := wakeup.o
+obj-y                                  += main.o
 obj-$(CONFIG_ACPI_SLEEP)               += proc.o
 
 EXTRA_CFLAGS += $(ACPI_CFLAGS)
index c52ade816fb494768000d11cfb0b277c37483071..2cbb9aabd00eba4c8eeb395deb922dcc36fb6980 100644 (file)
 #include <linux/dmi.h>
 #include <linux/device.h>
 #include <linux/suspend.h>
+
+#include <asm/io.h>
+
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 #include "sleep.h"
 
 u8 sleep_states[ACPI_S_STATE_COUNT];
 
+#ifdef CONFIG_PM_SLEEP
 static u32 acpi_target_sleep_state = ACPI_STATE_S0;
+#endif
+
+int acpi_sleep_prepare(u32 acpi_state)
+{
+#ifdef CONFIG_ACPI_SLEEP
+       /* do we have a wakeup address for S2 and S3? */
+       if (acpi_state == ACPI_STATE_S3) {
+               if (!acpi_wakeup_address) {
+                       return -EFAULT;
+               }
+               acpi_set_firmware_waking_vector((acpi_physical_address)
+                                               virt_to_phys((void *)
+                                                            acpi_wakeup_address));
+
+       }
+       ACPI_FLUSH_CPU_CACHE();
+       acpi_enable_wakeup_device_prep(acpi_state);
+#endif
+       acpi_gpe_sleep_prepare(acpi_state);
+       acpi_enter_sleep_state_prep(acpi_state);
+       return 0;
+}
 
 #ifdef CONFIG_SUSPEND
 static struct pm_ops acpi_pm_ops;
@@ -275,6 +301,7 @@ int acpi_suspend(u32 acpi_state)
        return -EINVAL;
 }
 
+#ifdef CONFIG_PM_SLEEP
 /**
  *     acpi_pm_device_sleep_state - return preferred power state of ACPI device
  *             in the system sleep state given by %acpi_target_sleep_state
@@ -349,6 +376,21 @@ int acpi_pm_device_sleep_state(struct device *dev, int wake, int *d_min_p)
                *d_min_p = d_min;
        return d_max;
 }
+#endif
+
+static void acpi_power_off_prepare(void)
+{
+       /* Prepare to power off the system */
+       acpi_sleep_prepare(ACPI_STATE_S5);
+}
+
+static void acpi_power_off(void)
+{
+       /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
+       printk("%s called\n", __FUNCTION__);
+       local_irq_disable();
+       acpi_enter_sleep_state(ACPI_STATE_S5);
+}
 
 int __init acpi_sleep_init(void)
 {
@@ -363,16 +405,17 @@ int __init acpi_sleep_init(void)
        if (acpi_disabled)
                return 0;
 
+       sleep_states[ACPI_STATE_S0] = 1;
+       printk(KERN_INFO PREFIX "(supports S0");
+
 #ifdef CONFIG_SUSPEND
-       printk(KERN_INFO PREFIX "(supports");
-       for (i = ACPI_STATE_S0; i < ACPI_STATE_S4; i++) {
+       for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
                status = acpi_get_sleep_type_data(i, &type_a, &type_b);
                if (ACPI_SUCCESS(status)) {
                        sleep_states[i] = 1;
                        printk(" S%d", i);
                }
        }
-       printk(")\n");
 
        pm_set_ops(&acpi_pm_ops);
 #endif
@@ -382,10 +425,16 @@ int __init acpi_sleep_init(void)
        if (ACPI_SUCCESS(status)) {
                hibernation_set_ops(&acpi_hibernation_ops);
                sleep_states[ACPI_STATE_S4] = 1;
+               printk(" S4");
        }
-#else
-       sleep_states[ACPI_STATE_S4] = 0;
 #endif
-
+       status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
+       if (ACPI_SUCCESS(status)) {
+               sleep_states[ACPI_STATE_S5] = 1;
+               printk(" S5");
+               pm_power_off_prepare = acpi_power_off_prepare;
+               pm_power_off = acpi_power_off;
+       }
+       printk(")\n");
        return 0;
 }
diff --git a/drivers/acpi/sleep/poweroff.c b/drivers/acpi/sleep/poweroff.c
deleted file mode 100644 (file)
index 39e40d5..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * poweroff.c - ACPI handler for powering off the system.
- *
- * AKA S5, but it is independent of whether or not the kernel supports
- * any other sleep support in the system.
- *
- * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
- *
- * This file is released under the GPLv2.
- */
-
-#include <linux/pm.h>
-#include <linux/init.h>
-#include <acpi/acpi_bus.h>
-#include <linux/sysdev.h>
-#include <asm/io.h>
-#include "sleep.h"
-
-int acpi_sleep_prepare(u32 acpi_state)
-{
-#ifdef CONFIG_ACPI_SLEEP
-       /* do we have a wakeup address for S2 and S3? */
-       if (acpi_state == ACPI_STATE_S3) {
-               if (!acpi_wakeup_address) {
-                       return -EFAULT;
-               }
-               acpi_set_firmware_waking_vector((acpi_physical_address)
-                                               virt_to_phys((void *)
-                                                            acpi_wakeup_address));
-
-       }
-       ACPI_FLUSH_CPU_CACHE();
-       acpi_enable_wakeup_device_prep(acpi_state);
-#endif
-       acpi_gpe_sleep_prepare(acpi_state);
-       acpi_enter_sleep_state_prep(acpi_state);
-       return 0;
-}
-
-#ifdef CONFIG_PM
-
-static void acpi_power_off_prepare(void)
-{
-       /* Prepare to power off the system */
-       acpi_sleep_prepare(ACPI_STATE_S5);
-}
-
-static void acpi_power_off(void)
-{
-       /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
-       printk("%s called\n", __FUNCTION__);
-       local_irq_disable();
-       /* Some SMP machines only can poweroff in boot CPU */
-       acpi_enter_sleep_state(ACPI_STATE_S5);
-}
-
-static int acpi_poweroff_init(void)
-{
-       if (!acpi_disabled) {
-               u8 type_a, type_b;
-               acpi_status status;
-
-               status =
-                   acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
-               if (ACPI_SUCCESS(status)) {
-                       pm_power_off_prepare = acpi_power_off_prepare;
-                       pm_power_off = acpi_power_off;
-               }
-       }
-       return 0;
-}
-
-late_initcall(acpi_poweroff_init);
-
-#endif                         /* CONFIG_PM */
index 3c9bb85a6a93f9465917876a856c42c30e4f15bf..d05891f16282338d96d5cf5a5f3f22d30d6cd5c0 100644 (file)
@@ -417,7 +417,6 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
        arg0.integer.value = level;
        status = acpi_evaluate_object(device->dev->handle, "_BCM", &args, NULL);
 
-       printk(KERN_DEBUG "set_level status: %x\n", status);
        return status;
 }
 
@@ -1754,7 +1753,7 @@ static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
 
 static int acpi_video_bus_start_devices(struct acpi_video_bus *video)
 {
-       return acpi_video_bus_DOS(video, 1, 0);
+       return acpi_video_bus_DOS(video, 0, 0);
 }
 
 static int acpi_video_bus_stop_devices(struct acpi_video_bus *video)
index 2bd7645f1a8846a49cbefb3019aace5dae70a73f..cce2834b2b60990140071be8ad10ec3fbcee2b84 100644 (file)
@@ -375,8 +375,9 @@ static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev)
        int drive_pci = sis_old_port_base(adev);
        u16 timing;
 
+       /* MWDMA 0-2 and UDMA 0-5 */
        const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
-       const u16 udma_bits[]  = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000};
+       const u16 udma_bits[]  = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000, 0x8000 };
 
        pci_read_config_word(pdev, drive_pci, &timing);
 
index ef83e6b1e314d1aaf3842ca673012425b92bd66f..233e886933959c63f3a5d8d1ea04df0f0a2dfb6b 100644 (file)
@@ -888,6 +888,16 @@ static inline void sil24_host_intr(struct ata_port *ap)
        u32 slot_stat, qc_active;
        int rc;
 
+       /* If PCIX_IRQ_WOC, there's an inherent race window between
+        * clearing IRQ pending status and reading PORT_SLOT_STAT
+        * which may cause spurious interrupts afterwards.  This is
+        * unavoidable and much better than losing interrupts which
+        * happens if IRQ pending is cleared after reading
+        * PORT_SLOT_STAT.
+        */
+       if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
+               writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
+
        slot_stat = readl(port + PORT_SLOT_STAT);
 
        if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
@@ -895,9 +905,6 @@ static inline void sil24_host_intr(struct ata_port *ap)
                return;
        }
 
-       if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
-               writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
-
        qc_active = slot_stat & ~HOST_SSTAT_ATTN;
        rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc);
        if (rc > 0)
@@ -910,7 +917,8 @@ static inline void sil24_host_intr(struct ata_port *ap)
                return;
        }
 
-       if (ata_ratelimit())
+       /* spurious interrupts are expected if PCIX_IRQ_WOC */
+       if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit())
                ata_port_printk(ap, KERN_INFO, "spurious interrupt "
                        "(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
                        slot_stat, ap->active_tag, ap->sactive);
index 67ee3d4b2878c1d2916dbc55f1d60757add33951..79245714f0a777430d459eb2a3c09aa7746410b4 100644 (file)
@@ -1032,6 +1032,10 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
        check_disk_change(ip->i_bdev);
        return 0;
 err_release:
+       if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) {
+               cdi->ops->lock_door(cdi, 0);
+               cdinfo(CD_OPEN, "door unlocked.\n");
+       }
        cdi->ops->release(cdi);
 err:
        cdi->use_count--;
index 7ecffc9c738f98b56693f61aa4150bfeb655f712..fd51554ab081bea0865d1e2ad0848be925f1850e 100644 (file)
@@ -943,14 +943,14 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
                        printk(KERN_DEBUG "%s: 0x%lx is busy\n",
                                __FUNCTION__, hdp->hd_phys_address);
                        iounmap(hdp->hd_address);
-                       return -EBUSY;
+                       return AE_ALREADY_EXISTS;
                }
        } else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
                struct acpi_resource_fixed_memory32 *fixmem32;
 
                fixmem32 = &res->data.fixed_memory32;
                if (!fixmem32)
-                       return -EINVAL;
+                       return AE_NO_MEMORY;
 
                hdp->hd_phys_address = fixmem32->address;
                hdp->hd_address = ioremap(fixmem32->address,
@@ -960,7 +960,7 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
                        printk(KERN_DEBUG "%s: 0x%lx is busy\n",
                                __FUNCTION__, hdp->hd_phys_address);
                        iounmap(hdp->hd_address);
-                       return -EBUSY;
+                       return AE_ALREADY_EXISTS;
                }
        } else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
                struct acpi_resource_extended_irq *irqp;
index 049a46cc9f87612b157d2abbe0aedd568d3e3372..04ac155d3a0794cf3d7958722e1532e198aa2c52 100644 (file)
@@ -155,23 +155,22 @@ mspec_open(struct vm_area_struct *vma)
  * mspec_close
  *
  * Called when unmapping a device mapping. Frees all mspec pages
- * belonging to the vma.
+ * belonging to all the vma's sharing this vma_data structure.
  */
 static void
 mspec_close(struct vm_area_struct *vma)
 {
        struct vma_data *vdata;
-       int index, last_index, result;
+       int index, last_index;
        unsigned long my_page;
 
        vdata = vma->vm_private_data;
 
-       BUG_ON(vma->vm_start < vdata->vm_start || vma->vm_end > vdata->vm_end);
+       if (!atomic_dec_and_test(&vdata->refcnt))
+               return;
 
-       spin_lock(&vdata->lock);
-       index = (vma->vm_start - vdata->vm_start) >> PAGE_SHIFT;
-       last_index = (vma->vm_end - vdata->vm_start) >> PAGE_SHIFT;
-       for (; index < last_index; index++) {
+       last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
+       for (index = 0; index < last_index; index++) {
                if (vdata->maddr[index] == 0)
                        continue;
                /*
@@ -180,20 +179,12 @@ mspec_close(struct vm_area_struct *vma)
                 */
                my_page = vdata->maddr[index];
                vdata->maddr[index] = 0;
-               spin_unlock(&vdata->lock);
-               result = mspec_zero_block(my_page, PAGE_SIZE);
-               if (!result)
+               if (!mspec_zero_block(my_page, PAGE_SIZE))
                        uncached_free_page(my_page);
                else
                        printk(KERN_WARNING "mspec_close(): "
-                              "failed to zero page %i\n",
-                              result);
-               spin_lock(&vdata->lock);
+                              "failed to zero page %ld\n", my_page);
        }
-       spin_unlock(&vdata->lock);
-
-       if (!atomic_dec_and_test(&vdata->refcnt))
-               return;
 
        if (vdata->flags & VMD_VMALLOCED)
                vfree(vdata);
@@ -201,7 +192,6 @@ mspec_close(struct vm_area_struct *vma)
                kfree(vdata);
 }
 
-
 /*
  * mspec_nopfn
  *
index ba0428d872aa4bc0a6cc2993049c6f60f2eba25e..85c51bdc36f1d403130fdf81655c51c7eca6e817 100644 (file)
@@ -1211,12 +1211,42 @@ static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
        dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
 }
 
-static void set_data_seg(struct mlx4_wqe_data_seg *dseg,
-                        struct ib_sge *sg)
+static void set_mlx_icrc_seg(void *dseg)
+{
+       u32 *t = dseg;
+       struct mlx4_wqe_inline_seg *iseg = dseg;
+
+       t[1] = 0;
+
+       /*
+        * Need a barrier here before writing the byte_count field to
+        * make sure that all the data is visible before the
+        * byte_count field is set.  Otherwise, if the segment begins
+        * a new cacheline, the HCA prefetcher could grab the 64-byte
+        * chunk and get a valid (!= 0xffffffff) byte count but
+        * stale data, and end up sending the wrong data.
+        */
+       wmb();
+
+       iseg->byte_count = cpu_to_be32((1 << 31) | 4);
+}
+
+static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
 {
-       dseg->byte_count = cpu_to_be32(sg->length);
        dseg->lkey       = cpu_to_be32(sg->lkey);
        dseg->addr       = cpu_to_be64(sg->addr);
+
+       /*
+        * Need a barrier here before writing the byte_count field to
+        * make sure that all the data is visible before the
+        * byte_count field is set.  Otherwise, if the segment begins
+        * a new cacheline, the HCA prefetcher could grab the 64-byte
+        * chunk and get a valid (!= 0xffffffff) byte count but
+        * stale data, and end up sending the wrong data.
+        */
+       wmb();
+
+       dseg->byte_count = cpu_to_be32(sg->length);
 }
 
 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
@@ -1225,6 +1255,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        struct mlx4_ib_qp *qp = to_mqp(ibqp);
        void *wqe;
        struct mlx4_wqe_ctrl_seg *ctrl;
+       struct mlx4_wqe_data_seg *dseg;
        unsigned long flags;
        int nreq;
        int err = 0;
@@ -1324,22 +1355,27 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        break;
                }
 
-               for (i = 0; i < wr->num_sge; ++i) {
-                       set_data_seg(wqe, wr->sg_list + i);
+               /*
+                * Write data segments in reverse order, so as to
+                * overwrite cacheline stamp last within each
+                * cacheline.  This avoids issues with WQE
+                * prefetching.
+                */
 
-                       wqe  += sizeof (struct mlx4_wqe_data_seg);
-                       size += sizeof (struct mlx4_wqe_data_seg) / 16;
-               }
+               dseg = wqe;
+               dseg += wr->num_sge - 1;
+               size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);
 
                /* Add one more inline data segment for ICRC for MLX sends */
-               if (qp->ibqp.qp_type == IB_QPT_SMI || qp->ibqp.qp_type == IB_QPT_GSI) {
-                       ((struct mlx4_wqe_inline_seg *) wqe)->byte_count =
-                               cpu_to_be32((1 << 31) | 4);
-                       ((u32 *) wqe)[1] = 0;
-                       wqe  += sizeof (struct mlx4_wqe_data_seg);
+               if (unlikely(qp->ibqp.qp_type == IB_QPT_SMI ||
+                            qp->ibqp.qp_type == IB_QPT_GSI)) {
+                       set_mlx_icrc_seg(dseg + 1);
                        size += sizeof (struct mlx4_wqe_data_seg) / 16;
                }
 
+               for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
+                       set_data_seg(dseg, wr->sg_list + i);
+
                ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
                                    MLX4_WQE_CTRL_FENCE : 0) | size;
 
index 2bea1b2c631c81274b80304f3e4be11216013068..a1804bfdbb8ce38510f783c233682e12e5dabac8 100644 (file)
@@ -328,6 +328,7 @@ static void atp_complete(struct urb* urb)
 {
        int x, y, x_z, y_z, x_f, y_f;
        int retval, i, j;
+       int key;
        struct atp *dev = urb->context;
 
        switch (urb->status) {
@@ -468,6 +469,7 @@ static void atp_complete(struct urb* urb)
                              ATP_XFACT, &x_z, &x_f);
        y = atp_calculate_abs(dev->xy_acc + ATP_XSENSORS, ATP_YSENSORS,
                              ATP_YFACT, &y_z, &y_f);
+       key = dev->data[dev->datalen - 1] & 1;
 
        if (x && y) {
                if (dev->x_old != -1) {
@@ -505,7 +507,7 @@ static void atp_complete(struct urb* urb)
                   the first touch unless reinitialised. Do so if it's been
                   idle for a while in order to avoid waking the kernel up
                   several hundred times a second */
-               if (atp_is_geyser_3(dev)) {
+               if (!key && atp_is_geyser_3(dev)) {
                        dev->idlecount++;
                        if (dev->idlecount == 10) {
                                dev->valid = 0;
@@ -514,7 +516,7 @@ static void atp_complete(struct urb* urb)
                }
        }
 
-       input_report_key(dev->input, BTN_LEFT, dev->data[dev->datalen - 1] & 1);
+       input_report_key(dev->input, BTN_LEFT, key);
        input_sync(dev->input);
 
 exit:
index 7b64fd4aa2f3d4c99e6f4ffef4c3cdcb48ab9a90..0a419a0de603a352cb2411cfacbb005f17590d9c 100644 (file)
@@ -6,7 +6,8 @@ menuconfig VIRTUALIZATION
        depends on X86
        default y
        ---help---
-         Say Y here to get to see options for virtualization guest drivers.
+         Say Y here to get to see options for using your Linux host to run other
+         operating systems inside virtual machines (guests).
          This option alone does not add any kernel code.
 
          If you say N, all options in this submenu will be skipped and disabled.
index f182c6a36209c4c35b5f0c1b5bba3e5bf1b57f0c..1ddcd5cd20f6070b72428622b80f531f3ac9fe08 100644 (file)
@@ -22,8 +22,9 @@
        jmp lguest_init
 
 /*G:055 We create a macro which puts the assembler code between lgstart_ and
- * lgend_ markers.  These templates end up in the .init.text section, so they
- * are discarded after boot. */
+ * lgend_ markers.  These templates are put in the .text section: they can't be
+ * discarded after boot as we may need to patch modules, too. */
+.text
 #define LGUEST_PATCH(name, insns...)                   \
        lgstart_##name: insns; lgend_##name:;           \
        .globl lgstart_##name; .globl lgend_##name
@@ -34,7 +35,6 @@ LGUEST_PATCH(popf, movl %eax, lguest_data+LGUEST_DATA_irq_enabled)
 LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
 /*:*/
 
-.text
 /* These demark the EIP range where host should never deliver interrupts. */
 .global lguest_noirq_start
 .global lguest_noirq_end
index 4d63773ee73a634fe31d72eb04edad4d8942f8e2..f96dea975fa50d94e805c40c46be82df7cfe80bc 100644 (file)
@@ -514,7 +514,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
        struct stripe_head *sh = stripe_head_ref;
        struct bio *return_bi = NULL;
        raid5_conf_t *conf = sh->raid_conf;
-       int i, more_to_read = 0;
+       int i;
 
        pr_debug("%s: stripe %llu\n", __FUNCTION__,
                (unsigned long long)sh->sector);
@@ -522,16 +522,14 @@ static void ops_complete_biofill(void *stripe_head_ref)
        /* clear completed biofills */
        for (i = sh->disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];
-               /* check if this stripe has new incoming reads */
-               if (dev->toread)
-                       more_to_read++;
 
                /* acknowledge completion of a biofill operation */
-               /* and check if we need to reply to a read request
-               */
-               if (test_bit(R5_Wantfill, &dev->flags) && !dev->toread) {
+               /* and check if we need to reply to a read request,
+                * new R5_Wantfill requests are held off until
+                * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)
+                */
+               if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
                        struct bio *rbi, *rbi2;
-                       clear_bit(R5_Wantfill, &dev->flags);
 
                        /* The access to dev->read is outside of the
                         * spin_lock_irq(&conf->device_lock), but is protected
@@ -558,8 +556,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 
        return_io(return_bi);
 
-       if (more_to_read)
-               set_bit(STRIPE_HANDLE, &sh->state);
+       set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
 }
 
index c06cae3f0b564c3240058f4a97c3656d47e9bf27..503f2685fb73c434ae4a2d6d98be4f03a3d4b7d9 100644 (file)
@@ -116,7 +116,7 @@ struct el3_private {
     spinlock_t         lock;
 };
 
-static const char *if_names[] = { "auto", "10base2", "10baseT", "AUI" };
+static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" };
 
 /*====================================================================*/
 
index f79cf87a2bff555aef80eba31da6c17eb340d65c..c0b6d19d1457598ba7a83738a194400344725197 100644 (file)
@@ -136,7 +136,7 @@ struct ppp_mppe_state {
  * Key Derivation, from RFC 3078, RFC 3079.
  * Equivalent to Get_Key() for MS-CHAP as described in RFC 3079.
  */
-static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *InterimKey)
+static void get_new_key_from_sha(struct ppp_mppe_state * state)
 {
        struct hash_desc desc;
        struct scatterlist sg[4];
@@ -153,8 +153,6 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *I
        desc.flags = 0;
 
        crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest);
-
-       memcpy(InterimKey, state->sha1_digest, state->keylen);
 }
 
 /*
@@ -163,21 +161,21 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *I
  */
 static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
 {
-       unsigned char InterimKey[MPPE_MAX_KEY_LEN];
        struct scatterlist sg_in[1], sg_out[1];
        struct blkcipher_desc desc = { .tfm = state->arc4 };
 
-       get_new_key_from_sha(state, InterimKey);
+       get_new_key_from_sha(state);
        if (!initial_key) {
-               crypto_blkcipher_setkey(state->arc4, InterimKey, state->keylen);
-               setup_sg(sg_in, InterimKey, state->keylen);
+               crypto_blkcipher_setkey(state->arc4, state->sha1_digest,
+                                       state->keylen);
+               setup_sg(sg_in, state->sha1_digest, state->keylen);
                setup_sg(sg_out, state->session_key, state->keylen);
                if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
                                             state->keylen) != 0) {
                    printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
                }
        } else {
-               memcpy(state->session_key, InterimKey, state->keylen);
+               memcpy(state->session_key, state->sha1_digest, state->keylen);
        }
        if (state->keylen == 8) {
                /* See RFC 3078 */
index b85ab4a8f2a3377354bf357940334be642f4a6ef..c921ec32c232cb265323f9075a2daf27275e188b 100644 (file)
@@ -1228,7 +1228,10 @@ static void rtl8169_hw_phy_config(struct net_device *dev)
                return;
        }
 
-       /* phy config for RTL8169s mac_version C chip */
+       if ((tp->mac_version != RTL_GIGA_MAC_VER_02) &&
+           (tp->mac_version != RTL_GIGA_MAC_VER_03))
+               return;
+
        mdio_write(ioaddr, 31, 0x0001);                 //w 31 2 0 1
        mdio_write(ioaddr, 21, 0x1000);                 //w 21 15 0 1000
        mdio_write(ioaddr, 24, 0x65c7);                 //w 24 15 0 65c7
@@ -2567,6 +2570,15 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
                    (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
                        netif_wake_queue(dev);
                }
+               /*
+                * 8168 hack: TxPoll requests are lost when the Tx packets are
+                * too close. Let's kick an extra TxPoll request when a burst
+                * of start_xmit activity is detected (if it is not detected,
+                * it is slow enough). -- FR
+                */
+               smp_rmb();
+               if (tp->cur_tx != dirty_tx)
+                       RTL_W8(TxPoll, NPQ);
        }
 }
 
index eaffe551d1d8108e68e7d8a59a0db3e5f1b85a22..0792031a5cf959a1543f32f4e0f2ab4ccb7b0ec2 100644 (file)
@@ -338,6 +338,16 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
                if (!(hw->flags & SKY2_HW_GIGABIT)) {
                        /* enable automatic crossover */
                        ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
+
+                       if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+                           hw->chip_rev == CHIP_REV_YU_FE2_A0) {
+                               u16 spec;
+
+                               /* Enable Class A driver for FE+ A0 */
+                               spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2);
+                               spec |= PHY_M_FESC_SEL_CL_A;
+                               gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
+                       }
                } else {
                        /* disable energy detect */
                        ctrl &= ~PHY_M_PC_EN_DET_MSK;
@@ -816,7 +826,8 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
        sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
        sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
 
-       if (!(hw->flags & SKY2_HW_RAMBUFFER)) {
+       /* On chips without ram buffer, pause is controlled by MAC level */
+       if (sky2_read8(hw, B2_E_0) == 0) {
                sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
                sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
 
@@ -1271,7 +1282,7 @@ static int sky2_up(struct net_device *dev)
        struct sky2_port *sky2 = netdev_priv(dev);
        struct sky2_hw *hw = sky2->hw;
        unsigned port = sky2->port;
-       u32 imask;
+       u32 imask, ramsize;
        int cap, err = -ENOMEM;
        struct net_device *otherdev = hw->dev[sky2->port^1];
 
@@ -1326,13 +1337,12 @@ static int sky2_up(struct net_device *dev)
 
        sky2_mac_init(hw, port);
 
-       if (hw->flags & SKY2_HW_RAMBUFFER) {
-               /* Register is number of 4K blocks on internal RAM buffer. */
-               u32 ramsize = sky2_read8(hw, B2_E_0) * 4;
+       /* Register is number of 4K blocks on internal RAM buffer. */
+       ramsize = sky2_read8(hw, B2_E_0) * 4;
+       if (ramsize > 0) {
                u32 rxspace;
 
-               printk(KERN_DEBUG PFX "%s: ram buffer %dK\n", dev->name, ramsize);
-
+               pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
                if (ramsize < 16)
                        rxspace = ramsize / 2;
                else
@@ -1995,7 +2005,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
 
        synchronize_irq(hw->pdev->irq);
 
-       if (!(hw->flags & SKY2_HW_RAMBUFFER))
+       if (sky2_read8(hw, B2_E_0) == 0)
                sky2_set_tx_stfwd(hw, port);
 
        ctl = gma_read16(hw, port, GM_GP_CTRL);
@@ -2526,7 +2536,7 @@ static void sky2_watchdog(unsigned long arg)
                        ++active;
 
                        /* For chips with Rx FIFO, check if stuck */
-                       if ((hw->flags & SKY2_HW_RAMBUFFER) &&
+                       if ((hw->flags & SKY2_HW_FIFO_HANG_CHECK) &&
                             sky2_rx_hung(dev)) {
                                pr_info(PFX "%s: receiver hang detected\n",
                                        dev->name);
@@ -2684,8 +2694,10 @@ static int __devinit sky2_init(struct sky2_hw *hw)
        switch(hw->chip_id) {
        case CHIP_ID_YUKON_XL:
                hw->flags = SKY2_HW_GIGABIT
-                       | SKY2_HW_NEWER_PHY
-                       | SKY2_HW_RAMBUFFER;
+                       | SKY2_HW_NEWER_PHY;
+               if (hw->chip_rev < 3)
+                       hw->flags |= SKY2_HW_FIFO_HANG_CHECK;
+
                break;
 
        case CHIP_ID_YUKON_EC_U:
@@ -2711,11 +2723,10 @@ static int __devinit sky2_init(struct sky2_hw *hw)
                        dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
                        return -EOPNOTSUPP;
                }
-               hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RAMBUFFER;
+               hw->flags = SKY2_HW_GIGABIT | SKY2_HW_FIFO_HANG_CHECK;
                break;
 
        case CHIP_ID_YUKON_FE:
-               hw->flags = SKY2_HW_RAMBUFFER;
                break;
 
        case CHIP_ID_YUKON_FE_P:
index 69cd98400fe669639b596ea99185d66e9b49e265..8bc5c54e3efacd2cdc63e12aabd31a5ce37b0051 100644 (file)
@@ -2063,7 +2063,7 @@ struct sky2_hw {
 #define SKY2_HW_FIBRE_PHY      0x00000002
 #define SKY2_HW_GIGABIT                0x00000004
 #define SKY2_HW_NEWER_PHY      0x00000008
-#define SKY2_HW_RAMBUFFER      0x00000010      /* chip has RAM FIFO */
+#define SKY2_HW_FIFO_HANG_CHECK        0x00000010
 #define SKY2_HW_NEW_LE         0x00000020      /* new LSOv2 format */
 #define SKY2_HW_AUTO_TX_SUM    0x00000040      /* new IP decode for Tx */
 #define SKY2_HW_ADV_POWER_CTL  0x00000080      /* additional PHY power regs */
index c7c4574729b190c6358e57e7f02d04427be3c029..de3155b21285df91c29def0571409e28973bdbdc 100644 (file)
@@ -289,6 +289,7 @@ int power_supply_uevent(struct device *dev, char **envp, int num_envp,
                if (ret)
                        goto out;
        }
+       envp[i] = NULL;
 
 out:
        free_page((unsigned long)prop_buf);
index e348ba684050eca227fa0e8a5ed8f148a0521ccc..ff610c23314bd74e5b9fa9d3f103334c1c957581 100644 (file)
@@ -38,7 +38,7 @@
 #include <asm/prom.h>
 #include <asm/of_device.h>
 
-#if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
 #define SUPPORT_SYSRQ
 #endif
 
index 8d7ab74170d57944b2f1c68fb94f74bcd21c3519..a593f900eff4ff1936f9e9504747592ab27e9b4d 100644 (file)
@@ -431,6 +431,7 @@ static int w1_uevent(struct device *dev, char **envp, int num_envp,
        err = add_uevent_var(envp, num_envp, &cur_index, buffer, buffer_size,
                        &cur_len, "W1_SLAVE_ID=%024LX",
                        (unsigned long long)sl->reg_num.id);
+       envp[cur_index] = NULL;
        if (err)
                return err;
 
index 5a5b7116cefb28cc5b56a85f089985796c7b6423..37310b0e81076caf3d5e5970530c3b1325e9e7aa 100644 (file)
@@ -3190,6 +3190,8 @@ COMPATIBLE_IOCTL(SIOCSIWRETRY)
 COMPATIBLE_IOCTL(SIOCGIWRETRY)
 COMPATIBLE_IOCTL(SIOCSIWPOWER)
 COMPATIBLE_IOCTL(SIOCGIWPOWER)
+COMPATIBLE_IOCTL(SIOCSIWAUTH)
+COMPATIBLE_IOCTL(SIOCGIWAUTH)
 /* hiddev */
 COMPATIBLE_IOCTL(HIDIOCGVERSION)
 COMPATIBLE_IOCTL(HIDIOCAPPLICATION)
index 73402c5eeb8afc90452768b57f0cbdb13fcc3469..38eb0b7a1f3d3111c0e4a1b604d7ac40e110cc46 100644 (file)
@@ -894,7 +894,7 @@ magic_found:
                goto again;
        }
 
-
+       sbi->s_flags = flags; /* after this line some functions use s_flags */
        ufs_print_super_stuff(sb, usb1, usb2, usb3);
 
        /*
@@ -1025,8 +1025,6 @@ magic_found:
            UFS_MOUNT_UFSTYPE_44BSD)
                uspi->s_maxsymlinklen =
                    fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen);
-       
-       sbi->s_flags = flags;
 
        inode = iget(sb, UFS_ROOTINO);
        if (!inode || is_bad_inode(inode))
index 16f8e175167d1d8ed95738a4a082b4c192d26fca..36d8f6aa11afeed014b3223c1a00334a502c9e38 100644 (file)
@@ -350,9 +350,10 @@ _xfs_filestream_update_ag(
 /* xfs_fstrm_free_func(): callback for freeing cached stream items. */
 void
 xfs_fstrm_free_func(
-       xfs_ino_t       ino,
-       fstrm_item_t    *item)
+       unsigned long   ino,
+       void            *data)
 {
+       fstrm_item_t    *item  = (fstrm_item_t *)data;
        xfs_inode_t     *ip = item->ip;
        int ref;
 
@@ -438,7 +439,7 @@ xfs_filestream_mount(
        grp_count = 10;
 
        err = xfs_mru_cache_create(&mp->m_filestream, lifetime, grp_count,
-                            (xfs_mru_cache_free_func_t)xfs_fstrm_free_func);
+                            xfs_fstrm_free_func);
 
        return err;
 }
index dacb19739cc2830ae561a811321362e0fbf2bd6d..7174991f4bef2b43ce2611b8c143e40630a83145 100644 (file)
@@ -1920,9 +1920,9 @@ xlog_recover_do_reg_buffer(
                                stale_buf = 1;
                                break;
                        }
-                       if (be16_to_cpu(dip->di_core.di_mode))
+                       if (dip->di_core.di_mode)
                                mode_count++;
-                       if (be16_to_cpu(dip->di_core.di_gen))
+                       if (dip->di_core.di_gen)
                                gen_count++;
                }
 
index 202acb9ff4d071299cb0926c8887cc5d3b57bd53..f85f77a538aa67afb418b4b35ba8c3ccd945516f 100644 (file)
@@ -147,10 +147,6 @@ static inline void unregister_hotplug_dock_device(acpi_handle handle)
 /*--------------------------------------------------------------------------
                                   Suspend/Resume
   -------------------------------------------------------------------------- */
-#ifdef CONFIG_ACPI_SLEEP
 extern int acpi_sleep_init(void);
-#else
-static inline int acpi_sleep_init(void) { return 0; }
-#endif
 
 #endif /*__ACPI_DRIVERS_H__*/
index ec3ffdadb4d257c452d091b30e8322c01c049e3f..99934a999e6651e904bc23310d7c36a17153f05f 100644 (file)
@@ -320,6 +320,8 @@ int acpi_processor_power_init(struct acpi_processor *pr,
 int acpi_processor_cst_has_changed(struct acpi_processor *pr);
 int acpi_processor_power_exit(struct acpi_processor *pr,
                              struct acpi_device *device);
+int acpi_processor_suspend(struct acpi_device * device, pm_message_t state);
+int acpi_processor_resume(struct acpi_device * device);
 
 /* in processor_thermal.c */
 int acpi_processor_get_limit_info(struct acpi_processor *pr);
index 97102ebc54b19d44137945dcac23833eceec6ada..2cb52cf8bd4ebe378b2e6f37890dc857a330dca1 100644 (file)
@@ -24,7 +24,30 @@ static inline int irq_canonicalize(int irq)
 #define irq_canonicalize(irq) (irq)    /* Sane hardware, sane code ... */
 #endif
 
+#ifdef CONFIG_MIPS_MT_SMTC
+
+struct irqaction;
+
+extern unsigned long irq_hwmask[];
+extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
+                          unsigned long hwmask);
+
+static inline void smtc_im_ack_irq(unsigned int irq)
+{
+       if (irq_hwmask[irq] & ST0_IM)
+               set_c0_status(irq_hwmask[irq] & ST0_IM);
+}
+
+#else
+
+static inline void smtc_im_ack_irq(unsigned int irq)
+{
+}
+
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
+
 /*
  * Clear interrupt mask handling "backstop" if irq_hwmask
  * entry so indicates. This implies that the ack() or end()
@@ -38,6 +61,7 @@ do {                                                                  \
                                   ~(irq_hwmask[irq] & 0x0000ff00));    \
 } while (0)
 #else
+
 #define __DO_IRQ_SMTC_HOOK(irq) do { } while (0)
 #endif
 
@@ -60,14 +84,6 @@ do {                                                                 \
 extern void arch_init_irq(void);
 extern void spurious_interrupt(void);
 
-#ifdef CONFIG_MIPS_MT_SMTC
-struct irqaction;
-
-extern unsigned long irq_hwmask[];
-extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
-                          unsigned long hwmask);
-#endif /* CONFIG_MIPS_MT_SMTC */
-
 extern int allocate_irqno(void);
 extern void alloc_legacy_irqno(void);
 extern void free_irqno(unsigned int irq);
index 991c85bb9e3648e83cf44599d36845aa92bd9fc4..e8e3a64eb32254d50a86d87e7028042aae8578d4 100644 (file)
@@ -114,7 +114,6 @@ sctp_state_fn_t sctp_sf_do_4_C;
 sctp_state_fn_t sctp_sf_eat_data_6_2;
 sctp_state_fn_t sctp_sf_eat_data_fast_4_4;
 sctp_state_fn_t sctp_sf_eat_sack_6_2;
-sctp_state_fn_t sctp_sf_tabort_8_4_8;
 sctp_state_fn_t sctp_sf_operr_notify;
 sctp_state_fn_t sctp_sf_t1_init_timer_expire;
 sctp_state_fn_t sctp_sf_t1_cookie_timer_expire;
@@ -247,6 +246,9 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *,
                                              int, __be16);
 struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
                                             union sctp_addr *addr);
+int sctp_verify_asconf(const struct sctp_association *asoc,
+                      struct sctp_paramhdr *param_hdr, void *chunk_end,
+                      struct sctp_paramhdr **errp);
 struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
                                       struct sctp_chunk *asconf);
 int sctp_process_asconf_ack(struct sctp_association *asoc,
index c2fe2dcc9afc9b5d1e2b618b82a7a26aa7c19e0c..490a2928817cbf47aeb3ab69daae1913af0dc4b9 100644 (file)
@@ -421,6 +421,7 @@ struct sctp_signed_cookie {
  * internally.
  */
 union sctp_addr_param {
+       struct sctp_paramhdr p;
        struct sctp_ipv4addr_param v4;
        struct sctp_ipv6addr_param v6;
 };
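The new struct sctp_paramhdr member gives the union a generic view: since every variant begins with the same header layout, code can read a parameter's type and length before deciding which address family is present. A simplified sketch of the idiom with invented stand-in types:

	#include <stdint.h>

	struct paramhdr { uint16_t type; uint16_t length; };
	struct v4_param { struct paramhdr hdr; uint32_t addr; };
	struct v6_param { struct paramhdr hdr; uint8_t addr[16]; };

	union addr_param {
		struct paramhdr p;	/* generic view: type/length only */
		struct v4_param v4;
		struct v6_param v6;
	};

	static uint16_t param_length(const union addr_param *ap)
	{
		/* Valid for either variant: all members share one address
		 * and begin with the same header. */
		return ap->p.length;	/* on the wire this would need ntohs() */
	}
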
index aab881c86a1ab94eb03b3468fbfe3a2b65ebd7e9..0962e0577660722b11d69171a8d4ce5a57017ad6 100644 (file)
@@ -382,23 +382,8 @@ static int tick_broadcast_set_event(ktime_t expires, int force)
 
 int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 {
-       int cpu = smp_processor_id();
-
-       /*
-        * If the CPU is marked for broadcast, enforce oneshot
-        * broadcast mode. The jinxed VAIO does not resume otherwise.
-        * No idea why it ends up in a lower C State during resume
-        * without notifying the clock events layer.
-        */
-       if (cpu_isset(cpu, tick_broadcast_mask))
-               cpu_set(cpu, tick_broadcast_oneshot_mask);
-
        clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
-
-       if(!cpus_empty(tick_broadcast_oneshot_mask))
-               tick_broadcast_set_event(ktime_get(), 1);
-
-       return cpu_isset(cpu, tick_broadcast_oneshot_mask);
+       return 0;
 }
 
 /*
index 50a94eee4d92565e2f247eac3c92ca80c7d45581..495863a500cdca770e332b8faeb96882740aeb80 100644 (file)
@@ -284,7 +284,7 @@ config LOCKDEP
        select KALLSYMS_ALL
 
 config LOCK_STAT
-       bool "Lock usage statisitics"
+       bool "Lock usage statistics"
        depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
        select LOCKDEP
        select DEBUG_SPINLOCK
index afb6c6698b279f84bbaf1e258e4de94d76e53fee..e475f2e1be1375e0125a6a51159833b70bbf6e4e 100644 (file)
@@ -273,8 +273,6 @@ ieee80211softmac_assoc_work(struct work_struct *work)
                        ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify_scan, NULL);
                        if (ieee80211softmac_start_scan(mac)) {
                                dprintk(KERN_INFO PFX "Associate: failed to initiate scan. Is device up?\n");
-                               mac->associnfo.associating = 0;
-                               mac->associnfo.associated = 0;
                        }
                        goto out;
                } else {
index d054e9224b3e270110790f08f540df208cbc0556..442b9875f3fb84acfe9b0577bfc4869944ee0c79 100644 (file)
@@ -70,44 +70,30 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
                              char *extra)
 {
        struct ieee80211softmac_device *sm = ieee80211_priv(net_dev);
-       struct ieee80211softmac_network *n;
        struct ieee80211softmac_auth_queue_item *authptr;
        int length = 0;
 
 check_assoc_again:
        mutex_lock(&sm->associnfo.mutex);
-       /* Check if we're already associating to this or another network
-        * If it's another network, cancel and start over with our new network
-        * If it's our network, ignore the change, we're already doing it!
-        */
        if((sm->associnfo.associating || sm->associnfo.associated) &&
           (data->essid.flags && data->essid.length)) {
-               /* Get the associating network */
-               n = ieee80211softmac_get_network_by_bssid(sm, sm->associnfo.bssid);
-               if(n && n->essid.len == data->essid.length &&
-                  !memcmp(n->essid.data, extra, n->essid.len)) {
-                       dprintk(KERN_INFO PFX "Already associating or associated to "MAC_FMT"\n",
-                               MAC_ARG(sm->associnfo.bssid));
-                       goto out;
-               } else {
-                       dprintk(KERN_INFO PFX "Canceling existing associate request!\n");
-                       /* Cancel assoc work */
-                       cancel_delayed_work(&sm->associnfo.work);
-                       /* We don't have to do this, but it's a little cleaner */
-                       list_for_each_entry(authptr, &sm->auth_queue, list)
-                               cancel_delayed_work(&authptr->work);
-                       sm->associnfo.bssvalid = 0;
-                       sm->associnfo.bssfixed = 0;
-                       sm->associnfo.associating = 0;
-                       sm->associnfo.associated = 0;
-                       /* We must unlock to avoid deadlocks with the assoc workqueue
-                        * on the associnfo.mutex */
-                       mutex_unlock(&sm->associnfo.mutex);
-                       flush_scheduled_work();
-                       /* Avoid race! Check assoc status again. Maybe someone started an
-                        * association while we flushed. */
-                       goto check_assoc_again;
-               }
+               dprintk(KERN_INFO PFX "Canceling existing associate request!\n");
+               /* Cancel assoc work */
+               cancel_delayed_work(&sm->associnfo.work);
+               /* We don't have to do this, but it's a little cleaner */
+               list_for_each_entry(authptr, &sm->auth_queue, list)
+                       cancel_delayed_work(&authptr->work);
+               sm->associnfo.bssvalid = 0;
+               sm->associnfo.bssfixed = 0;
+               sm->associnfo.associating = 0;
+               sm->associnfo.associated = 0;
+               /* We must unlock to avoid deadlocks with the assoc workqueue
+                * on the associnfo.mutex */
+               mutex_unlock(&sm->associnfo.mutex);
+               flush_scheduled_work();
+               /* Avoid race! Check assoc status again. Maybe someone started an
+                * association while we flushed. */
+               goto check_assoc_again;
        }
 
        sm->associnfo.static_essid = 0;
@@ -153,13 +139,13 @@ ieee80211softmac_wx_get_essid(struct net_device *net_dev,
                data->essid.length = sm->associnfo.req_essid.len;
                data->essid.flags = 1;  /* active */
                memcpy(extra, sm->associnfo.req_essid.data, sm->associnfo.req_essid.len);
-       }
-
+               dprintk(KERN_INFO PFX "Getting essid from req_essid\n");
+       } else if (sm->associnfo.associated || sm->associnfo.associating) {
        /* If we're associating/associated, return that */
-       if (sm->associnfo.associated || sm->associnfo.associating) {
                data->essid.length = sm->associnfo.associate_essid.len;
                data->essid.flags = 1;  /* active */
                memcpy(extra, sm->associnfo.associate_essid.data, sm->associnfo.associate_essid.len);
+               dprintk(KERN_INFO PFX "Getting essid from associate_essid\n");
        }
        mutex_unlock(&sm->associnfo.mutex);
 
index 7286c389a4d0ccd418a1d953e3dd921b179a9730..ff2172ffd8611758ba00efa2abc349892ca11015 100644 (file)
@@ -5259,7 +5259,7 @@ static void __exit ieee80211_exit(void)
 }
 
 
-module_init(ieee80211_init);
+subsys_initcall(ieee80211_init);
 module_exit(ieee80211_exit);
 
 MODULE_DESCRIPTION("IEEE 802.11 subsystem");
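The module_init() to subsys_initcall() switches in this merge matter only for built-in code: initcalls run in level order, so a subsystem registered at the subsys level is guaranteed to be up before driver code whose module_init() lands at the later device level. A rough sketch of the ordering with hypothetical functions, shown in one file for brevity:

	#include <linux/init.h>
	#include <linux/module.h>

	/* Runs early, together with other subsystem setup. */
	static int __init my_subsystem_init(void)
	{
		/* register facilities the drivers below will need */
		return 0;
	}
	subsys_initcall(my_subsystem_init);

	/* Runs later, at the device initcall level, so the subsystem
	 * above is already initialized when this executes. */
	static int __init my_driver_init(void)
	{
		return 0;
	}
	module_init(my_driver_init);
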
index f6780d63b34279e672b96cb0cf6400e37f770f5d..17b9f46bbf2bb6dd912c1b11e1d3b1891ee1208f 100644 (file)
@@ -431,7 +431,7 @@ static void __exit rate_control_simple_exit(void)
 }
 
 
-module_init(rate_control_simple_init);
+subsys_initcall(rate_control_simple_init);
 module_exit(rate_control_simple_exit);
 
 MODULE_DESCRIPTION("Simple rate control algorithm for ieee80211");
index 89ce815296942651885fb6ec68949f082e9ca410..7ab82b376e1bc92f8d656bfa30f7a3cf4b2b1152 100644 (file)
@@ -424,7 +424,7 @@ static int wme_qdiscop_init(struct Qdisc *qd, struct rtattr *opt)
                skb_queue_head_init(&q->requeued[i]);
                q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops,
                                                 qd->handle);
-               if (q->queues[i] == 0) {
+               if (!q->queues[i]) {
                        q->queues[i] = &noop_qdisc;
                        printk(KERN_ERR "%s child qdisc %i creation failed", dev->name, i);
                }
index 47e56017f4ce887716e0dee8a4ba87d3ece37ac9..f9a0c9276e3b55f8c1497beacba30c8426ae0d28 100644 (file)
@@ -622,6 +622,14 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
                if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type)
                        goto discard;
 
+               /* RFC 4460, 2.11.2
+                * This will discard packets with INIT chunk bundled as
+                * subsequent chunks in the packet.  When INIT is first,
+                * the normal INIT processing will discard the chunk.
+                */
+               if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
+                       goto discard;
+
                /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR
                 * or a COOKIE ACK the SCTP Packet should be silently
                 * discarded.
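The added check relies on the first chunk of an SCTP packet starting exactly at skb->data; a bundled INIT therefore shows up with a chunk pointer strictly past the start of the payload. A rough user-space analogue of that position test, with an invented buffer layout:

	#include <stdio.h>

	struct chunkhdr { unsigned char type, flags; unsigned short length; };

	int main(void)
	{
		unsigned char packet[64] = { 0 };
		unsigned char *data = packet;			/* like skb->data */
		/* Pretend a second chunk begins 8 bytes in. */
		struct chunkhdr *ch = (struct chunkhdr *)(data + 8);

		/* INIT must be first; anything else is bundled and dropped. */
		if ((unsigned char *)ch != data)
			puts("bundled INIT: discard packet");
		return 0;
	}
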
index 88aa2240754935522962a001088286516a8f8011..e4ea7fdf36ed798c23d3b2dfb6e3f2a0e54e893b 100644 (file)
@@ -130,6 +130,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
                        /* Force chunk->skb->data to chunk->chunk_end.  */
                        skb_pull(chunk->skb,
                                 chunk->chunk_end - chunk->skb->data);
+
+                       /* Verify that we have at least a chunk header's
+                        * worth of buffer left.
+                        */
+                       if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
+                               sctp_chunk_free(chunk);
+                               chunk = queue->in_progress = NULL;
+                       }
                }
        }
 
index 2e34220d94cde899fffb551091305eddf9323b7c..23ae37ec871167d17c37ddbdbb9d4095e26a4575 100644 (file)
@@ -2499,6 +2499,52 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
        return SCTP_ERROR_NO_ERROR;
 }
 
+/* Verify the ASCONF packet before we process it.  */
+int sctp_verify_asconf(const struct sctp_association *asoc,
+                      struct sctp_paramhdr *param_hdr, void *chunk_end,
+                      struct sctp_paramhdr **errp) {
+       sctp_addip_param_t *asconf_param;
+       union sctp_params param;
+       int length, plen;
+
+       param.v = (sctp_paramhdr_t *) param_hdr;
+       while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) {
+               length = ntohs(param.p->length);
+               *errp = param.p;
+
+               if (param.v > chunk_end - length ||
+                   length < sizeof(sctp_paramhdr_t))
+                       return 0;
+
+               switch (param.p->type) {
+               case SCTP_PARAM_ADD_IP:
+               case SCTP_PARAM_DEL_IP:
+               case SCTP_PARAM_SET_PRIMARY:
+                       asconf_param = (sctp_addip_param_t *)param.v;
+                       plen = ntohs(asconf_param->param_hdr.length);
+                       if (plen < sizeof(sctp_addip_param_t) +
+                           sizeof(sctp_paramhdr_t))
+                               return 0;
+                       break;
+               case SCTP_PARAM_SUCCESS_REPORT:
+               case SCTP_PARAM_ADAPTATION_LAYER_IND:
+                       if (length != sizeof(sctp_addip_param_t))
+                               return 0;
+
+                       break;
+               default:
+                       break;
+               }
+
+               param.v += WORD_ROUND(length);
+       }
+
+       if (param.v != chunk_end)
+               return 0;
+
+       return 1;
+}
+
 /* Process an incoming ASCONF chunk with the next expected serial no. and
  * return an ASCONF_ACK chunk to be sent in response.
  */
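The sctp_verify_asconf() added above is an instance of the standard defensive TLV walk: never trust a parameter's self-declared length until it has been checked against both the minimum header size and the end of the chunk. A generic sketch of the same walk over an opaque buffer, with simplified types and 4-byte rounding standing in for WORD_ROUND():

	#include <stddef.h>
	#include <stdint.h>

	struct tlv { uint16_t type; uint16_t len; /* value follows */ };

	/* Returns 1 if every TLV fits exactly inside buf[0..size), else 0. */
	static int tlv_walk_ok(const uint8_t *buf, size_t size)
	{
		const uint8_t *p = buf, *end = buf + size;

		while ((size_t)(end - p) >= sizeof(struct tlv)) {
			size_t len = ((const struct tlv *)p)->len; /* ntohs() on the wire */

			/* Reject lengths too small or running past the buffer. */
			if (len < sizeof(struct tlv) || len > (size_t)(end - p))
				return 0;

			p += (len + 3) & ~(size_t)3;	/* round up to 4 bytes */
		}
		return p == end;	/* no trailing garbage allowed */
	}
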
index 177528ed3e1b1d9f50ea2c9be844d5a5d0553187..a583d67cab63859439c5a38fb54133afc406a401 100644 (file)
@@ -90,6 +90,11 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
                                             const sctp_subtype_t type,
                                             void *arg,
                                             sctp_cmd_seq_t *commands);
+static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
+                                       const struct sctp_association *asoc,
+                                       const sctp_subtype_t type,
+                                       void *arg,
+                                       sctp_cmd_seq_t *commands);
 static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
 
 static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
@@ -98,6 +103,7 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
                                           struct sctp_transport *transport);
 
 static sctp_disposition_t sctp_sf_abort_violation(
+                                    const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     void *arg,
                                     sctp_cmd_seq_t *commands,
@@ -111,6 +117,13 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
                                     void *arg,
                                     sctp_cmd_seq_t *commands);
 
+static sctp_disposition_t sctp_sf_violation_paramlen(
+                                    const struct sctp_endpoint *ep,
+                                    const struct sctp_association *asoc,
+                                    const sctp_subtype_t type,
+                                    void *arg,
+                                    sctp_cmd_seq_t *commands);
+
 static sctp_disposition_t sctp_sf_violation_ctsn(
                                     const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
@@ -118,6 +131,13 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
                                     void *arg,
                                     sctp_cmd_seq_t *commands);
 
+static sctp_disposition_t sctp_sf_violation_chunk(
+                                    const struct sctp_endpoint *ep,
+                                    const struct sctp_association *asoc,
+                                    const sctp_subtype_t type,
+                                    void *arg,
+                                    sctp_cmd_seq_t *commands);
+
 /* Small helper function that checks if the chunk length
  * is of the appropriate length.  The 'required_length' argument
  * is set to be the size of a specific chunk we are testing.
@@ -181,16 +201,21 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
        struct sctp_chunk *chunk = arg;
        struct sctp_ulpevent *ev;
 
+       if (!sctp_vtag_verify_either(chunk, asoc))
+               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+
        /* RFC 2960 6.10 Bundling
         *
         * An endpoint MUST NOT bundle INIT, INIT ACK or
         * SHUTDOWN COMPLETE with any other chunks.
         */
        if (!chunk->singleton)
-               return SCTP_DISPOSITION_VIOLATION;
+               return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
 
-       if (!sctp_vtag_verify_either(chunk, asoc))
-               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+       /* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */
+       if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
+               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+                                                 commands);
 
        /* RFC 2960 10.2 SCTP-to-ULP
         *
@@ -450,17 +475,17 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
        if (!sctp_vtag_verify(chunk, asoc))
                return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
 
-       /* Make sure that the INIT-ACK chunk has a valid length */
-       if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
-               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
-                                                 commands);
        /* 6.10 Bundling
         * An endpoint MUST NOT bundle INIT, INIT ACK or
         * SHUTDOWN COMPLETE with any other chunks.
         */
        if (!chunk->singleton)
-               return SCTP_DISPOSITION_VIOLATION;
+               return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
 
+       /* Make sure that the INIT-ACK chunk has a valid length */
+       if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
+               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+                                                 commands);
        /* Grab the INIT header.  */
        chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
 
@@ -585,7 +610,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
         * control endpoint, respond with an ABORT.
         */
        if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
-               return sctp_sf_ootb(ep, asoc, type, arg, commands);
+               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
 
        /* Make sure that the COOKIE_ECHO chunk has a valid length.
         * In this case, we check that we have enough for at least a
@@ -2496,6 +2521,11 @@ sctp_disposition_t sctp_sf_do_9_2_reshutack(const struct sctp_endpoint *ep,
        struct sctp_chunk *chunk = (struct sctp_chunk *) arg;
        struct sctp_chunk *reply;
 
+       /* Make sure that the chunk has a valid length */
+       if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
+               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+                                                 commands);
+
        /* Since we are not going to really process this INIT, there
         * is no point in verifying chunk boundaries.  Just generate
         * the SHUTDOWN ACK.
@@ -2929,7 +2959,7 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
  *
  * The return value is the disposition of the chunk.
 */
-sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const sctp_subtype_t type,
                                        void *arg,
@@ -2965,6 +2995,7 @@ sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
 
                SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
 
+               sctp_sf_pdiscard(ep, asoc, type, arg, commands);
                return SCTP_DISPOSITION_CONSUME;
        }
 
@@ -3125,14 +3156,14 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
 
        ch = (sctp_chunkhdr_t *) chunk->chunk_hdr;
        do {
-               /* Break out if chunk length is less then minimal. */
+               /* Report violation if the chunk is less than minimal */
                if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
-                       break;
-
-               ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
-               if (ch_end > skb_tail_pointer(skb))
-                       break;
+                       return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+                                                 commands);
 
+               /* Now that we know we at least have a chunk header,
+                * do things that are type appropriate.
+                */
                if (SCTP_CID_SHUTDOWN_ACK == ch->type)
                        ootb_shut_ack = 1;
 
@@ -3144,15 +3175,19 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
                if (SCTP_CID_ABORT == ch->type)
                        return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
 
+               /* Report violation if chunk len overflows */
+               ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+               if (ch_end > skb_tail_pointer(skb))
+                       return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+                                                 commands);
+
                ch = (sctp_chunkhdr_t *) ch_end;
        } while (ch_end < skb_tail_pointer(skb));
 
        if (ootb_shut_ack)
-               sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands);
+               return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands);
        else
-               sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
-
-       return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+               return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
 }
 
 /*
@@ -3218,7 +3253,11 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
                if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
                        return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
 
-               return SCTP_DISPOSITION_CONSUME;
+               /* We need to discard the rest of the packet to prevent
+                * potential bombing attacks from additional bundled chunks.
+                * This is documented in SCTP Threats ID.
+                */
+               return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
        }
 
        return SCTP_DISPOSITION_NOMEM;
@@ -3241,6 +3280,13 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep,
                                      void *arg,
                                      sctp_cmd_seq_t *commands)
 {
+       struct sctp_chunk *chunk = arg;
+
+       /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
+       if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
+               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+                                                 commands);
+
        /* Although we do have an association in this case, it corresponds
         * to a restarted association. So the packet is treated as an OOTB
         * packet and the state function that handles OOTB SHUTDOWN_ACK is
@@ -3257,8 +3303,11 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
 {
        struct sctp_chunk       *chunk = arg;
        struct sctp_chunk       *asconf_ack = NULL;
+       struct sctp_paramhdr    *err_param = NULL;
        sctp_addiphdr_t         *hdr;
+       union sctp_addr_param   *addr_param;
        __u32                   serial;
+       int                     length;
 
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
@@ -3274,6 +3323,20 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
        hdr = (sctp_addiphdr_t *)chunk->skb->data;
        serial = ntohl(hdr->serial);
 
+       addr_param = (union sctp_addr_param *)hdr->params;
+       length = ntohs(addr_param->p.length);
+       if (length < sizeof(sctp_paramhdr_t))
+               return sctp_sf_violation_paramlen(ep, asoc, type,
+                          (void *)addr_param, commands);
+
+       /* Verify the ASCONF chunk before processing it. */
+       if (!sctp_verify_asconf(asoc,
+           (sctp_paramhdr_t *)((void *)addr_param + length),
+           (void *)chunk->chunk_end,
+           &err_param))
+               return sctp_sf_violation_paramlen(ep, asoc, type,
+                          (void *)&err_param, commands);
+
        /* ADDIP 4.2 C1) Compare the value of the serial number to the value
         * the endpoint stored in a new association variable
         * 'Peer-Serial-Number'.
@@ -3328,6 +3391,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
        struct sctp_chunk       *asconf_ack = arg;
        struct sctp_chunk       *last_asconf = asoc->addip_last_asconf;
        struct sctp_chunk       *abort;
+       struct sctp_paramhdr    *err_param = NULL;
        sctp_addiphdr_t         *addip_hdr;
        __u32                   sent_serial, rcvd_serial;
 
@@ -3345,6 +3409,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
        addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data;
        rcvd_serial = ntohl(addip_hdr->serial);
 
+       /* Verify the ASCONF-ACK chunk before processing it. */
+       if (!sctp_verify_asconf(asoc,
+           (sctp_paramhdr_t *)addip_hdr->params,
+           (void *)asconf_ack->chunk_end,
+           &err_param))
+               return sctp_sf_violation_paramlen(ep, asoc, type,
+                          (void *)&err_param, commands);
+
        if (last_asconf) {
                addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr;
                sent_serial = ntohl(addip_hdr->serial);
@@ -3655,6 +3727,16 @@ sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep,
                                         void *arg,
                                         sctp_cmd_seq_t *commands)
 {
+       struct sctp_chunk *chunk = arg;
+
+       /* Make sure that the chunk has a valid length.
+        * Since we don't know the chunk type, we use a general
+        * chunkhdr structure to make a comparison.
+        */
+       if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
+               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+                                                 commands);
+
        SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk);
        return SCTP_DISPOSITION_DISCARD;
 }
@@ -3710,6 +3792,13 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
                                     void *arg,
                                     sctp_cmd_seq_t *commands)
 {
+       struct sctp_chunk *chunk = arg;
+
+       /* Make sure that the chunk has a valid length. */
+       if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
+               return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+                                                 commands);
+
        return SCTP_DISPOSITION_VIOLATION;
 }
 
@@ -3717,12 +3806,14 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
  * Common function to handle a protocol violation.
  */
 static sctp_disposition_t sctp_sf_abort_violation(
+                                    const struct sctp_endpoint *ep,
                                     const struct sctp_association *asoc,
                                     void *arg,
                                     sctp_cmd_seq_t *commands,
                                     const __u8 *payload,
                                     const size_t paylen)
 {
+       struct sctp_packet *packet = NULL;
        struct sctp_chunk *chunk =  arg;
        struct sctp_chunk *abort = NULL;
 
@@ -3731,30 +3822,51 @@ static sctp_disposition_t sctp_sf_abort_violation(
        if (!abort)
                goto nomem;
 
-       sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
-       SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+       if (asoc) {
+               sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
 
-       if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
-               sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
-                               SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
-               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-                               SCTP_ERROR(ECONNREFUSED));
-               sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
-                               SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
+               if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
+                       sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+                                       SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+                       sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                                       SCTP_ERROR(ECONNREFUSED));
+                       sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
+                                       SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
+               } else {
+                       sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                                       SCTP_ERROR(ECONNABORTED));
+                       sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+                                       SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
+                       SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+               }
        } else {
-               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-                               SCTP_ERROR(ECONNABORTED));
-               sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
-                               SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
-               SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+               packet = sctp_ootb_pkt_new(asoc, chunk);
+
+               if (!packet)
+                       goto nomem_pkt;
+
+               if (sctp_test_T_bit(abort))
+                       packet->vtag = ntohl(chunk->sctp_hdr->vtag);
+
+               abort->skb->sk = ep->base.sk;
+
+               sctp_packet_append_chunk(packet, abort);
+
+               sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
+                       SCTP_PACKET(packet));
+
+               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
        }
 
-       sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
+       sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
 
        SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
 
        return SCTP_DISPOSITION_ABORT;
 
+nomem_pkt:
+       sctp_chunk_free(abort);
 nomem:
        return SCTP_DISPOSITION_NOMEM;
 }
@@ -3787,7 +3899,24 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
 {
        char err_str[]="The following chunk had invalid length:";
 
-       return sctp_sf_abort_violation(asoc, arg, commands, err_str,
+       return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
+                                       sizeof(err_str));
+}
+
+/*
+ * Handle a protocol violation when the parameter length is invalid.
+ * "Invalid" length is identified as smaller then the minimal length a
+ * given parameter can be.
+ */
+static sctp_disposition_t sctp_sf_violation_paramlen(
+                                    const struct sctp_endpoint *ep,
+                                    const struct sctp_association *asoc,
+                                    const sctp_subtype_t type,
+                                    void *arg,
+                                    sctp_cmd_seq_t *commands) {
+       char err_str[] = "The following parameter had invalid length:";
+
+       return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
                                        sizeof(err_str));
 }
 
@@ -3806,10 +3935,31 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
 {
        char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:";
 
-       return sctp_sf_abort_violation(asoc, arg, commands, err_str,
+       return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
                                        sizeof(err_str));
 }
 
+/* Handle protocol violation of an invalid chunk bundling.  For example,
+ * when we have an association and we receive bundled INIT-ACK, or
+ * SHUTDOWN-COMPLETE, our peer is clearly violating the "MUST NOT bundle"
+ * statement from the specs.  Additionally, there might be an attacker
+ * on the path and we may not want to continue this communication.
+ */
+static sctp_disposition_t sctp_sf_violation_chunk(
+                                    const struct sctp_endpoint *ep,
+                                    const struct sctp_association *asoc,
+                                    const sctp_subtype_t type,
+                                    void *arg,
+                                    sctp_cmd_seq_t *commands)
+{
+       char err_str[]="The following chunk violates protocol:";
+
+       if (!asoc)
+               return sctp_sf_violation(ep, asoc, type, arg, commands);
+
+       return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
+                                       sizeof(err_str));
+}
 /***************************************************************************
  * These are the state functions for handling primitive (Section 10) events.
  ***************************************************************************/
@@ -5176,7 +5326,22 @@ static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc
         * association exists, otherwise, use the peer's vtag.
         */
        if (asoc) {
-               vtag = asoc->peer.i.init_tag;
+               /* Special case the INIT-ACK as there is no peer's vtag
+                * yet.
+                */
+               switch(chunk->chunk_hdr->type) {
+               case SCTP_CID_INIT_ACK:
+               {
+                       sctp_initack_chunk_t *initack;
+
+                       initack = (sctp_initack_chunk_t *)chunk->chunk_hdr;
+                       vtag = ntohl(initack->init_hdr.init_tag);
+                       break;
+               }
+               default:
+                       vtag = asoc->peer.i.init_tag;
+                       break;
+               }
        } else {
                /* Special case the INIT and stale COOKIE_ECHO as there is no
                 * vtag yet.
index 70a91ece3c49444de91b41b7db7f4b486a16e7de..ddb0ba3974b05a31807c8652f97ad9a2cfc10fcf 100644 (file)
@@ -110,7 +110,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
        /* SCTP_STATE_EMPTY */ \
        TYPE_SCTP_FUNC(sctp_sf_ootb), \
        /* SCTP_STATE_CLOSED */ \
-       TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+       TYPE_SCTP_FUNC(sctp_sf_ootb), \
        /* SCTP_STATE_COOKIE_WAIT */ \
        TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
        /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -173,7 +173,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
        /*  SCTP_STATE_EMPTY */ \
        TYPE_SCTP_FUNC(sctp_sf_ootb), \
        /* SCTP_STATE_CLOSED */ \
-       TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+       TYPE_SCTP_FUNC(sctp_sf_ootb), \
        /* SCTP_STATE_COOKIE_WAIT */ \
        TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
        /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -194,7 +194,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
        /*  SCTP_STATE_EMPTY */ \
        TYPE_SCTP_FUNC(sctp_sf_ootb), \
        /* SCTP_STATE_CLOSED */ \
-       TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+       TYPE_SCTP_FUNC(sctp_sf_ootb), \
        /* SCTP_STATE_COOKIE_WAIT */ \
        TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
        /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -216,7 +216,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
        /*  SCTP_STATE_EMPTY */ \
        TYPE_SCTP_FUNC(sctp_sf_ootb), \
        /* SCTP_STATE_CLOSED */ \
-       TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+       TYPE_SCTP_FUNC(sctp_sf_ootb), \
        /* SCTP_STATE_COOKIE_WAIT */ \
        TYPE_SCTP_FUNC(sctp_sf_violation), \
        /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -258,7 +258,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
        /* SCTP_STATE_EMPTY */ \
        TYPE_SCTP_FUNC(sctp_sf_ootb), \
        /* SCTP_STATE_CLOSED */ \
-       TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+       TYPE_SCTP_FUNC(sctp_sf_ootb), \
        /* SCTP_STATE_COOKIE_WAIT */ \
        TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
        /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -300,7 +300,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
        /* SCTP_STATE_EMPTY */ \
        TYPE_SCTP_FUNC(sctp_sf_ootb), \
        /* SCTP_STATE_CLOSED */ \
-       TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+       TYPE_SCTP_FUNC(sctp_sf_ootb), \
        /* SCTP_STATE_COOKIE_WAIT */ \
        TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
        /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -499,7 +499,7 @@ static const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_
        /* SCTP_STATE_EMPTY */ \
        TYPE_SCTP_FUNC(sctp_sf_ootb), \
        /* SCTP_STATE_CLOSED */ \
-       TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+       TYPE_SCTP_FUNC(sctp_sf_ootb), \
        /* SCTP_STATE_COOKIE_WAIT */ \
        TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
        /* SCTP_STATE_COOKIE_ECHOED */ \
@@ -528,7 +528,7 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
        /* SCTP_STATE_EMPTY */
        TYPE_SCTP_FUNC(sctp_sf_ootb),
        /* SCTP_STATE_CLOSED */
-       TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8),
+       TYPE_SCTP_FUNC(sctp_sf_ootb),
        /* SCTP_STATE_COOKIE_WAIT */
        TYPE_SCTP_FUNC(sctp_sf_unk_chunk),
        /* SCTP_STATE_COOKIE_ECHOED */
index 7eabd55417a5eab222a4c756d1e2181513575cc7..9771451eae217d9624db3efbac4e08f91ec9ab5c 100644 (file)
@@ -213,7 +213,7 @@ out_fail_notifier:
 out_fail_sysfs:
        return err;
 }
-module_init(cfg80211_init);
+subsys_initcall(cfg80211_init);
 
 static void cfg80211_exit(void)
 {
index 88aaacd9f82275a3266b67165bb9a470d8069f4c..2d5d2255a27cd54bb12c1a21724680a5cdba617d 100644 (file)
@@ -52,12 +52,14 @@ static void wiphy_dev_release(struct device *dev)
        cfg80211_dev_free(rdev);
 }
 
+#ifdef CONFIG_HOTPLUG
 static int wiphy_uevent(struct device *dev, char **envp,
                        int num_envp, char *buf, int size)
 {
        /* TODO, we probably need stuff here */
        return 0;
 }
+#endif
 
 struct class ieee80211_class = {
        .name = "ieee80211",
index f057430db0d01021d6b2c27df157b64a4958fd7a..9b5656d8bccac7d140168bfbd1e65af4a0e96c13 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/seq_file.h>
 #include <asm/uaccess.h>
 #include <linux/dma-mapping.h>
 #include <linux/moduleparam.h>
@@ -481,53 +482,54 @@ static void free_all_reserved_pages(void)
 #define SND_MEM_PROC_FILE      "driver/snd-page-alloc"
 static struct proc_dir_entry *snd_mem_proc;
 
-static int snd_mem_proc_read(char *page, char **start, off_t off,
-                            int count, int *eof, void *data)
+static int snd_mem_proc_read(struct seq_file *seq, void *offset)
 {
-       int len = 0;
        long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
        struct snd_mem_list *mem;
        int devno;
        static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" };
 
        mutex_lock(&list_mutex);
-       len += snprintf(page + len, count - len,
-                       "pages  : %li bytes (%li pages per %likB)\n",
-                       pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
+       seq_printf(seq, "pages  : %li bytes (%li pages per %likB)\n",
+                  pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
        devno = 0;
        list_for_each_entry(mem, &mem_list_head, list) {
                devno++;
-               len += snprintf(page + len, count - len,
-                               "buffer %d : ID %08x : type %s\n",
-                               devno, mem->id, types[mem->buffer.dev.type]);
-               len += snprintf(page + len, count - len,
-                               "  addr = 0x%lx, size = %d bytes\n",
-                               (unsigned long)mem->buffer.addr, (int)mem->buffer.bytes);
+               seq_printf(seq, "buffer %d : ID %08x : type %s\n",
+                          devno, mem->id, types[mem->buffer.dev.type]);
+               seq_printf(seq, "  addr = 0x%lx, size = %d bytes\n",
+                          (unsigned long)mem->buffer.addr,
+                          (int)mem->buffer.bytes);
        }
        mutex_unlock(&list_mutex);
-       return len;
+       return 0;
+}
+
+static int snd_mem_proc_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, snd_mem_proc_read, NULL);
 }
 
 /* FIXME: for pci only - other bus? */
 #ifdef CONFIG_PCI
 #define gettoken(bufp) strsep(bufp, " \t\n")
 
-static int snd_mem_proc_write(struct file *file, const char __user *buffer,
-                             unsigned long count, void *data)
+static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
+                                 size_t count, loff_t * ppos)
 {
        char buf[128];
        char *token, *p;
 
-       if (count > ARRAY_SIZE(buf) - 1)
-               count = ARRAY_SIZE(buf) - 1;
+       if (count > sizeof(buf) - 1)
+               return -EINVAL;
        if (copy_from_user(buf, buffer, count))
                return -EFAULT;
-       buf[ARRAY_SIZE(buf) - 1] = '\0';
+       buf[count] = '\0';
 
        p = buf;
        token = gettoken(&p);
        if (! token || *token == '#')
-               return (int)count;
+               return count;
        if (strcmp(token, "add") == 0) {
                char *endp;
                int vendor, device, size, buffers;
@@ -548,7 +550,7 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer,
                    (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
                    buffers > 4) {
                        printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
-                       return (int)count;
+                       return count;
                }
                vendor &= 0xffff;
                device &= 0xffff;
@@ -560,7 +562,7 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer,
                                if (pci_set_dma_mask(pci, mask) < 0 ||
                                    pci_set_consistent_dma_mask(pci, mask) < 0) {
                                        printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
-                                       return (int)count;
+                                       return count;
                                }
                        }
                        for (i = 0; i < buffers; i++) {
@@ -570,7 +572,7 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer,
                                                        size, &dmab) < 0) {
                                        printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
                                        pci_dev_put(pci);
-                                       return (int)count;
+                                       return count;
                                }
                                snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
                        }
@@ -596,9 +598,21 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer,
                free_all_reserved_pages();
        else
                printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
-       return (int)count;
+       return count;
 }
 #endif /* CONFIG_PCI */
+
+static const struct file_operations snd_mem_proc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = snd_mem_proc_open,
+       .read           = seq_read,
+#ifdef CONFIG_PCI
+       .write          = snd_mem_proc_write,
+#endif
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 #endif /* CONFIG_PROC_FS */
 
 /*
@@ -609,12 +623,8 @@ static int __init snd_mem_init(void)
 {
 #ifdef CONFIG_PROC_FS
        snd_mem_proc = create_proc_entry(SND_MEM_PROC_FILE, 0644, NULL);
-       if (snd_mem_proc) {
-               snd_mem_proc->read_proc = snd_mem_proc_read;
-#ifdef CONFIG_PCI
-               snd_mem_proc->write_proc = snd_mem_proc_write;
-#endif
-       }
+       if (snd_mem_proc)
+               snd_mem_proc->proc_fops = &snd_mem_proc_fops;
 #endif
        return 0;
 }
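The memalloc.c conversion above replaces the old read_proc/write_proc hooks with a file_operations built on the seq_file single_open() helper, which handles buffering and partial reads that the hand-rolled snprintf-into-page code had to manage itself. A minimal sketch of the same pattern for a hypothetical proc file, using the era's API as wired up above:

	#include <linux/module.h>
	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	static int demo_show(struct seq_file *seq, void *offset)
	{
		seq_printf(seq, "hello from seq_file\n");
		return 0;
	}

	static int demo_open(struct inode *inode, struct file *file)
	{
		return single_open(file, demo_show, NULL);
	}

	static const struct file_operations demo_fops = {
		.owner		= THIS_MODULE,
		.open		= demo_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

	static int __init demo_init(void)
	{
		struct proc_dir_entry *e = create_proc_entry("demo", 0444, NULL);
		if (e)
			e->proc_fops = &demo_fops;
		return 0;
	}
	module_init(demo_init);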